from langchain.text_splitter import RecursiveCharacterTextSplitter # type: ignore
from langchain_openai import OpenAIEmbeddings # type: ignore 
from app.database.vector_embedings.create_vector_search_index import create_vector_search_index
from app.database.fetch_data import get_mongo_client



def split_and_store(client, documents, character_id):
    """Split documents into chunks and store their embeddings in MongoDB.

    Chunks are stored in the ``Knowledge_bank`` database, in a collection
    named after *character_id*, as ``{"text": ..., "embedding": ...}``
    documents. A vector search index is then created on that collection.

    Args:
        client: A connected MongoDB client.
        documents: LangChain documents to split and embed.
        character_id: Name of the target collection (one per character).

    Returns:
        A human-readable status message naming the collection used.
    """
    db = client["Knowledge_bank"]
    text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
    chunks = text_splitter.split_documents(documents)
    embeddings = OpenAIEmbeddings()

    # Get collection for the specific character
    collection = db[character_id]

    if chunks:
        texts = [chunk.page_content for chunk in chunks]
        # Batch-embed all chunks in one request instead of one API call
        # per chunk (embed_query in a loop).
        vectors = embeddings.embed_documents(texts)
        # Single bulk write instead of one round-trip per chunk.
        # Guarded by `if chunks`: insert_many([]) raises in pymongo.
        collection.insert_many(
            [{"text": text, "embedding": vector} for text, vector in zip(texts, vectors)]
        )

    # Create vector search index (done even for an empty document set,
    # matching the original behavior).
    create_vector_search_index(collection)

    return f"Text split & embeddings stored in collection: {character_id} and vector search index created successfully."