Vector storage: Qdrant initialization and configuration

2026-02-04 01:10:07 +03:00
parent c37aec1d99
commit f36108d652
3 changed files with 139 additions and 9 deletions


@@ -0,0 +1,129 @@
"""
Vector storage configuration for the RAG solution using LlamaIndex and Qdrant.
This module provides initialization and configuration for:
- Qdrant vector storage connection
- Ollama embedding model
- Automatic collection creation
"""
import os
from typing import Optional

from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.qdrant import QdrantVectorStore
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama
from qdrant_client import QdrantClient, models
from loguru import logger


def initialize_vector_storage(
    collection_name: str = "documents_llamaindex",
    host: str = "localhost",
    port: int = 6333,
    grpc_port: int = 6334,
    ollama_base_url: str = "http://localhost:11434",
    ollama_embed_model: Optional[str] = None,
) -> tuple[QdrantVectorStore, VectorStoreIndex]:
"""
Initialize Qdrant vector storage with Ollama embeddings.
Args:
collection_name: Name of the Qdrant collection
host: Qdrant host address
port: Qdrant REST API port
grpc_port: Qdrant gRPC API port
ollama_base_url: Base URL for Ollama API
ollama_embed_model: Name of the Ollama embedding model
Returns:
Tuple of (QdrantVectorStore, VectorStoreIndex)
"""
    logger.info(f"Initializing vector storage with collection: {collection_name}")

    # Get the embedding model from the environment if not provided
    if ollama_embed_model is None:
        ollama_embed_model = os.getenv("OLLAMA_EMBEDDING_MODEL", "nomic-embed-text")
    logger.info(f"Using Ollama embedding model: {ollama_embed_model}")

    try:
        # Initialize the Qdrant client (REST on `port`, gRPC on `grpc_port`)
        client = QdrantClient(host=host, port=port, grpc_port=grpc_port)

        # Check if the collection exists; create it if it doesn't
        collections = client.get_collections().collections
        collection_names = [coll.name for coll in collections]
        if collection_name not in collection_names:
            logger.info(f"Collection '{collection_name}' does not exist, creating...")
            client.create_collection(
                collection_name=collection_name,
                vectors_config=models.VectorParams(
                    # Must match the embedding model's output dimensionality;
                    # nomic-embed-text produces 768-dimensional vectors
                    size=768,
                    distance=models.Distance.COSINE,
                ),
            )
            logger.info(f"Collection '{collection_name}' created successfully")
        else:
            logger.info(f"Collection '{collection_name}' already exists")

        # Initialize the Qdrant vector store
        vector_store = QdrantVectorStore(
            client=client,
            collection_name=collection_name,
        )

        # Initialize the Ollama embedding model
        embed_model = OllamaEmbedding(
            model_name=ollama_embed_model,
            base_url=ollama_base_url,
        )

        # Create an index on top of the vector store with the embedding model
        index = VectorStoreIndex.from_vector_store(
            vector_store=vector_store,
            embed_model=embed_model,
        )

        logger.info("Vector storage initialized successfully")
        return vector_store, index
    except Exception as e:
        logger.error(f"Failed to initialize vector storage: {e}")
        raise
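
# A minimal ingestion sketch (an assumption, not part of this commit): once the
# index is initialized, documents can be embedded and stored through it, e.g.:
#
#     from llama_index.core import Document
#     vector_store, index = initialize_vector_storage()
#     index.insert(Document(text="Qdrant stores the resulting embedding."))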


# Optional: alternative embedding configuration using OpenAI-compatible models
# via OpenRouter. Uncomment and configure as needed for future use.
#
# from llama_index.embeddings.openai import OpenAIEmbedding
#
# def initialize_openai_embeddings():
#     # Use the OpenRouter API key from the environment
#     os.environ["OPENAI_API_KEY"] = os.getenv("OPENROUTER_API_KEY", "")
#
#     embed_model = OpenAIEmbedding(
#         model="openai/text-embedding-3-small",  # Or another suitable model
#         api_base="https://openrouter.ai/api/v1",  # OpenRouter endpoint
#     )
#     return embed_model
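#
# A hedged usage sketch for this alternative path (assumes OPENROUTER_API_KEY
# is set and that the endpoint actually serves embedding requests):
#
#     embed_model = initialize_openai_embeddings()
#     index = VectorStoreIndex.from_vector_store(vector_store, embed_model=embed_model)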


def get_vector_store_and_index() -> tuple[QdrantVectorStore, VectorStoreIndex]:
    """
    Convenience function to get the initialized vector store and index.

    Returns:
        Tuple of (QdrantVectorStore, VectorStoreIndex)
    """
    return initialize_vector_storage()


if __name__ == "__main__":
    # Example usage
    logger.info("Testing vector storage initialization...")
    try:
        vector_store, index = get_vector_store_and_index()
        logger.info("Vector storage test successful!")
    except Exception as e:
        logger.error(f"Vector storage test failed: {e}")
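
    # A hedged query sketch (assumes a chat model, e.g. "llama3", has been
    # pulled into the local Ollama server; that setup is not part of this module):
    #
    #     llm = Ollama(model="llama3", base_url="http://localhost:11434")
    #     query_engine = index.as_query_engine(llm=llm)
    #     print(query_engine.query("What do the stored documents cover?"))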