llamaindex update + unpacking archives in data

2026-02-09 19:00:23 +03:00
parent 0adbc29692
commit f9c47c772f
11 changed files with 478 additions and 100 deletions


@@ -16,33 +16,8 @@ from pathlib import Path
 from vector_storage import get_vector_store_and_index
 from llama_index.embeddings.ollama import OllamaEmbedding
 import os
-def setup_global_models():
-    """Set up the global models to prevent defaulting to OpenAI."""
-    # Set up the embedding model
-    ollama_embed_model = os.getenv("OLLAMA_EMBEDDING_MODEL", "qwen3-embedding:4b")
-    ollama_base_url = "http://localhost:11434"
-    embed_model = OllamaEmbedding(
-        model_name=ollama_embed_model,
-        base_url=ollama_base_url
-    )
-    # Set as the global embedding model
-    Settings.embed_model = embed_model
-    # Set up the LLM model
-    ollama_chat_model = os.getenv("OLLAMA_CHAT_MODEL", "nemotron-mini:4b")
-    from llama_index.llms.ollama import Ollama
-    llm = Ollama(model=ollama_chat_model, base_url=ollama_base_url)
-    # Set as the global LLM
-    Settings.llm = llm
+# Import the new configuration module
+from config import setup_global_models
 def initialize_retriever(
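
The hunk above removes the inline Ollama model setup and replaces it with an import from a new config module. Below is a minimal sketch of what that relocated config.py could look like, assuming the removed setup_global_models function was moved over largely unchanged; the llama_index.core import path for Settings is an assumption, since that import is not shown in this hunk.

# config.py (hypothetical sketch): assumes the removed function was moved here largely unchanged
import os

from llama_index.core import Settings  # assumed import path; not visible in the hunk
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama


def setup_global_models():
    """Set up the global models to prevent defaulting to OpenAI."""
    ollama_base_url = "http://localhost:11434"

    # Global embedding model, overridable via environment variable
    ollama_embed_model = os.getenv("OLLAMA_EMBEDDING_MODEL", "qwen3-embedding:4b")
    Settings.embed_model = OllamaEmbedding(
        model_name=ollama_embed_model,
        base_url=ollama_base_url,
    )

    # Global chat LLM, overridable via environment variable
    ollama_chat_model = os.getenv("OLLAMA_CHAT_MODEL", "nemotron-mini:4b")
    Settings.llm = Ollama(model=ollama_chat_model, base_url=ollama_base_url)

With this layout, the importing module would call setup_global_models() once, before initialize_retriever builds any index or query engine, so that Settings.embed_model and Settings.llm are already populated when LlamaIndex would otherwise fall back to its OpenAI defaults.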