llamaindex update + unpacking archives in data
@@ -16,33 +16,8 @@ from pathlib import Path
 from vector_storage import get_vector_store_and_index
-from llama_index.embeddings.ollama import OllamaEmbedding
-import os
-
-
-def setup_global_models():
-    """Set up the global models to prevent defaulting to OpenAI."""
-    # Set up the embedding model
-    ollama_embed_model = os.getenv("OLLAMA_EMBEDDING_MODEL", "qwen3-embedding:4b")
-    ollama_base_url = "http://localhost:11434"
-
-    embed_model = OllamaEmbedding(
-        model_name=ollama_embed_model,
-        base_url=ollama_base_url
-    )
-
-    # Set as the global embedding model
-    Settings.embed_model = embed_model
-
-    # Set up the LLM model
-    ollama_chat_model = os.getenv("OLLAMA_CHAT_MODEL", "nemotron-mini:4b")
-
-    from llama_index.llms.ollama import Ollama
-    llm = Ollama(model=ollama_chat_model, base_url=ollama_base_url)
-
-    # Set as the global LLM
-    Settings.llm = llm
+# Import the new configuration module
+from config import setup_global_models
 
 
 def initialize_retriever(
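The hunk deletes the inline setup_global_models() definition and replaces it with an import from a new config module, whose contents are not part of this diff. A minimal sketch of what config.py presumably contains, assuming the deleted body moved over unchanged; the Settings import from llama_index.core is an assumption, since Settings is referenced but never imported inside this hunk:

# config.py -- hypothetical sketch; assumes the removed function moved here verbatim
import os

from llama_index.core import Settings  # assumed import; not visible in this hunk
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.ollama import Ollama


def setup_global_models():
    """Set up the global models to prevent defaulting to OpenAI."""
    ollama_base_url = "http://localhost:11434"

    # Global embedding model, overridable via OLLAMA_EMBEDDING_MODEL
    Settings.embed_model = OllamaEmbedding(
        model_name=os.getenv("OLLAMA_EMBEDDING_MODEL", "qwen3-embedding:4b"),
        base_url=ollama_base_url,
    )

    # Global chat LLM, overridable via OLLAMA_CHAT_MODEL
    Settings.llm = Ollama(
        model=os.getenv("OLLAMA_CHAT_MODEL", "nemotron-mini:4b"),
        base_url=ollama_base_url,
    )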
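At the call site the behaviour should be unchanged; the point of the function is that it must run before any LlamaIndex component is built, so nothing falls back to the OpenAI defaults. A hedged sketch of the intended call order (initialize_retriever's argument list is truncated in this hunk, so it is left as a placeholder):

from config import setup_global_models

setup_global_models()  # register the Ollama models globally first
retriever = initialize_retriever(...)  # placeholder args; full signature is cut off above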