Fix OpenAI-compatible model initialization to pass the required parameters (max_tokens, reasoning_effort)
This commit is contained in:
@@ -93,12 +93,19 @@ def get_llm_model():
         return llm

     elif strategy == "openai":
         from llama_index.llms.openai_like import OpenAILike
-        # from helpers.openai_compatible_llm import OpenAICompatibleLLM
+        from helpers.openai_compatible_llm import OpenAICompatibleLLM

         openai_chat_url = os.getenv("OPENAI_CHAT_URL", "https://api.openai.com/v1")
         openai_chat_key = os.getenv("OPENAI_CHAT_KEY", "dummy_key_for_template")
         openai_chat_model = os.getenv("OPENAI_CHAT_MODEL", "gpt-3.5-turbo")
         openai_chat_temperature = float(os.getenv("OPENAI_CHAT_TEMPERATURE", "0.1"))
+        openai_chat_max_tokens_env = os.getenv("OPENAI_CHAT_MAX_TOKENS", "").strip()
+        openai_chat_max_tokens = (
+            int(openai_chat_max_tokens_env) if openai_chat_max_tokens_env else 1024
+        )
+        openai_reasoning_effort = (
+            os.getenv("OPENAI_CHAT_REASONING_EFFORT", "").strip() or None
+        )
         openai_is_fc_model = (
             os.getenv("OPENAI_CHAT_IS_FUNCTION_CALLING_MODEL", "false").lower()
             == "true"
@@ -109,13 +116,19 @@ def get_llm_model():

         logger.info(
             f"Initializing OpenAI-compatible chat model: {openai_chat_model} "
-            f"(base={openai_chat_url}, function_calling={openai_is_fc_model})"
+            f"(base={openai_chat_url}, max_tokens={openai_chat_max_tokens}, "
+            f"reasoning_effort={openai_reasoning_effort}, function_calling={openai_is_fc_model})"
         )

-        llm = OpenAILike(
+        llm = OpenAICompatibleLLM(
             model=openai_chat_model,
             api_base=openai_chat_url,
             api_key=openai_chat_key,
             temperature=openai_chat_temperature,
+            max_tokens=openai_chat_max_tokens,
+            reasoning_effort=openai_reasoning_effort,
             timeout=120.0,
             is_function_calling_model=openai_is_fc_model,
         )

         return llm
Reference in New Issue
Block a user