Fix OpenAI-compatible model support by passing the required chat parameters

This commit is contained in:
2026-03-09 10:21:39 +03:00
parent 969d25209c
commit 5721bad117
4 changed files with 39 additions and 4 deletions

View File

@@ -17,6 +17,7 @@ from typing import Any, Iterable, List
from llama_index.core import PromptTemplate
from llama_index.core.agent import AgentWorkflow
from llama_index.core.base.llms.types import ChatMessage, MessageRole
from llama_index.core.retrievers import VectorIndexRetriever
from llama_index.core.schema import NodeWithScore
from llama_index.core.tools import FunctionTool
@@ -230,6 +231,19 @@ def synthesize_answer(query: str, sources: list[dict[str, Any]], agent_draft: st
context_json=context_json,
)
logger.info("Synthesizing grounded answer from retrieved sources")
# Prefer chat API for chat-capable models; fallback to completion if unavailable.
try:
if hasattr(llm, "chat"):
chat_response = llm.chat(
[
ChatMessage(role=MessageRole.SYSTEM, content="You answer with grounded citations only."),
ChatMessage(role=MessageRole.USER, content=prompt),
]
)
return _normalize_text(getattr(chat_response, "message", chat_response).content)
except Exception as e:
logger.warning(f"LLM chat synthesis failed, falling back to completion: {e}")
response = llm.complete(prompt)
return _normalize_text(getattr(response, "text", response))