Skip to content
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 37 additions & 2 deletions src/openlayer/lib/integrations/langchain_callback.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,29 @@
"amazon_bedrock_converse_chat": "Bedrock",
}

# Map from LiteLLM routing prefixes (the part before "/" in model ids such as
# "gemini/gemini-2.5-flash") to human-readable provider names.  Needed because
# models served through a LiteLLM proxy report a LangChain _type of
# "openai-chat", which would otherwise be attributed to "OpenAI"; the prefix
# identifies the real upstream provider.
LITELLM_PREFIX_TO_PROVIDER_MAP = dict(
    gemini="Google",
    anthropic="Anthropic",
    cohere="Cohere",
    mistral="Mistral",
    bedrock="Bedrock",
    vertex_ai="Google",
    azure="Azure",
    huggingface="Hugging Face",
    replicate="Replicate",
    together_ai="Together AI",
    groq="Groq",
    deepseek="DeepSeek",
    fireworks_ai="Fireworks AI",
    perplexity="Perplexity",
    ollama="Ollama",
    openai="OpenAI",
)


if HAVE_LANGCHAIN:
BaseCallbackHandlerClass = BaseCallbackHandler
Expand Down Expand Up @@ -380,6 +403,15 @@ def _extract_model_info(
or serialized.get("name")
)

# Handle LiteLLM model prefix (e.g. "gemini/gemini-2.5-flash"):
# extract the actual provider and strip the prefix from the model name.
if model and "/" in model:
prefix, model_name = model.split("/", 1)
litellm_provider = LITELLM_PREFIX_TO_PROVIDER_MAP.get(prefix)
if litellm_provider:
provider = litellm_provider
model = model_name

# Clean invocation params (remove internal LangChain params)
clean_params = {
k: v for k, v in invocation_params.items() if not k.startswith("_")
Expand Down Expand Up @@ -477,7 +509,7 @@ def _handle_llm_start(
serialized, invocation_params, metadata or {}
)

step_name = name or f"{model_info['provider'] or 'LLM'} Chat Completion"
step_name = f"{model_info['provider'] or 'LLM'} Chat Completion"
prompt = [{"role": "user", "content": text} for text in prompts]

self._start_step(
Expand Down Expand Up @@ -508,7 +540,9 @@ def _handle_chat_model_start(
serialized, invocation_params, metadata or {}
)

step_name = name or f"{model_info['provider'] or 'Chat Model'} Chat Completion"
# Always use provider-based name for chat completions (e.g. "Google Chat Completion")
# rather than the run_name from the caller (e.g. "Language Model") which is generic.
step_name = f"{model_info['provider'] or 'Chat Model'} Chat Completion"
prompt = self._messages_to_prompt_format(messages)

self._start_step(
Expand Down Expand Up @@ -683,6 +717,7 @@ def _handle_tool_start(
serialized = serialized or {}
tool_name = (
name
or serialized.get("name")
or (serialized.get("id", [])[-1] if serialized.get("id") else None)
or "Tool"
)
Expand Down