diff --git a/packages/components/nodes/agentflow/Agent/Agent.ts b/packages/components/nodes/agentflow/Agent/Agent.ts index ff0fc298aaf..dd536d4fb0a 100644 --- a/packages/components/nodes/agentflow/Agent/Agent.ts +++ b/packages/components/nodes/agentflow/Agent/Agent.ts @@ -686,6 +686,7 @@ class Agent_Agentflow implements INode { if (!model) { throw new Error('Model is required') } + const modelName = modelConfig?.model ?? modelConfig?.modelName // Extract tools const tools = nodeData.inputs?.agentTools as ITool[] @@ -1390,7 +1391,7 @@ class Agent_Agentflow implements INode { // End analytics tracking if (analyticHandlers && llmIds) { - await analyticHandlers.onLLMEnd(llmIds, finalResponse) + await analyticHandlers.onLLMEnd(llmIds, output, { model: modelName, provider: model }) } // Send additional streaming events if needed diff --git a/packages/components/nodes/agentflow/ConditionAgent/ConditionAgent.ts b/packages/components/nodes/agentflow/ConditionAgent/ConditionAgent.ts index b23dd198ff1..168aa30cb18 100644 --- a/packages/components/nodes/agentflow/ConditionAgent/ConditionAgent.ts +++ b/packages/components/nodes/agentflow/ConditionAgent/ConditionAgent.ts @@ -9,6 +9,7 @@ import { } from '../utils' import { CONDITION_AGENT_SYSTEM_PROMPT, DEFAULT_SUMMARIZER_TEMPLATE } from '../prompt' import { BaseChatModel } from '@langchain/core/language_models/chat_models' +import { findBestScenarioIndex } from './matchScenario' class ConditionAgent_Agentflow implements INode { label: string @@ -259,6 +260,8 @@ class ConditionAgent_Agentflow implements INode { if (!model) { throw new Error('Model is required') } + const modelName = modelConfig?.model ?? 
modelConfig?.modelName + const conditionAgentInput = nodeData.inputs?.conditionAgentInput as string let input = conditionAgentInput || question const conditionAgentInstructions = nodeData.inputs?.conditionAgentInstructions as string @@ -376,12 +379,20 @@ class ConditionAgent_Agentflow implements INode { const endTime = Date.now() const timeDelta = endTime - startTime - // End analytics tracking + // End analytics tracking (pass structured output with usage metadata) if (analyticHandlers && llmIds) { - await analyticHandlers.onLLMEnd( - llmIds, - typeof response.content === 'string' ? response.content : JSON.stringify(response.content) - ) + const analyticsOutput: any = { + content: typeof response.content === 'string' ? response.content : JSON.stringify(response.content) + } + // Include usage metadata if available + if (response.usage_metadata) { + analyticsOutput.usageMetadata = response.usage_metadata + } + // Include response metadata (contains model name) if available + if (response.response_metadata) { + analyticsOutput.responseMetadata = response.response_metadata + } + await analyticHandlers.onLLMEnd(llmIds, analyticsOutput, { model: modelName, provider: model }) } let calledOutputName: string @@ -406,10 +417,7 @@ class ConditionAgent_Agentflow implements INode { } } - // Find the first exact match - const matchedScenarioIndex = _conditionAgentScenarios.findIndex( - (scenario) => calledOutputName.toLowerCase() === scenario.scenario.toLowerCase() - ) + const matchedScenarioIndex = findBestScenarioIndex(_conditionAgentScenarios, calledOutputName) const conditions = _conditionAgentScenarios.map((scenario, index) => { return { diff --git a/packages/components/nodes/agentflow/ConditionAgent/matchScenario.test.ts b/packages/components/nodes/agentflow/ConditionAgent/matchScenario.test.ts new file mode 100644 index 00000000000..0d9b4ad45a9 --- /dev/null +++ b/packages/components/nodes/agentflow/ConditionAgent/matchScenario.test.ts @@ -0,0 +1,29 @@ +import { 
findBestScenarioIndex } from './matchScenario' + +describe('findBestScenarioIndex', () => { + const scenarios = [{ scenario: 'billing issue' }, { scenario: 'technical support' }, { scenario: 'other' }] + + it('matches exact scenario (case-insensitive)', () => { + expect(findBestScenarioIndex(scenarios, 'Technical Support')).toBe(1) + }) + + it('matches exact scenario with surrounding whitespace', () => { + expect(findBestScenarioIndex(scenarios, ' billing issue ')).toBe(0) + }) + + it('matches abbreviated output using startsWith fallback', () => { + expect(findBestScenarioIndex(scenarios, 'tech')).toBe(1) + }) + + it('matches substring output in either direction', () => { + expect(findBestScenarioIndex(scenarios, 'need help with billing issue today')).toBe(0) + }) + + it('falls back to last scenario when no match is found', () => { + expect(findBestScenarioIndex(scenarios, 'completely unrelated')).toBe(2) + }) + + it('returns -1 for empty scenarios list', () => { + expect(findBestScenarioIndex([], 'anything')).toBe(-1) + }) +}) diff --git a/packages/components/nodes/agentflow/ConditionAgent/matchScenario.ts b/packages/components/nodes/agentflow/ConditionAgent/matchScenario.ts new file mode 100644 index 00000000000..2a67fd0b865 --- /dev/null +++ b/packages/components/nodes/agentflow/ConditionAgent/matchScenario.ts @@ -0,0 +1,30 @@ +export type ConditionScenario = { scenario: string } + +export const findBestScenarioIndex = (scenarios: ConditionScenario[], calledOutputName: string): number => { + if (!Array.isArray(scenarios) || scenarios.length === 0) return -1 + + const normalizedOutput = calledOutputName.toLowerCase().trim() + + // try exact match first + let matchedScenarioIndex = scenarios.findIndex((scenario) => scenario.scenario.toLowerCase() === normalizedOutput) + + // fallback: check if LLM returned a partial/abbreviated scenario name + if (matchedScenarioIndex === -1) { + matchedScenarioIndex = scenarios.findIndex((scenario) => 
scenario.scenario.toLowerCase().startsWith(normalizedOutput)) + } + + // further fallback: substring match in either direction + if (matchedScenarioIndex === -1) { + matchedScenarioIndex = scenarios.findIndex( + (scenario) => + scenario.scenario.toLowerCase().includes(normalizedOutput) || normalizedOutput.includes(scenario.scenario.toLowerCase()) + ) + } + + // last resort: if still no match, use the last scenario as an "else" branch + if (matchedScenarioIndex === -1) { + matchedScenarioIndex = scenarios.length - 1 + } + + return matchedScenarioIndex +} diff --git a/packages/components/nodes/agentflow/LLM/LLM.ts b/packages/components/nodes/agentflow/LLM/LLM.ts index f1b0c9aee92..3f33560ad20 100644 --- a/packages/components/nodes/agentflow/LLM/LLM.ts +++ b/packages/components/nodes/agentflow/LLM/LLM.ts @@ -348,6 +348,7 @@ class LLM_Agentflow implements INode { if (!model) { throw new Error('Model is required') } + const modelName = modelConfig?.model ?? modelConfig?.modelName // Extract memory and configuration options const enableMemory = nodeData.inputs?.llmEnableMemory as boolean @@ -576,7 +577,7 @@ class LLM_Agentflow implements INode { // End analytics tracking if (analyticHandlers && llmIds) { - await analyticHandlers.onLLMEnd(llmIds, finalResponse) + await analyticHandlers.onLLMEnd(llmIds, output, { model: modelName, provider: model }) } // Send additional streaming events if needed diff --git a/packages/components/src/Interface.ts b/packages/components/src/Interface.ts index daa8ef79e7b..92e76de8fc9 100644 --- a/packages/components/src/Interface.ts +++ b/packages/components/src/Interface.ts @@ -458,7 +458,7 @@ export enum FollowUpPromptProvider { } export type FollowUpPromptProviderConfig = { - [key in FollowUpPromptProvider]: { + [_key in FollowUpPromptProvider]: { credentialId: string modelName: string baseUrl: string diff --git a/packages/components/src/handler.test.ts b/packages/components/src/handler.test.ts index 394ce7092e3..e0ebd97ef4e 100644 --- 
a/packages/components/src/handler.test.ts +++ b/packages/components/src/handler.test.ts @@ -48,3 +48,286 @@ describe('URL Handling For Phoenix Tracer', () => { ) }) }) + +/** + * Unit tests for onLLMEnd usage metadata extraction + * + * These tests verify the logic for extracting and formatting usage metadata + * from the onLLMEnd output parameter. Due to Jest configuration constraints + * with the complex OpenTelemetry and analytics dependencies, these tests are + * implemented as pure function tests that verify the extraction logic. + */ +describe('onLLMEnd Usage Metadata Extraction Logic', () => { + // Helper function that mirrors the extraction logic in handler.ts onLLMEnd (lines 1437-1465) + const extractOutputData = (output: string | Record<string, any>, model?: string) => { + let outputText: string + let usageMetadata: Record<string, any> | undefined + let modelName: string | undefined = model + + if (typeof output === 'string') { + outputText = output + } else { + outputText = output.content ?? '' + usageMetadata = output.usageMetadata ?? output.usage_metadata + if (usageMetadata) { + usageMetadata = { + input_tokens: usageMetadata.input_tokens ?? usageMetadata.prompt_tokens, + output_tokens: usageMetadata.output_tokens ?? usageMetadata.completion_tokens, + total_tokens: usageMetadata.total_tokens + } + } + const responseMetadata = output.responseMetadata ?? output.response_metadata + if (!model && responseMetadata) { + modelName = responseMetadata.model ?? responseMetadata.model_name ?? 
responseMetadata.modelId + } + } + return { outputText, usageMetadata, modelName } + } + + // Helper to format for Langfuse + const formatForLangfuse = (usageMetadata: Record<string, any> | undefined) => { + if (!usageMetadata) return undefined + return { + promptTokens: usageMetadata.input_tokens, + completionTokens: usageMetadata.output_tokens, + totalTokens: usageMetadata.total_tokens + } + } + + // Helper to format for LangSmith + const formatForLangSmith = (usageMetadata: Record<string, any> | undefined) => { + if (!usageMetadata) return undefined + return { + prompt_tokens: usageMetadata.input_tokens, + completion_tokens: usageMetadata.output_tokens, + total_tokens: usageMetadata.total_tokens + } + } + + describe('backward compatibility with string input', () => { + it('should handle plain string output', () => { + const result = extractOutputData('Hello, world!') + expect(result.outputText).toBe('Hello, world!') + expect(result.usageMetadata).toBeUndefined() + expect(result.modelName).toBeUndefined() + }) + + it('should handle empty string', () => { + const result = extractOutputData('') + expect(result.outputText).toBe('') + }) + }) + + describe('structured input with usage metadata', () => { + it('should extract usage metadata using LangChain field names (input_tokens/output_tokens)', () => { + const result = extractOutputData({ + content: 'Test response', + usageMetadata: { + input_tokens: 100, + output_tokens: 50, + total_tokens: 150 + }, + responseMetadata: { + model: 'gpt-4' + } + }) + + expect(result.outputText).toBe('Test response') + expect(result.usageMetadata).toEqual({ + input_tokens: 100, + output_tokens: 50, + total_tokens: 150 + }) + expect(result.modelName).toBe('gpt-4') + }) + + it('should handle OpenAI field names (prompt_tokens/completion_tokens)', () => { + const result = extractOutputData({ + content: 'Test response', + usageMetadata: { + prompt_tokens: 200, + completion_tokens: 100, + total_tokens: 300 + } + }) + + // Should normalize to 
input_tokens/output_tokens + expect(result.usageMetadata).toEqual({ + input_tokens: 200, + output_tokens: 100, + total_tokens: 300 + }) + }) + + it('should handle usage_metadata (snake_case) field name', () => { + const result = extractOutputData({ + content: 'Test response', + usage_metadata: { + input_tokens: 50, + output_tokens: 25, + total_tokens: 75 + } + }) + + expect(result.usageMetadata).toEqual({ + input_tokens: 50, + output_tokens: 25, + total_tokens: 75 + }) + }) + + it('should prefer usageMetadata over usage_metadata', () => { + const result = extractOutputData({ + content: 'Test', + usageMetadata: { input_tokens: 100, output_tokens: 50, total_tokens: 150 }, + usage_metadata: { input_tokens: 1, output_tokens: 1, total_tokens: 2 } + }) + + expect(result.usageMetadata?.input_tokens).toBe(100) + }) + }) + + describe('model name extraction', () => { + it('should extract model from responseMetadata.model', () => { + const result = extractOutputData({ + content: 'Test', + responseMetadata: { model: 'gpt-4-turbo' } + }) + expect(result.modelName).toBe('gpt-4-turbo') + }) + + it('should extract model from responseMetadata.model_name', () => { + const result = extractOutputData({ + content: 'Test', + responseMetadata: { model_name: 'claude-3-opus' } + }) + expect(result.modelName).toBe('claude-3-opus') + }) + + it('should extract model from responseMetadata.modelId', () => { + const result = extractOutputData({ + content: 'Test', + responseMetadata: { modelId: 'anthropic.claude-v2' } + }) + expect(result.modelName).toBe('anthropic.claude-v2') + }) + + it('should handle response_metadata (snake_case) field name', () => { + const result = extractOutputData({ + content: 'Test', + response_metadata: { model: 'gpt-3.5-turbo' } + }) + expect(result.modelName).toBe('gpt-3.5-turbo') + }) + + it('should prefer model over model_name over modelId', () => { + const result = extractOutputData({ + content: 'Test', + responseMetadata: { + model: 'preferred-model', + 
model_name: 'secondary-model', + modelId: 'tertiary-model' + } + }) + expect(result.modelName).toBe('preferred-model') + }) + + it('should prefer explicit model param over responseMetadata', () => { + const result = extractOutputData( + { + content: 'Test', + responseMetadata: { model: 'from-response-metadata' } + }, + 'explicit-model-param' + ) + expect(result.modelName).toBe('explicit-model-param') + }) + }) + + describe('Langfuse format conversion', () => { + it('should format usage for Langfuse OpenAIUsage schema', () => { + const result = extractOutputData({ + content: 'Test', + usageMetadata: { input_tokens: 100, output_tokens: 50, total_tokens: 150 } + }) + const langfuseUsage = formatForLangfuse(result.usageMetadata) + + expect(langfuseUsage).toEqual({ + promptTokens: 100, + completionTokens: 50, + totalTokens: 150 + }) + }) + + it('should return undefined for missing usage', () => { + const result = extractOutputData({ content: 'Test' }) + expect(formatForLangfuse(result.usageMetadata)).toBeUndefined() + }) + }) + + describe('LangSmith format conversion', () => { + it('should format usage for LangSmith token_usage schema', () => { + const result = extractOutputData({ + content: 'Test', + usageMetadata: { input_tokens: 100, output_tokens: 50, total_tokens: 150 } + }) + const langSmithUsage = formatForLangSmith(result.usageMetadata) + + expect(langSmithUsage).toEqual({ + prompt_tokens: 100, + completion_tokens: 50, + total_tokens: 150 + }) + }) + }) + + describe('missing fields handling', () => { + it('should handle structured output without usageMetadata', () => { + const result = extractOutputData({ content: 'Test response' }) + expect(result.outputText).toBe('Test response') + expect(result.usageMetadata).toBeUndefined() + expect(result.modelName).toBeUndefined() + }) + + it('should handle structured output with only model, no usage', () => { + const result = extractOutputData({ + content: 'Test response', + responseMetadata: { model: 'gpt-4' } + }) + 
expect(result.usageMetadata).toBeUndefined() + expect(result.modelName).toBe('gpt-4') + }) + + it('should handle empty content', () => { + const result = extractOutputData({ + content: '', + usageMetadata: { input_tokens: 10, output_tokens: 0, total_tokens: 10 } + }) + expect(result.outputText).toBe('') + expect(result.usageMetadata).toEqual({ + input_tokens: 10, + output_tokens: 0, + total_tokens: 10 + }) + }) + + it('should handle missing content field', () => { + const result = extractOutputData({ + usageMetadata: { input_tokens: 10, output_tokens: 5, total_tokens: 15 } + }) + expect(result.outputText).toBe('') + }) + + it('should handle undefined values in usage metadata', () => { + const result = extractOutputData({ + content: 'Test', + usageMetadata: { input_tokens: 100 } + }) + expect(result.usageMetadata).toEqual({ + input_tokens: 100, + output_tokens: undefined, + total_tokens: undefined + }) + }) + }) +}) diff --git a/packages/components/src/handler.ts b/packages/components/src/handler.ts index c7dec69c3a7..0101d8dacde 100644 --- a/packages/components/src/handler.ts +++ b/packages/components/src/handler.ts @@ -736,6 +736,40 @@ export class AnalyticHandler { } } + /** + * Helper method to end an OpenTelemetry span with output, token usage, and model name attributes. + * Used by arize, phoenix, and opik providers. 
+ */ + private _endOtelSpan( + providerName: string, + returnIds: ICommonObject, + outputText: string, + usageMetadata?: { input_tokens?: number; output_tokens?: number; total_tokens?: number }, + modelName?: string + ): void { + const llmSpan: Span | undefined = this.handlers[providerName]?.llmSpan?.[returnIds[providerName]?.llmSpan] + if (llmSpan) { + llmSpan.setAttribute('output.value', JSON.stringify(outputText)) + llmSpan.setAttribute('output.mime_type', 'application/json') + if (usageMetadata) { + if (usageMetadata.input_tokens !== undefined) { + llmSpan.setAttribute('llm.token_count.prompt', usageMetadata.input_tokens) + } + if (usageMetadata.output_tokens !== undefined) { + llmSpan.setAttribute('llm.token_count.completion', usageMetadata.output_tokens) + } + if (usageMetadata.total_tokens !== undefined) { + llmSpan.setAttribute('llm.token_count.total', usageMetadata.total_tokens) + } + } + if (modelName) { + llmSpan.setAttribute('llm.model_name', modelName) + } + llmSpan.setStatus({ code: SpanStatusCode.OK }) + llmSpan.end() + } + } + async init() { if (this.initialized) return @@ -1400,25 +1434,104 @@ export class AnalyticHandler { return returnIds } - async onLLMEnd(returnIds: ICommonObject, output: string) { + async onLLMEnd( + returnIds: ICommonObject, + output: string | ICommonObject, + { model, provider }: { model?: string; provider?: string } = {} + ) { + // Extract content, usage metadata, and model name from output + // Support both string (backward compatible) and structured object formats + let outputText: string + let usageMetadata: ICommonObject | undefined + let modelName = model + + if (typeof output === 'string') { + outputText = output + } else { + // Extract text content + outputText = output.content ?? '' + + // Extract usage metadata (supports both LangChain and OpenAI field names) + usageMetadata = output.usageMetadata ?? 
output.usage_metadata + if (usageMetadata) { + // Normalize field names for consistent access + usageMetadata = { + input_tokens: usageMetadata.input_tokens ?? usageMetadata.prompt_tokens, + output_tokens: usageMetadata.output_tokens ?? usageMetadata.completion_tokens, + total_tokens: usageMetadata.total_tokens + } + } + + // Extract model name from response metadata + const responseMetadata = output.responseMetadata ?? output.response_metadata + if (!model && responseMetadata) { + modelName = responseMetadata.model ?? responseMetadata.model_name ?? responseMetadata.modelId + } + } + if (Object.prototype.hasOwnProperty.call(this.handlers, 'langSmith')) { const llmRun: RunTree | undefined = this.handlers['langSmith'].llmRun[returnIds['langSmith'].llmRun] if (llmRun) { - await llmRun.end({ - outputs: { - generations: [output] + const outputs: ICommonObject = { + generations: [outputText] + } + // Add usage_metadata and model info to run's extra.metadata for cost tracking + // LangSmith requires: usage_metadata (with input_tokens, output_tokens, total_tokens) + // and ls_model_name + ls_provider in metadata for automatic cost calculation + if (usageMetadata || modelName) { + if (!llmRun.extra) { + llmRun.extra = {} + } + if (!llmRun.extra.metadata) { + llmRun.extra.metadata = {} + } + if (usageMetadata) { + llmRun.extra.metadata.usage_metadata = { + input_tokens: usageMetadata.input_tokens, + output_tokens: usageMetadata.output_tokens, + total_tokens: usageMetadata.total_tokens + } + } + if (modelName) { + llmRun.extra.metadata.ls_model_name = modelName } + if (provider) { + let normalized = provider.toLowerCase() + if (normalized.startsWith('chat')) normalized = normalized.slice(4) + if (normalized.endsWith('chat')) normalized = normalized.slice(0, -4) + if (normalized.includes('bedrock')) normalized = 'amazon_bedrock' + llmRun.extra.metadata.ls_provider = normalized + } + } + await llmRun.end({ + outputs }) await llmRun.patchRun() } } if 
(Object.prototype.hasOwnProperty.call(this.handlers, 'langFuse')) { - const generation: LangfuseGenerationClient | undefined = this.handlers['langFuse'].generation[returnIds['langFuse'].generation] + const generation: LangfuseGenerationClient | undefined = this.handlers['langFuse'].generation[returnIds['langFuse']?.generation] if (generation) { - generation.end({ - output: output - }) + const endPayload: ICommonObject = { + output: outputText + } + // Add model name if available + if (modelName) { + endPayload.model = modelName + } + // Add usage data if available (using Langfuse's OpenAIUsage format) + if (usageMetadata) { + endPayload.usage = { + promptTokens: usageMetadata.input_tokens, + completionTokens: usageMetadata.output_tokens, + totalTokens: usageMetadata.total_tokens + } + } + generation.end(endPayload) + // Flush to ensure the generation update is sent before the request completes + const langfuse: Langfuse = this.handlers['langFuse'].client + await langfuse.flushAsync() } } @@ -1427,50 +1540,54 @@ export class AnalyticHandler { const monitor = this.handlers['lunary'].client if (monitor && llmEventId) { - await monitor.trackEvent('llm', 'end', { + const eventData: ICommonObject = { runId: llmEventId, - output - }) + output: outputText + } + // Add token usage if available + if (usageMetadata) { + eventData.tokensUsage = { + prompt: usageMetadata.input_tokens, + completion: usageMetadata.output_tokens + } + } + if (modelName) { + eventData.model = modelName + } + await monitor.trackEvent('llm', 'end', eventData) } } if (Object.prototype.hasOwnProperty.call(this.handlers, 'langWatch')) { const span: LangWatchSpan | undefined = this.handlers['langWatch'].span[returnIds['langWatch'].span] if (span) { - span.end({ - output: autoconvertTypedValues(output) - }) + const endPayload: ICommonObject = { + output: autoconvertTypedValues(outputText) + } + // Add metrics if usage available + if (usageMetadata) { + endPayload.metrics = { + promptTokens: 
usageMetadata.input_tokens, + completionTokens: usageMetadata.output_tokens + } + } + if (modelName) { + endPayload.model = modelName + } + span.end(endPayload) } } if (Object.prototype.hasOwnProperty.call(this.handlers, 'arize')) { - const llmSpan: Span | undefined = this.handlers['arize'].llmSpan[returnIds['arize'].llmSpan] - if (llmSpan) { - llmSpan.setAttribute('output.value', JSON.stringify(output)) - llmSpan.setAttribute('output.mime_type', 'application/json') - llmSpan.setStatus({ code: SpanStatusCode.OK }) - llmSpan.end() - } + this._endOtelSpan('arize', returnIds, outputText, usageMetadata, modelName) } if (Object.prototype.hasOwnProperty.call(this.handlers, 'phoenix')) { - const llmSpan: Span | undefined = this.handlers['phoenix'].llmSpan[returnIds['phoenix'].llmSpan] - if (llmSpan) { - llmSpan.setAttribute('output.value', JSON.stringify(output)) - llmSpan.setAttribute('output.mime_type', 'application/json') - llmSpan.setStatus({ code: SpanStatusCode.OK }) - llmSpan.end() - } + this._endOtelSpan('phoenix', returnIds, outputText, usageMetadata, modelName) } if (Object.prototype.hasOwnProperty.call(this.handlers, 'opik')) { - const llmSpan: Span | undefined = this.handlers['opik'].llmSpan[returnIds['opik'].llmSpan] - if (llmSpan) { - llmSpan.setAttribute('output.value', JSON.stringify(output)) - llmSpan.setAttribute('output.mime_type', 'application/json') - llmSpan.setStatus({ code: SpanStatusCode.OK }) - llmSpan.end() - } + this._endOtelSpan('opik', returnIds, outputText, usageMetadata, modelName) } } diff --git a/packages/server/src/utils/buildAgentflow.ts b/packages/server/src/utils/buildAgentflow.ts index aad3233a2fb..02803326df6 100644 --- a/packages/server/src/utils/buildAgentflow.ts +++ b/packages/server/src/utils/buildAgentflow.ts @@ -769,12 +769,20 @@ async function determineNodesToIgnore( if (isDecisionNode && result.output?.conditions) { const outputConditions: ICondition[] = result.output.conditions + // safety net: if no conditions were 
fulfilled, don't ignore ALL children + // treat the last condition as an else/default fallback + const anyFulfilled = outputConditions.some((c) => c.isFulfilled === true) + if (!anyFulfilled && outputConditions.length > 0) { + // mark the last condition as fulfilled so at least one branch executes + outputConditions[outputConditions.length - 1].isFulfilled = true + } + // Find indexes of unfulfilled conditions const unfulfilledIndexes = outputConditions - .map((condition: any, index: number) => + .map((condition, index) => condition.isFulfilled === false || !Object.prototype.hasOwnProperty.call(condition, 'isFulfilled') ? index : -1 ) - .filter((index: number) => index !== -1) + .filter((index) => index !== -1) // Find nodes to ignore based on unfulfilled conditions for (const index of unfulfilledIndexes) { @@ -1896,8 +1904,9 @@ export const executeAgentFlow = async ({ chatId }) await analyticHandlers.init() + const flowName = chatflow.name || 'Agentflow' parentTraceIds = await analyticHandlers.onChainStart( - 'Agentflow', + flowName, form && Object.keys(form).length > 0 ? JSON.stringify(form) : question || '' ) }