From b9bcdcb56d666e1131a960200e7fd3849ef04482 Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 18:28:17 +0800 Subject: [PATCH 1/8] feat(adapter-zhipu): add GLM-4.7-FlashX model support Add GLM-4.7-FlashX model with 200k token context window to the list of supported models in the Zhipu adapter. --- packages/adapter-zhipu/src/client.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/packages/adapter-zhipu/src/client.ts b/packages/adapter-zhipu/src/client.ts index 9f566b100..9d6c01a81 100644 --- a/packages/adapter-zhipu/src/client.ts +++ b/packages/adapter-zhipu/src/client.ts @@ -78,7 +78,8 @@ export class ZhipuClient extends PlatformModelAndEmbeddingsClient ['GLM-4.6V-FlashX', 128000], ['GLM-4.6V-Flash', 128000], ['GLM-4.6', 200_000], - ['GLM-4.7', 200_000] + ['GLM-4.7', 200_000], + ['GLM-4.7-FlashX', 200_000] // ['GLM-4-AllTools', 128000] ] as [string, number][] From e29a6d1b49da7baac5685d2c6b192bf8d20e8ddf Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 18:38:49 +0800 Subject: [PATCH 2/8] feat(adapter-wenxin): add image input support and expand model list Add support for vision-enabled models with image input capability. Expand model list to include new ERNIE 5.0 thinking models and ERNIE 4.5 variants. Update message types to support both text and image content formats. - Add ModelCapabilities.ImageInput support for vision models - Update langchainMessageToWenXinMessage to handle image content async - Support both string and mixed text/image message content - Add thinking model detection for ERNIE 5.0 thinking models - Include new ERNIE model variants (5.0 thinking, 4.5 turbo VL, etc.) --- packages/adapter-wenxin/src/client.ts | 25 +++- packages/adapter-wenxin/src/requester.ts | 9 +- packages/adapter-wenxin/src/types.ts | 18 ++- packages/adapter-wenxin/src/utils.ts | 138 +++++++++++++++++------ 4 files changed, 150 insertions(+), 40 deletions(-) diff --git a/packages/adapter-wenxin/src/client.ts b/packages/adapter-wenxin/src/client.ts index 39ff4ae21..810b2d406 100644 --- a/packages/adapter-wenxin/src/client.ts +++ b/packages/adapter-wenxin/src/client.ts @@ -6,6 +6,7 @@ import { ChatLunaEmbeddings } from 'koishi-plugin-chatluna/llm-core/platform/model' import { + ModelCapabilities, ModelInfo, ModelType } from 'koishi-plugin-chatluna/llm-core/platform/types' @@ -17,6 +18,7 @@ import { } from 'koishi-plugin-chatluna/utils/error' import { WenxinRequester } from './requester' import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat' +import { supportImageInput } from '@chatluna/v1-shared-adapter' export class WenxinClient extends PlatformModelAndEmbeddingsClient { platform = 'wenxin' @@ -40,6 +42,22 @@ export class WenxinClient extends PlatformModelAndEmbeddingsClient async refreshModels(): Promise { const rawModels = [ + ['ernie-5.0-thinking-preview', 128000], // ERNIE 5.0 Thinking Preview + ['ernie-5.0-thinking-latest', 128000], // ERNIE 5.0 Thinking Latest + ['ernie-5.0-thinking-exp', 128000], // ERNIE 5.0 Thinking Exp + ['ernie-4.5-turbo-128k', 128000], // ERNIE 4.5 Turbo 128K + ['ernie-4.5-turbo-128k-preview', 128000], // ERNIE 4.5 Turbo 128K Preview + ['ernie-4.5-turbo-32k', 32000], // ERNIE 4.5 Turbo 32K + ['ernie-4.5-turbo-latest', 128000], // ERNIE 4.5 Turbo Latest + ['ernie-4.5-turbo-vl-preview', 128000], // ERNIE 4.5 Turbo VL Preview + ['ernie-4.5-turbo-vl', 128000], // ERNIE 4.5 Turbo VL + ['ernie-4.5-turbo-vl-32k', 32000], // ERNIE 4.5 Turbo VL 32K + ['ernie-4.5-turbo-vl-32k-preview', 32000], // ERNIE 4.5 Turbo VL 32K Preview 
+ ['ernie-4.5-turbo-vl-latest', 128000], // ERNIE 4.5 Turbo VL Latest + ['ernie-char-8k', 8000], // ERNIE Character 8K + ['ernie-4.5-0.3b', 128000], // ERNIE 4.5 0.3B + ['ernie-4.5-21b-a3b', 128000], // ERNIE 4.5 21B A3B + ['ernie-4.5-vl-28b-a3b', 32000], // ERNIE 4.5 VL 28B A3B ['ernie-4.0-8k', 8000], // ERNIE-4.0-8K ['ernie-4.0-8k-preview', 8000], // ERNIE-4.0-8K-Preview ['ernie-4.0-8k-latest', 8000], // ERNIE-4.0-8K-Latest @@ -70,7 +88,9 @@ export class WenxinClient extends PlatformModelAndEmbeddingsClient return { name: model, type: ModelType.llm, - capabilities: [], + capabilities: [ + supportImageInput(model) && ModelCapabilities.ImageInput + ].filter(Boolean), supportMode: ['all'], maxTokens } as ModelInfo @@ -111,7 +131,8 @@ export class WenxinClient extends PlatformModelAndEmbeddingsClient temperature: this._config.temperature, maxRetries: this._config.maxRetries, llmType: 'wenxin', - isThinkModel: model.includes('reasoner') + isThinkModel: + model.includes('reasoner') || model.includes('thinking') }) } diff --git a/packages/adapter-wenxin/src/requester.ts b/packages/adapter-wenxin/src/requester.ts index fadb0fa5a..1cd4b428d 100644 --- a/packages/adapter-wenxin/src/requester.ts +++ b/packages/adapter-wenxin/src/requester.ts @@ -47,9 +47,12 @@ export class WenxinRequester ): AsyncGenerator { await this.init() - const messagesMapped: WenxinMessage[] = langchainMessageToWenXinMessage( - params.input - ) + const messagesMapped: WenxinMessage[] = + await langchainMessageToWenXinMessage( + params.input, + this._plugin, + params.model + ) try { const response = await this._post( diff --git a/packages/adapter-wenxin/src/types.ts b/packages/adapter-wenxin/src/types.ts index 29d7a7bbd..fda64216d 100644 --- a/packages/adapter-wenxin/src/types.ts +++ b/packages/adapter-wenxin/src/types.ts @@ -5,9 +5,25 @@ export type WenxinMessageRole = | 'function' | 'tool' +export type WenxinMessageContent = + | string + | ( + | { + type: 'text' + text: string + } + | { + type: 'image_url' + image_url: { + url: string + detail?: 'low' | 'high' + } + } + )[] + export interface WenxinMessage { role: WenxinMessageRole - content?: string + content?: WenxinMessageContent name?: string tool_calls?: ChatCompletionRequestMessageToolCall[] diff --git a/packages/adapter-wenxin/src/utils.ts b/packages/adapter-wenxin/src/utils.ts index f65d4e9fb..8e71da2c0 100644 --- a/packages/adapter-wenxin/src/utils.ts +++ b/packages/adapter-wenxin/src/utils.ts @@ -5,6 +5,7 @@ import { ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, + MessageContentImageUrl, MessageType, SystemMessageChunk, ToolMessage, @@ -17,49 +18,118 @@ import { } from './types' import { StructuredTool } from '@langchain/core/tools' import { zodToJsonSchema } from 'zod-to-json-schema' -import { removeAdditionalProperties } from '@chatluna/v1-shared-adapter' +import { + fetchImageUrl, + removeAdditionalProperties, + supportImageInput +} from '@chatluna/v1-shared-adapter' import { isZodSchemaV3 } from '@langchain/core/utils/types' +import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat' +import { isMessageContentImageUrl } from 'koishi-plugin-chatluna/utils/string' -export function langchainMessageToWenXinMessage( - messages: BaseMessage[] -): WenxinMessage[] { - const mappedMessage = messages.map((rawMessage) => { - const role = messageTypeToWenXinRole(rawMessage.getType()) - - const msg = { - content: (rawMessage.content as string) || null, - name: - role === 'assistant' || role === 'tool' - ? 
rawMessage.name - : undefined, - role, - // function_call: rawMessage.additional_kwargs.function_call, - tool_calls: rawMessage.additional_kwargs.tool_calls, - tool_call_id: (rawMessage as ToolMessage).tool_call_id - } +export async function langchainMessageToWenXinMessage( + messages: BaseMessage[], + plugin: ChatLunaPlugin, + model?: string +): Promise { + const mappedMessage = await Promise.all( + messages.map(async (rawMessage) => { + const role = messageTypeToWenXinRole(rawMessage.getType()) - if (msg.tool_calls == null) { - delete msg.tool_calls - } + const msg = { + content: (rawMessage.content ?? + undefined) as WenxinMessage['content'], + name: + role === 'assistant' || role === 'tool' + ? rawMessage.name + : undefined, + role, + // function_call: rawMessage.additional_kwargs.function_call, + tool_calls: rawMessage.additional_kwargs.tool_calls, + tool_call_id: (rawMessage as ToolMessage).tool_call_id + } - if (msg.tool_call_id == null) { - delete msg.tool_call_id - } + if (msg.tool_calls == null) { + delete msg.tool_calls + } + + if (msg.tool_call_id == null) { + delete msg.tool_call_id + } - if (msg.tool_calls) { - for (const toolCall of msg.tool_calls) { - const tool = toolCall.function + if (msg.tool_calls) { + for (const toolCall of msg.tool_calls) { + const tool = toolCall.function - if (!tool.arguments) { - continue + if (!tool.arguments) { + continue + } + // Remove spaces, new line characters etc. + tool.arguments = JSON.stringify(JSON.parse(tool.arguments)) } - // Remove spaces, new line characters etc. - tool.arguments = JSON.stringify(JSON.parse(tool.arguments)) } - } - return msg - }) + const images = rawMessage.additional_kwargs.images as + | string[] + | null + + if (supportImageInput(model ?? '') && images != null) { + msg.content = [ + { + type: 'text', + text: (rawMessage.content as string) || '' + } + ] + + const imageContents = await Promise.all( + images.map(async (image) => { + try { + const url = await fetchImageUrl(plugin, { + type: 'image_url', + image_url: { url: image } + } as MessageContentImageUrl) + return { + type: 'image_url', + image_url: { + url, + detail: 'low' + } + } as const + } catch { + return null + } + }) + ) + + msg.content.push( + ...imageContents.filter((content) => content != null) + ) + } else if (Array.isArray(msg.content) && msg.content.length > 0) { + const mappedContent = await Promise.all( + msg.content.map(async (content) => { + if (!isMessageContentImageUrl(content)) return content + + try { + const url = await fetchImageUrl(plugin, content) + return { + type: 'image_url', + image_url: { + url, + detail: 'low' + } + } as const + } catch { + return null + } + }) + ) + + msg.content = mappedContent.filter((content) => content != null) + } + + return msg + }) + ) const result: WenxinMessage[] = [] From bbbb27b1fab2d0e10d78d199b4ecc05aa7a20b91 Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 19:09:33 +0800 Subject: [PATCH 3/8] feat(adapter-doubao,shared-adapter): refactor reasoning effort model variants Refactor reasoning effort model handling with centralized variant expansion. Update Doubao adapter with latest models and improve thinking model detection using standardized parsing. 
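As an illustration (reconstructed by hand from the suffix list this series adds; not captured from adapter output), expanding one base model yields one virtual variant per reasoning-effort suffix:

    import { expandReasoningEffortModelVariants } from '@chatluna/v1-shared-adapter'

    // Maps each entry of reasoningEffortModelSuffixes to `${model}-${suffix}`
    expandReasoningEffortModelVariants('doubao-seed-1-8-251228')
    // => [
    //     'doubao-seed-1-8-251228-non-thinking',
    //     'doubao-seed-1-8-251228-minimal-thinking',
    //     'doubao-seed-1-8-251228-low-thinking',
    //     'doubao-seed-1-8-251228-medium-thinking',
    //     'doubao-seed-1-8-251228-high-thinking',
    //     'doubao-seed-1-8-251228-xhigh-thinking',
    //     'doubao-seed-1-8-251228-thinking'
    // ]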
- Add expandReasoningEffortModelVariants utility for consistent model variant generation - Update parseOpenAIModelNameWithReasoningEffort usage in Doubao requester - Add latest Doubao models (seed-1-8, deepseek-v3-2, glm-4-7, kimi-k2-thinking) - Expand reasoning effort model list with new variants (non-thinking, minimal, low, medium, high, xhigh) - Add reasoning effort models configuration for automatic variant expansion - Extend image input support to doubao-seed-1-8 models --- packages/adapter-doubao/src/client.ts | 46 ++++++++++++++++++++---- packages/adapter-doubao/src/requester.ts | 26 +++++++------- packages/shared-adapter/src/client.ts | 14 ++++++++ packages/shared-adapter/src/requester.ts | 15 ++++---- 4 files changed, 73 insertions(+), 28 deletions(-) diff --git a/packages/adapter-doubao/src/client.ts b/packages/adapter-doubao/src/client.ts index b9e63906f..fd0a71c1e 100644 --- a/packages/adapter-doubao/src/client.ts +++ b/packages/adapter-doubao/src/client.ts @@ -18,6 +18,7 @@ import { import { Config } from '.' import { DoubaoRequester } from './requester' import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat' +import { expandReasoningEffortModelVariants } from '@chatluna/v1-shared-adapter' export class DouBaoClient extends PlatformModelAndEmbeddingsClient { platform = 'doubao' @@ -41,14 +42,16 @@ export class DouBaoClient extends PlatformModelAndEmbeddingsClient async refreshModels(): Promise { const rawModels: [string, number | undefined][] = [ + ['doubao-seed-1-8-251228', 256000], ['doubao-seed-1-6-flash-250715', 256000], - ['doubao-seed-1-6-thinking-250715', 256000], - ['doubao-seed-1-6-250615', 256000], - ['doubao-seed-1-6-250615-non-thinking', 256000], - ['doubao-seed-1-6-250615-thinking', 256000], - ['doubao-seed-1-6-vision-250815', 256000], + ['doubao-seed-1-6-251015', 256000], + ['doubao-seed-1-6-lite-251015', 256000], + ['doubao-seed-1-6-flash-250828', 256000], ['doubao-1.5-vision-pro-250328', 128000], ['deepseek-v3-1-250821', 128000], + ['deepseek-v3-2-251201', 128000], + ['glm-4-7-251222', 200000], + ['kimi-k2-thinking-251104', 256000], ['kimi-k2-250711', 128000], ['doubao-1.5-vision-lite-250315', 128000], ['doubao-1-5-thinking-vision-pro-250428', 128000], @@ -68,9 +71,38 @@ export class DouBaoClient extends PlatformModelAndEmbeddingsClient 'doubao-1.5-vision-lite-250315' ] - const imageInputSupportModels = ['doubao-seed-1-6', 'vision'] + const reasoningEffortModels = [ + 'doubao-seed-1-6-lite-251015', + 'doubao-seed-1-6-251015', + 'doubao-seed-1-8-251228' + ] + + const imageInputSupportModels = [ + 'doubao-seed-1-6', + 'vision', + 'doubao-seed-1-8' + ] + + const expandedModels: [string, number | undefined][] = [] + const seen = new Set() + + const push = (model: string, token?: number) => { + if (seen.has(model)) return + seen.add(model) + expandedModels.push([model, token]) + } + + for (const [model, token] of rawModels) { + push(model, token) + + if (!reasoningEffortModels.includes(model)) continue + + for (const variant of expandReasoningEffortModelVariants(model)) { + push(variant, token) + } + } - return rawModels.map(([model, token]) => { + return expandedModels.map(([model, token]) => { return { name: model, type: model.includes('embedding') diff --git a/packages/adapter-doubao/src/requester.ts b/packages/adapter-doubao/src/requester.ts index da72ff894..54c28814d 100644 --- a/packages/adapter-doubao/src/requester.ts +++ b/packages/adapter-doubao/src/requester.ts @@ -16,6 +16,7 @@ import { buildChatCompletionParams, createEmbeddings, 
createRequestContext, + parseOpenAIModelNameWithReasoningEffort, processStreamResponse } from '@chatluna/v1-shared-adapter' import * as fetchType from 'undici/types/fetch' @@ -49,18 +50,17 @@ export class DoubaoRequester this ) - let model = params.model - let enabledThinking: boolean | undefined = null - - if (model.includes('thinking') && model.slice(-8) === 'thinking') { - enabledThinking = !model.includes('-non-thinking') - model = model - .replace('-non-thinking', '-thinking') - .replace('-thinking', '') - } else if ( - model.includes('thinking') && - model.slice(-8) !== 'thinking' - ) { + const parsedModel = parseOpenAIModelNameWithReasoningEffort( + params.model + ) + const model = parsedModel.model + let enabledThinking: boolean | undefined + + if (parsedModel.reasoningEffort != null) { + enabledThinking = parsedModel.reasoningEffort !== 'none' + } + + if (enabledThinking == null && model.includes('thinking')) { enabledThinking = true } @@ -75,7 +75,7 @@ export class DoubaoRequester } } - if (enabledThinking !== null) { + if (enabledThinking != null) { baseRequest.thinking = { type: enabledThinking ? 'enabled' : 'disabled' } diff --git a/packages/shared-adapter/src/client.ts b/packages/shared-adapter/src/client.ts index 5aaf2feb9..0aa0f9b47 100644 --- a/packages/shared-adapter/src/client.ts +++ b/packages/shared-adapter/src/client.ts @@ -9,6 +9,20 @@ export type OpenAIReasoningEffort = | 'high' | 'xhigh' +export const reasoningEffortModelSuffixes = [ + 'non-thinking', + 'minimal-thinking', + 'low-thinking', + 'medium-thinking', + 'high-thinking', + 'xhigh-thinking', + 'thinking' +] as const + +export function expandReasoningEffortModelVariants(model: string): string[] { + return reasoningEffortModelSuffixes.map((suffix) => `${model}-${suffix}`) +} + export function parseOpenAIModelNameWithReasoningEffort(modelName: string): { model: string reasoningEffort?: OpenAIReasoningEffort diff --git a/packages/shared-adapter/src/requester.ts b/packages/shared-adapter/src/requester.ts index fcfffa84d..431560704 100644 --- a/packages/shared-adapter/src/requester.ts +++ b/packages/shared-adapter/src/requester.ts @@ -29,7 +29,10 @@ import { getMessageContent } from 'koishi-plugin-chatluna/utils/string' import { RunnableConfig } from '@langchain/core/runnables' import { trackLogToLocal } from 'koishi-plugin-chatluna/utils/logger' import { deepAssign } from 'koishi-plugin-chatluna/utils/object' -import { parseOpenAIModelNameWithReasoningEffort } from './client' +import { + expandReasoningEffortModelVariants, + parseOpenAIModelNameWithReasoningEffort +} from './client' interface RequestContext< T extends ClientConfig = ClientConfig, @@ -475,13 +478,9 @@ export async function getModels< // OpenAI-style "thinking" via model suffixes. These are virtual // variants that map to request params (e.g. reasoning_effort). - push(`${model}-non-thinking`) - push(`${model}`) - push(`${model}-minimal-thinking`) - push(`${model}-low-thinking`) - push(`${model}-medium-thinking`) - push(`${model}-high-thinking`) - push(`${model}-xhigh-thinking`) + for (const variant of expandReasoningEffortModelVariants(model)) { + push(variant) + } } return expanded From ad418143e47e18be85f7fdec967d099e2d4394a0 Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 19:14:19 +0800 Subject: [PATCH 4/8] feat(adapter-spark): update model from spark-x1 to spark-x1.5 Replace deprecated spark-x1 model with spark-x1.5, maintaining 128k token context and tool call capabilities. 
--- packages/adapter-spark/src/client.ts | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/packages/adapter-spark/src/client.ts b/packages/adapter-spark/src/client.ts index 7bb333015..df8668d99 100644 --- a/packages/adapter-spark/src/client.ts +++ b/packages/adapter-spark/src/client.ts @@ -43,7 +43,7 @@ export class SparkClient extends PlatformModelClient { ['spark-max', 8192], ['spark-max-32k', 32768], ['spark-4.0-ultra', 128000], - ['spark-x1', 128000] + ['spark-x1.5', 128000] ] as [string, number][] const result: SparkModelInfo[] = [] @@ -55,7 +55,7 @@ export class SparkClient extends PlatformModelClient { capabilities: [ (model.startsWith('spark-max') || model.startsWith('spark-4.0-ultra') || - model === 'spark-x1') && + model === 'spark-x1.5') && ModelCapabilities.ToolCall ] }) From 2adb93ab529c175e85e0953fecb83db4faf13413 Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 19:30:09 +0800 Subject: [PATCH 5/8] feat(adapter-claude,shared-adapter): update Claude models and add kimi-k2.5 image support Update Claude model list to latest versions and add kimi-k2.5 to vision model matchers. - Remove deprecated Claude 3 models (sonnet-20240620, opus-20240229, sonnet-20240229, haiku-20240307) - Add latest Claude models (3.7 sonnet thinking, sonnet-4.5, opus-4.5, opus-4-1) - Replace claude-3-5-haiku with claude-haiku-4-5-20251001 - Add kimi-k2.5 to image input model matchers --- packages/adapter-claude/src/client.ts | 7 ++----- packages/shared-adapter/src/client.ts | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/packages/adapter-claude/src/client.ts b/packages/adapter-claude/src/client.ts index d397c743f..03badf9d4 100644 --- a/packages/adapter-claude/src/client.ts +++ b/packages/adapter-claude/src/client.ts @@ -33,9 +33,6 @@ export class ClaudeClient extends PlatformModelClient { async refreshModels(): Promise { const fallbackModels = [ - 'claude-3-5-sonnet-20240620', - 'claude-3-opus-20240229', - 'claude-3-sonnet-20240229', 'claude-3-5-sonnet-20241022', 'claude-3-7-sonnet-20250219', 'claude-3-7-sonnet-thinking-20250219', @@ -44,8 +41,8 @@ export class ClaudeClient extends PlatformModelClient { 'claude-sonnet-4-5-20250929', 'claude-opus-4-5-20251101', 'claude-opus-4-1-20250805', - 'claude-3-5-haiku-20241022', - 'claude-3-haiku-20240307' + 'claude-haiku-4-5-20251001', + 'claude-3-5-haiku-20241022' ] try { diff --git a/packages/shared-adapter/src/client.ts b/packages/shared-adapter/src/client.ts index 0aa0f9b47..ce9fe62ca 100644 --- a/packages/shared-adapter/src/client.ts +++ b/packages/shared-adapter/src/client.ts @@ -155,6 +155,7 @@ const imageModelMatchers = [ 'gpt-4.1', 'gpt-5', 'glm-*v', + 'kimi-k2.5', 'step3', 'grok-4' ].map((pattern) => createGlobMatcher(pattern)) From 12a027166099cdbf9a99a5de5aa08a40de633e7a Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 19:44:10 +0800 Subject: [PATCH 6/8] feat(adapter-qwen): add qwen3-max thinking model variants Add qwen3-max thinking and non-thinking variants with 262k token context window support. 
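The new names follow the same thinking/non-thinking suffix convention as the existing qwen-plus-latest and qwen-turbo-latest entries and the shared-adapter parser from PATCH 3. A hedged sketch of the expected parse (the return shape and the 'none' sentinel are visible elsewhere in this series; the exact effort value mapped from a bare '-thinking' suffix is an assumption):

    import { parseOpenAIModelNameWithReasoningEffort } from '@chatluna/v1-shared-adapter'

    parseOpenAIModelNameWithReasoningEffort('qwen3-max-2026-01-23-non-thinking')
    // => { model: 'qwen3-max-2026-01-23', reasoningEffort: 'none' }  (assumed)

    parseOpenAIModelNameWithReasoningEffort('qwen3-max-2026-01-23-thinking')
    // => { model: 'qwen3-max-2026-01-23', reasoningEffort: <non-'none' value> }  (assumed)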
--- packages/adapter-qwen/src/client.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/adapter-qwen/src/client.ts b/packages/adapter-qwen/src/client.ts index 2f1ea3c6f..47c81e444 100644 --- a/packages/adapter-qwen/src/client.ts +++ b/packages/adapter-qwen/src/client.ts @@ -50,6 +50,8 @@ export class QWenClient extends PlatformModelAndEmbeddingsClient { ['qwen-max', 30720], ['qwen-max-latest', 131_072], ['qwen3-max', 262_144], + ['qwen3-max-2026-01-23-thinking', 262_144], + ['qwen3-max-2026-01-23-non-thinking', 262_144], ['qwen-plus-latest-non-thinking', 1_000_000], ['qwen-plus-latest-thinking', 1_000_000], ['qwen-turbo-latest-non-thinking', 1_000_000], From c817562a8190ed032f32b42da333d855fee45738 Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 19:57:38 +0800 Subject: [PATCH 7/8] fix(adapter-wenxin): add defensive normalization for content and images handling Add type-safe normalization helpers to prevent runtime errors when processing message content and images: - Add normalizeContentToString() to safely convert content from string or ContentBlock[] to string - Add extractImages() to safely validate and extract string[] from images with type guards - Use Array.isArray() to verify images before mapping, treating invalid types as empty array - Handle content normalization for all code paths (initial setup and array processing) - Add proper fallbacks for unknown types to prevent crashes - Maintain existing supportImageInput, fetchImageUrl, and isMessageContentImageUrl logic This guards against runtime errors from unexpected type casting and array operations. --- packages/adapter-wenxin/src/utils.ts | 48 +++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 5 deletions(-) diff --git a/packages/adapter-wenxin/src/utils.ts b/packages/adapter-wenxin/src/utils.ts index 8e71da2c0..4b2a82af9 100644 --- a/packages/adapter-wenxin/src/utils.ts +++ b/packages/adapter-wenxin/src/utils.ts @@ -27,6 +27,41 @@ import { isZodSchemaV3 } from '@langchain/core/utils/types' import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat' import { isMessageContentImageUrl } from 'koishi-plugin-chatluna/utils/string' +/** + * Safely normalize content to a string, handling both string and array formats + */ +function normalizeContentToString(content: unknown): string { + if (typeof content === 'string') { + return content + } + + if (Array.isArray(content)) { + // Extract text from array of content blocks + const textParts: string[] = [] + for (const block of content) { + if (block && typeof block === 'object') { + if ('type' in block && block.type === 'text' && 'text' in block && typeof block.text === 'string') { + textParts.push(block.text) + } + } + } + return textParts.join(' ') + } + + // Fallback for unknown types + return '' +} + +/** + * Safely extract images array, ensuring it's actually an array + */ +function extractImages(images: unknown): string[] { + if (Array.isArray(images)) { + return images.filter((img): img is string => typeof img === 'string') + } + return [] +} + export async function langchainMessageToWenXinMessage( messages: BaseMessage[], plugin: ChatLunaPlugin, @@ -69,15 +104,18 @@ export async function langchainMessageToWenXinMessage( } } - const images = rawMessage.additional_kwargs.images as - | string[] - | null + // Safely extract and normalize images + const rawImages = rawMessage.additional_kwargs.images + const images = extractImages(rawImages) + + // Normalize content to string for initial setup + const normalizedContent = 
normalizeContentToString(rawMessage.content) - if (supportImageInput(model ?? '') && images != null) { + if (supportImageInput(model ?? '') && images.length > 0) { msg.content = [ { type: 'text', - text: (rawMessage.content as string) || '' + text: normalizedContent } ] From e9ec6f71d12c8cd4e377f823d18f270a18a94f5a Mon Sep 17 00:00:00 2001 From: dingyi Date: Tue, 3 Feb 2026 19:58:52 +0800 Subject: [PATCH 8/8] style(adapter-wenxin): improve code formatting in normalization helpers Format long conditional checks and function calls for better readability across multiple lines. --- packages/adapter-wenxin/src/utils.ts | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/packages/adapter-wenxin/src/utils.ts b/packages/adapter-wenxin/src/utils.ts index 4b2a82af9..e7138af17 100644 --- a/packages/adapter-wenxin/src/utils.ts +++ b/packages/adapter-wenxin/src/utils.ts @@ -40,7 +40,12 @@ function normalizeContentToString(content: unknown): string { const textParts: string[] = [] for (const block of content) { if (block && typeof block === 'object') { - if ('type' in block && block.type === 'text' && 'text' in block && typeof block.text === 'string') { + if ( + 'type' in block && + block.type === 'text' && + 'text' in block && + typeof block.text === 'string' + ) { textParts.push(block.text) } } @@ -109,7 +114,9 @@ export async function langchainMessageToWenXinMessage( const images = extractImages(rawImages) // Normalize content to string for initial setup - const normalizedContent = normalizeContentToString(rawMessage.content) + const normalizedContent = normalizeContentToString( + rawMessage.content + ) if (supportImageInput(model ?? '') && images.length > 0) { msg.content = [
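// Usage sketch (illustrative; behavior derived from the helper
// implementations added in PATCH 7, not from test output):
//
//   normalizeContentToString('hello')            // => 'hello'
//   normalizeContentToString([
//       { type: 'text', text: 'a' },
//       { type: 'image_url', image_url: { url: 'img://x' } },
//       { type: 'text', text: 'b' }
//   ])                                           // => 'a b' (text blocks joined with a space)
//   normalizeContentToString(undefined)          // => '' (fallback for unknown content types)
//
//   extractImages(['https://a/1.png', 42, null]) // => ['https://a/1.png'] (non-strings dropped)
//   extractImages('not-an-array')                // => [] (non-arrays treated as empty)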