7 changes: 2 additions & 5 deletions packages/adapter-claude/src/client.ts
@@ -33,9 +33,6 @@ export class ClaudeClient extends PlatformModelClient<ClientConfig> {

async refreshModels(): Promise<ModelInfo[]> {
const fallbackModels = [
'claude-3-5-sonnet-20240620',
'claude-3-opus-20240229',
'claude-3-sonnet-20240229',
'claude-3-5-sonnet-20241022',
'claude-3-7-sonnet-20250219',
'claude-3-7-sonnet-thinking-20250219',
@@ -44,8 +41,8 @@ export class ClaudeClient extends PlatformModelClient<ClientConfig> {
'claude-sonnet-4-5-20250929',
'claude-opus-4-5-20251101',
'claude-opus-4-1-20250805',
'claude-3-5-haiku-20241022',
'claude-3-haiku-20240307'
'claude-haiku-4-5-20251001',
'claude-3-5-haiku-20241022'
]

try {
46 changes: 39 additions & 7 deletions packages/adapter-doubao/src/client.ts
@@ -18,6 +18,7 @@ import {
import { Config } from '.'
import { DoubaoRequester } from './requester'
import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat'
import { expandReasoningEffortModelVariants } from '@chatluna/v1-shared-adapter'

export class DouBaoClient extends PlatformModelAndEmbeddingsClient<ClientConfig> {
platform = 'doubao'
@@ -41,14 +42,16 @@ export class DouBaoClient extends PlatformModelAndEmbeddingsClient<ClientConfig>

async refreshModels(): Promise<ModelInfo[]> {
const rawModels: [string, number | undefined][] = [
['doubao-seed-1-8-251228', 256000],
['doubao-seed-1-6-flash-250715', 256000],
['doubao-seed-1-6-thinking-250715', 256000],
['doubao-seed-1-6-250615', 256000],
['doubao-seed-1-6-250615-non-thinking', 256000],
['doubao-seed-1-6-250615-thinking', 256000],
['doubao-seed-1-6-vision-250815', 256000],
['doubao-seed-1-6-251015', 256000],
['doubao-seed-1-6-lite-251015', 256000],
['doubao-seed-1-6-flash-250828', 256000],
['doubao-1.5-vision-pro-250328', 128000],
['deepseek-v3-1-250821', 128000],
['deepseek-v3-2-251201', 128000],
['glm-4-7-251222', 200000],
['kimi-k2-thinking-251104', 256000],
['kimi-k2-250711', 128000],
['doubao-1.5-vision-lite-250315', 128000],
['doubao-1-5-thinking-vision-pro-250428', 128000],
@@ -68,9 +71,38 @@ export class DouBaoClient extends PlatformModelAndEmbeddingsClient<ClientConfig>
'doubao-1.5-vision-lite-250315'
]

const imageInputSupportModels = ['doubao-seed-1-6', 'vision']
const reasoningEffortModels = [
'doubao-seed-1-6-lite-251015',
'doubao-seed-1-6-251015',
'doubao-seed-1-8-251228'
]

const imageInputSupportModels = [
'doubao-seed-1-6',
'vision',
'doubao-seed-1-8'
]

const expandedModels: [string, number | undefined][] = []
const seen = new Set<string>()

const push = (model: string, token?: number) => {
if (seen.has(model)) return
seen.add(model)
expandedModels.push([model, token])
}

for (const [model, token] of rawModels) {
push(model, token)

if (!reasoningEffortModels.includes(model)) continue

for (const variant of expandReasoningEffortModelVariants(model)) {
push(variant, token)
}
}

return rawModels.map(([model, token]) => {
return expandedModels.map(([model, token]) => {
return {
name: model,
type: model.includes('embedding')
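For reference, a minimal sketch of what the dedup-and-expand loop above plausibly yields. The effort-suffix set is an assumption for illustration; the real `expandReasoningEffortModelVariants` is exported by `@chatluna/v1-shared-adapter` and may use different suffixes:

```ts
// Hypothetical stand-in for the shared helper; the actual suffix set
// may differ.
function expandReasoningEffortModelVariantsSketch(model: string): string[] {
    return ['minimal', 'low', 'medium', 'high', 'none'].map(
        (effort) => `${model}-${effort}`
    )
}

// Fed through the dedup loop above, ['doubao-seed-1-8-251228', 256000]
// would become six entries sharing the 256000-token window:
// 'doubao-seed-1-8-251228', 'doubao-seed-1-8-251228-minimal', ...,
// 'doubao-seed-1-8-251228-none' — with the Set guarding against
// duplicates if a variant name already appears in rawModels.
```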
26 changes: 13 additions & 13 deletions packages/adapter-doubao/src/requester.ts
@@ -16,6 +16,7 @@ import {
buildChatCompletionParams,
createEmbeddings,
createRequestContext,
parseOpenAIModelNameWithReasoningEffort,
processStreamResponse
} from '@chatluna/v1-shared-adapter'
import * as fetchType from 'undici/types/fetch'
@@ -49,18 +50,17 @@ export class DoubaoRequester
this
)

let model = params.model
let enabledThinking: boolean | undefined = null

if (model.includes('thinking') && model.slice(-8) === 'thinking') {
enabledThinking = !model.includes('-non-thinking')
model = model
.replace('-non-thinking', '-thinking')
.replace('-thinking', '')
} else if (
model.includes('thinking') &&
model.slice(-8) !== 'thinking'
) {
const parsedModel = parseOpenAIModelNameWithReasoningEffort(
params.model
)
const model = parsedModel.model
let enabledThinking: boolean | undefined

if (parsedModel.reasoningEffort != null) {
enabledThinking = parsedModel.reasoningEffort !== 'none'
}

if (enabledThinking == null && model.includes('thinking')) {
enabledThinking = true
}

Expand All @@ -75,7 +75,7 @@ export class DoubaoRequester
}
}

if (enabledThinking !== null) {
if (enabledThinking != null) {
baseRequest.thinking = {
type: enabledThinking ? 'enabled' : 'disabled'
}
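The requester now derives the `thinking` flag from the shared parser instead of string surgery on the model name. A sketch of the assumed parser contract and the resulting mapping — the real `parseOpenAIModelNameWithReasoningEffort` lives in `@chatluna/v1-shared-adapter`, and the suffix grammar below is an assumption:

```ts
interface ParsedModelName {
    model: string
    reasoningEffort?: string
}

// Hypothetical parser: strips a trailing reasoning-effort suffix if present.
function parseModelNameSketch(name: string): ParsedModelName {
    const match = name.match(/-(none|minimal|low|medium|high)$/)
    if (!match) return { model: name }
    return {
        model: name.slice(0, -match[0].length),
        reasoningEffort: match[1]
    }
}

// Mapping used in the hunk above:
//   reasoningEffort === 'none'        -> thinking: { type: 'disabled' }
//   any other parsed effort           -> thinking: { type: 'enabled' }
//   no suffix, name has 'thinking'    -> thinking: { type: 'enabled' }
//   otherwise                         -> thinking field omitted entirely
```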
2 changes: 2 additions & 0 deletions packages/adapter-qwen/src/client.ts
@@ -50,6 +50,8 @@ export class QWenClient extends PlatformModelAndEmbeddingsClient {
['qwen-max', 30720],
['qwen-max-latest', 131_072],
['qwen3-max', 262_144],
['qwen3-max-2026-01-23-thinking', 262_144],
['qwen3-max-2026-01-23-non-thinking', 262_144],
['qwen-plus-latest-non-thinking', 1_000_000],
['qwen-plus-latest-thinking', 1_000_000],
['qwen-turbo-latest-non-thinking', 1_000_000],
4 changes: 2 additions & 2 deletions packages/adapter-spark/src/client.ts
@@ -43,7 +43,7 @@ export class SparkClient extends PlatformModelClient<SparkClientConfig> {
['spark-max', 8192],
['spark-max-32k', 32768],
['spark-4.0-ultra', 128000],
['spark-x1', 128000]
['spark-x1.5', 128000]
] as [string, number][]
const result: SparkModelInfo[] = []

@@ -55,7 +55,7 @@ export class SparkClient extends PlatformModelClient<SparkClientConfig> {
capabilities: [
(model.startsWith('spark-max') ||
model.startsWith('spark-4.0-ultra') ||
model === 'spark-x1') &&
model === 'spark-x1.5') &&
ModelCapabilities.ToolCall
]
})
25 changes: 23 additions & 2 deletions packages/adapter-wenxin/src/client.ts
@@ -6,6 +6,7 @@ import {
ChatLunaEmbeddings
} from 'koishi-plugin-chatluna/llm-core/platform/model'
import {
ModelCapabilities,
ModelInfo,
ModelType
} from 'koishi-plugin-chatluna/llm-core/platform/types'
@@ -17,6 +18,7 @@ import {
} from 'koishi-plugin-chatluna/utils/error'
import { WenxinRequester } from './requester'
import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat'
import { supportImageInput } from '@chatluna/v1-shared-adapter'

export class WenxinClient extends PlatformModelAndEmbeddingsClient<ClientConfig> {
platform = 'wenxin'
@@ -40,6 +42,22 @@

async refreshModels(): Promise<ModelInfo[]> {
const rawModels = [
['ernie-5.0-thinking-preview', 128000], // ERNIE 5.0 Thinking Preview
['ernie-5.0-thinking-latest', 128000], // ERNIE 5.0 Thinking Latest
['ernie-5.0-thinking-exp', 128000], // ERNIE 5.0 Thinking Exp
['ernie-4.5-turbo-128k', 128000], // ERNIE 4.5 Turbo 128K
['ernie-4.5-turbo-128k-preview', 128000], // ERNIE 4.5 Turbo 128K Preview
['ernie-4.5-turbo-32k', 32000], // ERNIE 4.5 Turbo 32K
['ernie-4.5-turbo-latest', 128000], // ERNIE 4.5 Turbo Latest
['ernie-4.5-turbo-vl-preview', 128000], // ERNIE 4.5 Turbo VL Preview
['ernie-4.5-turbo-vl', 128000], // ERNIE 4.5 Turbo VL
['ernie-4.5-turbo-vl-32k', 32000], // ERNIE 4.5 Turbo VL 32K
['ernie-4.5-turbo-vl-32k-preview', 32000], // ERNIE 4.5 Turbo VL 32K Preview
['ernie-4.5-turbo-vl-latest', 128000], // ERNIE 4.5 Turbo VL Latest
['ernie-char-8k', 8000], // ERNIE Character 8K
['ernie-4.5-0.3b', 128000], // ERNIE 4.5 0.3B
['ernie-4.5-21b-a3b', 128000], // ERNIE 4.5 21B A3B
['ernie-4.5-vl-28b-a3b', 32000], // ERNIE 4.5 VL 28B A3B
['ernie-4.0-8k', 8000], // ERNIE-4.0-8K
['ernie-4.0-8k-preview', 8000], // ERNIE-4.0-8K-Preview
['ernie-4.0-8k-latest', 8000], // ERNIE-4.0-8K-Latest
@@ -70,7 +88,9 @@ export class WenxinClient extends PlatformModelAndEmbeddingsClient<ClientConfig>
return {
name: model,
type: ModelType.llm,
capabilities: [],
capabilities: [
supportImageInput(model) && ModelCapabilities.ImageInput
].filter(Boolean),
supportMode: ['all'],
maxTokens
} as ModelInfo
@@ -111,7 +131,8 @@
temperature: this._config.temperature,
maxRetries: this._config.maxRetries,
llmType: 'wenxin',
isThinkModel: model.includes('reasoner')
isThinkModel:
model.includes('reasoner') || model.includes('thinking')
})
}

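The new capability array relies on a truthy filter: when `supportImageInput(model)` is false, the `false && …` entry collapses out of the list. A small sketch with an assumed substring-based check — the actual predicate is exported by `@chatluna/v1-shared-adapter` and may match differently:

```ts
import { ModelCapabilities } from 'koishi-plugin-chatluna/llm-core/platform/types'

// Assumption: vision-capable ERNIE models carry 'vl' or 'vision' in their
// names. The real supportImageInput check may differ.
const supportImageInputSketch = (model: string) =>
    model.includes('-vl') || model.includes('vision')

const capabilitiesFor = (model: string) =>
    [
        supportImageInputSketch(model) && ModelCapabilities.ImageInput
    ].filter(Boolean)

capabilitiesFor('ernie-4.5-turbo-vl') // -> [ModelCapabilities.ImageInput]
capabilitiesFor('ernie-4.0-8k') // -> []
```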
9 changes: 6 additions & 3 deletions packages/adapter-wenxin/src/requester.ts
@@ -47,9 +47,12 @@ export class WenxinRequester
): AsyncGenerator<ChatGenerationChunk> {
await this.init()

const messagesMapped: WenxinMessage[] = langchainMessageToWenXinMessage(
params.input
)
const messagesMapped: WenxinMessage[] =
await langchainMessageToWenXinMessage(
params.input,
this._plugin,
params.model
)

try {
const response = await this._post(
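The message conversion is now awaited and receives the plugin and model name, presumably so image attachments can be resolved for vision-capable models. A rough sketch of the assumed shape — the real helper lives in this adapter, and `resolveMultimodalParts` below is hypothetical:

```ts
import { BaseMessage } from '@langchain/core/messages'
import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat'

// Assumption: the conversion is async so image URLs in multimodal content
// can be fetched/encoded before building WenxinMessage parts. System/tool
// role handling is omitted in this sketch.
async function toWenxinMessagesSketch(
    messages: BaseMessage[],
    plugin: ChatLunaPlugin,
    model: string
): Promise<WenxinMessage[]> {
    return Promise.all(
        messages.map(async (message) => {
            const role: WenxinMessageRole =
                message.getType() === 'ai' ? 'assistant' : 'user'
            const content =
                typeof message.content === 'string'
                    ? message.content
                    : // hypothetical helper that fetches/inlines image parts
                      await resolveMultimodalParts(message.content, plugin, model)
            return { role, content }
        })
    )
}
```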
18 changes: 17 additions & 1 deletion packages/adapter-wenxin/src/types.ts
@@ -5,9 +5,25 @@ export type WenxinMessageRole =
| 'function'
| 'tool'

export type WenxinMessageContent =
| string
| (
| {
type: 'text'
text: string
}
| {
type: 'image_url'
image_url: {
url: string
detail?: 'low' | 'high'
}
}
)[]

export interface WenxinMessage {
role: WenxinMessageRole
content?: string
content?: WenxinMessageContent

name?: string
tool_calls?: ChatCompletionRequestMessageToolCall[]
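Usage under the widened `content` type defined above: plain strings still type-check, and image parts follow the OpenAI-style `image_url` shape:

```ts
// Text-only messages are unchanged.
const textOnly: WenxinMessage = {
    role: 'user',
    content: 'Describe this picture.'
}

// Multimodal content mixes text and image_url parts.
const withImage: WenxinMessage = {
    role: 'user',
    content: [
        { type: 'text', text: 'Describe this picture.' },
        {
            type: 'image_url',
            image_url: { url: 'data:image/png;base64,...', detail: 'low' }
        }
    ]
}
```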