Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 20 additions & 0 deletions BitFun-Installer/src/data/modelProviders.ts
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,8 @@ export const PROVIDER_DISPLAY_ORDER: string[] = [
'deepseek',
'volcengine',
'siliconflow',
'nvidia',
'openrouter',
'minimax',
'moonshot',
'anthropic',
Expand Down Expand Up @@ -161,6 +163,24 @@ export const PROVIDER_TEMPLATES: Record<string, ProviderTemplate> = {
},
],
},
nvidia: {
id: 'nvidia',
nameKey: 'model.providers.nvidia.name',
descriptionKey: 'model.providers.nvidia.description',
baseUrl: 'https://integrate.api.nvidia.com/v1',
format: 'openai',
models: [],
helpUrl: 'https://build.nvidia.com/settings/api-keys',
},
openrouter: {
id: 'openrouter',
nameKey: 'model.providers.openrouter.name',
descriptionKey: 'model.providers.openrouter.description',
baseUrl: 'https://openrouter.ai/api/v1',
format: 'openai',
models: [],
helpUrl: 'https://openrouter.ai/keys',
},
};

export function getOrderedProviders(): ProviderTemplate[] {
Expand Down
8 changes: 8 additions & 0 deletions BitFun-Installer/src/i18n/locales/en.json
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,14 @@
"default": "OpenAI Format - Default",
"anthropic": "Anthropic Format"
}
},
"nvidia": {
"name": "NVIDIA",
"description": "NVIDIA NIM Model Platform"
},
"openrouter": {
"name": "OpenRouter",
"description": "OpenRouter Model Platform"
}
},
"modelNameSelectPlaceholder": "Select a model...",
Expand Down
8 changes: 8 additions & 0 deletions BitFun-Installer/src/i18n/locales/zh.json
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,14 @@
"default": "OpenAI格式-默认",
"anthropic": "Anthropic格式"
}
},
"nvidia": {
"name": "NVIDIA",
"description": "NVIDIA NIM 大模型平台"
},
"openrouter": {
"name": "OpenRouter",
"description": "OpenRouter 大模型平台"
}
},
"modelNameSelectPlaceholder": "选择模型...",
Expand Down
2 changes: 1 addition & 1 deletion src/crates/core/src/agentic/image_analysis/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ impl ImageLimits {
/// Get limits based on model provider
pub fn for_provider(provider: &str) -> Self {
match provider.to_lowercase().as_str() {
"openai" | "response" | "responses" => Self {
"openai" | "response" | "responses" | "nvidia" | "openrouter" => Self {
max_size: 20 * 1024 * 1024, // 20MB
max_width: 2048,
max_height: 2048,
Expand Down
5 changes: 4 additions & 1 deletion src/crates/core/src/infrastructure/ai/client.rs
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,10 @@ impl AIClient {

fn build_test_connection_extra_body(&self) -> Option<serde_json::Value> {
let provider = self.config.format.to_ascii_lowercase();
if !matches!(provider.as_str(), "openai" | "response" | "responses") {
if !matches!(
provider.as_str(),
"openai" | "response" | "responses" | "nvidia" | "openrouter"
) {
return self.config.custom_request_body.clone();
}

Expand Down
18 changes: 17 additions & 1 deletion src/crates/core/src/util/types/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -57,7 +57,7 @@ fn resolve_request_url(base_url: &str, provider: &str, model_name: &str) -> Stri
}

match provider.trim().to_ascii_lowercase().as_str() {
"openai" => append_endpoint(&trimmed, "chat/completions"),
"openai" | "nvidia" | "openrouter" => append_endpoint(&trimmed, "chat/completions"),
"response" | "responses" => append_endpoint(&trimmed, "responses"),
"anthropic" => append_endpoint(&trimmed, "v1/messages"),
"gemini" | "google" => resolve_gemini_request_url(&trimmed, model_name),
Expand Down Expand Up @@ -151,6 +151,22 @@ mod tests {
"https://api.openbitfun.com/v1beta/models/gemini-2.5-pro:streamGenerateContent?alt=sse"
);
}

#[test]
fn resolves_nvidia_request_url() {
    // NVIDIA NIM exposes an OpenAI-compatible API, so the resolver must
    // append the standard `chat/completions` path to the configured base URL.
    let url = resolve_request_url("https://integrate.api.nvidia.com/v1", "nvidia", "");
    assert_eq!(url, "https://integrate.api.nvidia.com/v1/chat/completions");
}

#[test]
fn resolves_openrouter_request_url() {
    // OpenRouter is also OpenAI-compatible; the resolver should produce the
    // `chat/completions` endpoint under the provider's documented base URL.
    let url = resolve_request_url("https://openrouter.ai/api/v1", "openrouter", "");
    assert_eq!(url, "https://openrouter.ai/api/v1/chat/completions");
}
}

impl TryFrom<AIModelConfig> for AIConfig {
Expand Down
15 changes: 5 additions & 10 deletions src/web-ui/src/flow_chat/components/modern/VirtualMessageList.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -149,21 +149,16 @@ export const VirtualMessageList = forwardRef<VirtualMessageListRef>((_, ref) =>
scrollToBottom,
}), [scrollToTurn, scrollToIndex, scrollToBottom]);

// ── Core scroll policy: processing → auto-scroll to bottom ────────────
// ── Initial scroll to bottom when processing starts ──────────────────
// Note: followOutput handles continuous auto-scroll, so we only need
// an initial scroll here. The 300ms interval was removed because it
// conflicted with followOutput and caused visual jitter.
useEffect(() => {
if (!isProcessing) return;

if (virtuosoRef.current) {
virtuosoRef.current.scrollTo({ top: 999999999, behavior: 'smooth' });
virtuosoRef.current.scrollTo({ top: 999999999, behavior: 'auto' });
}

const intervalId = setInterval(() => {
if (virtuosoRef.current) {
virtuosoRef.current.scrollTo({ top: 999999999, behavior: 'smooth' });
}
}, 300);

return () => clearInterval(intervalId);
}, [isProcessing]);

const handleFollowOutput = useCallback(() => {
Expand Down
22 changes: 22 additions & 0 deletions src/web-ui/src/infrastructure/config/services/modelConfigs.ts
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,28 @@ export const PROVIDER_TEMPLATES: Record<string, ProviderTemplate> = {
{ url: 'https://api.siliconflow.cn/v1', format: 'openai', note: 'default' },
{ url: 'https://api.siliconflow.cn/v1/messages', format: 'anthropic', note: 'Anthropic' },
]
},

nvidia: {
id: 'nvidia',
name: t('settings/ai-model:providers.nvidia.name'),
baseUrl: 'https://integrate.api.nvidia.com/v1',
format: 'openai',
models: [],
requiresApiKey: true,
description: t('settings/ai-model:providers.nvidia.description'),
helpUrl: 'https://build.nvidia.com/settings/api-keys'
},

openrouter: {
id: 'openrouter',
name: t('settings/ai-model:providers.openrouter.name'),
baseUrl: 'https://openrouter.ai/api/v1',
format: 'openai',
models: [],
requiresApiKey: true,
description: t('settings/ai-model:providers.openrouter.description'),
helpUrl: 'https://openrouter.ai/keys'
}
};

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,14 @@ export const PROVIDER_URL_CATALOG: ProviderUrlCatalogItem[] = [
'https://api.siliconflow.cn/v1/messages',
],
},
{
id: 'nvidia',
baseUrl: 'https://integrate.api.nvidia.com/v1',
},
{
id: 'openrouter',
baseUrl: 'https://openrouter.ai/api/v1',
},
];

export function normalizeProviderBaseUrl(url: string): string {
Expand Down
8 changes: 8 additions & 0 deletions src/web-ui/src/locales/en-US/settings/ai-model.json
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,14 @@
"default": "OpenAI Format - Default",
"anthropic": "Anthropic Format"
}
},
"nvidia": {
"name": "NVIDIA",
"description": "NVIDIA NIM Model Platform"
},
"openrouter": {
"name": "OpenRouter",
"description": "OpenRouter Model Platform"
}
},
"tabs": {
Expand Down
8 changes: 8 additions & 0 deletions src/web-ui/src/locales/zh-CN/settings/ai-model.json
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,14 @@
"default": "OpenAI格式-默认",
"anthropic": "Anthropic格式"
}
},
"nvidia": {
"name": "NVIDIA",
"description": "NVIDIA NIM 大模型平台"
},
"openrouter": {
"name": "OpenRouter",
"description": "OpenRouter 大模型平台"
}
},
"tabs": {
Expand Down
Loading