From 2842c61f7cd54ce39b17e62302d78e225d08b0cf Mon Sep 17 00:00:00 2001 From: Will Chen Date: Wed, 3 Sep 2025 15:36:54 -0700 Subject: [PATCH] Improve model picker UX (#1180) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 1. Show less common AI providers (secondary) in submenu 2. Show $ signs for rough cost guide 3. Show "Pro" for supported AI providers when Pro is enabled --- ## Summary by cubic Improves the Model Picker UX by grouping less-used providers under an “Other AI providers” submenu and adding clear cost and Pro indicators. This makes picking models faster and more informative. - **New Features** - Grouped secondary providers under “Other AI providers” using a new provider.secondary flag (Azure marked secondary). - Added rough cost hints: models can set dollarSigns and the UI shows a “$” badge accordingly. - Shows a “Pro” badge on supported cloud providers when Pro is enabled; added a “Custom” badge for custom providers. - Extended types: LanguageModelProvider.secondary and LanguageModel.dollarSigns; populated values across OpenAI, Anthropic, Google, and OpenRouter. 
--- e2e-tests/engine.spec.ts | 4 +- e2e-tests/helpers/test_helper.ts | 1 + ...nd-message-to-engine---openai-gpt-5-1.txt} | 5 +- src/components/ModelPicker.tsx | 137 ++++++++++++++++-- src/ipc/ipc_types.ts | 3 + src/ipc/shared/language_model_helpers.ts | 46 +++--- 6 files changed, 153 insertions(+), 43 deletions(-) rename e2e-tests/snapshots/{engine.spec.ts_send-message-to-engine---openai-gpt-4-1-1.txt => engine.spec.ts_send-message-to-engine---openai-gpt-5-1.txt} (99%) diff --git a/e2e-tests/engine.spec.ts b/e2e-tests/engine.spec.ts index 4964d03..d8d432f 100644 --- a/e2e-tests/engine.spec.ts +++ b/e2e-tests/engine.spec.ts @@ -26,11 +26,11 @@ testSkipIfWindows( }, ); -testSkipIfWindows("send message to engine - openai gpt-4.1", async ({ po }) => { +testSkipIfWindows("send message to engine - openai gpt-5", async ({ po }) => { await po.setUpDyadPro(); // By default, it's using auto which points to Flash 2.5 and doesn't // use engine. - await po.selectModel({ provider: "OpenAI", model: "GPT 4.1" }); + await po.selectModel({ provider: "OpenAI", model: "GPT 5" }); await po.sendPrompt("[dump] tc=turbo-edits"); await po.snapshotServerDump("request"); diff --git a/e2e-tests/helpers/test_helper.ts b/e2e-tests/helpers/test_helper.ts index 527122e..45d5921 100644 --- a/e2e-tests/helpers/test_helper.ts +++ b/e2e-tests/helpers/test_helper.ts @@ -703,6 +703,7 @@ export class PageObject { async selectTestAzureModel() { await this.page.getByRole("button", { name: "Model: Auto" }).click(); + await this.page.getByText("Other AI providers").click(); await this.page.getByText("Azure OpenAI", { exact: true }).click(); await this.page.getByText("GPT-5", { exact: true }).click(); } diff --git a/e2e-tests/snapshots/engine.spec.ts_send-message-to-engine---openai-gpt-4-1-1.txt b/e2e-tests/snapshots/engine.spec.ts_send-message-to-engine---openai-gpt-5-1.txt similarity index 99% rename from e2e-tests/snapshots/engine.spec.ts_send-message-to-engine---openai-gpt-4-1-1.txt rename to 
e2e-tests/snapshots/engine.spec.ts_send-message-to-engine---openai-gpt-5-1.txt index d1baf18..7ec315e 100644 --- a/e2e-tests/snapshots/engine.spec.ts_send-message-to-engine---openai-gpt-4-1-1.txt +++ b/e2e-tests/snapshots/engine.spec.ts_send-message-to-engine---openai-gpt-5-1.txt @@ -1,8 +1,7 @@ { "body": { - "model": "gpt-4.1", - "max_tokens": 32768, - "temperature": 0, + "model": "gpt-5", + "temperature": 1, "messages": [ { "role": "system", diff --git a/src/components/ModelPicker.tsx b/src/components/ModelPicker.tsx index b715fa7..845c883 100644 --- a/src/components/ModelPicker.tsx +++ b/src/components/ModelPicker.tsx @@ -122,6 +122,23 @@ export function ModelPicker() { const isSmartAutoEnabled = settings.enableProSmartFilesContextMode && isDyadProEnabled(settings); const modelDisplayName = getModelDisplayName(); + // Split providers into primary and secondary groups (excluding auto) + const providerEntries = + !loading && modelsByProviders + ? Object.entries(modelsByProviders).filter( + ([providerId]) => providerId !== "auto", + ) + : []; + const primaryProviders = providerEntries.filter(([providerId, models]) => { + if (models.length === 0) return false; + const provider = providers?.find((p) => p.id === providerId); + return !(provider && provider.secondary); + }); + const secondaryProviders = providerEntries.filter(([providerId, models]) => { + if (models.length === 0) return false; + const provider = providers?.find((p) => p.id === providerId); + return !!(provider && provider.secondary); + }); return ( @@ -232,19 +249,28 @@ export function ModelPicker() { )} - {/* Group other providers into submenus */} - {Object.entries(modelsByProviders).map(([providerId, models]) => { - // Skip auto provider as it's already handled - if (providerId === "auto") return null; - + {/* Primary providers as submenus */} + {primaryProviders.map(([providerId, models]) => { const provider = providers?.find((p) => p.id === providerId); - if (models.length === 0) return null; - 
return ( -
- {provider?.name} +
+
+ {provider?.name ?? providerId} + {provider?.type === "cloud" && + !provider?.secondary && + isDyadProEnabled(settings) && ( + + Pro + + )} + {provider?.type === "custom" && ( + + Custom + + )} +
{models.length} models @@ -252,7 +278,7 @@ export function ModelPicker() { - {provider?.name} Models + {(provider?.name ?? providerId) + " Models"} {models.map((model) => ( @@ -278,6 +304,11 @@ export function ModelPicker() { >
{model.displayName} + {model.dollarSigns && ( + + {"$".repeat(model.dollarSigns)} + + )} {model.tag && ( {model.tag} @@ -295,6 +326,92 @@ export function ModelPicker() { ); })} + + {/* Secondary providers grouped under Other AI providers */} + {secondaryProviders.length > 0 && ( + + +
+ Other AI providers + + {secondaryProviders.length} providers + +
+
+ + Other AI providers + + {secondaryProviders.map(([providerId, models]) => { + const provider = providers?.find( + (p) => p.id === providerId, + ); + return ( + + +
+
+ {provider?.name ?? providerId} + {provider?.type === "custom" && ( + + Custom + + )} +
+ + {models.length} models + +
+
+ + + {(provider?.name ?? providerId) + " Models"} + + + {models.map((model) => ( + + + { + const customModelId = + model.type === "custom" + ? model.id + : undefined; + onModelSelect({ + name: model.apiName, + provider: providerId, + customModelId, + }); + setOpen(false); + }} + > +
+ {model.displayName} + {model.tag && ( + + {model.tag} + + )} +
+
+
+ + {model.description} + +
+ ))} +
+
+ ); + })} +
+
+ )} )} diff --git a/src/ipc/ipc_types.ts b/src/ipc/ipc_types.ts index 75a9499..9efabe3 100644 --- a/src/ipc/ipc_types.ts +++ b/src/ipc/ipc_types.ts @@ -173,6 +173,7 @@ export interface LanguageModelProvider { hasFreeTier?: boolean; websiteUrl?: string; gatewayPrefix?: string; + secondary?: boolean; envVarName?: string; apiBaseUrl?: string; type: "custom" | "local" | "cloud"; @@ -188,6 +189,7 @@ export type LanguageModel = maxOutputTokens?: number; contextWindow?: number; temperature?: number; + dollarSigns?: number; type: "custom"; } | { @@ -198,6 +200,7 @@ export type LanguageModel = maxOutputTokens?: number; contextWindow?: number; temperature?: number; + dollarSigns?: number; type: "local" | "cloud"; }; diff --git a/src/ipc/shared/language_model_helpers.ts b/src/ipc/shared/language_model_helpers.ts index 15cb713..cfe112e 100644 --- a/src/ipc/shared/language_model_helpers.ts +++ b/src/ipc/shared/language_model_helpers.ts @@ -15,6 +15,7 @@ export interface ModelOption { name: string; displayName: string; description: string; + dollarSigns?: number; temperature?: number; tag?: string; maxOutputTokens?: number; @@ -33,6 +34,7 @@ export const MODEL_OPTIONS: Record = { contextWindow: 400_000, // Requires temperature to be default value (1) temperature: 1, + dollarSigns: 3, }, // https://platform.openai.com/docs/models/gpt-5-mini { @@ -44,6 +46,7 @@ export const MODEL_OPTIONS: Record = { contextWindow: 400_000, // Requires temperature to be default value (1) temperature: 1, + dollarSigns: 2, }, // https://platform.openai.com/docs/models/gpt-5-nano { @@ -55,34 +58,7 @@ export const MODEL_OPTIONS: Record = { contextWindow: 400_000, // Requires temperature to be default value (1) temperature: 1, - }, - // https://platform.openai.com/docs/models/gpt-4.1 - { - name: "gpt-4.1", - displayName: "GPT 4.1", - description: "OpenAI's flagship model", - maxOutputTokens: 32_768, - contextWindow: 1_047_576, - temperature: 0, - }, - // 
https://platform.openai.com/docs/models/gpt-4.1-mini - { - name: "gpt-4.1-mini", - displayName: "GPT 4.1 Mini", - description: "OpenAI's lightweight, but intelligent model", - maxOutputTokens: 32_768, - contextWindow: 1_047_576, - temperature: 0, - }, - // https://platform.openai.com/docs/models/o3-mini - { - name: "o3-mini", - displayName: "o3 mini", - description: "Reasoning model", - // See o4-mini comment below for why we set this to 32k - maxOutputTokens: 32_000, - contextWindow: 200_000, - temperature: 0, + dollarSigns: 1, }, // https://platform.openai.com/docs/models/o4-mini { @@ -95,6 +71,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 32_000, contextWindow: 200_000, temperature: 0, + dollarSigns: 2, }, ], // https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table @@ -107,6 +84,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 16_000, contextWindow: 200_000, temperature: 0, + dollarSigns: 4, }, { name: "claude-3-7-sonnet-latest", @@ -119,6 +97,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 16_000, contextWindow: 200_000, temperature: 0, + dollarSigns: 4, }, { name: "claude-3-5-sonnet-20241022", @@ -127,6 +106,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 8_000, contextWindow: 200_000, temperature: 0, + dollarSigns: 4, }, { name: "claude-3-5-haiku-20241022", @@ -135,6 +115,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 8_000, contextWindow: 200_000, temperature: 0, + dollarSigns: 2, }, ], google: [ @@ -148,6 +129,7 @@ export const MODEL_OPTIONS: Record = { // Gemini context window = input token + output token contextWindow: 1_048_576, temperature: 0, + dollarSigns: 3, }, // https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview { @@ -159,6 +141,7 @@ export const MODEL_OPTIONS: Record = { // Gemini context window = input token + output token contextWindow: 1_048_576, temperature: 0, + dollarSigns: 2, }, ], openrouter: [ @@ -169,6 +152,7 @@ export 
const MODEL_OPTIONS: Record = { maxOutputTokens: 32_000, contextWindow: 262_000, temperature: 0, + dollarSigns: 2, }, // https://openrouter.ai/deepseek/deepseek-chat-v3-0324:free { @@ -178,6 +162,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 32_000, contextWindow: 128_000, temperature: 0, + dollarSigns: 2, }, // https://openrouter.ai/moonshotai/kimi-k2 { @@ -187,6 +172,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 32_000, contextWindow: 131_000, temperature: 0, + dollarSigns: 2, }, { name: "deepseek/deepseek-r1-0528", @@ -195,6 +181,7 @@ export const MODEL_OPTIONS: Record = { maxOutputTokens: 32_000, contextWindow: 128_000, temperature: 0, + dollarSigns: 2, }, ], auto: [ @@ -262,6 +249,7 @@ export const CLOUD_PROVIDERS: Record< hasFreeTier?: boolean; websiteUrl?: string; gatewayPrefix: string; + secondary?: boolean; } > = { openai: { @@ -298,6 +286,7 @@ export const CLOUD_PROVIDERS: Record< hasFreeTier: false, websiteUrl: "https://portal.azure.com/", gatewayPrefix: "", + secondary: true, }, }; @@ -359,6 +348,7 @@ export async function getLanguageModelProviders(): Promise< hasFreeTier: providerDetails.hasFreeTier, websiteUrl: providerDetails.websiteUrl, gatewayPrefix: providerDetails.gatewayPrefix, + secondary: providerDetails.secondary, envVarName: PROVIDER_TO_ENV_VAR[key] ?? undefined, type: "cloud", // apiBaseUrl is not directly in PROVIDERS