Improve model picker UX (#1180)

1. Show less common AI providers (secondary) in a submenu
2. Show $ signs for rough cost guide
3. Show "Pro" for supported AI providers when Pro is enabled

    
<!-- This is an auto-generated description by cubic. -->
---

## Summary by cubic
Improves the Model Picker UX by grouping less-used providers under an
“Other AI providers” submenu and adding clear cost and Pro indicators.
This makes picking models faster and more informative.

- **New Features**
- Grouped secondary providers under “Other AI providers” using a new
provider.secondary flag (Azure marked secondary).
- Added rough cost hints: models can set dollarSigns and the UI shows a
“$” badge accordingly.
- Shows a “Pro” badge on supported cloud providers when Pro is enabled;
added a “Custom” badge for custom providers.
- Extended types: LanguageModelProvider.secondary and
LanguageModel.dollarSigns; populated values across OpenAI, Anthropic,
Google, and OpenRouter.

<!-- End of auto-generated description by cubic. -->
This commit is contained in:
Will Chen
2025-09-03 15:36:54 -07:00
committed by GitHub
parent b5b637f73b
commit 2842c61f7c
6 changed files with 153 additions and 43 deletions

View File

@@ -122,6 +122,23 @@ export function ModelPicker() {
const isSmartAutoEnabled =
settings.enableProSmartFilesContextMode && isDyadProEnabled(settings);
const modelDisplayName = getModelDisplayName();
// Split providers into primary and secondary groups (excluding auto)
const providerEntries =
!loading && modelsByProviders
? Object.entries(modelsByProviders).filter(
([providerId]) => providerId !== "auto",
)
: [];
const primaryProviders = providerEntries.filter(([providerId, models]) => {
if (models.length === 0) return false;
const provider = providers?.find((p) => p.id === providerId);
return !(provider && provider.secondary);
});
const secondaryProviders = providerEntries.filter(([providerId, models]) => {
if (models.length === 0) return false;
const provider = providers?.find((p) => p.id === providerId);
return !!(provider && provider.secondary);
});
return (
<DropdownMenu open={open} onOpenChange={setOpen}>
@@ -232,19 +249,28 @@ export function ModelPicker() {
</>
)}
{/* Group other providers into submenus */}
{Object.entries(modelsByProviders).map(([providerId, models]) => {
// Skip auto provider as it's already handled
if (providerId === "auto") return null;
{/* Primary providers as submenus */}
{primaryProviders.map(([providerId, models]) => {
const provider = providers?.find((p) => p.id === providerId);
if (models.length === 0) return null;
return (
<DropdownMenuSub key={providerId}>
<DropdownMenuSubTrigger className="w-full font-normal">
<div className="flex flex-col items-start">
<span>{provider?.name}</span>
<div className="flex flex-col items-start w-full">
<div className="flex items-center gap-2">
<span>{provider?.name ?? providerId}</span>
{provider?.type === "cloud" &&
!provider?.secondary &&
isDyadProEnabled(settings) && (
<span className="text-[10px] bg-gradient-to-r from-indigo-600 via-indigo-500 to-indigo-600 bg-[length:200%_100%] animate-[shimmer_5s_ease-in-out_infinite] text-white px-1.5 py-0.5 rounded-full font-medium">
Pro
</span>
)}
{provider?.type === "custom" && (
<span className="text-[10px] bg-amber-500/20 text-amber-700 px-1.5 py-0.5 rounded-full font-medium">
Custom
</span>
)}
</div>
<span className="text-xs text-muted-foreground">
{models.length} models
</span>
@@ -252,7 +278,7 @@ export function ModelPicker() {
</DropdownMenuSubTrigger>
<DropdownMenuSubContent className="w-56">
<DropdownMenuLabel>
{provider?.name} Models
{(provider?.name ?? providerId) + " Models"}
</DropdownMenuLabel>
<DropdownMenuSeparator />
{models.map((model) => (
@@ -278,6 +304,11 @@ export function ModelPicker() {
>
<div className="flex justify-between items-start w-full">
<span>{model.displayName}</span>
{model.dollarSigns && (
<span className="text-[10px] bg-primary/10 text-primary px-1.5 py-0.5 rounded-full font-medium">
{"$".repeat(model.dollarSigns)}
</span>
)}
{model.tag && (
<span className="text-[10px] bg-primary/10 text-primary px-1.5 py-0.5 rounded-full font-medium">
{model.tag}
@@ -295,6 +326,92 @@ export function ModelPicker() {
</DropdownMenuSub>
);
})}
{/* Secondary providers grouped under Other AI providers */}
{secondaryProviders.length > 0 && (
<DropdownMenuSub>
<DropdownMenuSubTrigger className="w-full font-normal">
<div className="flex flex-col items-start">
<span>Other AI providers</span>
<span className="text-xs text-muted-foreground">
{secondaryProviders.length} providers
</span>
</div>
</DropdownMenuSubTrigger>
<DropdownMenuSubContent className="w-56">
<DropdownMenuLabel>Other AI providers</DropdownMenuLabel>
<DropdownMenuSeparator />
{secondaryProviders.map(([providerId, models]) => {
const provider = providers?.find(
(p) => p.id === providerId,
);
return (
<DropdownMenuSub key={providerId}>
<DropdownMenuSubTrigger className="w-full font-normal">
<div className="flex flex-col items-start w-full">
<div className="flex items-center gap-2">
<span>{provider?.name ?? providerId}</span>
{provider?.type === "custom" && (
<span className="text-[10px] bg-amber-500/20 text-amber-700 px-1.5 py-0.5 rounded-full font-medium">
Custom
</span>
)}
</div>
<span className="text-xs text-muted-foreground">
{models.length} models
</span>
</div>
</DropdownMenuSubTrigger>
<DropdownMenuSubContent className="w-56">
<DropdownMenuLabel>
{(provider?.name ?? providerId) + " Models"}
</DropdownMenuLabel>
<DropdownMenuSeparator />
{models.map((model) => (
<Tooltip key={`${providerId}-${model.apiName}`}>
<TooltipTrigger asChild>
<DropdownMenuItem
className={
selectedModel.provider === providerId &&
selectedModel.name === model.apiName
? "bg-secondary"
: ""
}
onClick={() => {
const customModelId =
model.type === "custom"
? model.id
: undefined;
onModelSelect({
name: model.apiName,
provider: providerId,
customModelId,
});
setOpen(false);
}}
>
<div className="flex justify-between items-start w-full">
<span>{model.displayName}</span>
{model.tag && (
<span className="text-[10px] bg-primary/10 text-primary px-1.5 py-0.5 rounded-full font-medium">
{model.tag}
</span>
)}
</div>
</DropdownMenuItem>
</TooltipTrigger>
<TooltipContent side="right">
{model.description}
</TooltipContent>
</Tooltip>
))}
</DropdownMenuSubContent>
</DropdownMenuSub>
);
})}
</DropdownMenuSubContent>
</DropdownMenuSub>
)}
</>
)}

View File

@@ -173,6 +173,7 @@ export interface LanguageModelProvider {
hasFreeTier?: boolean;
websiteUrl?: string;
gatewayPrefix?: string;
secondary?: boolean;
envVarName?: string;
apiBaseUrl?: string;
type: "custom" | "local" | "cloud";
@@ -188,6 +189,7 @@ export type LanguageModel =
maxOutputTokens?: number;
contextWindow?: number;
temperature?: number;
dollarSigns?: number;
type: "custom";
}
| {
@@ -198,6 +200,7 @@ export type LanguageModel =
maxOutputTokens?: number;
contextWindow?: number;
temperature?: number;
dollarSigns?: number;
type: "local" | "cloud";
};

View File

@@ -15,6 +15,7 @@ export interface ModelOption {
name: string;
displayName: string;
description: string;
dollarSigns?: number;
temperature?: number;
tag?: string;
maxOutputTokens?: number;
@@ -33,6 +34,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
contextWindow: 400_000,
// Requires temperature to be default value (1)
temperature: 1,
dollarSigns: 3,
},
// https://platform.openai.com/docs/models/gpt-5-mini
{
@@ -44,6 +46,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
contextWindow: 400_000,
// Requires temperature to be default value (1)
temperature: 1,
dollarSigns: 2,
},
// https://platform.openai.com/docs/models/gpt-5-nano
{
@@ -55,34 +58,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
contextWindow: 400_000,
// Requires temperature to be default value (1)
temperature: 1,
},
// https://platform.openai.com/docs/models/gpt-4.1
{
name: "gpt-4.1",
displayName: "GPT 4.1",
description: "OpenAI's flagship model",
maxOutputTokens: 32_768,
contextWindow: 1_047_576,
temperature: 0,
},
// https://platform.openai.com/docs/models/gpt-4.1-mini
{
name: "gpt-4.1-mini",
displayName: "GPT 4.1 Mini",
description: "OpenAI's lightweight, but intelligent model",
maxOutputTokens: 32_768,
contextWindow: 1_047_576,
temperature: 0,
},
// https://platform.openai.com/docs/models/o3-mini
{
name: "o3-mini",
displayName: "o3 mini",
description: "Reasoning model",
// See o4-mini comment below for why we set this to 32k
maxOutputTokens: 32_000,
contextWindow: 200_000,
temperature: 0,
dollarSigns: 1,
},
// https://platform.openai.com/docs/models/o4-mini
{
@@ -95,6 +71,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 32_000,
contextWindow: 200_000,
temperature: 0,
dollarSigns: 2,
},
],
// https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table
@@ -107,6 +84,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 16_000,
contextWindow: 200_000,
temperature: 0,
dollarSigns: 4,
},
{
name: "claude-3-7-sonnet-latest",
@@ -119,6 +97,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 16_000,
contextWindow: 200_000,
temperature: 0,
dollarSigns: 4,
},
{
name: "claude-3-5-sonnet-20241022",
@@ -127,6 +106,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 8_000,
contextWindow: 200_000,
temperature: 0,
dollarSigns: 4,
},
{
name: "claude-3-5-haiku-20241022",
@@ -135,6 +115,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 8_000,
contextWindow: 200_000,
temperature: 0,
dollarSigns: 2,
},
],
google: [
@@ -148,6 +129,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
// Gemini context window = input token + output token
contextWindow: 1_048_576,
temperature: 0,
dollarSigns: 3,
},
// https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview
{
@@ -159,6 +141,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
// Gemini context window = input token + output token
contextWindow: 1_048_576,
temperature: 0,
dollarSigns: 2,
},
],
openrouter: [
@@ -169,6 +152,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 32_000,
contextWindow: 262_000,
temperature: 0,
dollarSigns: 2,
},
// https://openrouter.ai/deepseek/deepseek-chat-v3-0324:free
{
@@ -178,6 +162,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 32_000,
contextWindow: 128_000,
temperature: 0,
dollarSigns: 2,
},
// https://openrouter.ai/moonshotai/kimi-k2
{
@@ -187,6 +172,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 32_000,
contextWindow: 131_000,
temperature: 0,
dollarSigns: 2,
},
{
name: "deepseek/deepseek-r1-0528",
@@ -195,6 +181,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 32_000,
contextWindow: 128_000,
temperature: 0,
dollarSigns: 2,
},
],
auto: [
@@ -262,6 +249,7 @@ export const CLOUD_PROVIDERS: Record<
hasFreeTier?: boolean;
websiteUrl?: string;
gatewayPrefix: string;
secondary?: boolean;
}
> = {
openai: {
@@ -298,6 +286,7 @@ export const CLOUD_PROVIDERS: Record<
hasFreeTier: false,
websiteUrl: "https://portal.azure.com/",
gatewayPrefix: "",
secondary: true,
},
};
@@ -359,6 +348,7 @@ export async function getLanguageModelProviders(): Promise<
hasFreeTier: providerDetails.hasFreeTier,
websiteUrl: providerDetails.websiteUrl,
gatewayPrefix: providerDetails.gatewayPrefix,
secondary: providerDetails.secondary,
envVarName: PROVIDER_TO_ENV_VAR[key] ?? undefined,
type: "cloud",
// apiBaseUrl is not directly in PROVIDERS