diff --git a/e2e-tests/engine.spec.ts b/e2e-tests/engine.spec.ts index 48f52ed..ddc5bdc 100644 --- a/e2e-tests/engine.spec.ts +++ b/e2e-tests/engine.spec.ts @@ -25,7 +25,7 @@ testSkipIfWindows( await po.setUpDyadPro(); // By default, it's using auto which points to Flash 2.5 and doesn't // use engine. - await po.selectModel({ provider: "Anthropic", model: "Claude 4 Sonnet" }); + await po.selectModel({ provider: "Anthropic", model: "Claude Sonnet 4" }); await po.sendPrompt("[dump] tc=turbo-edits"); await po.snapshotServerDump("request"); diff --git a/src/ipc/shared/language_model_constants.ts b/src/ipc/shared/language_model_constants.ts index f36987d..0fdae3d 100644 --- a/src/ipc/shared/language_model_constants.ts +++ b/src/ipc/shared/language_model_constants.ts @@ -122,9 +122,20 @@ export const MODEL_OPTIONS: Record = { ], // https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table anthropic: [ + { + name: "claude-opus-4-5", + displayName: "Claude Opus 4.5", + description: + "Anthropic's best model for coding (note: this model is very expensive!)", + // Max output tokens capped at 32k; context window is 200k + maxOutputTokens: 32_000, + contextWindow: 200_000, + temperature: 0, + dollarSigns: 5, + }, { name: "claude-sonnet-4-5-20250929", - displayName: "Claude 4.5 Sonnet", + displayName: "Claude Sonnet 4.5", description: "Anthropic's best model for coding (note: >200k tokens is very expensive!)", // Set to 32k since context window is 1M tokens @@ -135,7 +146,7 @@ export const MODEL_OPTIONS: Record = { }, { name: "claude-sonnet-4-20250514", - displayName: "Claude 4 Sonnet", + displayName: "Claude Sonnet 4", description: "Excellent coder (note: >200k tokens is very expensive!)", // Set to 32k since context window is 1M tokens maxOutputTokens: 32_000, contextWindow: 200_000, temperature: 0, dollarSigns: 5, }, - { - name: "claude-3-7-sonnet-latest", - displayName: "Claude 3.7 Sonnet", - description: "Excellent 
coder", - // Technically the max output tokens is 64k, *however* if the user has a lot of input tokens, - // then setting a high max output token will cause the request to fail because - // the max output tokens is *included* in the context window limit, see: - // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#max-tokens-and-context-window-size-with-extended-thinking - maxOutputTokens: 16_000, - contextWindow: 200_000, - temperature: 0, - dollarSigns: 4, - }, - { - name: "claude-3-5-sonnet-20241022", - displayName: "Claude 3.5 Sonnet", - description: "Good coder, excellent at following instructions", - maxOutputTokens: 8_000, - contextWindow: 200_000, - temperature: 0, - dollarSigns: 4, - }, - { - name: "claude-3-5-haiku-20241022", - displayName: "Claude 3.5 Haiku", - description: "Lightweight coder", - maxOutputTokens: 8_000, - contextWindow: 200_000, - temperature: 0, - dollarSigns: 2, - }, ], google: [ // https://ai.google.dev/gemini-api/docs/models#gemini-3-pro @@ -466,22 +446,6 @@ export const MODEL_OPTIONS: Record = { contextWindow: 1_000_000, temperature: 0, }, - { - name: "us.anthropic.claude-3-7-sonnet-20250219-v1:0", - displayName: "Claude 3.7 Sonnet", - description: "Excellent coder", - maxOutputTokens: 16_000, - contextWindow: 200_000, - temperature: 0, - }, - { - name: "us.anthropic.claude-3-5-sonnet-20241022-v2:0", - displayName: "Claude 3.5 Sonnet", - description: "Good coder, excellent at following instructions", - maxOutputTokens: 8_000, - contextWindow: 200_000, - temperature: 0, - }, ], };