Add GPT 5 Codex and Sonnet 4.5 (#1398)

Fixes #1405 
    
<!-- This is an auto-generated description by cubic. -->

## Summary by cubic
Adds GPT-5 Codex (OpenAI and Azure) and Claude 4.5 Sonnet to the model
options to enable newer coding models and larger contexts. Also
increases Claude 4 Sonnet max output tokens to 32k.

<!-- End of auto-generated description by cubic. -->


<!-- CURSOR_SUMMARY -->
---

> [!NOTE]
> Adds GPT‑5 Codex (OpenAI/Azure) and Claude 4.5 Sonnet, and increases Claude 4 Sonnet max output tokens to 32k across providers and tests (a sketch of the assumed `ModelOption` shape follows this summary).
>
> - **Models**:
>   - **OpenAI**: add `gpt-5-codex` (400k context, default temperature 1, no explicit `maxOutputTokens`).
>   - **Anthropic**:
>     - add `claude-sonnet-4-5-20250929` (1M context, `maxOutputTokens: 32_000`).
>     - update `claude-sonnet-4-20250514` `maxOutputTokens` from `16_000` to `32_000`.
>   - **Azure**: add `gpt-5-codex` (400k context); existing GPT-5 deployments switch to the default temperature `1` and leave `maxOutputTokens` unset.
>   - **Bedrock**:
>     - add `us.anthropic.claude-sonnet-4-5-20250929-v1:0` (1M context, `maxOutputTokens: 32_000`).
>     - update `us.anthropic.claude-sonnet-4-20250514-v1:0` `maxOutputTokens` to `32_000`.
> - **E2E tests**:
>   - Update snapshots to reflect `max_tokens` increased to `32000` for `anthropic/claude-sonnet-4-20250514` in engine and gateway tests.
>
> <sup>Written by [Cursor Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit 73298d2da0c833468f957bb436f1e33400307483. This will update automatically on new commits. Configure [here](https://cursor.com/dashboard?tab=bugbot).</sup>
<!-- /CURSOR_SUMMARY -->
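
The `ModelOption` interface itself is not shown in the diff below, so the following is a minimal sketch of its assumed shape, inferred only from the fields the new entries use. The real interface in the repo may differ (extra fields, different optionality); `maxOutputTokens` and `dollarSigns` appear optional because some entries omit them.

```ts
// Assumed shape of ModelOption, inferred from the fields used in this diff.
// Not copied from the repo; treat as an approximation.
export interface ModelOption {
  name: string;
  displayName: string;
  description: string;
  // Left undefined where the provider rejects an explicit max_tokens (e.g. gpt-5-codex)
  maxOutputTokens?: number;
  contextWindow: number;
  temperature: number;
  // Rough cost indicator; not set on the Azure/Bedrock entries shown here
  dollarSigns?: number;
}
```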
Author: Will Chen
Date: 2025-09-30 13:46:44 -07:00
Committed by: GitHub
Parent: 3b68fb4e48
Commit: 39266416c7
3 changed files with 58 additions and 13 deletions

View File

@@ -1,7 +1,7 @@
 {
   "body": {
     "model": "anthropic/claude-sonnet-4-20250514",
-    "max_tokens": 16000,
+    "max_tokens": 32000,
     "temperature": 0,
     "messages": [
       {

View File

@@ -1,7 +1,7 @@
 {
   "body": {
     "model": "anthropic/claude-sonnet-4-20250514",
-    "max_tokens": 16000,
+    "max_tokens": 32000,
     "temperature": 0,
     "messages": [
       {
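
Both snapshot updates capture the same behavior: the request body built for `anthropic/claude-sonnet-4-20250514` now carries the larger output budget. A minimal sketch of the assumed mapping from the model option to that body follows; the helper and its signature are hypothetical, and only the field names and values come from the snapshot and the model options file below.

```ts
// Hypothetical illustration of how the snapshot body is assumed to be derived
// from the model option; this helper is not part of the PR.
function buildSnapshotBody(
  option: { name: string; maxOutputTokens?: number; temperature: number },
  messages: unknown[],
) {
  return {
    model: `anthropic/${option.name}`,
    max_tokens: option.maxOutputTokens, // was 16000, now 32000 for Claude 4 Sonnet
    temperature: option.temperature,
    messages,
  };
}
```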

View File

@@ -20,6 +20,18 @@ export interface ModelOption {
 export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
   openai: [
+    // https://platform.openai.com/docs/models/gpt-5-codex
+    {
+      name: "gpt-5-codex",
+      displayName: "GPT 5 Codex",
+      description: "OpenAI's flagship model optimized for coding",
+      // Technically it's 128k but OpenAI errors if you set max_tokens instead of max_completion_tokens
+      maxOutputTokens: undefined,
+      contextWindow: 400_000,
+      // Requires temperature to be default value (1)
+      temperature: 1,
+      dollarSigns: 3,
+    },
     // https://platform.openai.com/docs/models/gpt-5
     {
       name: "gpt-5",
@@ -72,12 +84,23 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
   ],
   // https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table
   anthropic: [
+    {
+      name: "claude-sonnet-4-5-20250929",
+      displayName: "Claude 4.5 Sonnet",
+      description:
+        "Anthropic's best model for coding (note: >200k tokens is very expensive!)",
+      // Set to 32k since context window is 1M tokens
+      maxOutputTokens: 32_000,
+      contextWindow: 1_000_000,
+      temperature: 0,
+      dollarSigns: 5,
+    },
     {
       name: "claude-sonnet-4-20250514",
       displayName: "Claude 4 Sonnet",
       description: "Excellent coder (note: >200k tokens is very expensive!)",
-      // See comment below for Claude 3.7 Sonnet for why we set this to 16k
-      maxOutputTokens: 16_000,
+      // Set to 32k since context window is 1M tokens
+      maxOutputTokens: 32_000,
       contextWindow: 1_000_000,
       temperature: 0,
       dollarSigns: 5,
@@ -265,37 +288,50 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
     },
   ],
   azure: [
+    {
+      name: "gpt-5-codex",
+      displayName: "GPT-5 Codex",
+      description: "Azure OpenAI GPT-5 Codex model",
+      // See OpenAI comment above
+      // maxOutputTokens: 128_000,
+      contextWindow: 400_000,
+      temperature: 1,
+    },
     {
       name: "gpt-5",
       displayName: "GPT-5",
       description: "Azure OpenAI GPT-5 model with reasoning capabilities",
-      maxOutputTokens: 128_000,
+      // See OpenAI comment above
+      // maxOutputTokens: 128_000,
       contextWindow: 400_000,
-      temperature: 0,
+      temperature: 1,
     },
     {
       name: "gpt-5-mini",
       displayName: "GPT-5 Mini",
       description: "Azure OpenAI GPT-5 Mini model",
-      maxOutputTokens: 128_000,
+      // See OpenAI comment above
+      // maxOutputTokens: 128_000,
       contextWindow: 400_000,
-      temperature: 0,
+      temperature: 1,
     },
     {
       name: "gpt-5-nano",
       displayName: "GPT-5 Nano",
       description: "Azure OpenAI GPT-5 Nano model",
-      maxOutputTokens: 128_000,
+      // See OpenAI comment above
+      // maxOutputTokens: 128_000,
       contextWindow: 400_000,
-      temperature: 0,
+      temperature: 1,
     },
     {
       name: "gpt-5-chat",
       displayName: "GPT-5 Chat",
       description: "Azure OpenAI GPT-5 Chat model",
-      maxOutputTokens: 16_384,
+      // See OpenAI comment above
+      // maxOutputTokens: 16_384,
       contextWindow: 128_000,
-      temperature: 0,
+      temperature: 1,
     },
   ],
   xai: [
@@ -329,11 +365,20 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
     },
   ],
   bedrock: [
+    {
+      name: "us.anthropic.claude-sonnet-4-5-20250929-v1:0",
+      displayName: "Claude 4.5 Sonnet",
+      description:
+        "Anthropic's best model for coding (note: >200k tokens is very expensive!)",
+      maxOutputTokens: 32_000,
+      contextWindow: 1_000_000,
+      temperature: 0,
+    },
     {
       name: "us.anthropic.claude-sonnet-4-20250514-v1:0",
       displayName: "Claude 4 Sonnet",
       description: "Excellent coder (note: >200k tokens is very expensive!)",
-      maxOutputTokens: 16_000,
+      maxOutputTokens: 32_000,
       contextWindow: 1_000_000,
       temperature: 0,
     },
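
A note on the GPT-5 Codex entries above: `maxOutputTokens` is deliberately left unset because, per the in-code comment, OpenAI errors when the legacy `max_tokens` parameter is sent for this model and expects `max_completion_tokens` instead (and the model requires the default temperature of 1). The sketch below is illustrative only and is not code from this repo; it just shows the consequence of that choice under the assumed `ModelOption` shape.

```ts
// Illustrative only: when no cap is configured (gpt-5-codex), nothing is sent;
// when one is configured, it is assumed to go out as max_completion_tokens,
// since newer OpenAI models reject the legacy max_tokens field.
function buildOpenAIBody(
  option: { name: string; maxOutputTokens?: number; temperature: number },
  messages: unknown[],
) {
  const body: Record<string, unknown> = {
    model: option.name,
    messages,
    temperature: option.temperature, // gpt-5-codex requires the default value of 1
  };
  if (option.maxOutputTokens !== undefined) {
    body.max_completion_tokens = option.maxOutputTokens;
  }
  return body;
}
```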