Add GPT 5 support (#902)
package-lock.json (generated):

@@ -1,17 +1,17 @@
 {
   "name": "dyad",
-  "version": "0.15.0-beta.2",
+  "version": "0.16.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
       "name": "dyad",
-      "version": "0.15.0-beta.2",
+      "version": "0.16.0",
       "license": "MIT",
       "dependencies": {
         "@ai-sdk/anthropic": "^1.2.8",
         "@ai-sdk/google": "^1.2.19",
-        "@ai-sdk/openai": "^1.3.7",
+        "@ai-sdk/openai": "^1.3.24",
         "@ai-sdk/openai-compatible": "^0.2.13",
         "@biomejs/biome": "^1.9.4",
         "@dyad-sh/supabase-management-js": "v1.0.0",
@@ -171,13 +171,13 @@
       }
     },
     "node_modules/@ai-sdk/openai": {
-      "version": "1.3.21",
-      "resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.21.tgz",
-      "integrity": "sha512-ipAhkRKUd2YaMmn7DAklX3N7Ywx/rCsJHVyb0V/lKRqPcc612qAFVbjg+Uve8QYJlbPxgfsM4s9JmCFp6PSdYw==",
+      "version": "1.3.24",
+      "resolved": "https://registry.npmjs.org/@ai-sdk/openai/-/openai-1.3.24.tgz",
+      "integrity": "sha512-GYXnGJTHRTZc4gJMSmFRgEQudjqd4PUN0ZjQhPwOAYH1yOAvQoG/Ikqs+HyISRbLPCrhbZnPKCNHuRU4OfpW0Q==",
       "license": "Apache-2.0",
       "dependencies": {
         "@ai-sdk/provider": "1.1.3",
-        "@ai-sdk/provider-utils": "2.2.7"
+        "@ai-sdk/provider-utils": "2.2.8"
       },
       "engines": {
         "node": ">=18"
@@ -202,6 +202,23 @@
         "zod": "^3.0.0"
       }
     },
+    "node_modules/@ai-sdk/openai/node_modules/@ai-sdk/provider-utils": {
+      "version": "2.2.8",
+      "resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-2.2.8.tgz",
+      "integrity": "sha512-fqhG+4sCVv8x7nFzYnFo19ryhAa3w096Kmc3hWxMQfW/TubPOmt3A6tYZhl4mUfQWWQMsuSkLrtjlWuXBVSGQA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@ai-sdk/provider": "1.1.3",
+        "nanoid": "^3.3.8",
+        "secure-json-parse": "^2.7.0"
+      },
+      "engines": {
+        "node": ">=18"
+      },
+      "peerDependencies": {
+        "zod": "^3.23.8"
+      }
+    },
     "node_modules/@ai-sdk/provider": {
       "version": "1.1.3",
       "resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-1.1.3.tgz",
@@ -85,7 +85,7 @@
   "dependencies": {
     "@ai-sdk/anthropic": "^1.2.8",
     "@ai-sdk/google": "^1.2.19",
-    "@ai-sdk/openai": "^1.3.7",
+    "@ai-sdk/openai": "^1.3.24",
     "@ai-sdk/openai-compatible": "^0.2.13",
     "@biomejs/biome": "^1.9.4",
     "@dyad-sh/supabase-management-js": "v1.0.0",
@@ -38,7 +38,7 @@ import * as path from "path";
 import * as os from "os";
 import * as crypto from "crypto";
 import { readFile, writeFile, unlink } from "fs/promises";
-import { getMaxTokens } from "../utils/token_utils";
+import { getMaxTokens, getTemperature } from "../utils/token_utils";
 import { MAX_CHAT_TURNS_IN_CONTEXT } from "@/constants/settings_constants";
 import { validateChatContext } from "../utils/context_paths_utils";
 import { GoogleGenerativeAIProviderOptions } from "@ai-sdk/google";
@@ -58,6 +58,7 @@ import {
 } from "../utils/dyad_tag_parser";
 import { fileExists } from "../utils/file_utils";
 import { FileUploadsState } from "../utils/file_uploads_state";
+import { OpenAIResponsesProviderOptions } from "@ai-sdk/openai";

 type AsyncIterableStream<T> = AsyncIterable<T> & ReadableStream<T>;

@@ -593,7 +594,7 @@ This conversation includes one or more image attachments. When the user uploads
   }
   return streamText({
     maxTokens: await getMaxTokens(settings.selectedModel),
-    temperature: 0,
+    temperature: await getTemperature(settings.selectedModel),
     maxRetries: 2,
     model: modelClient.model,
     providerOptions: {
@@ -609,6 +610,9 @@ This conversation includes one or more image attachments. When the user uploads
           includeThoughts: true,
         },
       } satisfies GoogleGenerativeAIProviderOptions,
+      openai: {
+        reasoningSummary: "auto",
+      } satisfies OpenAIResponsesProviderOptions,
     },
     system: systemPrompt,
     messages: chatMessages.filter((m) => m.content),
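Read together, these two hunks stop hardcoding temperature: 0 and opt the OpenAI provider into reasoning summaries. A minimal self-contained sketch of the same pattern, assuming AI SDK 4.x stream part shapes (the model id and prompt are illustrative):

import { createOpenAI, type OpenAIResponsesProviderOptions } from "@ai-sdk/openai";
import { streamText } from "ai";

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const result = streamText({
  model: openai.responses("gpt-5"), // Responses API, matching the model-client change below
  temperature: 1, // GPT-5 accepts only the default temperature
  // maxTokens intentionally omitted: see the GPT-5 MODEL_OPTIONS comments below
  providerOptions: {
    openai: {
      reasoningSummary: "auto", // ask the Responses API to stream reasoning summaries
    } satisfies OpenAIResponsesProviderOptions,
  },
  prompt: "Refactor this function to be pure.",
});

for await (const part of result.fullStream) {
  if (part.type === "reasoning") process.stdout.write(part.textDelta); // summary text (assumed part shape)
  if (part.type === "text-delta") process.stdout.write(part.textDelta);
}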
@@ -183,6 +183,7 @@ export type LanguageModel =
       tag?: string;
       maxOutputTokens?: number;
       contextWindow?: number;
+      temperature?: number;
       type: "custom";
     }
   | {
@@ -192,6 +193,7 @@ export type LanguageModel =
       tag?: string;
       maxOutputTokens?: number;
       contextWindow?: number;
+      temperature?: number;
       type: "local" | "cloud";
     };

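Both branches of the union gain the optional field, so custom, local, and cloud model records can all carry a per-model temperature. A trimmed illustration (the union's remaining fields are elided in this diff, so only those shown above appear here):

// Hypothetical value matching the "local" | "cloud" branch after this change:
const gpt5Entry = {
  contextWindow: 400_000,
  temperature: 1, // GPT-5 requires the default temperature
  type: "cloud" as const,
};

// Leaving temperature undefined means "no preference"; getTemperature()
// (added at the end of this commit) then falls back to 0.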
@@ -15,6 +15,7 @@ export interface ModelOption {
   name: string;
   displayName: string;
   description: string;
+  temperature?: number;
   tag?: string;
   maxOutputTokens?: number;
   contextWindow?: number;
@@ -22,6 +23,39 @@ export interface ModelOption {

 export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
   openai: [
+    // https://platform.openai.com/docs/models/gpt-5
+    {
+      name: "gpt-5",
+      displayName: "GPT 5",
+      description: "OpenAI's flagship model",
+      // Technically it's 128k but OpenAI errors if you set max_tokens instead of max_completion_tokens
+      maxOutputTokens: undefined,
+      contextWindow: 400_000,
+      // Requires temperature to be default value (1)
+      temperature: 1,
+    },
+    // https://platform.openai.com/docs/models/gpt-5-mini
+    {
+      name: "gpt-5-mini",
+      displayName: "GPT 5 Mini",
+      description: "OpenAI's lightweight, but intelligent model",
+      // Technically it's 128k but OpenAI errors if you set max_tokens instead of max_completion_tokens
+      maxOutputTokens: undefined,
+      contextWindow: 400_000,
+      // Requires temperature to be default value (1)
+      temperature: 1,
+    },
+    // https://platform.openai.com/docs/models/gpt-5-nano
+    {
+      name: "gpt-5-nano",
+      displayName: "GPT 5 Nano",
+      description: "Fastest, most cost-efficient version of GPT-5",
+      // Technically it's 128k but OpenAI errors if you set max_tokens instead of max_completion_tokens
+      maxOutputTokens: undefined,
+      contextWindow: 400_000,
+      // Requires temperature to be default value (1)
+      temperature: 1,
+    },
     // https://platform.openai.com/docs/models/gpt-4.1
     {
       name: "gpt-4.1",
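The repeated comment explains the unusual maxOutputTokens: undefined: per the diff, OpenAI errors when max_tokens is sent instead of max_completion_tokens, so the GPT-5 entries set no output cap and let the updated getMaxTokens (end of this commit) return undefined, keeping the parameter out of the request. A quick sketch of how one of these entries resolves (lookup shown purely for illustration):

const gpt5 = MODEL_OPTIONS["openai"].find((m) => m.name === "gpt-5")!;

gpt5.maxOutputTokens; // undefined -> streamText receives maxTokens: undefined and sends no cap
gpt5.temperature;     // 1 -> the only value the GPT-5 endpoint accepts
gpt5.contextWindow;   // 400_000-token context window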
@@ -29,6 +63,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "OpenAI's flagship model",
       maxOutputTokens: 32_768,
       contextWindow: 1_047_576,
+      temperature: 0,
     },
     // https://platform.openai.com/docs/models/gpt-4.1-mini
     {
@@ -37,6 +72,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "OpenAI's lightweight, but intelligent model",
       maxOutputTokens: 32_768,
       contextWindow: 1_047_576,
+      temperature: 0,
     },
     // https://platform.openai.com/docs/models/o3-mini
     {
@@ -46,6 +82,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       // See o4-mini comment below for why we set this to 32k
       maxOutputTokens: 32_000,
       contextWindow: 200_000,
+      temperature: 0,
     },
     // https://platform.openai.com/docs/models/o4-mini
     {
@@ -57,6 +94,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       // the max output tokens is *included* in the context window limit.
       maxOutputTokens: 32_000,
       contextWindow: 200_000,
+      temperature: 0,
     },
   ],
   // https://docs.anthropic.com/en/docs/about-claude/models/all-models#model-comparison-table
@@ -68,6 +106,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       // See comment below for Claude 3.7 Sonnet for why we set this to 16k
       maxOutputTokens: 16_000,
       contextWindow: 200_000,
+      temperature: 0,
     },
     {
       name: "claude-3-7-sonnet-latest",
@@ -79,6 +118,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       // https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking#max-tokens-and-context-window-size-with-extended-thinking
       maxOutputTokens: 16_000,
       contextWindow: 200_000,
+      temperature: 0,
     },
     {
       name: "claude-3-5-sonnet-20241022",
@@ -86,6 +126,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "Good coder, excellent at following instructions",
       maxOutputTokens: 8_000,
       contextWindow: 200_000,
+      temperature: 0,
     },
     {
       name: "claude-3-5-haiku-20241022",
@@ -93,6 +134,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "Lightweight coder",
       maxOutputTokens: 8_000,
       contextWindow: 200_000,
+      temperature: 0,
     },
   ],
   google: [
@@ -105,6 +147,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       maxOutputTokens: 65_536 - 1,
       // Gemini context window = input token + output token
       contextWindow: 1_048_576,
+      temperature: 0,
     },
     // https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview
     {
@@ -115,6 +158,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       maxOutputTokens: 65_536 - 1,
       // Gemini context window = input token + output token
       contextWindow: 1_048_576,
+      temperature: 0,
     },
   ],
   openrouter: [
@@ -124,6 +168,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "Qwen's best coding model",
       maxOutputTokens: 32_000,
       contextWindow: 262_000,
+      temperature: 0,
     },
     // https://openrouter.ai/deepseek/deepseek-chat-v3-0324:free
     {
@@ -132,6 +177,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "Use for free (data may be used for training)",
       maxOutputTokens: 32_000,
       contextWindow: 128_000,
+      temperature: 0,
     },
     // https://openrouter.ai/moonshotai/kimi-k2
     {
@@ -140,6 +186,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "Powerful cost-effective model",
       maxOutputTokens: 32_000,
       contextWindow: 131_000,
+      temperature: 0,
     },
     {
       name: "deepseek/deepseek-r1-0528",
@@ -147,6 +194,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       description: "Good reasoning model with excellent price for performance",
       maxOutputTokens: 32_000,
       contextWindow: 128_000,
+      temperature: 0,
     },
   ],
   auto: [
@@ -160,6 +208,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
       // and smart auto.
       maxOutputTokens: 32_000,
       contextWindow: 1_000_000,
+      temperature: 0,
     },
   ],
 };
@@ -183,7 +183,7 @@ function getRegularModelClient(
       const provider = createOpenAI({ apiKey });
       return {
         modelClient: {
-          model: provider(model.name),
+          model: provider.responses(model.name),
           builtinProviderId: providerId,
         },
         backupModelClients: [],
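This one-line change routes built-in OpenAI models through the Responses API instead of Chat Completions, which is what makes the reasoningSummary provider option above meaningful. Side by side (model ids illustrative):

import { createOpenAI } from "@ai-sdk/openai";

const provider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });

const before = provider("gpt-4.1");          // Chat Completions API (old path)
const after = provider.responses("gpt-4.1"); // Responses API, now used for all OpenAI models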
@@ -23,6 +23,11 @@ export function getExtraProviderOptions(
   if (!providerId) {
     return {};
   }
+  if (providerId === "openai") {
+    return {
+      reasoning_effort: "medium",
+    };
+  }
   if (PROVIDERS_THAT_SUPPORT_THINKING.includes(providerId)) {
     const budgetTokens = getThinkingBudgetTokens(settings?.thinkingBudget);
     return {
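Note the snake_case key: unlike the camelCase reasoningSummary above, reasoning_effort reads like a raw OpenAI request-body parameter, pinning reasoning effort to medium for OpenAI models. The diff does not show where this return value is consumed, so the sketch below only illustrates the function's new behavior under that assumption:

// Hypothetical calls; `settings` is whatever the caller already has in scope.
getExtraProviderOptions("openai", settings);
// -> { reasoning_effort: "medium" }

getExtraProviderOptions(undefined, settings);
// -> {}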
@@ -24,10 +24,16 @@ export async function getContextWindow() {
   return modelOption?.contextWindow || DEFAULT_CONTEXT_WINDOW;
 }

-// Most models support at least 8000 output tokens so we use it as a default value.
-const DEFAULT_MAX_TOKENS = 8_000;
-
-export async function getMaxTokens(model: LargeLanguageModel) {
+export async function getMaxTokens(
+  model: LargeLanguageModel,
+): Promise<number | undefined> {
   const modelOption = await findLanguageModel(model);
-  return modelOption?.maxOutputTokens || DEFAULT_MAX_TOKENS;
+  return modelOption?.maxOutputTokens ?? undefined;
+}
+
+export async function getTemperature(
+  model: LargeLanguageModel,
+): Promise<number> {
+  const modelOption = await findLanguageModel(model);
+  return modelOption?.temperature ?? 0;
 }
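With these helpers, the streamText call site stays model-agnostic: GPT-5 gets no output cap and temperature 1, while coding-tuned models keep temperature 0. A usage sketch, assuming LargeLanguageModel carries provider and name fields matching the MODEL_OPTIONS entries above (the field names are an assumption):

// Field names below are assumptions for illustration only.
const gpt5 = { provider: "openai", name: "gpt-5" } as LargeLanguageModel;
const sonnet = { provider: "anthropic", name: "claude-3-5-sonnet-20241022" } as LargeLanguageModel;

await getMaxTokens(gpt5);     // undefined -> maxTokens is omitted from the request
await getTemperature(gpt5);   // 1, from the MODEL_OPTIONS entry
await getMaxTokens(sonnet);   // 8_000
await getTemperature(sonnet); // 0, the deterministic default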