Mark which models are eligible for turbo edits (#172)

This commit is contained in:
Will Chen
2025-05-15 16:02:42 -07:00
committed by GitHub
parent 16e1dd7fc4
commit 09fc028f94
5 changed files with 38 additions and 22 deletions

View File

@@ -162,6 +162,7 @@ export type LanguageModel =
tag?: string;
maxOutputTokens?: number;
contextWindow?: number;
supportsTurboEdits?: boolean;
type: "local" | "cloud";
};

View File

@@ -13,6 +13,7 @@ export interface ModelOption {
tag?: string;
maxOutputTokens?: number;
contextWindow?: number;
supportsTurboEdits?: boolean;
}
export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
@@ -24,6 +25,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
description: "OpenAI's flagship model",
maxOutputTokens: 32_768,
contextWindow: 1_047_576,
supportsTurboEdits: true,
},
// https://platform.openai.com/docs/models/gpt-4.1-mini
{
@@ -50,6 +52,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
description: "Excellent coder",
maxOutputTokens: 64_000,
contextWindow: 200_000,
supportsTurboEdits: true,
},
{
name: "claude-3-5-haiku-20241022",
@@ -69,6 +72,7 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
maxOutputTokens: 65_536 - 1,
// Gemini context window = input tokens + output tokens
contextWindow: 1_048_576,
supportsTurboEdits: true,
},
// https://ai.google.dev/gemini-api/docs/models#gemini-2.5-flash-preview
{

View File

@@ -0,0 +1,22 @@
import { LargeLanguageModel } from "@/lib/schemas";
import { LanguageModel } from "../ipc_types";
import { getLanguageModels } from "../shared/language_model_helpers";
/**
 * Resolve the concrete `LanguageModel` record for a selected model.
 *
 * Lookup order:
 * 1. If the selection carries a `customModelId`, prefer the custom entry
 *    with that id for the same provider.
 * 2. Otherwise (or when no custom entry matches), fall back to matching
 *    by the provider-facing API name.
 *
 * @param model - The user's selected model (provider + name, optionally a custom id).
 * @returns The matching model record, or `undefined` when none is found.
 */
export async function findLanguageModel(
  model: LargeLanguageModel,
): Promise<LanguageModel | undefined> {
  // All models registered under this provider.
  const candidates = await getLanguageModels({ providerId: model.provider });

  // A custom model id takes precedence when it resolves to a custom entry.
  if (model.customModelId) {
    const custom = candidates.find(
      (candidate) =>
        candidate.type === "custom" && candidate.id === model.customModelId,
    );
    if (custom) {
      return custom;
    }
  }

  // Fall back to matching by API name.
  return candidates.find((candidate) => candidate.apiName === model.name);
}

View File

@@ -12,6 +12,7 @@ import { getLanguageModelProviders } from "../shared/language_model_helpers";
import { LanguageModelProvider } from "../ipc_types";
import { llmErrorStore } from "@/main/llm_error_store";
import { createDyadEngine } from "./llm_engine_provider";
import { findLanguageModel } from "./findLanguageModel";
const dyadLocalEngine = process.env.DYAD_LOCAL_ENGINE;
const dyadGatewayUrl = process.env.DYAD_GATEWAY_URL;
@@ -99,7 +100,14 @@ export async function getModelClient(
// Check if the selected provider supports Dyad Pro (has a gateway prefix) OR
// we're using local engine.
if (providerConfig.gatewayPrefix || dyadLocalEngine) {
const provider = settings.enableProLazyEditsMode
const languageModel = await findLanguageModel(model);
// Currently engine is only used for turbo edits.
const isEngineEnabled = Boolean(
settings.enableProLazyEditsMode &&
languageModel?.type === "cloud" &&
languageModel?.supportsTurboEdits,
);
const provider = isEngineEnabled
? createDyadEngine({
apiKey: dyadApiKey,
baseURL: dyadLocalEngine ?? "https://engine.dyad.sh/v1",
@@ -109,9 +117,7 @@ export async function getModelClient(
baseURL: dyadGatewayUrl ?? "https://llm-gateway.dyad.sh/v1",
});
logger.info(
`Using Dyad Pro API key. engine_enabled=${settings.enableProLazyEditsMode}`,
);
logger.info(`Using Dyad Pro API key. engine_enabled=${isEngineEnabled}`);
// Do not use free variant (for openrouter).
const modelName = model.name.split(":free")[0];
const autoModelClient = {

View File

@@ -2,7 +2,7 @@ import { LargeLanguageModel } from "@/lib/schemas";
import { readSettings } from "../../main/settings";
import { Message } from "../ipc_types";
import { getLanguageModels } from "../shared/language_model_helpers";
import { findLanguageModel } from "./findLanguageModel";
// Estimate tokens (4 characters per token)
export const estimateTokens = (text: string): number => {
@@ -31,20 +31,3 @@ export async function getMaxTokens(model: LargeLanguageModel) {
const modelOption = await findLanguageModel(model);
return modelOption?.maxOutputTokens || DEFAULT_MAX_TOKENS;
}
/**
 * Look up the stored model record for a selection: a matching custom
 * entry (by `customModelId`) wins; otherwise match by API name.
 * Returns `undefined` when neither lookup succeeds.
 */
async function findLanguageModel(model: LargeLanguageModel) {
  // Every model registered for the selection's provider.
  const available = await getLanguageModels({ providerId: model.provider });

  // Prefer a custom entry when a custom id is present and resolves.
  const customMatch = model.customModelId
    ? available.find(
        (entry) => entry.type === "custom" && entry.id === model.customModelId,
      )
    : undefined;

  // Otherwise match on the provider-facing API name.
  return customMatch ?? available.find((entry) => entry.apiName === model.name);
}