Remove budget saver mode (#378)

This code was complex and hairy, and it produced very opaque errors
(for both free and pro users). Budget saver no longer offers much
benefit because Google removed the 2.5 Pro free quota a while ago
(after the model graduated from experimental to preview). Dyad Pro
users can still use the 2.5 Flash free quota by disabling Dyad Pro via
the Dyad Pro button at the top.
Author: Will Chen
Date: 2025-06-10 13:54:27 -07:00
Committed by: GitHub
Parent: 534cbad909
Commit: fa80014e16
6 changed files with 14 additions and 217 deletions
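
At a glance, the change collapses a two-tier fallback (free Google quota first, then Dyad Pro credits) into a single direct call. A minimal before/after sketch using names from the diffs below (illustrative shape only, not the exact code; engineModelClient is a hypothetical fallback):

// Before: custom wrapper walking an ordered list of model clients,
// starting with free Gemini quota and falling back to the Dyad engine.
const { textStream: beforeStream } = streamTextWithBackup({
  model: modelClient,                      // primary: free Google quota
  backupModelClients: [engineModelClient], // hypothetical fallback list
  system: systemPrompt,
  messages: chatMessages,
});

// After: one client plus the AI SDK's built-in retry for transient failures.
const { textStream: afterStream } = streamText({
  model: modelClient.model,
  maxRetries: 2,
  system: systemPrompt,
  messages: chatMessages,
});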

File: ProModeSelector

@@ -18,10 +18,6 @@ import { IpcClient } from "@/ipc/ipc_client";

 export function ProModeSelector() {
   const { settings, updateSettings } = useSettings();
-  const toggleSaverMode = () => {
-    updateSettings({ enableProSaverMode: !settings?.enableProSaverMode });
-  };
-
   const toggleLazyEdits = () => {
     updateSettings({
       enableProLazyEditsMode: !settings?.enableProLazyEditsMode,

@@ -76,16 +72,6 @@ export function ProModeSelector() {
         </a>
       </div>
     )}
-    <SelectorRow
-      id="saver-mode"
-      label="Saver Mode"
-      description="Uses your free Gemini API quota before consuming Dyad Pro AI credits"
-      tooltip="Note: using the free Gemini API lets Google use your data to
-      improve their models."
-      proEnabled={proEnabled}
-      settingEnabled={Boolean(settings?.enableProSaverMode)}
-      toggle={toggleSaverMode}
-    />
    <SelectorRow
      id="lazy-edits"
      label="Turbo Edits"

File: chat_stream_handlers

@@ -1,5 +1,5 @@
 import { ipcMain } from "electron";
-import { CoreMessage, TextPart, ImagePart } from "ai";
+import { CoreMessage, TextPart, ImagePart, streamText } from "ai";
 import { db } from "../../db";
 import { chats, messages } from "../../db/schema";
 import { and, eq, isNull } from "drizzle-orm";

@@ -32,7 +32,6 @@ import * as crypto from "crypto";
 import { readFile, writeFile, unlink } from "fs/promises";
 import { getMaxTokens } from "../utils/token_utils";
 import { MAX_CHAT_TURNS_IN_CONTEXT } from "@/constants/settings_constants";
-import { streamTextWithBackup } from "../utils/stream_utils";
 import { validateChatContext } from "../utils/context_paths_utils";

 const logger = log.scope("chat_stream_handlers");

@@ -244,8 +243,11 @@ export function registerChatStreamHandlers() {
       "estimated tokens",
       codebaseInfo.length / 4,
     );
-    const { modelClient, backupModelClients, isEngineEnabled } =
-      await getModelClient(settings.selectedModel, settings, files);
+    const { modelClient, isEngineEnabled } = await getModelClient(
+      settings.selectedModel,
+      settings,
+      files,
+    );

     // Prepare message history for the AI
     const messageHistory = updatedChat.messages.map((message) => ({

@@ -394,11 +396,11 @@ This conversation includes one or more image attachments. When the user uploads
       }

       // When calling streamText, the messages need to be properly formatted for mixed content
-      const { textStream } = streamTextWithBackup({
+      const { textStream } = streamText({
         maxTokens: await getMaxTokens(settings.selectedModel),
         temperature: 0,
-        model: modelClient,
-        backupModelClients: backupModelClients,
+        maxRetries: 2,
+        model: modelClient.model,
         system: systemPrompt,
         messages: chatMessages.filter((m) => m.content),
         onError: (error: any) => {
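
Nothing changes for consumers of the stream: both the old wrapper and plain streamText hand back an async-iterable textStream. A sketch of the (unchanged) consumption pattern, with variable names mine:

// textStream is an AsyncIterable<string> in both the old and new code paths,
// so downstream accumulation logic is untouched by this commit.
let fullResponse = "";
for await (const chunk of textStream) {
  fullResponse += chunk; // e.g. append to the in-progress assistant message
}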

File: get_model_client

@@ -10,7 +10,6 @@ import { getEnvVar } from "./read_env";
 import log from "electron-log";
 import { getLanguageModelProviders } from "../shared/language_model_helpers";
 import { LanguageModelProvider } from "../ipc_types";
-import { llmErrorStore } from "@/main/llm_error_store";
 import { createDyadEngine } from "./llm_engine_provider";
 import { findLanguageModel } from "./findLanguageModel";
 import { LM_STUDIO_BASE_URL } from "./lm_studio_utils";

@@ -50,7 +49,6 @@ export async function getModelClient(
   files?: File[],
 ): Promise<{
   modelClient: ModelClient;
-  backupModelClients: ModelClient[];
   isEngineEnabled?: boolean;
 }> {
   const allProviders = await getLanguageModelProviders();

@@ -143,42 +141,11 @@ export async function getModelClient(
         ),
         builtinProviderId: "auto",
       };
-      const googleSettings = settings.providerSettings?.google;
-      // Budget saver mode logic (all must be true):
-      // 1. Pro Saver Mode is enabled
-      // 2. Provider is Google
-      // 3. API Key is set
-      // 4. Has no recent errors
-      if (
-        settings.enableProSaverMode &&
-        providerConfig.id === "google" &&
-        googleSettings &&
-        googleSettings.apiKey?.value &&
-        llmErrorStore.modelHasNoRecentError({
-          model: model.name,
-          provider: providerConfig.id,
-        })
-      ) {
-        return {
-          modelClient: getRegularModelClient(
-            {
-              provider: providerConfig.id,
-              name: model.name,
-            },
-            settings,
-            providerConfig,
-          ).modelClient,
-          backupModelClients: [autoModelClient],
-          isEngineEnabled,
-        };
-      } else {
-        return {
-          modelClient: autoModelClient,
-          backupModelClients: [],
-          isEngineEnabled,
-        };
-      }
+      return {
+        modelClient: autoModelClient,
+        isEngineEnabled,
+      };
     } else {
       logger.warn(
         `Dyad Pro enabled, but provider ${model.provider} does not have a gateway prefix defined. Falling back to direct provider connection.`,

File: stream_utils (file deleted)

@@ -1,123 +0,0 @@
-import { streamText } from "ai";
-import log from "electron-log";
-
-import { ModelClient } from "./get_model_client";
-import { llmErrorStore } from "@/main/llm_error_store";
-
-const logger = log.scope("stream_utils");
-
-export interface StreamTextWithBackupParams
-  extends Omit<Parameters<typeof streamText>[0], "model"> {
-  model: ModelClient; // primary client
-  backupModelClients?: ModelClient[]; // ordered fall-backs
-}
-
-export function streamTextWithBackup(params: StreamTextWithBackupParams): {
-  textStream: AsyncIterable<string>;
-} {
-  const {
-    model: primaryModel,
-    backupModelClients = [],
-    onError: callerOnError,
-    abortSignal: callerAbort,
-    ...rest
-  } = params;
-
-  const modelClients: ModelClient[] = [primaryModel, ...backupModelClients];
-
-  async function* combinedGenerator(): AsyncIterable<string> {
-    let lastErr: { error: unknown } | undefined = undefined;
-    for (let i = 0; i < modelClients.length; i++) {
-      const currentModelClient = modelClients[i];
-
-      /* Local abort controller for this single attempt */
-      const attemptAbort = new AbortController();
-      if (callerAbort) {
-        if (callerAbort.aborted) {
-          // Already aborted, trigger immediately
-          attemptAbort.abort();
-        } else {
-          callerAbort.addEventListener("abort", () => attemptAbort.abort(), {
-            once: true,
-          });
-        }
-      }
-
-      let errorFromCurrent: { error: unknown } | undefined = undefined; // set when onError fires
-      const providerId = currentModelClient.builtinProviderId;
-      if (providerId) {
-        llmErrorStore.clearModelError({
-          model: currentModelClient.model.modelId,
-          provider: providerId,
-        });
-      }
-      logger.info(
-        "Streaming text with model",
-        currentModelClient.model.modelId,
-        "provider",
-        currentModelClient.model.provider,
-        "builtinProviderId",
-        currentModelClient.builtinProviderId,
-      );
-      const { textStream } = streamText({
-        ...rest,
-        maxRetries: i === modelClients.length - 1 ? 3 : 0,
-        model: currentModelClient.model,
-        abortSignal: attemptAbort.signal,
-        onError: (error) => {
-          const providerId = currentModelClient.builtinProviderId;
-          if (providerId) {
-            llmErrorStore.recordModelError({
-              model: currentModelClient.model.modelId,
-              provider: providerId,
-            });
-          }
-          logger.error(
-            `Error streaming text with ${providerId} and model ${currentModelClient.model.modelId}: ${error}`,
-            error,
-          );
-          errorFromCurrent = error;
-          attemptAbort.abort(); // kill fetch / SSE
-        },
-      });
-
-      try {
-        for await (const chunk of textStream) {
-          /* If onError fired during streaming, bail out immediately. */
-          if (errorFromCurrent) throw errorFromCurrent;
-          yield chunk;
-        }
-        /* Stream ended - check if it actually failed */
-        if (errorFromCurrent) throw errorFromCurrent;
-        /* Completed successfully - stop trying more models. */
-        return;
-      } catch (err) {
-        if (typeof err === "object" && err !== null && "error" in err) {
-          lastErr = err as { error: unknown };
-        } else {
-          lastErr = { error: err };
-        }
-        logger.warn(
-          `[streamTextWithBackup] model #${i} failed ${
-            i < modelClients.length - 1
-              ? "switching to backup"
-              : "no backups left"
-          }`,
-          err,
-        );
-        /* loop continues to next model (if any) */
-      }
-    }
-
-    /* Every model failed */
-    if (!lastErr) {
-      throw new Error("Invariant in StreamTextWithbackup failed!");
-    }
-    callerOnError?.(lastErr);
-    logger.error("All model invocations failed", lastErr);
-    // throw lastErr ?? new Error("All model invocations failed");
-  }
-
-  return { textStream: combinedGenerator() };
-}
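
The subtlest piece of the deleted wrapper is the per-attempt abort wiring. Isolated as a standalone helper for clarity (the helper name is mine; the logic is lifted from the code above):

// Each attempt gets its own AbortController so onError can kill just that
// attempt's fetch/SSE stream, while an abort from the caller still
// propagates into whichever attempt is currently running.
function linkedAbortController(callerSignal?: AbortSignal): AbortController {
  const attempt = new AbortController();
  if (callerSignal) {
    if (callerSignal.aborted) {
      attempt.abort(); // caller already aborted: cancel immediately
    } else {
      callerSignal.addEventListener("abort", () => attempt.abort(), {
        once: true,
      });
    }
  }
  return attempt;
}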

File: user settings schema (UserSettingsSchema)

@@ -139,7 +139,6 @@ export const UserSettingsSchema = z.object({
   experiments: ExperimentsSchema.optional(),
   lastShownReleaseNotesVersion: z.string().optional(),
   maxChatTurnsInContext: z.number().optional(),
-  enableProSaverMode: z.boolean().optional(),
   enableProLazyEditsMode: z.boolean().optional(),
   enableProSmartFilesContextMode: z.boolean().optional(),
   selectedTemplateId: z.string().optional(),

@@ -154,6 +153,7 @@ export const UserSettingsSchema = z.object({
   ////////////////////////////////
   // DEPRECATED.
   ////////////////////////////////
+  enableProSaverMode: z.boolean().optional(),
   dyadProBudget: DyadProBudgetSchema.optional(),
   runtimeMode: RuntimeModeSchema.optional(),
 });
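
Moving enableProSaverMode under the DEPRECATED block, instead of deleting it outright, keeps settings files written by older versions parsing cleanly. A sketch of the intent (existingSettings is a placeholder for a previously saved settings object):

// The key stays in the schema, so old settings still validate; the flag
// simply no longer drives any behavior now that the saver-mode code is gone.
const saved = { ...existingSettings, enableProSaverMode: true };
UserSettingsSchema.parse(saved); // still accepted; the value is ignored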

File: llm_error_store (file deleted)

@@ -1,35 +0,0 @@
-class LlmErrorStore {
-  private modelErrorToTimestamp: Record<string, number> = {};
-
-  constructor() {}
-
-  recordModelError({ model, provider }: { model: string; provider: string }) {
-    this.modelErrorToTimestamp[this.getKey({ model, provider })] = Date.now();
-  }
-
-  clearModelError({ model, provider }: { model: string; provider: string }) {
-    delete this.modelErrorToTimestamp[this.getKey({ model, provider })];
-  }
-
-  modelHasNoRecentError({
-    model,
-    provider,
-  }: {
-    model: string;
-    provider: string;
-  }): boolean {
-    const key = this.getKey({ model, provider });
-    const timestamp = this.modelErrorToTimestamp[key];
-    if (!timestamp) {
-      return true;
-    }
-    const oneHourAgo = Date.now() - 1000 * 60 * 60;
-    return timestamp < oneHourAgo;
-  }
-
-  private getKey({ model, provider }: { model: string; provider: string }) {
-    return `${provider}::${model}`;
-  }
-}
-
-export const llmErrorStore = new LlmErrorStore();
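
For reference, this store implemented a one-hour cooldown per provider/model pair: recording an error made the pair read as "recently failed" for the next hour. An illustrative usage (the model and provider strings are examples):

// Record a failure, then gate on it; clearing resets the cooldown early.
llmErrorStore.recordModelError({ model: "gemini-2.5-pro", provider: "google" });
llmErrorStore.modelHasNoRecentError({ model: "gemini-2.5-pro", provider: "google" }); // => false for an hour
llmErrorStore.clearModelError({ model: "gemini-2.5-pro", provider: "google" });
llmErrorStore.modelHasNoRecentError({ model: "gemini-2.5-pro", provider: "google" }); // => true again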