Remove budget saver mode (#378)

This code was quite complex and hairy and resulted in very opaque errors
(for both free and pro users). There's not much benefit to budget saver
because Google removed 2.5 Pro free quota a while ago (after it
graduated the model from experimental to preview). Dyad Pro users can
still use 2.5 Flash free quota by disabling Dyad Pro via the
Dyad Pro button at the top.
This commit is contained in:
Will Chen
2025-06-10 13:54:27 -07:00
committed by GitHub
parent 534cbad909
commit fa80014e16
6 changed files with 14 additions and 217 deletions

View File

@@ -1,5 +1,5 @@
import { ipcMain } from "electron";
import { CoreMessage, TextPart, ImagePart } from "ai";
import { CoreMessage, TextPart, ImagePart, streamText } from "ai";
import { db } from "../../db";
import { chats, messages } from "../../db/schema";
import { and, eq, isNull } from "drizzle-orm";
@@ -32,7 +32,6 @@ import * as crypto from "crypto";
import { readFile, writeFile, unlink } from "fs/promises";
import { getMaxTokens } from "../utils/token_utils";
import { MAX_CHAT_TURNS_IN_CONTEXT } from "@/constants/settings_constants";
import { streamTextWithBackup } from "../utils/stream_utils";
import { validateChatContext } from "../utils/context_paths_utils";
const logger = log.scope("chat_stream_handlers");
@@ -244,8 +243,11 @@ export function registerChatStreamHandlers() {
"estimated tokens",
codebaseInfo.length / 4,
);
const { modelClient, backupModelClients, isEngineEnabled } =
await getModelClient(settings.selectedModel, settings, files);
const { modelClient, isEngineEnabled } = await getModelClient(
settings.selectedModel,
settings,
files,
);
// Prepare message history for the AI
const messageHistory = updatedChat.messages.map((message) => ({
@@ -394,11 +396,11 @@ This conversation includes one or more image attachments. When the user uploads
}
// When calling streamText, the messages need to be properly formatted for mixed content
const { textStream } = streamTextWithBackup({
const { textStream } = streamText({
maxTokens: await getMaxTokens(settings.selectedModel),
temperature: 0,
model: modelClient,
backupModelClients: backupModelClients,
maxRetries: 2,
model: modelClient.model,
system: systemPrompt,
messages: chatMessages.filter((m) => m.content),
onError: (error: any) => {