diff --git a/src/components/ProModeSelector.tsx b/src/components/ProModeSelector.tsx
index 768ce3a..1a95a37 100644
--- a/src/components/ProModeSelector.tsx
+++ b/src/components/ProModeSelector.tsx
@@ -18,10 +18,6 @@ import { IpcClient } from "@/ipc/ipc_client";
export function ProModeSelector() {
const { settings, updateSettings } = useSettings();
- const toggleSaverMode = () => {
- updateSettings({ enableProSaverMode: !settings?.enableProSaverMode });
- };
-
const toggleLazyEdits = () => {
updateSettings({
enableProLazyEditsMode: !settings?.enableProLazyEditsMode,
@@ -76,16 +72,6 @@ export function ProModeSelector() {
)}
-
@@ -394,11 +396,11 @@ This conversation includes one or more image attachments. When the user uploads
}
// When calling streamText, the messages need to be properly formatted for mixed content
- const { textStream } = streamTextWithBackup({
+ const { textStream } = streamText({
maxTokens: await getMaxTokens(settings.selectedModel),
temperature: 0,
- model: modelClient,
- backupModelClients: backupModelClients,
+ maxRetries: 2,
+ model: modelClient.model,
system: systemPrompt,
messages: chatMessages.filter((m) => m.content),
onError: (error: any) => {
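
The hunk above is the heart of the change: the custom `streamTextWithBackup` wrapper is replaced by the `ai` SDK's `streamText`, with `maxRetries: 2` retrying the same model on transient failures instead of falling back to a different client. A minimal sketch of the resulting call path (assumed names, trimmed from the real handler):

```ts
import { streamText, type CoreMessage, type LanguageModel } from "ai";

// Sketch of the simplified call path. `model`, `system`, and `messages` are
// stand-ins for what the handler derives; maxRetries retries the *same*
// model on transient errors, replacing the old cross-model fallback.
async function streamChat(
  model: LanguageModel,
  system: string,
  messages: CoreMessage[],
): Promise<string> {
  const { textStream } = streamText({
    model,
    maxRetries: 2,
    temperature: 0,
    system,
    messages,
    onError: ({ error }) => console.error("stream error:", error),
  });
  let full = "";
  for await (const chunk of textStream) {
    full += chunk;
  }
  return full;
}
```
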
diff --git a/src/ipc/utils/get_model_client.ts b/src/ipc/utils/get_model_client.ts
index 693e3a0..04c99f6 100644
--- a/src/ipc/utils/get_model_client.ts
+++ b/src/ipc/utils/get_model_client.ts
@@ -10,7 +10,6 @@ import { getEnvVar } from "./read_env";
import log from "electron-log";
import { getLanguageModelProviders } from "../shared/language_model_helpers";
import { LanguageModelProvider } from "../ipc_types";
-import { llmErrorStore } from "@/main/llm_error_store";
import { createDyadEngine } from "./llm_engine_provider";
import { findLanguageModel } from "./findLanguageModel";
import { LM_STUDIO_BASE_URL } from "./lm_studio_utils";
@@ -50,7 +49,6 @@ export async function getModelClient(
files?: File[],
): Promise<{
modelClient: ModelClient;
- backupModelClients: ModelClient[];
isEngineEnabled?: boolean;
}> {
const allProviders = await getLanguageModelProviders();
@@ -143,42 +141,11 @@ export async function getModelClient(
),
builtinProviderId: "auto",
};
- const googleSettings = settings.providerSettings?.google;
- // Budget saver mode logic (all must be true):
- // 1. Pro Saver Mode is enabled
- // 2. Provider is Google
- // 3. API Key is set
- // 4. Has no recent errors
- if (
- settings.enableProSaverMode &&
- providerConfig.id === "google" &&
- googleSettings &&
- googleSettings.apiKey?.value &&
- llmErrorStore.modelHasNoRecentError({
- model: model.name,
- provider: providerConfig.id,
- })
- ) {
- return {
- modelClient: getRegularModelClient(
- {
- provider: providerConfig.id,
- name: model.name,
- },
- settings,
- providerConfig,
- ).modelClient,
- backupModelClients: [autoModelClient],
- isEngineEnabled,
- };
- } else {
- return {
- modelClient: autoModelClient,
- backupModelClients: [],
- isEngineEnabled,
- };
- }
+ return {
+ modelClient: autoModelClient,
+ isEngineEnabled,
+ };
} else {
logger.warn(
`Dyad Pro enabled, but provider ${model.provider} does not have a gateway prefix defined. Falling back to direct provider connection.`,
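
With the saver-mode branch deleted, the auto provider path has exactly one outcome and `getModelClient` no longer returns a `backupModelClients` array. A sketch of the narrowed contract as this diff leaves it (types are assumptions based on the hunks above, not the project's full definitions):

```ts
import type { LanguageModel } from "ai";

// Assumed shapes, mirroring the hunks above (not the project's full types).
interface ModelClient {
  model: LanguageModel;
  builtinProviderId?: string;
}

interface GetModelClientResult {
  modelClient: ModelClient;
  isEngineEnabled?: boolean;
}

// Illustrative call site: with no backup list, callers simply pass
// modelClient.model straight through to streamText.
function pickModel({ modelClient }: GetModelClientResult): LanguageModel {
  return modelClient.model;
}
```
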
diff --git a/src/ipc/utils/stream_utils.ts b/src/ipc/utils/stream_utils.ts
deleted file mode 100644
index 59480f7..0000000
--- a/src/ipc/utils/stream_utils.ts
+++ /dev/null
@@ -1,123 +0,0 @@
-import { streamText } from "ai";
-import log from "electron-log";
-import { ModelClient } from "./get_model_client";
-import { llmErrorStore } from "@/main/llm_error_store";
-const logger = log.scope("stream_utils");
-
-export interface StreamTextWithBackupParams
-  extends Omit<Parameters<typeof streamText>[0], "model"> {
- model: ModelClient; // primary client
- backupModelClients?: ModelClient[]; // ordered fall-backs
-}
-
-export function streamTextWithBackup(params: StreamTextWithBackupParams): {
-  textStream: AsyncIterable<string>;
-} {
- const {
- model: primaryModel,
- backupModelClients = [],
- onError: callerOnError,
- abortSignal: callerAbort,
- ...rest
- } = params;
-
- const modelClients: ModelClient[] = [primaryModel, ...backupModelClients];
-
-  async function* combinedGenerator(): AsyncIterable<string> {
- let lastErr: { error: unknown } | undefined = undefined;
-
- for (let i = 0; i < modelClients.length; i++) {
- const currentModelClient = modelClients[i];
-
- /* Local abort controller for this single attempt */
- const attemptAbort = new AbortController();
- if (callerAbort) {
- if (callerAbort.aborted) {
- // Already aborted, trigger immediately
- attemptAbort.abort();
- } else {
- callerAbort.addEventListener("abort", () => attemptAbort.abort(), {
- once: true,
- });
- }
- }
-
- let errorFromCurrent: { error: unknown } | undefined = undefined; // set when onError fires
- const providerId = currentModelClient.builtinProviderId;
- if (providerId) {
- llmErrorStore.clearModelError({
- model: currentModelClient.model.modelId,
- provider: providerId,
- });
- }
- logger.info(
- "Streaming text with model",
- currentModelClient.model.modelId,
- "provider",
- currentModelClient.model.provider,
- "builtinProviderId",
- currentModelClient.builtinProviderId,
- );
- const { textStream } = streamText({
- ...rest,
- maxRetries: i === modelClients.length - 1 ? 3 : 0,
- model: currentModelClient.model,
- abortSignal: attemptAbort.signal,
- onError: (error) => {
- const providerId = currentModelClient.builtinProviderId;
- if (providerId) {
- llmErrorStore.recordModelError({
- model: currentModelClient.model.modelId,
- provider: providerId,
- });
- }
- logger.error(
- `Error streaming text with ${providerId} and model ${currentModelClient.model.modelId}: ${error}`,
- error,
- );
- errorFromCurrent = error;
- attemptAbort.abort(); // kill fetch / SSE
- },
- });
-
- try {
- for await (const chunk of textStream) {
- /* If onError fired during streaming, bail out immediately. */
- if (errorFromCurrent) throw errorFromCurrent;
- yield chunk;
- }
-
- /* Stream ended – check if it actually failed */
- if (errorFromCurrent) throw errorFromCurrent;
-
- /* Completed successfully – stop trying more models. */
- return;
- } catch (err) {
- if (typeof err === "object" && err !== null && "error" in err) {
- lastErr = err as { error: unknown };
- } else {
- lastErr = { error: err };
- }
- logger.warn(
- `[streamTextWithBackup] model #${i} failed – ${
- i < modelClients.length - 1
- ? "switching to backup"
- : "no backups left"
- }`,
- err,
- );
- /* loop continues to next model (if any) */
- }
- }
-
- /* Every model failed */
- if (!lastErr) {
- throw new Error("Invariant in StreamTextWithbackup failed!");
- }
- callerOnError?.(lastErr);
- logger.error("All model invocations failed", lastErr);
- // throw lastErr ?? new Error("All model invocations failed");
- }
-
- return { textStream: combinedGenerator() };
-}
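
For reference, the trickiest part of the file deleted above is bridging `streamText`'s `onError` callback into a `for await` loop: record the error in a flag, abort the in-flight attempt, and re-check the flag on every chunk. The same technique, distilled into a standalone sketch (not project API):

```ts
// Bridge a callback-style error into async iteration. `start` receives our
// onError and an AbortSignal; when the error fires we abort the in-flight
// attempt (tearing down the fetch/SSE) and re-throw on the next chunk check.
async function* withErrorBridge<T>(
  start: (
    onError: (e: unknown) => void,
    signal: AbortSignal,
  ) => AsyncIterable<T>,
): AsyncGenerator<T> {
  const abort = new AbortController();
  let failed: unknown;
  const stream = start((e) => {
    failed = e;
    abort.abort();
  }, abort.signal);
  for await (const chunk of stream) {
    if (failed !== undefined) throw failed; // error surfaced mid-stream
    yield chunk;
  }
  if (failed !== undefined) throw failed; // error surfaced at stream end
}
```
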
diff --git a/src/lib/schemas.ts b/src/lib/schemas.ts
index 6e478b0..904c3d5 100644
--- a/src/lib/schemas.ts
+++ b/src/lib/schemas.ts
@@ -139,7 +139,6 @@ export const UserSettingsSchema = z.object({
experiments: ExperimentsSchema.optional(),
lastShownReleaseNotesVersion: z.string().optional(),
maxChatTurnsInContext: z.number().optional(),
- enableProSaverMode: z.boolean().optional(),
enableProLazyEditsMode: z.boolean().optional(),
enableProSmartFilesContextMode: z.boolean().optional(),
selectedTemplateId: z.string().optional(),
@@ -154,6 +153,7 @@ export const UserSettingsSchema = z.object({
////////////////////////////////
// DEPRECATED.
////////////////////////////////
+ enableProSaverMode: z.boolean().optional(),
dyadProBudget: DyadProBudgetSchema.optional(),
runtimeMode: RuntimeModeSchema.optional(),
});
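
Note that `enableProSaverMode` moves into the DEPRECATED block rather than disappearing. A toy illustration (not the real schema) of why that matters with zod: plain `z.object` strips unknown keys on parse, so deleting the field outright would drop whatever older builds persisted on the next parse-and-save round-trip:

```ts
import { z } from "zod";

// Toy schema, not the real UserSettingsSchema. Plain z.object strips keys it
// does not know about, so a parse -> persist round-trip would silently drop
// a field that was removed from the schema.
const WithFlag = z.object({ enableProSaverMode: z.boolean().optional() });
const WithoutFlag = z.object({});

const stored = { enableProSaverMode: true }; // written by an older build
console.log(WithFlag.parse(stored)); // { enableProSaverMode: true } – preserved
console.log(WithoutFlag.parse(stored)); // {} – value lost on the next save
```
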
diff --git a/src/main/llm_error_store.ts b/src/main/llm_error_store.ts
deleted file mode 100644
index 31fd4e6..0000000
--- a/src/main/llm_error_store.ts
+++ /dev/null
@@ -1,35 +0,0 @@
-class LlmErrorStore {
-  private modelErrorToTimestamp: Record<string, number> = {};
-
- constructor() {}
-
- recordModelError({ model, provider }: { model: string; provider: string }) {
- this.modelErrorToTimestamp[this.getKey({ model, provider })] = Date.now();
- }
-
- clearModelError({ model, provider }: { model: string; provider: string }) {
- delete this.modelErrorToTimestamp[this.getKey({ model, provider })];
- }
-
- modelHasNoRecentError({
- model,
- provider,
- }: {
- model: string;
- provider: string;
- }): boolean {
- const key = this.getKey({ model, provider });
- const timestamp = this.modelErrorToTimestamp[key];
- if (!timestamp) {
- return true;
- }
- const oneHourAgo = Date.now() - 1000 * 60 * 60;
- return timestamp < oneHourAgo;
- }
-
- private getKey({ model, provider }: { model: string; provider: string }) {
- return `${provider}::${model}`;
- }
-}
-
-export const llmErrorStore = new LlmErrorStore();
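
The deleted store above gave each (provider, model) pair a one-hour error cooldown: `modelHasNoRecentError` returns true only if the model never failed or its last failure is more than an hour old. The same check as a standalone sketch:

```ts
// Standalone version of the deleted cooldown check: an error only "counts"
// against a model for one hour.
const ONE_HOUR_MS = 60 * 60 * 1000;

function hasNoRecentError(lastErrorAt: number | undefined): boolean {
  if (lastErrorAt === undefined) return true; // never failed
  return lastErrorAt < Date.now() - ONE_HOUR_MS; // failure has cooled down
}

console.log(hasNoRecentError(undefined)); // true
console.log(hasNoRecentError(Date.now())); // false – just failed
console.log(hasNoRecentError(Date.now() - 2 * ONE_HOUR_MS)); // true
```
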