From d545babb6386d22913fc38e1a7f19bf8d6dc3a88 Mon Sep 17 00:00:00 2001
From: Will Chen
Date: Tue, 13 May 2025 22:21:27 -0700
Subject: [PATCH] Quick fix for Google models (#160)

---
 src/ipc/shared/language_model_helpers.ts | 8 ++++----
 src/ipc/utils/get_model_client.ts        | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/src/ipc/shared/language_model_helpers.ts b/src/ipc/shared/language_model_helpers.ts
index dd12988..66a0310 100644
--- a/src/ipc/shared/language_model_helpers.ts
+++ b/src/ipc/shared/language_model_helpers.ts
@@ -55,10 +55,9 @@ export const MODEL_OPTIONS: Record = {
   google: [
     // https://ai.google.dev/gemini-api/docs/models#gemini-2.5-pro-preview-03-25
     {
-      name: "gemini-2.5-pro-exp-03-25",
+      name: "gemini-2.5-pro-preview-05-06",
       displayName: "Gemini 2.5 Pro",
-      description: "Experimental version of Google's Gemini 2.5 Pro model",
-      tag: "Recommended",
+      description: "Preview version of Google's Gemini 2.5 Pro model",
       // See Flash 2.5 comment below (go 1 below just to be safe, even though it seems OK now).
       maxOutputTokens: 65_536 - 1,
       // Gemini context window = input token + output token
@@ -68,7 +67,8 @@ export const MODEL_OPTIONS: Record = {
     {
       name: "gemini-2.5-flash-preview-04-17",
       displayName: "Gemini 2.5 Flash",
-      description: "Preview version of Google's Gemini 2.5 Flash model",
+      description:
+        "Preview version of Google's Gemini 2.5 Flash model (free tier available)",
       // Weirdly for Vertex AI, the output token limit is *exclusive* of the stated limit.
       maxOutputTokens: 65_536 - 1,
       // Gemini context window = input token + output token
diff --git a/src/ipc/utils/get_model_client.ts b/src/ipc/utils/get_model_client.ts
index 20fe6a4..b8aa146 100644
--- a/src/ipc/utils/get_model_client.ts
+++ b/src/ipc/utils/get_model_client.ts
@@ -15,7 +15,7 @@ import { llmErrorStore } from "@/main/llm_error_store";
 const AUTO_MODELS = [
   {
     provider: "google",
-    name: "gemini-2.5-pro-exp-03-25",
+    name: "gemini-2.5-flash-preview-04-17",
   },
   {
     provider: "anthropic",
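
Note on the token math in this patch (editor's addition, not part of the commit): the in-code comments explain that Gemini's context window is shared between input and output tokens, and that Vertex AI treats the stated output limit as exclusive of the limit itself, which is why maxOutputTokens is set to 65_536 - 1 rather than 65_536. Below is a minimal TypeScript sketch of that budgeting logic; the maxInputTokens helper and the 1,048,576-token window figure are illustrative assumptions, not code from this repository.

    // Per the patch comments: Vertex AI's output cap is exclusive of the
    // documented 65,536 limit, so stay one token below it to be safe.
    const GEMINI_MAX_OUTPUT_TOKENS = 65_536 - 1;

    // Hypothetical helper (not in this repo): because the context window is
    // shared, whatever is reserved for output is unavailable for input.
    function maxInputTokens(contextWindow: number, maxOutputTokens: number): number {
      return contextWindow - maxOutputTokens;
    }

    // Example with an assumed 1,048,576-token context window:
    // maxInputTokens(1_048_576, GEMINI_MAX_OUTPUT_TOKENS) === 983_041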