Quick fix for Google models (#160)
@@ -55,10 +55,9 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
   google: [
-    // https://ai.google.dev/gemini-api/docs/models#gemini-2.5-pro-preview-03-25
     {
-      name: "gemini-2.5-pro-exp-03-25",
+      name: "gemini-2.5-pro-preview-05-06",
       displayName: "Gemini 2.5 Pro",
-      description: "Experimental version of Google's Gemini 2.5 Pro model",
       tag: "Recommended",
+      description: "Preview version of Google's Gemini 2.5 Pro model",
       // See Flash 2.5 comment below (go 1 below just to be safe, even though it seems OK now).
       maxOutputTokens: 65_536 - 1,
       // Gemini context window = input token + output token
@@ -68,7 +67,8 @@ export const MODEL_OPTIONS: Record<string, ModelOption[]> = {
     {
       name: "gemini-2.5-flash-preview-04-17",
       displayName: "Gemini 2.5 Flash",
-      description: "Preview version of Google's Gemini 2.5 Flash model",
+      description:
+        "Preview version of Google's Gemini 2.5 Flash model (free tier available)",
       // Weirdly for Vertex AI, the output token limit is *exclusive* of the stated limit.
       maxOutputTokens: 65_536 - 1,
       // Gemini context window = input token + output token

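The two maxOutputTokens comments above encode the same pair of constraints for both Gemini entries: Vertex AI treats the stated 65,536 output limit as exclusive (hence 65_536 - 1), and Gemini's context window is shared between input and output tokens. As a rough illustration of what that sharing implies, here is a minimal TypeScript sketch; the GeminiBudget interface, the maxInputTokensFor helper, and the 1_048_576 context-window figure are assumptions for this example, not identifiers or numbers taken from this repository.

// Hypothetical sketch: how a shared context window constrains the prompt.
// GeminiBudget, maxInputTokensFor, and the window size below are illustrative
// assumptions, not part of this codebase.
interface GeminiBudget {
  contextWindow: number;   // total tokens shared by prompt + completion
  maxOutputTokens: number; // e.g. 65_536 - 1 to stay under an exclusive limit
}

function maxInputTokensFor(budget: GeminiBudget): number {
  // Whatever is reserved for the completion is unavailable to the prompt.
  return budget.contextWindow - budget.maxOutputTokens;
}

const example: GeminiBudget = {
  contextWindow: 1_048_576, // assumed ~1M-token window
  maxOutputTokens: 65_536 - 1,
};
// maxInputTokensFor(example) === 983_041
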
@@ -15,7 +15,7 @@ import { llmErrorStore } from "@/main/llm_error_store";
 const AUTO_MODELS = [
   {
     provider: "google",
-    name: "gemini-2.5-pro-exp-03-25",
+    name: "gemini-2.5-flash-preview-04-17",
   },
   {
     provider: "anthropic",

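AUTO_MODELS reads as an ordered provider/model preference list (Google first, then Anthropic). Below is a minimal sketch of how such a list might be walked to pick the first usable entry; selectAutoModel and isProviderConfigured are hypothetical names used only for illustration, and the Anthropic entry's model name is left out because the diff does not show it.

// Hypothetical sketch of consuming an AUTO_MODELS-style preference list.
// selectAutoModel and isProviderConfigured are illustrative, not this repo's API.
interface AutoModel {
  provider: string;
  name: string;
}

function selectAutoModel(
  models: AutoModel[],
  isProviderConfigured: (provider: string) => boolean,
): AutoModel | undefined {
  // Walk the list in order and return the first model whose provider is
  // configured; undefined means no auto model can be used.
  return models.find((m) => isProviderConfigured(m.provider));
}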