Simplify provider logic and migrate getContextWindow (#142)
@@ -15,7 +15,7 @@ import { extractCodebase } from "../../utils/codebase";
 import { processFullResponseActions } from "../processors/response_processor";
-import { streamTestResponse } from "./testing_chat_handlers";
+import { getTestResponse } from "./testing_chat_handlers";
-import { getMaxTokens, getModelClient } from "../utils/get_model_client";
+import { getModelClient } from "../utils/get_model_client";
 import log from "electron-log";
 import {
   getSupabaseContext,
@@ -27,6 +27,7 @@ import * as path from "path";
 import * as os from "os";
 import * as crypto from "crypto";
 import { readFile, writeFile, unlink } from "fs/promises";
+import { getMaxTokens } from "../utils/token_utils";
 
 const logger = log.scope("chat_stream_handlers");
 
@@ -332,7 +333,7 @@ This conversation includes one or more image attachments. When the user uploads
 
   // When calling streamText, the messages need to be properly formatted for mixed content
   const { textStream } = streamText({
-    maxTokens: getMaxTokens(settings.selectedModel),
+    maxTokens: await getMaxTokens(settings.selectedModel),
     temperature: 0,
     model: modelClient,
     system: systemPrompt,
 
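Note: the diff shows getMaxTokens moving from get_model_client to token_utils and becoming async, so call sites now await it. A minimal sketch of the assumed shape; the model type, lookup source, and fallback value below are illustrative assumptions, not code from this commit:

    // Sketch only: an async max-output-token lookup keyed by the selected model.
    interface SelectedModel {
      name: string;
      provider: string;
    }

    const DEFAULT_MAX_OUTPUT_TOKENS = 4_096; // illustrative fallback

    // Stand-in for whatever async source (settings, model catalog, provider
    // metadata) the real helper consults.
    async function lookupMaxOutputTokens(
      model: SelectedModel,
    ): Promise<number | undefined> {
      return 8_192;
    }

    export async function getMaxTokens(model: SelectedModel): Promise<number> {
      return (await lookupMaxOutputTokens(model)) ?? DEFAULT_MAX_OUTPUT_TOKENS;
    }

    // Call site, as in the hunk above:
    //   maxTokens: await getMaxTokens(settings.selectedModel),
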
@@ -7,6 +7,7 @@ import type {
 import { createLoggedHandler } from "./safe_handle";
 import log from "electron-log";
 import {
+  CUSTOM_PROVIDER_PREFIX,
   getLanguageModelProviders,
   getLanguageModels,
   getLanguageModelsByProviders,
@@ -66,7 +67,7 @@ export function registerLanguageModelHandlers() {
       // Insert the new provider
       await db.insert(languageModelProvidersSchema).values({
         // Make sure we will never have accidental collisions with builtin providers
-        id: "custom::" + id,
+        id: CUSTOM_PROVIDER_PREFIX + id,
         name,
         api_base_url: apiBaseUrl,
         env_var_name: envVarName || null,
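The "custom::" literal becomes a shared constant, so the namespacing rule lives in one place. A minimal sketch of that constant plus a couple of hypothetical helpers around it; only the prefix value itself is taken from the diff:

    // The prefix value matches the old "custom::" literal; the helpers are
    // hypothetical conveniences, not part of this commit.
    export const CUSTOM_PROVIDER_PREFIX = "custom::";

    export function isCustomProviderId(providerId: string): boolean {
      return providerId.startsWith(CUSTOM_PROVIDER_PREFIX);
    }

    export function toCustomProviderId(rawId: string): string {
      // Avoid double-prefixing when the caller already namespaced the id.
      return isCustomProviderId(rawId) ? rawId : CUSTOM_PROVIDER_PREFIX + rawId;
    }
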
@@ -297,11 +298,7 @@ export function registerLanguageModelHandlers() {
       if (provider.type === "local") {
         throw new Error("Local models cannot be fetched");
       }
-      return getLanguageModels(
-        provider.type === "cloud"
-          ? { builtinProviderId: params.providerId }
-          : { customProviderId: params.providerId },
-      );
+      return getLanguageModels({ providerId: params.providerId });
     },
   );
 
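The handler no longer branches on provider.type: getLanguageModels now accepts a single providerId and resolves builtin vs. custom itself. A rough sketch of what that dispatch could look like, assuming it keys off the id prefix; the import path and internal lookups are stubs for illustration:

    import { CUSTOM_PROVIDER_PREFIX } from "./language_model_constants"; // illustrative path

    interface LanguageModel {
      id: string;
      displayName: string;
    }

    // Stubs standing in for the real builtin/custom lookups.
    async function getBuiltinProviderModels(
      builtinProviderId: string,
    ): Promise<LanguageModel[]> {
      return [];
    }

    async function getCustomProviderModels(
      customProviderId: string,
    ): Promise<LanguageModel[]> {
      return [];
    }

    export async function getLanguageModels({
      providerId,
    }: {
      providerId: string;
    }): Promise<LanguageModel[]> {
      // Custom providers are namespaced with CUSTOM_PROVIDER_PREFIX, so the id
      // alone is enough to pick the right lookup.
      return providerId.startsWith(CUSTOM_PROVIDER_PREFIX)
        ? getCustomProviderModels(providerId)
        : getBuiltinProviderModels(providerId);
    }
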
@@ -277,7 +277,7 @@ const getProposalHandler = async (
   );
 
   const totalTokens = messagesTokenCount + codebaseTokenCount;
-  const contextWindow = Math.min(getContextWindow(), 100_000);
+  const contextWindow = Math.min(await getContextWindow(), 100_000);
   logger.log(
     `Token usage: ${totalTokens}/${contextWindow} (${
       (totalTokens / contextWindow) * 100
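For reference, the capped budget and the logged percentage reduce to simple arithmetic; a small illustrative helper, not from the codebase:

    // Illustrative only: same cap and percentage math as the proposal handler.
    function describeTokenUsage(totalTokens: number, modelContextWindow: number): string {
      const contextWindow = Math.min(modelContextWindow, 100_000);
      const percent = (totalTokens / contextWindow) * 100;
      return `Token usage: ${totalTokens}/${contextWindow} (${percent.toFixed(1)}%)`;
    }

    // describeTokenUsage(42_000, 200_000) === "Token usage: 42000/100000 (42.0%)"
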
@@ -88,7 +88,7 @@ export function registerTokenCountHandlers() {
         codebaseTokens,
         inputTokens,
         systemPromptTokens,
-        contextWindow: getContextWindow(),
+        contextWindow: await getContextWindow(),
       };
     },
   );
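Both handlers now await getContextWindow(), which suggests the window is resolved from an async, per-model source (consistent with the commit title's "migrate getContextWindow"). A purely hypothetical sketch of that shape:

    // Hypothetical sketch only: the real implementation may read settings, a
    // models database, or provider metadata. The default value is illustrative.
    const DEFAULT_CONTEXT_WINDOW = 128_000;

    // Stand-in for whatever async lookup the app performs for the selected model.
    async function readSelectedModelContextWindow(): Promise<number | undefined> {
      return undefined;
    }

    export async function getContextWindow(): Promise<number> {
      return (await readSelectedModelContextWindow()) ?? DEFAULT_CONTEXT_WINDOW;
    }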