Summarize into new chat suggested action (#34)
@@ -1,5 +1,5 @@
 import { ipcMain } from "electron";
-import { streamText } from "ai";
+import { CoreMessage, streamText } from "ai";
 import { db } from "../../db";
 import { chats, messages } from "../../db/schema";
 import { and, eq, isNull } from "drizzle-orm";
@@ -21,6 +21,7 @@ import {
   getSupabaseContext,
   getSupabaseClientCode,
 } from "../../supabase_admin/supabase_context";
+import { SUMMARIZE_CHAT_SYSTEM_PROMPT } from "../../prompts/summarize_chat_system_prompt";
 
 const logger = log.scope("chat_stream_handlers");
 
@@ -165,22 +166,47 @@ export function registerChatStreamHandlers() {
     } else {
       systemPrompt += "\n\n" + SUPABASE_NOT_AVAILABLE_SYSTEM_PROMPT;
     }
+    const isSummarizeIntent = req.prompt.startsWith(
+      "Summarize from chat-id="
+    );
+    if (isSummarizeIntent) {
+      systemPrompt = SUMMARIZE_CHAT_SYSTEM_PROMPT;
+    }
+    let chatMessages = [
+      {
+        role: "user",
+        content: "This is my codebase. " + codebaseInfo,
+      },
+      {
+        role: "assistant",
+        content: "OK, got it. I'm ready to help",
+      },
+      ...messageHistory,
+    ] satisfies CoreMessage[];
+    if (isSummarizeIntent) {
+      const previousChat = await db.query.chats.findFirst({
+        where: eq(chats.id, parseInt(req.prompt.split("=")[1])),
+        with: {
+          messages: {
+            orderBy: (messages, { asc }) => [asc(messages.createdAt)],
+          },
+        },
+      });
+      chatMessages = [
+        {
+          role: "user",
+          content:
+            "Summarize the following chat: " +
+            formatMessages(previousChat?.messages ?? []),
+        } satisfies CoreMessage,
+      ];
+    }
     const { textStream } = streamText({
       maxTokens: getMaxTokens(settings.selectedModel),
       temperature: 0,
       model: modelClient,
       system: systemPrompt,
-      messages: [
-        {
-          role: "user",
-          content: "This is my codebase. " + codebaseInfo,
-        },
-        {
-          role: "assistant",
-          content: "OK, got it. I'm ready to help",
-        },
-        ...messageHistory,
-      ],
+      messages: chatMessages,
       onError: (error) => {
         logger.error("Error streaming text:", error);
         const message =
@@ -362,3 +388,11 @@ export function registerChatStreamHandlers() {
     return true;
   });
 }
+
+export function formatMessages(
+  messages: { role: string; content: string | undefined }[]
+) {
+  return messages
+    .map((m) => `<message role="${m.role}">${m.content}</message>`)
+    .join("\n");
+}
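For reference, the new formatMessages helper above serializes a prior chat into plain XML-style tags before it is fed to the summarize prompt. A minimal usage sketch with made-up sample messages:

const sample = [
  { role: "user", content: "Add a login page" },
  { role: "assistant", content: "Done, see the new Login component." },
];
formatMessages(sample);
// => '<message role="user">Add a login page</message>\n<message role="assistant">Done, see the new Login component.</message>'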
@@ -4,9 +4,10 @@ import type {
   FileChange,
   ProposalResult,
   SqlQuery,
+  ActionProposal,
 } from "../../lib/schemas";
 import { db } from "../../db";
-import { messages } from "../../db/schema";
+import { messages, chats } from "../../db/schema";
 import { desc, eq, and, Update } from "drizzle-orm";
 import path from "node:path"; // Import path for basename
 // Import tag parsers
@@ -21,6 +22,7 @@ import {
 } from "../processors/response_processor";
 import log from "electron-log";
 import { isServerFunction } from "../../supabase_admin/supabase_utils";
+import { estimateMessagesTokens, getContextWindow } from "../utils/token_utils";
 
 const logger = log.scope("proposal_handlers");
 
@@ -60,10 +62,45 @@ const getProposalHandler = async (
     },
   });
 
-  if (latestAssistantMessage?.approvalState === "rejected") {
-    return null;
-  }
-  if (latestAssistantMessage?.approvalState === "approved") {
+  if (
+    latestAssistantMessage?.approvalState === "rejected" ||
+    latestAssistantMessage?.approvalState === "approved"
+  ) {
+    // Get all chat messages to calculate token usage
+    const chat = await db.query.chats.findFirst({
+      where: eq(chats.id, chatId),
+      with: {
+        messages: {
+          orderBy: (messages, { asc }) => [asc(messages.createdAt)],
+        },
+      },
+    });
+
+    if (chat) {
+      // Calculate total tokens from message history
+      const totalTokens = estimateMessagesTokens(chat.messages);
+      const contextWindow = Math.min(getContextWindow(), 100_000);
+      logger.log(
+        `Token usage: ${totalTokens}/${contextWindow} (${
+          (totalTokens / contextWindow) * 100
+        }%)`
+      );
+
+      // If we're using more than 80% of the context window, suggest summarizing
+      if (totalTokens > contextWindow * 0.8) {
+        logger.log(
+          `Token usage high (${totalTokens}/${contextWindow}), suggesting summarize action`
+        );
+        return {
+          proposal: {
+            type: "action-proposal",
+            actions: [{ id: "summarize-in-new-chat" }],
+          },
+          chatId,
+          messageId: latestAssistantMessage.id,
+        };
+      }
+    }
     return null;
   }
 
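Rough numbers for the check above: the context window is capped at 100_000 tokens, so the summarize action is suggested once the estimated history passes 80_000 tokens; assuming estimateMessagesTokens uses the same 4-characters-per-token heuristic shown in the token-count handler further down, that is roughly 320_000 characters of stored messages.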
@@ -131,7 +168,12 @@ const getProposalHandler = async (
       "packages=",
       proposal.packagesAdded.length
     );
-    return { proposal, chatId, messageId }; // Return proposal and messageId
+
+    return {
+      proposal: proposal,
+      chatId,
+      messageId,
+    };
   } else {
     logger.log(
       "No relevant tags found in the latest assistant message content."
@@ -228,7 +270,7 @@ const rejectProposalHandler = async (
       eq(messages.chatId, chatId),
       eq(messages.role, "assistant")
     ),
-    columns: { id: true }, // Only need to confirm existence
+    columns: { id: true },
   });
 
   if (!messageToReject) {
@@ -15,14 +15,10 @@ import { readSettings } from "../../main/settings";
 import { MODEL_OPTIONS } from "../../constants/models";
 import { TokenCountParams } from "../ipc_types";
 import { TokenCountResult } from "../ipc_types";
+import { estimateTokens, getContextWindow } from "../utils/token_utils";
 
 const logger = log.scope("token_count_handlers");
 
-// Estimate tokens (4 characters per token)
-const estimateTokens = (text: string): number => {
-  return Math.ceil(text.length / 4);
-};
-
 export function registerTokenCountHandlers() {
   ipcMain.handle(
     "chat:count-tokens",
@@ -108,20 +104,3 @@ export function registerTokenCountHandlers() {
     }
   );
 }
-
-const DEFAULT_CONTEXT_WINDOW = 128_000;
-
-function getContextWindow() {
-  const settings = readSettings();
-  const model = settings.selectedModel;
-  if (!MODEL_OPTIONS[model.provider as keyof typeof MODEL_OPTIONS]) {
-    logger.warn(
-      `Model provider ${model.provider} not found in MODEL_OPTIONS. Using default max tokens.`
-    );
-    return DEFAULT_CONTEXT_WINDOW;
-  }
-  const modelOption = MODEL_OPTIONS[
-    model.provider as keyof typeof MODEL_OPTIONS
-  ].find((m) => m.name === model.name);
-  return modelOption?.contextWindow || DEFAULT_CONTEXT_WINDOW;
-}
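The ../utils/token_utils module that the handlers above now import from is not shown in this commit view. A minimal sketch of what it plausibly exports, assuming the helpers removed above were moved there largely unchanged and that estimateMessagesTokens simply sums per-message estimates (the relative import paths and the estimateMessagesTokens signature are assumptions, not part of the diff):

import log from "electron-log";
import { readSettings } from "../../main/settings";
import { MODEL_OPTIONS } from "../../constants/models";

const logger = log.scope("token_utils");
const DEFAULT_CONTEXT_WINDOW = 128_000;

// Estimate tokens (4 characters per token), as in the removed local helper.
export const estimateTokens = (text: string): number => {
  return Math.ceil(text.length / 4);
};

// Assumed helper: total token estimate across a chat's stored messages.
export const estimateMessagesTokens = (
  messages: { content: string | null }[]
): number => {
  return messages.reduce((sum, m) => sum + estimateTokens(m.content ?? ""), 0);
};

// Context window lookup, mirroring the getContextWindow removed above.
export function getContextWindow() {
  const settings = readSettings();
  const model = settings.selectedModel;
  if (!MODEL_OPTIONS[model.provider as keyof typeof MODEL_OPTIONS]) {
    logger.warn(
      `Model provider ${model.provider} not found in MODEL_OPTIONS. Using default max tokens.`
    );
    return DEFAULT_CONTEXT_WINDOW;
  }
  const modelOption = MODEL_OPTIONS[
    model.provider as keyof typeof MODEL_OPTIONS
  ].find((m) => m.name === model.name);
  return modelOption?.contextWindow || DEFAULT_CONTEXT_WINDOW;
}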