diff --git a/src/ipc/handlers/chat_stream_handlers.ts b/src/ipc/handlers/chat_stream_handlers.ts
index 073dc7f..eb8fd31 100644
--- a/src/ipc/handlers/chat_stream_handlers.ts
+++ b/src/ipc/handlers/chat_stream_handlers.ts
@@ -215,6 +215,7 @@ export function registerChatStreamHandlers() {
abortController,
updatedChat,
);
+ fullResponse = cleanThinkingByEscapingDyadTags(fullResponse);
} else {
// Normal AI processing for non-test prompts
const settings = readSettings();
@@ -348,7 +349,10 @@ This conversation includes one or more image attachments. When the user uploads
...codebasePrefix,
...limitedMessageHistory.map((msg) => ({
role: msg.role as "user" | "assistant" | "system",
- content: msg.content,
+ // Why remove thinking tags?
+ // Thinking tags are generally not critical for the context
+ // and eats up extra tokens.
+ content: removeThinkingTags(msg.content),
})),
];
@@ -411,6 +415,7 @@ This conversation includes one or more image attachments. When the user uploads
try {
for await (const textPart of textStream) {
fullResponse += textPart;
+ fullResponse = cleanThinkingByEscapingDyadTags(fullResponse);
if (
fullResponse.includes("$$SUPABASE_CLIENT_CODE$$") &&
updatedChat.app?.supabaseProjectId
@@ -707,3 +712,27 @@ async function prepareMessageWithAttachments(
content: contentParts,
};
}
+
+function cleanThinkingByEscapingDyadTags(text: string): string {
+  // Extract content inside <think></think> tags
+  const thinkRegex = /<think>([\s\S]*?)<\/think>/g;
+
+  return text.replace(thinkRegex, (match: string, content: string) => {
+    // We are replacing the opening tag with a look-alike character
+    // to avoid issues where thinking content includes dyad tags
+    // and are mishandled by:
+    // 1. FE markdown parser
+    // 2. Main process response processor
+    const processedContent = content
+      .replace(/<dyad/g, "＜dyad")
+      .replace(/<\/dyad/g, "＜/dyad");
+    return `<think>${processedContent}</think>`;
+  });
+}
+
+function removeThinkingTags(text: string): string {
+  // Strip <think>...</think> blocks entirely; without the <think>
+  // opener the lazy match would also delete text preceding each block.
+  const thinkRegex = /<think>([\s\S]*?)<\/think>/g;
+  return text.replace(thinkRegex, "").trim();
+}
diff --git a/testing/fake-llm-server/index.ts b/testing/fake-llm-server/index.ts
index b1fa7c6..efa3b2a 100644
--- a/testing/fake-llm-server/index.ts
+++ b/testing/fake-llm-server/index.ts
@@ -32,9 +32,28 @@ function createStreamChunk(
return `data: ${JSON.stringify(chunk)}\n\n${isLast ? "data: [DONE]\n\n" : ""}`;
}
+const CANNED_MESSAGE = `
+<think>
+\`<dyad-write>\`:
+I'll think about the problem and write a bug report.
+</think>
+
+<dyad-write path="file1.txt" description="Writing a file">
+Fake dyad write
+</dyad-write>
+
+<dyad-write path="file2.txt" description="Writing a second file">
+A file (2)
+</dyad-write>
+
+More
+EOM`;
+
// Handle POST requests to /v1/chat/completions
app.post("/v1/chat/completions", (req, res) => {
const { stream = false, messages = [] } = req.body;
+ console.log("* Received messages", messages);
// Check if the last message contains "[429]" to simulate rate limiting
const lastMessage = messages[messages.length - 1];
@@ -61,7 +80,7 @@ app.post("/v1/chat/completions", (req, res) => {
index: 0,
message: {
role: "assistant",
- content: "hello world",
+ content: CANNED_MESSAGE,
},
finish_reason: "stop",
},
@@ -75,7 +94,7 @@ app.post("/v1/chat/completions", (req, res) => {
res.setHeader("Connection", "keep-alive");
// Split the "hello world" message into characters to simulate streaming
- const message = "hello world";
+ const message = CANNED_MESSAGE;
const messageChars = message.split("");
// Stream each character with a delay
@@ -94,7 +113,7 @@ app.post("/v1/chat/completions", (req, res) => {
clearInterval(interval);
res.end();
}
- }, 100);
+ }, 10);
});
// Start the server