Support concurrent chats (#1478)
Fixes #212

---

> [!NOTE]
> Add concurrent chat support with per-chat state, a chat activity UI, per-chat IPC handling, and accompanying tests.
>
> - **Frontend (chat concurrency)**
>   - Replace global chat atoms with per-chat maps: `chatMessagesByIdAtom`, `isStreamingByIdAtom`, `chatErrorByIdAtom`, `chatStreamCountByIdAtom`, `recentStreamChatIdsAtom`.
>   - Update `ChatPanel`, `ChatInput`, `MessagesList`, `DyadMarkdownParser`, and `useVersions` to read and write per-chat state.
>   - Add `useSelectChat` to centralize selecting and navigating chats; wire it into `ChatList`.
> - **UI**
>   - Add a chat activity popover (`ChatActivityButton` plus a list); integrate it into `preview_panel/ActionHeader` (renamed from `PreviewHeader`) and swap it into `TitleBar`.
> - **IPC/Main**
>   - Send error payloads with a `chatId` on `chat:response:error`; update `ipc_client` to route errors per chat.
>   - Periodically persist partial streamed assistant content; improve cancellation and end-of-stream handling.
>   - Make `FileUploadsState` per-chat (`addFileUpload({chatId, fileId}, ...)`, `clear(chatId)`, `getFileUploadsForChat(chatId)`); update handlers and processors accordingly.
> - **Testing**
>   - Add the e2e `concurrent_chat.spec.ts` and snapshots; extend helpers (`snapshotMessages` timeout, chat activity helpers).
>   - Fake LLM server: support `tc=` with options and an optional sleep delay to simulate concurrency.
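The atom names above suggest a straightforward keyed-map shape. A minimal sketch of that per-chat state, assuming jotai (suggested by the "atom" naming, not confirmed by this page) and an illustrative `Message` type:

```typescript
// A minimal sketch of the per-chat state shape, assuming jotai; the Message
// type is illustrative, not the app's real one.
import { atom } from "jotai";

interface Message {
  role: "user" | "assistant";
  content: string;
}

// One map entry per chatId, so two chats streaming at once never clobber
// each other's messages, streaming flag, or error.
export const chatMessagesByIdAtom = atom<Map<number, Message[]>>(new Map());
export const isStreamingByIdAtom = atom<Map<number, boolean>>(new Map());
export const chatErrorByIdAtom = atom<Map<number, string | null>>(new Map());
```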
```diff
@@ -206,7 +206,6 @@ export function registerChatStreamHandlers() {
   ipcMain.handle("chat:stream", async (event, req: ChatStreamParams) => {
     try {
       const fileUploadsState = FileUploadsState.getInstance();
-      fileUploadsState.initialize({ chatId: req.chatId });
 
       // Create an AbortController for this stream
       const abortController = new AbortController();
```
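With `FileUploadsState` now keyed per chat (see the `addFileUpload` change in the next hunk), there is no longer a single global upload map to reset at the start of each stream, so the `initialize({ chatId })` call can simply be dropped.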
```diff
@@ -288,10 +287,13 @@ export function registerChatStreamHandlers() {
         // For upload-to-codebase, create a unique file ID and store the mapping
         const fileId = `DYAD_ATTACHMENT_${index}`;
 
-        fileUploadsState.addFileUpload(fileId, {
-          filePath,
-          originalName: attachment.name,
-        });
+        fileUploadsState.addFileUpload(
+          { chatId: req.chatId, fileId },
+          {
+            filePath,
+            originalName: attachment.name,
+          },
+        );
 
         // Add instruction for AI to use dyad-write tag
         attachmentInfo += `\n\nFile to upload to codebase: ${attachment.name} (file id: ${fileId})\n`;
```
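The call site above implies a per-chat keyed store. A hedged sketch of the `FileUploadsState` surface named in the PR summary (`addFileUpload`, `getFileUploadsForChat`, `clear`); the Map-of-Maps layout and the `number` chatId are assumptions, and `FileUploadInfo` is inferred from the fields passed above:

```typescript
// Hedged sketch of a per-chat FileUploadsState, not the repo's actual
// implementation; only the method names come from the PR summary.
interface FileUploadInfo {
  filePath: string;
  originalName: string;
}

class FileUploadsState {
  private static instance: FileUploadsState;
  // chatId -> (fileId -> upload info): clearing one chat leaves others intact.
  private uploads = new Map<number, Map<string, FileUploadInfo>>();

  static getInstance(): FileUploadsState {
    return (this.instance ??= new FileUploadsState());
  }

  addFileUpload(key: { chatId: number; fileId: string }, info: FileUploadInfo): void {
    const forChat =
      this.uploads.get(key.chatId) ?? new Map<string, FileUploadInfo>();
    forChat.set(key.fileId, info);
    this.uploads.set(key.chatId, forChat);
  }

  getFileUploadsForChat(chatId: number): Map<string, FileUploadInfo> {
    return this.uploads.get(chatId) ?? new Map();
  }

  clear(chatId: number): void {
    this.uploads.delete(chatId);
  }
}
```

Keying the outer map by `chatId` means `clear(chatId)` on one chat's error or cancellation path cannot wipe uploads belonging to another chat that is still streaming.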
```diff
@@ -793,10 +795,10 @@ This conversation includes one or more image attachments. When the user uploads
         const requestIdPrefix = isEngineEnabled
           ? `[Request ID: ${dyadRequestId}] `
           : "";
-        event.sender.send(
-          "chat:response:error",
-          `Sorry, there was an error from the AI: ${requestIdPrefix}${message}`,
-        );
+        event.sender.send("chat:response:error", {
+          chatId: req.chatId,
+          error: `Sorry, there was an error from the AI: ${requestIdPrefix}${message}`,
+        });
         // Clean up the abort controller
         activeStreams.delete(req.chatId);
       },
```
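Previously the payload was a bare string with no chat attribution, so the renderer could only surface errors against whichever chat was currently selected. A sketch of the per-chat routing this enables on the renderer side; `ipcRenderer.on` usage and the `setChatError` helper are assumptions, only the `{ chatId, error }` payload shape is confirmed by the hunk above:

```typescript
// Sketch of per-chat error routing in the renderer; setChatError is
// hypothetical and would update chatErrorByIdAtom (or equivalent).
import { ipcRenderer } from "electron";

interface ChatErrorPayload {
  chatId: number;
  error: string;
}

declare function setChatError(chatId: number, error: string): void;

ipcRenderer.on("chat:response:error", (_event, payload: ChatErrorPayload) => {
  // Only the chat that failed shows the error; other active streams continue.
  setChatError(payload.chatId, payload.error);
});
```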
```diff
@@ -804,6 +806,8 @@ This conversation includes one or more image attachments. When the user uploads
       });
     };
 
+    let lastDbSaveAt = 0;
+
     const processResponseChunkUpdate = async ({
       fullResponse,
     }: {
```
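`lastDbSaveAt` is declared alongside `processResponseChunkUpdate`, which appears to put it in the per-request closure: each stream then throttles its own DB writes independently rather than sharing one timestamp across concurrent chats.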
```diff
@@ -823,6 +827,16 @@ This conversation includes one or more image attachments. When the user uploads
       }
       // Store the current partial response
       partialResponses.set(req.chatId, fullResponse);
+      // Save to DB (in case user is switching chats during the stream)
+      const now = Date.now();
+      if (now - lastDbSaveAt >= 150) {
+        await db
+          .update(messages)
+          .set({ content: fullResponse })
+          .where(eq(messages.id, placeholderAssistantMessage.id));
+
+        lastDbSaveAt = now;
+      }
 
       // Update the placeholder assistant message content in the messages array
       const currentMessages = [...updatedChat.messages];
```
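This block persists the partial assistant message while the stream is running, so a user who switches away and back mid-stream still sees content, while capping writes at one per 150ms so the database isn't hit on every chunk. The same pattern in isolation, with a hypothetical `persist()` standing in for the drizzle update above:

```typescript
// Write-throttle sketch; persist() is a hypothetical stand-in for the
// drizzle db.update(...) call shown in the hunk above.
const SAVE_INTERVAL_MS = 150;
let lastSaveAt = 0;

async function saveThrottled(
  fullResponse: string,
  persist: (content: string) => Promise<void>,
): Promise<void> {
  const now = Date.now();
  if (now - lastSaveAt >= SAVE_INTERVAL_MS) {
    // At most one DB write per 150ms window, however fast chunks arrive.
    await persist(fullResponse);
    lastSaveAt = now;
  }
}
```

Note that the in-memory `partialResponses` map is still updated on every chunk; only the database write is rate-limited, so a chat switch mid-stream sees content that is at most ~150ms stale.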
```diff
@@ -1143,11 +1157,10 @@ ${problemReport.problems
       });
 
       if (status.error) {
-        safeSend(
-          event.sender,
-          "chat:response:error",
-          `Sorry, there was an error applying the AI's changes: ${status.error}`,
-        );
+        safeSend(event.sender, "chat:response:error", {
+          chatId: req.chatId,
+          error: `Sorry, there was an error applying the AI's changes: ${status.error}`,
+        });
       }
 
       // Signal that the stream has completed
```
```diff
@@ -1190,15 +1203,14 @@ ${problemReport.problems
       return req.chatId;
     } catch (error) {
       logger.error("Error calling LLM:", error);
-      safeSend(
-        event.sender,
-        "chat:response:error",
-        `Sorry, there was an error processing your request: ${error}`,
-      );
+      safeSend(event.sender, "chat:response:error", {
+        chatId: req.chatId,
+        error: `Sorry, there was an error processing your request: ${error}`,
+      });
       // Clean up the abort controller
       activeStreams.delete(req.chatId);
       // Clean up file uploads state on error
-      FileUploadsState.getInstance().clear();
+      FileUploadsState.getInstance().clear(req.chatId);
      return "error";
    }
  });
```
```diff
@@ -1222,6 +1234,11 @@ ${problemReport.problems
       updatedFiles: false,
     } satisfies ChatResponseEnd);
 
+    // Clean up uploads state for this chat
+    try {
+      FileUploadsState.getInstance().clear(chatId);
+    } catch {}
+
     return true;
   });
 }
```
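The empty `catch {}` makes this final cleanup best-effort: if clearing the per-chat upload state throws, the cancellation/end path still completes and returns `true` to the renderer.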