Selected component engine (#1562)

<!-- This is an auto-generated description by cubic. -->

## Summary by cubic
Enable the Dyad Engine to prioritize a user-selected component by
flagging the file as focused and sending codebase files per request.
Keeps full codebase context when smart context is enabled; falls back to
path-scoped context (the selected file only) when it's disabled.

- **New Features**
- Mark the selected component's file as focused when smart context is
enabled.
- Send codebase files to the engine via dyadFiles, applied to
dyad_options unless disabled.
- Maintain full chatContext when smart context is enabled; restrict it to
the selected file's path only when smart context is off.

- **Refactors**
- Removed files from getModelClient and provider APIs; file transport
moved into request payload.
- Stream handlers now pass files to model calls and include
dyadDisableFiles/dyadFiles in the request.
  - Added focused flag to CodebaseFile.

<!-- End of auto-generated description by cubic. -->

<!-- CURSOR_SUMMARY -->
---

> [!NOTE]
> Sends codebase files per request to the Dyad Engine, focuses the
selected component when smart context is enabled, and refactors model
client/provider APIs to remove file parameters.
> 
> - **Engine integration**:
> - Send codebase files per request via `dyadFiles` in provider options;
propagate through `simpleStreamText` and `dyad-engine` options.
> - Add `isSmartContextEnabled` from `get_model_client` and gate context
behavior accordingly.
> - **Selected component focus**:
> - When smart context is on and a component is selected, mark its file
as `focused` in `CodebaseFile` and avoid broad smart context includes;
allow on-demand reads.
> - When smart context is off, restrict `chatContext` to the selected
file path.
> - **Refactors**:
> - Remove `files` parameter from `getModelClient` and Dyad provider;
move file transport into request body.
> - Update `llm_engine_provider` to read `dyadFiles` from request and
populate `dyad_options.files` unless `dyadDisableFiles`.
> - Extend `CodebaseFile` with optional `focused` flag; thread `files`
through `chat_stream_handlers` calls.
> 
> <sup>Written by [Cursor
Bugbot](https://cursor.com/dashboard?tab=bugbot) for commit
022b26d0197ab5b5d4f5b589f45bc230de36e0e5. This will update automatically
on new commits. Configure
[here](https://cursor.com/dashboard?tab=bugbot).</sup>
<!-- /CURSOR_SUMMARY -->
This commit is contained in:
Will Chen
2025-10-16 17:19:30 -07:00
committed by GitHub
parent eae22bed90
commit d571d303eb
4 changed files with 60 additions and 48 deletions

View File

@@ -25,7 +25,11 @@ import {
import { getDyadAppPath } from "../../paths/paths"; import { getDyadAppPath } from "../../paths/paths";
import { readSettings } from "../../main/settings"; import { readSettings } from "../../main/settings";
import type { ChatResponseEnd, ChatStreamParams } from "../ipc_types"; import type { ChatResponseEnd, ChatStreamParams } from "../ipc_types";
import { extractCodebase, readFileWithCache } from "../../utils/codebase"; import {
CodebaseFile,
extractCodebase,
readFileWithCache,
} from "../../utils/codebase";
import { processFullResponseActions } from "../processors/response_processor"; import { processFullResponseActions } from "../processors/response_processor";
import { streamTestResponse } from "./testing_chat_handlers"; import { streamTestResponse } from "./testing_chat_handlers";
import { getTestResponse } from "./testing_chat_handlers"; import { getTestResponse } from "./testing_chat_handlers";
@@ -437,21 +441,26 @@ ${componentSnippet}
); );
} else { } else {
// Normal AI processing for non-test prompts // Normal AI processing for non-test prompts
const { modelClient, isEngineEnabled, isSmartContextEnabled } =
await getModelClient(settings.selectedModel, settings);
const appPath = getDyadAppPath(updatedChat.app.path); const appPath = getDyadAppPath(updatedChat.app.path);
const chatContext = req.selectedComponent // When we don't have smart context enabled, we
? { // only include the selected component's file for codebase context.
contextPaths: [ //
{ // If we have selected component and smart context is enabled,
globPath: req.selectedComponent.relativePath, // we handle this specially below.
}, const chatContext =
], req.selectedComponent && !isSmartContextEnabled
smartContextAutoIncludes: [], ? {
} contextPaths: [
: validateChatContext(updatedChat.app.chatContext); {
globPath: req.selectedComponent.relativePath,
// Parse app mentions from the prompt },
const mentionedAppNames = parseAppMentions(req.prompt); ],
smartContextAutoIncludes: [],
}
: validateChatContext(updatedChat.app.chatContext);
// Extract codebase for current app // Extract codebase for current app
const { formattedOutput: codebaseInfo, files } = await extractCodebase({ const { formattedOutput: codebaseInfo, files } = await extractCodebase({
@@ -459,6 +468,20 @@ ${componentSnippet}
chatContext, chatContext,
}); });
// For smart context and selected component, we will mark the selected component's file as focused.
// This means that we don't do the regular smart context handling, but we'll allow fetching
// additional files through <dyad-read> as needed.
if (isSmartContextEnabled && req.selectedComponent) {
for (const file of files) {
if (file.path === req.selectedComponent.relativePath) {
file.focused = true;
}
}
}
// Parse app mentions from the prompt
const mentionedAppNames = parseAppMentions(req.prompt);
// Extract codebases for mentioned apps // Extract codebases for mentioned apps
const mentionedAppsCodebases = await extractMentionedAppsCodebases( const mentionedAppsCodebases = await extractMentionedAppsCodebases(
mentionedAppNames, mentionedAppNames,
@@ -489,11 +512,6 @@ ${componentSnippet}
"estimated tokens", "estimated tokens",
codebaseInfo.length / 4, codebaseInfo.length / 4,
); );
const { modelClient, isEngineEnabled } = await getModelClient(
settings.selectedModel,
settings,
files,
);
// Prepare message history for the AI // Prepare message history for the AI
const messageHistory = updatedChat.messages.map((message) => ({ const messageHistory = updatedChat.messages.map((message) => ({
@@ -709,9 +727,11 @@ This conversation includes one or more image attachments. When the user uploads
tools, tools,
systemPromptOverride = systemPrompt, systemPromptOverride = systemPrompt,
dyadDisableFiles = false, dyadDisableFiles = false,
files,
}: { }: {
chatMessages: ModelMessage[]; chatMessages: ModelMessage[];
modelClient: ModelClient; modelClient: ModelClient;
files: CodebaseFile[];
tools?: ToolSet; tools?: ToolSet;
systemPromptOverride?: string; systemPromptOverride?: string;
dyadDisableFiles?: boolean; dyadDisableFiles?: boolean;
@@ -729,6 +749,7 @@ This conversation includes one or more image attachments. When the user uploads
"dyad-engine": { "dyad-engine": {
dyadRequestId, dyadRequestId,
dyadDisableFiles, dyadDisableFiles,
dyadFiles: files,
dyadMentionedApps: mentionedAppsCodebases.map( dyadMentionedApps: mentionedAppsCodebases.map(
({ files, appName }) => ({ ({ files, appName }) => ({
appName, appName,
@@ -878,6 +899,7 @@ This conversation includes one or more image attachments. When the user uploads
aiRules: await readAiRules(getDyadAppPath(updatedChat.app.path)), aiRules: await readAiRules(getDyadAppPath(updatedChat.app.path)),
chatMode: "agent", chatMode: "agent",
}), }),
files: files,
dyadDisableFiles: true, dyadDisableFiles: true,
}); });
@@ -903,6 +925,7 @@ This conversation includes one or more image attachments. When the user uploads
const { fullStream } = await simpleStreamText({ const { fullStream } = await simpleStreamText({
chatMessages, chatMessages,
modelClient, modelClient,
files: files,
}); });
// Process the stream as before // Process the stream as before
@@ -939,6 +962,7 @@ This conversation includes one or more image attachments. When the user uploads
{ role: "assistant", content: fullResponse }, { role: "assistant", content: fullResponse },
], ],
modelClient, modelClient,
files: files,
}); });
for await (const part of contStream) { for await (const part of contStream) {
// If the stream was aborted, exit early // If the stream was aborted, exit early
@@ -1020,11 +1044,11 @@ ${problemReport.problems
const { modelClient } = await getModelClient( const { modelClient } = await getModelClient(
settings.selectedModel, settings.selectedModel,
settings, settings,
files,
); );
const { fullStream } = await simpleStreamText({ const { fullStream } = await simpleStreamText({
modelClient, modelClient,
files: files,
chatMessages: [ chatMessages: [
...chatMessages.map((msg, index) => { ...chatMessages.map((msg, index) => {
if ( if (

View File

@@ -52,19 +52,15 @@ export interface ModelClient {
builtinProviderId?: string; builtinProviderId?: string;
} }
interface File {
path: string;
content: string;
}
const logger = log.scope("getModelClient"); const logger = log.scope("getModelClient");
export async function getModelClient( export async function getModelClient(
model: LargeLanguageModel, model: LargeLanguageModel,
settings: UserSettings, settings: UserSettings,
files?: File[], // files?: File[],
): Promise<{ ): Promise<{
modelClient: ModelClient; modelClient: ModelClient;
isEngineEnabled?: boolean; isEngineEnabled?: boolean;
isSmartContextEnabled?: boolean;
}> { }> {
const allProviders = await getLanguageModelProviders(); const allProviders = await getLanguageModelProviders();
@@ -84,6 +80,7 @@ export async function getModelClient(
// IMPORTANT: some providers like OpenAI have an empty string gateway prefix, // IMPORTANT: some providers like OpenAI have an empty string gateway prefix,
// so we do a nullish and not a truthy check here. // so we do a nullish and not a truthy check here.
if (providerConfig.gatewayPrefix != null || dyadEngineUrl) { if (providerConfig.gatewayPrefix != null || dyadEngineUrl) {
const enableSmartFilesContext = settings.enableProSmartFilesContextMode;
const provider = createDyadEngine({ const provider = createDyadEngine({
apiKey: dyadApiKey, apiKey: dyadApiKey,
baseURL: dyadEngineUrl ?? "https://engine.dyad.sh/v1", baseURL: dyadEngineUrl ?? "https://engine.dyad.sh/v1",
@@ -93,7 +90,7 @@ export async function getModelClient(
settings.selectedChatMode === "ask" settings.selectedChatMode === "ask"
? false ? false
: settings.enableProLazyEditsMode, : settings.enableProLazyEditsMode,
enableSmartFilesContext: settings.enableProSmartFilesContextMode, enableSmartFilesContext,
// Keep in sync with getCurrentValue in ProModeSelector.tsx // Keep in sync with getCurrentValue in ProModeSelector.tsx
smartContextMode: settings.proSmartContextOption ?? "balanced", smartContextMode: settings.proSmartContextOption ?? "balanced",
enableWebSearch: settings.enableProWebSearch, enableWebSearch: settings.enableProWebSearch,
@@ -112,15 +109,14 @@ export async function getModelClient(
// Do not use free variant (for openrouter). // Do not use free variant (for openrouter).
const modelName = model.name.split(":free")[0]; const modelName = model.name.split(":free")[0];
const autoModelClient = { const autoModelClient = {
model: provider(`${providerConfig.gatewayPrefix || ""}${modelName}`, { model: provider(`${providerConfig.gatewayPrefix || ""}${modelName}`),
files,
}),
builtinProviderId: model.provider, builtinProviderId: model.provider,
}; };
return { return {
modelClient: autoModelClient, modelClient: autoModelClient,
isEngineEnabled: true, isEngineEnabled: true,
isSmartContextEnabled: enableSmartFilesContext,
}; };
} else { } else {
logger.warn( logger.warn(
@@ -176,7 +172,6 @@ export async function getModelClient(
name: autoModel.name, name: autoModel.name,
}, },
settings, settings,
files,
); );
} }
} }

View File

@@ -13,10 +13,7 @@ import { LanguageModelV2 } from "@ai-sdk/provider";
const logger = log.scope("llm_engine_provider"); const logger = log.scope("llm_engine_provider");
export type ExampleChatModelId = string & {}; export type ExampleChatModelId = string & {};
export interface ExampleChatSettings {}
export interface ExampleChatSettings {
files?: { path: string; content: string }[];
}
export interface ExampleProviderSettings { export interface ExampleProviderSettings {
/** /**
Example API key. Example API key.
@@ -106,13 +103,7 @@ export function createDyadEngine(
fetch: options.fetch, fetch: options.fetch,
}); });
const createChatModel = ( const createChatModel = (modelId: ExampleChatModelId) => {
modelId: ExampleChatModelId,
settings: ExampleChatSettings = {},
) => {
// Extract files from settings to process them appropriately
const { files } = settings;
// Create configuration with file handling // Create configuration with file handling
const config = { const config = {
...getCommonModelConfig(), ...getCommonModelConfig(),
@@ -134,6 +125,10 @@ export function createDyadEngine(
options.settings, options.settings,
), ),
}; };
const dyadFiles = parsedBody.dyadFiles;
if ("dyadFiles" in parsedBody) {
delete parsedBody.dyadFiles;
}
const requestId = parsedBody.dyadRequestId; const requestId = parsedBody.dyadRequestId;
if ("dyadRequestId" in parsedBody) { if ("dyadRequestId" in parsedBody) {
delete parsedBody.dyadRequestId; delete parsedBody.dyadRequestId;
@@ -156,9 +151,9 @@ export function createDyadEngine(
} }
// Add files to the request if they exist // Add files to the request if they exist
if (files?.length && !dyadDisableFiles) { if (dyadFiles?.length && !dyadDisableFiles) {
parsedBody.dyad_options = { parsedBody.dyad_options = {
files, files: dyadFiles,
enable_lazy_edits: options.dyadOptions.enableLazyEdits, enable_lazy_edits: options.dyadOptions.enableLazyEdits,
enable_smart_files_context: enable_smart_files_context:
options.dyadOptions.enableSmartFilesContext, options.dyadOptions.enableSmartFilesContext,
@@ -195,10 +190,7 @@ export function createDyadEngine(
return new OpenAICompatibleChatLanguageModel(modelId, config); return new OpenAICompatibleChatLanguageModel(modelId, config);
}; };
const provider = ( const provider = (modelId: ExampleChatModelId) => createChatModel(modelId);
modelId: ExampleChatModelId,
settings?: ExampleChatSettings,
) => createChatModel(modelId, settings);
provider.chatModel = createChatModel; provider.chatModel = createChatModel;

View File

@@ -406,6 +406,7 @@ ${content}
export type CodebaseFile = { export type CodebaseFile = {
path: string; path: string;
content: string; content: string;
focused?: boolean;
force?: boolean; force?: boolean;
}; };