Smart files context (#184)

This commit is contained in:
Will Chen
2025-05-16 22:21:45 -07:00
committed by GitHub
parent 2455c554ee
commit f9f33596bd
8 changed files with 224 additions and 31 deletions

View File

@@ -236,11 +236,8 @@ export function registerChatStreamHandlers() {
"estimated tokens",
codebaseInfo.length / 4,
);
const { modelClient, backupModelClients } = await getModelClient(
settings.selectedModel,
settings,
files,
);
const { modelClient, backupModelClients, isEngineEnabled } =
await getModelClient(settings.selectedModel, settings, files);
// Prepare message history for the AI
const messageHistory = updatedChat.messages.map((message) => ({
@@ -328,15 +325,22 @@ This conversation includes one or more image attachments. When the user uploads
`;
}
const codebasePrefix = isEngineEnabled
          ? // No codebase prefix if engine is set; we will take care of it there.
[]
: ([
{
role: "user",
content: "This is my codebase. " + codebaseInfo,
},
{
role: "assistant",
content: "OK, got it. I'm ready to help",
},
] as const);
let chatMessages: CoreMessage[] = [
{
role: "user",
content: "This is my codebase. " + codebaseInfo,
},
{
role: "assistant",
content: "OK, got it. I'm ready to help",
},
...codebasePrefix,
...limitedMessageHistory.map((msg) => ({
role: msg.role as "user" | "assistant" | "system",
content: msg.content,

View File

@@ -50,6 +50,7 @@ export async function getModelClient(
): Promise<{
modelClient: ModelClient;
backupModelClients: ModelClient[];
isEngineEnabled?: boolean;
}> {
const allProviders = await getLanguageModelProviders();
@@ -103,9 +104,12 @@ export async function getModelClient(
// so we do a nullish and not a truthy check here.
if (providerConfig.gatewayPrefix != null || dyadLocalEngine) {
const languageModel = await findLanguageModel(model);
const engineProMode =
settings.enableProSmartFilesContextMode ||
settings.enableProLazyEditsMode;
// Currently engine is only used for turbo edits.
const isEngineEnabled = Boolean(
settings.enableProLazyEditsMode &&
engineProMode &&
languageModel?.type === "cloud" &&
languageModel?.supportsTurboEdits,
);
@@ -113,6 +117,10 @@ export async function getModelClient(
? createDyadEngine({
apiKey: dyadApiKey,
baseURL: dyadLocalEngine ?? "https://engine.dyad.sh/v1",
dyadOptions: {
enableLazyEdits: settings.enableProLazyEditsMode,
enableSmartFilesContext: settings.enableProSmartFilesContextMode,
},
})
: createOpenAICompatible({
name: "dyad-gateway",
@@ -126,7 +134,7 @@ export async function getModelClient(
const autoModelClient = {
model: provider(
`${providerConfig.gatewayPrefix || ""}${modelName}`,
settings.enableProLazyEditsMode
engineProMode
? {
files,
}
@@ -161,6 +169,7 @@ export async function getModelClient(
providerConfig,
).modelClient,
backupModelClients: [autoModelClient],
isEngineEnabled,
};
} else {
return {

View File

@@ -41,6 +41,11 @@ Custom fetch implementation. You can use it as a middleware to intercept request
or to provide a custom fetch implementation for e.g. testing.
*/
fetch?: FetchFunction;
dyadOptions: {
enableLazyEdits?: boolean;
enableSmartFilesContext?: boolean;
};
}
export interface DyadEngineProvider {
@@ -62,7 +67,7 @@ Creates a chat model for text generation.
}
export function createDyadEngine(
options: ExampleProviderSettings = {},
options: ExampleProviderSettings,
): DyadEngineProvider {
const baseURL = withoutTrailingSlash(
options.baseURL ?? "https://api.example.com/v1",
@@ -124,7 +129,9 @@ export function createDyadEngine(
if (files?.length) {
parsedBody.dyad_options = {
files,
enable_lazy_edits: true,
enable_lazy_edits: options.dyadOptions.enableLazyEdits,
enable_smart_files_context:
options.dyadOptions.enableSmartFilesContext,
};
}