Support turbo edits (pro) (#166)
@@ -215,17 +215,16 @@ export function registerChatStreamHandlers() {
     } else {
       // Normal AI processing for non-test prompts
       const settings = readSettings();
-      const { modelClient, backupModelClients } = await getModelClient(
-        settings.selectedModel,
-        settings,
-      );

       // Extract codebase information if app is associated with the chat
       let codebaseInfo = "";
+      let files: { path: string; content: string }[] = [];
       if (updatedChat.app) {
         const appPath = getDyadAppPath(updatedChat.app.path);
         try {
-          codebaseInfo = await extractCodebase(appPath);
+          const out = await extractCodebase(appPath);
+          codebaseInfo = out.formattedOutput;
+          files = out.files;
           logger.log(`Extracted codebase information from ${appPath}`);
         } catch (error) {
           logger.error("Error extracting codebase:", error);
@@ -237,6 +236,11 @@ export function registerChatStreamHandlers() {
         "estimated tokens",
         codebaseInfo.length / 4,
       );
+      const { modelClient, backupModelClients } = await getModelClient(
+        settings.selectedModel,
+        settings,
+        files,
+      );

       // Prepare message history for the AI
       const messageHistory = updatedChat.messages.map((message) => ({
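Together, these two hunks reorder the stream handler: extractCodebase now returns an object instead of a bare string, and the model client is created only after extraction so the raw files can be handed to it. A minimal sketch of the new shape, with invented type and helper names standing in for code not shown in this diff:

// Sketch only: "ExtractCodebaseResult" is a name invented here; the diff
// shows just that extractCodebase resolves to { formattedOutput, files }.
interface ExtractCodebaseResult {
  formattedOutput: string; // prompt-ready text, previously the whole return value
  files: { path: string; content: string }[]; // raw files for turbo edits
}

declare function extractCodebase(appPath: string): Promise<ExtractCodebaseResult>;

// Hypothetical helper mirroring the handler above: extract first, then
// thread both the formatted text and the file list onward.
async function buildChatContext(appPath: string) {
  const { formattedOutput, files } = await extractCodebase(appPath);
  return { codebaseInfo: formattedOutput, files };
}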
@@ -140,7 +140,7 @@ export function registerDebugHandlers() {

       // Extract codebase
       const appPath = getDyadAppPath(app.path);
-      const codebase = await extractCodebase(appPath);
+      const codebase = (await extractCodebase(appPath)).formattedOutput;

       return {
         debugInfo,
@@ -92,7 +92,8 @@ async function getCodebaseTokenCount(

   // Calculate and cache the token count
   logger.log(`Calculating codebase token count for chatId: ${chatId}`);
-  const codebase = await extractCodebase(getDyadAppPath(appPath));
+  const codebase = (await extractCodebase(getDyadAppPath(appPath)))
+    .formattedOutput;
   const tokenCount = estimateTokens(codebase);

   // Store in cache
@@ -68,7 +68,7 @@ export function registerTokenCountHandlers() {

       if (chat.app) {
         const appPath = getDyadAppPath(chat.app.path);
-        codebaseInfo = await extractCodebase(appPath);
+        codebaseInfo = (await extractCodebase(appPath)).formattedOutput;
         codebaseTokens = estimateTokens(codebaseInfo);
         logger.log(
           `Extracted codebase information from ${appPath}, tokens: ${codebaseTokens}`,
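The chat-stream hunk above logs codebaseInfo.length / 4 as its token estimate, and both token-count paths feed the formatted output through estimateTokens. A sketch of an estimator consistent with that heuristic; the actual implementation is not part of this diff and may use a real tokenizer:

// Hypothetical estimator matching the `length / 4` log line above; the
// real estimateTokens may differ.
function estimateTokens(text: string): number {
  // Roughly four characters per token is a common approximation for
  // English-heavy text and source code.
  return Math.ceil(text.length / 4);
}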
@@ -11,6 +11,9 @@ import log from "electron-log";
 import { getLanguageModelProviders } from "../shared/language_model_helpers";
 import { LanguageModelProvider } from "../ipc_types";
 import { llmErrorStore } from "@/main/llm_error_store";
+import { createDyadEngine } from "./llm_engine_provider";
+
+const dyadLocalEngine = process.env.DYAD_LOCAL_ENGINE;

 const AUTO_MODELS = [
   {
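DYAD_LOCAL_ENGINE is read once at module load and, as the override hunk further down shows, it both unlocks the Pro branch for providers without a gateway prefix and replaces the hosted engine URL. A sketch of the resolution, with the local URL being a placeholder:

// Hypothetical local run: DYAD_LOCAL_ENGINE=http://localhost:8080/v1
const dyadLocalEngine = process.env.DYAD_LOCAL_ENGINE;

// Mirrors the override below: prefer the local engine when set, otherwise
// fall back to the hosted endpoint.
const engineBaseURL = dyadLocalEngine ?? "https://engine.dyad.sh/v1";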
@@ -32,10 +35,16 @@ export interface ModelClient {
   builtinProviderId?: string;
 }

+interface File {
+  path: string;
+  content: string;
+}
+
 const logger = log.scope("getModelClient");
 export async function getModelClient(
   model: LargeLanguageModel,
   settings: UserSettings,
+  files?: File[],
 ): Promise<{
   modelClient: ModelClient;
   backupModelClients: ModelClient[];
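Because files is optional, every existing getModelClient(model, settings) call keeps compiling; only the chat-stream path opts in. A sketch of the two call shapes, assuming settings and files come from the surrounding scope as in the hunks above:

// Hypothetical call sites inside an async handler.
const plain = await getModelClient(settings.selectedModel, settings);
const withFiles = await getModelClient(settings.selectedModel, settings, files);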
@@ -65,8 +74,9 @@ export async function getModelClient(
       {
         provider: autoModel.provider,
         name: autoModel.name,
       } as LargeLanguageModel,
     },
     settings,
+    files,
   );
 }
 }
@@ -85,17 +95,33 @@ export async function getModelClient(

   // Handle Dyad Pro override
   if (dyadApiKey && settings.enableDyadPro) {
-    // Check if the selected provider supports Dyad Pro (has a gateway prefix)
-    if (providerConfig.gatewayPrefix) {
-      const provider = createOpenAI({
-        apiKey: dyadApiKey,
-        baseURL: "https://llm-gateway.dyad.sh/v1",
-      });
-      logger.info("Using Dyad Pro API key via Gateway");
+    // Check if the selected provider supports Dyad Pro (has a gateway prefix) OR
+    // we're using local engine.
+    if (providerConfig.gatewayPrefix || dyadLocalEngine) {
+      const provider = settings.enableProLazyEditsMode
+        ? createDyadEngine({
+            apiKey: dyadApiKey,
+            baseURL: dyadLocalEngine ?? "https://engine.dyad.sh/v1",
+          })
+        : createOpenAI({
+            apiKey: dyadApiKey,
+            baseURL: "https://llm-gateway.dyad.sh/v1",
+          });
+
+      logger.info(
+        `Using Dyad Pro API key. engine_enabled=${settings.enableProLazyEditsMode}`,
+      );
       // Do not use free variant (for openrouter).
       const modelName = model.name.split(":free")[0];
       const autoModelClient = {
-        model: provider(`${providerConfig.gatewayPrefix}${modelName}`),
+        model: provider(
+          `${providerConfig.gatewayPrefix || ""}${modelName}`,
+          settings.enableProLazyEditsMode
+            ? {
+                files,
+              }
+            : undefined,
+        ),
         builtinProviderId: "auto",
       };
       const googleSettings = settings.providerSettings?.google;
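The override now decides two things at once: which client factory to build and which per-model options to pass. A condensed restatement of the branch above, assuming the same imports as the host file:

import { createOpenAI } from "@ai-sdk/openai"; // assumed, as in the host file
import { createDyadEngine } from "./llm_engine_provider";

// Condensed restatement of the branch above; all behavior comes from the diff.
function pickProProvider(opts: {
  enableProLazyEditsMode: boolean;
  dyadApiKey: string;
  dyadLocalEngine?: string;
}) {
  return opts.enableProLazyEditsMode
    ? // Turbo edits: the Dyad engine client, which accepts files per model.
      createDyadEngine({
        apiKey: opts.dyadApiKey,
        baseURL: opts.dyadLocalEngine ?? "https://engine.dyad.sh/v1",
      })
    : // Classic Pro path: plain OpenAI-compatible client via the LLM gateway.
      createOpenAI({
        apiKey: opts.dyadApiKey,
        baseURL: "https://llm-gateway.dyad.sh/v1",
      });
}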
@@ -235,7 +261,7 @@ function getRegularModelClient(
   const provider = createOpenAICompatible({
     name: providerConfig.id,
     baseURL: providerConfig.apiBaseUrl,
-    apiKey: apiKey,
+    apiKey,
   });
   return {
     modelClient: {
src/ipc/utils/llm_engine_provider.ts (new file, +159 lines)
@@ -0,0 +1,159 @@
+import {
+  LanguageModelV1,
+  LanguageModelV1ObjectGenerationMode,
+} from "@ai-sdk/provider";
+import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible";
+import {
+  FetchFunction,
+  loadApiKey,
+  withoutTrailingSlash,
+} from "@ai-sdk/provider-utils";
+
+import { OpenAICompatibleChatSettings } from "@ai-sdk/openai-compatible";
+import log from "electron-log";
+
+const logger = log.scope("llm_engine_provider");
+
+export type ExampleChatModelId = string & {};
+
+export interface ExampleChatSettings extends OpenAICompatibleChatSettings {
+  files?: { path: string; content: string }[];
+}
+export interface ExampleProviderSettings {
+  /**
+Example API key.
+*/
+  apiKey?: string;
+  /**
+Base URL for the API calls.
+*/
+  baseURL?: string;
+  /**
+Custom headers to include in the requests.
+*/
+  headers?: Record<string, string>;
+  /**
+Optional custom url query parameters to include in request urls.
+*/
+  queryParams?: Record<string, string>;
+  /**
+Custom fetch implementation. You can use it as a middleware to intercept requests,
+or to provide a custom fetch implementation for e.g. testing.
+*/
+  fetch?: FetchFunction;
+}
+
+export interface DyadEngineProvider {
+  /**
+Creates a model for text generation.
+*/
+  (
+    modelId: ExampleChatModelId,
+    settings?: ExampleChatSettings,
+  ): LanguageModelV1;
+
+  /**
+Creates a chat model for text generation.
+*/
+  chatModel(
+    modelId: ExampleChatModelId,
+    settings?: ExampleChatSettings,
+  ): LanguageModelV1;
+}
+
+export function createDyadEngine(
+  options: ExampleProviderSettings = {},
+): DyadEngineProvider {
+  const baseURL = withoutTrailingSlash(
+    options.baseURL ?? "https://api.example.com/v1",
+  );
+  const getHeaders = () => ({
+    Authorization: `Bearer ${loadApiKey({
+      apiKey: options.apiKey,
+      environmentVariableName: "DYAD_PRO_API_KEY",
+      description: "Example API key",
+    })}`,
+    ...options.headers,
+  });
+
+  interface CommonModelConfig {
+    provider: string;
+    url: ({ path }: { path: string }) => string;
+    headers: () => Record<string, string>;
+    fetch?: FetchFunction;
+  }
+
+  const getCommonModelConfig = (modelType: string): CommonModelConfig => ({
+    provider: `example.${modelType}`,
+    url: ({ path }) => {
+      const url = new URL(`${baseURL}${path}`);
+      if (options.queryParams) {
+        url.search = new URLSearchParams(options.queryParams).toString();
+      }
+      return url.toString();
+    },
+    headers: getHeaders,
+    fetch: options.fetch,
+  });
+
+  const createChatModel = (
+    modelId: ExampleChatModelId,
+    settings: ExampleChatSettings = {},
+  ) => {
+    // Extract files from settings to process them appropriately
+    const { files, ...restSettings } = settings;
+
+    // Create configuration with file handling
+    const config = {
+      ...getCommonModelConfig("chat"),
+      defaultObjectGenerationMode:
+        "tool" as LanguageModelV1ObjectGenerationMode,
+      // Custom fetch implementation that adds files to the request
+      fetch: files?.length
+        ? (input: RequestInfo | URL, init?: RequestInit) => {
+            // Use default fetch if no init or body
+            if (!init || !init.body || typeof init.body !== "string") {
+              return (options.fetch || fetch)(input, init);
+            }
+
+            try {
+              // Parse the request body to manipulate it
+              const parsedBody = JSON.parse(init.body);
+
+              // Add files to the request if they exist
+              if (files?.length) {
+                parsedBody.dyad_options = {
+                  files,
+                  enable_lazy_edits: true,
+                };
+              }
+
+              // Return modified request with files included
+              const modifiedInit = {
+                ...init,
+                body: JSON.stringify(parsedBody),
+              };
+
+              // Use the provided fetch or default fetch
+              return (options.fetch || fetch)(input, modifiedInit);
+            } catch (e) {
+              logger.error("Error parsing request body", e);
+              // If parsing fails, use original request
+              return (options.fetch || fetch)(input, init);
+            }
+          }
+        : options.fetch,
+    };
+
+    return new OpenAICompatibleChatLanguageModel(modelId, restSettings, config);
+  };
+
+  const provider = (
+    modelId: ExampleChatModelId,
+    settings?: ExampleChatSettings,
+  ) => createChatModel(modelId, settings);
+
+  provider.chatModel = createChatModel;
+
+  return provider;
+}
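The core of the new provider is the fetch wrapper: when files are passed in the chat settings, the outgoing JSON body is parsed, a dyad_options field is attached, and the request is re-serialized; on any parse failure it falls back to the untouched request. A hypothetical use, with the model id and file content invented for illustration:

import { createDyadEngine } from "./llm_engine_provider";

// Hypothetical usage; the model id and file are placeholders.
const engine = createDyadEngine({
  apiKey: process.env.DYAD_PRO_API_KEY,
  baseURL: "https://engine.dyad.sh/v1",
});

const model = engine("some-gateway-prefix/some-model", {
  files: [{ path: "src/App.tsx", content: "export default function App() {}" }],
});

// Any completion request made through `model` would be POSTed with the
// usual OpenAI-compatible body plus:
//
//   "dyad_options": {
//     "files": [{ "path": "src/App.tsx", "content": "..." }],
//     "enable_lazy_edits": true
//   }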