Upgrade to AI SDK with codemod (#1000)
@@ -1,7 +1,7 @@
 import { v4 as uuidv4 } from "uuid";
 import { ipcMain } from "electron";
 import {
-  CoreMessage,
+  ModelMessage,
   TextPart,
   ImagePart,
   streamText,
@@ -134,14 +134,14 @@ async function processStreamChunks({
         chunk = "</think>";
         inThinkingBlock = false;
       }
-      chunk += part.textDelta;
-    } else if (part.type === "reasoning") {
+      chunk += part.text;
+    } else if (part.type === "reasoning-delta") {
       if (!inThinkingBlock) {
         chunk = "<think>";
         inThinkingBlock = true;
       }

-      chunk += escapeDyadTags(part.textDelta);
+      chunk += escapeDyadTags(part.text);
     }

     if (!chunk) {
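
Context for this hunk: AI SDK v5 renames the `reasoning` stream part to `reasoning-delta` and moves the chunk payload from `part.textDelta` to `part.text` for text and reasoning parts alike. A minimal v5 consumption loop as a sketch (the model and prompt are placeholders, not from this diff):

```ts
import { streamText, type LanguageModel } from "ai";

// Sketch of the v5 stream-part shape; both delta part types carry `part.text`.
async function printStream(model: LanguageModel) {
  const result = streamText({ model, prompt: "Hello" });
  for await (const part of result.fullStream) {
    if (part.type === "text-delta") {
      process.stdout.write(part.text); // v4: part.textDelta
    } else if (part.type === "reasoning-delta") {
      process.stdout.write(part.text); // v4: part type was "reasoning"
    }
  }
}
```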
@@ -603,7 +603,7 @@ This conversation includes one or more image attachments. When the user uploads
       ] as const)
     : [];

-  let chatMessages: CoreMessage[] = [
+  let chatMessages: ModelMessage[] = [
     ...codebasePrefix,
     ...otherCodebasePrefix,
     ...limitedMessageHistory.map((msg) => ({
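
`CoreMessage` is simply renamed to `ModelMessage` in v5; the message shape is unchanged for plain text turns. An illustration with placeholder content:

```ts
import type { ModelMessage } from "ai";

// v4: CoreMessage[]  ->  v5: ModelMessage[]
const chatMessages: ModelMessage[] = [
  { role: "system", content: "You are a helpful assistant." },
  { role: "user", content: "Hello!" },
];
```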
@@ -647,7 +647,7 @@ This conversation includes one or more image attachments. When the user uploads
       content:
         "Summarize the following chat: " +
         formatMessagesForSummary(previousChat?.messages ?? []),
-    } satisfies CoreMessage,
+    } satisfies ModelMessage,
   ];
 }

@@ -655,7 +655,7 @@ This conversation includes one or more image attachments. When the user uploads
   chatMessages,
   modelClient,
 }: {
-  chatMessages: CoreMessage[];
+  chatMessages: ModelMessage[];
   modelClient: ModelClient;
 }) => {
   const dyadRequestId = uuidv4();
@@ -668,7 +668,7 @@ This conversation includes one or more image attachments. When the user uploads
     logger.log("sending AI request");
   }
   return streamText({
-    maxTokens: await getMaxTokens(settings.selectedModel),
+    maxOutputTokens: await getMaxTokens(settings.selectedModel),
     temperature: await getTemperature(settings.selectedModel),
     maxRetries: 2,
     model: modelClient.model,
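
`maxTokens` becomes `maxOutputTokens` in v5, making explicit that the cap applies to generated output rather than the context window. A sketch with placeholder values:

```ts
import { streamText, type LanguageModel, type ModelMessage } from "ai";

function startStream(model: LanguageModel, messages: ModelMessage[]) {
  return streamText({
    model,
    messages,
    maxOutputTokens: 4096, // v4: maxTokens
    temperature: 0.7,
    maxRetries: 2,
  });
}
```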
@@ -798,7 +798,7 @@ This conversation includes one or more image attachments. When the user uploads
         break;
       }
       if (part.type !== "text-delta") continue; // ignore reasoning for continuation
-      fullResponse += part.textDelta;
+      fullResponse += part.text;
       fullResponse = cleanFullResponse(fullResponse);
       fullResponse = await processResponseChunkUpdate({
         fullResponse,
@@ -825,7 +825,7 @@ This conversation includes one or more image attachments. When the user uploads

   let autoFixAttempts = 0;
   const originalFullResponse = fullResponse;
-  const previousAttempts: CoreMessage[] = [];
+  const previousAttempts: ModelMessage[] = [];
   while (
     problemReport.problems.length > 0 &&
     autoFixAttempts < 2 &&
@@ -1161,9 +1161,9 @@ async function replaceTextAttachmentWithContent(

 // Helper function to convert traditional message to one with proper image attachments
 async function prepareMessageWithAttachments(
-  message: CoreMessage,
+  message: ModelMessage,
   attachmentPaths: string[],
-): Promise<CoreMessage> {
+): Promise<ModelMessage> {
   let textContent = message.content;
   // Get the original text content
   if (typeof textContent !== "string") {
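
The `TextPart`/`ImagePart` imports from the first hunk pair with `ModelMessage` here: a message carrying attachments uses an array of parts as its `content`, which is why this function checks for a non-string `content`. A hypothetical example message:

```ts
import type { ModelMessage, TextPart, ImagePart } from "ai";

// A user message whose content mixes text with an image attachment.
const parts: Array<TextPart | ImagePart> = [
  { type: "text", text: "What is in this screenshot?" },
  { type: "image", image: new URL("https://example.com/screenshot.png") },
];
const withAttachment: ModelMessage = { role: "user", content: parts };
```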
@@ -37,7 +37,9 @@ export function parseOllamaHost(host?: string): string {
   return `http://${host}:11434`;
 }

-const OLLAMA_API_URL = parseOllamaHost(process.env.OLLAMA_HOST);
+export function getOllamaApiUrl(): string {
+  return parseOllamaHost(process.env.OLLAMA_HOST);
+}

 interface OllamaModel {
   name: string;
@@ -55,7 +57,7 @@ interface OllamaModel {

 export async function fetchOllamaModels(): Promise<LocalModelListResponse> {
   try {
-    const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
+    const response = await fetch(`${getOllamaApiUrl()}/api/tags`);
     if (!response.ok) {
       throw new Error(`Failed to fetch model: ${response.statusText}`);
     }
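
Replacing the module-level `OLLAMA_API_URL` constant with `getOllamaApiUrl()` moves the `OLLAMA_HOST` read from import time to call time, so the URL tracks environment changes made after startup. A reduced sketch of the difference (`parseOllamaHost` is abbreviated here, not the real implementation):

```ts
// Abbreviated stand-in; the real parseOllamaHost handles more host formats.
function parseOllamaHost(host?: string): string {
  return host ? `http://${host}:11434` : "http://localhost:11434";
}

// Before: evaluated once at module load; later env changes were ignored.
const OLLAMA_API_URL = parseOllamaHost(process.env.OLLAMA_HOST);

// After: evaluated on every call; always reflects the current environment.
export function getOllamaApiUrl(): string {
  return parseOllamaHost(process.env.OLLAMA_HOST);
}
```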
@@ -1,9 +1,7 @@
-import { LanguageModelV1 } from "ai";
 import { createOpenAI } from "@ai-sdk/openai";
 import { createGoogleGenerativeAI as createGoogle } from "@ai-sdk/google";
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
-import { createOllama } from "ollama-ai-provider";
 import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
 import type { LargeLanguageModel, UserSettings } from "../../lib/schemas";
 import { getEnvVar } from "./read_env";
@@ -13,6 +11,9 @@ import { LanguageModelProvider } from "../ipc_types";
 import { createDyadEngine } from "./llm_engine_provider";

 import { LM_STUDIO_BASE_URL } from "./lm_studio_utils";
+import { LanguageModel } from "ai";
+import { createOllamaProvider } from "./ollama_provider";
+import { getOllamaApiUrl } from "../handlers/local_model_ollama_handler";

 const dyadEngineUrl = process.env.DYAD_ENGINE_URL;
 const dyadGatewayUrl = process.env.DYAD_GATEWAY_URL;
@@ -33,7 +34,7 @@ const AUTO_MODELS = [
 ];

 export interface ModelClient {
-  model: LanguageModelV1;
+  model: LanguageModel;
   builtinProviderId?: string;
 }

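
v5 drops the versioned `LanguageModelV1` interface in favor of the single `LanguageModel` type exported from `ai`, so any provider's model object satisfies `ModelClient.model`. A sketch (the provider, API key, and model id below are placeholders):

```ts
import type { LanguageModel } from "ai";
import { createAnthropic } from "@ai-sdk/anthropic";

interface ModelClient {
  model: LanguageModel;
  builtinProviderId?: string;
}

const anthropic = createAnthropic({ apiKey: "placeholder-key" });
const client: ModelClient = {
  model: anthropic("claude-sonnet-4-5"), // assignable as LanguageModel
  builtinProviderId: "anthropic",
};
```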
@@ -168,7 +169,10 @@ function getRegularModelClient(
   model: LargeLanguageModel,
   settings: UserSettings,
   providerConfig: LanguageModelProvider,
-) {
+): {
+  modelClient: ModelClient;
+  backupModelClients: ModelClient[];
+} {
   // Get API key for the specific provider
   const apiKey =
     settings.providerSettings?.[model.provider]?.apiKey?.value ||
@@ -220,13 +224,11 @@ function getRegularModelClient(
       };
     }
     case "ollama": {
       // Ollama typically runs locally and doesn't require an API key in the same way
-      const provider = createOllama({
-        baseURL: process.env.OLLAMA_HOST,
-      });
+      const provider = createOllamaProvider({ baseURL: getOllamaApiUrl() });
       return {
         modelClient: {
           model: provider(model.name),
           builtinProviderId: providerId,
         },
         backupModelClients: [],
       };
@@ -1,7 +1,4 @@
-import {
-  LanguageModelV1,
-  LanguageModelV1ObjectGenerationMode,
-} from "@ai-sdk/provider";
+import { LanguageModel } from "ai";
 import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible";
 import {
   FetchFunction,
@@ -9,7 +6,6 @@ import {
   withoutTrailingSlash,
 } from "@ai-sdk/provider-utils";

-import { OpenAICompatibleChatSettings } from "@ai-sdk/openai-compatible";
 import log from "electron-log";
 import { getExtraProviderOptions } from "./thinking_utils";
 import type { UserSettings } from "../../lib/schemas";
@@ -18,7 +14,7 @@ const logger = log.scope("llm_engine_provider");

 export type ExampleChatModelId = string & {};

-export interface ExampleChatSettings extends OpenAICompatibleChatSettings {
+export interface ExampleChatSettings {
   files?: { path: string; content: string }[];
 }
 export interface ExampleProviderSettings {
@@ -56,10 +52,7 @@ export interface DyadEngineProvider {
   /**
 Creates a model for text generation.
 */
-  (
-    modelId: ExampleChatModelId,
-    settings?: ExampleChatSettings,
-  ): LanguageModelV1;
+  (modelId: ExampleChatModelId, settings?: ExampleChatSettings): LanguageModel;

   /**
 Creates a chat model for text generation.
@@ -67,7 +60,7 @@ Creates a chat model for text generation.
   chatModel(
     modelId: ExampleChatModelId,
     settings?: ExampleChatSettings,
-  ): LanguageModelV1;
+  ): LanguageModel;
 }

 export function createDyadEngine(
@@ -113,13 +106,13 @@ export function createDyadEngine(
     settings: ExampleChatSettings = {},
   ) => {
     // Extract files from settings to process them appropriately
-    const { files, ...restSettings } = settings;
+    const { files } = settings;

     // Create configuration with file handling
     const config = {
       ...getCommonModelConfig(),
-      defaultObjectGenerationMode:
-        "tool" as LanguageModelV1ObjectGenerationMode,
+      // defaultObjectGenerationMode:
+      //   "tool" as LanguageModelV1ObjectGenerationMode,
       // Custom fetch implementation that adds files to the request
       fetch: (input: RequestInfo | URL, init?: RequestInit) => {
         // Use default fetch if no init or body
@@ -181,7 +174,7 @@ export function createDyadEngine(
       },
     };

-    return new OpenAICompatibleChatLanguageModel(modelId, restSettings, config);
+    return new OpenAICompatibleChatLanguageModel(modelId, config);
   };

   const provider = (
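
In v5 the `OpenAICompatibleChatLanguageModel` constructor takes only `(modelId, config)`; the per-model settings argument is gone, which is why `restSettings` is no longer destructured above. A hypothetical minimal construction, assuming a config shape like the one `getCommonModelConfig()` assembles in this file:

```ts
import { OpenAICompatibleChatLanguageModel } from "@ai-sdk/openai-compatible";

// All values below are placeholders; the real config also carries the
// custom fetch shown in the previous hunk.
const model = new OpenAICompatibleChatLanguageModel("some-model-id", {
  provider: "dyad-engine.chat",
  url: ({ path }) => `https://engine.example.com/v1${path}`,
  headers: () => ({}),
});
```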
src/ipc/utils/ollama_provider.ts (new file, 39 lines)
@@ -0,0 +1,39 @@
+import { LanguageModel } from "ai";
+import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
+import type { FetchFunction } from "@ai-sdk/provider-utils";
+import { withoutTrailingSlash } from "@ai-sdk/provider-utils";
+import type {} from "@ai-sdk/provider";
+
+type OllamaChatModelId = string;
+
+export interface OllamaProviderOptions {
+  /**
+   * Base URL for the Ollama server, e.g. http://localhost:11434.
+   * Requests go to Ollama's OpenAI-compatible endpoint under `${baseURL}/v1`.
+   * If undefined, defaults to http://localhost:11434.
+   */
+  baseURL?: string;
+  headers?: Record<string, string>;
+  fetch?: FetchFunction;
+}
+
+export interface OllamaChatSettings {}
+
+export interface OllamaProvider {
+  (modelId: OllamaChatModelId, settings?: OllamaChatSettings): LanguageModel;
+}
+
+export function createOllamaProvider(
+  options?: OllamaProviderOptions,
+): OllamaProvider {
+  const base = withoutTrailingSlash(
+    options?.baseURL ?? "http://localhost:11434",
+  )!;
+  const v1Base = (base.endsWith("/v1") ? base : `${base}/v1`) as string;
+  const provider = createOpenAICompatible({
+    name: "ollama",
+    baseURL: v1Base,
+    headers: options?.headers,
+  });
+  return (modelId: OllamaChatModelId) => provider(modelId);
+}
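
Usage sketch for the new provider (the model name below is a placeholder): `createOllamaProvider` returns a callable that yields a `LanguageModel` backed by Ollama's OpenAI-compatible `/v1` endpoint, so it plugs directly into `streamText`:

```ts
import { streamText } from "ai";
import { createOllamaProvider } from "./ollama_provider";

const ollama = createOllamaProvider({ baseURL: "http://localhost:11434" });
const result = streamText({
  model: ollama("llama3.2"), // placeholder model name
  prompt: "Say hello.",
});
```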