Add ollama support (#7)
src/ipc/handlers/local_model_handlers.ts (new file, 80 lines)
@@ -0,0 +1,80 @@
+import { ipcMain } from "electron";
+import log from "electron-log";
+import { LocalModelListResponse, LocalModel } from "../ipc_types";
+
+const logger = log.scope("local_model_handlers");
+const OLLAMA_API_URL = "http://localhost:11434";
+
+interface OllamaModel {
+  name: string;
+  modified_at: string;
+  size: number;
+  digest: string;
+  details: {
+    format: string;
+    family: string;
+    families: string[];
+    parameter_size: string;
+    quantization_level: string;
+  };
+}
+
+export function registerLocalModelHandlers() {
+  // Get list of models from Ollama
+  ipcMain.handle(
+    "local-models:list",
+    async (): Promise<LocalModelListResponse> => {
+      try {
+        const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
+
+        if (!response.ok) {
+          throw new Error(`Failed to fetch models: ${response.statusText}`);
+        }
+
+        const data = await response.json();
+        const ollamaModels: OllamaModel[] = data.models || [];
+
+        // Transform the data to return just what we need
+        const models: LocalModel[] = ollamaModels.map((model) => {
+          // Extract display name by cleaning up the model name
+          // For names like "llama2:latest" we want to show "Llama 2"
+          let displayName = model.name.split(":")[0]; // Remove tags like ":latest"
+
+          // Capitalize and add spaces for readability
+          displayName = displayName
+            .replace(/-/g, " ")
+            .replace(/(\d+)/, " $1 ") // Add spaces around numbers
+            .split(" ")
+            .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
+            .join(" ")
+            .trim();
+
+          return {
+            modelName: model.name, // The actual model name used for API calls
+            displayName, // The user-friendly name
+          };
+        });
+
+        logger.info(
+          `Successfully fetched ${models.length} local models from Ollama`
+        );
+        return { models, error: null };
+      } catch (error) {
+        if (
+          error instanceof TypeError &&
+          (error as Error).message.includes("fetch failed")
+        ) {
+          logger.error("Could not connect to Ollama. Is it running?");
+          return {
+            models: [],
+            error:
+              "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
+          };
+        }
+
+        logger.error("Error fetching local models:", error);
+        return { models: [], error: "Failed to fetch models from Ollama" };
+      }
+    }
+  );
+}
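The name cleanup above is purely cosmetic; the raw Ollama tag is kept in modelName for API calls. As a quick sanity check, here is the same transform pulled out into a standalone function with a couple of sample inputs (the sample model names are illustrative, not part of this commit):

// Standalone copy of the display-name cleanup above, for illustration only.
function toDisplayName(rawName: string): string {
  const base = rawName.split(":")[0]; // drop tags like ":latest"
  return base
    .replace(/-/g, " ")
    .replace(/(\d+)/, " $1 ") // spaces around the first run of digits only (no /g flag)
    .split(" ")
    .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
    .join(" ")
    .trim();
}

console.log(toDisplayName("llama2:latest")); // "Llama 2"
console.log(toDisplayName("deepseek-coder:6.7b")); // "Deepseek Coder"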
@@ -17,6 +17,8 @@ import type {
   Message,
   Version,
   SystemDebugInfo,
+  LocalModel,
+  LocalModelListResponse,
 } from "./ipc_types";
 import type { CodeProposal, ProposalResult } from "@/lib/schemas";
 import { showError } from "@/lib/toast";
@@ -729,14 +731,24 @@ export class IpcClient {
   // Get system debug information
   public async getSystemDebugInfo(): Promise<SystemDebugInfo> {
     try {
-      const result = await this.ipcRenderer.invoke("get-system-debug-info");
-      return result;
+      const data = await this.ipcRenderer.invoke("get-system-debug-info");
+      return data;
     } catch (error) {
       showError(error);
       throw error;
     }
   }
 
+  public async listLocalModels(): Promise<LocalModel[]> {
+    const { models, error } = (await this.ipcRenderer.invoke(
+      "local-models:list"
+    )) as LocalModelListResponse;
+    if (error) {
+      throw new Error(error);
+    }
+    return models;
+  }
+
   // Listen for deep link events
   public onDeepLinkReceived(
     callback: (data: DeepLinkData) => void
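On the renderer side, listLocalModels() unwraps the IPC response and rethrows the handler's error string. A rough usage sketch follows; the import path and the getInstance() accessor are assumptions about the surrounding codebase, not shown in this diff:

// Hypothetical renderer-side usage; accessor and import path are assumed, not part of this commit.
import { IpcClient } from "@/ipc/ipc_client";

async function loadOllamaModels(): Promise<void> {
  try {
    const models = await IpcClient.getInstance().listLocalModels();
    for (const model of models) {
      console.log(`${model.displayName} (${model.modelName})`);
    }
  } catch (error) {
    // Thrown when the handler reports an error, e.g. when Ollama is not running.
    console.warn("Could not load local models:", error);
  }
}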
@@ -9,6 +9,7 @@ import { registerNodeHandlers } from "./handlers/node_handlers";
 import { registerProposalHandlers } from "./handlers/proposal_handlers";
 import { registerDebugHandlers } from "./handlers/debug_handlers";
 import { registerSupabaseHandlers } from "./handlers/supabase_handlers";
+import { registerLocalModelHandlers } from "./handlers/local_model_handlers";
 
 export function registerIpcHandlers() {
   // Register all IPC handlers by category
@@ -23,4 +24,5 @@ export function registerIpcHandlers() {
   registerProposalHandlers();
   registerDebugHandlers();
   registerSupabaseHandlers();
+  registerLocalModelHandlers();
 }
@@ -91,3 +91,13 @@ export interface SystemDebugInfo {
   architecture: string;
   logs: string;
 }
+
+export interface LocalModel {
+  modelName: string; // Name used for API calls (e.g., "llama2:latest")
+  displayName: string; // User-friendly name (e.g., "Llama 2")
+}
+
+export type LocalModelListResponse = {
+  models: LocalModel[];
+  error: string | null;
+};
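For reference, the two shapes the "local-models:list" channel can produce, using the error string from the handler and an illustrative model entry:

// Illustrative values only: a successful listing and the Ollama-unreachable case.
const ok: LocalModelListResponse = {
  models: [{ modelName: "llama2:latest", displayName: "Llama 2" }],
  error: null,
};

const ollamaDown: LocalModelListResponse = {
  models: [],
  error:
    "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
};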
@@ -2,6 +2,8 @@ import { createOpenAI } from "@ai-sdk/openai";
 import { createGoogleGenerativeAI as createGoogle } from "@ai-sdk/google";
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
+import { createOllama } from "ollama-ai-provider";
+
 import type { LargeLanguageModel, UserSettings } from "../../lib/schemas";
 import { PROVIDER_TO_ENV_VAR, AUTO_MODELS } from "../../constants/models";
 import { getEnvVar } from "./read_env";
@@ -56,6 +58,10 @@ export function getModelClient(
       const provider = createOpenRouter({ apiKey });
       return provider(model.name);
     }
+    case "ollama": {
+      const provider = createOllama();
+      return provider(model.name);
+    }
     default: {
       // Ensure exhaustive check if more providers are added
       const _exhaustiveCheck: never = model.provider;
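Note that the ollama case needs no API key: createOllama() talks to the locally running Ollama daemon. A minimal sketch of exercising the returned model client with the AI SDK's generateText, assuming the ai package is available in this project and using a placeholder model name:

// Sketch only: "llama2:latest" is a placeholder; use whatever `ollama list` shows locally.
import { generateText } from "ai";
import { createOllama } from "ollama-ai-provider";

async function demo() {
  const ollama = createOllama(); // no API key; targets the local Ollama daemon
  const { text } = await generateText({
    model: ollama("llama2:latest"),
    prompt: "Reply with a one-sentence greeting.",
  });
  console.log(text);
}

demo().catch(console.error);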