Add LM Studio support (#22)

Piotr Wilkin (ilintar)
2025-05-02 23:51:32 +02:00
committed by GitHub
parent 3529627172
commit 5fc49231ee
15 changed files with 394 additions and 123 deletions

View File

@@ -1,80 +1,7 @@
-import { ipcMain } from "electron";
-import log from "electron-log";
-import { LocalModelListResponse, LocalModel } from "../ipc_types";
-
-const logger = log.scope("local_model_handlers");
-const OLLAMA_API_URL = "http://localhost:11434";
-
-interface OllamaModel {
-  name: string;
-  modified_at: string;
-  size: number;
-  digest: string;
-  details: {
-    format: string;
-    family: string;
-    families: string[];
-    parameter_size: string;
-    quantization_level: string;
-  };
-}
-
+import { registerOllamaHandlers } from "./local_model_ollama_handler";
+import { registerLMStudioHandlers } from "./local_model_lmstudio_handler";
+
 export function registerLocalModelHandlers() {
-  // Get list of models from Ollama
-  ipcMain.handle(
-    "local-models:list",
-    async (): Promise<LocalModelListResponse> => {
-      try {
-        const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
-        if (!response.ok) {
-          throw new Error(`Failed to fetch models: ${response.statusText}`);
-        }
-
-        const data = await response.json();
-        const ollamaModels: OllamaModel[] = data.models || [];
-
-        // Transform the data to return just what we need
-        const models: LocalModel[] = ollamaModels.map((model) => {
-          // Extract display name by cleaning up the model name
-          // For names like "llama2:latest" we want to show "Llama 2"
-          let displayName = model.name.split(":")[0]; // Remove tags like ":latest"
-
-          // Capitalize and add spaces for readability
-          displayName = displayName
-            .replace(/-/g, " ")
-            .replace(/(\d+)/, " $1 ") // Add spaces around numbers
-            .split(" ")
-            .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
-            .join(" ")
-            .trim();
-
-          return {
-            modelName: model.name, // The actual model name used for API calls
-            displayName, // The user-friendly name
-          };
-        });
-
-        logger.info(
-          `Successfully fetched ${models.length} local models from Ollama`
-        );
-        return { models, error: null };
-      } catch (error) {
-        if (
-          error instanceof TypeError &&
-          (error as Error).message.includes("fetch failed")
-        ) {
-          logger.error("Could not connect to Ollama. Is it running?");
-          return {
-            models: [],
-            error:
-              "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
-          };
-        }
-        logger.error("Error fetching local models:", error);
-        return { models: [], error: "Failed to fetch models from Ollama" };
-      }
-    }
-  );
+  registerOllamaHandlers();
+  registerLMStudioHandlers();
 }
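The slimmed-down entry point above only delegates; the actual IPC handlers now live in the two provider-specific modules. For context, a minimal sketch of how it is presumably wired up from the Electron main process — the call site and import path here are hypothetical, not part of this diff:

import { app } from "electron";
import { registerLocalModelHandlers } from "./ipc/handlers/local_model_handlers"; // hypothetical path

app.whenReady().then(() => {
  // Registers both "local-models:list-ollama" and "local-models:list-lmstudio"
  // before any renderer window can invoke them.
  registerLocalModelHandlers();
});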

View File

@@ -0,0 +1,47 @@
+import { ipcMain } from "electron";
+import log from "electron-log";
+import type { LocalModelListResponse, LocalModel } from "../ipc_types";
+
+const logger = log.scope("lmstudio_handler");
+
+export interface LMStudioModel {
+  type: "llm" | "embedding" | string;
+  id: string;
+  object: string;
+  publisher: string;
+  state: "loaded" | "not-loaded";
+  max_context_length: number;
+  quantization: string;
+  compatibility_type: string;
+  arch: string;
+  [key: string]: any;
+}
+
+export async function fetchLMStudioModels(): Promise<LocalModelListResponse> {
+  try {
+    const modelsResponse: Response = await fetch(
+      "http://localhost:1234/api/v0/models"
+    );
+    if (!modelsResponse.ok) {
+      throw new Error("Failed to fetch models from LM Studio");
+    }
+    const modelsJson = await modelsResponse.json();
+    const downloadedModels = modelsJson.data as LMStudioModel[];
+    const models: LocalModel[] = downloadedModels
+      .filter((model) => model.type === "llm")
+      .map((model) => ({
+        modelName: model.id,
+        displayName: model.id,
+        provider: "lmstudio",
+      }));
+    logger.info(`Successfully fetched ${models.length} models from LM Studio`);
+    return { models, error: null };
+  } catch (error) {
+    return { models: [], error: "Failed to fetch models from LM Studio" };
+  }
+}
+
+export function registerLMStudioHandlers() {
+  ipcMain.handle(
+    "local-models:list-lmstudio",
+    async (): Promise<LocalModelListResponse> => {
+      return fetchLMStudioModels();
+    }
+  );
+}
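For reference, fetchLMStudioModels targets LM Studio's REST API on its default port 1234 and keeps only chat-capable entries. An illustrative payload, shaped after the LMStudioModel interface above — the field values are made up, not captured output:

// Sketch of a /api/v0/models response; the embedding entry is filtered out
const sampleResponse = {
  object: "list",
  data: [
    {
      id: "qwen2.5-7b-instruct",
      object: "model",
      type: "llm",
      publisher: "lmstudio-community",
      arch: "qwen2",
      compatibility_type: "gguf",
      quantization: "Q4_K_M",
      state: "not-loaded",
      max_context_length: 32768,
    },
    {
      id: "nomic-embed-text-v1.5",
      object: "model",
      type: "embedding",
      publisher: "nomic-ai",
      arch: "nomic-bert",
      compatibility_type: "gguf",
      quantization: "Q8_0",
      state: "loaded",
      max_context_length: 2048,
    },
  ],
};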

View File

@@ -0,0 +1,66 @@
+import { ipcMain } from "electron";
+import log from "electron-log";
+import { LocalModelListResponse, LocalModel } from "../ipc_types";
+
+const logger = log.scope("ollama_handler");
+const OLLAMA_API_URL = "http://localhost:11434";
+
+interface OllamaModel {
+  name: string;
+  modified_at: string;
+  size: number;
+  digest: string;
+  details: {
+    format: string;
+    family: string;
+    families: string[];
+    parameter_size: string;
+    quantization_level: string;
+  };
+}
+
+export async function fetchOllamaModels(): Promise<LocalModelListResponse> {
+  try {
+    const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
+    if (!response.ok) {
+      throw new Error(`Failed to fetch models: ${response.statusText}`);
+    }
+    const data = await response.json();
+    const ollamaModels: OllamaModel[] = data.models || [];
+    const models: LocalModel[] = ollamaModels.map((model: OllamaModel) => {
+      // Derive a user-friendly name, e.g. "llama2:latest" -> "Llama 2"
+      const displayName = model.name
+        .split(":")[0]
+        .replace(/-/g, " ")
+        .replace(/(\d+)/, " $1 ")
+        .split(" ")
+        .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
+        .join(" ")
+        .trim();
+      return {
+        modelName: model.name,
+        displayName,
+        provider: "ollama",
+      };
+    });
+    logger.info(`Successfully fetched ${models.length} models from Ollama`);
+    return { models, error: null };
+  } catch (error) {
+    if (
+      error instanceof TypeError &&
+      (error as Error).message.includes("fetch failed")
+    ) {
+      logger.error("Could not connect to Ollama");
+      return {
+        models: [],
+        error:
+          "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
+      };
+    }
+    return { models: [], error: "Failed to fetch models from Ollama" };
+  }
+}
+
+export function registerOllamaHandlers() {
+  ipcMain.handle(
+    "local-models:list-ollama",
+    async (): Promise<LocalModelListResponse> => {
+      return fetchOllamaModels();
+    }
+  );
+}
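To make the name cleanup concrete, the chain above behaves like this standalone helper (hypothetical name, same logic as the code in the diff):

// "llama2:latest"       -> "Llama 2"
// "deepseek-coder:6.7b" -> "Deepseek Coder"
function toDisplayName(name: string): string {
  return name
    .split(":")[0]              // drop the tag, e.g. ":latest"
    .replace(/-/g, " ")         // hyphens become word breaks
    .replace(/(\d+)/, " $1 ")   // pad the first digit run: "llama2" -> "llama 2 "
    .split(" ")
    .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
    .join(" ")
    .trim();
}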

View File

@@ -785,14 +785,28 @@ export class IpcClient {
     }
   }
 
-  public async listLocalModels(): Promise<LocalModel[]> {
-    const { models, error } = (await this.ipcRenderer.invoke(
-      "local-models:list"
-    )) as LocalModelListResponse;
-    if (error) {
-      throw new Error(error);
+  public async listLocalOllamaModels(): Promise<LocalModel[]> {
+    try {
+      const response = await this.ipcRenderer.invoke("local-models:list-ollama");
+      return response?.models || [];
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Failed to fetch Ollama models: ${error.message}`);
+      }
+      throw new Error("Failed to fetch Ollama models: Unknown error occurred");
+    }
+  }
+
+  public async listLocalLMStudioModels(): Promise<LocalModel[]> {
+    try {
+      const response = await this.ipcRenderer.invoke("local-models:list-lmstudio");
+      return response?.models || [];
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Failed to fetch LM Studio models: ${error.message}`);
+      }
+      throw new Error("Failed to fetch LM Studio models: Unknown error occurred");
+    }
-    return models;
   }
 
   // Listen for deep link events
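Renderer code that previously called listLocalModels() presumably now queries both providers and merges the results — a sketch, where ipcClient stands in for however the renderer obtains its IpcClient instance:

const [ollamaModels, lmStudioModels] = await Promise.all([
  ipcClient.listLocalOllamaModels(),
  ipcClient.listLocalLMStudioModels(),
]);
// provider on each LocalModel tells the two lists apart after merging
const localModels: LocalModel[] = [...ollamaModels, ...lmStudioModels];

Note that per the handler code above, an unreachable backend yields { models: [], error } rather than a rejected invoke, so one missing provider produces an empty list instead of failing the whole Promise.all.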

View File

@@ -94,6 +94,7 @@ export interface SystemDebugInfo {
 }
 
 export interface LocalModel {
+  provider: "ollama" | "lmstudio";
   modelName: string; // Name used for API calls (e.g., "llama2:latest")
   displayName: string; // User-friendly name (e.g., "Llama 2")
 }
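Under the widened interface, values from the two backends look like this (illustrative):

const fromOllama: LocalModel = {
  provider: "ollama",
  modelName: "llama2:latest",
  displayName: "Llama 2",
};
const fromLMStudio: LocalModel = {
  provider: "lmstudio",
  modelName: "qwen2.5-7b-instruct",
  displayName: "qwen2.5-7b-instruct", // the LM Studio handler reuses the id
};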

View File

@@ -3,7 +3,7 @@ import { createGoogleGenerativeAI as createGoogle } from "@ai-sdk/google";
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
 import { createOllama } from "ollama-ai-provider";
+import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
 import type { LargeLanguageModel, UserSettings } from "../../lib/schemas";
 import {
   PROVIDER_TO_ENV_VAR,
@@ -82,8 +82,14 @@ export function getModelClient(
case "ollama": {
const provider = createOllama();
return provider(model.name);
}
default: {
}
case "lmstudio": {
// Using LM Studio's OpenAI compatible API
const baseURL = "http://localhost:1234/v1"; // Default LM Studio OpenAI API URL
const provider = createOpenAICompatible({ name: "lmstudio", baseURL });
return provider(model.name);
}
default: {
// Ensure exhaustive check if more providers are added
const _exhaustiveCheck: never = model.provider;
throw new Error(`Unsupported model provider: ${model.provider}`);
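The client returned by the new lmstudio branch plugs into the Vercel AI SDK like any other provider. A minimal sketch with placeholder model descriptor and settings — the diff does not show getModelClient's full signature, so the arguments here are assumptions:

import { streamText } from "ai";

// model and settings are placeholders for illustration
const client = getModelClient(model, settings);
const { textStream } = streamText({
  model: client,
  prompt: "Reply with a short greeting.",
});
for await (const chunk of textStream) {
  process.stdout.write(chunk);
}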