Add LM Studio support (#22)
committed by GitHub
parent 3529627172
commit 5fc49231ee
src/ipc/handlers/local_model_handlers.ts
@@ -1,80 +1,7 @@
-import { ipcMain } from "electron";
-import log from "electron-log";
-import { LocalModelListResponse, LocalModel } from "../ipc_types";
-
-const logger = log.scope("local_model_handlers");
-const OLLAMA_API_URL = "http://localhost:11434";
-
-interface OllamaModel {
-  name: string;
-  modified_at: string;
-  size: number;
-  digest: string;
-  details: {
-    format: string;
-    family: string;
-    families: string[];
-    parameter_size: string;
-    quantization_level: string;
-  };
-}
+import { registerOllamaHandlers } from "./local_model_ollama_handler";
+import { registerLMStudioHandlers } from "./local_model_lmstudio_handler";
 
 export function registerLocalModelHandlers() {
-  // Get list of models from Ollama
-  ipcMain.handle(
-    "local-models:list",
-    async (): Promise<LocalModelListResponse> => {
-      try {
-        const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
-
-        if (!response.ok) {
-          throw new Error(`Failed to fetch models: ${response.statusText}`);
-        }
-
-        const data = await response.json();
-        const ollamaModels: OllamaModel[] = data.models || [];
-
-        // Transform the data to return just what we need
-        const models: LocalModel[] = ollamaModels.map((model) => {
-          // Extract display name by cleaning up the model name
-          // For names like "llama2:latest" we want to show "Llama 2"
-          let displayName = model.name.split(":")[0]; // Remove tags like ":latest"
-
-          // Capitalize and add spaces for readability
-          displayName = displayName
-            .replace(/-/g, " ")
-            .replace(/(\d+)/, " $1 ") // Add spaces around numbers
-            .split(" ")
-            .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
-            .join(" ")
-            .trim();
-
-          return {
-            modelName: model.name, // The actual model name used for API calls
-            displayName, // The user-friendly name
-          };
-        });
-
-        logger.info(
-          `Successfully fetched ${models.length} local models from Ollama`
-        );
-        return { models, error: null };
-      } catch (error) {
-        if (
-          error instanceof TypeError &&
-          (error as Error).message.includes("fetch failed")
-        ) {
-          logger.error("Could not connect to Ollama. Is it running?");
-          return {
-            models: [],
-            error:
-              "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
-          };
-        }
-
-        logger.error("Error fetching local models:", error);
-        return { models: [], error: "Failed to fetch models from Ollama" };
-      }
-    }
-  );
+  registerOllamaHandlers();
+  registerLMStudioHandlers();
 }
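Both handlers return the LocalModel and LocalModelListResponse types from ../ipc_types. That file is not part of this commit; the following is only a sketch of what the diff implies those types look like (the provider field is what the new handlers add, and its optionality is an assumption):

// Sketch only: ../ipc_types is not shown in this commit, so this shape is
// inferred from how the handlers construct their return values.
export interface LocalModel {
  modelName: string; // exact name passed back to the provider's API
  displayName: string; // user-friendly label shown in the UI
  provider?: "ollama" | "lmstudio"; // set by the new per-provider handlers
}

export interface LocalModelListResponse {
  models: LocalModel[];
  error: string | null;
}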
src/ipc/handlers/local_model_lmstudio_handler.ts (new file)
@@ -0,0 +1,47 @@
import { ipcMain } from "electron";
import log from "electron-log";
import type { LocalModelListResponse, LocalModel } from "../ipc_types";

const logger = log.scope("lmstudio_handler");

export interface LMStudioModel {
  type: "llm" | "embedding" | string;
  id: string;
  object: string;
  publisher: string;
  state: "loaded" | "not-loaded";
  max_context_length: number;
  quantization: string;
  compatibility_type: string;
  arch: string;
  [key: string]: any;
}

export async function fetchLMStudioModels(): Promise<LocalModelListResponse> {
  try {
    const modelsResponse: Response = await fetch(
      "http://localhost:1234/api/v0/models"
    );
    if (!modelsResponse.ok) {
      throw new Error("Failed to fetch models from LM Studio");
    }
    const modelsJson = await modelsResponse.json();
    const downloadedModels = modelsJson.data as LMStudioModel[];
    // Only chat-capable models are surfaced; embedding models are filtered out.
    const models: LocalModel[] = downloadedModels
      .filter((model) => model.type === "llm")
      .map((model) => ({
        modelName: model.id,
        displayName: model.id,
        provider: "lmstudio",
      }));

    logger.info(`Successfully fetched ${models.length} models from LM Studio`);
    return { models, error: null };
  } catch (error) {
    logger.error("Error fetching models from LM Studio:", error);
    return { models: [], error: "Failed to fetch models from LM Studio" };
  }
}

export function registerLMStudioHandlers() {
  ipcMain.handle(
    "local-models:list-lmstudio",
    async (): Promise<LocalModelListResponse> => {
      return fetchLMStudioModels();
    }
  );
}
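fetchLMStudioModels assumes LM Studio's REST API (the /api/v0/models endpoint) wraps its model list in a data array. A payload consistent with the LMStudioModel interface above might look like the sketch below; all field values are illustrative, and only type and id are actually consumed by the handler:

// Illustrative /api/v0/models payload (values are made up for the example).
const exampleLMStudioResponse = {
  data: [
    {
      id: "qwen2.5-7b-instruct", // becomes both modelName and displayName
      object: "model",
      type: "llm", // kept: only "llm" entries survive the filter
      publisher: "lmstudio-community",
      arch: "qwen2",
      compatibility_type: "gguf",
      quantization: "Q4_K_M",
      state: "not-loaded",
      max_context_length: 32768,
    },
    {
      id: "text-embedding-nomic-embed-text-v1.5",
      object: "model",
      type: "embedding", // dropped by the type === "llm" filter
      publisher: "nomic-ai",
      arch: "nomic-bert",
      compatibility_type: "gguf",
      quantization: "Q4_0",
      state: "not-loaded",
      max_context_length: 2048,
    },
  ],
};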
src/ipc/handlers/local_model_ollama_handler.ts (new file)
@@ -0,0 +1,66 @@
import { ipcMain } from "electron";
import log from "electron-log";
import { LocalModelListResponse, LocalModel } from "../ipc_types";

const logger = log.scope("ollama_handler");

const OLLAMA_API_URL = "http://localhost:11434";

interface OllamaModel {
  name: string;
  modified_at: string;
  size: number;
  digest: string;
  details: {
    format: string;
    family: string;
    families: string[];
    parameter_size: string;
    quantization_level: string;
  };
}

export async function fetchOllamaModels(): Promise<LocalModelListResponse> {
  try {
    const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
    if (!response.ok) {
      throw new Error(`Failed to fetch models: ${response.statusText}`);
    }

    const data = await response.json();
    const ollamaModels: OllamaModel[] = data.models || [];

    const models: LocalModel[] = ollamaModels.map((model: OllamaModel) => {
      // Turn names like "llama2:latest" into a readable "Llama 2".
      const displayName = model.name
        .split(":")[0]
        .replace(/-/g, " ")
        .replace(/(\d+)/, " $1 ")
        .split(" ")
        .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
        .join(" ")
        .trim();

      return {
        modelName: model.name,
        displayName,
        provider: "ollama",
      };
    });
    logger.info(`Successfully fetched ${models.length} models from Ollama`);
    return { models, error: null };
  } catch (error) {
    if (
      error instanceof TypeError &&
      (error as Error).message.includes("fetch failed")
    ) {
      logger.error("Could not connect to Ollama");
      return {
        models: [],
        error:
          "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
      };
    }
    logger.error("Error fetching local models:", error);
    return { models: [], error: "Failed to fetch models from Ollama" };
  }
}

export function registerOllamaHandlers() {
  ipcMain.handle(
    "local-models:list-ollama",
    async (): Promise<LocalModelListResponse> => {
      return fetchOllamaModels();
    }
  );
}
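The display-name cleanup in fetchOllamaModels is easiest to follow with concrete inputs. Here is a standalone copy of the same chain, with worked examples in the comments:

// Standalone copy of the display-name transformation above, for illustration.
function toDisplayName(name: string): string {
  return name
    .split(":")[0] // "llama2:latest" -> "llama2" (drop the tag)
    .replace(/-/g, " ") // hyphens become spaces
    .replace(/(\d+)/, " $1 ") // pad the first digit run: "llama2" -> "llama 2 "
    .split(" ")
    .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
    .join(" ")
    .trim();
}

// toDisplayName("llama2:latest")       === "Llama 2"
// toDisplayName("deepseek-coder:6.7b") === "Deepseek Coder"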
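Taken together, the renderer now has one IPC channel per provider instead of the old combined "local-models:list". A sketch of how a renderer might call them through ipcRenderer follows; the channel names come from this commit, but the import path and everything else here are illustrative, since the preload/renderer wiring is outside this diff:

// Hypothetical renderer-side usage; channel names are from this commit,
// the ipc_types import path is an assumption.
import { ipcRenderer } from "electron";
import type { LocalModel, LocalModelListResponse } from "./ipc/ipc_types";

async function loadAllLocalModels(): Promise<LocalModel[]> {
  const [ollama, lmstudio]: LocalModelListResponse[] = await Promise.all([
    ipcRenderer.invoke("local-models:list-ollama"),
    ipcRenderer.invoke("local-models:list-lmstudio"),
  ]);
  // Each response carries its own error; a provider that isn't running
  // simply contributes an empty models array.
  return [...ollama.models, ...lmstudio.models];
}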