Add LM Studio support (#22)
committed by GitHub
parent 3529627172
commit 5fc49231ee
@@ -4,3 +4,7 @@ import { type LocalModel } from "@/ipc/ipc_types";
 export const localModelsAtom = atom<LocalModel[]>([]);
 export const localModelsLoadingAtom = atom<boolean>(false);
 export const localModelsErrorAtom = atom<Error | null>(null);
+
+export const lmStudioModelsAtom = atom<LocalModel[]>([]);
+export const lmStudioModelsLoadingAtom = atom<boolean>(false);
+export const lmStudioModelsErrorAtom = atom<Error | null>(null);
@@ -19,8 +19,9 @@ import {
 import { useEffect, useState } from "react";
 import { MODEL_OPTIONS } from "@/constants/models";
 import { useLocalModels } from "@/hooks/useLocalModels";
+import { useLocalLMSModels } from "@/hooks/useLMStudioModels";
 import { ChevronDown } from "lucide-react";
 import { LocalModel } from "@/ipc/ipc_types";

 interface ModelPickerProps {
   selectedModel: LargeLanguageModel;
   onModelSelect: (model: LargeLanguageModel) => void;
@@ -31,29 +32,47 @@ export function ModelPicker({
   onModelSelect,
 }: ModelPickerProps) {
   const [open, setOpen] = useState(false);

+  // Ollama Models Hook
   const {
-    models: localModels,
-    loading: localModelsLoading,
-    error: localModelsError,
-    loadModels,
+    models: ollamaModels,
+    loading: ollamaLoading,
+    error: ollamaError,
+    loadModels: loadOllamaModels,
   } = useLocalModels();

-  // Load local models when the component mounts or the dropdown opens
+  // LM Studio Models Hook
+  const {
+    models: lmStudioModels,
+    loading: lmStudioLoading,
+    error: lmStudioError,
+    loadModels: loadLMStudioModels,
+  } = useLocalLMSModels();
+
+  // Load models when the dropdown opens
   useEffect(() => {
     if (open) {
-      loadModels();
+      loadOllamaModels();
+      loadLMStudioModels();
     }
-  }, [open, loadModels]);
+  }, [open, loadOllamaModels, loadLMStudioModels]);

   // Get display name for the selected model
   const getModelDisplayName = () => {
     if (selectedModel.provider === "ollama") {
       return (
-        localModels.find((model) => model.modelName === selectedModel.name)
+        ollamaModels.find((model: LocalModel) => model.modelName === selectedModel.name)
           ?.displayName || selectedModel.name
       );
     }
+    if (selectedModel.provider === "lmstudio") {
+      return (
+        lmStudioModels.find((model: LocalModel) => model.modelName === selectedModel.name)
+          ?.displayName || selectedModel.name // Fallback to path if not found
+      );
+    }

     // Fallback for cloud models
     return (
       MODEL_OPTIONS[selectedModel.provider]?.find(
         (model) => model.name === selectedModel.name
@@ -63,8 +82,8 @@ export function ModelPicker({

   const modelDisplayName = getModelDisplayName();

-  // Flatten the model options into a single array with provider information
-  const allModels = Object.entries(MODEL_OPTIONS).flatMap(
+  // Flatten the cloud model options
+  const cloudModels = Object.entries(MODEL_OPTIONS).flatMap(
     ([provider, models]) =>
       models.map((model) => ({
         ...model,
@@ -72,9 +91,9 @@ export function ModelPicker({
       }))
   );

-  // Determine if we have local models available
-  const hasLocalModels =
-    !localModelsLoading && !localModelsError && localModels.length > 0;
+  // Determine availability of local models
+  const hasOllamaModels = !ollamaLoading && !ollamaError && ollamaModels.length > 0;
+  const hasLMStudioModels = !lmStudioLoading && !lmStudioError && lmStudioModels.length > 0;

   return (
     <DropdownMenu open={open} onOpenChange={setOpen}>
@@ -91,12 +110,12 @@ export function ModelPicker({
           <ChevronDown className="h-4 w-4" />
         </Button>
       </DropdownMenuTrigger>
-      <DropdownMenuContent className="w-56" align="start">
+      <DropdownMenuContent className="w-64" align="start"> {/* Increased width slightly */}
        <DropdownMenuLabel>Cloud Models</DropdownMenuLabel>
        <DropdownMenuSeparator />

        {/* Cloud models */}
-        {allModels.map((model) => (
+        {cloudModels.map((model) => (
          <Tooltip key={`${model.provider}-${model.name}`}>
            <TooltipTrigger asChild>
              <DropdownMenuItem
@@ -135,25 +154,29 @@ export function ModelPicker({

        <DropdownMenuSeparator />

-        {/* Ollama Models Dropdown */}
+        {/* Ollama Models SubMenu */}
        <DropdownMenuSub>
          <DropdownMenuSubTrigger
-            disabled={localModelsLoading || !hasLocalModels}
+            disabled={ollamaLoading && !hasOllamaModels} // Disable if loading and no models yet
            className="w-full font-normal"
          >
            <div className="flex flex-col items-start">
              <span>Local models (Ollama)</span>
-              {localModelsLoading ? (
+              {ollamaLoading ? (
                <span className="text-xs text-muted-foreground">
                  Loading...
                </span>
-              ) : !hasLocalModels ? (
+              ) : ollamaError ? (
+                <span className="text-xs text-red-500">
+                  Error loading
+                </span>
+              ) : !hasOllamaModels ? (
                <span className="text-xs text-muted-foreground">
                  None available
                </span>
              ) : (
                <span className="text-xs text-muted-foreground">
-                  {localModels.length} models
+                  {ollamaModels.length} models
                </span>
              )}
            </div>
@@ -162,27 +185,32 @@ export function ModelPicker({
            <DropdownMenuLabel>Ollama Models</DropdownMenuLabel>
            <DropdownMenuSeparator />

-            {localModelsLoading ? (
+            {ollamaLoading && ollamaModels.length === 0 ? ( // Show loading only if no models are loaded yet
              <div className="text-xs text-center py-2 text-muted-foreground">
                Loading models...
              </div>
-            ) : localModelsError ? (
-              <div className="text-xs text-center py-2 text-muted-foreground">
-                Error loading models
-              </div>
-            ) : localModels.length === 0 ? (
+            ) : ollamaError ? (
+              <div className="px-2 py-1.5 text-sm text-red-600">
+                <div className="flex flex-col">
+                  <span>Error loading models</span>
+                  <span className="text-xs text-muted-foreground">
+                    Is Ollama running?
+                  </span>
+                </div>
+              </div>
+            ) : !hasOllamaModels ? (
              <div className="px-2 py-1.5 text-sm">
                <div className="flex flex-col">
-                  <span>No local models available</span>
+                  <span>No local models found</span>
                  <span className="text-xs text-muted-foreground">
-                    Start Ollama to use local models
+                    Ensure Ollama is running and models are pulled.
                  </span>
                </div>
              </div>
            ) : (
-              localModels.map((model) => (
+              ollamaModels.map((model: LocalModel) => (
                <DropdownMenuItem
-                  key={`local-${model.modelName}`}
+                  key={`ollama-${model.modelName}`}
                  className={
                    selectedModel.provider === "ollama" &&
                    selectedModel.name === model.modelName
@@ -208,6 +236,92 @@ export function ModelPicker({
            )}
          </DropdownMenuSubContent>
        </DropdownMenuSub>
+
+        {/* LM Studio Models SubMenu */}
+        <DropdownMenuSub>
+          <DropdownMenuSubTrigger
+            disabled={lmStudioLoading && !hasLMStudioModels} // Disable if loading and no models yet
+            className="w-full font-normal"
+          >
+            <div className="flex flex-col items-start">
+              <span>Local models (LM Studio)</span>
+              {lmStudioLoading ? (
+                <span className="text-xs text-muted-foreground">
+                  Loading...
+                </span>
+              ) : lmStudioError ? (
+                <span className="text-xs text-red-500">
+                  Error loading
+                </span>
+              ) : !hasLMStudioModels ? (
+                <span className="text-xs text-muted-foreground">
+                  None available
+                </span>
+              ) : (
+                <span className="text-xs text-muted-foreground">
+                  {lmStudioModels.length} models
+                </span>
+              )}
+            </div>
+          </DropdownMenuSubTrigger>
+          <DropdownMenuSubContent className="w-56">
+            <DropdownMenuLabel>LM Studio Models</DropdownMenuLabel>
+            <DropdownMenuSeparator />
+
+            {lmStudioLoading && lmStudioModels.length === 0 ? ( // Show loading only if no models are loaded yet
+              <div className="text-xs text-center py-2 text-muted-foreground">
+                Loading models...
+              </div>
+            ) : lmStudioError ? (
+              <div className="px-2 py-1.5 text-sm text-red-600">
+                <div className="flex flex-col">
+                  <span>Error loading models</span>
+                  <span className="text-xs text-muted-foreground">
+                    {lmStudioError.message} {/* Display specific error */}
+                  </span>
+                </div>
+              </div>
+            ) : !hasLMStudioModels ? (
+              <div className="px-2 py-1.5 text-sm">
+                <div className="flex flex-col">
+                  <span>No loaded models found</span>
+                  <span className="text-xs text-muted-foreground">
+                    Ensure LM Studio is running and models are loaded.
+                  </span>
+                </div>
+              </div>
+            ) : (
+              lmStudioModels.map((model: LocalModel) => (
+                <DropdownMenuItem
+                  key={`lmstudio-${model.modelName}`}
+                  className={
+                    selectedModel.provider === "lmstudio" &&
+                    selectedModel.name === model.modelName
+                      ? "bg-secondary"
+                      : ""
+                  }
+                  onClick={() => {
+                    onModelSelect({
+                      name: model.modelName,
+                      provider: "lmstudio",
+                    });
+                    setOpen(false);
+                  }}
+                >
+                  <div className="flex flex-col">
+                    {/* Display the user-friendly name */}
+                    <span>{model.displayName}</span>
+                    {/* Show the path as secondary info */}
+                    <span className="text-xs text-muted-foreground truncate">
+                      {model.modelName}
+                    </span>
+                  </div>
+                </DropdownMenuItem>
+              ))
+            )}
+          </DropdownMenuSubContent>
+        </DropdownMenuSub>
+
      </DropdownMenuContent>
    </DropdownMenu>
  );
@@ -8,7 +8,7 @@ export interface ModelOption {
   contextWindow?: number;
 }

-type RegularModelProvider = Exclude<ModelProvider, "ollama">;
+type RegularModelProvider = Exclude<ModelProvider, "ollama" | "lmstudio">;
 export const MODEL_OPTIONS: Record<RegularModelProvider, ModelOption[]> = {
   openai: [
     // https://platform.openai.com/docs/models/gpt-4.1
src/hooks/useLMStudioModels.ts (new file, 43 lines)
@@ -0,0 +1,43 @@
+import { useCallback } from "react";
+import { useAtom } from "jotai";
+import {
+  lmStudioModelsAtom,
+  lmStudioModelsLoadingAtom,
+  lmStudioModelsErrorAtom,
+} from "@/atoms/localModelsAtoms";
+import { IpcClient } from "@/ipc/ipc_client";
+
+export function useLocalLMSModels() {
+  const [models, setModels] = useAtom(lmStudioModelsAtom);
+  const [loading, setLoading] = useAtom(lmStudioModelsLoadingAtom);
+  const [error, setError] = useAtom(lmStudioModelsErrorAtom);
+
+  const ipcClient = IpcClient.getInstance();
+
+  /**
+   * Load local models from LM Studio
+   */
+  const loadModels = useCallback(async () => {
+    setLoading(true);
+    try {
+      const modelList = await ipcClient.listLocalLMStudioModels();
+      setModels(modelList);
+      setError(null);
+      return modelList;
+    } catch (error) {
+      console.error("Error loading local LM Studio models:", error);
+      setError(error instanceof Error ? error : new Error(String(error)));
+      return [];
+    } finally {
+      setLoading(false);
+    }
+  }, [ipcClient, setModels, setError, setLoading]);
+
+  return {
+    models,
+    loading,
+    error,
+    loadModels,
+  };
+}
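For reference, a minimal sketch of how this hook could be consumed outside of ModelPicker; the component name is hypothetical, everything else comes from the hook above:

import { useEffect } from "react";
import { useLocalLMSModels } from "@/hooks/useLMStudioModels";

// Hypothetical consumer: load LM Studio models on mount and list them.
export function LMStudioModelList() {
  const { models, loading, error, loadModels } = useLocalLMSModels();

  useEffect(() => {
    loadModels();
  }, [loadModels]);

  if (loading) return <div>Loading models...</div>;
  if (error) return <div>Error: {error.message}</div>;
  return (
    <ul>
      {models.map((model) => (
        <li key={model.modelName}>{model.displayName}</li>
      ))}
    </ul>
  );
}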
@@ -20,13 +20,13 @@ export function useLocalModels() {
   const loadModels = useCallback(async () => {
     setLoading(true);
     try {
-      const modelList = await ipcClient.listLocalModels();
+      const modelList = await ipcClient.listLocalOllamaModels();
       setModels(modelList);
       setError(null);

       return modelList;
     } catch (error) {
-      console.error("Error loading local models:", error);
+      console.error("Error loading local Ollama models:", error);
       setError(error instanceof Error ? error : new Error(String(error)));
       return [];
     } finally {
@@ -1,80 +1,7 @@
-import { ipcMain } from "electron";
-import log from "electron-log";
-import { LocalModelListResponse, LocalModel } from "../ipc_types";
-
-const logger = log.scope("local_model_handlers");
-const OLLAMA_API_URL = "http://localhost:11434";
-
-interface OllamaModel {
-  name: string;
-  modified_at: string;
-  size: number;
-  digest: string;
-  details: {
-    format: string;
-    family: string;
-    families: string[];
-    parameter_size: string;
-    quantization_level: string;
-  };
-}
+import { registerOllamaHandlers } from "./local_model_ollama_handler";
+import { registerLMStudioHandlers } from "./local_model_lmstudio_handler";

 export function registerLocalModelHandlers() {
-  // Get list of models from Ollama
-  ipcMain.handle(
-    "local-models:list",
-    async (): Promise<LocalModelListResponse> => {
-      try {
-        const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
-
-        if (!response.ok) {
-          throw new Error(`Failed to fetch models: ${response.statusText}`);
-        }
-
-        const data = await response.json();
-        const ollamaModels: OllamaModel[] = data.models || [];
-
-        // Transform the data to return just what we need
-        const models: LocalModel[] = ollamaModels.map((model) => {
-          // Extract display name by cleaning up the model name
-          // For names like "llama2:latest" we want to show "Llama 2"
-          let displayName = model.name.split(":")[0]; // Remove tags like ":latest"
-
-          // Capitalize and add spaces for readability
-          displayName = displayName
-            .replace(/-/g, " ")
-            .replace(/(\d+)/, " $1 ") // Add spaces around numbers
-            .split(" ")
-            .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
-            .join(" ")
-            .trim();
-
-          return {
-            modelName: model.name, // The actual model name used for API calls
-            displayName, // The user-friendly name
-          };
-        });
-
-        logger.info(
-          `Successfully fetched ${models.length} local models from Ollama`
-        );
-        return { models, error: null };
-      } catch (error) {
-        if (
-          error instanceof TypeError &&
-          (error as Error).message.includes("fetch failed")
-        ) {
-          logger.error("Could not connect to Ollama. Is it running?");
-          return {
-            models: [],
-            error:
-              "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
-          };
-        }
-
-        logger.error("Error fetching local models:", error);
-        return { models: [], error: "Failed to fetch models from Ollama" };
-      }
-    }
-  );
+  registerOllamaHandlers();
+  registerLMStudioHandlers();
 }
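A hedged sketch of where registerLocalModelHandlers would typically be invoked; the call site and import path are not part of this diff and the app.whenReady() placement is an assumption:

import { app } from "electron";
import { registerLocalModelHandlers } from "./ipc/handlers/local_model_handlers";

// Assumed startup wiring: register both channels before any renderer invokes them.
app.whenReady().then(() => {
  registerLocalModelHandlers(); // registers "local-models:list-ollama" and "local-models:list-lmstudio"
});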
src/ipc/handlers/local_model_lmstudio_handler.ts (new file, 47 lines)
@@ -0,0 +1,47 @@
+import { ipcMain } from "electron";
+import log from "electron-log";
+import type { LocalModelListResponse, LocalModel } from "../ipc_types";
+
+const logger = log.scope("lmstudio_handler");
+
+export interface LMStudioModel {
+  type: "llm" | "embedding" | string;
+  id: string;
+  object: string;
+  publisher: string;
+  state: "loaded" | "not-loaded";
+  max_context_length: number;
+  quantization: string;
+  compatibility_type: string;
+  arch: string;
+  [key: string]: any;
+}
+
+export async function fetchLMStudioModels(): Promise<LocalModelListResponse> {
+  try {
+    const modelsResponse: Response = await fetch("http://localhost:1234/api/v0/models");
+    if (!modelsResponse.ok) {
+      throw new Error("Failed to fetch models from LM Studio");
+    }
+    const modelsJson = await modelsResponse.json();
+    const downloadedModels = modelsJson.data as LMStudioModel[];
+    const models: LocalModel[] = downloadedModels
+      .filter((model: any) => model.type === "llm")
+      .map((model: any) => ({
+        modelName: model.id,
+        displayName: model.id,
+        provider: "lmstudio"
+      }));
+
+    logger.info(`Successfully fetched ${models.length} models from LM Studio`);
+    return { models, error: null };
+  } catch (error) {
+    return { models: [], error: "Failed to fetch models from LM Studio" };
+  }
+}
+
+export function registerLMStudioHandlers() {
+  ipcMain.handle('local-models:list-lmstudio', async (): Promise<LocalModelListResponse> => {
+    return fetchLMStudioModels();
+  });
+}
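To sanity-check the endpoint this handler depends on, a standalone sketch that mirrors the same request and filter outside Electron (assumes LM Studio's local server is enabled on its default port; run with a TS runner such as tsx):

// Mirrors fetchLMStudioModels: query the REST API, keep only chat-capable models.
const res = await fetch("http://localhost:1234/api/v0/models");
if (!res.ok) throw new Error(`LM Studio returned ${res.status}`);
const { data } = await res.json();
console.log(data.filter((m: any) => m.type === "llm").map((m: any) => m.id));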
src/ipc/handlers/local_model_ollama_handler.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
+import { ipcMain } from "electron";
+import log from "electron-log";
+import { LocalModelListResponse, LocalModel } from "../ipc_types";
+
+const logger = log.scope("ollama_handler");
+
+const OLLAMA_API_URL = "http://localhost:11434";
+
+interface OllamaModel {
+  name: string;
+  modified_at: string;
+  size: number;
+  digest: string;
+  details: {
+    format: string;
+    family: string;
+    families: string[];
+    parameter_size: string;
+    quantization_level: string;
+  };
+}
+
+export async function fetchOllamaModels(): Promise<LocalModelListResponse> {
+  try {
+    const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
+    if (!response.ok) {
+      throw new Error(`Failed to fetch models: ${response.statusText}`);
+    }
+
+    const data = await response.json();
+    const ollamaModels: OllamaModel[] = data.models || [];
+
+    const models: LocalModel[] = ollamaModels.map((model: OllamaModel) => {
+      const displayName = model.name.split(':')[0]
+        .replace(/-/g, ' ')
+        .replace(/(\d+)/, ' $1 ')
+        .split(' ')
+        .map(word => word.charAt(0).toUpperCase() + word.slice(1))
+        .join(' ')
+        .trim();
+
+      return {
+        modelName: model.name,
+        displayName,
+        provider: "ollama",
+      };
+    });
+    logger.info(`Successfully fetched ${models.length} models from Ollama`);
+    return { models, error: null };
+  } catch (error) {
+    if (error instanceof TypeError && (error as Error).message.includes('fetch failed')) {
+      logger.error("Could not connect to Ollama");
+      return {
+        models: [],
+        error: "Could not connect to Ollama. Make sure it's running at http://localhost:11434"
+      };
+    }
+    return { models: [], error: "Failed to fetch models from Ollama" };
+  }
+}
+
+export function registerOllamaHandlers() {
+  ipcMain.handle('local-models:list-ollama', async (): Promise<LocalModelListResponse> => {
+    return fetchOllamaModels();
+  });
+}
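The display-name cleanup is easiest to see on concrete inputs; a small extraction of the pipeline above, with illustrative model names:

// Same transformation as fetchOllamaModels, isolated for demonstration.
const toDisplayName = (name: string): string =>
  name.split(':')[0]          // drop tags like ":latest"
    .replace(/-/g, ' ')        // dashes to spaces
    .replace(/(\d+)/, ' $1 ')  // pad the first run of digits with spaces
    .split(' ')
    .map(word => word.charAt(0).toUpperCase() + word.slice(1))
    .join(' ')
    .trim();

console.log(toDisplayName("llama2:latest"));    // "Llama 2"
console.log(toDisplayName("mistral-nemo:12b")); // "Mistral Nemo"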
@@ -785,14 +785,28 @@ export class IpcClient {
     }
   }

-  public async listLocalModels(): Promise<LocalModel[]> {
-    const { models, error } = (await this.ipcRenderer.invoke(
-      "local-models:list"
-    )) as LocalModelListResponse;
-    if (error) {
-      throw new Error(error);
+  public async listLocalOllamaModels(): Promise<LocalModel[]> {
+    try {
+      const response = await this.ipcRenderer.invoke("local-models:list-ollama");
+      return response?.models || [];
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Failed to fetch Ollama models: ${error.message}`);
+      }
+      throw new Error('Failed to fetch Ollama models: Unknown error occurred');
+    }
+  }
+
+  public async listLocalLMStudioModels(): Promise<LocalModel[]> {
+    try {
+      const response = await this.ipcRenderer.invoke("local-models:list-lmstudio");
+      return response?.models || [];
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Failed to fetch LM Studio models: ${error.message}`);
+      }
+      throw new Error('Failed to fetch LM Studio models: Unknown error occurred');
     }
-    return models;
   }

   // Listen for deep link events
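From the renderer, the two new methods can be exercised together; a minimal sketch using only APIs shown in this diff:

// Assumed renderer-side check: fetch both local providers in parallel.
const ipc = IpcClient.getInstance();
const [ollamaModels, lmStudioModels] = await Promise.all([
  ipc.listLocalOllamaModels(),
  ipc.listLocalLMStudioModels(),
]);
console.log(`${ollamaModels.length} Ollama models, ${lmStudioModels.length} LM Studio models`);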
@@ -94,6 +94,7 @@ export interface SystemDebugInfo {
 }

 export interface LocalModel {
+  provider: "ollama" | "lmstudio";
   modelName: string; // Name used for API calls (e.g., "llama2:latest")
   displayName: string; // User-friendly name (e.g., "Llama 2")
 }
@@ -3,7 +3,7 @@ import { createGoogleGenerativeAI as createGoogle } from "@ai-sdk/google";
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
 import { createOllama } from "ollama-ai-provider";
-
+import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
 import type { LargeLanguageModel, UserSettings } from "../../lib/schemas";
 import {
   PROVIDER_TO_ENV_VAR,
@@ -82,8 +82,14 @@ export function getModelClient(
     case "ollama": {
       const provider = createOllama();
       return provider(model.name);
     }
-    default: {
+    case "lmstudio": {
+      // Using LM Studio's OpenAI compatible API
+      const baseURL = "http://localhost:1234/v1"; // Default LM Studio OpenAI API URL
+      const provider = createOpenAICompatible({ name: "lmstudio", baseURL });
+      return provider(model.name);
+    }
+    default: {
+      // Ensure exhaustive check if more providers are added
       const _exhaustiveCheck: never = model.provider;
       throw new Error(`Unsupported model provider: ${model.provider}`);
@@ -36,6 +36,7 @@ export const ModelProviderSchema = z.enum([
   "auto",
   "openrouter",
   "ollama",
+  "lmstudio",
 ]);

 /**
@@ -49,7 +49,8 @@ const validInvokeChannels = [
   "supabase:list-projects",
   "supabase:set-app-project",
   "supabase:unset-app-project",
-  "local-models:list",
+  "local-models:list-ollama",
+  "local-models:list-lmstudio",
   "window:minimize",
   "window:maximize",
   "window:close",