Add LM Studio support (#22)
committed by GitHub
parent 3529627172
commit 5fc49231ee

package-lock.json (generated, 46 lines changed)
@@ -12,6 +12,7 @@
         "@ai-sdk/anthropic": "^1.2.8",
         "@ai-sdk/google": "^1.2.10",
         "@ai-sdk/openai": "^1.3.7",
+        "@ai-sdk/openai-compatible": "^0.2.13",
         "@biomejs/biome": "^1.9.4",
         "@dyad-sh/supabase-management-js": "v1.0.0",
         "@monaco-editor/react": "^4.7.0-rc.0",
@@ -104,6 +105,51 @@
                 "node": ">=20"
             }
         },
+        "node_modules/@ai-sdk/openai-compatible": {
+            "version": "0.2.13",
+            "resolved": "https://registry.npmjs.org/@ai-sdk/openai-compatible/-/openai-compatible-0.2.13.tgz",
+            "integrity": "sha512-tB+lL8Z3j0qDod/mvxwjrPhbLUHp/aQW+NvMoJaqeTtP+Vmv5qR800pncGczxn5WN0pllQm+7aIRDnm69XeSbg==",
+            "dev": true,
+            "dependencies": {
+                "@ai-sdk/provider": "1.1.3",
+                "@ai-sdk/provider-utils": "2.2.7"
+            },
+            "engines": {
+                "node": ">=18"
+            },
+            "peerDependencies": {
+                "zod": "^3.0.0"
+            }
+        },
+        "node_modules/@ai-sdk/openai-compatible/node_modules/@ai-sdk/provider": {
+            "version": "1.1.3",
+            "resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-1.1.3.tgz",
+            "integrity": "sha512-qZMxYJ0qqX/RfnuIaab+zp8UAeJn/ygXXAffR5I4N0n1IrvA6qBsjc8hXLmBiMV2zoXlifkacF7sEFnYnjBcqg==",
+            "dev": true,
+            "dependencies": {
+                "json-schema": "^0.4.0"
+            },
+            "engines": {
+                "node": ">=18"
+            }
+        },
+        "node_modules/@ai-sdk/openai-compatible/node_modules/@ai-sdk/provider-utils": {
+            "version": "2.2.7",
+            "resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-2.2.7.tgz",
+            "integrity": "sha512-kM0xS3GWg3aMChh9zfeM+80vEZfXzR3JEUBdycZLtbRZ2TRT8xOj3WodGHPb06sUK5yD7pAXC/P7ctsi2fvUGQ==",
+            "dev": true,
+            "dependencies": {
+                "@ai-sdk/provider": "1.1.3",
+                "nanoid": "^3.3.8",
+                "secure-json-parse": "^2.7.0"
+            },
+            "engines": {
+                "node": ">=18"
+            },
+            "peerDependencies": {
+                "zod": "^3.23.8"
+            }
+        },
         "node_modules/@ai-sdk/anthropic": {
             "version": "1.2.8",
             "resolved": "https://registry.npmjs.org/@ai-sdk/anthropic/-/anthropic-1.2.8.tgz",

@@ -63,6 +63,7 @@
     "vitest": "^3.1.1"
   },
   "dependencies": {
+    "@ai-sdk/openai-compatible": "^0.2.13",
     "@ai-sdk/anthropic": "^1.2.8",
     "@ai-sdk/google": "^1.2.10",
     "@ai-sdk/openai": "^1.3.7",

@@ -4,3 +4,7 @@ import { type LocalModel } from "@/ipc/ipc_types";
 export const localModelsAtom = atom<LocalModel[]>([]);
 export const localModelsLoadingAtom = atom<boolean>(false);
 export const localModelsErrorAtom = atom<Error | null>(null);
+
+export const lmStudioModelsAtom = atom<LocalModel[]>([]);
+export const lmStudioModelsLoadingAtom = atom<boolean>(false);
+export const lmStudioModelsErrorAtom = atom<Error | null>(null);
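
A minimal sketch of reading the new atoms from a component (illustrative only; the component below is not part of this commit):

import { useAtomValue } from "jotai";
import {
  lmStudioModelsAtom,
  lmStudioModelsLoadingAtom,
} from "@/atoms/localModelsAtoms";

// Renders a one-line summary of the LM Studio model list.
function LMStudioModelCount() {
  const models = useAtomValue(lmStudioModelsAtom);
  const loading = useAtomValue(lmStudioModelsLoadingAtom);
  return <span>{loading ? "Loading..." : `${models.length} models`}</span>;
}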

@@ -19,8 +19,9 @@ import {
 import { useEffect, useState } from "react";
 import { MODEL_OPTIONS } from "@/constants/models";
 import { useLocalModels } from "@/hooks/useLocalModels";
+import { useLocalLMSModels } from "@/hooks/useLMStudioModels";
 import { ChevronDown } from "lucide-react";
+import { LocalModel } from "@/ipc/ipc_types";
 interface ModelPickerProps {
   selectedModel: LargeLanguageModel;
   onModelSelect: (model: LargeLanguageModel) => void;

@@ -31,29 +32,47 @@ export function ModelPicker({
   onModelSelect,
 }: ModelPickerProps) {
   const [open, setOpen] = useState(false);

+  // Ollama Models Hook
   const {
-    models: localModels,
-    loading: localModelsLoading,
-    error: localModelsError,
-    loadModels,
+    models: ollamaModels,
+    loading: ollamaLoading,
+    error: ollamaError,
+    loadModels: loadOllamaModels,
   } = useLocalModels();

-  // Load local models when the component mounts or the dropdown opens
+  // LM Studio Models Hook
+  const {
+    models: lmStudioModels,
+    loading: lmStudioLoading,
+    error: lmStudioError,
+    loadModels: loadLMStudioModels,
+  } = useLocalLMSModels();
+
+  // Load models when the dropdown opens
   useEffect(() => {
     if (open) {
-      loadModels();
+      loadOllamaModels();
+      loadLMStudioModels();
     }
-  }, [open, loadModels]);
+  }, [open, loadOllamaModels, loadLMStudioModels]);

   // Get display name for the selected model
   const getModelDisplayName = () => {
     if (selectedModel.provider === "ollama") {
       return (
-        localModels.find((model) => model.modelName === selectedModel.name)
+        ollamaModels.find((model: LocalModel) => model.modelName === selectedModel.name)
           ?.displayName || selectedModel.name
       );
     }
+    if (selectedModel.provider === "lmstudio") {
+      return (
+        lmStudioModels.find((model: LocalModel) => model.modelName === selectedModel.name)
+          ?.displayName || selectedModel.name // Fallback to path if not found
+      );
+    }
+
+    // Fallback for cloud models
     return (
       MODEL_OPTIONS[selectedModel.provider]?.find(
         (model) => model.name === selectedModel.name
@@ -63,8 +82,8 @@ export function ModelPicker({

   const modelDisplayName = getModelDisplayName();

-  // Flatten the model options into a single array with provider information
-  const allModels = Object.entries(MODEL_OPTIONS).flatMap(
+  // Flatten the cloud model options
+  const cloudModels = Object.entries(MODEL_OPTIONS).flatMap(
     ([provider, models]) =>
       models.map((model) => ({
         ...model,

@@ -72,9 +91,9 @@
       }))
   );

-  // Determine if we have local models available
-  const hasLocalModels =
-    !localModelsLoading && !localModelsError && localModels.length > 0;
+  // Determine availability of local models
+  const hasOllamaModels = !ollamaLoading && !ollamaError && ollamaModels.length > 0;
+  const hasLMStudioModels = !lmStudioLoading && !lmStudioError && lmStudioModels.length > 0;

   return (
     <DropdownMenu open={open} onOpenChange={setOpen}>
@@ -91,12 +110,12 @@ export function ModelPicker({
           <ChevronDown className="h-4 w-4" />
         </Button>
       </DropdownMenuTrigger>
-      <DropdownMenuContent className="w-56" align="start">
+      <DropdownMenuContent className="w-64" align="start"> {/* Increased width slightly */}
         <DropdownMenuLabel>Cloud Models</DropdownMenuLabel>
         <DropdownMenuSeparator />

        {/* Cloud models */}
-        {allModels.map((model) => (
+        {cloudModels.map((model) => (
           <Tooltip key={`${model.provider}-${model.name}`}>
             <TooltipTrigger asChild>
               <DropdownMenuItem
@@ -135,25 +154,29 @@ export function ModelPicker({

         <DropdownMenuSeparator />

-        {/* Ollama Models Dropdown */}
+        {/* Ollama Models SubMenu */}
         <DropdownMenuSub>
           <DropdownMenuSubTrigger
-            disabled={localModelsLoading || !hasLocalModels}
+            disabled={ollamaLoading && !hasOllamaModels} // Disable if loading and no models yet
             className="w-full font-normal"
           >
             <div className="flex flex-col items-start">
               <span>Local models (Ollama)</span>
-              {localModelsLoading ? (
+              {ollamaLoading ? (
                 <span className="text-xs text-muted-foreground">
                   Loading...
                 </span>
-              ) : !hasLocalModels ? (
+              ) : ollamaError ? (
+                <span className="text-xs text-red-500">
+                  Error loading
+                </span>
+              ) : !hasOllamaModels ? (
                 <span className="text-xs text-muted-foreground">
                   None available
                 </span>
               ) : (
                 <span className="text-xs text-muted-foreground">
-                  {localModels.length} models
+                  {ollamaModels.length} models
                 </span>
               )}
             </div>
@@ -162,27 +185,32 @@ export function ModelPicker({
             <DropdownMenuLabel>Ollama Models</DropdownMenuLabel>
             <DropdownMenuSeparator />

-            {localModelsLoading ? (
+            {ollamaLoading && ollamaModels.length === 0 ? ( // Show loading only if no models are loaded yet
               <div className="text-xs text-center py-2 text-muted-foreground">
                 Loading models...
               </div>
-            ) : localModelsError ? (
-              <div className="text-xs text-center py-2 text-muted-foreground">
-                Error loading models
+            ) : ollamaError ? (
+              <div className="px-2 py-1.5 text-sm text-red-600">
+                <div className="flex flex-col">
+                  <span>Error loading models</span>
+                  <span className="text-xs text-muted-foreground">
+                    Is Ollama running?
+                  </span>
+                </div>
               </div>
-            ) : localModels.length === 0 ? (
+            ) : !hasOllamaModels ? (
               <div className="px-2 py-1.5 text-sm">
                 <div className="flex flex-col">
-                  <span>No local models available</span>
+                  <span>No local models found</span>
                   <span className="text-xs text-muted-foreground">
-                    Start Ollama to use local models
+                    Ensure Ollama is running and models are pulled.
                   </span>
                 </div>
               </div>
             ) : (
-              localModels.map((model) => (
+              ollamaModels.map((model: LocalModel) => (
                 <DropdownMenuItem
-                  key={`local-${model.modelName}`}
+                  key={`ollama-${model.modelName}`}
                   className={
                     selectedModel.provider === "ollama" &&
                     selectedModel.name === model.modelName
@@ -208,6 +236,92 @@ export function ModelPicker({
             )}
           </DropdownMenuSubContent>
         </DropdownMenuSub>

+        {/* LM Studio Models SubMenu */}
+        <DropdownMenuSub>
+          <DropdownMenuSubTrigger
+            disabled={lmStudioLoading && !hasLMStudioModels} // Disable if loading and no models yet
+            className="w-full font-normal"
+          >
+            <div className="flex flex-col items-start">
+              <span>Local models (LM Studio)</span>
+              {lmStudioLoading ? (
+                <span className="text-xs text-muted-foreground">
+                  Loading...
+                </span>
+              ) : lmStudioError ? (
+                <span className="text-xs text-red-500">
+                  Error loading
+                </span>
+              ) : !hasLMStudioModels ? (
+                <span className="text-xs text-muted-foreground">
+                  None available
+                </span>
+              ) : (
+                <span className="text-xs text-muted-foreground">
+                  {lmStudioModels.length} models
+                </span>
+              )}
+            </div>
+          </DropdownMenuSubTrigger>
+          <DropdownMenuSubContent className="w-56">
+            <DropdownMenuLabel>LM Studio Models</DropdownMenuLabel>
+            <DropdownMenuSeparator />
+
+            {lmStudioLoading && lmStudioModels.length === 0 ? ( // Show loading only if no models are loaded yet
+              <div className="text-xs text-center py-2 text-muted-foreground">
+                Loading models...
+              </div>
+            ) : lmStudioError ? (
+              <div className="px-2 py-1.5 text-sm text-red-600">
+                <div className="flex flex-col">
+                  <span>Error loading models</span>
+                  <span className="text-xs text-muted-foreground">
+                    {lmStudioError.message} {/* Display specific error */}
+                  </span>
+                </div>
+              </div>
+            ) : !hasLMStudioModels ? (
+              <div className="px-2 py-1.5 text-sm">
+                <div className="flex flex-col">
+                  <span>No loaded models found</span>
+                  <span className="text-xs text-muted-foreground">
+                    Ensure LM Studio is running and models are loaded.
+                  </span>
+                </div>
+              </div>
+            ) : (
+              lmStudioModels.map((model: LocalModel) => (
+                <DropdownMenuItem
+                  key={`lmstudio-${model.modelName}`}
+                  className={
+                    selectedModel.provider === "lmstudio" &&
+                    selectedModel.name === model.modelName
+                      ? "bg-secondary"
+                      : ""
+                  }
+                  onClick={() => {
+                    onModelSelect({
+                      name: model.modelName,
+                      provider: "lmstudio",
+                    });
+                    setOpen(false);
+                  }}
+                >
+                  <div className="flex flex-col">
+                    {/* Display the user-friendly name */}
+                    <span>{model.displayName}</span>
+                    {/* Show the path as secondary info */}
+                    <span className="text-xs text-muted-foreground truncate">
+                      {model.modelName}
+                    </span>
+                  </div>
+                </DropdownMenuItem>
+              ))
+            )}
+          </DropdownMenuSubContent>
+        </DropdownMenuSub>
+
       </DropdownMenuContent>
     </DropdownMenu>
   );

@@ -8,7 +8,7 @@ export interface ModelOption {
   contextWindow?: number;
 }

-type RegularModelProvider = Exclude<ModelProvider, "ollama">;
+type RegularModelProvider = Exclude<ModelProvider, "ollama" | "lmstudio">;
 export const MODEL_OPTIONS: Record<RegularModelProvider, ModelOption[]> = {
   openai: [
     // https://platform.openai.com/docs/models/gpt-4.1
src/hooks/useLMStudioModels.ts (new file, 43 lines)
@@ -0,0 +1,43 @@
+import { useCallback } from "react";
+import { useAtom } from "jotai";
+import {
+  lmStudioModelsAtom,
+  lmStudioModelsLoadingAtom,
+  lmStudioModelsErrorAtom,
+} from "@/atoms/localModelsAtoms";
+import { IpcClient } from "@/ipc/ipc_client";
+
+export function useLocalLMSModels() {
+  const [models, setModels] = useAtom(lmStudioModelsAtom);
+  const [loading, setLoading] = useAtom(lmStudioModelsLoadingAtom);
+  const [error, setError] = useAtom(lmStudioModelsErrorAtom);
+
+  const ipcClient = IpcClient.getInstance();
+
+  /**
+   * Load the list of local models from LM Studio
+   */
+  const loadModels = useCallback(async () => {
+    setLoading(true);
+    try {
+      const modelList = await ipcClient.listLocalLMStudioModels();
+      setModels(modelList);
+      setError(null);
+
+      return modelList;
+    } catch (error) {
+      console.error("Error loading local LM Studio models:", error);
+      setError(error instanceof Error ? error : new Error(String(error)));
+      return [];
+    } finally {
+      setLoading(false);
+    }
+  }, [ipcClient, setModels, setError, setLoading]);
+
+  return {
+    models,
+    loading,
+    error,
+    loadModels,
+  };
+}
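
A usage sketch for the new hook (illustrative only; assumes an `open` flag from the surrounding component, mirroring how ModelPicker consumes it):

const { models, loading, error, loadModels } = useLocalLMSModels();

useEffect(() => {
  // Re-fetch the LM Studio model list each time the picker opens
  if (open) {
    loadModels();
  }
}, [open, loadModels]);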

@@ -20,13 +20,13 @@ export function useLocalModels() {
   const loadModels = useCallback(async () => {
     setLoading(true);
     try {
-      const modelList = await ipcClient.listLocalModels();
+      const modelList = await ipcClient.listLocalOllamaModels();
       setModels(modelList);
       setError(null);

       return modelList;
     } catch (error) {
-      console.error("Error loading local models:", error);
+      console.error("Error loading local Ollama models:", error);
       setError(error instanceof Error ? error : new Error(String(error)));
       return [];
     } finally {

@@ -1,80 +1,7 @@
-import { ipcMain } from "electron";
-import log from "electron-log";
-import { LocalModelListResponse, LocalModel } from "../ipc_types";
-
-const logger = log.scope("local_model_handlers");
-const OLLAMA_API_URL = "http://localhost:11434";
-
-interface OllamaModel {
-  name: string;
-  modified_at: string;
-  size: number;
-  digest: string;
-  details: {
-    format: string;
-    family: string;
-    families: string[];
-    parameter_size: string;
-    quantization_level: string;
-  };
-}
+import { registerOllamaHandlers } from "./local_model_ollama_handler";
+import { registerLMStudioHandlers } from "./local_model_lmstudio_handler";

 export function registerLocalModelHandlers() {
-  // Get list of models from Ollama
-  ipcMain.handle(
-    "local-models:list",
-    async (): Promise<LocalModelListResponse> => {
-      try {
-        const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
-
-        if (!response.ok) {
-          throw new Error(`Failed to fetch models: ${response.statusText}`);
-        }
-
-        const data = await response.json();
-        const ollamaModels: OllamaModel[] = data.models || [];
-
-        // Transform the data to return just what we need
-        const models: LocalModel[] = ollamaModels.map((model) => {
-          // Extract display name by cleaning up the model name
-          // For names like "llama2:latest" we want to show "Llama 2"
-          let displayName = model.name.split(":")[0]; // Remove tags like ":latest"
-
-          // Capitalize and add spaces for readability
-          displayName = displayName
-            .replace(/-/g, " ")
-            .replace(/(\d+)/, " $1 ") // Add spaces around numbers
-            .split(" ")
-            .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
-            .join(" ")
-            .trim();
-
-          return {
-            modelName: model.name, // The actual model name used for API calls
-            displayName, // The user-friendly name
-          };
-        });
-
-        logger.info(
-          `Successfully fetched ${models.length} local models from Ollama`
-        );
-        return { models, error: null };
-      } catch (error) {
-        if (
-          error instanceof TypeError &&
-          (error as Error).message.includes("fetch failed")
-        ) {
-          logger.error("Could not connect to Ollama. Is it running?");
-          return {
-            models: [],
-            error:
-              "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
-          };
-        }
-
-        logger.error("Error fetching local models:", error);
-        return { models: [], error: "Failed to fetch models from Ollama" };
-      }
-    }
-  );
+  registerOllamaHandlers();
+  registerLMStudioHandlers();
 }

src/ipc/handlers/local_model_lmstudio_handler.ts (new file, 47 lines)
@@ -0,0 +1,47 @@
+import { ipcMain } from "electron";
+import log from "electron-log";
+import type { LocalModelListResponse, LocalModel } from "../ipc_types";
+
+const logger = log.scope("lmstudio_handler");
+
+export interface LMStudioModel {
+  type: "llm" | "embedding" | string;
+  id: string;
+  object: string;
+  publisher: string;
+  state: "loaded" | "not-loaded";
+  max_context_length: number;
+  quantization: string;
+  compatibility_type: string;
+  arch: string;
+  [key: string]: any;
+}
+
+export async function fetchLMStudioModels(): Promise<LocalModelListResponse> {
+  try {
+    const modelsResponse: Response = await fetch("http://localhost:1234/api/v0/models");
+    if (!modelsResponse.ok) {
+      throw new Error("Failed to fetch models from LM Studio");
+    }
+    const modelsJson = await modelsResponse.json();
+    const downloadedModels = modelsJson.data as LMStudioModel[];
+    // Only surface chat-capable models, not embedding models
+    const models: LocalModel[] = downloadedModels
+      .filter((model) => model.type === "llm")
+      .map((model) => ({
+        modelName: model.id,
+        displayName: model.id,
+        provider: "lmstudio",
+      }));
+
+    logger.info(`Successfully fetched ${models.length} models from LM Studio`);
+    return { models, error: null };
+  } catch (error) {
+    logger.error("Error fetching models from LM Studio:", error);
+    return { models: [], error: "Failed to fetch models from LM Studio" };
+  }
+}
+
+export function registerLMStudioHandlers() {
+  ipcMain.handle("local-models:list-lmstudio", async (): Promise<LocalModelListResponse> => {
+    return fetchLMStudioModels();
+  });
+}
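
For context, a single entry from LM Studio's /api/v0/models endpoint looks roughly like this — a hand-written sample typed with the interface above; the field values are illustrative, not taken from this commit:

const sampleEntry: LMStudioModel = {
  type: "llm",
  id: "qwen2.5-7b-instruct",
  object: "model",
  publisher: "lmstudio-community",
  state: "loaded",
  max_context_length: 32768,
  quantization: "Q4_K_M",
  compatibility_type: "gguf",
  arch: "qwen2",
};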
src/ipc/handlers/local_model_ollama_handler.ts (new file, 66 lines)
@@ -0,0 +1,66 @@
+import { ipcMain } from "electron";
+import log from "electron-log";
+import { LocalModelListResponse, LocalModel } from "../ipc_types";
+
+const logger = log.scope("ollama_handler");
+
+const OLLAMA_API_URL = "http://localhost:11434";
+
+interface OllamaModel {
+  name: string;
+  modified_at: string;
+  size: number;
+  digest: string;
+  details: {
+    format: string;
+    family: string;
+    families: string[];
+    parameter_size: string;
+    quantization_level: string;
+  };
+}
+
+export async function fetchOllamaModels(): Promise<LocalModelListResponse> {
+  try {
+    const response = await fetch(`${OLLAMA_API_URL}/api/tags`);
+    if (!response.ok) {
+      throw new Error(`Failed to fetch models: ${response.statusText}`);
+    }
+
+    const data = await response.json();
+    const ollamaModels: OllamaModel[] = data.models || [];
+
+    const models: LocalModel[] = ollamaModels.map((model: OllamaModel) => {
+      // Turn a tag like "llama2:latest" into a friendly "Llama 2"
+      const displayName = model.name.split(":")[0]
+        .replace(/-/g, " ")
+        .replace(/(\d+)/, " $1 ")
+        .split(" ")
+        .map((word) => word.charAt(0).toUpperCase() + word.slice(1))
+        .join(" ")
+        .trim();
+
+      return {
+        modelName: model.name,
+        displayName,
+        provider: "ollama",
+      };
+    });
+    logger.info(`Successfully fetched ${models.length} models from Ollama`);
+    return { models, error: null };
+  } catch (error) {
+    if (error instanceof TypeError && (error as Error).message.includes("fetch failed")) {
+      logger.error("Could not connect to Ollama");
+      return {
+        models: [],
+        error: "Could not connect to Ollama. Make sure it's running at http://localhost:11434",
+      };
+    }
+    return { models: [], error: "Failed to fetch models from Ollama" };
+  }
+}
+
+export function registerOllamaHandlers() {
+  ipcMain.handle("local-models:list-ollama", async (): Promise<LocalModelListResponse> => {
+    return fetchOllamaModels();
+  });
+}
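
To make the display-name cleanup concrete, here is a hand-traced sanity check of that pipeline (not part of the commit; toDisplayName mirrors the chain inside fetchOllamaModels):

const toDisplayName = (name: string) =>
  name.split(":")[0]
    .replace(/-/g, " ")
    .replace(/(\d+)/, " $1 ")
    .split(" ")
    .map((w) => w.charAt(0).toUpperCase() + w.slice(1))
    .join(" ")
    .trim();

console.log(toDisplayName("llama2:latest")); // "Llama 2"
console.log(toDisplayName("mistral:7b"));    // "Mistral"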

@@ -785,14 +785,28 @@
     }
   }

-  public async listLocalModels(): Promise<LocalModel[]> {
-    const { models, error } = (await this.ipcRenderer.invoke(
-      "local-models:list"
-    )) as LocalModelListResponse;
-    if (error) {
-      throw new Error(error);
-    }
-    return models;
-  }
+  public async listLocalOllamaModels(): Promise<LocalModel[]> {
+    try {
+      const response = await this.ipcRenderer.invoke("local-models:list-ollama");
+      return response?.models || [];
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Failed to fetch Ollama models: ${error.message}`);
+      }
+      throw new Error("Failed to fetch Ollama models: Unknown error occurred");
+    }
+  }
+
+  public async listLocalLMStudioModels(): Promise<LocalModel[]> {
+    try {
+      const response = await this.ipcRenderer.invoke("local-models:list-lmstudio");
+      return response?.models || [];
+    } catch (error) {
+      if (error instanceof Error) {
+        throw new Error(`Failed to fetch LM Studio models: ${error.message}`);
+      }
+      throw new Error("Failed to fetch LM Studio models: Unknown error occurred");
+    }
+  }

   // Listen for deep link events
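
Renderer-side usage of the two new methods, as a minimal sketch (error handling shown because both methods rethrow with provider-specific messages):

const ipcClient = IpcClient.getInstance();
try {
  const ollamaModels = await ipcClient.listLocalOllamaModels();
  const lmStudioModels = await ipcClient.listLocalLMStudioModels();
  console.log(ollamaModels.length, lmStudioModels.length);
} catch (err) {
  // e.g. "Failed to fetch LM Studio models: ..."
  console.error(err);
}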

@@ -94,6 +94,7 @@ export interface SystemDebugInfo {
 }

 export interface LocalModel {
+  provider: "ollama" | "lmstudio";
   modelName: string; // Name used for API calls (e.g., "llama2:latest")
   displayName: string; // User-friendly name (e.g., "Llama 2")
 }

@@ -3,7 +3,7 @@ import { createGoogleGenerativeAI as createGoogle } from "@ai-sdk/google";
 import { createAnthropic } from "@ai-sdk/anthropic";
 import { createOpenRouter } from "@openrouter/ai-sdk-provider";
 import { createOllama } from "ollama-ai-provider";
+import { createOpenAICompatible } from "@ai-sdk/openai-compatible";
 import type { LargeLanguageModel, UserSettings } from "../../lib/schemas";
 import {
   PROVIDER_TO_ENV_VAR,

@@ -83,6 +83,12 @@ export function getModelClient(
       const provider = createOllama();
       return provider(model.name);
     }
+    case "lmstudio": {
+      // Using LM Studio's OpenAI compatible API
+      const baseURL = "http://localhost:1234/v1"; // Default LM Studio OpenAI API URL
+      const provider = createOpenAICompatible({ name: "lmstudio", baseURL });
+      return provider(model.name);
+    }
     default: {
       // Ensure exhaustive check if more providers are added
       const _exhaustiveCheck: never = model.provider;
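
A sketch of consuming the returned client (assumptions: generateText comes from the Vercel AI SDK's "ai" package; getModelClient is called with a model descriptor and user settings, as its signature suggests; the model name is hypothetical and must match one loaded in LM Studio):

import { generateText } from "ai";

const model = getModelClient(
  { name: "qwen2.5-7b-instruct", provider: "lmstudio" }, // hypothetical model name
  settings
);
const { text } = await generateText({ model, prompt: "Say hello" });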

@@ -36,6 +36,7 @@ export const ModelProviderSchema = z.enum([
   "auto",
   "openrouter",
   "ollama",
+  "lmstudio",
 ]);

 /**

@@ -49,7 +49,8 @@ const validInvokeChannels = [
   "supabase:list-projects",
   "supabase:set-app-project",
   "supabase:unset-app-project",
-  "local-models:list",
+  "local-models:list-ollama",
+  "local-models:list-lmstudio",
   "window:minimize",
   "window:maximize",
   "window:close",