diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a3a9254..5259b9e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -52,3 +52,10 @@ jobs:
name: playwright-report
path: playwright-report/
retention-days: 3
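+ # Also upload test-results/ (Playwright's per-test output, including the recorded videos) when the run fails.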
+ - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
+ if: failure()
+ with:
+ name: test-results
+ path: test-results/
+ retention-days: 3
diff --git a/e2e-tests/helpers/test_helper.ts b/e2e-tests/helpers/test_helper.ts
index 4fd0438..527e76e 100644
--- a/e2e-tests/helpers/test_helper.ts
+++ b/e2e-tests/helpers/test_helper.ts
@@ -80,6 +80,18 @@ class PageObject {
await this.page.getByText("Testollama", { exact: true }).click();
}
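+ // Picks the fake LM Studio model advertised by the e2e fake LLM server.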
+ async selectTestLMStudioModel() {
+ await this.page.getByRole("button", { name: "Model: Auto" }).click();
+ await this.page.getByText("Local models").click();
+ await this.page.getByText("LM Studio", { exact: true }).click();
+ // Two elements match "lmstudio-model-1", but they are the same button, so pick the first.
+ await this.page
+   .getByText("lmstudio-model-1", { exact: true })
+   .first()
+   .click();
+ }
+
async setUpTestProvider() {
await this.page.getByText("Add custom providerConnect to").click();
// Fill out provider dialog
@@ -209,6 +221,9 @@ export const test = base.extend<{
// parse the directory and find paths and other info
const appInfo = parseElectronApp(latestBuild);
process.env.OLLAMA_HOST = "http://localhost:3500/ollama";
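+ // Point LM Studio requests at the fake LLM server (see src/ipc/utils/lm_studio_utils.ts).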
+ process.env.LM_STUDIO_BASE_URL_FOR_TESTING =
+ "http://localhost:3500/lmstudio";
process.env.E2E_TEST_BUILD = "true";
// This is just a hack to avoid the AI setup screen.
process.env.OPENAI_API_KEY = "sk-test";
@@ -219,6 +234,10 @@ export const test = base.extend<{
`--user-data-dir=/tmp/dyad-e2e-tests-${Date.now()}`,
],
executablePath: appInfo.executable,
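+ // Record a video of each test into test-results/ so CI can upload it on failure.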
+ recordVideo: {
+ dir: "test-results",
+ },
});
console.log("electronApp launched!");
@@ -264,7 +283,15 @@ function prettifyDump(dumpContent: string) {
return parsedDump
.map((message) => {
- return `===\nrole: ${message.role}\nmessage: ${message.content}`;
+ const content = message.content
+ // We remove package.json because it's flaky:
+ // depending on whether pnpm install has run, both its contents and its
+ // timestamp (which also affects message ordering) change.
+ .replace(
+   /<dyad-file path="package\.json">\n[\s\S]*?<\/dyad-file>\n/g,
+   "",
+ );
+ return `===\nrole: ${message.role}\nmessage: ${content}`;
})
.join("\n\n");
}
diff --git a/e2e-tests/lm_studio.spec.ts b/e2e-tests/lm_studio.spec.ts
new file mode 100644
index 0000000..5700819
--- /dev/null
+++ b/e2e-tests/lm_studio.spec.ts
@@ -0,0 +1,8 @@
+import { test } from "./helpers/test_helper";
+
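+// Exercises the fake LM Studio endpoints served by testing/fake-llm-server.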
+test("send message to LM studio", async ({ po }) => {
+ await po.selectTestLMStudioModel();
+ await po.sendPrompt("hi");
+ await po.snapshotMessages();
+});
diff --git a/e2e-tests/snapshots/dump_messages.spec.ts_server-dump.txt b/e2e-tests/snapshots/dump_messages.spec.ts_server-dump.txt
index 79fb9d0..84d3f94 100644
--- a/e2e-tests/snapshots/dump_messages.spec.ts_server-dump.txt
+++ b/e2e-tests/snapshots/dump_messages.spec.ts_server-dump.txt
@@ -444,92 +444,6 @@ Available packages and libraries:
-<dyad-file path="package.json">
-{
- "name": "vite_react_shadcn_ts",
- "private": true,
- "version": "0.0.0",
- "type": "module",
- "scripts": {
- "dev": "vite",
- "build": "vite build",
- "build:dev": "vite build --mode development",
- "lint": "eslint .",
- "preview": "vite preview"
- },
- "dependencies": {
- "@hookform/resolvers": "^3.9.0",
- "@radix-ui/react-accordion": "^1.2.0",
- "@radix-ui/react-alert-dialog": "^1.1.1",
- "@radix-ui/react-aspect-ratio": "^1.1.0",
- "@radix-ui/react-avatar": "^1.1.0",
- "@radix-ui/react-checkbox": "^1.1.1",
- "@radix-ui/react-collapsible": "^1.1.0",
- "@radix-ui/react-context-menu": "^2.2.1",
- "@radix-ui/react-dialog": "^1.1.2",
- "@radix-ui/react-dropdown-menu": "^2.1.1",
- "@radix-ui/react-hover-card": "^1.1.1",
- "@radix-ui/react-label": "^2.1.0",
- "@radix-ui/react-menubar": "^1.1.1",
- "@radix-ui/react-navigation-menu": "^1.2.0",
- "@radix-ui/react-popover": "^1.1.1",
- "@radix-ui/react-progress": "^1.1.0",
- "@radix-ui/react-radio-group": "^1.2.0",
- "@radix-ui/react-scroll-area": "^1.1.0",
- "@radix-ui/react-select": "^2.1.1",
- "@radix-ui/react-separator": "^1.1.0",
- "@radix-ui/react-slider": "^1.2.0",
- "@radix-ui/react-slot": "^1.1.0",
- "@radix-ui/react-switch": "^1.1.0",
- "@radix-ui/react-tabs": "^1.1.0",
- "@radix-ui/react-toast": "^1.2.1",
- "@radix-ui/react-toggle": "^1.1.0",
- "@radix-ui/react-toggle-group": "^1.1.0",
- "@radix-ui/react-tooltip": "^1.1.4",
- "@tanstack/react-query": "^5.56.2",
- "class-variance-authority": "^0.7.1",
- "clsx": "^2.1.1",
- "cmdk": "^1.0.0",
- "date-fns": "^3.6.0",
- "embla-carousel-react": "^8.3.0",
- "input-otp": "^1.2.4",
- "lucide-react": "^0.462.0",
- "next-themes": "^0.3.0",
- "react": "^18.3.1",
- "react-day-picker": "^8.10.1",
- "react-dom": "^18.3.1",
- "react-hook-form": "^7.53.0",
- "react-resizable-panels": "^2.1.3",
- "react-router-dom": "^6.26.2",
- "recharts": "^2.12.7",
- "sonner": "^1.5.0",
- "tailwind-merge": "^2.5.2",
- "tailwindcss-animate": "^1.0.7",
- "vaul": "^0.9.3",
- "zod": "^3.23.8"
- },
- "devDependencies": {
- "@eslint/js": "^9.9.0",
- "@tailwindcss/typography": "^0.5.15",
- "@types/node": "^22.5.5",
- "@types/react": "^18.3.3",
- "@types/react-dom": "^18.3.0",
- "@vitejs/plugin-react-swc": "^3.9.0",
- "autoprefixer": "^10.4.20",
- "eslint": "^9.9.0",
- "eslint-plugin-react-hooks": "^5.1.0-rc.0",
- "eslint-plugin-react-refresh": "^0.4.9",
- "globals": "^15.9.0",
- "postcss": "^8.4.47",
- "tailwindcss": "^3.4.11",
- "typescript": "^5.5.3",
- "typescript-eslint": "^8.0.1",
- "vite": "^6.3.4"
- }
-}
-</dyad-file>
-
-
export default {
plugins: {
diff --git a/e2e-tests/snapshots/lm_studio.spec.ts_send-message-to-LM-studio-1.aria.yml b/e2e-tests/snapshots/lm_studio.spec.ts_send-message-to-LM-studio-1.aria.yml
new file mode 100644
index 0000000..777e2f1
--- /dev/null
+++ b/e2e-tests/snapshots/lm_studio.spec.ts_send-message-to-LM-studio-1.aria.yml
@@ -0,0 +1,16 @@
+- paragraph: hi
+- 'button "Thinking `<dyad-write>`: I''ll think about the problem and write a bug report. <dyad-write> <dyad-write path=\"file1.txt\"> Fake dyad write </dyad-write>"':
+ - img
+ - img
+ - paragraph:
+ - code: "`<dyad-write>`"
+ - text: ": I'll think about the problem and write a bug report."
+ - paragraph: <dyad-write>
+ - paragraph: <dyad-write path="file1.txt"> Fake dyad write </dyad-write>
+- img
+- text: file1.txt
+- img
+- text: file1.txt
+- paragraph: More EOM
+- button "Retry":
+ - img
\ No newline at end of file
diff --git a/playwright.config.ts b/playwright.config.ts
index 7d650d6..7c14f89 100644
--- a/playwright.config.ts
+++ b/playwright.config.ts
@@ -3,7 +3,9 @@ import { PlaywrightTestConfig } from "@playwright/test";
const config: PlaywrightTestConfig = {
testDir: "./e2e-tests",
workers: 1,
- maxFailures: 1,
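+ // Allow one retry on CI to absorb flaky runs; keep local runs strict.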
+ retries: process.env.CI ? 1 : 0,
+ // maxFailures: 1,
timeout: process.env.CI ? 60_000 : 15_000,
// Use a custom snapshot path template because Playwright's default
// is platform-specific which isn't necessary for Dyad e2e tests
diff --git a/src/ipc/handlers/local_model_lmstudio_handler.ts b/src/ipc/handlers/local_model_lmstudio_handler.ts
index 67b8dc8..6e10af7 100644
--- a/src/ipc/handlers/local_model_lmstudio_handler.ts
+++ b/src/ipc/handlers/local_model_lmstudio_handler.ts
@@ -1,6 +1,7 @@
import { ipcMain } from "electron";
import log from "electron-log";
import type { LocalModelListResponse, LocalModel } from "../ipc_types";
+import { LM_STUDIO_BASE_URL } from "../utils/lm_studio_utils";
const logger = log.scope("lmstudio_handler");
@@ -19,7 +20,7 @@ export interface LMStudioModel {
export async function fetchLMStudioModels(): Promise<LMStudioModel[]> {
const modelsResponse: Response = await fetch(
- "http://localhost:1234/api/v0/models",
+ `${LM_STUDIO_BASE_URL}/api/v0/models`,
);
if (!modelsResponse.ok) {
throw new Error("Failed to fetch models from LM Studio");
diff --git a/src/ipc/utils/get_model_client.ts b/src/ipc/utils/get_model_client.ts
index 0293bc4..693e3a0 100644
--- a/src/ipc/utils/get_model_client.ts
+++ b/src/ipc/utils/get_model_client.ts
@@ -13,6 +13,7 @@ import { LanguageModelProvider } from "../ipc_types";
import { llmErrorStore } from "@/main/llm_error_store";
import { createDyadEngine } from "./llm_engine_provider";
import { findLanguageModel } from "./findLanguageModel";
+import { LM_STUDIO_BASE_URL } from "./lm_studio_utils";
const dyadLocalEngine = process.env.DYAD_LOCAL_ENGINE;
const dyadGatewayUrl = process.env.DYAD_GATEWAY_URL;
@@ -257,7 +258,7 @@ function getRegularModelClient(
}
case "lmstudio": {
// LM Studio uses OpenAI compatible API
- const baseURL = providerConfig.apiBaseUrl || "http://localhost:1234/v1";
+ const baseURL = providerConfig.apiBaseUrl || `${LM_STUDIO_BASE_URL}/v1`;
const provider = createOpenAICompatible({
name: "lmstudio",
baseURL,
diff --git a/src/ipc/utils/lm_studio_utils.ts b/src/ipc/utils/lm_studio_utils.ts
new file mode 100644
index 0000000..88aaaf4
--- /dev/null
+++ b/src/ipc/utils/lm_studio_utils.ts
@@ -0,0 +1,3 @@
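+// Overridable so e2e tests can point the app at the fake LLM server instead of a real LM Studio instance.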
+export const LM_STUDIO_BASE_URL =
+ process.env.LM_STUDIO_BASE_URL_FOR_TESTING || "http://localhost:1234";
diff --git a/testing/fake-llm-server/index.ts b/testing/fake-llm-server/index.ts
index b8668d7..75a8325 100644
--- a/testing/fake-llm-server/index.ts
+++ b/testing/fake-llm-server/index.ts
@@ -1,4 +1,4 @@
-import express from "express";
+import express, { Request, Response } from "express";
import { createServer } from "http";
import cors from "cors";
import fs from "fs";
@@ -95,7 +95,7 @@ app.get("/ollama/api/tags", (req, res) => {
let globalCounter = 0;
app.post("/ollama/chat", (req, res) => {
- // Tell the client we’re going to stream NDJSON
+ // Tell the client we're going to stream NDJSON
res.setHeader("Content-Type", "application/x-ndjson");
res.setHeader("Cache-Control", "no-cache");
@@ -139,8 +139,56 @@ app.post("/ollama/chat", (req, res) => {
}, 300); // 300 ms delay – tweak as you like
});
+// LM Studio specific endpoints
+app.get("/lmstudio/api/v0/models", (req, res) => {
+ const lmStudioModels = {
+ data: [
+ {
+ type: "llm",
+ id: "lmstudio-model-1",
+ object: "model",
+ publisher: "lmstudio",
+ state: "loaded",
+ max_context_length: 4096,
+ quantization: "Q4_0",
+ compatibility_type: "gguf",
+ arch: "llama",
+ },
+ {
+ type: "llm",
+ id: "lmstudio-model-2-chat",
+ object: "model",
+ publisher: "lmstudio",
+ state: "not-loaded",
+ max_context_length: 8192,
+ quantization: "Q5_K_M",
+ compatibility_type: "gguf",
+ arch: "mixtral",
+ },
+ {
+ type: "embedding", // Should be filtered out by client
+ id: "lmstudio-embedding-model",
+ object: "model",
+ publisher: "lmstudio",
+ state: "loaded",
+ max_context_length: 2048,
+ quantization: "F16",
+ compatibility_type: "gguf",
+ arch: "bert",
+ },
+ ],
+ };
+ console.log("* Sending fake LM Studio models");
+ res.json(lmStudioModels);
+});
+
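+// LM Studio exposes an OpenAI-compatible chat API, so the same handler serves both routes.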
+app.post("/lmstudio/v1/chat/completions", chatCompletionHandler);
+
// Handle POST requests to /v1/chat/completions
-app.post("/v1/chat/completions", (req, res) => {
+app.post("/v1/chat/completions", chatCompletionHandler);
+
+function chatCompletionHandler(req: Request, res: Response) {
const { stream = false, messages = [] } = req.body;
console.log("* Received messages", messages);
@@ -270,8 +318,7 @@ app.post("/v1/chat/completions", (req, res) => {
res.end();
}
}, 10);
-});
-
+}
// Start the server
const server = createServer(app);
server.listen(PORT, () => {