moreminimore-vibe/e2e-tests/engine.spec.ts
Will Chen · 56d0e76790 · Make balanced smart context option the default (#1186) · 2025-09-04 11:06:46 -07:00
<!-- This is an auto-generated description by cubic. -->

## Summary by cubic
Set “balanced” as the default smart context mode. Users now get balanced
when Smart Files Context is enabled and no mode is set; “conservative”
must be explicitly selected.

- **Refactors**
  - Default fallback to balanced in UI and engine (proSmartContextOption undefined -> "balanced"); see the sketch below.
  - ProModeSelector saves "conservative" explicitly; the selector reads undefined as balanced.
  - Updated schema and types to allow "balanced" | "conservative".
  - Engine payload now includes smart_context_mode with "balanced" by default; e2e tests and snapshots updated.

- **Migration**
  - No action needed. Existing users without an explicit mode will use balanced by default; selecting conservative persists.

<!-- End of auto-generated description by cubic. -->
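
For illustration, a minimal sketch of the undefined -> "balanced" fallback described in the summary. Only `proSmartContextOption` and the mode strings come from this change; the `ProSettings` shape, `enableProSmartFilesContextMode` flag, and `resolveSmartContextMode` helper are assumed names, not the repository's actual code.

```ts
// Sketch only: assumed types and names, not the repo's implementation.
type SmartContextMode = "balanced" | "conservative";

interface ProSettings {
  enableProSmartFilesContextMode?: boolean;
  proSmartContextOption?: SmartContextMode;
}

// When Smart Files Context is enabled and no mode has been saved,
// treat the missing value as "balanced"; "conservative" only applies
// when it was explicitly selected and persisted.
function resolveSmartContextMode(
  settings: ProSettings,
): SmartContextMode | undefined {
  if (!settings.enableProSmartFilesContextMode) {
    return undefined; // Smart Files Context is disabled entirely.
  }
  return settings.proSmartContextOption ?? "balanced";
}

// The engine request payload then carries the resolved mode, e.g.:
//   { ...otherFields, smart_context_mode: resolveSmartContextMode(settings) }
```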

78 lines · 2.4 KiB · TypeScript

import { testSkipIfWindows } from "./helpers/test_helper";

testSkipIfWindows("send message to engine", async ({ po }) => {
  await po.setUpDyadPro();
  await po.selectModel({ provider: "Google", model: "Gemini 2.5 Pro" });
  await po.sendPrompt("[dump] tc=turbo-edits");
  await po.snapshotServerDump("request");
  await po.snapshotMessages({ replaceDumpPath: true });
});

testSkipIfWindows(
  "send message to engine - smart context conservative",
  async ({ po }) => {
    await po.setUpDyadPro();
    const proModesDialog = await po.openProModesDialog({
      location: "home-chat-input-container",
    });
    await proModesDialog.setSmartContextMode("conservative");
    await proModesDialog.close();
    await po.selectModel({ provider: "Google", model: "Gemini 2.5 Pro" });
    await po.sendPrompt("[dump] tc=turbo-edits");
    await po.snapshotServerDump("request");
    await po.snapshotMessages({ replaceDumpPath: true });
  },
);

testSkipIfWindows("send message to engine - openai gpt-5", async ({ po }) => {
  await po.setUpDyadPro();
  // By default, it's using auto, which points to Flash 2.5 and doesn't
  // use the engine.
  await po.selectModel({ provider: "OpenAI", model: "GPT 5" });
  await po.sendPrompt("[dump] tc=turbo-edits");
  await po.snapshotServerDump("request");
});

testSkipIfWindows(
  "send message to engine - anthropic claude sonnet 4",
  async ({ po }) => {
    await po.setUpDyadPro();
    // By default, it's using auto, which points to Flash 2.5 and doesn't
    // use the engine.
    await po.selectModel({ provider: "Anthropic", model: "Claude 4 Sonnet" });
    await po.sendPrompt("[dump] tc=turbo-edits");
    await po.snapshotServerDump("request");
  },
);

testSkipIfWindows(
  "smart auto should send message to engine",
  async ({ po }) => {
    await po.setUpDyadPro();
    await po.sendPrompt("[dump] tc=turbo-edits");
    await po.snapshotServerDump("request");
    await po.snapshotMessages({ replaceDumpPath: true });
  },
);

testSkipIfWindows(
  "regular auto should send message to engine",
  async ({ po }) => {
    await po.setUpDyadPro();
    const proModesDialog = await po.openProModesDialog({
      location: "home-chat-input-container",
    });
    // Turn Smart Context off entirely so the request uses regular auto.
    await proModesDialog.setSmartContextMode("off");
    await proModesDialog.close();
    await po.sendPrompt("[dump] tc=turbo-edits");
    await po.snapshotServerDump("request");
    await po.snapshotMessages({ replaceDumpPath: true });
  },
);
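
For context, a rough sketch of what a ProModesDialog page object like the one used above might look like. This is a sketch only, assuming Playwright role-based selectors; the real helper in e2e-tests/helpers may be implemented differently.

```ts
import type { Page } from "@playwright/test";

// Hypothetical page object for the Pro modes dialog; selectors and
// internals here are assumptions, not the repo's actual helper.
export class ProModesDialog {
  constructor(private readonly page: Page) {}

  // Picks a smart context mode option by its visible label,
  // e.g. "balanced", "conservative", or "off".
  async setSmartContextMode(mode: "balanced" | "conservative" | "off") {
    await this.page.getByRole("radio", { name: mode, exact: false }).click();
  }

  async close() {
    await this.page.keyboard.press("Escape");
  }
}
```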