<!-- This is an auto-generated description by cubic. -->
## Summary by cubic
Adds Smart Context v3 with selectable modes (Off, Conservative,
Balanced) and surfaces token savings in chat. Also improves token
estimation by counting per-file tokens when Smart Context is enabled.
- **New Features**
- Smart Context selector in Pro settings with three options.
Conservative is the default when enabled without an explicit choice.
- New setting: proSmartContextOption ("balanced"); undefined implies
Conservative.
- Engine now receives enable_smart_files_context and smart_context_mode.
- Chat shows a DyadTokenSavings card when the message contains a
`token-savings?original-tokens=...&smart-context-tokens=...` marker, displaying
the percent saved, with a tooltip showing the exact token counts.
- Token estimation uses extracted file contents for accuracy when Pro +
Smart Context is on; otherwise falls back to formatted codebase output.
<!-- End of auto-generated description by cubic. -->
78 lines · 2.4 KiB · TypeScript
import { testSkipIfWindows } from "./helpers/test_helper";
|
|
|
|
testSkipIfWindows("send message to engine", async ({ po }) => {
|
|
await po.setUpDyadPro();
|
|
await po.selectModel({ provider: "Google", model: "Gemini 2.5 Pro" });
|
|
await po.sendPrompt("[dump] tc=turbo-edits");
|
|
|
|
await po.snapshotServerDump("request");
|
|
await po.snapshotMessages({ replaceDumpPath: true });
|
|
});
|
|
|
|
testSkipIfWindows(
|
|
"send message to engine - smart context balanced",
|
|
async ({ po }) => {
|
|
await po.setUpDyadPro();
|
|
const proModesDialog = await po.openProModesDialog({
|
|
location: "home-chat-input-container",
|
|
});
|
|
await proModesDialog.setSmartContextMode("balanced");
|
|
await proModesDialog.close();
|
|
await po.selectModel({ provider: "Google", model: "Gemini 2.5 Pro" });
|
|
await po.sendPrompt("[dump] tc=turbo-edits");
|
|
|
|
await po.snapshotServerDump("request");
|
|
await po.snapshotMessages({ replaceDumpPath: true });
|
|
},
|
|
);
|
|
|
|
testSkipIfWindows("send message to engine - openai gpt-4.1", async ({ po }) => {
|
|
await po.setUpDyadPro();
|
|
// By default, it's using auto which points to Flash 2.5 and doesn't
|
|
// use engine.
|
|
await po.selectModel({ provider: "OpenAI", model: "GPT 4.1" });
|
|
await po.sendPrompt("[dump] tc=turbo-edits");
|
|
|
|
await po.snapshotServerDump("request");
|
|
});
|
|
|
|
testSkipIfWindows(
|
|
"send message to engine - anthropic claude sonnet 4",
|
|
async ({ po }) => {
|
|
await po.setUpDyadPro();
|
|
// By default, it's using auto which points to Flash 2.5 and doesn't
|
|
// use engine.
|
|
await po.selectModel({ provider: "Anthropic", model: "Claude 4 Sonnet" });
|
|
await po.sendPrompt("[dump] tc=turbo-edits");
|
|
|
|
await po.snapshotServerDump("request");
|
|
},
|
|
);
|
|
|
|
testSkipIfWindows(
|
|
"smart auto should send message to engine",
|
|
async ({ po }) => {
|
|
await po.setUpDyadPro();
|
|
await po.sendPrompt("[dump] tc=turbo-edits");
|
|
|
|
await po.snapshotServerDump("request");
|
|
await po.snapshotMessages({ replaceDumpPath: true });
|
|
},
|
|
);
|
|
|
|
testSkipIfWindows(
|
|
"regular auto should send message to engine",
|
|
async ({ po }) => {
|
|
await po.setUpDyadPro();
|
|
const proModesDialog = await po.openProModesDialog({
|
|
location: "home-chat-input-container",
|
|
});
|
|
await proModesDialog.setSmartContextMode("off");
|
|
await proModesDialog.close();
|
|
await po.sendPrompt("[dump] tc=turbo-edits");
|
|
|
|
await po.snapshotServerDump("request");
|
|
await po.snapshotMessages({ replaceDumpPath: true });
|
|
},
|
|
);
|