This uses Gemini's native [thinking summaries](https://cloud.google.com/vertex-ai/generative-ai/docs/thinking#thought-summaries), which were recently added to the API. Why? The grafted thinking would sometimes cause weird issues where the model, especially Gemini 2.5 Flash, got confused and put dyad tags like `<dyad-write>` inside the `<think>` tags. This also improves the UX because you can see the native thoughts rather than having the Gemini response load for a while without any feedback. I tried adding Anthropic extended thinking; however, it requires the temperature to be set to 1, which isn't ideal for Dyad's use case, where we need precise syntax following.
30 lines
1002 B
TypeScript
import { testSkipIfWindows } from "./helpers/test_helper";
|
|
|
|
testSkipIfWindows("gemini 2.5 flash", async ({ po }) => {
|
|
// Note: we do not need to disable pro modes because 2.5 flash doesn't
|
|
// use engine.
|
|
await po.setUpDyadPro();
|
|
await po.selectModel({ provider: "Google", model: "Gemini 2.5 Flash" });
|
|
await po.sendPrompt("[dump] tc=gateway-simple");
|
|
|
|
await po.snapshotServerDump("request");
|
|
await po.snapshotMessages({ replaceDumpPath: true });
|
|
});
|
|
|
|
testSkipIfWindows("claude 4 sonnet", async ({ po }) => {
|
|
await po.setUpDyadPro();
|
|
// Disable the pro modes so it routes to gateway.
|
|
const proModesDialog = await po.openProModesDialog({
|
|
location: "home-chat-input-container",
|
|
});
|
|
await proModesDialog.toggleTurboEdits();
|
|
await proModesDialog.toggleSmartContext();
|
|
await proModesDialog.close();
|
|
|
|
await po.selectModel({ provider: "Anthropic", model: "Claude 4 Sonnet" });
|
|
|
|
await po.sendPrompt("[dump] tc=gateway-simple");
|
|
|
|
await po.snapshotServerDump("request");
|
|
});
|