Emdash source with visual editor image upload fix
Fixes:
1. media.ts: wrap placeholder generation in try-catch
2. toolbar.ts: check r.ok, display error message in popover
scripts/build-perf-d1.mjs (Normal file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/env node
import { spawnSync } from "node:child_process";
import { writeFileSync } from "node:fs";
import { dirname, resolve } from "node:path";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const fixtureDir = resolve(__dirname, "..", "fixtures/perf-site");

const r = spawnSync("pnpm", ["exec", "astro", "build"], {
	cwd: fixtureDir,
	stdio: "inherit",
	env: { ...process.env, EMDASH_FIXTURE_TARGET: "d1" },
});
if (r.status !== 0) process.exit(r.status ?? 1);
writeFileSync(resolve(fixtureDir, "dist/.perf-target"), "d1\n");
scripts/query-counts-dump.mjs (Normal file, 232 lines)
@@ -0,0 +1,232 @@
#!/usr/bin/env node
/**
 * Sibling of scripts/query-counts.mjs that dumps raw query events to JSON
 * files under scripts/query-dumps/{target}/{routeSlug}.{phase}.json
 *
 * Each file is an array of { sql, params, durationMs, route, method, phase }.
 * The harness assumes the fixture is already built and seeded -- we only
 * spin servers, hit routes, and partition events. For sqlite, the main
 * `query-counts.mjs --target sqlite` flow builds and seeds; for d1, run
 * `query-counts.mjs --target d1` once first (or `build-perf-d1.mjs` to
 * build only) so wrangler state and dist/ exist.
 *
 * The dump JSON itself is gitignored — it's an analysis artifact that
 * regenerates from the harness in seconds. The helper scripts in
 * `query-dumps/` (classify.mjs, cold-only.mjs, inspect-other.mjs) are
 * the things worth keeping in source.
 */
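
// One dumped event, for orientation; the field names are the ones listed
// above, the values here are illustrative rather than from a real run:
//   { "sql": "select * from \"options\" where \"name\" = ?", "params": ["site_title"],
//     "durationMs": 0.42, "route": "/", "method": "GET", "phase": "cold" }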

import { spawn } from "node:child_process";
import { existsSync, mkdirSync, writeFileSync } from "node:fs";
import { createConnection } from "node:net";
import { dirname, resolve } from "node:path";
import { createInterface } from "node:readline";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const repoRoot = resolve(__dirname, "..");
const fixtureDir = resolve(repoRoot, "fixtures/perf-site");
const dumpsDir = resolve(__dirname, "query-dumps");

const HOST = "127.0.0.1";
const PORT = 14322;
const BASE = `http://${HOST}:${PORT}`;
const QUERY_LOG_PREFIX = "[emdash-query-log] ";

const ROUTES = [
	["GET", "/"],
	["GET", "/posts"],
	["GET", "/posts/building-for-the-long-term"],
	["GET", "/pages/about"],
	["GET", "/category/development"],
	["GET", "/tag/webdev"],
	["GET", "/rss.xml"],
	["GET", "/search?q=static"],
];

function parseArgs(argv) {
	const out = { target: "sqlite", routesOnly: null };
	for (let i = 0; i < argv.length; i++) {
		const a = argv[i];
		if (a === "--target") out.target = argv[++i];
		else if (a.startsWith("--target=")) out.target = a.slice("--target=".length);
		else if (a === "--routes") out.routesOnly = argv[++i].split(",");
	}
	if (out.target !== "sqlite" && out.target !== "d1") {
		throw new Error(`bad --target ${out.target}`);
	}
	return out;
}
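
// Typical invocations (flags as parsed above; the --routes list is an example):
//   node scripts/query-counts-dump.mjs --target sqlite
//   node scripts/query-counts-dump.mjs --target d1
//   node scripts/query-counts-dump.mjs --target sqlite --routes /,/posts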

const { target, routesOnly } = parseArgs(process.argv.slice(2));

function waitForPort(host, port, timeoutMs = 120_000) {
	const deadline = Date.now() + timeoutMs;
	return new Promise((resolveReady, rejectReady) => {
		const attempt = () => {
			if (Date.now() > deadline) {
				rejectReady(new Error(`port ${host}:${port} did not open within ${timeoutMs}ms`));
				return;
			}
			const socket = createConnection({ host, port });
			socket.once("connect", () => {
				socket.destroy();
				resolveReady();
			});
			socket.once("error", () => {
				socket.destroy();
				setTimeout(attempt, 100);
			});
		};
		attempt();
	});
}

function startServer(events) {
	let cmd, args;
	if (target === "sqlite") {
		cmd = "node";
		args = ["./dist/server/entry.mjs"];
	} else {
		cmd = "pnpm";
		args = ["exec", "astro", "preview", "--host", HOST, "--port", String(PORT)];
	}

	const child = spawn(cmd, args, {
		cwd: fixtureDir,
		env: {
			...process.env,
			EMDASH_FIXTURE_TARGET: target,
			EMDASH_QUERY_LOG: "1",
			HOST,
			PORT: String(PORT),
		},
		stdio: ["ignore", "pipe", "inherit"],
	});

	const ready = waitForPort(HOST, PORT);
	const rl = createInterface({ input: child.stdout });
	rl.on("line", (line) => {
		const idx = line.indexOf(QUERY_LOG_PREFIX);
		if (idx !== -1) {
			const payload = line.slice(idx + QUERY_LOG_PREFIX.length);
			try {
				events.push(JSON.parse(payload));
			} catch {
				// ignore
			}
			return;
		}
		process.stdout.write(line + "\n");
	});

	const exited = new Promise((res) => child.once("exit", res));

	async function stop() {
		child.kill("SIGTERM");
		await Promise.race([
			exited,
			new Promise((r) => setTimeout(r, 5_000)).then(() => child.kill("SIGKILL")),
		]);
		await new Promise((r) => setTimeout(r, 250));
	}

	return { ready, stop };
}

async function hit(method, path, phase) {
	let lastErr;
	for (let i = 0; i < 10; i++) {
		try {
			const r = await fetch(`${BASE}${path}`, {
				method,
				headers: { "x-perf-phase": phase },
				redirect: "manual",
			});
			await r.arrayBuffer();
			process.stdout.write(` ${phase.padEnd(5)} ${method} ${path} -> ${r.status}\n`);
			return r.status;
		} catch (err) {
			lastErr = err;
			await new Promise((r) => setTimeout(r, 200));
		}
	}
	throw lastErr;
}

async function warmup() {
	const r = await fetch(BASE, { redirect: "manual" });
	await r.arrayBuffer();
	process.stdout.write(` warmup GET / -> ${r.status}\n`);
}

const ROUTE_LEADING_SLASH = /^\//;
const ROUTE_NON_ALNUM = /[^a-zA-Z0-9]+/g;

function routeSlug(path) {
	if (path === "/") return "root";
	return path.replace(ROUTE_LEADING_SLASH, "").replace(ROUTE_NON_ALNUM, "_");
}

function dumpEventsByRoute(events, dumpTarget) {
	const targetDir = resolve(dumpsDir, dumpTarget);
	if (!existsSync(targetDir)) mkdirSync(targetDir, { recursive: true });

	const groups = new Map();
	for (const e of events) {
		if (e.phase !== "cold" && e.phase !== "warm") continue;
		const key = `${routeSlug(e.route)}.${e.phase}`;
		if (!groups.has(key)) groups.set(key, []);
		groups.get(key).push(e);
	}
	for (const [key, list] of groups) {
		const file = resolve(targetDir, `${key}.json`);
		writeFileSync(file, JSON.stringify(list, null, "\t") + "\n");
		process.stdout.write(`wrote ${file} (${list.length})\n`);
	}
	const allFile = resolve(targetDir, "_all.json");
	writeFileSync(allFile, JSON.stringify(events, null, "\t") + "\n");
	process.stdout.write(`wrote ${allFile} (${events.length})\n`);
}

async function runSqlite(events) {
	const server = startServer(events);
	try {
		await server.ready;
		await warmup();
		const routes = routesOnly ? ROUTES.filter(([_, p]) => routesOnly.includes(p)) : ROUTES;
		for (const [m, p] of routes) await hit(m, p, "cold");
		for (const [m, p] of routes) await hit(m, p, "warm");
	} finally {
		await server.stop();
	}
}

async function runD1(events) {
	const routes = routesOnly ? ROUTES.filter(([_, p]) => routesOnly.includes(p)) : ROUTES;
	for (const [m, p] of routes) {
		process.stdout.write(`--- fresh isolate for ${m} ${p} ---\n`);
		const server = startServer(events);
		try {
			await server.ready;
			await hit(m, p, "cold");
			await hit(m, p, "warm");
		} finally {
			await server.stop();
		}
	}
}

async function main() {
	const events = [];
	if (target === "sqlite") await runSqlite(events);
	else await runD1(events);
	dumpEventsByRoute(events, target);
}

main()
	.then(() => process.exit(0))
	.catch((err) => {
		process.stderr.write(`${err.stack ?? err.message ?? err}\n`);
		process.exit(1);
	});
scripts/query-counts.mjs (Normal file, 461 lines)
@@ -0,0 +1,461 @@
#!/usr/bin/env node
/**
 * Query-count harness for the runtime perf fixture.
 *
 * Builds fixtures/perf-site with `astro build`, then serves it via the
 * production adapter entry (node or wrangler, never `astro dev`) so the
 * measured code paths match what real visitors hit. For each fixture
 * route we record cold and warm phase queries — the Kysely log hook
 * emits `[emdash-query-log]`-prefixed NDJSON on stdout, which the harness
 * captures.
 *
 * Two targets, two server strategies:
 *   --target sqlite  Node adapter standalone entry. One long-lived
 *                    process. First request warms the runtime (migrations
 *                    + auto-seed on first boot). Cold/warm is per-route
 *                    first-vs-second hit.
 *
 *   --target d1      Cloudflare adapter via `astro preview` (wrangler dev
 *                    against the built worker). Because real D1 visitors
 *                    often land on a fresh isolate, we measure that:
 *                    seed once in a dedicated boot, stop; then spin a
 *                    fresh preview per route for one cold + one warm
 *                    hit, stop, next route.
 *
 * Seeding (per target):
 *   sqlite: `emdash init && emdash seed` via the CLI — writes directly to
 *           data.db, no HTTP layer involved.
 *   d1:     astro dev + POST /_emdash/api/setup/dev-bypass. The dev-bypass
 *           endpoint is dead-code-eliminated from prod builds, so it's
 *           only reachable via dev mode. Local D1 state persists in
 *           .wrangler/state across dev → preview.
 *
 * Usage:
 *   node scripts/query-counts.mjs                 # sqlite, compare
 *   node scripts/query-counts.mjs --target d1     # d1, compare
 *   node scripts/query-counts.mjs --update        # rewrite snapshot
 *   node scripts/query-counts.mjs --target d1 --update
 *   node scripts/query-counts.mjs --skip-seed     # reuse existing db
 *   node scripts/query-counts.mjs --skip-build    # reuse existing build
 *
 * --skip-seed and --skip-build compose. Passing both gives the fastest
 * local iteration loop once the fixture is set up.
 *
 * Prerequisite: `pnpm build` has run (the emdash CLI lives in dist/).
 */
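
// One captured stdout line looks like this (prefix from QUERY_LOG_PREFIX
// below; the JSON payload is illustrative, not from a real run):
//   [emdash-query-log] {"sql":"select 1","params":[],"durationMs":0.4,"route":"/","method":"GET","phase":"cold"}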

import { spawn, spawnSync } from "node:child_process";
import { existsSync, readFileSync, rmSync, writeFileSync } from "node:fs";
import { createConnection } from "node:net";
import { dirname, resolve } from "node:path";
import { createInterface } from "node:readline";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const repoRoot = resolve(__dirname, "..");
const fixtureDir = resolve(repoRoot, "fixtures/perf-site");

const HOST = "127.0.0.1";
const PORT = 14321;
const BASE = `http://${HOST}:${PORT}`;

const ROUTES = [
	["GET", "/"],
	["GET", "/posts"],
	["GET", "/posts/building-for-the-long-term"],
	["GET", "/pages/about"],
	["GET", "/category/development"],
	["GET", "/tag/webdev"],
	["GET", "/rss.xml"],
	["GET", "/search?q=static"],
];

const TRACKED_PHASES = new Set(["cold", "warm"]);
const VALID_TARGETS = new Set(["sqlite", "d1"]);
const QUERY_LOG_PREFIX = "[emdash-query-log] ";

/**
 * Resolve once a TCP connection to (host, port) succeeds, or reject on
 * timeout. Uses a raw TCP connect rather than an HTTP request so we
 * don't warm a fresh workerd isolate — workerd initialises the isolate
 * on the first HTTP request, not on TCP accept. This keeps the
 * per-route "cold" measurement genuinely cold on the D1 path.
 */
function waitForPort(host, port, timeoutMs = 120_000) {
	const deadline = Date.now() + timeoutMs;
	return new Promise((resolveReady, rejectReady) => {
		const attempt = () => {
			if (Date.now() > deadline) {
				rejectReady(new Error(`port ${host}:${port} did not open within ${timeoutMs}ms`));
				return;
			}
			const socket = createConnection({ host, port });
			socket.once("connect", () => {
				socket.destroy();
				resolveReady();
			});
			socket.once("error", () => {
				socket.destroy();
				setTimeout(attempt, 100);
			});
		};
		attempt();
	});
}

function parseArgs(argv) {
	const out = { target: "sqlite", update: false, skipBuild: false, skipSeed: false };
	for (let i = 0; i < argv.length; i++) {
		const a = argv[i];
		if (a === "--update") out.update = true;
		else if (a === "--skip-build") out.skipBuild = true;
		else if (a === "--skip-seed") out.skipSeed = true;
		else if (a === "--target") {
			out.target = argv[++i];
		} else if (a.startsWith("--target=")) {
			out.target = a.slice("--target=".length);
		} else {
			throw new Error(`Unknown argument: ${a}`);
		}
	}
	if (!VALID_TARGETS.has(out.target)) {
		throw new Error(`--target must be one of: ${[...VALID_TARGETS].join(", ")}`);
	}
	return out;
}

const { target, update, skipBuild, skipSeed } = parseArgs(process.argv.slice(2));
const snapshotPath = resolve(__dirname, `query-counts.snapshot.${target}.json`);

function resetSqliteState() {
	for (const f of ["data.db", "data.db-wal", "data.db-shm"]) {
		rmSync(resolve(fixtureDir, f), { force: true });
	}
	rmSync(resolve(fixtureDir, "uploads"), { recursive: true, force: true });
}

function resetD1State() {
	rmSync(resolve(fixtureDir, ".wrangler"), { recursive: true, force: true });
}

const buildMarkerPath = resolve(fixtureDir, "dist/.perf-target");

function buildFixture() {
	process.stdout.write(`$ (cd ${fixtureDir}) astro build\n`);
	const r = spawnSync("pnpm", ["exec", "astro", "build"], {
		cwd: fixtureDir,
		stdio: "inherit",
		env: { ...process.env, EMDASH_FIXTURE_TARGET: target },
	});
	if (r.status !== 0) throw new Error("astro build failed");
	writeFileSync(buildMarkerPath, target + "\n");
}

function assertExistingBuildMatchesTarget() {
	if (!existsSync(buildMarkerPath)) {
		throw new Error(
			`--skip-build was passed but dist/.perf-target is missing. Run without --skip-build to produce a build for target "${target}".`,
		);
	}
	const built = readFileSync(buildMarkerPath, "utf8").trim();
	if (built !== target) {
		throw new Error(
			`--skip-build was passed but existing build is for target "${built}", not "${target}". Drop --skip-build (or rebuild) to switch targets.`,
		);
	}
}

// SQLite: seed the file DB via the emdash CLI directly — it runs
// migrations, applies the virtual-module seed, and sets
// `emdash:setup_complete`, all without going through the HTTP layer.
//
// We invoke the CLI entry by absolute path rather than via `pnpm exec
// emdash` so the harness works in CI, where pnpm's bin-linking step
// isn't run (see scripts/relink-bins-if-needed.mjs — it early-exits
// under CI, expecting the CI job to handle bin links, which this job
// intentionally does not).
const emdashCliPath = resolve(repoRoot, "packages/core/dist/cli/index.mjs");

function seedSqliteCli() {
	for (const step of ["init", "seed"]) {
		process.stdout.write(`$ (cd ${fixtureDir}) node <emdash-cli> ${step}\n`);
		const r = spawnSync("node", [emdashCliPath, step], {
			cwd: fixtureDir,
			stdio: "inherit",
			env: { ...process.env, EMDASH_FIXTURE_TARGET: "sqlite" },
		});
		if (r.status !== 0) throw new Error(`emdash ${step} failed`);
	}
}

// D1: the CLI can't reach D1 over the Workers protocol, so we seed by
// running astro dev once (dev-bypass is gated on import.meta.env.DEV
// and is stripped from prod builds) and hitting the dev-bypass endpoint.
// Local D1 state persists in .wrangler/state across dev → preview.
async function seedD1ViaDevBypass(events) {
	process.stdout.write(`--- seeding via astro dev + dev-bypass ---\n`);
	const child = spawn("pnpm", ["exec", "astro", "dev", "--host", HOST, "--port", String(PORT)], {
		cwd: fixtureDir,
		env: {
			...process.env,
			EMDASH_FIXTURE_TARGET: "d1",
			EMDASH_QUERY_LOG: "1",
		},
		stdio: ["ignore", "pipe", "inherit"],
	});

	const rl = createInterface({ input: child.stdout });
	rl.on("line", (line) => {
		const idx = line.indexOf(QUERY_LOG_PREFIX);
		if (idx !== -1) {
			const payload = line.slice(idx + QUERY_LOG_PREFIX.length);
			try {
				events.push(JSON.parse(payload));
			} catch {
				// ignore
			}
			return;
		}
		process.stdout.write(line + "\n");
	});
	const exited = new Promise((res) => child.once("exit", res));

	try {
		await waitForPort(HOST, PORT);
		const r = await fetch(`${BASE}/_emdash/api/setup/dev-bypass`, {
			method: "POST",
			redirect: "manual",
		});
		if (!r.ok) {
			const body = await r.text();
			throw new Error(`dev-bypass failed: ${r.status} ${body.slice(0, 200)}`);
		}
		await r.arrayBuffer();
		process.stdout.write(` seed via dev-bypass -> ${r.status}\n`);
	} finally {
		child.kill("SIGTERM");
		await Promise.race([
			exited,
			new Promise((r) => setTimeout(r, 5_000)).then(() => child.kill("SIGKILL")),
		]);
		await new Promise((r) => setTimeout(r, 250));
	}
}

/**
 * Spawn the prod server for the current target. Returns { ready, stop }.
 *   sqlite: node ./dist/server/entry.mjs (HOST/PORT env)
 *   d1:     astro preview (cloudflare adapter → wrangler dev)
 * `ready` resolves on a successful TCP connection — no HTTP probing,
 * so a fresh workerd isolate stays cold until our first tagged request.
 */
function startServer({ collectedEvents }) {
	let cmd;
	let args;
	if (target === "sqlite") {
		cmd = "node";
		args = ["./dist/server/entry.mjs"];
	} else {
		cmd = "pnpm";
		args = ["exec", "astro", "preview", "--host", HOST, "--port", String(PORT)];
	}

	const child = spawn(cmd, args, {
		cwd: fixtureDir,
		env: {
			...process.env,
			EMDASH_FIXTURE_TARGET: target,
			EMDASH_QUERY_LOG: "1",
			HOST,
			PORT: String(PORT),
		},
		stdio: ["ignore", "pipe", "inherit"],
	});

	const ready = waitForPort(HOST, PORT);

	const rl = createInterface({ input: child.stdout });
	rl.on("line", (line) => {
		const idx = line.indexOf(QUERY_LOG_PREFIX);
		if (idx !== -1) {
			const before = line.slice(0, idx);
			if (before.trim().length > 0) process.stdout.write(before + "\n");
			const payload = line.slice(idx + QUERY_LOG_PREFIX.length);
			try {
				collectedEvents.push(JSON.parse(payload));
			} catch {
				process.stderr.write(`bad query-log line: ${payload}\n`);
			}
			return;
		}
		process.stdout.write(line + "\n");
	});

	const exited = new Promise((res) => child.once("exit", res));
	child.once("error", (err) => {
		process.stderr.write(`server spawn error: ${err.message}\n`);
	});

	async function stop() {
		child.kill("SIGTERM");
		await Promise.race([
			exited,
			new Promise((r) => setTimeout(r, 5_000)).then(() => child.kill("SIGKILL")),
		]);
		// Small pause for the OS to release the port before the next spawn.
		await new Promise((r) => setTimeout(r, 250));
	}

	return { ready, stop };
}

async function hit(method, path, phase) {
	// Tiny retry for the very first hit against a just-spawned wrangler
	// preview — "ready" fires before the HTTP listener actually accepts
	// on some runs. We're not measuring these retry attempts (they're
	// in the "default" phase), just papering over a race.
	let lastErr;
	for (let i = 0; i < 10; i++) {
		try {
			const r = await fetch(`${BASE}${path}`, {
				method,
				headers: { "x-perf-phase": phase },
				redirect: "manual",
			});
			await r.arrayBuffer();
			process.stdout.write(` ${phase.padEnd(5)} ${method} ${path} -> ${r.status}\n`);
			return r.status;
		} catch (err) {
			lastErr = err;
			await new Promise((r) => setTimeout(r, 200));
		}
	}
	throw lastErr;
}

// An untagged hit that triggers runtime init (migrations + auto-seed on
// first boot). Events here land in "default" phase and are filtered out.
async function warmup() {
	const r = await fetch(BASE, { redirect: "manual" });
	await r.arrayBuffer();
	process.stdout.write(` warmup GET / -> ${r.status}\n`);
}

function aggregate(events) {
	const counts = {};
	for (const e of events) {
		if (!TRACKED_PHASES.has(e.phase)) continue;
		const key = `${e.method} ${e.route} (${e.phase})`;
		counts[key] = (counts[key] ?? 0) + 1;
	}
	return Object.fromEntries(Object.entries(counts).toSorted(([a], [b]) => a.localeCompare(b)));
}
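
// The aggregated shape is exactly what the committed snapshots store, e.g.
// (real values from query-counts.snapshot.sqlite.json in this commit):
//   { "GET / (cold)": 11, "GET / (warm)": 11, "GET /posts (cold)": 11, ... }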

function diffSnapshot(actual) {
	if (!existsSync(snapshotPath)) {
		process.stderr.write(`No snapshot at ${snapshotPath}. Run with --update to create one.\n`);
		return 1;
	}
	const expected = JSON.parse(readFileSync(snapshotPath, "utf8"));
	const keys = [...new Set([...Object.keys(expected), ...Object.keys(actual)])].toSorted();
	const diffs = [];
	for (const k of keys) {
		if (expected[k] !== actual[k]) {
			diffs.push({ key: k, expected: expected[k], actual: actual[k] });
		}
	}
	if (diffs.length === 0) {
		process.stdout.write(`OK: query counts match ${snapshotPath}\n`);
		return 0;
	}
	process.stderr.write(`Query counts differ from ${snapshotPath}:\n`);
	for (const d of diffs) {
		const e = d.expected ?? "(missing)";
		const a = d.actual ?? "(missing)";
		process.stderr.write(` ${d.key}: expected=${e} actual=${a}\n`);
	}
	process.stderr.write(
		`\nIf the change is intentional, run: node scripts/query-counts.mjs --target ${target} --update\n`,
	);
	return 1;
}
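
// On a mismatch the stderr report looks like (numbers illustrative):
//   Query counts differ from .../query-counts.snapshot.sqlite.json:
//    GET /posts (cold): expected=11 actual=13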

// SQLite: seed the file DB via CLI, build, then run one long-lived node
// entry. Warmup hit absorbs runtime init queries (filtered as "default"
// phase). Tagged cold = first visit to route (runtime warm); warm = second.
async function runSqlite(events) {
	if (!skipSeed) {
		resetSqliteState();
		seedSqliteCli();
	}
	if (skipBuild) assertExistingBuildMatchesTarget();
	else buildFixture();
	const server = startServer({ collectedEvents: events });
	try {
		await server.ready;
		await warmup();
		for (const [m, p] of ROUTES) await hit(m, p, "cold");
		for (const [m, p] of ROUTES) await hit(m, p, "warm");
	} finally {
		await server.stop();
	}
}

// D1: seed via dev-bypass (dev mode only — dev-bypass is stripped from
// prod builds), then build the worker, then for each route spin up a
// fresh `astro preview` (cloudflare adapter runs wrangler dev). The
// first tagged hit lands on a genuinely cold workerd isolate; the
// second hit shares that isolate.
//
// Seed must precede build: `astro dev` leaves `.wrangler/deploy/`
// without the build-time `config.json` that `astro preview` requires,
// so building afterwards is what makes the subsequent previews work.
async function runD1(events) {
	if (!skipSeed) {
		resetD1State();
		// seeding uses its own event sink; we don't want to commingle
		// those with the measurement events (they're all "default" phase
		// anyway, but keeping them separate is tidier).
		await seedD1ViaDevBypass([]);
	}
	if (skipBuild) assertExistingBuildMatchesTarget();
	else buildFixture();

	for (const [m, p] of ROUTES) {
		process.stdout.write(`--- fresh isolate for ${m} ${p} ---\n`);
		const server = startServer({ collectedEvents: events });
		try {
			await server.ready;
			await hit(m, p, "cold");
			await hit(m, p, "warm");
		} finally {
			await server.stop();
		}
	}
}

async function main() {
	const events = [];
	if (target === "sqlite") await runSqlite(events);
	else await runD1(events);

	const counts = aggregate(events);
	if (update) {
		// Use tab indent so the output matches oxfmt's default and
		// doesn't thrash under `pnpm format`. Space-indented output
		// would be reformatted to tabs by the formatter, producing
		// a false-positive "drift" signal in CI (the raw harness
		// output wouldn't match the committed file).
		writeFileSync(snapshotPath, JSON.stringify(counts, null, "\t") + "\n");
		process.stdout.write(`Wrote ${Object.keys(counts).length} entries to ${snapshotPath}\n`);
		return 0;
	}
	return diffSnapshot(counts);
}

main()
	.then((code) => process.exit(code ?? 0))
	.catch((err) => {
		process.stderr.write(`${err.stack ?? err.message ?? err}\n`);
		process.exit(1);
	});
scripts/query-counts.snapshot.d1.json (Normal file, 18 lines)
@@ -0,0 +1,18 @@
{
	"GET / (cold)": 21,
	"GET / (warm)": 11,
	"GET /category/development (cold)": 25,
	"GET /category/development (warm)": 14,
	"GET /pages/about (cold)": 20,
	"GET /pages/about (warm)": 10,
	"GET /posts (cold)": 21,
	"GET /posts (warm)": 11,
	"GET /posts/building-for-the-long-term (cold)": 32,
	"GET /posts/building-for-the-long-term (warm)": 22,
	"GET /rss.xml (cold)": 13,
	"GET /rss.xml (warm)": 4,
	"GET /search (cold)": 22,
	"GET /search (warm)": 12,
	"GET /tag/webdev (cold)": 25,
	"GET /tag/webdev (warm)": 14
}
scripts/query-counts.snapshot.sqlite.json (Normal file, 18 lines)
@@ -0,0 +1,18 @@
{
	"GET / (cold)": 11,
	"GET / (warm)": 11,
	"GET /category/development (cold)": 15,
	"GET /category/development (warm)": 14,
	"GET /pages/about (cold)": 10,
	"GET /pages/about (warm)": 10,
	"GET /posts (cold)": 11,
	"GET /posts (warm)": 11,
	"GET /posts/building-for-the-long-term (cold)": 23,
	"GET /posts/building-for-the-long-term (warm)": 23,
	"GET /rss.xml (cold)": 4,
	"GET /rss.xml (warm)": 4,
	"GET /search (cold)": 12,
	"GET /search (warm)": 12,
	"GET /tag/webdev (cold)": 14,
	"GET /tag/webdev (warm)": 14
}
scripts/query-dumps/README.md (Normal file, 21 lines)
@@ -0,0 +1,21 @@
# Query dumps for the perf fixture

Tooling for slicing the per-route × phase query dumps captured by `scripts/query-counts-dump.mjs`. Useful when investigating where queries are coming from on a specific route — the catalogue this produced drove the perf reductions in PRs #838, #839, #840.

## Layout

- `sqlite/`, `d1/` — generated dump JSON, one file per route × phase. Gitignored. Regenerate with `scripts/query-counts-dump.mjs --target {sqlite|d1}`.
- `classification.{sqlite,d1}.md` — generated reports from `classify.mjs`. Gitignored — point-in-time snapshots that go stale on every code change.
- `classify.mjs <target>` — produces the classification table from the dumps.
- `cold-only.mjs` — diffs cold vs warm in the d1 dumps to surface the cold-isolate startup tax.
- `inspect-other.mjs <target> <class>` — prints distinct SQL for a class.

Each dump `*.json` is an array of `{ sql, params, durationMs, route, method, phase }`. `_all.json` is the un-grouped feed.
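
A freshly generated target directory looks like this (slugs come from the harness's `routeSlug()`, e.g. `/` becomes `root`):

```
sqlite/
	_all.json
	root.cold.json
	root.warm.json
	posts.cold.json
	posts.warm.json
	...
```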

## Workflow

```
node scripts/query-counts.mjs --target sqlite       # build + seed + run main harness
node scripts/query-counts-dump.mjs --target sqlite  # capture per-query dumps
node scripts/query-dumps/classify.mjs sqlite        # write classification.sqlite.md
```
scripts/query-dumps/classify.mjs (Normal file, 173 lines)
@@ -0,0 +1,173 @@
#!/usr/bin/env node
/**
 * Analyse the per-route query dumps and classify each query.
 */
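// Two worked examples against classify() below (SQL strings illustrative):
//   classify('select * from "options" where "name" = ?', ["site_title"])
//     -> "option:site_title"
//   classify('select * from "_emdash_menus" where "id" = ?', [3])
//     -> "menu_lookup"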
import { readFileSync, readdirSync, writeFileSync } from "node:fs";
import { dirname, resolve } from "node:path";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));

function classify(sql, params) {
	const s = sql.replace(/\s+/g, " ").trim();
	// Migrations / system
	if (/pragma_table_info/i.test(s)) return "pragma_table_info";
	if (/sqlite_master/i.test(s)) return "sqlite_master";
	if (/PRAGMA/i.test(s)) return "pragma";
	if (/from "kysely_migration"/i.test(s)) return "migrations_check";
	if (/from "_emdash_migrations_lock"/i.test(s)) return "migrations_lock";
	if (/from "_emdash_migrations"/i.test(s)) return "migrations_check";
	if (/from "_emdash_collections"/i.test(s)) return "schema_collections";
	if (/from "_emdash_fields"/i.test(s)) return "schema_fields";
	if (/from "_emdash_setup_state"/i.test(s)) return "setup_check";
	if (/from "_plugin_state"/i.test(s)) return "plugin_state";
	if (/from "_emdash_cron_tasks"/i.test(s) || /_emdash_cron_tasks SET/i.test(s))
		return "cron_recovery";
	if (/_emdash_404_log/i.test(s)) return "404_log_migration";
	if (/alter table/i.test(s)) return "ddl_alter";
	if (/create table/i.test(s)) return "ddl_create";
	if (/create.*index/i.test(s)) return "ddl_index";
	if (/drop index/i.test(s)) return "ddl_drop_index";
	if (/insert into "_emdash_migrations"/i.test(s)) return "migrations_record";
	if (/delete from "options"/i.test(s)) return "options_delete";
	if (/SELECT name FROM sqlite_master/i.test(s)) return "fts_table_check";
	// Auth
	if (/from "_emdash_sessions"/i.test(s)) return "auth_session";
	if (/from "_emdash_users"/i.test(s)) return "auth_user_lookup";
	if (/from "_emdash_passkeys"/i.test(s)) return "auth_passkey";
	// Settings/options
	if (/from "options"/i.test(s) && /LIKE/i.test(s)) {
		const p0 = params?.[0];
		if (typeof p0 === "string") return `options_prefix:${p0}`;
		return "options_prefix";
	}
	if (/from "options"/i.test(s) && /"name" in/i.test(s)) {
		return "options_in";
	}
	if (/from "options"/i.test(s) && /"name" = \?/i.test(s)) {
		const p0 = params?.[0];
		if (typeof p0 === "string") return `option:${p0}`;
		return "option:single";
	}
	if (/from "options"/i.test(s)) return "option:other";
	// Menus / widgets
	if (/from "_emdash_menus"/i.test(s)) return "menu_lookup";
	if (/from "_emdash_menu_items"/i.test(s)) return "menu_items";
	if (/from "_emdash_widget_areas"/i.test(s)) {
		const p0 = params?.[0];
		return `widget_area:${p0 ?? ""}`;
	}
	if (/from "_emdash_widgets"/i.test(s)) return "widget";
	// Bylines
	if (/from "_emdash_content_bylines"/i.test(s)) return "byline_hydration";
	if (/from "_emdash_bylines"/i.test(s)) return "byline_lookup";
	// Taxonomies
	if (/from "_emdash_taxonomy_defs"/i.test(s)) return "taxonomy_defs";
	if (/from "content_taxonomies"/i.test(s) && /count\(/i.test(s)) return "taxonomy_counts";
	if (/from "content_taxonomies"/i.test(s)) return "taxonomy_for_entries";
	if (/from "taxonomies"/i.test(s)) return "taxonomy_terms";
	// Author lookup (id, author_id)
	if (/SELECT id, author_id FROM "ec_/i.test(s)) return "author_id_lookup";
	// Content tables
	const ecMatch = s.match(/from "ec_([a-z_]+)"/i) || s.match(/FROM "ec_([a-z_]+)"/);
	if (ecMatch) {
		const coll = ecMatch[1];
		// detail vs list
		if (/slug = \?/i.test(s) && /id = \?/i.test(s)) return `entry_by_slug:${coll}`;
		if (/where "id" = \?/i.test(s)) return `entry_by_id:${coll}`;
		if (/LIMIT \?/i.test(s)) return `collection_list:${coll}`;
		if (/ORDER BY/i.test(s)) return `collection_list:${coll}`;
		return `collection_other:${coll}`;
	}
	// Media
	if (/from "_emdash_media"/i.test(s)) return "media_lookup";
	// Plugins / pages dispatch
	if (/from "_emdash_plugin/i.test(s)) return "plugin_lookup";
	// SEO redirects
	if (/from "_emdash_redirects"/i.test(s)) return "redirects";
	// Comments
	if (/from "_emdash_comments"/i.test(s)) return "comments";
	// Default
	return "other";
}

const targetArg = process.argv[2] || "sqlite";
const dir = resolve(__dirname, targetArg);
const files = readdirSync(dir).filter((f) => f.endsWith(".json") && f !== "_all.json");

// per-route classification table
const tables = {}; // { routePhase: { className: count } }
const allByClass = {}; // { className: count }
const totalDuration = {}; // { className: ms }

const routeOrder = [
	"root",
	"posts",
	"posts_building_for_the_long_term",
	"pages_about",
	"category_development",
	"tag_webdev",
	"rss_xml",
	"search",
];

const phases = ["cold", "warm"];

for (const f of files) {
	const path = resolve(dir, f);
	const events = JSON.parse(readFileSync(path, "utf8"));
	const key = f.replace(/\.json$/, "");
	tables[key] = {};
	for (const e of events) {
		const cls = classify(e.sql, e.params);
		tables[key][cls] = (tables[key][cls] || 0) + 1;
		allByClass[cls] = (allByClass[cls] || 0) + 1;
		totalDuration[cls] = (totalDuration[cls] || 0) + (e.durationMs || 0);
	}
}

// print table: rows = classes, cols = route.phase
const headerRoutes = [];
for (const r of routeOrder) for (const p of phases) headerRoutes.push(`${r}.${p}`);

const allClasses = Object.keys(allByClass).toSorted(
	(a, b) => (allByClass[b] || 0) - (allByClass[a] || 0),
);

let out = `# Query classification (${targetArg})\n\n`;
out += `Total events: ${Object.values(allByClass).reduce((a, b) => a + b, 0)}\n\n`;

out += `## Top classes by total count\n\n`;
out += `| class | count | total_ms |\n|---|---:|---:|\n`;
for (const c of allClasses.slice(0, 20)) {
	out += `| ${c} | ${allByClass[c]} | ${totalDuration[c].toFixed(2)} |\n`;
}
out += "\n";

out += `## Per-route × phase classification\n\n`;
out += `| class |`;
for (const h of headerRoutes) out += ` ${h} |`;
out += ` total |\n|---|`;
for (const _ of headerRoutes) out += "---:|";
out += "---:|\n";
for (const c of allClasses) {
	out += `| ${c} |`;
	let total = 0;
	for (const h of headerRoutes) {
		const v = tables[h]?.[c] || 0;
		total += v;
		out += ` ${v || ""} |`;
	}
	out += ` ${total} |\n`;
}
out += "\n";

out += `## Per-route totals\n\n`;
out += `| route.phase | count |\n|---|---:|\n`;
for (const h of headerRoutes) {
	const total = Object.values(tables[h] || {}).reduce((a, b) => a + b, 0);
	out += `| ${h} | ${total} |\n`;
}

writeFileSync(resolve(__dirname, `classification.${targetArg}.md`), out);
process.stdout.write(out);
scripts/query-dumps/cold-only.mjs (Normal file, 55 lines)
@@ -0,0 +1,55 @@
#!/usr/bin/env node
/**
 * Identify queries that fire on cold but not warm in the d1 target —
 * the cold-isolate startup tax.
 */
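// The accumulation below is a per-route multiset subtraction: if a
// normalized statement fires, say, 3 times in a route's cold dump but
// only once in its warm dump, it contributes 2 to that statement's
// cold-only tally.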
import { readFileSync, readdirSync } from "node:fs";
import { dirname, resolve } from "node:path";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const dir = resolve(__dirname, "d1");

function normalize(sql) {
	return sql.replace(/\s+/g, " ").trim();
}

const routeOrder = [
	"root",
	"posts",
	"posts_building_for_the_long_term",
	"pages_about",
	"category_development",
	"tag_webdev",
	"rss_xml",
	"search",
];

const coldOnlyAcc = new Map(); // sql -> count

for (const r of routeOrder) {
	const coldFile = resolve(dir, `${r}.cold.json`);
	const warmFile = resolve(dir, `${r}.warm.json`);
	const cold = JSON.parse(readFileSync(coldFile, "utf8"));
	const warm = JSON.parse(readFileSync(warmFile, "utf8"));
	const warmSet = new Map();
	for (const e of warm) warmSet.set(normalize(e.sql), (warmSet.get(normalize(e.sql)) || 0) + 1);
	for (const e of cold) {
		const n = normalize(e.sql);
		const wcount = warmSet.get(n) || 0;
		if (wcount > 0) {
			warmSet.set(n, wcount - 1);
		} else {
			coldOnlyAcc.set(n, (coldOnlyAcc.get(n) || 0) + 1);
		}
	}
}

const sorted = [...coldOnlyAcc.entries()].sort((a, b) => b[1] - a[1]);
process.stdout.write(
	`# Cold-only queries (d1)\n\nQueries that appear in cold-phase dumps but not in matching warm-phase dumps. Aggregated across all routes.\n\n`,
);
process.stdout.write(`| count | sql |\n|---:|---|\n`);
for (const [sql, count] of sorted) {
	process.stdout.write(`| ${count} | ${sql.slice(0, 200)} |\n`);
}
scripts/query-dumps/inspect-other.mjs (Normal file, 73 lines)
@@ -0,0 +1,73 @@
#!/usr/bin/env node
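// Usage (see query-dumps/README.md): node scripts/query-dumps/inspect-other.mjs <target> <class>
//   e.g. node scripts/query-dumps/inspect-other.mjs sqlite other
// Target defaults to "sqlite" and class to "other"; prints each distinct
// normalized SQL statement in the class once, with its first-seen params.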
import { readFileSync, readdirSync } from "node:fs";
import { dirname, resolve } from "node:path";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));

function classify(sql, params) {
	const s = sql.replace(/\s+/g, " ").trim();
	if (/PRAGMA/i.test(s)) return "pragma";
	if (/from "kysely_migration"/i.test(s)) return "migrations_check";
	if (/from "_emdash_collections"/i.test(s)) return "schema_collections";
	if (/from "_emdash_fields"/i.test(s)) return "schema_fields";
	if (/from "_emdash_sessions"/i.test(s)) return "auth_session";
	if (/from "_emdash_users"/i.test(s)) return "auth_user_lookup";
	if (/from "_emdash_passkeys"/i.test(s)) return "auth_passkey";
	if (/from "options"/i.test(s) && /name = \?/i.test(s)) {
		const p0 = params?.[0];
		if (typeof p0 === "string") return `option:${p0}`;
		return "option:single";
	}
	if (/from "options"/i.test(s) && /LIKE/i.test(s)) {
		const p0 = params?.[0];
		if (typeof p0 === "string") return `options_prefix:${p0}`;
		return "options_prefix";
	}
	if (/from "_emdash_menus"/i.test(s)) return "menu_lookup";
	if (/from "_emdash_menu_items"/i.test(s)) return "menu_items";
	if (/from "_emdash_widget_areas"/i.test(s)) {
		const p0 = params?.[0];
		return `widget_area:${p0 ?? ""}`;
	}
	if (/from "_emdash_widgets"/i.test(s)) return "widget";
	if (/from "_emdash_content_bylines"/i.test(s)) return "byline_hydration";
	if (/from "_emdash_bylines"/i.test(s)) return "byline_lookup";
	if (/from "_emdash_taxonomy_defs"/i.test(s)) return "taxonomy_defs";
	if (/from "content_taxonomies"/i.test(s) && /count\(/i.test(s)) return "taxonomy_counts";
	if (/from "content_taxonomies"/i.test(s)) return "taxonomy_for_entries";
	if (/from "taxonomies"/i.test(s)) return "taxonomy_terms";
	if (/SELECT id, author_id FROM "ec_/i.test(s)) return "author_id_lookup";
	const ecMatch = s.match(/from "ec_([a-z_]+)"/i) || s.match(/FROM "ec_([a-z_]+)"/);
	if (ecMatch) {
		const coll = ecMatch[1];
		if (/slug = \?/i.test(s) && /id = \?/i.test(s)) return `entry_by_slug:${coll}`;
		if (/where "id" = \?/i.test(s)) return `entry_by_id:${coll}`;
		if (/LIMIT \?/i.test(s)) return `collection_list:${coll}`;
		if (/ORDER BY/i.test(s)) return `collection_list:${coll}`;
		return `collection_other:${coll}`;
	}
	if (/from "_emdash_media"/i.test(s)) return "media_lookup";
	if (/from "_emdash_plugin/i.test(s)) return "plugin_lookup";
	if (/from "_emdash_redirects"/i.test(s)) return "redirects";
	if (/from "_emdash_comments"/i.test(s)) return "comments";
	return "other";
}

const targetArg = process.argv[2] || "sqlite";
const wanted = process.argv[3] || "other";
const dir = resolve(__dirname, targetArg);
const files = readdirSync(dir).filter((f) => f.endsWith(".json") && f !== "_all.json");
const seen = new Set();
for (const f of files) {
	const events = JSON.parse(readFileSync(resolve(dir, f), "utf8"));
	for (const e of events) {
		const cls = classify(e.sql, e.params);
		if (cls !== wanted) continue;
		const norm = e.sql.replace(/\s+/g, " ").trim();
		if (!seen.has(norm)) {
			seen.add(norm);
			process.stdout.write(`--- ${f}\n${norm}\nparams: ${JSON.stringify(e.params)}\n\n`);
		}
	}
}
scripts/relink-bins-if-needed.mjs (Normal file, 24 lines)
@@ -0,0 +1,24 @@
#!/usr/bin/env node
// Relink workspace bin symlinks after build, but only when needed.
// pnpm only creates bin links when the target file exists at install time.
// Since the CLI lives in dist/, it doesn't exist until after the first build.

import { execSync } from "node:child_process";
import { existsSync } from "node:fs";

// Skip in CI — bins are handled by the CI setup step
if (process.env.CI) process.exit(0);

const CLI_SRC = "packages/core/dist/cli/index.mjs";

// If the built CLI doesn't exist, the build itself failed — nothing to relink
if (!existsSync(CLI_SRC)) process.exit(0);

const binDir = execSync("pnpm bin", { encoding: "utf-8" }).trim();
const cliBin = `${binDir}/emdash`;

// If the bin symlink is missing or broken, relink
if (!existsSync(cliBin)) {
	console.log("CLI bin missing — relinking...");
	execSync("pnpm install --frozen-lockfile", { stdio: "inherit" });
}
scripts/screenshot-all-templates.mjs (Executable file, 313 lines)
@@ -0,0 +1,313 @@
#!/usr/bin/env node

/**
 * Screenshot all templates by starting each dev server, capturing screenshots, and stopping.
 *
 * Usage:
 *   node scripts/screenshot-all-templates.mjs [template...]
 *   node scripts/screenshot-all-templates.mjs                  # all templates
 *   node scripts/screenshot-all-templates.mjs blog             # just blog
 *   node scripts/screenshot-all-templates.mjs blog marketing   # blog and marketing
 */

import { spawn, execSync } from "node:child_process";
import { readFileSync } from "node:fs";
import { join, dirname } from "node:path";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const ROOT = join(__dirname, "..");

const TEMPLATES = {
	blog: { dir: "templates/blog", port: 4321 },
	marketing: { dir: "templates/marketing", port: 4322 },
	portfolio: { dir: "templates/portfolio", port: 4323 },
};

function loadConfig() {
	const configPath = join(ROOT, "templates", "screenshots.json");
	return JSON.parse(readFileSync(configPath, "utf-8"));
}

/** Check if server is responding via HTTP */
async function isServerReady(port) {
	try {
		const controller = new AbortController();
		const timeout = setTimeout(() => controller.abort(), 2000);
		const response = await fetch(`http://localhost:${port}/`, {
			signal: controller.signal,
		});
		clearTimeout(timeout);
		return response.ok || response.status === 404; // 404 is fine, server is up
	} catch {
		return false;
	}
}

/** Wait for server to respond, with timeout */
async function waitForServer(port, timeoutMs = 60000) {
	const start = Date.now();
	while (Date.now() - start < timeoutMs) {
		if (await isServerReady(port)) {
			return true;
		}
		await new Promise((r) => setTimeout(r, 500));
	}
	return false;
}

/** Check if port has processes (via lsof) */
function hasProcessOnPort(port) {
	try {
		const result = execSync(`lsof -ti tcp:${port} 2>/dev/null || true`, { encoding: "utf-8" });
		return result.trim().length > 0;
	} catch {
		return false;
	}
}

/** Wait for port to be free */
async function waitForPortFree(port, timeoutMs = 10000) {
	const start = Date.now();
	while (Date.now() - start < timeoutMs) {
		if (!hasProcessOnPort(port)) {
			return true;
		}
		await new Promise((r) => setTimeout(r, 200));
	}
	return false;
}

/** Kill all processes listening on a port (macOS/Linux) */
function killProcessesOnPort(port) {
	try {
		// Get PIDs listening on this port
		const result = execSync(`lsof -ti tcp:${port} 2>/dev/null || true`, { encoding: "utf-8" });
		const pids = result
			.trim()
			.split("\n")
			.filter((p) => p);

		for (const pid of pids) {
			try {
				process.kill(Number(pid), "SIGTERM");
			} catch {
				// Process may already be dead
			}
		}

		// If still running after 2s, force kill
		if (pids.length > 0) {
			setTimeout(() => {
				for (const pid of pids) {
					try {
						process.kill(Number(pid), "SIGKILL");
					} catch {
						// Process may already be dead
					}
				}
			}, 2000);
		}
	} catch {
		// lsof may not be available or no processes found
	}
}

function startDevServer(templateDir, port) {
	return new Promise((resolve, reject) => {
		// Run astro dev directly in the template directory
		const proc = spawn("pnpm", ["exec", "astro", "dev", "--port", String(port)], {
			cwd: join(ROOT, templateDir),
			stdio: ["ignore", "pipe", "pipe"],
			detached: false,
		});

		let output = "";

		const onData = (data) => {
			output += data.toString();
			process.stdout.write(data); // Show output for debugging
		};

		proc.stdout.on("data", onData);
		proc.stderr.on("data", onData);

		proc.on("error", (err) => {
			reject(err);
		});

		proc.on("exit", (code) => {
			// If process exits before we resolve, that's an error
			reject(new Error(`Dev server exited with code ${code}`));
		});

		// Wait for server to respond via HTTP
		waitForServer(port, 60000)
			.then((ready) => {
				if (ready) {
					// Remove exit handler since we're resolving successfully
					proc.removeAllListeners("exit");
					// Re-add a silent exit handler
					proc.on("exit", () => {});
					resolve({ proc, port });
				} else {
					proc.kill();
					reject(new Error(`Timeout waiting for server on port ${port}`));
				}
				return undefined;
			})
			.catch(reject);
	});
}

function runScreenshots(template, url) {
	return new Promise((resolve, reject) => {
		const proc = spawn("node", [join(ROOT, "scripts", "screenshot-templates.mjs"), template, url], {
			cwd: ROOT,
			stdio: "inherit",
		});

		proc.on("error", reject);
		proc.on("exit", (code) => {
			if (code === 0) {
				resolve();
			} else {
				reject(new Error(`Screenshot script exited with code ${code}`));
			}
		});
	});
}

async function stopDevServer({ proc, port }) {
	// Kill the process tree
	try {
		proc.kill("SIGTERM");
	} catch {
		// May already be dead
	}

	// Also kill anything on the port (catches child processes)
	killProcessesOnPort(port);

	// Wait for port to actually be free
	const closed = await waitForPortFree(port, 10000);
	if (!closed) {
		console.warn(`Warning: Port ${port} still in use after stopping server`);
		// Force kill anything still on the port
		killProcessesOnPort(port);
		await waitForPortFree(port, 5000);
	}
}

/** Run bootstrap (reset db and seed) for a template */
async function bootstrapTemplate(templateDir) {
	return new Promise((resolve, reject) => {
		console.log(`Bootstrapping ${templateDir}...`);
		const proc = spawn("pnpm", ["bootstrap"], {
			cwd: join(ROOT, templateDir),
			stdio: "inherit",
		});

		proc.on("error", reject);
		proc.on("exit", (code) => {
			if (code === 0) {
				resolve();
			} else {
				reject(new Error(`Bootstrap exited with code ${code}`));
			}
		});
	});
}

async function processTemplate(template) {
	const config = TEMPLATES[template];
	if (!config) {
		console.error(`Unknown template: ${template}`);
		console.error(`Available: ${Object.keys(TEMPLATES).join(", ")}`);
		return false;
	}

	const screenshotsConfig = loadConfig();
	if (!screenshotsConfig[template]) {
		console.error(`No screenshot config for template: ${template}`);
		return false;
	}

	// Make sure port is free before starting
	if (hasProcessOnPort(config.port)) {
		console.log(`Port ${config.port} is in use, killing existing processes...`);
		killProcessesOnPort(config.port);
		await waitForPortFree(config.port, 5000);
	}

	console.log(`\n${"=".repeat(60)}`);
	console.log(`${template} (${config.dir})`);
	console.log("=".repeat(60));

	let server;
	try {
		// Bootstrap first to ensure database has seed content
		await bootstrapTemplate(config.dir);

		console.log(`Starting dev server...`);
		server = await startDevServer(config.dir, config.port);
		console.log(`Dev server ready at http://localhost:${config.port}\n`);

		await runScreenshots(template, `http://localhost:${config.port}`);
		return true;
	} catch (err) {
		console.error(`Failed to process ${template}:`, err.message);
		return false;
	} finally {
		if (server) {
			console.log(`Stopping ${template} dev server...`);
			await stopDevServer(server);
			// Extra pause to ensure cleanup
			await new Promise((r) => setTimeout(r, 1000));
		}
	}
}

async function run() {
	const args = process.argv.slice(2);
	const templates = args.length > 0 ? args : Object.keys(TEMPLATES);

	// Validate all templates first
	for (const template of templates) {
		if (!TEMPLATES[template]) {
			console.error(`Unknown template: ${template}`);
			console.error(`Available: ${Object.keys(TEMPLATES).join(", ")}`);
			process.exit(1);
		}
	}

	console.log(`\nScreenshotting templates: ${templates.join(", ")}`);

	const results = { success: [], failed: [] };

	for (const template of templates) {
		const success = await processTemplate(template);
		if (success) {
			results.success.push(template);
		} else {
			results.failed.push(template);
		}
	}

	console.log(`\n${"=".repeat(60)}`);
	console.log("Summary");
	console.log("=".repeat(60));

	if (results.success.length > 0) {
		console.log(`Succeeded: ${results.success.join(", ")}`);
	}
	if (results.failed.length > 0) {
		console.log(`Failed: ${results.failed.join(", ")}`);
		process.exit(1);
	}
}

run().catch((err) => {
	console.error(err);
	process.exit(1);
});
scripts/screenshot-templates.mjs (Executable file, 147 lines)
@@ -0,0 +1,147 @@
#!/usr/bin/env node

/**
 * Screenshot template pages at desktop + mobile breakpoints, light + dark mode.
 *
 * Usage:
 *   node scripts/screenshot-templates.mjs <template> <url>
 *   node scripts/screenshot-templates.mjs blog http://localhost:4321
 *
 * Reads page definitions from templates/screenshots.json.
 * Outputs JPEG screenshots to assets/templates/<template>/<datetime>/
 * and copies the folder to assets/templates/<template>/latest/.
 */

import { readFileSync, mkdirSync, cpSync, rmSync, existsSync } from "node:fs";
import { join, dirname } from "node:path";
import { fileURLToPath } from "node:url";

import { chromium } from "@playwright/test";

const __dirname = dirname(fileURLToPath(import.meta.url));
const ROOT = join(__dirname, "..");

const BREAKPOINTS = {
	desktop: { width: 1440, height: 900 },
	mobile: { width: 390, height: 844 },
};

const COLOR_SCHEMES = ["light", "dark"];
const JPEG_QUALITY = 80;

// JS to hide the EmDash toolbar (the visual editing toolbar injected in dev mode)
const HIDE_TOOLBAR_JS = `
document.querySelector("[data-emdash-toolbar]")?.remove();
`;

function loadConfig() {
	const configPath = join(ROOT, "templates", "screenshots.json");
	return JSON.parse(readFileSync(configPath, "utf-8"));
}

const pad = (n) => String(n).padStart(2, "0");

function timestamp() {
	const d = new Date();
	return `${d.getFullYear()}-${pad(d.getMonth() + 1)}-${pad(d.getDate())}-${pad(d.getHours())}${pad(d.getMinutes())}${pad(d.getSeconds())}`;
}
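
// e.g. timestamp() -> "2025-01-31-142305", giving output folders like
// assets/templates/blog/2025-01-31-142305/ (date and time illustrative).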
|
||||

async function screenshotTemplate(browser, baseUrl, pages, outDir) {
  const files = [];
  let failures = 0;

  for (const [breakpointName, viewport] of Object.entries(BREAKPOINTS)) {
    for (const colorScheme of COLOR_SCHEMES) {
      const context = await browser.newContext({
        viewport,
        colorScheme,
        deviceScaleFactor: 2,
      });
      const page = await context.newPage();

      for (const [pageName, pagePath] of Object.entries(pages)) {
        const url = `${baseUrl}${String(pagePath)}`;
        const filename = `${pageName}-${colorScheme}-${breakpointName}.jpg`;
        const filepath = join(outDir, filename);

        process.stdout.write(` ${pageName} ${colorScheme} ${breakpointName}...`);

        try {
          await page.goto(url, { waitUntil: "networkidle" });
          await page.evaluate(HIDE_TOOLBAR_JS);
          await page.evaluate(() => window.scrollTo(0, 0));
          // let lazy images and fonts settle after load
          await page.waitForTimeout(500);
          await page.screenshot({
            path: filepath,
            type: "jpeg",
            quality: JPEG_QUALITY,
          });
          files.push(filepath);
          process.stdout.write(" done\n");
        } catch (err) {
          failures++;
          const msg = err instanceof Error ? err.message : String(err);
          process.stdout.write(` FAILED: ${msg}\n`);
        }
      }

      await context.close();
    }
  }

  return { files, failures };
}
async function run() {
  const args = process.argv.slice(2);

  if (args.length < 2) {
    console.error("Usage: node scripts/screenshot-templates.mjs <template> <url>");
    console.error(" e.g. node scripts/screenshot-templates.mjs blog http://localhost:4321");
    process.exit(1);
  }

  const [template, baseUrl] = args;
  const config = loadConfig();

  if (!config[template]) {
    console.error(`Unknown template: ${template}`);
    console.error(`Available: ${Object.keys(config).join(", ")}`);
    process.exit(1);
  }

  const { pages } = config[template];
  const ts = timestamp();
  const outDir = join(ROOT, "assets", "templates", template, ts);
  mkdirSync(outDir, { recursive: true });

  console.log(`\n${template} → ${outDir}`);

  const browser = await chromium.launch();
  let result;

  try {
    result = await screenshotTemplate(browser, baseUrl, pages, outDir);
  } finally {
    await browser.close();
  }

  if (result.failures > 0) {
    console.error(`\n${result.failures} screenshot(s) failed. Skipping latest/ update.`);
    process.exit(1);
  }

  // copy to latest/
  const latestDir = join(ROOT, "assets", "templates", template, "latest");
  if (existsSync(latestDir)) rmSync(latestDir, { recursive: true });
  cpSync(outDir, latestDir, { recursive: true });

  console.log(` → copied to latest/`);
  console.log(`\n${result.files.length} screenshots captured.`);
}

run().catch((err) => {
  console.error(err);
  process.exit(1);
});
163
scripts/sync-blog-demos.sh
Executable file
@@ -0,0 +1,163 @@
#!/bin/bash
#
# Sync demos that should match the blog templates exactly.
#
# Deliberately custom demos (not synced here):
#   - demos/plugins-demo (plugin API/hook coverage)
#
# Demos with custom runtime/config but shared visual template:
#   - demos/cloudflare (kitchen sink Cloudflare features)
#   - demos/playground (playground-specific runtime wiring)
#   - demos/preview (preview DB workflow)
#   - demos/postgres (Postgres adapter coverage)
#
# Usage: ./scripts/sync-blog-demos.sh
#
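
# Example run (output illustrative):
#   $ ./scripts/sync-blog-demos.sh
#   Syncing blog -> simple
#     Copied directory: src
#     Copied file: astro.config.mjs
#   Syncing frontend blog-cloudflare -> cloudflare
#   ...
#   Done!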

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
TEMPLATES_DIR="$ROOT_DIR/templates"
DEMOS_DIR="$ROOT_DIR/demos"

# Files/directories to sync from template to demo.
# Intentionally excludes package.json so demo package identity/scripts stay stable.
SYNC_ITEMS=(
  "src"
  "public"
  "seed"
  "astro.config.mjs"
  "tsconfig.json"
  "emdash-env.d.ts"
  ".gitignore"
)

# Mapping of template -> demo for demos that should track templates verbatim.
DEMO_PAIRS=(
  "blog:simple"
)

# Mapping of template -> demo for demos that should share the template frontend
# while keeping runtime-specific config/entry files.
FRONTEND_PAIRS=(
  "blog-cloudflare:cloudflare"
  "blog-cloudflare:preview"
  "blog:playground"
  "blog:postgres"
)

sync_demo() {
  local template="$1"
  local demo="$2"
  local template_dir="$TEMPLATES_DIR/$template"
  local demo_dir="$DEMOS_DIR/$demo"

  if [[ ! -d "$template_dir" ]]; then
    echo " Skipping: $template (template not found)"
    return
  fi

  if [[ ! -d "$demo_dir" ]]; then
    echo " Skipping: $demo (demo not found)"
    return
  fi

  echo "Syncing $template -> $demo"

  for item in "${SYNC_ITEMS[@]}"; do
    local src="$template_dir/$item"
    local dest="$demo_dir/$item"

    if [[ ! -e "$src" ]]; then
      continue
    fi

    if [[ -L "$dest" ]]; then
      rm "$dest"
    elif [[ -d "$dest" ]]; then
      rm -rf "$dest"
    elif [[ -f "$dest" ]]; then
      rm "$dest"
    fi

    if [[ -d "$src" ]]; then
      cp -r "$src" "$dest"
      echo " Copied directory: $item"
    else
      cp "$src" "$dest"
      echo " Copied file: $item"
    fi
  done
}

sync_frontend() {
  local template="$1"
  local demo="$2"
  shift 2
  local template_dir="$TEMPLATES_DIR/$template"
  local demo_dir="$DEMOS_DIR/$demo"

  if [[ ! -d "$template_dir/src" ]]; then
    echo " Skipping frontend sync: $template (template src not found)"
    return
  fi

  if [[ ! -d "$demo_dir/src" ]]; then
    echo " Skipping frontend sync: $demo (demo src not found)"
    return
  fi

  echo "Syncing frontend $template -> $demo"

  local rsync_args=("-a" "--delete")
  for preserved in "$@"; do
    rsync_args+=("--exclude=$preserved")
  done

  rsync "${rsync_args[@]}" "$template_dir/src/" "$demo_dir/src/"

  if [[ -f "$template_dir/emdash-env.d.ts" ]]; then
    cp "$template_dir/emdash-env.d.ts" "$demo_dir/emdash-env.d.ts"
    echo " Copied file: emdash-env.d.ts"
  fi

  if [[ -d "$template_dir/seed" && -d "$demo_dir/seed" ]]; then
    rsync -a --delete "$template_dir/seed/" "$demo_dir/seed/"
    echo " Synced directory: seed"
  fi
}
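
# For the preview demo, the rsync above expands to (illustrative):
#   rsync -a --delete --exclude=worker.ts --exclude=middleware.ts \
#     templates/blog-cloudflare/src/ demos/preview/src/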

echo "Syncing demos from templates..."
echo ""

for pair in "${DEMO_PAIRS[@]}"; do
  IFS=':' read -r template demo <<< "$pair"
  sync_demo "$template" "$demo"
  echo ""
done

for pair in "${FRONTEND_PAIRS[@]}"; do
  IFS=':' read -r template demo <<< "$pair"
  case "$demo" in
    cloudflare)
      sync_frontend "$template" "$demo" \
        "worker.ts" \
        "pages/als-test.astro" \
        "pages/sandbox-test.astro" \
        "pages/sandbox-plugin-test.astro"
      ;;
    playground)
      sync_frontend "$template" "$demo" "worker.ts"
      ;;
    preview)
      sync_frontend "$template" "$demo" "worker.ts" "middleware.ts"
      ;;
    postgres)
      sync_frontend "$template" "$demo"
      ;;
  esac
  echo ""
done

echo "Done!"
92
scripts/sync-cloudflare-templates.sh
Executable file
@@ -0,0 +1,92 @@
#!/bin/bash
#
# Syncs shared files from base templates to their cloudflare variants.
# Run this after making changes to template src/, seed/, or tsconfig.json.
#
# Usage: ./scripts/sync-cloudflare-templates.sh
#
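
# Example run (output illustrative):
#   $ ./scripts/sync-cloudflare-templates.sh
#   Syncing blog -> blog-cloudflare
#     Synced directory: src
#     Copied file: tsconfig.json
#   ...
#   Done!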

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
TEMPLATES_DIR="$ROOT_DIR/templates"

# Files/directories to sync from base template to cloudflare variant
SYNC_ITEMS=(
  "src"
  "public"
  "seed"
  "tsconfig.json"
  "emdash-env.d.ts"
  ".gitignore"
)

# Template pairs: base -> cloudflare variant
TEMPLATE_PAIRS=(
  "blog:blog-cloudflare"
  "marketing:marketing-cloudflare"
  "portfolio:portfolio-cloudflare"
  "starter:starter-cloudflare"
)

sync_template() {
  local base="$1"
  local variant="$2"
  local base_dir="$TEMPLATES_DIR/$base"
  local variant_dir="$TEMPLATES_DIR/$variant"

  if [[ ! -d "$base_dir" ]]; then
    echo " Skipping: $base (base not found)"
    return
  fi

  if [[ ! -d "$variant_dir" ]]; then
    echo " Skipping: $variant (variant not found)"
    return
  fi

  echo "Syncing $base -> $variant"

  for item in "${SYNC_ITEMS[@]}"; do
    local src="$base_dir/$item"
    local dest="$variant_dir/$item"

    if [[ ! -e "$src" ]]; then
      continue
    fi

    if [[ -d "$src" ]]; then
      # Clean up if dest exists but isn't a directory
      if [[ -L "$dest" || ( -e "$dest" && ! -d "$dest" ) ]]; then
        rm "$dest"
      fi
      mkdir -p "$dest"
      # worker.ts stays variant-specific; everything else mirrors the base
      rsync -a --delete \
        --exclude="worker.ts" \
        "$src/" "$dest/"
      echo " Synced directory: $item"
    else
      if [[ -L "$dest" ]]; then
        rm "$dest"
      elif [[ -d "$dest" ]]; then
        rm -rf "$dest"
      elif [[ -f "$dest" ]]; then
        rm "$dest"
      fi
      cp "$src" "$dest"
      echo " Copied file: $item"
    fi
  done
}
echo "Syncing cloudflare template variants..."
|
||||
echo ""
|
||||
|
||||
for pair in "${TEMPLATE_PAIRS[@]}"; do
|
||||
IFS=':' read -r base variant <<< "$pair"
|
||||
sync_template "$base" "$variant"
|
||||
echo ""
|
||||
done
|
||||
|
||||
echo "Done!"
|
||||
87
scripts/sync-template-skills.sh
Executable file
@@ -0,0 +1,87 @@
#!/bin/bash
#
# Syncs agent skills and AGENTS.md into each template directory.
# Creates a .claude/skills symlink and a CLAUDE.md symlink for Claude Code compatibility.
#
# Usage: ./scripts/sync-template-skills.sh
#
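
# Resulting layout per template (illustrative):
#   templates/<name>/.agents/skills/<skill>/   <- copies from skills/
#   templates/<name>/.claude/skills -> ../.agents/skills (symlink)
#   templates/<name>/CLAUDE.md -> AGENTS.md (symlink)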

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="$(dirname "$SCRIPT_DIR")"
SKILLS_DIR="$ROOT_DIR/skills"
TEMPLATES_DIR="$ROOT_DIR/templates"

# Skills to sync into templates
SKILLS=(
  "building-emdash-site"
  "creating-plugins"
  "emdash-cli"
)

sync_skills() {
  local template_dir="$1"
  local template_name="$(basename "$template_dir")"
  local agents_dir="$template_dir/.agents/skills"
  local claude_dir="$template_dir/.claude"

  echo "Syncing skills -> $template_name"

  for skill in "${SKILLS[@]}"; do
    local src="$SKILLS_DIR/$skill"
    local dest="$agents_dir/$skill"

    if [[ ! -d "$src" ]]; then
      echo " Skipping: $skill (not found in skills/)"
      continue
    fi

    # Remove existing copy
    if [[ -d "$dest" ]]; then
      rm -rf "$dest"
    fi

    mkdir -p "$agents_dir"
    cp -r "$src" "$dest"
    echo " Copied: $skill"
  done

  # Create .claude/skills symlink
  mkdir -p "$claude_dir"
  local symlink="$claude_dir/skills"
  if [[ -L "$symlink" ]]; then
    rm "$symlink"
  elif [[ -e "$symlink" ]]; then
    rm -rf "$symlink"
  fi
  ln -s ../.agents/skills "$symlink"
  echo " Linked: .claude/skills -> ../.agents/skills"

  # Copy AGENTS.md from starter template (canonical source for standalone sites)
  local agents_md="$TEMPLATES_DIR/starter/AGENTS.md"
  if [[ -f "$agents_md" ]]; then
    cp "$agents_md" "$template_dir/AGENTS.md"
    # Create CLAUDE.md symlink
    local claude_md="$template_dir/CLAUDE.md"
    if [[ -L "$claude_md" ]]; then
      rm "$claude_md"
    elif [[ -f "$claude_md" ]]; then
      rm "$claude_md"
    fi
    ln -s AGENTS.md "$claude_md"
    echo " Copied: AGENTS.md + CLAUDE.md symlink"
  fi
}
echo "Syncing agent skills to templates..."
|
||||
echo ""
|
||||
|
||||
for template_dir in "$TEMPLATES_DIR"/*/; do
|
||||
# Skip if not a directory
|
||||
[[ -d "$template_dir" ]] || continue
|
||||
sync_skills "$template_dir"
|
||||
echo ""
|
||||
done
|
||||
|
||||
echo "Done!"
|
||||
339
scripts/sync-templates-repo.mjs
Executable file
@@ -0,0 +1,339 @@
#!/usr/bin/env node

/**
 * Sync templates from this monorepo to the standalone emdash-cms/templates repo.
 *
 * - Clones emdash-cms/templates to a temp directory
 * - Copies each template, excluding build artifacts
 * - Resolves workspace:* and catalog: versions to real published versions
 * - Commits and pushes a branch, then opens a PR
 *
 * Usage:
 *   node scripts/sync-templates-repo.mjs                       # full run: clone, sync, PR
 *   node scripts/sync-templates-repo.mjs --dry-run              # sync to temp dir, print diff, don't push
 *   node scripts/sync-templates-repo.mjs --local /path/to/repo  # sync to a local checkout
 */

import { execFileSync } from "node:child_process";
import {
  cpSync,
  existsSync,
  lstatSync,
  mkdirSync,
  mkdtempSync,
  readFileSync,
  readlinkSync,
  readdirSync,
  rmSync,
  symlinkSync,
  unlinkSync,
  writeFileSync,
} from "node:fs";
import { tmpdir } from "node:os";
import { dirname, join, resolve } from "node:path";
import { fileURLToPath } from "node:url";

const __dirname = dirname(fileURLToPath(import.meta.url));
const ROOT = resolve(__dirname, "..");
const TEMPLATES_DIR = join(ROOT, "templates");
const REPO = "emdash-cms/templates";

const TEMPLATES = [
  "blog",
  "blog-cloudflare",
  "marketing",
  "marketing-cloudflare",
  "portfolio",
  "portfolio-cloudflare",
  "starter",
  "starter-cloudflare",
];

const EXCLUDE = new Set(["node_modules", "dist", ".astro", ".emdash", "CHANGELOG.md"]);

const RE_NON_WHITESPACE_START = /^\S/;
const RE_CATALOG_ENTRY = /^\s+"?([^"]+)"?:\s+(.+)$/;

function parseCatalog() {
  const yaml = readFileSync(join(ROOT, "pnpm-workspace.yaml"), "utf8");
  const catalog = {};
  let inCatalog = false;
  for (const line of yaml.split("\n")) {
    if (line.startsWith("catalog:")) {
      inCatalog = true;
      continue;
    }
    if (inCatalog && RE_NON_WHITESPACE_START.test(line)) break;
    if (!inCatalog) continue;

    const match = line.match(RE_CATALOG_ENTRY);
    if (match) catalog[match[1]] = match[2];
  }
  return catalog;
}
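
// parseCatalog() reads pnpm-workspace.yaml entries like (entries illustrative):
//   catalog:
//     astro: ^5.0.0
//     "@playwright/test": ^1.48.0
// and returns { astro: "^5.0.0", "@playwright/test": "^1.48.0" }.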

function collectWorkspaceVersions() {
  const versions = {};
  const dirs = [join(ROOT, "packages"), join(ROOT, "packages/plugins")];
  for (const base of dirs) {
    if (!existsSync(base)) continue;
    for (const entry of readdirSync(base, { withFileTypes: true })) {
      if (!entry.isDirectory()) continue;
      const pkgPath = join(base, entry.name, "package.json");
      if (!existsSync(pkgPath)) continue;
      const pkg = JSON.parse(readFileSync(pkgPath, "utf8"));
      if (pkg.name && pkg.version) {
        versions[pkg.name] = pkg.version;
      }
    }
  }
  return versions;
}

function resolveDeps(deps, catalog, workspace) {
  if (!deps) return deps;
  const resolved = {};
  for (const [name, version] of Object.entries(deps)) {
    if (version === "catalog:") {
      resolved[name] = catalog[name] || version;
    } else if (version.startsWith("workspace:")) {
      resolved[name] = workspace[name] ? `^${workspace[name]}` : version;
    } else {
      resolved[name] = version;
    }
  }
  return resolved;
}
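
// e.g. (versions illustrative):
//   { "emdash": "workspace:*" } -> { "emdash": "^0.4.2" }
//   { "astro": "catalog:" }     -> { "astro": "^5.0.0" }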

function transformPackageJson(srcPath, catalog, workspace) {
  const pkg = JSON.parse(readFileSync(srcPath, "utf8"));
  pkg.dependencies = resolveDeps(pkg.dependencies, catalog, workspace);
  pkg.devDependencies = resolveDeps(pkg.devDependencies, catalog, workspace);
  if (pkg.peerDependencies) {
    pkg.peerDependencies = resolveDeps(pkg.peerDependencies, catalog, workspace);
    if (Object.keys(pkg.peerDependencies).length === 0) delete pkg.peerDependencies;
  }
  if (pkg.optionalDependencies) {
    pkg.optionalDependencies = resolveDeps(pkg.optionalDependencies, catalog, workspace);
    if (Object.keys(pkg.optionalDependencies).length === 0) delete pkg.optionalDependencies;
  }
  return JSON.stringify(pkg, null, "\t") + "\n";
}

function lexists(p) {
  try {
    lstatSync(p);
    return true;
  } catch {
    return false;
  }
}

function copyDirRecursive(src, dest) {
  mkdirSync(dest, { recursive: true });
  for (const entry of readdirSync(src, { withFileTypes: true })) {
    const srcPath = join(src, entry.name);
    const destPath = join(dest, entry.name);
    if (entry.isSymbolicLink()) {
      if (lexists(destPath)) unlinkSync(destPath);
      symlinkSync(readlinkSync(srcPath), destPath);
    } else if (entry.isDirectory()) {
      copyDirRecursive(srcPath, destPath);
    } else {
      cpSync(srcPath, destPath);
    }
  }
}

function copyTemplateDir(src, dest) {
  mkdirSync(dest, { recursive: true });

  for (const entry of readdirSync(src, { withFileTypes: true })) {
    if (EXCLUDE.has(entry.name)) continue;
    // Don't overwrite target-only files
    if (entry.name === "README.md") continue;
    // package.json is handled separately
    if (entry.name === "package.json") continue;

    const srcPath = join(src, entry.name);
    const destPath = join(dest, entry.name);

    if (entry.isSymbolicLink()) {
      if (lexists(destPath)) unlinkSync(destPath);
      symlinkSync(readlinkSync(srcPath), destPath);
    } else if (entry.isDirectory()) {
      if (existsSync(destPath)) rmSync(destPath, { recursive: true });
      copyDirRecursive(srcPath, destPath);
    } else {
      cpSync(srcPath, destPath);
    }
  }

  // Remove files in dest that don't exist in src (except preserved ones)
  const preserved = new Set(["README.md", "package.json"]);
  for (const entry of readdirSync(dest, { withFileTypes: true })) {
    if (preserved.has(entry.name)) continue;
    if (EXCLUDE.has(entry.name)) continue;
    const srcPath = join(src, entry.name);
    if (!existsSync(srcPath)) {
      const destPath = join(dest, entry.name);
      rmSync(destPath, { recursive: true, force: true });
    }
  }
}

function git(args, cwd) {
  return execFileSync("git", args, { encoding: "utf8", stdio: "pipe", cwd }).trim();
}
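
// e.g. git(["rev-parse", "--abbrev-ref", "HEAD"], targetDir) -> "main" (illustrative)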

// --- main ---

const args = process.argv.slice(2);
const dryRun = args.includes("--dry-run");
const localIdx = args.indexOf("--local");
const localPath = localIdx !== -1 ? args[localIdx + 1] : null;
if (localIdx !== -1 && !localPath) {
  console.error("Error: --local requires a path argument");
  process.exit(1);
}

const catalog = parseCatalog();
const workspace = collectWorkspaceVersions();

console.log("Workspace packages:");
for (const [name, version] of Object.entries(workspace)) {
  console.log(` ${name} = ${String(version)}`);
}
console.log("");

let targetDir;
let tempDir;

if (localPath) {
  targetDir = resolve(localPath);
  if (!existsSync(join(targetDir, ".git"))) {
    console.error(`Error: ${targetDir} is not a git repository`);
    process.exit(1);
  }
} else {
  tempDir = mkdtempSync(join(tmpdir(), "emdash-templates-"));
  console.log(`Cloning ${REPO} to ${tempDir}...`);
  execFileSync("gh", ["repo", "clone", REPO, tempDir, "--", "--depth", "1"], {
    stdio: "pipe",
  });
  // Configure git credential helper so push works with GH_TOKEN
  execFileSync("gh", ["auth", "setup-git"], { stdio: "pipe" });
  targetDir = tempDir;
}

try {
  for (const template of TEMPLATES) {
    const srcDir = join(TEMPLATES_DIR, template);
    const destDir = join(targetDir, template);

    if (!existsSync(srcDir)) {
      console.log(`Skipping ${template} (not in monorepo)`);
      continue;
    }

    console.log(`Syncing ${template}`);
    copyTemplateDir(srcDir, destDir);

    const srcPkg = join(srcDir, "package.json");
    if (existsSync(srcPkg)) {
      writeFileSync(
        join(destDir, "package.json"),
        transformPackageJson(srcPkg, catalog, workspace),
      );
      console.log(" Transformed package.json");
    }
  }

  // Also sync screenshots.json
  const screenshotsJson = join(TEMPLATES_DIR, "screenshots.json");
  if (existsSync(screenshotsJson)) {
    cpSync(screenshotsJson, join(targetDir, "screenshots.json"));
    console.log("\nSynced screenshots.json");
  }

  console.log("");

  const diff = git(["diff", "--stat"], targetDir);
  const untracked = git(["ls-files", "--others", "--exclude-standard"], targetDir);
  if (!diff && !untracked) {
    console.log("No changes to sync.");
    process.exit(0);
  }

  console.log("Changes:");
  console.log(diff);
  console.log("");

  if (dryRun) {
    console.log("Dry run — not pushing.");
    if (tempDir) {
      console.log(`Temp dir preserved at: ${tempDir}`);
      tempDir = undefined; // skip cleanup so the dir can be inspected
    }
    process.exit(0);
  }

  // Get the emdash version for the branch/commit message
  const emdashVersion = workspace["emdash"] || "unknown";
  const branch = `sync/emdash-v${emdashVersion}`;

  // Configure git identity in CI
  if (process.env.CI) {
    git(["config", "user.name", "github-actions[bot]"], targetDir);
    git(["config", "user.email", "github-actions[bot]@users.noreply.github.com"], targetDir);
  }

  git(["checkout", "-B", branch], targetDir);
  git(["add", "-A"], targetDir);
  git(["commit", "-m", `chore: sync templates from emdash v${emdashVersion}`], targetDir);
  git(["push", "--force", "-u", "origin", branch], targetDir);

  console.log(`Pushed branch: ${branch}`);

  const prBody = [
    "## Summary",
    "",
    `Synced templates from [emdash v${emdashVersion}](https://github.com/emdash-cms/emdash).`,
    "",
    "Auto-generated by `scripts/sync-templates-repo.mjs`.",
  ].join("\n");

  // Reuse existing PR if one is already open for this branch
  const existingPrs = JSON.parse(
    execFileSync(
      "gh",
      ["pr", "list", "--repo", REPO, "--head", branch, "--state", "open", "--json", "url"],
      { encoding: "utf8", stdio: "pipe", cwd: targetDir },
    ),
  );

  if (existingPrs.length > 0) {
    console.log(`PR already exists: ${existingPrs[0].url}`);
  } else {
    const prUrl = execFileSync(
      "gh",
      [
        "pr",
        "create",
        "--repo",
        REPO,
        "--head",
        branch,
        "--title",
        `chore: sync templates from emdash v${emdashVersion}`,
        "--body",
        prBody,
      ],
      { encoding: "utf8", stdio: "pipe", cwd: targetDir },
    ).trim();

    console.log(`PR: ${prUrl}`);
  }
} finally {
  if (tempDir) rmSync(tempDir, { recursive: true, force: true });
}