Emdash source with visual editor image upload fix
Fixes: 1. media.ts: wrap placeholder generation in try-catch 2. toolbar.ts: check r.ok, display error message in popover
This commit is contained in:
183
packages/core/tests/integration/mcp/concurrency.test.ts
Normal file
183
packages/core/tests/integration/mcp/concurrency.test.ts
Normal file
@@ -0,0 +1,183 @@
|
||||
/**
|
||||
* MCP concurrency tests — InMemoryTransport surface.
|
||||
*
|
||||
* Exercises the runtime + handler + tool dispatch under concurrent
|
||||
* invocation: shared mutable state, race conditions in tool registration,
|
||||
* draft revision creation under load. The HTTP-transport-level 401 race
|
||||
* (where parallel requests sometimes lose the runtime singleton during
|
||||
* cold-start) lives in the smoke test against a live server, since
|
||||
* InMemoryTransport doesn't exercise the auth middleware path.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
|
||||
describe("MCP concurrency — in-memory transport (bug #8 partial)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("14 parallel read calls all succeed (no spurious failures)", async () => {
|
||||
// 14 batched calls — covers the same fan-out a real session sees.
|
||||
// Over the in-memory transport the auth path is bypassed, so a
|
||||
// failure here would indicate a runtime-level race rather than the
|
||||
// HTTP-transport 401 issue (which the smoke test covers).
|
||||
// Each iteration must call the tool fresh — .fill() would reuse one
|
||||
// Promise. `void i` keeps the lint rule from misreading the callback
|
||||
// as constant.
|
||||
const callPromises = Array.from({ length: 14 }, (_, i) => {
|
||||
void i;
|
||||
return harness.client.callTool({ name: "schema_list_collections", arguments: {} });
|
||||
});
|
||||
|
||||
const results = await Promise.all(callPromises);
|
||||
for (const result of results) {
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
}
|
||||
});
|
||||
|
||||
it("mixed read/write calls in parallel maintain correctness", async () => {
|
||||
// 5 creates + 5 lists running concurrently. Final list count must
|
||||
// equal initial count + creates that succeeded.
|
||||
const initial = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post" },
|
||||
});
|
||||
const initialCount = extractJson<{ items: unknown[] }>(initial).items.length;
|
||||
|
||||
const work = [
|
||||
...Array.from({ length: 5 }, (_, i) =>
|
||||
harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: `parallel ${i}` } },
|
||||
}),
|
||||
),
|
||||
...Array.from({ length: 5 }, (_, i) => {
|
||||
void i;
|
||||
return harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post" },
|
||||
});
|
||||
}),
|
||||
];
|
||||
|
||||
const results = await Promise.all(work);
|
||||
|
||||
// Count successful creates
|
||||
const createsSuccessful = results
|
||||
.slice(0, 5)
|
||||
.filter((r) => !(r as { isError?: boolean }).isError).length;
|
||||
expect(createsSuccessful).toBe(5);
|
||||
|
||||
// Final list should reflect all creates
|
||||
const final = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post" },
|
||||
});
|
||||
const finalCount = extractJson<{ items: unknown[] }>(final).items.length;
|
||||
expect(finalCount).toBe(initialCount + 5);
|
||||
});
|
||||
|
||||
it("parallel updates to the same item don't corrupt state", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
// 10 concurrent updates with different titles
|
||||
const work = Array.from({ length: 10 }, (_, i) =>
|
||||
harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: `update ${i}` } },
|
||||
}),
|
||||
);
|
||||
|
||||
const results = await Promise.all(work);
|
||||
for (const result of results) {
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
}
|
||||
|
||||
// Final state should be a valid title from one of the updates,
|
||||
// not corrupted or empty.
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{ item: { title?: unknown; data?: { title?: unknown } } }>(got).item;
|
||||
const title = item.data?.title ?? item.title;
|
||||
expect(typeof title).toBe("string");
|
||||
expect(title).toMatch(/^(Original|update \d)$/);
|
||||
});
|
||||
|
||||
it("parallel calls don't leak data across users", async () => {
|
||||
// Two harnesses on the same DB, one ADMIN, one CONTRIBUTOR.
|
||||
// Concurrent reads should each see their own permitted view.
|
||||
const userTwo = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_contrib",
|
||||
userRole: Role.CONTRIBUTOR,
|
||||
});
|
||||
|
||||
try {
|
||||
// Admin creates an item
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "by admin" } },
|
||||
});
|
||||
expect(created.isError, extractText(created)).toBeFalsy();
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
// 10 concurrent updates: 5 from admin (allowed), 5 from contributor
|
||||
// who isn't the author (denied). All admin updates should succeed,
|
||||
// all contributor updates should fail — no cross-contamination.
|
||||
const adminWork = Array.from({ length: 5 }, (_, i) =>
|
||||
harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: `admin ${i}` } },
|
||||
}),
|
||||
);
|
||||
const contribWork = Array.from({ length: 5 }, (_, i) =>
|
||||
userTwo.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: `contrib ${i}` } },
|
||||
}),
|
||||
);
|
||||
|
||||
const [adminResults, contribResults] = await Promise.all([
|
||||
Promise.all(adminWork),
|
||||
Promise.all(contribWork),
|
||||
]);
|
||||
|
||||
for (const r of adminResults) {
|
||||
expect(r.isError, extractText(r)).toBeFalsy();
|
||||
}
|
||||
for (const r of contribResults) {
|
||||
expect(r.isError).toBe(true);
|
||||
}
|
||||
} finally {
|
||||
await userTwo.cleanup();
|
||||
}
|
||||
});
|
||||
});
|
||||
735
packages/core/tests/integration/mcp/content-misc.test.ts
Normal file
735
packages/core/tests/integration/mcp/content-misc.test.ts
Normal file
@@ -0,0 +1,735 @@
|
||||
/**
|
||||
* MCP content tools — coverage for the remaining tools and edges.
|
||||
*
|
||||
* Covers:
|
||||
* - content_duplicate
|
||||
* - content_permanent_delete
|
||||
* - content_translations + locale handling on create/get
|
||||
* - _rev optimistic concurrency (happy + race)
|
||||
* - Soft-delete visibility (content_get / content_list filtering)
|
||||
* - Edit-while-trashed
|
||||
* - Idempotency (publish twice, unpublish-on-draft, schedule + publish)
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import { sql, type Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { ContentRepository } from "../../../src/database/repositories/content.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// content_duplicate
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("content_duplicate", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("creates a copy with new id and slug", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" }, slug: "original" },
|
||||
});
|
||||
const original = extractJson<{ item: { id: string; slug: string } }>(created).item;
|
||||
|
||||
const dup = await harness.client.callTool({
|
||||
name: "content_duplicate",
|
||||
arguments: { collection: "post", id: original.id },
|
||||
});
|
||||
expect(dup.isError, extractText(dup)).toBeFalsy();
|
||||
const copy = extractJson<{ item: { id: string; slug: string; status: string } }>(dup).item;
|
||||
|
||||
expect(copy.id).not.toBe(original.id);
|
||||
expect(copy.slug).not.toBe(original.slug);
|
||||
// Created as draft per tool description
|
||||
expect(copy.status).toBe("draft");
|
||||
});
|
||||
|
||||
it("rejects duplicating a missing item", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_duplicate",
|
||||
arguments: { collection: "post", id: "01NEVER" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
});
|
||||
|
||||
it("rejects duplicating in non-existent collection", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_duplicate",
|
||||
arguments: { collection: "ghost", id: "01NEVER" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("requires CONTRIBUTOR or higher", async () => {
|
||||
await harness.cleanup();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_subscriber",
|
||||
userRole: Role.SUBSCRIBER,
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_duplicate",
|
||||
arguments: { collection: "post", id: "01ANY" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// content_permanent_delete
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("content_permanent_delete", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
async function seedTrashedItem(): Promise<string> {
|
||||
const repo = new ContentRepository(db);
|
||||
const item = await repo.create({
|
||||
type: "post",
|
||||
data: { title: "T" },
|
||||
slug: `t-${Math.random().toString(36).slice(2, 6)}`,
|
||||
status: "draft",
|
||||
authorId: ADMIN_ID,
|
||||
});
|
||||
await repo.delete("post", item.id);
|
||||
return item.id;
|
||||
}
|
||||
|
||||
it("permanently deletes a trashed item (ADMIN)", async () => {
|
||||
const id = await seedTrashedItem();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_permanent_delete",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
// Verify it's gone — not even in trash
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(got.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("EDITOR cannot permanent-delete (ADMIN-only)", async () => {
|
||||
const id = await seedTrashedItem();
|
||||
harness = await connectMcpHarness({ db, userId: "user_editor", userRole: Role.EDITOR });
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_permanent_delete",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("returns NOT_FOUND for missing id", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_permanent_delete",
|
||||
arguments: { collection: "post", id: "01NEVEREXISTED" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("01NEVEREXISTED");
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// content_translations + locale handling
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("content_translations + locale", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("creates a translation linked via translationOf", async () => {
|
||||
const en = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Hello" }, locale: "en" },
|
||||
});
|
||||
const enId = extractJson<{ item: { id: string } }>(en).item.id;
|
||||
|
||||
const fr = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "Bonjour" },
|
||||
locale: "fr",
|
||||
translationOf: enId,
|
||||
},
|
||||
});
|
||||
expect(fr.isError, extractText(fr)).toBeFalsy();
|
||||
|
||||
const trans = await harness.client.callTool({
|
||||
name: "content_translations",
|
||||
arguments: { collection: "post", id: enId },
|
||||
});
|
||||
expect(trans.isError, extractText(trans)).toBeFalsy();
|
||||
const data = extractJson<{
|
||||
translations: Array<{ id: string; locale: string }>;
|
||||
}>(trans);
|
||||
const locales = data.translations.map((t) => t.locale).toSorted();
|
||||
expect(locales).toEqual(["en", "fr"]);
|
||||
});
|
||||
|
||||
it("returns single-locale translations array for content with no other translations", async () => {
|
||||
const en = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Standalone" }, locale: "en" },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(en).item.id;
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_translations",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ translations: unknown[] }>(result);
|
||||
expect(data.translations.length).toBeGreaterThanOrEqual(1);
|
||||
});
|
||||
|
||||
it("content_get with locale param resolves slug per-locale", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "EN" }, slug: "shared", locale: "en" },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "FR" }, slug: "shared", locale: "fr" },
|
||||
});
|
||||
|
||||
const en = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id: "shared", locale: "en" },
|
||||
});
|
||||
expect(en.isError, extractText(en)).toBeFalsy();
|
||||
const fr = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id: "shared", locale: "fr" },
|
||||
});
|
||||
expect(fr.isError, extractText(fr)).toBeFalsy();
|
||||
|
||||
const enItem = extractJson<{
|
||||
item: { locale: string; data?: { title?: unknown }; title?: unknown };
|
||||
}>(en).item;
|
||||
const frItem = extractJson<{
|
||||
item: { locale: string; data?: { title?: unknown }; title?: unknown };
|
||||
}>(fr).item;
|
||||
const enTitle = enItem.data?.title ?? enItem.title;
|
||||
const frTitle = frItem.data?.title ?? frItem.title;
|
||||
expect(enTitle).toBe("EN");
|
||||
expect(frTitle).toBe("FR");
|
||||
});
|
||||
|
||||
it("rejects translationOf pointing to a non-existent item", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "Orphan" },
|
||||
locale: "fr",
|
||||
translationOf: "01NEVEREXISTED",
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// _rev optimistic concurrency
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("_rev optimistic concurrency", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("content_get returns a _rev token", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const data = extractJson<{ item: { id: string }; _rev?: string }>(got);
|
||||
expect(data._rev).toBeTruthy();
|
||||
});
|
||||
|
||||
it("content_update with current _rev succeeds", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const rev = extractJson<{ _rev: string }>(got)._rev;
|
||||
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "Updated" }, _rev: rev },
|
||||
});
|
||||
expect(updated.isError, extractText(updated)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("content_update with stale _rev returns CONFLICT-style error", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const oldRev = extractJson<{ _rev: string }>(got)._rev;
|
||||
|
||||
// First update: succeeds and bumps the rev
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "Update 1" }, _rev: oldRev },
|
||||
});
|
||||
|
||||
// Second update with stale rev: should conflict
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "Update 2" }, _rev: oldRev },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/conflict|stale|outdated|modified|rev/i);
|
||||
});
|
||||
|
||||
it("content_update without _rev still succeeds (opt-in concurrency)", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "U" } },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Soft-delete visibility
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("soft-delete visibility", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("content_get on a trashed item returns NOT_FOUND (not the item)", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_delete",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(got.isError).toBe(true);
|
||||
expect(extractText(got)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
});
|
||||
|
||||
it("content_list does NOT include trashed items by default", async () => {
|
||||
const a = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Live" } },
|
||||
});
|
||||
const b = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Trashed" } },
|
||||
});
|
||||
const trashedId = extractJson<{ item: { id: string } }>(b).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_delete",
|
||||
arguments: { collection: "post", id: trashedId },
|
||||
});
|
||||
|
||||
const list = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post" },
|
||||
});
|
||||
const ids = extractJson<{ items: Array<{ id: string }> }>(list).items.map((i) => i.id);
|
||||
expect(ids).not.toContain(trashedId);
|
||||
expect(ids).toContain(extractJson<{ item: { id: string } }>(a).item.id);
|
||||
});
|
||||
|
||||
it("content_list_trashed returns only trashed items", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Live" } },
|
||||
});
|
||||
const b = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Trashed" } },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_delete",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: extractJson<{ item: { id: string } }>(b).item.id,
|
||||
},
|
||||
});
|
||||
|
||||
const trashed = await harness.client.callTool({
|
||||
name: "content_list_trashed",
|
||||
arguments: { collection: "post" },
|
||||
});
|
||||
const items = extractJson<{ items: Array<{ id: string }> }>(trashed).items;
|
||||
expect(items).toHaveLength(1);
|
||||
expect(items[0]?.id).toBe(extractJson<{ item: { id: string } }>(b).item.id);
|
||||
});
|
||||
});
|
||||
|
||||
describe("edit-while-trashed", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("content_update on a trashed item is rejected (item not visible)", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_delete",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "Edit while dead" } },
|
||||
});
|
||||
expect(updated.isError).toBe(true);
|
||||
expect(extractText(updated)).toMatch(/\bNOT_FOUND\b|\bnot found\b|trash/i);
|
||||
});
|
||||
|
||||
it("content_publish on a trashed item is rejected", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_delete",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Idempotency
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("idempotency", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("publish twice is idempotent: second call succeeds, status stays published, publishedAt is preserved", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const first = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(first.isError, extractText(first)).toBeFalsy();
|
||||
const firstItem = extractJson<{
|
||||
item: { status: string; publishedAt: string | null };
|
||||
}>(first).item;
|
||||
expect(firstItem.status).toBe("published");
|
||||
expect(firstItem.publishedAt).toBeTruthy();
|
||||
|
||||
// Pin publishedAt to a known fixed value so the comparison can't be
|
||||
// satisfied by coincidence (two publishes within the same ms would
|
||||
// produce identical ISO strings even on a regression that drops the
|
||||
// COALESCE preservation).
|
||||
const KNOWN = "2020-01-01T00:00:00.000Z";
|
||||
await sql`UPDATE ec_post SET published_at = ${KNOWN} WHERE id = ${id}`.execute(db);
|
||||
|
||||
const second = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
// Contract: publish is idempotent. Second call succeeds, status
|
||||
// remains published, and publishedAt is preserved (the repository
|
||||
// uses COALESCE so the existing timestamp survives a re-publish).
|
||||
expect(second.isError, extractText(second)).toBeFalsy();
|
||||
const secondItem = extractJson<{
|
||||
item: { status: string; publishedAt: string | null };
|
||||
}>(second).item;
|
||||
expect(secondItem.status).toBe("published");
|
||||
expect(secondItem.publishedAt).toBe(KNOWN);
|
||||
});
|
||||
|
||||
it("unpublish on a draft (already unpublished) is idempotent: status stays draft", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const createdItem = extractJson<{
|
||||
item: { id: string; version: number };
|
||||
}>(created).item;
|
||||
const id = createdItem.id;
|
||||
const versionBefore = createdItem.version;
|
||||
|
||||
// Item is born as draft. Contract: unpublish is idempotent — succeeds
|
||||
// and the item stays draft.
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_unpublish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const item = extractJson<{
|
||||
item: { status: string; publishedAt: string | null; version: number };
|
||||
}>(result).item;
|
||||
expect(item.status).toBe("draft");
|
||||
expect(item.publishedAt).toBeNull();
|
||||
// Idempotent: nothing meaningful changed. A regression that always
|
||||
// bumps the version or creates a phantom revision would surface here.
|
||||
// (updated_at can tick because the UPDATE re-runs; version is the
|
||||
// stricter invariant.)
|
||||
expect(item.version).toBe(versionBefore);
|
||||
});
|
||||
|
||||
it("schedule then publish: schedule is preserved or cleared cleanly", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const future = new Date(Date.now() + 3600_000).toISOString();
|
||||
await harness.client.callTool({
|
||||
name: "content_schedule",
|
||||
arguments: { collection: "post", id, scheduledAt: future },
|
||||
});
|
||||
|
||||
const publish = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(publish.isError, extractText(publish)).toBeFalsy();
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{
|
||||
item: { status: string; scheduledAt: string | null };
|
||||
}>(got).item;
|
||||
expect(item.status).toBe("published");
|
||||
// Once published, the future schedule is moot — should be cleared.
|
||||
expect(item.scheduledAt).toBeNull();
|
||||
});
|
||||
|
||||
it("delete twice is safe — second call returns NOT_FOUND, not a crash", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_delete",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const second = await harness.client.callTool({
|
||||
name: "content_delete",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(second.isError).toBe(true);
|
||||
expect(extractText(second)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// content_unschedule gap (no MCP tool for this, only on runtime)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("content_unschedule gap", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("MCP exposes content_unschedule", async () => {
|
||||
const tools = await harness.client.listTools();
|
||||
const names = tools.tools.map((t) => t.name);
|
||||
expect(names).toContain("content_unschedule");
|
||||
});
|
||||
|
||||
it("schedule + unschedule clears scheduledAt and re-publish still works (F12)", async () => {
|
||||
// Create a draft item.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Scheduled post" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
// Schedule for the near future.
|
||||
const future = new Date(Date.now() + 60_000).toISOString();
|
||||
const schedule = await harness.client.callTool({
|
||||
name: "content_schedule",
|
||||
arguments: { collection: "post", id, scheduledAt: future },
|
||||
});
|
||||
expect(schedule.isError, extractText(schedule)).toBeFalsy();
|
||||
|
||||
// Sanity: scheduledAt is set.
|
||||
const afterSchedule = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const scheduled = extractJson<{ item: { scheduledAt: string | null; status: string } }>(
|
||||
afterSchedule,
|
||||
).item;
|
||||
expect(scheduled.scheduledAt).toBeTruthy();
|
||||
|
||||
// Unschedule.
|
||||
const unschedule = await harness.client.callTool({
|
||||
name: "content_unschedule",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(unschedule.isError, extractText(unschedule)).toBeFalsy();
|
||||
|
||||
// scheduledAt is now null.
|
||||
const afterUnschedule = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const cleared = extractJson<{ item: { scheduledAt: string | null } }>(afterUnschedule).item;
|
||||
expect(cleared.scheduledAt).toBeNull();
|
||||
|
||||
// Re-publish still works after unschedule.
|
||||
const republish = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(republish.isError, extractText(republish)).toBeFalsy();
|
||||
const final = extractJson<{ item: { status: string } }>(republish).item;
|
||||
expect(final.status).toBe("published");
|
||||
});
|
||||
});
|
||||
645
packages/core/tests/integration/mcp/drafts.test.ts
Normal file
645
packages/core/tests/integration/mcp/drafts.test.ts
Normal file
@@ -0,0 +1,645 @@
|
||||
/**
|
||||
* MCP draft / revision data round-trip tests.
|
||||
*
|
||||
* For collections that support revisions, `content_update` writes the
|
||||
* new data into a draft revision rather than the content table columns
|
||||
* (the columns hold the live/published values). `content_get` and
|
||||
* `content_update` hydrate the response item with the draft revision's
|
||||
* data when one exists, exposing the previously-published values as
|
||||
* `liveData` alongside.
|
||||
*
|
||||
* The user-visible contract: "if I update X to Y, then read back, I see Y"
|
||||
* — even for revision-supporting collections.
|
||||
*
|
||||
* Slug updates and `revision_restore` round-trips share the same response
|
||||
* shape, so they're tested here too.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { SchemaRegistry } from "../../../src/schema/registry.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
|
||||
/**
 * Minimal shape of the JSON envelope returned by content tools.
 * Only the properties the tests below actually read are typed; both
 * observed response variants (flattened field columns vs a nested
 * `data` object) are modeled so `readTitle` can handle either.
 */
interface ItemEnvelope {
  item: {
    id: string;
    slug: string | null;
    status: string;
    liveRevisionId: string | null;
    draftRevisionId: string | null;
    version: number;
    publishedAt: string | null;
    updatedAt: string;
    // Field columns flattened onto item — title is what we care about
    title?: unknown;
    // Some response variants nest the typed values under `data`
    data?: { title?: unknown };
  };
  _rev?: string;
}
|
||||
|
||||
/** Read whatever the response thinks the current title is, regardless of shape. */
|
||||
function readTitle(item: ItemEnvelope["item"]): unknown {
|
||||
if (item.data && typeof item.data === "object" && "title" in item.data) {
|
||||
return item.data.title;
|
||||
}
|
||||
return item.title;
|
||||
}
|
||||
|
||||
describe("MCP drafts — content_get and content_update round-trip (bug #2)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
|
||||
// Collection that supports revisions — this is the surface area
|
||||
// where the bug surfaces. Without "revisions" in supports, updates
|
||||
// write directly to content columns and the round-trip is trivially
|
||||
// correct.
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
labelSingular: "Post",
|
||||
supports: ["drafts", "revisions"],
|
||||
});
|
||||
await registry.createField("post", { slug: "title", label: "Title", type: "string" });
|
||||
await registry.createField("post", { slug: "body", label: "Body", type: "text" });
|
||||
|
||||
// Collection without revision support — for contrast/regression
|
||||
await registry.createCollection({
|
||||
slug: "page",
|
||||
label: "Pages",
|
||||
labelSingular: "Page",
|
||||
supports: [],
|
||||
});
|
||||
await registry.createField("page", { slug: "title", label: "Title", type: "string" });
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
// ----- Core round-trip: update should be visible on get -----
|
||||
|
||||
describe("revision-supporting collection", () => {
|
||||
it("content_update response data reflects the new title", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const createdItem = extractJson<ItemEnvelope>(created);
|
||||
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: createdItem.item.id,
|
||||
data: { title: "Updated" },
|
||||
},
|
||||
});
|
||||
expect(updated.isError, extractText(updated)).toBeFalsy();
|
||||
const updatedItem = extractJson<ItemEnvelope>(updated);
|
||||
|
||||
// Bug #2: today this returns "Original". After fix: "Updated".
|
||||
expect(readTitle(updatedItem.item)).toBe("Updated");
|
||||
});
|
||||
|
||||
it("content_get returns the latest draft data after update", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const createdItem = extractJson<ItemEnvelope>(created);
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: createdItem.item.id,
|
||||
data: { title: "Updated via draft" },
|
||||
},
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id: createdItem.item.id },
|
||||
});
|
||||
const gotItem = extractJson<ItemEnvelope>(got);
|
||||
|
||||
expect(readTitle(gotItem.item)).toBe("Updated via draft");
|
||||
});
|
||||
|
||||
it("multiple sequential updates all reflect on read", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "v1" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
for (const title of ["v2", "v3", "v4"]) {
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title } },
|
||||
});
|
||||
}
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(readTitle(extractJson<ItemEnvelope>(got).item)).toBe("v4");
|
||||
});
|
||||
|
||||
it("publishing a draft makes its data the new live data on read", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
// Publish initial as live
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
// Update creates a draft revision
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "Draft change" } },
|
||||
});
|
||||
|
||||
// Publish promotes draft to live
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(readTitle(extractJson<ItemEnvelope>(got).item)).toBe("Draft change");
|
||||
});
|
||||
|
||||
it("partial updates merge with current draft (only title changes, body preserved)", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T1", body: "B1" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "T2" } },
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<ItemEnvelope>(got).item;
|
||||
|
||||
expect(readTitle(item)).toBe("T2");
|
||||
// Read body the same way
|
||||
const body =
|
||||
item.data && typeof item.data === "object" && "body" in item.data
|
||||
? // eslint-disable-next-line typescript-eslint(no-unsafe-type-assertion) -- shape narrowed by 'in' check
|
||||
(item.data as { body?: unknown }).body
|
||||
: (item as Record<string, unknown>).body;
|
||||
expect(body).toBe("B1");
|
||||
});
|
||||
});
|
||||
|
||||
// ----- content_compare must still expose both sides -----
|
||||
|
||||
describe("content_compare", () => {
|
||||
it("returns both live and draft data when a draft exists", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
// Publish, then update to create a draft on top of live
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "Drafted" } },
|
||||
});
|
||||
|
||||
const compare = await harness.client.callTool({
|
||||
name: "content_compare",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(compare.isError, extractText(compare)).toBeFalsy();
|
||||
|
||||
const result = extractJson<{
|
||||
live: { title?: unknown; data?: { title?: unknown } } | null;
|
||||
draft: { title?: unknown; data?: { title?: unknown } } | null;
|
||||
hasChanges?: boolean;
|
||||
}>(compare);
|
||||
|
||||
expect(result.live).not.toBeNull();
|
||||
expect(result.draft).not.toBeNull();
|
||||
const liveTitle = result.live?.data?.title ?? result.live?.title;
|
||||
const draftTitle = result.draft?.data?.title ?? result.draft?.title;
|
||||
expect(liveTitle).toBe("Original");
|
||||
expect(draftTitle).toBe("Drafted");
|
||||
});
|
||||
});
|
||||
|
||||
// ----- content_discard_draft -----
|
||||
|
||||
describe("content_discard_draft", () => {
|
||||
it("after discard, content_get returns published live data", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Live title" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "Draft title" } },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_discard_draft",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(readTitle(extractJson<ItemEnvelope>(got).item)).toBe("Live title");
|
||||
});
|
||||
});
|
||||
|
||||
// ----- regression guard: non-revision collection still works -----
|
||||
|
||||
describe("non-revision-supporting collection (regression guard)", () => {
|
||||
it("content_update on collection without revisions support reflects on read", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "page", data: { title: "Page A" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "page", id, data: { title: "Page A Updated" } },
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "page", id },
|
||||
});
|
||||
expect(readTitle(extractJson<ItemEnvelope>(got).item)).toBe("Page A Updated");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
describe("MCP drafts — slug updates (bug #9)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
supports: ["drafts", "revisions"],
|
||||
});
|
||||
await registry.createField("post", { slug: "title", label: "Title", type: "string" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("content_update with a new slug actually changes the slug visible on read", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" }, slug: "original-slug" },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, slug: "new-slug" },
|
||||
});
|
||||
|
||||
// After publish, slug change should be visible.
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
expect(extractJson<ItemEnvelope>(got).item.slug).toBe("new-slug");
|
||||
});
|
||||
|
||||
it("content_get by new slug works after slug update + publish", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" }, slug: "old" },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, slug: "new" },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const gotByNew = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id: "new" },
|
||||
});
|
||||
expect(gotByNew.isError, extractText(gotByNew)).toBeFalsy();
|
||||
expect(extractJson<ItemEnvelope>(gotByNew).item.id).toBe(id);
|
||||
});
|
||||
});
|
||||
|
||||
describe("MCP drafts — revision_restore semantics (bug #17)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
supports: ["drafts", "revisions"],
|
||||
});
|
||||
await registry.createField("post", { slug: "title", label: "Title", type: "string" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
/**
|
||||
* Bug #17 repro from MCP_BUGS.md: live=v1, draft=v2 (unpublished),
|
||||
* restore v1. Per the tool contract ("Replaces the current draft..."),
|
||||
* the live row must remain v1 and the draft must become v1. The
|
||||
* pre-fix behavior wrote v1 onto the live row and left the draft
|
||||
* pointing at v2.
|
||||
*/
|
||||
it("restore replaces the current draft and leaves the live row alone", async () => {
|
||||
// Create v1, publish so live = v1.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "v1" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
// Find the v1 revision id BEFORE updating to v2 — once we update
|
||||
// without publishing, v1 is still in revision history.
|
||||
const revsBeforeUpdate = await harness.client.callTool({
|
||||
name: "revision_list",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const v1Rev = extractJson<{
|
||||
items: Array<{ id: string; data?: { title?: unknown } }>;
|
||||
}>(revsBeforeUpdate).items.find((r) => r.data?.title === "v1");
|
||||
expect(v1Rev, "v1 revision must exist after publish").toBeTruthy();
|
||||
|
||||
// Update to v2 (creates a draft revision; live remains v1).
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "v2" } },
|
||||
});
|
||||
|
||||
// Sanity: before restore, get returns v2 (the draft) and liveData=v1.
|
||||
const preRestore = extractJson<ItemEnvelope>(
|
||||
await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
}),
|
||||
).item as ItemEnvelope["item"] & { liveData?: { title?: unknown } };
|
||||
expect(readTitle(preRestore)).toBe("v2");
|
||||
expect(preRestore.liveData?.title).toBe("v1");
|
||||
const v2DraftId = preRestore.draftRevisionId;
|
||||
expect(v2DraftId, "v2 draft revision id must be set").toBeTruthy();
|
||||
|
||||
// Restore v1.
|
||||
const restored = await harness.client.callTool({
|
||||
name: "revision_restore",
|
||||
arguments: { collection: "post", id, revisionId: v1Rev!.id },
|
||||
});
|
||||
expect(restored.isError, extractText(restored)).toBeFalsy();
|
||||
|
||||
// The restore response itself must show the new draft state (v1),
|
||||
// not stale data. Same shape as the bug-#2 fix for content_update.
|
||||
const restoredItem = extractJson<ItemEnvelope>(restored).item;
|
||||
expect(readTitle(restoredItem)).toBe("v1");
|
||||
|
||||
// And a follow-up content_get must agree.
|
||||
const postRestore = extractJson<ItemEnvelope>(
|
||||
await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
}),
|
||||
).item;
|
||||
expect(readTitle(postRestore)).toBe("v1");
|
||||
|
||||
// The live row must still hold v1 (unchanged from the original
|
||||
// publish — restore must NOT overwrite live).
|
||||
const dbRow = (await db
|
||||
.selectFrom("ec_post" as never)
|
||||
.select(["title", "live_revision_id", "draft_revision_id"] as never)
|
||||
.where("id" as never, "=", id)
|
||||
.executeTakeFirst()) as
|
||||
| {
|
||||
title: unknown;
|
||||
live_revision_id: string | null;
|
||||
draft_revision_id: string | null;
|
||||
}
|
||||
| undefined;
|
||||
expect(dbRow?.title).toBe("v1");
|
||||
// A new draft revision was created. It is distinct from BOTH the
|
||||
// original v1 revision id (we created a new revision row carrying
|
||||
// v1's data — we don't reuse history rows) AND the v2 draft id
|
||||
// (the v2 draft was abandoned). This is the strongest differentia
|
||||
// from the pre-fix behavior, which left v2's draft pointer
|
||||
// in place.
|
||||
expect(dbRow?.draft_revision_id).toBeTruthy();
|
||||
expect(dbRow?.draft_revision_id).not.toBe(v1Rev!.id);
|
||||
expect(dbRow?.draft_revision_id).not.toBe(v2DraftId);
|
||||
});
|
||||
|
||||
/**
|
||||
* Companion case: restoring while no draft exists should still create
|
||||
* a new draft (rather than no-op or overwrite live).
|
||||
*/
|
||||
it("restore creates a new draft when none exists", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "v1" } },
|
||||
});
|
||||
const id = extractJson<ItemEnvelope>(created).item.id;
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
// Update + publish v2 so there's no live draft.
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "v2" } },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const revs = extractJson<{
|
||||
items: Array<{ id: string; data?: { title?: unknown } }>;
|
||||
}>(
|
||||
await harness.client.callTool({
|
||||
name: "revision_list",
|
||||
arguments: { collection: "post", id },
|
||||
}),
|
||||
);
|
||||
const v1Rev = revs.items.find((r) => r.data?.title === "v1");
|
||||
expect(v1Rev).toBeTruthy();
|
||||
|
||||
// Now live = v2, no draft. Restore v1.
|
||||
const restored = await harness.client.callTool({
|
||||
name: "revision_restore",
|
||||
arguments: { collection: "post", id, revisionId: v1Rev!.id },
|
||||
});
|
||||
expect(restored.isError, extractText(restored)).toBeFalsy();
|
||||
expect(readTitle(extractJson<ItemEnvelope>(restored).item)).toBe("v1");
|
||||
|
||||
// Live row should still hold v2; a new draft now exists pointing
|
||||
// at v1.
|
||||
const dbRow = (await db
|
||||
.selectFrom("ec_post" as never)
|
||||
.select(["title", "draft_revision_id"] as never)
|
||||
.where("id" as never, "=", id)
|
||||
.executeTakeFirst()) as
|
||||
| {
|
||||
title: unknown;
|
||||
draft_revision_id: string | null;
|
||||
}
|
||||
| undefined;
|
||||
expect(dbRow?.title).toBe("v2");
|
||||
expect(dbRow?.draft_revision_id).toBeTruthy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// F13: liveData carries the published values when a draft revision exists.
|
||||
// When no draft exists, liveData is undefined.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP drafts — liveData hydration (F13)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
supports: ["drafts", "revisions"],
|
||||
});
|
||||
await registry.createField("post", { slug: "title", label: "Title", type: "string" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("liveData is undefined when there is no draft revision", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "First" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{
|
||||
item: { data: { title: string }; liveData?: { title?: string } };
|
||||
}>(got).item;
|
||||
expect(item.data.title).toBe("First");
|
||||
expect(item.liveData).toBeUndefined();
|
||||
});
|
||||
|
||||
it("liveData carries the published values when a draft revision exists", async () => {
|
||||
// Create + publish, so the live value is "published title".
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "published title" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
// Update writes a draft revision (data column stays at "published title").
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "draft title" } },
|
||||
});
|
||||
|
||||
// Read back: data reflects the draft, liveData carries the published value.
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{
|
||||
item: { data: { title: string }; liveData?: { title?: string } };
|
||||
}>(got).item;
|
||||
expect(item.data.title).toBe("draft title");
|
||||
expect(item.liveData?.title).toBe("published title");
|
||||
});
|
||||
});
|
||||
403
packages/core/tests/integration/mcp/errors.test.ts
Normal file
403
packages/core/tests/integration/mcp/errors.test.ts
Normal file
@@ -0,0 +1,403 @@
|
||||
/**
|
||||
* MCP error envelope fidelity tests.
|
||||
*
|
||||
* Specific failure modes (unknown collection, duplicate slug, unknown
|
||||
* field, bad orderBy, etc.) must return discriminated error codes so
|
||||
* callers can act on them programmatically:
|
||||
*
|
||||
* - Handlers detect known failure shapes and return one of:
|
||||
* `SLUG_CONFLICT`, `COLLECTION_NOT_FOUND`, `UNKNOWN_FIELD`,
|
||||
* `INVALID_ORDER_BY`, `VALIDATION_ERROR`.
|
||||
* - The MCP envelope emits the code as a `[CODE]` prefix on the
|
||||
* message text and as `_meta.code` for SDK-aware clients.
|
||||
*
|
||||
* Each test asserts:
|
||||
* (a) the response is `isError: true`
|
||||
* (b) the code/message names the specific failure, not a generic
|
||||
* "Failed to ..." string
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { ContentRepository } from "../../../src/database/repositories/content.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { connectMcpHarness, extractText, type McpHarness } from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
// Generic placeholders that should NOT survive after the fix.
// Anchored exact-match patterns: tests assert `.not.toMatch(...)` against
// these, so a message passes as soon as it carries ANY extra detail.
const GENERIC_CREATE = /^Failed to create content$/;
const GENERIC_LIST = /^Failed to list content$/;
const GENERIC_UPDATE = /^Failed to update content$/;
const UNKNOWN_ERROR = /^Unknown error$/;
|
||||
describe("MCP error envelope — content_create (bug #3)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("unknown collection slug returns a discriminated NOT_FOUND-style error", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "nonexistent", data: { title: "Hi" } },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
// Message should name the specific failure (collection not found).
|
||||
expect(text).not.toMatch(GENERIC_CREATE);
|
||||
expect(text).not.toMatch(UNKNOWN_ERROR);
|
||||
// Tight match: explicitly the COLLECTION_NOT_FOUND code (or message),
|
||||
// rather than any text that happens to contain "collection".
|
||||
expect(text).toMatch(/COLLECTION_NOT_FOUND|Collection ['"]?nonexistent['"]? not found/i);
|
||||
});
|
||||
|
||||
it("duplicate slug returns a SLUG_CONFLICT-style error", async () => {
|
||||
// Seed an item with a known slug
|
||||
const repo = new ContentRepository(db);
|
||||
await repo.create({
|
||||
type: "post",
|
||||
data: { title: "First" },
|
||||
slug: "duplicate-me",
|
||||
status: "draft",
|
||||
authorId: "seed",
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "Second" },
|
||||
slug: "duplicate-me",
|
||||
},
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).not.toMatch(GENERIC_CREATE);
|
||||
expect(text).not.toMatch(UNKNOWN_ERROR);
|
||||
// Either explicit "slug" wording or a UNIQUE/conflict signal.
|
||||
expect(text).toMatch(/slug|unique|conflict|duplicate|exists/i);
|
||||
});
|
||||
|
||||
it("unknown field in data returns an UNKNOWN_FIELD-style error", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
// `nonexistent_field` was never created on the post collection
|
||||
data: { title: "Hello", nonexistent_field: "boom" },
|
||||
},
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).not.toMatch(GENERIC_CREATE);
|
||||
expect(text).not.toMatch(UNKNOWN_ERROR);
|
||||
expect(text).toMatch(/field|unknown|nonexistent_field|column/i);
|
||||
});
|
||||
});
|
||||
|
||||
describe("MCP error envelope — content_list (bug #3)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("unknown collection returns a COLLECTION_NOT_FOUND-style error, not a generic one", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "nonexistent" },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).not.toMatch(GENERIC_LIST);
|
||||
expect(text).not.toMatch(UNKNOWN_ERROR);
|
||||
expect(text).toMatch(/COLLECTION_NOT_FOUND|Collection ['"]?nonexistent['"]? not found/i);
|
||||
});
|
||||
|
||||
it("invalid orderBy column returns an INVALID_ORDER_BY-style error", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post", orderBy: "definitely_not_a_column" },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).not.toMatch(GENERIC_LIST);
|
||||
expect(text).not.toMatch(UNKNOWN_ERROR);
|
||||
// Concrete: response must echo the offending column AND carry a
|
||||
// stable validation-style code. Avoids matching unrelated phrases
|
||||
// that happen to contain "order" or "column".
|
||||
expect(text).toContain("definitely_not_a_column");
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("VALIDATION_ERROR");
|
||||
});
|
||||
});
|
||||
|
||||
describe("MCP error envelope — content_get (bug #3)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("missing item returns a clear NOT_FOUND error including the id (already works — regression guard)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id: "01NONEXISTENT" },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(text).toContain("01NONEXISTENT");
|
||||
});
|
||||
});
|
||||
|
||||
describe("MCP error envelope — content_update (bug #3)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("update on missing id returns a NOT_FOUND-style error", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id: "01NEVEREXISTED", data: { title: "x" } },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).not.toMatch(GENERIC_UPDATE);
|
||||
expect(text).not.toMatch(UNKNOWN_ERROR);
|
||||
expect(text).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(text).toContain("01NEVEREXISTED");
|
||||
});
|
||||
|
||||
it("stale _rev returns a CONFLICT-style error (not a generic one)", async () => {
|
||||
const repo = new ContentRepository(db);
|
||||
const item = await repo.create({
|
||||
type: "post",
|
||||
data: { title: "Original" },
|
||||
slug: "rev-test",
|
||||
status: "draft",
|
||||
authorId: "user_admin",
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: item.id,
|
||||
data: { title: "x" },
|
||||
_rev: "obviously-stale-rev",
|
||||
},
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).not.toMatch(GENERIC_UPDATE);
|
||||
expect(text).not.toMatch(UNKNOWN_ERROR);
|
||||
expect(text).toMatch(/conflict|rev|stale|outdated|modified/i);
|
||||
});
|
||||
});
|
||||
|
||||
describe("MCP error envelope — error code preservation through unwrap()", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
});
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
/**
|
||||
* The MCP SDK forwards `_meta` on tool results when present — once
|
||||
* `unwrap()` propagates it, callers can read structured codes
|
||||
* programmatically. Until then, codes must at least appear in the
|
||||
* message text so callers can match on a stable token.
|
||||
*/
|
||||
it("a NOT_FOUND error from a handler surfaces 'NOT_FOUND' or equivalent", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id: "01MISSING" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
// Either the structured _meta carries the code, or the message
|
||||
// includes a stable token. Today: only `Content item not found:` —
|
||||
// no machine-readable code.
|
||||
const text = extractText(result);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
const codeFromMeta = meta?.code;
|
||||
expect(codeFromMeta === "NOT_FOUND" || /\bNOT_FOUND\b/.test(text)).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// F7: error envelope correctly carries codes for SchemaError, McpError,
|
||||
// and SDK-thrown auth errors.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP error envelope — F7 (codes propagated for SchemaError + auth)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("INSUFFICIENT_SCOPE for a token without the required scope", async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
// Only grant content:read; content_create needs content:write.
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["content:read"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "x" } },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_SCOPE");
|
||||
expect(extractText(result)).toMatch(/INSUFFICIENT_SCOPE/);
|
||||
});
|
||||
|
||||
it("backwards compat: content:write token can call menu_create (implicit grant)", async () => {
|
||||
// PATs issued before menus:manage was split out of content:write
|
||||
// must continue to work. Verify the implicit grant flows through
|
||||
// the full MCP stack.
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["content:write"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Main" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("menus:manage token cannot call content_create (no reverse grant)", async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["menus:manage"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "x" } },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_SCOPE");
|
||||
});
|
||||
|
||||
it("INSUFFICIENT_PERMISSIONS for a role that's too low", async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_subscriber",
|
||||
userRole: Role.SUBSCRIBER,
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "x" } },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
|
||||
});
|
||||
|
||||
it("SchemaError code (RESERVED_SLUG) propagates through schema_create_collection", async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
});
|
||||
// '_emdash_collections' is the prefix used for system tables — that
|
||||
// kind of slug is reserved. Pick a guaranteed reserved slug
|
||||
// (the '_emdash' prefix or e.g. 'media' — see RESERVED_COLLECTION_SLUGS).
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "media", label: "Reserved" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
// SchemaError carries `code` directly; respondHandlerError should
|
||||
// forward it. Whichever specific reserved-slug code applies is fine
|
||||
// — just assert it's a stable string that isn't the generic fallback.
|
||||
expect(meta?.code).toBeDefined();
|
||||
expect(meta?.code).not.toBe("INTERNAL_ERROR");
|
||||
expect(meta?.code).not.toBe("");
|
||||
});
|
||||
});
|
||||
72
packages/core/tests/integration/mcp/harness-smoke.test.ts
Normal file
72
packages/core/tests/integration/mcp/harness-smoke.test.ts
Normal file
@@ -0,0 +1,72 @@
|
||||
/**
|
||||
* Smoke test for the MCP integration harness.
|
||||
*
|
||||
* Verifies the `connectMcpHarness()` plumbing is sound: real DB, real
|
||||
* runtime, real MCP client/server pair. This is not bug coverage — it
|
||||
* just guards against regressions in the harness itself. Bug-specific
|
||||
* tests live in the other files in this directory.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { connectMcpHarness, extractJson, type McpHarness } from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
describe("MCP harness smoke", () => {
|
||||
let harness: McpHarness;
|
||||
let dbCleanup: () => Promise<void>;
|
||||
|
||||
beforeEach(async () => {
|
||||
const db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: "user_admin",
|
||||
userRole: Role.ADMIN,
|
||||
});
|
||||
dbCleanup = () => teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
await harness.cleanup();
|
||||
await dbCleanup();
|
||||
});
|
||||
|
||||
it("exposes registered MCP tools via tools/list", async () => {
|
||||
const tools = await harness.client.listTools();
|
||||
const names = tools.tools.map((t) => t.name);
|
||||
expect(names).toContain("content_list");
|
||||
expect(names).toContain("content_create");
|
||||
expect(names).toContain("schema_list_collections");
|
||||
});
|
||||
|
||||
it("can call schema_list_collections and get the seeded test collections", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_list_collections",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError).toBeFalsy();
|
||||
const { items } = extractJson<{ items: Array<{ slug: string }> }>(result);
|
||||
const slugs = items.map((c) => c.slug);
|
||||
expect(slugs).toContain("post");
|
||||
expect(slugs).toContain("page");
|
||||
});
|
||||
|
||||
it("can round-trip a simple content_create + content_get", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Hello" } },
|
||||
});
|
||||
expect(created.isError).toBeFalsy();
|
||||
const createdItem = extractJson<{ item: { id: string; slug: string } }>(created);
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id: createdItem.item.id },
|
||||
});
|
||||
expect(got.isError).toBeFalsy();
|
||||
const gotItem = extractJson<{ item: { id: string; slug: string } }>(got);
|
||||
expect(gotItem.item.id).toBe(createdItem.item.id);
|
||||
expect(gotItem.item.slug).toBe("hello");
|
||||
});
|
||||
});
|
||||
162
packages/core/tests/integration/mcp/input-schemas.test.ts
Normal file
162
packages/core/tests/integration/mcp/input-schemas.test.ts
Normal file
@@ -0,0 +1,162 @@
|
||||
/**
|
||||
* MCP tool input schema tests.
|
||||
*
|
||||
* The MCP SDK validates `arguments` against each tool's `inputSchema`
|
||||
* (Zod) before the handler runs. These tests pin down what happens at
|
||||
* that boundary: missing required fields, wrong types, invalid enum
|
||||
* values, out-of-range numeric inputs, etc.
|
||||
*
|
||||
* The expected behavior is consistent: invalid arguments produce a
|
||||
* structured error response (`isError: true`) with a message that names
|
||||
* the offending field. We assert specifically that errors at this layer
|
||||
* remain user-friendly across the omnibus fix.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { connectMcpHarness, extractText, type McpHarness } from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
|
||||
describe("MCP input schema validation", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("content_create rejects missing required collection argument", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { data: { title: "T" } } as unknown as Record<string, unknown>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("content_create rejects wrong-type for data field (string instead of object)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: "not-an-object" } as unknown as Record<
|
||||
string,
|
||||
unknown
|
||||
>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("content_create with status enum value outside the enum is rejected", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T" },
|
||||
status: "weird-status",
|
||||
} as unknown as Record<string, unknown>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("content_list rejects out-of-range limit (e.g. negative)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post", limit: -5 },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("content_list rejects non-integer limit", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post", limit: 5.7 },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("content_list rejects order outside enum", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post", order: "sideways" } as unknown as Record<string, unknown>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("schema_create_collection rejects supports value outside enum", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: {
|
||||
slug: "x",
|
||||
label: "X",
|
||||
supports: ["drafts", "garbage"],
|
||||
} as unknown as Record<string, unknown>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("schema_create_field rejects type outside enum", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "x",
|
||||
label: "X",
|
||||
type: "magic",
|
||||
} as unknown as Record<string, unknown>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("content_get rejects missing id", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post" } as unknown as Record<string, unknown>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("content_schedule rejects missing scheduledAt", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_schedule",
|
||||
arguments: { collection: "post", id: "01ANY" } as unknown as Record<string, unknown>,
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("media_list with limit > 100 is rejected by inputSchema", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: { limit: 500 },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("revision_list with limit > 50 is rejected by inputSchema", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "revision_list",
|
||||
arguments: { collection: "post", id: "01x", limit: 500 },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("input validation error messages name the offending field", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "Has-Caps", label: "Bad" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
// Ideally the error names the field "slug" or shows the regex /
|
||||
// pattern violation. Today the SDK error usually does — pin that
|
||||
// behavior so it doesn't regress.
|
||||
expect(extractText(result)).toMatch(/slug|pattern|regex|invalid/i);
|
||||
});
|
||||
});
|
||||
228
packages/core/tests/integration/mcp/lifecycle.test.ts
Normal file
228
packages/core/tests/integration/mcp/lifecycle.test.ts
Normal file
@@ -0,0 +1,228 @@
|
||||
/**
|
||||
* MCP content lifecycle tests.
|
||||
*
|
||||
* Covers two contracts that callers rely on:
|
||||
*
|
||||
* - `content_unpublish` clears `published_at` so a missing/null timestamp
|
||||
* unambiguously means the item is not currently live. Re-publishing
|
||||
* assigns a fresh timestamp.
|
||||
* - `schema_create_collection` applies its documented default of
|
||||
* `['drafts', 'revisions']` for `supports` when the caller omits it.
|
||||
* Explicit `[]` is preserved as an opt-out.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import {
|
||||
setupTestDatabaseWithCollections,
|
||||
teardownTestDatabase,
|
||||
setupTestDatabase,
|
||||
} from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #10: unpublish publishedAt
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP content_unpublish — publishedAt clearing (bug #10)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("unpublish clears publishedAt so 'currently live' is unambiguous", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Will publish" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
// Publish — populates publishedAt
|
||||
const published = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const publishedItem = extractJson<{ item: { publishedAt: string | null } }>(published);
|
||||
expect(publishedItem.item.publishedAt).toBeTruthy();
|
||||
|
||||
// Unpublish — should clear publishedAt
|
||||
const unpublished = await harness.client.callTool({
|
||||
name: "content_unpublish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const unpubItem = extractJson<{
|
||||
item: { publishedAt: string | null; status: string };
|
||||
}>(unpublished);
|
||||
|
||||
expect(unpubItem.item.status).toBe("draft");
|
||||
// Bug #10: today, publishedAt is still the old timestamp.
|
||||
expect(unpubItem.item.publishedAt).toBeNull();
|
||||
});
|
||||
|
||||
it("content_get after unpublish reflects null publishedAt and status=draft", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_unpublish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const gotItem = extractJson<{
|
||||
item: { publishedAt: string | null; status: string };
|
||||
}>(got);
|
||||
expect(gotItem.item.status).toBe("draft");
|
||||
expect(gotItem.item.publishedAt).toBeNull();
|
||||
});
|
||||
|
||||
it("re-publish after unpublish gets a fresh publishedAt timestamp", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const firstPub = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const firstTs = extractJson<{ item: { publishedAt: string } }>(firstPub).item.publishedAt;
|
||||
expect(firstTs).toBeTruthy();
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_unpublish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
// Wait briefly so the new timestamp is distinguishable
|
||||
await new Promise((r) => setTimeout(r, 5));
|
||||
|
||||
const secondPub = await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const secondTs = extractJson<{ item: { publishedAt: string } }>(secondPub).item.publishedAt;
|
||||
expect(secondTs).toBeTruthy();
|
||||
// Should be a new timestamp, not the old one.
|
||||
expect(secondTs).not.toBe(firstTs);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #11: schema_create_collection supports default
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP schema_create_collection — supports default (bug #11)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("creating a collection without `supports` uses documented default ['drafts', 'revisions']", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "article", label: "Articles" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const created = extractJson<{ supports: string[] }>(result);
|
||||
|
||||
// Bug #11: today this is [] or null. After fix: ['drafts', 'revisions'].
|
||||
expect(created.supports).toEqual(expect.arrayContaining(["drafts", "revisions"]));
|
||||
});
|
||||
|
||||
it("explicit empty supports array is preserved (regression guard — opt-out)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "minimal", label: "Minimal", supports: [] },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const created = extractJson<{ supports: string[] }>(result);
|
||||
expect(created.supports).toEqual([]);
|
||||
});
|
||||
|
||||
it("explicit supports list is preserved exactly (regression guard)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: {
|
||||
slug: "blog",
|
||||
label: "Blog",
|
||||
supports: ["drafts", "revisions", "scheduling"],
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const created = extractJson<{ supports: string[] }>(result);
|
||||
expect(created.supports.toSorted()).toEqual(["drafts", "revisions", "scheduling"].toSorted());
|
||||
});
|
||||
|
||||
it("default-supports collection accepts publish/unpublish/revision flows immediately", async () => {
|
||||
// Default supports should include drafts + revisions, so the standard
|
||||
// publish/unpublish lifecycle should work without further config.
|
||||
await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "story", label: "Stories" },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: { collection: "story", slug: "title", label: "Title", type: "string" },
|
||||
});
|
||||
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "story", data: { title: "T" } },
|
||||
});
|
||||
expect(created.isError, extractText(created)).toBeFalsy();
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
// Update should create a draft revision (only meaningful if 'revisions' is in supports)
|
||||
await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "story", id, data: { title: "Updated" } },
|
||||
});
|
||||
|
||||
const revs = await harness.client.callTool({
|
||||
name: "revision_list",
|
||||
arguments: { collection: "story", id },
|
||||
});
|
||||
// If supports doesn't include 'revisions', revision_list returns empty
|
||||
// or fails. After fix: revisions exist.
|
||||
expect(revs.isError, extractText(revs)).toBeFalsy();
|
||||
const items = extractJson<{ items: unknown[] }>(revs).items;
|
||||
expect(items.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
469
packages/core/tests/integration/mcp/media.test.ts
Normal file
469
packages/core/tests/integration/mcp/media.test.ts
Normal file
@@ -0,0 +1,469 @@
|
||||
/**
|
||||
* MCP media tools — comprehensive integration tests.
|
||||
*
|
||||
* Covers:
|
||||
* - media_list (incl. mimeType filter, pagination)
|
||||
* - media_get
|
||||
* - media_update (incl. ownership)
|
||||
* - media_delete (incl. ownership)
|
||||
*
|
||||
* Plus regression for bug #14 (no media_upload tool gap) and bug #1
|
||||
* variants for media (the MCP code already handles null authorId
|
||||
* correctly for media — `media_update`/`media_delete` use `... || ""`,
|
||||
* unlike content extraction).
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { MediaRepository } from "../../../src/database/repositories/media.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
const EDITOR_ID = "user_editor";
|
||||
const AUTHOR_ID = "user_author";
|
||||
const OTHER_AUTHOR_ID = "user_other_author";
|
||||
const SUBSCRIBER_ID = "user_subscriber";
|
||||
|
||||
async function seedMedia(
|
||||
db: Kysely<Database>,
|
||||
overrides: Partial<{
|
||||
filename: string;
|
||||
mimeType: string;
|
||||
size: number;
|
||||
authorId: string | null;
|
||||
}> = {},
|
||||
): Promise<string> {
|
||||
const repo = new MediaRepository(db);
|
||||
const item = await repo.create({
|
||||
filename: overrides.filename ?? `file-${Math.random().toString(36).slice(2, 8)}.png`,
|
||||
mimeType: overrides.mimeType ?? "image/png",
|
||||
size: overrides.size ?? 1024,
|
||||
storageKey: `media/${Math.random().toString(36).slice(2, 10)}`,
|
||||
...(overrides.authorId !== null ? { authorId: overrides.authorId ?? ADMIN_ID } : {}),
|
||||
});
|
||||
return item.id;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// media_list
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("media_list", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns empty list when no media exists", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { items } = extractJson<{ items: unknown[] }>(result);
|
||||
expect(items).toEqual([]);
|
||||
});
|
||||
|
||||
it("lists all uploaded media", async () => {
|
||||
await seedMedia(db);
|
||||
await seedMedia(db);
|
||||
await seedMedia(db);
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: {},
|
||||
});
|
||||
const { items } = extractJson<{ items: unknown[] }>(result);
|
||||
expect(items).toHaveLength(3);
|
||||
});
|
||||
|
||||
it("filters by mimeType prefix", async () => {
|
||||
await seedMedia(db, { mimeType: "image/png" });
|
||||
await seedMedia(db, { mimeType: "image/jpeg" });
|
||||
await seedMedia(db, { mimeType: "application/pdf" });
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: { mimeType: "image/" },
|
||||
});
|
||||
const { items } = extractJson<{ items: Array<{ mimeType: string }> }>(result);
|
||||
expect(items).toHaveLength(2);
|
||||
for (const item of items) {
|
||||
expect(item.mimeType.startsWith("image/")).toBe(true);
|
||||
}
|
||||
});
|
||||
|
||||
it("paginates with cursor", async () => {
|
||||
for (let i = 0; i < 5; i++) await seedMedia(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const page1 = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: { limit: 2 },
|
||||
});
|
||||
const p1 = extractJson<{ items: Array<{ id: string }>; nextCursor?: string }>(page1);
|
||||
expect(p1.items).toHaveLength(2);
|
||||
expect(p1.nextCursor).toBeTruthy();
|
||||
|
||||
const page2 = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: { limit: 2, cursor: p1.nextCursor },
|
||||
});
|
||||
const p2 = extractJson<{ items: Array<{ id: string }> }>(page2);
|
||||
expect(p2.items).toHaveLength(2);
|
||||
|
||||
const p1Ids = p1.items.map((i) => i.id);
|
||||
for (const item of p2.items) expect(p1Ids).not.toContain(item.id);
|
||||
});
|
||||
|
||||
it("any logged-in user can list media", async () => {
|
||||
await seedMedia(db);
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// media_get
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("media_get", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns full media metadata", async () => {
|
||||
const id = await seedMedia(db, {
|
||||
filename: "logo.png",
|
||||
mimeType: "image/png",
|
||||
size: 2048,
|
||||
});
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_get",
|
||||
arguments: { id },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{
|
||||
item: { id: string; filename: string; mimeType: string; size: number };
|
||||
}>(result);
|
||||
expect(data.item.id).toBe(id);
|
||||
expect(data.item.filename).toBe("logo.png");
|
||||
expect(data.item.mimeType).toBe("image/png");
|
||||
expect(data.item.size).toBe(2048);
|
||||
});
|
||||
|
||||
it("returns NOT_FOUND for missing id", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_get",
|
||||
arguments: { id: "01NOTAMEDIAID" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("01NOTAMEDIAID");
|
||||
});
|
||||
|
||||
it("any logged-in user can get media", async () => {
|
||||
const id = await seedMedia(db);
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_get",
|
||||
arguments: { id },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// media_update
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("media_update", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("updates alt text and caption", async () => {
|
||||
const id = await seedMedia(db, { authorId: ADMIN_ID });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id, alt: "Logo image", caption: "Brand logo" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ item: { alt: string; caption: string } }>(result);
|
||||
expect(data.item.alt).toBe("Logo image");
|
||||
expect(data.item.caption).toBe("Brand logo");
|
||||
});
|
||||
|
||||
it("updates dimensions", async () => {
|
||||
const id = await seedMedia(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id, width: 1920, height: 1080 },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ item: { width: number; height: number } }>(result);
|
||||
expect(data.item.width).toBe(1920);
|
||||
expect(data.item.height).toBe(1080);
|
||||
});
|
||||
|
||||
it("AUTHOR can update their own media", async () => {
|
||||
const id = await seedMedia(db, { authorId: AUTHOR_ID });
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id, alt: "Mine" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("AUTHOR cannot update another user's media", async () => {
|
||||
const id = await seedMedia(db, { authorId: OTHER_AUTHOR_ID });
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id, alt: "Theirs" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("EDITOR can update any user's media", async () => {
|
||||
const id = await seedMedia(db, { authorId: OTHER_AUTHOR_ID });
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id, alt: "Editor override" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("ADMIN can update media with null authorId (regression: this works for media but not content — bug #1 inconsistency)", async () => {
|
||||
const id = await seedMedia(db, { authorId: null });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id, alt: "No author" },
|
||||
});
|
||||
// Already works correctly for media — confirms the fix path for
|
||||
// content (use `... || ""` instead of throwing).
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("AUTHOR cannot update media with null authorId (no ownership claim)", async () => {
|
||||
const id = await seedMedia(db, { authorId: null });
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id, alt: "Should fail" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("returns NOT_FOUND-style error for missing id", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id: "01NEVEREXISTED", alt: "x" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("01NEVEREXISTED");
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// media_delete
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("media_delete", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("deletes a media item", async () => {
|
||||
const id = await seedMedia(db, { authorId: ADMIN_ID });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_delete",
|
||||
arguments: { id },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
// Verify it's gone
|
||||
const got = await harness.client.callTool({
|
||||
name: "media_get",
|
||||
arguments: { id },
|
||||
});
|
||||
expect(got.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("AUTHOR cannot delete another user's media", async () => {
|
||||
const id = await seedMedia(db, { authorId: OTHER_AUTHOR_ID });
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_delete",
|
||||
arguments: { id },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("EDITOR can delete any user's media", async () => {
|
||||
const id = await seedMedia(db, { authorId: OTHER_AUTHOR_ID });
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_delete",
|
||||
arguments: { id },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("returns NOT_FOUND for missing id", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_delete",
|
||||
arguments: { id: "01NOPE" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("01NOPE");
|
||||
});
|
||||
|
||||
it("delete is idempotent — second delete on same id returns NOT_FOUND, not crash", async () => {
|
||||
const id = await seedMedia(db, { authorId: ADMIN_ID });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
await harness.client.callTool({ name: "media_delete", arguments: { id } });
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_delete",
|
||||
arguments: { id },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #14 — gap: media_create tool is now available
|
||||
// F1: media_create persists authorId so ownership checks subsequently succeed
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("media_create (bug #14 / F1)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("MCP exposes media_create", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const tools = await harness.client.listTools();
|
||||
const names = new Set(tools.tools.map((t) => t.name));
|
||||
expect(names.has("media_create")).toBe(true);
|
||||
});
|
||||
|
||||
it("AUTHOR creates media; subsequent media_get returns it; same author can update; different author cannot", async () => {
|
||||
// AUTHOR creates the media item via media_create.
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const create = await harness.client.callTool({
|
||||
name: "media_create",
|
||||
arguments: {
|
||||
filename: "logo.png",
|
||||
mimeType: "image/png",
|
||||
storageKey: "media/logo-key",
|
||||
size: 4096,
|
||||
},
|
||||
});
|
||||
expect(create.isError, extractText(create)).toBeFalsy();
|
||||
const created = extractJson<{ item: { id: string; filename: string } }>(create);
|
||||
expect(created.item.filename).toBe("logo.png");
|
||||
|
||||
// media_get returns the same id.
|
||||
const got = await harness.client.callTool({
|
||||
name: "media_get",
|
||||
arguments: { id: created.item.id },
|
||||
});
|
||||
expect(got.isError, extractText(got)).toBeFalsy();
|
||||
const fetched = extractJson<{ item: { id: string } }>(got);
|
||||
expect(fetched.item.id).toBe(created.item.id);
|
||||
|
||||
// Same AUTHOR can update — proves authorId was persisted.
|
||||
const ownUpdate = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id: created.item.id, alt: "company logo" },
|
||||
});
|
||||
expect(ownUpdate.isError, extractText(ownUpdate)).toBeFalsy();
|
||||
await harness.cleanup();
|
||||
|
||||
// A different AUTHOR is denied.
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: OTHER_AUTHOR_ID,
|
||||
userRole: Role.AUTHOR,
|
||||
});
|
||||
const otherUpdate = await harness.client.callTool({
|
||||
name: "media_update",
|
||||
arguments: { id: created.item.id, alt: "intruder caption" },
|
||||
});
|
||||
expect(otherUpdate.isError).toBe(true);
|
||||
expect(extractText(otherUpdate)).toMatch(/insufficient|permission|forbidden/i);
|
||||
});
|
||||
});
|
||||
421
packages/core/tests/integration/mcp/menu.test.ts
Normal file
421
packages/core/tests/integration/mcp/menu.test.ts
Normal file
@@ -0,0 +1,421 @@
|
||||
/**
|
||||
* MCP menu tools — comprehensive integration tests.
|
||||
*
|
||||
* Covers:
|
||||
* - menu_list
|
||||
* - menu_get
|
||||
*
|
||||
* Plus regression for bug #15 (no menu mutation tools — gap).
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { ulid } from "ulidx";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
const SUBSCRIBER_ID = "user_subscriber";
|
||||
|
||||
async function seedMenu(
|
||||
db: Kysely<Database>,
|
||||
name: string,
|
||||
label: string,
|
||||
items: Array<{
|
||||
label: string;
|
||||
url?: string;
|
||||
sort_order?: number;
|
||||
parent_id?: string | null;
|
||||
}> = [],
|
||||
): Promise<string> {
|
||||
const menuId = ulid();
|
||||
const now = new Date().toISOString();
|
||||
await db
|
||||
.insertInto("_emdash_menus" as never)
|
||||
.values({ id: menuId, name, label, created_at: now, updated_at: now } as never)
|
||||
.execute();
|
||||
|
||||
for (const [i, item] of items.entries()) {
|
||||
await db
|
||||
.insertInto("_emdash_menu_items" as never)
|
||||
.values({
|
||||
id: ulid(),
|
||||
menu_id: menuId,
|
||||
label: item.label,
|
||||
custom_url: item.url ?? null,
|
||||
type: "custom",
|
||||
sort_order: item.sort_order ?? i,
|
||||
parent_id: item.parent_id ?? null,
|
||||
created_at: now,
|
||||
} as never)
|
||||
.execute();
|
||||
}
|
||||
return menuId;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// menu_list
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("menu_list", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns empty list when no menus exist", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_list",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson(result);
|
||||
expect(Array.isArray(data) ? data : []).toEqual([]);
|
||||
});
|
||||
|
||||
it("lists multiple menus in alphabetical order", async () => {
|
||||
await seedMenu(db, "main", "Main Menu");
|
||||
await seedMenu(db, "footer", "Footer");
|
||||
await seedMenu(db, "sidebar", "Sidebar");
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_list",
|
||||
arguments: {},
|
||||
});
|
||||
const data = extractJson<Array<{ name: string; label: string }>>(result);
|
||||
expect(data.map((m) => m.name)).toEqual(["footer", "main", "sidebar"]);
|
||||
});
|
||||
|
||||
it("itemCount reflects per-menu item count (LEFT JOIN correctness)", async () => {
|
||||
// handleMenuList uses a single LEFT JOIN + GROUP BY for the count.
|
||||
// A regression to INNER JOIN would drop empty menus; a regression
|
||||
// in the count column or join key would silently report wrong
|
||||
// numbers per menu. Seed three menus with known, distinct counts.
|
||||
await seedMenu(db, "empty", "Empty");
|
||||
await seedMenu(db, "single", "Single", [{ label: "Home", url: "/" }]);
|
||||
await seedMenu(db, "triple", "Triple", [
|
||||
{ label: "Home", url: "/" },
|
||||
{ label: "About", url: "/about" },
|
||||
{ label: "Blog", url: "/blog" },
|
||||
]);
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({ name: "menu_list", arguments: {} });
|
||||
const data = extractJson<Array<{ name: string; itemCount: number }>>(result);
|
||||
|
||||
const empty = data.find((m) => m.name === "empty");
|
||||
const single = data.find((m) => m.name === "single");
|
||||
const triple = data.find((m) => m.name === "triple");
|
||||
expect(empty?.itemCount).toBe(0);
|
||||
expect(single?.itemCount).toBe(1);
|
||||
expect(triple?.itemCount).toBe(3);
|
||||
// Empty menu must still be present — guards against an INNER JOIN
|
||||
// regression where it would disappear.
|
||||
expect(data.map((m) => m.name)).toContain("empty");
|
||||
});
|
||||
|
||||
it("any logged-in user can list menus", async () => {
|
||||
await seedMenu(db, "main", "Main");
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_list",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// menu_get
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("menu_get", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns menu with items in sort order", async () => {
|
||||
await seedMenu(db, "main", "Main", [
|
||||
{ label: "Home", url: "/", sort_order: 0 },
|
||||
{ label: "Blog", url: "/blog", sort_order: 1 },
|
||||
{ label: "About", url: "/about", sort_order: 2 },
|
||||
]);
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const menu = extractJson<{
|
||||
name: string;
|
||||
items: Array<{ label: string; sort_order: number }>;
|
||||
}>(result);
|
||||
expect(menu.name).toBe("main");
|
||||
expect(menu.items).toHaveLength(3);
|
||||
expect(menu.items.map((i) => i.label)).toEqual(["Home", "Blog", "About"]);
|
||||
});
|
||||
|
||||
it("returns NOT_FOUND error for missing menu", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "ghost" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("ghost");
|
||||
});
|
||||
|
||||
it("empty menu returns empty items array", async () => {
|
||||
await seedMenu(db, "empty", "Empty Menu", []);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "empty" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const menu = extractJson<{ items: unknown[] }>(result);
|
||||
expect(menu.items).toEqual([]);
|
||||
});
|
||||
|
||||
it("any logged-in user can get a menu", async () => {
|
||||
await seedMenu(db, "main", "Main", [{ label: "Home", url: "/" }]);
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #15 / F6 / F12 — happy paths for menu mutation tools.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("menu mutations (bug #15 / F6 / F12)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("MCP exposes menu_create, menu_update, menu_set_items, menu_delete", async () => {
|
||||
const tools = await harness.client.listTools();
|
||||
const names = new Set(tools.tools.map((t) => t.name));
|
||||
expect(names.has("menu_create")).toBe(true);
|
||||
expect(names.has("menu_update")).toBe(true);
|
||||
expect(names.has("menu_set_items")).toBe(true);
|
||||
expect(names.has("menu_delete")).toBe(true);
|
||||
});
|
||||
|
||||
it("menu_create + menu_get round-trip", async () => {
|
||||
const create = await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Main Menu" },
|
||||
});
|
||||
expect(create.isError, extractText(create)).toBeFalsy();
|
||||
|
||||
const get = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
expect(get.isError, extractText(get)).toBeFalsy();
|
||||
const menu = extractJson<{ name: string; label: string; items: unknown[] }>(get);
|
||||
expect(menu.name).toBe("main");
|
||||
expect(menu.label).toBe("Main Menu");
|
||||
expect(menu.items).toEqual([]);
|
||||
});
|
||||
|
||||
it("menu_create with a duplicate name returns CONFLICT", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Main" },
|
||||
});
|
||||
const dup = await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Other" },
|
||||
});
|
||||
expect(dup.isError).toBe(true);
|
||||
expect(extractText(dup)).toMatch(/CONFLICT|already exists/i);
|
||||
});
|
||||
|
||||
it("menu_update changes the label", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Original" },
|
||||
});
|
||||
const update = await harness.client.callTool({
|
||||
name: "menu_update",
|
||||
arguments: { name: "main", label: "Renamed" },
|
||||
});
|
||||
expect(update.isError, extractText(update)).toBeFalsy();
|
||||
|
||||
const get = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
const menu = extractJson<{ label: string }>(get);
|
||||
expect(menu.label).toBe("Renamed");
|
||||
});
|
||||
|
||||
it("menu_set_items with empty list clears all items", async () => {
|
||||
await seedMenu(db, "main", "Main", [
|
||||
{ label: "Home", url: "/" },
|
||||
{ label: "Blog", url: "/blog" },
|
||||
]);
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_set_items",
|
||||
arguments: { name: "main", items: [] },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
const get = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
const menu = extractJson<{ items: unknown[] }>(get);
|
||||
expect(menu.items).toEqual([]);
|
||||
});
|
||||
|
||||
it("menu_set_items supports 3-level nesting via parentIndex chain", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Main" },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_set_items",
|
||||
arguments: {
|
||||
name: "main",
|
||||
items: [
|
||||
{ label: "Root", type: "custom", customUrl: "/" },
|
||||
{ label: "Child", type: "custom", customUrl: "/child", parentIndex: 0 },
|
||||
{ label: "Grandchild", type: "custom", customUrl: "/gc", parentIndex: 1 },
|
||||
],
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
const get = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
const menu = extractJson<{
|
||||
items: Array<{ id: string; label: string; parent_id: string | null; sort_order: number }>;
|
||||
}>(get);
|
||||
expect(menu.items).toHaveLength(3);
|
||||
|
||||
const byLabel = new Map(menu.items.map((i) => [i.label, i]));
|
||||
const root = byLabel.get("Root");
|
||||
const child = byLabel.get("Child");
|
||||
const grand = byLabel.get("Grandchild");
|
||||
expect(root?.parent_id).toBeNull();
|
||||
expect(child?.parent_id).toBe(root?.id);
|
||||
expect(grand?.parent_id).toBe(child?.id);
|
||||
});
|
||||
|
||||
it("menu_set_items rejects parentIndex >= i (must be earlier)", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Main" },
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "menu_set_items",
|
||||
arguments: {
|
||||
name: "main",
|
||||
items: [
|
||||
{ label: "A", type: "custom", customUrl: "/a", parentIndex: 0 }, // self-ref
|
||||
],
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/VALIDATION_ERROR|parentIndex/);
|
||||
});
|
||||
|
||||
it("F6: menu_delete removes both menu and items (D1 cascade safe)", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "menu_create",
|
||||
arguments: { name: "main", label: "Main" },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "menu_set_items",
|
||||
arguments: {
|
||||
name: "main",
|
||||
items: [
|
||||
{ label: "A", type: "custom", customUrl: "/a" },
|
||||
{ label: "B", type: "custom", customUrl: "/b" },
|
||||
{ label: "C", type: "custom", customUrl: "/c" },
|
||||
],
|
||||
},
|
||||
});
|
||||
|
||||
// Sanity: menu_get sees 3 items.
|
||||
const before = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
const menuBefore = extractJson<{
|
||||
id: string;
|
||||
items: unknown[];
|
||||
}>(before);
|
||||
expect(menuBefore.items).toHaveLength(3);
|
||||
|
||||
// Delete.
|
||||
const del = await harness.client.callTool({
|
||||
name: "menu_delete",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
expect(del.isError, extractText(del)).toBeFalsy();
|
||||
|
||||
// Items table is empty for that menu_id.
|
||||
const orphans = await db
|
||||
.selectFrom("_emdash_menu_items" as never)
|
||||
.select(["id" as never])
|
||||
.where("menu_id" as never, "=", menuBefore.id as never)
|
||||
.execute();
|
||||
expect(orphans).toEqual([]);
|
||||
|
||||
// menu_get returns NOT_FOUND.
|
||||
const after = await harness.client.callTool({
|
||||
name: "menu_get",
|
||||
arguments: { name: "main" },
|
||||
});
|
||||
expect(after.isError).toBe(true);
|
||||
expect(extractText(after)).toMatch(/NOT_FOUND/);
|
||||
});
|
||||
});
|
||||
331
packages/core/tests/integration/mcp/ownership.test.ts
Normal file
331
packages/core/tests/integration/mcp/ownership.test.ts
Normal file
@@ -0,0 +1,331 @@
|
||||
/**
|
||||
* MCP ownership / authorization integration tests.
|
||||
*
|
||||
* The MCP server's `extractContentAuthorId()` returns "" (empty string)
|
||||
* for content with null authorId — mirroring the REST handler. Then
|
||||
* `canActOnOwn(user, "", own, any)` defers to the "any" permission so
|
||||
* EDITOR+ can edit seed-imported content while CONTRIBUTOR/AUTHOR are
|
||||
* denied with a clean permission error.
|
||||
*
|
||||
* These tests cover every permutation of role × ownership × null-author.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { ContentRepository } from "../../../src/database/repositories/content.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { connectMcpHarness, extractText, type McpHarness } from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
const EDITOR_ID = "user_editor";
|
||||
const AUTHOR_ID = "user_author";
|
||||
const CONTRIBUTOR_ID = "user_contributor";
|
||||
|
||||
const NULL_AUTHOR_ERROR = /no.*authorId|content has no authorId/i;
|
||||
|
||||
describe("MCP ownership — null authorId (bug #1)", () => {
  let db: Kysely<Database>;
  let harness: McpHarness;

  /**
   * Seed a published post directly through the repository (bypassing the
   * MCP surface under test). Passing `null` omits `authorId` entirely so
   * the row is created with no owner.
   */
  async function seedItemWithAuthor(authorId: string | null): Promise<string> {
    const repo = new ContentRepository(db);
    const item = await repo.create({
      type: "post",
      data: { title: "Seeded Post" },
      // Random suffix avoids slug collisions across tests in this file.
      slug: `seeded-${Math.random().toString(36).slice(2, 8)}`,
      status: "published",
      ...(authorId !== null ? { authorId } : {}),
    });
    return item.id;
  }

  // Connect the shared harness as the given role; afterEach cleans it up.
  async function connect(role: keyof typeof userIdByRole): Promise<void> {
    harness = await connectMcpHarness({
      db,
      userId: userIdByRole[role],
      userRole: roleByName[role],
    });
  }

  // Role name -> user id used when connecting the harness.
  const userIdByRole = {
    admin: ADMIN_ID,
    editor: EDITOR_ID,
    author: AUTHOR_ID,
    contributor: CONTRIBUTOR_ID,
  } as const;

  // Role name -> Role enum value for the same connection.
  const roleByName = {
    admin: Role.ADMIN,
    editor: Role.EDITOR,
    author: Role.AUTHOR,
    contributor: Role.CONTRIBUTOR,
  } as const;

  beforeEach(async () => {
    db = await setupTestDatabaseWithCollections();
  });

  afterEach(async () => {
    // Harness may be unset if a test failed before connect() ran.
    if (harness) await harness.cleanup();
    await teardownTestDatabase(db);
  });

  // ----- content_update -----

  describe("content_update", () => {
    it("ADMIN can update content with null authorId", async () => {
      const id = await seedItemWithAuthor(null);
      await connect("admin");

      const result = await harness.client.callTool({
        name: "content_update",
        arguments: { collection: "post", id, data: { title: "Updated by admin" } },
      });

      // Currently fails with NULL_AUTHOR_ERROR. After fix: succeeds.
      expect(result.isError, extractText(result)).toBeFalsy();
    });

    it("EDITOR can update content with null authorId", async () => {
      const id = await seedItemWithAuthor(null);
      await connect("editor");

      const result = await harness.client.callTool({
        name: "content_update",
        arguments: { collection: "post", id, data: { title: "Updated by editor" } },
      });

      // EDITOR holds content:edit_any, which the empty-author fallback defers to.
      expect(result.isError, extractText(result)).toBeFalsy();
    });

    it("AUTHOR cannot update content with null authorId (no ownership claim)", async () => {
      const id = await seedItemWithAuthor(null);
      await connect("author");

      const result = await harness.client.callTool({
        name: "content_update",
        arguments: { collection: "post", id, data: { title: "Should fail" } },
      });

      // AUTHOR has only content:edit_own — without an authorId match,
      // they have no "own" claim and lack content:edit_any.
      expect(result.isError).toBe(true);
      // Negative: NOT the null-author internal error.
      expect(extractText(result)).not.toMatch(NULL_AUTHOR_ERROR);
      // Positive: clean permission error with the structured code.
      const meta = (result as { _meta?: { code?: string } })._meta;
      expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
    });

    it("CONTRIBUTOR cannot update content with null authorId", async () => {
      const id = await seedItemWithAuthor(null);
      await connect("contributor");

      const result = await harness.client.callTool({
        name: "content_update",
        arguments: { collection: "post", id, data: { title: "Should fail" } },
      });

      // Same shape as the AUTHOR case: denied cleanly, no internal error leak.
      expect(result.isError).toBe(true);
      expect(extractText(result)).not.toMatch(NULL_AUTHOR_ERROR);
      const meta = (result as { _meta?: { code?: string } })._meta;
      expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
    });
  });

  // ----- content_delete -----

  describe("content_delete (trash)", () => {
    it("ADMIN can trash content with null authorId", async () => {
      const id = await seedItemWithAuthor(null);
      await connect("admin");

      const result = await harness.client.callTool({
        name: "content_delete",
        arguments: { collection: "post", id },
      });

      expect(result.isError, extractText(result)).toBeFalsy();
    });

    it("AUTHOR cannot trash content with null authorId", async () => {
      const id = await seedItemWithAuthor(null);
      await connect("author");

      const result = await harness.client.callTool({
        name: "content_delete",
        arguments: { collection: "post", id },
      });

      expect(result.isError).toBe(true);
      expect(extractText(result)).not.toMatch(NULL_AUTHOR_ERROR);
      const meta = (result as { _meta?: { code?: string } })._meta;
      expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
    });
  });

  // ----- content_publish / content_unpublish -----

  describe("publish / unpublish", () => {
    it("ADMIN can publish content with null authorId", async () => {
      // Create as draft so publish is meaningful
      const repo = new ContentRepository(db);
      const item = await repo.create({
        type: "post",
        data: { title: "Draft" },
        slug: "draft-null-author",
        status: "draft",
      });
      await connect("admin");

      const result = await harness.client.callTool({
        name: "content_publish",
        arguments: { collection: "post", id: item.id },
      });

      expect(result.isError, extractText(result)).toBeFalsy();
    });

    it("ADMIN can unpublish content with null authorId", async () => {
      // Seeded item is published, so unpublish is the meaningful transition.
      const id = await seedItemWithAuthor(null);
      await connect("admin");

      const result = await harness.client.callTool({
        name: "content_unpublish",
        arguments: { collection: "post", id },
      });

      expect(result.isError, extractText(result)).toBeFalsy();
    });
  });

  // ----- content_schedule -----

  describe("content_schedule", () => {
    it("ADMIN can schedule content with null authorId", async () => {
      // Draft item with no authorId; scheduling requires a future timestamp.
      const repo = new ContentRepository(db);
      const item = await repo.create({
        type: "post",
        data: { title: "Sched draft" },
        slug: "sched-null-author",
        status: "draft",
      });
      await connect("admin");

      // One hour in the future keeps the schedule unambiguously valid.
      const future = new Date(Date.now() + 60 * 60 * 1000).toISOString();
      const result = await harness.client.callTool({
        name: "content_schedule",
        arguments: { collection: "post", id: item.id, scheduledAt: future },
      });

      expect(result.isError, extractText(result)).toBeFalsy();
    });
  });

  // ----- content_restore (from trash) -----

  describe("content_restore", () => {
    it("ADMIN can restore trashed content with null authorId", async () => {
      const id = await seedItemWithAuthor(null);
      // Trash via repo to bypass MCP (which we're testing)
      const repo = new ContentRepository(db);
      await repo.delete("post", id);

      await connect("admin");

      const result = await harness.client.callTool({
        name: "content_restore",
        arguments: { collection: "post", id },
      });

      expect(result.isError, extractText(result)).toBeFalsy();
    });
  });

  // ----- Sanity checks: ownership behavior unchanged for non-null cases -----

  describe("regression guard — ownership still enforced when authorId is set", () => {
    it("AUTHOR can update their own content (authorId matches)", async () => {
      const id = await seedItemWithAuthor(AUTHOR_ID);
      await connect("author");

      const result = await harness.client.callTool({
        name: "content_update",
        arguments: { collection: "post", id, data: { title: "Updated own" } },
      });

      expect(result.isError, extractText(result)).toBeFalsy();
    });

    it("AUTHOR cannot update someone else's content (authorId set to other user)", async () => {
      const id = await seedItemWithAuthor("user_someone_else");
      await connect("author");

      const result = await harness.client.callTool({
        name: "content_update",
        arguments: { collection: "post", id, data: { title: "Updated other" } },
      });

      expect(result.isError).toBe(true);
    });

    it("EDITOR can update anyone's content (any-permission)", async () => {
      const id = await seedItemWithAuthor("user_someone_else");
      await connect("editor");

      const result = await harness.client.callTool({
        name: "content_update",
        arguments: { collection: "post", id, data: { title: "Editor override" } },
      });

      expect(result.isError, extractText(result)).toBeFalsy();
    });
  });
});
|
||||
|
||||
describe("MCP ownership — error shape consistency", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("denied-by-permissions error does NOT mention 'authorId' (internal detail)", async () => {
|
||||
const repo = new ContentRepository(db);
|
||||
const item = await repo.create({
|
||||
type: "post",
|
||||
data: { title: "Test" },
|
||||
slug: "perm-test",
|
||||
status: "published",
|
||||
});
|
||||
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: AUTHOR_ID,
|
||||
userRole: Role.AUTHOR,
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id: item.id, data: { title: "Nope" } },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
// Negative: "authorId" is an internal column name and must not leak
|
||||
// to the user-facing message.
|
||||
expect(extractText(result)).not.toMatch(/authorId/);
|
||||
// Positive: the response carries a permissions code so callers can
|
||||
// distinguish "you can't do this" from any other failure mode.
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
|
||||
});
|
||||
});
|
||||
264
packages/core/tests/integration/mcp/pagination.test.ts
Normal file
264
packages/core/tests/integration/mcp/pagination.test.ts
Normal file
@@ -0,0 +1,264 @@
|
||||
/**
|
||||
* MCP pagination / cursor tests.
|
||||
*
|
||||
* Malformed cursors must produce a structured `INVALID_CURSOR` error
|
||||
* instead of silently returning the first page (the latter would let UI
|
||||
* pagination bugs re-fetch the whole table without any signal).
|
||||
*
|
||||
 * `decodeCursor()` throws `InvalidCursorError` on invalid input; the handler
 * catches it and translates it to `INVALID_CURSOR`. The MCP boundary also
|
||||
* applies `z.string().min(1).max(2048)` to reject obvious DoS attempts
|
||||
* before they reach the decoder.
|
||||
*
|
||||
* Tests cover the MCP-visible list surface: content_list,
|
||||
* content_list_trashed, media_list, revision_list, taxonomy_list_terms.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { ContentRepository } from "../../../src/database/repositories/content.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
|
||||
const INVALID_CURSOR = /cursor|invalid|malformed/i;
|
||||
|
||||
async function seedPosts(db: Kysely<Database>, count: number, prefix = "post"): Promise<string[]> {
|
||||
const repo = new ContentRepository(db);
|
||||
const ids: string[] = [];
|
||||
for (let i = 0; i < count; i++) {
|
||||
const item = await repo.create({
|
||||
type: "post",
|
||||
data: { title: `${prefix} ${i}` },
|
||||
slug: `${prefix}-${i}`,
|
||||
status: "draft",
|
||||
authorId: ADMIN_ID,
|
||||
});
|
||||
ids.push(item.id);
|
||||
}
|
||||
return ids;
|
||||
}
|
||||
|
||||
describe("MCP cursor pagination — content_list (bug #12)", () => {
  let db: Kysely<Database>;
  let harness: McpHarness;

  beforeEach(async () => {
    db = await setupTestDatabaseWithCollections();
    // All pagination tests run as ADMIN; permission checks are not under test here.
    harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
  });

  afterEach(async () => {
    if (harness) await harness.cleanup();
    await teardownTestDatabase(db);
  });

  it("rejects garbage cursor with a structured error (does NOT silently return first page)", async () => {
    await seedPosts(db, 5);

    const result = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", cursor: "obviously-malformed-cursor" },
    });

    // Currently: returns the full first page.
    // After fix: returns isError with INVALID_CURSOR-style message.
    // (INVALID_CURSOR is the loose regex defined at the top of this file.)
    expect(result.isError).toBe(true);
    expect(extractText(result)).toMatch(INVALID_CURSOR);
  });

  it("rejects empty-string cursor with a structured error", async () => {
    await seedPosts(db, 5);

    const result = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", cursor: "" },
    });

    // An empty cursor is unambiguously invalid — should error rather
    // than silently treating it as "no cursor".
    expect(result.isError).toBe(true);
  });

  it("rejects base64-decodable but structurally-wrong cursor", async () => {
    await seedPosts(db, 5);

    // Valid base64 but doesn't match the expected `{orderValue, id}` shape.
    const bogus = Buffer.from(JSON.stringify({ wrong: "shape" })).toString("base64");

    const result = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", cursor: bogus },
    });

    expect(result.isError).toBe(true);
    expect(extractText(result)).toMatch(INVALID_CURSOR);
  });

  it("rejects cursor with non-string id field", async () => {
    await seedPosts(db, 5);

    // Right keys, wrong value type: `id` must be a string, not a number.
    const bogus = Buffer.from(JSON.stringify({ orderValue: "x", id: 42 })).toString("base64");

    const result = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", cursor: bogus },
    });

    expect(result.isError).toBe(true);
  });

  it("valid cursor returns the correct next page (regression guard)", async () => {
    await seedPosts(db, 5);

    // Page 1: two items plus a cursor pointing at the rest.
    const first = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", limit: 2 },
    });
    const firstData = extractJson<{
      items: Array<{ id: string }>;
      nextCursor?: string;
    }>(first);
    expect(firstData.items).toHaveLength(2);
    expect(firstData.nextCursor).toBeTruthy();

    // Page 2: reusing the issued cursor must advance, not restart.
    const second = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", limit: 2, cursor: firstData.nextCursor },
    });
    const secondData = extractJson<{
      items: Array<{ id: string }>;
    }>(second);
    expect(secondData.items).toHaveLength(2);
    // Different ids than the first page
    const firstIds = firstData.items.map((i) => i.id);
    const secondIds = secondData.items.map((i) => i.id);
    for (const id of secondIds) {
      expect(firstIds).not.toContain(id);
    }
  });

  it("rejects oversized cursor without attempting to decode it (DoS guard)", async () => {
    await seedPosts(db, 3);

    // Cursors we issue are well under 200 chars. A multi-KB cursor is
    // almost certainly an attacker probing the base64 decoder. The
    // MCP input schema caps cursors at 2048 chars; this test forces a
    // rejection at the schema boundary rather than letting the
    // decoder allocate against a giant string.
    const huge = "A".repeat(10_000);

    const result = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", cursor: huge },
    });

    expect(result.isError).toBe(true);
  });

  it("malformed cursor on second page does not skip back to start", async () => {
    await seedPosts(db, 5);

    const first = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", limit: 2 },
    });
    const firstData = extractJson<{ items: Array<{ id: string }>; nextCursor?: string }>(first);

    // Tamper with the cursor — change one character
    const tampered = firstData.nextCursor ? firstData.nextCursor.slice(0, -1) + "X" : "garbage";

    const result = await harness.client.callTool({
      name: "content_list",
      arguments: { collection: "post", limit: 2, cursor: tampered },
    });

    // Bug today: returns first page again (callers re-process duplicates).
    // After fix: errors so callers can detect the bug.
    expect(result.isError).toBe(true);
  });
});
|
||||
|
||||
describe("MCP cursor pagination — other list tools (bug #12 propagation)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("content_list_trashed rejects malformed cursor", async () => {
|
||||
const ids = await seedPosts(db, 3);
|
||||
const repo = new ContentRepository(db);
|
||||
for (const id of ids) await repo.delete("post", id);
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_list_trashed",
|
||||
arguments: { collection: "post", cursor: "garbage" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
// (revision_list cursor test deleted: the tool's input schema doesn't
|
||||
// declare a cursor parameter, and Zod's default behavior is to drop
|
||||
// unknown keys silently — so the previous "expect(result).toBeDefined()"
|
||||
// was meaningless. Forcing schema strict-mode is out of scope.)
|
||||
|
||||
it("media_list rejects malformed cursor", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "media_list",
|
||||
arguments: { cursor: "garbage" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
describe("MCP cursor pagination — limit clamping (regression guard)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("limit beyond max is clamped, not rejected", async () => {
|
||||
await seedPosts(db, 3);
|
||||
|
||||
// Per AGENTS.md: max limit is 100. Higher should be clamped, not error.
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post", limit: 1000 },
|
||||
});
|
||||
// Either Zod rejects via inputSchema (also fine) or the handler clamps.
|
||||
// Both are valid; what's NOT valid is silently honoring 1000 against
|
||||
// a real backend.
|
||||
if (result.isError) {
|
||||
// Rejection branch: must be a validation error, not a generic 500.
|
||||
expect(extractText(result)).toMatch(/limit|max|exceed|invalid/i);
|
||||
} else {
|
||||
const data = extractJson<{ items: unknown[] }>(result);
|
||||
expect(data.items.length).toBeLessThanOrEqual(100);
|
||||
}
|
||||
});
|
||||
});
|
||||
@@ -0,0 +1,560 @@
|
||||
/**
|
||||
* MCP content_publish + content_update field-coverage tests.
|
||||
*
|
||||
* Pins the contracts for:
|
||||
*
|
||||
* - **#622** `content_publish` accepts an optional `publishedAt` ISO 8601
|
||||
* datetime that overrides the publication timestamp. The behavior is
|
||||
 * gated on `content:publish_any` because backdating overwrites the
 * historical record. Without `publishedAt`, idempotent re-publish preserves the
|
||||
* existing timestamp (regression guard for the COALESCE behavior).
|
||||
*
|
||||
* - **#621** `content_update` persists `seo`, `bylines`, and `publishedAt`
|
||||
* alongside field updates. The MCP tool exposes the same fields the REST
|
||||
* API has accepted since #777; before this PR the tool's input schema
|
||||
* silently dropped them.
|
||||
*
|
||||
* Failure modes covered:
|
||||
* - non-admin (AUTHOR) trying to set `publishedAt` -> INSUFFICIENT_PERMISSIONS
|
||||
* - SEO on a collection that doesn't have SEO enabled -> VALIDATION_ERROR
|
||||
* - bylines pointing at a non-existent byline ID -> handler-level FK error
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { BylineRepository } from "../../../src/database/repositories/byline.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
isErrorResult,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabaseWithCollections, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
const AUTHOR_ID = "user_author";
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// content_publish — publishedAt override (#622)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP content_publish — publishedAt override (#622)", () => {
  let db: Kysely<Database>;
  let harness: McpHarness;

  beforeEach(async () => {
    db = await setupTestDatabaseWithCollections();
    // Default connection is ADMIN (holds publish_any); role-specific tests
    // below reconnect as AUTHOR where needed.
    harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
  });

  afterEach(async () => {
    if (harness) await harness.cleanup();
    await teardownTestDatabase(db);
  });

  it("backdates publishedAt when caller passes an explicit ISO timestamp", async () => {
    const created = await harness.client.callTool({
      name: "content_create",
      arguments: { collection: "post", data: { title: "Imported post" } },
    });
    const id = extractJson<{ item: { id: string } }>(created).item.id;

    const PAST = "2020-01-15T10:00:00.000Z";
    const result = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id, publishedAt: PAST },
    });
    expect(result.isError, extractText(result)).toBeFalsy();

    const item = extractJson<{ item: { publishedAt: string | null; status: string } }>(result).item;
    expect(item.status).toBe("published");
    // Repository normalizes to ISO so we compare via Date round-trip.
    expect(new Date(item.publishedAt!).toISOString()).toBe(PAST);
  });

  it("re-publishing with a new publishedAt overwrites the previous timestamp", async () => {
    // First publish without an override — gets a current timestamp.
    const created = await harness.client.callTool({
      name: "content_create",
      arguments: { collection: "post", data: { title: "T" } },
    });
    const id = extractJson<{ item: { id: string } }>(created).item.id;

    const first = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id },
    });
    const firstTs = extractJson<{ item: { publishedAt: string } }>(first).item.publishedAt;
    expect(firstTs).toBeTruthy();

    // Re-publish with explicit override — should overwrite.
    const PAST = "2019-06-01T00:00:00.000Z";
    const second = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id, publishedAt: PAST },
    });
    const secondItem = extractJson<{ item: { publishedAt: string | null } }>(second).item;
    expect(new Date(secondItem.publishedAt!).toISOString()).toBe(PAST);
    expect(secondItem.publishedAt).not.toBe(firstTs);
  });

  it("rejects non-ISO-8601 publishedAt at the schema layer", async () => {
    const created = await harness.client.callTool({
      name: "content_create",
      arguments: { collection: "post", data: { title: "T" } },
    });
    const id = extractJson<{ item: { id: string } }>(created).item.id;

    const result = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id, publishedAt: "yesterday" },
    });
    // Schema validation produces an isError envelope. We assert the schema's
    // own message wording — not just that the field name appears anywhere
    // (which would let an echoed input or stack trace satisfy the test for
    // the wrong reason).
    expect(isErrorResult(result)).toBe(true);
    expect(extractText(result)).toContain("must be an ISO 8601 datetime");
  });

  it("accepts ISO 8601 with explicit timezone offset (offset: true)", async () => {
    // Positive companion to the rejection test: pins that the schema's
    // `offset: true` actually accepts non-Z offsets, not just Z.
    const created = await harness.client.callTool({
      name: "content_create",
      arguments: { collection: "post", data: { title: "T" } },
    });
    const id = extractJson<{ item: { id: string } }>(created).item.id;

    const result = await harness.client.callTool({
      name: "content_publish",
      arguments: {
        collection: "post",
        id,
        publishedAt: "2020-01-15T10:00:00+05:30",
      },
    });
    expect(result.isError, extractText(result)).toBeFalsy();
    const item = extractJson<{ item: { publishedAt: string | null } }>(result).item;
    // Date round-trip normalizes the offset to UTC.
    expect(new Date(item.publishedAt!).toISOString()).toBe(
      new Date("2020-01-15T10:00:00+05:30").toISOString(),
    );
  });

  it("requires content:publish_any to set publishedAt — AUTHOR (owner) is denied", async () => {
    // Switch to AUTHOR role: AUTHOR has publish_own but NOT publish_any.
    await harness.cleanup();
    harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });

    const created = await harness.client.callTool({
      name: "content_create",
      arguments: { collection: "post", data: { title: "Author's post" } },
    });
    const id = extractJson<{ item: { id: string } }>(created).item.id;

    // Plain publish (no publishedAt) — AUTHOR can do this for their own item.
    const ok = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id },
    });
    expect(ok.isError, extractText(ok)).toBeFalsy();

    // Publish with backdated publishedAt — AUTHOR is denied even on their
    // own item, because backdating overwrites the historical record.
    const denied = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id, publishedAt: "2020-01-01T00:00:00.000Z" },
    });
    expect(isErrorResult(denied)).toBe(true);
    expect(extractText(denied)).toContain("INSUFFICIENT_PERMISSIONS");
    expect(extractText(denied).toLowerCase()).toContain("publish_any");
  });

  it("AUTHOR cannot publish someone else's item with publishedAt (ownership denies first)", async () => {
    // First create as ADMIN so the item belongs to a different user.
    const created = await harness.client.callTool({
      name: "content_create",
      arguments: { collection: "post", data: { title: "Admin's post" } },
    });
    const id = extractJson<{ item: { id: string } }>(created).item.id;

    // Switch to AUTHOR — now they're not the owner.
    await harness.cleanup();
    harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });

    const denied = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id, publishedAt: "2020-01-01T00:00:00.000Z" },
    });
    // Whichever check fires first (ownership or publishedAt gate), the
    // denial is the correct outcome. We pin the structural failure shape,
    // not the specific code, because either order is correct.
    expect(isErrorResult(denied)).toBe(true);
    expect(extractText(denied)).toContain("INSUFFICIENT_PERMISSIONS");
  });

  it("idempotent re-publish without publishedAt preserves the original timestamp", async () => {
    // Regression guard: the COALESCE preserve-on-re-publish behavior
    // shouldn't change just because the repo signature now accepts an
    // optional override.
    const created = await harness.client.callTool({
      name: "content_create",
      arguments: { collection: "post", data: { title: "T" } },
    });
    const id = extractJson<{ item: { id: string } }>(created).item.id;

    const first = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id },
    });
    const firstTs = extractJson<{ item: { publishedAt: string } }>(first).item.publishedAt;

    // Wait so a regression that always uses `now` would surface as a new ts.
    await new Promise((r) => setTimeout(r, 5));

    const second = await harness.client.callTool({
      name: "content_publish",
      arguments: { collection: "post", id },
    });
    const secondTs = extractJson<{ item: { publishedAt: string } }>(second).item.publishedAt;
    expect(secondTs).toBe(firstTs);
  });
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// content_update — seo / bylines / publishedAt (#621)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP content_update — seo / bylines / publishedAt (#621)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
let bylineId: string;
|
||||
let bylineId2: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabaseWithCollections();
|
||||
// Enable SEO on the post collection (mirrors integration/seo/seo.test.ts).
|
||||
await db
|
||||
.updateTable("_emdash_collections")
|
||||
.set({ has_seo: 1 })
|
||||
.where("slug", "=", "post")
|
||||
.execute();
|
||||
|
||||
// Pre-create two bylines so we can attach them via content_update.
|
||||
const bylineRepo = new BylineRepository(db);
|
||||
const b1 = await bylineRepo.create({
|
||||
slug: "jane-doe",
|
||||
displayName: "Jane Doe",
|
||||
isGuest: false,
|
||||
});
|
||||
const b2 = await bylineRepo.create({
|
||||
slug: "john-smith",
|
||||
displayName: "John Smith",
|
||||
isGuest: false,
|
||||
});
|
||||
bylineId = b1.id;
|
||||
bylineId2 = b2.id;
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("rejects SEO canonical URL with non-http scheme (XSS guard)", async () => {
|
||||
// Pins that the MCP `content_update.seo` schema reuses the REST
|
||||
// `contentSeoInput` schema, which validates `canonical` through
|
||||
// `httpUrl` (rejects javascript:/data: URIs that would otherwise
|
||||
// become stored XSS in the rendered <link rel="canonical">).
|
||||
// A regression that swapped this back to a plain `z.string()` would
|
||||
// silently accept the malicious URL and persist it.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id,
|
||||
seo: { canonical: "javascript:alert(1)" },
|
||||
},
|
||||
});
|
||||
expect(isErrorResult(result)).toBe(true);
|
||||
});
|
||||
|
||||
it("persists SEO fields passed to content_update", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id,
|
||||
seo: {
|
||||
title: "SEO Title",
|
||||
description: "SEO description goes here.",
|
||||
noIndex: true,
|
||||
},
|
||||
},
|
||||
});
|
||||
expect(updated.isError, extractText(updated)).toBeFalsy();
|
||||
|
||||
// Round-trip via content_get — confirms persistence, not just the
|
||||
// echo from the update response.
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{
|
||||
item: {
|
||||
seo?: {
|
||||
title: string | null;
|
||||
description: string | null;
|
||||
noIndex: boolean;
|
||||
};
|
||||
};
|
||||
}>(got).item;
|
||||
expect(item.seo?.title).toBe("SEO Title");
|
||||
expect(item.seo?.description).toBe("SEO description goes here.");
|
||||
expect(item.seo?.noIndex).toBe(true);
|
||||
});
|
||||
|
||||
it("persists bylines passed to content_update and sets primary byline", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id,
|
||||
bylines: [
|
||||
{ bylineId, roleLabel: "Author" },
|
||||
{ bylineId: bylineId2, roleLabel: "Editor" },
|
||||
],
|
||||
},
|
||||
});
|
||||
expect(updated.isError, extractText(updated)).toBeFalsy();
|
||||
|
||||
// Round-trip via content_get rather than relying on the update response
|
||||
// echoing the input — confirms persistence rather than just the in-memory
|
||||
// pass-through. (A regression that silently dropped the DB write but
|
||||
// echoed the byline list in the response would still pass an
|
||||
// update-response-only assertion.)
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{
|
||||
item: {
|
||||
primaryBylineId: string | null;
|
||||
bylines?: Array<{ byline: { id: string }; roleLabel: string | null }>;
|
||||
};
|
||||
}>(got).item;
|
||||
|
||||
// First entry becomes the primary byline.
|
||||
expect(item.primaryBylineId).toBe(bylineId);
|
||||
expect(item.bylines).toHaveLength(2);
|
||||
expect(item.bylines?.[0]?.byline.id).toBe(bylineId);
|
||||
expect(item.bylines?.[0]?.roleLabel).toBe("Author");
|
||||
expect(item.bylines?.[1]?.byline.id).toBe(bylineId2);
|
||||
});
|
||||
|
||||
it("backdates publishedAt when content_update receives one", async () => {
|
||||
// Publish first (so the item has a published_at to overwrite).
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const PAST = "2018-03-15T12:00:00.000Z";
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, publishedAt: PAST },
|
||||
});
|
||||
expect(updated.isError, extractText(updated)).toBeFalsy();
|
||||
|
||||
// Round-trip via content_get to confirm persistence.
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{ item: { publishedAt: string | null } }>(got).item;
|
||||
expect(new Date(item.publishedAt!).toISOString()).toBe(PAST);
|
||||
});
|
||||
|
||||
it("AUTHOR (owner) cannot set publishedAt via content_update", async () => {
|
||||
await harness.cleanup();
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Author's post" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
// AUTHOR owns this item, so ownership passes — the publishedAt gate
|
||||
// fires next and denies. This pins that the gate fires regardless of
|
||||
// ownership (backdating overwrites historical record).
|
||||
const denied = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id,
|
||||
publishedAt: "2020-01-01T00:00:00.000Z",
|
||||
},
|
||||
});
|
||||
expect(isErrorResult(denied)).toBe(true);
|
||||
expect(extractText(denied)).toContain("INSUFFICIENT_PERMISSIONS");
|
||||
expect(extractText(denied).toLowerCase()).toContain("publish_any");
|
||||
});
|
||||
|
||||
it("AUTHOR cannot set publishedAt on someone else's item via content_update", async () => {
|
||||
// Create as ADMIN so the item belongs to someone else.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Admin's post" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
// Switch to AUTHOR — now they're not the owner.
|
||||
await harness.cleanup();
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
|
||||
const denied = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id,
|
||||
publishedAt: "2020-01-01T00:00:00.000Z",
|
||||
},
|
||||
});
|
||||
// Either ownership or the publishedAt gate denies — whichever fires
|
||||
// first. Both produce INSUFFICIENT_PERMISSIONS so the cross-product is
|
||||
// pinned without depending on check order.
|
||||
expect(isErrorResult(denied)).toBe(true);
|
||||
expect(extractText(denied)).toContain("INSUFFICIENT_PERMISSIONS");
|
||||
});
|
||||
|
||||
it("rejects SEO on a collection without SEO enabled", async () => {
|
||||
// page collection from the test fixture does NOT have SEO.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "page", data: { title: "Page" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "page",
|
||||
id,
|
||||
seo: { title: "Should fail" },
|
||||
},
|
||||
});
|
||||
expect(isErrorResult(result)).toBe(true);
|
||||
expect(extractText(result)).toContain("VALIDATION_ERROR");
|
||||
});
|
||||
|
||||
it("content_update with status='published' + publishedAt publishes AND backdates", async () => {
|
||||
// Pins the interaction between the status='published' branch and the
|
||||
// publishedAt override. The branch calls handleContentUpdate (which
|
||||
// writes published_at to the column) and then handleContentPublish
|
||||
// (which preserves the column via COALESCE). If either side regresses,
|
||||
// the backdated timestamp won't survive the publish.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
|
||||
const PAST = "2017-04-20T00:00:00.000Z";
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id,
|
||||
status: "published",
|
||||
publishedAt: PAST,
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
// Round-trip via content_get to confirm both status AND backdated
|
||||
// timestamp landed.
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{ item: { status: string; publishedAt: string | null } }>(got).item;
|
||||
expect(item.status).toBe("published");
|
||||
expect(new Date(item.publishedAt!).toISOString()).toBe(PAST);
|
||||
});
|
||||
|
||||
it("seo / bylines / publishedAt and field updates apply atomically", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Original" } },
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const PAST = "2021-06-01T00:00:00.000Z";
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id,
|
||||
data: { title: "Updated" },
|
||||
seo: { title: "SEO" },
|
||||
bylines: [{ bylineId }],
|
||||
publishedAt: PAST,
|
||||
},
|
||||
});
|
||||
expect(updated.isError, extractText(updated)).toBeFalsy();
|
||||
|
||||
const got = await harness.client.callTool({
|
||||
name: "content_get",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
const item = extractJson<{
|
||||
item: {
|
||||
data: { title?: string };
|
||||
publishedAt: string | null;
|
||||
primaryBylineId: string | null;
|
||||
seo?: { title: string | null };
|
||||
};
|
||||
}>(got).item;
|
||||
|
||||
// All four updates landed.
|
||||
expect(item.data.title).toBe("Updated");
|
||||
expect(item.seo?.title).toBe("SEO");
|
||||
expect(item.primaryBylineId).toBe(bylineId);
|
||||
expect(new Date(item.publishedAt!).toISOString()).toBe(PAST);
|
||||
});
|
||||
});
|
||||
712
packages/core/tests/integration/mcp/schema.test.ts
Normal file
712
packages/core/tests/integration/mcp/schema.test.ts
Normal file
@@ -0,0 +1,712 @@
|
||||
/**
|
||||
* MCP schema tools — comprehensive integration tests.
|
||||
*
|
||||
* Covers every branch of:
|
||||
* - schema_list_collections
|
||||
* - schema_get_collection
|
||||
* - schema_create_collection (also bug #11 — supports default)
|
||||
* - schema_delete_collection
|
||||
* - schema_create_field
|
||||
* - schema_delete_field
|
||||
*
|
||||
* For each tool: happy path, edge cases (empty, missing, duplicate,
|
||||
* reserved names), permission gates, error envelope (bug #3 — currently
|
||||
* generic). Where the omnibus fix is expected to introduce structured
|
||||
* errors, the assertions name the specific failure mode so they fail
|
||||
* usefully today.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { SchemaRegistry } from "../../../src/schema/registry.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
// Fixture user ids matching the roles seeded by the test harness.
const ADMIN_ID = "user_admin";
const EDITOR_ID = "user_editor";
const AUTHOR_ID = "user_author";

// Loose matcher for validation-flavored error text across schema tools.
const VALIDATION_ERROR = /validation|invalid|reserved|pattern|format|required/i;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// schema_list_collections
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("schema_list_collections", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns empty list when no collections exist", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_list_collections",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { items } = extractJson<{ items: unknown[] }>(result);
|
||||
expect(items).toEqual([]);
|
||||
});
|
||||
|
||||
it("lists multiple collections in stable order", async () => {
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
await registry.createCollection({ slug: "page", label: "Pages" });
|
||||
await registry.createCollection({ slug: "product", label: "Products" });
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_list_collections",
|
||||
arguments: {},
|
||||
});
|
||||
const { items } = extractJson<{ items: Array<{ slug: string }> }>(result);
|
||||
const slugs = items.map((c) => c.slug).toSorted();
|
||||
expect(slugs).toEqual(["page", "post", "product"]);
|
||||
});
|
||||
|
||||
it("requires EDITOR or higher", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_list_collections",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("EDITOR can list collections", async () => {
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_list_collections",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// schema_get_collection
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("schema_get_collection", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
labelSingular: "Post",
|
||||
supports: ["drafts", "revisions"],
|
||||
});
|
||||
await registry.createField("post", { slug: "title", label: "Title", type: "string" });
|
||||
await registry.createField("post", { slug: "body", label: "Body", type: "text" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns collection with its fields", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_get_collection",
|
||||
arguments: { slug: "post" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const collection = extractJson<{
|
||||
slug: string;
|
||||
label: string;
|
||||
labelSingular?: string;
|
||||
supports: string[];
|
||||
fields: Array<{ slug: string; type: string }>;
|
||||
}>(result);
|
||||
expect(collection.slug).toBe("post");
|
||||
expect(collection.label).toBe("Posts");
|
||||
expect(collection.labelSingular).toBe("Post");
|
||||
expect(collection.supports).toEqual(expect.arrayContaining(["drafts", "revisions"]));
|
||||
const fieldSlugs = collection.fields.map((f) => f.slug).toSorted();
|
||||
expect(fieldSlugs).toEqual(["body", "title"]);
|
||||
});
|
||||
|
||||
it("returns NOT_FOUND-style error for missing collection", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_get_collection",
|
||||
arguments: { slug: "nonexistent" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/COLLECTION_NOT_FOUND|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("nonexistent");
|
||||
});
|
||||
|
||||
it("requires EDITOR or higher", async () => {
|
||||
await harness.cleanup();
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_get_collection",
|
||||
arguments: { slug: "post" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// schema_create_collection
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("schema_create_collection", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("creates a collection with minimal arguments", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "article", label: "Articles" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const created = extractJson<{ slug: string; label: string }>(result);
|
||||
expect(created.slug).toBe("article");
|
||||
expect(created.label).toBe("Articles");
|
||||
});
|
||||
|
||||
it("creates with all optional fields", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: {
|
||||
slug: "story",
|
||||
label: "Stories",
|
||||
labelSingular: "Story",
|
||||
description: "A story collection",
|
||||
icon: "book",
|
||||
supports: ["drafts", "revisions", "scheduling"],
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const created = extractJson<{
|
||||
slug: string;
|
||||
label: string;
|
||||
labelSingular?: string;
|
||||
description?: string;
|
||||
icon?: string;
|
||||
supports: string[];
|
||||
}>(result);
|
||||
expect(created.labelSingular).toBe("Story");
|
||||
expect(created.description).toBe("A story collection");
|
||||
expect(created.icon).toBe("book");
|
||||
expect(created.supports.toSorted()).toEqual(["drafts", "revisions", "scheduling"].toSorted());
|
||||
});
|
||||
|
||||
it("rejects slug that doesn't match the collection slug pattern", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "Has-Caps", label: "Bad" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects slug starting with a number", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "1posts", label: "Posts" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects empty slug", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "", label: "Empty" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects duplicate slug", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "post", label: "Posts" },
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "post", label: "Posts Two" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/exist|duplicate|conflict|already/i);
|
||||
});
|
||||
|
||||
it("rejects reserved slug like 'media' or 'options'", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
// `options` is a reserved table name
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "options", label: "Options" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("requires ADMIN role (EDITOR is blocked)", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "blocked", label: "Blocked" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/permission|insufficient/i);
|
||||
});
|
||||
|
||||
it("accepts SQL-injection attempt as a normal slug rejection (regression)", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "drop_tables); --", label: "x" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// schema_delete_collection
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("schema_delete_collection", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
await registry.createField("post", { slug: "title", label: "Title", type: "string" });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("deletes an empty collection", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_collection",
|
||||
arguments: { slug: "post" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
// Verify it's gone
|
||||
const list = await harness.client.callTool({
|
||||
name: "schema_list_collections",
|
||||
arguments: {},
|
||||
});
|
||||
const { items } = extractJson<{ items: Array<{ slug: string }> }>(list);
|
||||
expect(items.find((c) => c.slug === "post")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("rejects deleting a collection with content unless force is true", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "A" } },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_collection",
|
||||
arguments: { slug: "post" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
// Tight: the error must say "has content" and tell the caller how
|
||||
// to override (force: true). Loose word matches like /empty|content/
|
||||
// passed against unrelated 500s, hiding regressions.
|
||||
const text = extractText(result);
|
||||
expect(text).toMatch(/has content/i);
|
||||
expect(text).toContain("force: true");
|
||||
});
|
||||
|
||||
it("force deletes a collection with content", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "A" } },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_collection",
|
||||
arguments: { slug: "post", force: true },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("returns clear NOT_FOUND error for missing collection", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_collection",
|
||||
arguments: { slug: "nonexistent" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/COLLECTION_NOT_FOUND|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("nonexistent");
|
||||
});
|
||||
|
||||
it("requires ADMIN role", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_collection",
|
||||
arguments: { slug: "post" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// schema_create_field
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("schema_create_field", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("creates a string field with minimal args", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: { collection: "post", slug: "title", label: "Title", type: "string" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const field = extractJson<{ slug: string; type: string; required?: boolean }>(result);
|
||||
expect(field.slug).toBe("title");
|
||||
expect(field.type).toBe("string");
|
||||
});
|
||||
|
||||
it.each([
|
||||
["text", "f_text"],
|
||||
["number", "f_number"],
|
||||
["integer", "f_integer"],
|
||||
["boolean", "f_bool"],
|
||||
["datetime", "f_dt"],
|
||||
["portableText", "f_portable_text"],
|
||||
["json", "f_json"],
|
||||
["slug", "f_slug"],
|
||||
])("creates a %s field", async (fieldType, slug) => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: { collection: "post", slug, label: fieldType, type: fieldType },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("creates a select field with options", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "priority",
|
||||
label: "Priority",
|
||||
type: "select",
|
||||
validation: { options: ["low", "high"] },
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("creates a reference field with target collection", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "page", label: "Pages" },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "parent",
|
||||
label: "Parent",
|
||||
type: "reference",
|
||||
options: { collection: "page" },
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects field slug not matching the slug pattern", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "Has-Caps",
|
||||
label: "Bad",
|
||||
type: "string",
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects duplicate field slug on the same collection", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: { collection: "post", slug: "title", label: "Title", type: "string" },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: { collection: "post", slug: "title", label: "Title v2", type: "string" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/exist|duplicate|already/i);
|
||||
});
|
||||
|
||||
it("rejects field on non-existent collection", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "ghost",
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/COLLECTION_NOT_FOUND|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("ghost");
|
||||
});
|
||||
|
||||
it("rejects field type not in the enum", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "weird",
|
||||
label: "Weird",
|
||||
type: "not_a_real_type",
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects reserved field slug like 'id' or 'created_at'", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "id",
|
||||
label: "ID",
|
||||
type: "string",
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("requires ADMIN role", async () => {
|
||||
await harness.cleanup();
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("required field is reflected in the response", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_field",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
required: true,
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const field = extractJson<{ required?: boolean }>(result);
|
||||
expect(field.required).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// schema_delete_field
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("schema_delete_field", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
await registry.createField("post", { slug: "title", label: "Title", type: "string" });
|
||||
await registry.createField("post", { slug: "body", label: "Body", type: "text" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("deletes an unused field", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_field",
|
||||
arguments: { collection: "post", fieldSlug: "body" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
// Verify it's gone
|
||||
const get = await harness.client.callTool({
|
||||
name: "schema_get_collection",
|
||||
arguments: { slug: "post" },
|
||||
});
|
||||
const collection = extractJson<{ fields: Array<{ slug: string }> }>(get);
|
||||
expect(collection.fields.find((f) => f.slug === "body")).toBeUndefined();
|
||||
});
|
||||
|
||||
it("returns clear error for missing field slug", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_field",
|
||||
arguments: { collection: "post", fieldSlug: "ghost" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/FIELD_NOT_FOUND|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("ghost");
|
||||
});
|
||||
|
||||
it("returns clear error for missing collection", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_field",
|
||||
arguments: { collection: "noplace", fieldSlug: "title" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/COLLECTION_NOT_FOUND|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("noplace");
|
||||
});
|
||||
|
||||
it("requires ADMIN role", async () => {
|
||||
await harness.cleanup();
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_field",
|
||||
arguments: { collection: "post", fieldSlug: "body" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("deleting a field with existing content also drops the data (no orphan)", async () => {
|
||||
// Create content using the field
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "T", body: "Body content" } },
|
||||
});
|
||||
|
||||
// Delete the field
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_delete_field",
|
||||
arguments: { collection: "post", fieldSlug: "body" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
// content_get should return data without the body field
|
||||
const list = await harness.client.callTool({
|
||||
name: "content_list",
|
||||
arguments: { collection: "post" },
|
||||
});
|
||||
const items = extractJson<{ items: Array<Record<string, unknown>> }>(list).items;
|
||||
// At minimum, the API shouldn't crash. The field should not appear,
|
||||
// and the data fetch should still succeed.
|
||||
expect(items.length).toBeGreaterThan(0);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cross-cutting: error envelope quality (bug #3 lens)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("schema tools — error envelope quality (bug #3 lens)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("schema_create_collection on duplicate names a discriminated CONFLICT-like error", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "post", label: "Posts" },
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "post", label: "Posts" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
// Today: probably leaks raw SQLite UNIQUE error or generic. After fix:
|
||||
// a stable signal like "already exists" / CONFLICT.
|
||||
expect(text).toMatch(/exist|conflict|duplicate|unique|already/i);
|
||||
expect(text).not.toMatch(/^Failed to /);
|
||||
});
|
||||
|
||||
it("validation error names the offending field/value in the message", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "schema_create_collection",
|
||||
arguments: { slug: "Bad-Slug", label: "Bad" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
});
|
||||
});
|
||||
337
packages/core/tests/integration/mcp/search.test.ts
Normal file
337
packages/core/tests/integration/mcp/search.test.ts
Normal file
@@ -0,0 +1,337 @@
|
||||
/**
|
||||
* MCP search tool — comprehensive integration tests.
|
||||
*
|
||||
* Covers:
|
||||
* - search query → matching results
|
||||
* - empty index / no searchable collections
|
||||
* - collection scoping
|
||||
* - locale filtering
|
||||
* - special characters / FTS5 syntax
|
||||
* - permission gating
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { SchemaRegistry } from "../../../src/schema/registry.js";
|
||||
import { FTSManager } from "../../../src/search/fts-manager.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
const SUBSCRIBER_ID = "user_subscriber";
|
||||
|
||||
async function setupSearchablePostCollection(db: Kysely<Database>): Promise<void> {
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
supports: ["drafts", "revisions", "search"],
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
searchable: true,
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "body",
|
||||
label: "Body",
|
||||
type: "text",
|
||||
searchable: true,
|
||||
});
|
||||
// Activate the FTS index. Production sites do this either via the seed
|
||||
// pipeline or the admin "Enable search" toggle. Without it, the FTS
|
||||
// table and triggers don't exist and the test would silently miss real
|
||||
// indexing bugs.
|
||||
await new FTSManager(db).enableSearch("post");
|
||||
}
|
||||
|
||||
describe("search", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns empty results when no collections are searchable", async () => {
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({ slug: "post", label: "Posts" }); // no search support
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "anything" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ items: unknown[] }>(result);
|
||||
expect(data.items).toEqual([]);
|
||||
});
|
||||
|
||||
it("returns empty results for a query with no matches", async () => {
|
||||
await setupSearchablePostCollection(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Hello world", body: "Lorem ipsum" } },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id: "hello-world" },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "ZZZZZQuantumZebra" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ items: unknown[] }>(result);
|
||||
expect(data.items).toEqual([]);
|
||||
});
|
||||
|
||||
it("returns matching items for a query that hits", async () => {
|
||||
await setupSearchablePostCollection(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "Hello world", body: "Lorem ipsum about searching" },
|
||||
},
|
||||
});
|
||||
const id = extractJson<{ item: { id: string } }>(created).item.id;
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: { collection: "post", id },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "Hello" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ items: Array<{ id: string }> }>(result);
|
||||
expect(data.items.length).toBeGreaterThan(0);
|
||||
expect(data.items.find((i) => i.id === id)).toBeTruthy();
|
||||
});
|
||||
|
||||
it("scopes search by collections argument", async () => {
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
supports: ["search"],
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
searchable: true,
|
||||
});
|
||||
await registry.createCollection({
|
||||
slug: "page",
|
||||
label: "Pages",
|
||||
supports: ["search"],
|
||||
});
|
||||
await registry.createField("page", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
searchable: true,
|
||||
});
|
||||
const fts = new FTSManager(db);
|
||||
await fts.enableSearch("post");
|
||||
await fts.enableSearch("page");
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const post = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "rocket post" } },
|
||||
});
|
||||
const page = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "page", data: { title: "rocket page" } },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: extractJson<{ item: { id: string } }>(post).item.id,
|
||||
},
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: {
|
||||
collection: "page",
|
||||
id: extractJson<{ item: { id: string } }>(page).item.id,
|
||||
},
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "rocket", collections: ["post"] },
|
||||
});
|
||||
const data = extractJson<{ items: Array<{ collection?: string; type?: string }> }>(result);
|
||||
// We seeded one post and one page that both match "rocket". Scoping
|
||||
// to ["post"] must keep at least the post hit and exclude the page.
|
||||
expect(data.items.length).toBeGreaterThan(0);
|
||||
for (const item of data.items) {
|
||||
const c = item.collection ?? item.type;
|
||||
expect(c).toBe("post");
|
||||
}
|
||||
});
|
||||
|
||||
it("handles empty query string gracefully", async () => {
|
||||
await setupSearchablePostCollection(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
// Seed a published item so a regression that interprets an empty
|
||||
// query as "match all" would produce a non-empty list and fail.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "matchable", body: "indexed content" },
|
||||
},
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: extractJson<{ item: { id: string } }>(created).item.id,
|
||||
},
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "" },
|
||||
});
|
||||
// Empty queries are sanitized to a no-op and return zero matches.
|
||||
// They must not surface as an error AND must not match all items.
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ items: unknown[] }>(result);
|
||||
expect(data.items).toEqual([]);
|
||||
});
|
||||
|
||||
it("handles special characters in query without leaking FTS5 syntax errors", async () => {
|
||||
await setupSearchablePostCollection(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
// Seed a published item so a regression that lets malformed input
|
||||
// fall through to "match all" would surface a non-empty list.
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "matchable", body: "indexed content" },
|
||||
},
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: extractJson<{ item: { id: string } }>(created).item.id,
|
||||
},
|
||||
});
|
||||
|
||||
// FTS5 has special operators: AND OR NOT NEAR " * ( ) :
|
||||
// `searchSingleCollection` swallows malformed-input FTS5 errors and
|
||||
// returns no matches; the response is a clean empty list.
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: 'NOT "quotes" AND* (' },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<{ items: unknown[] }>(result);
|
||||
expect(data.items).toEqual([]);
|
||||
});
|
||||
|
||||
it("respects the limit parameter", async () => {
|
||||
await setupSearchablePostCollection(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
// Create 10 items containing the same word
|
||||
for (let i = 0; i < 10; i++) {
|
||||
const c = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: `searchable item ${i}`, body: "common-text" },
|
||||
},
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: extractJson<{ item: { id: string } }>(c).item.id,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "common-text", limit: 3 },
|
||||
});
|
||||
const data = extractJson<{ items: unknown[] }>(result);
|
||||
expect(data.items.length).toBeLessThanOrEqual(3);
|
||||
});
|
||||
|
||||
it("only returns published items (not drafts) regardless of caller role", async () => {
|
||||
await setupSearchablePostCollection(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
// Create one draft, one published
|
||||
await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "draft-only-content" } },
|
||||
});
|
||||
const pubItem = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "published-content" } },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "content_publish",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
id: extractJson<{ item: { id: string } }>(pubItem).item.id,
|
||||
},
|
||||
});
|
||||
|
||||
const draftQuery = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "draft-only-content" },
|
||||
});
|
||||
expect(extractJson<{ items: unknown[] }>(draftQuery).items).toEqual([]);
|
||||
|
||||
const pubQuery = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "published-content" },
|
||||
});
|
||||
expect(extractJson<{ items: unknown[] }>(pubQuery).items.length).toBeGreaterThan(0);
|
||||
});
|
||||
|
||||
it("any logged-in user (SUBSCRIBER) can search", async () => {
|
||||
await setupSearchablePostCollection(db);
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "search",
|
||||
arguments: { query: "anything" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
364
packages/core/tests/integration/mcp/settings.test.ts
Normal file
364
packages/core/tests/integration/mcp/settings.test.ts
Normal file
@@ -0,0 +1,364 @@
|
||||
/**
|
||||
* MCP settings tools — integration tests.
|
||||
*
|
||||
* Covers:
|
||||
* - settings_get
|
||||
* - settings_update
|
||||
*
|
||||
* Plus regression for bug #16 (no MCP tool for site settings).
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { ulid } from "ulidx";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
const EDITOR_ID = "user_editor";
|
||||
const SUBSCRIBER_ID = "user_subscriber";
|
||||
|
||||
interface SiteSettingsResponse {
|
||||
title?: string;
|
||||
tagline?: string;
|
||||
logo?: { mediaId: string; alt?: string; url?: string };
|
||||
favicon?: { mediaId: string; alt?: string; url?: string };
|
||||
url?: string;
|
||||
postsPerPage?: number;
|
||||
dateFormat?: string;
|
||||
timezone?: string;
|
||||
social?: Record<string, string | undefined>;
|
||||
seo?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
async function seedMedia(db: Kysely<Database>, opts?: { id?: string }): Promise<string> {
|
||||
const id = opts?.id ?? ulid();
|
||||
const now = new Date().toISOString();
|
||||
await db
|
||||
.insertInto("media" as never)
|
||||
.values({
|
||||
id,
|
||||
filename: "logo.png",
|
||||
mime_type: "image/png",
|
||||
size: 1024,
|
||||
storage_key: `media/${id}.png`,
|
||||
created_at: now,
|
||||
} as never)
|
||||
.execute();
|
||||
return id;
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tool registration — bug #16 regression.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("settings tools registered (bug #16)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("MCP exposes settings_get and settings_update", async () => {
|
||||
const tools = await harness.client.listTools();
|
||||
const names = new Set(tools.tools.map((t) => t.name));
|
||||
expect(names.has("settings_get")).toBe(true);
|
||||
expect(names.has("settings_update")).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// settings_get
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("settings_get", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns an empty object when no settings are set", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<SiteSettingsResponse>(result);
|
||||
expect(data).toEqual({});
|
||||
});
|
||||
|
||||
it("returns previously-set settings", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { title: "My Site", tagline: "Welcome" },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<SiteSettingsResponse>(result);
|
||||
expect(data.title).toBe("My Site");
|
||||
expect(data.tagline).toBe("Welcome");
|
||||
});
|
||||
|
||||
it("resolves logo media reference URL", async () => {
|
||||
const mediaId = await seedMedia(db);
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { logo: { mediaId, alt: "Site logo" } },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<SiteSettingsResponse>(result);
|
||||
expect(data.logo?.mediaId).toBe(mediaId);
|
||||
expect(data.logo?.alt).toBe("Site logo");
|
||||
// URL is resolved to the media file route
|
||||
expect(data.logo?.url).toMatch(/^\/_emdash\/api\/media\/file\//);
|
||||
});
|
||||
|
||||
it("editor can read settings", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("subscriber cannot read settings (INSUFFICIENT_PERMISSIONS)", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
|
||||
});
|
||||
|
||||
it("rejects token without settings:read scope (INSUFFICIENT_SCOPE)", async () => {
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: ADMIN_ID,
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["content:read"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_SCOPE");
|
||||
});
|
||||
|
||||
it("settings:read token is sufficient for settings_get", async () => {
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: ADMIN_ID,
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["settings:read"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("admin scope grants settings_get access", async () => {
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: ADMIN_ID,
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["admin"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_get",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// settings_update
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("settings_update", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("updates title and tagline", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { title: "EmDash Demo", tagline: "Hello" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<SiteSettingsResponse>(result);
|
||||
expect(data.title).toBe("EmDash Demo");
|
||||
expect(data.tagline).toBe("Hello");
|
||||
});
|
||||
|
||||
it("partial update preserves other fields", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { title: "First", tagline: "Original tagline" },
|
||||
});
|
||||
|
||||
// Update only tagline; title should be preserved
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { tagline: "Updated tagline" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<SiteSettingsResponse>(result);
|
||||
expect(data.title).toBe("First");
|
||||
expect(data.tagline).toBe("Updated tagline");
|
||||
});
|
||||
|
||||
it("accepts an http url and rejects javascript: scheme", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const ok = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { url: "https://example.com" },
|
||||
});
|
||||
expect(ok.isError, extractText(ok)).toBeFalsy();
|
||||
|
||||
const bad = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
// eslint-disable-next-line no-script-url -- intentional for validation test
|
||||
arguments: { url: "javascript:alert(1)" },
|
||||
});
|
||||
expect(bad.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("accepts empty string for url (clears it)", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { url: "" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects out-of-range postsPerPage", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { postsPerPage: 9999 },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("accepts nested seo and social objects", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: {
|
||||
social: { twitter: "@emdash", github: "emdash-cms" },
|
||||
seo: { titleSeparator: " | ", googleVerification: "abc123" },
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const data = extractJson<SiteSettingsResponse>(result);
|
||||
expect(data.social?.twitter).toBe("@emdash");
|
||||
expect(data.social?.github).toBe("emdash-cms");
|
||||
expect((data.seo as { titleSeparator?: string }).titleSeparator).toBe(" | ");
|
||||
});
|
||||
|
||||
it("editor cannot update settings (INSUFFICIENT_PERMISSIONS — admin only)", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: EDITOR_ID, userRole: Role.EDITOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { title: "Nope" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
|
||||
});
|
||||
|
||||
it("subscriber cannot update settings", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { title: "Nope" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_PERMISSIONS");
|
||||
});
|
||||
|
||||
it("settings:read token cannot call settings_update (INSUFFICIENT_SCOPE)", async () => {
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: ADMIN_ID,
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["settings:read"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { title: "x" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INSUFFICIENT_SCOPE");
|
||||
});
|
||||
|
||||
it("settings:manage token can call settings_update", async () => {
|
||||
harness = await connectMcpHarness({
|
||||
db,
|
||||
userId: ADMIN_ID,
|
||||
userRole: Role.ADMIN,
|
||||
tokenScopes: ["settings:manage"],
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "settings_update",
|
||||
arguments: { title: "x" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
--- New file: packages/core/tests/integration/mcp/taxonomy.test.ts (771 lines) ---
@@ -0,0 +1,771 @@
|
||||
/**
|
||||
* MCP taxonomy tools — comprehensive integration tests.
|
||||
*
|
||||
* Covers:
|
||||
* - taxonomy_list
|
||||
* - taxonomy_list_terms
|
||||
* - taxonomy_create_term
|
||||
*
|
||||
* Plus regression coverage for:
|
||||
* - bug #7 (orphan taxonomy collection inconsistency)
|
||||
* - bug #13 (no delete/update term tool — gap test)
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { handleTaxonomyCreate } from "../../../src/api/handlers/taxonomies.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { SchemaRegistry } from "../../../src/schema/registry.js";
|
||||
import {
|
||||
connectMcpHarness,
|
||||
extractJson,
|
||||
extractText,
|
||||
type McpHarness,
|
||||
} from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";
|
||||
const AUTHOR_ID = "user_author";
|
||||
const SUBSCRIBER_ID = "user_subscriber";
|
||||
|
||||
async function setupTaxonomy(
|
||||
db: Kysely<Database>,
|
||||
input: { name: string; label: string; hierarchical?: boolean; collections?: string[] },
|
||||
): Promise<void> {
|
||||
const result = await handleTaxonomyCreate(db, input);
|
||||
if (!result.success) {
|
||||
throw new Error(`Failed to set up taxonomy: ${result.error?.message}`);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// taxonomy_list
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("taxonomy_list", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns only the seeded defaults when no extra taxonomies are added", async () => {
|
||||
// Migration 006 seeds two default taxonomies: 'category' (hierarchical)
|
||||
// and 'tag' (flat), both linked to the 'posts' collection. A fresh
|
||||
// install always has these.
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { taxonomies } = extractJson<{
|
||||
taxonomies: Array<{ name: string }>;
|
||||
}>(result);
|
||||
const names = taxonomies.map((t) => t.name).toSorted();
|
||||
expect(names).toEqual(["category", "tag"]);
|
||||
});
|
||||
|
||||
it("lists user-created taxonomies alongside the defaults", async () => {
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
// Use names that don't collide with the seeded `category` / `tag`.
|
||||
await setupTaxonomy(db, {
|
||||
name: "section",
|
||||
label: "Sections",
|
||||
hierarchical: true,
|
||||
collections: ["post"],
|
||||
});
|
||||
await setupTaxonomy(db, {
|
||||
name: "topic",
|
||||
label: "Topics",
|
||||
collections: ["post"],
|
||||
});
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list",
|
||||
arguments: {},
|
||||
});
|
||||
const { taxonomies } = extractJson<{
|
||||
taxonomies: Array<{ name: string; hierarchical?: boolean; collections?: string[] }>;
|
||||
}>(result);
|
||||
const names = taxonomies.map((t) => t.name).toSorted();
|
||||
expect(names).toEqual(["category", "section", "tag", "topic"]);
|
||||
|
||||
const section = taxonomies.find((t) => t.name === "section");
|
||||
expect(section?.hierarchical).toBe(true);
|
||||
expect(section?.collections).toEqual(["post"]);
|
||||
});
|
||||
|
||||
it("any logged-in user (SUBSCRIBER) can read taxonomies", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list",
|
||||
arguments: {},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("bug #7: orphaned collection slugs are filtered from taxonomy_list output", async () => {
|
||||
// The seed taxonomies (category, tag) both reference 'posts' — a
|
||||
// collection that doesn't exist in this test DB (no auto-seed). After
|
||||
// the bug #7 fix, `taxonomy_list` filters those orphans out. We don't
|
||||
// need to manufacture an orphan; the seed already gives us one.
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const taxResult = await harness.client.callTool({
|
||||
name: "taxonomy_list",
|
||||
arguments: {},
|
||||
});
|
||||
const { taxonomies } = extractJson<{
|
||||
taxonomies: Array<{ name: string; collections?: string[] }>;
|
||||
}>(taxResult);
|
||||
|
||||
// Each seeded taxonomy referenced 'posts'. After filtering, that
|
||||
// orphan slug is gone — the array should be empty for both seeds.
|
||||
for (const t of taxonomies) {
|
||||
expect(t.collections).not.toContain("posts");
|
||||
}
|
||||
|
||||
// And schema_list_collections agrees: there is no 'posts' collection.
|
||||
const collResult = await harness.client.callTool({
|
||||
name: "schema_list_collections",
|
||||
arguments: {},
|
||||
});
|
||||
const { items } = extractJson<{ items: Array<{ slug: string }> }>(collResult);
|
||||
expect(items.find((c) => c.slug === "posts")).toBeUndefined();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// taxonomy_list_terms
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("taxonomy_list_terms", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
await setupTaxonomy(db, { name: "categories", label: "Categories", hierarchical: true });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("returns empty list when taxonomy has no terms", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { items } = extractJson<{ items: unknown[] }>(result);
|
||||
expect(items).toEqual([]);
|
||||
});
|
||||
|
||||
it("returns terms after creation", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "tech", label: "Tech" },
|
||||
});
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "design", label: "Design" },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories" },
|
||||
});
|
||||
const { items } = extractJson<{
|
||||
items: Array<{ slug: string; label: string; parentId: string | null }>;
|
||||
}>(result);
|
||||
const slugs = items.map((t) => t.slug).toSorted();
|
||||
expect(slugs).toEqual(["design", "tech"]);
|
||||
});
|
||||
|
||||
it("returns clear error for missing taxonomy name", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "nonexistent" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("nonexistent");
|
||||
});
|
||||
|
||||
it("paginates with limit + cursor", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
// Insert 5 terms — labels chosen so alphabetical ordering is predictable
|
||||
for (const label of ["alpha", "bravo", "charlie", "delta", "echo"]) {
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: label, label },
|
||||
});
|
||||
}
|
||||
|
||||
const page1 = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories", limit: 2 },
|
||||
});
|
||||
const p1 = extractJson<{ items: Array<{ slug: string; id: string }>; nextCursor?: string }>(
|
||||
page1,
|
||||
);
|
||||
expect(p1.items).toHaveLength(2);
|
||||
expect(p1.nextCursor).toBeTruthy();
|
||||
|
||||
const page2 = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories", limit: 2, cursor: p1.nextCursor },
|
||||
});
|
||||
const p2 = extractJson<{ items: Array<{ slug: string }>; nextCursor?: string }>(page2);
|
||||
expect(p2.items).toHaveLength(2);
|
||||
|
||||
// No overlap
|
||||
const p1Slugs = p1.items.map((i) => i.slug);
|
||||
for (const t of p2.items) expect(p1Slugs).not.toContain(t.slug);
|
||||
});
|
||||
|
||||
it("paginates correctly when multiple terms share the same label", async () => {
|
||||
// Keyset pagination over (label, id) needs a stable id tiebreaker
|
||||
// at the SQL layer or tied-label rows can swap order between calls
|
||||
// — producing duplicates or skipped items. Three terms share
|
||||
// label "shared"; pagination must walk through them in a stable
|
||||
// order with no duplicates and no gaps.
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const slugs = ["shared-1", "shared-2", "shared-3", "unique-a"];
|
||||
for (const slug of slugs) {
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: {
|
||||
taxonomy: "categories",
|
||||
slug,
|
||||
label: slug.startsWith("shared") ? "shared" : slug,
|
||||
},
|
||||
});
|
||||
}
|
||||
|
||||
// Walk one item at a time so every cursor transition exercises the
|
||||
// (label, id) keyset.
|
||||
const collected: string[] = [];
|
||||
let cursor: string | undefined;
|
||||
// Hard cap to prevent the test hanging if pagination loops.
|
||||
for (let i = 0; i < 10; i++) {
|
||||
const page = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories", limit: 1, ...(cursor ? { cursor } : {}) },
|
||||
});
|
||||
const data = extractJson<{
|
||||
items: Array<{ slug: string; id: string }>;
|
||||
nextCursor?: string;
|
||||
}>(page);
|
||||
if (data.items.length === 0) break;
|
||||
for (const item of data.items) collected.push(item.slug);
|
||||
if (!data.nextCursor) break;
|
||||
cursor = data.nextCursor;
|
||||
}
|
||||
|
||||
// Each slug appears exactly once. Order doesn't matter for this
|
||||
// assertion — just no duplicates and no missing entries.
|
||||
expect(collected.toSorted()).toEqual(slugs.toSorted());
|
||||
});
|
||||
|
||||
it("survives concurrent deletion of the cursor-term", async () => {
|
||||
// The base64 keyset cursor encodes a (label, id) position rather
|
||||
// than a row reference, so deleting the cursor-term between pages
|
||||
// must not error — the next page just continues from the next
|
||||
// position in sort order.
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
for (const slug of ["alpha", "bravo", "charlie", "delta"]) {
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug, label: slug },
|
||||
});
|
||||
}
|
||||
|
||||
const page1 = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories", limit: 2 },
|
||||
});
|
||||
const p1 = extractJson<{
|
||||
items: Array<{ slug: string }>;
|
||||
nextCursor?: string;
|
||||
}>(page1);
|
||||
expect(p1.items.map((i) => i.slug)).toEqual(["alpha", "bravo"]);
|
||||
expect(p1.nextCursor).toBeTruthy();
|
||||
|
||||
// Delete the cursor-term ('bravo') out of band.
|
||||
const { TaxonomyRepository } = await import("../../../src/database/repositories/taxonomy.js");
|
||||
const repo = new TaxonomyRepository(db);
|
||||
const bravo = await repo.findBySlug("categories", "bravo");
|
||||
if (!bravo) throw new Error("bravo missing — fixture broken");
|
||||
await db.deleteFrom("taxonomies").where("id", "=", bravo.id).execute();
|
||||
|
||||
// Page 2 must still work and return the items strictly after the
|
||||
// cursor's position. Pre-fix the cursor stored 'bravo's id and
|
||||
// findIndex would have returned -1 → INVALID_CURSOR. Post-fix the
|
||||
// cursor stores ('bravo', '<bravo-id>') and the keyset comparison
|
||||
// finds the first term with (label, id) > ('bravo', '<bravo-id>')
|
||||
// — that's 'charlie'.
|
||||
const page2 = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories", limit: 2, cursor: p1.nextCursor },
|
||||
});
|
||||
expect(page2.isError, extractText(page2)).toBeFalsy();
|
||||
const p2 = extractJson<{ items: Array<{ slug: string }> }>(page2);
|
||||
expect(p2.items.map((i) => i.slug)).toEqual(["charlie", "delta"]);
|
||||
});
|
||||
|
||||
it("malformed cursor returns INVALID_CURSOR", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "t1", label: "T1" },
|
||||
});
|
||||
|
||||
// taxonomy_list_terms uses a base64 keyset cursor over (label, id).
|
||||
// A completely bogus value fails decodeCursor and surfaces as a
|
||||
// structured INVALID_CURSOR error.
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories", cursor: "garbage_cursor_xyz" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("INVALID_CURSOR");
|
||||
});
|
||||
|
||||
it("any logged-in user (SUBSCRIBER) can read terms", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: SUBSCRIBER_ID, userRole: Role.SUBSCRIBER });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "categories" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// taxonomy_create_term
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("taxonomy_create_term", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
await setupTaxonomy(db, { name: "categories", label: "Categories", hierarchical: true });
|
||||
await setupTaxonomy(db, { name: "tags", label: "Tags" });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("creates a term with minimal arguments", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "tech", label: "Tech" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { term } = extractJson<{ term: { slug: string; label: string } }>(result);
|
||||
expect(term.slug).toBe("tech");
|
||||
expect(term.label).toBe("Tech");
|
||||
});
|
||||
|
||||
it("creates a child term with parentId", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const parent = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "tech", label: "Tech" },
|
||||
});
|
||||
const parentId = extractJson<{ term: { id: string } }>(parent).term.id;
|
||||
|
||||
const child = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: {
|
||||
taxonomy: "categories",
|
||||
slug: "ai",
|
||||
label: "AI",
|
||||
parentId,
|
||||
},
|
||||
});
|
||||
expect(child.isError, extractText(child)).toBeFalsy();
|
||||
const { term } = extractJson<{ term: { parentId: string | null } }>(child);
|
||||
expect(term.parentId).toBe(parentId);
|
||||
});
|
||||
|
||||
it("rejects duplicate slug within the same taxonomy", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "tech", label: "Tech" },
|
||||
});
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "tech", label: "Tech 2" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/exist|duplicate|conflict|unique|already/i);
|
||||
});
|
||||
|
||||
it("allows same slug across different taxonomies", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const a = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "shared", label: "Shared" },
|
||||
});
|
||||
const b = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "shared", label: "Shared" },
|
||||
});
|
||||
expect(a.isError, extractText(a)).toBeFalsy();
|
||||
expect(b.isError, extractText(b)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects creating a term in a non-existent taxonomy", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "ghost", slug: "x", label: "X" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/\bNOT_FOUND\b|\bnot found\b/i);
|
||||
expect(extractText(result)).toContain("ghost");
|
||||
});
|
||||
|
||||
it("rejects parentId pointing to a different taxonomy", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const tag = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "stuff", label: "Stuff" },
|
||||
});
|
||||
const tagId = extractJson<{ term: { id: string } }>(tag).term.id;
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: {
|
||||
taxonomy: "categories",
|
||||
slug: "child",
|
||||
label: "Child",
|
||||
parentId: tagId,
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("rejects parentId pointing to a non-existent term", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: {
|
||||
taxonomy: "categories",
|
||||
slug: "orphan",
|
||||
label: "Orphan",
|
||||
parentId: "01NEVEREXISTED",
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
|
||||
it("requires EDITOR role (AUTHOR is blocked)", async () => {
|
||||
harness = await connectMcpHarness({ db, userId: AUTHOR_ID, userRole: Role.AUTHOR });
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "categories", slug: "x", label: "X" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #13 / F2 / F3 / F12 — happy paths for taxonomy_update_term and
|
||||
// taxonomy_delete_term, plus parent validation, cycle detection, and
|
||||
// empty-string rejection.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("taxonomy_update_term (bug #13 / F2 / F12)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
async function createTerm(
|
||||
taxonomy: string,
|
||||
slug: string,
|
||||
label: string,
|
||||
parentId?: string,
|
||||
): Promise<string> {
|
||||
const args: Record<string, unknown> = { taxonomy, slug, label };
|
||||
if (parentId) args.parentId = parentId;
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: args,
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { term } = extractJson<{ term: { id: string } }>(result);
|
||||
return term.id;
|
||||
}
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
await setupTaxonomy(db, { name: "tags", label: "Tags" });
|
||||
await setupTaxonomy(db, { name: "sections", label: "Sections" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("MCP exposes taxonomy_update_term and taxonomy_delete_term", async () => {
|
||||
const tools = await harness.client.listTools();
|
||||
const names = tools.tools.map((t) => t.name);
|
||||
expect(names).toContain("taxonomy_update_term");
|
||||
expect(names).toContain("taxonomy_delete_term");
|
||||
});
|
||||
|
||||
it("renames the slug when the new slug is free", async () => {
|
||||
await createTerm("tags", "old-slug", "Original");
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_update_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "old-slug", slug: "new-slug" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { term } = extractJson<{ term: { slug: string } }>(result);
|
||||
expect(term.slug).toBe("new-slug");
|
||||
});
|
||||
|
||||
it("changes the label", async () => {
|
||||
await createTerm("tags", "x", "Old Label");
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_update_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "x", label: "New Label" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
const { term } = extractJson<{ term: { label: string } }>(result);
|
||||
expect(term.label).toBe("New Label");
|
||||
});
|
||||
|
||||
it("reparents a term and detaches via parentId: null", async () => {
|
||||
const parentId = await createTerm("tags", "parent", "Parent");
|
||||
await createTerm("tags", "child", "Child");
|
||||
|
||||
const reparent = await harness.client.callTool({
|
||||
name: "taxonomy_update_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "child", parentId },
|
||||
});
|
||||
expect(reparent.isError, extractText(reparent)).toBeFalsy();
|
||||
const reparented = extractJson<{ term: { parentId: string | null } }>(reparent);
|
||||
expect(reparented.term.parentId).toBe(parentId);
|
||||
|
||||
const detach = await harness.client.callTool({
|
||||
name: "taxonomy_update_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "child", parentId: null },
|
||||
});
|
||||
expect(detach.isError, extractText(detach)).toBeFalsy();
|
||||
const detached = extractJson<{ term: { parentId: string | null } }>(detach);
|
||||
expect(detached.term.parentId).toBeNull();
|
||||
});
|
||||
|
||||
it("rejects parents from a different taxonomy", async () => {
|
||||
const sectionId = await createTerm("sections", "news", "News");
|
||||
await createTerm("tags", "alpha", "Alpha");
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_update_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "alpha", parentId: sectionId },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/VALIDATION_ERROR/);
|
||||
});
|
||||
|
||||
it("rejects self-parent", async () => {
|
||||
const id = await createTerm("tags", "loop", "Loop");
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_update_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "loop", parentId: id },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/own parent|VALIDATION_ERROR/i);
|
||||
});
|
||||
|
||||
it("rejects a 2-cycle (descendant becoming ancestor)", async () => {
|
||||
// A is parent of B. Now try to make B the parent of A — that's a cycle.
|
||||
const aId = await createTerm("tags", "a", "A");
|
||||
const bId = await createTerm("tags", "b", "B", aId);
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_update_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "a", parentId: bId },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/cycle|VALIDATION_ERROR/i);
|
||||
});
|
||||
|
||||
it("rejects empty-string parentId on create", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "x", label: "X", parentId: "" },
|
||||
});
|
||||
// Either returns a validation error, or treats it as no-parent.
|
||||
// We choose strict: empty string is normalized to undefined so it
|
||||
// succeeds with parentId === null (no parent attached). That's the
|
||||
// behavior we documented.
|
||||
if (result.isError) {
|
||||
expect(extractText(result)).toMatch(/VALIDATION_ERROR/);
|
||||
} else {
|
||||
const { term } = extractJson<{ term: { parentId: string | null } }>(result);
|
||||
expect(term.parentId).toBeNull();
|
||||
}
|
||||
});
|
||||
|
||||
// ----- MAX_DEPTH boundary -----
|
||||
// validateParentTerm walks up the parent chain bounded by MAX_DEPTH=100
|
||||
// to prevent a pathological pre-existing cycle from hanging the
|
||||
// validator. The boundary is "more than 100 ancestors": exactly-100 is
|
||||
// accepted, 101+ is rejected.
|
||||
|
||||
it("accepts a chain of exactly MAX_DEPTH (100) ancestors", async () => {
|
||||
const { TaxonomyRepository } = await import("../../../src/database/repositories/taxonomy.js");
|
||||
const repo = new TaxonomyRepository(db);
|
||||
// Build root → 1 → 2 → ... → 100. 101 terms total. The deepest
|
||||
// term has 100 ancestors; setting it as parent of a new term means
|
||||
// validateParentTerm walks 100 hops up before exhausting the chain.
|
||||
let parentId: string | undefined;
|
||||
const ids: string[] = [];
|
||||
for (let i = 0; i < 101; i++) {
|
||||
const term = await repo.create({
|
||||
name: "tags",
|
||||
slug: `chain-${i}`,
|
||||
label: `Chain ${i}`,
|
||||
parentId,
|
||||
});
|
||||
ids.push(term.id);
|
||||
parentId = term.id;
|
||||
}
|
||||
const deepest = ids.at(-1);
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "leaf", label: "Leaf", parentId: deepest },
|
||||
});
|
||||
// New term's parent is the 100-deep tail. Walking up from there
|
||||
// reaches the root after exactly 100 hops; cursor becomes null,
|
||||
// the depth-exceeded check does NOT fire.
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects a chain that exceeds MAX_DEPTH", async () => {
|
||||
const { TaxonomyRepository } = await import("../../../src/database/repositories/taxonomy.js");
|
||||
const repo = new TaxonomyRepository(db);
|
||||
// Build a 102-term chain. The deepest term has 101 ancestors —
|
||||
// one more than MAX_DEPTH allows.
|
||||
let parentId: string | undefined;
|
||||
const ids: string[] = [];
|
||||
for (let i = 0; i < 102; i++) {
|
||||
const term = await repo.create({
|
||||
name: "tags",
|
||||
slug: `chain-${i}`,
|
||||
label: `Chain ${i}`,
|
||||
parentId,
|
||||
});
|
||||
ids.push(term.id);
|
||||
parentId = term.id;
|
||||
}
|
||||
const deepest = ids.at(-1);
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "leaf", label: "Leaf", parentId: deepest },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/maximum depth/i);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("VALIDATION_ERROR");
|
||||
});
|
||||
});
|
||||
|
||||
describe("taxonomy_delete_term (bug #13 / F12)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
await setupTaxonomy(db, { name: "tags", label: "Tags" });
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("rejects deletion when children exist (matches handler behavior)", async () => {
|
||||
const parent = await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "parent", label: "Parent" },
|
||||
});
|
||||
const { term } = extractJson<{ term: { id: string } }>(parent);
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "child", label: "Child", parentId: term.id },
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_delete_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "parent" },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(/VALIDATION_ERROR|children/i);
|
||||
});
|
||||
|
||||
it("deletes a leaf term and the row is actually gone", async () => {
|
||||
await harness.client.callTool({
|
||||
name: "taxonomy_create_term",
|
||||
arguments: { taxonomy: "tags", slug: "leaf", label: "Leaf" },
|
||||
});
|
||||
|
||||
// Pre-condition: the term is listable.
|
||||
const before = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "tags" },
|
||||
});
|
||||
const beforeSlugs = extractJson<{ items: Array<{ slug: string }> }>(before).items.map(
|
||||
(t) => t.slug,
|
||||
);
|
||||
expect(beforeSlugs).toContain("leaf");
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "taxonomy_delete_term",
|
||||
arguments: { taxonomy: "tags", termSlug: "leaf" },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
|
||||
// Post-condition: the term is no longer listable. A regression where
|
||||
// the handler returns success: true without actually deleting the row
|
||||
// fails this assertion.
|
||||
const after = await harness.client.callTool({
|
||||
name: "taxonomy_list_terms",
|
||||
arguments: { taxonomy: "tags" },
|
||||
});
|
||||
const afterSlugs = extractJson<{ items: Array<{ slug: string }> }>(after).items.map(
|
||||
(t) => t.slug,
|
||||
);
|
||||
expect(afterSlugs).not.toContain("leaf");
|
||||
});
|
||||
});
|
||||
491
packages/core/tests/integration/mcp/validation.test.ts
Normal file
491
packages/core/tests/integration/mcp/validation.test.ts
Normal file
@@ -0,0 +1,491 @@
|
||||
/**
|
||||
* MCP field-level validation tests.
|
||||
*
|
||||
* `EmDashRuntime.handleContentCreate` and `handleContentUpdate` validate
|
||||
* `data` against the collection's schema before any write:
|
||||
*
|
||||
* - required fields must be present and non-empty
|
||||
* - select / multiSelect values must match the configured options
|
||||
* - reference fields must resolve to a real, non-trashed target
|
||||
*
|
||||
* Failures return `{ code: "VALIDATION_ERROR", message: "<field>: <reason>" }`
|
||||
* with all offending fields named in one message so callers can fix
|
||||
* everything in a single round trip. These tests cover both REST and MCP
|
||||
* because validation runs at the runtime layer and both transports go
|
||||
* through it.
|
||||
*/
|
||||
|
||||
import { Role } from "@emdash-cms/auth";
|
||||
import type { Kysely } from "kysely";
|
||||
import { afterEach, beforeEach, describe, expect, it } from "vitest";
|
||||
|
||||
import { ContentRepository } from "../../../src/database/repositories/content.js";
|
||||
import type { Database } from "../../../src/database/types.js";
|
||||
import { SchemaRegistry } from "../../../src/schema/registry.js";
|
||||
import { connectMcpHarness, extractText, type McpHarness } from "../../utils/mcp-runtime.js";
|
||||
import { setupTestDatabase, teardownTestDatabase } from "../../utils/test-db.js";
|
||||
|
||||
const ADMIN_ID = "user_admin";

// Matches the field-level validation messages the runtime emits.
const VALIDATION_ERROR = /validation|required|invalid/i;
// Generic fallback message — tests assert this does NOT appear, so
// failures must stay field-specific.
const GENERIC_FAILURE = /^Failed to (create|update) content$/;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #4: required field validation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP validation — required fields (bug #4)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
// Required title, optional body
|
||||
await registry.createField("post", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
required: true,
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "body",
|
||||
label: "Body",
|
||||
type: "text",
|
||||
});
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("rejects create without required title", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { body: "no title" } },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).not.toMatch(GENERIC_FAILURE);
|
||||
expect(text).toMatch(VALIDATION_ERROR);
|
||||
expect(text).toMatch(/title/i);
|
||||
});
|
||||
|
||||
it("rejects create with empty-string required title", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "" } },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
});
|
||||
|
||||
it("rejects create with explicitly-null required title", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: null } },
|
||||
});
|
||||
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
});
|
||||
|
||||
it("rejects create with non-string value for a string field", async () => {
|
||||
// Zod's `z.string()` rejects numbers/booleans/objects. The MCP
|
||||
// boundary lets these through (data is `z.record(z.string(),
|
||||
// z.unknown())`), so the check has to live in the runtime
|
||||
// validator. Guard against future regressions like swapping in
|
||||
// `z.coerce.string()`.
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
// eslint-disable-next-line typescript-eslint(no-explicit-any) -- intentionally bypass MCP type to hit runtime validation
|
||||
data: { title: 42 } as any,
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
expect(extractText(result)).toMatch(/title/i);
|
||||
const meta = (result as { _meta?: { code?: string } })._meta;
|
||||
expect(meta?.code).toBe("VALIDATION_ERROR");
|
||||
});
|
||||
|
||||
it("accepts create with required title present (regression guard)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Has title" } },
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects update that clears required title to empty string", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Initial" } },
|
||||
});
|
||||
expect(created.isError, extractText(created)).toBeFalsy();
|
||||
const id = JSON.parse(extractText(created)).item.id as string;
|
||||
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { title: "" } },
|
||||
});
|
||||
expect(updated.isError).toBe(true);
|
||||
expect(extractText(updated)).toMatch(VALIDATION_ERROR);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #5: select and multiSelect option enforcement
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP validation — select and multiSelect options (bug #5)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
await registry.createField("post", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
required: true,
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "priority",
|
||||
label: "Priority",
|
||||
type: "select",
|
||||
validation: { options: ["low", "medium", "high"] },
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "tags",
|
||||
label: "Tags",
|
||||
type: "multiSelect",
|
||||
validation: { options: ["news", "tech", "design"] },
|
||||
});
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("rejects select value not in options list", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", priority: "not-an-option" },
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
expect(extractText(result)).toMatch(/priority|select|option|not-an-option/i);
|
||||
});
|
||||
|
||||
it("accepts select value in options list (regression guard)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", priority: "high" },
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects multiSelect array containing an invalid value", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", tags: ["news", "bogus"] },
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
expect(extractText(result)).toMatch(/tags|multiSelect|option|bogus/i);
|
||||
});
|
||||
|
||||
it("accepts multiSelect with all valid values (regression guard)", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", tags: ["news", "tech"] },
|
||||
},
|
||||
});
|
||||
expect(result.isError, extractText(result)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects update introducing an invalid select value", async () => {
|
||||
const created = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", priority: "low" },
|
||||
},
|
||||
});
|
||||
expect(created.isError, extractText(created)).toBeFalsy();
|
||||
const id = JSON.parse(extractText(created)).item.id as string;
|
||||
|
||||
const updated = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id, data: { priority: "URGENT" } },
|
||||
});
|
||||
expect(updated.isError).toBe(true);
|
||||
expect(extractText(updated)).toMatch(VALIDATION_ERROR);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bug #6: reference field target existence
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP validation — reference field targets (bug #6)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
|
||||
await registry.createCollection({ slug: "page", label: "Pages" });
|
||||
await registry.createField("page", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
required: true,
|
||||
});
|
||||
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
await registry.createField("post", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
required: true,
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "parent_page",
|
||||
label: "Parent Page",
|
||||
type: "reference",
|
||||
validation: { collection: "page" },
|
||||
});
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("rejects reference to non-existent target id", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", parent_page: "01NOTAREALPAGE" },
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
expect(text).toMatch(VALIDATION_ERROR);
|
||||
// Tight match: the error must specifically mention the offending field,
|
||||
// echo the bad target id, AND say "not found" (one assertion per
|
||||
// concern so a regression where any signal disappears is caught).
|
||||
expect(text).toContain("parent_page");
|
||||
expect(text).toContain("01NOTAREALPAGE");
|
||||
expect(text).toMatch(/\bnot found\b/i);
|
||||
});
|
||||
|
||||
it("accepts reference to a real target id (regression guard)", async () => {
|
||||
// Create a page first
|
||||
const page = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "page", data: { title: "Real page" } },
|
||||
});
|
||||
expect(page.isError, extractText(page)).toBeFalsy();
|
||||
const pageId = JSON.parse(extractText(page)).item.id as string;
|
||||
|
||||
const post = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", parent_page: pageId },
|
||||
},
|
||||
});
|
||||
expect(post.isError, extractText(post)).toBeFalsy();
|
||||
});
|
||||
|
||||
it("rejects reference to id that exists in a different collection", async () => {
|
||||
// Create a post (which is NOT the page collection the reference is scoped to)
|
||||
const repo = new ContentRepository(db);
|
||||
const otherPost = await repo.create({
|
||||
type: "post",
|
||||
data: { title: "Other" },
|
||||
slug: "other",
|
||||
status: "draft",
|
||||
authorId: ADMIN_ID,
|
||||
});
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", parent_page: otherPost.id },
|
||||
},
|
||||
});
|
||||
// Reference points to a post id but field expects a page reference.
|
||||
// After fix this should fail.
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
});
|
||||
|
||||
it("rejects reference to a soft-deleted (trashed) target", async () => {
|
||||
const page = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "page", data: { title: "Will be trashed" } },
|
||||
});
|
||||
const pageId = JSON.parse(extractText(page)).item.id as string;
|
||||
// Trash via repo
|
||||
const repo = new ContentRepository(db);
|
||||
await repo.delete("page", pageId);
|
||||
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
collection: "post",
|
||||
data: { title: "T", parent_page: pageId },
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Combined: error message is structured even when multiple fields fail
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP validation — multi-field error messaging", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
|
||||
await registry.createCollection({ slug: "post", label: "Posts" });
|
||||
await registry.createField("post", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
required: true,
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "priority",
|
||||
label: "Priority",
|
||||
type: "select",
|
||||
validation: { options: ["low", "high"] },
|
||||
});
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("when multiple fields fail validation, the error mentions all of them", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: {
|
||||
// missing required title AND invalid priority
|
||||
collection: "post",
|
||||
data: { priority: "URGENT" },
|
||||
},
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
const text = extractText(result);
|
||||
// Both field names should appear so a caller can fix everything in one round.
|
||||
expect(text).toMatch(/title/i);
|
||||
expect(text).toMatch(/priority/i);
|
||||
});
|
||||
});
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// F4: validation runs on UPDATE for revision-supporting collections.
|
||||
//
|
||||
// Before the fix, the runtime wrote the draft revision *before* the API
|
||||
// handler ran (and called the handler with `data: undefined`), so update-
|
||||
// time validation was bypassed for any collection that supports revisions.
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
describe("MCP validation — UPDATE on revision-supporting collections (F4)", () => {
|
||||
let db: Kysely<Database>;
|
||||
let harness: McpHarness;
|
||||
let postId: string;
|
||||
|
||||
beforeEach(async () => {
|
||||
db = await setupTestDatabase();
|
||||
const registry = new SchemaRegistry(db);
|
||||
await registry.createCollection({
|
||||
slug: "post",
|
||||
label: "Posts",
|
||||
supports: ["drafts", "revisions"],
|
||||
});
|
||||
await registry.createField("post", {
|
||||
slug: "title",
|
||||
label: "Title",
|
||||
type: "string",
|
||||
required: true,
|
||||
});
|
||||
|
||||
harness = await connectMcpHarness({ db, userId: ADMIN_ID, userRole: Role.ADMIN });
|
||||
|
||||
const create = await harness.client.callTool({
|
||||
name: "content_create",
|
||||
arguments: { collection: "post", data: { title: "Initial title" } },
|
||||
});
|
||||
expect(create.isError, extractText(create)).toBeFalsy();
|
||||
postId = JSON.parse(extractText(create)).item.id as string;
|
||||
});
|
||||
|
||||
afterEach(async () => {
|
||||
if (harness) await harness.cleanup();
|
||||
await teardownTestDatabase(db);
|
||||
});
|
||||
|
||||
it("rejects update with empty required field BEFORE creating a draft revision", async () => {
|
||||
const result = await harness.client.callTool({
|
||||
name: "content_update",
|
||||
arguments: { collection: "post", id: postId, data: { title: "" } },
|
||||
});
|
||||
expect(result.isError).toBe(true);
|
||||
expect(extractText(result)).toMatch(VALIDATION_ERROR);
|
||||
|
||||
// And no draft revision was written — listing revisions returns empty.
|
||||
const list = await harness.client.callTool({
|
||||
name: "revision_list",
|
||||
arguments: { collection: "post", id: postId },
|
||||
});
|
||||
expect(list.isError, extractText(list)).toBeFalsy();
|
||||
const { items } = JSON.parse(extractText(list)) as { items: unknown[] };
|
||||
expect(items).toEqual([]);
|
||||
});
|
||||
});
|
||||
Reference in New Issue
Block a user