refactor: move nested skills to root + add ui-ux-pro-max + ConsentOS
- Extract 9 nested skills from website-creator/ to root skills/
- Remove duplicate seo-analyzers, seo-geo, seo-multi-channel from website-creator
- Add new ui-ux-pro-max skill with full UI/UX data
- Update install-skills.sh to sync properly
- Remove .DS_Store artifacts

Moved skills:
- api-and-interface-design
- banner-design
- brand
- design-system
- design
- frontend-ui-engineering
- slides
- spec-driven-development
- ui-styling
This commit is contained in:
99
skills/design-system/scripts/embed-tokens.cjs
Normal file
99
skills/design-system/scripts/embed-tokens.cjs
Normal file
@@ -0,0 +1,99 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* embed-tokens.cjs
|
||||
* Reads design-tokens.css and outputs embeddable inline CSS.
|
||||
* Use when generating standalone HTML files (infographics, slides, etc.)
|
||||
*
|
||||
* Usage:
|
||||
* node embed-tokens.cjs # Output full CSS
|
||||
* node embed-tokens.cjs --minimal # Output only commonly used tokens
|
||||
* node embed-tokens.cjs --style # Wrap in <style> tags
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
// Find project root (look for assets/design-tokens.css)
|
||||
/**
 * Walk upward from startDir looking for the project root.
 *
 * The project root is identified by the presence of
 * `assets/design-tokens.css` inside the directory.
 *
 * @param {string} startDir - Directory to start searching from.
 * @returns {string|null} Path of the project root, or null if no ancestor
 *   (including the filesystem root) contains the marker file.
 */
function findProjectRoot(startDir) {
  // Resolve first so relative inputs ('.') terminate correctly.
  let dir = path.resolve(startDir);
  // Stop when we can no longer go up. Comparing against path.dirname(dir)
  // (rather than the literal '/') also terminates on Windows drive roots
  // like 'C:\', where the original `dir !== '/'` check would spin forever,
  // and lets the filesystem root itself be checked as a candidate.
  for (;;) {
    if (fs.existsSync(path.join(dir, 'assets', 'design-tokens.css'))) {
      return dir;
    }
    const parent = path.dirname(dir);
    if (parent === dir) {
      return null; // reached the filesystem root without a match
    }
    dir = parent;
  }
}
|
||||
|
||||
// Resolve the project root up front; everything below depends on the
// design-tokens stylesheet existing somewhere above the working directory.
const projectRoot = findProjectRoot(process.cwd());
if (projectRoot === null) {
  console.error('Error: Could not find assets/design-tokens.css');
  process.exit(1);
}

// Absolute path to the single source of truth for design tokens.
const tokensPath = path.join(projectRoot, 'assets', 'design-tokens.css');
|
||||
|
||||
// Minimal tokens commonly used in infographics/slides
|
||||
// Token names/prefixes considered "commonly used" for --minimal output.
// Matching is substring-based (see extractTokens), so entries ending in '-'
// act as prefixes that select a whole token family.
const MINIMAL_TOKENS = [
  // Primitive scales
  '--primitive-spacing-',
  '--primitive-fontSize-',
  '--primitive-fontWeight-',
  '--primitive-lineHeight-',
  '--primitive-radius-',
  '--primitive-shadow-glow-',
  '--primitive-gradient-',
  '--primitive-duration-',
  // Semantic colors
  '--color-primary',
  '--color-secondary',
  '--color-accent',
  '--color-background',
  '--color-surface',
  '--color-foreground',
  '--color-border',
  // Typography + component tokens
  '--typography-font-',
  '--card-',
];
|
||||
|
||||
/**
 * Pull CSS custom-property declarations out of every `:root` block.
 *
 * @param {string} css - Full stylesheet text.
 * @param {boolean} [minimal=false] - Keep only tokens matching MINIMAL_TOKENS.
 * @returns {string} A single synthesized `:root { ... }` block, or '' when
 *   the input contains no `:root` rule.
 */
function extractTokens(css, minimal = false) {
  const rootBlocks = css.match(/:root\s*\{([^}]+)\}/g);
  if (!rootBlocks) return '';

  // Collect every `--name: value;` declaration across all :root blocks.
  let declarations = rootBlocks.flatMap(
    (block) => block.match(/--[\w-]+:\s*[^;]+;/g) || []
  );

  if (minimal) {
    declarations = declarations.filter((decl) =>
      MINIMAL_TOKENS.some((token) => decl.includes(token))
    );
  }

  // Duplicate declarations (same token in several blocks) appear once.
  const unique = [...new Set(declarations)];

  return `:root {\n ${unique.join('\n ')}\n}`;
}
|
||||
|
||||
// Parse args
|
||||
// CLI flags: --minimal trims output to MINIMAL_TOKENS families,
// --style wraps the result in a <style> tag for direct HTML embedding.
const args = process.argv.slice(2);
const hasFlag = (flag) => args.includes(flag);
const minimal = hasFlag('--minimal');
const wrapStyle = hasFlag('--style');
|
||||
|
||||
// Read the stylesheet, extract the tokens, and print the embeddable CSS.
// Any filesystem failure is reported and mapped to a non-zero exit code.
try {
  const css = fs.readFileSync(tokensPath, 'utf-8');
  const tokens = extractTokens(css, minimal);

  const header = '/* Design Tokens (embedded for standalone HTML) */';
  const output = wrapStyle
    ? `<style>\n${header}\n${tokens}\n</style>`
    : `${header}\n${tokens}`;

  console.log(output);
} catch (err) {
  console.error(`Error reading tokens: ${err.message}`);
  process.exit(1);
}
|
||||
317
skills/design-system/scripts/fetch-background.py
Normal file
317
skills/design-system/scripts/fetch-background.py
Normal file
@@ -0,0 +1,317 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Background Image Fetcher
|
||||
Fetches real images from Pexels for slide backgrounds.
|
||||
Uses web scraping (no API key required) or WebFetch tool integration.
|
||||
"""
|
||||
|
||||
import json
|
||||
import csv
|
||||
import re
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
# Project root relative to this script
# (five levels up; assumes this file stays at skills/design-system/scripts/
#  and that the script is run from a stable location, since the path is not
#  resolve()d — TODO confirm)
PROJECT_ROOT = Path(__file__).parent.parent.parent.parent.parent
# Design tokens JSON (token objects carry their payload under '$value').
TOKENS_PATH = PROJECT_ROOT / 'assets' / 'design-tokens.json'
# Skill-local CSV mapping slide types to background/overlay settings.
BACKGROUNDS_CSV = Path(__file__).parent.parent / 'data' / 'slide-backgrounds.csv'
|
||||
|
||||
|
||||
def resolve_token_reference(ref: str, tokens: dict) -> str:
    """Resolve a token reference like ``{primitive.color.ocean-blue.500}``.

    Walks the dotted path through ``tokens``. Anything not wrapped in
    ``{...}`` is treated as a literal value and returned unchanged; a path
    that cannot be walked yields ``None``.
    """
    is_reference = bool(ref) and ref.startswith('{') and ref.endswith('}')
    if not is_reference:
        return ref  # literal value, nothing to resolve

    node = tokens
    for segment in ref[1:-1].split('.'):
        if not isinstance(node, dict):
            return None  # path descended into a leaf too early
        node = node.get(segment)

    # Token objects carry their payload under '$value'.
    if isinstance(node, dict) and '$value' in node:
        return node['$value']
    return node
|
||||
|
||||
|
||||
def load_brand_colors():
    """Load brand colors from assets/design-tokens.json for overlay gradients.

    Resolution order:
      1. Semantic tokens (``semantic.color.*``), with ``{a.b.c}`` references
         resolved to primitive hex values.
      2. First matching primitive palette's ``500`` shade for each role.
      3. Hard-coded fallback palette when the tokens file is missing or
         unusable.

    Returns:
        dict with 'primary', 'secondary', 'accent' and 'background' hex
        color strings.
    """
    fallback = {
        'primary': '#3B82F6',
        'secondary': '#F59E0B',
        'accent': '#10B981',
        'background': '#0D0D0D',
    }
    try:
        with open(TOKENS_PATH) as f:
            tokens = json.load(f)

        colors = tokens.get('primitive', {}).get('color', {})
        semantic = tokens.get('semantic', {}).get('color', {})

        # Preferred path: semantic tokens, resolving references to hex.
        if semantic:
            primary = resolve_token_reference(semantic.get('primary', {}).get('$value'), tokens)
            secondary = resolve_token_reference(semantic.get('secondary', {}).get('$value'), tokens)
            accent = resolve_token_reference(semantic.get('accent', {}).get('$value'), tokens)
            background = resolve_token_reference(semantic.get('background', {}).get('$value'), tokens)

            if primary and secondary:
                return {
                    'primary': primary,
                    'secondary': secondary,
                    'accent': accent or primary,
                    'background': background or '#0D0D0D',
                }

        # Fallback path: first known primitive palette providing a 500 shade.
        background = colors.get('dark', {}).get('800', {}).get('$value', '#0D0D0D')
        return {
            'primary': _first_palette_500(colors, ['ocean-blue', 'coral', 'blue', 'primary']) or '#3B82F6',
            'secondary': _first_palette_500(colors, ['golden-amber', 'purple', 'amber', 'secondary']) or '#F59E0B',
            'accent': _first_palette_500(colors, ['emerald', 'mint', 'green', 'accent']) or '#10B981',
            'background': background,
        }
    except (FileNotFoundError, KeyError, TypeError, json.JSONDecodeError):
        # Best-effort by design: a missing OR malformed tokens file degrades
        # to defaults rather than crashing slide generation
        # (json.JSONDecodeError added — the original crashed on bad JSON).
        return fallback


def _first_palette_500(colors: dict, candidate_keys) -> str:
    """Return the '$value' of the '500' shade of the first palette in
    `candidate_keys` that provides one, or None if none do."""
    for key in candidate_keys:
        if key in colors and isinstance(colors[key], dict):
            value = colors[key].get('500', {}).get('$value')
            if value:
                return value
    return None
|
||||
|
||||
|
||||
def load_backgrounds_config():
    """Load per-slide-type background settings from slide-backgrounds.csv.

    Returns a mapping of slide_type -> CSV row dict; empty (after printing
    a warning) when the CSV does not exist.
    """
    try:
        with open(BACKGROUNDS_CSV, newline='') as f:
            return {row['slide_type']: row for row in csv.DictReader(f)}
    except FileNotFoundError:
        print(f"Warning: {BACKGROUNDS_CSV} not found")
        return {}
|
||||
|
||||
|
||||
def get_overlay_css(style: str, brand_colors: dict) -> str:
    """Generate overlay CSS using brand colors from design-tokens.json.

    The two-character hex suffixes (E6/CC/B3/99) are alpha channels appended
    to the brand hex colors. Unknown styles fall back to 'gradient-dark'.
    """
    bg = brand_colors['background']
    primary = brand_colors['primary']
    secondary = brand_colors['secondary']
    accent = brand_colors['accent']

    overlays = {
        'gradient-dark': f"linear-gradient(135deg, {bg}E6, {bg}B3)",
        'gradient-brand': f"linear-gradient(135deg, {primary}CC, {secondary}99)",
        'gradient-accent': f"linear-gradient(135deg, {accent}99, transparent)",
        'blur-dark': "rgba(13,13,13,0.8)",
        'desaturate-dark': "rgba(13,13,13,0.7)",
    }
    return overlays.get(style, overlays['gradient-dark'])
|
||||
|
||||
|
||||
# Curated high-quality images from Pexels (free to use, pre-selected for brand aesthetic)
# Keyed by slide type; consumed via get_curated_images(), where 'hero' also
# serves as the fallback set for unknown slide types. The first URL of each
# list is the default pick in get_background_image().
CURATED_IMAGES = {
    'hero': [
        'https://images.pexels.com/photos/3861969/pexels-photo-3861969.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/2582937/pexels-photo-2582937.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/1089438/pexels-photo-1089438.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'vision': [
        'https://images.pexels.com/photos/3183150/pexels-photo-3183150.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3182812/pexels-photo-3182812.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3184291/pexels-photo-3184291.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'team': [
        'https://images.pexels.com/photos/3184418/pexels-photo-3184418.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3184338/pexels-photo-3184338.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3182773/pexels-photo-3182773.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'testimonial': [
        'https://images.pexels.com/photos/3184465/pexels-photo-3184465.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/1181622/pexels-photo-1181622.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'cta': [
        'https://images.pexels.com/photos/3184339/pexels-photo-3184339.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3184298/pexels-photo-3184298.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'problem': [
        'https://images.pexels.com/photos/3760529/pexels-photo-3760529.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/897817/pexels-photo-897817.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'solution': [
        'https://images.pexels.com/photos/3184292/pexels-photo-3184292.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3184644/pexels-photo-3184644.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'hook': [
        'https://images.pexels.com/photos/2582937/pexels-photo-2582937.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/1089438/pexels-photo-1089438.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'social': [
        'https://images.pexels.com/photos/3184360/pexels-photo-3184360.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3184287/pexels-photo-3184287.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
    'demo': [
        'https://images.pexels.com/photos/1181675/pexels-photo-1181675.jpeg?auto=compress&cs=tinysrgb&w=1920',
        'https://images.pexels.com/photos/3861958/pexels-photo-3861958.jpeg?auto=compress&cs=tinysrgb&w=1920',
    ],
}
|
||||
|
||||
|
||||
def get_curated_images(slide_type: str) -> list:
    """Return curated image URLs for `slide_type`; unknown types fall back
    to the 'hero' set (or an empty list if even that is absent)."""
    if slide_type in CURATED_IMAGES:
        return CURATED_IMAGES[slide_type]
    return CURATED_IMAGES.get('hero', [])
|
||||
|
||||
|
||||
def get_pexels_search_url(keywords: str) -> str:
    """Build a Pexels search URL (for manual image lookup) from keywords."""
    from urllib.parse import quote
    return f"https://www.pexels.com/search/{quote(keywords)}/"
|
||||
|
||||
|
||||
def get_background_image(slide_type: str) -> dict:
    """
    Pick a curated image (plus overlay CSS) matching the slide type.

    Uses pre-selected Pexels images, so no API key or scraping is needed.
    When nothing is curated for `slide_type`, returns a descriptor with
    ``url=None`` and a manual search URL instead.
    """
    brand_colors = load_brand_colors()
    config = load_backgrounds_config()

    # Defaults used when the CSV has no row for this slide type.
    overlay_style = 'gradient-dark'
    keywords = slide_type

    row = config.get(slide_type)
    if row:
        keywords = row.get('search_keywords', row.get('image_category', slide_type))
        overlay_style = row.get('overlay_style', 'gradient-dark')

    overlay = get_overlay_css(overlay_style, brand_colors)
    urls = get_curated_images(slide_type)

    if not urls:
        # Nothing curated: hand back the overlay plus a manual search link.
        return {
            'url': None,
            'overlay': overlay,
            'keywords': keywords,
            'search_url': get_pexels_search_url(keywords),
            'available_types': list(CURATED_IMAGES.keys()),
        }

    return {
        'url': urls[0],
        'all_urls': urls,
        'overlay': overlay,
        'attribution': 'Photo from Pexels (free to use)',
        'source': 'pexels-curated',
        'search_url': get_pexels_search_url(keywords),
    }
|
||||
|
||||
|
||||
def generate_css_for_background(result: dict, slide_class: str = '.slide-with-bg') -> str:
    """Render CSS for a background slide from a get_background_image() result.

    When the result has no image URL, emits CSS comments pointing at the
    manual search URL instead of actual rules.
    """
    if not result.get('url'):
        manual_url = result.get('search_url', '')
        overlay = result.get('overlay', 'gradient-dark')
        return (
            f"/* No image scraped. Search manually: {manual_url} */\n"
            f"/* Overlay ready: {overlay} */\n"
        )

    attribution = result.get('attribution', 'Pexels')
    return f"""{slide_class} {{
  background-image: url('{result['url']}');
  background-size: cover;
  background-position: center;
  position: relative;
}}

{slide_class}::before {{
  content: '';
  position: absolute;
  inset: 0;
  background: {result['overlay']};
}}

{slide_class} .content {{
  position: relative;
  z-index: 1;
}}

/* {attribution} - {result.get('search_url', '')} */
"""
|
||||
|
||||
|
||||
def main():
    """CLI entry point: look up background art for a given slide type."""
    import argparse

    parser = argparse.ArgumentParser(description='Get background images for slides')
    parser.add_argument('slide_type', nargs='?', help='Slide type (hero, vision, team, etc.)')
    parser.add_argument('--list', action='store_true', help='List available slide types')
    parser.add_argument('--css', action='store_true', help='Output CSS for the background')
    parser.add_argument('--json', action='store_true', help='Output JSON')
    parser.add_argument('--colors', action='store_true', help='Show brand colors')
    parser.add_argument('--all', action='store_true', help='Show all curated URLs')
    opts = parser.parse_args()

    # Informational modes that do not need a slide type.
    if opts.colors:
        print("\nBrand Colors (from design-tokens.json):")
        for name, value in load_brand_colors().items():
            print(f"  {name}: {value}")
        return

    if opts.list:
        print("\nAvailable slide types (curated images):")
        for slide_type, urls in CURATED_IMAGES.items():
            print(f"  {slide_type}: {len(urls)} images")
        return

    if not opts.slide_type:
        parser.print_help()
        return

    result = get_background_image(opts.slide_type)

    # Output format selection: --json > --css > --all > human summary.
    if opts.json:
        print(json.dumps(result, indent=2))
    elif opts.css:
        print(generate_css_for_background(result))
    elif opts.all:
        print(f"\nAll images for '{opts.slide_type}':")
        for index, url in enumerate(result.get('all_urls', []), 1):
            print(f"  {index}. {url}")
    else:
        print(f"\nImage URL: {result['url']}")
        print(f"Alternatives: {len(result.get('all_urls', []))} available (use --all)")
        print(f"Overlay: {result['overlay']}")
|
||||
753
skills/design-system/scripts/generate-slide.py
Normal file
753
skills/design-system/scripts/generate-slide.py
Normal file
@@ -0,0 +1,753 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Slide Generator - Generates HTML slides using design tokens
|
||||
ALL styles MUST use CSS variables from design-tokens.css
|
||||
NO hardcoded colors, fonts, or spacing allowed
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import json
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
|
||||
# Paths
# parents[4] is assumed to be the repository root, i.e. this script lives at
# skills/design-system/scripts/ — TODO confirm if the skill is relocated.
SCRIPT_DIR = Path(__file__).parent
# Skill-local data directory (CSV configs, etc.).
DATA_DIR = SCRIPT_DIR.parent / "data"
# Repo-level design token files — the single source of truth for styling.
TOKENS_CSS = Path(__file__).resolve().parents[4] / "assets" / "design-tokens.css"
TOKENS_JSON = Path(__file__).resolve().parents[4] / "assets" / "design-tokens.json"
# Where generated slide decks are written.
OUTPUT_DIR = Path(__file__).resolve().parents[4] / "assets" / "designs" / "slides"
|
||||
|
||||
# ============ BRAND-COMPLIANT SLIDE TEMPLATE ============
# ALL values reference CSS variables from design-tokens.css
# Placeholders are filled via str.format(): {title}, {tokens_css_path},
# {slides_content}. Literal CSS braces are therefore escaped as '{{'/'}}'.

SLIDE_TEMPLATE = '''<!DOCTYPE html>
<html lang="en" data-theme="dark">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>{title}</title>

<!-- Brand Fonts -->
<link rel="preconnect" href="https://fonts.googleapis.com">
<link href="https://fonts.googleapis.com/css2?family=Space+Grotesk:wght@500;600;700&family=Inter:wght@400;500;600&family=JetBrains+Mono:wght@400&display=swap" rel="stylesheet">

<!-- Design Tokens - SINGLE SOURCE OF TRUTH -->
<link rel="stylesheet" href="{tokens_css_path}">

<style>
/* ============================================
   STRICT TOKEN USAGE - NO HARDCODED VALUES
   All styles MUST use var(--token-name)
   ============================================ */

* {{
  margin: 0;
  padding: 0;
  box-sizing: border-box;
}}

html, body {{
  width: 100%;
  height: 100%;
}}

body {{
  font-family: var(--typography-font-body);
  background: var(--color-background);
  color: var(--color-foreground);
  line-height: var(--primitive-lineHeight-relaxed);
}}

/* Slide Container - 16:9 aspect ratio */
.slide-deck {{
  width: 100%;
  max-width: 1920px;
  margin: 0 auto;
}}

.slide {{
  width: 100%;
  aspect-ratio: 16 / 9;
  padding: var(--slide-padding);
  background: var(--slide-bg);
  display: flex;
  flex-direction: column;
  position: relative;
  overflow: hidden;
}}

.slide + .slide {{
  margin-top: var(--primitive-spacing-8);
}}

/* Background Variants */
.slide--surface {{
  background: var(--slide-bg-surface);
}}

.slide--gradient {{
  background: var(--slide-bg-gradient);
}}

.slide--glow::before {{
  content: '';
  position: absolute;
  top: 50%;
  left: 50%;
  transform: translate(-50%, -50%);
  width: 150%;
  height: 150%;
  background: var(--primitive-gradient-glow);
  pointer-events: none;
}}

/* Typography - MUST use token fonts and sizes */
h1, h2, h3, h4, h5, h6 {{
  font-family: var(--typography-font-heading);
  font-weight: var(--primitive-fontWeight-bold);
  line-height: var(--primitive-lineHeight-tight);
}}

.slide-title {{
  font-size: var(--slide-title-size);
  background: var(--primitive-gradient-primary);
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
  background-clip: text;
}}

.slide-heading {{
  font-size: var(--slide-heading-size);
  color: var(--color-foreground);
}}

.slide-subheading {{
  font-size: var(--primitive-fontSize-3xl);
  color: var(--color-foreground-secondary);
  font-weight: var(--primitive-fontWeight-medium);
}}

.slide-body {{
  font-size: var(--slide-body-size);
  color: var(--color-foreground-secondary);
  max-width: 80ch;
}}

/* Brand Colors - Primary/Secondary/Accent */
.text-primary {{ color: var(--color-primary); }}
.text-secondary {{ color: var(--color-secondary); }}
.text-accent {{ color: var(--color-accent); }}
.text-muted {{ color: var(--color-foreground-muted); }}

.bg-primary {{ background: var(--color-primary); }}
.bg-secondary {{ background: var(--color-secondary); }}
.bg-accent {{ background: var(--color-accent); }}
.bg-surface {{ background: var(--color-surface); }}

/* Cards - Using component tokens */
.card {{
  background: var(--card-bg);
  border: 1px solid var(--card-border);
  border-radius: var(--card-radius);
  padding: var(--card-padding);
  box-shadow: var(--card-shadow);
  transition: border-color var(--primitive-duration-base) var(--primitive-easing-out);
}}

.card:hover {{
  border-color: var(--card-border-hover);
}}

/* Buttons - Using component tokens */
.btn {{
  display: inline-flex;
  align-items: center;
  justify-content: center;
  padding: var(--button-primary-padding-y) var(--button-primary-padding-x);
  border-radius: var(--button-primary-radius);
  font-size: var(--button-primary-font-size);
  font-weight: var(--button-primary-font-weight);
  font-family: var(--typography-font-body);
  text-decoration: none;
  cursor: pointer;
  border: none;
  transition: all var(--primitive-duration-base) var(--primitive-easing-out);
}}

.btn-primary {{
  background: var(--button-primary-bg);
  color: var(--button-primary-fg);
  box-shadow: var(--button-primary-shadow);
}}

.btn-primary:hover {{
  background: var(--button-primary-bg-hover);
}}

.btn-secondary {{
  background: transparent;
  color: var(--color-primary);
  border: 2px solid var(--color-primary);
}}

/* Layout Utilities */
.flex {{ display: flex; }}
.flex-col {{ flex-direction: column; }}
.items-center {{ align-items: center; }}
.justify-center {{ justify-content: center; }}
.justify-between {{ justify-content: space-between; }}
.gap-4 {{ gap: var(--primitive-spacing-4); }}
.gap-6 {{ gap: var(--primitive-spacing-6); }}
.gap-8 {{ gap: var(--primitive-spacing-8); }}

.grid {{ display: grid; }}
.grid-2 {{ grid-template-columns: repeat(2, 1fr); }}
.grid-3 {{ grid-template-columns: repeat(3, 1fr); }}
.grid-4 {{ grid-template-columns: repeat(4, 1fr); }}

.text-center {{ text-align: center; }}
.mt-auto {{ margin-top: auto; }}
.mb-4 {{ margin-bottom: var(--primitive-spacing-4); }}
.mb-6 {{ margin-bottom: var(--primitive-spacing-6); }}
.mb-8 {{ margin-bottom: var(--primitive-spacing-8); }}

/* Metric Cards */
.metric {{
  text-align: center;
  padding: var(--primitive-spacing-6);
}}

.metric-value {{
  font-family: var(--typography-font-heading);
  font-size: var(--primitive-fontSize-6xl);
  font-weight: var(--primitive-fontWeight-bold);
  background: var(--primitive-gradient-primary);
  -webkit-background-clip: text;
  -webkit-text-fill-color: transparent;
  background-clip: text;
}}

.metric-label {{
  font-size: var(--primitive-fontSize-lg);
  color: var(--color-foreground-secondary);
  margin-top: var(--primitive-spacing-2);
}}

/* Feature List */
.feature-item {{
  display: flex;
  align-items: flex-start;
  gap: var(--primitive-spacing-4);
  padding: var(--primitive-spacing-4) 0;
}}

.feature-icon {{
  width: 48px;
  height: 48px;
  border-radius: var(--primitive-radius-lg);
  background: var(--color-surface-elevated);
  display: flex;
  align-items: center;
  justify-content: center;
  color: var(--color-primary);
  font-size: var(--primitive-fontSize-xl);
  flex-shrink: 0;
}}

.feature-content h4 {{
  font-size: var(--primitive-fontSize-xl);
  color: var(--color-foreground);
  margin-bottom: var(--primitive-spacing-2);
}}

.feature-content p {{
  color: var(--color-foreground-secondary);
  font-size: var(--primitive-fontSize-base);
}}

/* Testimonial */
.testimonial {{
  background: var(--color-surface);
  border-radius: var(--primitive-radius-xl);
  padding: var(--primitive-spacing-8);
  border-left: 4px solid var(--color-primary);
}}

.testimonial-quote {{
  font-size: var(--primitive-fontSize-2xl);
  color: var(--color-foreground);
  font-style: italic;
  margin-bottom: var(--primitive-spacing-6);
}}

.testimonial-author {{
  font-size: var(--primitive-fontSize-lg);
  color: var(--color-primary);
  font-weight: var(--primitive-fontWeight-semibold);
}}

.testimonial-role {{
  font-size: var(--primitive-fontSize-base);
  color: var(--color-foreground-muted);
}}

/* Badge/Tag */
.badge {{
  display: inline-block;
  padding: var(--primitive-spacing-2) var(--primitive-spacing-4);
  background: var(--color-surface-elevated);
  border-radius: var(--primitive-radius-full);
  font-size: var(--primitive-fontSize-sm);
  color: var(--color-accent);
  font-weight: var(--primitive-fontWeight-medium);
}}

/* Chart Container */
.chart-container {{
  background: var(--color-surface);
  border-radius: var(--primitive-radius-xl);
  padding: var(--primitive-spacing-6);
  height: 100%;
  display: flex;
  flex-direction: column;
}}

.chart-title {{
  font-family: var(--typography-font-heading);
  font-size: var(--primitive-fontSize-xl);
  color: var(--color-foreground);
  margin-bottom: var(--primitive-spacing-4);
}}

/* CSS-only Bar Chart */
.bar-chart {{
  display: flex;
  align-items: flex-end;
  gap: var(--primitive-spacing-4);
  height: 200px;
  padding-top: var(--primitive-spacing-4);
}}

.bar {{
  flex: 1;
  background: var(--primitive-gradient-primary);
  border-radius: var(--primitive-radius-md) var(--primitive-radius-md) 0 0;
  position: relative;
  min-width: 40px;
}}

.bar-label {{
  position: absolute;
  bottom: -30px;
  left: 50%;
  transform: translateX(-50%);
  font-size: var(--primitive-fontSize-sm);
  color: var(--color-foreground-muted);
  white-space: nowrap;
}}

.bar-value {{
  position: absolute;
  top: -25px;
  left: 50%;
  transform: translateX(-50%);
  font-size: var(--primitive-fontSize-sm);
  color: var(--color-foreground);
  font-weight: var(--primitive-fontWeight-semibold);
}}

/* Progress Bar */
.progress {{
  height: 12px;
  background: var(--color-surface-elevated);
  border-radius: var(--primitive-radius-full);
  overflow: hidden;
}}

.progress-fill {{
  height: 100%;
  background: var(--primitive-gradient-primary);
  border-radius: var(--primitive-radius-full);
}}

/* Footer */
.slide-footer {{
  margin-top: auto;
  display: flex;
  justify-content: space-between;
  align-items: center;
  padding-top: var(--primitive-spacing-6);
  border-top: 1px solid var(--color-border);
  color: var(--color-foreground-muted);
  font-size: var(--primitive-fontSize-sm);
}}

/* Glow Effects */
.glow-coral {{
  box-shadow: var(--primitive-shadow-glow-coral);
}}

.glow-purple {{
  box-shadow: var(--primitive-shadow-glow-purple);
}}

.glow-mint {{
  box-shadow: var(--primitive-shadow-glow-mint);
}}
</style>
</head>
<body>
<div class="slide-deck">
{slides_content}
</div>
</body>
</html>
'''
|
||||
|
||||
|
||||
# ============ SLIDE GENERATORS ============
|
||||
|
||||
def generate_title_slide(data):
    """Title slide with gradient headline.

    data keys (all optional, with defaults): badge, title, subtitle, cta,
    secondary_cta, company, date. Returns an HTML <section> string styled
    entirely via design-token CSS variables.
    """
    return f'''
<section class="slide slide--glow flex flex-col items-center justify-center text-center">
  <div class="badge mb-6">{data.get('badge', 'Pitch Deck')}</div>
  <h1 class="slide-title mb-6">{data.get('title', 'Your Title Here')}</h1>
  <p class="slide-subheading mb-8">{data.get('subtitle', 'Your compelling subtitle')}</p>
  <div class="flex gap-4">
    <a href="#" class="btn btn-primary">{data.get('cta', 'Get Started')}</a>
    <a href="#" class="btn btn-secondary">{data.get('secondary_cta', 'Learn More')}</a>
  </div>
  <div class="slide-footer">
    <span>{data.get('company', 'Company Name')}</span>
    <span>{data.get('date', datetime.now().strftime('%B %Y'))}</span>
  </div>
</section>
'''
|
||||
|
||||
|
||||
def generate_problem_slide(data):
    """Problem statement slide using PAS formula.

    data keys (all optional, with defaults): headline, pain_{1,2,3}_title,
    pain_{1,2,3}_desc, company, page. Returns an HTML <section> string with
    three numbered pain-point cards.
    """
    return f'''
<section class="slide slide--surface">
  <div class="badge mb-6">The Problem</div>
  <h2 class="slide-heading mb-8">{data.get('headline', 'The problem your audience faces')}</h2>
  <div class="grid grid-3 gap-8">
    <div class="card">
      <div class="text-primary" style="font-size: var(--primitive-fontSize-4xl); margin-bottom: var(--primitive-spacing-4);">01</div>
      <h4 style="margin-bottom: var(--primitive-spacing-2); font-size: var(--primitive-fontSize-xl);">{data.get('pain_1_title', 'Pain Point 1')}</h4>
      <p class="text-muted">{data.get('pain_1_desc', 'Description of the first pain point')}</p>
    </div>
    <div class="card">
      <div class="text-secondary" style="font-size: var(--primitive-fontSize-4xl); margin-bottom: var(--primitive-spacing-4);">02</div>
      <h4 style="margin-bottom: var(--primitive-spacing-2); font-size: var(--primitive-fontSize-xl);">{data.get('pain_2_title', 'Pain Point 2')}</h4>
      <p class="text-muted">{data.get('pain_2_desc', 'Description of the second pain point')}</p>
    </div>
    <div class="card">
      <div class="text-accent" style="font-size: var(--primitive-fontSize-4xl); margin-bottom: var(--primitive-spacing-4);">03</div>
      <h4 style="margin-bottom: var(--primitive-spacing-2); font-size: var(--primitive-fontSize-xl);">{data.get('pain_3_title', 'Pain Point 3')}</h4>
      <p class="text-muted">{data.get('pain_3_desc', 'Description of the third pain point')}</p>
    </div>
  </div>
  <div class="slide-footer">
    <span>{data.get('company', 'Company Name')}</span>
    <span>{data.get('page', '2')}</span>
  </div>
</section>
'''
|
||||
|
||||
|
||||
def generate_solution_slide(data):
    """Solution slide with feature highlights.

    Left column: three check-marked feature rows; right column: a
    placeholder card for a product screenshot or demo.

    Args:
        data: dict with optional keys 'headline', 'feature_{1..3}_title',
            'feature_{1..3}_desc', 'company', 'page'.

    Returns:
        HTML string for one <section> element.
    """
    return f'''
    <section class="slide">
        <div class="badge mb-6">The Solution</div>
        <h2 class="slide-heading mb-8">{data.get('headline', 'How we solve this')}</h2>
        <div class="flex gap-8" style="flex: 1;">
            <div style="flex: 1;">
                <div class="feature-item">
                    <div class="feature-icon">✓</div>
                    <div class="feature-content">
                        <h4>{data.get('feature_1_title', 'Feature 1')}</h4>
                        <p>{data.get('feature_1_desc', 'Description of feature 1')}</p>
                    </div>
                </div>
                <div class="feature-item">
                    <div class="feature-icon">✓</div>
                    <div class="feature-content">
                        <h4>{data.get('feature_2_title', 'Feature 2')}</h4>
                        <p>{data.get('feature_2_desc', 'Description of feature 2')}</p>
                    </div>
                </div>
                <div class="feature-item">
                    <div class="feature-icon">✓</div>
                    <div class="feature-content">
                        <h4>{data.get('feature_3_title', 'Feature 3')}</h4>
                        <p>{data.get('feature_3_desc', 'Description of feature 3')}</p>
                    </div>
                </div>
            </div>
            <div style="flex: 1;" class="card flex items-center justify-center">
                <div class="text-center">
                    <div class="text-accent" style="font-size: 80px; margin-bottom: var(--primitive-spacing-4);">◆</div>
                    <p class="text-muted">Product screenshot or demo</p>
                </div>
            </div>
        </div>
        <div class="slide-footer">
            <span>{data.get('company', 'Company Name')}</span>
            <span>{data.get('page', '3')}</span>
        </div>
    </section>
    '''
|
||||
|
||||
|
||||
def generate_metrics_slide(data):
    """Traction/metrics slide with large numbers.

    Renders up to four metric cards in a 4-column grid; entries beyond
    four are dropped to preserve the layout.

    Args:
        data: dict with optional keys 'headline', 'company', 'page' and
            'metrics' — a list of {'value': str, 'label': str} dicts.

    Returns:
        HTML string for one <section> element.
    """
    metrics = data.get('metrics', [
        {'value': '10K+', 'label': 'Active Users'},
        {'value': '95%', 'label': 'Retention Rate'},
        {'value': '3x', 'label': 'Revenue Growth'},
        {'value': '$2M', 'label': 'ARR'}
    ])

    # Use .get() so a malformed metric entry renders a placeholder instead
    # of raising KeyError and aborting the whole deck build.
    metrics_html = ''.join([f'''
    <div class="card metric">
        <div class="metric-value">{m.get('value', 'N/A')}</div>
        <div class="metric-label">{m.get('label', '')}</div>
    </div>
    ''' for m in metrics[:4]])

    return f'''
    <section class="slide slide--surface slide--glow">
        <div class="badge mb-6">Traction</div>
        <h2 class="slide-heading mb-8 text-center">{data.get('headline', 'Our Growth')}</h2>
        <div class="grid grid-4 gap-6" style="flex: 1; align-items: center;">
            {metrics_html}
        </div>
        <div class="slide-footer">
            <span>{data.get('company', 'Company Name')}</span>
            <span>{data.get('page', '4')}</span>
        </div>
    </section>
    '''
|
||||
|
||||
|
||||
def generate_chart_slide(data):
    """Chart slide with a pure-CSS bar chart.

    Bar heights are percentages of the chart area, so each bar's 'value'
    should already be normalized to 0-100. 'display' overrides the printed
    value label (defaults to "<value>%").

    Args:
        data: dict with optional keys 'badge', 'headline', 'chart_title',
            'company', 'page' and 'bars' — a list of
            {'label': str, 'value': int, 'display': str} dicts.

    Returns:
        HTML string for one <section> element.
    """
    bars = data.get('bars', [
        {'label': 'Q1', 'value': 40},
        {'label': 'Q2', 'value': 60},
        {'label': 'Q3', 'value': 80},
        {'label': 'Q4', 'value': 100}
    ])

    bars_html = ''.join([f'''
    <div class="bar" style="height: {b['value']}%;">
        <span class="bar-value">{b.get('display', str(b['value']) + '%')}</span>
        <span class="bar-label">{b['label']}</span>
    </div>
    ''' for b in bars])

    return f'''
    <section class="slide">
        <div class="badge mb-6">{data.get('badge', 'Growth')}</div>
        <h2 class="slide-heading mb-8">{data.get('headline', 'Revenue Growth')}</h2>
        <div class="chart-container" style="flex: 1;">
            <div class="chart-title">{data.get('chart_title', 'Quarterly Revenue')}</div>
            <div class="bar-chart" style="flex: 1; padding-bottom: 40px;">
                {bars_html}
            </div>
        </div>
        <div class="slide-footer">
            <span>{data.get('company', 'Company Name')}</span>
            <span>{data.get('page', '5')}</span>
        </div>
    </section>
    '''
|
||||
|
||||
|
||||
def generate_testimonial_slide(data):
    """Social proof slide: a single centered quote with attribution.

    Args:
        data: dict with optional keys 'quote', 'author', 'role',
            'company', 'page'.

    Returns:
        HTML string for one <section> element.
    """
    return f'''
    <section class="slide slide--surface flex flex-col justify-center">
        <div class="badge mb-6">What They Say</div>
        <div class="testimonial" style="max-width: 900px;">
            <p class="testimonial-quote">"{data.get('quote', 'This product changed how we work. Incredible results.')}"</p>
            <p class="testimonial-author">{data.get('author', 'Jane Doe')}</p>
            <p class="testimonial-role">{data.get('role', 'CEO, Example Company')}</p>
        </div>
        <div class="slide-footer">
            <span>{data.get('company', 'Company Name')}</span>
            <span>{data.get('page', '6')}</span>
        </div>
    </section>
    '''
|
||||
|
||||
|
||||
def generate_cta_slide(data):
    """Closing CTA slide: gradient background, headline, single button.

    The footer shows contact/website instead of the usual company/page
    pair used by the other slides.

    Args:
        data: dict with optional keys 'headline', 'subheadline', 'cta',
            'cta_url', 'contact', 'website'.

    Returns:
        HTML string for one <section> element.
    """
    return f'''
    <section class="slide slide--gradient flex flex-col items-center justify-center text-center">
        <h2 class="slide-heading mb-6" style="color: var(--color-foreground);">{data.get('headline', 'Ready to get started?')}</h2>
        <p class="slide-body mb-8" style="color: rgba(255,255,255,0.8);">{data.get('subheadline', 'Join thousands of teams already using our solution.')}</p>
        <div class="flex gap-4">
            <a href="{data.get('cta_url', '#')}" class="btn" style="background: var(--color-foreground); color: var(--color-primary);">{data.get('cta', 'Start Free Trial')}</a>
        </div>
        <div class="slide-footer" style="border-color: rgba(255,255,255,0.2); color: rgba(255,255,255,0.6);">
            <span>{data.get('contact', 'contact@example.com')}</span>
            <span>{data.get('website', 'www.example.com')}</span>
        </div>
    </section>
    '''
|
||||
|
||||
|
||||
# Slide type mapping
# Maps the 'type' field of a slide dict to its generator function.
# 'traction' and 'closing' are aliases for the metrics and CTA slides.
SLIDE_GENERATORS = {
    'title': generate_title_slide,
    'problem': generate_problem_slide,
    'solution': generate_solution_slide,
    'metrics': generate_metrics_slide,
    'traction': generate_metrics_slide,
    'chart': generate_chart_slide,
    'testimonial': generate_testimonial_slide,
    'cta': generate_cta_slide,
    'closing': generate_cta_slide
}
|
||||
|
||||
|
||||
def generate_deck(slides_data, title="Pitch Deck"):
    """Generate complete deck from slide data list.

    Args:
        slides_data: list of slide dicts; each selects its generator via
            its 'type' key (defaults to 'title'). Unknown types are
            skipped with a console warning rather than raising.
        title: document <title> for the generated HTML.

    Returns:
        Full HTML document string (SLIDE_TEMPLATE with slides injected).
    """
    slides_html = ""
    for slide in slides_data:
        slide_type = slide.get('type', 'title')
        generator = SLIDE_GENERATORS.get(slide_type)
        if generator:
            slides_html += generator(slide)
        else:
            print(f"Warning: Unknown slide type '{slide_type}'")

    # Calculate relative path to tokens CSS
    # NOTE(review): hardcoded three-levels-up path — assumes the output
    # file lands exactly 3 directories below the project root; confirm
    # against OUTPUT_DIR before relocating generated decks.
    tokens_rel_path = "../../../assets/design-tokens.css"

    return SLIDE_TEMPLATE.format(
        title=title,
        tokens_css_path=tokens_rel_path,
        slides_content=slides_html
    )
|
||||
|
||||
|
||||
def main():
    """CLI entry point: build a demo deck (--demo) or a deck from a JSON
    spec (--json), writing the HTML next to OUTPUT_DIR or to --output.

    Exits via parser.print_help() when neither mode is selected.
    """
    parser = argparse.ArgumentParser(description="Generate brand-compliant slides")
    parser.add_argument("--json", "-j", help="JSON file with slide data")
    parser.add_argument("--output", "-o", help="Output HTML file path")
    parser.add_argument("--demo", action="store_true", help="Generate demo deck")

    args = parser.parse_args()

    if args.demo:
        # Demo deck showcasing all slide types
        demo_slides = [
            {
                'type': 'title',
                'badge': 'Investor Deck 2024',
                'title': 'ClaudeKit Marketing',
                'subtitle': 'Your AI marketing team. Always on.',
                'cta': 'Join Waitlist',
                'secondary_cta': 'See Demo',
                'company': 'ClaudeKit',
                'date': 'December 2024'
            },
            {
                'type': 'problem',
                'headline': 'Marketing teams are drowning',
                'pain_1_title': 'Content Overload',
                'pain_1_desc': 'Need to produce 10x content with same headcount',
                'pain_2_title': 'Tool Fatigue',
                'pain_2_desc': '15+ tools that don\'t talk to each other',
                'pain_3_title': 'No Time to Think',
                'pain_3_desc': 'Strategy suffers when execution consumes all hours',
                'company': 'ClaudeKit',
                'page': '2'
            },
            {
                'type': 'solution',
                'headline': 'AI agents that actually get marketing',
                'feature_1_title': 'Content Creation',
                'feature_1_desc': 'Blog posts, social, email - all on brand, all on time',
                'feature_2_title': 'Campaign Management',
                'feature_2_desc': 'Multi-channel orchestration with one command',
                'feature_3_title': 'Analytics & Insights',
                'feature_3_desc': 'Real-time optimization without the spreadsheets',
                'company': 'ClaudeKit',
                'page': '3'
            },
            {
                'type': 'metrics',
                'headline': 'Early traction speaks volumes',
                'metrics': [
                    {'value': '500+', 'label': 'Beta Users'},
                    {'value': '85%', 'label': 'Weekly Active'},
                    {'value': '4.9', 'label': 'NPS Score'},
                    {'value': '50hrs', 'label': 'Saved/Week'}
                ],
                'company': 'ClaudeKit',
                'page': '4'
            },
            {
                'type': 'chart',
                'badge': 'Revenue',
                'headline': 'Growing month over month',
                'chart_title': 'MRR Growth ($K)',
                'bars': [
                    {'label': 'Sep', 'value': 20, 'display': '$5K'},
                    {'label': 'Oct', 'value': 40, 'display': '$12K'},
                    {'label': 'Nov', 'value': 70, 'display': '$28K'},
                    {'label': 'Dec', 'value': 100, 'display': '$45K'}
                ],
                'company': 'ClaudeKit',
                'page': '5'
            },
            {
                'type': 'testimonial',
                'quote': 'ClaudeKit replaced 3 tools and 2 contractors. Our content output tripled while costs dropped 60%.',
                'author': 'Sarah Chen',
                'role': 'Head of Marketing, TechStartup',
                'company': 'ClaudeKit',
                'page': '6'
            },
            {
                'type': 'cta',
                'headline': 'Ship campaigns while you sleep',
                'subheadline': 'Early access available. Limited spots.',
                'cta': 'Join the Waitlist',
                'contact': 'hello@claudekit.ai',
                'website': 'claudekit.ai'
            }
        ]

        html = generate_deck(demo_slides, "ClaudeKit Marketing - Pitch Deck")

        OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
        output_path = OUTPUT_DIR / f"demo-pitch-{datetime.now().strftime('%y%m%d')}.html"
        output_path.write_text(html, encoding='utf-8')
        print(f"Demo deck generated: {output_path}")

    elif args.json:
        # Read the spec as UTF-8 explicitly: open() defaults to the
        # platform encoding, which breaks non-ASCII slide copy on Windows.
        with open(args.json, 'r', encoding='utf-8') as f:
            data = json.load(f)

        html = generate_deck(data.get('slides', []), data.get('title', 'Presentation'))

        output_path = Path(args.output) if args.output else OUTPUT_DIR / f"deck-{datetime.now().strftime('%y%m%d-%H%M')}.html"
        output_path.parent.mkdir(parents=True, exist_ok=True)
        output_path.write_text(html, encoding='utf-8')
        print(f"Deck generated: {output_path}")

    else:
        parser.print_help()
|
||||
|
||||
|
||||
# CLI entry point.
if __name__ == "__main__":
    main()
|
||||
205
skills/design-system/scripts/generate-tokens.cjs
Normal file
205
skills/design-system/scripts/generate-tokens.cjs
Normal file
@@ -0,0 +1,205 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Generate CSS variables from design tokens JSON
|
||||
*
|
||||
* Usage:
|
||||
* node generate-tokens.cjs --config tokens.json -o tokens.css
|
||||
* node generate-tokens.cjs --config tokens.json --format tailwind
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
/**
|
||||
* Parse command line arguments
|
||||
*/
|
||||
/**
 * Parse command line arguments.
 *
 * Recognized flags: --config/-c (input JSON), --output/-o (output file),
 * --format/-f (css | tailwind), --help/-h (print usage and exit).
 *
 * @returns {{config: ?string, output: ?string, format: string}}
 */
function parseArgs() {
  const args = process.argv.slice(2);
  const options = {
    config: null,
    output: null,
    format: 'css' // css | tailwind
  };

  for (let i = 0; i < args.length; i++) {
    if (args[i] === '--config' || args[i] === '-c') {
      options.config = args[++i];
    } else if (args[i] === '--output' || args[i] === '-o') {
      options.output = args[++i];
    } else if (args[i] === '--format' || args[i] === '-f') {
      options.format = args[++i];
    } else if (args[i] === '--help' || args[i] === '-h') {
      console.log(`
Usage: node generate-tokens.cjs [options]

Options:
  -c, --config <file>   Input JSON token file (required)
  -o, --output <file>   Output file (default: stdout)
  -f, --format <type>   Output format: css | tailwind (default: css)
  -h, --help            Show this help
`);
      process.exit(0);
    }
  }

  // Reject unknown formats up front; previously anything other than
  // 'tailwind' silently fell through to CSS output in main().
  if (options.format !== 'css' && options.format !== 'tailwind') {
    console.error(`Error: Unknown format '${options.format}' (expected css | tailwind)`);
    process.exit(1);
  }

  return options;
}
|
||||
|
||||
/**
|
||||
* Resolve token references like {primitive.color.blue.600}
|
||||
*/
|
||||
/**
 * Resolve token references like {primitive.color.blue.600}.
 *
 * Non-reference values are returned untouched. Chained references are
 * followed recursively. An unresolvable path returns the original string.
 *
 * @param {*} value - Token value, possibly a "{dot.path}" reference.
 * @param {object} tokens - Full token tree used for lookups.
 * @returns {*} The resolved value.
 */
function resolveReference(value, tokens) {
  if (typeof value !== 'string' || !value.startsWith('{')) {
    return value;
  }

  const refPath = value.slice(1, -1).split('.');
  let node = tokens;

  for (const key of refPath) {
    node = node?.[key];
  }

  // Explicit undefined check: the old `if (result?.$value)` followed by
  // `result || value` dropped falsy token values (0, '', false) and
  // returned the raw token object or the reference string instead.
  if (node && node.$value !== undefined) {
    return resolveReference(node.$value, tokens);
  }

  return node ?? value;
}
|
||||
|
||||
/**
|
||||
* Convert token name to CSS variable name
|
||||
*/
|
||||
/**
 * Convert a token path (array of segments) to a CSS custom-property name.
 * Segments are dash-joined and any embedded dots become dashes too.
 *
 * @param {string[]} segments - Token path, e.g. ['primitive','color','blue.600'].
 * @returns {string} CSS variable name, e.g. '--primitive-color-blue-600'.
 */
function toCssVarName(segments) {
  const joined = segments.join('-').replace(/\./g, '-');
  return `--${joined}`;
}
|
||||
|
||||
/**
|
||||
* Flatten tokens into CSS variables
|
||||
*/
|
||||
/**
 * Flatten a (possibly nested) token tree into a flat map of
 * CSS-variable-name → resolved value.
 *
 * A node carrying a `$value` key is a leaf token; any other object is a
 * group and is recursed into. Non-object values are ignored.
 *
 * @param {object} obj - Subtree to flatten.
 * @param {object} tokens - Full token tree (for reference resolution).
 * @param {string[]} [prefix] - Path accumulated so far.
 * @param {object} [result] - Accumulator, mutated and returned.
 * @returns {object} Map of CSS variable names to resolved values.
 */
function flattenTokens(obj, tokens, prefix = [], result = {}) {
  for (const key of Object.keys(obj)) {
    const node = obj[key];
    if (!node || typeof node !== 'object') continue;

    const nodePath = prefix.concat(key);
    if (node.$value !== undefined) {
      // Leaf token: record under its CSS variable name.
      result[toCssVarName(nodePath)] = resolveReference(node.$value, tokens);
    } else {
      // Group: descend with the extended path.
      flattenTokens(node, tokens, nodePath, result);
    }
  }

  return result;
}
|
||||
|
||||
/**
|
||||
* Generate CSS output
|
||||
*/
|
||||
/**
 * Generate CSS output.
 *
 * Emits three :root blocks (primitives, semantic, component) plus an
 * optional `.dark` override block when `tokens.dark.semantic` exists.
 * Primitive variables are prefixed with `primitive-`; semantic and
 * component variables keep their own names.
 *
 * @param {object} tokens - Parsed design-tokens JSON tree.
 * @returns {string} Complete CSS file contents.
 */
function generateCSS(tokens) {
  const primitive = flattenTokens(tokens.primitive || {}, tokens, ['primitive']);
  const semantic = flattenTokens(tokens.semantic || {}, tokens, []);
  const component = flattenTokens(tokens.component || {}, tokens, []);
  const darkSemantic = flattenTokens(tokens.dark?.semantic || {}, tokens, []);

  let css = `/* Design Tokens - Auto-generated */
/* Do not edit directly - modify tokens.json instead */

/* === PRIMITIVES === */
:root {
${Object.entries(primitive).map(([k, v]) => `  ${k}: ${v};`).join('\n')}
}

/* === SEMANTIC === */
:root {
${Object.entries(semantic).map(([k, v]) => `  ${k}: ${v};`).join('\n')}
}

/* === COMPONENTS === */
:root {
${Object.entries(component).map(([k, v]) => `  ${k}: ${v};`).join('\n')}
}
`;

  if (Object.keys(darkSemantic).length > 0) {
    css += `
/* === DARK MODE === */
.dark {
${Object.entries(darkSemantic).map(([k, v]) => `  ${k}: ${v};`).join('\n')}
}
`;
  }

  return css;
}
|
||||
|
||||
/**
|
||||
* Generate Tailwind config output
|
||||
*/
|
||||
/**
 * Generate Tailwind config output.
 *
 * Collects semantic `--color-*` variables into a Tailwind `colors` map
 * whose values reference the CSS variables (so the config tracks the
 * token file automatically).
 *
 * @param {object} tokens - Parsed design-tokens JSON tree.
 * @returns {string} CommonJS module source for tailwind.config.
 */
function generateTailwind(tokens) {
  const semantic = flattenTokens(tokens.semantic || {}, tokens, []);

  // Extract colors for Tailwind. Only variables with the exact
  // `--color-` prefix qualify: the previous `key.includes('color')`
  // also matched unrelated vars (e.g. --border-color-*) and then
  // stripped the wrong prefix, producing broken names.
  const colors = {};
  for (const key of Object.keys(semantic)) {
    if (key.startsWith('--color-')) {
      const name = key.slice('--color-'.length).replace(/-/g, '.');
      colors[name] = `var(${key})`;
    }
  }

  return `// Tailwind color config - Auto-generated
// Add to tailwind.config.ts theme.extend.colors

module.exports = {
  colors: ${JSON.stringify(colors, null, 2).replace(/"/g, "'")}
};
`;
}
|
||||
|
||||
/**
|
||||
* Main
|
||||
*/
|
||||
/**
 * Main: parse CLI args, load the token JSON, and emit CSS or a Tailwind
 * config. Writes to --output when given (creating parent directories),
 * otherwise prints to stdout. Exits with code 1 on missing/unreadable
 * config.
 */
function main() {
  const options = parseArgs();

  if (!options.config) {
    console.error('Error: --config is required');
    process.exit(1);
  }

  // Resolve config path relative to the invocation directory.
  const configPath = path.resolve(process.cwd(), options.config);

  if (!fs.existsSync(configPath)) {
    console.error(`Error: Config file not found: ${configPath}`);
    process.exit(1);
  }

  // Read and parse tokens (throws on malformed JSON, surfacing the error).
  const tokens = JSON.parse(fs.readFileSync(configPath, 'utf-8'));

  // Generate output in the requested format.
  let output;
  if (options.format === 'tailwind') {
    output = generateTailwind(tokens);
  } else {
    output = generateCSS(tokens);
  }

  // Write output to file or stdout.
  if (options.output) {
    const outputPath = path.resolve(process.cwd(), options.output);
    fs.mkdirSync(path.dirname(outputPath), { recursive: true });
    fs.writeFileSync(outputPath, output);
    console.log(`Generated: ${outputPath}`);
  } else {
    console.log(output);
  }
}

main();
|
||||
327
skills/design-system/scripts/html-token-validator.py
Normal file
327
skills/design-system/scripts/html-token-validator.py
Normal file
@@ -0,0 +1,327 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
HTML Design Token Validator
|
||||
Ensures all HTML assets (slides, infographics, etc.) use design tokens.
|
||||
Source of truth: assets/design-tokens.css
|
||||
|
||||
Usage:
|
||||
python html-token-validator.py # Validate all HTML assets
|
||||
python html-token-validator.py --type slides # Validate only slides
|
||||
python html-token-validator.py --type infographics # Validate only infographics
|
||||
python html-token-validator.py path/to/file.html # Validate specific file
|
||||
python html-token-validator.py --fix # Auto-fix issues (WIP)
|
||||
"""
|
||||
|
||||
import re
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Tuple, Optional
|
||||
|
||||
# Project root relative to this script
|
||||
# Project root relative to this script
PROJECT_ROOT = Path(__file__).parent.parent.parent.parent.parent
TOKENS_JSON_PATH = PROJECT_ROOT / 'assets' / 'design-tokens.json'
TOKENS_CSS_PATH = PROJECT_ROOT / 'assets' / 'design-tokens.css'

# Asset directories to validate
ASSET_DIRS = {
    'slides': PROJECT_ROOT / 'assets' / 'designs' / 'slides',
    'infographics': PROJECT_ROOT / 'assets' / 'infographics',
}

# Patterns that indicate hardcoded values (should use tokens).
# Note on fonts: a quoted font-family value is always hardcoded —
# `font-family: var(...)` never starts with a quote — so no var()
# exclusion is needed. The previous patterns ([^v][^a][^r] plus a
# mandatory trailing comma) missed single-font declarations like
# `font-family: 'Inter';` and skipped any family name whose characters
# happened to collide with v/a/r in those positions.
FORBIDDEN_PATTERNS = [
    (r'#[0-9A-Fa-f]{3,8}\b', 'hex color'),
    (r'rgb\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*\)', 'rgb color'),
    (r'rgba\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*,\s*[\d.]+\s*\)', 'rgba color'),
    (r'hsl\([^)]+\)', 'hsl color'),
    (r"font-family:\s*'[^']+'", 'hardcoded font'),
    (r'font-family:\s*"[^"]+"', 'hardcoded font'),
]

# Allowed rgba patterns (brand colors with transparency - CSS limitation)
# These are derived from brand tokens but need rgba for transparency
ALLOWED_RGBA_PATTERNS = [
    r'rgba\(\s*59\s*,\s*130\s*,\s*246',   # --color-primary (#3B82F6)
    r'rgba\(\s*245\s*,\s*158\s*,\s*11',   # --color-secondary (#F59E0B)
    r'rgba\(\s*16\s*,\s*185\s*,\s*129',   # --color-accent (#10B981)
    r'rgba\(\s*20\s*,\s*184\s*,\s*166',   # --color-accent alt (#14B8A6)
    r'rgba\(\s*0\s*,\s*0\s*,\s*0',        # black transparency (common)
    r'rgba\(\s*255\s*,\s*255\s*,\s*255',  # white transparency (common)
    r'rgba\(\s*15\s*,\s*23\s*,\s*42',     # --color-surface (#0F172A)
    r'rgba\(\s*7\s*,\s*11\s*,\s*20',      # --color-background (#070B14)
]

# Allowed exceptions (external images, etc.)
ALLOWED_EXCEPTIONS = [
    'pexels.com', 'unsplash.com', 'youtube.com', 'ytimg.com',
    'googlefonts', 'fonts.googleapis.com', 'fonts.gstatic.com',
]
|
||||
|
||||
|
||||
class ValidationResult:
    """Accumulates validation findings for a single HTML file.

    Attributes:
        file_path: the file this result describes.
        errors: hard failures (token-compliance violations).
        warnings: informational notes that do not fail the file.
        passed: True until the first error is recorded.
    """

    def __init__(self, file_path: Path):
        self.file_path = file_path
        self.errors: List[str] = []
        self.warnings: List[str] = []
        self.passed = True

    def add_error(self, msg: str):
        """Record a failure and mark the file as not passing."""
        self.passed = False
        self.errors.append(msg)

    def add_warning(self, msg: str):
        """Record a non-fatal note; does not affect pass/fail."""
        self.warnings.append(msg)
|
||||
|
||||
|
||||
def load_css_variables() -> Dict[str, str]:
    """Parse `--name: value;` declarations out of design-tokens.css.

    Returns:
        Mapping of CSS variable name to its (stripped) value; empty when
        the tokens file does not exist. Later duplicates win.
    """
    if not TOKENS_CSS_PATH.exists():
        return {}
    css = TOKENS_CSS_PATH.read_text()
    return {
        m.group(1): m.group(2).strip()
        for m in re.finditer(r'(--[\w-]+):\s*([^;]+);', css)
    }
|
||||
|
||||
|
||||
def is_inside_block(content: str, match_pos: int, open_tag: str, close_tag: str) -> bool:
    """Whether `match_pos` falls inside an open HTML block.

    Looks backwards from the position: True when the most recent
    `open_tag` occurrence comes after the most recent `close_tag`
    (rfind's -1 for "not found" makes the comparison work out).
    """
    before = content[:match_pos]
    return before.rfind(open_tag) > before.rfind(close_tag)
|
||||
|
||||
|
||||
def is_allowed_exception(context: str) -> bool:
    """Whether the surrounding text mentions a whitelisted external
    source (image CDNs, Google Fonts, ...) where hardcoded values
    are acceptable."""
    lowered = context.lower()
    for marker in ALLOWED_EXCEPTIONS:
        if marker in lowered:
            return True
    return False
|
||||
|
||||
|
||||
def is_allowed_rgba(match_text: str) -> bool:
    """Whether an rgba() literal starts with one of the whitelisted
    brand-color channel triples (transparency requires rgba, so these
    are permitted)."""
    for pattern in ALLOWED_RGBA_PATTERNS:
        if re.match(pattern, match_text):
            return True
    return False
|
||||
|
||||
|
||||
def get_context(content: str, pos: int, chars: int = 100) -> str:
    """Return up to `chars` characters on either side of `pos`,
    clamped to the string bounds."""
    lo = max(0, pos - chars)
    hi = min(len(content), pos + chars)
    return content[lo:hi]
|
||||
|
||||
|
||||
def validate_html(content: str, file_path: Path, verbose: bool = False) -> ValidationResult:
    """
    Validate HTML content for design token compliance.

    Checks:
    1. design-tokens.css import present
    2. No hardcoded colors in CSS (except in <script> for Chart.js)
    3. No hardcoded fonts
    4. Uses var(--token-name) pattern

    Matches outside <style> blocks and inline style= attributes are
    ignored entirely; allowed exceptions (external URLs, whitelisted
    brand rgba, var() fallbacks) are skipped, optionally surfacing as
    warnings when `verbose` is set.
    """
    result = ValidationResult(file_path)

    # 1. Check for design-tokens.css import
    if 'design-tokens.css' not in content:
        result.add_error("Missing design-tokens.css import")

    # 2. Check for forbidden patterns in CSS
    for pattern, description in FORBIDDEN_PATTERNS:
        for match in re.finditer(pattern, content):
            match_text = match.group()
            match_pos = match.start()
            context = get_context(content, match_pos)

            # Skip if in <script> block (Chart.js allowed)
            if is_inside_block(content, match_pos, '<script', '</script>'):
                if verbose:
                    result.add_warning(f"Allowed in <script>: {match_text}")
                continue

            # Skip if in allowed exception context (external URLs)
            if is_allowed_exception(context):
                if verbose:
                    result.add_warning(f"Allowed external: {match_text}")
                continue

            # Skip rgba using brand colors (needed for transparency effects)
            if description == 'rgba color' and is_allowed_rgba(match_text):
                if verbose:
                    result.add_warning(f"Allowed brand rgba: {match_text}")
                continue

            # Skip if part of var() reference (false positive)
            if 'var(' in context and match_text in context:
                # Check if it's a fallback value in var()
                var_pattern = rf'var\([^)]*{re.escape(match_text)}[^)]*\)'
                if re.search(var_pattern, context):
                    continue

            # Error if in <style> or inline style
            # (a match in plain markup/text falls through silently)
            if is_inside_block(content, match_pos, '<style', '</style>'):
                result.add_error(f"Hardcoded {description} in <style>: {match_text}")
            elif 'style="' in context:
                result.add_error(f"Hardcoded {description} in inline style: {match_text}")

    # 3. Check for required var() usage indicators
    token_patterns = [
        r'var\(--color-',
        r'var\(--primitive-',
        r'var\(--typography-',
        r'var\(--card-',
        r'var\(--button-',
    ]
    token_count = sum(len(re.findall(p, content)) for p in token_patterns)

    # Heuristic floor: fewer than 5 var() references suggests the file
    # is not actually styled via tokens. Warning only, never an error.
    if token_count < 5:
        result.add_warning(f"Low token usage ({token_count} var() references). Consider using more design tokens.")

    return result
|
||||
|
||||
|
||||
def validate_file(file_path: Path, verbose: bool = False) -> ValidationResult:
    """Validate a single HTML file.

    Returns a failing ValidationResult (rather than raising) when the
    file does not exist.
    """
    if not file_path.exists():
        result = ValidationResult(file_path)
        result.add_error("File not found")
        return result

    # Read as UTF-8 explicitly: read_text() defaults to the platform
    # encoding, which breaks on Windows for files containing the emoji
    # and box-drawing characters these assets use.
    content = file_path.read_text(encoding='utf-8')
    return validate_html(content, file_path, verbose)
|
||||
|
||||
|
||||
def validate_directory(dir_path: Path, verbose: bool = False) -> List[ValidationResult]:
    """Validate every *.html file directly inside `dir_path`.

    Files are processed in sorted order; a missing directory yields an
    empty list.
    """
    if not dir_path.exists():
        return []
    return [
        validate_file(html_file, verbose)
        for html_file in sorted(dir_path.glob('*.html'))
    ]
|
||||
|
||||
|
||||
def print_result(result: ValidationResult, verbose: bool = False):
    """Print one file's pass/fail line plus its first few errors and,
    when verbose, its first few warnings. Output is truncated to keep
    the report readable."""
    marker = "✓" if result.passed else "✗"
    print(f" {marker} {result.file_path.name}")

    shown = result.errors[:5]  # Limit output
    for error in shown:
        print(f" ├─ {error}")
    hidden = len(result.errors) - len(shown)
    if hidden > 0:
        print(f" └─ ... and {hidden} more errors")

    if verbose:
        for warning in result.warnings[:3]:
            print(f" [warn] {warning}")
|
||||
|
||||
|
||||
def print_summary(all_results: Dict[str, List[ValidationResult]]):
    """Print a per-asset-type and overall summary of validation results.

    Only failing files are detailed (via print_result); asset types with
    no results are skipped.

    Returns:
        True when no file produced any error, False otherwise.
    """
    total_files = 0
    total_passed = 0
    total_errors = 0

    print("\n" + "=" * 60)
    print("HTML DESIGN TOKEN VALIDATION SUMMARY")
    print("=" * 60)

    for asset_type, results in all_results.items():
        if not results:
            continue

        passed = sum(1 for r in results if r.passed)
        failed = len(results) - passed
        errors = sum(len(r.errors) for r in results)

        total_files += len(results)
        total_passed += passed
        total_errors += errors

        status = "✓" if failed == 0 else "✗"
        print(f"\n{status} {asset_type.upper()}: {passed}/{len(results)} passed")

        # Detail only the failures; passing files stay one summary line.
        for result in results:
            if not result.passed:
                print_result(result)

    print("\n" + "-" * 60)
    if total_errors == 0:
        print(f"✓ ALL PASSED: {total_passed}/{total_files} files valid")
    else:
        print(f"✗ FAILED: {total_files - total_passed}/{total_files} files have issues ({total_errors} total errors)")
    print("-" * 60)

    return total_errors == 0
|
||||
|
||||
|
||||
def main():
    """CLI entry point.

    Modes:
      --colors           print the variables parsed from design-tokens.css
      <files...>         validate the named HTML files
      --type <t>         validate the configured directory for that type
      (default)          validate every configured asset directory

    Exits with status 1 when any validated file has errors.
    """
    import argparse

    parser = argparse.ArgumentParser(
        description='Validate HTML assets for design token compliance',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  %(prog)s                          # Validate all HTML assets
  %(prog)s --type slides           # Validate only slides
  %(prog)s --type infographics     # Validate only infographics
  %(prog)s path/to/file.html       # Validate specific file
  %(prog)s --colors                # Show brand colors from tokens
"""
    )
    parser.add_argument('files', nargs='*', help='Specific HTML files to validate')
    parser.add_argument('-t', '--type', choices=['slides', 'infographics', 'all'],
                        default='all', help='Asset type to validate')
    parser.add_argument('-v', '--verbose', action='store_true', help='Show warnings')
    parser.add_argument('--colors', action='store_true', help='Print CSS variables from tokens')
    parser.add_argument('--fix', action='store_true', help='Auto-fix issues (experimental)')

    args = parser.parse_args()

    # Show colors mode: dump (up to 30) parsed variables and return early.
    if args.colors:
        variables = load_css_variables()
        print("\nDesign Tokens (from design-tokens.css):")
        print("-" * 40)
        for name, value in sorted(variables.items())[:30]:
            print(f"  {name}: {value}")
        if len(variables) > 30:
            print(f"  ... and {len(variables) - 30} more")
        return

    all_results: Dict[str, List[ValidationResult]] = {}

    # Validate specific files when any were given on the command line.
    if args.files:
        results = []
        for file_path in args.files:
            path = Path(file_path)
            if path.exists():
                results.append(validate_file(path, args.verbose))
            else:
                result = ValidationResult(path)
                result.add_error("File not found")
                results.append(result)
        all_results['specified'] = results
    else:
        # Validate by type (all configured directories, or just one).
        types_to_check = ASSET_DIRS.keys() if args.type == 'all' else [args.type]

        for asset_type in types_to_check:
            if asset_type in ASSET_DIRS:
                results = validate_directory(ASSET_DIRS[asset_type], args.verbose)
                all_results[asset_type] = results

    # Print results and exit non-zero on any failure.
    success = print_summary(all_results)

    if not success:
        sys.exit(1)
|
||||
|
||||
|
||||
# CLI entry point.
if __name__ == '__main__':
    main()
|
||||
218
skills/design-system/scripts/search-slides.py
Executable file
218
skills/design-system/scripts/search-slides.py
Executable file
@@ -0,0 +1,218 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Slide Search CLI - Search slide design databases for strategies, layouts, copy, and charts
|
||||
"""
|
||||
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
from slide_search_core import (
|
||||
search, search_all, AVAILABLE_DOMAINS,
|
||||
search_with_context, get_layout_for_goal, get_typography_for_slide,
|
||||
get_color_for_emotion, get_background_config
|
||||
)
|
||||
|
||||
|
||||
def format_result(result, domain):
    """Format a single search result for display.

    Each domain renders a bold title line followed by labelled fields;
    missing fields print as 'N/A'. Unknown domains yield an empty string.
    """
    # Per-domain display spec: (title key, [(label, result key), ...]).
    field_map = {
        "strategy": ("strategy_name", [
            ("Slides", "slide_count"),
            ("Structure", "structure"),
            ("Goal", "goal"),
            ("Audience", "audience"),
            ("Tone", "tone"),
            ("Arc", "narrative_arc"),
            ("Source", "sources"),
        ]),
        "layout": ("layout_name", [
            ("Use case", "use_case"),
            ("Zones", "content_zones"),
            ("Visual weight", "visual_weight"),
            ("CTA", "cta_placement"),
            ("Recommended", "recommended_for"),
            ("Avoid", "avoid_for"),
            ("CSS", "css_structure"),
        ]),
        "copy": ("formula_name", [
            ("Components", "components"),
            ("Use case", "use_case"),
            ("Template", "example_template"),
            ("Emotion", "emotion_trigger"),
            ("Slide type", "slide_type"),
            ("Source", "source"),
        ]),
        "chart": ("chart_type", [
            ("Best for", "best_for"),
            ("Data type", "data_type"),
            ("When to use", "when_to_use"),
            ("When to avoid", "when_to_avoid"),
            ("Max categories", "max_categories"),
            ("Slide context", "slide_context"),
            ("CSS", "css_implementation"),
            ("Accessibility", "accessibility_notes"),
        ]),
    }

    output = []
    spec = field_map.get(domain)
    if spec is not None:
        title_key, rows = spec
        output.append(f"**{result.get(title_key, 'N/A')}**")
        for label, key in rows:
            output.append(f" {label}: {result.get(key, 'N/A')}")

    return "\n".join(output)
|
||||
|
||||
|
||||
def format_context(context):
    """Render a contextual-recommendation dict as a human-readable report.

    Only sections present in `context` are emitted; missing scalar values
    fall back to 'N/A'. Returns a newline-joined string.
    """
    lines = [
        f"\n=== CONTEXTUAL RECOMMENDATIONS ===",
        f"Inferred Goal: {context.get('inferred_goal', 'N/A')}",
        f"Position: Slide {context.get('slide_position')} of {context.get('total_slides')}",
    ]

    if context.get('recommended_layout'):
        lines.extend([
            f"\n📐 Layout: {context['recommended_layout']}",
            f" Direction: {context.get('layout_direction', 'N/A')}",
            f" Visual Weight: {context.get('visual_weight', 'N/A')}",
        ])

    if context.get('typography'):
        typo = context['typography']
        lines.extend([
            f"\n📝 Typography:",
            f" Primary: {typo.get('primary_size', 'N/A')}",
            f" Secondary: {typo.get('secondary_size', 'N/A')}",
            f" Contrast: {typo.get('weight_contrast', 'N/A')}",
        ])

    if context.get('color_treatment'):
        color = context['color_treatment']
        lines.extend([
            f"\n🎨 Color Treatment:",
            f" Background: {color.get('background', 'N/A')}",
            f" Text: {color.get('text_color', 'N/A')}",
            f" Accent: {color.get('accent_usage', 'N/A')}",
        ])

    if context.get('should_break_pattern'):
        lines.append(f"\n⚡ Pattern Break: YES (use contrasting layout)")

    if context.get('should_use_full_bleed'):
        lines.append(f"\n🖼️ Full Bleed: Recommended for emotional impact")

    # Background section needs both the flag and the config dict.
    if context.get('use_background_image') and context.get('background'):
        bg = context['background']
        lines.extend([
            f"\n📸 Background Image:",
            f" Category: {bg.get('image_category', 'N/A')}",
            f" Overlay: {bg.get('overlay_style', 'N/A')}",
            f" Keywords: {bg.get('search_keywords', 'N/A')}",
        ])

    lines.append(f"\n✨ Animation: {context.get('animation_class', 'animate-fade-up')}")

    return "\n".join(lines)
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments and dispatch to one of three modes.

    Modes (mutually exclusive, checked in this order):
      --context : contextual search with layout/typography/color advice
      --all     : query every domain's CSV database
      default   : query one domain (auto-detected unless -d is given)
    """
    parser = argparse.ArgumentParser(
        description="Search slide design databases",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  search-slides.py "investor pitch" # Auto-detect domain (strategy)
  search-slides.py "funnel conversion" -d chart
  search-slides.py "headline hook" -d copy
  search-slides.py "two column" -d layout
  search-slides.py "startup funding" --all # Search all domains
  search-slides.py "metrics dashboard" --json # JSON output

Contextual Search (Premium System):
  search-slides.py "problem slide" --context --position 2 --total 9
  search-slides.py "cta" --context --position 9 --total 9 --prev-emotion frustration
"""
    )

    parser.add_argument("query", help="Search query")
    parser.add_argument("-d", "--domain", choices=AVAILABLE_DOMAINS,
                        help="Specific domain to search (auto-detected if not specified)")
    parser.add_argument("-n", "--max-results", type=int, default=3,
                        help="Maximum results to return (default: 3)")
    parser.add_argument("--all", action="store_true",
                        help="Search across all domains")
    parser.add_argument("--json", action="store_true",
                        help="Output as JSON")

    # Contextual search options
    parser.add_argument("--context", action="store_true",
                        help="Use contextual search with layout/typography/color recommendations")
    parser.add_argument("--position", type=int, default=1,
                        help="Slide position in deck (1-based, default: 1)")
    parser.add_argument("--total", type=int, default=9,
                        help="Total slides in deck (default: 9)")
    parser.add_argument("--prev-emotion", type=str, default=None,
                        help="Previous slide's emotion for contrast calculation")

    args = parser.parse_args()

    # Contextual search mode: emit recommendations plus any related hits.
    if args.context:
        result = search_with_context(
            args.query,
            slide_position=args.position,
            total_slides=args.total,
            previous_emotion=args.prev_emotion
        )

        if args.json:
            print(json.dumps(result, indent=2))
        else:
            print(format_context(result['context']))

            # Also show base search results
            if result.get('base_results'):
                print("\n\n=== RELATED SEARCH RESULTS ===")
                for domain, data in result['base_results'].items():
                    print(f"\n--- {domain.upper()} ---")
                    for item in data['results']:
                        print(format_result(item, domain))
                        print()
        return

    # Multi-domain mode: one section per domain that produced hits.
    if args.all:
        results = search_all(args.query, args.max_results)

        if args.json:
            print(json.dumps(results, indent=2))
        else:
            if not results:
                print(f"No results found for: {args.query}")
                return

            for domain, data in results.items():
                print(f"\n=== {domain.upper()} ===")
                print(f"File: {data['file']}")
                print(f"Results: {data['count']}")
                print()
                for result in data['results']:
                    print(format_result(result, domain))
                    print()
    else:
        # Single-domain mode (domain may be None -> auto-detected by search()).
        result = search(args.query, args.domain, args.max_results)

        if args.json:
            print(json.dumps(result, indent=2))
        else:
            if result.get("error"):
                print(f"Error: {result['error']}")
                return

            print(f"Domain: {result['domain']}")
            print(f"Query: {result['query']}")
            print(f"File: {result['file']}")
            print(f"Results: {result['count']}")
            print()

            if result['count'] == 0:
                print("No matching results found.")
                return

            for i, item in enumerate(result['results'], 1):
                print(f"--- Result {i} ---")
                print(format_result(item, result['domain']))
                print()


if __name__ == "__main__":
    main()
|
||||
35
skills/design-system/scripts/slide-token-validator.py
Normal file
35
skills/design-system/scripts/slide-token-validator.py
Normal file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Slide Token Validator (Legacy Wrapper)
|
||||
Now delegates to html-token-validator.py for unified HTML validation.
|
||||
|
||||
For new usage, prefer:
|
||||
python html-token-validator.py --type slides
|
||||
python html-token-validator.py --type infographics
|
||||
python html-token-validator.py # All HTML assets
|
||||
"""
|
||||
|
||||
import sys
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
|
||||
# Directory containing this wrapper script.
SCRIPT_DIR = Path(__file__).parent
# Delegation target: the consolidated HTML token validator next to this file.
UNIFIED_VALIDATOR = SCRIPT_DIR / 'html-token-validator.py'
|
||||
|
||||
|
||||
def main():
    """Forward CLI arguments to html-token-validator.py.

    When no positional (file) arguments are given, scope the unified
    validator to slide assets via `--type slides`; otherwise forward the
    arguments verbatim. Exits with the child process's return code.
    """
    passed = sys.argv[1:]
    base = [sys.executable, str(UNIFIED_VALIDATOR)]

    # Any non-flag argument counts as an explicit file selection.
    has_files = any(not arg.startswith('-') for arg in passed)
    if has_files:
        cmd = base + passed
    else:
        cmd = base + ['--type', 'slides'] + passed

    sys.exit(subprocess.run(cmd).returncode)


if __name__ == '__main__':
    main()
|
||||
453
skills/design-system/scripts/slide_search_core.py
Executable file
453
skills/design-system/scripts/slide_search_core.py
Executable file
@@ -0,0 +1,453 @@
|
||||
#!/usr/bin/env python3
|
||||
# -*- coding: utf-8 -*-
|
||||
"""
|
||||
Slide Search Core - BM25 search engine for slide design databases
|
||||
"""
|
||||
|
||||
import csv
|
||||
import re
|
||||
from pathlib import Path
|
||||
from math import log
|
||||
from collections import defaultdict
|
||||
|
||||
# ============ CONFIGURATION ============
# All searchable CSV databases live in ../data relative to this script.
DATA_DIR = Path(__file__).parent.parent / "data"
# Default cap on the number of results returned by search().
MAX_RESULTS = 3

# Per-domain CSV schema:
#   file        -> CSV filename under DATA_DIR
#   search_cols -> columns concatenated into the BM25 "document"
#   output_cols -> columns projected into each search result
CSV_CONFIG = {
    "strategy": {
        "file": "slide-strategies.csv",
        "search_cols": ["strategy_name", "keywords", "goal", "audience", "narrative_arc"],
        "output_cols": ["strategy_name", "keywords", "slide_count", "structure", "goal", "audience", "tone", "narrative_arc", "sources"]
    },
    "layout": {
        "file": "slide-layouts.csv",
        "search_cols": ["layout_name", "keywords", "use_case", "recommended_for"],
        "output_cols": ["layout_name", "keywords", "use_case", "content_zones", "visual_weight", "cta_placement", "recommended_for", "avoid_for", "css_structure"]
    },
    "copy": {
        "file": "slide-copy.csv",
        "search_cols": ["formula_name", "keywords", "use_case", "emotion_trigger", "slide_type"],
        "output_cols": ["formula_name", "keywords", "components", "use_case", "example_template", "emotion_trigger", "slide_type", "source"]
    },
    "chart": {
        "file": "slide-charts.csv",
        "search_cols": ["chart_type", "keywords", "best_for", "when_to_use", "slide_context"],
        "output_cols": ["chart_type", "keywords", "best_for", "data_type", "when_to_use", "when_to_avoid", "max_categories", "slide_context", "css_implementation", "accessibility_notes"]
    }
}

# Domain names in dict-insertion order (also the tie-break order used
# by detect_domain's max()).
AVAILABLE_DOMAINS = list(CSV_CONFIG.keys())
||||
|
||||
|
||||
# ============ BM25 IMPLEMENTATION ============
|
||||
class BM25:
    """Okapi BM25 ranking for small in-memory corpora.

    Usage: fit() once over an iterable of document strings, then score()
    a query to get (doc_index, score) pairs sorted best-first.
    """

    def __init__(self, k1=1.5, b=0.75):
        # k1: term-frequency saturation; b: length-normalization strength.
        self.k1 = k1
        self.b = b
        self.corpus = []          # tokenized documents
        self.doc_lengths = []     # token count per document
        self.avgdl = 0            # average document length
        self.idf = {}             # term -> inverse document frequency
        self.doc_freqs = defaultdict(int)  # term -> number of docs containing it
        self.N = 0                # corpus size

    def tokenize(self, text):
        """Lowercase, strip punctuation, and drop tokens of two chars or fewer."""
        cleaned = re.sub(r'[^\w\s]', ' ', str(text).lower())
        return [tok for tok in cleaned.split() if len(tok) > 2]

    def fit(self, documents):
        """Index `documents` (iterable of strings) for later scoring."""
        self.corpus = [self.tokenize(doc) for doc in documents]
        self.N = len(self.corpus)
        if not self.N:
            return
        self.doc_lengths = [len(tokens) for tokens in self.corpus]
        self.avgdl = sum(self.doc_lengths) / self.N

        # Document frequency: each term counted at most once per document.
        for tokens in self.corpus:
            for word in set(tokens):
                self.doc_freqs[word] += 1

        # Smoothed idf (the +1 keeps values non-negative).
        for word, df in self.doc_freqs.items():
            self.idf[word] = log((self.N - df + 0.5) / (df + 0.5) + 1)

    def score(self, query):
        """Score every indexed document against `query`.

        Returns [(doc_index, score), ...] sorted by score descending.
        """
        terms = self.tokenize(query)
        results = []

        for position, tokens in enumerate(self.corpus):
            length = self.doc_lengths[position]

            # Term frequencies for this document.
            counts = {}
            for word in tokens:
                counts[word] = counts.get(word, 0) + 1

            total = 0
            for term in terms:
                if term not in self.idf:
                    continue
                tf = counts.get(term, 0)
                norm = tf + self.k1 * (1 - self.b + self.b * length / self.avgdl)
                total += self.idf[term] * (tf * (self.k1 + 1)) / norm

            results.append((position, total))

        results.sort(key=lambda pair: pair[1], reverse=True)
        return results
|
||||
|
||||
|
||||
# ============ SEARCH FUNCTIONS ============
|
||||
def _load_csv(filepath):
|
||||
"""Load CSV and return list of dicts"""
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
return list(csv.DictReader(f))
|
||||
|
||||
|
||||
def _search_csv(filepath, search_cols, output_cols, query, max_results):
    """BM25-search one CSV and project matching rows onto `output_cols`.

    Returns [] when the file is missing. Only rows with a strictly
    positive score are kept, capped at `max_results`.
    """
    if not filepath.exists():
        return []

    rows = _load_csv(filepath)

    # One searchable "document" per row, built from the configured columns.
    docs = []
    for row in rows:
        docs.append(" ".join(str(row.get(col, "")) for col in search_cols))

    engine = BM25()
    engine.fit(docs)
    ranked = engine.score(query)

    hits = []
    for idx, relevance in ranked[:max_results]:
        if relevance > 0:
            source = rows[idx]
            hits.append({col: source.get(col, "") for col in output_cols if col in source})

    return hits
|
||||
|
||||
|
||||
def detect_domain(query):
    """Infer the most relevant search domain from keyword hits in `query`.

    Ties (and zero hits) resolve to the first-listed domain, "strategy".
    """
    q = query.lower()

    domain_keywords = {
        "strategy": ["pitch", "deck", "investor", "yc", "seed", "series", "demo", "sales", "webinar",
                     "conference", "board", "qbr", "all-hands", "duarte", "kawasaki", "structure"],
        "layout": ["slide", "layout", "grid", "column", "title", "hero", "section", "cta",
                   "screenshot", "quote", "timeline", "comparison", "pricing", "team"],
        "copy": ["headline", "copy", "formula", "aida", "pas", "hook", "cta", "benefit",
                 "objection", "proof", "testimonial", "urgency", "scarcity"],
        "chart": ["chart", "graph", "bar", "line", "pie", "funnel", "metrics", "data",
                  "visualization", "kpi", "trend", "comparison", "heatmap", "gauge"]
    }

    # Keep the first domain with the strictly highest hit count; a zero
    # maximum falls through to the "strategy" default.
    best_domain, best_hits = "strategy", 0
    for domain, keywords in domain_keywords.items():
        hits = sum(1 for kw in keywords if kw in q)
        if hits > best_hits:
            best_domain, best_hits = domain, hits

    return best_domain
|
||||
|
||||
|
||||
def search(query, domain=None, max_results=MAX_RESULTS):
    """Search one domain's CSV; auto-detect the domain when omitted.

    Returns a dict with domain/query/file/count/results, or an "error"
    dict when the backing CSV file is missing.
    """
    selected = domain if domain is not None else detect_domain(query)

    # Unknown domain names fall back to the "strategy" configuration.
    config = CSV_CONFIG.get(selected, CSV_CONFIG["strategy"])
    filepath = DATA_DIR / config["file"]

    if not filepath.exists():
        return {"error": f"File not found: {filepath}", "domain": selected}

    matches = _search_csv(filepath, config["search_cols"], config["output_cols"], query, max_results)

    return {
        "domain": selected,
        "query": query,
        "file": config["file"],
        "count": len(matches),
        "results": matches
    }
|
||||
|
||||
|
||||
def search_all(query, max_results=2):
    """Run `query` through every domain; keep only domains with hits."""
    combined = {}

    for name in AVAILABLE_DOMAINS:
        outcome = search(query, name, max_results)
        if outcome.get("count", 0) > 0:
            combined[name] = outcome

    return combined
|
||||
|
||||
|
||||
# ============ CONTEXTUAL SEARCH (Premium Slide System) ============

# New CSV configurations for decision system.
# Each decision CSV is loaded into a dict keyed by its `key_col`
# (see _load_decision_csv below).
DECISION_CSV_CONFIG = {
    "layout-logic": {
        "file": "slide-layout-logic.csv",
        "key_col": "goal"
    },
    "typography": {
        "file": "slide-typography.csv",
        "key_col": "content_type"
    },
    "color-logic": {
        "file": "slide-color-logic.csv",
        "key_col": "emotion"
    },
    "backgrounds": {
        "file": "slide-backgrounds.csv",
        "key_col": "slide_type"
    }
}
|
||||
|
||||
|
||||
def _load_decision_csv(csv_type):
    """Load a decision CSV as a dict keyed by its configured primary column.

    Returns {} for unknown `csv_type` or a missing file.
    """
    config = DECISION_CSV_CONFIG.get(csv_type)
    if not config:
        return {}

    filepath = DATA_DIR / config["file"]
    if not filepath.exists():
        return {}

    key = config["key_col"]
    # Rows lacking the key column are silently dropped.
    return {record[key]: record for record in _load_csv(filepath) if key in record}
|
||||
|
||||
|
||||
def get_layout_for_goal(goal, previous_emotion=None):
    """Look up the layout row for `goal` (falling back to "features").

    Returns a copy of the row, annotated with pattern-break markers when
    the row opts in and a previous emotion is known.
    """
    table = _load_decision_csv("layout-logic")
    row = table.get(goal, table.get("features", {}))
    choice = dict(row) if row else {}

    # CSV stores booleans as strings; only flag a break when we have a
    # prior emotion to contrast against.
    if previous_emotion and choice.get("break_pattern") == "true":
        choice["_pattern_break"] = True
        choice["_contrast_with"] = previous_emotion

    return choice
|
||||
|
||||
|
||||
def get_typography_for_slide(slide_type, has_metrics=False, has_quote=False):
    """Pick a typography row for a slide from slide-typography.csv.

    Metrics and quotes override the slide-type mapping (in that order);
    unknown slide types fall back to "feature-grid".
    """
    table = _load_decision_csv("typography")

    if has_metrics:
        return table.get("metric-callout", {})
    if has_quote:
        return table.get("quote-block", {})

    # Slide type -> typography content_type.
    type_map = {
        "hero": "hero-statement",
        "hook": "hero-statement",
        "title": "title-only",
        "problem": "subtitle-heavy",
        "agitation": "metric-callout",
        "solution": "subtitle-heavy",
        "features": "feature-grid",
        "proof": "metric-callout",
        "traction": "data-insight",
        "social": "quote-block",
        "testimonial": "testimonial",
        "pricing": "pricing",
        "team": "team",
        "cta": "cta-action",
        "comparison": "comparison",
        "timeline": "timeline",
    }

    return table.get(type_map.get(slide_type, "feature-grid"), {})
|
||||
|
||||
|
||||
def get_color_for_emotion(emotion):
    """Color treatment row for an emotional beat; defaults to "clarity"."""
    palette = _load_decision_csv("color-logic")
    return palette.get(emotion, palette.get("clarity", {}))
|
||||
|
||||
|
||||
def get_background_config(slide_type):
    """Background-image settings for a slide type ({} when none defined)."""
    table = _load_decision_csv("backgrounds")
    return table.get(slide_type, {})
|
||||
|
||||
|
||||
def should_use_full_bleed(slide_index, total_slides, emotion):
    """Decide whether a slide earns a full-bleed background.

    Full bleed is reserved for high-emotion beats (hope, urgency, fear,
    curiosity) at strategic positions — opener, each third, and the
    penultimate slot — in decks of at least 3 slides.
    """
    if emotion not in ("hope", "urgency", "fear", "curiosity"):
        return False
    if total_slides < 3:
        return False

    third = total_slides // 3
    return slide_index in (1, third, third * 2, total_slides - 1)
|
||||
|
||||
|
||||
def calculate_pattern_break(slide_index, total_slides, previous_emotion=None):
    """Should this slide break the visual pattern? (Duarte Sparkline contrast.)

    True at the 1/3 and 2/3 marks of decks with 5+ slides, or whenever
    the previous slide's emotion has a defined contrasting partner.
    """
    if total_slides < 5:
        return False

    third = total_slides // 3
    if slide_index == third or slide_index == third * 2:
        return True

    # Emotions that demand visual contrast on the following slide.
    contrasting_emotions = {
        "frustration": ["hope", "relief"],
        "hope": ["frustration", "fear"],
        "fear": ["hope", "relief"],
    }
    return previous_emotion in contrasting_emotions
|
||||
|
||||
|
||||
def search_with_context(query, slide_position=1, total_slides=9, previous_emotion=None):
    """
    Enhanced search that considers deck context.

    Args:
        query: Search query
        slide_position: Current slide index (1-based)
        total_slides: Total slides in deck
        previous_emotion: Emotion of previous slide (for contrast)

    Returns:
        Search results enriched with contextual recommendations
    """
    # Get base results from existing BM25 search
    base_results = search_all(query, max_results=2)

    # Detect likely slide goal from query. Explicit keyword checks
    # override the generic domain detection (first match wins).
    goal = detect_domain(query.lower())
    if "problem" in query.lower():
        goal = "problem"
    elif "solution" in query.lower():
        goal = "solution"
    elif "cta" in query.lower() or "call to action" in query.lower():
        goal = "cta"
    elif "hook" in query.lower() or "title" in query.lower():
        goal = "hook"
    elif "traction" in query.lower() or "metric" in query.lower():
        goal = "traction"

    # Enrich with contextual recommendations
    context = {
        "slide_position": slide_position,
        "total_slides": total_slides,
        "previous_emotion": previous_emotion,
        "inferred_goal": goal,
    }

    # Get layout recommendation (CSV stores booleans as "true"/"false").
    layout = get_layout_for_goal(goal, previous_emotion)
    if layout:
        context["recommended_layout"] = layout.get("layout_pattern")
        context["layout_direction"] = layout.get("direction")
        context["visual_weight"] = layout.get("visual_weight")
        context["use_background_image"] = layout.get("use_bg_image") == "true"

    # Get typography recommendation
    typography = get_typography_for_slide(goal)
    if typography:
        context["typography"] = {
            "primary_size": typography.get("primary_size"),
            "secondary_size": typography.get("secondary_size"),
            "weight_contrast": typography.get("weight_contrast"),
        }

    # Get color treatment, keyed by the layout row's emotion
    # (defaults to "clarity" when no layout row was found).
    emotion = layout.get("emotion", "clarity") if layout else "clarity"
    color = get_color_for_emotion(emotion)
    if color:
        context["color_treatment"] = {
            "background": color.get("background"),
            "text_color": color.get("text_color"),
            "accent_usage": color.get("accent_usage"),
            "card_style": color.get("card_style"),
        }

    # Calculate pattern breaking
    context["should_break_pattern"] = calculate_pattern_break(
        slide_position, total_slides, previous_emotion
    )
    context["should_use_full_bleed"] = should_use_full_bleed(
        slide_position, total_slides, emotion
    )

    # Get background config if needed (only when the layout opted in).
    if context.get("use_background_image"):
        bg_config = get_background_config(goal)
        if bg_config:
            context["background"] = {
                "image_category": bg_config.get("image_category"),
                "overlay_style": bg_config.get("overlay_style"),
                "search_keywords": bg_config.get("search_keywords"),
            }

    # Suggested animation classes per goal; unmapped goals fade up.
    animation_map = {
        "hook": "animate-fade-up",
        "problem": "animate-fade-up",
        "agitation": "animate-count animate-stagger",
        "solution": "animate-scale",
        "features": "animate-stagger",
        "traction": "animate-chart animate-count",
        "proof": "animate-stagger-scale",
        "social": "animate-fade-up",
        "cta": "animate-pulse",
    }
    context["animation_class"] = animation_map.get(goal, "animate-fade-up")

    return {
        "query": query,
        "context": context,
        "base_results": base_results,
    }
|
||||
251
skills/design-system/scripts/validate-tokens.cjs
Normal file
251
skills/design-system/scripts/validate-tokens.cjs
Normal file
@@ -0,0 +1,251 @@
|
||||
#!/usr/bin/env node
|
||||
/**
|
||||
* Validate token usage in codebase
|
||||
* Finds hardcoded values that should use design tokens
|
||||
*
|
||||
* Usage:
|
||||
* node validate-tokens.cjs --dir src/
|
||||
* node validate-tokens.cjs --dir src/ --fix
|
||||
*/
|
||||
|
||||
const fs = require('fs');
|
||||
const path = require('path');
|
||||
|
||||
/**
 * Parse command line arguments.
 * Unrecognized flags are ignored; `--help` prints usage and exits.
 * @returns {{dir: ?string, fix: boolean, ignore: string[]}}
 */
function parseArgs() {
  const argv = process.argv.slice(2);
  const options = {
    dir: null,
    fix: false,
    ignore: ['node_modules', '.git', 'dist', 'build', '.next']
  };

  let i = 0;
  while (i < argv.length) {
    switch (argv[i]) {
      case '--dir':
      case '-d':
        i += 1;
        options.dir = argv[i];
        break;
      case '--fix':
        options.fix = true;
        break;
      case '--ignore':
      case '-i':
        i += 1;
        options.ignore.push(argv[i]);
        break;
      case '--help':
      case '-h':
        console.log(`
Usage: node validate-tokens.cjs [options]

Options:
  -d, --dir <path> Directory to scan (required)
  --fix Show suggested fixes (no auto-fix)
  -i, --ignore <dir> Additional directories to ignore
  -h, --help Show this help

Checks for:
  - Hardcoded hex colors (#RGB, #RRGGBB)
  - Hardcoded pixel values (except 0, 1px)
  - Hardcoded rem values in CSS
`);
        process.exit(0);
        break;
      default:
        break;
    }
    i += 1;
  }

  return options;
}
|
||||
|
||||
/**
 * Patterns to detect hardcoded values.
 * Each entry carries the detection regex, a human-readable message, and
 * the token family suggested as a replacement (consumed by scanFile and
 * formatReport).
 */
const patterns = {
  hexColor: {
    regex: /#([0-9A-Fa-f]{3}){1,2}\b/g, // #RGB or #RRGGBB
    message: 'Hardcoded hex color',
    suggestion: 'Use var(--color-*) token'
  },
  rgbColor: {
    regex: /rgb\s*\(\s*\d+\s*,\s*\d+\s*,\s*\d+\s*\)/gi,
    message: 'Hardcoded RGB color',
    suggestion: 'Use var(--color-*) token'
  },
  pixelValue: {
    regex: /:\s*(\d{2,})px/g, // 2+ digit px values
    message: 'Hardcoded pixel value',
    suggestion: 'Use var(--space-*) or var(--radius-*) token'
  },
  remValue: {
    regex: /:\s*\d+\.?\d*rem(?![^{]*\$value)/g, // rem not in token definition
    message: 'Hardcoded rem value',
    suggestion: 'Use var(--space-*) or var(--font-size-*) token'
  }
};
|
||||
|
||||
/**
 * File extensions to scan
 */
const extensions = ['.css', '.scss', '.tsx', '.jsx', '.ts', '.js', '.vue', '.svelte'];

/**
 * Files/patterns to skip.
 * Minified bundles, Tailwind config, and the token definition files
 * themselves are allowed to contain raw values.
 */
const skipPatterns = [
  /\.min\.(css|js)$/,
  /tailwind\.config/,
  /globals\.css/, // Token definitions
  /tokens\.(css|json)/
];
|
||||
|
||||
/**
 * Recursively collect scannable files under `dir`.
 * Directories named in `ignore` are pruned; only files whose extension
 * appears in `extensions` are kept.
 * @param {string} dir - root directory to walk
 * @param {string[]} ignore - directory names to skip
 * @param {string[]} [files] - accumulator shared across recursion
 * @returns {string[]} matching file paths
 */
function getFiles(dir, ignore, files = []) {
  for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
    const fullPath = path.join(dir, entry.name);

    if (entry.isDirectory() && !ignore.includes(entry.name)) {
      getFiles(fullPath, ignore, files);
    } else if (entry.isFile() && extensions.includes(path.extname(entry.name))) {
      files.push(fullPath);
    }
  }

  return files;
}
|
||||
|
||||
/**
 * Check if file should be skipped.
 * @param {string} filePath
 * @returns {boolean} true when any skip pattern matches the path
 */
function shouldSkip(filePath) {
  for (const pattern of skipPatterns) {
    if (pattern.test(filePath)) {
      return true;
    }
  }
  return false;
}
|
||||
|
||||
/**
 * Scan a single file line-by-line for hardcoded design values.
 * Comment lines and lines already using var(--…) are skipped, as are
 * pure black/white hex values (usually intentional).
 * @param {string} filePath
 * @returns {Array<object>} violation records (file/line/column/value/…)
 */
function scanFile(filePath) {
  const source = fs.readFileSync(filePath, 'utf-8');
  const violations = [];

  source.split('\n').forEach((line, lineIdx) => {
    const trimmed = line.trim();

    // Skip comments and lines that already use CSS variables.
    if (trimmed.startsWith('//') || trimmed.startsWith('/*')) {
      return;
    }
    if (line.includes('var(--')) {
      return;
    }

    for (const [name, pattern] of Object.entries(patterns)) {
      const matches = line.match(pattern.regex);
      if (!matches) {
        continue;
      }

      for (const match of matches) {
        // Pure black/white is often intentional; let it through.
        if (name === 'hexColor' && ['#000', '#fff', '#FFF', '#000000', '#FFFFFF'].includes(match.toUpperCase())) {
          continue;
        }

        violations.push({
          file: filePath,
          line: lineIdx + 1,
          column: line.indexOf(match) + 1,
          value: match,
          type: name,
          message: pattern.message,
          suggestion: pattern.suggestion,
          context: trimmed.substring(0, 80)
        });
      }
    }
  });

  return violations;
}
|
||||
|
||||
/**
 * Format violation report.
 * Groups violations per file, then appends a per-type summary.
 * @param {Array<object>} violations - records produced by scanFile
 * @returns {string} human-readable report
 */
function formatReport(violations) {
  if (violations.length === 0) {
    return '✅ No token violations found';
  }

  let report = `⚠️ Found ${violations.length} potential token violations:\n\n`;

  // Group violations by file for a readable per-file listing.
  const byFile = {};
  for (const v of violations) {
    if (!byFile[v.file]) {
      byFile[v.file] = [];
    }
    byFile[v.file].push(v);
  }

  for (const [file, fileViolations] of Object.entries(byFile)) {
    report += `📁 ${file}\n`;
    for (const v of fileViolations) {
      report += ` Line ${v.line}: ${v.message}\n`;
      report += ` Found: ${v.value}\n`;
      report += ` Suggestion: ${v.suggestion}\n`;
      report += ` Context: ${v.context}\n\n`;
    }
  }

  // Tally by violation type for the summary block.
  const byType = {};
  for (const v of violations) {
    byType[v.type] = (byType[v.type] || 0) + 1;
  }

  report += `\n📊 Summary:\n`;
  for (const [type, count] of Object.entries(byType)) {
    report += ` ${patterns[type].message}: ${count}\n`;
  }

  return report;
}
|
||||
|
||||
/**
 * Main.
 * Resolves the target directory, walks it for scannable files, prints a
 * violation report, and exits non-zero when violations exist (so the
 * script can gate CI).
 */
function main() {
  const options = parseArgs();

  if (!options.dir) {
    console.error('Error: --dir is required');
    process.exit(1);
  }

  const dirPath = path.resolve(process.cwd(), options.dir);

  if (!fs.existsSync(dirPath)) {
    console.error(`Error: Directory not found: ${dirPath}`);
    process.exit(1);
  }

  console.log(`Scanning ${dirPath} for token violations...\n`);

  const files = getFiles(dirPath, options.ignore);
  const allViolations = [];

  for (const file of files) {
    // Skip minified bundles and token-definition files.
    if (shouldSkip(file)) continue;

    const violations = scanFile(file);
    allViolations.push(...violations);
  }

  console.log(formatReport(allViolations));

  // Exit with error code if violations found
  if (allViolations.length > 0) {
    process.exit(1);
  }
}

main();
|
||||
Reference in New Issue
Block a user