Update skills: add website-creator, mql-developer, ecommerce-astro
Changes: - Add FAL_KEY and GEMINI_API_KEY to .env.example - Update picture-it to use ~/.config/opencode/.env (unified creds) - Remove shodh-memory skill (no longer used) - Remove alphaear-* skills (deprecated) - Remove thai-frontend-dev skill (replaced by website-creator) - Remove theme-factory skill - Add mql-developer skill (MQL5 trading) - Add ecommerce-astro skill (Astro e-commerce) - Add website-creator skill (Next.js + Payload CMS) - Update install script for new skills
This commit is contained in:
205
skills/website-creator/seo-multi-channel/scripts/auto_publish.py
Normal file
205
skills/website-creator/seo-multi-channel/scripts/auto_publish.py
Normal file
@@ -0,0 +1,205 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Auto-Publish to Astro Content Collections
|
||||
|
||||
Publishes blog posts to Astro content collections,
|
||||
commits to git, and triggers auto-deploy.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import argparse
|
||||
import re
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Dict, Optional
|
||||
|
||||
|
||||
class AstroPublisher:
    """Publish blog posts to an Astro content-collections website.

    Writes markdown into ``src/content/blog/(<lang>)/``, copies associated
    images into ``public/images/blog/<slug>/``, and optionally commits and
    pushes so a git-triggered deployment runs.
    """

    def __init__(self, website_repo: str):
        """
        Initialize Astro publisher.

        Args:
            website_repo: Path to the Astro website repository root.
        """
        self.website_repo = website_repo
        # Astro content collection for blog posts.
        self.content_dir = os.path.join(website_repo, 'src/content/blog')
        # Static assets served as-is by Astro.
        self.images_dir = os.path.join(website_repo, 'public/images/blog')

    def detect_language(self, content: str) -> str:
        """Detect if content is Thai ('th') or English ('en').

        Uses the ratio of characters in the Thai Unicode block
        (U+0E00 - U+0E7F); more than 30% Thai characters means 'th'.
        """
        thai_chars = sum(1 for c in content if '\u0E00' <= c <= '\u0E7F')
        total_chars = len(content)
        thai_ratio = thai_chars / total_chars if total_chars > 0 else 0
        return 'th' if thai_ratio > 0.3 else 'en'

    def generate_slug(self, title: str, lang: str = 'en') -> str:
        """Generate a URL-friendly slug from *title* (max 100 chars).

        Note: *lang* is currently unused; kept for interface stability.
        """
        # Remove special characters.
        slug = re.sub(r'[^\w\s-]', '', title.lower())
        # Collapse whitespace/hyphen runs into single hyphens.
        slug = re.sub(r'[-\s]+', '-', slug)
        # Remove leading/trailing hyphens and underscores.
        slug = slug.strip('-_')
        # Limit length.
        return slug[:100]

    def parse_frontmatter(self, content: str) -> Dict:
        """Parse YAML frontmatter from markdown content.

        Returns an empty dict when there is no frontmatter or when it
        fails to parse.
        """
        import yaml

        if not content.startswith('---'):
            return {}

        try:
            # Content shape: ---\n<frontmatter>\n---\n<body>
            parts = content.split('---', 2)
            if len(parts) >= 2:
                frontmatter = yaml.safe_load(parts[1])
                return frontmatter or {}
        except yaml.YAMLError:
            # Malformed frontmatter: treat as absent rather than crashing.
            # (Was a bare `except:` which also hid unrelated bugs.)
            pass

        return {}

    def publish(self, markdown_content: str, images: list = None, use_git: bool = False) -> Dict:
        """
        Publish blog post to Astro content collections.

        Args:
            markdown_content: Full markdown with frontmatter
            images: List of image paths to copy
            use_git: Whether to git commit and push (default: False - direct write only)

        Returns:
            Publication result dict: 'success' plus, on success, 'slug',
            'language', 'path', 'git_result', and 'method'; on failure,
            'error' with the exception text.
        """
        try:
            # Parse frontmatter; fields fall back to values derived from content.
            frontmatter = self.parse_frontmatter(markdown_content)

            title = frontmatter.get('title', 'Untitled')
            slug = frontmatter.get('slug') or self.generate_slug(title)
            lang = frontmatter.get('lang') or self.detect_language(markdown_content)

            # Posts are grouped into a "(lang)" folder per language.
            lang_folder = f'({lang})'
            output_dir = os.path.join(self.content_dir, lang_folder)
            os.makedirs(output_dir, exist_ok=True)

            output_path = os.path.join(output_dir, f'{slug}.md')

            # Write markdown file (ALWAYS do this).
            with open(output_path, 'w', encoding='utf-8') as f:
                f.write(markdown_content)

            print(f"\n✓ Saved: {output_path}")

            # Copy images if provided.
            if images:
                import shutil  # hoisted: was re-imported on every loop iteration

                images_output = os.path.join(self.images_dir, slug)
                os.makedirs(images_output, exist_ok=True)

                for img_path in images:
                    if os.path.exists(img_path):
                        shutil.copy(img_path, images_output)
                        print(f" ✓ Copied image: {os.path.basename(img_path)}")

            # Git commit and push (OPTIONAL - only if requested and Gitea configured)
            git_result = None
            if use_git:
                git_result = self.git_commit_and_push(slug, lang)
            else:
                print(f" ✓ Direct write complete (no git)")

            return {
                'success': True,
                'slug': slug,
                'language': lang,
                'path': output_path,
                'git_result': git_result,
                'method': 'direct_write' if not use_git else 'git_push'
            }

        except Exception as e:
            # Report failure as data so callers can surface the error.
            return {
                'success': False,
                'error': str(e)
            }

    def git_commit_and_push(self, slug: str, lang: str) -> Dict:
        """Commit all changes and push to the remote (triggers auto-deploy).

        Args:
            slug: Post slug, used in the commit message.
            lang: Language code, used in the commit message.

        Returns:
            Dict with 'success' plus details, or an 'error' message.
        """
        try:
            # Check if git repo.
            if not os.path.exists(os.path.join(self.website_repo, '.git')):
                return {'success': False, 'error': 'Not a git repository'}

            # Stage everything under the repo root.
            subprocess.run(['git', 'add', '.'], cwd=self.website_repo, check=True, capture_output=True)

            message = f"Add blog post: {slug} ({lang})"
            subprocess.run(['git', 'commit', '-m', message], cwd=self.website_repo, check=True, capture_output=True)

            subprocess.run(['git', 'push'], cwd=self.website_repo, check=True, capture_output=True)

            print(f"✓ Committed: {message}")
            print(f"✓ Pushed to remote")

            return {
                'success': True,
                'commit_message': message,
                'triggered_deploy': True
            }

        except subprocess.CalledProcessError as e:
            print(f"✗ Git error: {e.stderr.decode() if e.stderr else str(e)}")
            return {'success': False, 'error': 'Git operation failed'}
        except Exception as e:
            print(f"✗ Error: {e}")
            return {'success': False, 'error': str(e)}
|
||||
|
||||
|
||||
def main():
    """CLI entry point: publish a markdown file to an Astro site."""
    parser = argparse.ArgumentParser(description='Publish to Astro')
    parser.add_argument('--file', required=True, help='Markdown file to publish')
    parser.add_argument('--website-repo', required=True, help='Path to website repo')
    parser.add_argument('--image', action='append', help='Image files to copy')
    parser.add_argument('--use-git', action='store_true', help='Use git commit/push (default: direct write only)')
    args = parser.parse_args()

    print(f"\n📝 Publishing to Astro\n")

    # Load the post content from disk.
    with open(args.file, 'r', encoding='utf-8') as fh:
        markdown = fh.read()

    # Publish (default: direct write, no git).
    outcome = AstroPublisher(args.website_repo).publish(
        markdown, args.image, use_git=args.use_git
    )

    if not outcome['success']:
        print(f"\n❌ Publication failed: {outcome.get('error')}")
        return

    print(f"\n✅ Published successfully!")
    print(f" Slug: {outcome['slug']}")
    print(f" Language: {outcome['language']}")
    print(f" Path: {outcome['path']}")
    print(f" Method: {outcome['method']}")

    git_info = outcome.get('git_result')
    if git_info and git_info.get('success'):
        print(f" ✓ Committed and pushed to Gitea")
        print(f" ✓ Deployment triggered")


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,478 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SEO Multi-Channel Content Generator
|
||||
|
||||
Generate marketing content for multiple channels from a single topic.
|
||||
Supports Thai language with full PyThaiNLP integration.
|
||||
|
||||
Channels: Facebook > Facebook Ads > Google Ads > Blog > X (Twitter)
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
from typing import Dict, List, Optional, Any
|
||||
import yaml
|
||||
|
||||
# Load environment variables
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv()
|
||||
|
||||
# Thai language processing
|
||||
try:
|
||||
from pythainlp import word_tokenize, sent_tokenize
|
||||
from pythainlp.util import normalize
|
||||
THAI_SUPPORT = True
|
||||
except ImportError:
|
||||
THAI_SUPPORT = False
|
||||
print("Warning: PyThaiNLP not installed. Thai language support disabled.")
|
||||
print("Install with: pip install pythainlp")
|
||||
|
||||
|
||||
class ThaiTextProcessor:
    """Thai language text processing utilities.

    Every method degrades to a naive whitespace/period heuristic when
    PyThaiNLP is unavailable (module-level THAI_SUPPORT is False).
    """

    @staticmethod
    def count_words(text: str) -> int:
        """Count Thai words (Thai has no spaces between words)."""
        if not THAI_SUPPORT:
            # Fallback: whitespace tokenization (reasonable for English).
            return len(text.split())

        tokens = word_tokenize(text, engine="newmm")
        return len([t for t in tokens if t.strip() and not t.isspace()])

    @staticmethod
    def count_sentences(text: str) -> int:
        """Count sentences in *text*."""
        if not THAI_SUPPORT:
            # Count non-empty period-separated segments. The previous
            # `len(text.split('.'))` over-counted by one for text ending
            # in a period and reported 1 for empty text.
            return len([s for s in text.split('.') if s.strip()])

        sentences = sent_tokenize(text, engine="whitespace")
        return len(sentences)

    @staticmethod
    def calculate_keyword_density(text: str, keyword: str) -> float:
        """Calculate keyword density (percent) for Thai text."""
        if not THAI_SUPPORT:
            # Fallback: raw substring count over whitespace word count.
            text_words = text.lower().split()
            keyword_count = text.lower().count(keyword.lower())
            return (keyword_count / len(text_words) * 100) if text_words else 0

        # Normalize both strings so visually-identical Thai sequences compare equal.
        text_normalized = normalize(text)
        keyword_normalized = normalize(keyword)
        count = text_normalized.count(keyword_normalized)
        word_count = ThaiTextProcessor.count_words(text)
        return (count / word_count * 100) if word_count > 0 else 0

    @staticmethod
    def detect_language(text: str) -> str:
        """Detect if content is Thai ('th') or English ('en').

        Uses the ratio of characters in the Thai Unicode block
        (U+0E00 - U+0E7F); more than 30% Thai characters means 'th'.
        """
        thai_chars = sum(1 for c in text if '\u0E00' <= c <= '\u0E7F')
        total_chars = len(text)
        thai_ratio = thai_chars / total_chars if total_chars > 0 else 0

        return 'th' if thai_ratio > 0.3 else 'en'
|
||||
|
||||
|
||||
class ChannelTemplate:
    """Load and expose a single channel's YAML template."""

    def __init__(self, channel_name: str, templates_dir: str):
        # Template files are named <channel>.yaml inside templates_dir.
        self.channel_name = channel_name
        self.template_path = os.path.join(templates_dir, f"{channel_name}.yaml")
        self.template = self._load_template()

    def _load_template(self) -> Dict:
        """Read and parse the channel's YAML template from disk."""
        raw = Path(self.template_path).read_text(encoding='utf-8')
        return yaml.safe_load(raw)

    def get_specs(self) -> Dict:
        """Return the channel's field specifications."""
        return self.template.get('fields', {})

    def get_quality_requirements(self) -> Dict:
        """Return the channel's quality requirements."""
        return self.template.get('quality', {})
|
||||
|
||||
|
||||
class ImageHandler:
    """Handle image generation and editing for content channels."""

    def __init__(self, chutes_api_token: str):
        # API token for the Chutes image service (may be empty if unset).
        self.chutes_token = chutes_api_token
        self.output_base = "output"

    def find_product_images(self, product_name: str, website_repo: str) -> List[str]:
        """Find existing product images in the website repo.

        Searches for the product name anywhere in the tree plus the common
        asset directories, for each known image extension.

        Returns:
            Up to 10 unique image paths.
        """
        import glob

        extensions = ['.jpg', '.jpeg', '.png', '.webp']
        found_images = []

        search_patterns = [
            f"**/*{product_name}*{{ext}}" for ext in extensions
        ] + [
            "public/images/**/*{ext}",
            "src/assets/**/*{ext}"
        ]

        for pattern in search_patterns:
            # Try each specific extension. (The original also ran a
            # wildcard glob with ext='*' whose result was discarded -
            # pure dead work, removed.)
            for ext in extensions:
                specific_matches = glob.glob(
                    os.path.join(website_repo, pattern.format(ext=ext)),
                    recursive=True
                )
                found_images.extend(specific_matches)

        # Deduplicate and cap the result size.
        return list(set(found_images))[:10]

    def generate_image_for_channel(self, topic: str, channel: str, content_type: str) -> str:
        """
        Generate image for content.
        For product: browse repo first, then ask user or use image-edit
        For non-product: generate fresh with image-generation

        Returns:
            Path where the image would be written (placeholder only).
        """
        # This would call the image-generation or image-edit skills.
        # For now, return placeholder
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        output_dir = os.path.join(
            self.output_base,
            self._slugify(topic),
            channel,
            "images"
        )
        os.makedirs(output_dir, exist_ok=True)

        image_path = os.path.join(output_dir, f"generated_{timestamp}.png")

        # Placeholder - in real implementation, would call image-generation skill
        print(f" [Image Generation] Would generate image for {channel}")
        print(f" Topic: {topic}, Type: {content_type}")

        return image_path

    def _slugify(self, text: str) -> str:
        """Convert text to URL-friendly slug."""
        import re
        slug = re.sub(r'[^\w\s-]', '', text.lower())
        slug = re.sub(r'[-\s]+', '-', slug)
        return slug.strip('-_')
|
||||
|
||||
|
||||
class ContentGenerator:
    """Main content generator class.

    Orchestrates templates (ChannelTemplate), Thai/English text utilities
    (ThaiTextProcessor), and image handling (ImageHandler) to produce
    placeholder content variations per channel, then writes a combined
    results.json under ``output/<topic-slug>/``.
    """

    def __init__(
        self,
        topic: str,
        channels: List[str],
        website_repo: Optional[str] = None,
        auto_publish: bool = False,
        language: Optional[str] = None
    ):
        """
        Args:
            topic: Topic to generate content about.
            channels: Channel names (facebook, facebook_ads, google_ads, blog, x, twitter).
            website_repo: Path to website repository (for blog auto-publish).
            auto_publish: Whether to auto-publish blog posts.
            language: 'th' or 'en'; None means auto-detect from the topic.
        """
        self.topic = topic
        self.channels = channels
        self.website_repo = website_repo
        self.auto_publish = auto_publish
        self.language = language
        # Templates live next to this script in ./templates.
        self.templates_dir = os.path.join(os.path.dirname(__file__), "templates")
        self.output_base = "output"

        # Initialize components
        self.text_processor = ThaiTextProcessor()
        self.image_handler = ImageHandler(os.getenv("CHUTES_API_TOKEN", ""))

        # Load templates; channels without a template mapping are skipped.
        self.templates = {}
        for channel in channels:
            template_name = self._get_template_name(channel)
            if template_name:
                self.templates[channel] = ChannelTemplate(template_name, self.templates_dir)

    def _get_template_name(self, channel: str) -> Optional[str]:
        """Map channel name to template file (None for unknown channels)."""
        # 'x' and 'twitter' share the same thread template.
        mapping = {
            'facebook': 'facebook',
            'facebook_ads': 'facebook_ads',
            'google_ads': 'google_ads',
            'blog': 'blog',
            'x': 'x_thread',
            'twitter': 'x_thread'
        }
        return mapping.get(channel.lower())

    def generate_all(self) -> Dict[str, Any]:
        """Generate content for all channels and persist the results.

        Returns:
            Dict with 'topic', 'generated_at', per-channel 'channels',
            and a (currently unpopulated) 'summary'.
        """
        results = {
            'topic': self.topic,
            'generated_at': datetime.now().isoformat(),
            'channels': {},
            'summary': {}
        }

        print(f"\n🎯 Generating content for: {self.topic}")
        print(f"📱 Channels: {', '.join(self.channels)}")
        print(f"🌐 Language: {self.language or 'auto-detect'}\n")

        # Channels without a loaded template are silently skipped.
        for channel in self.channels:
            if channel in self.templates:
                print(f" Generating {channel}...")
                channel_result = self._generate_for_channel(channel)
                results['channels'][channel] = channel_result

        # Save results
        self._save_results(results)

        return results

    def _generate_for_channel(self, channel: str) -> Dict:
        """Generate all variations for one channel using its template."""
        template = self.templates[channel]
        specs = template.get_specs()

        # Detect language from topic
        lang = self.language or self.text_processor.detect_language(self.topic)

        # Generate variations (placeholder - real implementation would use LLM)
        variations = []
        # Variation count comes from the template's output section (default 5).
        num_variations = template.template.get('output', {}).get('variations', 5)

        for i in range(num_variations):
            variation = self._create_variation(channel, i, lang, specs)
            variations.append(variation)

        return {
            'channel': channel,
            'language': lang,
            'variations': variations,
            'api_ready': template.template.get('api_ready', False)
        }

    def _create_variation(
        self,
        channel: str,
        variation_num: int,
        language: str,
        specs: Dict
    ) -> Dict:
        """Create a single content variation for *channel*.

        Args:
            channel: Channel identifier.
            variation_num: Zero-based variation index.
            language: 'th' or 'en'.
            specs: Channel field specifications (currently unused by the
                placeholder implementation).
        """
        # This is a placeholder - real implementation would call LLM
        # with proper prompts based on channel template

        base_variation = {
            'id': f"{channel}_var_{variation_num + 1}",
            'created_at': datetime.now().isoformat()
        }

        # Channel-specific structure
        if channel == 'facebook':
            base_variation.update({
                'primary_text': f"[Facebook Post {variation_num + 1}] {self.topic}...",
                'headline': f"[Headline] {self.topic}",
                # CTA text is localized for Thai content.
                'cta': "เรียนรู้เพิ่มเติม" if language == 'th' else "Learn More",
                'hashtags': [f"#{self.topic.replace(' ', '')}"],
                'image': {
                    'path': self.image_handler.generate_image_for_channel(
                        self.topic, channel, 'social'
                    )
                }
            })

        elif channel == 'facebook_ads':
            base_variation.update({
                'primary_text': f"[FB Ad Primary Text] {self.topic}...",
                'headline': f"[FB Ad Headline - 40 chars]",
                'description': f"[FB Ad Description - 90 chars]",
                'cta': "SHOP_NOW",
                # Metadata for direct posting via the Meta Marketing API.
                'api_ready': {
                    'platform': 'meta',
                    'api_version': 'v18.0',
                    'endpoint': '/act_{ad_account_id}/adcreatives'
                }
            })

        elif channel == 'google_ads':
            base_variation.update({
                # Responsive search ads take up to 15 headlines and 4 descriptions.
                'headlines': [
                    {'text': f"[Headline {i+1}] {self.topic}"}
                    for i in range(15)
                ],
                'descriptions': [
                    {'text': f"[Description {i+1}] Learn more about {self.topic}"}
                    for i in range(4)
                ],
                'keywords': [self.topic, f"บริการ {self.topic}"],
                'api_ready': {
                    'platform': 'google',
                    'api_version': 'v15.0',
                    'endpoint': '/google.ads.googleads.v15.services/GoogleAdsService:Mutate'
                }
            })

        elif channel == 'blog':
            base_variation.update({
                'markdown': self._generate_blog_markdown(language),
                'frontmatter': {
                    'title': f"{self.topic} - Complete Guide",
                    'description': f"Learn about {self.topic}",
                    'slug': self._slugify(self.topic),
                    'lang': language
                },
                # Target lengths differ by language.
                'word_count': 2000 if language == 'en' else 1500,
                'publish_status': 'draft'
            })

        elif channel in ['x', 'twitter']:
            base_variation.update({
                # Fixed-length 7-tweet thread skeleton.
                'tweets': [
                    f"[Tweet {i+1}/7] Content about {self.topic}..."
                    for i in range(7)
                ],
                'thread_title': f"Everything about {self.topic} 🧵"
            })

        return base_variation

    def _generate_blog_markdown(self, language: str) -> str:
        """Generate a skeleton blog post (frontmatter + sections) in Markdown."""
        slug = self._slugify(self.topic)

        markdown = f"""---
title: "{self.topic} - Complete Guide"
description: "Learn everything about {self.topic} in this comprehensive guide"
keywords: ["{self.topic}", "บริการ {self.topic}", "guide"]
slug: {slug}
lang: {language}
category: guides
tags: ["{self.topic}", "guide"]
created: {datetime.now().strftime('%Y-%m-%d')}
---

# {self.topic}: Complete Guide

## Introduction

[Opening hook about {self.topic}...]

## What is {self.topic}?

[Definition and explanation...]

## Why {self.topic} Matters

[Importance and benefits...]

## How to Get Started with {self.topic}

[Step-by-step guide...]

## Best Practices for {self.topic}

[Tips and recommendations...]

## Conclusion

[Summary and call-to-action...]
"""
        return markdown

    def _save_results(self, results: Dict):
        """Write the combined results to output/<topic-slug>/results.json."""
        output_dir = os.path.join(
            self.output_base,
            self._slugify(self.topic)
        )
        os.makedirs(output_dir, exist_ok=True)

        output_file = os.path.join(output_dir, "results.json")
        # ensure_ascii=False keeps Thai text readable in the JSON file.
        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump(results, f, indent=2, ensure_ascii=False)

        print(f"\n✅ Results saved to: {output_file}")

    def _slugify(self, text: str) -> str:
        """Convert text to URL-friendly slug."""
        # Local import: `re` is not imported at module level in this file.
        import re
        slug = re.sub(r'[^\w\s-]', '', text.lower())
        slug = re.sub(r'[-\s]+', '-', slug)
        return slug.strip('-_')
|
||||
|
||||
|
||||
def main():
    """CLI entry point: parse arguments, run the generator, print a summary."""
    parser = argparse.ArgumentParser(
        description='Generate multi-channel marketing content from a single topic'
    )
    parser.add_argument('--topic', '-t', required=True,
                        help='Topic to generate content about')
    parser.add_argument('--channels', '-c', nargs='+',
                        default=['facebook', 'facebook_ads', 'google_ads', 'blog', 'x'],
                        choices=['facebook', 'facebook_ads', 'google_ads', 'blog', 'x', 'twitter'],
                        help='Channels to generate content for')
    parser.add_argument('--website-repo', '-w',
                        help='Path to website repository (for blog auto-publish)')
    parser.add_argument('--auto-publish', action='store_true',
                        help='Auto-publish blog posts to website')
    parser.add_argument('--language', '-l', choices=['th', 'en'],
                        help='Content language (default: auto-detect)')
    parser.add_argument('--product-name', '-p',
                        help='Product name (for product image handling)')
    args = parser.parse_args()

    # Build the generator and produce content for every requested channel.
    results = ContentGenerator(
        topic=args.topic,
        channels=args.channels,
        website_repo=args.website_repo,
        auto_publish=args.auto_publish,
        language=args.language
    ).generate_all()

    # Human-readable summary of what was produced.
    print("\n📊 Summary:")
    print(f" Topic: {results['topic']}")
    print(f" Channels generated: {len(results['channels'])}")
    for name, payload in results['channels'].items():
        print(f" - {name}: {len(payload['variations'])} variations")
    print(f"\n✨ Done!")


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,313 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Image Integration Module
|
||||
|
||||
Integrates with image-generation and image-edit skills.
|
||||
Handles product vs non-product image workflows.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from typing import Optional, List
|
||||
|
||||
|
||||
class ImageIntegration:
    """Integrate with the image-generation and image-edit skills.

    Shells out to each skill's CLI script and copies the produced image
    into this skill's output tree.
    """

    def __init__(self, skills_base_path: Optional[str] = None):
        """
        Initialize image integration.

        Args:
            skills_base_path: Base path to the skills directory. Defaults
                to the grandparent of this script's directory (assumes the
                layout skills/seo-multi-channel/scripts/).
        """
        if skills_base_path is None:
            # Default: assume we're in skills/seo-multi-channel/scripts/
            base = Path(__file__).parent.parent.parent
            self.skills_base = str(base)
        else:
            # BUG FIX: the original assigned the undefined name
            # `skills_base`, raising NameError whenever a path was passed.
            self.skills_base = skills_base_path

        self.image_gen_script = os.path.join(self.skills_base, 'image-generation/scripts/image_gen.py')
        self.image_edit_script = os.path.join(self.skills_base, 'image-edit/scripts/image_edit.py')

    def _resolve_output_dir(self, output_dir: str, topic: Optional[str], channel: Optional[str]) -> str:
        """Build (and create) the destination directory for an image.

        When both topic and channel are given, images go under
        <output_dir>/<topic>/<channel>/images; otherwise output_dir itself.
        """
        if topic and channel:
            output_path = os.path.join(output_dir, topic, channel, 'images')
        else:
            output_path = output_dir
        os.makedirs(output_path, exist_ok=True)
        return output_path

    def _collect_result(self, result, output_path: str, fail_label: str) -> Optional[str]:
        """Parse a skill CLI's stdout and copy the produced image into place.

        Args:
            result: CompletedProcess from subprocess.run.
            output_path: Directory to copy the image into.
            fail_label: Word used in the failure message ('Generation'/'Edit').

        Returns:
            Destination path, or None on failure.
        """
        if result.returncode == 0:
            # Skill output format: "filename.png [id]" on the last line.
            output_line = result.stdout.strip().split('\n')[-1]
            image_path = output_line.split(' ')[0]

            # Move to our output directory if needed.
            if image_path and os.path.exists(image_path):
                dest_path = os.path.join(output_path, os.path.basename(image_path))
                if image_path != dest_path:
                    import shutil
                    shutil.copy(image_path, dest_path)
                print(f" ✓ Saved: {dest_path}")
                return dest_path

        print(f" ✗ {fail_label} failed: {result.stderr}")
        return None

    def generate_image(self, prompt: str, output_dir: str, width: int = 1024,
                       height: int = 1024, topic: str = None, channel: str = None) -> Optional[str]:
        """
        Generate image using image-generation skill

        Args:
            prompt: Image generation prompt
            output_dir: Directory to save image
            width: Image width
            height: Image height
            topic: Topic name (for filename)
            channel: Channel name (for subfolder)

        Returns:
            Path to generated image, or None on failure
        """
        output_path = self._resolve_output_dir(output_dir, topic, channel)

        # Build command for the image-generation CLI.
        cmd = [
            sys.executable,
            self.image_gen_script,
            'generate',
            prompt,
            '--width', str(width),
            '--height', str(height)
        ]

        print(f"\n🎨 Generating image...")
        print(f" Prompt: {prompt[:100]}...")
        print(f" Size: {width}x{height}")

        try:
            # Run image generation from the script's own directory.
            result = subprocess.run(cmd, capture_output=True, text=True, cwd=os.path.dirname(self.image_gen_script))
            return self._collect_result(result, output_path, 'Generation')
        except Exception as e:
            print(f" ✗ Error: {e}")
            return None

    def edit_product_image(self, base_image_path: str, edit_prompt: str,
                           output_dir: str, topic: str = None, channel: str = None) -> Optional[str]:
        """
        Edit product image using image-edit skill

        Args:
            base_image_path: Path to existing product image
            edit_prompt: Edit instructions
            output_dir: Directory to save edited image
            topic: Topic name
            channel: Channel name

        Returns:
            Path to edited image, or None on failure
        """
        if not os.path.exists(base_image_path):
            print(f" ✗ Base image not found: {base_image_path}")
            return None

        output_path = self._resolve_output_dir(output_dir, topic, channel)

        # Build command for the image-edit CLI.
        cmd = [
            sys.executable,
            self.image_edit_script,
            edit_prompt,
            base_image_path
        ]

        print(f"\n✏️ Editing product image...")
        print(f" Base: {base_image_path}")
        print(f" Edit: {edit_prompt[:100]}...")

        try:
            result = subprocess.run(cmd, capture_output=True, text=True, cwd=os.path.dirname(self.image_edit_script))
            return self._collect_result(result, output_path, 'Edit')
        except Exception as e:
            print(f" ✗ Error: {e}")
            return None

    def find_product_images(self, product_name: str, website_repo: str) -> List[str]:
        """
        Find existing product images in website repo

        Args:
            product_name: Product name to search for
            website_repo: Path to website repository

        Returns:
            List of image paths (unique, max 10)
        """
        import glob

        extensions = ['.jpg', '.jpeg', '.png', '.webp']
        found_images = []

        # Search by product name anywhere, plus common asset directories.
        patterns = [
            f"**/*{product_name}*{{ext}}",
            f"public/images/**/*{{ext}}",
            f"src/assets/**/*{{ext}}"
        ]

        for pattern in patterns:
            for ext in extensions:
                search_pattern = pattern.format(ext=ext)
                matches = glob.glob(os.path.join(website_repo, search_pattern), recursive=True)
                found_images.extend(matches[:5])  # Limit per pattern

        return list(set(found_images))[:10]  # Return unique, max 10

    def handle_product_content(self, product_name: str, website_repo: str,
                               edit_prompt: str, output_dir: str,
                               topic: str, channel: str) -> Optional[str]:
        """
        Handle image for product content

        Workflow:
        1. Browse website repo for product images
        2. If found: edit with image-edit
        3. If not found: ask user to provide

        Args:
            product_name: Product name
            website_repo: Path to website repo
            edit_prompt: Edit instructions
            output_dir: Output directory
            topic: Topic name
            channel: Channel name

        Returns:
            Path to image or None
        """
        print(f"\n🔍 Looking for product images: {product_name}")

        # Step 1: Find existing images
        images = self.find_product_images(product_name, website_repo)

        if images:
            print(f" ✓ Found {len(images)} image(s)")
            best_image = images[0]  # Use first/best match

            # Step 2: Edit image
            return self.edit_product_image(
                best_image,
                edit_prompt,
                output_dir,
                topic,
                channel
            )
        else:
            print(f" ✗ No product images found in repo")
            print(f" Please provide product image manually")
            return None

    def handle_non_product_content(self, content_type: str, topic: str,
                                   output_dir: str, channel: str) -> Optional[str]:
        """
        Generate fresh image for non-product content

        Args:
            content_type: Type (service, stats, knowledge)
            topic: Topic name
            output_dir: Output directory
            channel: Channel name

        Returns:
            Path to generated image
        """
        # Create prompt based on content type
        prompts = {
            'service': f"Professional illustration of {topic}, modern flat design, business context, Thai-friendly aesthetic",
            'stats': f"Data visualization infographic for {topic}, clean charts, professional style",
            'knowledge': f"Educational illustration for {topic}, clear visual metaphor, engaging style",
            'default': f"Professional image for {topic}, modern design, high quality"
        }

        prompt = prompts.get(content_type, prompts['default'])

        # Generate image
        return self.generate_image(
            prompt,
            output_dir,
            topic=topic,
            channel=channel
        )
|
||||
|
||||
|
||||
def main():
    """CLI entry point for testing image integration.

    Actions:
        generate: create a fresh image for non-product content
        edit:     locate a product image in the website repo and edit it
        find:     list product images matching a product name

    Exits non-zero (via argparse) when required per-action arguments
    are missing. Previously these error paths printed to stdout and
    returned with exit status 0, which made failures invisible to
    calling scripts.
    """
    parser = argparse.ArgumentParser(description='Test Image Integration')
    parser.add_argument('--action', choices=['generate', 'edit', 'find'], required=True)
    parser.add_argument('--prompt', help='Image prompt or edit instructions')
    parser.add_argument('--topic', help='Topic name')
    parser.add_argument('--channel', help='Channel name')
    parser.add_argument('--output-dir', default='./output', help='Output directory')
    parser.add_argument('--product-name', help='Product name (for find action)')
    parser.add_argument('--website-repo', help='Website repo path (for find action)')

    args = parser.parse_args()

    integration = ImageIntegration()

    if args.action == 'generate':
        result = integration.handle_non_product_content(
            'service', args.topic, args.output_dir, args.channel
        )
        print(f"\nResult: {result}")

    elif args.action == 'edit':
        if not args.product_name or not args.website_repo:
            # parser.error() prints usage + message to stderr and exits with
            # a non-zero status, so shell pipelines can detect the failure.
            parser.error('--product-name and --website-repo required for edit')

        result = integration.handle_product_content(
            args.product_name, args.website_repo, args.prompt,
            args.output_dir, args.topic, args.channel
        )
        print(f"\nResult: {result}")

    elif args.action == 'find':
        if not args.product_name or not args.website_repo:
            parser.error('--product-name and --website-repo required for find')

        images = integration.find_product_images(args.product_name, args.website_repo)
        print(f"\nFound {len(images)} images:")
        for img in images:
            print(f" - {img}")


if __name__ == '__main__':
    main()
|
||||
@@ -0,0 +1,40 @@
|
||||
# SEO Multi-Channel Generator - Dependencies
|
||||
|
||||
# Thai language processing
|
||||
pythainlp>=3.2.0
|
||||
|
||||
# HTTP and API requests
|
||||
requests>=2.31.0
|
||||
aiohttp>=3.9.0
|
||||
|
||||
# Configuration and environment
|
||||
python-dotenv>=1.0.0
|
||||
|
||||
# YAML parsing for templates
|
||||
pyyaml>=6.0.1
|
||||
|
||||
# Data handling
|
||||
pandas>=2.1.0
|
||||
|
||||
# Date/time handling
|
||||
python-dateutil>=2.8.2
|
||||
|
||||
# Image processing (for image generation/edit integration)
|
||||
Pillow>=10.0.0
|
||||
|
||||
# Markdown processing (for blog posts)
|
||||
markdown>=3.5.0
|
||||
python-frontmatter>=1.0.0
|
||||
|
||||
# Git operations (for auto-publish)
|
||||
GitPython>=3.1.40
|
||||
|
||||
# Utilities
|
||||
tqdm>=4.66.0 # Progress bars
|
||||
rich>=13.7.0 # Beautiful console output
|
||||
|
||||
# Note: asyncio is part of the Python 3 standard library — do NOT install
# the PyPI "asyncio" package; it shadows the stdlib module and breaks on Python 3.
|
||||
|
||||
# Optional: For advanced text processing
|
||||
nltk>=3.8.0 # Only if needed for English NLP
|
||||
@@ -0,0 +1,192 @@
|
||||
# Blog SEO Article Template
|
||||
channel: blog
|
||||
priority: 4
|
||||
language: [th, en]
|
||||
|
||||
# Article structure
|
||||
structure:
|
||||
min_word_count:
|
||||
thai: 1500
|
||||
english: 2000
|
||||
max_word_count:
|
||||
thai: 3000
|
||||
english: 3000
|
||||
keyword_density:
|
||||
thai: 1.0-1.5%
|
||||
english: 1.5-2.0%
|
||||
|
||||
sections:
|
||||
- introduction:
|
||||
word_count: 150-250
|
||||
must_include:
|
||||
- hook
|
||||
- problem_statement
|
||||
- promise
|
||||
- primary_keyword_in_first_100_words
|
||||
|
||||
- body:
|
||||
h2_sections: 4-7
|
||||
h3_subsections: "as needed"
|
||||
keyword_in_h2: "at least 2-3"
|
||||
|
||||
- conclusion:
|
||||
word_count: 150-250
|
||||
must_include:
|
||||
- summary_of_key_points
|
||||
- primary_keyword
|
||||
- call_to_action
|
||||
|
||||
- cta_placement:
|
||||
recommended_locations:
|
||||
- after_first_value_section
|
||||
- after_comparison_proof_section
|
||||
- at_end
|
||||
min_cta_count: 2
|
||||
max_cta_count: 4
|
||||
|
||||
# Frontmatter requirements
|
||||
frontmatter:
|
||||
required_fields:
|
||||
- title: 50-60 chars
|
||||
- description: 150-160 chars (meta description)
|
||||
- keywords: array of 5-10 keywords
|
||||
- slug: url-friendly
|
||||
- lang: th_or_en
|
||||
- category: string
|
||||
- tags: array of strings
|
||||
- created: "YYYY-MM-DD"
|
||||
- author: string_optional
|
||||
|
||||
optional_fields:
|
||||
- updated: "YYYY-MM-DD"
|
||||
- draft: boolean
|
||||
- featured: boolean
|
||||
- image:
|
||||
src: path
|
||||
alt: string
|
||||
caption: string
|
||||
|
||||
# SEO requirements
|
||||
seo:
|
||||
meta_title:
|
||||
min_chars: 50
|
||||
max_chars: 60
|
||||
must_include_primary_keyword: true
|
||||
|
||||
meta_description:
|
||||
min_chars: 150
|
||||
max_chars: 160
|
||||
must_include_primary_keyword: true
|
||||
must_include_cta: true
|
||||
|
||||
url_slug:
|
||||
max_words: 5
|
||||
format: "lowercase-with-hyphens"
|
||||
include_primary_keyword: true
|
||||
thai: "use_transliteration_or_keep_thai"
|
||||
|
||||
headings:
|
||||
h1:
|
||||
count: 1
|
||||
include_primary_keyword: true
|
||||
|
||||
h2:
|
||||
count: 4-7
|
||||
include_keyword_variations: "2-3 minimum"
|
||||
|
||||
h3:
|
||||
count: "as needed"
|
||||
proper_nesting: true
|
||||
|
||||
internal_links:
|
||||
min_count: 3
|
||||
max_count: 7
|
||||
anchor_text: "descriptive_with_keywords"
|
||||
|
||||
external_links:
|
||||
min_count: 2
|
||||
max_count: 4
|
||||
authority_sources_only: true
|
||||
|
||||
images:
|
||||
min_count: 2
|
||||
max_count: 10
|
||||
alt_text_required: true
|
||||
descriptive_filenames: true
|
||||
compressed: true
|
||||
|
||||
# Image handling for blog
|
||||
images:
|
||||
hero_image:
|
||||
required: true
|
||||
size: "1200x630"
|
||||
location: "public/images/blog/{slug}/hero.png"
|
||||
|
||||
inline_images:
|
||||
recommended_frequency: "every 300-400 words"
|
||||
size: "800x600 or 1080x1080"
|
||||
location: "public/images/blog/{slug}/"
|
||||
|
||||
generation:
|
||||
for_product_content: "browse_repo_then_image_edit"
|
||||
for_non_product: "image_generation"
|
||||
|
||||
# Content quality requirements
|
||||
quality:
|
||||
min_score: 70
|
||||
checks:
|
||||
- keyword_optimization
|
||||
- brand_voice_alignment
|
||||
- thai_formality_level
|
||||
- readability_score
|
||||
- factual_accuracy
|
||||
- actionability
|
||||
- originality
|
||||
|
||||
readability:
|
||||
thai:
|
||||
avg_sentence_length: "15-25 words"
|
||||
grade_level: "ป.6-ม.6"  # i.e. roughly grades 6-12; Thai secondary school ends at ม.6 (there is no ม.12)
|
||||
formality: "auto-detect_from_context"
|
||||
|
||||
english:
|
||||
flesch_reading_ease: "60-70"
|
||||
flesch_kincaid_grade: "8-10"
|
||||
avg_sentence_length: "15-20 words"
|
||||
|
||||
# Output configuration
|
||||
output:
|
||||
format: markdown_with_frontmatter
|
||||
encoding: "utf-8"
|
||||
line_endings: "unix"
|
||||
|
||||
astro_integration:
|
||||
content_collection: "src/content/blog"
|
||||
language_folders:
|
||||
thai: "(th)"
|
||||
english: "(en)"
|
||||
image_folder: "public/images/blog/{slug}/"
|
||||
|
||||
publishing:
|
||||
auto_publish: "optional (user_choice)"
|
||||
git_commit: true
|
||||
git_push: true
|
||||
trigger_deploy: true
|
||||
|
||||
# API readiness (for future CMS integration)
|
||||
api_ready:
|
||||
cms_compatible:
|
||||
- "WordPress"
|
||||
- "Contentful"
|
||||
- "Sanity"
|
||||
- "Strapi"
|
||||
|
||||
schema_org:
|
||||
type: "BlogPosting"
|
||||
required_fields:
|
||||
- headline
|
||||
- description
|
||||
- image
|
||||
- datePublished
|
||||
- author
|
||||
- publisher
|
||||
@@ -0,0 +1,82 @@
|
||||
# Facebook Organic Post Template
|
||||
channel: facebook
|
||||
priority: 1
|
||||
language: [th, en]
|
||||
|
||||
# Field specifications
|
||||
fields:
|
||||
primary_text:
|
||||
max_chars: 5000
|
||||
recommended_chars: 125-250
|
||||
thai_note: "Thai text may be longer due to compound words. Aim for 200-400 Thai chars."
|
||||
|
||||
headline:
|
||||
max_chars: 100
|
||||
recommended_chars: 40-60
|
||||
|
||||
description:
|
||||
max_chars: 100
|
||||
optional: true
|
||||
|
||||
cta:
|
||||
type: selection
|
||||
options_th:
|
||||
- "เรียนรู้เพิ่มเติม"
|
||||
- "สมัครเลย"
|
||||
- "ซื้อเลย"
|
||||
- "ดูรายละเอียด"
|
||||
- "ลงทะเบียน"
|
||||
- "ดาวน์โหลด"
|
||||
options_en:
|
||||
- "Learn More"
|
||||
- "Sign Up"
|
||||
- "Shop Now"
|
||||
- "See Details"
|
||||
- "Register"
|
||||
- "Download"
|
||||
|
||||
hashtags:
|
||||
recommended_count: 3-5
|
||||
max_count: 30
|
||||
thai_note: "Use both Thai and English hashtags for broader reach"
|
||||
|
||||
image:
|
||||
recommended_size: "1200x630"
|
||||
aspect_ratio: "1.91:1"
|
||||
alternative_sizes:
|
||||
- "1080x1080" # 1:1 square
|
||||
- "1080x1350" # 4:5 portrait
|
||||
formats: ["jpg", "png"]
|
||||
max_file_size: "30MB"
|
||||
text_overlay:
|
||||
recommended: true
|
||||
thai_text: true
|
||||
max_text_percent: 20
|
||||
|
||||
# Output configuration
|
||||
output:
|
||||
variations: 5
|
||||
format: json
|
||||
include_api_metadata: true
|
||||
|
||||
# Quality requirements
|
||||
quality:
|
||||
min_score: 70
|
||||
checks:
|
||||
- keyword_density
|
||||
- brand_voice_alignment
|
||||
- thai_formality_level
|
||||
- cta_clarity
|
||||
- hashtag_relevance
|
||||
|
||||
# API readiness (for future Meta Graph API integration)
|
||||
api_ready:
|
||||
platform: meta
|
||||
api_version: v18.0
|
||||
endpoint: "/act_{ad_account_id}/adcreatives"
|
||||
method: POST
|
||||
field_mapping:
|
||||
primary_text: body
|
||||
headline: title
|
||||
cta: call_to_action.type
|
||||
image: story_id or link_data.picture
|
||||
@@ -0,0 +1,121 @@
|
||||
# Facebook Ads Template
|
||||
channel: facebook_ads
|
||||
priority: 2
|
||||
language: [th, en]
|
||||
|
||||
# Field specifications (matches Meta Ads API structure)
|
||||
fields:
|
||||
primary_text:
|
||||
max_chars: 5000
|
||||
recommended_chars: 125
|
||||
thai_note: "Thai text can be slightly longer. Focus on benefit in first 125 chars."
|
||||
|
||||
headline:
|
||||
max_chars: 40
|
||||
recommended_chars: 25-30
|
||||
thai_note: "Thai characters may display differently. Test on mobile."
|
||||
|
||||
description:
|
||||
max_chars: 90
|
||||
recommended_chars: 60-75
|
||||
optional: true
|
||||
thai_note: "Additional context below headline"
|
||||
|
||||
cta:
|
||||
type: selection
|
||||
button_types:
|
||||
- "LEARN_MORE" # เรียนรู้เพิ่มเติม
|
||||
- "SHOP_NOW" # ซื้อเลย
|
||||
- "SIGN_UP" # ลงทะเบียน
|
||||
- "CONTACT_US" # ติดต่อเรา
|
||||
- "DOWNLOAD" # ดาวน์โหลด
|
||||
- "GET_QUOTE" # ขอใบเสนอราคา
|
||||
|
||||
image:
|
||||
recommended_size: "1080x1080" # 1:1 square (best for feed)
|
||||
alternative_sizes:
|
||||
- "1200x628" # 1.91:1 link
|
||||
- "1080x1920" # 9:16 stories/reels
|
||||
aspect_ratios: ["1:1", "1.91:1", "9:16", "4:5"]
|
||||
formats: ["jpg", "png", "gif", "mp4", "mov"]
|
||||
max_file_size: "30MB"
|
||||
video_specs:
|
||||
max_duration: "240 minutes"
|
||||
recommended_duration: "15-60 seconds"
|
||||
|
||||
carousel:
|
||||
enabled: true
|
||||
min_cards: 2
|
||||
max_cards: 10
|
||||
card_specs:
|
||||
image_size: "1080x1080"
|
||||
headline_max_chars: 40
|
||||
description_max_chars: 90
|
||||
|
||||
audience_targeting:
|
||||
location: ["Thailand", "specific provinces"]
|
||||
age_range: "18-65+"
|
||||
interests: []
|
||||
behaviors: []
|
||||
custom_audiences: []
|
||||
lookalike_audiences: []
|
||||
|
||||
placement:
|
||||
automatic: true
|
||||
manual_options:
|
||||
- "facebook_feed"
|
||||
- "facebook_stories"
|
||||
- "instagram_feed"
|
||||
- "instagram_stories"
|
||||
- "messenger"
|
||||
- "audience_network"
|
||||
|
||||
budget:
|
||||
type: ["daily", "lifetime"]
|
||||
currency: "THB"
|
||||
min_daily: 50
|
||||
min_lifetime: 500
|
||||
|
||||
# Output configuration
|
||||
output:
|
||||
variations: 5
|
||||
format: json
|
||||
include_api_metadata: true
|
||||
ready_for_import: true
|
||||
|
||||
# Quality requirements
|
||||
quality:
|
||||
min_score: 75
|
||||
checks:
|
||||
- keyword_density
|
||||
- brand_voice_alignment
|
||||
- thai_formality_level
|
||||
- cta_clarity
|
||||
- compliance_check
|
||||
- landing_page_relevance
|
||||
|
||||
# API readiness (for future Meta Ads API integration)
|
||||
api_ready:
|
||||
platform: meta
|
||||
api_version: v18.0
|
||||
endpoints:
|
||||
creative: "/act_{ad_account_id}/adcreatives"
|
||||
ad: "/act_{ad_account_id}/ads"
|
||||
adset: "/act_{ad_account_id}/adsets"
|
||||
campaign: "/act_{ad_account_id}/campaigns"
|
||||
|
||||
field_mapping:
|
||||
primary_text: body
|
||||
headline: title
|
||||
description: description
|
||||
cta: call_to_action.type
|
||||
image: object_story_id or link_data
|
||||
audience: targeting
|
||||
placement: placements
|
||||
budget: daily_budget or lifetime_budget
|
||||
|
||||
future_integration_notes:
|
||||
- "Add pixel_id for conversion tracking"
|
||||
- "Add conversion_event for optimization goal"
|
||||
- "Add bid_strategy for bid optimization"
|
||||
- "Add frequency_cap for reach campaigns"
|
||||
@@ -0,0 +1,158 @@
|
||||
# Google Ads Template
|
||||
channel: google_ads
|
||||
priority: 3
|
||||
language: [th, en]
|
||||
|
||||
# Field specifications (matches Google Ads API structure)
|
||||
fields:
|
||||
headlines:
|
||||
count: 15
|
||||
max_chars: 30
|
||||
thai_note: "Thai characters may display differently. Test on mobile."
|
||||
pin_options:
|
||||
enabled: true
|
||||
positions: [1, 2, 3]
|
||||
|
||||
descriptions:
|
||||
count: 4
|
||||
max_chars: 90
|
||||
thai_note: "Use full 90 chars for Thai to convey complete message"
|
||||
pin_options:
|
||||
enabled: true
|
||||
positions: [1, 2]
|
||||
|
||||
keywords:
|
||||
suggested_count: 15-20
|
||||
match_types:
|
||||
- exact: "[keyword th]"
|
||||
- phrase: '"keyword th"'
|
||||
- broad: "keyword th"
|
||||
- negative: "-keyword th"
|
||||
|
||||
negative_keywords:
|
||||
suggested_count: 10-15
|
||||
purpose: "Exclude irrelevant traffic"
|
||||
|
||||
ad_extensions:
|
||||
sitelinks:
|
||||
count: 4
|
||||
fields:
|
||||
- link_text: "25 chars"
|
||||
- description_line_1: "35 chars"
|
||||
- description_line_2: "35 chars"
|
||||
- final_url: "full URL"
|
||||
|
||||
callouts:
|
||||
count: 4
|
||||
max_chars: 25
|
||||
examples_th:
|
||||
- "รองรับภาษาไทย"
|
||||
- "ทีมซัพพอร์ท 24/7"
|
||||
- "ยกเลิกเมื่อไหร่ก็ได้"
|
||||
|
||||
structured_snippets:
|
||||
header: ["Brands", "Services", "Types"]  # choose one Google-approved snippet header
|
||||
values:
|
||||
count: 4-10
|
||||
max_chars: 25
|
||||
|
||||
call_extension:
|
||||
phone_number: "+66 XX XXX XXXX"
|
||||
country_code: "TH"
|
||||
|
||||
location_extension:
|
||||
business_name: "string"
|
||||
address: "string"
|
||||
|
||||
# Campaign settings
|
||||
campaign:
|
||||
type: "SEARCH"
|
||||
advertising_channel_sub_type: "SEARCH_STANDARD"
|
||||
bidding:
|
||||
strategy: "MAXIMIZE_CLICKS"
|
||||
target_cpa: null
|
||||
target_roas: null
|
||||
budget:
|
||||
type: "DAILY"
|
||||
amount: 1000 # THB
|
||||
delivery_method: "STANDARD"
|
||||
networks:
|
||||
google_search: true
|
||||
search_partners: true
|
||||
display_network: false
|
||||
location_targeting:
|
||||
- "Thailand"
|
||||
- optional: specific provinces
|
||||
language_targeting:
|
||||
- "Thai"
|
||||
- "English"
|
||||
|
||||
# Audience signals (for Performance Max campaigns)
|
||||
audience_signals:
|
||||
custom_segments:
|
||||
- based_on: "keywords or URLs"
|
||||
interest_categories: []
|
||||
remarketing_lists: []
|
||||
customer_match_lists: []
|
||||
|
||||
# Output configuration
|
||||
output:
|
||||
variations: 3 # Complete RSA variations
|
||||
format: json
|
||||
include_api_metadata: true
|
||||
ready_for_import: true
|
||||
|
||||
# Quality requirements
|
||||
quality:
|
||||
min_score: 75
|
||||
checks:
|
||||
- keyword_relevance
|
||||
- headline_diversity
|
||||
- cta_clarity
|
||||
- landing_page_relevance
|
||||
- policy_compliance
|
||||
- thai_language_quality
|
||||
|
||||
# API readiness (for future Google Ads API integration)
|
||||
api_ready:
|
||||
platform: google
|
||||
api_version: v15  # Google Ads API versions are unscoped majors (v15), not v15.0
|
||||
service: "GoogleAdsService"
|
||||
endpoint: "/google.ads.googleads.v15.services/GoogleAdsService:Mutate"
|
||||
|
||||
resource_hierarchy:
|
||||
- customer
|
||||
- campaign
|
||||
- ad_group
|
||||
- ad_group_ad
|
||||
- ad (RESPONSIVE_SEARCH_AD)
|
||||
|
||||
field_mapping:
|
||||
headlines: responsive_search_ad.headlines
|
||||
descriptions: responsive_search_ad.descriptions
|
||||
final_url: responsive_search_ad.final_urls
|
||||
display_path: responsive_search_ad.path1, path2
|
||||
keywords: ad_group_criterion
|
||||
bid_modifier: ad_group_criterion.cpc_bid_modifier
|
||||
|
||||
future_integration_notes:
|
||||
- "Add conversion_tracking_setup"
|
||||
- "Add value_track_parameters"
|
||||
- "Add ad_schedule_bid_modifiers"
|
||||
- "Add device_bid_modifiers"
|
||||
- "Add location_bid_modifiers"
|
||||
- "Setup enhanced conversions"
|
||||
|
||||
# Compliance
|
||||
compliance:
|
||||
google_ads_policies:
|
||||
- "No misleading claims"
|
||||
- "No prohibited content"
|
||||
- "Trademark compliance"
|
||||
- "Editorial requirements"
|
||||
- "Destination requirements"
|
||||
thailand_specific:
|
||||
- "FDA approval for health products"
|
||||
- "No gambling content"
|
||||
- "No adult content"
|
||||
- "Consumer Protection Board compliance"
|
||||
@@ -0,0 +1,197 @@
|
||||
# X (Twitter) Thread Template
|
||||
channel: x_twitter
|
||||
priority: 5
|
||||
language: [th, en]
|
||||
|
||||
# Thread structure
|
||||
structure:
|
||||
thread_length:
|
||||
min_tweets: 5
|
||||
max_tweets: 10
|
||||
optimal_tweets: 7-8
|
||||
|
||||
tweet_types:
|
||||
- hook_tweet:
|
||||
position: 1
|
||||
max_chars: 280
|
||||
purpose: "Grab attention, promise value"
|
||||
thai_note: "Thai may need more chars due to compound words"
|
||||
|
||||
- context_tweet:
|
||||
position: 2
|
||||
max_chars: 280
|
||||
purpose: "Set context, explain why this matters"
|
||||
|
||||
- body_tweets:
|
||||
position: "3 to (n-2)"
|
||||
count: "2-6"
|
||||
max_chars: 280
|
||||
purpose: "Deliver main content, one idea per tweet"
|
||||
|
||||
- summary_tweet:
|
||||
position: "n-1"
|
||||
max_chars: 280
|
||||
purpose: "Summarize key points"
|
||||
|
||||
- cta_tweet:
|
||||
position: n
|
||||
max_chars: 280
|
||||
purpose: "Call-to-action, engagement question"
|
||||
|
||||
# Tweet specifications
|
||||
tweet:
|
||||
max_chars: 280
|
||||
thai_considerations:
|
||||
- "Thai characters count as 1 char each"
|
||||
- "No spaces between words - can pack more meaning"
|
||||
- "Recommended: 200-250 Thai chars for readability"
|
||||
|
||||
hashtags:
|
||||
recommended_count: 2-3
|
||||
max_count: 5
|
||||
placement: "end_of_tweet"
|
||||
thai_english_mix: true
|
||||
|
||||
emojis:
|
||||
recommended: true
|
||||
per_tweet: "1-3"
|
||||
purpose: "Visual break, emphasis"
|
||||
|
||||
mentions:
|
||||
max_recommended: 2
|
||||
placement: "end_of_tweet"
|
||||
|
||||
media:
|
||||
images:
|
||||
count: "1-4 per tweet"
|
||||
size: "1200x675 (16:9) or 1080x1080 (1:1)"
|
||||
|
||||
video:
|
||||
max_duration: "2min 20sec"
|
||||
recommended: "30-90sec"
|
||||
size: "1280x720 or 1920x1080"
|
||||
|
||||
thread_title:
|
||||
optional: true
|
||||
format: "image_with_text"
|
||||
purpose: "Hook before first tweet"
|
||||
|
||||
# Hook formulas
|
||||
hooks:
|
||||
curiosity:
|
||||
- "I was wrong about [common belief]."
|
||||
- "The real reason [outcome] happens isn't what you think."
|
||||
- "[Impressive result] — and it only took [short time]."
|
||||
|
||||
story:
|
||||
- "Last week, [unexpected thing] happened."
|
||||
- "3 years ago, I [past state]. Today, [current state]."
|
||||
|
||||
value:
|
||||
- "How to [outcome] (without [pain]):"
|
||||
- "[Number] [things] that [result]:"
|
||||
- "Stop [mistake]. Do this instead:"
|
||||
|
||||
contrarian:
|
||||
- "Unpopular opinion: [bold statement]"
|
||||
- "[Common advice] is wrong. Here's why:"
|
||||
|
||||
# Engagement optimization
|
||||
engagement:
|
||||
best_posting_times:
|
||||
thailand:
|
||||
- "7:00-9:00 (morning commute)"
|
||||
- "12:00-13:00 (lunch break)"
|
||||
- "19:00-21:00 (evening)"
|
||||
global:
|
||||
- "9:00-12:00 EST"
|
||||
|
||||
posting_frequency:
|
||||
threads_per_week: "2-4"
|
||||
replies_per_day: "10-20"
|
||||
|
||||
follow_up:
|
||||
reply_to_comments: true
|
||||
pin_best_thread: true
|
||||
cross_promote: true
|
||||
|
||||
# Output configuration
|
||||
output:
|
||||
variations: 3 # Complete thread variations
|
||||
format: json
|
||||
include_thread_title: true
|
||||
include_visual_suggestions: true
|
||||
|
||||
# Quality requirements
|
||||
quality:
|
||||
min_score: 70
|
||||
checks:
|
||||
- hook_strength
|
||||
- value_density
|
||||
- clarity
|
||||
- engagement_potential
|
||||
- thai_language_quality
|
||||
- brand_voice_alignment
|
||||
|
||||
# API readiness (for future Twitter API v2 integration)
|
||||
api_ready:
|
||||
platform: twitter
|
||||
api_version: "2.0"
|
||||
endpoint: "/2/tweets"
|
||||
method: POST
|
||||
|
||||
field_mapping:
|
||||
text: tweet.text
|
||||
media: tweet.media.media_keys
|
||||
reply_settings: tweet.reply_settings
|
||||
thread: "chain tweets via reply.in_reply_to_tweet_id"
|
||||
|
||||
future_integration_notes:
|
||||
- "Add media upload via POST /2/media"
|
||||
- "Use media_keys to attach to tweet"
|
||||
- "For threads: chain tweets with reply.in_reply_to_tweet_id"
|
||||
- "Add poll creation support"
|
||||
- "Add quote_tweet support"
|
||||
- "Schedule tweets with scheduled_at"
|
||||
|
||||
# Thread templates
|
||||
templates:
|
||||
how_to_thread:
|
||||
structure:
|
||||
- "Hook: How to [outcome] without [pain]"
|
||||
- "Context: Why this matters"
|
||||
- "Step 1"
|
||||
- "Step 2"
|
||||
- "Step 3"
|
||||
- "Step 4"
|
||||
- "Summary + CTA"
|
||||
|
||||
list_thread:
|
||||
structure:
|
||||
- "Hook: [Number] [things] that [result]"
|
||||
- "Context: Why these matter"
|
||||
- "Item 1 + explanation"
|
||||
- "Item 2 + explanation"
|
||||
- "Item 3 + explanation"
|
||||
- "Item 4 + explanation"
|
||||
- "Item 5 + summary"
|
||||
|
||||
story_thread:
|
||||
structure:
|
||||
- "Hook: Story setup"
|
||||
- "Background context"
|
||||
- "Challenge/problem"
|
||||
- "Action taken"
|
||||
- "Result"
|
||||
- "Lesson learned"
|
||||
- "CTA for engagement"
|
||||
|
||||
contrarian_thread:
|
||||
structure:
|
||||
- "Hook: Unpopular opinion"
|
||||
- "Common belief"
|
||||
- "Why it's wrong"
|
||||
- "Better alternative"
|
||||
- "Evidence/examples"
|
||||
- "Actionable advice"
|
||||
- "Question for engagement"
|
||||
Reference in New Issue
Block a user