Base code

This commit is contained in:
Kunthawat Greethong
2026-01-08 22:39:53 +07:00
parent 697115c61a
commit c35fa52117
2169 changed files with 626670 additions and 0 deletions

View File

@@ -0,0 +1,17 @@
-- Migration: add EXA usage tracking to plans and usage summaries.

-- Per-plan EXA call allowance.
ALTER TABLE subscription_plans
ADD COLUMN exa_calls_limit INT DEFAULT 0;

-- Per-summary EXA call counter.
ALTER TABLE usage_summaries
ADD COLUMN exa_calls INT DEFAULT 0;

-- NUMERIC instead of FLOAT: exa_cost is a monetary amount and binary
-- floating point cannot represent decimal currency values exactly
-- (accumulating per-call costs in FLOAT drifts).
ALTER TABLE usage_summaries
ADD COLUMN exa_cost NUMERIC(12, 6) DEFAULT 0.0;

-- Seed per-tier limits for existing plans.
-- NOTE(review): confirm 0 for 'enterprise' means "unlimited" rather than
-- "no access" -- the application code that enforces the limit decides this.
UPDATE subscription_plans SET exa_calls_limit = 100 WHERE tier = 'free';
UPDATE subscription_plans SET exa_calls_limit = 500 WHERE tier = 'basic';
UPDATE subscription_plans SET exa_calls_limit = 2000 WHERE tier = 'pro';
UPDATE subscription_plans SET exa_calls_limit = 0 WHERE tier = 'enterprise';

View File

@@ -0,0 +1,27 @@
-- Migration: Add user_business_info table
-- Description: Creates table for storing business information when users don't have websites
-- Date: 2024-01-XX
-- NOTE(review): SQLite dialect (AUTOINCREMENT, CREATE TRIGGER IF NOT EXISTS) --
-- unlike the PostgreSQL migrations elsewhere in this change set.
CREATE TABLE IF NOT EXISTS user_business_info (
id INTEGER PRIMARY KEY AUTOINCREMENT,
-- NOTE(review): user_id is nullable and carries no FOREIGN KEY constraint,
-- so orphaned or ownerless rows are possible -- confirm this is intended.
user_id INTEGER,
business_description TEXT NOT NULL,
industry VARCHAR(100),
target_audience TEXT,
business_goals TEXT,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-- Kept current by the trigger below; defaults to insert time.
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);
-- Create index for faster user lookups
CREATE INDEX IF NOT EXISTS idx_user_business_info_user_id ON user_business_info(user_id);
-- Add trigger to automatically update updated_at timestamp
-- (SQLite has no ON UPDATE clause, so a row-level AFTER UPDATE trigger
-- rewrites updated_at on the row that changed).
CREATE TRIGGER IF NOT EXISTS update_user_business_info_timestamp
AFTER UPDATE ON user_business_info
FOR EACH ROW
BEGIN
UPDATE user_business_info
SET updated_at = CURRENT_TIMESTAMP
WHERE id = NEW.id;
END;

View File

@@ -0,0 +1,26 @@
-- Migration: Add persona_data table for onboarding step 4
-- Created: 2025-10-10
-- Description: Adds table to store persona generation data from onboarding step 4
-- PostgreSQL dialect (SERIAL, JSONB, COMMENT ON).
CREATE TABLE IF NOT EXISTS persona_data (
id SERIAL PRIMARY KEY,
-- Rows are deleted automatically when the parent onboarding session goes away
-- (ON DELETE CASCADE below). No UNIQUE constraint, so a session may own
-- several persona_data rows.
session_id INTEGER NOT NULL,
core_persona JSONB,
platform_personas JSONB,
quality_metrics JSONB,
selected_platforms JSONB,
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
-- NOTE(review): no trigger maintains updated_at in this migration; it stays
-- at its insert-time default unless the application sets it -- confirm.
updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (session_id) REFERENCES onboarding_sessions(id) ON DELETE CASCADE
);
-- Add index for better query performance
CREATE INDEX IF NOT EXISTS idx_persona_data_session_id ON persona_data(session_id);
CREATE INDEX IF NOT EXISTS idx_persona_data_created_at ON persona_data(created_at);
-- Add comment to table
COMMENT ON TABLE persona_data IS 'Stores persona generation data from onboarding step 4';
COMMENT ON COLUMN persona_data.core_persona IS 'Core persona data (demographics, psychographics, etc.)';
COMMENT ON COLUMN persona_data.platform_personas IS 'Platform-specific personas (LinkedIn, Twitter, etc.)';
COMMENT ON COLUMN persona_data.quality_metrics IS 'Quality assessment metrics';
COMMENT ON COLUMN persona_data.selected_platforms IS 'Array of selected platforms';

View File

@@ -0,0 +1,20 @@
-- Migration: Add user_id column to task_execution_logs for user isolation
-- Date: 2025-01-XX
-- Purpose: Enable user isolation tracking in scheduler task execution logs
-- Add user_id column (nullable for backward compatibility with existing records)
-- NOTE(review): unlike the guarded CREATE INDEX IF NOT EXISTS statements
-- below, this ADD COLUMN is unguarded and will fail if the migration is
-- re-run -- confirm the migration runner tracks applied migrations.
ALTER TABLE task_execution_logs
ADD COLUMN user_id INTEGER NULL;
-- Create index for efficient user filtering and queries
CREATE INDEX IF NOT EXISTS idx_task_execution_logs_user_id
ON task_execution_logs(user_id);
-- Create composite index for common query patterns (user_id + status + execution_date)
CREATE INDEX IF NOT EXISTS idx_task_execution_logs_user_status_date
ON task_execution_logs(user_id, status, execution_date);
-- Note: Backfilling existing records would require joining with monitoring_tasks
-- and enhanced_content_strategies tables. This can be done in a separate migration
-- or during a maintenance window. For now, existing records will have user_id = NULL.

View File

@@ -0,0 +1,149 @@
-- Blog Writer Task Persistence Tables
-- Creates tables for storing task state, progress, and metrics
-- PostgreSQL dialect (JSONB, SERIAL, TIMESTAMP WITH TIME ZONE, plpgsql below).
-- Tasks table - stores main task information; child tables (progress,
-- metrics, recovery) reference id with ON DELETE CASCADE.
CREATE TABLE IF NOT EXISTS blog_writer_tasks (
id VARCHAR(36) PRIMARY KEY,
user_id VARCHAR(36) NOT NULL,
-- NOTE(review): allowed values listed in the inline comments are not
-- enforced by a CHECK constraint -- the application is the only guard.
task_type VARCHAR(50) NOT NULL, -- 'research', 'outline', 'content', 'seo', 'medium_generation'
status VARCHAR(20) NOT NULL DEFAULT 'pending', -- 'pending', 'running', 'completed', 'failed', 'cancelled'
request_data JSONB, -- Original request parameters
result_data JSONB, -- Final result data
error_data JSONB, -- Error information if failed
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
-- Maintained by the update trigger defined later in this file.
updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
completed_at TIMESTAMP WITH TIME ZONE,
correlation_id VARCHAR(36), -- For request tracing
operation VARCHAR(100), -- Specific operation being performed
retry_count INTEGER DEFAULT 0, -- Number of retry attempts
max_retries INTEGER DEFAULT 3, -- Maximum retry attempts allowed
priority INTEGER DEFAULT 0, -- Task priority (higher = more important)
metadata JSONB -- Additional metadata
);
-- Task progress table - stores progress updates
-- Append-only log of progress events per task; deleted with the parent task.
CREATE TABLE IF NOT EXISTS blog_writer_task_progress (
id SERIAL PRIMARY KEY,
task_id VARCHAR(36) NOT NULL REFERENCES blog_writer_tasks(id) ON DELETE CASCADE,
timestamp TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
message TEXT NOT NULL,
percentage DECIMAL(5,2) DEFAULT 0.00, -- 0.00 to 100.00
progress_type VARCHAR(50) DEFAULT 'info', -- 'info', 'warning', 'error', 'success'
metadata JSONB -- Additional progress metadata
);
-- Task metrics table - stores performance metrics
-- One row per timed operation within a task; deleted with the parent task.
CREATE TABLE IF NOT EXISTS blog_writer_task_metrics (
id SERIAL PRIMARY KEY,
task_id VARCHAR(36) NOT NULL REFERENCES blog_writer_tasks(id) ON DELETE CASCADE,
operation VARCHAR(100) NOT NULL,
duration_ms INTEGER NOT NULL,
token_usage JSONB, -- Token usage statistics
api_calls INTEGER DEFAULT 0,
cache_hits INTEGER DEFAULT 0,
cache_misses INTEGER DEFAULT 0,
error_count INTEGER DEFAULT 0,
created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
metadata JSONB -- Additional metrics
);
-- Task recovery table - stores recovery information
-- Audit trail of recovery attempts (e.g. after a server restart);
-- deleted with the parent task.
CREATE TABLE IF NOT EXISTS blog_writer_task_recovery (
id SERIAL PRIMARY KEY,
task_id VARCHAR(36) NOT NULL REFERENCES blog_writer_tasks(id) ON DELETE CASCADE,
recovery_reason VARCHAR(100) NOT NULL, -- 'server_restart', 'timeout', 'error'
recovery_action VARCHAR(100) NOT NULL, -- 'resume', 'retry', 'fail'
checkpoint_data JSONB, -- State at recovery point
recovered_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
recovery_successful BOOLEAN DEFAULT FALSE,
metadata JSONB
);
-- Indexes for performance
-- blog_writer_tasks: lookups by owner, state, age, type, and trace id.
CREATE INDEX IF NOT EXISTS idx_blog_writer_tasks_user_id ON blog_writer_tasks(user_id);
CREATE INDEX IF NOT EXISTS idx_blog_writer_tasks_status ON blog_writer_tasks(status);
CREATE INDEX IF NOT EXISTS idx_blog_writer_tasks_created_at ON blog_writer_tasks(created_at);
CREATE INDEX IF NOT EXISTS idx_blog_writer_tasks_task_type ON blog_writer_tasks(task_type);
CREATE INDEX IF NOT EXISTS idx_blog_writer_tasks_correlation_id ON blog_writer_tasks(correlation_id);
-- Child tables: the task_id FKs are not indexed automatically in PostgreSQL,
-- so these also speed up the ON DELETE CASCADE path.
CREATE INDEX IF NOT EXISTS idx_blog_writer_task_progress_task_id ON blog_writer_task_progress(task_id);
CREATE INDEX IF NOT EXISTS idx_blog_writer_task_progress_timestamp ON blog_writer_task_progress(timestamp);
CREATE INDEX IF NOT EXISTS idx_blog_writer_task_metrics_task_id ON blog_writer_task_metrics(task_id);
CREATE INDEX IF NOT EXISTS idx_blog_writer_task_metrics_operation ON blog_writer_task_metrics(operation);
CREATE INDEX IF NOT EXISTS idx_blog_writer_task_metrics_created_at ON blog_writer_task_metrics(created_at);
CREATE INDEX IF NOT EXISTS idx_blog_writer_task_recovery_task_id ON blog_writer_task_recovery(task_id);
CREATE INDEX IF NOT EXISTS idx_blog_writer_task_recovery_recovered_at ON blog_writer_task_recovery(recovered_at);
-- Row-level trigger function: stamps updated_at with the current time on
-- every UPDATE of blog_writer_tasks.
CREATE OR REPLACE FUNCTION update_blog_writer_tasks_updated_at()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;  -- unquoted form; quoted 'plpgsql' is deprecated syntax

-- Drop-then-create keeps this migration idempotent, matching the
-- IF NOT EXISTS convention used for the tables and indexes above
-- (a bare CREATE TRIGGER fails on re-run once the trigger exists).
DROP TRIGGER IF EXISTS update_blog_writer_tasks_updated_at ON blog_writer_tasks;
CREATE TRIGGER update_blog_writer_tasks_updated_at
BEFORE UPDATE ON blog_writer_tasks
FOR EACH ROW
EXECUTE FUNCTION update_blog_writer_tasks_updated_at();
-- Deletes terminal-state tasks (completed/failed/cancelled) created more than
-- 7 days ago and returns the number of rows removed. Progress, metrics, and
-- recovery rows are removed with them via their ON DELETE CASCADE task_id FKs.
-- NOTE(review): the cutoff is created_at, not completed_at, so a long-running
-- task can be reaped shortly after finishing -- confirm that is acceptable.
CREATE OR REPLACE FUNCTION cleanup_old_blog_writer_tasks()
RETURNS INTEGER AS $$
DECLARE
    deleted_count INTEGER;
BEGIN
    DELETE FROM blog_writer_tasks
    WHERE status IN ('completed', 'failed', 'cancelled')
      AND created_at < NOW() - INTERVAL '7 days';
    GET DIAGNOSTICS deleted_count = ROW_COUNT;
    RETURN deleted_count;
END;
$$ LANGUAGE plpgsql;  -- unquoted form; quoted 'plpgsql' is deprecated syntax
-- Analytics view: one row per (task_type, status) over the last 30 days.
-- task_count and the AVG timings are per (task_type, status) group; the
-- *_count and success_rate columns are computed across ALL statuses of the
-- task_type using window aggregates. The previous per-group versions were
-- degenerate: since the view also groups by status, each group contains a
-- single status, so completed_count/failed_count/running_count were either
-- 0 or task_count and success_rate_percentage was always 0.00 or 100.00.
CREATE OR REPLACE VIEW blog_writer_task_analytics AS
SELECT
    task_type,
    status,
    COUNT(*) AS task_count,
    -- completed_at is NULL for unfinished tasks; AVG ignores those rows.
    AVG(EXTRACT(EPOCH FROM (completed_at - created_at))) AS avg_duration_seconds,
    AVG(EXTRACT(EPOCH FROM (updated_at - created_at))) AS avg_processing_time_seconds,
    SUM(COUNT(*) FILTER (WHERE status = 'completed'))
        OVER (PARTITION BY task_type) AS completed_count,
    SUM(COUNT(*) FILTER (WHERE status = 'failed'))
        OVER (PARTITION BY task_type) AS failed_count,
    SUM(COUNT(*) FILTER (WHERE status = 'running'))
        OVER (PARTITION BY task_type) AS running_count,
    ROUND(
        SUM(COUNT(*) FILTER (WHERE status = 'completed')) OVER (PARTITION BY task_type)
            * 100.0
            / SUM(COUNT(*)) OVER (PARTITION BY task_type),
        2
    ) AS success_rate_percentage
FROM blog_writer_tasks
WHERE created_at >= NOW() - INTERVAL '30 days'
GROUP BY task_type, status
ORDER BY task_type, status;
-- Create a view for performance metrics
-- Per (task_type, operation) rollup of operation metrics for tasks created
-- in the last 7 days. LEFT JOIN keeps task groups that have no metric rows:
-- for those, metric_count is 0 and the AVG/MIN/MAX/SUM columns are NULL
-- (SQL aggregates over zero non-null inputs yield NULL).
CREATE OR REPLACE VIEW blog_writer_performance_metrics AS
SELECT
t.task_type,
-- NOTE(review): this is the task's operation column, not m.operation --
-- metric rows for other operations within the task are still counted here;
-- confirm grouping by t.operation rather than m.operation is intended.
t.operation,
COUNT(m.id) as metric_count,
AVG(m.duration_ms) as avg_duration_ms,
MIN(m.duration_ms) as min_duration_ms,
MAX(m.duration_ms) as max_duration_ms,
SUM(m.api_calls) as total_api_calls,
SUM(m.cache_hits) as total_cache_hits,
SUM(m.cache_misses) as total_cache_misses,
-- NULLIF guards the division when a group has no cache activity at all.
ROUND(
SUM(m.cache_hits) * 100.0 / NULLIF(SUM(m.cache_hits + m.cache_misses), 0),
2
) as cache_hit_rate_percentage
FROM blog_writer_tasks t
LEFT JOIN blog_writer_task_metrics m ON t.id = m.task_id
WHERE t.created_at >= NOW() - INTERVAL '7 days'
GROUP BY t.task_type, t.operation
ORDER BY t.task_type, t.operation;

View File

@@ -0,0 +1,16 @@
-- Migration: Update onboarding_sessions.user_id from INTEGER to STRING
-- This migration updates the user_id column to support Clerk user IDs (strings)

-- Step 1: Alter the user_id column type from INTEGER to VARCHAR(255).
-- The USING clause is required: PostgreSQL has no automatic (assignment)
-- cast from integer to varchar, so a bare ALTER COLUMN ... TYPE fails with
-- "column cannot be cast automatically to type character varying".
ALTER TABLE onboarding_sessions
ALTER COLUMN user_id TYPE VARCHAR(255) USING user_id::VARCHAR(255);

-- Step 2: Create an index on user_id for faster lookups
CREATE INDEX IF NOT EXISTS idx_onboarding_sessions_user_id ON onboarding_sessions(user_id);

-- Note: This migration assumes no existing data needs to be preserved
-- If you have existing data with integer user_ids, you may need to:
-- 1. Backup the data first
-- 2. Clear the table or convert the integers to strings
-- 3. Then apply this migration
-- (the USING cast above performs the integer-to-string conversion in place,
-- so step 2 of the note is only needed if old integer IDs must not coexist
-- with Clerk string IDs)