Base code
This commit is contained in:
1
backend/models/__init__.py
Normal file
1
backend/models/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# Models package for Alwrity
|
||||
102
backend/models/api_monitoring.py
Normal file
102
backend/models/api_monitoring.py
Normal file
@@ -0,0 +1,102 @@
|
||||
"""
|
||||
API Monitoring Database Models
|
||||
Persistent storage for API monitoring statistics.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Float, Boolean, JSON, Index, Text
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from datetime import datetime
|
||||
import json
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class APIRequest(Base):
    """Store individual API requests for monitoring.

    One row per handled HTTP request; written by the monitoring layer and
    queried for per-request drill-down and aggregate reporting.
    """

    __tablename__ = "api_requests"

    id = Column(Integer, primary_key=True)
    # Naive UTC timestamp of when the request was recorded.
    timestamp = Column(DateTime, default=datetime.utcnow, nullable=False)
    # Request path, e.g. "/api/endpoint".
    path = Column(String(500), nullable=False)
    # HTTP verb (GET, POST, ...).
    method = Column(String(10), nullable=False)
    status_code = Column(Integer, nullable=False)
    duration = Column(Float, nullable=False)  # Response time in seconds
    user_id = Column(String(50), nullable=True)
    # True/False when a cache was consulted for this request, NULL otherwise.
    cache_hit = Column(Boolean, nullable=True)
    # Sizes in bytes — presumably taken from Content-Length headers; TODO confirm.
    request_size = Column(Integer, nullable=True)
    response_size = Column(Integer, nullable=True)
    user_agent = Column(String(500), nullable=True)
    # 45 chars is enough for full IPv6 (including IPv4-mapped form).
    ip_address = Column(String(45), nullable=True)

    # Indexes for fast queries
    # NOTE(review): 'idx_timestamp' is also used by other tables in this
    # module; index names share one namespace per schema on some backends.
    __table_args__ = (
        Index('idx_timestamp', 'timestamp'),
        Index('idx_path_method', 'path', 'method'),
        Index('idx_status_code', 'status_code'),
        Index('idx_user_id', 'user_id'),
    )
|
||||
|
||||
class APIEndpointStats(Base):
    """Aggregated statistics per endpoint.

    One row per "METHOD /path" pair, updated incrementally as requests
    arrive; avg/min/max duration and cache-hit counters are rollups over
    all requests for that endpoint.
    """

    __tablename__ = "api_endpoint_stats"

    id = Column(Integer, primary_key=True)
    # Endpoint key in "METHOD /path" form, one row per endpoint.
    endpoint = Column(String(500), nullable=False, unique=True)  # "GET /api/endpoint"
    total_requests = Column(Integer, default=0)
    total_errors = Column(Integer, default=0)
    # Running sum of durations (seconds); avg_duration is presumably
    # total_duration / total_requests maintained by the writer — confirm.
    total_duration = Column(Float, default=0.0)
    avg_duration = Column(Float, default=0.0)
    min_duration = Column(Float, nullable=True)
    max_duration = Column(Float, nullable=True)
    last_called = Column(DateTime, nullable=True)
    cache_hits = Column(Integer, default=0)
    cache_misses = Column(Integer, default=0)
    cache_hit_rate = Column(Float, default=0.0)
    # Refreshed automatically on every UPDATE.
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # NOTE(review): 'idx_endpoint' is redundant with unique=True above,
    # which already creates a unique index on most backends.
    __table_args__ = (
        Index('idx_endpoint', 'endpoint'),
        Index('idx_total_requests', 'total_requests'),
        Index('idx_avg_duration', 'avg_duration'),
    )
|
||||
|
||||
class SystemHealth(Base):
    """System health snapshots.

    Periodic aggregate snapshot of overall API health (request volume,
    error rate, latency, cache effectiveness) labelled with a coarse
    status so dashboards can trend health over time.
    """

    __tablename__ = "system_health"

    id = Column(Integer, primary_key=True)
    # Naive UTC snapshot time.
    timestamp = Column(DateTime, default=datetime.utcnow, nullable=False)
    status = Column(String(20), nullable=False)  # healthy, warning, critical
    total_requests = Column(Integer, default=0)
    total_errors = Column(Integer, default=0)
    error_rate = Column(Float, default=0.0)
    avg_response_time = Column(Float, default=0.0)
    cache_hit_rate = Column(Float, default=0.0)
    active_endpoints = Column(Integer, default=0)
    metrics = Column(JSON, nullable=True)  # Additional metrics

    # Index names are table-prefixed: index names share a single namespace
    # per schema on several backends (e.g. PostgreSQL), and the previous
    # 'idx_timestamp' collided with the identically named index on
    # api_requests in the same metadata, which breaks metadata.create_all().
    __table_args__ = (
        Index('idx_system_health_timestamp', 'timestamp'),
        Index('idx_system_health_status', 'status'),
    )
|
||||
|
||||
class CachePerformance(Base):
    """Cache performance metrics.

    Point-in-time hit/miss counters per cache backend, used to track
    cache effectiveness over time.
    """

    __tablename__ = "cache_performance"

    id = Column(Integer, primary_key=True)
    # Naive UTC sample time.
    timestamp = Column(DateTime, default=datetime.utcnow, nullable=False)
    cache_type = Column(String(50), nullable=False)  # "comprehensive_user_data", "redis", etc.
    hits = Column(Integer, default=0)
    misses = Column(Integer, default=0)
    # Presumably hits / total_requests as a ratio or percentage — the
    # writer defines the scale; confirm before graphing.
    hit_rate = Column(Float, default=0.0)
    avg_response_time = Column(Float, default=0.0)
    total_requests = Column(Integer, default=0)

    # Table-prefixed index names: the previous 'idx_timestamp' duplicated
    # the index name used by api_requests in the same metadata; duplicate
    # index names fail on backends with a schema-wide index namespace
    # (e.g. PostgreSQL).
    __table_args__ = (
        Index('idx_cache_performance_timestamp', 'timestamp'),
        Index('idx_cache_performance_cache_type', 'cache_type'),
    )
|
||||
209
backend/models/bing_analytics_models.py
Normal file
209
backend/models/bing_analytics_models.py
Normal file
@@ -0,0 +1,209 @@
|
||||
"""
|
||||
Bing Analytics Database Models
|
||||
|
||||
Models for storing and analyzing Bing Webmaster Tools analytics data
|
||||
including raw query data, aggregated metrics, and trend analysis.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Float, DateTime, Text, Boolean, Index
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.sql import func
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List, Optional
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
|
||||
class BingQueryStats(Base):
    """Raw query statistics from Bing Webmaster Tools API

    One row per (user, site, query, query_date) observation as returned
    by the API, enriched with locally derived fields (ctr, query_length,
    is_brand_query, category).
    """
    __tablename__ = 'bing_query_stats'

    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    site_url = Column(String(500), nullable=False, index=True)

    # Query data
    # NOTE(review): indexing a Text column requires a prefix length on MySQL.
    query = Column(Text, nullable=False, index=True)
    clicks = Column(Integer, default=0)
    impressions = Column(Integer, default=0)
    # -1 is the "no data" sentinel used by the API for positions.
    avg_click_position = Column(Float, default=-1)
    avg_impression_position = Column(Float, default=-1)
    ctr = Column(Float, default=0)  # Calculated: clicks/impressions * 100

    # Date information
    query_date = Column(DateTime, nullable=False, index=True)
    # NOTE(review): index=True plus the explicit Index('idx_collected_at')
    # below creates two indexes on the same column.
    collected_at = Column(DateTime, default=func.now(), index=True)

    # Additional metadata
    query_length = Column(Integer, default=0)  # For analysis
    is_brand_query = Column(Boolean, default=False)  # Contains brand name
    category = Column(String(100), default='general')  # ai_writing, business, etc.

    # Indexes for performance
    __table_args__ = (
        Index('idx_user_site_date', 'user_id', 'site_url', 'query_date'),
        Index('idx_query_performance', 'query', 'clicks', 'impressions'),
        Index('idx_collected_at', 'collected_at'),
    )
|
||||
|
||||
|
||||
class BingDailyMetrics(Base):
    """Daily aggregated metrics for Bing analytics

    One row per (user, site, metric_date); rollups over BingQueryStats for
    that day, plus JSON-encoded top-performer lists and day-over-day deltas.
    """
    __tablename__ = 'bing_daily_metrics'

    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    site_url = Column(String(500), nullable=False, index=True)

    # Date
    metric_date = Column(DateTime, nullable=False, index=True)
    collected_at = Column(DateTime, default=func.now())

    # Aggregated metrics
    total_clicks = Column(Integer, default=0)
    total_impressions = Column(Integer, default=0)
    total_queries = Column(Integer, default=0)
    avg_ctr = Column(Float, default=0)
    avg_position = Column(Float, default=0)

    # Top performing queries (JSON)
    top_queries = Column(Text)  # JSON string of top 10 queries
    top_clicks = Column(Text)  # JSON string of queries with most clicks
    top_impressions = Column(Text)  # JSON string of queries with most impressions

    # Trend indicators (compared to previous day)
    clicks_change = Column(Float, default=0)  # Percentage change
    impressions_change = Column(Float, default=0)
    ctr_change = Column(Float, default=0)

    # Indexes
    __table_args__ = (
        Index('idx_user_site_metric_date', 'user_id', 'site_url', 'metric_date'),
    )
|
||||
|
||||
|
||||
class BingTrendAnalysis(Base):
    """Weekly/Monthly trend analysis data

    One row per (user, site, period); totals for the period plus growth
    vs the previous period and JSON-encoded category/query breakdowns.
    """
    __tablename__ = 'bing_trend_analysis'

    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    site_url = Column(String(500), nullable=False, index=True)

    # Period information
    period_start = Column(DateTime, nullable=False, index=True)
    period_end = Column(DateTime, nullable=False, index=True)
    period_type = Column(String(20), nullable=False)  # 'weekly', 'monthly'

    # Trend metrics
    total_clicks = Column(Integer, default=0)
    total_impressions = Column(Integer, default=0)
    total_queries = Column(Integer, default=0)
    avg_ctr = Column(Float, default=0)
    avg_position = Column(Float, default=0)

    # Growth indicators
    clicks_growth = Column(Float, default=0)  # vs previous period
    impressions_growth = Column(Float, default=0)
    ctr_growth = Column(Float, default=0)

    # Top categories and queries
    top_categories = Column(Text)  # JSON of category performance
    trending_queries = Column(Text)  # JSON of trending queries
    declining_queries = Column(Text)  # JSON of declining queries

    created_at = Column(DateTime, default=func.now(), index=True)

    # Indexes
    __table_args__ = (
        Index('idx_user_site_period', 'user_id', 'site_url', 'period_type', 'period_start'),
    )
|
||||
|
||||
|
||||
class BingAlertRules(Base):
    """Alert rules for Bing analytics monitoring

    A user-configured threshold rule; the monitoring job compares the
    metric implied by alert_type against threshold_value using
    comparison_operator and records triggers in BingAlertHistory.
    """
    __tablename__ = 'bing_alert_rules'

    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    site_url = Column(String(500), nullable=False, index=True)

    # Alert configuration
    rule_name = Column(String(255), nullable=False)
    alert_type = Column(String(50), nullable=False)  # 'ctr_drop', 'query_spike', 'position_drop'

    # Thresholds
    threshold_value = Column(Float, nullable=False)
    comparison_operator = Column(String(10), nullable=False)  # '>', '<', '>=', '<=', '=='

    # Alert settings
    is_active = Column(Boolean, default=True)
    last_triggered = Column(DateTime)
    trigger_count = Column(Integer, default=0)

    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
|
||||
|
||||
|
||||
class BingAlertHistory(Base):
    """History of triggered alerts

    One row per alert firing; keeps the rule snapshot values (type,
    threshold, observed trigger_value) so history stays meaningful even
    if the rule changes later.
    """
    __tablename__ = 'bing_alert_history'

    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    site_url = Column(String(500), nullable=False, index=True)
    # NOTE(review): plain Integer, not a ForeignKey to bing_alert_rules.id —
    # referential integrity is not enforced at the DB level; confirm intended.
    alert_rule_id = Column(Integer, nullable=False, index=True)

    # Alert details
    alert_type = Column(String(50), nullable=False)
    trigger_value = Column(Float, nullable=False)  # observed metric value at trigger time
    threshold_value = Column(Float, nullable=False)  # rule threshold at trigger time
    message = Column(Text, nullable=False)

    # Context data
    context_data = Column(Text)  # JSON with additional context

    triggered_at = Column(DateTime, default=func.now(), index=True)
    is_resolved = Column(Boolean, default=False)
    resolved_at = Column(DateTime)

    # Indexes
    __table_args__ = (
        Index('idx_user_alert_triggered', 'user_id', 'triggered_at'),
        Index('idx_alert_rule_triggered', 'alert_rule_id', 'triggered_at'),
    )
|
||||
|
||||
|
||||
class BingSitePerformance(Base):
    """Overall site performance summary

    Single rollup row per (user, site) with all-time totals, best
    performers, and derived insight scores; refreshed in place
    (last_updated is bumped on every UPDATE).
    """
    __tablename__ = 'bing_site_performance'

    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    site_url = Column(String(500), nullable=False, index=True)

    # Performance summary
    total_clicks_all_time = Column(Integer, default=0)
    total_impressions_all_time = Column(Integer, default=0)
    total_queries_all_time = Column(Integer, default=0)
    best_avg_ctr = Column(Float, default=0)
    best_avg_position = Column(Float, default=0)

    # Top performers
    best_performing_query = Column(Text)
    best_performing_date = Column(DateTime)
    most_impressions_query = Column(Text)
    most_clicks_query = Column(Text)

    # Rankings and insights
    query_diversity_score = Column(Float, default=0)  # Unique queries / total queries
    brand_query_percentage = Column(Float, default=0)

    # Last updated
    last_updated = Column(DateTime, default=func.now(), onupdate=func.now())
    data_collection_start = Column(DateTime)  # when ingestion began for this site

    # Indexes
    __table_args__ = (
        Index('idx_user_site_performance', 'user_id', 'site_url'),
    )
|
||||
351
backend/models/blog_models.py
Normal file
351
backend/models/blog_models.py
Normal file
@@ -0,0 +1,351 @@
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Optional, Dict, Any, Union
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class PersonaInfo(BaseModel):
    """Writer-persona hints threaded through research/outline/section calls."""
    persona_id: Optional[str] = None
    tone: Optional[str] = None
    audience: Optional[str] = None
    industry: Optional[str] = None


class ResearchSource(BaseModel):
    """A single research source returned by a provider.

    NOTE: on pydantic models mutable defaults ([]) are safe — values are
    copied per instance, not shared (applies to all models in this module).
    """
    title: str
    url: str
    excerpt: Optional[str] = None
    credibility_score: Optional[float] = None
    # Provider-supplied date string; format is not normalized here.
    published_at: Optional[str] = None
    # Position used by inline citations — presumably 1-based; confirm with callers.
    index: Optional[int] = None
    source_type: Optional[str] = None  # e.g., 'web'


class GroundingChunk(BaseModel):
    """A grounding source chunk from the provider's grounding metadata."""
    title: str
    url: str
    confidence_score: Optional[float] = None


class GroundingSupport(BaseModel):
    """Links a span of generated text to supporting grounding chunks."""
    confidence_scores: List[float] = []
    # Indices into GroundingMetadata.grounding_chunks.
    grounding_chunk_indices: List[int] = []
    segment_text: str = ""
    # Character offsets of the supported span; None when unavailable.
    start_index: Optional[int] = None
    end_index: Optional[int] = None


class Citation(BaseModel):
    """An inline citation anchored to a character range of the content."""
    citation_type: str  # e.g., 'inline'
    start_index: int
    end_index: int
    text: str
    # Indices into the research response's sources list.
    source_indices: List[int] = []
    reference: str  # e.g., 'Source 1'


class GroundingMetadata(BaseModel):
    """Container for all grounding data attached to a research response."""
    grounding_chunks: List[GroundingChunk] = []
    grounding_supports: List[GroundingSupport] = []
    citations: List[Citation] = []
    # Presumably an HTML snippet for the provider's search widget — confirm.
    search_entry_point: Optional[str] = None
    web_search_queries: List[str] = []
|
||||
|
||||
class ResearchMode(str, Enum):
    """Research modes for different depth levels."""
    BASIC = "basic"
    COMPREHENSIVE = "comprehensive"
    TARGETED = "targeted"


class SourceType(str, Enum):
    """Types of sources to include in research."""
    WEB = "web"
    ACADEMIC = "academic"
    NEWS = "news"
    INDUSTRY = "industry"
    EXPERT = "expert"


class DateRange(str, Enum):
    """Date range filters for research."""
    LAST_WEEK = "last_week"
    LAST_MONTH = "last_month"
    LAST_3_MONTHS = "last_3_months"
    LAST_6_MONTHS = "last_6_months"
    LAST_YEAR = "last_year"
    ALL_TIME = "all_time"


class ResearchProvider(str, Enum):
    """Research provider options.

    str-valued so members serialize as their plain strings in JSON bodies.
    """
    GOOGLE = "google"  # Gemini native grounding
    EXA = "exa"  # Exa neural search
    TAVILY = "tavily"  # Tavily AI-powered search
|
||||
|
||||
class ResearchConfig(BaseModel):
    """Configuration for research execution.

    Common knobs apply to every provider; the exa_* and tavily_* groups
    are only consulted when the matching provider is selected.
    """
    mode: ResearchMode = ResearchMode.BASIC
    provider: ResearchProvider = ResearchProvider.GOOGLE
    date_range: Optional[DateRange] = None
    source_types: List[SourceType] = []
    max_sources: int = 10
    include_statistics: bool = True
    include_expert_quotes: bool = True
    include_competitors: bool = True
    include_trends: bool = True

    # Exa-specific options
    exa_category: Optional[str] = None  # company, research paper, news, linkedin profile, github, tweet, movie, song, personal site, pdf, financial report
    exa_include_domains: List[str] = []  # Domain whitelist
    exa_exclude_domains: List[str] = []  # Domain blacklist
    exa_search_type: Optional[str] = "auto"  # "auto", "keyword", "neural"

    # Tavily-specific options
    tavily_topic: Optional[str] = "general"  # general, news, finance
    tavily_search_depth: Optional[str] = "basic"  # basic (1 credit), advanced (2 credits)
    tavily_include_domains: List[str] = []  # Domain whitelist (max 300)
    tavily_exclude_domains: List[str] = []  # Domain blacklist (max 150)
    tavily_include_answer: Union[bool, str] = False  # basic, advanced, true, false
    tavily_include_raw_content: Union[bool, str] = False  # markdown, text, true, false
    tavily_include_images: bool = False
    tavily_include_image_descriptions: bool = False
    tavily_include_favicon: bool = False
    tavily_time_range: Optional[str] = None  # day, week, month, year, d, w, m, y
    tavily_start_date: Optional[str] = None  # YYYY-MM-DD
    tavily_end_date: Optional[str] = None  # YYYY-MM-DD
    tavily_country: Optional[str] = None  # Country code (only for general topic)
    tavily_chunks_per_source: int = 3  # 1-3 (only for advanced search)
    tavily_auto_parameters: bool = False  # Auto-configure parameters based on query
|
||||
|
||||
class BlogResearchRequest(BaseModel):
    """Request payload for the blog research step."""
    keywords: List[str]
    topic: Optional[str] = None
    industry: Optional[str] = None
    target_audience: Optional[str] = None
    tone: Optional[str] = None
    word_count_target: Optional[int] = 1500
    persona: Optional[PersonaInfo] = None
    # Kept alongside config for callers that only set a mode.
    research_mode: Optional[ResearchMode] = ResearchMode.BASIC
    config: Optional[ResearchConfig] = None


class BlogResearchResponse(BaseModel):
    """Result of the research step; also the input to outline generation.

    success=False responses carry the error_* fields instead of raising,
    so the UI can degrade gracefully.
    """
    success: bool = True
    sources: List[ResearchSource] = []
    keyword_analysis: Dict[str, Any] = {}
    competitor_analysis: Dict[str, Any] = {}
    suggested_angles: List[str] = []
    search_widget: Optional[str] = None  # HTML content for search widget
    search_queries: List[str] = []  # Search queries generated by Gemini
    grounding_metadata: Optional[GroundingMetadata] = None  # Google grounding metadata
    original_keywords: List[str] = []  # Original user-provided keywords for caching
    error_message: Optional[str] = None  # Error message for graceful failures
    retry_suggested: Optional[bool] = None  # Whether retry is recommended
    error_code: Optional[str] = None  # Specific error code
    actionable_steps: List[str] = []  # Steps user can take to resolve the issue
|
||||
|
||||
class BlogOutlineSection(BaseModel):
    """One section of a generated blog outline."""
    id: str
    heading: str
    subheadings: List[str] = []
    key_points: List[str] = []
    # Research sources mapped to this section.
    references: List[ResearchSource] = []
    target_words: Optional[int] = None
    keywords: List[str] = []


class BlogOutlineRequest(BaseModel):
    """Request to build an outline from a completed research response."""
    research: BlogResearchResponse
    persona: Optional[PersonaInfo] = None
    word_count: Optional[int] = 1500
    custom_instructions: Optional[str] = None
|
||||
|
||||
class SourceMappingStats(BaseModel):
    """Summary of how research sources were mapped onto outline sections."""
    total_sources_mapped: int = 0
    coverage_percentage: float = 0.0
    average_relevance_score: float = 0.0
    high_confidence_mappings: int = 0


class GroundingInsights(BaseModel):
    """Optional analysis buckets derived from grounding metadata.

    All fields are free-form dicts; their shape is defined by the
    producing service, not by this model.
    """
    confidence_analysis: Optional[Dict[str, Any]] = None
    authority_analysis: Optional[Dict[str, Any]] = None
    temporal_analysis: Optional[Dict[str, Any]] = None
    content_relationships: Optional[Dict[str, Any]] = None
    citation_insights: Optional[Dict[str, Any]] = None
    search_intent_insights: Optional[Dict[str, Any]] = None
    quality_indicators: Optional[Dict[str, Any]] = None


class OptimizationResults(BaseModel):
    """Outcome of the outline optimization pass."""
    overall_quality_score: float = 0.0
    improvements_made: List[str] = []
    optimization_focus: str = "general optimization"


class ResearchCoverage(BaseModel):
    """How thoroughly the outline covers the research material."""
    sources_utilized: int = 0
    content_gaps_identified: int = 0
    competitive_advantages: List[str] = []


class BlogOutlineResponse(BaseModel):
    """Outline generation result plus optional analysis metadata for the UI."""
    success: bool = True
    title_options: List[str] = []
    outline: List[BlogOutlineSection] = []

    # Additional metadata for enhanced UI
    source_mapping_stats: Optional[SourceMappingStats] = None
    grounding_insights: Optional[GroundingInsights] = None
    optimization_results: Optional[OptimizationResults] = None
    research_coverage: Optional[ResearchCoverage] = None
|
||||
|
||||
class BlogOutlineRefineRequest(BaseModel):
    """Request to apply one refinement operation to an existing outline."""
    outline: List[BlogOutlineSection]
    # Operation name; valid values are defined by the refine endpoint.
    operation: str
    section_id: Optional[str] = None
    # Operation-specific arguments.
    payload: Optional[Dict[str, Any]] = None


class BlogSectionRequest(BaseModel):
    """Request to generate the content of a single outline section."""
    section: BlogOutlineSection
    keywords: List[str] = []
    tone: Optional[str] = None
    persona: Optional[PersonaInfo] = None
    mode: Optional[str] = "polished"  # 'draft' | 'polished'


class BlogSectionResponse(BaseModel):
    """Generated section content as markdown with its citations."""
    success: bool = True
    markdown: str
    citations: List[ResearchSource] = []
    continuity_metrics: Optional[Dict[str, float]] = None


class BlogOptimizeRequest(BaseModel):
    """Request to optimize existing content toward the given goals."""
    content: str
    goals: List[str] = []


class BlogOptimizeResponse(BaseModel):
    """Optimized content, optionally with a preview of the changes."""
    success: bool = True
    optimized: str
    diff_preview: Optional[str] = None
|
||||
|
||||
class BlogSEOAnalyzeRequest(BaseModel):
    """Request to run SEO analysis over finished blog content."""
    content: str
    blog_title: Optional[str] = None
    keywords: List[str] = []
    research_data: Optional[Dict[str, Any]] = None


class BlogSEOAnalyzeResponse(BaseModel):
    """SEO analysis result; sub-dict shapes are defined by the analyzer."""
    success: bool = True
    seo_score: float
    density: Dict[str, Any] = {}  # keyword density breakdown
    structure: Dict[str, Any] = {}  # heading/structure checks
    readability: Dict[str, Any] = {}
    link_suggestions: List[Dict[str, Any]] = []
    image_alt_status: Dict[str, Any] = {}
    recommendations: List[str] = []


class BlogSEOMetadataRequest(BaseModel):
    """Request to generate SEO metadata (titles, descriptions, schema)."""
    content: str
    title: Optional[str] = None
    keywords: List[str] = []
    research_data: Optional[Dict[str, Any]] = None
    outline: Optional[List[Dict[str, Any]]] = None  # Add outline structure
    seo_analysis: Optional[Dict[str, Any]] = None  # Add SEO analysis results


class BlogSEOMetadataResponse(BaseModel):
    """Generated SEO metadata bundle; also embedded in publish requests."""
    success: bool = True
    # Candidate lists for the UI; seo_title/meta_description hold the picks.
    title_options: List[str] = []
    meta_descriptions: List[str] = []
    seo_title: Optional[str] = None
    meta_description: Optional[str] = None
    url_slug: Optional[str] = None
    blog_tags: List[str] = []
    blog_categories: List[str] = []
    social_hashtags: List[str] = []
    open_graph: Dict[str, Any] = {}
    twitter_card: Dict[str, Any] = {}
    json_ld_schema: Dict[str, Any] = {}
    canonical_url: Optional[str] = None
    # Estimated reading time — presumably minutes; confirm with producer.
    reading_time: float = 0.0
    focus_keyword: Optional[str] = None
    generated_at: Optional[str] = None
    optimization_score: int = 0
    error: Optional[str] = None
|
||||
|
||||
class BlogPublishRequest(BaseModel):
    """Request to publish rendered HTML to a supported platform."""
    # Only "wix" or "wordpress" are accepted (regex-validated).
    platform: str = Field(pattern="^(wix|wordpress)$")
    html: str
    metadata: BlogSEOMetadataResponse
    # Optional future publish time; None publishes immediately.
    schedule_time: Optional[str] = None


class BlogPublishResponse(BaseModel):
    """Publish outcome with the platform-assigned URL/post id."""
    success: bool = True
    platform: str
    url: Optional[str] = None
    post_id: Optional[str] = None


class HallucinationCheckRequest(BaseModel):
    """Request to fact-check content against a list of source URLs/texts."""
    content: str
    sources: List[str] = []


class HallucinationCheckResponse(BaseModel):
    """Fact-check result; claim/suggestion dict shapes come from the checker."""
    success: bool = True
    claims: List[Dict[str, Any]] = []
    suggestions: List[Dict[str, Any]] = []
||||
|
||||
|
||||
# -----------------------
|
||||
# Medium Blog Generation
|
||||
# -----------------------
|
||||
|
||||
class MediumSectionOutline(BaseModel):
    """Lightweight outline payload for medium blog generation.

    NOTE: camelCase field names intentionally mirror the frontend payload.
    """
    id: str
    heading: str
    keyPoints: List[str] = []
    subheadings: List[str] = []
    keywords: List[str] = []
    targetWords: Optional[int] = None
    references: List[ResearchSource] = []


class MediumBlogGenerateRequest(BaseModel):
    """Request to generate an entire medium-length blog in one pass."""
    title: str
    sections: List[MediumSectionOutline]
    persona: Optional[PersonaInfo] = None
    tone: Optional[str] = None
    audience: Optional[str] = None
    # Overall word budget for the whole post.
    globalTargetWords: Optional[int] = 1000
    researchKeywords: Optional[List[str]] = None  # Original research keywords for better caching


class MediumGeneratedSection(BaseModel):
    """One generated section of the medium blog."""
    id: str
    heading: str
    content: str
    wordCount: int
    sources: Optional[List[ResearchSource]] = None


class MediumBlogGenerateResult(BaseModel):
    """Full-generation result plus model/timing/safety diagnostics."""
    success: bool = True
    title: str
    sections: List[MediumGeneratedSection]
    model: Optional[str] = None  # model identifier used for generation
    generation_time_ms: Optional[int] = None
    safety_flags: Optional[Dict[str, Any]] = None
24
backend/models/business_info_request.py
Normal file
24
backend/models/business_info_request.py
Normal file
@@ -0,0 +1,24 @@
|
||||
"""Business Information Request Models for ALwrity backend."""
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional
|
||||
from datetime import datetime
|
||||
|
||||
class BusinessInfoRequest(BaseModel):
    """Inbound payload for creating/updating a user's business profile."""
    user_id: Optional[int] = None
    business_description: str = Field(..., min_length=10, max_length=1000, description="Description of the business")
    industry: Optional[str] = Field(None, max_length=100, description="Industry sector")
    target_audience: Optional[str] = Field(None, max_length=500, description="Target audience description")
    business_goals: Optional[str] = Field(None, max_length=1000, description="Business goals and objectives")


class BusinessInfoResponse(BaseModel):
    """Business profile as stored, including DB id and timestamps."""
    id: int
    user_id: Optional[int]
    business_description: str
    industry: Optional[str]
    target_audience: Optional[str]
    business_goals: Optional[str]
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow constructing directly from ORM objects (pydantic v2 name).
        from_attributes = True
||||
260
backend/models/component_logic.py
Normal file
260
backend/models/component_logic.py
Normal file
@@ -0,0 +1,260 @@
|
||||
"""Pydantic models for component logic requests and responses."""
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
from pydantic import BaseModel, EmailStr, validator
|
||||
import re
|
||||
|
||||
# AI Research Models
|
||||
|
||||
class UserInfoRequest(BaseModel):
    """Request model for user information validation.

    NOTE(review): uses pydantic v1 @validator, deprecated in pydantic v2
    (replaced by @field_validator) — consistent with the rest of this module.
    """
    full_name: str
    email: str
    company: str
    role: str

    @validator('full_name')
    def validate_full_name(cls, v):
        # Require at least 2 non-surrounding-whitespace characters.
        if not v or len(v.strip()) < 2:
            raise ValueError('Full name must be at least 2 characters long')
        return v.strip()

    @validator('email')
    def validate_email(cls, v):
        # Basic email validation
        # Intentionally simple pattern — not full RFC 5322; EmailStr is
        # imported at module level but not used here.
        email_pattern = re.compile(r'^[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$')
        if not email_pattern.match(v):
            raise ValueError('Invalid email format')
        # Normalize to lowercase for case-insensitive comparison/storage.
        return v.lower()

    @validator('company')
    def validate_company(cls, v):
        if not v or len(v.strip()) < 1:
            raise ValueError('Company name is required')
        return v.strip()

    @validator('role')
    def validate_role(cls, v):
        # Closed set; must match the options offered by the frontend.
        valid_roles = ["Content Creator", "Marketing Manager", "Business Owner", "Other"]
        if v not in valid_roles:
            raise ValueError(f'Role must be one of: {", ".join(valid_roles)}')
        return v
|
||||
|
||||
class ResearchPreferencesRequest(BaseModel):
    """Request model for research preferences configuration."""
    research_depth: str
    content_types: List[str]
    auto_research: bool
    factual_content: bool = True  # Default to True

    @validator('research_depth')
    def validate_research_depth(cls, v):
        # Closed set; must match the options offered by the frontend.
        valid_depths = ["Basic", "Standard", "Deep", "Comprehensive"]
        if v not in valid_depths:
            raise ValueError(f'Research depth must be one of: {", ".join(valid_depths)}')
        return v

    @validator('content_types')
    def validate_content_types(cls, v):
        # Non-empty list, every entry drawn from the closed set below.
        valid_types = ["Blog Posts", "Social Media", "Technical Articles", "News", "Academic Papers"]
        if not v:
            raise ValueError('At least one content type must be selected')
        for content_type in v:
            if content_type not in valid_types:
                raise ValueError(f'Invalid content type: {content_type}')
        return v
|
||||
|
||||
class ResearchRequest(BaseModel):
    """Request model for research processing."""
    topic: str
    preferences: ResearchPreferencesRequest

    @validator('topic')
    def validate_topic(cls, v):
        # Require a minimally meaningful topic; stored trimmed.
        if not v or len(v.strip()) < 3:
            raise ValueError('Topic must be at least 3 characters long')
        return v.strip()
|
||||
|
||||
class UserInfoResponse(BaseModel):
    """Response model for user information validation.

    valid=False responses carry human-readable messages in `errors`.
    """
    valid: bool
    user_info: Optional[Dict[str, Any]] = None  # normalized values when valid
    errors: List[str] = []


class ResearchPreferencesResponse(BaseModel):
    """Response model for research preferences configuration."""
    valid: bool
    preferences: Optional[Dict[str, Any]] = None
    errors: List[str] = []


class ResearchResponse(BaseModel):
    """Response model for research processing."""
    success: bool
    topic: str
    results: Optional[Dict[str, Any]] = None  # present on success
    error: Optional[str] = None  # present on failure
|
||||
|
||||
# Personalization Models
|
||||
|
||||
class ContentStyleRequest(BaseModel):
    """Request model for content style configuration.

    Each field is restricted to a closed set matching the frontend options.
    """
    writing_style: str
    tone: str
    content_length: str

    @validator('writing_style')
    def validate_writing_style(cls, v):
        valid_styles = ["Professional", "Casual", "Technical", "Conversational", "Academic"]
        if v not in valid_styles:
            raise ValueError(f'Writing style must be one of: {", ".join(valid_styles)}')
        return v

    @validator('tone')
    def validate_tone(cls, v):
        valid_tones = ["Formal", "Semi-Formal", "Neutral", "Friendly", "Humorous"]
        if v not in valid_tones:
            raise ValueError(f'Tone must be one of: {", ".join(valid_tones)}')
        return v

    @validator('content_length')
    def validate_content_length(cls, v):
        valid_lengths = ["Concise", "Standard", "Detailed", "Comprehensive"]
        if v not in valid_lengths:
            raise ValueError(f'Content length must be one of: {", ".join(valid_lengths)}')
        return v
|
||||
|
||||
class BrandVoiceRequest(BaseModel):
    """Request model for brand voice configuration."""
    personality_traits: List[str]
    voice_description: Optional[str] = None
    keywords: Optional[str] = None  # free-form; not validated here

    @validator('personality_traits')
    def validate_personality_traits(cls, v):
        # Non-empty list, every entry drawn from the closed set below.
        valid_traits = ["Professional", "Innovative", "Friendly", "Trustworthy", "Creative", "Expert"]
        if not v:
            raise ValueError('At least one personality trait must be selected')
        for trait in v:
            if trait not in valid_traits:
                raise ValueError(f'Invalid personality trait: {trait}')
        return v

    @validator('voice_description')
    def validate_voice_description(cls, v):
        # Optional, but when supplied must be at least 10 chars after trimming.
        if v and len(v.strip()) < 10:
            raise ValueError('Voice description must be at least 10 characters long')
        return v.strip() if v else None
|
||||
|
||||
class AdvancedSettingsRequest(BaseModel):
    """Request model for advanced content generation settings."""
    seo_optimization: bool
    readability_level: str
    content_structure: List[str]

    @validator('readability_level')
    def validate_readability_level(cls, v):
        # Guard-clause form: known levels pass straight through.
        valid_levels = ["Simple", "Standard", "Advanced", "Expert"]
        if v in valid_levels:
            return v
        raise ValueError(f'Readability level must be one of: {", ".join(valid_levels)}')

    @validator('content_structure')
    def validate_content_structure(cls, v):
        # Require a non-empty selection drawn only from the known elements.
        valid_structures = ["Introduction", "Key Points", "Examples", "Conclusion", "Call-to-Action"]
        if not v:
            raise ValueError('At least one content structure element must be selected')
        for element in v:
            if element not in valid_structures:
                raise ValueError(f'Invalid content structure: {element}')
        return v
|
||||
|
||||
class PersonalizationSettingsRequest(BaseModel):
    """Request model for complete personalization settings.

    Aggregates the three personalization sub-requests; each nested model
    runs its own field validators when this model is constructed.
    """
    content_style: ContentStyleRequest  # writing style / tone / length
    brand_voice: BrandVoiceRequest  # personality traits and voice description
    advanced_settings: AdvancedSettingsRequest  # SEO, readability, structure
|
||||
|
||||
class ContentStyleResponse(BaseModel):
    """Response model for content style validation."""
    valid: bool  # True when the submitted style passed validation
    style_config: Optional[Dict[str, Any]] = None  # accepted style configuration, present when valid
    errors: List[str] = []  # human-readable validation errors (empty when valid)
|
||||
|
||||
class BrandVoiceResponse(BaseModel):
    """Response model for brand voice configuration."""
    valid: bool  # True when the submitted brand voice passed validation
    brand_config: Optional[Dict[str, Any]] = None  # accepted brand configuration, present when valid
    errors: List[str] = []  # human-readable validation errors (empty when valid)
|
||||
|
||||
class PersonalizationSettingsResponse(BaseModel):
    """Response model for complete personalization settings."""
    valid: bool  # True when all sub-settings passed validation
    settings: Optional[Dict[str, Any]] = None  # accepted settings payload, present when valid
    errors: List[str] = []  # human-readable validation errors (empty when valid)
|
||||
|
||||
# Research Utilities Models
|
||||
|
||||
class ResearchTopicRequest(BaseModel):
    """Request model for topic research."""
    topic: str
    api_keys: Dict[str, str]

    @validator('topic')
    def validate_topic(cls, v):
        # Normalize surrounding whitespace and reject missing/too-short topics.
        cleaned = v.strip() if v else ''
        if len(cleaned) < 3:
            raise ValueError('Topic must be at least 3 characters long')
        return cleaned
|
||||
|
||||
class ResearchResultResponse(BaseModel):
    """Response model for research results."""
    success: bool  # whether the research run completed without error
    topic: str  # the researched topic, echoed back
    data: Optional[Dict[str, Any]] = None  # research results, present on success
    error: Optional[str] = None  # error description, present on failure
    metadata: Optional[Dict[str, Any]] = None  # auxiliary info about the run (providers, timing, ...)
|
||||
|
||||
# Style Detection Models
|
||||
class StyleAnalysisRequest(BaseModel):
    """Request model for style analysis."""
    content: Dict[str, Any]  # content payload to analyze
    analysis_type: str = "comprehensive"  # one of: comprehensive, patterns, guidelines
|
||||
|
||||
class StyleAnalysisResponse(BaseModel):
    """Response model for style analysis."""
    success: bool  # whether the analysis completed without error
    analysis: Optional[Dict[str, Any]] = None  # full analysis output
    patterns: Optional[Dict[str, Any]] = None  # detected style patterns, when requested
    guidelines: Optional[Dict[str, Any]] = None  # derived style guidelines, when requested
    error: Optional[str] = None  # error description, present on failure
    timestamp: str  # when the analysis was produced (required, caller-supplied format)
|
||||
|
||||
class WebCrawlRequest(BaseModel):
    """Request model for web crawling.

    NOTE(review): both fields are optional with no cross-field validator, so a
    request with neither `url` nor `text_sample` passes model validation —
    presumably the endpoint rejects that case; confirm against the handler.
    """
    url: Optional[str] = None  # URL to crawl
    text_sample: Optional[str] = None  # raw text to analyze instead of crawling
|
||||
|
||||
class WebCrawlResponse(BaseModel):
    """Response model for web crawling."""
    success: bool  # whether the crawl completed without error
    content: Optional[Dict[str, Any]] = None  # extracted content, present on success
    metrics: Optional[Dict[str, Any]] = None  # crawl metrics (sizes, timing, ...)
    error: Optional[str] = None  # error description, present on failure
    timestamp: str  # when the crawl result was produced (required)
|
||||
|
||||
class StyleDetectionRequest(BaseModel):
    """Request model for complete style detection workflow."""
    url: Optional[str] = None  # URL to crawl for source content
    text_sample: Optional[str] = None  # raw text to analyze instead of crawling
    include_patterns: bool = True  # also return detected style patterns
    include_guidelines: bool = True  # also return derived style guidelines
|
||||
|
||||
class StyleDetectionResponse(BaseModel):
    """Response model for complete style detection workflow."""
    success: bool  # whether the whole workflow completed without error
    crawl_result: Optional[Dict[str, Any]] = None  # output of the crawl stage
    style_analysis: Optional[Dict[str, Any]] = None  # output of the analysis stage
    style_patterns: Optional[Dict[str, Any]] = None  # present when patterns were requested
    style_guidelines: Optional[Dict[str, Any]] = None  # present when guidelines were requested
    error: Optional[str] = None  # fatal error description, present on failure
    warning: Optional[str] = None  # non-fatal issue encountered during the workflow
    timestamp: str  # when the result was produced (required)
|
||||
72
backend/models/comprehensive_user_data_cache.py
Normal file
72
backend/models/comprehensive_user_data_cache.py
Normal file
@@ -0,0 +1,72 @@
|
||||
"""
|
||||
Comprehensive User Data Cache Model
|
||||
Caches expensive comprehensive user data operations to improve performance.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, DateTime, JSON, Index, ForeignKey
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime, timedelta
|
||||
import hashlib
|
||||
import json
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class ComprehensiveUserDataCache(Base):
    """Cache for comprehensive user data to avoid redundant expensive operations.

    Entries are keyed by (user_id, strategy_id) plus a parameter hash, and
    expire after a TTL (default one hour). `touch()` tracks usage so stale or
    cold entries can be evicted.
    """

    __tablename__ = "comprehensive_user_data_cache"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)
    strategy_id = Column(Integer, nullable=True)
    data_hash = Column(String(64), nullable=False)  # For cache invalidation
    comprehensive_data = Column(JSON, nullable=False)
    created_at = Column(DateTime, default=datetime.utcnow)
    expires_at = Column(DateTime, nullable=False)
    last_accessed = Column(DateTime, default=datetime.utcnow)
    access_count = Column(Integer, default=0)

    # Indexes for fast lookups
    __table_args__ = (
        Index('idx_user_strategy', 'user_id', 'strategy_id'),
        Index('idx_expires_at', 'expires_at'),
        Index('idx_data_hash', 'data_hash'),
    )

    def __repr__(self):
        return f"<ComprehensiveUserDataCache(user_id={self.user_id}, strategy_id={self.strategy_id}, expires_at={self.expires_at})>"

    @staticmethod
    def generate_data_hash(user_id: int, strategy_id: int = None, **kwargs) -> str:
        """Generate a deterministic hash for cache invalidation.

        sort_keys makes the hash independent of kwargs ordering. `default=str`
        lets non-JSON-serializable kwargs (e.g. datetimes) be hashed instead of
        raising TypeError; hashes of already-serializable inputs are unchanged.
        """
        data_string = f"{user_id}_{strategy_id}_{json.dumps(kwargs, sort_keys=True, default=str)}"
        return hashlib.sha256(data_string.encode()).hexdigest()

    @staticmethod
    def get_default_expiry() -> datetime:
        """Get default expiry time (1 hour from now)."""
        return datetime.utcnow() + timedelta(hours=1)

    def is_expired(self) -> bool:
        """Check if the cache entry has expired (naive-UTC comparison)."""
        return datetime.utcnow() > self.expires_at

    def touch(self) -> None:
        """Update last accessed time and increment access count.

        Treats a None access_count as 0: column defaults only apply on INSERT,
        so an unflushed instance would otherwise raise TypeError here.
        """
        self.last_accessed = datetime.utcnow()
        self.access_count = (self.access_count or 0) + 1

    def to_dict(self) -> dict:
        """Convert cache entry to dictionary (datetimes as ISO-8601 strings)."""
        return {
            "id": self.id,
            "user_id": self.user_id,
            "strategy_id": self.strategy_id,
            "data_hash": self.data_hash,
            "comprehensive_data": self.comprehensive_data,
            "created_at": self.created_at.isoformat(),
            "expires_at": self.expires_at.isoformat(),
            "last_accessed": self.last_accessed.isoformat(),
            "access_count": self.access_count
        }
|
||||
152
backend/models/content_asset_models.py
Normal file
152
backend/models/content_asset_models.py
Normal file
@@ -0,0 +1,152 @@
|
||||
"""
|
||||
Content Asset Models
|
||||
Unified database models for tracking all AI-generated content assets across all modules.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Float, Boolean, JSON, Text, ForeignKey, Enum, Index, func
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
import enum
|
||||
|
||||
# Use the same Base as subscription models for consistency
|
||||
from models.subscription_models import Base
|
||||
|
||||
|
||||
class AssetType(enum.Enum):
    """Types of content assets (broad media category of the generated file)."""
    TEXT = "text"
    IMAGE = "image"
    VIDEO = "video"
    AUDIO = "audio"
|
||||
|
||||
|
||||
class AssetSource(enum.Enum):
    """Source module/tool that generated the asset."""
    # Core Content Generation
    STORY_WRITER = "story_writer"
    IMAGE_STUDIO = "image_studio"
    MAIN_TEXT_GENERATION = "main_text_generation"
    MAIN_IMAGE_GENERATION = "main_image_generation"
    MAIN_VIDEO_GENERATION = "main_video_generation"
    MAIN_AUDIO_GENERATION = "main_audio_generation"

    # Social Media Writers
    BLOG_WRITER = "blog_writer"
    LINKEDIN_WRITER = "linkedin_writer"
    FACEBOOK_WRITER = "facebook_writer"

    # SEO & Content Tools
    SEO_TOOLS = "seo_tools"
    CONTENT_PLANNING = "content_planning"
    WRITING_ASSISTANT = "writing_assistant"

    # Research & Strategy
    RESEARCH_TOOLS = "research_tools"
    CONTENT_STRATEGY = "content_strategy"

    # Product Marketing Suite
    PRODUCT_MARKETING = "product_marketing"

    # Podcast Maker
    PODCAST_MAKER = "podcast_maker"

    # YouTube Creator
    YOUTUBE_CREATOR = "youtube_creator"
|
||||
|
||||
|
||||
class ContentAsset(Base):
    """
    Unified model for tracking all AI-generated content assets.
    Similar to subscription tracking, this provides a centralized way to manage all content.

    One row per generated asset, owned by a Clerk user and optionally grouped
    into an AssetCollection.
    """

    __tablename__ = "content_assets"

    # Primary fields
    id = Column(Integer, primary_key=True)
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID

    # Asset identification
    asset_type = Column(Enum(AssetType), nullable=False, index=True)  # media category (text/image/video/audio)
    source_module = Column(Enum(AssetSource), nullable=False, index=True)  # generating tool/module

    # File information
    filename = Column(String(500), nullable=False)
    file_path = Column(String(1000), nullable=True)  # Server file path
    file_url = Column(String(1000), nullable=False)  # Public URL
    file_size = Column(Integer, nullable=True)  # Size in bytes
    mime_type = Column(String(100), nullable=True)  # MIME type

    # Asset metadata
    title = Column(String(500), nullable=True)
    description = Column(Text, nullable=True)
    prompt = Column(Text, nullable=True)  # Original prompt used for generation
    tags = Column(JSON, nullable=True)  # Array of tags for search/filtering
    asset_metadata = Column(JSON, nullable=True)  # Additional module-specific metadata (renamed from 'metadata' to avoid SQLAlchemy conflict)

    # Generation details
    provider = Column(String(100), nullable=True)  # AI provider used (e.g., "stability", "gemini")
    model = Column(String(100), nullable=True)  # Model used
    cost = Column(Float, nullable=True, default=0.0)  # Generation cost in USD
    generation_time = Column(Float, nullable=True)  # Time taken in seconds

    # Organization
    is_favorite = Column(Boolean, default=False, index=True)
    collection_id = Column(Integer, ForeignKey('asset_collections.id'), nullable=True)

    # Usage tracking
    download_count = Column(Integer, default=0)
    share_count = Column(Integer, default=0)
    last_accessed = Column(DateTime, nullable=True)

    # Timestamps
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    # foreign_keys is explicit because AssetCollection also references this
    # table via cover_asset_id, which would otherwise be ambiguous.
    collection = relationship(
        "AssetCollection",
        back_populates="assets",
        foreign_keys=[collection_id]
    )

    # Composite indexes for common query patterns
    __table_args__ = (
        Index('idx_user_type_source', 'user_id', 'asset_type', 'source_module'),
        Index('idx_user_favorite_created', 'user_id', 'is_favorite', 'created_at'),
        Index('idx_user_tags', 'user_id', 'tags'),
    )
|
||||
|
||||
|
||||
class AssetCollection(Base):
    """
    Collections/albums for organizing assets.

    Owns its ContentAsset rows (one-to-many with cascade delete) and may point
    at one of them as a cover image via cover_asset_id.
    """

    __tablename__ = "asset_collections"

    id = Column(Integer, primary_key=True)
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID of the owner
    name = Column(String(255), nullable=False)
    description = Column(Text, nullable=True)
    is_public = Column(Boolean, default=False)
    cover_asset_id = Column(Integer, ForeignKey('content_assets.id'), nullable=True)

    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    # foreign_keys disambiguates the two FKs between these tables
    # (ContentAsset.collection_id vs cover_asset_id below).
    assets = relationship(
        "ContentAsset",
        back_populates="collection",
        foreign_keys="[ContentAsset.collection_id]",
        cascade="all, delete-orphan"  # Cascade delete on the "one" side (one-to-many)
    )
    cover_asset = relationship(
        "ContentAsset",
        foreign_keys=[cover_asset_id],
        uselist=False
    )
|
||||
|
||||
239
backend/models/content_planning.py
Normal file
239
backend/models/content_planning.py
Normal file
@@ -0,0 +1,239 @@
|
||||
"""
|
||||
Content Planning Database Models
|
||||
Defines the database schema for content strategy, calendar events, and analytics.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, JSON, ForeignKey
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class ContentStrategy(Base):
    """Content Strategy model.

    Top-level planning entity a user owns; calendar events and analytics
    rows hang off it via strategy_id.
    """

    __tablename__ = "content_strategies"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # owning user (no FK constraint here)
    name = Column(String(255), nullable=False)
    industry = Column(String(100), nullable=True)
    target_audience = Column(JSON, nullable=True)  # Store audience demographics and preferences
    content_pillars = Column(JSON, nullable=True)  # Store content pillar definitions
    ai_recommendations = Column(JSON, nullable=True)  # Store AI-generated recommendations
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    calendar_events = relationship("CalendarEvent", back_populates="strategy")
    analytics = relationship("ContentAnalytics", back_populates="strategy")

    def __repr__(self):
        return f"<ContentStrategy(id={self.id}, name='{self.name}', industry='{self.industry}')>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'name': self.name,
            'industry': self.industry,
            'target_audience': self.target_audience,
            'content_pillars': self.content_pillars,
            'ai_recommendations': self.ai_recommendations,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
class CalendarEvent(Base):
    """Calendar Event model.

    A single scheduled piece of content belonging to a ContentStrategy.
    """

    __tablename__ = "calendar_events"

    id = Column(Integer, primary_key=True)
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=False)
    title = Column(String(255), nullable=False)
    description = Column(Text, nullable=True)
    content_type = Column(String(50), nullable=False)  # blog_post, video, social_post, etc.
    platform = Column(String(50), nullable=False)  # website, linkedin, youtube, etc.
    scheduled_date = Column(DateTime, nullable=False)
    status = Column(String(20), default="draft")  # draft, scheduled, published, cancelled
    ai_recommendations = Column(JSON, nullable=True)  # Store AI recommendations for the event
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    strategy = relationship("ContentStrategy", back_populates="calendar_events")
    analytics = relationship("ContentAnalytics", back_populates="event")

    def __repr__(self):
        return f"<CalendarEvent(id={self.id}, title='{self.title}', status='{self.status}')>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'strategy_id': self.strategy_id,
            'title': self.title,
            'description': self.description,
            'content_type': self.content_type,
            'platform': self.platform,
            'scheduled_date': self.scheduled_date.isoformat() if self.scheduled_date else None,
            'status': self.status,
            'ai_recommendations': self.ai_recommendations,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
class ContentAnalytics(Base):
    """Content Analytics model.

    Performance snapshot attached to a calendar event and/or a strategy
    (both FKs are nullable, so either association may stand alone).
    """

    __tablename__ = "content_analytics"

    id = Column(Integer, primary_key=True)
    event_id = Column(Integer, ForeignKey("calendar_events.id"), nullable=True)
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=True)
    platform = Column(String(50), nullable=False)  # website, linkedin, youtube, etc.
    metrics = Column(JSON, nullable=True)  # Store various performance metrics
    performance_score = Column(Float, nullable=True)  # Overall performance score
    recorded_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    event = relationship("CalendarEvent", back_populates="analytics")
    strategy = relationship("ContentStrategy", back_populates="analytics")

    def __repr__(self):
        return f"<ContentAnalytics(id={self.id}, platform='{self.platform}', score={self.performance_score})>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'event_id': self.event_id,
            'strategy_id': self.strategy_id,
            'platform': self.platform,
            'metrics': self.metrics,
            'performance_score': self.performance_score,
            'recorded_at': self.recorded_at.isoformat() if self.recorded_at else None
        }
|
||||
|
||||
class ContentGapAnalysis(Base):
    """Content Gap Analysis model.

    Stores one competitor/keyword gap analysis run for a user's website.
    """

    __tablename__ = "content_gap_analyses"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # owning user (no FK constraint here)
    website_url = Column(String(500), nullable=False)
    competitor_urls = Column(JSON, nullable=True)  # Store competitor URLs
    target_keywords = Column(JSON, nullable=True)  # Store target keywords
    analysis_results = Column(JSON, nullable=True)  # Store complete analysis results
    recommendations = Column(JSON, nullable=True)  # Store AI recommendations
    opportunities = Column(JSON, nullable=True)  # Store identified opportunities
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def __repr__(self):
        return f"<ContentGapAnalysis(id={self.id}, website='{self.website_url}')>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'website_url': self.website_url,
            'competitor_urls': self.competitor_urls,
            'target_keywords': self.target_keywords,
            'analysis_results': self.analysis_results,
            'recommendations': self.recommendations,
            'opportunities': self.opportunities,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
class ContentRecommendation(Base):
    """Content Recommendation model.

    AI-suggested content item a user can accept/reject; optionally linked to a
    ContentStrategy.
    """

    __tablename__ = "content_recommendations"

    id = Column(Integer, primary_key=True)
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=True)
    user_id = Column(Integer, nullable=False)
    recommendation_type = Column(String(50), nullable=False)  # blog_post, video, case_study, etc.
    title = Column(String(255), nullable=False)
    description = Column(Text, nullable=True)
    target_keywords = Column(JSON, nullable=True)  # Store target keywords
    estimated_length = Column(String(100), nullable=True)  # Estimated content length
    priority = Column(String(20), default="medium")  # low, medium, high
    platforms = Column(JSON, nullable=True)  # Store target platforms
    estimated_performance = Column(String(100), nullable=True)  # Performance prediction
    status = Column(String(20), default="pending")  # pending, accepted, rejected, implemented
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    strategy = relationship("ContentStrategy")

    def __repr__(self):
        return f"<ContentRecommendation(id={self.id}, title='{self.title}', type='{self.recommendation_type}')>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'strategy_id': self.strategy_id,
            'user_id': self.user_id,
            'recommendation_type': self.recommendation_type,
            'title': self.title,
            'description': self.description,
            'target_keywords': self.target_keywords,
            'estimated_length': self.estimated_length,
            'priority': self.priority,
            'platforms': self.platforms,
            'estimated_performance': self.estimated_performance,
            'status': self.status,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
class AIAnalysisResult(Base):
    """AI Analysis Result model for storing AI-generated insights and recommendations."""

    __tablename__ = "ai_analysis_results"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # owning user (no FK constraint here)
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=True)
    analysis_type = Column(String(50), nullable=False)  # performance_trends, strategic_intelligence, content_evolution, gap_analysis
    insights = Column(JSON, nullable=True)  # Store AI-generated insights
    recommendations = Column(JSON, nullable=True)  # Store AI-generated recommendations
    performance_metrics = Column(JSON, nullable=True)  # Store performance data
    personalized_data_used = Column(JSON, nullable=True)  # Store the onboarding data used for personalization
    processing_time = Column(Float, nullable=True)  # Store processing time in seconds
    ai_service_status = Column(String(20), default="operational")  # operational, fallback, error
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    strategy = relationship("ContentStrategy")

    def __repr__(self):
        return f"<AIAnalysisResult(id={self.id}, type='{self.analysis_type}', user_id={self.user_id})>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'strategy_id': self.strategy_id,
            'analysis_type': self.analysis_type,
            'insights': self.insights,
            'recommendations': self.recommendations,
            'performance_metrics': self.performance_metrics,
            'personalized_data_used': self.personalized_data_used,
            'processing_time': self.processing_time,
            'ai_service_status': self.ai_service_status,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
269
backend/models/enhanced_calendar_models.py
Normal file
269
backend/models/enhanced_calendar_models.py
Normal file
@@ -0,0 +1,269 @@
|
||||
"""
|
||||
Enhanced Calendar Models for AI-Powered Content Planning
|
||||
Defines additional database schema for intelligent calendar generation and optimization.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, JSON, ForeignKey, Boolean
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class ContentCalendarTemplate(Base):
    """Template for industry-specific content calendars.

    One row per (industry, business_size) template; all strategy detail is
    held in JSON columns.
    """

    __tablename__ = "content_calendar_templates"

    id = Column(Integer, primary_key=True)
    industry = Column(String(100), nullable=False)
    business_size = Column(String(50), nullable=True)  # startup, sme, enterprise
    content_pillars = Column(JSON, nullable=True)  # Core content themes
    posting_frequency = Column(JSON, nullable=True)  # Platform-specific frequency
    platform_strategies = Column(JSON, nullable=True)  # Platform-specific content types
    optimal_timing = Column(JSON, nullable=True)  # Best posting times per platform
    content_mix = Column(JSON, nullable=True)  # Content type distribution
    seasonal_themes = Column(JSON, nullable=True)  # Seasonal content opportunities
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def __repr__(self):
        return f"<ContentCalendarTemplate(id={self.id}, industry='{self.industry}')>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'industry': self.industry,
            'business_size': self.business_size,
            'content_pillars': self.content_pillars,
            'posting_frequency': self.posting_frequency,
            'platform_strategies': self.platform_strategies,
            'optimal_timing': self.optimal_timing,
            'content_mix': self.content_mix,
            'seasonal_themes': self.seasonal_themes,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
class AICalendarRecommendation(Base):
    """AI-generated calendar recommendations and suggestions."""

    __tablename__ = "ai_calendar_recommendations"

    id = Column(Integer, primary_key=True)
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=True)
    user_id = Column(Integer, nullable=False)  # owning user (no FK constraint here)
    recommendation_type = Column(String(50), nullable=False)  # calendar_generation, content_optimization, performance_analysis
    content_suggestions = Column(JSON, nullable=True)  # Suggested content topics and themes
    optimal_timing = Column(JSON, nullable=True)  # Recommended posting times
    performance_prediction = Column(JSON, nullable=True)  # Predicted performance metrics
    platform_recommendations = Column(JSON, nullable=True)  # Platform-specific suggestions
    content_repurposing = Column(JSON, nullable=True)  # Repurposing opportunities
    trending_topics = Column(JSON, nullable=True)  # Trending topics to incorporate
    competitor_insights = Column(JSON, nullable=True)  # Competitor analysis insights
    ai_confidence = Column(Float, nullable=True)  # AI confidence score
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    strategy = relationship("ContentStrategy")

    def __repr__(self):
        return f"<AICalendarRecommendation(id={self.id}, type='{self.recommendation_type}')>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'strategy_id': self.strategy_id,
            'user_id': self.user_id,
            'recommendation_type': self.recommendation_type,
            'content_suggestions': self.content_suggestions,
            'optimal_timing': self.optimal_timing,
            'performance_prediction': self.performance_prediction,
            'platform_recommendations': self.platform_recommendations,
            'content_repurposing': self.content_repurposing,
            'trending_topics': self.trending_topics,
            'competitor_insights': self.competitor_insights,
            'ai_confidence': self.ai_confidence,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
class ContentPerformanceTracking(Base):
    """Detailed content performance tracking and analytics.

    One row per recorded measurement of a piece of content on a platform;
    optionally linked to a calendar event and/or a strategy.
    """

    __tablename__ = "content_performance_tracking"

    id = Column(Integer, primary_key=True)
    event_id = Column(Integer, ForeignKey("calendar_events.id"), nullable=True)
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=True)
    platform = Column(String(50), nullable=False)  # website, linkedin, instagram, etc.
    content_type = Column(String(50), nullable=False)  # blog_post, video, social_post, etc.
    metrics = Column(JSON, nullable=True)  # Engagement, reach, clicks, conversions, etc.
    performance_score = Column(Float, nullable=True)  # Overall performance score (0-100)
    audience_demographics = Column(JSON, nullable=True)  # Audience insights
    engagement_rate = Column(Float, nullable=True)  # Engagement rate percentage
    reach_count = Column(Integer, nullable=True)  # Total reach
    click_count = Column(Integer, nullable=True)  # Total clicks
    conversion_count = Column(Integer, nullable=True)  # Total conversions
    roi = Column(Float, nullable=True)  # Return on investment
    recorded_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    event = relationship("CalendarEvent")
    strategy = relationship("ContentStrategy")

    def __repr__(self):
        return f"<ContentPerformanceTracking(id={self.id}, platform='{self.platform}', score={self.performance_score})>"

    def to_dict(self):
        """Convert model to dictionary (datetimes as ISO-8601 strings or None)."""
        return {
            'id': self.id,
            'event_id': self.event_id,
            'strategy_id': self.strategy_id,
            'platform': self.platform,
            'content_type': self.content_type,
            'metrics': self.metrics,
            'performance_score': self.performance_score,
            'audience_demographics': self.audience_demographics,
            'engagement_rate': self.engagement_rate,
            'reach_count': self.reach_count,
            'click_count': self.click_count,
            'conversion_count': self.conversion_count,
            'roi': self.roi,
            'recorded_at': self.recorded_at.isoformat() if self.recorded_at else None
        }
|
||||
|
||||
class ContentTrendAnalysis(Base):
    """Trend analysis and topic recommendations.

    One row per analysis run for a (user, industry) pair, optionally tied to a
    strategy. All analysis payloads are stored as opaque JSON blobs.
    """

    __tablename__ = "content_trend_analysis"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # no FK — plain integer reference to the user
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=True)
    industry = Column(String(100), nullable=False)
    trending_topics = Column(JSON, nullable=True)  # Trending topics in the industry
    keyword_opportunities = Column(JSON, nullable=True)  # High-value keywords
    content_gaps = Column(JSON, nullable=True)  # Identified content gaps
    seasonal_opportunities = Column(JSON, nullable=True)  # Seasonal content opportunities
    competitor_analysis = Column(JSON, nullable=True)  # Competitor content analysis
    viral_potential = Column(JSON, nullable=True)  # Content with viral potential
    audience_interests = Column(JSON, nullable=True)  # Current audience interests
    analysis_date = Column(DateTime, default=datetime.utcnow)  # when the analysis was performed
    created_at = Column(DateTime, default=datetime.utcnow)  # when the row was inserted

    # Relationships
    strategy = relationship("ContentStrategy")

    def __repr__(self):
        """Short debug representation: id and industry."""
        return f"<ContentTrendAnalysis(id={self.id}, industry='{self.industry}')>"

    def to_dict(self):
        """Serialize the row to a plain dict; datetimes become ISO strings or None."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'strategy_id': self.strategy_id,
            'industry': self.industry,
            'trending_topics': self.trending_topics,
            'keyword_opportunities': self.keyword_opportunities,
            'content_gaps': self.content_gaps,
            'seasonal_opportunities': self.seasonal_opportunities,
            'competitor_analysis': self.competitor_analysis,
            'viral_potential': self.viral_potential,
            'audience_interests': self.audience_interests,
            'analysis_date': self.analysis_date.isoformat() if self.analysis_date else None,
            'created_at': self.created_at.isoformat() if self.created_at else None
        }
|
||||
|
||||
class ContentOptimization(Base):
    """Content optimization recommendations and suggestions.

    Stores the original content and its AI-optimized variants side by side so
    the two can be compared; all recommendation payloads are opaque JSON.
    """

    __tablename__ = "content_optimizations"

    id = Column(Integer, primary_key=True)
    event_id = Column(Integer, ForeignKey("calendar_events.id"), nullable=True)
    user_id = Column(Integer, nullable=False)  # no FK — plain integer reference to the user
    original_content = Column(JSON, nullable=True)  # Original content details
    optimized_content = Column(JSON, nullable=True)  # Optimized content suggestions
    platform_adaptations = Column(JSON, nullable=True)  # Platform-specific adaptations
    visual_recommendations = Column(JSON, nullable=True)  # Visual content suggestions
    hashtag_suggestions = Column(JSON, nullable=True)  # Hashtag recommendations
    keyword_optimization = Column(JSON, nullable=True)  # SEO keyword optimization
    tone_adjustments = Column(JSON, nullable=True)  # Tone and style adjustments
    length_optimization = Column(JSON, nullable=True)  # Content length optimization
    performance_prediction = Column(JSON, nullable=True)  # Predicted performance
    optimization_score = Column(Float, nullable=True)  # Optimization effectiveness score
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    event = relationship("CalendarEvent")

    def __repr__(self):
        """Short debug representation: id and optimization score."""
        return f"<ContentOptimization(id={self.id}, score={self.optimization_score})>"

    def to_dict(self):
        """Serialize the row to a plain dict; `created_at` becomes an ISO string or None."""
        return {
            'id': self.id,
            'event_id': self.event_id,
            'user_id': self.user_id,
            'original_content': self.original_content,
            'optimized_content': self.optimized_content,
            'platform_adaptations': self.platform_adaptations,
            'visual_recommendations': self.visual_recommendations,
            'hashtag_suggestions': self.hashtag_suggestions,
            'keyword_optimization': self.keyword_optimization,
            'tone_adjustments': self.tone_adjustments,
            'length_optimization': self.length_optimization,
            'performance_prediction': self.performance_prediction,
            'optimization_score': self.optimization_score,
            'created_at': self.created_at.isoformat() if self.created_at else None
        }
|
||||
|
||||
class CalendarGenerationSession(Base):
    """AI calendar generation sessions and results.

    One row per generation run; `generation_status` tracks the lifecycle
    (processing -> completed/failed) and the generated artifacts are JSON.
    """

    __tablename__ = "calendar_generation_sessions"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # no FK — plain integer reference to the user
    strategy_id = Column(Integer, ForeignKey("content_strategies.id"), nullable=True)
    session_type = Column(String(50), nullable=False)  # monthly, weekly, custom
    generation_params = Column(JSON, nullable=True)  # Parameters used for generation
    generated_calendar = Column(JSON, nullable=True)  # Generated calendar data
    ai_insights = Column(JSON, nullable=True)  # AI insights and recommendations
    performance_predictions = Column(JSON, nullable=True)  # Performance predictions
    content_themes = Column(JSON, nullable=True)  # Content themes and pillars
    platform_distribution = Column(JSON, nullable=True)  # Platform content distribution
    optimal_schedule = Column(JSON, nullable=True)  # Optimal posting schedule
    repurposing_opportunities = Column(JSON, nullable=True)  # Content repurposing
    generation_status = Column(String(20), default="processing")  # processing, completed, failed
    ai_confidence = Column(Float, nullable=True)  # Overall AI confidence
    processing_time = Column(Float, nullable=True)  # Processing time in seconds
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    strategy = relationship("ContentStrategy")

    def __repr__(self):
        """Short debug representation: id, session type and status."""
        return f"<CalendarGenerationSession(id={self.id}, type='{self.session_type}', status='{self.generation_status}')>"

    def to_dict(self):
        """Serialize the row to a plain dict; `created_at` becomes an ISO string or None."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'strategy_id': self.strategy_id,
            'session_type': self.session_type,
            'generation_params': self.generation_params,
            'generated_calendar': self.generated_calendar,
            'ai_insights': self.ai_insights,
            'performance_predictions': self.performance_predictions,
            'content_themes': self.content_themes,
            'platform_distribution': self.platform_distribution,
            'optimal_schedule': self.optimal_schedule,
            'repurposing_opportunities': self.repurposing_opportunities,
            'generation_status': self.generation_status,
            'ai_confidence': self.ai_confidence,
            'processing_time': self.processing_time,
            'created_at': self.created_at.isoformat() if self.created_at else None
        }
|
||||
164
backend/models/enhanced_persona_models.py
Normal file
164
backend/models/enhanced_persona_models.py
Normal file
@@ -0,0 +1,164 @@
|
||||
"""
|
||||
Enhanced Persona Database Models
|
||||
Improved schema for better writing style mimicry and quality tracking.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, JSON, ForeignKey, Boolean, Index
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class EnhancedWritingPersona(Base):
    """Enhanced writing persona model with improved linguistic analysis.

    Parent of EnhancedPlatformPersona; the `platform_personas` reverse
    relationship is attached at module level after both classes are defined.
    """

    __tablename__ = "enhanced_writing_personas"

    # Primary fields
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False, index=True)
    persona_name = Column(String(255), nullable=False)

    # Core Identity
    archetype = Column(String(100), nullable=True)
    core_belief = Column(Text, nullable=True)
    brand_voice_description = Column(Text, nullable=True)

    # Enhanced Linguistic Fingerprint
    linguistic_fingerprint = Column(JSON, nullable=True)  # More detailed analysis
    writing_style_signature = Column(JSON, nullable=True)  # Unique style markers
    vocabulary_profile = Column(JSON, nullable=True)  # Detailed vocabulary analysis
    sentence_patterns = Column(JSON, nullable=True)  # Sentence structure patterns
    rhetorical_style = Column(JSON, nullable=True)  # Rhetorical device preferences

    # Quality Metrics (all on a 0-100 scale)
    style_consistency_score = Column(Float, nullable=True)  # 0-100
    authenticity_score = Column(Float, nullable=True)  # 0-100
    readability_score = Column(Float, nullable=True)  # 0-100
    engagement_potential = Column(Float, nullable=True)  # 0-100

    # Learning & Adaptation
    feedback_history = Column(JSON, nullable=True)  # User feedback over time
    performance_metrics = Column(JSON, nullable=True)  # Content performance data
    adaptation_history = Column(JSON, nullable=True)  # How persona evolved

    # Source data tracking
    onboarding_session_id = Column(Integer, nullable=True)
    source_website_analysis = Column(JSON, nullable=True)
    source_research_preferences = Column(JSON, nullable=True)

    # AI Analysis metadata
    ai_analysis_version = Column(String(50), nullable=True)
    confidence_score = Column(Float, nullable=True)
    analysis_date = Column(DateTime, default=datetime.utcnow)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    is_active = Column(Boolean, default=True)  # soft-delete / deactivation flag

    # Indexes for performance
    # NOTE(review): 'idx_created_at' is a very generic index name; index names are
    # schema-global in e.g. PostgreSQL and may collide with another table's index —
    # confirm against the rest of the schema.
    __table_args__ = (
        Index('idx_user_active', 'user_id', 'is_active'),
        Index('idx_created_at', 'created_at'),
    )
|
||||
|
||||
class EnhancedPlatformPersona(Base):
    """Enhanced platform-specific persona with detailed optimization.

    Child of EnhancedWritingPersona (many platform personas per writing
    persona), one row per (persona, platform) adaptation.
    """

    __tablename__ = "enhanced_platform_personas"

    # Primary fields
    id = Column(Integer, primary_key=True)
    writing_persona_id = Column(Integer, ForeignKey("enhanced_writing_personas.id"), nullable=False)
    platform_type = Column(String(50), nullable=False, index=True)

    # Enhanced Platform-specific Analysis
    platform_linguistic_adaptation = Column(JSON, nullable=True)  # How language adapts to platform
    platform_engagement_patterns = Column(JSON, nullable=True)  # Detailed engagement analysis
    platform_content_optimization = Column(JSON, nullable=True)  # Content optimization rules
    platform_algorithm_insights = Column(JSON, nullable=True)  # Algorithm-specific insights

    # Performance Tracking
    content_performance_history = Column(JSON, nullable=True)  # Historical performance data
    engagement_metrics = Column(JSON, nullable=True)  # Engagement statistics
    optimization_suggestions = Column(JSON, nullable=True)  # AI-generated optimization tips

    # Quality Assurance (0-100 scales)
    platform_compliance_score = Column(Float, nullable=True)  # 0-100
    optimization_effectiveness = Column(Float, nullable=True)  # 0-100

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    is_active = Column(Boolean, default=True)

    # Relationships — reverse side (`platform_personas`) is attached at module level below
    writing_persona = relationship("EnhancedWritingPersona", back_populates="platform_personas")

    # Indexes
    __table_args__ = (
        Index('idx_platform_active', 'platform_type', 'is_active'),
        Index('idx_persona_platform', 'writing_persona_id', 'platform_type'),
    )
|
||||
|
||||
class PersonaQualityMetrics(Base):
    """Tracks persona quality and improvement over time.

    One assessment row per evaluation; may target a writing persona alone or a
    specific platform persona (platform_persona_id nullable).
    """

    __tablename__ = "persona_quality_metrics"

    id = Column(Integer, primary_key=True)
    writing_persona_id = Column(Integer, ForeignKey("enhanced_writing_personas.id"), nullable=False)
    platform_persona_id = Column(Integer, ForeignKey("enhanced_platform_personas.id"), nullable=True)

    # Quality Scores
    style_accuracy = Column(Float, nullable=True)  # How well it mimics user style
    content_quality = Column(Float, nullable=True)  # Overall content quality
    engagement_rate = Column(Float, nullable=True)  # Engagement performance
    consistency_score = Column(Float, nullable=True)  # Consistency across content

    # User Feedback
    user_satisfaction = Column(Float, nullable=True)  # User rating
    user_feedback = Column(Text, nullable=True)  # Qualitative feedback
    improvement_requests = Column(JSON, nullable=True)  # Specific improvement requests

    # AI Analysis
    ai_quality_assessment = Column(JSON, nullable=True)  # AI's quality analysis
    improvement_suggestions = Column(JSON, nullable=True)  # AI suggestions for improvement

    # Metadata
    assessment_date = Column(DateTime, default=datetime.utcnow)
    assessor_type = Column(String(50), nullable=True)  # user, ai, automated

    # Relationships (one-directional)
    writing_persona = relationship("EnhancedWritingPersona")
    platform_persona = relationship("EnhancedPlatformPersona")
|
||||
|
||||
class PersonaLearningData(Base):
    """Stores learning data for persona improvement.

    Each row captures one learning event: the inputs that triggered it and the
    refinements that were applied to the persona as a result.
    """

    __tablename__ = "persona_learning_data"

    id = Column(Integer, primary_key=True)
    writing_persona_id = Column(Integer, ForeignKey("enhanced_writing_personas.id"), nullable=False)

    # Learning Inputs
    user_writing_samples = Column(JSON, nullable=True)  # Additional user writing samples
    successful_content_examples = Column(JSON, nullable=True)  # High-performing content
    user_preferences = Column(JSON, nullable=True)  # User preferences and adjustments

    # Learning Outputs
    style_refinements = Column(JSON, nullable=True)  # Refinements made to persona
    vocabulary_updates = Column(JSON, nullable=True)  # Vocabulary additions/removals
    pattern_adjustments = Column(JSON, nullable=True)  # Pattern adjustments

    # Metadata
    learning_date = Column(DateTime, default=datetime.utcnow)
    learning_type = Column(String(50), nullable=True)  # feedback, sample, preference

    # Relationships (one-directional)
    writing_persona = relationship("EnhancedWritingPersona")
|
||||
|
||||
# Attach the reverse side of EnhancedPlatformPersona.writing_persona after both
# classes are defined; deleting a writing persona cascades to its platform personas.
EnhancedWritingPersona.platform_personas = relationship("EnhancedPlatformPersona", back_populates="writing_persona", cascade="all, delete-orphan")
|
||||
307
backend/models/enhanced_strategy_models.py
Normal file
307
backend/models/enhanced_strategy_models.py
Normal file
@@ -0,0 +1,307 @@
|
||||
"""
|
||||
Enhanced Strategy Database Models
|
||||
Defines the enhanced database schema for content strategy with 30+ strategic inputs.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, JSON, ForeignKey, Boolean
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class EnhancedContentStrategy(Base):
    """Enhanced Content Strategy model with 30+ strategic inputs.

    Groups inputs into Business Context, Audience Intelligence, Competitive
    Intelligence, Content Strategy, and Performance & Analytics sections, plus
    legacy fields and AI-analysis results. `completion_percentage` is derived
    by calculate_completion_percentage().
    """

    __tablename__ = "enhanced_content_strategies"

    # Primary fields
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # no FK — plain integer reference to the user
    name = Column(String(255), nullable=False)
    industry = Column(String(100), nullable=True)

    # Business Context (8 inputs)
    business_objectives = Column(JSON, nullable=True)  # Primary and secondary business goals
    target_metrics = Column(JSON, nullable=True)  # KPIs and success metrics
    content_budget = Column(Float, nullable=True)  # Monthly/annual content budget
    team_size = Column(Integer, nullable=True)  # Content team size
    implementation_timeline = Column(String(100), nullable=True)  # 3 months, 6 months, 1 year, etc.
    market_share = Column(String(50), nullable=True)  # Current market share percentage
    competitive_position = Column(String(50), nullable=True)  # Leader, challenger, niche, emerging
    # NOTE(review): this Column attribute is re-assigned below by the
    # `performance_metrics` relationship, so the JSON column is silently dropped
    # from the mapping — one of the two must be renamed; verify before relying on it.
    performance_metrics = Column(JSON, nullable=True)  # Current performance data

    # Audience Intelligence (6 inputs)
    content_preferences = Column(JSON, nullable=True)  # Preferred content formats and topics
    consumption_patterns = Column(JSON, nullable=True)  # When and how audience consumes content
    audience_pain_points = Column(JSON, nullable=True)  # Key challenges and pain points
    buying_journey = Column(JSON, nullable=True)  # Customer journey stages and touchpoints
    seasonal_trends = Column(JSON, nullable=True)  # Seasonal content opportunities
    engagement_metrics = Column(JSON, nullable=True)  # Current engagement data

    # Competitive Intelligence (5 inputs)
    top_competitors = Column(JSON, nullable=True)  # List of main competitors
    competitor_content_strategies = Column(JSON, nullable=True)  # Analysis of competitor approaches
    market_gaps = Column(JSON, nullable=True)  # Identified market opportunities
    industry_trends = Column(JSON, nullable=True)  # Current industry trends
    emerging_trends = Column(JSON, nullable=True)  # Upcoming trends and opportunities

    # Content Strategy (7 inputs)
    preferred_formats = Column(JSON, nullable=True)  # Blog posts, videos, infographics, etc.
    content_mix = Column(JSON, nullable=True)  # Distribution of content types
    content_frequency = Column(String(50), nullable=True)  # Daily, weekly, monthly, etc.
    optimal_timing = Column(JSON, nullable=True)  # Best times for publishing
    quality_metrics = Column(JSON, nullable=True)  # Content quality standards
    editorial_guidelines = Column(JSON, nullable=True)  # Style and tone guidelines
    brand_voice = Column(JSON, nullable=True)  # Brand personality and voice

    # Performance & Analytics (4 inputs)
    traffic_sources = Column(JSON, nullable=True)  # Primary traffic sources
    conversion_rates = Column(JSON, nullable=True)  # Current conversion data
    content_roi_targets = Column(JSON, nullable=True)  # ROI goals and targets
    ab_testing_capabilities = Column(Boolean, default=False)  # A/B testing availability

    # Legacy fields for backward compatibility
    target_audience = Column(JSON, nullable=True)  # Store audience demographics and preferences
    content_pillars = Column(JSON, nullable=True)  # Store content pillar definitions
    ai_recommendations = Column(JSON, nullable=True)  # Store AI-generated recommendations

    # Enhanced AI Analysis fields
    comprehensive_ai_analysis = Column(JSON, nullable=True)  # Enhanced AI analysis results
    onboarding_data_used = Column(JSON, nullable=True)  # Track onboarding data integration
    strategic_scores = Column(JSON, nullable=True)  # Strategic performance scores
    market_positioning = Column(JSON, nullable=True)  # Market positioning analysis
    competitive_advantages = Column(JSON, nullable=True)  # Identified competitive advantages
    strategic_risks = Column(JSON, nullable=True)  # Risk assessment
    opportunity_analysis = Column(JSON, nullable=True)  # Opportunity identification

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    completion_percentage = Column(Float, default=0.0)  # Track input completion
    data_source_transparency = Column(JSON, nullable=True)  # Track data sources for auto-population

    # Relationships
    # NOTE(review): the monitoring/activation target classes and the Base used here
    # must share one declarative registry for these string references to resolve —
    # this module creates its own declarative_base(); confirm registries match.
    autofill_insights = relationship("ContentStrategyAutofillInsights", back_populates="strategy", cascade="all, delete-orphan")

    # Monitoring relationships
    monitoring_plans = relationship("StrategyMonitoringPlan", back_populates="strategy", cascade="all, delete-orphan")
    monitoring_tasks = relationship("MonitoringTask", back_populates="strategy", cascade="all, delete-orphan")
    # NOTE(review): shadows the `performance_metrics` JSON Column declared above
    # (the relationship wins in the class namespace) — see note on the column.
    performance_metrics = relationship("StrategyPerformanceMetrics", back_populates="strategy", cascade="all, delete-orphan")
    activation_status = relationship("StrategyActivationStatus", back_populates="strategy", cascade="all, delete-orphan")

    def __repr__(self):
        """Short debug representation: id, name and industry."""
        return f"<EnhancedContentStrategy(id={self.id}, name='{self.name}', industry='{self.industry}')>"

    def to_dict(self):
        """Convert model to dictionary with enhanced structure."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'name': self.name,
            'industry': self.industry,

            # Business Context
            'business_objectives': self.business_objectives,
            'target_metrics': self.target_metrics,
            'content_budget': self.content_budget,
            'team_size': self.team_size,
            'implementation_timeline': self.implementation_timeline,
            'market_share': self.market_share,
            'competitive_position': self.competitive_position,
            # NOTE(review): resolves to the relationship (list of records), not the
            # JSON column, because of the shadowing noted above — likely not
            # JSON-serializable as-is; verify.
            'performance_metrics': self.performance_metrics,

            # Audience Intelligence
            'content_preferences': self.content_preferences,
            'consumption_patterns': self.consumption_patterns,
            'audience_pain_points': self.audience_pain_points,
            'buying_journey': self.buying_journey,
            'seasonal_trends': self.seasonal_trends,
            'engagement_metrics': self.engagement_metrics,

            # Competitive Intelligence
            'top_competitors': self.top_competitors,
            'competitor_content_strategies': self.competitor_content_strategies,
            'market_gaps': self.market_gaps,
            'industry_trends': self.industry_trends,
            'emerging_trends': self.emerging_trends,

            # Content Strategy
            'preferred_formats': self.preferred_formats,
            'content_mix': self.content_mix,
            'content_frequency': self.content_frequency,
            'optimal_timing': self.optimal_timing,
            'quality_metrics': self.quality_metrics,
            'editorial_guidelines': self.editorial_guidelines,
            'brand_voice': self.brand_voice,

            # Performance & Analytics
            'traffic_sources': self.traffic_sources,
            'conversion_rates': self.conversion_rates,
            'content_roi_targets': self.content_roi_targets,
            'ab_testing_capabilities': self.ab_testing_capabilities,

            # Legacy fields
            'target_audience': self.target_audience,
            'content_pillars': self.content_pillars,
            'ai_recommendations': self.ai_recommendations,

            # Enhanced AI Analysis
            'comprehensive_ai_analysis': self.comprehensive_ai_analysis,
            'onboarding_data_used': self.onboarding_data_used,
            'strategic_scores': self.strategic_scores,
            'market_positioning': self.market_positioning,
            'competitive_advantages': self.competitive_advantages,
            'strategic_risks': self.strategic_risks,
            'opportunity_analysis': self.opportunity_analysis,

            # Metadata
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None,
            'completion_percentage': self.completion_percentage,
            'data_source_transparency': self.data_source_transparency
        }

    def calculate_completion_percentage(self):
        """Calculate the percentage of required fields that have been filled.

        Counts attributes that are not None (empty dicts/lists still count as
        filled), stores the result on `completion_percentage`, and returns it.
        """
        required_fields = [
            'business_objectives', 'target_metrics', 'content_budget', 'team_size',
            'implementation_timeline', 'market_share', 'competitive_position',
            'content_preferences', 'consumption_patterns', 'audience_pain_points',
            'buying_journey', 'seasonal_trends', 'engagement_metrics',
            'top_competitors', 'competitor_content_strategies', 'market_gaps',
            'industry_trends', 'emerging_trends', 'preferred_formats',
            'content_mix', 'content_frequency', 'optimal_timing',
            'quality_metrics', 'editorial_guidelines', 'brand_voice',
            'traffic_sources', 'conversion_rates', 'content_roi_targets'
        ]

        filled_fields = sum(1 for field in required_fields if getattr(self, field) is not None)
        self.completion_percentage = (filled_fields / len(required_fields)) * 100
        return self.completion_percentage
|
||||
|
||||
class EnhancedAIAnalysisResult(Base):
    """Enhanced AI Analysis Result model for storing comprehensive AI-generated insights.

    One row per analysis run; `analysis_type` selects which of the five
    specialized prompt outputs is populated.
    """

    __tablename__ = "enhanced_ai_analysis_results"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # no FK — plain integer reference to the user
    strategy_id = Column(Integer, ForeignKey("enhanced_content_strategies.id"), nullable=True)

    # Analysis type for the 5 specialized prompts
    analysis_type = Column(String(50), nullable=False)  # comprehensive_strategy, audience_intelligence, competitive_intelligence, performance_optimization, content_calendar_optimization

    # Comprehensive analysis results
    comprehensive_insights = Column(JSON, nullable=True)  # Holistic strategy insights
    audience_intelligence = Column(JSON, nullable=True)  # Detailed audience analysis
    competitive_intelligence = Column(JSON, nullable=True)  # Competitive landscape analysis
    performance_optimization = Column(JSON, nullable=True)  # Performance improvement recommendations
    content_calendar_optimization = Column(JSON, nullable=True)  # Calendar optimization insights

    # Enhanced data tracking
    onboarding_data_used = Column(JSON, nullable=True)  # Track onboarding data integration
    data_confidence_scores = Column(JSON, nullable=True)  # Confidence scores for data sources
    recommendation_quality_scores = Column(JSON, nullable=True)  # Quality scores for recommendations

    # Performance metrics
    processing_time = Column(Float, nullable=True)  # Processing time in seconds
    ai_service_status = Column(String(20), default="operational")  # operational, fallback, error
    prompt_version = Column(String(50), nullable=True)  # Version of AI prompt used

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def __repr__(self):
        """Short debug representation: id, analysis type and user."""
        return f"<EnhancedAIAnalysisResult(id={self.id}, type='{self.analysis_type}', user_id={self.user_id})>"

    def to_dict(self):
        """Convert model to dictionary; datetimes become ISO strings or None."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'strategy_id': self.strategy_id,
            'analysis_type': self.analysis_type,
            'comprehensive_insights': self.comprehensive_insights,
            'audience_intelligence': self.audience_intelligence,
            'competitive_intelligence': self.competitive_intelligence,
            'performance_optimization': self.performance_optimization,
            'content_calendar_optimization': self.content_calendar_optimization,
            'onboarding_data_used': self.onboarding_data_used,
            'data_confidence_scores': self.data_confidence_scores,
            'recommendation_quality_scores': self.recommendation_quality_scores,
            'processing_time': self.processing_time,
            'ai_service_status': self.ai_service_status,
            'prompt_version': self.prompt_version,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
class OnboardingDataIntegration(Base):
    """Model for tracking onboarding data integration with enhanced strategy.

    Keeps a snapshot of the onboarding data that fed a strategy, the mapping
    used to auto-populate fields, and quality/transparency metadata.
    """

    __tablename__ = "onboarding_data_integrations"

    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, nullable=False)  # no FK — plain integer reference to the user
    strategy_id = Column(Integer, ForeignKey("enhanced_content_strategies.id"), nullable=True)

    # Legacy onboarding storage fields (match existing DB schema)
    website_analysis_data = Column(JSON, nullable=True)  # Data from website analysis
    research_preferences_data = Column(JSON, nullable=True)  # Data from research preferences
    api_keys_data = Column(JSON, nullable=True)  # API configuration data

    # Integration mapping and user edits
    field_mappings = Column(JSON, nullable=True)  # Mapping of onboarding fields to strategy fields
    auto_populated_fields = Column(JSON, nullable=True)  # Fields auto-populated from onboarding
    user_overrides = Column(JSON, nullable=True)  # Fields manually overridden by user

    # Data quality and transparency
    data_quality_scores = Column(JSON, nullable=True)  # Quality scores for each data source
    confidence_levels = Column(JSON, nullable=True)  # Confidence levels for auto-populated data
    data_freshness = Column(JSON, nullable=True)  # How recent the onboarding data is

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    def to_dict(self):
        """Convert model to dictionary (legacy fields); datetimes become ISO strings or None."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'strategy_id': self.strategy_id,
            'website_analysis_data': self.website_analysis_data,
            'research_preferences_data': self.research_preferences_data,
            'api_keys_data': self.api_keys_data,
            'field_mappings': self.field_mappings,
            'auto_populated_fields': self.auto_populated_fields,
            'user_overrides': self.user_overrides,
            'data_quality_scores': self.data_quality_scores,
            'confidence_levels': self.confidence_levels,
            'data_freshness': self.data_freshness,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
|
||||
|
||||
# New model to persist accepted auto-fill inputs used to create a strategy
class ContentStrategyAutofillInsights(Base):
    """Snapshot of the auto-fill inputs a user accepted when a strategy was
    created/confirmed, plus the transparency metadata (sources, quality,
    confidence, freshness) recorded at that moment."""

    __tablename__ = "content_strategy_autofill_insights"

    id = Column(Integer, primary_key=True)
    strategy_id = Column(Integer, ForeignKey("enhanced_content_strategies.id"), nullable=False)
    user_id = Column(Integer, nullable=False)  # no FK — plain integer reference to the user

    # Full snapshot of accepted inputs and transparency at time of strategy creation/confirmation
    accepted_fields = Column(JSON, nullable=False)  # the only required payload
    sources = Column(JSON, nullable=True)
    input_data_points = Column(JSON, nullable=True)
    quality_scores = Column(JSON, nullable=True)
    confidence_levels = Column(JSON, nullable=True)
    data_freshness = Column(JSON, nullable=True)

    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship back to strategy
    strategy = relationship("EnhancedContentStrategy", back_populates="autofill_insights")
|
||||
85
backend/models/hallucination_models.py
Normal file
85
backend/models/hallucination_models.py
Normal file
@@ -0,0 +1,85 @@
|
||||
"""
|
||||
Pydantic models for hallucination detection API endpoints.
|
||||
"""
|
||||
|
||||
from typing import List, Dict, Any, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
|
||||
class AssessmentType(str, Enum):
    """Assessment types for claim verification.

    Subclasses str so values serialize directly in Pydantic/JSON responses.
    """
    SUPPORTED = "supported"  # evidence backs the claim
    REFUTED = "refuted"  # evidence contradicts the claim
    INSUFFICIENT_INFORMATION = "insufficient_information"  # no decisive evidence found
|
||||
|
||||
class SourceDocument(BaseModel):
    """Represents a source document used for fact-checking."""
    title: str = Field(..., description="Title of the source document")
    url: str = Field(..., description="URL of the source document")
    text: str = Field(..., description="Relevant text content from the source")
    published_date: Optional[str] = Field(None, description="Publication date of the source")
    author: Optional[str] = Field(None, description="Author of the source")
    # Neutral 0.5 default. NOTE(review): the (0.0-1.0) range is documented
    # but not enforced (no ge/le constraint) — confirm whether that is intended.
    score: float = Field(0.5, description="Relevance score of the source (0.0-1.0)")
class Claim(BaseModel):
    """Represents a single verifiable claim extracted from text."""
    text: str = Field(..., description="The claim text")
    # NOTE(review): documented (0.0-1.0) range is not enforced (no ge/le).
    confidence: float = Field(..., description="Confidence score for the claim assessment (0.0-1.0)")
    assessment: AssessmentType = Field(..., description="Assessment result for the claim")
    supporting_sources: List[SourceDocument] = Field(default_factory=list, description="Sources that support the claim")
    refuting_sources: List[SourceDocument] = Field(default_factory=list, description="Sources that refute the claim")
    reasoning: Optional[str] = Field(None, description="Explanation for the assessment")
class HallucinationDetectionRequest(BaseModel):
    """Request model for hallucination detection.

    Input bounds (text length 10-5000 chars, 1-20 claims) are enforced by
    the Field constraints below.
    """
    text: str = Field(..., description="Text to analyze for factual accuracy", min_length=10, max_length=5000)
    include_sources: bool = Field(True, description="Whether to include source documents in the response")
    max_claims: int = Field(10, description="Maximum number of claims to extract and verify", ge=1, le=20)
class HallucinationDetectionResponse(BaseModel):
    """Response model for hallucination detection."""
    success: bool = Field(..., description="Whether the analysis was successful")
    claims: List[Claim] = Field(default_factory=list, description="List of extracted and verified claims")
    overall_confidence: float = Field(..., description="Overall confidence score for the analysis (0.0-1.0)")
    total_claims: int = Field(..., description="Total number of claims extracted")
    # NOTE(review): supported + refuted + insufficient presumably sum to
    # total_claims, but no validator enforces that invariant here.
    supported_claims: int = Field(..., description="Number of claims that are supported by sources")
    refuted_claims: int = Field(..., description="Number of claims that are refuted by sources")
    insufficient_claims: int = Field(..., description="Number of claims with insufficient information")
    timestamp: str = Field(..., description="Timestamp of the analysis")
    processing_time_ms: Optional[int] = Field(None, description="Processing time in milliseconds")
    error: Optional[str] = Field(None, description="Error message if analysis failed")
class ClaimExtractionRequest(BaseModel):
    """Request model for claim extraction only (no verification step)."""
    text: str = Field(..., description="Text to extract claims from", min_length=10, max_length=5000)
    max_claims: int = Field(10, description="Maximum number of claims to extract", ge=1, le=20)
class ClaimExtractionResponse(BaseModel):
    """Response model for claim extraction.

    Claims here are plain strings — verification metadata is only produced
    by the full detection endpoint (``Claim``).
    """
    success: bool = Field(..., description="Whether the extraction was successful")
    claims: List[str] = Field(default_factory=list, description="List of extracted claim texts")
    total_claims: int = Field(..., description="Total number of claims extracted")
    timestamp: str = Field(..., description="Timestamp of the extraction")
    error: Optional[str] = Field(None, description="Error message if extraction failed")
class ClaimVerificationRequest(BaseModel):
    """Request model for verifying a single claim."""
    claim: str = Field(..., description="Claim to verify", min_length=5, max_length=500)
    include_sources: bool = Field(True, description="Whether to include source documents in the response")
class ClaimVerificationResponse(BaseModel):
    """Response model for claim verification."""
    success: bool = Field(..., description="Whether the verification was successful")
    # Required even on failure; callers should check `success`/`error` first.
    claim: Claim = Field(..., description="Verified claim with assessment results")
    timestamp: str = Field(..., description="Timestamp of the verification")
    processing_time_ms: Optional[int] = Field(None, description="Processing time in milliseconds")
    error: Optional[str] = Field(None, description="Error message if verification failed")
class HealthCheckResponse(BaseModel):
    """Response model for the hallucination-detection service health check."""
    status: str = Field(..., description="Service status")
    version: str = Field(..., description="Service version")
    # Availability flags for the two external dependencies of the service.
    exa_api_available: bool = Field(..., description="Whether Exa API is available")
    openai_api_available: bool = Field(..., description="Whether OpenAI API is available")
    timestamp: str = Field(..., description="Timestamp of the health check")
454
backend/models/linkedin_models.py
Normal file
454
backend/models/linkedin_models.py
Normal file
@@ -0,0 +1,454 @@
|
||||
"""
|
||||
LinkedIn Content Generation Models for ALwrity
|
||||
|
||||
This module defines the data models for LinkedIn content generation endpoints.
|
||||
Enhanced to support grounding capabilities with source integration and quality metrics.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field, validator
|
||||
from typing import List, Optional, Dict, Any, Literal
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class LinkedInPostType(str, Enum):
    """Types of LinkedIn posts.

    str-valued so members serialize directly in request/response JSON.
    """
    PROFESSIONAL = "professional"
    THOUGHT_LEADERSHIP = "thought_leadership"
    INDUSTRY_NEWS = "industry_news"
    PERSONAL_STORY = "personal_story"
    COMPANY_UPDATE = "company_update"
    POLL = "poll"
class LinkedInTone(str, Enum):
    """LinkedIn content tone options."""
    PROFESSIONAL = "professional"
    CONVERSATIONAL = "conversational"
    AUTHORITATIVE = "authoritative"
    INSPIRATIONAL = "inspirational"
    EDUCATIONAL = "educational"
    FRIENDLY = "friendly"
class SearchEngine(str, Enum):
    """Available search engines for research-backed content generation."""
    METAPHOR = "metaphor"
    GOOGLE = "google"
    TAVILY = "tavily"
class GroundingLevel(str, Enum):
    """Levels of content grounding, from no grounding to the most thorough."""
    NONE = "none"
    BASIC = "basic"
    ENHANCED = "enhanced"
    ENTERPRISE = "enterprise"
class LinkedInPostRequest(BaseModel):
    """Request model for LinkedIn post generation."""
    topic: str = Field(..., description="Main topic for the post", min_length=3, max_length=200)
    industry: str = Field(..., description="Target industry context", min_length=2, max_length=100)
    post_type: LinkedInPostType = Field(default=LinkedInPostType.PROFESSIONAL, description="Type of LinkedIn post")
    tone: LinkedInTone = Field(default=LinkedInTone.PROFESSIONAL, description="Tone of the post")
    target_audience: Optional[str] = Field(None, description="Specific target audience", max_length=200)
    # NOTE(review): `max_items` is pydantic-v1 constraint syntax while the
    # `json_schema_extra` Config key below is v2 style — confirm which
    # pydantic major version the project pins.
    key_points: Optional[List[str]] = Field(None, description="Key points to include", max_items=10)
    include_hashtags: bool = Field(default=True, description="Whether to include hashtags")
    include_call_to_action: bool = Field(default=True, description="Whether to include call to action")
    research_enabled: bool = Field(default=True, description="Whether to include research-backed content")
    search_engine: SearchEngine = Field(default=SearchEngine.GOOGLE, description="Search engine for research")
    # LinkedIn post character ceiling; requests may ask for less (>= 100).
    max_length: int = Field(default=3000, description="Maximum character count", ge=100, le=3000)
    grounding_level: GroundingLevel = Field(default=GroundingLevel.ENHANCED, description="Level of content grounding")
    include_citations: bool = Field(default=True, description="Whether to include inline citations")
    user_id: Optional[int] = Field(default=1, description="User id for persona lookup")
    persona_override: Optional[Dict[str, Any]] = Field(default=None, description="Session-only persona overrides to apply without saving")

    class Config:
        # Example payload surfaced in the generated OpenAPI schema.
        json_schema_extra = {
            "example": {
                "topic": "AI in healthcare transformation",
                "industry": "Healthcare",
                "post_type": "thought_leadership",
                "tone": "professional",
                "target_audience": "Healthcare executives and professionals",
                "key_points": ["AI diagnostics", "Patient outcomes", "Cost reduction"],
                "include_hashtags": True,
                "include_call_to_action": True,
                "research_enabled": True,
                "search_engine": "google",
                "max_length": 2000,
                "grounding_level": "enhanced",
                "include_citations": True
            }
        }
class LinkedInArticleRequest(BaseModel):
    """Request model for LinkedIn article generation."""
    topic: str = Field(..., description="Main topic for the article", min_length=3, max_length=200)
    industry: str = Field(..., description="Target industry context", min_length=2, max_length=100)
    tone: LinkedInTone = Field(default=LinkedInTone.PROFESSIONAL, description="Tone of the article")
    target_audience: Optional[str] = Field(None, description="Specific target audience", max_length=200)
    # NOTE(review): `max_items` is pydantic-v1 constraint syntax (v2 uses
    # `max_length` for lists) — confirm against the pinned pydantic version.
    key_sections: Optional[List[str]] = Field(None, description="Key sections to include", max_items=10)
    include_images: bool = Field(default=True, description="Whether to generate image suggestions")
    seo_optimization: bool = Field(default=True, description="Whether to include SEO optimization")
    research_enabled: bool = Field(default=True, description="Whether to include research-backed content")
    search_engine: SearchEngine = Field(default=SearchEngine.GOOGLE, description="Search engine for research")
    word_count: int = Field(default=1500, description="Target word count", ge=500, le=5000)
    grounding_level: GroundingLevel = Field(default=GroundingLevel.ENHANCED, description="Level of content grounding")
    include_citations: bool = Field(default=True, description="Whether to include inline citations")
    user_id: Optional[int] = Field(default=1, description="User id for persona lookup")
    persona_override: Optional[Dict[str, Any]] = Field(default=None, description="Session-only persona overrides to apply without saving")

    class Config:
        # Example payload surfaced in the generated OpenAPI schema.
        json_schema_extra = {
            "example": {
                "topic": "Digital transformation in manufacturing",
                "industry": "Manufacturing",
                "tone": "professional",
                "target_audience": "Manufacturing leaders and technology professionals",
                "key_sections": ["Current challenges", "Technology solutions", "Implementation strategies"],
                "include_images": True,
                "seo_optimization": True,
                "research_enabled": True,
                "search_engine": "google",
                "word_count": 2000,
                "grounding_level": "enhanced",
                "include_citations": True
            }
        }
class LinkedInCarouselRequest(BaseModel):
    """Request model for LinkedIn carousel generation."""
    topic: str = Field(..., description="Main topic for the carousel", min_length=3, max_length=200)
    industry: str = Field(..., description="Target industry context", min_length=2, max_length=100)
    tone: LinkedInTone = Field(default=LinkedInTone.PROFESSIONAL, description="Tone of the carousel")
    target_audience: Optional[str] = Field(None, description="Specific target audience", max_length=200)
    # Count of content slides; cover/CTA slides are controlled separately below.
    number_of_slides: int = Field(default=5, description="Number of slides", ge=3, le=10)
    include_cover_slide: bool = Field(default=True, description="Whether to include a cover slide")
    include_cta_slide: bool = Field(default=True, description="Whether to include a call-to-action slide")
    research_enabled: bool = Field(default=True, description="Whether to include research-backed content")
    search_engine: SearchEngine = Field(default=SearchEngine.GOOGLE, description="Search engine for research")
    grounding_level: GroundingLevel = Field(default=GroundingLevel.ENHANCED, description="Level of content grounding")
    include_citations: bool = Field(default=True, description="Whether to include inline citations")

    class Config:
        # Example payload surfaced in the generated OpenAPI schema.
        json_schema_extra = {
            "example": {
                "topic": "Future of remote work",
                "industry": "Technology",
                "tone": "professional",
                "target_audience": "HR professionals and business leaders",
                "number_of_slides": 6,
                "include_cover_slide": True,
                "include_cta_slide": True,
                "research_enabled": True,
                "search_engine": "google",
                "grounding_level": "enhanced",
                "include_citations": True
            }
        }
class LinkedInVideoScriptRequest(BaseModel):
    """Request model for LinkedIn video script generation."""
    topic: str = Field(..., description="Main topic for the video script", min_length=3, max_length=200)
    industry: str = Field(..., description="Target industry context", min_length=2, max_length=100)
    tone: LinkedInTone = Field(default=LinkedInTone.PROFESSIONAL, description="Tone of the video script")
    target_audience: Optional[str] = Field(None, description="Specific target audience", max_length=200)
    # 30 seconds to 5 minutes.
    video_duration: int = Field(default=60, description="Target video duration in seconds", ge=30, le=300)
    include_captions: bool = Field(default=True, description="Whether to include captions")
    include_thumbnail_suggestions: bool = Field(default=True, description="Whether to include thumbnail suggestions")
    research_enabled: bool = Field(default=True, description="Whether to include research-backed content")
    search_engine: SearchEngine = Field(default=SearchEngine.GOOGLE, description="Search engine for research")
    grounding_level: GroundingLevel = Field(default=GroundingLevel.ENHANCED, description="Level of content grounding")
    include_citations: bool = Field(default=True, description="Whether to include inline citations")

    class Config:
        # Example payload surfaced in the generated OpenAPI schema.
        json_schema_extra = {
            "example": {
                "topic": "Cybersecurity best practices",
                "industry": "Technology",
                "tone": "educational",
                "target_audience": "IT professionals and business leaders",
                "video_duration": 90,
                "include_captions": True,
                "include_thumbnail_suggestions": True,
                "research_enabled": True,
                "search_engine": "google",
                "grounding_level": "enhanced",
                "include_citations": True
            }
        }
class LinkedInCommentResponseRequest(BaseModel):
    """Request model for LinkedIn comment response generation."""
    original_comment: str = Field(..., description="Original comment to respond to", min_length=10, max_length=1000)
    post_context: str = Field(..., description="Context of the post being commented on", min_length=10, max_length=500)
    industry: str = Field(..., description="Industry context", min_length=2, max_length=100)
    tone: LinkedInTone = Field(default=LinkedInTone.FRIENDLY, description="Tone of the response")
    # NOTE(review): free-form string with three documented values — a
    # Literal["short", "medium", "long"] would reject typos; confirm callers.
    response_length: str = Field(default="medium", description="Length of response: short, medium, long")
    include_questions: bool = Field(default=True, description="Whether to include engaging questions")
    # Research defaults OFF here (unlike the other request models) and
    # grounding defaults to BASIC — comment replies are lighter weight.
    research_enabled: bool = Field(default=False, description="Whether to include research-backed content")
    search_engine: SearchEngine = Field(default=SearchEngine.GOOGLE, description="Search engine for research")
    grounding_level: GroundingLevel = Field(default=GroundingLevel.BASIC, description="Level of content grounding")

    class Config:
        # Example payload surfaced in the generated OpenAPI schema.
        json_schema_extra = {
            "example": {
                "original_comment": "Great insights on AI implementation!",
                "post_context": "Post about AI transformation in healthcare",
                "industry": "Healthcare",
                "tone": "friendly",
                "response_length": "medium",
                "include_questions": True,
                "research_enabled": False,
                "search_engine": "google",
                "grounding_level": "basic"
            }
        }
# Enhanced Research Source Model
class ResearchSource(BaseModel):
    """Enhanced model for research source information with grounding capabilities."""
    title: str
    url: str
    content: str
    relevance_score: Optional[float] = Field(None, description="Relevance score (0.0-1.0)")
    credibility_score: Optional[float] = Field(None, description="Credibility score (0.0-1.0)")
    domain_authority: Optional[float] = Field(None, description="Domain authority score (0.0-1.0)")
    source_type: Optional[str] = Field(None, description="Type of source (academic, business_news, etc.)")
    publication_date: Optional[str] = Field(None, description="Publication date if available")
    # Untyped passthrough of the original search-engine result for debugging/auditing.
    raw_result: Optional[Dict[str, Any]] = Field(None, description="Raw search result data")
# Enhanced Hashtag Suggestion Model
class HashtagSuggestion(BaseModel):
    """Enhanced model for hashtag suggestions."""
    hashtag: str
    category: str
    popularity_score: Optional[float] = Field(None, description="Popularity score (0.0-1.0)")
    relevance_score: Optional[float] = Field(None, description="Relevance to topic (0.0-1.0)")
    industry_alignment: Optional[float] = Field(None, description="Industry alignment score (0.0-1.0)")
# Enhanced Image Suggestion Model
class ImageSuggestion(BaseModel):
    """Enhanced model for image suggestions."""
    description: str
    # Accessibility text for the suggested image.
    alt_text: str
    style: Optional[str] = Field(None, description="Visual style description")
    placement: Optional[str] = Field(None, description="Suggested placement in content")
    relevance_score: Optional[float] = Field(None, description="Relevance to content (0.0-1.0)")
# New Quality Metrics Model
class ContentQualityMetrics(BaseModel):
    """Model for content quality assessment metrics.

    All score fields share a documented (0.0-1.0) range; none of them are
    constraint-enforced here.
    """
    overall_score: float = Field(..., description="Overall quality score (0.0-1.0)")
    factual_accuracy: float = Field(..., description="Factual accuracy score (0.0-1.0)")
    source_verification: float = Field(..., description="Source verification score (0.0-1.0)")
    professional_tone: float = Field(..., description="Professional tone score (0.0-1.0)")
    industry_relevance: float = Field(..., description="Industry relevance score (0.0-1.0)")
    citation_coverage: float = Field(..., description="Citation coverage score (0.0-1.0)")
    content_length: int = Field(..., description="Content length in characters")
    word_count: int = Field(..., description="Word count")
    analysis_timestamp: str = Field(..., description="Timestamp of quality analysis")
    recommendations: Optional[List[str]] = Field(default_factory=list, description="List of improvement recommendations")
# New Citation Model
class Citation(BaseModel):
    """Model for inline citations in content."""
    type: str = Field(..., description="Type of citation (inline, footnote, etc.)")
    reference: str = Field(..., description="Citation reference (e.g., 'Source 1')")
    position: Optional[int] = Field(None, description="Position in content")
    # Links the citation back to an entry in a response's research_sources list.
    source_index: Optional[int] = Field(None, description="Index of source in research_sources")
# Enhanced Post Content Model
class PostContent(BaseModel):
    """Enhanced model for generated post content with grounding capabilities."""
    content: str
    character_count: int
    hashtags: List[HashtagSuggestion]
    call_to_action: Optional[str] = None
    engagement_prediction: Optional[Dict[str, Any]] = None
    citations: List[Citation] = Field(default_factory=list, description="Inline citations")
    source_list: Optional[str] = Field(None, description="Formatted source list")
    quality_metrics: Optional[ContentQualityMetrics] = Field(None, description="Content quality metrics")
    grounding_enabled: bool = Field(default=False, description="Whether grounding was used")
    # NOTE(review): Optional with a list default_factory — defaults to [],
    # never None, but callers may still set it to null explicitly.
    search_queries: Optional[List[str]] = Field(default_factory=list, description="Search queries used for research")
# Enhanced Article Content Model
class ArticleContent(BaseModel):
    """Enhanced model for generated article content with grounding capabilities."""
    title: str
    content: str
    word_count: int
    sections: List[Dict[str, str]]
    seo_metadata: Optional[Dict[str, Any]] = None
    image_suggestions: List[ImageSuggestion]
    # Estimated reading time; units not specified here — presumably minutes (confirm).
    reading_time: Optional[int] = None
    citations: List[Citation] = Field(default_factory=list, description="Inline citations")
    source_list: Optional[str] = Field(None, description="Formatted source list")
    quality_metrics: Optional[ContentQualityMetrics] = Field(None, description="Content quality metrics")
    grounding_enabled: bool = Field(default=False, description="Whether grounding was used")
    search_queries: Optional[List[str]] = Field(default_factory=list, description="Search queries used for research")
# Enhanced Carousel Slide Model
class CarouselSlide(BaseModel):
    """Enhanced model for carousel slide content."""
    slide_number: int
    title: str
    content: str
    visual_elements: List[str]
    design_notes: Optional[str] = None
    citations: List[Citation] = Field(default_factory=list, description="Inline citations for this slide")
# Enhanced Carousel Content Model
class CarouselContent(BaseModel):
    """Enhanced model for generated carousel content with grounding capabilities."""
    title: str
    slides: List[CarouselSlide]
    # Cover/CTA slides are kept separate from the main content slides.
    cover_slide: Optional[CarouselSlide] = None
    cta_slide: Optional[CarouselSlide] = None
    design_guidelines: Dict[str, str]
    citations: List[Citation] = Field(default_factory=list, description="Overall citations")
    source_list: Optional[str] = Field(None, description="Formatted source list")
    quality_metrics: Optional[ContentQualityMetrics] = Field(None, description="Content quality metrics")
    grounding_enabled: bool = Field(default=False, description="Whether grounding was used")
# Enhanced Video Script Model
class VideoScript(BaseModel):
    """Enhanced model for video script content with grounding capabilities."""
    hook: str
    main_content: List[Dict[str, str]]  # scene_number, content, duration, visual_notes
    conclusion: str
    captions: Optional[List[str]] = None
    thumbnail_suggestions: List[str]
    video_description: str
    citations: List[Citation] = Field(default_factory=list, description="Inline citations")
    source_list: Optional[str] = Field(None, description="Formatted source list")
    quality_metrics: Optional[ContentQualityMetrics] = Field(None, description="Content quality metrics")
    grounding_enabled: bool = Field(default=False, description="Whether grounding was used")
# Enhanced LinkedIn Post Response Model
class LinkedInPostResponse(BaseModel):
    """Enhanced response model for LinkedIn post generation with grounding capabilities.

    On success, `data` carries the generated post; on failure, `success` is
    False and `error` describes the problem.
    """
    success: bool = True
    data: Optional[PostContent] = None
    # default_factory instead of mutable literal defaults (`[]` / `{}`),
    # consistent with the content models defined above in this module.
    research_sources: List[ResearchSource] = Field(default_factory=list)
    generation_metadata: Dict[str, Any] = Field(default_factory=dict)
    error: Optional[str] = None
    grounding_status: Optional[Dict[str, Any]] = Field(None, description="Grounding operation status")

    class Config:
        # Example payload surfaced in the generated OpenAPI schema.
        json_schema_extra = {
            "example": {
                "success": True,
                "data": {
                    "content": "🚀 AI is revolutionizing healthcare...",
                    "character_count": 1250,
                    "hashtags": [
                        {"hashtag": "#AIinHealthcare", "category": "industry", "popularity_score": 0.9},
                        {"hashtag": "#DigitalTransformation", "category": "general", "popularity_score": 0.8}
                    ],
                    "call_to_action": "What's your experience with AI in healthcare? Share in the comments!",
                    "engagement_prediction": {"estimated_likes": 120, "estimated_comments": 15},
                    "citations": [
                        {"type": "inline", "reference": "Source 1", "position": 45}
                    ],
                    "source_list": "**Sources:**\n1. **AI in Healthcare: Current Trends**\n   - URL: [https://example.com/ai-healthcare](https://example.com/ai-healthcare)",
                    "quality_metrics": {
                        "overall_score": 0.85,
                        "factual_accuracy": 0.9,
                        "source_verification": 0.8,
                        "professional_tone": 0.9,
                        "industry_relevance": 0.85,
                        "citation_coverage": 0.8,
                        "content_length": 1250,
                        "word_count": 180,
                        "analysis_timestamp": "2025-01-15T10:30:00Z"
                    },
                    "grounding_enabled": True
                },
                "research_sources": [
                    {
                        "title": "AI in Healthcare: Current Trends",
                        "url": "https://example.com/ai-healthcare",
                        "content": "Summary of AI healthcare trends...",
                        "relevance_score": 0.95,
                        "credibility_score": 0.85,
                        "domain_authority": 0.9,
                        "source_type": "business_news"
                    }
                ],
                "generation_metadata": {
                    "model_used": "gemini-2.0-flash-001",
                    "generation_time": 3.2,
                    "research_time": 5.1,
                    "grounding_enabled": True
                },
                "grounding_status": {
                    "status": "success",
                    "sources_used": 3,
                    "citation_coverage": 0.8,
                    "quality_score": 0.85
                }
            }
        }
# Enhanced LinkedIn Article Response Model
class LinkedInArticleResponse(BaseModel):
    """Enhanced response model for LinkedIn article generation with grounding capabilities."""
    success: bool = True
    data: Optional[ArticleContent] = None
    # default_factory instead of mutable literal defaults, matching the
    # convention used by the content models in this module.
    research_sources: List[ResearchSource] = Field(default_factory=list)
    generation_metadata: Dict[str, Any] = Field(default_factory=dict)
    error: Optional[str] = None
    grounding_status: Optional[Dict[str, Any]] = Field(None, description="Grounding operation status")
# Enhanced LinkedIn Carousel Response Model
class LinkedInCarouselResponse(BaseModel):
    """Enhanced response model for LinkedIn carousel generation with grounding capabilities."""
    success: bool = True
    data: Optional[CarouselContent] = None
    # default_factory instead of mutable literal defaults, matching the
    # convention used by the content models in this module.
    research_sources: List[ResearchSource] = Field(default_factory=list)
    generation_metadata: Dict[str, Any] = Field(default_factory=dict)
    error: Optional[str] = None
    grounding_status: Optional[Dict[str, Any]] = Field(None, description="Grounding operation status")
# Enhanced LinkedIn Video Script Response Model
class LinkedInVideoScriptResponse(BaseModel):
    """Enhanced response model for LinkedIn video script generation with grounding capabilities."""
    success: bool = True
    data: Optional[VideoScript] = None
    # default_factory instead of mutable literal defaults, matching the
    # convention used by the content models in this module.
    research_sources: List[ResearchSource] = Field(default_factory=list)
    generation_metadata: Dict[str, Any] = Field(default_factory=dict)
    error: Optional[str] = None
    grounding_status: Optional[Dict[str, Any]] = Field(None, description="Grounding operation status")
# Enhanced LinkedIn Comment Response Result Model
class LinkedInCommentResponseResult(BaseModel):
    """Enhanced response model for LinkedIn comment response generation with grounding capabilities."""
    success: bool = True
    response: Optional[str] = None
    # default_factory instead of mutable literal defaults, matching the
    # convention used by the content models in this module.
    alternative_responses: List[str] = Field(default_factory=list)
    tone_analysis: Optional[Dict[str, Any]] = None
    generation_metadata: Dict[str, Any] = Field(default_factory=dict)
    error: Optional[str] = None
    grounding_status: Optional[Dict[str, Any]] = Field(None, description="Grounding operation status")
99
backend/models/monitoring_models.py
Normal file
99
backend/models/monitoring_models.py
Normal file
@@ -0,0 +1,99 @@
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Boolean, JSON, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
# Import the same Base from enhanced_strategy_models
|
||||
from models.enhanced_strategy_models import Base
|
||||
|
||||
class StrategyMonitoringPlan(Base):
    """Model for storing strategy monitoring plans"""
    __tablename__ = "strategy_monitoring_plans"

    id = Column(Integer, primary_key=True, index=True)
    # NOTE(review): no unique constraint on strategy_id, so multiple plan
    # rows per strategy appear to be allowed — confirm that is intended.
    strategy_id = Column(Integer, ForeignKey("enhanced_content_strategies.id"), nullable=False)
    plan_data = Column(JSON, nullable=False)  # Store the complete monitoring plan
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationship to strategy
    strategy = relationship("EnhancedContentStrategy", back_populates="monitoring_plans")
class MonitoringTask(Base):
    """Model for storing individual monitoring tasks"""
    __tablename__ = "monitoring_tasks"

    id = Column(Integer, primary_key=True, index=True)
    strategy_id = Column(Integer, ForeignKey("enhanced_content_strategies.id"), nullable=False)
    # Which strategy component this task monitors.
    component_name = Column(String(100), nullable=False)
    task_title = Column(String(200), nullable=False)
    task_description = Column(Text, nullable=False)
    assignee = Column(String(50), nullable=False)  # 'ALwrity' or 'Human'
    frequency = Column(String(50), nullable=False)  # 'Daily', 'Weekly', 'Monthly', 'Quarterly'
    # What to measure, how to measure it, and when to act on it.
    metric = Column(String(100), nullable=False)
    measurement_method = Column(Text, nullable=False)
    success_criteria = Column(Text, nullable=False)
    alert_threshold = Column(Text, nullable=False)
    status = Column(String(50), default='pending')  # 'pending', 'active', 'completed', 'failed'
    # Scheduling bookkeeping for recurring execution.
    last_executed = Column(DateTime, nullable=True)
    next_execution = Column(DateTime, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    strategy = relationship("EnhancedContentStrategy", back_populates="monitoring_tasks")
    # Logs are deleted together with their task.
    execution_logs = relationship("TaskExecutionLog", back_populates="task", cascade="all, delete-orphan")
class TaskExecutionLog(Base):
    """Model for storing task execution logs"""
    __tablename__ = "task_execution_logs"

    id = Column(Integer, primary_key=True, index=True)
    task_id = Column(Integer, ForeignKey("monitoring_tasks.id"), nullable=False)
    user_id = Column(Integer, nullable=True)  # User ID for user isolation (nullable for backward compatibility)
    execution_date = Column(DateTime, default=datetime.utcnow)
    status = Column(String(50), nullable=False)  # 'success', 'failed', 'skipped', 'running'
    # Arbitrary JSON result of the execution; error_message is set on failure.
    result_data = Column(JSON, nullable=True)
    error_message = Column(Text, nullable=True)
    execution_time_ms = Column(Integer, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship to monitoring task
    task = relationship("MonitoringTask", back_populates="execution_logs")
class StrategyPerformanceMetrics(Base):
    """Model for storing strategy performance metrics.

    One row per measurement of a strategy's performance for a user,
    timestamped by `metric_date`.
    """
    __tablename__ = "strategy_performance_metrics"

    id = Column(Integer, primary_key=True, index=True)
    strategy_id = Column(Integer, ForeignKey("enhanced_content_strategies.id"), nullable=False)
    user_id = Column(Integer, nullable=False)
    metric_date = Column(DateTime, default=datetime.utcnow)
    # NOTE(review): the metric columns below are whole-number Integers (including
    # roi_ratio and the percentage fields) — confirm fractional precision is not
    # needed before relying on these values.
    traffic_growth_percentage = Column(Integer, nullable=True)
    engagement_rate_percentage = Column(Integer, nullable=True)
    conversion_rate_percentage = Column(Integer, nullable=True)
    roi_ratio = Column(Integer, nullable=True)
    strategy_adoption_rate = Column(Integer, nullable=True)
    content_quality_score = Column(Integer, nullable=True)
    competitive_position_rank = Column(Integer, nullable=True)
    audience_growth_percentage = Column(Integer, nullable=True)
    data_source = Column(String(100), nullable=True)  # where the metric values came from
    confidence_score = Column(Integer, nullable=True)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationship to strategy (EnhancedContentStrategy.performance_metrics is the inverse)
    strategy = relationship("EnhancedContentStrategy", back_populates="performance_metrics")
class StrategyActivationStatus(Base):
    """Model for storing strategy activation status.

    Tracks whether a user's strategy is currently active, paused, or
    inactive, along with an optional performance score.
    """
    __tablename__ = "strategy_activation_status"

    id = Column(Integer, primary_key=True, index=True)
    strategy_id = Column(Integer, ForeignKey("enhanced_content_strategies.id"), nullable=False)
    user_id = Column(Integer, nullable=False)
    activation_date = Column(DateTime, default=datetime.utcnow)
    status = Column(String(50), default='active')  # 'active', 'inactive', 'paused'
    performance_score = Column(Integer, nullable=True)
    last_updated = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationship to strategy (EnhancedContentStrategy.activation_status is the inverse)
    strategy = relationship("EnhancedContentStrategy", back_populates="activation_status")
102
backend/models/oauth_token_monitoring_models.py
Normal file
102
backend/models/oauth_token_monitoring_models.py
Normal file
@@ -0,0 +1,102 @@
|
||||
"""
|
||||
OAuth Token Monitoring Models
|
||||
Database models for tracking OAuth token status and monitoring tasks.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Boolean, JSON, Index, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
# Import the same Base from enhanced_strategy_models
|
||||
from models.enhanced_strategy_models import Base
|
||||
|
||||
|
||||
class OAuthTokenMonitoringTask(Base):
    """
    Model for storing OAuth token monitoring tasks.

    Tracks per-user, per-platform token monitoring with weekly checks.
    Execution attempts are recorded in OAuthTokenExecutionLog rows, which
    are deleted along with the task (cascade="all, delete-orphan").
    """
    __tablename__ = "oauth_token_monitoring_tasks"

    id = Column(Integer, primary_key=True, index=True)

    # User and Platform Identification
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID (string)
    platform = Column(String(50), nullable=False)  # 'gsc', 'bing', 'wordpress', 'wix'

    # Task Status
    status = Column(String(50), default='active')  # 'active', 'failed', 'paused', 'needs_intervention'

    # Execution Tracking
    last_check = Column(DateTime, nullable=True)
    last_success = Column(DateTime, nullable=True)
    last_failure = Column(DateTime, nullable=True)
    failure_reason = Column(Text, nullable=True)

    # Failure Pattern Tracking
    consecutive_failures = Column(Integer, default=0)  # Count of consecutive failures
    failure_pattern = Column(JSON, nullable=True)  # JSON storing failure analysis

    # Scheduling
    next_check = Column(DateTime, nullable=True, index=True)  # Next scheduled check time

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Execution Logs Relationship
    execution_logs = relationship(
        "OAuthTokenExecutionLog",
        back_populates="task",
        cascade="all, delete-orphan"
    )

    # Indexes for efficient queries.
    # FIX: index names must be unique per schema on most databases (e.g.
    # PostgreSQL, SQLite). The original generic names ('idx_user_platform',
    # 'idx_next_check', 'idx_status') collide with identically named indexes
    # declared elsewhere on this shared Base (OAuthTokenExecutionLog also
    # declared 'idx_status'), so table creation would fail. Names are now
    # prefixed, following the convention already used by
    # platform_insights_monitoring_models ('idx_platform_insights_*').
    __table_args__ = (
        Index('idx_oauth_token_user_platform', 'user_id', 'platform'),
        Index('idx_oauth_token_next_check', 'next_check'),
        Index('idx_oauth_token_status', 'status'),
    )

    def __repr__(self):
        return f"<OAuthTokenMonitoringTask(id={self.id}, user_id={self.user_id}, platform={self.platform}, status={self.status})>"
class OAuthTokenExecutionLog(Base):
    """
    Model for storing OAuth token monitoring execution logs.

    Tracks individual execution attempts with results and error details.
    One row is written per check of an OAuthTokenMonitoringTask.
    """
    __tablename__ = "oauth_token_execution_logs"

    id = Column(Integer, primary_key=True, index=True)

    # Task Reference
    task_id = Column(Integer, ForeignKey("oauth_token_monitoring_tasks.id"), nullable=False, index=True)

    # Execution Details
    execution_date = Column(DateTime, default=datetime.utcnow, nullable=False)
    status = Column(String(50), nullable=False)  # 'success', 'failed', 'skipped'

    # Results
    result_data = Column(JSON, nullable=True)  # Token status, expiration info, etc.
    error_message = Column(Text, nullable=True)
    execution_time_ms = Column(Integer, nullable=True)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship to task
    task = relationship("OAuthTokenMonitoringTask", back_populates="execution_logs")

    # Indexes for efficient queries.
    # FIX: the original named the status index 'idx_status', duplicating the
    # 'idx_status' declared on OAuthTokenMonitoringTask in this same metadata;
    # most databases require index names to be unique per schema, so DDL
    # creation would fail. Both indexes are now prefixed with the table name
    # (same convention as platform_insights_monitoring_models).
    __table_args__ = (
        Index('idx_oauth_execution_task_date', 'task_id', 'execution_date'),
        Index('idx_oauth_execution_status', 'status'),
    )

    def __repr__(self):
        return f"<OAuthTokenExecutionLog(id={self.id}, task_id={self.task_id}, status={self.status}, execution_date={self.execution_date})>"
234
backend/models/onboarding.py
Normal file
234
backend/models/onboarding.py
Normal file
@@ -0,0 +1,234 @@
|
||||
from sqlalchemy import Column, Integer, String, Float, DateTime, ForeignKey, func, JSON, Text, Boolean
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
import datetime
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class OnboardingSession(Base):
    """Tracks a user's progress through the onboarding flow.

    Root aggregate for all step-specific onboarding data (API keys,
    website/competitor analyses, research preferences, persona data);
    child rows are deleted together with the session via cascade.
    """
    __tablename__ = 'onboarding_sessions'
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False)  # Clerk user ID (string)
    current_step = Column(Integer, default=1)  # 1-based step index
    progress = Column(Float, default=0.0)
    started_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
    # Child collections; research_preferences and persona_data are one-to-one (uselist=False).
    api_keys = relationship('APIKey', back_populates='session', cascade="all, delete-orphan")
    website_analyses = relationship('WebsiteAnalysis', back_populates='session', cascade="all, delete-orphan")
    research_preferences = relationship('ResearchPreferences', back_populates='session', cascade="all, delete-orphan", uselist=False)
    persona_data = relationship('PersonaData', back_populates='session', cascade="all, delete-orphan", uselist=False)
    competitor_analyses = relationship('CompetitorAnalysis', back_populates='session', cascade="all, delete-orphan")

    def __repr__(self):
        return f"<OnboardingSession(id={self.id}, user_id={self.user_id}, step={self.current_step}, progress={self.progress})>"
class APIKey(Base):
    """Stores a provider API key entered during onboarding.

    SECURITY NOTE: the key is stored in plaintext and `to_dict` returns it
    verbatim — consider encrypting at rest and masking in API responses.
    """
    __tablename__ = 'api_keys'
    id = Column(Integer, primary_key=True, autoincrement=True)
    session_id = Column(Integer, ForeignKey('onboarding_sessions.id'))
    provider = Column(String(64), nullable=False)  # e.g. the AI/service provider name
    key = Column(String(256), nullable=False)  # plaintext secret — see class note
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
    session = relationship('OnboardingSession', back_populates='api_keys')

    def __repr__(self):
        return f"<APIKey(id={self.id}, provider={self.provider}, session_id={self.session_id})>"

    def to_dict(self):
        """Convert to dictionary for API responses."""
        return {
            'id': self.id,
            'session_id': self.session_id,
            'provider': self.provider,
            'key': self.key,  # Note: In production, you might want to mask this
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
class WebsiteAnalysis(Base):
    """Stores website analysis results from onboarding step 2.

    Holds the style analysis (writing style, audience, content type),
    the raw crawl result, and derived style patterns/guidelines, all as
    JSON blobs, plus status/error metadata for the analysis run.
    """
    __tablename__ = 'website_analyses'

    id = Column(Integer, primary_key=True, autoincrement=True)
    session_id = Column(Integer, ForeignKey('onboarding_sessions.id', ondelete='CASCADE'), nullable=False)
    website_url = Column(String(500), nullable=False)
    analysis_date = Column(DateTime, default=func.now())

    # Style analysis results
    writing_style = Column(JSON)  # Tone, voice, complexity, engagement_level
    content_characteristics = Column(JSON)  # Sentence structure, vocabulary, paragraph organization
    target_audience = Column(JSON)  # Demographics, expertise level, industry focus
    content_type = Column(JSON)  # Primary type, secondary types, purpose
    recommended_settings = Column(JSON)  # Writing tone, target audience, content type
    # brand_analysis = Column(JSON)  # Brand voice, values, positioning, competitive differentiation
    # content_strategy_insights = Column(JSON)  # SWOT analysis, strengths, weaknesses, opportunities, threats

    # Crawl results
    crawl_result = Column(JSON)  # Raw crawl data
    style_patterns = Column(JSON)  # Writing patterns analysis
    style_guidelines = Column(JSON)  # Generated guidelines

    # Metadata
    status = Column(String(50), default='completed')  # completed, failed, in_progress
    error_message = Column(Text)
    warning_message = Column(Text)
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())

    # Relationships
    session = relationship('OnboardingSession', back_populates='website_analyses')

    def __repr__(self):
        return f"<WebsiteAnalysis(id={self.id}, url={self.website_url}, status={self.status})>"

    def to_dict(self):
        """Convert to dictionary for API responses."""
        return {
            'id': self.id,
            'website_url': self.website_url,
            'analysis_date': self.analysis_date.isoformat() if self.analysis_date else None,
            'writing_style': self.writing_style,
            'content_characteristics': self.content_characteristics,
            'target_audience': self.target_audience,
            'content_type': self.content_type,
            'recommended_settings': self.recommended_settings,
            # 'brand_analysis': self.brand_analysis,
            # 'content_strategy_insights': self.content_strategy_insights,
            'crawl_result': self.crawl_result,
            'style_patterns': self.style_patterns,
            'style_guidelines': self.style_guidelines,
            'status': self.status,
            'error_message': self.error_message,
            'warning_message': self.warning_message,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
class ResearchPreferences(Base):
    """Stores research preferences from onboarding step 3.

    One-to-one with OnboardingSession. Also caches style-detection data
    carried over from step 2's website analysis.
    """
    __tablename__ = 'research_preferences'

    id = Column(Integer, primary_key=True, autoincrement=True)
    session_id = Column(Integer, ForeignKey('onboarding_sessions.id', ondelete='CASCADE'), nullable=False)

    # Research configuration
    research_depth = Column(String(50), nullable=False)  # Basic, Standard, Comprehensive, Expert
    content_types = Column(JSON, nullable=False)  # Array of content types
    auto_research = Column(Boolean, default=True)
    factual_content = Column(Boolean, default=True)

    # Style detection data (from step 2)
    writing_style = Column(JSON)  # Tone, voice, complexity from website analysis
    content_characteristics = Column(JSON)  # Sentence structure, vocabulary from analysis
    target_audience = Column(JSON)  # Demographics, expertise level from analysis
    recommended_settings = Column(JSON)  # AI-generated recommendations from analysis

    # Metadata
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())

    # Relationships
    session = relationship('OnboardingSession', back_populates='research_preferences')

    def __repr__(self):
        return f"<ResearchPreferences(id={self.id}, session_id={self.session_id}, depth={self.research_depth})>"

    def to_dict(self):
        """Convert to dictionary for API responses."""
        return {
            'id': self.id,
            'session_id': self.session_id,
            'research_depth': self.research_depth,
            'content_types': self.content_types,
            'auto_research': self.auto_research,
            'factual_content': self.factual_content,
            'writing_style': self.writing_style,
            'content_characteristics': self.content_characteristics,
            'target_audience': self.target_audience,
            'recommended_settings': self.recommended_settings,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
class PersonaData(Base):
    """Stores persona generation data from onboarding step 4.

    One-to-one with OnboardingSession. `research_persona_generated_at`
    supports a 7-day TTL cache check for the AI research persona.
    """
    __tablename__ = 'persona_data'

    id = Column(Integer, primary_key=True, autoincrement=True)
    session_id = Column(Integer, ForeignKey('onboarding_sessions.id', ondelete='CASCADE'), nullable=False)

    # Persona generation results
    core_persona = Column(JSON)  # Core persona data (demographics, psychographics, etc.)
    platform_personas = Column(JSON)  # Platform-specific personas (LinkedIn, Twitter, etc.)
    quality_metrics = Column(JSON)  # Quality assessment metrics
    selected_platforms = Column(JSON)  # Array of selected platforms
    research_persona = Column(JSON, nullable=True)  # AI-generated research persona with personalized defaults
    research_persona_generated_at = Column(DateTime, nullable=True)  # Timestamp for 7-day TTL cache validation

    # Metadata
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())

    # Relationships
    session = relationship('OnboardingSession', back_populates='persona_data')

    def __repr__(self):
        return f"<PersonaData(id={self.id}, session_id={self.session_id})>"

    def to_dict(self):
        """Convert to dictionary for API responses."""
        return {
            'id': self.id,
            'session_id': self.session_id,
            'core_persona': self.core_persona,
            'platform_personas': self.platform_personas,
            'quality_metrics': self.quality_metrics,
            'selected_platforms': self.selected_platforms,
            'research_persona': self.research_persona,
            'research_persona_generated_at': self.research_persona_generated_at.isoformat() if self.research_persona_generated_at else None,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
class CompetitorAnalysis(Base):
    """Stores competitor website analysis results from scheduled analysis tasks.

    Unlike WebsiteAnalysis (which splits the result into many JSON columns),
    the full analysis payload is kept in a single `analysis_data` JSON blob.
    """
    __tablename__ = 'competitor_analyses'

    id = Column(Integer, primary_key=True, autoincrement=True)
    session_id = Column(Integer, ForeignKey('onboarding_sessions.id', ondelete='CASCADE'), nullable=False)
    competitor_url = Column(String(500), nullable=False)
    competitor_domain = Column(String(255), nullable=True)  # Extracted domain for easier queries
    analysis_date = Column(DateTime, default=func.now())

    # Complete analysis data (same structure as WebsiteAnalysis)
    analysis_data = Column(JSON)  # Contains style_analysis, crawl_result, style_patterns, style_guidelines

    # Metadata
    status = Column(String(50), default='completed')  # completed, failed, in_progress
    error_message = Column(Text, nullable=True)
    warning_message = Column(Text, nullable=True)
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())

    # Relationships
    session = relationship('OnboardingSession', back_populates='competitor_analyses')

    def __repr__(self):
        return f"<CompetitorAnalysis(id={self.id}, url={self.competitor_url}, status={self.status})>"

    def to_dict(self):
        """Convert to dictionary for API responses."""
        return {
            'id': self.id,
            'session_id': self.session_id,
            'competitor_url': self.competitor_url,
            'competitor_domain': self.competitor_domain,
            'analysis_date': self.analysis_date.isoformat() if self.analysis_date else None,
            'analysis_data': self.analysis_data,
            'status': self.status,
            'error_message': self.error_message,
            'warning_message': self.warning_message,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }
247
backend/models/persona_models.py
Normal file
247
backend/models/persona_models.py
Normal file
@@ -0,0 +1,247 @@
|
||||
"""
|
||||
Writing Persona Database Models
|
||||
Defines database schema for storing writing personas based on onboarding data analysis.
|
||||
Each persona represents a platform-specific writing style derived from user's onboarding data.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Float, JSON, ForeignKey, Boolean
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class WritingPersona(Base):
    """Main writing persona model that stores the core persona profile.

    Platform-specific variants hang off `platform_personas` and are
    deleted together with this row (cascade="all, delete-orphan").
    """

    __tablename__ = "writing_personas"

    # Primary fields
    id = Column(Integer, primary_key=True)
    user_id = Column(String(255), nullable=False)  # Changed to String to support Clerk user IDs
    persona_name = Column(String(255), nullable=False)  # e.g., "Professional LinkedIn Voice", "Casual Blog Writer"

    # Core Identity
    archetype = Column(String(100), nullable=True)  # e.g., "The Pragmatic Futurist", "The Thoughtful Educator"
    core_belief = Column(Text, nullable=True)  # Central philosophy or belief system
    brand_voice_description = Column(Text, nullable=True)  # Detailed brand voice description

    # Linguistic Fingerprint - Quantitative Analysis
    linguistic_fingerprint = Column(JSON, nullable=True)  # Complete linguistic analysis

    # Platform-specific adaptations
    platform_adaptations = Column(JSON, nullable=True)  # How persona adapts across platforms

    # Source data tracking
    onboarding_session_id = Column(Integer, nullable=True)  # Link to onboarding session (no FK constraint)
    source_website_analysis = Column(JSON, nullable=True)  # Website analysis data used
    source_research_preferences = Column(JSON, nullable=True)  # Research preferences used

    # AI Analysis metadata
    ai_analysis_version = Column(String(50), nullable=True)  # Version of AI analysis used
    confidence_score = Column(Float, nullable=True)  # AI confidence in persona accuracy
    analysis_date = Column(DateTime, default=datetime.utcnow)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    is_active = Column(Boolean, default=True)

    # Relationships
    platform_personas = relationship("PlatformPersona", back_populates="writing_persona", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<WritingPersona(id={self.id}, name='{self.persona_name}', user_id={self.user_id})>"

    def to_dict(self):
        """Convert model to dictionary."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'persona_name': self.persona_name,
            'archetype': self.archetype,
            'core_belief': self.core_belief,
            'brand_voice_description': self.brand_voice_description,
            'linguistic_fingerprint': self.linguistic_fingerprint,
            'platform_adaptations': self.platform_adaptations,
            'onboarding_session_id': self.onboarding_session_id,
            'source_website_analysis': self.source_website_analysis,
            'source_research_preferences': self.source_research_preferences,
            'ai_analysis_version': self.ai_analysis_version,
            'confidence_score': self.confidence_score,
            'analysis_date': self.analysis_date.isoformat() if self.analysis_date else None,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None,
            'is_active': self.is_active
        }
class PlatformPersona(Base):
    """Platform-specific persona adaptations for different social media platforms and blogging."""

    __tablename__ = "platform_personas"

    # Primary fields
    id = Column(Integer, primary_key=True)
    writing_persona_id = Column(Integer, ForeignKey("writing_personas.id"), nullable=False)
    platform_type = Column(String(50), nullable=False)  # twitter, linkedin, instagram, facebook, blog, medium, substack

    # Platform-specific linguistic constraints
    sentence_metrics = Column(JSON, nullable=True)  # Platform-optimized sentence structure
    lexical_features = Column(JSON, nullable=True)  # Platform-specific vocabulary and phrases
    rhetorical_devices = Column(JSON, nullable=True)  # Platform-appropriate rhetorical patterns
    tonal_range = Column(JSON, nullable=True)  # Permitted tones for this platform
    stylistic_constraints = Column(JSON, nullable=True)  # Platform formatting rules

    # Platform-specific content guidelines
    content_format_rules = Column(JSON, nullable=True)  # Character limits, hashtag usage, etc.
    engagement_patterns = Column(JSON, nullable=True)  # How to engage on this platform
    posting_frequency = Column(JSON, nullable=True)  # Optimal posting schedule
    content_types = Column(JSON, nullable=True)  # Preferred content types for platform

    # Performance optimization
    platform_best_practices = Column(JSON, nullable=True)  # Platform-specific best practices
    algorithm_considerations = Column(JSON, nullable=True)  # Platform algorithm optimization
    # (for LinkedIn personas this JSON also carries the extra keys surfaced
    # by to_dict below)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    is_active = Column(Boolean, default=True)

    # Relationships
    writing_persona = relationship("WritingPersona", back_populates="platform_personas")

    def __repr__(self):
        return f"<PlatformPersona(id={self.id}, platform='{self.platform_type}', persona_id={self.writing_persona_id})>"

    def to_dict(self):
        """Convert model to dictionary.

        For LinkedIn personas, selected sub-dicts of `algorithm_considerations`
        are additionally flattened into the top level of the result.
        """
        result = {
            'id': self.id,
            'writing_persona_id': self.writing_persona_id,
            'platform_type': self.platform_type,
            'sentence_metrics': self.sentence_metrics,
            'lexical_features': self.lexical_features,
            'rhetorical_devices': self.rhetorical_devices,
            'tonal_range': self.tonal_range,
            'stylistic_constraints': self.stylistic_constraints,
            'content_format_rules': self.content_format_rules,
            'engagement_patterns': self.engagement_patterns,
            'posting_frequency': self.posting_frequency,
            'content_types': self.content_types,
            'platform_best_practices': self.platform_best_practices,
            'algorithm_considerations': self.algorithm_considerations,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None,
            'is_active': self.is_active
        }

        # Add LinkedIn-specific fields if this is a LinkedIn persona
        if self.platform_type.lower() == "linkedin" and self.algorithm_considerations:
            linkedin_data = self.algorithm_considerations
            if isinstance(linkedin_data, dict):  # guard: JSON column may hold non-dict data
                result.update({
                    'professional_networking': linkedin_data.get('professional_networking', {}),
                    'linkedin_features': linkedin_data.get('linkedin_features', {}),
                    'algorithm_optimization': linkedin_data.get('algorithm_optimization', {}),
                    'professional_context_optimization': linkedin_data.get('professional_context_optimization', {})
                })

        return result
class PersonaAnalysisResult(Base):
    """Stores AI analysis results used to generate personas.

    Captures the prompt, input data, analysis outputs, quality metrics,
    and AI-service metadata for one persona-generation run.
    """

    __tablename__ = "persona_analysis_results"

    id = Column(Integer, primary_key=True)
    # FIX: was Column(Integer, ...). User ids in this module are Clerk IDs —
    # WritingPersona.user_id is explicitly a String(255) "to support Clerk
    # user IDs" — so an Integer column here could not store them. Changed to
    # the same String(255) type for consistency.
    user_id = Column(String(255), nullable=False)
    writing_persona_id = Column(Integer, ForeignKey("writing_personas.id"), nullable=True)

    # Analysis input data
    analysis_prompt = Column(Text, nullable=True)  # The prompt used for analysis
    input_data = Column(JSON, nullable=True)  # Raw input data from onboarding

    # AI Analysis results
    linguistic_analysis = Column(JSON, nullable=True)  # Detailed linguistic fingerprint analysis
    personality_analysis = Column(JSON, nullable=True)  # Personality and archetype analysis
    platform_recommendations = Column(JSON, nullable=True)  # Platform-specific recommendations
    style_guidelines = Column(JSON, nullable=True)  # Generated style guidelines

    # Quality metrics
    analysis_confidence = Column(Float, nullable=True)  # AI confidence in analysis
    data_sufficiency_score = Column(Float, nullable=True)  # How much data was available for analysis
    recommendation_quality = Column(Float, nullable=True)  # Quality of generated recommendations

    # AI service metadata
    ai_provider = Column(String(50), nullable=True)  # gemini, openai, anthropic
    model_version = Column(String(100), nullable=True)  # Specific model version used
    processing_time = Column(Float, nullable=True)  # Processing time in seconds

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)

    def __repr__(self):
        return f"<PersonaAnalysisResult(id={self.id}, user_id={self.user_id}, provider='{self.ai_provider}')>"

    def to_dict(self):
        """Convert model to dictionary."""
        return {
            'id': self.id,
            'user_id': self.user_id,
            'writing_persona_id': self.writing_persona_id,
            'analysis_prompt': self.analysis_prompt,
            'input_data': self.input_data,
            'linguistic_analysis': self.linguistic_analysis,
            'personality_analysis': self.personality_analysis,
            'platform_recommendations': self.platform_recommendations,
            'style_guidelines': self.style_guidelines,
            'analysis_confidence': self.analysis_confidence,
            'data_sufficiency_score': self.data_sufficiency_score,
            'recommendation_quality': self.recommendation_quality,
            'ai_provider': self.ai_provider,
            'model_version': self.model_version,
            'processing_time': self.processing_time,
            'created_at': self.created_at.isoformat() if self.created_at else None
        }
class PersonaValidationResult(Base):
    """Stores validation results for generated personas.

    Links to a WritingPersona and optionally a specific PlatformPersona,
    recording quality scores, sample outputs, and improvement feedback.
    """

    __tablename__ = "persona_validation_results"

    id = Column(Integer, primary_key=True)
    writing_persona_id = Column(Integer, ForeignKey("writing_personas.id"), nullable=False)
    platform_persona_id = Column(Integer, ForeignKey("platform_personas.id"), nullable=True)

    # Validation metrics
    stylometric_accuracy = Column(Float, nullable=True)  # How well persona matches original style
    consistency_score = Column(Float, nullable=True)  # Consistency across generated content
    platform_compliance = Column(Float, nullable=True)  # How well adapted to platform constraints

    # Test results
    sample_outputs = Column(JSON, nullable=True)  # Sample content generated with persona
    validation_feedback = Column(JSON, nullable=True)  # User or automated feedback
    improvement_suggestions = Column(JSON, nullable=True)  # Suggestions for persona refinement

    # Metadata
    validation_date = Column(DateTime, default=datetime.utcnow)
    validator_type = Column(String(50), nullable=True)  # automated, user, ai_review

    def __repr__(self):
        return f"<PersonaValidationResult(id={self.id}, persona_id={self.writing_persona_id}, accuracy={self.stylometric_accuracy})>"

    def to_dict(self):
        """Convert model to dictionary."""
        return {
            'id': self.id,
            'writing_persona_id': self.writing_persona_id,
            'platform_persona_id': self.platform_persona_id,
            'stylometric_accuracy': self.stylometric_accuracy,
            'consistency_score': self.consistency_score,
            'platform_compliance': self.platform_compliance,
            'sample_outputs': self.sample_outputs,
            'validation_feedback': self.validation_feedback,
            'improvement_suggestions': self.improvement_suggestions,
            'validation_date': self.validation_date.isoformat() if self.validation_date else None,
            'validator_type': self.validator_type
        }
104
backend/models/platform_insights_monitoring_models.py
Normal file
104
backend/models/platform_insights_monitoring_models.py
Normal file
@@ -0,0 +1,104 @@
|
||||
"""
|
||||
Platform Insights Monitoring Models
|
||||
Database models for tracking platform insights (GSC/Bing) fetch tasks.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, JSON, Index, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
# Import the same Base from enhanced_strategy_models
|
||||
from models.enhanced_strategy_models import Base
|
||||
|
||||
|
||||
class PlatformInsightsTask(Base):
    """
    Model for storing platform insights fetch tasks.

    Tracks per-user, per-platform insights fetching with weekly updates.
    One row per (user, platform[, site]) combination; execution attempts are
    recorded in the related PlatformInsightsExecutionLog rows.
    """
    __tablename__ = "platform_insights_tasks"

    id = Column(Integer, primary_key=True, index=True)

    # User and Platform Identification
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID (string)
    platform = Column(String(50), nullable=False)  # 'gsc' or 'bing'
    site_url = Column(String(500), nullable=True)  # Optional: specific site URL

    # Task Status
    status = Column(String(50), default='active')  # 'active', 'failed', 'paused', 'needs_intervention'

    # Execution Tracking
    last_check = Column(DateTime, nullable=True)
    last_success = Column(DateTime, nullable=True)
    last_failure = Column(DateTime, nullable=True)
    failure_reason = Column(Text, nullable=True)

    # Failure Pattern Tracking
    consecutive_failures = Column(Integer, default=0)  # Count of consecutive failures
    failure_pattern = Column(JSON, nullable=True)  # JSON storing failure analysis

    # Scheduling
    next_check = Column(DateTime, nullable=True, index=True)  # Next scheduled check time

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Execution Logs Relationship
    # delete-orphan cascade: removing a task also removes its execution logs.
    execution_logs = relationship(
        "PlatformInsightsExecutionLog",
        back_populates="task",
        cascade="all, delete-orphan"
    )

    # Indexes for efficient queries
    __table_args__ = (
        Index('idx_platform_insights_user_platform', 'user_id', 'platform'),
        Index('idx_platform_insights_next_check', 'next_check'),
        Index('idx_platform_insights_status', 'status'),
    )

    def __repr__(self):
        return f"<PlatformInsightsTask(id={self.id}, user_id={self.user_id}, platform={self.platform}, status={self.status})>"
|
||||
class PlatformInsightsExecutionLog(Base):
    """
    Model for storing platform insights fetch execution logs.

    Tracks individual execution attempts with results and error details.
    Each row belongs to exactly one PlatformInsightsTask (see task_id FK).
    """
    __tablename__ = "platform_insights_execution_logs"

    id = Column(Integer, primary_key=True, index=True)

    # Task Reference
    task_id = Column(Integer, ForeignKey("platform_insights_tasks.id"), nullable=False, index=True)

    # Execution Details
    execution_date = Column(DateTime, default=datetime.utcnow, nullable=False)
    status = Column(String(50), nullable=False)  # 'success', 'failed', 'skipped'

    # Results
    result_data = Column(JSON, nullable=True)  # Insights data, metrics, etc.
    error_message = Column(Text, nullable=True)
    execution_time_ms = Column(Integer, nullable=True)
    data_source = Column(String(50), nullable=True)  # 'cached', 'api', 'onboarding'

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship to task (inverse of PlatformInsightsTask.execution_logs)
    task = relationship("PlatformInsightsTask", back_populates="execution_logs")

    # Indexes for efficient queries
    __table_args__ = (
        Index('idx_platform_insights_log_task_execution_date', 'task_id', 'execution_date'),
        Index('idx_platform_insights_log_status', 'status'),
    )

    def __repr__(self):
        return f"<PlatformInsightsExecutionLog(id={self.id}, task_id={self.task_id}, status={self.status}, execution_date={self.execution_date})>"
||||
68
backend/models/podcast_models.py
Normal file
68
backend/models/podcast_models.py
Normal file
@@ -0,0 +1,68 @@
|
||||
"""
|
||||
Podcast Maker Models
|
||||
|
||||
Database models for podcast project persistence and state management.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Float, Boolean, JSON, Text, Index
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from datetime import datetime
|
||||
|
||||
# Use the same Base as subscription models for consistency
|
||||
from models.subscription_models import Base
|
||||
|
||||
|
||||
class PodcastProject(Base):
    """
    Database model for podcast project state.
    Stores complete project state to enable cross-device resume.

    The pipeline-stage payloads (analysis, research, script, renders) are kept
    as opaque JSON mirroring the frontend's PodcastProjectState interface.
    """

    __tablename__ = "podcast_projects"

    # Primary fields
    id = Column(Integer, primary_key=True, autoincrement=True)
    project_id = Column(String(255), unique=True, nullable=False, index=True)  # User-facing project ID
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID

    # Project metadata
    idea = Column(String(1000), nullable=False)  # Episode idea or URL
    duration = Column(Integer, nullable=False)  # Duration in minutes
    speakers = Column(Integer, nullable=False, default=1)  # Number of speakers
    budget_cap = Column(Float, nullable=False, default=50.0)  # Budget cap in USD

    # Project state (stored as JSON)
    # This mirrors the PodcastProjectState interface from frontend
    analysis = Column(JSON, nullable=True)  # PodcastAnalysis
    queries = Column(JSON, nullable=True)  # List[Query]
    selected_queries = Column(JSON, nullable=True)  # Array of query IDs
    research = Column(JSON, nullable=True)  # Research object
    raw_research = Column(JSON, nullable=True)  # BlogResearchResponse
    estimate = Column(JSON, nullable=True)  # PodcastEstimate
    script_data = Column(JSON, nullable=True)  # Script object
    render_jobs = Column(JSON, nullable=True)  # List[Job]
    knobs = Column(JSON, nullable=True)  # Knobs settings
    research_provider = Column(String(50), nullable=True, default="google")  # Research provider

    # UI state
    show_script_editor = Column(Boolean, default=False)
    show_render_queue = Column(Boolean, default=False)
    current_step = Column(String(50), nullable=True)  # 'create' | 'analysis' | 'research' | 'script' | 'render'

    # Status
    status = Column(String(50), default="draft", nullable=False, index=True)  # draft, in_progress, completed, archived
    is_favorite = Column(Boolean, default=False, index=True)

    # Final combined video URL (persisted for reloads)
    final_video_url = Column(String(1000), nullable=True)  # URL to final combined podcast video

    # Timestamps
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow, nullable=False, index=True)

    # Composite indexes for common query patterns
    __table_args__ = (
        Index('idx_user_status_created', 'user_id', 'status', 'created_at'),
        Index('idx_user_favorite_updated', 'user_id', 'is_favorite', 'updated_at'),
    )
156
backend/models/product_asset_models.py
Normal file
156
backend/models/product_asset_models.py
Normal file
@@ -0,0 +1,156 @@
|
||||
"""
|
||||
Product Asset Models
|
||||
Database models for storing product-specific assets (separate from campaign assets).
|
||||
These models are for the Product Marketing Suite (product asset creation).
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Float, Boolean, JSON, Text, ForeignKey, Index
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
import enum
|
||||
|
||||
from models.subscription_models import Base
|
||||
|
||||
|
||||
class ProductAssetType(enum.Enum):
    """Product asset type enum.

    NOTE(review): ProductAsset.asset_type is a plain String column whose
    comment lists these same values; the enum is not enforced at the DB layer.
    """
    IMAGE = "image"
    VIDEO = "video"
    AUDIO = "audio"
    ANIMATION = "animation"
|
||||
class ProductImageStyle(enum.Enum):
    """Product image style enum.

    NOTE(review): ProductAsset.style is a plain String column; these values
    are a reference set, not a DB-enforced constraint.
    """
    STUDIO = "studio"
    LIFESTYLE = "lifestyle"
    OUTDOOR = "outdoor"
    MINIMALIST = "minimalist"
    LUXURY = "luxury"
    TECHNICAL = "technical"
|
||||
class ProductAsset(Base):
    """
    Product asset model.
    Stores product-specific assets (images, videos, audio) generated for product marketing.
    """

    __tablename__ = "product_assets"

    # Primary fields
    id = Column(Integer, primary_key=True, autoincrement=True)
    product_id = Column(String(255), nullable=False, index=True)  # User-defined product ID
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID

    # Product information
    product_name = Column(String(500), nullable=False)
    product_description = Column(Text, nullable=True)

    # Asset details
    asset_type = Column(String(50), nullable=False, index=True)  # image, video, audio, animation
    variant = Column(String(100), nullable=True)  # color, size, angle, etc.
    style = Column(String(50), nullable=True)  # studio, lifestyle, minimalist, etc.
    environment = Column(String(50), nullable=True)  # studio, lifestyle, outdoor, etc.

    # Link to ContentAsset (unified asset library)
    # SET NULL on delete: removing the library asset keeps this row.
    content_asset_id = Column(Integer, ForeignKey('content_assets.id', ondelete='SET NULL'), nullable=True, index=True)

    # Generation details
    provider = Column(String(100), nullable=True)
    model = Column(String(100), nullable=True)
    cost = Column(Float, default=0.0)
    generation_time = Column(Float, nullable=True)
    prompt_used = Column(Text, nullable=True)

    # E-commerce integration
    ecommerce_exported = Column(Boolean, default=False)
    exported_to = Column(JSON, nullable=True)  # Array of platform names

    # Status
    status = Column(String(50), default="completed", nullable=False)  # completed, processing, failed

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Additional metadata (renamed from 'metadata' to avoid SQLAlchemy reserved name conflict)
    # Using 'product_metadata' as column name in DB to avoid conflict with SQLAlchemy's reserved 'metadata' attribute
    product_metadata = Column('product_metadata', JSON, nullable=True)  # Additional product-specific metadata

    # Composite indexes
    # NOTE(review): these index names are generic; if another table on the same
    # metadata ever uses the same names, create_all() will fail — confirm uniqueness.
    __table_args__ = (
        Index('idx_user_product', 'user_id', 'product_id'),
        Index('idx_user_type', 'user_id', 'asset_type'),
        Index('idx_product_type', 'product_id', 'asset_type'),
    )
|
||||
class ProductStyleTemplate(Base):
    """
    Brand style template for products.
    Stores reusable brand style configurations for product asset generation.
    """

    __tablename__ = "product_style_templates"

    # Primary fields
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    template_name = Column(String(255), nullable=False)

    # Style configuration
    color_palette = Column(JSON, nullable=True)  # Array of brand colors
    background_style = Column(String(50), nullable=True)  # white, transparent, lifestyle, branded
    lighting_preset = Column(String(50), nullable=True)  # natural, studio, dramatic, soft
    preferred_style = Column(String(50), nullable=True)  # photorealistic, minimalist, luxury, technical
    preferred_environment = Column(String(50), nullable=True)  # studio, lifestyle, outdoor

    # Brand integration
    use_brand_colors = Column(Boolean, default=True)
    use_brand_logo = Column(Boolean, default=False)

    # Metadata
    is_default = Column(Boolean, default=False)  # Default template for user
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Composite indexes
    # NOTE(review): (user_id, template_name) is indexed but not unique —
    # duplicate template names per user are allowed; confirm that is intended.
    __table_args__ = (
        Index('idx_user_template', 'user_id', 'template_name'),
    )
|
||||
class EcommerceExport(Base):
    """
    E-commerce platform export tracking.
    Tracks product asset exports to e-commerce platforms.
    """

    __tablename__ = "product_ecommerce_exports"

    # Primary fields
    id = Column(Integer, primary_key=True, autoincrement=True)
    user_id = Column(String(255), nullable=False, index=True)
    product_id = Column(String(255), nullable=False, index=True)

    # Platform information
    platform = Column(String(50), nullable=False)  # shopify, amazon, woocommerce
    platform_product_id = Column(String(255), nullable=True)  # Product ID on the platform

    # Export details
    exported_assets = Column(JSON, nullable=False)  # Array of asset IDs exported
    export_status = Column(String(50), default="pending", nullable=False)  # pending, completed, failed
    error_message = Column(Text, nullable=True)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
    exported_at = Column(DateTime, nullable=True)  # set when the export completes (nullable until then)

    # Composite indexes
    __table_args__ = (
        Index('idx_user_platform', 'user_id', 'platform'),
        Index('idx_product_platform', 'product_id', 'platform'),
    )
162
backend/models/product_marketing_models.py
Normal file
162
backend/models/product_marketing_models.py
Normal file
@@ -0,0 +1,162 @@
|
||||
"""
|
||||
Product Marketing Campaign Models
|
||||
Database models for storing campaign blueprints and asset proposals.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Float, Boolean, JSON, Text, ForeignKey, Index, func
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
import enum
|
||||
|
||||
from models.subscription_models import Base
|
||||
|
||||
|
||||
class CampaignStatus(enum.Enum):
    """Campaign status enum.

    NOTE(review): Campaign.status is a plain String column defaulting to
    "draft"; these values are a reference set, not DB-enforced.
    """
    DRAFT = "draft"
    GENERATING = "generating"
    READY = "ready"
    PUBLISHED = "published"
    ARCHIVED = "archived"
|
||||
class AssetNodeStatus(enum.Enum):
    """Asset node status enum.

    NOTE(review): stored as plain strings in JSON/String columns; the enum is
    not enforced at the DB layer.
    """
    DRAFT = "draft"
    PROPOSED = "proposed"
    GENERATING = "generating"
    READY = "ready"
    APPROVED = "approved"
    REJECTED = "rejected"
|
||||
class Campaign(Base):
    """
    Campaign blueprint model.
    Stores campaign information, phases, and asset nodes.

    Child rows (proposals, generated assets) are owned by this campaign and
    are deleted with it via the delete-orphan cascades below.
    """

    __tablename__ = "product_marketing_campaigns"

    # Primary fields
    id = Column(Integer, primary_key=True, autoincrement=True)
    campaign_id = Column(String(255), unique=True, nullable=False, index=True)
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID

    # Campaign details
    campaign_name = Column(String(500), nullable=False)
    goal = Column(String(100), nullable=False)  # product_launch, awareness, conversion, etc.
    kpi = Column(String(500), nullable=True)
    status = Column(String(50), default="draft", nullable=False, index=True)

    # Campaign structure
    phases = Column(JSON, nullable=True)  # Array of phase objects
    channels = Column(JSON, nullable=False)  # Array of channel strings
    asset_nodes = Column(JSON, nullable=True)  # Array of asset node objects

    # Product context
    product_context = Column(JSON, nullable=True)  # Product information

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    proposals = relationship("CampaignProposal", back_populates="campaign", cascade="all, delete-orphan")
    generated_assets = relationship("CampaignAsset", back_populates="campaign", cascade="all, delete-orphan")

    # Composite indexes.
    # FIX: the original names ('idx_user_status', 'idx_user_created') were also
    # used by CampaignProposal/CampaignAsset on the same metadata; index names
    # must be unique per schema, so create_all() would fail on most backends.
    # Names are now prefixed with the table they belong to.
    __table_args__ = (
        Index('idx_pm_campaign_user_status', 'user_id', 'status'),
        Index('idx_pm_campaign_user_created', 'user_id', 'created_at'),
    )
|
||||
class CampaignProposal(Base):
    """
    Asset proposals for a campaign.
    Stores AI-generated proposals for each asset node.
    """

    __tablename__ = "product_marketing_proposals"

    id = Column(Integer, primary_key=True, autoincrement=True)
    # CASCADE on delete: removing the campaign removes its proposals at the DB level.
    campaign_id = Column(String(255), ForeignKey('product_marketing_campaigns.campaign_id', ondelete='CASCADE'), nullable=False, index=True)
    user_id = Column(String(255), nullable=False, index=True)

    # Asset node reference
    asset_node_id = Column(String(255), nullable=False, index=True)
    asset_type = Column(String(50), nullable=False)  # image, text, video, audio
    channel = Column(String(50), nullable=False)

    # Proposal details
    proposed_prompt = Column(Text, nullable=False)
    recommended_template = Column(String(255), nullable=True)
    recommended_provider = Column(String(100), nullable=True)
    recommended_model = Column(String(100), nullable=True)
    cost_estimate = Column(Float, default=0.0)
    concept_summary = Column(Text, nullable=True)

    # Status
    status = Column(String(50), default="proposed", nullable=False)  # proposed, approved, rejected, generating
    approved_at = Column(DateTime, nullable=True)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    campaign = relationship("Campaign", back_populates="proposals")
    generated_asset = relationship("CampaignAsset", back_populates="proposal", uselist=False)

    # Composite indexes.
    # FIX: the original names ('idx_campaign_node', 'idx_user_status') collided
    # with identically named indexes on Campaign/CampaignAsset sharing this
    # metadata; index names must be unique per schema, so create_all() would
    # fail on most backends. Names are now prefixed with the table.
    __table_args__ = (
        Index('idx_pm_proposal_campaign_node', 'campaign_id', 'asset_node_id'),
        Index('idx_pm_proposal_user_status', 'user_id', 'status'),
    )
|
||||
class CampaignAsset(Base):
    """
    Generated assets for a campaign.
    Links to ContentAsset and stores campaign-specific metadata.
    """

    __tablename__ = "product_marketing_assets"

    id = Column(Integer, primary_key=True, autoincrement=True)
    # CASCADE on delete: removing the campaign removes its generated assets.
    campaign_id = Column(String(255), ForeignKey('product_marketing_campaigns.campaign_id', ondelete='CASCADE'), nullable=False, index=True)
    # SET NULL: the asset survives if its originating proposal is deleted.
    proposal_id = Column(Integer, ForeignKey('product_marketing_proposals.id', ondelete='SET NULL'), nullable=True)
    user_id = Column(String(255), nullable=False, index=True)

    # Asset node reference
    asset_node_id = Column(String(255), nullable=False, index=True)

    # Link to ContentAsset (unified asset library)
    content_asset_id = Column(Integer, ForeignKey('content_assets.id', ondelete='SET NULL'), nullable=True)

    # Generation details
    provider = Column(String(100), nullable=True)
    model = Column(String(100), nullable=True)
    cost = Column(Float, default=0.0)
    generation_time = Column(Float, nullable=True)

    # Status
    status = Column(String(50), default="generating", nullable=False)  # generating, ready, approved, published
    approved_at = Column(DateTime, nullable=True)
    published_at = Column(DateTime, nullable=True)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    campaign = relationship("Campaign", back_populates="generated_assets")
    proposal = relationship("CampaignProposal", back_populates="generated_asset")

    # Composite indexes.
    # FIX: the original names ('idx_campaign_node', 'idx_user_status') collided
    # with identically named indexes on Campaign/CampaignProposal sharing this
    # metadata; index names must be unique per schema, so create_all() would
    # fail on most backends. Names are now prefixed with the table.
    __table_args__ = (
        Index('idx_pm_asset_campaign_node', 'campaign_id', 'asset_node_id'),
        Index('idx_pm_asset_user_status', 'user_id', 'status'),
    )
355
backend/models/research_intent_models.py
Normal file
355
backend/models/research_intent_models.py
Normal file
@@ -0,0 +1,355 @@
|
||||
"""
|
||||
Research Intent Models
|
||||
|
||||
Pydantic models for understanding user research intent.
|
||||
These models capture what the user actually wants to accomplish from their research,
|
||||
enabling targeted query generation and intent-aware result analysis.
|
||||
|
||||
Author: ALwrity Team
|
||||
Version: 1.0
|
||||
"""
|
||||
|
||||
from enum import Enum
|
||||
from typing import Dict, Any, List, Optional, Union
|
||||
from pydantic import BaseModel, Field
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class ResearchPurpose(str, Enum):
    """Why is the user researching?

    Subclasses str so members compare equal to (and serialize as) their
    string values.
    """
    LEARN = "learn"  # Understand a topic for personal knowledge
    CREATE_CONTENT = "create_content"  # Write article/blog/podcast/video
    MAKE_DECISION = "make_decision"  # Choose between options
    COMPARE = "compare"  # Compare alternatives/competitors
    SOLVE_PROBLEM = "solve_problem"  # Find solution to a problem
    FIND_DATA = "find_data"  # Get statistics/facts/citations
    EXPLORE_TRENDS = "explore_trends"  # Understand market/industry trends
    VALIDATE = "validate"  # Verify claims/information
    GENERATE_IDEAS = "generate_ideas"  # Brainstorm content ideas
|
||||
class ContentOutput(str, Enum):
    """What content type will be created from this research?

    GENERAL is the fallback when no specific output format is intended.
    """
    BLOG = "blog"
    PODCAST = "podcast"
    VIDEO = "video"
    SOCIAL_POST = "social_post"
    NEWSLETTER = "newsletter"
    PRESENTATION = "presentation"
    REPORT = "report"
    WHITEPAPER = "whitepaper"
    EMAIL = "email"
    GENERAL = "general"  # No specific output
|
||||
class ExpectedDeliverable(str, Enum):
    """What specific outputs the user expects from research.

    Also used as ResearchQuery.purpose to tag what a query targets.
    """
    KEY_STATISTICS = "key_statistics"  # Numbers, data points, percentages
    EXPERT_QUOTES = "expert_quotes"  # Authoritative statements
    CASE_STUDIES = "case_studies"  # Real examples and success stories
    COMPARISONS = "comparisons"  # Side-by-side analysis
    TRENDS = "trends"  # Market/industry trends
    BEST_PRACTICES = "best_practices"  # Recommendations and guidelines
    STEP_BY_STEP = "step_by_step"  # Process/how-to instructions
    PROS_CONS = "pros_cons"  # Advantages/disadvantages
    DEFINITIONS = "definitions"  # Clear explanations of concepts
    CITATIONS = "citations"  # Authoritative sources
    EXAMPLES = "examples"  # Concrete examples
    PREDICTIONS = "predictions"  # Future outlook
|
||||
class ResearchDepthLevel(str, Enum):
    """How deep the research should go (ordered from shallow to deep)."""
    OVERVIEW = "overview"  # Quick summary, surface level
    DETAILED = "detailed"  # In-depth analysis
    EXPERT = "expert"  # Comprehensive, expert-level research
|
||||
class InputType(str, Enum):
    """Type of user input detected."""
    KEYWORDS = "keywords"  # Simple keywords: "AI healthcare 2025"
    QUESTION = "question"  # A question: "What are the best AI tools?"
    GOAL = "goal"  # Goal statement: "I need to write a blog about..."
    MIXED = "mixed"  # Combination of above
|
||||
# ============================================================================
|
||||
# Structured Deliverable Models
|
||||
# ============================================================================
|
||||
|
||||
class StatisticWithCitation(BaseModel):
    """A statistic with full attribution.

    source/url are required; credibility defaults to 0.8 and is clamped to [0, 1].
    """
    statistic: str = Field(..., description="The full statistical statement")
    value: Optional[str] = Field(None, description="The numeric value (e.g., '72%')")
    context: str = Field(..., description="Context of when/where this was measured")
    source: str = Field(..., description="Source name/publication")
    url: str = Field(..., description="Source URL")
    credibility: float = Field(0.8, ge=0.0, le=1.0, description="Credibility score 0-1")
    recency: Optional[str] = Field(None, description="How recent the data is")
|
||||
class ExpertQuote(BaseModel):
    """A quote from an authoritative source.

    quote, speaker, source and url are required; attribution details are optional.
    """
    quote: str = Field(..., description="The actual quote")
    speaker: str = Field(..., description="Name of the speaker")
    title: Optional[str] = Field(None, description="Title/role of the speaker")
    organization: Optional[str] = Field(None, description="Organization/company")
    context: Optional[str] = Field(None, description="Context of the quote")
    source: str = Field(..., description="Source name")
    url: str = Field(..., description="Source URL")
|
||||
class CaseStudySummary(BaseModel):
    """Summary of a case study (challenge → solution → outcome)."""
    title: str = Field(..., description="Case study title")
    organization: str = Field(..., description="Organization featured")
    challenge: str = Field(..., description="The challenge/problem faced")
    solution: str = Field(..., description="The solution implemented")
    outcome: str = Field(..., description="The results achieved")
    key_metrics: List[str] = Field(default_factory=list, description="Key metrics/numbers")
    source: str = Field(..., description="Source name")
    url: str = Field(..., description="Source URL")
|
||||
class TrendAnalysis(BaseModel):
    """Analysis of a trend.

    direction is a free-form string; the description lists the expected
    vocabulary (growing/declining/emerging/stable) but it is not validated.
    """
    trend: str = Field(..., description="The trend description")
    direction: str = Field(..., description="growing, declining, emerging, stable")
    evidence: List[str] = Field(default_factory=list, description="Supporting evidence")
    impact: Optional[str] = Field(None, description="Potential impact")
    timeline: Optional[str] = Field(None, description="Timeline of the trend")
    sources: List[str] = Field(default_factory=list, description="Source URLs")
|
||||
class ComparisonItem(BaseModel):
    """An item in a comparison (one column of a ComparisonTable)."""
    name: str  # the option being compared; only required field
    description: Optional[str] = None
    pros: List[str] = Field(default_factory=list)
    cons: List[str] = Field(default_factory=list)
    features: Dict[str, str] = Field(default_factory=dict)  # criterion -> value
    rating: Optional[float] = None  # unbounded; no ge/le constraint applied
    source: Optional[str] = None
|
||||
class ComparisonTable(BaseModel):
    """Comparison between options, built from ComparisonItem entries."""
    title: str = Field(..., description="Comparison title")
    criteria: List[str] = Field(default_factory=list, description="Comparison criteria")
    items: List[ComparisonItem] = Field(default_factory=list, description="Items being compared")
    winner: Optional[str] = Field(None, description="Recommended option if applicable")
    verdict: Optional[str] = Field(None, description="Summary verdict")
|
||||
class ProsCons(BaseModel):
    """Pros and cons analysis; balanced_verdict is required."""
    subject: str = Field(..., description="What is being analyzed")
    pros: List[str] = Field(default_factory=list, description="Advantages")
    cons: List[str] = Field(default_factory=list, description="Disadvantages")
    balanced_verdict: str = Field(..., description="Balanced conclusion")
|
||||
class SourceWithRelevance(BaseModel):
    """A source with relevance information.

    Both scores default to 0.8 and are constrained to [0, 1].
    """
    title: str
    url: str
    excerpt: Optional[str] = None
    relevance_score: float = Field(0.8, ge=0.0, le=1.0)
    relevance_reason: Optional[str] = None
    content_type: Optional[str] = None  # article, research paper, news, etc.
    published_date: Optional[str] = None
    credibility_score: float = Field(0.8, ge=0.0, le=1.0)
|
||||
# ============================================================================
|
||||
# Intent Models
|
||||
# ============================================================================
|
||||
|
||||
class ResearchIntent(BaseModel):
    """
    What the user actually wants from their research.
    This is inferred from user input + research persona.

    Only primary_question and original_input are required; everything else has
    a default, so a minimal intent can be built from just those two fields.
    """

    # Core understanding
    primary_question: str = Field(..., description="The main question to answer")
    secondary_questions: List[str] = Field(
        default_factory=list,
        description="Related questions that should be answered"
    )

    # Purpose classification
    purpose: ResearchPurpose = Field(
        ResearchPurpose.LEARN,
        description="Why the user is researching"
    )
    content_output: ContentOutput = Field(
        ContentOutput.GENERAL,
        description="What content type will be created"
    )

    # What they need from results
    expected_deliverables: List[ExpectedDeliverable] = Field(
        default_factory=list,
        description="Specific outputs the user expects"
    )

    # Depth and focus
    depth: ResearchDepthLevel = Field(
        ResearchDepthLevel.DETAILED,
        description="How deep the research should go"
    )
    focus_areas: List[str] = Field(
        default_factory=list,
        description="Specific aspects to focus on"
    )

    # Constraints
    perspective: Optional[str] = Field(
        None,
        description="Perspective to research from (e.g., 'hospital administrator')"
    )
    time_sensitivity: Optional[str] = Field(
        None,
        description="Time constraint: 'real_time', 'recent', 'historical', 'evergreen'"
    )

    # Detected input type
    input_type: InputType = Field(
        InputType.KEYWORDS,
        description="Type of user input detected"
    )

    # Original user input (for reference)
    original_input: str = Field(..., description="The original user input")

    # Confidence in inference
    confidence: float = Field(
        0.8,
        ge=0.0,
        le=1.0,
        description="Confidence in the intent inference"
    )
    needs_clarification: bool = Field(
        False,
        description="True if AI is uncertain and needs user clarification"
    )
    clarifying_questions: List[str] = Field(
        default_factory=list,
        description="Questions to ask user if uncertain"
    )

    # NOTE(review): pydantic-v1-style Config; with use_enum_values the enum
    # fields hold plain string values after validation. If the project moves
    # to pydantic v2, this becomes model_config = ConfigDict(use_enum_values=True).
    class Config:
        use_enum_values = True
|
||||
class ResearchQuery(BaseModel):
    """A targeted research query with purpose.

    priority is constrained to the integer range 1-5 (higher = more important).
    """
    query: str = Field(..., description="The search query")
    purpose: ExpectedDeliverable = Field(..., description="What this query targets")
    provider: str = Field("exa", description="Preferred provider: exa, tavily, google")
    priority: int = Field(1, ge=1, le=5, description="Priority 1-5, higher = more important")
    expected_results: str = Field(..., description="What we expect to find with this query")
|
||||
class IntentInferenceRequest(BaseModel):
    """Request to infer research intent from user input.

    Persona/competitor context is enabled by default.
    """
    user_input: str = Field(..., description="User's keywords, question, or goal")
    keywords: List[str] = Field(default_factory=list, description="Extracted keywords")
    use_persona: bool = Field(True, description="Use research persona for context")
    use_competitor_data: bool = Field(True, description="Use competitor data for context")
|
||||
class IntentInferenceResponse(BaseModel):
    """Response from intent inference.

    Wraps the inferred ResearchIntent together with AI-generated suggestions
    (queries, keywords, angles) and quick options for user confirmation.
    """
    # Defaults to True; callers apparently flip this on failure — TODO confirm.
    success: bool = True
    intent: ResearchIntent
    analysis_summary: str = Field(..., description="AI's understanding of user intent")
    suggested_queries: List[ResearchQuery] = Field(
        default_factory=list,
        description="Generated research queries based on intent"
    )
    suggested_keywords: List[str] = Field(
        default_factory=list,
        description="Enhanced/expanded keywords"
    )
    suggested_angles: List[str] = Field(
        default_factory=list,
        description="Research angles to explore"
    )
    quick_options: List[Dict[str, Any]] = Field(
        default_factory=list,
        description="Quick options for user to confirm/modify intent"
    )
|
||||
|
||||
|
||||
# ============================================================================
|
||||
# Intent-Driven Research Result
|
||||
# ============================================================================
|
||||
|
||||
class IntentDrivenResearchResult(BaseModel):
    """
    Research results organized by what user needs.
    This is the final output after intent-aware analysis.

    Deliverable fields (statistics, quotes, case studies, ...) are only
    populated when the corresponding deliverable was requested in the intent;
    the rest default to empty collections.
    """

    success: bool = True

    # Direct answers
    primary_answer: str = Field(..., description="Direct answer to primary question")
    secondary_answers: Dict[str, str] = Field(
        default_factory=dict,
        description="Answers to secondary questions (question → answer)"
    )

    # Deliverables (populated based on user's expected_deliverables)
    statistics: List[StatisticWithCitation] = Field(default_factory=list)
    expert_quotes: List[ExpertQuote] = Field(default_factory=list)
    case_studies: List[CaseStudySummary] = Field(default_factory=list)
    comparisons: List[ComparisonTable] = Field(default_factory=list)
    trends: List[TrendAnalysis] = Field(default_factory=list)
    best_practices: List[str] = Field(default_factory=list)
    step_by_step: List[str] = Field(default_factory=list)
    pros_cons: Optional[ProsCons] = None
    definitions: Dict[str, str] = Field(
        default_factory=dict,
        description="Term → definition mappings"
    )
    examples: List[str] = Field(default_factory=list)
    predictions: List[str] = Field(default_factory=list)

    # Content-ready outputs
    executive_summary: str = Field("", description="2-3 sentence summary")
    key_takeaways: List[str] = Field(
        default_factory=list,
        description="5-7 key bullet points"
    )
    suggested_outline: List[str] = Field(
        default_factory=list,
        description="Suggested content outline if creating content"
    )

    # Supporting data
    sources: List[SourceWithRelevance] = Field(default_factory=list)
    raw_content: Optional[str] = Field(None, description="Raw content for further processing")

    # Research quality metadata
    confidence: float = Field(0.8, ge=0.0, le=1.0)
    gaps_identified: List[str] = Field(
        default_factory=list,
        description="What we couldn't find"
    )
    follow_up_queries: List[str] = Field(
        default_factory=list,
        description="Suggested additional research"
    )

    # Original intent for reference
    original_intent: Optional[ResearchIntent] = None

    # Error handling
    error_message: Optional[str] = None

    class Config:
        # Serialize enum members (e.g. in original_intent) as their values.
        use_enum_values = True
|
||||
|
||||
155
backend/models/research_persona_models.py
Normal file
155
backend/models/research_persona_models.py
Normal file
@@ -0,0 +1,155 @@
|
||||
"""
|
||||
Research Persona Models
|
||||
Pydantic models for AI-generated research personas.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
from pydantic import BaseModel, Field
|
||||
from datetime import datetime
|
||||
|
||||
|
||||
class ResearchPreset(BaseModel):
    """Research preset configuration.

    A reusable, named research setup (keywords + industry + audience + mode)
    with an embedded full config dict and optional UI metadata (icon/gradient).
    """
    name: str
    keywords: str
    industry: str
    target_audience: str
    research_mode: str = Field(..., description="basic, comprehensive, or targeted")
    # NOTE(review): stored as a plain dict rather than a typed ResearchConfig
    # model — values are not validated here; confirm callers validate upstream.
    config: Dict[str, Any] = Field(default_factory=dict, description="Complete ResearchConfig object")
    description: Optional[str] = None
    icon: Optional[str] = None
    gradient: Optional[str] = None
|
||||
|
||||
|
||||
class ResearchPersona(BaseModel):
    """AI-generated research persona providing personalized defaults and suggestions.

    All fields are suggestions/defaults derived from onboarding data and
    research history; every collection defaults to empty and every suggestion
    field is optional, so a partially-generated persona is still valid.
    """

    # Smart Defaults
    default_industry: str = Field(..., description="Default industry from onboarding data")
    default_target_audience: str = Field(..., description="Default target audience from onboarding data")
    default_research_mode: str = Field(..., description="basic, comprehensive, or targeted")
    default_provider: str = Field(..., description="google or exa")

    # Keyword Intelligence
    suggested_keywords: List[str] = Field(default_factory=list, description="8-12 relevant keywords")
    keyword_expansion_patterns: Dict[str, List[str]] = Field(
        default_factory=dict,
        description="Mapping of keywords to expanded, industry-specific terms"
    )

    # Domain & Source Intelligence
    suggested_exa_domains: List[str] = Field(
        default_factory=list,
        description="4-6 authoritative domains for the industry"
    )
    suggested_exa_category: Optional[str] = Field(
        None,
        description="Suggested Exa category based on industry"
    )
    suggested_exa_search_type: Optional[str] = Field(
        None,
        description="Suggested Exa search algorithm: auto, neural, keyword, fast, deep"
    )

    # Tavily Provider Intelligence
    # NOTE(review): string-valued option fields ("false", "basic", ...) mirror
    # the Tavily API's stringly-typed options — confirm before tightening types.
    suggested_tavily_topic: Optional[str] = Field(
        None,
        description="Suggested Tavily topic: general, news, finance"
    )
    suggested_tavily_search_depth: Optional[str] = Field(
        None,
        description="Suggested Tavily search depth: basic, advanced, fast, ultra-fast"
    )
    suggested_tavily_include_answer: Optional[str] = Field(
        None,
        description="Suggested Tavily answer type: false, basic, advanced"
    )
    suggested_tavily_time_range: Optional[str] = Field(
        None,
        description="Suggested Tavily time range: day, week, month, year"
    )
    suggested_tavily_raw_content_format: Optional[str] = Field(
        None,
        description="Suggested Tavily raw content format: false, markdown, text"
    )

    # Provider Selection Logic
    provider_recommendations: Dict[str, str] = Field(
        default_factory=dict,
        description="Provider recommendations by use case: {'trends': 'tavily', 'deep_research': 'exa', 'factual': 'google'}"
    )

    # Query Enhancement Intelligence
    research_angles: List[str] = Field(
        default_factory=list,
        description="5-8 alternative research angles/focuses"
    )
    query_enhancement_rules: Dict[str, str] = Field(
        default_factory=dict,
        description="Templates for improving vague user queries"
    )

    # Research History Insights
    recommended_presets: List[ResearchPreset] = Field(
        default_factory=list,
        description="3-5 personalized research preset templates"
    )

    # Research Preferences
    research_preferences: Dict[str, Any] = Field(
        default_factory=dict,
        description="Structured research preferences from onboarding"
    )

    # Metadata
    generated_at: Optional[str] = Field(None, description="ISO timestamp of generation")
    confidence_score: Optional[float] = Field(None, ge=0.0, le=1.0, description="Confidence score 0-1")
    version: Optional[str] = Field(None, description="Schema version")

    class Config:
        # Example payload surfaced in the OpenAPI/JSON schema for this model.
        json_schema_extra = {
            "example": {
                "default_industry": "Healthcare",
                "default_target_audience": "Medical professionals and healthcare administrators",
                "default_research_mode": "comprehensive",
                "default_provider": "exa",
                "suggested_keywords": ["telemedicine", "patient care", "healthcare technology"],
                "keyword_expansion_patterns": {
                    "AI": ["healthcare AI", "medical AI", "clinical AI"],
                    "tools": ["medical devices", "clinical tools"]
                },
                "suggested_exa_domains": ["pubmed.gov", "nejm.org", "thelancet.com"],
                "suggested_exa_category": "research paper",
                "suggested_exa_search_type": "neural",
                "suggested_tavily_topic": "news",
                "suggested_tavily_search_depth": "advanced",
                "suggested_tavily_include_answer": "advanced",
                "suggested_tavily_time_range": "month",
                "suggested_tavily_raw_content_format": "markdown",
                "provider_recommendations": {
                    "trends": "tavily",
                    "deep_research": "exa",
                    "factual": "google",
                    "news": "tavily",
                    "academic": "exa"
                },
                "research_angles": [
                    "Compare telemedicine platforms",
                    "Telemedicine ROI analysis",
                    "Latest telemedicine trends"
                ],
                "query_enhancement_rules": {
                    "vague_ai": "Research: AI applications in Healthcare for Medical professionals",
                    "vague_tools": "Compare top Healthcare tools"
                },
                "recommended_presets": [],
                "research_preferences": {
                    "research_depth": "comprehensive",
                    "content_types": ["blog", "article"]
                },
                "generated_at": "2024-01-01T00:00:00Z",
                "confidence_score": 0.85,
                "version": "1.0"
            }
        }
|
||||
|
||||
48
backend/models/scheduler_cumulative_stats_model.py
Normal file
48
backend/models/scheduler_cumulative_stats_model.py
Normal file
@@ -0,0 +1,48 @@
|
||||
"""
|
||||
Scheduler Cumulative Stats Model
|
||||
Model for storing persistent cumulative scheduler metrics that survive restarts.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, DateTime, Index
|
||||
from datetime import datetime
|
||||
from models.enhanced_strategy_models import Base
|
||||
|
||||
|
||||
class SchedulerCumulativeStats(Base):
    """Model for storing cumulative scheduler metrics that persist across restarts.

    This table is deliberately a singleton: all code reads and writes the row
    with id=1, enforced by the unique index below.
    """
    __tablename__ = "scheduler_cumulative_stats"

    id = Column(Integer, primary_key=True, index=True, default=1)  # Always use id=1
    total_check_cycles = Column(Integer, default=0, nullable=False)
    cumulative_tasks_found = Column(Integer, default=0, nullable=False)
    cumulative_tasks_executed = Column(Integer, default=0, nullable=False)
    cumulative_tasks_failed = Column(Integer, default=0, nullable=False)
    cumulative_tasks_skipped = Column(Integer, default=0, nullable=False)
    cumulative_job_completed = Column(Integer, default=0, nullable=False)
    cumulative_job_failed = Column(Integer, default=0, nullable=False)

    # NOTE(review): datetime.utcnow produces naive timestamps and is deprecated
    # on Python 3.12+ — consider datetime.now(timezone.utc) in a future migration.
    last_updated = Column(DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)
    last_check_cycle_id = Column(Integer, nullable=True)  # Reference to last check_cycle event log ID

    created_at = Column(DateTime, default=datetime.utcnow, nullable=False)
    updated_at = Column(DateTime, default=datetime.utcnow, nullable=False, onupdate=datetime.utcnow)

    __table_args__ = (
        # Redundant with the primary key but makes the single-row intent explicit.
        Index('idx_scheduler_cumulative_stats_single_row', 'id', unique=True),
    )

    @classmethod
    def get_or_create(cls, db_session):
        """
        Get the cumulative stats row (id=1) or create it if it doesn't exist.

        Commits immediately when the row is created so the singleton exists for
        subsequent sessions.

        NOTE(review): not safe under concurrent first-time creation — two
        processes may both miss the row and one insert will fail on the unique
        constraint; confirm callers serialize startup or handle IntegrityError.

        Returns:
            SchedulerCumulativeStats instance
        """
        stats = db_session.query(cls).filter(cls.id == 1).first()
        if not stats:
            stats = cls(id=1)
            db_session.add(stats)
            db_session.commit()
        return stats
|
||||
|
||||
48
backend/models/scheduler_models.py
Normal file
48
backend/models/scheduler_models.py
Normal file
@@ -0,0 +1,48 @@
|
||||
"""
|
||||
Scheduler Event Models
|
||||
Models for tracking scheduler-level events and history.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, JSON, Float
|
||||
from datetime import datetime
|
||||
|
||||
# Import the same Base from enhanced_strategy_models
|
||||
from models.enhanced_strategy_models import Base
|
||||
|
||||
|
||||
class SchedulerEventLog(Base):
    """Model for storing scheduler-level events (check cycles, interval adjustments, etc.).

    A single wide table for heterogeneous event types: most columns are
    nullable and only populated for the event types noted in their comments.
    """
    __tablename__ = "scheduler_event_logs"

    id = Column(Integer, primary_key=True, index=True)
    event_type = Column(String(50), nullable=False)  # 'check_cycle', 'interval_adjustment', 'start', 'stop', 'job_scheduled', 'job_cancelled'
    event_date = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)

    # Event details
    check_cycle_number = Column(Integer, nullable=True)  # For check_cycle events
    check_interval_minutes = Column(Integer, nullable=True)  # Interval at time of event
    previous_interval_minutes = Column(Integer, nullable=True)  # For interval_adjustment events
    new_interval_minutes = Column(Integer, nullable=True)  # For interval_adjustment events

    # Task execution summary for check cycles
    tasks_found = Column(Integer, nullable=True)
    tasks_executed = Column(Integer, nullable=True)
    tasks_failed = Column(Integer, nullable=True)
    tasks_by_type = Column(JSON, nullable=True)  # {'monitoring_task': 5, ...}

    # Job information
    job_id = Column(String(200), nullable=True)  # For job_scheduled/cancelled events
    job_type = Column(String(50), nullable=True)  # 'recurring', 'one_time'
    user_id = Column(String(200), nullable=True, index=True)  # For user isolation

    # Performance metrics
    check_duration_seconds = Column(Float, nullable=True)  # How long the check cycle took
    active_strategies_count = Column(Integer, nullable=True)
    active_executions = Column(Integer, nullable=True)

    # Additional context
    event_data = Column(JSON, nullable=True)  # Additional event-specific data
    error_message = Column(Text, nullable=True)  # For error events

    created_at = Column(DateTime, default=datetime.utcnow)
|
||||
|
||||
502
backend/models/seo_analysis.py
Normal file
502
backend/models/seo_analysis.py
Normal file
@@ -0,0 +1,502 @@
|
||||
"""
|
||||
Database models for SEO analysis data storage
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, DateTime, Text, JSON, Float, Boolean, ForeignKey, func
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, List
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class SEOActionType(Base):
    """Catalog of supported SEO action types (17 actions).

    Reference/lookup table; SEOActionRun rows point here via action_type_id.
    """
    __tablename__ = 'seo_action_types'

    id = Column(Integer, primary_key=True, index=True)
    code = Column(String(100), unique=True, nullable=False)  # e.g., analyze_page_speed
    name = Column(String(200), nullable=False)
    category = Column(String(50), nullable=True)  # content, technical, performance, etc.
    description = Column(Text, nullable=True)
    # Server-side timestamps via func.now(); updated_at refreshes on every UPDATE.
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())

    def __repr__(self):
        return f"<SEOActionType(code='{self.code}', category='{self.category}')>"
|
||||
|
||||
class SEOAnalysisSession(Base):
    """Anchor session for a set of SEO actions and summary.

    One row per analysis run of a URL; owns its action runs and analyses
    (delete-orphan cascade removes them when the session is deleted).
    """
    __tablename__ = 'seo_analysis_sessions'

    id = Column(Integer, primary_key=True, index=True)
    url = Column(String(500), nullable=False, index=True)
    triggered_by_user_id = Column(String(64), nullable=True)
    trigger_source = Column(String(32), nullable=True)  # manual, schedule, action_followup, system
    input_context = Column(JSON, nullable=True)
    # NOTE(review): default 'success' (not 'queued') means a freshly inserted
    # session already reads as finished — confirm this is intentional.
    status = Column(String(20), default='success')  # queued, running, success, failed, cancelled
    started_at = Column(DateTime, default=func.now(), nullable=False)
    completed_at = Column(DateTime, nullable=True)
    summary = Column(Text, nullable=True)
    overall_score = Column(Integer, nullable=True)
    health_label = Column(String(50), nullable=True)
    metrics = Column(JSON, nullable=True)
    issues_overview = Column(JSON, nullable=True)

    # Relationships
    action_runs = relationship("SEOActionRun", back_populates="session", cascade="all, delete-orphan")
    analyses = relationship("SEOAnalysis", back_populates="session", cascade="all, delete-orphan")

    def __repr__(self):
        return f"<SEOAnalysisSession(url='{self.url}', status='{self.status}')>"
|
||||
|
||||
class SEOActionRun(Base):
    """Each execution of a specific action (one of the 17).

    Joins a session to an SEOActionType and records inputs, status, and results.
    """
    __tablename__ = 'seo_action_runs'

    id = Column(Integer, primary_key=True, index=True)
    session_id = Column(Integer, ForeignKey('seo_analysis_sessions.id'), nullable=False)
    action_type_id = Column(Integer, ForeignKey('seo_action_types.id'), nullable=False)
    triggered_by_user_id = Column(String(64), nullable=True)
    input_params = Column(JSON, nullable=True)
    # NOTE(review): default 'success' mirrors SEOAnalysisSession.status — same
    # question applies (new rows read as already finished).
    status = Column(String(20), default='success')
    started_at = Column(DateTime, default=func.now(), nullable=False)
    completed_at = Column(DateTime, nullable=True)
    result_summary = Column(Text, nullable=True)
    result = Column(JSON, nullable=True)
    diagnostics = Column(JSON, nullable=True)

    # Relationships
    session = relationship("SEOAnalysisSession", back_populates="action_runs")
    action_type = relationship("SEOActionType")

    def __repr__(self):
        return f"<SEOActionRun(action_type_id={self.action_type_id}, status='{self.status}')>"
|
||||
|
||||
class SEOActionRunLink(Base):
    """Graph relations between action runs for narrative linkage.

    Directed edge table: from_action_run_id → to_action_run_id with a typed
    relation label.
    """
    __tablename__ = 'seo_action_run_links'

    id = Column(Integer, primary_key=True, index=True)
    from_action_run_id = Column(Integer, ForeignKey('seo_action_runs.id'), nullable=False)
    to_action_run_id = Column(Integer, ForeignKey('seo_action_runs.id'), nullable=False)
    relation = Column(String(50), nullable=False)  # followup_of, supports, caused_by
    created_at = Column(DateTime, default=func.now())

    def __repr__(self):
        return f"<SEOActionRunLink(relation='{self.relation}')>"
|
||||
|
||||
class SEOAnalysis(Base):
    """Main SEO analysis record.

    Root aggregate for a single analysis of a URL; issues, warnings,
    recommendations, and category scores hang off it with delete-orphan cascade.
    """
    __tablename__ = 'seo_analyses'

    id = Column(Integer, primary_key=True, index=True)
    url = Column(String(500), nullable=False, index=True)
    overall_score = Column(Integer, nullable=False)
    health_status = Column(String(50), nullable=False)  # excellent, good, needs_improvement, poor, error
    timestamp = Column(DateTime, default=datetime.utcnow, nullable=False)
    analysis_data = Column(JSON, nullable=True)  # Store complete analysis data
    # Optional back-link to the owning session (older records may predate sessions).
    session_id = Column(Integer, ForeignKey('seo_analysis_sessions.id'), nullable=True)

    # Relationships
    critical_issues = relationship("SEOIssue", back_populates="analysis", cascade="all, delete-orphan")
    warnings = relationship("SEOWarning", back_populates="analysis", cascade="all, delete-orphan")
    recommendations = relationship("SEORecommendation", back_populates="analysis", cascade="all, delete-orphan")
    category_scores = relationship("SEOCategoryScore", back_populates="analysis", cascade="all, delete-orphan")
    session = relationship("SEOAnalysisSession", back_populates="analyses")

    def __repr__(self):
        return f"<SEOAnalysis(url='{self.url}', score={self.overall_score}, status='{self.health_status}')>"
|
||||
|
||||
class SEOIssue(Base):
    """Critical SEO issues.

    Child of SEOAnalysis; session_id/action_run_id are optional provenance links.
    """
    __tablename__ = 'seo_issues'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)
    session_id = Column(Integer, ForeignKey('seo_analysis_sessions.id'), nullable=True)
    action_run_id = Column(Integer, ForeignKey('seo_action_runs.id'), nullable=True)
    issue_text = Column(Text, nullable=False)
    category = Column(String(100), nullable=True)  # url_structure, meta_data, content, etc.
    priority = Column(String(20), default='critical')  # critical, high, medium, low
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship
    analysis = relationship("SEOAnalysis", back_populates="critical_issues")

    def __repr__(self):
        return f"<SEOIssue(category='{self.category}', priority='{self.priority}')>"
|
||||
|
||||
class SEOWarning(Base):
    """SEO warnings.

    Non-critical findings; structurally parallel to SEOIssue but with a
    'medium' default priority.
    """
    __tablename__ = 'seo_warnings'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)
    session_id = Column(Integer, ForeignKey('seo_analysis_sessions.id'), nullable=True)
    action_run_id = Column(Integer, ForeignKey('seo_action_runs.id'), nullable=True)
    warning_text = Column(Text, nullable=False)
    category = Column(String(100), nullable=True)
    priority = Column(String(20), default='medium')
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship
    analysis = relationship("SEOAnalysis", back_populates="warnings")

    def __repr__(self):
        return f"<SEOWarning(category='{self.category}', priority='{self.priority}')>"
|
||||
|
||||
class SEORecommendation(Base):
    """SEO recommendations.

    Actionable suggestions attached to an analysis, tagged with implementation
    difficulty and estimated impact for prioritization.
    """
    __tablename__ = 'seo_recommendations'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)
    session_id = Column(Integer, ForeignKey('seo_analysis_sessions.id'), nullable=True)
    action_run_id = Column(Integer, ForeignKey('seo_action_runs.id'), nullable=True)
    recommendation_text = Column(Text, nullable=False)
    category = Column(String(100), nullable=True)
    difficulty = Column(String(20), default='medium')  # easy, medium, hard
    estimated_impact = Column(String(20), default='medium')  # high, medium, low
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship
    analysis = relationship("SEOAnalysis", back_populates="recommendations")

    def __repr__(self):
        return f"<SEORecommendation(category='{self.category}', difficulty='{self.difficulty}')>"
|
||||
|
||||
class SEOCategoryScore(Base):
    """Individual category scores.

    One row per (analysis, category) pair; score is out of max_score (100 by
    default).
    """
    __tablename__ = 'seo_category_scores'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)
    category = Column(String(100), nullable=False)  # url_structure, meta_data, content, etc.
    score = Column(Integer, nullable=False)
    max_score = Column(Integer, default=100)
    details = Column(JSON, nullable=True)  # Store category-specific details

    # Relationship
    analysis = relationship("SEOAnalysis", back_populates="category_scores")

    def __repr__(self):
        return f"<SEOCategoryScore(category='{self.category}', score={self.score})>"
|
||||
|
||||
class SEOAnalysisHistory(Base):
    """Historical SEO analysis data for tracking improvements.

    Denormalized per-analysis snapshot (flat category-score columns instead of
    joins) so score trends per URL can be queried cheaply.
    """
    __tablename__ = 'seo_analysis_history'

    id = Column(Integer, primary_key=True, index=True)
    url = Column(String(500), nullable=False, index=True)
    analysis_date = Column(DateTime, default=datetime.utcnow, nullable=False)
    overall_score = Column(Integer, nullable=False)
    health_status = Column(String(50), nullable=False)
    score_change = Column(Integer, default=0)  # Change from previous analysis

    # Category scores for tracking
    url_structure_score = Column(Integer, nullable=True)
    meta_data_score = Column(Integer, nullable=True)
    content_score = Column(Integer, nullable=True)
    technical_score = Column(Integer, nullable=True)
    performance_score = Column(Integer, nullable=True)
    accessibility_score = Column(Integer, nullable=True)
    user_experience_score = Column(Integer, nullable=True)
    security_score = Column(Integer, nullable=True)

    # Issue counts
    critical_issues_count = Column(Integer, default=0)
    warnings_count = Column(Integer, default=0)
    recommendations_count = Column(Integer, default=0)

    def __repr__(self):
        return f"<SEOAnalysisHistory(url='{self.url}', score={self.overall_score}, date='{self.analysis_date}')>"
|
||||
|
||||
class SEOKeywordAnalysis(Base):
    """Keyword analysis data.

    Per-keyword density and placement flags for one analysis.
    """
    __tablename__ = 'seo_keyword_analyses'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)
    keyword = Column(String(200), nullable=False)
    # Presumably a 0-1 or percentage ratio of keyword occurrences to total
    # words — TODO confirm against the analyzer that populates this.
    density = Column(Float, nullable=True)
    count = Column(Integer, default=0)
    in_title = Column(Boolean, default=False)
    in_headings = Column(Boolean, default=False)
    in_alt_text = Column(Boolean, default=False)
    in_meta_description = Column(Boolean, default=False)

    def __repr__(self):
        return f"<SEOKeywordAnalysis(keyword='{self.keyword}', density={self.density})>"
|
||||
|
||||
class SEOTechnicalData(Base):
    """Technical SEO data.

    One-to-one detail record per analysis: meta tags, canonical/schema/hreflang
    flags, social-tag counts, and robots/sitemap presence.
    """
    __tablename__ = 'seo_technical_data'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)

    # Meta data
    title = Column(Text, nullable=True)
    title_length = Column(Integer, nullable=True)
    meta_description = Column(Text, nullable=True)
    meta_description_length = Column(Integer, nullable=True)

    # Technical elements
    has_canonical = Column(Boolean, default=False)
    canonical_url = Column(String(500), nullable=True)
    has_schema_markup = Column(Boolean, default=False)
    schema_types = Column(JSON, nullable=True)
    has_hreflang = Column(Boolean, default=False)
    hreflang_data = Column(JSON, nullable=True)

    # Social media
    og_tags_count = Column(Integer, default=0)
    twitter_tags_count = Column(Integer, default=0)

    # Technical files
    robots_txt_exists = Column(Boolean, default=False)
    sitemap_exists = Column(Boolean, default=False)

    def __repr__(self):
        return f"<SEOTechnicalData(title_length={self.title_length}, has_schema={self.has_schema_markup})>"
|
||||
|
||||
class SEOContentData(Base):
    """Content analysis data.

    Per-analysis content metrics: text volume, heading/image/link counts, and
    quality scores.
    """
    __tablename__ = 'seo_content_data'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)

    # Content metrics
    word_count = Column(Integer, default=0)
    char_count = Column(Integer, default=0)
    headings_count = Column(Integer, default=0)
    h1_count = Column(Integer, default=0)
    h2_count = Column(Integer, default=0)

    # Media
    images_count = Column(Integer, default=0)
    images_with_alt = Column(Integer, default=0)
    images_without_alt = Column(Integer, default=0)

    # Links
    internal_links_count = Column(Integer, default=0)
    external_links_count = Column(Integer, default=0)

    # Quality metrics
    # NOTE(review): scale/metric (e.g. Flesch) not visible here — confirm with
    # the analyzer that writes it.
    readability_score = Column(Float, nullable=True)
    spelling_errors = Column(Integer, default=0)

    def __repr__(self):
        return f"<SEOContentData(word_count={self.word_count}, readability={self.readability_score})>"
|
||||
|
||||
class SEOPerformanceData(Base):
    """Performance analysis data.

    Load-time, compression, caching, and server-header details for one analysis.
    """
    __tablename__ = 'seo_performance_data'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)

    # Load time (units not recorded here — presumably seconds; confirm writer)
    load_time = Column(Float, nullable=True)

    # Compression
    is_compressed = Column(Boolean, default=False)
    compression_type = Column(String(50), nullable=True)  # gzip, br, etc.

    # Caching
    has_cache_headers = Column(Boolean, default=False)
    cache_control = Column(String(200), nullable=True)

    # HTTP headers
    content_encoding = Column(String(100), nullable=True)
    server_info = Column(String(200), nullable=True)

    def __repr__(self):
        return f"<SEOPerformanceData(load_time={self.load_time}, compressed={self.is_compressed})>"
|
||||
|
||||
class SEOAccessibilityData(Base):
    """Accessibility analysis data.

    Alt-text coverage, form labeling ratios, and ARIA usage for one analysis.
    """
    __tablename__ = 'seo_accessibility_data'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)

    # Alt text
    images_with_alt = Column(Integer, default=0)
    images_without_alt = Column(Integer, default=0)
    alt_text_ratio = Column(Float, nullable=True)

    # Forms
    form_fields_count = Column(Integer, default=0)
    labeled_fields_count = Column(Integer, default=0)
    label_ratio = Column(Float, nullable=True)

    # ARIA
    aria_elements_count = Column(Integer, default=0)

    def __repr__(self):
        return f"<SEOAccessibilityData(alt_ratio={self.alt_text_ratio}, aria_count={self.aria_elements_count})>"
|
||||
|
||||
class SEOUserExperienceData(Base):
    """User experience analysis data.

    Mobile-friendliness, CTAs, navigation, contact info, and social links
    detected for one analysis.
    """
    __tablename__ = 'seo_user_experience_data'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)

    # Mobile
    is_mobile_friendly = Column(Boolean, default=False)
    has_viewport = Column(Boolean, default=False)

    # CTAs
    ctas_found = Column(JSON, nullable=True)  # List of found CTAs
    cta_count = Column(Integer, default=0)

    # Navigation
    has_navigation = Column(Boolean, default=False)
    nav_elements_count = Column(Integer, default=0)

    # Contact info
    has_contact_info = Column(Boolean, default=False)

    # Social media
    social_links_count = Column(Integer, default=0)
    social_links = Column(JSON, nullable=True)

    def __repr__(self):
        return f"<SEOUserExperienceData(mobile_friendly={self.is_mobile_friendly}, cta_count={self.cta_count})>"
|
||||
|
||||
class SEOSecurityData(Base):
    """Security headers analysis data.

    Per-analysis presence flags for common HTTP security headers, an HTTPS
    flag, and a summary score with raw present/missing header lists.
    """
    __tablename__ = 'seo_security_data'

    id = Column(Integer, primary_key=True, index=True)
    analysis_id = Column(Integer, ForeignKey('seo_analyses.id'), nullable=False)

    # Security headers
    has_x_frame_options = Column(Boolean, default=False)
    has_x_content_type_options = Column(Boolean, default=False)
    has_x_xss_protection = Column(Boolean, default=False)
    has_strict_transport_security = Column(Boolean, default=False)
    has_content_security_policy = Column(Boolean, default=False)
    has_referrer_policy = Column(Boolean, default=False)

    # HTTPS
    is_https = Column(Boolean, default=False)

    # Total security score
    security_score = Column(Integer, default=0)
    present_headers = Column(JSON, nullable=True)
    missing_headers = Column(JSON, nullable=True)

    def __repr__(self):
        return f"<SEOSecurityData(score={self.security_score}, https={self.is_https})>"
|
||||
|
||||
# Helper functions for data conversion
|
||||
def create_analysis_from_result(result: 'SEOAnalysisResult') -> SEOAnalysis:
    """Build a persistable SEOAnalysis row from an in-memory analysis result."""
    # Collect the column values first, then construct the record in one shot.
    fields = {
        'url': result.url,
        'overall_score': result.overall_score,
        'health_status': result.health_status,
        'timestamp': result.timestamp,
        'analysis_data': result.data,
    }
    return SEOAnalysis(**fields)
|
||||
|
||||
def create_issues_from_result(analysis_id: int, result: 'SEOAnalysisResult') -> List[SEOIssue]:
    """Create one SEOIssue row per critical issue found in *result*."""

    def _normalize(raw):
        # Issues arrive either as plain strings or as dicts carrying
        # 'message'/'category' keys; reduce both to (text, category).
        if isinstance(raw, dict):
            text = raw.get('message', str(raw))
            return text, raw.get('category', extract_category_from_text(text))
        text = str(raw)
        return text, extract_category_from_text(text)

    return [
        SEOIssue(
            analysis_id=analysis_id,
            issue_text=text,
            category=category,
            priority='critical',
        )
        for text, category in map(_normalize, result.critical_issues)
    ]
|
||||
|
||||
def create_warnings_from_result(analysis_id: int, result: 'SEOAnalysisResult') -> List[SEOWarning]:
    """Create one SEOWarning row per warning found in *result*."""

    def _normalize(raw):
        # Warnings arrive either as plain strings or as dicts carrying
        # 'message'/'category' keys; reduce both to (text, category).
        if isinstance(raw, dict):
            text = raw.get('message', str(raw))
            return text, raw.get('category', extract_category_from_text(text))
        text = str(raw)
        return text, extract_category_from_text(text)

    return [
        SEOWarning(
            analysis_id=analysis_id,
            warning_text=text,
            category=category,
            priority='medium',
        )
        for text, category in map(_normalize, result.warnings)
    ]
|
||||
|
||||
def create_recommendations_from_result(analysis_id: int, result: 'SEOAnalysisResult') -> List[SEORecommendation]:
    """Create one SEORecommendation row per recommendation found in *result*."""

    def _normalize(raw):
        # Recommendations arrive either as plain strings or as dicts carrying
        # 'message'/'category' keys; reduce both to (text, category).
        if isinstance(raw, dict):
            text = raw.get('message', str(raw))
            return text, raw.get('category', extract_category_from_text(text))
        text = str(raw)
        return text, extract_category_from_text(text)

    return [
        SEORecommendation(
            analysis_id=analysis_id,
            recommendation_text=text,
            category=category,
            difficulty='medium',          # no per-item signal available; fixed default
            estimated_impact='medium',    # likewise a fixed default
        )
        for text, category in map(_normalize, result.recommendations)
    ]
|
||||
|
||||
def create_category_scores_from_result(analysis_id: int, result: 'SEOAnalysisResult') -> List[SEOCategoryScore]:
    """Create SEOCategoryScore rows for every category in *result* that reports a score."""
    records = []
    for name, payload in result.data.items():
        # Only dict payloads carrying an explicit 'score' key produce a row.
        if not isinstance(payload, dict) or 'score' not in payload:
            continue
        records.append(SEOCategoryScore(
            analysis_id=analysis_id,
            category=name,
            score=payload['score'],
            max_score=100,   # scores are expressed on a 0-100 scale
            details=payload, # keep the full category payload for drill-down
        ))
    return records
|
||||
|
||||
def extract_category_from_text(text: str) -> str:
    """Classify an issue/warning/recommendation message into an SEO category.

    Case-insensitive substring scan over an ordered rule table; the first
    matching rule wins, so order is significant (e.g. 'image' hits
    content_analysis before the accessibility rule is ever consulted).
    Returns 'general' when no keyword matches.
    """
    # Ordered (keywords, category) rules — first hit wins.
    rules = (
        (('title', 'meta', 'description'), 'meta_data'),
        (('https', 'url', 'security'), 'url_structure'),
        (('content', 'word', 'heading', 'image'), 'content_analysis'),
        (('schema', 'canonical', 'technical'), 'technical_seo'),
        (('speed', 'load', 'performance'), 'performance'),
        (('alt', 'accessibility', 'aria'), 'accessibility'),
        (('mobile', 'cta', 'navigation'), 'user_experience'),
    )

    haystack = text.lower()
    for keywords, category in rules:
        if any(keyword in haystack for keyword in keywords):
            return category
    return 'general'
|
||||
474
backend/models/stability_models.py
Normal file
474
backend/models/stability_models.py
Normal file
@@ -0,0 +1,474 @@
|
||||
"""Pydantic models for Stability AI API requests and responses."""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import Optional, List, Union, Literal, Tuple
|
||||
from enum import Enum
|
||||
|
||||
|
||||
# ==================== ENUMS ====================
|
||||
|
||||
class OutputFormat(str, Enum):
    """Supported output formats for images."""
    # Values are the exact wire strings the Stability REST API expects.
    JPEG = "jpeg"
    PNG = "png"
    WEBP = "webp"
|
||||
|
||||
|
||||
class AudioOutputFormat(str, Enum):
    """Supported output formats for audio."""
    # Values are the exact wire strings the Stability REST API expects.
    MP3 = "mp3"
    WAV = "wav"
|
||||
|
||||
|
||||
class AspectRatio(str, Enum):
    """Supported aspect ratios."""
    # width:height strings accepted by the generation endpoints,
    # listed from widest (21:9) to tallest (9:21).
    RATIO_21_9 = "21:9"
    RATIO_16_9 = "16:9"
    RATIO_3_2 = "3:2"
    RATIO_5_4 = "5:4"
    RATIO_1_1 = "1:1"
    RATIO_4_5 = "4:5"
    RATIO_2_3 = "2:3"
    RATIO_9_16 = "9:16"
    RATIO_9_21 = "9:21"
|
||||
|
||||
|
||||
class StylePreset(str, Enum):
    """Supported style presets."""
    # Values are the exact preset identifiers accepted by the API
    # (kebab-case on the wire, hence the hyphenated strings).
    ENHANCE = "enhance"
    ANIME = "anime"
    PHOTOGRAPHIC = "photographic"
    DIGITAL_ART = "digital-art"
    COMIC_BOOK = "comic-book"
    FANTASY_ART = "fantasy-art"
    LINE_ART = "line-art"
    ANALOG_FILM = "analog-film"
    NEON_PUNK = "neon-punk"
    ISOMETRIC = "isometric"
    LOW_POLY = "low-poly"
    ORIGAMI = "origami"
    MODELING_COMPOUND = "modeling-compound"
    CINEMATIC = "cinematic"
    THREE_D_MODEL = "3d-model"
    PIXEL_ART = "pixel-art"
    TILE_TEXTURE = "tile-texture"
|
||||
|
||||
|
||||
class FinishReason(str, Enum):
    """Generation finish reasons."""
    SUCCESS = "SUCCESS"
    # NOTE(review): presumably indicates the moderation filter altered or
    # blocked the output — confirm against the Stability API reference.
    CONTENT_FILTERED = "CONTENT_FILTERED"
|
||||
|
||||
|
||||
class GenerationMode(str, Enum):
    """Generation modes for SD3."""
    TEXT_TO_IMAGE = "text-to-image"
    IMAGE_TO_IMAGE = "image-to-image"
|
||||
|
||||
|
||||
class SD3Model(str, Enum):
    """SD3 model variants."""
    SD3_5_LARGE = "sd3.5-large"
    SD3_5_LARGE_TURBO = "sd3.5-large-turbo"
    SD3_5_MEDIUM = "sd3.5-medium"
|
||||
|
||||
|
||||
class AudioModel(str, Enum):
    """Audio model variants."""
    STABLE_AUDIO_2_5 = "stable-audio-2.5"
    STABLE_AUDIO_2 = "stable-audio-2"
|
||||
|
||||
|
||||
class TextureResolution(str, Enum):
    """Texture resolution for 3D models."""
    # Values are strings, not ints — the API takes them as form fields.
    RES_512 = "512"
    RES_1024 = "1024"
    RES_2048 = "2048"
|
||||
|
||||
|
||||
class RemeshType(str, Enum):
    """Remesh types for 3D models."""
    NONE = "none"
    TRIANGLE = "triangle"
    QUAD = "quad"
|
||||
|
||||
|
||||
class TargetType(str, Enum):
    """Target types for 3D mesh simplification."""
    NONE = "none"
    VERTEX = "vertex"
    FACE = "face"
|
||||
|
||||
|
||||
class LightSourceDirection(str, Enum):
    """Light source directions."""
    LEFT = "left"
    RIGHT = "right"
    ABOVE = "above"
    BELOW = "below"
|
||||
|
||||
|
||||
class InpaintMode(str, Enum):
    """Inpainting modes."""
    SEARCH = "search"
    MASK = "mask"
|
||||
|
||||
|
||||
# ==================== BASE MODELS ====================
|
||||
|
||||
class BaseStabilityRequest(BaseModel):
    """Base request model with common fields."""
    # Upper bound is 2**32 - 2; 0 presumably means "let the backend pick a
    # random seed" — confirm against the Stability API reference.
    seed: Optional[int] = Field(default=0, ge=0, le=4294967294, description="Random seed for generation")
    output_format: Optional[OutputFormat] = Field(default=OutputFormat.PNG, description="Output image format")
|
||||
|
||||
|
||||
class BaseImageRequest(BaseStabilityRequest):
    """Base request for image operations.

    Extends the common base with a negative prompt shared by most
    prompt-driven image endpoints.
    """
    negative_prompt: Optional[str] = Field(default=None, max_length=10000, description="What you do not want to see")
|
||||
|
||||
|
||||
# ==================== GENERATE MODELS ====================
|
||||
|
||||
class StableImageUltraRequest(BaseImageRequest):
    """Request model for Stable Image Ultra generation."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for image generation")
    aspect_ratio: Optional[AspectRatio] = Field(default=AspectRatio.RATIO_1_1, description="Aspect ratio")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
    # Only meaningful when an input image accompanies the request (multipart).
    strength: Optional[float] = Field(default=None, ge=0, le=1, description="Image influence strength (required if image provided)")
|
||||
|
||||
|
||||
class StableImageCoreRequest(BaseImageRequest):
    """Request model for Stable Image Core generation."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for image generation")
    aspect_ratio: Optional[AspectRatio] = Field(default=AspectRatio.RATIO_1_1, description="Aspect ratio")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class StableSD3Request(BaseImageRequest):
    """Request model for Stable Diffusion 3.5 generation."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for image generation")
    mode: Optional[GenerationMode] = Field(default=GenerationMode.TEXT_TO_IMAGE, description="Generation mode")
    # aspect_ratio and strength are mutually exclusive in practice:
    # the former applies only in text-to-image mode, the latter only
    # in image-to-image mode (see field descriptions).
    aspect_ratio: Optional[AspectRatio] = Field(default=AspectRatio.RATIO_1_1, description="Aspect ratio (text-to-image only)")
    model: Optional[SD3Model] = Field(default=SD3Model.SD3_5_LARGE, description="SD3 model variant")
    strength: Optional[float] = Field(default=None, ge=0, le=1, description="Image influence strength (image-to-image only)")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
    cfg_scale: Optional[float] = Field(default=None, ge=1, le=10, description="CFG scale")
|
||||
|
||||
|
||||
# ==================== EDIT MODELS ====================
|
||||
|
||||
class EraseRequest(BaseStabilityRequest):
    """Request model for image erasing.

    The image (and optional mask) travel as multipart file parts, not here.
    """
    grow_mask: Optional[float] = Field(default=5, ge=0, le=20, description="Mask edge growth in pixels")
|
||||
|
||||
|
||||
class InpaintRequest(BaseImageRequest):
    """Request model for image inpainting."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for inpainting")
    # Note the wider 0-100 range here, vs 0-20 on EraseRequest.
    grow_mask: Optional[float] = Field(default=5, ge=0, le=100, description="Mask edge growth in pixels")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class OutpaintRequest(BaseStabilityRequest):
    """Request model for image outpainting.

    All four directions default to 0; callers are expected to set at
    least one to a positive pixel count.
    """
    left: Optional[int] = Field(default=0, ge=0, le=2000, description="Pixels to outpaint left")
    right: Optional[int] = Field(default=0, ge=0, le=2000, description="Pixels to outpaint right")
    up: Optional[int] = Field(default=0, ge=0, le=2000, description="Pixels to outpaint up")
    down: Optional[int] = Field(default=0, ge=0, le=2000, description="Pixels to outpaint down")
    creativity: Optional[float] = Field(default=0.5, ge=0, le=1, description="Creativity level")
    # Prompt is optional for outpainting, hence default "" rather than required.
    prompt: Optional[str] = Field(default="", max_length=10000, description="Text prompt for outpainting")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class SearchAndReplaceRequest(BaseImageRequest):
    """Request model for search and replace."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for replacement")
    # Describes the object to locate; the service builds the mask from it.
    search_prompt: str = Field(..., max_length=10000, description="What to search for")
    grow_mask: Optional[float] = Field(default=3, ge=0, le=20, description="Mask edge growth in pixels")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class SearchAndRecolorRequest(BaseImageRequest):
    """Request model for search and recolor."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for recoloring")
    # Recolor uses 'select_prompt' (vs 'search_prompt' on search-and-replace),
    # mirroring the API's field naming.
    select_prompt: str = Field(..., max_length=10000, description="What to select for recoloring")
    grow_mask: Optional[float] = Field(default=3, ge=0, le=20, description="Mask edge growth in pixels")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class RemoveBackgroundRequest(BaseStabilityRequest):
    """Request model for background removal."""
    pass  # Only requires image (multipart) and the inherited output_format/seed
|
||||
|
||||
|
||||
class ReplaceBackgroundAndRelightRequest(BaseImageRequest):
    """Request model for background replacement and relighting."""
    # Unlike most requests, the subject image is part of this model rather
    # than a separate multipart-only field.
    subject_image: bytes = Field(..., description="Subject image binary data")
    background_prompt: Optional[str] = Field(default=None, max_length=10000, description="Background description")
    foreground_prompt: Optional[str] = Field(default=None, max_length=10000, description="Subject description")
    preserve_original_subject: Optional[float] = Field(default=0.6, ge=0, le=1, description="Subject preservation")
    original_background_depth: Optional[float] = Field(default=0.5, ge=0, le=1, description="Background depth matching")
    keep_original_background: Optional[bool] = Field(default=False, description="Keep original background")
    light_source_direction: Optional[LightSourceDirection] = Field(default=None, description="Light direction")
    light_source_strength: Optional[float] = Field(default=0.3, ge=0, le=1, description="Light strength")
|
||||
|
||||
|
||||
# ==================== UPSCALE MODELS ====================
|
||||
|
||||
class FastUpscaleRequest(BaseStabilityRequest):
    """Request model for fast upscaling."""
    pass  # Only requires image (multipart) and the inherited output_format/seed
|
||||
|
||||
|
||||
class ConservativeUpscaleRequest(BaseImageRequest):
    """Request model for conservative upscaling."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for upscaling")
    # Narrower creativity band (0.2-0.5) than creative upscaling.
    creativity: Optional[float] = Field(default=0.35, ge=0.2, le=0.5, description="Creativity level")
|
||||
|
||||
|
||||
class CreativeUpscaleRequest(BaseImageRequest):
    """Request model for creative upscaling."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for upscaling")
    creativity: Optional[float] = Field(default=0.3, ge=0.1, le=0.5, description="Creativity level")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
# ==================== CONTROL MODELS ====================
|
||||
|
||||
class SketchControlRequest(BaseImageRequest):
    """Request model for sketch control (generation guided by a sketch image)."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for generation")
    control_strength: Optional[float] = Field(default=0.7, ge=0, le=1, description="Control strength")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class StructureControlRequest(BaseImageRequest):
    """Request model for structure control (generation guided by image structure)."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for generation")
    control_strength: Optional[float] = Field(default=0.7, ge=0, le=1, description="Control strength")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class StyleControlRequest(BaseImageRequest):
    """Request model for style control (generation guided by a style image)."""
    prompt: str = Field(..., min_length=1, max_length=10000, description="Text prompt for generation")
    aspect_ratio: Optional[AspectRatio] = Field(default=AspectRatio.RATIO_1_1, description="Aspect ratio")
    # 'fidelity' here plays the role 'control_strength' plays on the
    # sketch/structure endpoints.
    fidelity: Optional[float] = Field(default=0.5, ge=0, le=1, description="Style fidelity")
    style_preset: Optional[StylePreset] = Field(default=None, description="Style preset")
|
||||
|
||||
|
||||
class StyleTransferRequest(BaseImageRequest):
    """Request model for style transfer."""
    # Prompt is optional for style transfer, hence default "".
    prompt: Optional[str] = Field(default="", max_length=10000, description="Text prompt for generation")
    style_strength: Optional[float] = Field(default=1, ge=0, le=1, description="Style strength")
    composition_fidelity: Optional[float] = Field(default=0.9, ge=0, le=1, description="Composition fidelity")
    change_strength: Optional[float] = Field(default=0.9, ge=0.1, le=1, description="Change strength")
|
||||
|
||||
|
||||
# ==================== 3D MODELS ====================
|
||||
|
||||
class StableFast3DRequest(BaseStabilityRequest):
    """Request model for Stable Fast 3D."""
    texture_resolution: Optional[TextureResolution] = Field(default=TextureResolution.RES_1024, description="Texture resolution")
    foreground_ratio: Optional[float] = Field(default=0.85, ge=0.1, le=1, description="Foreground ratio")
    remesh: Optional[RemeshType] = Field(default=RemeshType.NONE, description="Remesh algorithm")
    # -1 presumably disables the vertex-count target — confirm against API docs.
    vertex_count: Optional[int] = Field(default=-1, ge=-1, le=20000, description="Target vertex count")
|
||||
|
||||
|
||||
class StablePointAware3DRequest(BaseStabilityRequest):
    """Request model for Stable Point Aware 3D."""
    texture_resolution: Optional[TextureResolution] = Field(default=TextureResolution.RES_1024, description="Texture resolution")
    # Note the different range (1-2) vs StableFast3DRequest's 0.1-1.
    foreground_ratio: Optional[float] = Field(default=1.3, ge=1, le=2, description="Foreground ratio")
    remesh: Optional[RemeshType] = Field(default=RemeshType.NONE, description="Remesh algorithm")
    target_type: Optional[TargetType] = Field(default=TargetType.NONE, description="Target type")
    target_count: Optional[int] = Field(default=1000, ge=100, le=20000, description="Target count")
    guidance_scale: Optional[float] = Field(default=3, ge=1, le=10, description="Guidance scale")
|
||||
|
||||
|
||||
# ==================== AUDIO MODELS ====================
|
||||
|
||||
class TextToAudioRequest(BaseModel):
    """Request model for text-to-audio generation.

    Deliberately not derived from BaseStabilityRequest: audio endpoints
    use their own output formats and extra sampling fields.
    """
    prompt: str = Field(..., max_length=10000, description="Audio generation prompt")
    duration: Optional[float] = Field(default=190, ge=1, le=190, description="Duration in seconds")
    seed: Optional[int] = Field(default=0, ge=0, le=4294967294, description="Random seed")
    # Valid step ranges differ per model, so no ge/le constraint here.
    steps: Optional[int] = Field(default=None, description="Sampling steps (model-dependent)")
    cfg_scale: Optional[float] = Field(default=None, ge=1, le=25, description="CFG scale")
    model: Optional[AudioModel] = Field(default=AudioModel.STABLE_AUDIO_2, description="Audio model")
    output_format: Optional[AudioOutputFormat] = Field(default=AudioOutputFormat.MP3, description="Output format")
|
||||
|
||||
|
||||
class AudioToAudioRequest(BaseModel):
    """Request model for audio-to-audio generation.

    Same shape as TextToAudioRequest plus a strength field controlling how
    much the input audio influences the output.
    """
    prompt: str = Field(..., max_length=10000, description="Audio generation prompt")
    duration: Optional[float] = Field(default=190, ge=1, le=190, description="Duration in seconds")
    seed: Optional[int] = Field(default=0, ge=0, le=4294967294, description="Random seed")
    steps: Optional[int] = Field(default=None, description="Sampling steps (model-dependent)")
    cfg_scale: Optional[float] = Field(default=None, ge=1, le=25, description="CFG scale")
    model: Optional[AudioModel] = Field(default=AudioModel.STABLE_AUDIO_2, description="Audio model")
    output_format: Optional[AudioOutputFormat] = Field(default=AudioOutputFormat.MP3, description="Output format")
    strength: Optional[float] = Field(default=1, ge=0, le=1, description="Audio influence strength")
|
||||
|
||||
|
||||
class AudioInpaintRequest(BaseModel):
    """Request model for audio inpainting (regenerating a masked time span)."""
    prompt: str = Field(..., max_length=10000, description="Audio generation prompt")
    duration: Optional[float] = Field(default=190, ge=1, le=190, description="Duration in seconds")
    seed: Optional[int] = Field(default=0, ge=0, le=4294967294, description="Random seed")
    steps: Optional[int] = Field(default=8, ge=4, le=8, description="Sampling steps")
    output_format: Optional[AudioOutputFormat] = Field(default=AudioOutputFormat.MP3, description="Output format")
    # Mask window in seconds; no cross-field check enforces
    # mask_start <= mask_end here — the API validates that.
    mask_start: Optional[float] = Field(default=30, ge=0, le=190, description="Mask start time")
    mask_end: Optional[float] = Field(default=190, ge=0, le=190, description="Mask end time")
|
||||
|
||||
|
||||
# ==================== RESPONSE MODELS ====================
|
||||
|
||||
class GenerationResponse(BaseModel):
    """Response model for generation requests (async: poll with this ID)."""
    id: str = Field(..., description="Generation ID for async operations")
|
||||
|
||||
|
||||
class ImageGenerationResponse(BaseModel):
    """Response model for direct (synchronous) image generation."""
    image: Optional[str] = Field(default=None, description="Base64 encoded image")
    seed: Optional[int] = Field(default=None, description="Seed used for generation")
    finish_reason: Optional[FinishReason] = Field(default=None, description="Generation finish reason")
|
||||
|
||||
|
||||
class AudioGenerationResponse(BaseModel):
    """Response model for audio generation."""
    audio: Optional[str] = Field(default=None, description="Base64 encoded audio")
    seed: Optional[int] = Field(default=None, description="Seed used for generation")
    finish_reason: Optional[FinishReason] = Field(default=None, description="Generation finish reason")
|
||||
|
||||
|
||||
class GenerationStatusResponse(BaseModel):
    """Response model for generation status polling."""
    id: str = Field(..., description="Generation ID")
    # Only "in-progress" is modeled; a finished generation returns the
    # result payload instead of this status shape.
    status: Literal["in-progress"] = Field(..., description="Generation status")
|
||||
|
||||
|
||||
class ErrorResponse(BaseModel):
    """Error response model returned by the Stability API."""
    id: str = Field(..., description="Error ID")
    name: str = Field(..., description="Error name")
    errors: List[str] = Field(..., description="Error messages")
|
||||
|
||||
|
||||
# ==================== LEGACY V1 MODELS ====================
|
||||
|
||||
class TextPrompt(BaseModel):
    """Text prompt for V1 API."""
    text: str = Field(..., max_length=2000, description="Prompt text")
    # Negative weights down-weight a prompt in the V1 API; no range enforced here.
    weight: Optional[float] = Field(default=1.0, description="Prompt weight")
|
||||
|
||||
|
||||
class V1TextToImageRequest(BaseModel):
    """V1 Text-to-image request (legacy engine-based API)."""
    text_prompts: List[TextPrompt] = Field(..., min_items=1, description="Text prompts")
    height: Optional[int] = Field(default=512, ge=128, description="Image height")
    width: Optional[int] = Field(default=512, ge=128, description="Image width")
    cfg_scale: Optional[float] = Field(default=7, ge=0, le=35, description="CFG scale")
    samples: Optional[int] = Field(default=1, ge=1, le=10, description="Number of samples")
    steps: Optional[int] = Field(default=30, ge=10, le=50, description="Diffusion steps")
    # V1 seed max is 2**32 - 1 (one higher than the v2 limit).
    seed: Optional[int] = Field(default=0, ge=0, le=4294967295, description="Random seed")
|
||||
|
||||
|
||||
class V1ImageToImageRequest(BaseModel):
    """V1 Image-to-image request (legacy engine-based API)."""
    text_prompts: List[TextPrompt] = Field(..., min_items=1, description="Text prompts")
    image_strength: Optional[float] = Field(default=0.35, ge=0, le=1, description="Image strength")
    init_image_mode: Optional[str] = Field(default="IMAGE_STRENGTH", description="Init image mode")
    cfg_scale: Optional[float] = Field(default=7, ge=0, le=35, description="CFG scale")
    samples: Optional[int] = Field(default=1, ge=1, le=10, description="Number of samples")
    steps: Optional[int] = Field(default=30, ge=10, le=50, description="Diffusion steps")
    seed: Optional[int] = Field(default=0, ge=0, le=4294967295, description="Random seed")
|
||||
|
||||
|
||||
class V1MaskingRequest(BaseModel):
    """V1 Masking (inpainting) request (legacy engine-based API)."""
    text_prompts: List[TextPrompt] = Field(..., min_items=1, description="Text prompts")
    mask_source: str = Field(..., description="Mask source")
    cfg_scale: Optional[float] = Field(default=7, ge=0, le=35, description="CFG scale")
    samples: Optional[int] = Field(default=1, ge=1, le=10, description="Number of samples")
    steps: Optional[int] = Field(default=30, ge=10, le=50, description="Diffusion steps")
    seed: Optional[int] = Field(default=0, ge=0, le=4294967295, description="Random seed")
|
||||
|
||||
|
||||
class V1GenerationArtifact(BaseModel):
    """V1 Generation artifact (one generated image)."""
    base64: str = Field(..., description="Base64 encoded image")
    seed: int = Field(..., description="Generation seed")
    # camelCase mirrors the V1 wire format exactly — do not rename.
    finishReason: str = Field(..., description="Finish reason")
|
||||
|
||||
|
||||
class V1GenerationResponse(BaseModel):
    """V1 Generation response (wraps one artifact per requested sample)."""
    artifacts: List[V1GenerationArtifact] = Field(..., description="Generated artifacts")
|
||||
|
||||
|
||||
# ==================== USER & ACCOUNT MODELS ====================
|
||||
|
||||
class OrganizationMembership(BaseModel):
    """Organization membership details."""
    id: str = Field(..., description="Organization ID")
    name: str = Field(..., description="Organization name")
    role: str = Field(..., description="User role")
    is_default: bool = Field(..., description="Is default organization")
|
||||
|
||||
|
||||
class AccountResponse(BaseModel):
    """Account details response."""
    id: str = Field(..., description="User ID")
    email: str = Field(..., description="User email")
    profile_picture: str = Field(..., description="Profile picture URL")
    organizations: List[OrganizationMembership] = Field(..., description="Organizations")
|
||||
|
||||
|
||||
class BalanceResponse(BaseModel):
    """Balance response."""
    credits: float = Field(..., description="Credit balance")
|
||||
|
||||
|
||||
class Engine(BaseModel):
    """Engine details (V1 engines listing)."""
    id: str = Field(..., description="Engine ID")
    name: str = Field(..., description="Engine name")
    description: str = Field(..., description="Engine description")
    type: str = Field(..., description="Engine type")
|
||||
|
||||
|
||||
class ListEnginesResponse(BaseModel):
    """List engines response."""
    engines: List[Engine] = Field(..., description="Available engines")
|
||||
|
||||
|
||||
# ==================== MULTIPART FORM MODELS ====================
|
||||
|
||||
class MultipartImageRequest(BaseModel):
    """Base multipart request with image (file part of a multipart upload)."""
    image: bytes = Field(..., description="Image file binary data")
|
||||
|
||||
|
||||
class MultipartAudioRequest(BaseModel):
    """Base multipart request with audio (file part of a multipart upload)."""
    audio: bytes = Field(..., description="Audio file binary data")
|
||||
|
||||
|
||||
class MultipartMaskRequest(BaseModel):
    """Multipart request with image and optional mask."""
    image: bytes = Field(..., description="Image file binary data")
    # When absent, the service derives the mask (e.g. from the alpha channel
    # or a search prompt, depending on endpoint).
    mask: Optional[bytes] = Field(default=None, description="Mask file binary data")
|
||||
|
||||
|
||||
class MultipartStyleTransferRequest(BaseModel):
    """Multipart request for style transfer."""
    init_image: bytes = Field(..., description="Initial image binary data")
    style_image: bytes = Field(..., description="Style image binary data")
|
||||
|
||||
|
||||
class MultipartReplaceBackgroundRequest(BaseModel):
    """Multipart request for background replacement."""
    subject_image: bytes = Field(..., description="Subject image binary data")
    background_reference: Optional[bytes] = Field(default=None, description="Background reference image")
    light_reference: Optional[bytes] = Field(default=None, description="Light reference image")
|
||||
354
backend/models/story_models.py
Normal file
354
backend/models/story_models.py
Normal file
@@ -0,0 +1,354 @@
|
||||
"""
|
||||
Story Writer Models
|
||||
|
||||
Pydantic models for story generation API requests and responses.
|
||||
"""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from typing import List, Optional, Dict, Any, Union
|
||||
|
||||
|
||||
class StoryGenerationRequest(BaseModel):
    """Request model for story generation.

    Bundles the creative brief (persona, setting, characters, plot, style)
    with feature toggles and the media-generation settings used when
    illustrations, narration, or video are enabled.
    """
    persona: str = Field(..., description="The persona statement for the author")
    story_setting: str = Field(..., description="The setting of the story")
    character_input: str = Field(..., description="The characters in the story")
    plot_elements: str = Field(..., description="The plot elements of the story")
    writing_style: str = Field(..., description="The writing style (e.g., Formal, Casual, Poetic, Humorous)")
    story_tone: str = Field(..., description="The tone of the story (e.g., Dark, Uplifting, Suspenseful, Whimsical)")
    narrative_pov: str = Field(..., description="The narrative point of view (e.g., First Person, Third Person Limited, Third Person Omniscient)")
    audience_age_group: str = Field(..., description="The target audience age group (e.g., Children, Young Adults, Adults)")
    content_rating: str = Field(..., description="The content rating (e.g., G, PG, PG-13, R)")
    ending_preference: str = Field(..., description="The preferred ending (e.g., Happy, Tragic, Cliffhanger, Twist)")
    story_length: str = Field(default="Medium", description="Target story length (Short: >1000 words, Medium: >5000 words, Long: >10000 words)")
    enable_explainer: bool = Field(default=True, description="Enable explainer features")
    enable_illustration: bool = Field(default=True, description="Enable illustration features")
    enable_video_narration: bool = Field(default=True, description="Enable story video and narration features")

    # Image generation settings (used when enable_illustration is on)
    image_provider: Optional[str] = Field(default=None, description="Image generation provider (gemini, huggingface, stability)")
    image_width: int = Field(default=1024, description="Image width in pixels")
    image_height: int = Field(default=1024, description="Image height in pixels")
    image_model: Optional[str] = Field(default=None, description="Image generation model")

    # Video generation settings (used when enable_video_narration is on)
    video_fps: int = Field(default=24, description="Frames per second for video")
    video_transition_duration: float = Field(default=0.5, description="Duration of transitions between scenes in seconds")

    # Audio generation settings (used when enable_video_narration is on)
    audio_provider: Optional[str] = Field(default="gtts", description="TTS provider (gtts, pyttsx3)")
    audio_lang: str = Field(default="en", description="Language code for TTS")
    audio_slow: bool = Field(default=False, description="Whether to speak slowly (gTTS only)")
    audio_rate: int = Field(default=150, description="Speech rate (pyttsx3 only)")
|
||||
|
||||
|
||||
class StorySetupGenerationRequest(BaseModel):
    """Request model for AI story setup generation.

    A free-text idea from which the service proposes full setup options.
    """
    story_idea: str = Field(..., description="Basic story idea or information from the user")
|
||||
|
||||
|
||||
class StorySetupOption(BaseModel):
    """One complete story setup suggestion produced by the AI.

    Bundles the narrative parameters (characters, tone, POV, ...) with the
    media-generation defaults (image, video, audio) chosen for this option.
    """

    persona: str = Field(
        ..., description="The persona statement for the author")
    story_setting: str = Field(
        ..., description="The setting of the story")
    character_input: str = Field(
        ..., description="The characters in the story")
    plot_elements: str = Field(
        ..., description="The plot elements of the story")
    writing_style: str = Field(
        ..., description="The writing style")
    story_tone: str = Field(
        ..., description="The tone of the story")
    narrative_pov: str = Field(
        ..., description="The narrative point of view")
    audience_age_group: str = Field(
        ..., description="The target audience age group")
    content_rating: str = Field(
        ..., description="The content rating")
    ending_preference: str = Field(
        ..., description="The preferred ending")
    story_length: str = Field(
        default="Medium",
        description="Target story length (Short: >1000 words, Medium: >5000 words, Long: >10000 words)")
    premise: str = Field(
        ..., description="The story premise (1-2 sentences)")
    reasoning: str = Field(
        ..., description="Brief reasoning for this setup option")

    # Defaults for image generation
    image_provider: Optional[str] = Field(
        default=None,
        description="Image generation provider (gemini, huggingface, stability)")
    image_width: int = Field(
        default=1024, description="Image width in pixels")
    image_height: int = Field(
        default=1024, description="Image height in pixels")
    image_model: Optional[str] = Field(
        default=None, description="Image generation model")

    # Defaults for video generation
    video_fps: int = Field(
        default=24, description="Frames per second for video")
    video_transition_duration: float = Field(
        default=0.5,
        description="Duration of transitions between scenes in seconds")

    # Defaults for audio generation
    audio_provider: Optional[str] = Field(
        default="gtts", description="TTS provider (gtts, pyttsx3)")
    audio_lang: str = Field(
        default="en", description="Language code for TTS")
    audio_slow: bool = Field(
        default=False, description="Whether to speak slowly (gTTS only)")
    audio_rate: int = Field(
        default=150, description="Speech rate (pyttsx3 only)")
|
||||
|
||||
|
||||
class StorySetupGenerationResponse(BaseModel):
    """Result of story setup generation: a set of setup options."""

    options: List[StorySetupOption] = Field(
        ..., description="Three story setup options")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
|
||||
|
||||
|
||||
class StoryScene(BaseModel):
    """A single scene of a story, with prompts for media generation."""

    scene_number: int = Field(
        ..., description="Scene number")
    title: str = Field(
        ..., description="Scene title")
    description: str = Field(
        ..., description="Scene description")
    image_prompt: str = Field(
        ..., description="Image prompt for scene visualization")
    audio_narration: str = Field(
        ..., description="Audio narration text for the scene")
    character_descriptions: List[str] = Field(
        default_factory=list,
        description="Character descriptions in the scene")
    key_events: List[str] = Field(
        default_factory=list, description="Key events in the scene")
|
||||
|
||||
|
||||
class StoryStartRequest(StoryGenerationRequest):
    """Request to start writing a story from an approved premise/outline.

    Extends the base generation request with the premise and the outline
    (either plain text or a structured scene list).
    """

    premise: str = Field(
        ..., description="The story premise")
    outline: Union[str, List[StoryScene], List[Dict[str, Any]]] = Field(
        ..., description="The story outline (text or structured scenes)")
|
||||
|
||||
|
||||
class StoryPremiseResponse(BaseModel):
    """Result of premise generation."""

    premise: str = Field(
        ..., description="Generated story premise")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    task_id: Optional[str] = Field(
        default=None, description="Task ID for async operations")
|
||||
|
||||
|
||||
class StoryOutlineResponse(BaseModel):
    """Result of outline generation; outline may be text or scene objects."""

    outline: Union[str, List[StoryScene]] = Field(
        ...,
        description="Generated story outline (text or structured scenes)")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    task_id: Optional[str] = Field(
        default=None, description="Task ID for async operations")
    is_structured: bool = Field(
        default=False,
        description="Whether the outline is structured (scenes) or plain text")
|
||||
|
||||
|
||||
class StoryContentResponse(BaseModel):
    """Result of story content generation (possibly partial)."""

    story: str = Field(
        ..., description="Generated story content")
    premise: Optional[str] = Field(
        default=None, description="Story premise")
    outline: Optional[str] = Field(
        default=None, description="Story outline")
    is_complete: bool = Field(
        default=False, description="Whether the story is complete")
    iterations: int = Field(
        default=0, description="Number of continuation iterations")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    task_id: Optional[str] = Field(
        default=None, description="Task ID for async operations")
|
||||
|
||||
|
||||
class StoryFullGenerationResponse(BaseModel):
    """Result of end-to-end generation: premise, outline, and story text."""

    premise: str = Field(
        ..., description="Generated story premise")
    outline: str = Field(
        ..., description="Generated story outline")
    story: str = Field(
        ..., description="Generated complete story")
    is_complete: bool = Field(
        default=False, description="Whether the story is complete")
    iterations: int = Field(
        default=0, description="Number of continuation iterations")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    task_id: Optional[str] = Field(
        default=None, description="Task ID for async operations")
|
||||
|
||||
|
||||
class StoryContinueRequest(BaseModel):
    """Request to continue an in-progress story.

    Carries the text written so far plus the full narrative configuration
    so the continuation stays consistent with earlier output.
    """

    premise: str = Field(
        ..., description="The story premise")
    outline: Union[str, List[StoryScene], List[Dict[str, Any]]] = Field(
        ..., description="The story outline (text or structured scenes)")
    story_text: str = Field(
        ..., description="Current story text to continue from")
    persona: str = Field(
        ..., description="The persona statement for the author")
    story_setting: str = Field(
        ..., description="The setting of the story")
    character_input: str = Field(
        ..., description="The characters in the story")
    plot_elements: str = Field(
        ..., description="The plot elements of the story")
    writing_style: str = Field(
        ..., description="The writing style")
    story_tone: str = Field(
        ..., description="The tone of the story")
    narrative_pov: str = Field(
        ..., description="The narrative point of view")
    audience_age_group: str = Field(
        ..., description="The target audience age group")
    content_rating: str = Field(
        ..., description="The content rating")
    ending_preference: str = Field(
        ..., description="The preferred ending")
    story_length: str = Field(
        default="Medium",
        description="Target story length (Short: >1000 words, Medium: >5000 words, Long: >10000 words)")
|
||||
|
||||
|
||||
class StoryContinueResponse(BaseModel):
    """Result of a single story continuation step."""

    continuation: str = Field(
        ..., description="Generated story continuation")
    is_complete: bool = Field(
        default=False,
        description="Whether the story is complete (contains IAMDONE)")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
|
||||
|
||||
|
||||
class TaskStatus(BaseModel):
    """Status snapshot of an asynchronous background task."""

    task_id: str = Field(
        ..., description="Task ID")
    status: str = Field(
        ...,
        description="Task status (pending, processing, completed, failed)")
    progress: Optional[float] = Field(
        default=None, description="Progress percentage (0-100)")
    message: Optional[str] = Field(
        default=None, description="Progress message")
    result: Optional[Dict[str, Any]] = Field(
        default=None, description="Task result when completed")
    error: Optional[str] = Field(
        default=None, description="Error message if failed")
    created_at: Optional[str] = Field(
        default=None, description="Task creation timestamp")
    updated_at: Optional[str] = Field(
        default=None, description="Task last update timestamp")
|
||||
|
||||
|
||||
class StoryImageGenerationRequest(BaseModel):
    """Request to generate one image per scene."""

    scenes: List[StoryScene] = Field(
        ..., description="List of scenes to generate images for")
    provider: Optional[str] = Field(
        default=None,
        description="Image generation provider (gemini, huggingface, stability)")
    width: Optional[int] = Field(
        default=1024, description="Image width")
    height: Optional[int] = Field(
        default=1024, description="Image height")
    model: Optional[str] = Field(
        default=None, description="Image generation model")
|
||||
|
||||
|
||||
class StoryImageResult(BaseModel):
    """One generated scene image (or the error that prevented it)."""

    scene_number: int = Field(
        ..., description="Scene number")
    scene_title: str = Field(
        ..., description="Scene title")
    image_filename: str = Field(
        ..., description="Image filename")
    image_url: str = Field(
        ..., description="Image URL")
    width: int = Field(
        ..., description="Image width")
    height: int = Field(
        ..., description="Image height")
    provider: str = Field(
        ..., description="Image generation provider")
    model: Optional[str] = Field(
        default=None, description="Image generation model")
    seed: Optional[int] = Field(
        default=None, description="Image generation seed")
    error: Optional[str] = Field(
        default=None, description="Error message if generation failed")
|
||||
|
||||
|
||||
class StoryImageGenerationResponse(BaseModel):
    """Result of batch image generation for a story's scenes."""

    images: List[StoryImageResult] = Field(
        ..., description="List of generated images")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    task_id: Optional[str] = Field(
        default=None, description="Task ID for async operations")
|
||||
|
||||
|
||||
class RegenerateImageRequest(BaseModel):
    """Request to regenerate one scene image from a caller-supplied prompt.

    The prompt is used verbatim; no AI prompt generation is involved.
    """

    scene_number: int = Field(
        ..., description="Scene number to regenerate image for")
    scene_title: str = Field(
        ..., description="Scene title")
    prompt: str = Field(
        ...,
        description="Direct prompt to use for image generation (no AI prompt generation)")
    provider: Optional[str] = Field(
        default=None,
        description="Image generation provider (gemini, huggingface, stability)")
    width: Optional[int] = Field(
        default=1024, description="Image width")
    height: Optional[int] = Field(
        default=1024, description="Image height")
    model: Optional[str] = Field(
        default=None, description="Model to use for image generation")
|
||||
|
||||
|
||||
class RegenerateImageResponse(BaseModel):
    """Result of regenerating a single scene image."""

    scene_number: int = Field(
        ..., description="Scene number")
    scene_title: str = Field(
        ..., description="Scene title")
    image_filename: str = Field(
        ..., description="Generated image filename")
    image_url: str = Field(
        ..., description="Image URL")
    width: int = Field(
        ..., description="Image width")
    height: int = Field(
        ..., description="Image height")
    provider: str = Field(
        ..., description="Provider used")
    model: Optional[str] = Field(
        default=None, description="Model used")
    seed: Optional[int] = Field(
        default=None, description="Seed used")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    error: Optional[str] = Field(
        default=None, description="Error message if generation failed")
|
||||
|
||||
|
||||
class StoryAudioGenerationRequest(BaseModel):
    """Request to synthesize narration audio for each scene."""

    scenes: List[StoryScene] = Field(
        ..., description="List of scenes to generate audio for")
    provider: Optional[str] = Field(
        default="gtts", description="TTS provider (gtts, pyttsx3)")
    lang: Optional[str] = Field(
        default="en", description="Language code for TTS")
    slow: Optional[bool] = Field(
        default=False, description="Whether to speak slowly (gTTS only)")
    rate: Optional[int] = Field(
        default=150, description="Speech rate (pyttsx3 only)")
|
||||
|
||||
|
||||
class StoryAudioResult(BaseModel):
    """One generated narration audio file (or the error encountered)."""

    scene_number: int = Field(
        ..., description="Scene number")
    scene_title: str = Field(
        ..., description="Scene title")
    audio_filename: str = Field(
        ..., description="Audio filename")
    audio_url: str = Field(
        ..., description="Audio URL")
    provider: str = Field(
        ..., description="TTS provider")
    file_size: int = Field(
        ..., description="Audio file size in bytes")
    error: Optional[str] = Field(
        default=None, description="Error message if generation failed")
|
||||
|
||||
|
||||
class StoryAudioGenerationResponse(BaseModel):
    """Result of batch audio generation for a story's scenes."""

    audio_files: List[StoryAudioResult] = Field(
        ..., description="List of generated audio files")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    task_id: Optional[str] = Field(
        default=None, description="Task ID for async operations")
|
||||
|
||||
|
||||
class GenerateAIAudioRequest(BaseModel):
    """Request to generate premium AI voice audio for a single scene."""

    scene_number: int = Field(
        ..., description="Scene number to generate audio for")
    scene_title: str = Field(
        ..., description="Scene title")
    text: str = Field(
        ..., description="Text to convert to speech")
    voice_id: Optional[str] = Field(
        default="Wise_Woman",
        description="Voice ID for AI audio generation")
    speed: Optional[float] = Field(
        default=1.0, description="Speech speed (0.5-2.0)")
    volume: Optional[float] = Field(
        default=1.0, description="Speech volume (0.1-10.0)")
    pitch: Optional[float] = Field(
        default=0.0, description="Speech pitch (-12 to 12)")
    emotion: Optional[str] = Field(
        default="happy", description="Emotion for speech")
|
||||
|
||||
|
||||
class GenerateAIAudioResponse(BaseModel):
    """Result of AI voice generation for a single scene, including cost."""

    scene_number: int = Field(
        ..., description="Scene number")
    scene_title: str = Field(
        ..., description="Scene title")
    audio_filename: str = Field(
        ..., description="Generated audio filename")
    audio_url: str = Field(
        ..., description="Audio URL")
    provider: str = Field(
        ..., description="Provider used (wavespeed)")
    model: str = Field(
        ..., description="Model used (minimax/speech-02-hd)")
    voice_id: str = Field(
        ..., description="Voice ID used")
    text_length: int = Field(
        ..., description="Number of characters in text")
    file_size: int = Field(
        ..., description="Audio file size in bytes")
    cost: float = Field(
        ..., description="Cost of generation")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    error: Optional[str] = Field(
        default=None, description="Error message if generation failed")
|
||||
|
||||
|
||||
class StoryVideoGenerationRequest(BaseModel):
    """Request to assemble scene images/animations and audio into one video.

    Per-scene media is passed as parallel lists; animated clips and AI
    audio, when provided, take precedence over stills and free audio.
    """

    scenes: List[StoryScene] = Field(
        ..., description="List of scenes to generate video for")
    image_urls: List[str] = Field(
        ..., description="List of image URLs for each scene")
    audio_urls: List[str] = Field(
        ..., description="List of audio URLs for each scene")
    video_urls: Optional[List[Optional[str]]] = Field(
        default=None,
        description="Optional list of animated video URLs (preferred over images)")
    ai_audio_urls: Optional[List[Optional[str]]] = Field(
        default=None,
        description="Optional list of AI audio URLs (preferred over free audio)")
    story_title: Optional[str] = Field(
        default="Story", description="Title of the story")
    fps: Optional[int] = Field(
        default=24, description="Frames per second for video")
    transition_duration: Optional[float] = Field(
        default=0.5, description="Duration of transitions between scenes")
|
||||
|
||||
|
||||
class StoryVideoResult(BaseModel):
    """One assembled story video (or the error that prevented it)."""

    video_filename: str = Field(
        ..., description="Video filename")
    video_url: str = Field(
        ..., description="Video URL")
    duration: float = Field(
        ..., description="Video duration in seconds")
    fps: int = Field(
        ..., description="Frames per second")
    file_size: int = Field(
        ..., description="Video file size in bytes")
    num_scenes: int = Field(
        ..., description="Number of scenes in the video")
    error: Optional[str] = Field(
        default=None, description="Error message if generation failed")
|
||||
|
||||
|
||||
class StoryVideoGenerationResponse(BaseModel):
    """Result of story video assembly."""

    video: StoryVideoResult = Field(
        ..., description="Generated video")
    success: bool = Field(
        default=True, description="Whether the generation was successful")
    task_id: Optional[str] = Field(
        default=None, description="Task ID for async operations")
|
||||
|
||||
|
||||
class AnimateSceneRequest(BaseModel):
    """Request to animate a single scene image into a short preview clip."""

    scene_number: int = Field(
        ..., description="Scene number to animate")
    scene_data: Dict[str, Any] = Field(
        ..., description="Scene data payload")
    story_context: Dict[str, Any] = Field(
        ..., description="Story-wide context used for prompts")
    image_url: str = Field(
        ..., description="Relative URL to the generated scene image")
    duration: int = Field(
        default=5, description="Animation duration (5 or 10 seconds)")
|
||||
|
||||
|
||||
class AnimateSceneVoiceoverRequest(AnimateSceneRequest):
    """Animation request variant for WaveSpeed InfiniteTalk (with voiceover)."""

    audio_url: str = Field(
        ..., description="Relative URL to the generated scene audio")
    resolution: Optional[str] = Field(
        default="720p", description="Output resolution ('480p' or '720p')")
    prompt: Optional[str] = Field(
        default=None, description="Optional positive prompt override")
|
||||
|
||||
|
||||
class AnimateSceneResponse(BaseModel):
    """Result of a scene animation, including billing and resume info."""

    success: bool = Field(
        default=True, description="Whether the animation succeeded")
    scene_number: int = Field(
        ..., description="Scene number animated")
    video_filename: str = Field(
        ..., description="Stored video filename")
    video_url: str = Field(
        ..., description="API URL to access the animated video")
    duration: int = Field(
        ..., description="Duration of the animation")
    cost: float = Field(
        ..., description="Cost billed for the animation")
    prompt_used: str = Field(
        ..., description="Animation prompt passed to the model")
    provider: str = Field(
        default="wavespeed", description="Underlying provider used")
    prediction_id: Optional[str] = Field(
        default=None,
        description="WaveSpeed prediction ID for resume operations")
|
||||
|
||||
|
||||
class ResumeSceneAnimationRequest(BaseModel):
    """Request to resume downloading a previously started scene animation."""

    prediction_id: str = Field(
        ..., description="WaveSpeed prediction ID to resume from")
    scene_number: int = Field(
        ..., description="Scene number being resumed")
    duration: int = Field(
        default=5, description="Animation duration (5 or 10 seconds)")
|
||||
388
backend/models/subscription_models.py
Normal file
388
backend/models/subscription_models.py
Normal file
@@ -0,0 +1,388 @@
|
||||
"""
|
||||
Subscription and Usage Tracking Models
|
||||
Comprehensive models for usage-based subscription system with API cost tracking.
|
||||
"""
|
||||
|
||||
import enum
from datetime import datetime, timedelta
from typing import Dict, Any, Optional

from sqlalchemy import (
    Column, Integer, String, DateTime, Float, Boolean, JSON, Text,
    ForeignKey, Enum, Index, UniqueConstraint,
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
class SubscriptionTier(enum.Enum):
    """Subscription tiers, from least to most capable."""
    FREE = "free"
    BASIC = "basic"
    PRO = "pro"
    ENTERPRISE = "enterprise"
|
||||
|
||||
class UsageStatus(enum.Enum):
    """States for a user's usage standing within a billing period."""
    ACTIVE = "active"
    WARNING = "warning"  # 80% usage
    LIMIT_REACHED = "limit_reached"  # 100% usage
    SUSPENDED = "suspended"
|
||||
|
||||
class APIProvider(enum.Enum):
    """External services tracked for usage and cost.

    The first four are LLM text providers; the rest are search, crawling,
    and media-generation services (VIDEO/IMAGE_EDIT/AUDIO group media
    capabilities rather than a single vendor).
    """
    GEMINI = "gemini"
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    MISTRAL = "mistral"
    TAVILY = "tavily"
    SERPER = "serper"
    METAPHOR = "metaphor"
    FIRECRAWL = "firecrawl"
    STABILITY = "stability"
    EXA = "exa"
    VIDEO = "video"
    IMAGE_EDIT = "image_edit"
    AUDIO = "audio"
|
||||
|
||||
class BillingCycle(enum.Enum):
    """Supported billing cadences for a subscription."""
    MONTHLY = "monthly"
    YEARLY = "yearly"
|
||||
|
||||
class SubscriptionPlan(Base):
    """Defines subscription tiers and their limits.

    One row per purchasable plan. Throughout this table a limit value of
    0 means "unlimited" for that resource.
    """

    __tablename__ = "subscription_plans"

    id = Column(Integer, primary_key=True)
    name = Column(String(50), nullable=False, unique=True)  # human-readable plan name
    tier = Column(Enum(SubscriptionTier), nullable=False)
    price_monthly = Column(Float, nullable=False, default=0.0)  # USD per month
    price_yearly = Column(Float, nullable=False, default=0.0)  # USD per year

    # Unified AI Text Generation Call Limit (applies to all LLM providers: gemini, openai, anthropic, mistral)
    # Note: This column may not exist in older databases - use getattr() when accessing
    ai_text_generation_calls_limit = Column(Integer, default=0, nullable=True) # 0 = unlimited, None if column doesn't exist

    # Legacy per-provider limits (kept for backwards compatibility and analytics)
    gemini_calls_limit = Column(Integer, default=0) # 0 = unlimited (deprecated, use ai_text_generation_calls_limit)
    openai_calls_limit = Column(Integer, default=0) # (deprecated, use ai_text_generation_calls_limit)
    anthropic_calls_limit = Column(Integer, default=0) # (deprecated, use ai_text_generation_calls_limit)
    mistral_calls_limit = Column(Integer, default=0) # (deprecated, use ai_text_generation_calls_limit)

    # Other API Call Limits (non-LLM)
    tavily_calls_limit = Column(Integer, default=0)
    serper_calls_limit = Column(Integer, default=0)
    metaphor_calls_limit = Column(Integer, default=0)
    firecrawl_calls_limit = Column(Integer, default=0)
    stability_calls_limit = Column(Integer, default=0) # Image generation
    exa_calls_limit = Column(Integer, default=0) # Exa neural search
    video_calls_limit = Column(Integer, default=0) # AI video generation
    image_edit_calls_limit = Column(Integer, default=0) # AI image editing
    audio_calls_limit = Column(Integer, default=0) # AI audio generation (text-to-speech)

    # Token Limits (for LLM providers)
    gemini_tokens_limit = Column(Integer, default=0)
    openai_tokens_limit = Column(Integer, default=0)
    anthropic_tokens_limit = Column(Integer, default=0)
    mistral_tokens_limit = Column(Integer, default=0)

    # Cost Limits (in USD)
    monthly_cost_limit = Column(Float, default=0.0) # 0 = unlimited

    # Features
    features = Column(JSON, nullable=True) # JSON list of enabled features

    # Metadata
    description = Column(Text, nullable=True)
    is_active = Column(Boolean, default=True)  # soft-disable a plan without deleting it
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
|
||||
|
||||
class UserSubscription(Base):
    """User's current subscription and billing information.

    One row per user (user_id is unique); the row is updated in place as
    the subscription renews or changes plan.
    """

    __tablename__ = "user_subscriptions"

    id = Column(Integer, primary_key=True)
    user_id = Column(String(100), nullable=False, unique=True)
    plan_id = Column(Integer, ForeignKey('subscription_plans.id'), nullable=False)

    # Billing
    billing_cycle = Column(Enum(BillingCycle), default=BillingCycle.MONTHLY)
    current_period_start = Column(DateTime, nullable=False)
    current_period_end = Column(DateTime, nullable=False)

    # Status
    status = Column(Enum(UsageStatus), default=UsageStatus.ACTIVE)
    is_active = Column(Boolean, default=True)
    auto_renew = Column(Boolean, default=True)

    # Payment
    stripe_customer_id = Column(String(100), nullable=True)
    stripe_subscription_id = Column(String(100), nullable=True)
    payment_method = Column(String(50), nullable=True)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Relationships
    plan = relationship("SubscriptionPlan")
|
||||
|
||||
class APIUsageLog(Base):
    """Detailed log of every API call for billing and monitoring.

    One row per outbound API call. Rows are aggregated per user and
    billing period elsewhere; ``billing_period`` is a "YYYY-MM" string so
    period queries can filter on simple equality.
    """

    __tablename__ = "api_usage_logs"

    id = Column(Integer, primary_key=True)
    user_id = Column(String(100), nullable=False)

    # API Details
    provider = Column(Enum(APIProvider), nullable=False)
    endpoint = Column(String(200), nullable=False)
    method = Column(String(10), nullable=False)
    model_used = Column(String(100), nullable=True) # e.g., "gemini-2.5-flash"

    # Usage Metrics
    tokens_input = Column(Integer, default=0)
    tokens_output = Column(Integer, default=0)
    tokens_total = Column(Integer, default=0)

    # Cost Calculation
    cost_input = Column(Float, default=0.0) # Cost for input tokens
    cost_output = Column(Float, default=0.0) # Cost for output tokens
    cost_total = Column(Float, default=0.0) # Total cost for this call

    # Performance
    response_time = Column(Float, nullable=False) # Response time in seconds
    status_code = Column(Integer, nullable=False)

    # Request Details
    request_size = Column(Integer, nullable=True) # Request size in bytes
    response_size = Column(Integer, nullable=True) # Response size in bytes
    user_agent = Column(String(500), nullable=True)
    ip_address = Column(String(45), nullable=True)

    # Error Handling
    error_message = Column(Text, nullable=True)
    retry_count = Column(Integer, default=0)

    # Metadata
    timestamp = Column(DateTime, default=datetime.utcnow, nullable=False)
    billing_period = Column(String(20), nullable=False) # e.g., "2025-01"

    # Indexes for performance. The original table args declared only the
    # MySQL engine despite the comment promising indexes, so per-user
    # billing queries would scan the whole table; index the common filters.
    __table_args__ = (
        Index('idx_usage_log_user_period', 'user_id', 'billing_period'),
        Index('idx_usage_log_timestamp', 'timestamp'),
        Index('idx_usage_log_provider', 'provider'),
        {'mysql_engine': 'InnoDB'},
    )
|
||||
|
||||
class UsageSummary(Base):
    """Aggregated usage statistics per user per billing period.

    Exactly one row per (user_id, billing_period) pair — enforced by the
    unique constraint below — updated incrementally as usage is logged.
    """

    __tablename__ = "usage_summaries"

    id = Column(Integer, primary_key=True)
    user_id = Column(String(100), nullable=False)
    billing_period = Column(String(20), nullable=False) # e.g., "2025-01"

    # API Call Counts
    gemini_calls = Column(Integer, default=0)
    openai_calls = Column(Integer, default=0)
    anthropic_calls = Column(Integer, default=0)
    mistral_calls = Column(Integer, default=0)
    tavily_calls = Column(Integer, default=0)
    serper_calls = Column(Integer, default=0)
    metaphor_calls = Column(Integer, default=0)
    firecrawl_calls = Column(Integer, default=0)
    stability_calls = Column(Integer, default=0)
    exa_calls = Column(Integer, default=0)
    video_calls = Column(Integer, default=0) # AI video generation
    image_edit_calls = Column(Integer, default=0) # AI image editing
    audio_calls = Column(Integer, default=0) # AI audio generation (text-to-speech)

    # Token Usage
    gemini_tokens = Column(Integer, default=0)
    openai_tokens = Column(Integer, default=0)
    anthropic_tokens = Column(Integer, default=0)
    mistral_tokens = Column(Integer, default=0)

    # Cost Tracking
    gemini_cost = Column(Float, default=0.0)
    openai_cost = Column(Float, default=0.0)
    anthropic_cost = Column(Float, default=0.0)
    mistral_cost = Column(Float, default=0.0)
    tavily_cost = Column(Float, default=0.0)
    serper_cost = Column(Float, default=0.0)
    metaphor_cost = Column(Float, default=0.0)
    firecrawl_cost = Column(Float, default=0.0)
    stability_cost = Column(Float, default=0.0)
    exa_cost = Column(Float, default=0.0)
    video_cost = Column(Float, default=0.0) # AI video generation
    image_edit_cost = Column(Float, default=0.0) # AI image editing
    audio_cost = Column(Float, default=0.0) # AI audio generation (text-to-speech)

    # Totals
    total_calls = Column(Integer, default=0)
    total_tokens = Column(Integer, default=0)
    total_cost = Column(Float, default=0.0)

    # Performance Metrics
    avg_response_time = Column(Float, default=0.0)
    error_rate = Column(Float, default=0.0) # Percentage

    # Status
    usage_status = Column(Enum(UsageStatus), default=UsageStatus.ACTIVE)
    warnings_sent = Column(Integer, default=0) # Number of warning emails sent

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Unique constraint on user_id and billing_period. The original table
    # args claimed this constraint but never declared it, allowing
    # duplicate summary rows per period; enforce it at the schema level.
    __table_args__ = (
        UniqueConstraint('user_id', 'billing_period',
                         name='uq_usage_summary_user_period'),
        {'mysql_engine': 'InnoDB'},
    )
|
||||
|
||||
class APIProviderPricing(Base):
    """Pricing configuration for different API providers.

    One row per (provider, model_name) pair — enforced by the unique
    constraint below — holding per-token, per-request, and per-unit rates
    used to compute call costs.
    """

    __tablename__ = "api_provider_pricing"

    id = Column(Integer, primary_key=True)
    provider = Column(Enum(APIProvider), nullable=False)
    model_name = Column(String(100), nullable=False)

    # Pricing per token (in USD)
    cost_per_input_token = Column(Float, default=0.0)
    cost_per_output_token = Column(Float, default=0.0)
    cost_per_request = Column(Float, default=0.0) # Fixed cost per API call

    # Pricing per unit for non-LLM APIs
    cost_per_search = Column(Float, default=0.0) # For search APIs
    cost_per_image = Column(Float, default=0.0) # For image generation
    cost_per_page = Column(Float, default=0.0) # For web crawling

    # Token conversion (tokens per unit)
    tokens_per_word = Column(Float, default=1.3) # Approximate tokens per word
    tokens_per_character = Column(Float, default=0.25) # Approximate tokens per character

    # Metadata
    description = Column(Text, nullable=True)
    is_active = Column(Boolean, default=True)
    effective_date = Column(DateTime, default=datetime.utcnow)
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Unique constraint on provider and model. The original table args
    # claimed this constraint but never declared it, allowing conflicting
    # price rows for the same model; enforce it at the schema level.
    __table_args__ = (
        UniqueConstraint('provider', 'model_name',
                         name='uq_pricing_provider_model'),
        {'mysql_engine': 'InnoDB'},
    )
|
||||
|
||||
class UsageAlert(Base):
    """Usage alerts and notifications.

    One row per alert raised for a user (e.g. 80% of quota consumed);
    tracks both delivery (is_sent) and acknowledgement (is_read).
    """

    __tablename__ = "usage_alerts"

    id = Column(Integer, primary_key=True)
    user_id = Column(String(100), nullable=False)

    # Alert Details
    alert_type = Column(String(50), nullable=False) # "usage_warning", "limit_reached", "cost_warning"
    threshold_percentage = Column(Integer, nullable=False) # 80, 90, 100
    provider = Column(Enum(APIProvider), nullable=True) # Specific provider or None for overall

    # Alert Content
    title = Column(String(200), nullable=False)
    message = Column(Text, nullable=False)
    severity = Column(String(20), default="info") # "info", "warning", "error"

    # Status
    is_sent = Column(Boolean, default=False)
    sent_at = Column(DateTime, nullable=True)
    is_read = Column(Boolean, default=False)
    read_at = Column(DateTime, nullable=True)

    # Metadata
    billing_period = Column(String(20), nullable=False)  # "YYYY-MM", matches usage tables
    created_at = Column(DateTime, default=datetime.utcnow)
|
||||
|
||||
class BillingHistory(Base):
    """Historical billing records.

    One row per user per billing period, capturing the subscription base
    cost, usage/overage charges, payment state, and a frozen snapshot of
    the usage totals at billing time.
    """

    __tablename__ = "billing_history"

    id = Column(Integer, primary_key=True)
    # External user identifier (string, not a FK).
    user_id = Column(String(100), nullable=False)

    # Billing Period
    billing_period = Column(String(20), nullable=False)  # e.g., "2025-01"
    period_start = Column(DateTime, nullable=False)
    period_end = Column(DateTime, nullable=False)

    # Subscription
    plan_name = Column(String(50), nullable=False)
    base_cost = Column(Float, default=0.0)  # Flat subscription fee for the period

    # Usage Costs
    usage_cost = Column(Float, default=0.0)    # Metered usage within plan limits
    overage_cost = Column(Float, default=0.0)  # Charges beyond plan limits
    total_cost = Column(Float, default=0.0)    # base + usage + overage (precomputed)

    # Payment
    payment_status = Column(String(20), default="pending")  # "pending", "paid", "failed"
    payment_date = Column(DateTime, nullable=True)
    stripe_invoice_id = Column(String(100), nullable=True)  # Link to the Stripe invoice, if any

    # Usage Summary (snapshot) — frozen at billing time; not live counters.
    total_api_calls = Column(Integer, default=0)
    total_tokens = Column(Integer, default=0)
    usage_details = Column(JSON, nullable=True)  # Detailed breakdown

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)
||||
class SubscriptionRenewalHistory(Base):
    """Historical record of subscription renewals and expiration events.

    One row per lifecycle event ("new", "renewal", "upgrade", "downgrade")
    for a user's subscription, preserving a snapshot of the previous plan
    and usage so renewals remain auditable after the live subscription
    row changes.
    """

    __tablename__ = "subscription_renewal_history"

    id = Column(Integer, primary_key=True)
    # External user identifier (string, not a FK).
    user_id = Column(String(100), nullable=False)

    # Subscription Details
    plan_id = Column(Integer, ForeignKey('subscription_plans.id'), nullable=False)
    plan_name = Column(String(50), nullable=False)
    plan_tier = Column(String(20), nullable=False)  # e.g., "free", "basic", "pro", "enterprise"

    # Period Information
    previous_period_start = Column(DateTime, nullable=True)  # Start of the previous period (if renewal)
    previous_period_end = Column(DateTime, nullable=True)    # End of the previous period (when it expired)
    new_period_start = Column(DateTime, nullable=False)      # Start of the new period (when renewed)
    new_period_end = Column(DateTime, nullable=False)        # End of the new period

    # Billing Cycle
    # NOTE(review): BillingCycle is an Enum declared elsewhere in this module —
    # confirm it is in scope here.
    billing_cycle = Column(Enum(BillingCycle), nullable=False)  # "monthly" or "yearly"

    # Renewal Information
    renewal_type = Column(String(20), nullable=False)  # "new", "renewal", "upgrade", "downgrade"
    renewal_count = Column(Integer, default=0)  # Sequential renewal number (1st renewal, 2nd renewal, etc.)

    # Previous Subscription Snapshot (before renewal)
    previous_plan_name = Column(String(50), nullable=True)
    previous_plan_tier = Column(String(20), nullable=True)

    # Usage Summary Before Renewal (snapshot)
    usage_before_renewal = Column(JSON, nullable=True)  # Snapshot of usage before renewal

    # Payment Information
    payment_amount = Column(Float, default=0.0)
    payment_status = Column(String(20), default="pending")  # "pending", "paid", "failed"
    payment_date = Column(DateTime, nullable=True)
    stripe_invoice_id = Column(String(100), nullable=True)

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationships
    plan = relationship("SubscriptionPlan")

    # Table storage options only — no explicit indexes are declared here.
    # NOTE(review): queries by user_id/plan_id will scan without an index;
    # consider adding Index(...) entries in a future migration.
    __table_args__ = (
        {'mysql_engine': 'InnoDB'},
    )
||||
38
backend/models/user_business_info.py
Normal file
38
backend/models/user_business_info.py
Normal file
@@ -0,0 +1,38 @@
|
||||
"""User Business Information Model for ALwrity backend."""
|
||||
from sqlalchemy.ext.declarative import declarative_base
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, func
|
||||
from loguru import logger
|
||||
from datetime import datetime
|
||||
|
||||
Base = declarative_base()
|
||||
|
||||
logger.debug("🔄 Loading UserBusinessInfo model...")
|
||||
|
||||
class UserBusinessInfo(Base):
    """Business profile captured from a user during onboarding.

    Stores a free-text business description plus optional industry,
    audience, and goals metadata; serialized for API responses via
    :meth:`to_dict`.
    """

    __tablename__ = 'user_business_info'

    id = Column(Integer, primary_key=True, index=True)
    # Nullable: rows may exist before a user account is linked.
    user_id = Column(Integer, index=True, nullable=True)
    business_description = Column(Text, nullable=False)  # Only required business field
    industry = Column(String(100), nullable=True)
    target_audience = Column(Text, nullable=True)
    business_goals = Column(Text, nullable=True)
    # Server-side timestamps (func.now() runs in the database, not Python).
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())

    def __repr__(self) -> str:
        """Concise debug representation (id, user, industry only)."""
        return f"<UserBusinessInfo(id={self.id}, user_id={self.user_id}, industry='{self.industry}')>"

    def to_dict(self) -> dict:
        """Serialize the row to a JSON-safe dict.

        Timestamps are emitted as ISO-8601 strings, or None when unset
        (e.g. on an instance not yet flushed to the database).
        """
        return {
            "id": self.id,
            "user_id": self.user_id,
            "business_description": self.business_description,
            "industry": self.industry,
            "target_audience": self.target_audience,
            "business_goals": self.business_goals,
            "created_at": self.created_at.isoformat() if self.created_at else None,
            "updated_at": self.updated_at.isoformat() if self.updated_at else None,
        }
|
||||
|
||||
logger.debug("✅ UserBusinessInfo model loaded successfully!")
|
||||
109
backend/models/website_analysis_monitoring_models.py
Normal file
109
backend/models/website_analysis_monitoring_models.py
Normal file
@@ -0,0 +1,109 @@
|
||||
"""
|
||||
Website Analysis Monitoring Models
|
||||
Database models for tracking website analysis tasks and execution logs.
|
||||
"""
|
||||
|
||||
from sqlalchemy import Column, Integer, String, Text, DateTime, Boolean, JSON, Index, ForeignKey
|
||||
from sqlalchemy.orm import relationship
|
||||
from datetime import datetime
|
||||
|
||||
# Import the same Base from enhanced_strategy_models
|
||||
from models.enhanced_strategy_models import Base
|
||||
|
||||
|
||||
class WebsiteAnalysisTask(Base):
    """
    Model for storing website analysis monitoring tasks.

    Tracks per-user, per-URL website analysis with recurring checks.
    Each task records its scheduling state (``next_check``,
    ``frequency_days``), last outcomes, and a consecutive-failure count
    used to decide when a task ``needs_intervention``.
    """
    __tablename__ = "website_analysis_tasks"

    id = Column(Integer, primary_key=True, index=True)

    # User and URL Identification
    user_id = Column(String(255), nullable=False, index=True)  # Clerk user ID (string)
    website_url = Column(String(500), nullable=False)  # URL to analyze
    task_type = Column(String(50), nullable=False)  # 'user_website' or 'competitor'
    competitor_id = Column(String(255), nullable=True)  # For competitor tasks (domain or identifier)

    # Task Status
    status = Column(String(50), default='active')  # 'active', 'failed', 'paused', 'needs_intervention'

    # Execution Tracking — most recent attempt/success/failure timestamps.
    last_check = Column(DateTime, nullable=True)
    last_success = Column(DateTime, nullable=True)
    last_failure = Column(DateTime, nullable=True)
    failure_reason = Column(Text, nullable=True)

    # Failure Pattern Tracking
    consecutive_failures = Column(Integer, default=0)  # Count of consecutive failures
    failure_pattern = Column(JSON, nullable=True)  # JSON storing failure analysis

    # Scheduling
    next_check = Column(DateTime, nullable=True, index=True)  # Next scheduled check time
    frequency_days = Column(Integer, default=10)  # Recurring frequency in days

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)
    updated_at = Column(DateTime, default=datetime.utcnow, onupdate=datetime.utcnow)

    # Execution Logs Relationship — logs are deleted with their task.
    execution_logs = relationship(
        "WebsiteAnalysisExecutionLog",
        back_populates="task",
        cascade="all, delete-orphan"
    )

    # Indexes for efficient queries
    # Note: Index names match migration script to avoid conflicts
    __table_args__ = (
        Index('idx_website_analysis_tasks_user_url', 'user_id', 'website_url'),
        Index('idx_website_analysis_tasks_user_task_type', 'user_id', 'task_type'),
        Index('idx_website_analysis_tasks_next_check', 'next_check'),
        Index('idx_website_analysis_tasks_status', 'status'),
        Index('idx_website_analysis_tasks_task_type', 'task_type'),
    )

    def __repr__(self):
        """Debug representation showing identity, URL, type, and status."""
        return f"<WebsiteAnalysisTask(id={self.id}, user_id={self.user_id}, url={self.website_url}, type={self.task_type}, status={self.status})>"
||||
class WebsiteAnalysisExecutionLog(Base):
    """
    Model for storing website analysis execution logs.

    Tracks individual execution attempts with results and error details.
    One row per attempt, linked back to its owning
    :class:`WebsiteAnalysisTask` via ``task_id``.
    """
    __tablename__ = "website_analysis_execution_logs"

    id = Column(Integer, primary_key=True, index=True)

    # Task Reference — FK to the owning monitoring task.
    task_id = Column(Integer, ForeignKey("website_analysis_tasks.id"), nullable=False, index=True)

    # Execution Details
    execution_date = Column(DateTime, default=datetime.utcnow, nullable=False)
    status = Column(String(50), nullable=False)  # 'success', 'failed', 'skipped', 'running'

    # Results — exactly one of result_data/error_message is typically set,
    # depending on status.
    result_data = Column(JSON, nullable=True)  # Analysis results (style_analysis, crawl_result, etc.)
    error_message = Column(Text, nullable=True)
    execution_time_ms = Column(Integer, nullable=True)  # Wall-clock duration of the attempt

    # Metadata
    created_at = Column(DateTime, default=datetime.utcnow)

    # Relationship to task (inverse of WebsiteAnalysisTask.execution_logs)
    task = relationship("WebsiteAnalysisTask", back_populates="execution_logs")

    # Indexes for efficient queries
    # Note: Index names match migration script to avoid conflicts
    __table_args__ = (
        Index('idx_website_analysis_execution_logs_task_execution_date', 'task_id', 'execution_date'),
        Index('idx_website_analysis_execution_logs_status', 'status'),
    )

    def __repr__(self):
        """Debug representation showing identity, owning task, status, and time."""
        return f"<WebsiteAnalysisExecutionLog(id={self.id}, task_id={self.task_id}, status={self.status}, execution_date={self.execution_date})>"
|
||||
|
||||
Reference in New Issue
Block a user