Base code
5
backend/routers/__init__.py
Normal file
@@ -0,0 +1,5 @@
"""
Routers Package

FastAPI routers for the ALwrity backend.
"""
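For context, a hedged sketch (not part of this commit) of how these routers might be mounted on the application; the module import paths follow this package, but the app module itself and the include order are assumptions:

from fastapi import FastAPI

# Routers from this package; background_jobs and bing_insights declare their
# own /api/... prefixes, while the Bing OAuth/analytics routers use /bing.
from routers import background_jobs, bing_analytics, bing_insights, bing_oauth

app = FastAPI()
app.include_router(background_jobs.router)
app.include_router(bing_insights.router)
app.include_router(bing_oauth.router)
app.include_router(bing_analytics.router)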
353
backend/routers/background_jobs.py
Normal file
@@ -0,0 +1,353 @@
"""
Background Jobs API Routes

Provides endpoints for managing background jobs like comprehensive Bing insights generation.
"""

from fastapi import APIRouter, HTTPException, Depends, Query
from typing import Dict, Any, Optional
from loguru import logger
from pydantic import BaseModel

from services.background_jobs import background_job_service
from middleware.auth_middleware import get_current_user

router = APIRouter(prefix="/api/background-jobs", tags=["Background Jobs"])


class JobRequest(BaseModel):
    """Request model for creating a job."""
    job_type: str
    data: Dict[str, Any]


class JobResponse(BaseModel):
    """Response model for job operations."""
    success: bool
    job_id: Optional[str] = None
    message: str
    data: Optional[Dict[str, Any]] = None


@router.post("/create")
async def create_background_job(
    request: JobRequest,
    current_user: dict = Depends(get_current_user)
) -> JobResponse:
    """
    Create a new background job.

    Args:
        request: Job creation request
        current_user: Current authenticated user

    Returns:
        Job creation result
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        # Validate job type
        valid_job_types = ['bing_comprehensive_insights', 'bing_data_collection', 'analytics_refresh']
        if request.job_type not in valid_job_types:
            raise HTTPException(status_code=400, detail=f"Invalid job type. Valid types: {valid_job_types}")

        # Create the job
        job_id = background_job_service.create_job(
            job_type=request.job_type,
            user_id=user_id,
            data=request.data
        )

        logger.info(f"Created background job {job_id} for user {user_id}")

        return JobResponse(
            success=True,
            job_id=job_id,
            message="Background job created successfully",
            data={'job_id': job_id}
        )

    except HTTPException:
        # Re-raise client errors (400) unchanged instead of masking them as 500s
        raise
    except Exception as e:
        logger.error(f"Error creating background job: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/status/{job_id}")
async def get_job_status(
    job_id: str,
    current_user: dict = Depends(get_current_user)
) -> JobResponse:
    """
    Get the status of a background job.

    Args:
        job_id: Job ID to check
        current_user: Current authenticated user

    Returns:
        Job status information
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        job_status = background_job_service.get_job_status(job_id)

        if not job_status:
            raise HTTPException(status_code=404, detail="Job not found")

        # Verify the job belongs to the user
        if job_status['user_id'] != user_id:
            raise HTTPException(status_code=403, detail="Access denied")

        return JobResponse(
            success=True,
            message="Job status retrieved successfully",
            data=job_status
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting job status: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/user-jobs")
async def get_user_jobs(
    limit: int = Query(10, description="Maximum number of jobs to return"),
    current_user: dict = Depends(get_current_user)
) -> JobResponse:
    """
    Get recent jobs for the current user.

    Args:
        limit: Maximum number of jobs to return
        current_user: Current authenticated user

    Returns:
        List of the user's jobs
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        jobs = background_job_service.get_user_jobs(user_id, limit)

        return JobResponse(
            success=True,
            message=f"Retrieved {len(jobs)} jobs for user",
            data={'jobs': jobs}
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting user jobs: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/cancel/{job_id}")
async def cancel_job(
    job_id: str,
    current_user: dict = Depends(get_current_user)
) -> JobResponse:
    """
    Cancel a pending background job.

    Args:
        job_id: Job ID to cancel
        current_user: Current authenticated user

    Returns:
        Cancellation result
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        # Check if the job exists and belongs to the user
        job_status = background_job_service.get_job_status(job_id)
        if not job_status:
            raise HTTPException(status_code=404, detail="Job not found")

        if job_status['user_id'] != user_id:
            raise HTTPException(status_code=403, detail="Access denied")

        # Cancel the job
        success = background_job_service.cancel_job(job_id)

        if success:
            return JobResponse(
                success=True,
                message="Job cancelled successfully",
                data={'job_id': job_id}
            )
        else:
            return JobResponse(
                success=False,
                message="Job cannot be cancelled (may be running or completed)",
                data={'job_id': job_id}
            )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error cancelling job: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/bing/comprehensive-insights")
async def create_bing_comprehensive_insights_job(
    site_url: str = Query(..., description="Site URL to analyze"),
    days: int = Query(30, description="Number of days to analyze"),
    current_user: dict = Depends(get_current_user)
) -> JobResponse:
    """
    Create a background job to generate comprehensive Bing insights.

    Args:
        site_url: Site URL to analyze
        days: Number of days to analyze
        current_user: Current authenticated user

    Returns:
        Job creation result
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        # Create the job
        job_id = background_job_service.create_job(
            job_type='bing_comprehensive_insights',
            user_id=user_id,
            data={
                'site_url': site_url,
                'days': days
            }
        )

        logger.info(f"Created Bing comprehensive insights job {job_id} for user {user_id}")

        return JobResponse(
            success=True,
            job_id=job_id,
            message="Bing comprehensive insights job created successfully. Check status for progress.",
            data={
                'job_id': job_id,
                'site_url': site_url,
                'days': days,
                'estimated_time': '2-5 minutes'
            }
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error creating Bing comprehensive insights job: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/bing/data-collection")
async def create_bing_data_collection_job(
    site_url: str = Query(..., description="Site URL to collect data for"),
    days_back: int = Query(30, description="Number of days back to collect"),
    current_user: dict = Depends(get_current_user)
) -> JobResponse:
    """
    Create a background job to collect fresh Bing data from the API.

    Args:
        site_url: Site URL to collect data for
        days_back: Number of days back to collect
        current_user: Current authenticated user

    Returns:
        Job creation result
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        # Create the job
        job_id = background_job_service.create_job(
            job_type='bing_data_collection',
            user_id=user_id,
            data={
                'site_url': site_url,
                'days_back': days_back
            }
        )

        logger.info(f"Created Bing data collection job {job_id} for user {user_id}")

        return JobResponse(
            success=True,
            job_id=job_id,
            message="Bing data collection job created successfully. This will collect fresh data from the Bing API.",
            data={
                'job_id': job_id,
                'site_url': site_url,
                'days_back': days_back,
                'estimated_time': '3-7 minutes'
            }
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error creating Bing data collection job: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.post("/analytics/refresh")
async def create_analytics_refresh_job(
    platforms: str = Query("bing,gsc", description="Comma-separated list of platforms to refresh"),
    current_user: dict = Depends(get_current_user)
) -> JobResponse:
    """
    Create a background job to refresh analytics data for the given platforms.

    Args:
        platforms: Comma-separated list of platforms to refresh
        current_user: Current authenticated user

    Returns:
        Job creation result
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        platform_list = [p.strip() for p in platforms.split(',')]

        # Create the job
        job_id = background_job_service.create_job(
            job_type='analytics_refresh',
            user_id=user_id,
            data={
                'platforms': platform_list
            }
        )

        logger.info(f"Created analytics refresh job {job_id} for user {user_id}")

        return JobResponse(
            success=True,
            job_id=job_id,
            message="Analytics refresh job created successfully. This will refresh data for all connected platforms.",
            data={
                'job_id': job_id,
                'platforms': platform_list,
                'estimated_time': '1-3 minutes'
            }
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error creating analytics refresh job: {e}")
        raise HTTPException(status_code=500, detail=str(e))
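For orientation, a hedged usage sketch of the job API above: create a comprehensive-insights job, then poll /status/{job_id}. The base URL, the bearer token, and the exact keys inside the returned job dict (the job service's schema is not shown in this commit) are assumptions.

import time

import httpx

BASE = "http://localhost:8000"                 # assumed dev server
HEADERS = {"Authorization": "Bearer <token>"}  # assumed auth scheme

# Kick off a comprehensive Bing insights job (estimated 2-5 minutes)
resp = httpx.post(
    f"{BASE}/api/background-jobs/bing/comprehensive-insights",
    params={"site_url": "https://example.com", "days": 30},
    headers=HEADERS,
)
job_id = resp.json()["job_id"]

# Poll the status endpoint until the job reaches a terminal state.
# The 'status' key and its values are assumptions about the job dict.
while True:
    data = httpx.get(f"{BASE}/api/background-jobs/status/{job_id}", headers=HEADERS).json()["data"]
    if data.get("status") in ("completed", "failed", "cancelled"):
        break
    time.sleep(5)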
166
backend/routers/bing_analytics.py
Normal file
@@ -0,0 +1,166 @@
"""
Bing Webmaster Analytics API Routes
Provides endpoints for accessing Bing Webmaster Tools analytics data.
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from typing import Optional, Dict, Any
from loguru import logger

from services.integrations.bing_oauth import BingOAuthService
from middleware.auth_middleware import get_current_user

router = APIRouter(prefix="/bing", tags=["Bing Analytics"])

# Initialize Bing OAuth service
bing_service = BingOAuthService()


@router.get("/query-stats")
async def get_query_stats(
    site_url: str = Query(..., description="The site URL to get query stats for"),
    start_date: Optional[str] = Query(None, description="Start date in YYYY-MM-DD format"),
    end_date: Optional[str] = Query(None, description="End date in YYYY-MM-DD format"),
    page: int = Query(0, description="Page number for pagination"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """Get search query statistics for a Bing Webmaster site."""
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting Bing query stats for user {user_id}, site: {site_url}")

        # Get query stats from the Bing service
        result = bing_service.get_query_stats(
            user_id=user_id,
            site_url=site_url,
            start_date=start_date,
            end_date=end_date,
            page=page
        )

        if "error" in result:
            logger.error(f"Bing query stats error: {result['error']}")
            raise HTTPException(status_code=400, detail=result["error"])

        logger.info(f"Successfully retrieved Bing query stats for {site_url}")
        return {
            "success": True,
            "data": result,
            "site_url": site_url,
            "start_date": start_date,
            "end_date": end_date,
            "page": page
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting Bing query stats: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/user-sites")
async def get_user_sites(
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """Get the list of the user's verified sites from Bing Webmaster."""
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting Bing user sites for user {user_id}")

        # Get user sites from the Bing service
        sites = bing_service.get_user_sites(user_id)

        logger.info(f"Successfully retrieved {len(sites)} Bing sites for user {user_id}")
        return {
            "success": True,
            "sites": sites,
            "total_sites": len(sites)
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting Bing user sites: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/query-stats/summary")
async def get_query_stats_summary(
    site_url: str = Query(..., description="The site URL to get the query stats summary for"),
    start_date: Optional[str] = Query(None, description="Start date in YYYY-MM-DD format"),
    end_date: Optional[str] = Query(None, description="End date in YYYY-MM-DD format"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """Get summarized query statistics for a Bing Webmaster site."""
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting Bing query stats summary for user {user_id}, site: {site_url}")

        # Get query stats from the Bing service
        result = bing_service.get_query_stats(
            user_id=user_id,
            site_url=site_url,
            start_date=start_date,
            end_date=end_date,
            page=0  # Just get the first page for the summary
        )

        if "error" in result:
            logger.error(f"Bing query stats error: {result['error']}")
            raise HTTPException(status_code=400, detail=result["error"])

        # Extract summary data
        query_data = result.get('d', {})
        queries = query_data.get('results', [])

        # Calculate summary statistics
        total_clicks = sum(query.get('Clicks', 0) for query in queries)
        total_impressions = sum(query.get('Impressions', 0) for query in queries)
        total_queries = len(queries)
        avg_ctr = (total_clicks / total_impressions * 100) if total_impressions > 0 else 0
        avg_position = sum(query.get('AvgClickPosition', 0) for query in queries) / total_queries if total_queries > 0 else 0

        # Get the top queries by clicks
        top_queries = sorted(queries, key=lambda x: x.get('Clicks', 0), reverse=True)[:5]

        summary = {
            "total_queries": total_queries,
            "total_clicks": total_clicks,
            "total_impressions": total_impressions,
            "average_ctr": round(avg_ctr, 2),
            "average_position": round(avg_position, 2),
            "top_queries": [
                {
                    "query": q.get('Query', ''),
                    "clicks": q.get('Clicks', 0),
                    "impressions": q.get('Impressions', 0),
                    # Guard against a present-but-zero Impressions value
                    "ctr": round(q.get('Clicks', 0) / (q.get('Impressions') or 1) * 100, 2),
                    "position": q.get('AvgClickPosition', 0)
                }
                for q in top_queries
            ]
        }

        logger.info(f"Successfully created Bing query stats summary for {site_url}")
        return {
            "success": True,
            "summary": summary,
            "site_url": site_url,
            "start_date": start_date,
            "end_date": end_date,
            "raw_data": result
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting Bing query stats summary: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
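The summary endpoint above leans on the OData-style envelope that get_query_stats returns: a 'd' key wrapping a 'results' list of PascalCase records. A minimal worked example of the same aggregation, with made-up numbers:

# Illustrative only: the payload shape the summary logic expects; values are invented.
result = {
    "d": {
        "results": [
            {"Query": "alwrity", "Clicks": 120, "Impressions": 1500, "AvgClickPosition": 2.1},
            {"Query": "ai writer", "Clicks": 40, "Impressions": 2000, "AvgClickPosition": 5.8},
        ]
    }
}

queries = result["d"]["results"]
total_clicks = sum(q["Clicks"] for q in queries)            # 160
total_impressions = sum(q["Impressions"] for q in queries)  # 3500
avg_ctr = total_clicks / total_impressions * 100            # ~4.57%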
453
backend/routers/bing_analytics_storage.py
Normal file
@@ -0,0 +1,453 @@
"""
Bing Analytics Storage API Routes

Provides endpoints for accessing stored Bing analytics data,
historical trends, and performance analysis.
"""

from fastapi import APIRouter, Depends, HTTPException, Query, BackgroundTasks
from typing import Optional, Dict, Any
from datetime import datetime, timedelta
from loguru import logger
import os
import json
from sqlalchemy import and_

from services.bing_analytics_storage_service import BingAnalyticsStorageService
from middleware.auth_middleware import get_current_user

router = APIRouter(prefix="/bing-analytics", tags=["Bing Analytics Storage"])

# Initialize storage service
DATABASE_URL = os.getenv('DATABASE_URL', 'sqlite:///./bing_analytics.db')
storage_service = BingAnalyticsStorageService(DATABASE_URL)


@router.post("/collect-data")
async def collect_bing_data(
    background_tasks: BackgroundTasks,
    site_url: str = Query(..., description="Site URL to collect data for"),
    days_back: int = Query(30, description="Number of days back to collect data"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Collect and store Bing analytics data for a site.
    This endpoint triggers data collection from the Bing API and stores it in the database.
    """
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Starting Bing data collection for user {user_id}, site: {site_url}")

        # Run data collection in the background
        background_tasks.add_task(
            storage_service.collect_and_store_data,
            user_id=user_id,
            site_url=site_url,
            days_back=days_back
        )

        return {
            "success": True,
            "message": f"Bing data collection started for {site_url}",
            "site_url": site_url,
            "days_back": days_back,
            "status": "collecting"
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error starting Bing data collection: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/summary")
async def get_analytics_summary(
    site_url: str = Query(..., description="Site URL to get analytics for"),
    days: int = Query(30, description="Number of days for the analytics summary"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get a comprehensive analytics summary for a site over a specified period.
    """
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting analytics summary for user {user_id}, site: {site_url}, days: {days}")

        summary = storage_service.get_analytics_summary(
            user_id=user_id,
            site_url=site_url,
            days=days
        )

        if 'error' in summary:
            raise HTTPException(status_code=404, detail=summary['error'])

        return {
            "success": True,
            "data": summary,
            "site_url": site_url,
            "period_days": days
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting analytics summary: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/daily-metrics")
async def get_daily_metrics(
    site_url: str = Query(..., description="Site URL to get daily metrics for"),
    days: int = Query(30, description="Number of days to retrieve"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get daily metrics for a site over a specified period.
    """
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting daily metrics for user {user_id}, site: {site_url}, days: {days}")

        # Calculate date range
        end_date = datetime.now()
        start_date = end_date - timedelta(days=days)

        db = storage_service._get_db_session()
        try:
            # Get daily metrics
            daily_metrics = db.query(storage_service.BingDailyMetrics).filter(
                and_(
                    storage_service.BingDailyMetrics.user_id == user_id,
                    storage_service.BingDailyMetrics.site_url == site_url,
                    storage_service.BingDailyMetrics.metric_date >= start_date,
                    storage_service.BingDailyMetrics.metric_date <= end_date
                )
            ).order_by(storage_service.BingDailyMetrics.metric_date).all()
        finally:
            # Always release the session, even if the query fails
            db.close()

        # Format response
        metrics_data = []
        for metric in daily_metrics:
            metrics_data.append({
                "date": metric.metric_date.isoformat(),
                "total_clicks": metric.total_clicks,
                "total_impressions": metric.total_impressions,
                "total_queries": metric.total_queries,
                "avg_ctr": metric.avg_ctr,
                "avg_position": metric.avg_position,
                "clicks_change": metric.clicks_change,
                "impressions_change": metric.impressions_change,
                "ctr_change": metric.ctr_change,
                "top_queries": json.loads(metric.top_queries) if metric.top_queries else [],
                "collected_at": metric.collected_at.isoformat()
            })

        return {
            "success": True,
            "data": metrics_data,
            "site_url": site_url,
            "period_days": days,
            "metrics_count": len(metrics_data)
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting daily metrics: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/top-queries")
async def get_top_queries(
    site_url: str = Query(..., description="Site URL to get top queries for"),
    days: int = Query(30, description="Number of days to analyze"),
    limit: int = Query(50, description="Number of top queries to return"),
    sort_by: str = Query("clicks", description="Sort by: clicks, impressions, or ctr"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get the top-performing queries for a site over a specified period.
    """
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        if sort_by not in ["clicks", "impressions", "ctr"]:
            raise HTTPException(status_code=400, detail="sort_by must be 'clicks', 'impressions', or 'ctr'")

        logger.info(f"Getting top queries for user {user_id}, site: {site_url}, sort_by: {sort_by}")

        # Calculate date range
        end_date = datetime.now()
        start_date = end_date - timedelta(days=days)

        db = storage_service._get_db_session()
        try:
            # Get raw query data
            query_stats = db.query(storage_service.BingQueryStats).filter(
                and_(
                    storage_service.BingQueryStats.user_id == user_id,
                    storage_service.BingQueryStats.site_url == site_url,
                    storage_service.BingQueryStats.query_date >= start_date,
                    storage_service.BingQueryStats.query_date <= end_date
                )
            ).all()
        finally:
            db.close()

        if not query_stats:
            return {
                "success": True,
                "data": [],
                "message": "No query data found for the specified period"
            }

        # Aggregate the per-day rows into one record per query
        query_aggregates = {}
        for stat in query_stats:
            query = stat.query
            if query not in query_aggregates:
                query_aggregates[query] = {
                    "query": query,
                    "total_clicks": 0,
                    "total_impressions": 0,
                    "avg_ctr": 0,
                    "avg_position": 0,
                    "position_days": 0,
                    "days_appeared": 0,
                    "category": stat.category,
                    "is_brand": stat.is_brand_query
                }

            agg = query_aggregates[query]
            agg["total_clicks"] += stat.clicks
            agg["total_impressions"] += stat.impressions
            agg["days_appeared"] += 1

            # Running average of click position, counting only days that
            # actually recorded a position (zero means "no data", and mixing
            # zeros in would drag the average down)
            if stat.avg_click_position > 0:
                agg["position_days"] += 1
                agg["avg_position"] = (
                    agg["avg_position"] * (agg["position_days"] - 1) +
                    stat.avg_click_position
                ) / agg["position_days"]

        # Calculate CTR for each query
        for query_data in query_aggregates.values():
            query_data["avg_ctr"] = (
                query_data["total_clicks"] / query_data["total_impressions"] * 100
            ) if query_data["total_impressions"] > 0 else 0

        # Map the public sort option onto the aggregate field name
        # ("ctr" is stored as an average, not a total)
        sort_field = "avg_ctr" if sort_by == "ctr" else f"total_{sort_by}"

        # Sort and limit results
        sorted_queries = sorted(
            query_aggregates.values(),
            key=lambda x: x[sort_field],
            reverse=True
        )[:limit]

        return {
            "success": True,
            "data": sorted_queries,
            "site_url": site_url,
            "period_days": days,
            "sort_by": sort_by,
            "total_queries": len(query_aggregates),
            "returned_queries": len(sorted_queries)
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting top queries: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/query-details")
async def get_query_details(
    site_url: str = Query(..., description="Site URL"),
    query: str = Query(..., description="Specific query to analyze"),
    days: int = Query(30, description="Number of days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get detailed performance data for a specific query.
    """
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting query details for user {user_id}, query: {query}")

        # Calculate date range
        end_date = datetime.now()
        start_date = end_date - timedelta(days=days)

        db = storage_service._get_db_session()
        try:
            # Get query stats
            query_stats = db.query(storage_service.BingQueryStats).filter(
                and_(
                    storage_service.BingQueryStats.user_id == user_id,
                    storage_service.BingQueryStats.site_url == site_url,
                    storage_service.BingQueryStats.query == query,
                    storage_service.BingQueryStats.query_date >= start_date,
                    storage_service.BingQueryStats.query_date <= end_date
                )
            ).order_by(storage_service.BingQueryStats.query_date).all()
        finally:
            db.close()

        if not query_stats:
            return {
                "success": True,
                "data": None,
                "message": f"No data found for query: {query}"
            }

        # Calculate summary statistics
        total_clicks = sum(stat.clicks for stat in query_stats)
        total_impressions = sum(stat.impressions for stat in query_stats)
        avg_ctr = (total_clicks / total_impressions * 100) if total_impressions > 0 else 0

        # Average position over days that actually recorded a position
        positioned = [stat.avg_click_position for stat in query_stats if stat.avg_click_position > 0]
        avg_position = sum(positioned) / len(positioned) if positioned else 0

        # Daily performance data
        daily_data = []
        for stat in query_stats:
            daily_data.append({
                "date": stat.query_date.isoformat(),
                "clicks": stat.clicks,
                "impressions": stat.impressions,
                "ctr": stat.ctr,
                "avg_click_position": stat.avg_click_position,
                "avg_impression_position": stat.avg_impression_position
            })

        return {
            "success": True,
            "data": {
                "query": query,
                "period_days": days,
                "total_clicks": total_clicks,
                "total_impressions": total_impressions,
                "avg_ctr": round(avg_ctr, 2),
                "avg_position": round(avg_position, 2),
                "days_appeared": len(query_stats),
                "category": query_stats[0].category,
                "is_brand_query": query_stats[0].is_brand_query,
                "daily_performance": daily_data
            }
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting query details: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/sites")
async def get_user_sites(
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get the list of sites with stored Bing analytics data.
    """
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting user sites for user {user_id}")

        db = storage_service._get_db_session()
        try:
            # Get the unique sites for the user
            sites = db.query(storage_service.BingDailyMetrics.site_url).filter(
                storage_service.BingDailyMetrics.user_id == user_id
            ).distinct().all()
        finally:
            db.close()

        sites_data = []
        for site_tuple in sites:
            site_url = site_tuple[0]

            # Get the latest metrics for each site
            summary = storage_service.get_analytics_summary(user_id, site_url, 7)

            sites_data.append({
                "site_url": site_url,
                "latest_summary": summary if 'error' not in summary else None,
                "has_data": 'error' not in summary
            })

        return {
            "success": True,
            "data": sites_data,
            "total_sites": len(sites_data)
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting user sites: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.post("/generate-daily-metrics")
async def generate_daily_metrics(
    background_tasks: BackgroundTasks,
    site_url: str = Query(..., description="Site URL to generate metrics for"),
    target_date: Optional[str] = Query(None, description="Target date (YYYY-MM-DD), defaults to yesterday"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Generate daily metrics for a specific date from stored raw data.
    """
    try:
        user_id = current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        # Parse the target date
        if target_date:
            try:
                target_dt = datetime.strptime(target_date, '%Y-%m-%d')
            except ValueError:
                raise HTTPException(status_code=400, detail="Invalid date format. Use YYYY-MM-DD")
        else:
            target_dt = None

        logger.info(f"Generating daily metrics for user {user_id}, site: {site_url}, date: {target_dt}")

        # Run in the background
        background_tasks.add_task(
            storage_service.generate_daily_metrics,
            user_id=user_id,
            site_url=site_url,
            target_date=target_dt
        )

        return {
            "success": True,
            "message": f"Daily metrics generation started for {site_url}",
            "site_url": site_url,
            "target_date": target_dt.isoformat() if target_dt else "yesterday"
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error generating daily metrics: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
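The /top-queries aggregation above uses an incremental (running) mean for click position: after n counted observations with mean m, adding x gives (m * (n - 1) + x) / n, where n already includes x. A standalone sketch verifying the update rule:

positions = [3.0, 5.0, 4.0]
avg, n = 0.0, 0
for x in positions:
    n += 1
    avg = (avg * (n - 1) + x) / n  # same update as the endpoint's position_days logic

# The running mean matches the ordinary mean (4.0 here)
assert abs(avg - sum(positions) / len(positions)) < 1e-9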
219
backend/routers/bing_insights.py
Normal file
@@ -0,0 +1,219 @@
"""
Bing Insights API Routes

Provides endpoints for accessing Bing Webmaster insights and recommendations.
"""

from fastapi import APIRouter, Depends, HTTPException, Query
from typing import Dict, Any
from datetime import datetime
from loguru import logger
import os

from services.analytics.insights.bing_insights_service import BingInsightsService
from middleware.auth_middleware import get_current_user

router = APIRouter(prefix="/api/bing-insights", tags=["Bing Insights"])

# Initialize insights service
DATABASE_URL = os.getenv('DATABASE_URL', 'sqlite:///./bing_analytics.db')
insights_service = BingInsightsService(DATABASE_URL)


@router.get("/performance")
async def get_performance_insights(
    site_url: str = Query(..., description="Site URL to analyze"),
    days: int = Query(30, description="Number of days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get performance insights, including trends and patterns, for a Bing Webmaster site.
    """
    try:
        user_id = current_user.get("id") or current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting performance insights for user {user_id}, site: {site_url}")

        insights = insights_service.get_performance_insights(user_id, site_url, days)

        if 'error' in insights:
            raise HTTPException(status_code=404, detail=insights['error'])

        return {
            "success": True,
            "data": insights,
            "site_url": site_url,
            "analysis_period": f"{days} days",
            "generated_at": datetime.now().isoformat()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting performance insights: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/seo")
async def get_seo_insights(
    site_url: str = Query(..., description="Site URL to analyze"),
    days: int = Query(30, description="Number of days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get SEO-specific insights and opportunities for a Bing Webmaster site.
    """
    try:
        user_id = current_user.get("id") or current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting SEO insights for user {user_id}, site: {site_url}")

        insights = insights_service.get_seo_insights(user_id, site_url, days)

        if 'error' in insights:
            raise HTTPException(status_code=404, detail=insights['error'])

        return {
            "success": True,
            "data": insights,
            "site_url": site_url,
            "analysis_period": f"{days} days",
            "generated_at": datetime.now().isoformat()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting SEO insights: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/competitive")
async def get_competitive_insights(
    site_url: str = Query(..., description="Site URL to analyze"),
    days: int = Query(30, description="Number of days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get competitive analysis and market insights for a Bing Webmaster site.
    """
    try:
        user_id = current_user.get("id") or current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting competitive insights for user {user_id}, site: {site_url}")

        insights = insights_service.get_competitive_insights(user_id, site_url, days)

        if 'error' in insights:
            raise HTTPException(status_code=404, detail=insights['error'])

        return {
            "success": True,
            "data": insights,
            "site_url": site_url,
            "analysis_period": f"{days} days",
            "generated_at": datetime.now().isoformat()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting competitive insights: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/recommendations")
async def get_actionable_recommendations(
    site_url: str = Query(..., description="Site URL to analyze"),
    days: int = Query(30, description="Number of days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get actionable recommendations for improving search performance.
    """
    try:
        user_id = current_user.get("id") or current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting actionable recommendations for user {user_id}, site: {site_url}")

        recommendations = insights_service.get_actionable_recommendations(user_id, site_url, days)

        if 'error' in recommendations:
            raise HTTPException(status_code=404, detail=recommendations['error'])

        return {
            "success": True,
            "data": recommendations,
            "site_url": site_url,
            "analysis_period": f"{days} days",
            "generated_at": datetime.now().isoformat()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting actionable recommendations: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")


@router.get("/comprehensive")
async def get_comprehensive_insights(
    site_url: str = Query(..., description="Site URL to analyze"),
    days: int = Query(30, description="Number of days to analyze"),
    current_user: Dict[str, Any] = Depends(get_current_user)
):
    """
    Get comprehensive insights including performance, SEO, competitive, and recommendations.
    """
    try:
        user_id = current_user.get("id") or current_user.get("user_id")
        if not user_id:
            raise HTTPException(status_code=401, detail="User not authenticated")

        logger.info(f"Getting comprehensive insights for user {user_id}, site: {site_url}")

        # Get all types of insights
        performance = insights_service.get_performance_insights(user_id, site_url, days)
        seo = insights_service.get_seo_insights(user_id, site_url, days)
        competitive = insights_service.get_competitive_insights(user_id, site_url, days)
        recommendations = insights_service.get_actionable_recommendations(user_id, site_url, days)

        # Check for errors
        errors = []
        if 'error' in performance:
            errors.append(f"Performance insights: {performance['error']}")
        if 'error' in seo:
            errors.append(f"SEO insights: {seo['error']}")
        if 'error' in competitive:
            errors.append(f"Competitive insights: {competitive['error']}")
        if 'error' in recommendations:
            errors.append(f"Recommendations: {recommendations['error']}")

        if errors:
            logger.warning(f"Some insights failed: {errors}")

        return {
            "success": True,
            "data": {
                "performance": performance,
                "seo": seo,
                "competitive": competitive,
                "recommendations": recommendations
            },
            "site_url": site_url,
            "analysis_period": f"{days} days",
            "generated_at": datetime.now().isoformat(),
            "warnings": errors if errors else None
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting comprehensive insights: {e}")
        raise HTTPException(status_code=500, detail=f"Internal server error: {str(e)}")
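A hedged client-side sketch for the /comprehensive endpoint above: it returns HTTP 200 with a warnings list when only some insight types fail, so callers should inspect warnings rather than rely on the status code alone. The base URL and auth header are assumptions.

import httpx

resp = httpx.get(
    "http://localhost:8000/api/bing-insights/comprehensive",
    params={"site_url": "https://example.com", "days": 30},
    headers={"Authorization": "Bearer <token>"},  # assumed auth scheme
)
payload = resp.json()

# Partial failures arrive as warnings, not HTTP errors
for warning in payload.get("warnings") or []:
    print(f"partial insight failure: {warning}")

performance = payload["data"]["performance"]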
341
backend/routers/bing_oauth.py
Normal file
@@ -0,0 +1,341 @@
"""
Bing Webmaster OAuth2 Routes
Handles the Bing Webmaster Tools OAuth2 authentication flow.
"""

from fastapi import APIRouter, Depends, HTTPException, status, Query
from fastapi.responses import HTMLResponse
from typing import Dict, Any, Optional
from datetime import datetime, timezone
from pydantic import BaseModel
from loguru import logger

from services.integrations.bing_oauth import BingOAuthService
from middleware.auth_middleware import get_current_user

router = APIRouter(prefix="/bing", tags=["Bing Webmaster OAuth"])

# Initialize OAuth service
oauth_service = BingOAuthService()


# Pydantic Models
class BingOAuthResponse(BaseModel):
    auth_url: str
    state: str


class BingCallbackResponse(BaseModel):
    success: bool
    message: str
    access_token: Optional[str] = None
    expires_in: Optional[int] = None


class BingStatusResponse(BaseModel):
    connected: bool
    sites: list
    total_sites: int


@router.get("/auth/url", response_model=BingOAuthResponse)
async def get_bing_auth_url(
    user: Dict[str, Any] = Depends(get_current_user)
):
    """Get the Bing Webmaster OAuth2 authorization URL."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User ID not found.")

        auth_data = oauth_service.generate_authorization_url(user_id)
        if not auth_data:
            raise HTTPException(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                detail="Bing Webmaster OAuth is not properly configured. Please check that the BING_CLIENT_ID and BING_CLIENT_SECRET environment variables are set with valid Bing Webmaster application credentials."
            )

        return BingOAuthResponse(**auth_data)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error generating Bing Webmaster OAuth URL: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to generate Bing Webmaster OAuth URL."
        )


@router.get("/callback")
async def handle_bing_callback(
    code: str = Query(..., description="Authorization code from Bing"),
    state: str = Query(..., description="State parameter for security"),
    error: Optional[str] = Query(None, description="Error from Bing OAuth")
):
    """Handle the Bing Webmaster OAuth2 callback."""
    try:
        if error:
            logger.error(f"Bing Webmaster OAuth error: {error}")
            # f-string: literal braces are doubled; {error} is interpolated
            html_content = f"""
            <!DOCTYPE html>
            <html>
            <head>
                <title>Bing Webmaster Connection Failed</title>
                <script>
                    // Send error message to the opener/parent window
                    window.onload = function() {{
                        (window.opener || window.parent).postMessage({{
                            type: 'BING_OAUTH_ERROR',
                            success: false,
                            error: '{error}'
                        }}, '*');
                        window.close();
                    }};
                </script>
            </head>
            <body>
                <h1>Connection Failed</h1>
                <p>There was an error connecting to Bing Webmaster Tools.</p>
                <p>You can close this window and try again.</p>
            </body>
            </html>
            """
            return HTMLResponse(content=html_content, headers={
                "Cross-Origin-Opener-Policy": "unsafe-none",
                "Cross-Origin-Embedder-Policy": "unsafe-none"
            })

        if not code or not state:
            logger.error("Missing code or state parameter in Bing Webmaster OAuth callback")
            # Plain string (no interpolation), so braces are single here
            html_content = """
            <!DOCTYPE html>
            <html>
            <head>
                <title>Bing Webmaster Connection Failed</title>
                <script>
                    // Send error message to the opener/parent window
                    window.onload = function() {
                        (window.opener || window.parent).postMessage({
                            type: 'BING_OAUTH_ERROR',
                            success: false,
                            error: 'Missing parameters'
                        }, '*');
                        window.close();
                    };
                </script>
            </head>
            <body>
                <h1>Connection Failed</h1>
                <p>Missing required parameters.</p>
                <p>You can close this window and try again.</p>
            </body>
            </html>
            """
            return HTMLResponse(content=html_content, headers={
                "Cross-Origin-Opener-Policy": "unsafe-none",
                "Cross-Origin-Embedder-Policy": "unsafe-none"
            })

        # Exchange the code for a token
        result = oauth_service.handle_oauth_callback(code, state)

        if not result or not result.get('success'):
            logger.error("Failed to exchange Bing Webmaster OAuth code for token")
            html_content = """
            <!DOCTYPE html>
            <html>
            <head>
                <title>Bing Webmaster Connection Failed</title>
                <script>
                    // Send error message to the opener/parent window
                    window.onload = function() {
                        (window.opener || window.parent).postMessage({
                            type: 'BING_OAUTH_ERROR',
                            success: false,
                            error: 'Token exchange failed'
                        }, '*');
                        window.close();
                    };
                </script>
            </head>
            <body>
                <h1>Connection Failed</h1>
                <p>Failed to exchange authorization code for access token.</p>
                <p>You can close this window and try again.</p>
            </body>
            </html>
            """
            return HTMLResponse(content=html_content, headers={
                "Cross-Origin-Opener-Policy": "unsafe-none",
                "Cross-Origin-Embedder-Policy": "unsafe-none"
            })

        # Create a Bing insights task immediately after a successful connection
        try:
            from services.database import SessionLocal
            from services.platform_insights_monitoring_service import create_platform_insights_task

            # Get user_id from state (stored during the OAuth flow)
            db = SessionLocal()
            try:
                # Look up user_id in the Bing OAuth service's state table
                import sqlite3
                with sqlite3.connect(oauth_service.db_path) as conn:
                    cursor = conn.cursor()
                    cursor.execute('SELECT user_id FROM bing_oauth_states WHERE state = ?', (state,))
                    result_db = cursor.fetchone()
                    if result_db:
                        user_id = result_db[0]

                        # Don't fetch site_url here; it requires API calls.
                        # The executor will fetch it when the task runs (weekly),
                        # so create the insights task without a site_url.
                        task_result = create_platform_insights_task(
                            user_id=user_id,
                            platform='bing',
                            site_url=None,  # Will be fetched by the executor when the task runs
                            db=db
                        )

                        if task_result.get('success'):
                            logger.info(f"Created Bing insights task for user {user_id}")
                        else:
                            logger.warning(f"Failed to create Bing insights task: {task_result.get('error')}")
            finally:
                db.close()
        except Exception as e:
            # Non-critical: log but don't fail the OAuth callback
            logger.warning(f"Failed to create Bing insights task after OAuth: {e}")

        # Return a success page with a postMessage script
        html_content = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <title>Bing Webmaster Connection Successful</title>
            <script>
                // Send success message to the opener/parent window
                window.onload = function() {{
                    (window.opener || window.parent).postMessage({{
                        type: 'BING_OAUTH_SUCCESS',
                        success: true,
                        accessToken: '{result.get('access_token', '')}',
                        expiresIn: {result.get('expires_in', 0)}
                    }}, '*');
                    window.close();
                }};
            </script>
        </head>
        <body>
            <h1>Connection Successful!</h1>
            <p>Your Bing Webmaster Tools account has been connected successfully.</p>
            <p>You can close this window now.</p>
        </body>
        </html>
        """

        return HTMLResponse(content=html_content, headers={
            "Cross-Origin-Opener-Policy": "unsafe-none",
            "Cross-Origin-Embedder-Policy": "unsafe-none"
        })

    except Exception as e:
        logger.error(f"Error handling Bing Webmaster OAuth callback: {e}")
        html_content = """
        <!DOCTYPE html>
        <html>
        <head>
            <title>Bing Webmaster Connection Failed</title>
            <script>
                // Send error message to the opener/parent window
                window.onload = function() {
                    (window.opener || window.parent).postMessage({
                        type: 'BING_OAUTH_ERROR',
                        success: false,
                        error: 'Callback error'
                    }, '*');
                    window.close();
                };
            </script>
        </head>
        <body>
            <h1>Connection Failed</h1>
            <p>An unexpected error occurred during connection.</p>
            <p>You can close this window and try again.</p>
        </body>
        </html>
        """
        return HTMLResponse(content=html_content, headers={
            "Cross-Origin-Opener-Policy": "unsafe-none",
            "Cross-Origin-Embedder-Policy": "unsafe-none"
        })


@router.get("/status", response_model=BingStatusResponse)
async def get_bing_oauth_status(
    user: Dict[str, Any] = Depends(get_current_user)
):
    """Get the Bing Webmaster OAuth connection status."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User ID not found.")

        status_data = oauth_service.get_connection_status(user_id)
        return BingStatusResponse(**status_data)

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting Bing Webmaster OAuth status: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to get Bing Webmaster connection status."
        )


@router.delete("/disconnect/{token_id}")
async def disconnect_bing_site(
    token_id: int,
    user: Dict[str, Any] = Depends(get_current_user)
):
    """Disconnect a Bing Webmaster site."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User ID not found.")

        success = oauth_service.revoke_token(user_id, token_id)
        if not success:
            raise HTTPException(
                status_code=status.HTTP_404_NOT_FOUND,
                detail="Bing Webmaster token not found or could not be disconnected."
            )

        return {"success": True, "message": "Bing Webmaster site disconnected successfully."}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error disconnecting Bing Webmaster site: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to disconnect Bing Webmaster site."
        )


@router.get("/health")
async def bing_oauth_health():
    """Bing Webmaster OAuth health check."""
    return {
        "status": "healthy",
        "service": "bing_oauth",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "version": "1.0.0"
    }


@router.post("/purge-expired")
async def purge_expired_bing_tokens(
    user: Dict[str, Any] = Depends(get_current_user)
):
    """Purge the user's expired/inactive Bing tokens to avoid refresh loops before reauth."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User ID not found.")

        deleted = oauth_service.purge_expired_tokens(user_id)
        return {
            "success": True,
            "purged": deleted,
            "message": f"Purged {deleted} expired/inactive Bing tokens"
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error purging expired Bing tokens: {e}")
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail="Failed to purge expired Bing tokens."
        )
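A hedged end-to-end sketch of the connect flow from a script, using only the routes defined above. The base URL and auth header are assumptions, and in the real app the popup's postMessage events drive the handoff instead of the manual pause used here.

import webbrowser

import httpx

BASE = "http://localhost:8000"                 # assumed dev server
HEADERS = {"Authorization": "Bearer <token>"}  # assumed auth scheme

# 1. Ask the backend for the authorization URL and open it in a browser
auth = httpx.get(f"{BASE}/bing/auth/url", headers=HEADERS).json()
webbrowser.open(auth["auth_url"])  # user grants access; /bing/callback fires

# 2. Once the popup reports success, confirm the connection
input("Press Enter after the popup reports success...")
status = httpx.get(f"{BASE}/bing/status", headers=HEADERS).json()
print(status["connected"], status["total_sites"])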
1
backend/routers/content_planning.py
Normal file
@@ -0,0 +1 @@

50
backend/routers/error_logging.py
Normal file
@@ -0,0 +1,50 @@
"""
Error Logging Router
Provides endpoints for frontend error reporting.
"""
from fastapi import APIRouter, HTTPException
from pydantic import BaseModel
from typing import Optional
import logging

router = APIRouter()
logger = logging.getLogger(__name__)


class ErrorLogRequest(BaseModel):
    error_message: str
    error_stack: Optional[str] = None
    component_stack: Optional[str] = None
    user_id: Optional[str] = None
    url: Optional[str] = None
    user_agent: Optional[str] = None
    timestamp: Optional[str] = None
    additional_info: Optional[dict] = None


@router.post("/log-error")
async def log_frontend_error(error_log: ErrorLogRequest):
    """
    Log errors from the frontend for monitoring and debugging.
    """
    try:
        # Log the error with all details
        logger.error(
            f"Frontend Error: {error_log.error_message}",
            extra={
                "error_stack": error_log.error_stack,
                "component_stack": error_log.component_stack,
                "user_id": error_log.user_id,
                "url": error_log.url,
                "user_agent": error_log.user_agent,
                "timestamp": error_log.timestamp,
                "additional_info": error_log.additional_info
            }
        )

        return {
            "status": "success",
            "message": "Error logged successfully"
        }
    except Exception as e:
        logger.error(f"Failed to log frontend error: {str(e)}")
        raise HTTPException(status_code=500, detail="Failed to log error")
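For reference, a hedged example of the payload a frontend error boundary might POST, matching ErrorLogRequest above. The router is created without a prefix, so the final mount path (shown here as /api/log-error) depends on how it is included in the app and is an assumption.

import httpx

httpx.post(
    "http://localhost:8000/api/log-error",  # mount path is an assumption
    json={
        "error_message": "TypeError: x is undefined",
        "error_stack": "at render (App.tsx:42)",
        "url": "https://app.example.com/dashboard",
        "user_agent": "Mozilla/5.0",
        "timestamp": "2024-06-01T12:00:00Z",
    },
)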
110
backend/routers/frontend_env_manager.py
Normal file
@@ -0,0 +1,110 @@
"""
Frontend Environment Manager

Handles updating frontend environment variables (for development purposes).
"""

from fastapi import APIRouter, HTTPException, Depends
from pydantic import BaseModel
from typing import Dict, Any, Optional
from loguru import logger
import os
from pathlib import Path

router = APIRouter(
    prefix="/api/frontend-env",
    tags=["Frontend Environment"],
)

class FrontendEnvUpdateRequest(BaseModel):
    key: str
    value: str
    description: Optional[str] = None

@router.post("/update")
async def update_frontend_env(request: FrontendEnvUpdateRequest):
    """
    Update a frontend environment variable (for development purposes).
    This writes to the frontend/.env file.
    """
    try:
        # Resolve the frontend directory relative to this file
        backend_dir = Path(__file__).parent.parent
        frontend_dir = backend_dir.parent / "frontend"
        env_path = frontend_dir / ".env"

        # Ensure the frontend directory exists
        if not frontend_dir.exists():
            raise HTTPException(status_code=404, detail="Frontend directory not found")

        # Read the existing .env file
        env_lines = []
        if env_path.exists():
            with open(env_path, 'r') as f:
                env_lines = f.readlines()

        # Update or add the environment variable
        key_found = False
        updated_lines = []
        for line in env_lines:
            if line.startswith(f"{request.key}="):
                updated_lines.append(f"{request.key}={request.value}\n")
                key_found = True
            else:
                updated_lines.append(line)

        if not key_found:
            # Add a comment if a description was provided
            if request.description:
                updated_lines.append(f"# {request.description}\n")
            updated_lines.append(f"{request.key}={request.value}\n")

        # Write back to the .env file
        with open(env_path, 'w') as f:
            f.writelines(updated_lines)

        logger.info(f"Updated frontend environment variable: {request.key}")

        return {
            "success": True,
            "message": f"Environment variable {request.key} updated successfully",
            "key": request.key,
            "value": request.value
        }

    except HTTPException:
        # Re-raise HTTP errors (e.g., the 404 above) instead of masking them as 500s
        raise
    except Exception as e:
        logger.error(f"Error updating frontend environment: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to update environment variable: {str(e)}")

@router.get("/status")
async def get_frontend_env_status():
    """
    Get the status of the frontend environment file.
    """
    try:
        # Resolve the frontend directory relative to this file
        backend_dir = Path(__file__).parent.parent
        frontend_dir = backend_dir.parent / "frontend"
        env_path = frontend_dir / ".env"

        if not env_path.exists():
            return {
                "exists": False,
                "path": str(env_path),
                "message": "Frontend .env file does not exist"
            }

        # Read and return basic info about the .env file
        with open(env_path, 'r') as f:
            content = f.read()

        return {
            "exists": True,
            "path": str(env_path),
            "size": len(content),
            "lines": len(content.splitlines()),
            "message": "Frontend .env file exists"
        }

    except Exception as e:
        logger.error(f"Error checking frontend environment status: {e}")
        raise HTTPException(status_code=500, detail=f"Failed to check environment status: {str(e)}")
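For illustration, a sketch of the upsert semantics of /api/frontend-env/update, assuming the router is included in the app and frontend/.env is writable; the key and values are hypothetical:

# Illustrative only (not part of this commit).
from fastapi import FastAPI
from fastapi.testclient import TestClient

from routers.frontend_env_manager import router

app = FastAPI()
app.include_router(router)
client = TestClient(app)

# The first call appends "# Backend base URL" plus the key=value line;
# the second call rewrites the existing line in place.
for value in ("http://localhost:8000", "https://api.example.com"):
    resp = client.post("/api/frontend-env/update", json={
        "key": "REACT_APP_API_URL",  # hypothetical key
        "value": value,
        "description": "Backend base URL",
    })
    assert resp.json()["success"] is True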
318
backend/routers/gsc_auth.py
Normal file
@@ -0,0 +1,318 @@
"""Google Search Console Authentication Router for ALwrity."""

from fastapi import APIRouter, HTTPException, Depends, Query
from fastapi.responses import HTMLResponse, JSONResponse
from typing import Dict, List, Any, Optional
from pydantic import BaseModel
from datetime import datetime
from loguru import logger
import os

from services.gsc_service import GSCService
from middleware.auth_middleware import get_current_user

# Initialize router
router = APIRouter(prefix="/gsc", tags=["Google Search Console"])

# Initialize GSC service
gsc_service = GSCService()

# Pydantic models
class GSCAnalyticsRequest(BaseModel):
    site_url: str
    start_date: Optional[str] = None
    end_date: Optional[str] = None

class GSCStatusResponse(BaseModel):
    connected: bool
    sites: Optional[List[Dict[str, Any]]] = None
    last_sync: Optional[str] = None

@router.get("/auth/url")
async def get_gsc_auth_url(user: dict = Depends(get_current_user)):
    """Get Google Search Console OAuth authorization URL."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Generating GSC OAuth URL for user: {user_id}")

        auth_url = gsc_service.get_oauth_url(user_id)

        logger.info(f"GSC OAuth URL generated successfully for user: {user_id}")
        logger.info(f"OAuth URL: {auth_url[:100]}...")
        return {"auth_url": auth_url}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error generating GSC OAuth URL: {e}")
        raise HTTPException(status_code=500, detail=f"Error generating OAuth URL: {str(e)}")

@router.get("/callback")
async def handle_gsc_callback(
    code: str = Query(..., description="Authorization code from Google"),
    state: str = Query(..., description="State parameter for security")
):
    """Handle Google Search Console OAuth callback.

    For a smoother UX when opened in a popup, this endpoint returns a tiny HTML
    page that posts a completion message back to the opener window and closes
    itself. The JSON payload is still included in the page for debugging.
    """
    try:
        logger.info(f"Handling GSC OAuth callback with code: {code[:10]}...")

        success = gsc_service.handle_oauth_callback(code, state)

        if success:
            logger.info("GSC OAuth callback handled successfully")

            # Create a GSC insights task immediately after a successful connection
            try:
                from services.database import SessionLocal
                from services.platform_insights_monitoring_service import create_platform_insights_task

                # Get user_id from state (stored during the OAuth flow).
                # Note: handle_oauth_callback already deleted the state, so we
                # need to get the user_id from the most recent credentials.
                db = SessionLocal()
                try:
                    # Get user_id from the most recent GSC credentials (since state was deleted)
                    import sqlite3
                    with sqlite3.connect(gsc_service.db_path) as conn:
                        cursor = conn.cursor()
                        cursor.execute('SELECT user_id FROM gsc_credentials ORDER BY updated_at DESC LIMIT 1')
                        result = cursor.fetchone()
                        if result:
                            user_id = result[0]

                            # Don't fetch site_url here - it requires API calls.
                            # The executor will fetch it when the task runs (weekly),
                            # so create the insights task without a site_url.
                            task_result = create_platform_insights_task(
                                user_id=user_id,
                                platform='gsc',
                                site_url=None,  # Will be fetched by the executor when the task runs
                                db=db
                            )

                            if task_result.get('success'):
                                logger.info(f"Created GSC insights task for user {user_id}")
                            else:
                                logger.warning(f"Failed to create GSC insights task: {task_result.get('error')}")
                finally:
                    db.close()
            except Exception as e:
                # Non-critical: log but don't fail the OAuth callback
                logger.warning(f"Failed to create GSC insights task after OAuth: {e}", exc_info=True)

            html = """
            <!doctype html>
            <html>
              <head><meta charset="utf-8"><title>GSC Connected</title></head>
              <body style="font-family: sans-serif; padding: 24px;">
                <p>Connection Successful. You can close this window.</p>
                <script>
                  try { window.opener && window.opener.postMessage({ type: 'GSC_AUTH_SUCCESS' }, '*'); } catch (e) {}
                  try { window.close(); } catch (e) {}
                </script>
              </body>
            </html>
            """
            return HTMLResponse(content=html)
        else:
            logger.error("Failed to handle GSC OAuth callback")
            html = """
            <!doctype html>
            <html>
              <head><meta charset="utf-8"><title>GSC Connection Failed</title></head>
              <body style="font-family: sans-serif; padding: 24px;">
                <p>Connection Failed. Please close this window and try again.</p>
                <script>
                  try { window.opener && window.opener.postMessage({ type: 'GSC_AUTH_ERROR' }, '*'); } catch (e) {}
                </script>
              </body>
            </html>
            """
            return HTMLResponse(status_code=400, content=html)

    except Exception as e:
        logger.error(f"Error handling GSC OAuth callback: {e}")
        html = f"""
        <!doctype html>
        <html>
          <head><meta charset="utf-8"><title>GSC Connection Error</title></head>
          <body style="font-family: sans-serif; padding: 24px;">
            <p>Connection Error. Please close this window and try again.</p>
            <pre style="white-space: pre-wrap;">{str(e)}</pre>
            <script>
              try {{ window.opener && window.opener.postMessage({{ type: 'GSC_AUTH_ERROR' }}, '*'); }} catch (e) {{}}
            </script>
          </body>
        </html>
        """
        return HTMLResponse(status_code=500, content=html)

@router.get("/sites")
async def get_gsc_sites(user: dict = Depends(get_current_user)):
    """Get user's Google Search Console sites."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Getting GSC sites for user: {user_id}")

        sites = gsc_service.get_site_list(user_id)

        logger.info(f"Retrieved {len(sites)} GSC sites for user: {user_id}")
        return {"sites": sites}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting GSC sites: {e}")
        raise HTTPException(status_code=500, detail=f"Error getting sites: {str(e)}")

@router.post("/analytics")
async def get_gsc_analytics(
    request: GSCAnalyticsRequest,
    user: dict = Depends(get_current_user)
):
    """Get Google Search Console analytics data."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Getting GSC analytics for user: {user_id}, site: {request.site_url}")

        analytics = gsc_service.get_search_analytics(
            user_id=user_id,
            site_url=request.site_url,
            start_date=request.start_date,
            end_date=request.end_date
        )

        logger.info(f"Retrieved GSC analytics for user: {user_id}")
        return analytics

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting GSC analytics: {e}")
        raise HTTPException(status_code=500, detail=f"Error getting analytics: {str(e)}")

@router.get("/sitemaps/{site_url:path}")
async def get_gsc_sitemaps(
    site_url: str,
    user: dict = Depends(get_current_user)
):
    """Get sitemaps for a specific site."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Getting GSC sitemaps for user: {user_id}, site: {site_url}")

        sitemaps = gsc_service.get_sitemaps(user_id, site_url)

        logger.info(f"Retrieved {len(sitemaps)} sitemaps for user: {user_id}")
        return {"sitemaps": sitemaps}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error getting GSC sitemaps: {e}")
        raise HTTPException(status_code=500, detail=f"Error getting sitemaps: {str(e)}")

@router.get("/status")
async def get_gsc_status(user: dict = Depends(get_current_user)):
    """Get GSC connection status for user."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Checking GSC status for user: {user_id}")

        # Check if the user has credentials
        credentials = gsc_service.load_user_credentials(user_id)
        connected = credentials is not None

        sites = []
        if connected:
            try:
                sites = gsc_service.get_site_list(user_id)
            except Exception as e:
                logger.warning(f"Could not get sites for user {user_id}: {e}")
                # Clear incomplete credentials and mark as disconnected
                gsc_service.clear_incomplete_credentials(user_id)
                connected = False

        status_response = GSCStatusResponse(
            connected=connected,
            sites=sites if connected else None,
            last_sync=None  # Could be enhanced to track last sync time
        )

        logger.info(f"GSC status checked for user: {user_id}, connected: {connected}")
        return status_response

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error checking GSC status: {e}")
        raise HTTPException(status_code=500, detail=f"Error checking status: {str(e)}")

@router.delete("/disconnect")
async def disconnect_gsc(user: dict = Depends(get_current_user)):
    """Disconnect user's Google Search Console account."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Disconnecting GSC for user: {user_id}")

        success = gsc_service.revoke_user_access(user_id)

        if success:
            logger.info(f"GSC disconnected successfully for user: {user_id}")
            return {"success": True, "message": "GSC disconnected successfully"}
        else:
            logger.error(f"Failed to disconnect GSC for user: {user_id}")
            raise HTTPException(status_code=500, detail="Failed to disconnect GSC")

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error disconnecting GSC: {e}")
        raise HTTPException(status_code=500, detail=f"Error disconnecting GSC: {str(e)}")

@router.post("/clear-incomplete")
async def clear_incomplete_credentials(user: dict = Depends(get_current_user)):
    """Clear incomplete GSC credentials that are missing required fields."""
    try:
        user_id = user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Clearing incomplete GSC credentials for user: {user_id}")

        success = gsc_service.clear_incomplete_credentials(user_id)

        if success:
            logger.info(f"Incomplete GSC credentials cleared for user: {user_id}")
            return {"success": True, "message": "Incomplete credentials cleared"}
        else:
            logger.error(f"Failed to clear incomplete credentials for user: {user_id}")
            raise HTTPException(status_code=500, detail="Failed to clear incomplete credentials")

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error clearing incomplete credentials: {e}")
        raise HTTPException(status_code=500, detail=f"Error clearing incomplete credentials: {str(e)}")

@router.get("/health")
async def gsc_health_check():
    """Health check for GSC service."""
    try:
        logger.info("GSC health check requested")
        return {
            "status": "healthy",
            "service": "Google Search Console API",
            "timestamp": datetime.utcnow().isoformat() + "Z"
        }
    except Exception as e:
        logger.error(f"GSC health check failed: {e}")
        raise HTTPException(status_code=500, detail="GSC service unhealthy")
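A sketch of the client sequence for the GSC connection flow above. The base URL and bearer token are placeholders; the popup/postMessage handling happens in the browser, not in this sketch, and the "siteUrl" key follows Google's Search Console API rather than anything defined in this commit:

# Illustrative only (not part of this commit).
import webbrowser
import requests

BASE = "http://localhost:8000"
HEADERS = {"Authorization": "Bearer <token>"}

# 1. Ask the backend for the Google consent URL and open it; Google
#    redirects to /gsc/callback, which stores credentials and notifies
#    the opener window via postMessage.
auth_url = requests.get(f"{BASE}/gsc/auth/url", headers=HEADERS).json()["auth_url"]
webbrowser.open(auth_url)

# 2. After the popup reports GSC_AUTH_SUCCESS, poll connection status.
status = requests.get(f"{BASE}/gsc/status", headers=HEADERS).json()
if status["connected"]:
    print("Sites:", [s.get("siteUrl") for s in (status.get("sites") or [])])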
1033
backend/routers/image_studio.py
Normal file
File diff suppressed because it is too large
728
backend/routers/linkedin.py
Normal file
@@ -0,0 +1,728 @@
"""
LinkedIn Content Generation Router

FastAPI router for LinkedIn content generation endpoints.
Provides comprehensive LinkedIn content creation functionality with
proper error handling, monitoring, and documentation.
"""

from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks, Request
from fastapi.responses import JSONResponse
from typing import Dict, Any, Optional
import time
from loguru import logger
from pathlib import Path

from sqlalchemy.orm import Session

from models.linkedin_models import (
    LinkedInPostRequest, LinkedInArticleRequest, LinkedInCarouselRequest,
    LinkedInVideoScriptRequest, LinkedInCommentResponseRequest,
    LinkedInPostResponse, LinkedInArticleResponse, LinkedInCarouselResponse,
    LinkedInVideoScriptResponse, LinkedInCommentResponseResult
)
from services.linkedin_service import LinkedInService
from services.subscription.monitoring_middleware import DatabaseAPIMonitor
from services.database import get_db as get_db_dependency
from middleware.auth_middleware import get_current_user
from utils.text_asset_tracker import save_and_track_text_content

# Initialize the LinkedIn service instance
linkedin_service = LinkedInService()

# Initialize router
router = APIRouter(
    prefix="/api/linkedin",
    tags=["LinkedIn Content Generation"],
    responses={
        404: {"description": "Not found"},
        422: {"description": "Validation error"},
        500: {"description": "Internal server error"}
    }
)

# Initialize monitoring
monitor = DatabaseAPIMonitor()


# Use the proper database dependency from services.database
get_db = get_db_dependency


async def log_api_request(request: Request, db: Session, duration: float, status_code: int):
    """Log API request to database for monitoring."""
    try:
        await monitor.add_request(
            db=db,
            path=str(request.url.path),
            method=request.method,
            status_code=status_code,
            duration=duration,
            user_id=request.headers.get("X-User-ID"),
            request_size=len(await request.body()) if request.method == "POST" else 0,
            user_agent=request.headers.get("User-Agent"),
            ip_address=request.client.host if request.client else None
        )
        db.commit()
    except Exception as e:
        logger.error(f"Failed to log API request: {str(e)}")


@router.get("/health", summary="Health Check", description="Check LinkedIn service health")
async def health_check():
    """Health check endpoint for LinkedIn service."""
    return {
        "status": "healthy",
        "service": "linkedin_content_generation",
        "version": "1.0.0",
        "timestamp": time.time()
    }


@router.post(
    "/generate-post",
    response_model=LinkedInPostResponse,
    summary="Generate LinkedIn Post",
    description="""
    Generate a professional LinkedIn post with AI-powered content creation.

    Features:
    - Research-backed content using multiple search engines
    - Industry-specific optimization
    - Hashtag generation and optimization
    - Call-to-action suggestions
    - Engagement prediction
    - Multiple tone and style options

    The service conducts research on the specified topic and industry,
    then generates engaging content optimized for LinkedIn's algorithm.
    """
)
async def generate_post(
    request: LinkedInPostRequest,
    background_tasks: BackgroundTasks,
    http_request: Request,
    db: Session = Depends(get_db),
    current_user: Optional[Dict[str, Any]] = Depends(get_current_user)
):
    """Generate a LinkedIn post based on the provided parameters."""
    start_time = time.time()

    try:
        logger.info(f"Received LinkedIn post generation request for topic: {request.topic}")

        # Validate request
        if not request.topic.strip():
            raise HTTPException(status_code=422, detail="Topic cannot be empty")

        if not request.industry.strip():
            raise HTTPException(status_code=422, detail="Industry cannot be empty")

        # Extract user_id
        user_id = None
        if current_user:
            user_id = str(current_user.get('id', '') or current_user.get('sub', ''))
        if not user_id:
            user_id = http_request.headers.get("X-User-ID") or http_request.headers.get("Authorization")

        # Generate post content
        response = await linkedin_service.generate_linkedin_post(request)

        # Log successful request
        duration = time.time() - start_time
        background_tasks.add_task(
            log_api_request, http_request, db, duration, 200
        )

        if not response.success:
            raise HTTPException(status_code=500, detail=response.error)

        # Save and track text content (non-blocking)
        if user_id and response.data and response.data.content:
            try:
                # Combine all text content
                text_content = response.data.content
                if response.data.call_to_action:
                    text_content += f"\n\nCall to Action: {response.data.call_to_action}"
                if response.data.hashtags:
hashtag_text = " ".join([f"#{h.hashtag}" if isinstance(h, dict) else f"#{h.get('hashtag', '')}" for h in response.data.hashtags])
|
||||
text_content += f"\n\nHashtags: {hashtag_text}"
|
||||
|
||||
save_and_track_text_content(
|
||||
db=db,
|
||||
user_id=user_id,
|
||||
content=text_content,
|
||||
source_module="linkedin_writer",
|
||||
title=f"LinkedIn Post: {request.topic[:80]}",
|
||||
description=f"LinkedIn post for {request.industry} industry",
|
||||
prompt=f"Topic: {request.topic}\nIndustry: {request.industry}\nTone: {request.tone}",
|
||||
tags=["linkedin", "post", request.industry.lower().replace(' ', '_')],
|
||||
asset_metadata={
|
||||
"post_type": request.post_type.value if hasattr(request.post_type, 'value') else str(request.post_type),
|
||||
"tone": request.tone.value if hasattr(request.tone, 'value') else str(request.tone),
|
||||
"character_count": response.data.character_count,
|
||||
"hashtag_count": len(response.data.hashtags),
|
||||
"grounding_enabled": response.data.grounding_enabled if hasattr(response.data, 'grounding_enabled') else False
|
||||
},
|
||||
subdirectory="posts"
|
||||
)
|
||||
except Exception as track_error:
|
||||
logger.warning(f"Failed to track LinkedIn post asset: {track_error}")
|
||||
|
||||
logger.info(f"Successfully generated LinkedIn post in {duration:.2f} seconds")
|
||||
return response
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
duration = time.time() - start_time
|
||||
logger.error(f"Error generating LinkedIn post: {str(e)}")
|
||||
|
||||
# Log failed request
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 500
|
||||
)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to generate LinkedIn post: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/generate-article",
|
||||
response_model=LinkedInArticleResponse,
|
||||
summary="Generate LinkedIn Article",
|
||||
description="""
|
||||
Generate a comprehensive LinkedIn article with AI-powered content creation.
|
||||
|
||||
Features:
|
||||
- Long-form content generation
|
||||
- Research-backed insights and data
|
||||
- SEO optimization for LinkedIn
|
||||
- Section structuring and organization
|
||||
- Image placement suggestions
|
||||
- Reading time estimation
|
||||
- Multiple research sources integration
|
||||
|
||||
Perfect for thought leadership and in-depth industry analysis.
|
||||
"""
|
||||
)
|
||||
async def generate_article(
|
||||
request: LinkedInArticleRequest,
|
||||
background_tasks: BackgroundTasks,
|
||||
http_request: Request,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: Optional[Dict[str, Any]] = Depends(get_current_user)
|
||||
):
|
||||
"""Generate a LinkedIn article based on the provided parameters."""
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
logger.info(f"Received LinkedIn article generation request for topic: {request.topic}")
|
||||
|
||||
# Validate request
|
||||
if not request.topic.strip():
|
||||
raise HTTPException(status_code=422, detail="Topic cannot be empty")
|
||||
|
||||
if not request.industry.strip():
|
||||
raise HTTPException(status_code=422, detail="Industry cannot be empty")
|
||||
|
||||
# Extract user_id
|
||||
user_id = None
|
||||
if current_user:
|
||||
user_id = str(current_user.get('id', '') or current_user.get('sub', ''))
|
||||
if not user_id:
|
||||
user_id = http_request.headers.get("X-User-ID") or http_request.headers.get("Authorization")
|
||||
|
||||
# Generate article content
|
||||
response = await linkedin_service.generate_linkedin_article(request)
|
||||
|
||||
# Log successful request
|
||||
duration = time.time() - start_time
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 200
|
||||
)
|
||||
|
||||
if not response.success:
|
||||
raise HTTPException(status_code=500, detail=response.error)
|
||||
|
||||
# Save and track text content (non-blocking)
|
||||
if user_id and response.data:
|
||||
try:
|
||||
# Combine article content
|
||||
text_content = f"# {response.data.title}\n\n"
|
||||
text_content += response.data.content
|
||||
|
||||
if response.data.sections:
|
||||
text_content += "\n\n## Sections:\n"
|
||||
for section in response.data.sections:
|
||||
if isinstance(section, dict):
|
||||
text_content += f"\n### {section.get('heading', 'Section')}\n{section.get('content', '')}\n"
|
||||
|
||||
if response.data.seo_metadata:
|
||||
text_content += f"\n\n## SEO Metadata\n{response.data.seo_metadata}\n"
|
||||
|
||||
save_and_track_text_content(
|
||||
db=db,
|
||||
user_id=user_id,
|
||||
content=text_content,
|
||||
source_module="linkedin_writer",
|
||||
title=f"LinkedIn Article: {response.data.title[:80] if response.data.title else request.topic[:80]}",
|
||||
description=f"LinkedIn article for {request.industry} industry",
|
||||
prompt=f"Topic: {request.topic}\nIndustry: {request.industry}\nTone: {request.tone}\nWord Count: {request.word_count}",
|
||||
tags=["linkedin", "article", request.industry.lower().replace(' ', '_')],
|
||||
asset_metadata={
|
||||
"tone": request.tone.value if hasattr(request.tone, 'value') else str(request.tone),
|
||||
"word_count": response.data.word_count,
|
||||
"reading_time": response.data.reading_time,
|
||||
"section_count": len(response.data.sections) if response.data.sections else 0,
|
||||
"grounding_enabled": response.data.grounding_enabled if hasattr(response.data, 'grounding_enabled') else False
|
||||
},
|
||||
subdirectory="articles",
|
||||
file_extension=".md"
|
||||
)
|
||||
except Exception as track_error:
|
||||
logger.warning(f"Failed to track LinkedIn article asset: {track_error}")
|
||||
|
||||
logger.info(f"Successfully generated LinkedIn article in {duration:.2f} seconds")
|
||||
return response
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
duration = time.time() - start_time
|
||||
logger.error(f"Error generating LinkedIn article: {str(e)}")
|
||||
|
||||
# Log failed request
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 500
|
||||
)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to generate LinkedIn article: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/generate-carousel",
|
||||
response_model=LinkedInCarouselResponse,
|
||||
summary="Generate LinkedIn Carousel",
|
||||
description="""
|
||||
Generate a LinkedIn carousel post with multiple slides.
|
||||
|
||||
Features:
|
||||
- Multi-slide content generation
|
||||
- Visual hierarchy optimization
|
||||
- Story arc development
|
||||
- Design guidelines and suggestions
|
||||
- Cover and CTA slide options
|
||||
- Professional slide structuring
|
||||
|
||||
Ideal for step-by-step guides, tips, and visual storytelling.
|
||||
"""
|
||||
)
|
||||
async def generate_carousel(
|
||||
request: LinkedInCarouselRequest,
|
||||
background_tasks: BackgroundTasks,
|
||||
http_request: Request,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: Optional[Dict[str, Any]] = Depends(get_current_user)
|
||||
):
|
||||
"""Generate a LinkedIn carousel based on the provided parameters."""
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
logger.info(f"Received LinkedIn carousel generation request for topic: {request.topic}")
|
||||
|
||||
# Validate request
|
||||
if not request.topic.strip():
|
||||
raise HTTPException(status_code=422, detail="Topic cannot be empty")
|
||||
|
||||
if not request.industry.strip():
|
||||
raise HTTPException(status_code=422, detail="Industry cannot be empty")
|
||||
|
||||
if request.slide_count < 3 or request.slide_count > 15:
|
||||
raise HTTPException(status_code=422, detail="Slide count must be between 3 and 15")
|
||||
|
||||
# Extract user_id
|
||||
user_id = None
|
||||
if current_user:
|
||||
user_id = str(current_user.get('id', '') or current_user.get('sub', ''))
|
||||
if not user_id:
|
||||
user_id = http_request.headers.get("X-User-ID") or http_request.headers.get("Authorization")
|
||||
|
||||
# Generate carousel content
|
||||
response = await linkedin_service.generate_linkedin_carousel(request)
|
||||
|
||||
# Log successful request
|
||||
duration = time.time() - start_time
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 200
|
||||
)
|
||||
|
||||
if not response.success:
|
||||
raise HTTPException(status_code=500, detail=response.error)
|
||||
|
||||
# Save and track text content (non-blocking)
|
||||
if user_id and response.data:
|
||||
try:
|
||||
# Combine carousel content
|
||||
text_content = f"# {response.data.title}\n\n"
|
||||
for slide in response.data.slides:
|
||||
text_content += f"\n## Slide {slide.slide_number}: {slide.title}\n{slide.content}\n"
|
||||
if slide.visual_elements:
|
||||
text_content += f"\nVisual Elements: {', '.join(slide.visual_elements)}\n"
|
||||
|
||||
save_and_track_text_content(
|
||||
db=db,
|
||||
user_id=user_id,
|
||||
content=text_content,
|
||||
source_module="linkedin_writer",
|
||||
title=f"LinkedIn Carousel: {response.data.title[:80] if response.data.title else request.topic[:80]}",
|
||||
description=f"LinkedIn carousel for {request.industry} industry",
|
||||
prompt=f"Topic: {request.topic}\nIndustry: {request.industry}\nSlides: {getattr(request, 'number_of_slides', request.slide_count if hasattr(request, 'slide_count') else 5)}",
|
||||
tags=["linkedin", "carousel", request.industry.lower().replace(' ', '_')],
|
||||
asset_metadata={
|
||||
"slide_count": len(response.data.slides),
|
||||
"has_cover": response.data.cover_slide is not None,
|
||||
"has_cta": response.data.cta_slide is not None
|
||||
},
|
||||
subdirectory="carousels",
|
||||
file_extension=".md"
|
||||
)
|
||||
except Exception as track_error:
|
||||
logger.warning(f"Failed to track LinkedIn carousel asset: {track_error}")
|
||||
|
||||
logger.info(f"Successfully generated LinkedIn carousel in {duration:.2f} seconds")
|
||||
return response
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
duration = time.time() - start_time
|
||||
logger.error(f"Error generating LinkedIn carousel: {str(e)}")
|
||||
|
||||
# Log failed request
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 500
|
||||
)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to generate LinkedIn carousel: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/generate-video-script",
|
||||
response_model=LinkedInVideoScriptResponse,
|
||||
summary="Generate LinkedIn Video Script",
|
||||
description="""
|
||||
Generate a LinkedIn video script optimized for engagement.
|
||||
|
||||
Features:
|
||||
- Attention-grabbing hooks
|
||||
- Structured storytelling
|
||||
- Visual cue suggestions
|
||||
- Caption generation
|
||||
- Thumbnail text recommendations
|
||||
- Timing and pacing guidance
|
||||
|
||||
Perfect for creating professional video content for LinkedIn.
|
||||
"""
|
||||
)
|
||||
async def generate_video_script(
|
||||
request: LinkedInVideoScriptRequest,
|
||||
background_tasks: BackgroundTasks,
|
||||
http_request: Request,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: Optional[Dict[str, Any]] = Depends(get_current_user)
|
||||
):
|
||||
"""Generate a LinkedIn video script based on the provided parameters."""
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
logger.info(f"Received LinkedIn video script generation request for topic: {request.topic}")
|
||||
|
||||
# Validate request
|
||||
if not request.topic.strip():
|
||||
raise HTTPException(status_code=422, detail="Topic cannot be empty")
|
||||
|
||||
if not request.industry.strip():
|
||||
raise HTTPException(status_code=422, detail="Industry cannot be empty")
|
||||
|
||||
video_duration = getattr(request, 'video_duration', getattr(request, 'video_length', 60))
|
||||
if video_duration < 15 or video_duration > 300:
|
||||
raise HTTPException(status_code=422, detail="Video length must be between 15 and 300 seconds")
|
||||
|
||||
# Extract user_id
|
||||
user_id = None
|
||||
if current_user:
|
||||
user_id = str(current_user.get('id', '') or current_user.get('sub', ''))
|
||||
if not user_id:
|
||||
user_id = http_request.headers.get("X-User-ID") or http_request.headers.get("Authorization")
|
||||
|
||||
# Generate video script content
|
||||
response = await linkedin_service.generate_linkedin_video_script(request)
|
||||
|
||||
# Log successful request
|
||||
duration = time.time() - start_time
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 200
|
||||
)
|
||||
|
||||
if not response.success:
|
||||
raise HTTPException(status_code=500, detail=response.error)
|
||||
|
||||
# Save and track text content (non-blocking)
|
||||
if user_id and response.data:
|
||||
try:
|
||||
# Combine video script content
|
||||
text_content = f"# Video Script: {request.topic}\n\n"
|
||||
text_content += f"## Hook\n{response.data.hook}\n\n"
|
||||
text_content += "## Main Content\n"
|
||||
for scene in response.data.main_content:
|
||||
if isinstance(scene, dict):
|
||||
text_content += f"\n### Scene {scene.get('scene_number', '')}\n"
|
||||
text_content += f"{scene.get('content', '')}\n"
|
||||
if scene.get('duration'):
|
||||
text_content += f"Duration: {scene.get('duration')}s\n"
|
||||
if scene.get('visual_notes'):
|
||||
text_content += f"Visual Notes: {scene.get('visual_notes')}\n"
|
||||
text_content += f"\n## Conclusion\n{response.data.conclusion}\n"
|
||||
if response.data.captions:
|
||||
text_content += f"\n## Captions\n" + "\n".join(response.data.captions) + "\n"
|
||||
if response.data.thumbnail_suggestions:
|
||||
text_content += f"\n## Thumbnail Suggestions\n" + "\n".join(response.data.thumbnail_suggestions) + "\n"
|
||||
|
||||
save_and_track_text_content(
|
||||
db=db,
|
||||
user_id=user_id,
|
||||
content=text_content,
|
||||
source_module="linkedin_writer",
|
||||
title=f"LinkedIn Video Script: {request.topic[:80]}",
|
||||
description=f"LinkedIn video script for {request.industry} industry",
|
||||
prompt=f"Topic: {request.topic}\nIndustry: {request.industry}\nDuration: {video_duration}s",
|
||||
tags=["linkedin", "video_script", request.industry.lower().replace(' ', '_')],
|
||||
asset_metadata={
|
||||
"video_duration": video_duration,
|
||||
"scene_count": len(response.data.main_content),
|
||||
"has_captions": bool(response.data.captions)
|
||||
},
|
||||
subdirectory="video_scripts",
|
||||
file_extension=".md"
|
||||
)
|
||||
except Exception as track_error:
|
||||
logger.warning(f"Failed to track LinkedIn video script asset: {track_error}")
|
||||
|
||||
logger.info(f"Successfully generated LinkedIn video script in {duration:.2f} seconds")
|
||||
return response
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
duration = time.time() - start_time
|
||||
logger.error(f"Error generating LinkedIn video script: {str(e)}")
|
||||
|
||||
# Log failed request
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 500
|
||||
)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to generate LinkedIn video script: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.post(
|
||||
"/generate-comment-response",
|
||||
response_model=LinkedInCommentResponseResult,
|
||||
summary="Generate LinkedIn Comment Response",
|
||||
description="""
|
||||
Generate professional responses to LinkedIn comments.
|
||||
|
||||
Features:
|
||||
- Context-aware responses
|
||||
- Multiple response type options
|
||||
- Tone optimization
|
||||
- Brand voice customization
|
||||
- Alternative response suggestions
|
||||
- Engagement goal targeting
|
||||
|
||||
Helps maintain professional engagement and build relationships.
|
||||
"""
|
||||
)
|
||||
async def generate_comment_response(
|
||||
request: LinkedInCommentResponseRequest,
|
||||
background_tasks: BackgroundTasks,
|
||||
http_request: Request,
|
||||
db: Session = Depends(get_db),
|
||||
current_user: Optional[Dict[str, Any]] = Depends(get_current_user)
|
||||
):
|
||||
"""Generate a LinkedIn comment response based on the provided parameters."""
|
||||
start_time = time.time()
|
||||
|
||||
try:
|
||||
logger.info("Received LinkedIn comment response generation request")
|
||||
|
||||
# Validate request
|
||||
original_comment = getattr(request, 'original_comment', getattr(request, 'comment', ''))
|
||||
post_context = getattr(request, 'post_context', getattr(request, 'original_post', ''))
|
||||
|
||||
if not original_comment.strip():
|
||||
raise HTTPException(status_code=422, detail="Original comment cannot be empty")
|
||||
|
||||
if not post_context.strip():
|
||||
raise HTTPException(status_code=422, detail="Post context cannot be empty")
|
||||
|
||||
# Extract user_id
|
||||
user_id = None
|
||||
if current_user:
|
||||
user_id = str(current_user.get('id', '') or current_user.get('sub', ''))
|
||||
if not user_id:
|
||||
user_id = http_request.headers.get("X-User-ID") or http_request.headers.get("Authorization")
|
||||
|
||||
# Generate comment response
|
||||
response = await linkedin_service.generate_linkedin_comment_response(request)
|
||||
|
||||
# Log successful request
|
||||
duration = time.time() - start_time
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 200
|
||||
)
|
||||
|
||||
if not response.success:
|
||||
raise HTTPException(status_code=500, detail=response.error)
|
||||
|
||||
# Save and track text content (non-blocking)
|
||||
if user_id and hasattr(response, 'response') and response.response:
|
||||
try:
|
||||
text_content = f"# Comment Response\n\n"
|
||||
text_content += f"## Original Comment\n{original_comment}\n\n"
|
||||
text_content += f"## Post Context\n{post_context}\n\n"
|
||||
text_content += f"## Generated Response\n{response.response}\n"
|
||||
if hasattr(response, 'alternatives') and response.alternatives:
|
||||
text_content += f"\n## Alternative Responses\n"
|
||||
for i, alt in enumerate(response.alternatives, 1):
|
||||
text_content += f"\n### Alternative {i}\n{alt}\n"
|
||||
|
||||
save_and_track_text_content(
|
||||
db=db,
|
||||
user_id=user_id,
|
||||
content=text_content,
|
||||
source_module="linkedin_writer",
|
||||
title=f"LinkedIn Comment Response: {original_comment[:60]}",
|
||||
description=f"LinkedIn comment response for {request.industry} industry",
|
||||
prompt=f"Original Comment: {original_comment}\nPost Context: {post_context}\nIndustry: {request.industry}",
|
||||
tags=["linkedin", "comment_response", request.industry.lower().replace(' ', '_')],
|
||||
asset_metadata={
|
||||
"response_length": getattr(request, 'response_length', 'medium'),
|
||||
"tone": request.tone.value if hasattr(request.tone, 'value') else str(request.tone),
|
||||
"has_alternatives": hasattr(response, 'alternatives') and bool(response.alternatives)
|
||||
},
|
||||
subdirectory="comment_responses",
|
||||
file_extension=".md"
|
||||
)
|
||||
except Exception as track_error:
|
||||
logger.warning(f"Failed to track LinkedIn comment response asset: {track_error}")
|
||||
|
||||
logger.info(f"Successfully generated LinkedIn comment response in {duration:.2f} seconds")
|
||||
return response
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
duration = time.time() - start_time
|
||||
logger.error(f"Error generating LinkedIn comment response: {str(e)}")
|
||||
|
||||
# Log failed request
|
||||
background_tasks.add_task(
|
||||
log_api_request, http_request, db, duration, 500
|
||||
)
|
||||
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Failed to generate LinkedIn comment response: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.get(
|
||||
"/content-types",
|
||||
summary="Get Available Content Types",
|
||||
description="Get list of available LinkedIn content types and their descriptions"
|
||||
)
|
||||
async def get_content_types():
|
||||
"""Get available LinkedIn content types."""
|
||||
return {
|
||||
"content_types": {
|
||||
"post": {
|
||||
"name": "LinkedIn Post",
|
||||
"description": "Short-form content for regular LinkedIn posts",
|
||||
"max_length": 3000,
|
||||
"features": ["hashtags", "call_to_action", "engagement_prediction"]
|
||||
},
|
||||
"article": {
|
||||
"name": "LinkedIn Article",
|
||||
"description": "Long-form content for LinkedIn articles",
|
||||
"max_length": 125000,
|
||||
"features": ["seo_optimization", "image_suggestions", "reading_time"]
|
||||
},
|
||||
"carousel": {
|
||||
"name": "LinkedIn Carousel",
|
||||
"description": "Multi-slide visual content",
|
||||
"slide_range": "3-15 slides",
|
||||
"features": ["visual_guidelines", "slide_design", "story_flow"]
|
||||
},
|
||||
"video_script": {
|
||||
"name": "LinkedIn Video Script",
|
||||
"description": "Script for LinkedIn video content",
|
||||
"length_range": "15-300 seconds",
|
||||
"features": ["hooks", "visual_cues", "captions", "thumbnails"]
|
||||
},
|
||||
"comment_response": {
|
||||
"name": "Comment Response",
|
||||
"description": "Professional responses to LinkedIn comments",
|
||||
"response_types": ["professional", "appreciative", "clarifying", "disagreement", "value_add"],
|
||||
"features": ["tone_matching", "brand_voice", "alternatives"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@router.get(
|
||||
"/usage-stats",
|
||||
summary="Get Usage Statistics",
|
||||
description="Get LinkedIn content generation usage statistics"
|
||||
)
|
||||
async def get_usage_stats(db: Session = Depends(get_db)):
|
||||
"""Get usage statistics for LinkedIn content generation."""
|
||||
try:
|
||||
# This would query the database for actual usage stats
|
||||
# For now, returning mock data
|
||||
return {
|
||||
"total_requests": 1250,
|
||||
"content_types": {
|
||||
"posts": 650,
|
||||
"articles": 320,
|
||||
"carousels": 180,
|
||||
"video_scripts": 70,
|
||||
"comment_responses": 30
|
||||
},
|
||||
"success_rate": 0.96,
|
||||
"average_generation_time": 4.2,
|
||||
"top_industries": [
|
||||
"Technology",
|
||||
"Healthcare",
|
||||
"Finance",
|
||||
"Marketing",
|
||||
"Education"
|
||||
]
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"Error retrieving usage stats: {str(e)}")
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail="Failed to retrieve usage statistics"
|
||||
)
|
||||
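A request sketch for the /generate-post endpoint above. Field names follow LinkedInPostRequest in models/linkedin_models.py, but the exact enum values for tone and post_type live in that module and are assumed here:

# Illustrative only (not part of this commit).
import requests

resp = requests.post(
    "http://localhost:8000/api/linkedin/generate-post",
    headers={"Authorization": "Bearer <token>"},
    json={
        "topic": "How small teams can adopt AI code review",
        "industry": "Technology",
        "tone": "professional",             # assumed enum value
        "post_type": "thought_leadership",  # assumed enum value
    },
    timeout=120,
)
data = resp.json()
if data.get("success"):
    print(data["data"]["content"])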
318
backend/routers/platform_analytics.py
Normal file
@@ -0,0 +1,318 @@
"""
Platform Analytics API Routes

Provides endpoints for retrieving analytics data from connected platforms.
"""

from fastapi import APIRouter, HTTPException, Depends, Query
from typing import Dict, Any, List, Optional
from datetime import datetime
from loguru import logger
from pydantic import BaseModel

from services.analytics import PlatformAnalyticsService
from middleware.auth_middleware import get_current_user

router = APIRouter(prefix="/api/analytics", tags=["Platform Analytics"])

# Initialize analytics service
analytics_service = PlatformAnalyticsService()


class AnalyticsRequest(BaseModel):
    """Request model for analytics data"""
    platforms: Optional[List[str]] = None
    date_range: Optional[Dict[str, str]] = None


class AnalyticsResponse(BaseModel):
    """Response model for analytics data"""
    success: bool
    data: Dict[str, Any]
    summary: Dict[str, Any]
    error: Optional[str] = None


def _serialize_analytics(analytics_data: Dict[str, Any]) -> Dict[str, Any]:
    """Convert AnalyticsData objects into JSON-serializable dictionaries."""
    return {
        platform: {
            'platform': data.platform,
            'metrics': data.metrics,
            'date_range': data.date_range,
            'last_updated': data.last_updated,
            'status': data.status,
            'error_message': data.error_message
        }
        for platform, data in analytics_data.items()
    }


@router.get("/platforms")
async def get_platform_connection_status(current_user: dict = Depends(get_current_user)) -> Dict[str, Any]:
    """
    Get connection status for all platforms

    Args:
        current_user: Current authenticated user

    Returns:
        Connection status for each platform
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Getting platform connection status for user: {user_id}")

        status = await analytics_service.get_platform_connection_status(user_id)

        return {
            "success": True,
            "platforms": status,
            "total_connected": sum(1 for p in status.values() if p.get('connected', False))
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get platform connection status: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/data")
async def get_analytics_data(
    platforms: Optional[str] = Query(None, description="Comma-separated list of platforms (gsc,wix,wordpress)"),
    current_user: dict = Depends(get_current_user)
) -> AnalyticsResponse:
    """
    Get analytics data from connected platforms

    Args:
        platforms: Comma-separated list of platforms to get data from
        current_user: Current authenticated user

    Returns:
        Analytics data from specified platforms
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        # Parse the platforms parameter
        platform_list = None
        if platforms:
            platform_list = [p.strip() for p in platforms.split(',') if p.strip()]

        logger.info(f"Getting analytics data for user: {user_id}, platforms: {platform_list}")

        # Get analytics data
        analytics_data = await analytics_service.get_comprehensive_analytics(user_id, platform_list)

        # Generate summary
        summary = analytics_service.get_analytics_summary(analytics_data)

        # Convert AnalyticsData objects to dictionaries
        data_dict = _serialize_analytics(analytics_data)

        return AnalyticsResponse(
            success=True,
            data=data_dict,
            summary=summary,
            error=None
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get analytics data: {e}")
        return AnalyticsResponse(
            success=False,
            data={},
            summary={},
            error=str(e)
        )


@router.post("/data")
async def get_analytics_data_post(
    request: AnalyticsRequest,
    current_user: dict = Depends(get_current_user)
) -> AnalyticsResponse:
    """
    Get analytics data from connected platforms (POST version)

    Args:
        request: Analytics request with platforms and date range
        current_user: Current authenticated user

    Returns:
        Analytics data from specified platforms
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Getting analytics data for user: {user_id}, platforms: {request.platforms}")

        # Get analytics data
        analytics_data = await analytics_service.get_comprehensive_analytics(user_id, request.platforms)

        # Generate summary
        summary = analytics_service.get_analytics_summary(analytics_data)

        # Convert AnalyticsData objects to dictionaries
        data_dict = _serialize_analytics(analytics_data)

        return AnalyticsResponse(
            success=True,
            data=data_dict,
            summary=summary,
            error=None
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get analytics data: {e}")
        return AnalyticsResponse(
            success=False,
            data={},
            summary={},
            error=str(e)
        )


@router.get("/gsc")
async def get_gsc_analytics(
    current_user: dict = Depends(get_current_user)
) -> Dict[str, Any]:
    """
    Get Google Search Console analytics data specifically

    Args:
        current_user: Current authenticated user

    Returns:
        GSC analytics data
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Getting GSC analytics for user: {user_id}")

        # Get GSC analytics via the service's internal GSC fetch
        gsc_data = await analytics_service._get_gsc_analytics(user_id)

        return {
            "success": gsc_data.status == 'success',
            "platform": gsc_data.platform,
            "metrics": gsc_data.metrics,
            "date_range": gsc_data.date_range,
            "last_updated": gsc_data.last_updated,
            "status": gsc_data.status,
            "error": gsc_data.error_message
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get GSC analytics: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/summary")
async def get_analytics_summary(current_user: dict = Depends(get_current_user)) -> Dict[str, Any]:
    """
    Get a summary of analytics data across all connected platforms

    Args:
        current_user: Current authenticated user

    Returns:
        Analytics summary
    """
    try:
        user_id = current_user.get('id')
        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        logger.info(f"Getting analytics summary for user: {user_id}")

        # Get analytics data from all platforms
        analytics_data = await analytics_service.get_comprehensive_analytics(user_id)

        # Generate summary
        summary = analytics_service.get_analytics_summary(analytics_data)

        return {
            "success": True,
            "summary": summary,
            "platforms_connected": summary['connected_platforms'],
            "platforms_total": summary['total_platforms']
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Failed to get analytics summary: {e}")
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/cache/test")
async def test_cache_endpoint(current_user: dict = Depends(get_current_user)) -> Dict[str, Any]:
    """
    Test endpoint to verify cache routes are working
    """
    return {
        "success": True,
        "message": "Cache endpoint is working",
        "user_id": current_user.get('id'),
        "timestamp": datetime.now().isoformat()
    }


@router.post("/cache/clear")
async def clear_analytics_cache(
    platform: Optional[str] = Query(None, description="Specific platform to clear cache for (optional)"),
    current_user: dict = Depends(get_current_user)
) -> Dict[str, Any]:
    """
    Clear analytics cache for a user

    Args:
        platform: Specific platform to clear cache for (optional, clears all if None)
        current_user: Current authenticated user

    Returns:
        Cache clearing result
    """
    try:
        user_id = current_user.get('id')
        logger.info(f"Cache clear request received for user {user_id}, platform: {platform}")

        if not user_id:
            raise HTTPException(status_code=400, detail="User ID not found")

        if platform:
            # Clear cache for a specific platform
            analytics_service.invalidate_platform_cache(user_id, platform)
            message = f"Cleared cache for {platform}"
        else:
            # Clear all cache for the user
            analytics_service.invalidate_user_cache(user_id)
            message = "Cleared all analytics cache"

        logger.info(f"Cache cleared for user {user_id}: {message}")

        return {
            "success": True,
            "user_id": user_id,
            "platform": platform,
            "message": message,
            "timestamp": datetime.now().isoformat()
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"Error clearing cache: {e}")
        raise HTTPException(status_code=500, detail=str(e))
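A sketch of fetching analytics for a subset of platforms via the GET /data endpoint above; the comma-separated platforms parameter is parsed by get_analytics_data, and the base URL and token are placeholders:

# Illustrative only (not part of this commit).
import requests

resp = requests.get(
    "http://localhost:8000/api/analytics/data",
    params={"platforms": "gsc,wordpress"},
    headers={"Authorization": "Bearer <token>"},
    timeout=30,
)
payload = resp.json()
if payload["success"]:
    for platform, data in payload["data"].items():
        print(platform, data["status"], data["metrics"])
else:
    print("Analytics fetch failed:", payload["error"])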
640
backend/routers/product_marketing.py
Normal file
@@ -0,0 +1,640 @@
|
||||
"""API endpoints for Product Marketing Suite."""
|
||||
|
||||
from typing import Optional, List, Dict, Any
|
||||
from fastapi import APIRouter, Depends, HTTPException, status
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from services.product_marketing import (
|
||||
ProductMarketingOrchestrator,
|
||||
BrandDNASyncService,
|
||||
AssetAuditService,
|
||||
ChannelPackService,
|
||||
)
|
||||
from services.product_marketing.campaign_storage import CampaignStorageService
|
||||
from services.product_marketing.product_image_service import ProductImageService, ProductImageRequest
|
||||
from middleware.auth_middleware import get_current_user
|
||||
from utils.logger_utils import get_service_logger
|
||||
from services.database import get_db
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
|
||||
logger = get_service_logger("api.product_marketing")
|
||||
router = APIRouter(prefix="/api/product-marketing", tags=["product-marketing"])
|
||||
|
||||
|
||||
# ====================
|
||||
# REQUEST MODELS
|
||||
# ====================
|
||||
|
||||
class CampaignCreateRequest(BaseModel):
|
||||
"""Request to create a new campaign blueprint."""
|
||||
campaign_name: str = Field(..., description="Campaign name")
|
||||
goal: str = Field(..., description="Campaign goal (product_launch, awareness, conversion, etc.)")
|
||||
kpi: Optional[str] = Field(None, description="Key performance indicator")
|
||||
channels: List[str] = Field(..., description="Target channels (instagram, linkedin, tiktok, etc.)")
|
||||
product_context: Optional[Dict[str, Any]] = Field(None, description="Product information")


class AssetProposalRequest(BaseModel):
    """Request to generate asset proposals."""
    campaign_id: str = Field(..., description="Campaign ID")
    product_context: Optional[Dict[str, Any]] = Field(None, description="Product information")


class AssetGenerateRequest(BaseModel):
    """Request to generate a specific asset."""
    asset_proposal: Dict[str, Any] = Field(..., description="Asset proposal from generate_proposals")
    product_context: Optional[Dict[str, Any]] = Field(None, description="Product information")


class AssetAuditRequest(BaseModel):
    """Request to audit uploaded assets."""
    image_base64: str = Field(..., description="Base64 encoded image")
    asset_metadata: Optional[Dict[str, Any]] = Field(None, description="Asset metadata")


# ====================
# DEPENDENCY
# ====================

def get_orchestrator() -> ProductMarketingOrchestrator:
    """Get Product Marketing Orchestrator instance."""
    return ProductMarketingOrchestrator()


def get_campaign_storage() -> CampaignStorageService:
    """Get Campaign Storage Service instance."""
    return CampaignStorageService()


def _require_user_id(current_user: Dict[str, Any], operation: str) -> str:
    """Ensure user_id is available for protected operations."""
    user_id = current_user.get("sub") or current_user.get("user_id") or current_user.get("id")
    if not user_id:
        logger.error(
            f"[Product Marketing] ❌ Missing user_id for {operation} operation - blocking request"
        )
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Authenticated user required for product marketing operations.",
        )
    return str(user_id)
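
# Sketch of the lookup order above, with hypothetical claim payloads:
#   {"sub": "user_123"}   -> "user_123"  (JWT-style subject claim wins)
#   {"user_id": 42}       -> "42"        (coerced to str)
#   {"email": "a@b.co"}   -> HTTP 401    (no usable identifier)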


# ====================
# CAMPAIGN ENDPOINTS
# ====================

@router.post("/campaigns/validate-preflight", summary="Validate Campaign Pre-flight")
async def validate_campaign_preflight(
    request: CampaignCreateRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
    orchestrator: ProductMarketingOrchestrator = Depends(get_orchestrator)
):
    """Validate campaign blueprint against subscription limits before creation.

    This endpoint:
    - Creates a temporary blueprint to estimate costs
    - Validates subscription limits
    - Returns cost estimates and validation results
    - Does NOT save anything to database
    """
    try:
        user_id = _require_user_id(current_user, "campaign pre-flight validation")
        logger.info(f"[Product Marketing] Pre-flight validation for user {user_id}")

        # Create temporary blueprint for validation (not saved)
        campaign_data = {
            "campaign_name": request.campaign_name or "Temporary Campaign",
            "goal": request.goal,
            "kpi": request.kpi,
            "channels": request.channels,
        }

        blueprint = orchestrator.create_campaign_blueprint(user_id, campaign_data)

        # Run pre-flight validation
        validation_result = orchestrator.validate_campaign_preflight(user_id, blueprint)

        logger.info(f"[Product Marketing] ✅ Pre-flight validation completed: can_proceed={validation_result.get('can_proceed')}")
        return validation_result

    except HTTPException:
        # Let auth errors from _require_user_id pass through unchanged
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error in pre-flight validation: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Pre-flight validation failed: {str(e)}")
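
# Minimal client sketch for the endpoint above (base URL and token are
# assumptions; the response schema is owned by the orchestrator and is only
# known here to include a "can_proceed" flag):
#
#   import httpx
#   resp = httpx.post(
#       "http://localhost:8000/api/product-marketing/campaigns/validate-preflight",
#       json={"campaign_name": "Spring Launch", "goal": "awareness", "channels": ["instagram"]},
#       headers={"Authorization": "Bearer <token>"},
#   )
#   print(resp.json().get("can_proceed"))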


@router.post("/campaigns/create-blueprint", summary="Create Campaign Blueprint")
async def create_campaign_blueprint(
    request: CampaignCreateRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
    orchestrator: ProductMarketingOrchestrator = Depends(get_orchestrator)
):
    """Create a campaign blueprint with personalized asset nodes.

    This endpoint:
    - Uses onboarding data to personalize the blueprint
    - Generates campaign phases (teaser, launch, nurture)
    - Creates asset nodes for each phase and channel
    - Returns blueprint ready for AI proposal generation
    """
    try:
        user_id = _require_user_id(current_user, "campaign blueprint creation")
        logger.info(f"[Product Marketing] Creating blueprint for user {user_id}: {request.campaign_name}")

        campaign_data = {
            "campaign_name": request.campaign_name,
            "goal": request.goal,
            "kpi": request.kpi,
            "channels": request.channels,
        }

        blueprint = orchestrator.create_campaign_blueprint(user_id, campaign_data)

        # Convert blueprint to dict for JSON response
        blueprint_dict = {
            "campaign_id": blueprint.campaign_id,
            "campaign_name": blueprint.campaign_name,
            "goal": blueprint.goal,
            "kpi": blueprint.kpi,
            "phases": blueprint.phases,
            "asset_nodes": [
                {
                    "asset_id": node.asset_id,
                    "asset_type": node.asset_type,
                    "channel": node.channel,
                    "status": node.status,
                }
                for node in blueprint.asset_nodes
            ],
            "channels": blueprint.channels,
            "status": blueprint.status,
        }

        # Save to database
        campaign_storage = get_campaign_storage()
        campaign_storage.save_campaign(user_id, blueprint_dict)

        logger.info(f"[Product Marketing] ✅ Blueprint created and saved: {blueprint.campaign_id}")
        return blueprint_dict

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error creating blueprint: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Campaign blueprint creation failed: {str(e)}")


@router.post("/campaigns/{campaign_id}/generate-proposals", summary="Generate Asset Proposals")
async def generate_asset_proposals(
    campaign_id: str,
    request: AssetProposalRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
    orchestrator: ProductMarketingOrchestrator = Depends(get_orchestrator)
):
    """Generate AI proposals for all assets in a campaign blueprint.

    This endpoint:
    - Uses specialized marketing prompts with brand DNA
    - Recommends templates, providers, and settings
    - Provides cost estimates
    - Returns proposals ready for user approval
    """
    try:
        user_id = _require_user_id(current_user, "asset proposal generation")
        logger.info(f"[Product Marketing] Generating proposals for campaign {campaign_id}")

        # Fetch blueprint from database
        campaign_storage = get_campaign_storage()
        campaign = campaign_storage.get_campaign(user_id, campaign_id)

        if not campaign:
            raise HTTPException(status_code=404, detail="Campaign not found")

        # Reconstruct blueprint from database
        from services.product_marketing.orchestrator import CampaignBlueprint, CampaignAssetNode

        asset_nodes = []
        if campaign.asset_nodes:
            for node_data in campaign.asset_nodes:
                asset_nodes.append(CampaignAssetNode(
                    asset_id=node_data.get('asset_id'),
                    asset_type=node_data.get('asset_type'),
                    channel=node_data.get('channel'),
                    status=node_data.get('status', 'draft'),
                ))

        blueprint = CampaignBlueprint(
            campaign_id=campaign.campaign_id,
            campaign_name=campaign.campaign_name,
            goal=campaign.goal,
            kpi=campaign.kpi,
            channels=campaign.channels or [],
            asset_nodes=asset_nodes,
        )

        proposals = orchestrator.generate_asset_proposals(
            user_id=user_id,
            blueprint=blueprint,
            product_context=request.product_context,
        )

        # Save proposals to database
        try:
            campaign_storage.save_proposals(user_id, campaign_id, proposals)
            logger.info(f"[Product Marketing] ✅ Saved {proposals['total_assets']} proposals to database")
        except Exception as save_error:
            logger.error(f"[Product Marketing] ⚠️ Failed to save proposals to database: {str(save_error)}")
            # Continue even if save fails - proposals are still returned to the user.
            # This allows the workflow to continue, but proposals won't persist.

        logger.info(f"[Product Marketing] ✅ Generated {proposals['total_assets']} proposals")
        return proposals

    except HTTPException:
        # Preserve the 404 above (and auth errors) instead of masking them as 500s
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error generating proposals: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Asset proposal generation failed: {str(e)}")
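
# Shape of the proposals mapping, inferred from save_proposals above and the
# get_campaign_proposals reader below (values are illustrative placeholders):
# {
#     "proposals": {
#         "<asset_id>": {
#             "asset_type": "social_post",
#             "channel": "instagram",
#             "proposed_prompt": "...",
#             "recommended_template": "...",
#             "recommended_provider": "...",
#             "cost_estimate": 0.0,
#             "concept_summary": "...",
#             "status": "proposed"
#         }
#     },
#     "total_assets": 1
# }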


@router.post("/assets/generate", summary="Generate Asset")
async def generate_asset(
    request: AssetGenerateRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
    orchestrator: ProductMarketingOrchestrator = Depends(get_orchestrator)
):
    """Generate a single asset using Image Studio APIs.

    This endpoint:
    - Reuses existing Image Studio APIs
    - Applies specialized marketing prompts
    - Automatically tracks assets in Asset Library
    - Validates subscription limits
    """
    try:
        user_id = _require_user_id(current_user, "asset generation")
        logger.info(f"[Product Marketing] Generating asset for user {user_id}")

        result = await orchestrator.generate_asset(
            user_id=user_id,
            asset_proposal=request.asset_proposal,
            product_context=request.product_context,
        )

        logger.info("[Product Marketing] ✅ Asset generated successfully")
        return result

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error generating asset: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Asset generation failed: {str(e)}")
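
# Typical flow sketch (assumed): take one entry from the generate-proposals
# response and submit it as the asset_proposal body here.
#
#   proposal = proposals["proposals"]["<asset_id>"]
#   resp = httpx.post(
#       "http://localhost:8000/api/product-marketing/assets/generate",
#       json={"asset_proposal": proposal},
#       headers={"Authorization": "Bearer <token>"},
#   )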


# ====================
# BRAND DNA ENDPOINTS
# ====================

@router.get("/brand-dna", summary="Get Brand DNA Tokens")
async def get_brand_dna(
    current_user: Dict[str, Any] = Depends(get_current_user),
    brand_dna_sync: BrandDNASyncService = Depends(lambda: BrandDNASyncService())
):
    """Get brand DNA tokens for the authenticated user.

    Returns normalized brand DNA from onboarding and persona data.
    """
    try:
        user_id = _require_user_id(current_user, "brand DNA retrieval")
        brand_tokens = brand_dna_sync.get_brand_dna_tokens(user_id)

        return {"brand_dna": brand_tokens}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error getting brand DNA: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/brand-dna/channel/{channel}", summary="Get Channel-Specific Brand DNA")
async def get_channel_brand_dna(
    channel: str,
    current_user: Dict[str, Any] = Depends(get_current_user),
    brand_dna_sync: BrandDNASyncService = Depends(lambda: BrandDNASyncService())
):
    """Get channel-specific brand DNA adaptations."""
    try:
        user_id = _require_user_id(current_user, "channel brand DNA retrieval")
        channel_dna = brand_dna_sync.get_channel_specific_dna(user_id, channel)

        return {"channel": channel, "brand_dna": channel_dna}

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error getting channel DNA: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


# ====================
# ASSET AUDIT ENDPOINTS
# ====================

@router.post("/assets/audit", summary="Audit Asset")
async def audit_asset(
    request: AssetAuditRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
    asset_audit: AssetAuditService = Depends(lambda: AssetAuditService())
):
    """Audit an uploaded asset and get enhancement recommendations."""
    try:
        user_id = _require_user_id(current_user, "asset audit")
        audit_result = asset_audit.audit_asset(
            request.image_base64,
            request.asset_metadata,
        )

        return audit_result

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error auditing asset: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


# ====================
# CHANNEL PACK ENDPOINTS
# ====================

@router.get("/channels/{channel}/pack", summary="Get Channel Pack")
async def get_channel_pack(
    channel: str,
    asset_type: str = "social_post",
    current_user: Dict[str, Any] = Depends(get_current_user),
    channel_pack: ChannelPackService = Depends(lambda: ChannelPackService())
):
    """Get channel-specific pack configuration with templates and optimization tips."""
    try:
        pack = channel_pack.get_channel_pack(channel, asset_type)
        return pack

    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error getting channel pack: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


# ====================
# CAMPAIGN LISTING & RETRIEVAL
# ====================

@router.get("/campaigns", summary="List Campaigns")
async def list_campaigns(
    status: Optional[str] = None,
    current_user: Dict[str, Any] = Depends(get_current_user),
    campaign_storage: CampaignStorageService = Depends(get_campaign_storage)
):
    """List all campaigns for the authenticated user."""
    try:
        user_id = _require_user_id(current_user, "list campaigns")
        campaigns = campaign_storage.list_campaigns(user_id, status=status)

        return {
            "campaigns": [
                {
                    "campaign_id": c.campaign_id,
                    "campaign_name": c.campaign_name,
                    "goal": c.goal,
                    "kpi": c.kpi,
                    "status": c.status,
                    "channels": c.channels,
                    "phases": c.phases,
                    "asset_nodes": c.asset_nodes,
                    "created_at": c.created_at.isoformat() if c.created_at else None,
                    "updated_at": c.updated_at.isoformat() if c.updated_at else None,
                }
                for c in campaigns
            ],
            "total": len(campaigns),
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error listing campaigns: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/campaigns/{campaign_id}", summary="Get Campaign")
async def get_campaign(
    campaign_id: str,
    current_user: Dict[str, Any] = Depends(get_current_user),
    campaign_storage: CampaignStorageService = Depends(get_campaign_storage)
):
    """Get a specific campaign by ID."""
    try:
        user_id = _require_user_id(current_user, "get campaign")
        campaign = campaign_storage.get_campaign(user_id, campaign_id)

        if not campaign:
            raise HTTPException(status_code=404, detail="Campaign not found")

        return {
            "campaign_id": campaign.campaign_id,
            "campaign_name": campaign.campaign_name,
            "goal": campaign.goal,
            "kpi": campaign.kpi,
            "status": campaign.status,
            "channels": campaign.channels,
            "phases": campaign.phases,
            "asset_nodes": campaign.asset_nodes,
            "product_context": campaign.product_context,
            "created_at": campaign.created_at.isoformat() if campaign.created_at else None,
            "updated_at": campaign.updated_at.isoformat() if campaign.updated_at else None,
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error getting campaign: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


@router.get("/campaigns/{campaign_id}/proposals", summary="Get Campaign Proposals")
async def get_campaign_proposals(
    campaign_id: str,
    current_user: Dict[str, Any] = Depends(get_current_user),
    campaign_storage: CampaignStorageService = Depends(get_campaign_storage)
):
    """Get proposals for a campaign."""
    try:
        user_id = _require_user_id(current_user, "get proposals")
        proposals = campaign_storage.get_proposals(user_id, campaign_id)

        proposals_dict = {}
        for proposal in proposals:
            proposals_dict[proposal.asset_node_id] = {
                "asset_id": proposal.asset_node_id,
                "asset_type": proposal.asset_type,
                "channel": proposal.channel,
                "proposed_prompt": proposal.proposed_prompt,
                "recommended_template": proposal.recommended_template,
                "recommended_provider": proposal.recommended_provider,
                "cost_estimate": proposal.cost_estimate,
                "concept_summary": proposal.concept_summary,
                "status": proposal.status,
            }

        return {
            "proposals": proposals_dict,
            "total_assets": len(proposals),
        }
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error getting proposals: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))


# ====================
# PRODUCT ASSET ENDPOINTS (Product Marketing Suite - Product Assets)
# ====================

class ProductPhotoshootRequest(BaseModel):
    """Request for product image photoshoot generation."""
    product_name: str = Field(..., description="Product name")
    product_description: str = Field(..., description="Product description")
    environment: str = Field(default="studio", description="Environment: studio, lifestyle, outdoor, minimalist, luxury")
    background_style: str = Field(default="white", description="Background: white, transparent, lifestyle, branded")
    lighting: str = Field(default="natural", description="Lighting: natural, studio, dramatic, soft")
    product_variant: Optional[str] = Field(None, description="Product variant (color, size, etc.)")
    angle: Optional[str] = Field(None, description="Product angle: front, side, top, 360")
    style: str = Field(default="photorealistic", description="Style: photorealistic, minimalist, luxury, technical")
    resolution: str = Field(default="1024x1024", description="Resolution (e.g., 1024x1024, 1280x720)")
    num_variations: int = Field(default=1, description="Number of variations to generate")
    brand_colors: Optional[List[str]] = Field(None, description="Brand color palette")
    additional_context: Optional[str] = Field(None, description="Additional context for generation")
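
# Illustrative payload for ProductPhotoshootRequest; only product_name and
# product_description are required, the rest show their declared defaults:
# {
#     "product_name": "Trailblazer Backpack",
#     "product_description": "35L waterproof hiking backpack",
#     "environment": "studio",
#     "background_style": "white",
#     "lighting": "natural",
#     "style": "photorealistic",
#     "resolution": "1024x1024",
#     "num_variations": 1
# }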


def get_product_image_service() -> ProductImageService:
    """Get Product Image Service instance."""
    return ProductImageService()


@router.post("/products/photoshoot", summary="Generate Product Image")
async def generate_product_image(
    request: ProductPhotoshootRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
    product_image_service: ProductImageService = Depends(get_product_image_service),
    brand_dna_sync: BrandDNASyncService = Depends(lambda: BrandDNASyncService())
):
    """Generate professional product images using AI.

    This endpoint:
    - Generates product images optimized for e-commerce
    - Supports multiple environments and styles
    - Integrates with brand DNA for personalization
    - Automatically saves to Asset Library
    """
    try:
        user_id = _require_user_id(current_user, "product image generation")
        logger.info(f"[Product Marketing] Generating product image for '{request.product_name}'")

        # Get brand DNA for personalization
        brand_context = None
        try:
            brand_dna = brand_dna_sync.get_brand_dna_tokens(user_id)
            brand_context = {
                "visual_identity": brand_dna.get("visual_identity", {}),
                "persona": brand_dna.get("persona", {}),
            }
        except Exception as brand_error:
            logger.warning(f"[Product Marketing] Could not load brand DNA: {str(brand_error)}")

        # Convert request to service request
        service_request = ProductImageRequest(
            product_name=request.product_name,
            product_description=request.product_description,
            environment=request.environment,
            background_style=request.background_style,
            lighting=request.lighting,
            product_variant=request.product_variant,
            angle=request.angle,
            style=request.style,
            resolution=request.resolution,
            num_variations=request.num_variations,
            brand_colors=request.brand_colors,
            additional_context=request.additional_context,
        )

        # Generate product image
        result = await product_image_service.generate_product_image(
            request=service_request,
            user_id=user_id,
            brand_context=brand_context,
        )

        if not result.success:
            raise HTTPException(status_code=500, detail=result.error or "Product image generation failed")

        logger.info(f"[Product Marketing] ✅ Generated product image: {result.asset_id}")

        # Return result (image_bytes will be served via separate endpoint)
        return {
            "success": True,
            "product_name": result.product_name,
            "image_url": result.image_url,
            "asset_id": result.asset_id,
            "provider": result.provider,
            "model": result.model,
            "cost": result.cost,
            "generation_time": result.generation_time,
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error generating product image: {str(e)}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Product image generation failed: {str(e)}")
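
# Fetch sketch for the generated image (assumption: image_url resolves to the
# /products/images/{filename} endpoint below, so the same auth headers apply):
#
#   img = httpx.get(
#       f"http://localhost:8000{result['image_url']}",
#       headers={"Authorization": "Bearer <token>"},
#   )
#   open("product.png", "wb").write(img.content)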


@router.get("/products/images/{filename}", summary="Serve Product Image")
async def serve_product_image(
    filename: str,
    current_user: Dict[str, Any] = Depends(get_current_user),
):
    """Serve generated product images."""
    try:
        from fastapi.responses import FileResponse
        from pathlib import Path

        _require_user_id(current_user, "serving product image")

        # Reject path components so a crafted filename cannot escape the image directory
        if "/" in filename or "\\" in filename or ".." in filename:
            raise HTTPException(status_code=400, detail="Invalid filename")

        # Locate image file
        base_dir = Path(__file__).parent.parent.parent
        image_path = base_dir / "product_images" / filename

        if not image_path.exists():
            raise HTTPException(status_code=404, detail="Image not found")

        return FileResponse(
            path=str(image_path),
            media_type="image/png",
            filename=filename
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[Product Marketing] ❌ Error serving product image: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))


# ====================
# HEALTH CHECK
# ====================

@router.get("/health", summary="Health Check")
async def health_check():
    """Health check endpoint for Product Marketing Suite."""
    return {
        "status": "healthy",
        "service": "product_marketing",
        "version": "1.0.0",
        "modules": {
            "orchestrator": "available",
            "prompt_builder": "available",
            "brand_dna_sync": "available",
            "asset_audit": "available",
            "channel_pack": "available",
            "product_image_service": "available",
        }
    }
653
backend/routers/seo_tools.py
Normal file
@@ -0,0 +1,653 @@
"""
AI SEO Tools FastAPI Router

This module provides FastAPI endpoints for all AI SEO tools migrated from ToBeMigrated/ai_seo_tools.
Includes intelligent logging, exception handling, and structured responses.
"""

from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks, UploadFile, File
from fastapi.responses import JSONResponse
from pydantic import BaseModel, HttpUrl, Field, validator
from typing import Dict, Any, List, Optional, Union
from datetime import datetime
import json
import traceback
from loguru import logger
import os
import tempfile
import asyncio

# Import services
from services.llm_providers.main_text_generation import llm_text_gen
from services.seo_tools.meta_description_service import MetaDescriptionService
from services.seo_tools.pagespeed_service import PageSpeedService
from services.seo_tools.sitemap_service import SitemapService
from services.seo_tools.image_alt_service import ImageAltService
from services.seo_tools.opengraph_service import OpenGraphService
from services.seo_tools.on_page_seo_service import OnPageSEOService
from services.seo_tools.technical_seo_service import TechnicalSEOService
from services.seo_tools.enterprise_seo_service import EnterpriseSEOService
from services.seo_tools.content_strategy_service import ContentStrategyService
from middleware.logging_middleware import log_api_call, save_to_file

router = APIRouter(prefix="/api/seo", tags=["AI SEO Tools"])

# Configuration for intelligent logging
LOG_DIR = "logs/seo_tools"
os.makedirs(LOG_DIR, exist_ok=True)

# Request/Response Models
class BaseResponse(BaseModel):
    """Base response model for all SEO tools"""
    success: bool
    message: str
    timestamp: datetime = Field(default_factory=datetime.utcnow)
    execution_time: Optional[float] = None
    data: Optional[Dict[str, Any]] = None

class ErrorResponse(BaseResponse):
    """Error response model"""
    error_type: str
    error_details: Optional[str] = None
    traceback: Optional[str] = None

class MetaDescriptionRequest(BaseModel):
    """Request model for meta description generation"""
    keywords: List[str] = Field(..., description="Target keywords for meta description")
    tone: str = Field(default="General", description="Desired tone for meta description")
    search_intent: str = Field(default="Informational Intent", description="Search intent type")
    language: str = Field(default="English", description="Preferred language")
    custom_prompt: Optional[str] = Field(None, description="Custom prompt for generation")

    @validator('keywords')
    def validate_keywords(cls, v):
        if not v or len(v) == 0:
            raise ValueError("At least one keyword is required")
        return v
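
# Illustrative request body for the /api/seo/meta-description endpoint defined
# below (the tone and search_intent values here are assumptions):
# {
#     "keywords": ["ai content tools", "seo automation"],
#     "tone": "Professional",
#     "search_intent": "Commercial Intent",
#     "language": "English"
# }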

class PageSpeedRequest(BaseModel):
    """Request model for PageSpeed Insights analysis"""
    url: HttpUrl = Field(..., description="URL to analyze")
    strategy: str = Field(default="DESKTOP", description="Analysis strategy (DESKTOP/MOBILE)")
    locale: str = Field(default="en", description="Locale for analysis")
    categories: List[str] = Field(default=["performance", "accessibility", "best-practices", "seo"])

class SitemapAnalysisRequest(BaseModel):
    """Request model for sitemap analysis"""
    sitemap_url: HttpUrl = Field(..., description="Sitemap URL to analyze")
    analyze_content_trends: bool = Field(default=True, description="Analyze content trends")
    analyze_publishing_patterns: bool = Field(default=True, description="Analyze publishing patterns")

class ImageAltRequest(BaseModel):
    """Request model for image alt text generation"""
    image_url: Optional[HttpUrl] = Field(None, description="URL of image to analyze")
    context: Optional[str] = Field(None, description="Context about the image")
    keywords: Optional[List[str]] = Field(None, description="Keywords to include in alt text")

class OpenGraphRequest(BaseModel):
    """Request model for OpenGraph tag generation"""
    url: HttpUrl = Field(..., description="URL for OpenGraph tags")
    title_hint: Optional[str] = Field(None, description="Hint for title")
    description_hint: Optional[str] = Field(None, description="Hint for description")
    platform: str = Field(default="General", description="Platform (General/Facebook/Twitter)")

class OnPageSEORequest(BaseModel):
    """Request model for on-page SEO analysis"""
    url: HttpUrl = Field(..., description="URL to analyze")
    target_keywords: Optional[List[str]] = Field(None, description="Target keywords for analysis")
    analyze_images: bool = Field(default=True, description="Include image analysis")
    analyze_content_quality: bool = Field(default=True, description="Analyze content quality")

class TechnicalSEORequest(BaseModel):
    """Request model for technical SEO analysis"""
    url: HttpUrl = Field(..., description="URL to crawl and analyze")
    crawl_depth: int = Field(default=3, description="Crawl depth (1-5)")
    include_external_links: bool = Field(default=True, description="Include external link analysis")
    analyze_performance: bool = Field(default=True, description="Include performance analysis")

class WorkflowRequest(BaseModel):
    """Request model for SEO workflow execution"""
    website_url: HttpUrl = Field(..., description="Primary website URL")
    workflow_type: str = Field(..., description="Type of workflow to execute")
    competitors: Optional[List[HttpUrl]] = Field(None, description="Competitor URLs (max 5)")
    target_keywords: Optional[List[str]] = Field(None, description="Target keywords")
    custom_parameters: Optional[Dict[str, Any]] = Field(None, description="Custom workflow parameters")

# Exception Handler
async def handle_seo_tool_exception(func_name: str, error: Exception, request_data: Dict) -> ErrorResponse:
    """Handle exceptions from SEO tools with intelligent logging"""
    error_id = f"seo_{func_name}_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"
    error_msg = str(error)
    error_trace = traceback.format_exc()

    # Log error with structured data
    error_log = {
        "error_id": error_id,
        "function": func_name,
        "error_type": type(error).__name__,
        "error_message": error_msg,
        "request_data": request_data,
        "traceback": error_trace,
        "timestamp": datetime.utcnow().isoformat()
    }

    logger.error(f"SEO Tool Error [{error_id}]: {error_msg}")

    # Save error to file
    await save_to_file(f"{LOG_DIR}/errors.jsonl", error_log)

    return ErrorResponse(
        success=False,
        message=f"Error in {func_name}: {error_msg}",
        error_type=type(error).__name__,
        error_details=error_msg,
        traceback=error_trace if os.getenv("DEBUG", "false").lower() == "true" else None
    )

# SEO Tool Endpoints

@router.post("/meta-description", response_model=BaseResponse)
@log_api_call
async def generate_meta_description(
    request: MetaDescriptionRequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    Generate AI-powered SEO meta descriptions

    Generates compelling, SEO-optimized meta descriptions based on keywords,
    tone, and search intent using advanced AI analysis.
    """
    start_time = datetime.utcnow()

    try:
        service = MetaDescriptionService()
        result = await service.generate_meta_description(
            keywords=request.keywords,
            tone=request.tone,
            search_intent=request.search_intent,
            language=request.language,
            custom_prompt=request.custom_prompt
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "meta_description_generation",
            "keywords_count": len(request.keywords),
            "tone": request.tone,
            "language": request.language,
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/operations.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="Meta description generated successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("generate_meta_description", e, request.dict())

@router.post("/pagespeed-analysis", response_model=BaseResponse)
@log_api_call
async def analyze_pagespeed(
    request: PageSpeedRequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    Analyze website performance using Google PageSpeed Insights

    Provides comprehensive performance analysis including Core Web Vitals,
    accessibility, SEO, and best practices scores with AI-enhanced insights.
    """
    start_time = datetime.utcnow()

    try:
        service = PageSpeedService()
        result = await service.analyze_pagespeed(
            url=str(request.url),
            strategy=request.strategy,
            locale=request.locale,
            categories=request.categories
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "pagespeed_analysis",
            "url": str(request.url),
            "strategy": request.strategy,
            "categories": request.categories,
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/operations.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="PageSpeed analysis completed successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("analyze_pagespeed", e, request.dict())

@router.post("/sitemap-analysis", response_model=BaseResponse)
@log_api_call
async def analyze_sitemap(
    request: SitemapAnalysisRequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    Analyze website sitemap for content structure and trends

    Provides insights into content distribution, publishing patterns,
    and SEO opportunities with AI-powered recommendations.
    """
    start_time = datetime.utcnow()

    try:
        service = SitemapService()
        result = await service.analyze_sitemap(
            sitemap_url=str(request.sitemap_url),
            analyze_content_trends=request.analyze_content_trends,
            analyze_publishing_patterns=request.analyze_publishing_patterns
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "sitemap_analysis",
            "sitemap_url": str(request.sitemap_url),
            "urls_found": result.get("total_urls", 0),
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/operations.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="Sitemap analysis completed successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("analyze_sitemap", e, request.dict())

@router.post("/image-alt-text", response_model=BaseResponse)
@log_api_call
async def generate_image_alt_text(
    background_tasks: BackgroundTasks,
    request: Optional[ImageAltRequest] = None,
    image_file: UploadFile = File(None)
) -> Union[BaseResponse, ErrorResponse]:
    """
    Generate AI-powered alt text for images

    Creates SEO-optimized alt text for images using advanced AI vision
    models with context-aware keyword integration.
    """
    # Note: BackgroundTasks must be injected by FastAPI (no default instance),
    # otherwise scheduled tasks never run. When an image file is uploaded the
    # request is multipart/form-data, so the JSON `request` body is typically
    # omitted and context/keywords are unset.
    start_time = datetime.utcnow()

    try:
        service = ImageAltService()

        if image_file:
            # Persist the upload to a temporary file for the vision service
            with tempfile.NamedTemporaryFile(delete=False, suffix=f".{image_file.filename.split('.')[-1]}") as tmp_file:
                content = await image_file.read()
                tmp_file.write(content)
                tmp_file_path = tmp_file.name

            try:
                result = await service.generate_alt_text_from_file(
                    image_path=tmp_file_path,
                    context=request.context if request else None,
                    keywords=request.keywords if request else None
                )
            finally:
                # Clean up the temporary file even if generation fails
                os.unlink(tmp_file_path)

        elif request and request.image_url:
            result = await service.generate_alt_text_from_url(
                image_url=str(request.image_url),
                context=request.context,
                keywords=request.keywords
            )
        else:
            raise ValueError("Either image_file or image_url must be provided")

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "image_alt_text_generation",
            "has_image_file": image_file is not None,
            "has_image_url": request.image_url is not None if request else False,
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/operations.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="Image alt text generated successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("generate_image_alt_text", e,
                                               request.dict() if request else {})

@router.post("/opengraph-tags", response_model=BaseResponse)
@log_api_call
async def generate_opengraph_tags(
    request: OpenGraphRequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    Generate OpenGraph tags for social media optimization

    Creates platform-specific OpenGraph tags optimized for Facebook,
    Twitter, and other social platforms with AI-powered content analysis.
    """
    start_time = datetime.utcnow()

    try:
        service = OpenGraphService()
        result = await service.generate_opengraph_tags(
            url=str(request.url),
            title_hint=request.title_hint,
            description_hint=request.description_hint,
            platform=request.platform
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "opengraph_generation",
            "url": str(request.url),
            "platform": request.platform,
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/operations.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="OpenGraph tags generated successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("generate_opengraph_tags", e, request.dict())

@router.post("/on-page-analysis", response_model=BaseResponse)
@log_api_call
async def analyze_on_page_seo(
    request: OnPageSEORequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    Comprehensive on-page SEO analysis

    Analyzes meta tags, content quality, keyword optimization, internal linking,
    and provides actionable AI-powered recommendations for improvement.
    """
    start_time = datetime.utcnow()

    try:
        service = OnPageSEOService()
        result = await service.analyze_on_page_seo(
            url=str(request.url),
            target_keywords=request.target_keywords,
            analyze_images=request.analyze_images,
            analyze_content_quality=request.analyze_content_quality
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "on_page_seo_analysis",
            "url": str(request.url),
            "target_keywords_count": len(request.target_keywords) if request.target_keywords else 0,
            "seo_score": result.get("overall_score", 0),
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/operations.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="On-page SEO analysis completed successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("analyze_on_page_seo", e, request.dict())

@router.post("/technical-seo", response_model=BaseResponse)
@log_api_call
async def analyze_technical_seo(
    request: TechnicalSEORequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    Technical SEO analysis and crawling

    Performs comprehensive technical SEO audit including site structure,
    crawlability, indexability, and performance with AI-enhanced insights.
    """
    start_time = datetime.utcnow()

    try:
        service = TechnicalSEOService()
        result = await service.analyze_technical_seo(
            url=str(request.url),
            crawl_depth=request.crawl_depth,
            include_external_links=request.include_external_links,
            analyze_performance=request.analyze_performance
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "technical_seo_analysis",
            "url": str(request.url),
            "crawl_depth": request.crawl_depth,
            "pages_crawled": result.get("pages_crawled", 0),
            "issues_found": len(result.get("issues", [])),
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/operations.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="Technical SEO analysis completed successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("analyze_technical_seo", e, request.dict())

# Workflow Endpoints

@router.post("/workflow/website-audit", response_model=BaseResponse)
@log_api_call
async def execute_website_audit(
    request: WorkflowRequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    Complete website SEO audit workflow

    Executes a comprehensive SEO audit combining on-page analysis,
    technical SEO, performance analysis, and competitive intelligence.
    """
    start_time = datetime.utcnow()

    try:
        service = EnterpriseSEOService()
        result = await service.execute_complete_audit(
            website_url=str(request.website_url),
            competitors=[str(comp) for comp in request.competitors] if request.competitors else [],
            target_keywords=request.target_keywords or []
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "website_audit_workflow",
            "website_url": str(request.website_url),
            "competitors_count": len(request.competitors) if request.competitors else 0,
            "overall_score": result.get("overall_score", 0),
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/workflows.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="Website audit completed successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("execute_website_audit", e, request.dict())

@router.post("/workflow/content-analysis", response_model=BaseResponse)
@log_api_call
async def execute_content_analysis(
    request: WorkflowRequest,
    background_tasks: BackgroundTasks
) -> Union[BaseResponse, ErrorResponse]:
    """
    AI-powered content analysis workflow

    Analyzes content gaps, opportunities, and competitive positioning
    with AI-generated strategic recommendations for content creators.
    """
    start_time = datetime.utcnow()

    try:
        service = ContentStrategyService()
        result = await service.analyze_content_strategy(
            website_url=str(request.website_url),
            competitors=[str(comp) for comp in request.competitors] if request.competitors else [],
            target_keywords=request.target_keywords or [],
            custom_parameters=request.custom_parameters or {}
        )

        execution_time = (datetime.utcnow() - start_time).total_seconds()

        # Log successful operation
        log_data = {
            "operation": "content_analysis_workflow",
            "website_url": str(request.website_url),
            "content_gaps_found": len(result.get("content_gaps", [])),
            "opportunities_identified": len(result.get("opportunities", [])),
            "execution_time": execution_time,
            "success": True
        }
        background_tasks.add_task(save_to_file, f"{LOG_DIR}/workflows.jsonl", log_data)

        return BaseResponse(
            success=True,
            message="Content analysis completed successfully",
            execution_time=execution_time,
            data=result
        )

    except Exception as e:
        return await handle_seo_tool_exception("execute_content_analysis", e, request.dict())

# Health and Status Endpoints

@router.get("/health", response_model=BaseResponse)
async def health_check() -> BaseResponse:
    """Health check endpoint for SEO tools"""
    return BaseResponse(
        success=True,
        message="AI SEO Tools API is healthy",
        data={
            "status": "operational",
            "available_tools": [
                "meta_description",
                "pagespeed_analysis",
                "sitemap_analysis",
                "image_alt_text",
                "opengraph_tags",
                "on_page_analysis",
                "technical_seo",
                "website_audit",
                "content_analysis"
            ],
            "version": "1.0.0"
        }
    )

@router.get("/tools/status", response_model=BaseResponse)
async def get_tools_status() -> BaseResponse:
    """Get status of all SEO tools and their dependencies"""

    tools_status = {}
    overall_healthy = True

    # Check each service
    services = [
        ("meta_description", MetaDescriptionService),
        ("pagespeed", PageSpeedService),
        ("sitemap", SitemapService),
        ("image_alt", ImageAltService),
        ("opengraph", OpenGraphService),
        ("on_page_seo", OnPageSEOService),
        ("technical_seo", TechnicalSEOService),
        ("enterprise_seo", EnterpriseSEOService),
        ("content_strategy", ContentStrategyService)
    ]

    for service_name, service_class in services:
        try:
            service = service_class()
            status = await service.health_check() if hasattr(service, 'health_check') else {"status": "unknown"}
            tools_status[service_name] = {
                "healthy": status.get("status") == "operational",
                "details": status
            }
            if not tools_status[service_name]["healthy"]:
                overall_healthy = False
        except Exception as e:
            tools_status[service_name] = {
                "healthy": False,
                "error": str(e)
            }
            overall_healthy = False

    return BaseResponse(
        success=overall_healthy,
        message="Tools status check completed",
        data={
            "overall_healthy": overall_healthy,
            "tools": tools_status,
            "timestamp": datetime.utcnow().isoformat()
        }
    )
1166
backend/routers/stability.py
Normal file
File diff suppressed because it is too large
737
backend/routers/stability_admin.py
Normal file
@@ -0,0 +1,737 @@
"""Admin endpoints for Stability AI service management."""

from fastapi import APIRouter, Depends, HTTPException, Query
from fastapi.responses import JSONResponse
from typing import Dict, Any, Optional, List
from datetime import datetime, timedelta
import json

from services.stability_service import get_stability_service, StabilityAIService
from middleware.stability_middleware import get_middleware_stats
from config.stability_config import (
    MODEL_PRICING, IMAGE_LIMITS, AUDIO_LIMITS, WORKFLOW_TEMPLATES,
    get_stability_config, get_model_recommendations, calculate_estimated_cost
)

router = APIRouter(prefix="/api/stability/admin", tags=["Stability AI Admin"])


# ==================== MONITORING ENDPOINTS ====================

@router.get("/stats", summary="Get Service Statistics")
async def get_service_stats():
    """Get comprehensive statistics about Stability AI service usage."""
    return {
        "service_info": {
            "name": "Stability AI Integration",
            "version": "1.0.0",
            "uptime": "N/A",  # Would track actual uptime
            "last_restart": datetime.utcnow().isoformat()
        },
        "middleware_stats": get_middleware_stats(),
        "pricing_info": MODEL_PRICING,
        "limits": {
            "image": IMAGE_LIMITS,
            "audio": AUDIO_LIMITS
        }
    }


@router.get("/health/detailed", summary="Detailed Health Check")
async def detailed_health_check(
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Perform detailed health check of Stability AI service."""
    health_status = {
        "timestamp": datetime.utcnow().isoformat(),
        "overall_status": "healthy",
        "checks": {}
    }

    try:
        # Test API connectivity
        async with stability_service:
            account_info = await stability_service.get_account_details()
            health_status["checks"]["api_connectivity"] = {
                "status": "healthy",
                "response_time": "N/A",
                "account_id": account_info.get("id", "unknown")
            }
    except Exception as e:
        health_status["checks"]["api_connectivity"] = {
            "status": "unhealthy",
            "error": str(e)
        }
        health_status["overall_status"] = "degraded"

    try:
        # Test account balance; warn at 10 credits or fewer so the status
        # and the warning message agree at the boundary
        async with stability_service:
            balance_info = await stability_service.get_account_balance()
            credits = balance_info.get("credits", 0)

            health_status["checks"]["account_balance"] = {
                "status": "healthy" if credits > 10 else "warning",
                "credits": credits,
                "warning": "Low credit balance" if credits <= 10 else None
            }
    except Exception as e:
        health_status["checks"]["account_balance"] = {
            "status": "error",
            "error": str(e)
        }

    # Check configuration
    try:
        config = get_stability_config()
        health_status["checks"]["configuration"] = {
            "status": "healthy",
            "api_key_configured": bool(config.api_key),
            "base_url": config.base_url
        }
    except Exception as e:
        health_status["checks"]["configuration"] = {
            "status": "error",
            "error": str(e)
        }
        health_status["overall_status"] = "unhealthy"

    return health_status


@router.get("/usage/summary", summary="Get Usage Summary")
async def get_usage_summary(
    days: Optional[int] = Query(7, description="Number of days to analyze")
):
    """Get usage summary for the specified time period."""
    # In a real implementation, this would query a database.
    # For now, return mock data.

    end_date = datetime.utcnow()
    start_date = end_date - timedelta(days=days)

    return {
        "period": {
            "start": start_date.isoformat(),
            "end": end_date.isoformat(),
            "days": days
        },
        "usage_summary": {
            "total_requests": 156,
            "successful_requests": 148,
            "failed_requests": 8,
            "success_rate": 94.87,
            "total_credits_used": 450.5,
            "average_credits_per_request": 2.89
        },
        "operation_breakdown": {
            "generate_ultra": {"requests": 25, "credits": 200},
            "generate_core": {"requests": 45, "credits": 135},
            "upscale_fast": {"requests": 30, "credits": 60},
            "inpaint": {"requests": 20, "credits": 100},
            "control_sketch": {"requests": 15, "credits": 75}
        },
        "daily_usage": [
            {"date": (end_date - timedelta(days=i)).strftime("%Y-%m-%d"),
             "requests": 20 + i * 2,
             "credits": 50 + i * 5}
            for i in range(days)
        ]
    }


@router.get("/costs/estimate", summary="Estimate Operation Costs")
async def estimate_operation_costs(
    operations: str = Query(..., description="JSON array of operations to estimate"),
    model_preferences: Optional[str] = Query(None, description="JSON object of model preferences")
):
    """Estimate costs for a list of operations."""
    try:
        ops_list = json.loads(operations)
        preferences = json.loads(model_preferences) if model_preferences else {}
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON in parameters")

    estimates = []
    total_cost = 0

    for op in ops_list:
        operation = op.get("operation")
        model = preferences.get(operation) or op.get("model")
        steps = op.get("steps")

        cost = calculate_estimated_cost(operation, model, steps)
        total_cost += cost

        estimates.append({
            "operation": operation,
            "model": model,
            "estimated_credits": cost,
            "description": f"Estimated cost for {operation}"
        })

    return {
        "estimates": estimates,
        "total_estimated_credits": total_cost,
        "currency_equivalent": f"${total_cost * 0.01:.2f}",  # Assuming $0.01 per credit
        "timestamp": datetime.utcnow().isoformat()
    }
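
# Example invocation sketch; the operation names are borrowed from the usage
# breakdown above and the model name is an assumption (URL-encode in practice):
#
#   GET /api/stability/admin/costs/estimate
#       ?operations=[{"operation":"generate_core"},{"operation":"upscale_fast","steps":30}]
#       &model_preferences={"generate_core":"sd3-medium"}
#
# Each entry may carry "operation", an optional "model", and optional "steps".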


# ==================== CONFIGURATION ENDPOINTS ====================

@router.get("/config", summary="Get Current Configuration")
async def get_current_config():
    """Get current Stability AI service configuration."""
    try:
        config = get_stability_config()
        return {
            "base_url": config.base_url,
            "timeout": config.timeout,
            "max_retries": config.max_retries,
            "max_file_size": config.max_file_size,
            "supported_image_formats": config.supported_image_formats,
            "supported_audio_formats": config.supported_audio_formats,
            "api_key_configured": bool(config.api_key),
            "api_key_preview": f"{config.api_key[:8]}..." if config.api_key else None
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Configuration error: {str(e)}")


@router.get("/models/recommendations", summary="Get Model Recommendations")
async def get_model_recommendations_endpoint(
    use_case: str = Query(..., description="Use case (portrait, landscape, art, product, concept)"),
    quality_preference: str = Query("standard", description="Quality preference (draft, standard, premium)"),
    speed_preference: str = Query("balanced", description="Speed preference (fast, balanced, quality)")
):
    """Get model recommendations based on use case and preferences."""
    recommendations = get_model_recommendations(use_case, quality_preference, speed_preference)

    # Add detailed information
    recommendations["use_case_info"] = {
        "description": f"Recommendations optimized for {use_case} use case",
        "quality_level": quality_preference,
        "speed_priority": speed_preference
    }

    # Add cost information
    primary_cost = calculate_estimated_cost("generate", recommendations["primary"])
    alternative_cost = calculate_estimated_cost("generate", recommendations["alternative"])

    recommendations["cost_comparison"] = {
        "primary_model_cost": primary_cost,
        "alternative_model_cost": alternative_cost,
        "cost_difference": abs(primary_cost - alternative_cost)
    }

    return recommendations
|
||||
|
||||
|
||||
@router.get("/workflows/templates", summary="Get Workflow Templates")
|
||||
async def get_workflow_templates():
|
||||
"""Get available workflow templates."""
|
||||
return {
|
||||
"templates": WORKFLOW_TEMPLATES,
|
||||
"template_count": len(WORKFLOW_TEMPLATES),
|
||||
"categories": list(set(
|
||||
template["description"].split()[0].lower()
|
||||
for template in WORKFLOW_TEMPLATES.values()
|
||||
))
|
||||
}
|
||||
|
||||
|
||||
@router.post("/workflows/validate", summary="Validate Custom Workflow")
|
||||
async def validate_custom_workflow(
|
||||
workflow: dict
|
||||
):
|
||||
"""Validate a custom workflow configuration."""
|
||||
from utils.stability_utils import WorkflowManager
|
||||
|
||||
steps = workflow.get("steps", [])
|
||||
|
||||
if not steps:
|
||||
raise HTTPException(status_code=400, detail="Workflow must contain at least one step")
|
||||
|
||||
# Validate workflow
|
||||
errors = WorkflowManager.validate_workflow(steps)
|
||||
|
||||
if errors:
|
||||
return {
|
||||
"is_valid": False,
|
||||
"errors": errors,
|
||||
"workflow": workflow
|
||||
}
|
||||
|
||||
# Calculate estimated cost and time
|
||||
total_cost = sum(calculate_estimated_cost(step.get("operation", "unknown")) for step in steps)
|
||||
estimated_time = len(steps) * 30 # Rough estimate
|
||||
|
||||
# Optimize workflow
|
||||
optimized_steps = WorkflowManager.optimize_workflow(steps)
|
||||
|
||||
return {
|
||||
"is_valid": True,
|
||||
"original_workflow": workflow,
|
||||
"optimized_workflow": {"steps": optimized_steps},
|
||||
"estimates": {
|
||||
"total_credits": total_cost,
|
||||
"estimated_time_seconds": estimated_time,
|
||||
"step_count": len(steps)
|
||||
},
|
||||
"optimizations_applied": len(steps) != len(optimized_steps)
|
||||
}
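
# Example request body for /workflows/validate (illustrative only; the
# operation names follow the ones used elsewhere in this module, and the
# "parameters" key is an assumption about WorkflowManager's step schema):
#
#   {
#       "steps": [
#           {"operation": "generate_core", "parameters": {"prompt": "a red barn"}},
#           {"operation": "upscale_conservative", "parameters": {}}
#       ]
#   }
#
# An empty "steps" list is rejected with a 400 before WorkflowManager runs.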


# ==================== CACHE MANAGEMENT ====================


@router.post("/cache/clear", summary="Clear Service Cache")
async def clear_cache():
    """Clear all cached data."""
    from middleware.stability_middleware import caching

    caching.clear_cache()

    return {
        "status": "success",
        "message": "Cache cleared successfully",
        "timestamp": datetime.utcnow().isoformat()
    }


@router.get("/cache/stats", summary="Get Cache Statistics")
async def get_cache_stats():
    """Get cache usage statistics."""
    from middleware.stability_middleware import caching

    return {
        "cache_stats": caching.get_cache_stats(),
        "timestamp": datetime.utcnow().isoformat()
    }


# ==================== RATE LIMITING MANAGEMENT ====================


@router.get("/rate-limit/status", summary="Get Rate Limit Status")
async def get_rate_limit_status():
    """Get current rate limiting status."""
    from middleware.stability_middleware import rate_limiter

    return {
        "rate_limit_config": {
            "requests_per_window": rate_limiter.requests_per_window,
            "window_seconds": rate_limiter.window_seconds
        },
        "current_blocks": len(rate_limiter.blocked_until),
        "active_clients": len(rate_limiter.request_times),
        "timestamp": datetime.utcnow().isoformat()
    }


@router.post("/rate-limit/reset", summary="Reset Rate Limits")
async def reset_rate_limits():
    """Reset rate limiting for all clients (admin only)."""
    from middleware.stability_middleware import rate_limiter

    # Clear all rate limiting data
    rate_limiter.request_times.clear()
    rate_limiter.blocked_until.clear()

    return {
        "status": "success",
        "message": "Rate limits reset for all clients",
        "timestamp": datetime.utcnow().isoformat()
    }
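
# Minimal sketch of the sliding-window behavior the attributes above imply
# (request_times, blocked_until, requests_per_window, window_seconds). The
# real limiter lives in middleware.stability_middleware; this is an assumed
# reconstruction, not its actual implementation:
#
#   import time
#   from collections import defaultdict, deque
#
#   class SlidingWindowRateLimiter:
#       def __init__(self, requests_per_window=150, window_seconds=10.0):
#           self.requests_per_window = requests_per_window
#           self.window_seconds = window_seconds
#           self.request_times = defaultdict(deque)  # client_id -> timestamps
#           self.blocked_until = {}                  # client_id -> unblock time
#
#       def allow(self, client_id):
#           now = time.time()
#           if self.blocked_until.get(client_id, 0) > now:
#               return False
#           times = self.request_times[client_id]
#           while times and now - times[0] > self.window_seconds:
#               times.popleft()  # drop requests that fell out of the window
#           if len(times) >= self.requests_per_window:
#               self.blocked_until[client_id] = now + self.window_seconds
#               return False
#           times.append(now)
#           return True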


# ==================== ACCOUNT MANAGEMENT ====================


@router.get("/account/detailed", summary="Get Detailed Account Information")
async def get_detailed_account_info(
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Get detailed account information including usage and limits."""
    async with stability_service:
        account_info = await stability_service.get_account_details()
        balance_info = await stability_service.get_account_balance()
        engines_info = await stability_service.list_engines()

        return {
            "account": account_info,
            "balance": balance_info,
            "available_engines": engines_info,
            "service_limits": {
                "rate_limit": "150 requests per 10 seconds",
                "max_file_size": "10MB for images, 50MB for audio",
                "result_storage": "24 hours for async generations"
            },
            "pricing": MODEL_PRICING,
            "timestamp": datetime.utcnow().isoformat()
        }


# ==================== DEBUGGING ENDPOINTS ====================


@router.post("/debug/test-connection", summary="Test API Connection")
async def test_api_connection(
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Test connection to Stability AI API."""
    test_results = {}

    try:
        async with stability_service:
            # Test account endpoint
            start_time = datetime.utcnow()
            account_info = await stability_service.get_account_details()
            end_time = datetime.utcnow()

            test_results["account_test"] = {
                "status": "success",
                "response_time_ms": (end_time - start_time).total_seconds() * 1000,
                "account_id": account_info.get("id")
            }
    except Exception as e:
        test_results["account_test"] = {
            "status": "error",
            "error": str(e)
        }

    try:
        async with stability_service:
            # Test engines endpoint
            start_time = datetime.utcnow()
            engines = await stability_service.list_engines()
            end_time = datetime.utcnow()

            test_results["engines_test"] = {
                "status": "success",
                "response_time_ms": (end_time - start_time).total_seconds() * 1000,
                "engine_count": len(engines)
            }
    except Exception as e:
        test_results["engines_test"] = {
            "status": "error",
            "error": str(e)
        }

    overall_status = "healthy" if all(
        test["status"] == "success"
        for test in test_results.values()
    ) else "unhealthy"

    return {
        "overall_status": overall_status,
        "tests": test_results,
        "timestamp": datetime.utcnow().isoformat()
    }


@router.get("/debug/request-logs", summary="Get Recent Request Logs")
async def get_request_logs(
    limit: int = Query(50, description="Maximum number of log entries to return"),
    operation_filter: Optional[str] = Query(None, description="Filter by operation type")
):
    """Get recent request logs for debugging."""
    from middleware.stability_middleware import request_logging

    logs = request_logging.get_recent_logs(limit)

    if operation_filter:
        logs = [
            log for log in logs
            if operation_filter in log.get("path", "")
        ]

    return {
        "logs": logs,
        "total_entries": len(logs),
        "filter_applied": operation_filter,
        "summary": request_logging.get_log_summary()
    }


# ==================== MAINTENANCE ENDPOINTS ====================


@router.post("/maintenance/cleanup", summary="Cleanup Service Resources")
async def cleanup_service_resources():
    """Cleanup service resources and temporary files."""
    cleanup_results = {}

    try:
        # Clear caches
        from middleware.stability_middleware import caching
        caching.clear_cache()
        cleanup_results["cache_cleanup"] = "success"
    except Exception as e:
        cleanup_results["cache_cleanup"] = f"error: {str(e)}"

    try:
        # Clean up temporary files (if any)
        import os
        import glob

        temp_files = glob.glob("/tmp/stability_*")
        removed_count = 0

        for temp_file in temp_files:
            try:
                os.remove(temp_file)
                removed_count += 1
            except OSError:
                # Skip files that are in use or already gone
                pass

        cleanup_results["temp_file_cleanup"] = f"removed {removed_count} files"
    except Exception as e:
        cleanup_results["temp_file_cleanup"] = f"error: {str(e)}"

    return {
        "cleanup_results": cleanup_results,
        "timestamp": datetime.utcnow().isoformat()
    }


@router.post("/maintenance/optimize", summary="Optimize Service Performance")
async def optimize_service_performance():
    """Optimize service performance by adjusting configurations."""
    optimizations = []

    # Check and optimize cache settings
    from middleware.stability_middleware import caching
    cache_stats = caching.get_cache_stats()

    if cache_stats["total_entries"] > 100:
        caching.clear_cache()
        optimizations.append("Cleared large cache to free memory")

    # Check rate limiting efficiency
    from middleware.stability_middleware import rate_limiter
    if len(rate_limiter.blocked_until) > 10:
        # Reset expired blocks
        import time
        current_time = time.time()
        expired_blocks = [
            client_id for client_id, block_time in rate_limiter.blocked_until.items()
            if current_time > block_time
        ]

        for client_id in expired_blocks:
            del rate_limiter.blocked_until[client_id]

        optimizations.append(f"Cleared {len(expired_blocks)} expired rate limit blocks")

    return {
        "optimizations_applied": optimizations,
        "optimization_count": len(optimizations),
        "timestamp": datetime.utcnow().isoformat()
    }


# ==================== FEATURE FLAGS ====================


@router.get("/features", summary="Get Feature Flags")
async def get_feature_flags():
    """Get current feature flag status."""
    from config.stability_config import FEATURE_FLAGS

    return {
        "features": FEATURE_FLAGS,
        "enabled_count": sum(1 for enabled in FEATURE_FLAGS.values() if enabled),
        "total_features": len(FEATURE_FLAGS)
    }


@router.post("/features/{feature_name}/toggle", summary="Toggle Feature Flag")
async def toggle_feature_flag(feature_name: str):
    """Toggle a feature flag on/off."""
    from config.stability_config import FEATURE_FLAGS

    if feature_name not in FEATURE_FLAGS:
        raise HTTPException(status_code=404, detail=f"Feature '{feature_name}' not found")

    # Toggle the feature (in-process only; not persisted)
    FEATURE_FLAGS[feature_name] = not FEATURE_FLAGS[feature_name]

    return {
        "feature": feature_name,
        "new_status": FEATURE_FLAGS[feature_name],
        "message": f"Feature '{feature_name}' {'enabled' if FEATURE_FLAGS[feature_name] else 'disabled'}",
        "timestamp": datetime.utcnow().isoformat()
    }
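
# Illustrative toggle call (the flag name is hypothetical; host/port assumed):
#
#   curl -X POST "http://localhost:8000/api/stability/features/async_generation/toggle"
#
# Note that this mutates the in-process FEATURE_FLAGS dict only; the change is
# neither persisted nor propagated to other worker processes.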


# ==================== EXPORT ENDPOINTS ====================


@router.get("/export/config", summary="Export Configuration")
async def export_configuration():
    """Export current service configuration."""
    config = get_stability_config()

    export_data = {
        "service_config": {
            "base_url": config.base_url,
            "timeout": config.timeout,
            "max_retries": config.max_retries,
            "max_file_size": config.max_file_size
        },
        "pricing": MODEL_PRICING,
        "limits": {
            "image": IMAGE_LIMITS,
            "audio": AUDIO_LIMITS
        },
        "workflows": WORKFLOW_TEMPLATES,
        "export_timestamp": datetime.utcnow().isoformat(),
        "version": "1.0.0"
    }

    return export_data


@router.get("/export/usage-report", summary="Export Usage Report")
async def export_usage_report(
    format_type: str = Query("json", description="Export format (json, csv)"),
    days: int = Query(30, description="Number of days to include")
):
    """Export detailed usage report."""
    # In a real implementation, this would query actual usage data

    usage_data = {
        "report_info": {
            "generated_at": datetime.utcnow().isoformat(),
            "period_days": days,
            "format": format_type
        },
        "summary": {
            "total_requests": 500,
            "total_credits_used": 1250,
            "average_daily_usage": 41.67,
            "most_used_operation": "generate_core"
        },
        "detailed_usage": [
            {
                "date": (datetime.utcnow() - timedelta(days=i)).strftime("%Y-%m-%d"),
                "requests": 15 + (i % 5),
                "credits": 37.5 + (i % 5) * 2.5,
                "top_operation": "generate_core"
            }
            for i in range(days)
        ]
    }

    if format_type == "csv":
        # Convert to CSV format
        import csv
        import io

        output = io.StringIO()
        writer = csv.DictWriter(output, fieldnames=["date", "requests", "credits", "top_operation"])
        writer.writeheader()
        writer.writerows(usage_data["detailed_usage"])

        return Response(
            content=output.getvalue(),
            media_type="text/csv",
            headers={"Content-Disposition": f"attachment; filename=stability_usage_{days}days.csv"}
        )

    return usage_data
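
# Illustrative export call (host/port assumed). With format_type=csv the
# response is a file attachment built by csv.DictWriter above:
#
#   curl -o usage.csv "http://localhost:8000/api/stability/export/usage-report?format_type=csv&days=7"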


# ==================== SYSTEM INFO ENDPOINTS ====================


@router.get("/system/info", summary="Get System Information")
async def get_system_info():
    """Get comprehensive system information."""
    import sys
    import platform
    import psutil

    return {
        "system": {
            "platform": platform.platform(),
            "python_version": sys.version,
            "cpu_count": psutil.cpu_count(),
            "memory_total_gb": round(psutil.virtual_memory().total / (1024**3), 2),
            "memory_available_gb": round(psutil.virtual_memory().available / (1024**3), 2)
        },
        "service": {
            "name": "Stability AI Integration",
            "version": "1.0.0",
            "uptime": "N/A",  # Would track actual uptime
            "active_connections": "N/A"
        },
        "api_info": {
            "base_url": "https://api.stability.ai",
            "supported_versions": ["v2beta", "v1"],
            "rate_limit": "150 requests per 10 seconds"
        },
        "timestamp": datetime.utcnow().isoformat()
    }


@router.get("/system/dependencies", summary="Get Service Dependencies")
async def get_service_dependencies():
    """Get information about service dependencies."""
    dependencies = {
        "required": {
            "fastapi": "Web framework",
            "aiohttp": "HTTP client for API calls",
            "pydantic": "Data validation",
            "pillow": "Image processing",
            "loguru": "Logging"
        },
        "optional": {
            "scikit-learn": "Color analysis",
            "numpy": "Numerical operations",
            "psutil": "System monitoring"
        },
        "external_services": {
            "stability_ai_api": {
                "url": "https://api.stability.ai",
                "status": "unknown",  # Would check actual status
                "description": "Stability AI REST API"
            }
        }
    }

    return dependencies


# ==================== WEBHOOK MANAGEMENT ====================


@router.get("/webhooks/config", summary="Get Webhook Configuration")
async def get_webhook_config():
    """Get current webhook configuration."""
    return {
        "webhooks_enabled": True,
        "supported_events": [
            "generation.completed",
            "generation.failed",
            "upscale.completed",
            "edit.completed"
        ],
        "webhook_url": "/api/stability/webhook/generation-complete",
        "retry_policy": {
            "max_retries": 3,
            "retry_delay_seconds": 5
        }
    }


@router.post("/webhooks/test", summary="Test Webhook Delivery")
async def test_webhook_delivery():
    """Test webhook delivery mechanism."""
    test_payload = {
        "event": "generation.completed",
        "generation_id": "test_generation_id",
        "status": "success",
        "timestamp": datetime.utcnow().isoformat()
    }

    # In a real implementation, this would send to configured webhook URLs

    return {
        "test_status": "success",
        "payload_sent": test_payload,
        "timestamp": datetime.utcnow().isoformat()
    }
817
backend/routers/stability_advanced.py
Normal file
@@ -0,0 +1,817 @@
"""Advanced Stability AI endpoints with specialized features."""

from fastapi import APIRouter, UploadFile, File, Form, Depends, HTTPException, BackgroundTasks
from fastapi.responses import Response, StreamingResponse
from typing import Optional, List, Dict, Any
import asyncio
import base64
import io
import json
from datetime import datetime, timedelta
from loguru import logger  # used by the batch-processing background task below

from services.stability_service import get_stability_service, StabilityAIService

router = APIRouter(prefix="/api/stability/advanced", tags=["Stability AI Advanced"])


# ==================== ADVANCED GENERATION WORKFLOWS ====================


@router.post("/workflow/image-enhancement", summary="Complete Image Enhancement Workflow")
async def image_enhancement_workflow(
    image: UploadFile = File(..., description="Image to enhance"),
    enhancement_type: str = Form("auto", description="Enhancement type: auto, upscale, denoise, sharpen"),
    prompt: Optional[str] = Form(None, description="Optional prompt for guided enhancement"),
    target_resolution: Optional[str] = Form("4k", description="Target resolution: 4k, 2k, hd"),
    preserve_style: Optional[bool] = Form(True, description="Preserve original style"),
    background_tasks: BackgroundTasks = BackgroundTasks(),
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Complete image enhancement workflow with automatic optimization.

    This workflow automatically determines the best enhancement approach based on
    the input image characteristics and user preferences.
    """
    async with stability_service:
        # Analyze the image first
        content = await image.read()
        img_info = await _analyze_image(content)

        # Reset the file pointer
        await image.seek(0)

        # Determine the enhancement strategy
        strategy = _determine_enhancement_strategy(img_info, enhancement_type, target_resolution)

        # Execute the enhancement workflow
        results = []
        result = None  # guards against a strategy with no matching steps

        for step in strategy["steps"]:
            if step["operation"] == "upscale_fast":
                result = await stability_service.upscale_fast(image=image)
            elif step["operation"] == "upscale_conservative":
                result = await stability_service.upscale_conservative(
                    image=image,
                    prompt=prompt or step["default_prompt"]
                )
            elif step["operation"] == "upscale_creative":
                result = await stability_service.upscale_creative(
                    image=image,
                    prompt=prompt or step["default_prompt"]
                )

            results.append({
                "step": step["name"],
                "operation": step["operation"],
                "status": "completed",
                "result_size": len(result) if isinstance(result, bytes) else None
            })

            # Use the result as input for the next step if needed
            if isinstance(result, bytes) and len(strategy["steps"]) > 1:
                # Convert bytes back to an UploadFile-like object for the next step
                image = _bytes_to_upload_file(result, image.filename)

        # Return the final result
        if isinstance(result, bytes):
            return Response(
                content=result,
                media_type="image/png",
                headers={
                    "X-Enhancement-Strategy": json.dumps(strategy),
                    "X-Processing-Steps": str(len(results))
                }
            )

        return {
            "strategy": strategy,
            "steps_completed": results,
            "generation_id": result.get("id") if isinstance(result, dict) else None
        }
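
# Illustrative multipart call (host/port and the input file are placeholders):
#
#   curl -X POST "http://localhost:8000/api/stability/advanced/workflow/image-enhancement" \
#        -F "image=@input.png" \
#        -F "enhancement_type=auto" \
#        -F "target_resolution=4k" \
#        -o enhanced.png
#
# The chosen strategy is echoed back in the X-Enhancement-Strategy header.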


@router.post("/workflow/creative-suite", summary="Creative Suite Multi-Step Workflow")
async def creative_suite_workflow(
    base_image: Optional[UploadFile] = File(None, description="Base image (optional for text-to-image)"),
    prompt: str = Form(..., description="Main creative prompt"),
    style_reference: Optional[UploadFile] = File(None, description="Style reference image"),
    workflow_steps: str = Form(..., description="JSON array of workflow steps"),
    output_format: Optional[str] = Form("png", description="Output format"),
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Execute a multi-step creative workflow combining various Stability AI services.

    This endpoint allows you to chain multiple operations together for complex
    creative workflows.
    """
    try:
        steps = json.loads(workflow_steps)
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON in workflow_steps")

    async with stability_service:
        current_image = base_image
        results = []
        result = None  # guards against an empty steps list

        for i, step in enumerate(steps):
            operation = step.get("operation")
            params = step.get("parameters", {})

            try:
                if operation == "generate_core" and not current_image:
                    result = await stability_service.generate_core(prompt=prompt, **params)
                elif operation == "control_style" and style_reference:
                    result = await stability_service.control_style(
                        image=style_reference, prompt=prompt, **params
                    )
                elif operation == "inpaint" and current_image:
                    result = await stability_service.inpaint(
                        image=current_image, prompt=prompt, **params
                    )
                elif operation == "upscale_fast" and current_image:
                    result = await stability_service.upscale_fast(image=current_image, **params)
                else:
                    raise ValueError(f"Unsupported operation or missing requirements: {operation}")

                # Convert the result into the next step's input if needed
                if isinstance(result, bytes):
                    current_image = _bytes_to_upload_file(result, f"step_{i}_output.png")

                results.append({
                    "step": i + 1,
                    "operation": operation,
                    "status": "completed",
                    "result_type": "image" if isinstance(result, bytes) else "json"
                })

            except Exception as e:
                results.append({
                    "step": i + 1,
                    "operation": operation,
                    "status": "error",
                    "error": str(e)
                })
                break

        # Return the final result
        if isinstance(result, bytes):
            return Response(
                content=result,
                media_type=f"image/{output_format}",
                headers={"X-Workflow-Steps": json.dumps(results)}
            )

        return {"workflow_results": results, "final_result": result}
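
# Example workflow_steps payload (illustrative). The operations map onto the
# branches above; the keyword names inside "parameters" are assumptions about
# the StabilityAIService signatures:
#
#   [
#       {"operation": "generate_core", "parameters": {"aspect_ratio": "16:9"}},
#       {"operation": "upscale_fast", "parameters": {}}
#   ]
#
# Each bytes result is rewrapped via _bytes_to_upload_file and fed to the next
# step, so step order matters.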


# ==================== COMPARISON ENDPOINTS ====================


@router.post("/compare/models", summary="Compare Different Models")
async def compare_models(
    prompt: str = Form(..., description="Text prompt for comparison"),
    models: str = Form(..., description="JSON array of models to compare"),
    seed: Optional[int] = Form(42, description="Seed for consistent comparison"),
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Generate images using different models for comparison.

    This endpoint generates the same prompt using different Stability AI models
    to help you compare quality and style differences.
    """
    try:
        model_list = json.loads(models)
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON in models")

    async with stability_service:
        results = {}

        for model in model_list:
            try:
                if model == "ultra":
                    result = await stability_service.generate_ultra(
                        prompt=prompt, seed=seed, output_format="webp"
                    )
                elif model == "core":
                    result = await stability_service.generate_core(
                        prompt=prompt, seed=seed, output_format="webp"
                    )
                elif model.startswith("sd3"):
                    result = await stability_service.generate_sd3(
                        prompt=prompt, model=model, seed=seed, output_format="webp"
                    )
                else:
                    continue

                if isinstance(result, bytes):
                    results[model] = {
                        "status": "success",
                        "image": base64.b64encode(result).decode(),
                        "size": len(result)
                    }
                else:
                    results[model] = {"status": "async", "generation_id": result.get("id")}

            except Exception as e:
                results[model] = {"status": "error", "error": str(e)}

        return {
            "prompt": prompt,
            "seed": seed,
            "comparison_results": results,
            "timestamp": datetime.utcnow().isoformat()
        }
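
# Illustrative comparison call (host/port assumed; the model names are
# examples matching the branches above, with any "sd3*" value routed to
# generate_sd3). The fixed seed keeps the prompt deterministic across models
# so differences come from the model alone:
#
#   curl -X POST "http://localhost:8000/api/stability/advanced/compare/models" \
#        -F "prompt=a lighthouse at dusk" \
#        -F 'models=["ultra", "core", "sd3-large"]' \
#        -F "seed=42"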


# ==================== STYLE TRANSFER WORKFLOWS ====================


@router.post("/style/multi-style-transfer", summary="Multi-Style Transfer")
async def multi_style_transfer(
    content_image: UploadFile = File(..., description="Content image"),
    style_images: List[UploadFile] = File(..., description="Multiple style reference images"),
    blend_weights: Optional[str] = Form(None, description="JSON array of blend weights"),
    output_format: Optional[str] = Form("png", description="Output format"),
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Apply multiple styles to a single content image with blending.

    This endpoint applies multiple style references to a content image,
    optionally with specified blend weights.
    """
    weights = None
    if blend_weights:
        try:
            weights = json.loads(blend_weights)
        except json.JSONDecodeError:
            raise HTTPException(status_code=400, detail="Invalid JSON in blend_weights")

    if weights and len(weights) != len(style_images):
        raise HTTPException(status_code=400, detail="Number of weights must match number of style images")

    async with stability_service:
        results = []

        for i, style_image in enumerate(style_images):
            weight = weights[i] if weights else 1.0

            result = await stability_service.control_style_transfer(
                init_image=content_image,
                style_image=style_image,
                style_strength=weight,
                output_format=output_format
            )

            if isinstance(result, bytes):
                results.append({
                    "style_index": i,
                    "weight": weight,
                    "image": base64.b64encode(result).decode(),
                    "size": len(result)
                })

            # Reset the content image file pointer for the next iteration
            await content_image.seek(0)

        return {
            "content_image": content_image.filename,
            "style_count": len(style_images),
            "results": results
        }


# ==================== ANIMATION & SEQUENCE ENDPOINTS ====================


@router.post("/animation/image-sequence", summary="Generate Image Sequence")
async def generate_image_sequence(
    base_prompt: str = Form(..., description="Base prompt for sequence"),
    sequence_prompts: str = Form(..., description="JSON array of sequence variations"),
    seed_start: Optional[int] = Form(42, description="Starting seed"),
    seed_increment: Optional[int] = Form(1, description="Seed increment per frame"),
    output_format: Optional[str] = Form("png", description="Output format"),
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Generate a sequence of related images for animation or storytelling.

    This endpoint generates a series of images with slight variations to create
    animation frames or story sequences.
    """
    try:
        prompts = json.loads(sequence_prompts)
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON in sequence_prompts")

    async with stability_service:
        sequence_results = []
        current_seed = seed_start

        for i, variation in enumerate(prompts):
            full_prompt = f"{base_prompt}, {variation}"

            result = await stability_service.generate_core(
                prompt=full_prompt,
                seed=current_seed,
                output_format=output_format
            )

            if isinstance(result, bytes):
                sequence_results.append({
                    "frame": i + 1,
                    "prompt": full_prompt,
                    "seed": current_seed,
                    "image": base64.b64encode(result).decode(),
                    "size": len(result)
                })

            current_seed += seed_increment

        return {
            "base_prompt": base_prompt,
            "frame_count": len(sequence_results),
            "sequence": sequence_results
        }
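
# Example sequence_prompts payload (illustrative). Each entry is appended to
# base_prompt, and the seed advances by seed_increment per frame:
#
#   ["sunrise", "midday sun", "sunset", "night sky"]
#
# With seed_start=42 and seed_increment=1 this yields frames seeded 42-45.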


# ==================== QUALITY ANALYSIS ENDPOINTS ====================


@router.post("/analysis/generation-quality", summary="Analyze Generation Quality")
async def analyze_generation_quality(
    image: UploadFile = File(..., description="Generated image to analyze"),
    original_prompt: str = Form(..., description="Original generation prompt"),
    model_used: str = Form(..., description="Model used for generation")
):
    """Analyze the quality and characteristics of a generated image.

    This endpoint provides detailed analysis of generated images including
    quality metrics, style adherence, and improvement suggestions.
    """
    from PIL import Image
    import numpy as np

    try:
        content = await image.read()
        img = Image.open(io.BytesIO(content))

        # Capture the format before any conversion (convert() drops it)
        original_format = img.format

        # Convert to RGB if needed for analysis
        if img.mode != "RGB":
            img = img.convert("RGB")

        # Calculate quality metrics
        img_array = np.array(img)

        # Brightness analysis
        brightness = np.mean(img_array)

        # Contrast analysis
        contrast = np.std(img_array)

        # Color distribution
        color_channels = np.mean(img_array, axis=(0, 1))

        # Sharpness estimation (gradient variance as a Laplacian-style proxy)
        gray = img.convert('L')
        gray_array = np.array(gray)
        laplacian_var = np.var(np.gradient(gray_array))

        quality_score = min(100, (contrast / 50) * (laplacian_var / 1000) * 100)

        analysis = {
            "image_info": {
                "dimensions": f"{img.width}x{img.height}",
                "format": original_format,
                "mode": img.mode,
                "file_size": len(content)
            },
            "quality_metrics": {
                "overall_score": round(quality_score, 2),
                "brightness": round(brightness, 2),
                "contrast": round(contrast, 2),
                "sharpness": round(laplacian_var, 2)
            },
            "color_analysis": {
                "red_channel": round(float(color_channels[0]), 2),
                "green_channel": round(float(color_channels[1]), 2),
                "blue_channel": round(float(color_channels[2]), 2),
                "color_balance": "balanced" if max(color_channels) - min(color_channels) < 30 else "imbalanced"
            },
            "generation_info": {
                "original_prompt": original_prompt,
                "model_used": model_used,
                "analysis_timestamp": datetime.utcnow().isoformat()
            },
            "recommendations": _generate_quality_recommendations(quality_score, brightness, contrast)
        }

        return analysis

    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Error analyzing image: {str(e)}")


@router.post("/analysis/prompt-optimization", summary="Optimize Text Prompts")
async def optimize_prompt(
    prompt: str = Form(..., description="Original prompt to optimize"),
    target_style: Optional[str] = Form(None, description="Target style"),
    target_quality: Optional[str] = Form("high", description="Target quality level"),
    model: Optional[str] = Form("ultra", description="Target model"),
    include_negative: Optional[bool] = Form(True, description="Include negative prompt suggestions")
):
    """Analyze and optimize text prompts for better generation results.

    This endpoint analyzes your prompt and provides suggestions for improvement
    based on best practices and model-specific optimizations.
    """
    analysis = {
        "original_prompt": prompt,
        "prompt_length": len(prompt),
        "word_count": len(prompt.split()),
        "optimization_suggestions": []
    }

    # Analyze prompt structure
    suggestions = []

    # Check for style descriptors
    style_keywords = ["photorealistic", "digital art", "oil painting", "watercolor", "sketch"]
    has_style = any(keyword in prompt.lower() for keyword in style_keywords)
    if not has_style and target_style:
        suggestions.append(f"Add style descriptor: {target_style}")

    # Check for quality enhancers
    quality_keywords = ["high quality", "detailed", "sharp", "crisp", "professional"]
    has_quality = any(keyword in prompt.lower() for keyword in quality_keywords)
    if not has_quality and target_quality == "high":
        suggestions.append("Add quality enhancers: 'high quality, detailed, sharp'")

    # Check for composition elements
    composition_keywords = ["composition", "lighting", "perspective", "framing"]
    has_composition = any(keyword in prompt.lower() for keyword in composition_keywords)
    if not has_composition:
        suggestions.append("Consider adding composition details: lighting, perspective, framing")

    # Model-specific optimizations
    if model == "ultra":
        suggestions.append("For Ultra model: Use detailed, specific descriptions")
    elif model == "core":
        suggestions.append("For Core model: Keep prompts concise but descriptive")

    # Generate the optimized prompt
    optimized_prompt = prompt
    if suggestions:
        optimized_prompt = _apply_prompt_optimizations(prompt, suggestions, target_style)

    # Generate negative prompt suggestions
    negative_suggestions = []
    if include_negative:
        negative_suggestions = _generate_negative_prompt_suggestions(prompt, target_style)

    analysis.update({
        "optimization_suggestions": suggestions,
        "optimized_prompt": optimized_prompt,
        "negative_prompt_suggestions": negative_suggestions,
        "estimated_improvement": len(suggestions) * 10,  # Rough estimate
        "model_compatibility": _check_model_compatibility(optimized_prompt, model)
    })

    return analysis
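
# Worked example (illustrative): for prompt "a cat" with target_style
# "photorealistic" and model "ultra", the checks above suggest a style
# descriptor, quality enhancers, and composition details, and
# _apply_prompt_optimizations produces
# "a cat, photorealistic style, high quality, detailed, sharp", with
# negative-prompt suggestions such as "cartoon, anime, illustration".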


# ==================== BATCH PROCESSING ENDPOINTS ====================


@router.post("/batch/process-folder", summary="Process Multiple Images")
async def batch_process_folder(
    images: List[UploadFile] = File(..., description="Multiple images to process"),
    operation: str = Form(..., description="Operation to perform on all images"),
    operation_params: str = Form("{}", description="JSON parameters for operation"),
    background_tasks: BackgroundTasks = BackgroundTasks(),
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """Process multiple images with the same operation in batch.

    This endpoint allows you to apply the same operation to multiple images
    efficiently.
    """
    try:
        params = json.loads(operation_params)
    except json.JSONDecodeError:
        raise HTTPException(status_code=400, detail="Invalid JSON in operation_params")

    # Validate the operation
    supported_operations = [
        "upscale_fast", "remove_background", "erase", "generate_ultra", "generate_core"
    ]
    if operation not in supported_operations:
        raise HTTPException(
            status_code=400,
            detail=f"Unsupported operation. Supported: {supported_operations}"
        )

    # Start batch processing in the background.
    # Note: the UploadFile objects are handed to the task directly; in
    # production the file contents should be read before the response is sent,
    # since the underlying temporary files may be closed once the request ends.
    batch_id = f"batch_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}"

    background_tasks.add_task(
        _process_batch_images,
        batch_id,
        images,
        operation,
        params,
        stability_service
    )

    return {
        "batch_id": batch_id,
        "status": "started",
        "image_count": len(images),
        "operation": operation,
        "estimated_completion": (datetime.utcnow() + timedelta(minutes=len(images) * 2)).isoformat()
    }
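
# Illustrative batch call (host/port and filenames are placeholders):
#
#   curl -X POST "http://localhost:8000/api/stability/advanced/batch/process-folder" \
#        -F "images=@a.png" -F "images=@b.png" \
#        -F "operation=upscale_fast" \
#        -F "operation_params={}"
#
# The response returns immediately with a batch_id; poll
# /batch/{batch_id}/status for progress.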


@router.get("/batch/{batch_id}/status", summary="Get Batch Processing Status")
async def get_batch_status(batch_id: str):
    """Get the status of a batch processing operation.

    Returns the current status and progress of a batch operation.
    """
    # In a real implementation, you'd store batch status in a database
    # For now, return a mock response
    return {
        "batch_id": batch_id,
        "status": "processing",
        "progress": {
            "completed": 2,
            "total": 5,
            "percentage": 40
        },
        "estimated_completion": (datetime.utcnow() + timedelta(minutes=5)).isoformat()
    }


# ==================== HELPER FUNCTIONS ====================


async def _analyze_image(content: bytes) -> Dict[str, Any]:
    """Analyze image characteristics."""
    from PIL import Image

    img = Image.open(io.BytesIO(content))
    total_pixels = img.width * img.height

    return {
        "width": img.width,
        "height": img.height,
        "total_pixels": total_pixels,
        "aspect_ratio": img.width / img.height,
        "format": img.format,
        "mode": img.mode,
        "is_low_res": total_pixels < 500000,  # Less than 0.5MP
        "is_high_res": total_pixels > 2000000,  # More than 2MP
        "needs_upscaling": total_pixels < 1000000  # Less than 1MP
    }


def _determine_enhancement_strategy(img_info: Dict[str, Any], enhancement_type: str, target_resolution: str) -> Dict[str, Any]:
    """Determine the best enhancement strategy based on image characteristics."""
    strategy = {"steps": []}

    if enhancement_type == "auto":
        if img_info["is_low_res"]:
            if img_info["total_pixels"] < 100000:  # Very low res
                strategy["steps"].append({
                    "name": "Creative Upscale",
                    "operation": "upscale_creative",
                    "default_prompt": "high quality, detailed, sharp"
                })
            else:
                strategy["steps"].append({
                    "name": "Conservative Upscale",
                    "operation": "upscale_conservative",
                    "default_prompt": "enhance quality, preserve details"
                })
        else:
            strategy["steps"].append({
                "name": "Fast Upscale",
                "operation": "upscale_fast",
                "default_prompt": ""
            })
    elif enhancement_type == "upscale":
        if target_resolution == "4k":
            strategy["steps"].append({
                "name": "Conservative Upscale to 4K",
                "operation": "upscale_conservative",
                "default_prompt": "4K resolution, high quality"
            })
        else:
            strategy["steps"].append({
                "name": "Fast Upscale",
                "operation": "upscale_fast",
                "default_prompt": ""
            })
    # Other enhancement types (e.g. denoise, sharpen) currently produce an
    # empty plan; callers must handle a strategy with no steps.

    return strategy


def _bytes_to_upload_file(content: bytes, filename: str):
    """Convert bytes to an UploadFile-like object."""
    from io import BytesIO

    file_obj = BytesIO(content)
    file_obj.seek(0)

    # Create a minimal stand-in that mimics the UploadFile interface used here
    class MockUploadFile:
        def __init__(self, file_obj, filename):
            self.file = file_obj
            self.filename = filename
            self.content_type = "image/png"

        async def read(self):
            return self.file.read()

        async def seek(self, position):
            self.file.seek(position)

    return MockUploadFile(file_obj, filename)
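
# Usage sketch: the mock implements only the read/seek subset of UploadFile
# that this module relies on, e.g.
#
#   upload = _bytes_to_upload_file(png_bytes, "intermediate.png")
#   data = await upload.read()  # inside an async context
#
# Anything needing the full starlette UploadFile API (headers, spooled file,
# close()) will not work with this stand-in.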


def _generate_quality_recommendations(quality_score: float, brightness: float, contrast: float) -> List[str]:
    """Generate quality improvement recommendations."""
    recommendations = []

    if quality_score < 50:
        recommendations.append("Consider using a higher quality model like Ultra")

    if brightness < 100:
        recommendations.append("Image appears dark, consider adjusting lighting in prompt")
    elif brightness > 200:
        recommendations.append("Image appears bright, consider reducing exposure in prompt")

    if contrast < 30:
        recommendations.append("Low contrast detected, add 'high contrast' to prompt")

    if not recommendations:
        recommendations.append("Image quality looks good!")

    return recommendations


def _apply_prompt_optimizations(prompt: str, suggestions: List[str], target_style: Optional[str]) -> str:
    """Apply optimization suggestions to prompt."""
    optimized = prompt

    # Add style if suggested
    if target_style and f"Add style descriptor: {target_style}" in suggestions:
        optimized = f"{optimized}, {target_style} style"

    # Add quality enhancers if suggested
    if any("quality enhancer" in s for s in suggestions):
        optimized = f"{optimized}, high quality, detailed, sharp"

    return optimized.strip()


def _generate_negative_prompt_suggestions(prompt: str, target_style: Optional[str]) -> List[str]:
    """Generate negative prompt suggestions based on prompt analysis."""
    suggestions = []

    # Common negative prompts
    suggestions.extend([
        "blurry, low quality, pixelated",
        "distorted, deformed, malformed",
        "oversaturated, undersaturated"
    ])

    # Style-specific negative prompts
    if target_style:
        if "photorealistic" in target_style.lower():
            suggestions.append("cartoon, anime, illustration")
        elif "anime" in target_style.lower():
            suggestions.append("realistic, photographic")

    return suggestions


def _check_model_compatibility(prompt: str, model: str) -> Dict[str, Any]:
    """Check prompt compatibility with specific models."""
    compatibility = {"score": 100, "notes": []}

    if model == "ultra":
        if len(prompt.split()) < 5:
            compatibility["score"] -= 20
            compatibility["notes"].append("Ultra model works best with detailed prompts")
    elif model == "core":
        if len(prompt) > 500:
            compatibility["score"] -= 10
            compatibility["notes"].append("Core model works well with concise prompts")

    return compatibility


async def _process_batch_images(
    batch_id: str,
    images: List[UploadFile],
    operation: str,
    params: Dict[str, Any],
    stability_service: StabilityAIService
):
    """Background task for processing multiple images."""
    # In a real implementation, you'd store progress in a database
    # This is a simplified version for demonstration

    async with stability_service:
        for i, image in enumerate(images):
            try:
                if operation == "upscale_fast":
                    await stability_service.upscale_fast(image=image, **params)
                elif operation == "remove_background":
                    await stability_service.remove_background(image=image, **params)
                # Add other operations as needed

                # Log progress (in real implementation, update database)
                logger.info(f"Batch {batch_id}: Completed image {i+1}/{len(images)}")

            except Exception as e:
                logger.error(f"Batch {batch_id}: Error processing image {i+1}: {str(e)}")


# ==================== EXPERIMENTAL ENDPOINTS ====================


@router.post("/experimental/ai-director", summary="AI Director Mode")
async def ai_director_mode(
    concept: str = Form(..., description="High-level creative concept"),
    target_audience: Optional[str] = Form(None, description="Target audience"),
    mood: Optional[str] = Form(None, description="Desired mood"),
    color_palette: Optional[str] = Form(None, description="Preferred color palette"),
    iterations: Optional[int] = Form(3, description="Number of iterations"),
    stability_service: StabilityAIService = Depends(get_stability_service)
):
    """AI Director mode for automated creative decision making.

    This experimental endpoint acts as an AI creative director, making
    intelligent decisions about style, composition, and execution based on
    high-level creative concepts.
    """
    # Generate detailed prompts based on the concept
    director_prompts = _generate_director_prompts(concept, target_audience, mood, color_palette)

    async with stability_service:
        iterations_results = []

        for i in range(iterations):
            prompt = director_prompts[i % len(director_prompts)]

            result = await stability_service.generate_ultra(
                prompt=prompt,
                output_format="webp"
            )

            if isinstance(result, bytes):
                iterations_results.append({
                    "iteration": i + 1,
                    "prompt": prompt,
                    "image": base64.b64encode(result).decode(),
                    "size": len(result)
                })

    return {
        "concept": concept,
        "director_analysis": {
            "target_audience": target_audience,
            "mood": mood,
            "color_palette": color_palette
        },
        "generated_prompts": director_prompts,
        "iterations": iterations_results
    }


def _generate_director_prompts(concept: str, audience: Optional[str], mood: Optional[str], colors: Optional[str]) -> List[str]:
    """Generate creative prompts based on director inputs."""
    base_prompt = concept

    # Add audience-specific elements
    if audience:
        if "professional" in audience.lower():
            base_prompt += ", professional, clean, sophisticated"
        elif "creative" in audience.lower():
            base_prompt += ", artistic, innovative, expressive"
        elif "casual" in audience.lower():
            base_prompt += ", friendly, approachable, relaxed"

    # Add mood elements
    if mood:
        base_prompt += f", {mood} mood"

    # Add color palette
    if colors:
        base_prompt += f", {colors} color palette"

    # Generate variations
    variations = [
        f"{base_prompt}, high quality, detailed",
        f"{base_prompt}, cinematic lighting, professional photography",
        f"{base_prompt}, artistic composition, creative perspective"
    ]

    return variations
11
backend/routers/video_studio.py
Normal file
@@ -0,0 +1,11 @@
"""
Video Studio Router (Legacy Import)

This file is kept for backward compatibility.
All functionality has been moved to backend/routers/video_studio/ module.
"""

# Re-export from the new modular structure
from routers.video_studio import router

__all__ = ["router"]
38
backend/routers/video_studio/__init__.py
Normal file
@@ -0,0 +1,38 @@
"""
Video Studio Router

Provides AI video generation capabilities including:
- Text-to-video generation
- Image-to-video transformation
- Avatar/face generation
- Video enhancement and editing

Uses WaveSpeed AI models for high-quality video generation.
"""

from fastapi import APIRouter

from .endpoints import (
    create, avatar, enhance, extend, transform, models, serve, tasks, prompt,
    social, face_swap, video_translate, video_background_remover,
    add_audio_to_video,
)

# Create the main router
router = APIRouter(
    prefix="/video-studio",
    tags=["video-studio"],
    responses={404: {"description": "Not found"}},
)

# Include all endpoint routers
router.include_router(create.router)
router.include_router(avatar.router)
router.include_router(enhance.router)
router.include_router(extend.router)
router.include_router(transform.router)
router.include_router(social.router)
router.include_router(face_swap.router)
router.include_router(video_translate.router)
router.include_router(video_background_remover.router)
router.include_router(add_audio_to_video.router)
router.include_router(models.router)
router.include_router(serve.router)
router.include_router(tasks.router)
router.include_router(prompt.router)
1
backend/routers/video_studio/endpoints/__init__.py
Normal file
@@ -0,0 +1 @@
"""Video Studio endpoint modules."""
159
backend/routers/video_studio/endpoints/add_audio_to_video.py
Normal file
@@ -0,0 +1,159 @@
"""
Add Audio to Video endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any
import uuid

from ...database import get_db
from ...models.content_asset_models import AssetSource, AssetType
from ...services.video_studio.add_audio_to_video_service import AddAudioToVideoService
from ...services.asset_service import ContentAssetService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger

logger = get_service_logger("video_studio.endpoints.add_audio_to_video")

router = APIRouter()


@router.post("/add-audio-to-video")
async def add_audio_to_video(
    background_tasks: BackgroundTasks,
    video_file: UploadFile = File(..., description="Source video for audio addition"),
    model: str = Form("hunyuan-video-foley", description="AI model to use: 'hunyuan-video-foley' or 'think-sound'"),
    prompt: Optional[str] = Form(None, description="Optional text prompt describing desired sounds (Hunyuan Video Foley)"),
    seed: Optional[int] = Form(None, description="Random seed for reproducibility (-1 for random)"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Add audio to video using AI models.

    Supports:
    1. Hunyuan Video Foley - Generate realistic Foley and ambient audio from video
       - Optional text prompt to describe desired sounds
       - Seed control for reproducibility

    2. Think Sound - (To be added)

    Args:
        video_file: Source video file
        model: AI model to use
        prompt: Optional text prompt describing desired sounds
        seed: Random seed for reproducibility
    """
    try:
        user_id = require_authenticated_user(current_user)

        # content_type can be None, so guard before calling startswith
        if not (video_file.content_type or "").startswith('video/'):
            raise HTTPException(status_code=400, detail="File must be a video")

        # Initialize services
        add_audio_service = AddAudioToVideoService()
        asset_service = ContentAssetService(db)

        logger.info(f"[AddAudioToVideo] Audio addition request: user={user_id}, model={model}, has_prompt={prompt is not None}")

        # Read the video file
        video_data = await video_file.read()

        # Add audio to the video
        result = await add_audio_service.add_audio(
            video_data=video_data,
            model=model,
            prompt=prompt,
            seed=seed,
            user_id=user_id,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Adding audio failed: {result.get('error', 'Unknown error')}"
            )

        # Store the processed video in the asset library
        video_url = result.get("video_url")
        if video_url:
            asset_metadata = {
                "original_file": video_file.filename,
                "model": result.get("model_used", model),
                "has_prompt": prompt is not None,
                "prompt": prompt,
                "generation_type": "add_audio",
            }

            asset_service.create_asset(
                user_id=user_id,
                filename=f"audio_added_{uuid.uuid4().hex[:8]}.mp4",
                file_url=video_url,
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=result.get("cost", 0),
                tags=["video_studio", "audio_addition", "ai-processed"]
            )

        logger.info(f"[AddAudioToVideo] Audio addition successful: user={user_id}, url={video_url}")

        return {
            "success": True,
            "video_url": video_url,
            "cost": result.get("cost", 0),
            "model_used": result.get("model_used", model),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[AddAudioToVideo] Audio addition error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Adding audio failed: {str(e)}")


@router.post("/add-audio-to-video/estimate-cost")
async def estimate_add_audio_cost(
    model: str = Form("hunyuan-video-foley", description="AI model to use"),
    estimated_duration: float = Form(10.0, description="Estimated video duration in seconds", ge=0.0),
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Estimate the cost of an add-audio-to-video operation.

    Returns the estimated cost based on model and duration.
    """
    try:
        require_authenticated_user(current_user)

        add_audio_service = AddAudioToVideoService()
        estimated_cost = add_audio_service.calculate_cost(model, estimated_duration)

        # Build the response based on the model's pricing
        if model == "think-sound":
            return {
                "estimated_cost": estimated_cost,
                "model": model,
                "estimated_duration": estimated_duration,
                "pricing_model": "per_video",
                "flat_rate": 0.05,
            }
        else:
            # Hunyuan Video Foley (per-second pricing)
            return {
                "estimated_cost": estimated_cost,
                "model": model,
                "estimated_duration": estimated_duration,
                "cost_per_second": 0.02,  # estimated pricing
                "pricing_model": "per_second",
                "min_duration": 5.0,
                "max_duration": 600.0,  # 10 minutes max
                "min_charge": 0.02 * 5.0,
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[AddAudioToVideo] Failed to estimate cost: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to estimate cost: {str(e)}")
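
# Worked example of the pricing advertised above (the figures mirror the
# constants in this module and are estimates, not authoritative rates):
# a 12.5s clip with hunyuan-video-foley at $0.02/s costs 12.5 * 0.02 = $0.25;
# anything under the 5s minimum is charged min_charge = 0.02 * 5.0 = $0.10;
# think-sound is a flat $0.05 per video regardless of duration.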
293
backend/routers/video_studio/endpoints/avatar.py
Normal file
@@ -0,0 +1,293 @@
"""
Avatar generation endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any
import base64
import uuid

from ...database import get_db
from ...models.content_asset_models import AssetSource, AssetType
from ...services.video_studio import VideoStudioService
from ...services.video_studio.avatar_service import AvatarStudioService
from ...services.asset_service import ContentAssetService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger
from api.story_writer.task_manager import task_manager
from ..tasks.avatar_generation import execute_avatar_generation_task

logger = get_service_logger("video_studio.endpoints.avatar")

router = APIRouter()


@router.post("/avatars")
async def generate_avatar_video(
    background_tasks: BackgroundTasks,
    avatar_file: UploadFile = File(..., description="Avatar/face image"),
    audio_file: Optional[UploadFile] = File(None, description="Audio file for lip sync"),
    video_file: Optional[UploadFile] = File(None, description="Source video for face swap"),
    text: Optional[str] = Form(None, description="Text to speak (alternative to audio)"),
    language: str = Form("en", description="Language for text-to-speech"),
    provider: str = Form("wavespeed", description="AI provider to use"),
    model: str = Form("wavespeed/mocha", description="Specific AI model to use"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Generate a talking avatar video or perform a face swap.

    Supports both text-to-speech and audio input for natural lip sync.
    """
    try:
        user_id = require_authenticated_user(current_user)

        # Validate inputs
        if not avatar_file.content_type.startswith('image/'):
            raise HTTPException(status_code=400, detail="Avatar file must be an image")

        if not any([audio_file, video_file, text]):
            raise HTTPException(status_code=400, detail="Must provide an audio file, a video file, or text")

        # Initialize services
        video_service = VideoStudioService()
        asset_service = ContentAssetService(db)

        logger.info(f"[VideoStudio] Avatar generation request: user={user_id}, model={model}")

        # Read files
        avatar_data = await avatar_file.read()
        audio_data = await audio_file.read() if audio_file else None
        video_data = await video_file.read() if video_file else None

        # Generate avatar video
        result = await video_service.generate_avatar_video(
            avatar_data=avatar_data,
            audio_data=audio_data,
            video_data=video_data,
            text=text,
            language=language,
            provider=provider,
            model=model,
            user_id=user_id,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Avatar generation failed: {result.get('error', 'Unknown error')}"
            )

        # Store in asset library if successful
        video_url = result.get("video_url")
        if video_url:
            asset_metadata = {
                "avatar_file": avatar_file.filename,
                "audio_file": audio_file.filename if audio_file else None,
                "video_file": video_file.filename if video_file else None,
                "text": text,
                "language": language,
                "provider": provider,
                "model": model,
                "generation_type": "avatar",
            }

            asset_service.create_asset(
                user_id=user_id,
                filename=f"avatar_{uuid.uuid4().hex[:8]}.mp4",
                file_url=video_url,
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=result.get("cost", 0),
                tags=["video_studio", "avatar", "ai-generated"]
            )

        logger.info(f"[VideoStudio] Avatar generation successful: user={user_id}, url={video_url}")

        return {
            "success": True,
            "video_url": video_url,
            "cost": result.get("cost", 0),
            "model_used": model,
            "provider": provider,
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Avatar generation error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Avatar generation failed: {str(e)}")


@router.post("/avatar/create-async")
async def create_avatar_async(
    background_tasks: BackgroundTasks,
    image: UploadFile = File(..., description="Image file for avatar"),
    audio: UploadFile = File(..., description="Audio file for lip-sync"),
    resolution: str = Form("720p", description="Video resolution (480p or 720p)"),
    prompt: Optional[str] = Form(None, description="Optional prompt for expression/style"),
    mask_image: Optional[UploadFile] = File(None, description="Optional mask image (InfiniteTalk only)"),
    seed: Optional[int] = Form(None, description="Optional random seed"),
    model: str = Form("infinitetalk", description="Model to use: 'infinitetalk' or 'hunyuan-avatar'"),
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Create a talking avatar asynchronously with polling support.

    Upload a photo and audio to create a talking avatar with accurate lip sync.
    Supports 480p and 720p resolutions.
    - InfiniteTalk: up to 10 minutes long
    - Hunyuan Avatar: up to 2 minutes (120 seconds) long

    Returns a task_id for polling. The frontend can poll
    /api/video-studio/task/{task_id}/status for progress updates and the final result.
    """
    try:
        user_id = require_authenticated_user(current_user)

        # Validate resolution
        if resolution not in ["480p", "720p"]:
            raise HTTPException(
                status_code=400,
                detail="Resolution must be '480p' or '720p'"
            )

        # Validate model before any task is created, so a bad request
        # cannot leave an orphaned pending task behind
        if model not in ["infinitetalk", "hunyuan-avatar"]:
            raise HTTPException(
                status_code=400,
                detail="Model must be 'infinitetalk' or 'hunyuan-avatar'"
            )

        # Read image data
        image_data = await image.read()
        if len(image_data) == 0:
            raise HTTPException(status_code=400, detail="Image file is empty")

        # Read audio data
        audio_data = await audio.read()
        if len(audio_data) == 0:
            raise HTTPException(status_code=400, detail="Audio file is empty")

        # Convert to base64 with a data URI prefix
        image_mime = image.content_type or "image/png"
        image_base64 = f"data:{image_mime};base64,{base64.b64encode(image_data).decode('utf-8')}"

        audio_mime = audio.content_type or "audio/mpeg"
        audio_base64 = f"data:{audio_mime};base64,{base64.b64encode(audio_data).decode('utf-8')}"

        # Handle optional mask image
        mask_image_base64 = None
        if mask_image:
            mask_data = await mask_image.read()
            if len(mask_data) > 0:
                mask_mime = mask_image.content_type or "image/png"
                mask_image_base64 = f"data:{mask_mime};base64,{base64.b64encode(mask_data).decode('utf-8')}"

        # Create task
        task_id = task_manager.create_task("avatar_generation")

        # Start background task
        background_tasks.add_task(
            execute_avatar_generation_task,
            task_id=task_id,
            user_id=user_id,
            image_base64=image_base64,
            audio_base64=audio_base64,
            resolution=resolution,
            prompt=prompt,
            mask_image_base64=mask_image_base64,
            seed=seed,
            model=model,
        )

        logger.info(f"[AvatarStudio] Started async avatar generation: task_id={task_id}, user={user_id}")

        return {
            "task_id": task_id,
            "status": "pending",
            "message": f"Avatar generation started. This may take several minutes. Poll /api/video-studio/task/{task_id}/status for updates."
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[AvatarStudio] Failed to start async avatar generation: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to start avatar generation: {str(e)}")


@router.post("/avatar/estimate-cost")
async def estimate_avatar_cost(
    resolution: str = Form("720p", description="Video resolution (480p or 720p)"),
    estimated_duration: float = Form(10.0, description="Estimated video duration in seconds", ge=5.0, le=600.0),
    model: str = Form("infinitetalk", description="Model to use: 'infinitetalk' or 'hunyuan-avatar'"),
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Estimate the cost of talking avatar generation.

    Returns the estimated cost based on resolution, duration, and model.
    """
    try:
        require_authenticated_user(current_user)

        # Validate resolution
        if resolution not in ["480p", "720p"]:
            raise HTTPException(
                status_code=400,
                detail="Resolution must be '480p' or '720p'"
            )

        # Validate model
        if model not in ["infinitetalk", "hunyuan-avatar"]:
            raise HTTPException(
                status_code=400,
                detail="Model must be 'infinitetalk' or 'hunyuan-avatar'"
            )

        # Validate duration for Hunyuan Avatar (max 120 seconds)
        if model == "hunyuan-avatar" and estimated_duration > 120:
            raise HTTPException(
                status_code=400,
                detail="Hunyuan Avatar supports a maximum of 120 seconds (2 minutes)"
            )

        avatar_service = AvatarStudioService()
        estimated_cost = avatar_service.calculate_cost_estimate(resolution, estimated_duration, model)

        # Return pricing info based on model
        if model == "hunyuan-avatar":
            cost_per_5_seconds = 0.15 if resolution == "480p" else 0.30
            return {
                "estimated_cost": estimated_cost,
                "resolution": resolution,
                "estimated_duration": estimated_duration,
                "model": model,
                "cost_per_5_seconds": cost_per_5_seconds,
                "pricing_model": "per_5_seconds",
                "max_duration": 120,
            }
        else:
            cost_per_second = 0.03 if resolution == "480p" else 0.06
            return {
                "estimated_cost": estimated_cost,
                "resolution": resolution,
                "estimated_duration": estimated_duration,
                "model": model,
                "cost_per_second": cost_per_second,
                "pricing_model": "per_second",
                "max_duration": 600,
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[AvatarStudio] Failed to estimate cost: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to estimate cost: {str(e)}")
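Because /avatar/create-async only returns a task_id, the caller has to poll for completion. A minimal sketch of that loop follows; the status path comes from the docstring above, while the base URL, token, httpx client, and the exact fields of the status payload are assumptions:

# Hypothetical polling loop; httpx, URL, token, and status-payload fields are assumptions.
import time
import httpx

def create_avatar_and_wait(token: str, image_path: str, audio_path: str, timeout_s: int = 900) -> dict:
    base = "http://localhost:8000/api/video-studio"
    headers = {"Authorization": f"Bearer {token}"}
    with open(image_path, "rb") as img, open(audio_path, "rb") as aud:
        resp = httpx.post(
            f"{base}/avatar/create-async",
            files={"image": img, "audio": aud},
            data={"resolution": "720p", "model": "infinitetalk"},
            headers=headers,
        )
    resp.raise_for_status()
    task_id = resp.json()["task_id"]

    deadline = time.monotonic() + timeout_s
    while time.monotonic() < deadline:
        status = httpx.get(f"{base}/task/{task_id}/status", headers=headers).json()
        if status.get("status") not in ("pending", "processing"):
            return status  # completed or failed
        time.sleep(5)
    raise TimeoutError(f"Avatar task {task_id} did not finish within {timeout_s}s")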
304
backend/routers/video_studio/endpoints/create.py
Normal file
@@ -0,0 +1,304 @@
"""
Create video endpoints: text-to-video and image-to-video generation.
"""

from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any
import uuid

from ...database import get_db
from ...models.content_asset_models import AssetSource, AssetType
from ...services.video_studio import VideoStudioService
from ...services.asset_service import ContentAssetService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger
from api.story_writer.task_manager import task_manager
from ..tasks.video_generation import execute_video_generation_task

logger = get_service_logger("video_studio.endpoints.create")

router = APIRouter()


@router.post("/generate")
async def generate_video(
    background_tasks: BackgroundTasks,
    prompt: str = Form(..., description="Text description for video generation"),
    negative_prompt: Optional[str] = Form(None, description="What to avoid in the video"),
    duration: int = Form(5, description="Video duration in seconds", ge=1, le=10),
    resolution: str = Form("720p", description="Video resolution"),
    aspect_ratio: str = Form("16:9", description="Video aspect ratio"),
    motion_preset: str = Form("medium", description="Motion intensity"),
    provider: str = Form("wavespeed", description="AI provider to use"),
    model: str = Form("hunyuan-video-1.5", description="Specific AI model to use"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Generate video from a text description using AI models.

    Supports multiple providers and models to balance quality and cost.
    """
    try:
        user_id = require_authenticated_user(current_user)

        # Initialize services
        video_service = VideoStudioService()
        asset_service = ContentAssetService(db)

        logger.info(f"[VideoStudio] Text-to-video request: user={user_id}, model={model}, duration={duration}s")

        # Generate video
        result = await video_service.generate_text_to_video(
            prompt=prompt,
            negative_prompt=negative_prompt,
            duration=duration,
            resolution=resolution,
            aspect_ratio=aspect_ratio,
            motion_preset=motion_preset,
            provider=provider,
            model=model,
            user_id=user_id,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Video generation failed: {result.get('error', 'Unknown error')}"
            )

        # Store in asset library if successful
        video_url = result.get("video_url")
        if video_url:
            asset_metadata = {
                "prompt": prompt,
                "negative_prompt": negative_prompt,
                "duration": duration,
                "resolution": resolution,
                "aspect_ratio": aspect_ratio,
                "motion_preset": motion_preset,
                "provider": provider,
                "model": model,
                "generation_type": "text-to-video",
            }

            asset_service.create_asset(
                user_id=user_id,
                filename=f"video_{uuid.uuid4().hex[:8]}.mp4",
                file_url=video_url,
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=result.get("cost", 0),
                tags=["video_studio", "text-to-video", "ai-generated"]
            )

        logger.info(f"[VideoStudio] Video generated successfully: user={user_id}, url={video_url}")

        return {
            "success": True,
            "video_url": video_url,
            "cost": result.get("cost", 0),
            "estimated_duration": result.get("estimated_duration", duration),
            "model_used": model,
            "provider": provider,
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Text-to-video error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Video generation failed: {str(e)}")


@router.post("/transform")
async def transform_to_video(
    background_tasks: BackgroundTasks,
    file: UploadFile = File(..., description="Image file to transform"),
    prompt: Optional[str] = Form(None, description="Optional text prompt to guide transformation"),
    duration: int = Form(5, description="Video duration in seconds", ge=1, le=10),
    resolution: str = Form("720p", description="Video resolution"),
    aspect_ratio: str = Form("16:9", description="Video aspect ratio"),
    motion_preset: str = Form("medium", description="Motion intensity"),
    provider: str = Form("wavespeed", description="AI provider to use"),
    model: str = Form("alibaba/wan-2.5", description="Specific AI model to use"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Transform an image into a video using AI models.

    Supports various motion presets and durations for dynamic video creation.
    """
    try:
        user_id = require_authenticated_user(current_user)

        # Validate file type
        if not file.content_type.startswith('image/'):
            raise HTTPException(status_code=400, detail="File must be an image")

        # Initialize services
        video_service = VideoStudioService()
        asset_service = ContentAssetService(db)

        logger.info(f"[VideoStudio] Image-to-video request: user={user_id}, model={model}, duration={duration}s")

        # Read image file
        image_data = await file.read()

        # Generate video
        result = await video_service.generate_image_to_video(
            image_data=image_data,
            prompt=prompt,
            duration=duration,
            resolution=resolution,
            aspect_ratio=aspect_ratio,
            motion_preset=motion_preset,
            provider=provider,
            model=model,
            user_id=user_id,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Video transformation failed: {result.get('error', 'Unknown error')}"
            )

        # Store in asset library if successful
        video_url = result.get("video_url")
        if video_url:
            asset_metadata = {
                "original_image": file.filename,
                "prompt": prompt,
                "duration": duration,
                "resolution": resolution,
                "aspect_ratio": aspect_ratio,
                "motion_preset": motion_preset,
                "provider": provider,
                "model": model,
                "generation_type": "image-to-video",
            }

            asset_service.create_asset(
                user_id=user_id,
                filename=f"video_{uuid.uuid4().hex[:8]}.mp4",
                file_url=video_url,
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=result.get("cost", 0),
                tags=["video_studio", "image-to-video", "ai-generated"]
            )

        logger.info(f"[VideoStudio] Video transformation successful: user={user_id}, url={video_url}")

        return {
            "success": True,
            "video_url": video_url,
            "cost": result.get("cost", 0),
            "estimated_duration": result.get("estimated_duration", duration),
            "model_used": model,
            "provider": provider,
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Image-to-video error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Video transformation failed: {str(e)}")


@router.post("/generate-async")
async def generate_video_async(
    background_tasks: BackgroundTasks,
    prompt: Optional[str] = Form(None, description="Text description for video generation"),
    image: Optional[UploadFile] = File(None, description="Image file for image-to-video"),
    operation_type: str = Form("text-to-video", description="Operation type: text-to-video or image-to-video"),
    negative_prompt: Optional[str] = Form(None, description="What to avoid in the video"),
    duration: int = Form(5, description="Video duration in seconds", ge=1, le=10),
    resolution: str = Form("720p", description="Video resolution"),
    aspect_ratio: str = Form("16:9", description="Video aspect ratio"),
    motion_preset: str = Form("medium", description="Motion intensity"),
    provider: str = Form("wavespeed", description="AI provider to use"),
    model: str = Form("alibaba/wan-2.5", description="Specific AI model to use"),
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Generate video asynchronously with polling support.

    Returns a task_id for polling. The frontend can poll
    /api/video-studio/task/{task_id}/status for progress updates and the final result.
    """
    try:
        user_id = require_authenticated_user(current_user)

        # Validate operation type
        if operation_type not in ["text-to-video", "image-to-video"]:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid operation_type: {operation_type}. Must be 'text-to-video' or 'image-to-video'"
            )

        # Validate inputs based on operation type
        if operation_type == "text-to-video" and not prompt:
            raise HTTPException(
                status_code=400,
                detail="prompt is required for text-to-video generation"
            )

        if operation_type == "image-to-video" and not image:
            raise HTTPException(
                status_code=400,
                detail="image file is required for image-to-video generation"
            )

        # Read image data if provided
        image_data = None
        if image:
            image_data = await image.read()
            if len(image_data) == 0:
                raise HTTPException(status_code=400, detail="Image file is empty")

        # Create task
        task_id = task_manager.create_task("video_generation")

        # Prepare kwargs
        kwargs = {
            "duration": duration,
            "resolution": resolution,
            "model": model,
        }
        if negative_prompt:
            kwargs["negative_prompt"] = negative_prompt
        if aspect_ratio:
            kwargs["aspect_ratio"] = aspect_ratio
        if motion_preset:
            kwargs["motion_preset"] = motion_preset

        # Start background task
        background_tasks.add_task(
            execute_video_generation_task,
            task_id=task_id,
            operation_type=operation_type,
            user_id=user_id,
            prompt=prompt,
            image_data=image_data,
            provider=provider,
            **kwargs
        )

        logger.info(f"[VideoStudio] Started async video generation: task_id={task_id}, operation={operation_type}, user={user_id}")

        return {
            "task_id": task_id,
            "status": "pending",
            "message": f"Video generation started. This may take several minutes. Poll /api/video-studio/task/{task_id}/status for updates."
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Failed to start async video generation: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to start video generation: {str(e)}")
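As a usage sketch, the synchronous /transform endpoint takes the image as multipart data alongside the form fields; something like the following, where the client library, base URL, and token are assumptions rather than part of this commit:

# Hypothetical image-to-video call; httpx, the URL, and the token are illustrative.
import httpx

with open("still.png", "rb") as f:
    resp = httpx.post(
        "http://localhost:8000/api/video-studio/transform",
        files={"file": ("still.png", f, "image/png")},
        data={"prompt": "slow dolly-in, soft light", "duration": "5", "resolution": "720p"},
        headers={"Authorization": "Bearer <token>"},
        timeout=None,  # synchronous generation can take minutes
    )
resp.raise_for_status()
video_url = resp.json()["video_url"]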
157
backend/routers/video_studio/endpoints/enhance.py
Normal file
@@ -0,0 +1,157 @@
"""
Video enhancement endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any
import uuid

from ...database import get_db
from ...models.content_asset_models import AssetSource, AssetType
from ...services.video_studio import VideoStudioService
from ...services.asset_service import ContentAssetService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger

logger = get_service_logger("video_studio.endpoints.enhance")

router = APIRouter()


@router.post("/enhance")
async def enhance_video(
    background_tasks: BackgroundTasks,
    file: UploadFile = File(..., description="Video file to enhance"),
    enhancement_type: str = Form(..., description="Type of enhancement: upscale, stabilize, colorize, etc."),
    target_resolution: Optional[str] = Form(None, description="Target resolution for upscale"),
    provider: str = Form("wavespeed", description="AI provider to use"),
    model: str = Form("wavespeed/flashvsr", description="Specific AI model to use"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Enhance an existing video using AI models.

    Supports upscaling, stabilization, colorization, and other enhancements.
    """
    try:
        user_id = require_authenticated_user(current_user)

        if not file.content_type.startswith('video/'):
            raise HTTPException(status_code=400, detail="File must be a video")

        # Initialize services
        video_service = VideoStudioService()
        asset_service = ContentAssetService(db)

        logger.info(f"[VideoStudio] Video enhancement request: user={user_id}, type={enhancement_type}, model={model}")

        # Read video file
        video_data = await file.read()

        # Enhance video
        result = await video_service.enhance_video(
            video_data=video_data,
            enhancement_type=enhancement_type,
            target_resolution=target_resolution,
            provider=provider,
            model=model,
            user_id=user_id,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Video enhancement failed: {result.get('error', 'Unknown error')}"
            )

        # Store enhanced version in asset library
        video_url = result.get("video_url")
        if video_url:
            asset_metadata = {
                "original_file": file.filename,
                "enhancement_type": enhancement_type,
                "target_resolution": target_resolution,
                "provider": provider,
                "model": model,
                "generation_type": "enhancement",
            }

            asset_service.create_asset(
                user_id=user_id,
                filename=f"enhanced_{uuid.uuid4().hex[:8]}.mp4",
                file_url=video_url,
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=result.get("cost", 0),
                tags=["video_studio", "enhancement", "ai-enhanced"]
            )

        logger.info(f"[VideoStudio] Video enhancement successful: user={user_id}, url={video_url}")

        return {
            "success": True,
            "video_url": video_url,
            "cost": result.get("cost", 0),
            "enhancement_type": enhancement_type,
            "model_used": model,
            "provider": provider,
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Video enhancement error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Video enhancement failed: {str(e)}")


@router.post("/enhance/estimate-cost")
async def estimate_enhance_cost(
    target_resolution: str = Form("1080p", description="Target resolution (720p, 1080p, 2k, 4k)"),
    estimated_duration: float = Form(10.0, description="Estimated video duration in seconds", ge=5.0),
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Estimate the cost of a video enhancement operation.

    Returns the estimated cost based on target resolution and duration.
    """
    try:
        require_authenticated_user(current_user)

        # Validate resolution
        if target_resolution not in ("720p", "1080p", "2k", "4k"):
            raise HTTPException(
                status_code=400,
                detail="Target resolution must be '720p', '1080p', '2k', or '4k'"
            )

        # FlashVSR pricing: $0.06-$0.16 per 5 seconds depending on resolution
        pricing = {
            "720p": 0.06 / 5,   # $0.012 per second
            "1080p": 0.09 / 5,  # $0.018 per second
            "2k": 0.12 / 5,     # $0.024 per second
            "4k": 0.16 / 5,     # $0.032 per second
        }

        cost_per_second = pricing.get(target_resolution.lower(), pricing["1080p"])
        estimated_cost = max(5.0, estimated_duration) * cost_per_second  # minimum 5 seconds

        return {
            "estimated_cost": estimated_cost,
            "target_resolution": target_resolution,
            "estimated_duration": estimated_duration,
            "cost_per_second": cost_per_second,
            "pricing_model": "per_second",
            "min_duration": 5.0,
            "max_duration": 600.0,  # 10 minutes max
            "min_charge": cost_per_second * 5.0,
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Failed to estimate cost: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to estimate cost: {str(e)}")
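The pricing table above reduces to a small pure function; this standalone sketch mirrors it for quick sanity checks (the rates are the ones hard-coded in the endpoint; the function name is illustrative):

# Standalone sketch of the FlashVSR pricing math used above.
FLASHVSR_RATE_PER_SECOND = {"720p": 0.012, "1080p": 0.018, "2k": 0.024, "4k": 0.032}

def estimate_flashvsr_cost(resolution: str, duration_s: float) -> float:
    rate = FLASHVSR_RATE_PER_SECOND[resolution]
    return max(5.0, duration_s) * rate  # 5-second minimum charge

assert round(estimate_flashvsr_cost("1080p", 10.0), 3) == 0.18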
158
backend/routers/video_studio/endpoints/extend.py
Normal file
@@ -0,0 +1,158 @@
"""
Video extension endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any
import uuid

from ...database import get_db
from ...models.content_asset_models import AssetSource, AssetType
from ...services.video_studio import VideoStudioService
from ...services.asset_service import ContentAssetService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger

logger = get_service_logger("video_studio.endpoints.extend")

router = APIRouter()


@router.post("/extend")
async def extend_video(
    background_tasks: BackgroundTasks,
    file: UploadFile = File(..., description="Video file to extend"),
    prompt: str = Form(..., description="Text prompt describing how to extend the video"),
    model: str = Form("wan-2.5", description="Model to use: 'wan-2.5', 'wan-2.2-spicy', or 'seedance-1.5-pro'"),
    audio: Optional[UploadFile] = File(None, description="Optional audio file to guide generation (WAN 2.5 only)"),
    negative_prompt: Optional[str] = Form(None, description="Negative prompt (WAN 2.5 only)"),
    resolution: str = Form("720p", description="Output resolution: 480p, 720p, or 1080p (1080p is WAN 2.5 only)"),
    duration: int = Form(5, description="Duration of extended video in seconds (varies by model)"),
    enable_prompt_expansion: bool = Form(False, description="Enable prompt optimizer (WAN 2.5 only)"),
    generate_audio: bool = Form(True, description="Generate audio for extended video (Seedance 1.5 Pro only)"),
    camera_fixed: bool = Form(False, description="Fix camera position (Seedance 1.5 Pro only)"),
    seed: Optional[int] = Form(None, description="Random seed for reproducibility"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Extend video duration using WAN 2.5, WAN 2.2 Spicy, or Seedance 1.5 Pro video-extend.

    Takes a short video clip and extends it with motion/audio continuity.
    """
    try:
        user_id = require_authenticated_user(current_user)

        if not file.content_type.startswith('video/'):
            raise HTTPException(status_code=400, detail="File must be a video")

        # Validate model-specific constraints
        if model in ("wan-2.2-spicy", "wavespeed-ai/wan-2.2-spicy/video-extend"):
            if duration not in [5, 8]:
                raise HTTPException(status_code=400, detail="WAN 2.2 Spicy only supports 5 or 8 second durations")
            if resolution not in ["480p", "720p"]:
                raise HTTPException(status_code=400, detail="WAN 2.2 Spicy only supports 480p or 720p resolution")
            if audio:
                raise HTTPException(status_code=400, detail="Audio is not supported for WAN 2.2 Spicy")
        elif model in ("seedance-1.5-pro", "bytedance/seedance-v1.5-pro/video-extend"):
            if duration < 4 or duration > 12:
                raise HTTPException(status_code=400, detail="Seedance 1.5 Pro only supports 4-12 second durations")
            if resolution not in ["480p", "720p"]:
                raise HTTPException(status_code=400, detail="Seedance 1.5 Pro only supports 480p or 720p resolution")
            if audio:
                raise HTTPException(status_code=400, detail="Audio upload is not supported for Seedance 1.5 Pro (use generate_audio instead)")
        else:
            # WAN 2.5 validation
            if duration < 3 or duration > 10:
                raise HTTPException(status_code=400, detail="WAN 2.5 duration must be between 3 and 10 seconds")
            if resolution not in ["480p", "720p", "1080p"]:
                raise HTTPException(status_code=400, detail="WAN 2.5 resolution must be 480p, 720p, or 1080p")

        # Initialize services
        video_service = VideoStudioService()
        asset_service = ContentAssetService(db)

        logger.info(f"[VideoStudio] Video extension request: user={user_id}, model={model}, duration={duration}s, resolution={resolution}")

        # Read video file
        video_data = await file.read()

        # Read audio file if provided (WAN 2.5 only)
        audio_data = None
        if audio:
            if model in ("wan-2.2-spicy", "wavespeed-ai/wan-2.2-spicy/video-extend", "seedance-1.5-pro", "bytedance/seedance-v1.5-pro/video-extend"):
                raise HTTPException(status_code=400, detail=f"Audio upload is not supported for the {model} model")

            if not audio.content_type.startswith('audio/'):
                raise HTTPException(status_code=400, detail="Uploaded audio must have an audio content type")

            # Validate audio file size (max 15MB per documentation)
            audio_data = await audio.read()
            if len(audio_data) > 15 * 1024 * 1024:
                raise HTTPException(status_code=400, detail="Audio file must be less than 15MB")

            # Note: audio duration validation (3-30s) would require parsing the audio file.
            # The upstream API enforces it; it could also be added here if needed.

        # Extend video
        result = await video_service.extend_video(
            video_data=video_data,
            prompt=prompt,
            model=model,
            audio_data=audio_data,
            negative_prompt=negative_prompt,
            resolution=resolution,
            duration=duration,
            enable_prompt_expansion=enable_prompt_expansion,
            generate_audio=generate_audio,
            camera_fixed=camera_fixed,
            seed=seed,
            user_id=user_id,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Video extension failed: {result.get('error', 'Unknown error')}"
            )

        # Store extended version in asset library
        video_url = result.get("video_url")
        if video_url:
            asset_metadata = {
                "original_file": file.filename,
                "prompt": prompt,
                "duration": duration,
                "resolution": resolution,
                "generation_type": "extend",
                "model": result.get("model_used", "alibaba/wan-2.5/video-extend"),
            }

            asset_service.create_asset(
                user_id=user_id,
                filename=f"extended_{uuid.uuid4().hex[:8]}.mp4",
                file_url=video_url,
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=result.get("cost", 0),
                tags=["video_studio", "extend", "ai-extended"]
            )

        logger.info(f"[VideoStudio] Video extension successful: user={user_id}, url={video_url}")

        return {
            "success": True,
            "video_url": video_url,
            "cost": result.get("cost", 0),
            "duration": duration,
            "resolution": resolution,
            "model_used": result.get("model_used", "alibaba/wan-2.5/video-extend"),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Video extension error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Video extension failed: {str(e)}")
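The per-model branches above amount to a small constraint table; a condensed sketch of the same rules, which a client could use for pre-validation (the table layout and helper name are illustrative, and the endpoint remains the source of truth):

# Sketch of the per-model extend constraints enforced above (illustrative helper).
EXTEND_LIMITS = {
    "wan-2.5": {"durations": range(3, 11), "resolutions": {"480p", "720p", "1080p"}, "audio_upload": True},
    "wan-2.2-spicy": {"durations": (5, 8), "resolutions": {"480p", "720p"}, "audio_upload": False},
    "seedance-1.5-pro": {"durations": range(4, 13), "resolutions": {"480p", "720p"}, "audio_upload": False},
}

def validate_extend_params(model: str, duration: int, resolution: str, has_audio: bool) -> None:
    limits = EXTEND_LIMITS.get(model, EXTEND_LIMITS["wan-2.5"])
    if duration not in limits["durations"]:
        raise ValueError(f"{model} does not support {duration}s extensions")
    if resolution not in limits["resolutions"]:
        raise ValueError(f"{model} does not support {resolution}")
    if has_audio and not limits["audio_upload"]:
        raise ValueError(f"{model} does not accept an uploaded audio file")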
237
backend/routers/video_studio/endpoints/face_swap.py
Normal file
@@ -0,0 +1,237 @@
"""
Face Swap endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any
import uuid

from ...database import get_db
from ...models.content_asset_models import AssetSource, AssetType
from ...services.video_studio import VideoStudioService
from ...services.video_studio.face_swap_service import FaceSwapService
from ...services.asset_service import ContentAssetService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger

logger = get_service_logger("video_studio.endpoints.face_swap")

router = APIRouter()


@router.post("/face-swap")
async def swap_face(
    background_tasks: BackgroundTasks,
    image_file: UploadFile = File(..., description="Reference image for character swap"),
    video_file: UploadFile = File(..., description="Source video for face swap"),
    model: str = Form("mocha", description="AI model to use: 'mocha' or 'video-face-swap'"),
    prompt: Optional[str] = Form(None, description="Optional prompt to guide the swap (MoCha only)"),
    resolution: str = Form("480p", description="Output resolution for MoCha (480p or 720p)"),
    seed: Optional[int] = Form(None, description="Random seed for reproducibility (MoCha only, -1 for random)"),
    target_gender: str = Form("all", description="Filter which faces to swap (video-face-swap only: all, female, male)"),
    target_index: int = Form(0, description="Select which face to swap (video-face-swap only: 0 = largest)"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Perform a face/character swap using MoCha or Video Face Swap.

    Supports two models:
    1. MoCha (wavespeed-ai/wan-2.1/mocha) - Character replacement with motion preservation
       - Resolution: 480p ($0.04/s) or 720p ($0.08/s)
       - Max length: 120 seconds
       - Features: Prompt guidance, seed control

    2. Video Face Swap (wavespeed-ai/video-face-swap) - Simple face swap with multi-face support
       - Pricing: $0.01/s
       - Max length: 10 minutes (600 seconds)
       - Features: Gender filter, face index selection

    Requirements:
    - Image: Clear reference image (JPG/PNG, avoid WEBP)
    - Video: Source video (max 120s for MoCha, max 600s for video-face-swap)
    - Minimum charge: 5 seconds for both models
    """
    try:
        user_id = require_authenticated_user(current_user)

        # Validate file types
        if not image_file.content_type.startswith('image/'):
            raise HTTPException(status_code=400, detail="Image file must be an image")

        if not video_file.content_type.startswith('video/'):
            raise HTTPException(status_code=400, detail="Video file must be a video")

        # Validate model (mirrors the estimate-cost endpoint below)
        if model not in ("mocha", "video-face-swap"):
            raise HTTPException(
                status_code=400,
                detail="Model must be 'mocha' or 'video-face-swap'"
            )

        # Validate resolution
        if resolution not in ("480p", "720p"):
            raise HTTPException(
                status_code=400,
                detail="Resolution must be '480p' or '720p'"
            )

        # Initialize services
        face_swap_service = FaceSwapService()
        asset_service = ContentAssetService(db)

        logger.info(
            f"[FaceSwap] Face swap request: user={user_id}, "
            f"model={model}, resolution={resolution}"
        )

        # Read files
        image_data = await image_file.read()
        video_data = await video_file.read()

        # Validate file sizes
        if len(image_data) > 10 * 1024 * 1024:  # 10MB
            raise HTTPException(status_code=400, detail="Image file must be less than 10MB")

        if len(video_data) > 500 * 1024 * 1024:  # 500MB
            raise HTTPException(status_code=400, detail="Video file must be less than 500MB")

        # Perform face swap
        result = await face_swap_service.swap_face(
            image_data=image_data,
            video_data=video_data,
            model=model,
            prompt=prompt,
            resolution=resolution,
            seed=seed,
            target_gender=target_gender,
            target_index=target_index,
            user_id=user_id,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Face swap failed: {result.get('error', 'Unknown error')}"
            )

        # Store in asset library
        video_url = result.get("video_url")
        if video_url:
            asset_metadata = {
                "image_file": image_file.filename,
                "video_file": video_file.filename,
                "model": model,
                "operation_type": "face_swap",
            }

            if model == "mocha":
                asset_metadata.update({
                    "prompt": prompt,
                    "resolution": resolution,
                    "seed": seed,
                })
            else:  # video-face-swap
                asset_metadata.update({
                    "target_gender": target_gender,
                    "target_index": target_index,
                })

            asset_service.create_asset(
                user_id=user_id,
                filename=f"face_swap_{uuid.uuid4().hex[:8]}.mp4",
                file_url=video_url,
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=result.get("cost", 0),
                tags=["video_studio", "face_swap", "ai-generated"],
            )

        logger.info(f"[FaceSwap] Face swap successful: user={user_id}, url={video_url}")

        return {
            "success": True,
            "video_url": video_url,
            "cost": result.get("cost", 0),
            "model": model,
            "resolution": result.get("resolution"),
            "metadata": result.get("metadata", {}),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[FaceSwap] Face swap error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Face swap failed: {str(e)}")


@router.post("/face-swap/estimate-cost")
async def estimate_face_swap_cost(
    model: str = Form("mocha", description="AI model to use: 'mocha' or 'video-face-swap'"),
    resolution: str = Form("480p", description="Output resolution for MoCha (480p or 720p)"),
    estimated_duration: float = Form(10.0, description="Estimated video duration in seconds", ge=5.0),
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Estimate the cost of a face swap operation.

    Returns the estimated cost based on model, resolution (for MoCha), and duration.
    """
    try:
        require_authenticated_user(current_user)

        # Validate model
        if model not in ("mocha", "video-face-swap"):
            raise HTTPException(
                status_code=400,
                detail="Model must be 'mocha' or 'video-face-swap'"
            )

        # Validate resolution (only for MoCha)
        if model == "mocha":
            if resolution not in ("480p", "720p"):
                raise HTTPException(
                    status_code=400,
                    detail="Resolution must be '480p' or '720p' for MoCha"
                )
            max_duration = 120.0
        else:
            max_duration = 600.0  # 10 minutes for video-face-swap

        if estimated_duration > max_duration:
            raise HTTPException(
                status_code=400,
                detail=f"Estimated duration must be <= {max_duration} seconds for {model}"
            )

        face_swap_service = FaceSwapService()
        estimated_cost = face_swap_service.calculate_cost(model, resolution if model == "mocha" else None, estimated_duration)

        # Pricing info
        if model == "mocha":
            cost_per_second = 0.04 if resolution == "480p" else 0.08
            return {
                "estimated_cost": estimated_cost,
                "model": model,
                "resolution": resolution,
                "estimated_duration": estimated_duration,
                "cost_per_second": cost_per_second,
                "pricing_model": "per_second",
                "min_duration": 5.0,
                "max_duration": 120.0,
                "min_charge": cost_per_second * 5.0,
            }
        else:  # video-face-swap
            return {
                "estimated_cost": estimated_cost,
                "model": model,
                "estimated_duration": estimated_duration,
                "cost_per_second": 0.01,
                "pricing_model": "per_second",
                "min_duration": 5.0,
                "max_duration": 600.0,
                "min_charge": 0.05,  # $0.01 * 5 seconds
            }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[FaceSwap] Failed to estimate cost: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to estimate cost: {str(e)}")
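The two pricing schemes in the docstring reduce to a single function; the following sketch mirrors the documented rates for quick checks (FaceSwapService.calculate_cost remains the authoritative implementation; the function name is illustrative):

# Sketch of the documented face-swap pricing.
def face_swap_cost(model: str, duration_s: float, resolution: str = "480p") -> float:
    # MoCha bills per second by resolution; video-face-swap is a flat $0.01/s.
    rate = (0.04 if resolution == "480p" else 0.08) if model == "mocha" else 0.01
    return max(5.0, duration_s) * rate  # both models carry a 5-second minimum charge

assert face_swap_cost("video-face-swap", 3.0) == 0.05  # minimum charge applies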
82
backend/routers/video_studio/endpoints/models.py
Normal file
@@ -0,0 +1,82 @@
"""
Model listing and cost estimation endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException
from typing import Optional, Dict, Any

from ...services.video_studio import VideoStudioService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger

logger = get_service_logger("video_studio.endpoints.models")

router = APIRouter()


@router.get("/models")
async def list_available_models(
    operation_type: Optional[str] = None,
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    List available AI models for video generation.

    Optionally filter by operation type (text-to-video, image-to-video, avatar, enhancement).
    """
    try:
        user_id = require_authenticated_user(current_user)

        video_service = VideoStudioService()

        models = video_service.get_available_models(operation_type)

        logger.info(f"[VideoStudio] Listed models for user={user_id}, operation={operation_type}")

        return {
            "success": True,
            "models": models,
            "operation_type": operation_type,
        }

    except HTTPException:
        # Re-raise HTTP errors (e.g., auth failures) unchanged instead of converting them to 500s
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Error listing models: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to list models: {str(e)}")


@router.get("/cost-estimate")
async def estimate_cost(
    operation_type: str,
    duration: Optional[int] = None,
    resolution: Optional[str] = None,
    model: Optional[str] = None,
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Estimate cost for video generation operations.

    Provides real-time cost estimates before generation.
    """
    try:
        user_id = require_authenticated_user(current_user)

        video_service = VideoStudioService()

        estimate = video_service.estimate_cost(
            operation_type=operation_type,
            duration=duration,
            resolution=resolution,
            model=model,
        )

        logger.info(f"[VideoStudio] Cost estimate for user={user_id}: {estimate}")

        return {
            "success": True,
            "estimate": estimate,
            "operation_type": operation_type,
        }

    except HTTPException:
        # Re-raise HTTP errors (e.g., auth failures) unchanged instead of converting them to 500s
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Error estimating cost: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to estimate cost: {str(e)}")
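For completeness, a minimal client call for model discovery; httpx, the base URL, the /api/video-studio prefix (inferred from the docstrings in this commit), and the token are assumptions:

# Hypothetical client call for GET /models.
import httpx

models = httpx.get(
    "http://localhost:8000/api/video-studio/models",
    params={"operation_type": "text-to-video"},
    headers={"Authorization": "Bearer <token>"},
).json()["models"]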
89
backend/routers/video_studio/endpoints/prompt.py
Normal file
@@ -0,0 +1,89 @@
"""
Prompt optimization endpoints for Video Studio.
"""

from fastapi import APIRouter, Depends, HTTPException
from pydantic import BaseModel, Field
from typing import Optional, Dict, Any

from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger
from services.wavespeed.client import WaveSpeedClient

logger = get_service_logger("video_studio.endpoints.prompt")

router = APIRouter()


class PromptOptimizeRequest(BaseModel):
    text: str = Field(..., description="The prompt text to optimize")
    mode: Optional[str] = Field(
        default="video",
        pattern="^(image|video)$",
        description="Optimization mode: 'image' or 'video' (default: 'video' for Video Studio)"
    )
    style: Optional[str] = Field(
        default="default",
        pattern="^(default|artistic|photographic|technical|anime|realistic)$",
        description="Style: 'default', 'artistic', 'photographic', 'technical', 'anime', or 'realistic'"
    )
    image: Optional[str] = Field(None, description="Base64-encoded image for context (optional)")


class PromptOptimizeResponse(BaseModel):
    optimized_prompt: str
    success: bool


@router.post("/optimize-prompt")
async def optimize_prompt(
    request: PromptOptimizeRequest,
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> PromptOptimizeResponse:
    """
    Optimize a prompt using the WaveSpeed prompt optimizer.

    The WaveSpeedAI Prompt Optimizer enhances prompts specifically for image and video
    generation workflows. It restructures and enriches the input prompt to improve:
    - Visual clarity and composition
    - Cinematic framing and lighting
    - Camera movement and style consistency
    - Motion dynamics for video generation

    This can produce noticeably better outputs across video generation models such as
    FLUX, Wan, Kling, Veo, and Seedance.
    """
    try:
        user_id = require_authenticated_user(current_user)

        if not request.text or not request.text.strip():
            raise HTTPException(status_code=400, detail="Prompt text is required")

        # Default to "video" mode for Video Studio
        mode = request.mode or "video"
        style = request.style or "default"

        logger.info(f"[VideoStudio] Optimizing prompt for user {user_id} (mode={mode}, style={style})")

        client = WaveSpeedClient()
        optimized_prompt = client.optimize_prompt(
            text=request.text.strip(),
            mode=mode,
            style=style,
            image=request.image,  # optional base64 image
            enable_sync_mode=True,
            timeout=30
        )

        logger.info(f"[VideoStudio] Prompt optimized successfully for user {user_id}")

        return PromptOptimizeResponse(
            optimized_prompt=optimized_prompt,
            success=True
        )

    except HTTPException:
        raise
    except Exception as exc:
        logger.error(f"[VideoStudio] Failed to optimize prompt: {exc}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to optimize prompt: {str(exc)}")
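Unlike the multipart endpoints above, /optimize-prompt takes a JSON body matching PromptOptimizeRequest. A minimal sketch, where the client library, URL, token, and field values are illustrative:

# Hypothetical JSON call to /optimize-prompt.
import httpx

resp = httpx.post(
    "http://localhost:8000/api/video-studio/optimize-prompt",
    json={"text": "a fox running through snow", "mode": "video", "style": "photographic"},
    headers={"Authorization": "Bearer <token>"},
)
resp.raise_for_status()
optimized = resp.json()["optimized_prompt"]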
74
backend/routers/video_studio/endpoints/serve.py
Normal file
@@ -0,0 +1,74 @@
"""
Video serving endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import FileResponse
from typing import Dict, Any
from pathlib import Path

from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger

logger = get_service_logger("video_studio.endpoints.serve")

router = APIRouter()


@router.get("/videos/{user_id}/{video_filename:path}", summary="Serve Video Studio Video")
async def serve_video_studio_video(
    user_id: str,
    video_filename: str,
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> FileResponse:
    """
    Serve a generated Video Studio video file.

    Security: only the video owner can access their videos.
    """
    try:
        # Verify the requesting user matches the video owner
        authenticated_user_id = require_authenticated_user(current_user)
        if authenticated_user_id != user_id:
            raise HTTPException(
                status_code=403,
                detail="You can only access your own videos"
            )

        # Get base directory
        base_dir = Path(__file__).parent.parent.parent.parent
        video_studio_videos_dir = base_dir / "video_studio_videos"
        video_path = video_studio_videos_dir / user_id / video_filename

        # Security: ensure the resolved path stays inside the video_studio_videos directory.
        # is_relative_to (Python 3.9+) avoids the prefix-collision false positives of a raw
        # startswith() check (e.g., a sibling "video_studio_videos_backup" directory).
        try:
            resolved_path = video_path.resolve()
            resolved_base = video_studio_videos_dir.resolve()
            if not resolved_path.is_relative_to(resolved_base):
                raise HTTPException(
                    status_code=403,
                    detail="Invalid video path"
                )
        except (OSError, ValueError) as e:
            logger.error(f"[VideoStudio] Path resolution error: {e}")
            raise HTTPException(status_code=403, detail="Invalid video path")

        # Check if file exists
        if not video_path.exists() or not video_path.is_file():
            raise HTTPException(
                status_code=404,
                detail=f"Video not found: {video_filename}"
            )

        logger.info(f"[VideoStudio] Serving video: {video_path}")
        return FileResponse(
            path=str(video_path),
            media_type="video/mp4",
            filename=video_filename,
        )

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[VideoStudio] Failed to serve video: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Failed to serve video: {str(e)}")
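One note on the containment check above: a raw string-prefix test would accept sibling directories that merely share the prefix, which is why the check uses Path.is_relative_to (Python 3.9+). A minimal illustration with made-up paths:

# Illustration of prefix-collision with made-up paths.
from pathlib import Path

base = Path("/srv/app/video_studio_videos")
evil = Path("/srv/app/video_studio_videos_backup/secret.mp4")
assert str(evil).startswith(str(base))   # prefix check wrongly passes
assert not evil.is_relative_to(base)     # is_relative_to correctly rejects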
195
backend/routers/video_studio/endpoints/social.py
Normal file
@@ -0,0 +1,195 @@
"""
Social Optimizer endpoints.
"""

from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
from sqlalchemy.orm import Session
from typing import Optional, Dict, Any, List
import json

from ...database import get_db
from ...models.content_asset_models import AssetSource, AssetType
from ...services.video_studio import VideoStudioService
from ...services.video_studio.social_optimizer_service import (
    SocialOptimizerService,
    OptimizationOptions,
)
from ...services.asset_service import ContentAssetService
from ...utils.auth import get_current_user, require_authenticated_user
from ...utils.logger_utils import get_service_logger

logger = get_service_logger("video_studio.endpoints.social")

router = APIRouter()


@router.post("/social/optimize")
async def optimize_for_social(
    background_tasks: BackgroundTasks,
    file: UploadFile = File(..., description="Source video file"),
    platforms: str = Form(..., description="Comma-separated list of platforms (instagram,tiktok,youtube,linkedin,facebook,twitter)"),
    auto_crop: bool = Form(True, description="Auto-crop to platform aspect ratio"),
    generate_thumbnails: bool = Form(True, description="Generate thumbnails"),
    compress: bool = Form(True, description="Compress for file size limits"),
    trim_mode: str = Form("beginning", description="Trim mode if video exceeds duration (beginning, middle, end)"),
    current_user: Dict[str, Any] = Depends(get_current_user),
    db: Session = Depends(get_db),
) -> Dict[str, Any]:
    """
    Optimize a video for multiple social media platforms.

    Creates platform-optimized versions with:
    - Aspect ratio conversion
    - Duration trimming
    - File size compression
    - Thumbnail generation

    Returns optimized videos for each selected platform.
    """
    try:
        user_id = require_authenticated_user(current_user)

        if not file.content_type.startswith('video/'):
            raise HTTPException(status_code=400, detail="File must be a video")

        # Parse platforms
        platform_list = [p.strip().lower() for p in platforms.split(",") if p.strip()]
        if not platform_list:
            raise HTTPException(status_code=400, detail="At least one platform must be specified")

        # Validate platforms
        valid_platforms = ["instagram", "tiktok", "youtube", "linkedin", "facebook", "twitter"]
        invalid_platforms = [p for p in platform_list if p not in valid_platforms]
        if invalid_platforms:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid platforms: {', '.join(invalid_platforms)}. Valid platforms: {', '.join(valid_platforms)}"
            )

        # Validate trim_mode
        valid_trim_modes = ["beginning", "middle", "end"]
        if trim_mode not in valid_trim_modes:
            raise HTTPException(
                status_code=400,
                detail=f"Invalid trim_mode. Must be one of: {', '.join(valid_trim_modes)}"
            )

        # Initialize services
        video_service = VideoStudioService()
        social_optimizer = SocialOptimizerService()
        asset_service = ContentAssetService(db)

        logger.info(
            f"[SocialOptimizer] Optimization request: "
            f"user={user_id}, platforms={platform_list}"
        )

        # Read video file
        video_data = await file.read()

        # Create optimization options
        options = OptimizationOptions(
            auto_crop=auto_crop,
            generate_thumbnails=generate_thumbnails,
            compress=compress,
            trim_mode=trim_mode,
        )

        # Optimize for platforms
        result = await social_optimizer.optimize_for_platforms(
            video_bytes=video_data,
            platforms=platform_list,
            options=options,
            user_id=user_id,
            video_studio_service=video_service,
        )

        if not result.get("success"):
            raise HTTPException(
                status_code=500,
                detail=f"Optimization failed: {result.get('errors', 'Unknown error')}"
            )

        # Store results in asset library
        for platform_result in result.get("results", []):
            asset_metadata = {
                "platform": platform_result["platform"],
                "name": platform_result["name"],
                "aspect_ratio": platform_result["aspect_ratio"],
                "duration": platform_result["duration"],
                "file_size": platform_result["file_size"],
                "width": platform_result["width"],
                "height": platform_result["height"],
                "optimization_type": "social_optimizer",
            }

            asset_service.create_asset(
                user_id=user_id,
                filename=f"social_{platform_result['platform']}_{platform_result['name'].replace(' ', '_').lower()}.mp4",
                file_url=platform_result["video_url"],
                asset_type=AssetType.VIDEO,
                source_module=AssetSource.VIDEO_STUDIO,
                asset_metadata=asset_metadata,
                cost=0.0,  # free (local FFmpeg processing)
                tags=["video_studio", "social_optimizer", platform_result["platform"]],
            )

        logger.info(
            f"[SocialOptimizer] Optimization successful: "
            f"user={user_id}, platforms={len(result.get('results', []))}"
        )

        return {
            "success": True,
            "results": result.get("results", []),
            "errors": result.get("errors", []),
            "cost": result.get("cost", 0.0),
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"[SocialOptimizer] Optimization error: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=f"Optimization failed: {str(e)}")


@router.get("/social/platforms")
async def get_platforms(
    current_user: Dict[str, Any] = Depends(get_current_user),
) -> Dict[str, Any]:
    """
    Get the list of available platforms and their specifications.
    """
    try:
        require_authenticated_user(current_user)

        from ...services.video_studio.platform_specs import (
            PLATFORM_SPECS,
            Platform,
||||
)
|
||||
|
||||
platforms_data = {}
|
||||
for platform in Platform:
|
||||
specs = [spec for spec in PLATFORM_SPECS if spec.platform == platform]
|
||||
platforms_data[platform.value] = [
|
||||
{
|
||||
"name": spec.name,
|
||||
"aspect_ratio": spec.aspect_ratio,
|
||||
"width": spec.width,
|
||||
"height": spec.height,
|
||||
"max_duration": spec.max_duration,
|
||||
"max_file_size_mb": spec.max_file_size_mb,
|
||||
"formats": spec.formats,
|
||||
"description": spec.description,
|
||||
}
|
||||
for spec in specs
|
||||
]
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"platforms": platforms_data,
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"[SocialOptimizer] Failed to get platforms: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get platforms: {str(e)}")
|
||||
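For reference, a minimal client sketch for the optimizer endpoint. The base URL, the /api/video-studio mount prefix, and the bearer token are assumptions; the actual prefix is set where this router is included, which is outside this file.

# Hypothetical client call for /social/optimize using httpx;
# prefix and auth scheme are assumptions, the form fields match the endpoint above.
import httpx

def optimize_clip(token: str, path: str) -> dict:
    with open(path, "rb") as f:
        resp = httpx.post(
            "http://localhost:8000/api/video-studio/social/optimize",
            headers={"Authorization": f"Bearer {token}"},
            files={"file": ("clip.mp4", f, "video/mp4")},
            data={
                "platforms": "instagram,tiktok",  # comma-separated, validated server-side
                "auto_crop": "true",
                "trim_mode": "beginning",
            },
            timeout=300.0,  # FFmpeg processing can take a while
        )
    resp.raise_for_status()
    return resp.json()  # {"success": True, "results": [...], "errors": [...], "cost": 0.0}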
40
backend/routers/video_studio/endpoints/tasks.py
Normal file
@@ -0,0 +1,40 @@
"""
|
||||
Async task status endpoints.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from typing import Dict, Any
|
||||
|
||||
from ...utils.auth import get_current_user, require_authenticated_user
|
||||
from ...utils.logger_utils import get_service_logger
|
||||
from api.story_writer.task_manager import task_manager
|
||||
|
||||
logger = get_service_logger("video_studio.endpoints.tasks")
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/task/{task_id}/status")
|
||||
async def get_task_status(
|
||||
task_id: str,
|
||||
current_user: Dict[str, Any] = Depends(get_current_user),
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Poll for video generation task status.
|
||||
|
||||
Returns task status, progress, and result when complete.
|
||||
"""
|
||||
try:
|
||||
require_authenticated_user(current_user)
|
||||
|
||||
status = task_manager.get_task_status(task_id)
|
||||
if not status:
|
||||
raise HTTPException(status_code=404, detail="Task not found or expired")
|
||||
|
||||
return status
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[VideoStudio] Failed to get task status: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get task status: {str(e)}")
|
||||
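This endpoint is meant for client-side polling. A hedged polling sketch follows; the mount prefix and the "status" field name in the task payload are assumptions based on how the background tasks later in this commit report state.

# Hypothetical polling loop for /task/{task_id}/status.
import time
import httpx

def wait_for_task(token: str, task_id: str, interval: float = 2.0) -> dict:
    url = f"http://localhost:8000/api/video-studio/task/{task_id}/status"
    headers = {"Authorization": f"Bearer {token}"}
    while True:
        payload = httpx.get(url, headers=headers).json()
        if payload.get("status") in ("completed", "failed"):
            return payload  # includes progress, message, and result when complete
        time.sleep(interval)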
144
backend/routers/video_studio/endpoints/transform.py
Normal file
@@ -0,0 +1,144 @@
"""
|
||||
Video transformation endpoints.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import Optional, Dict, Any
|
||||
import uuid
|
||||
|
||||
from ...database import get_db
|
||||
from ...models.content_asset_models import AssetSource, AssetType
|
||||
from ...services.video_studio import VideoStudioService
|
||||
from ...services.asset_service import ContentAssetService
|
||||
from ...utils.auth import get_current_user, require_authenticated_user
|
||||
from ...utils.logger_utils import get_service_logger
|
||||
|
||||
logger = get_service_logger("video_studio.endpoints.transform")
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/transform")
|
||||
async def transform_video(
|
||||
background_tasks: BackgroundTasks,
|
||||
file: UploadFile = File(..., description="Video file to transform"),
|
||||
transform_type: str = Form(..., description="Type of transformation: format, aspect, speed, resolution, compress"),
|
||||
# Format conversion parameters
|
||||
output_format: Optional[str] = Form(None, description="Output format for format conversion (mp4, mov, webm, gif)"),
|
||||
codec: Optional[str] = Form(None, description="Video codec (libx264, libvpx-vp9, etc.)"),
|
||||
quality: Optional[str] = Form(None, description="Quality preset (high, medium, low)"),
|
||||
audio_codec: Optional[str] = Form(None, description="Audio codec (aac, mp3, opus, etc.)"),
|
||||
# Aspect ratio parameters
|
||||
target_aspect: Optional[str] = Form(None, description="Target aspect ratio (16:9, 9:16, 1:1, 4:5, 21:9)"),
|
||||
crop_mode: Optional[str] = Form("center", description="Crop mode for aspect conversion (center, letterbox)"),
|
||||
# Speed parameters
|
||||
speed_factor: Optional[float] = Form(None, description="Speed multiplier (0.25, 0.5, 1.0, 1.5, 2.0, 4.0)"),
|
||||
# Resolution parameters
|
||||
target_resolution: Optional[str] = Form(None, description="Target resolution (480p, 720p, 1080p, 1440p, 4k)"),
|
||||
maintain_aspect: bool = Form(True, description="Whether to maintain aspect ratio when scaling"),
|
||||
# Compression parameters
|
||||
target_size_mb: Optional[float] = Form(None, description="Target file size in MB for compression"),
|
||||
compress_quality: Optional[str] = Form(None, description="Quality preset for compression (high, medium, low)"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user),
|
||||
db: Session = Depends(get_db),
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Transform video using FFmpeg/MoviePy (format, aspect, speed, resolution, compression).
|
||||
|
||||
Supports:
|
||||
- Format conversion (MP4, MOV, WebM, GIF)
|
||||
- Aspect ratio conversion (16:9, 9:16, 1:1, 4:5, 21:9)
|
||||
- Speed adjustment (0.25x - 4x)
|
||||
- Resolution scaling (480p - 4K)
|
||||
- Compression (file size optimization)
|
||||
"""
|
||||
try:
|
||||
user_id = require_authenticated_user(current_user)
|
||||
|
||||
if not file.content_type.startswith('video/'):
|
||||
raise HTTPException(status_code=400, detail="File must be a video")
|
||||
|
||||
# Initialize services
|
||||
video_service = VideoStudioService()
|
||||
asset_service = ContentAssetService(db)
|
||||
|
||||
logger.info(
|
||||
f"[VideoStudio] Video transformation request: "
|
||||
f"user={user_id}, type={transform_type}"
|
||||
)
|
||||
|
||||
# Read video file
|
||||
video_data = await file.read()
|
||||
|
||||
# Validate transform type
|
||||
valid_transform_types = ["format", "aspect", "speed", "resolution", "compress"]
|
||||
if transform_type not in valid_transform_types:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail=f"Invalid transform_type. Must be one of: {', '.join(valid_transform_types)}"
|
||||
)
|
||||
|
||||
# Transform video
|
||||
result = await video_service.transform_video(
|
||||
video_data=video_data,
|
||||
transform_type=transform_type,
|
||||
user_id=user_id,
|
||||
output_format=output_format,
|
||||
codec=codec,
|
||||
quality=quality,
|
||||
audio_codec=audio_codec,
|
||||
target_aspect=target_aspect,
|
||||
crop_mode=crop_mode,
|
||||
speed_factor=speed_factor,
|
||||
target_resolution=target_resolution,
|
||||
maintain_aspect=maintain_aspect,
|
||||
target_size_mb=target_size_mb,
|
||||
compress_quality=compress_quality,
|
||||
)
|
||||
|
||||
if not result.get("success"):
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Video transformation failed: {result.get('error', 'Unknown error')}"
|
||||
)
|
||||
|
||||
# Store transformed version in asset library
|
||||
video_url = result.get("video_url")
|
||||
if video_url:
|
||||
asset_metadata = {
|
||||
"original_file": file.filename,
|
||||
"transform_type": transform_type,
|
||||
"output_format": output_format,
|
||||
"target_aspect": target_aspect,
|
||||
"speed_factor": speed_factor,
|
||||
"target_resolution": target_resolution,
|
||||
"generation_type": "transformation",
|
||||
}
|
||||
|
||||
asset_service.create_asset(
|
||||
user_id=user_id,
|
||||
filename=f"transformed_{uuid.uuid4().hex[:8]}.mp4",
|
||||
file_url=video_url,
|
||||
asset_type=AssetType.VIDEO,
|
||||
source_module=AssetSource.VIDEO_STUDIO,
|
||||
asset_metadata=asset_metadata,
|
||||
cost=result.get("cost", 0),
|
||||
tags=["video_studio", "transform", transform_type]
|
||||
)
|
||||
|
||||
logger.info(f"[VideoStudio] Video transformation successful: user={user_id}, url={video_url}")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"video_url": video_url,
|
||||
"cost": result.get("cost", 0),
|
||||
"transform_type": transform_type,
|
||||
"metadata": result.get("metadata", {}),
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[VideoStudio] Video transformation error: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Video transformation failed: {str(e)}")
|
||||
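A minimal sketch of one /transform call, here a landscape-to-vertical aspect conversion. The /api/video-studio prefix and the bearer token are assumptions, since the router prefix is assigned where this router is included:

# Hypothetical /transform call converting a clip to 9:16.
import httpx

with open("clip.mp4", "rb") as f:
    resp = httpx.post(
        "http://localhost:8000/api/video-studio/transform",
        headers={"Authorization": "Bearer <token>"},
        files={"file": ("clip.mp4", f, "video/mp4")},
        data={
            "transform_type": "aspect",  # format | aspect | speed | resolution | compress
            "target_aspect": "9:16",
            "crop_mode": "center",       # or "letterbox"
        },
        timeout=300.0,
    )
print(resp.json())  # {"success": true, "video_url": ..., "cost": ..., "metadata": {...}}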
146
backend/routers/video_studio/endpoints/video_background_remover.py
Normal file
@@ -0,0 +1,146 @@
"""
|
||||
Video Background Remover endpoints.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import Optional, Dict, Any
|
||||
import uuid
|
||||
|
||||
from ...database import get_db
|
||||
from ...models.content_asset_models import AssetSource, AssetType
|
||||
from ...services.video_studio.video_background_remover_service import VideoBackgroundRemoverService
|
||||
from ...services.asset_service import ContentAssetService
|
||||
from ...utils.auth import get_current_user, require_authenticated_user
|
||||
from ...utils.logger_utils import get_service_logger
|
||||
|
||||
logger = get_service_logger("video_studio.endpoints.video_background_remover")
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/video-background-remover")
|
||||
async def remove_background(
|
||||
background_tasks: BackgroundTasks,
|
||||
video_file: UploadFile = File(..., description="Source video for background removal"),
|
||||
background_image_file: Optional[UploadFile] = File(None, description="Optional background image for replacement"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user),
|
||||
db: Session = Depends(get_db),
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Remove or replace video background using WaveSpeed Video Background Remover.
|
||||
|
||||
Features:
|
||||
- Clean matting and edge-aware blending
|
||||
- Natural compositing for realistic results
|
||||
- Optional background image replacement
|
||||
- Supports videos up to 10 minutes
|
||||
|
||||
Args:
|
||||
video_file: Source video file
|
||||
background_image_file: Optional replacement background image
|
||||
"""
|
||||
try:
|
||||
user_id = require_authenticated_user(current_user)
|
||||
|
||||
if not video_file.content_type.startswith('video/'):
|
||||
raise HTTPException(status_code=400, detail="File must be a video")
|
||||
|
||||
# Initialize services
|
||||
background_remover_service = VideoBackgroundRemoverService()
|
||||
asset_service = ContentAssetService(db)
|
||||
|
||||
logger.info(f"[VideoBackgroundRemover] Background removal request: user={user_id}, has_background={background_image_file is not None}")
|
||||
|
||||
# Read video file
|
||||
video_data = await video_file.read()
|
||||
|
||||
# Read background image if provided
|
||||
background_image_data = None
|
||||
if background_image_file:
|
||||
if not background_image_file.content_type.startswith('image/'):
|
||||
raise HTTPException(status_code=400, detail="Background file must be an image")
|
||||
background_image_data = await background_image_file.read()
|
||||
|
||||
# Remove/replace background
|
||||
result = await background_remover_service.remove_background(
|
||||
video_data=video_data,
|
||||
background_image_data=background_image_data,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
if not result.get("success"):
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Background removal failed: {result.get('error', 'Unknown error')}"
|
||||
)
|
||||
|
||||
# Store processed video in asset library
|
||||
video_url = result.get("video_url")
|
||||
if video_url:
|
||||
asset_metadata = {
|
||||
"original_file": video_file.filename,
|
||||
"has_background_replacement": result.get("has_background_replacement", False),
|
||||
"background_file": background_image_file.filename if background_image_file else None,
|
||||
"generation_type": "background_removal",
|
||||
}
|
||||
|
||||
asset_service.create_asset(
|
||||
user_id=user_id,
|
||||
filename=f"bg_removed_{uuid.uuid4().hex[:8]}.mp4",
|
||||
file_url=video_url,
|
||||
asset_type=AssetType.VIDEO,
|
||||
source_module=AssetSource.VIDEO_STUDIO,
|
||||
asset_metadata=asset_metadata,
|
||||
cost=result.get("cost", 0),
|
||||
tags=["video_studio", "background_removal", "ai-processed"]
|
||||
)
|
||||
|
||||
logger.info(f"[VideoBackgroundRemover] Background removal successful: user={user_id}, url={video_url}")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"video_url": video_url,
|
||||
"cost": result.get("cost", 0),
|
||||
"has_background_replacement": result.get("has_background_replacement", False),
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[VideoBackgroundRemover] Background removal error: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Background removal failed: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/video-background-remover/estimate-cost")
|
||||
async def estimate_background_removal_cost(
|
||||
estimated_duration: float = Form(10.0, description="Estimated video duration in seconds", ge=5.0),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user),
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Estimate cost for video background removal operation.
|
||||
|
||||
Returns estimated cost based on duration.
|
||||
"""
|
||||
try:
|
||||
require_authenticated_user(current_user)
|
||||
|
||||
background_remover_service = VideoBackgroundRemoverService()
|
||||
estimated_cost = background_remover_service.calculate_cost(estimated_duration)
|
||||
|
||||
return {
|
||||
"estimated_cost": estimated_cost,
|
||||
"estimated_duration": estimated_duration,
|
||||
"cost_per_second": 0.01,
|
||||
"pricing_model": "per_second",
|
||||
"min_duration": 0.0,
|
||||
"max_duration": 600.0, # 10 minutes max
|
||||
"min_charge": 0.05, # Minimum $0.05 for ≤5 seconds
|
||||
"max_charge": 6.00, # Maximum $6.00 for 600 seconds
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[VideoBackgroundRemover] Failed to estimate cost: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to estimate cost: {str(e)}")
|
||||
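The estimate response above encodes a simple per-second pricing model. A sketch of the formula it implies follows; the service's actual calculate_cost implementation is not shown in this commit and may differ:

# Sketch of the pricing implied by the estimate-cost response fields.
def estimate_bg_removal_cost(duration_s: float) -> float:
    COST_PER_SECOND = 0.01
    MIN_CHARGE = 0.05   # applies to clips of 5 seconds or less
    MAX_CHARGE = 6.00   # cap at 600 seconds (10 minutes)
    return min(max(duration_s * COST_PER_SECOND, MIN_CHARGE), MAX_CHARGE)

assert round(estimate_bg_removal_cost(3.0), 2) == 0.05    # minimum charge applies
assert round(estimate_bg_removal_cost(120.0), 2) == 1.20  # 120s * $0.01/s
assert round(estimate_bg_removal_cost(900.0), 2) == 6.00  # capped at the maximum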
260
backend/routers/video_studio/endpoints/video_translate.py
Normal file
@@ -0,0 +1,260 @@
"""
|
||||
Video Translate endpoints.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form, BackgroundTasks
|
||||
from sqlalchemy.orm import Session
|
||||
from typing import Optional, Dict, Any
|
||||
import uuid
|
||||
|
||||
from ...database import get_db
|
||||
from ...models.content_asset_models import AssetSource, AssetType
|
||||
from ...services.video_studio import VideoStudioService
|
||||
from ...services.video_studio.video_translate_service import VideoTranslateService
|
||||
from ...services.asset_service import ContentAssetService
|
||||
from ...utils.auth import get_current_user, require_authenticated_user
|
||||
from ...utils.logger_utils import get_service_logger
|
||||
|
||||
logger = get_service_logger("video_studio.endpoints.video_translate")
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.post("/video-translate")
|
||||
async def translate_video(
|
||||
background_tasks: BackgroundTasks,
|
||||
video_file: UploadFile = File(..., description="Source video to translate"),
|
||||
output_language: str = Form("English", description="Target language for translation"),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user),
|
||||
db: Session = Depends(get_db),
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Translate video to target language using HeyGen Video Translate.
|
||||
|
||||
Supports 70+ languages and 175+ dialects. Translates both audio and video
|
||||
with lip-sync preservation.
|
||||
|
||||
Requirements:
|
||||
- Video: Source video file (MP4, WebM, etc.)
|
||||
- Output Language: Target language (default: "English")
|
||||
- Pricing: $0.0375/second
|
||||
|
||||
Supported languages include:
|
||||
- English, Spanish, French, Hindi, Italian, German, Polish, Portuguese
|
||||
- Chinese, Japanese, Korean, Arabic, Russian, and many more
|
||||
- Regional variants (e.g., "English (United States)", "Spanish (Mexico)")
|
||||
"""
|
||||
try:
|
||||
user_id = require_authenticated_user(current_user)
|
||||
|
||||
# Validate file type
|
||||
if not video_file.content_type.startswith('video/'):
|
||||
raise HTTPException(status_code=400, detail="File must be a video")
|
||||
|
||||
# Initialize services
|
||||
video_translate_service = VideoTranslateService()
|
||||
asset_service = ContentAssetService(db)
|
||||
|
||||
logger.info(
|
||||
f"[VideoTranslate] Video translate request: user={user_id}, "
|
||||
f"output_language={output_language}"
|
||||
)
|
||||
|
||||
# Read file
|
||||
video_data = await video_file.read()
|
||||
|
||||
# Validate file size (reasonable limit)
|
||||
if len(video_data) > 500 * 1024 * 1024: # 500MB
|
||||
raise HTTPException(status_code=400, detail="Video file must be less than 500MB")
|
||||
|
||||
# Perform video translation
|
||||
result = await video_translate_service.translate_video(
|
||||
video_data=video_data,
|
||||
output_language=output_language,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
if not result.get("success"):
|
||||
raise HTTPException(
|
||||
status_code=500,
|
||||
detail=f"Video translation failed: {result.get('error', 'Unknown error')}"
|
||||
)
|
||||
|
||||
# Store in asset library
|
||||
video_url = result.get("video_url")
|
||||
if video_url:
|
||||
asset_metadata = {
|
||||
"video_file": video_file.filename,
|
||||
"output_language": output_language,
|
||||
"operation_type": "video_translate",
|
||||
"model": "heygen/video-translate",
|
||||
}
|
||||
|
||||
asset_service.create_asset(
|
||||
user_id=user_id,
|
||||
filename=f"video_translate_{uuid.uuid4().hex[:8]}.mp4",
|
||||
file_url=video_url,
|
||||
asset_type=AssetType.VIDEO,
|
||||
source_module=AssetSource.VIDEO_STUDIO,
|
||||
asset_metadata=asset_metadata,
|
||||
cost=result.get("cost", 0),
|
||||
tags=["video_studio", "video_translate", "ai-generated"],
|
||||
)
|
||||
|
||||
logger.info(f"[VideoTranslate] Video translate successful: user={user_id}, url={video_url}")
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"video_url": video_url,
|
||||
"cost": result.get("cost", 0),
|
||||
"output_language": output_language,
|
||||
"metadata": result.get("metadata", {}),
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[VideoTranslate] Video translate error: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Video translation failed: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/video-translate/estimate-cost")
|
||||
async def estimate_video_translate_cost(
|
||||
estimated_duration: float = Form(10.0, description="Estimated video duration in seconds", ge=1.0),
|
||||
current_user: Dict[str, Any] = Depends(get_current_user),
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Estimate cost for video translation operation.
|
||||
|
||||
Returns estimated cost based on duration.
|
||||
"""
|
||||
try:
|
||||
require_authenticated_user(current_user)
|
||||
|
||||
video_translate_service = VideoTranslateService()
|
||||
estimated_cost = video_translate_service.calculate_cost(estimated_duration)
|
||||
|
||||
return {
|
||||
"estimated_cost": estimated_cost,
|
||||
"estimated_duration": estimated_duration,
|
||||
"cost_per_second": 0.0375,
|
||||
"pricing_model": "per_second",
|
||||
"min_duration": 1.0,
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[VideoTranslate] Failed to estimate cost: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to estimate cost: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/video-translate/languages")
|
||||
async def get_supported_languages(
|
||||
current_user: Dict[str, Any] = Depends(get_current_user),
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Get list of supported languages for video translation.
|
||||
|
||||
Returns a categorized list of 70+ languages and 175+ dialects.
|
||||
"""
|
||||
try:
|
||||
require_authenticated_user(current_user)
|
||||
|
||||
# Common languages (simplified list - full list has 175+ dialects)
|
||||
languages = [
|
||||
"English",
|
||||
"English (United States)",
|
||||
"English (UK)",
|
||||
"English (Australia)",
|
||||
"English (Canada)",
|
||||
"Spanish",
|
||||
"Spanish (Spain)",
|
||||
"Spanish (Mexico)",
|
||||
"Spanish (Argentina)",
|
||||
"French",
|
||||
"French (France)",
|
||||
"French (Canada)",
|
||||
"German",
|
||||
"German (Germany)",
|
||||
"Italian",
|
||||
"Italian (Italy)",
|
||||
"Portuguese",
|
||||
"Portuguese (Brazil)",
|
||||
"Portuguese (Portugal)",
|
||||
"Chinese",
|
||||
"Chinese (Mandarin, Simplified)",
|
||||
"Chinese (Cantonese, Traditional)",
|
||||
"Japanese",
|
||||
"Japanese (Japan)",
|
||||
"Korean",
|
||||
"Korean (Korea)",
|
||||
"Hindi",
|
||||
"Hindi (India)",
|
||||
"Arabic",
|
||||
"Arabic (Saudi Arabia)",
|
||||
"Arabic (Egypt)",
|
||||
"Russian",
|
||||
"Russian (Russia)",
|
||||
"Polish",
|
||||
"Polish (Poland)",
|
||||
"Dutch",
|
||||
"Dutch (Netherlands)",
|
||||
"Turkish",
|
||||
"Turkish (Türkiye)",
|
||||
"Thai",
|
||||
"Thai (Thailand)",
|
||||
"Vietnamese",
|
||||
"Vietnamese (Vietnam)",
|
||||
"Indonesian",
|
||||
"Indonesian (Indonesia)",
|
||||
"Malay",
|
||||
"Malay (Malaysia)",
|
||||
"Filipino",
|
||||
"Filipino (Philippines)",
|
||||
"Bengali (India)",
|
||||
"Tamil (India)",
|
||||
"Telugu (India)",
|
||||
"Marathi (India)",
|
||||
"Gujarati (India)",
|
||||
"Kannada (India)",
|
||||
"Malayalam (India)",
|
||||
"Urdu (India)",
|
||||
"Urdu (Pakistan)",
|
||||
"Swedish",
|
||||
"Swedish (Sweden)",
|
||||
"Norwegian Bokmål (Norway)",
|
||||
"Danish",
|
||||
"Danish (Denmark)",
|
||||
"Finnish",
|
||||
"Finnish (Finland)",
|
||||
"Greek",
|
||||
"Greek (Greece)",
|
||||
"Hebrew (Israel)",
|
||||
"Czech",
|
||||
"Czech (Czechia)",
|
||||
"Romanian",
|
||||
"Romanian (Romania)",
|
||||
"Hungarian",
|
||||
"Hungarian (Hungary)",
|
||||
"Bulgarian",
|
||||
"Bulgarian (Bulgaria)",
|
||||
"Croatian",
|
||||
"Croatian (Croatia)",
|
||||
"Ukrainian",
|
||||
"Ukrainian (Ukraine)",
|
||||
"English - Your Accent",
|
||||
"English - American Accent",
|
||||
]
|
||||
|
||||
return {
|
||||
"languages": sorted(languages),
|
||||
"total_count": len(languages),
|
||||
"note": "This is a simplified list. Full API supports 70+ languages and 175+ dialects. See documentation for complete list.",
|
||||
}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"[VideoTranslate] Failed to get languages: {e}", exc_info=True)
|
||||
raise HTTPException(status_code=500, detail=f"Failed to get languages: {str(e)}")
|
||||
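The translate estimate endpoint quotes a flat $0.0375/second with a one-second floor. A sketch of the implied arithmetic (the actual VideoTranslateService.calculate_cost is not part of this commit and may differ):

# Sketch of the per-second pricing quoted by the estimate-cost response.
def estimate_translate_cost(duration_s: float) -> float:
    COST_PER_SECOND = 0.0375  # HeyGen Video Translate rate quoted above
    return round(max(duration_s, 1.0) * COST_PER_SECOND, 4)

print(estimate_translate_cost(120.0))  # 4.5 -> a 2-minute video costs about $4.50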
1
backend/routers/video_studio/tasks/__init__.py
Normal file
@@ -0,0 +1 @@
"""Background tasks for Video Studio."""
|
||||
147
backend/routers/video_studio/tasks/avatar_generation.py
Normal file
@@ -0,0 +1,147 @@
"""
|
||||
Background task for async avatar generation.
|
||||
"""
|
||||
|
||||
from typing import Optional
|
||||
from api.story_writer.task_manager import task_manager
|
||||
from services.video_studio.avatar_service import AvatarStudioService
|
||||
from services.video_studio import VideoStudioService
|
||||
from utils.asset_tracker import save_asset_to_library
|
||||
from utils.logger_utils import get_service_logger
|
||||
from ..utils import extract_error_message
|
||||
|
||||
logger = get_service_logger("video_studio.tasks.avatar")
|
||||
|
||||
|
||||
async def execute_avatar_generation_task(
|
||||
task_id: str,
|
||||
user_id: str,
|
||||
image_base64: str,
|
||||
audio_base64: str,
|
||||
resolution: str = "720p",
|
||||
prompt: Optional[str] = None,
|
||||
mask_image_base64: Optional[str] = None,
|
||||
seed: Optional[int] = None,
|
||||
model: str = "infinitetalk",
|
||||
):
|
||||
"""Background task for async avatar generation with progress updates."""
|
||||
try:
|
||||
# Progress callback that updates task status
|
||||
def progress_callback(progress: float, message: str):
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"processing",
|
||||
progress=progress,
|
||||
message=message
|
||||
)
|
||||
|
||||
# Update initial status
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"processing",
|
||||
progress=5.0,
|
||||
message="Initializing avatar generation..."
|
||||
)
|
||||
|
||||
# Create avatar service
|
||||
avatar_service = AvatarStudioService()
|
||||
|
||||
# Generate avatar video
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"processing",
|
||||
progress=20.0,
|
||||
message=f"Submitting request to {model}..."
|
||||
)
|
||||
|
||||
result = await avatar_service.create_talking_avatar(
|
||||
image_base64=image_base64,
|
||||
audio_base64=audio_base64,
|
||||
resolution=resolution,
|
||||
prompt=prompt,
|
||||
mask_image_base64=mask_image_base64,
|
||||
seed=seed,
|
||||
user_id=user_id,
|
||||
model=model,
|
||||
progress_callback=progress_callback,
|
||||
)
|
||||
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"processing",
|
||||
progress=90.0,
|
||||
message="Saving video file..."
|
||||
)
|
||||
|
||||
# Save file
|
||||
video_service = VideoStudioService()
|
||||
save_result = video_service._save_video_file(
|
||||
video_bytes=result["video_bytes"],
|
||||
operation_type="talking-avatar",
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
# Save to asset library
|
||||
try:
|
||||
from services.database import get_db
|
||||
db = next(get_db())
|
||||
try:
|
||||
save_asset_to_library(
|
||||
db=db,
|
||||
user_id=user_id,
|
||||
asset_type="video",
|
||||
source_module="video_studio",
|
||||
filename=save_result["filename"],
|
||||
file_url=save_result["file_url"],
|
||||
file_path=save_result["file_path"],
|
||||
file_size=save_result["file_size"],
|
||||
mime_type="video/mp4",
|
||||
title="Video Studio: Talking Avatar",
|
||||
description=f"Talking avatar video: {prompt[:100] if prompt else 'No prompt'}",
|
||||
prompt=result.get("prompt", prompt or ""),
|
||||
tags=["video_studio", "avatar", "talking_avatar"],
|
||||
provider=result.get("provider", "wavespeed"),
|
||||
model=result.get("model_name", "wavespeed-ai/infinitetalk"),
|
||||
cost=result.get("cost", 0.0),
|
||||
asset_metadata={
|
||||
"resolution": result.get("resolution", resolution),
|
||||
"duration": result.get("duration", 5.0),
|
||||
"operation": "talking-avatar",
|
||||
"width": result.get("width", 1280),
|
||||
"height": result.get("height", 720),
|
||||
}
|
||||
)
|
||||
logger.info(f"[AvatarStudio] Video saved to asset library")
|
||||
finally:
|
||||
db.close()
|
||||
except Exception as e:
|
||||
logger.warning(f"[AvatarStudio] Failed to save to asset library: {e}")
|
||||
|
||||
# Update task with final result
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"completed",
|
||||
progress=100.0,
|
||||
message="Avatar generation complete!",
|
||||
result={
|
||||
"video_url": save_result["file_url"],
|
||||
"cost": result.get("cost", 0.0),
|
||||
"duration": result.get("duration", 5.0),
|
||||
"model": result.get("model_name", "wavespeed-ai/infinitetalk"),
|
||||
"provider": result.get("provider", "wavespeed"),
|
||||
"resolution": result.get("resolution", resolution),
|
||||
"width": result.get("width", 1280),
|
||||
"height": result.get("height", 720),
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as exc:
|
||||
error_message = extract_error_message(exc)
|
||||
logger.error(f"[AvatarStudio] Avatar generation failed: {error_message}", exc_info=True)
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"failed",
|
||||
progress=0.0,
|
||||
message=f"Avatar generation failed: {error_message}",
|
||||
error=error_message
|
||||
)
|
||||
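A hedged sketch of how an endpoint might enqueue this coroutine with FastAPI's BackgroundTasks; the real avatar endpoint lives elsewhere in this commit, and the task_manager.create_task call is an assumption about that module's API:

# Hypothetical enqueue helper; task_manager.create_task is an assumed API.
import uuid
from fastapi import BackgroundTasks

def enqueue_avatar_job(background_tasks: BackgroundTasks, user_id: str,
                       image_b64: str, audio_b64: str) -> str:
    task_id = str(uuid.uuid4())
    task_manager.create_task(task_id)  # assumed: register the task before it runs
    background_tasks.add_task(         # BackgroundTasks accepts coroutine functions
        execute_avatar_generation_task,
        task_id=task_id,
        user_id=user_id,
        image_base64=image_b64,
        audio_base64=audio_b64,
    )
    return task_id  # client then polls /task/{task_id}/status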
128
backend/routers/video_studio/tasks/video_generation.py
Normal file
@@ -0,0 +1,128 @@
"""
|
||||
Background task for async video generation.
|
||||
"""
|
||||
|
||||
from typing import Optional, Dict, Any
|
||||
from api.story_writer.task_manager import task_manager
|
||||
from services.video_studio import VideoStudioService
|
||||
from utils.asset_tracker import save_asset_to_library
|
||||
from utils.logger_utils import get_service_logger
|
||||
from ..utils import extract_error_message
|
||||
|
||||
logger = get_service_logger("video_studio.tasks")
|
||||
|
||||
|
||||
def execute_video_generation_task(
|
||||
task_id: str,
|
||||
operation_type: str,
|
||||
user_id: str,
|
||||
prompt: Optional[str] = None,
|
||||
image_data: Optional[bytes] = None,
|
||||
image_base64: Optional[str] = None,
|
||||
provider: str = "wavespeed",
|
||||
**kwargs,
|
||||
):
|
||||
"""Background task for async video generation with progress updates."""
|
||||
try:
|
||||
from services.llm_providers.main_video_generation import ai_video_generate
|
||||
|
||||
# Progress callback that updates task status
|
||||
def progress_callback(progress: float, message: str):
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"processing",
|
||||
progress=progress,
|
||||
message=message
|
||||
)
|
||||
|
||||
# Update initial status
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"processing",
|
||||
progress=5.0,
|
||||
message="Initializing video generation..."
|
||||
)
|
||||
|
||||
# Call unified video generation with progress callback
|
||||
result = ai_video_generate(
|
||||
prompt=prompt,
|
||||
image_data=image_data,
|
||||
image_base64=image_base64,
|
||||
operation_type=operation_type,
|
||||
provider=provider,
|
||||
user_id=user_id,
|
||||
progress_callback=progress_callback,
|
||||
**kwargs
|
||||
)
|
||||
|
||||
# Save file
|
||||
video_service = VideoStudioService()
|
||||
save_result = video_service._save_video_file(
|
||||
video_bytes=result["video_bytes"],
|
||||
operation_type=operation_type,
|
||||
user_id=user_id,
|
||||
)
|
||||
|
||||
# Save to asset library
|
||||
try:
|
||||
from services.database import get_db
|
||||
db = next(get_db())
|
||||
try:
|
||||
save_asset_to_library(
|
||||
db=db,
|
||||
user_id=user_id,
|
||||
asset_type="video",
|
||||
source_module="video_studio",
|
||||
filename=save_result["filename"],
|
||||
file_url=save_result["file_url"],
|
||||
file_path=save_result["file_path"],
|
||||
file_size=save_result["file_size"],
|
||||
mime_type="video/mp4",
|
||||
title=f"Video Studio: {operation_type.replace('-', ' ').title()}",
|
||||
description=f"Generated video: {prompt[:100] if prompt else 'No prompt'}",
|
||||
prompt=result.get("prompt", prompt or ""),
|
||||
tags=["video_studio", operation_type],
|
||||
provider=result.get("provider", provider),
|
||||
model=result.get("model_name", kwargs.get("model", "unknown")),
|
||||
cost=result.get("cost", 0.0),
|
||||
asset_metadata={
|
||||
"resolution": result.get("resolution", kwargs.get("resolution", "720p")),
|
||||
"duration": result.get("duration", float(kwargs.get("duration", 5))),
|
||||
"operation": operation_type,
|
||||
"width": result.get("width", 1280),
|
||||
"height": result.get("height", 720),
|
||||
}
|
||||
)
|
||||
logger.info(f"[VideoStudio] Video saved to asset library")
|
||||
finally:
|
||||
db.close()
|
||||
except Exception as e:
|
||||
logger.warning(f"[VideoStudio] Failed to save to asset library: {e}")
|
||||
|
||||
# Update task with final result
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"completed",
|
||||
progress=100.0,
|
||||
message="Video generation complete!",
|
||||
result={
|
||||
"video_url": save_result["file_url"],
|
||||
"cost": result.get("cost", 0.0),
|
||||
"duration": result.get("duration", float(kwargs.get("duration", 5))),
|
||||
"model": result.get("model_name", kwargs.get("model", "unknown")),
|
||||
"provider": result.get("provider", provider),
|
||||
"resolution": result.get("resolution", kwargs.get("resolution", "720p")),
|
||||
"width": result.get("width", 1280),
|
||||
"height": result.get("height", 720),
|
||||
}
|
||||
)
|
||||
|
||||
except Exception as exc:
|
||||
logger.exception(f"[VideoStudio] Video generation failed: {exc}")
|
||||
error_msg = extract_error_message(exc)
|
||||
task_manager.update_task_status(
|
||||
task_id,
|
||||
"failed",
|
||||
error=error_msg,
|
||||
message=f"Video generation failed: {error_msg}"
|
||||
)
|
||||
54
backend/routers/video_studio/utils.py
Normal file
@@ -0,0 +1,54 @@
"""
|
||||
Utility functions for Video Studio router.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
from typing import Any
|
||||
from fastapi import HTTPException
|
||||
from utils.logger_utils import get_service_logger
|
||||
|
||||
logger = get_service_logger("video_studio_router")
|
||||
|
||||
|
||||
def extract_error_message(exc: Exception) -> str:
|
||||
"""
|
||||
Extract user-friendly error message from exception.
|
||||
Handles HTTPException with nested error details from WaveSpeed API.
|
||||
"""
|
||||
if isinstance(exc, HTTPException):
|
||||
detail = exc.detail
|
||||
# If detail is a dict (from WaveSpeed client)
|
||||
if isinstance(detail, dict):
|
||||
# Try to extract message from nested response JSON
|
||||
response_str = detail.get("response", "")
|
||||
if response_str:
|
||||
try:
|
||||
response_json = json.loads(response_str)
|
||||
if isinstance(response_json, dict) and "message" in response_json:
|
||||
return response_json["message"]
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
pass
|
||||
# Fall back to error field
|
||||
if "error" in detail:
|
||||
return detail["error"]
|
||||
# If detail is a string
|
||||
elif isinstance(detail, str):
|
||||
return detail
|
||||
|
||||
# For other exceptions, use string representation
|
||||
error_str = str(exc)
|
||||
|
||||
# Try to extract meaningful message from HTTPException string format
|
||||
if "Insufficient credits" in error_str or "insufficient credits" in error_str.lower():
|
||||
return "Insufficient WaveSpeed credits. Please top up your account."
|
||||
|
||||
# Try to extract JSON message from string
|
||||
try:
|
||||
json_match = re.search(r'"message"\s*:\s*"([^"]+)"', error_str)
|
||||
if json_match:
|
||||
return json_match.group(1)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
return error_str
|
||||
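A quick illustration of the nested-detail path, assuming this module is importable: a WaveSpeed-style error dict wrapped in an HTTPException is unwrapped to its inner "message" field.

# Example: the nested response JSON wins over the outer "error" field.
from fastapi import HTTPException

probe = HTTPException(
    status_code=402,
    detail={"error": "request failed", "response": '{"message": "Insufficient credits"}'},
)
assert extract_error_message(probe) == "Insufficient credits"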
409
backend/routers/wordpress.py
Normal file
@@ -0,0 +1,409 @@
"""
|
||||
WordPress API Routes
|
||||
REST API endpoints for WordPress integration management.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, HTTPException, Depends, status
|
||||
from fastapi.responses import JSONResponse
|
||||
from typing import List, Optional, Dict, Any
|
||||
from pydantic import BaseModel, HttpUrl
|
||||
from loguru import logger
|
||||
|
||||
from services.integrations.wordpress_service import WordPressService
|
||||
from services.integrations.wordpress_publisher import WordPressPublisher
|
||||
from middleware.auth_middleware import get_current_user
|
||||
|
||||
|
||||
router = APIRouter(prefix="/wordpress", tags=["WordPress"])
|
||||
|
||||
|
||||
# Pydantic Models
|
||||
class WordPressSiteRequest(BaseModel):
|
||||
site_url: str
|
||||
site_name: str
|
||||
username: str
|
||||
app_password: str
|
||||
|
||||
|
||||
class WordPressSiteResponse(BaseModel):
|
||||
id: int
|
||||
site_url: str
|
||||
site_name: str
|
||||
username: str
|
||||
is_active: bool
|
||||
created_at: str
|
||||
updated_at: str
|
||||
|
||||
|
||||
class WordPressPublishRequest(BaseModel):
|
||||
site_id: int
|
||||
title: str
|
||||
content: str
|
||||
excerpt: Optional[str] = ""
|
||||
featured_image_path: Optional[str] = None
|
||||
categories: Optional[List[str]] = None
|
||||
tags: Optional[List[str]] = None
|
||||
status: str = "draft"
|
||||
meta_description: Optional[str] = ""
|
||||
|
||||
|
||||
class WordPressPublishResponse(BaseModel):
|
||||
success: bool
|
||||
post_id: Optional[int] = None
|
||||
post_url: Optional[str] = None
|
||||
error: Optional[str] = None
|
||||
|
||||
|
||||
class WordPressPostResponse(BaseModel):
|
||||
id: int
|
||||
wp_post_id: int
|
||||
title: str
|
||||
status: str
|
||||
published_at: Optional[str]
|
||||
created_at: str
|
||||
site_name: str
|
||||
site_url: str
|
||||
|
||||
|
||||
class WordPressStatusResponse(BaseModel):
|
||||
connected: bool
|
||||
sites: Optional[List[WordPressSiteResponse]] = None
|
||||
total_sites: int = 0
|
||||
|
||||
|
||||
# Initialize services
|
||||
wp_service = WordPressService()
|
||||
wp_publisher = WordPressPublisher()
|
||||
|
||||
|
||||
@router.get("/status", response_model=WordPressStatusResponse)
|
||||
async def get_wordpress_status(user: dict = Depends(get_current_user)):
|
||||
"""Get WordPress connection status for the current user."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
logger.info(f"Checking WordPress status for user: {user_id}")
|
||||
|
||||
# Get user's WordPress sites
|
||||
sites = wp_service.get_all_sites(user_id)
|
||||
|
||||
if sites:
|
||||
# Convert to response format
|
||||
site_responses = [
|
||||
WordPressSiteResponse(
|
||||
id=site['id'],
|
||||
site_url=site['site_url'],
|
||||
site_name=site['site_name'],
|
||||
username=site['username'],
|
||||
is_active=site['is_active'],
|
||||
created_at=site['created_at'],
|
||||
updated_at=site['updated_at']
|
||||
)
|
||||
for site in sites
|
||||
]
|
||||
|
||||
logger.info(f"Found {len(sites)} WordPress sites for user {user_id}")
|
||||
return WordPressStatusResponse(
|
||||
connected=True,
|
||||
sites=site_responses,
|
||||
total_sites=len(sites)
|
||||
)
|
||||
else:
|
||||
logger.info(f"No WordPress sites found for user {user_id}")
|
||||
return WordPressStatusResponse(
|
||||
connected=False,
|
||||
sites=[],
|
||||
total_sites=0
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting WordPress status for user {user_id}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error checking WordPress status: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/sites", response_model=WordPressSiteResponse)
|
||||
async def add_wordpress_site(
|
||||
site_request: WordPressSiteRequest,
|
||||
user: dict = Depends(get_current_user)
|
||||
):
|
||||
"""Add a new WordPress site connection."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
logger.info(f"Adding WordPress site for user {user_id}: {site_request.site_name}")
|
||||
|
||||
# Add the site
|
||||
success = wp_service.add_site(
|
||||
user_id=user_id,
|
||||
site_url=site_request.site_url,
|
||||
site_name=site_request.site_name,
|
||||
username=site_request.username,
|
||||
app_password=site_request.app_password
|
||||
)
|
||||
|
||||
if not success:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Failed to connect to WordPress site. Please check your credentials."
|
||||
)
|
||||
|
||||
# Get the added site info
|
||||
sites = wp_service.get_all_sites(user_id)
|
||||
if sites:
|
||||
latest_site = sites[0] # Most recent site
|
||||
return WordPressSiteResponse(
|
||||
id=latest_site['id'],
|
||||
site_url=latest_site['site_url'],
|
||||
site_name=latest_site['site_name'],
|
||||
username=latest_site['username'],
|
||||
is_active=latest_site['is_active'],
|
||||
created_at=latest_site['created_at'],
|
||||
updated_at=latest_site['updated_at']
|
||||
)
|
||||
else:
|
||||
raise HTTPException(status_code=500, detail="Site added but could not retrieve details")
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error adding WordPress site: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error adding WordPress site: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/sites", response_model=List[WordPressSiteResponse])
|
||||
async def get_wordpress_sites(user: dict = Depends(get_current_user)):
|
||||
"""Get all WordPress sites for the current user."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
logger.info(f"Getting WordPress sites for user: {user_id}")
|
||||
|
||||
sites = wp_service.get_all_sites(user_id)
|
||||
|
||||
site_responses = [
|
||||
WordPressSiteResponse(
|
||||
id=site['id'],
|
||||
site_url=site['site_url'],
|
||||
site_name=site['site_name'],
|
||||
username=site['username'],
|
||||
is_active=site['is_active'],
|
||||
created_at=site['created_at'],
|
||||
updated_at=site['updated_at']
|
||||
)
|
||||
for site in sites
|
||||
]
|
||||
|
||||
logger.info(f"Retrieved {len(sites)} WordPress sites for user {user_id}")
|
||||
return site_responses
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting WordPress sites for user {user_id}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error retrieving WordPress sites: {str(e)}")
|
||||
|
||||
|
||||
@router.delete("/sites/{site_id}")
|
||||
async def disconnect_wordpress_site(
|
||||
site_id: int,
|
||||
user: dict = Depends(get_current_user)
|
||||
):
|
||||
"""Disconnect a WordPress site."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
logger.info(f"Disconnecting WordPress site {site_id} for user {user_id}")
|
||||
|
||||
success = wp_service.disconnect_site(user_id, site_id)
|
||||
|
||||
if not success:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail="WordPress site not found or already disconnected"
|
||||
)
|
||||
|
||||
logger.info(f"WordPress site {site_id} disconnected successfully")
|
||||
return {"success": True, "message": "WordPress site disconnected successfully"}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error disconnecting WordPress site {site_id}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error disconnecting WordPress site: {str(e)}")
|
||||
|
||||
|
||||
@router.post("/publish", response_model=WordPressPublishResponse)
|
||||
async def publish_to_wordpress(
|
||||
publish_request: WordPressPublishRequest,
|
||||
user: dict = Depends(get_current_user)
|
||||
):
|
||||
"""Publish content to a WordPress site."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
logger.info(f"Publishing to WordPress site {publish_request.site_id} for user {user_id}")
|
||||
|
||||
# Publish the content
|
||||
result = wp_publisher.publish_blog_post(
|
||||
user_id=user_id,
|
||||
site_id=publish_request.site_id,
|
||||
title=publish_request.title,
|
||||
content=publish_request.content,
|
||||
excerpt=publish_request.excerpt,
|
||||
featured_image_path=publish_request.featured_image_path,
|
||||
categories=publish_request.categories,
|
||||
tags=publish_request.tags,
|
||||
status=publish_request.status,
|
||||
meta_description=publish_request.meta_description
|
||||
)
|
||||
|
||||
if result['success']:
|
||||
logger.info(f"Content published successfully to WordPress: {result['post_id']}")
|
||||
return WordPressPublishResponse(
|
||||
success=True,
|
||||
post_id=result['post_id'],
|
||||
post_url=result.get('post_url')
|
||||
)
|
||||
else:
|
||||
logger.error(f"Failed to publish content: {result['error']}")
|
||||
return WordPressPublishResponse(
|
||||
success=False,
|
||||
error=result['error']
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error publishing to WordPress: {e}")
|
||||
return WordPressPublishResponse(
|
||||
success=False,
|
||||
error=f"Error publishing content: {str(e)}"
|
||||
)
|
||||
|
||||
|
||||
@router.get("/posts", response_model=List[WordPressPostResponse])
|
||||
async def get_wordpress_posts(
|
||||
site_id: Optional[int] = None,
|
||||
user: dict = Depends(get_current_user)
|
||||
):
|
||||
"""Get published posts from WordPress sites."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
logger.info(f"Getting WordPress posts for user {user_id}, site_id: {site_id}")
|
||||
|
||||
posts = wp_service.get_posts_for_site(user_id, site_id) if site_id else wp_service.get_posts_for_all_sites(user_id)
|
||||
|
||||
post_responses = [
|
||||
WordPressPostResponse(
|
||||
id=post['id'],
|
||||
wp_post_id=post['wp_post_id'],
|
||||
title=post['title'],
|
||||
status=post['status'],
|
||||
published_at=post['published_at'],
|
||||
created_at=post['created_at'],
|
||||
site_name=post['site_name'],
|
||||
site_url=post['site_url']
|
||||
)
|
||||
for post in posts
|
||||
]
|
||||
|
||||
logger.info(f"Retrieved {len(posts)} WordPress posts for user {user_id}")
|
||||
return post_responses
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting WordPress posts for user {user_id}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error retrieving WordPress posts: {str(e)}")
|
||||
|
||||
|
||||
@router.put("/posts/{post_id}/status")
|
||||
async def update_post_status(
|
||||
post_id: int,
|
||||
status: str,
|
||||
user: dict = Depends(get_current_user)
|
||||
):
|
||||
"""Update the status of a WordPress post (draft/publish)."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
if status not in ['draft', 'publish', 'private']:
|
||||
raise HTTPException(
|
||||
status_code=400,
|
||||
detail="Invalid status. Must be 'draft', 'publish', or 'private'"
|
||||
)
|
||||
|
||||
logger.info(f"Updating WordPress post {post_id} status to {status} for user {user_id}")
|
||||
|
||||
success = wp_publisher.update_post_status(user_id, post_id, status)
|
||||
|
||||
if not success:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail="Post not found or update failed"
|
||||
)
|
||||
|
||||
logger.info(f"WordPress post {post_id} status updated to {status}")
|
||||
return {"success": True, "message": f"Post status updated to {status}"}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error updating WordPress post {post_id} status: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error updating post status: {str(e)}")
|
||||
|
||||
|
||||
@router.delete("/posts/{post_id}")
|
||||
async def delete_wordpress_post(
|
||||
post_id: int,
|
||||
force: bool = False,
|
||||
user: dict = Depends(get_current_user)
|
||||
):
|
||||
"""Delete a WordPress post."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=400, detail="User ID not found")
|
||||
|
||||
logger.info(f"Deleting WordPress post {post_id} for user {user_id}, force: {force}")
|
||||
|
||||
success = wp_publisher.delete_post(user_id, post_id, force)
|
||||
|
||||
if not success:
|
||||
raise HTTPException(
|
||||
status_code=404,
|
||||
detail="Post not found or deletion failed"
|
||||
)
|
||||
|
||||
logger.info(f"WordPress post {post_id} deleted successfully")
|
||||
return {"success": True, "message": "Post deleted successfully"}
|
||||
|
||||
except HTTPException:
|
||||
raise
|
||||
except Exception as e:
|
||||
logger.error(f"Error deleting WordPress post {post_id}: {e}")
|
||||
raise HTTPException(status_code=500, detail=f"Error deleting post: {str(e)}")
|
||||
|
||||
|
||||
@router.get("/health")
|
||||
async def wordpress_health_check():
|
||||
"""WordPress integration health check."""
|
||||
try:
|
||||
return {
|
||||
"status": "healthy",
|
||||
"service": "wordpress",
|
||||
"timestamp": "2024-01-01T00:00:00Z",
|
||||
"version": "1.0.0"
|
||||
}
|
||||
except Exception as e:
|
||||
logger.error(f"WordPress health check failed: {e}")
|
||||
raise HTTPException(status_code=500, detail="WordPress service unhealthy")
|
||||
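A minimal sketch of calling the publish endpoint; the "/api" mount prefix and the auth header depend on how this router is included in the application, which is outside this file:

# Hypothetical publish call matching WordPressPublishRequest.
import httpx

payload = {
    "site_id": 1,
    "title": "Hello from ALwrity",
    "content": "<p>Draft body</p>",
    "status": "draft",  # 'draft', 'publish', or 'private'
}
resp = httpx.post(
    "http://localhost:8000/api/wordpress/publish",
    json=payload,
    headers={"Authorization": "Bearer <token>"},
)
print(resp.json())  # {"success": true, "post_id": ..., "post_url": ...}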
282
backend/routers/wordpress_oauth.py
Normal file
@@ -0,0 +1,282 @@
"""
|
||||
WordPress OAuth2 Routes
|
||||
Handles WordPress.com OAuth2 authentication flow.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, status, Query
|
||||
from fastapi.responses import RedirectResponse, HTMLResponse
|
||||
from typing import Dict, Any, Optional
|
||||
from pydantic import BaseModel
|
||||
from loguru import logger
|
||||
|
||||
from services.integrations.wordpress_oauth import WordPressOAuthService
|
||||
from middleware.auth_middleware import get_current_user
|
||||
|
||||
router = APIRouter(prefix="/wp", tags=["WordPress OAuth"])
|
||||
|
||||
# Initialize OAuth service
|
||||
oauth_service = WordPressOAuthService()
|
||||
|
||||
# Pydantic Models
|
||||
class WordPressOAuthResponse(BaseModel):
|
||||
auth_url: str
|
||||
state: str
|
||||
|
||||
class WordPressCallbackResponse(BaseModel):
|
||||
success: bool
|
||||
message: str
|
||||
blog_url: Optional[str] = None
|
||||
blog_id: Optional[str] = None
|
||||
|
||||
class WordPressStatusResponse(BaseModel):
|
||||
connected: bool
|
||||
sites: list
|
||||
total_sites: int
|
||||
|
||||
@router.get("/auth/url", response_model=WordPressOAuthResponse)
|
||||
async def get_wordpress_auth_url(
|
||||
user: Dict[str, Any] = Depends(get_current_user)
|
||||
):
|
||||
"""Get WordPress OAuth2 authorization URL."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User ID not found.")
|
||||
|
||||
auth_data = oauth_service.generate_authorization_url(user_id)
|
||||
if not auth_data:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="WordPress OAuth is not properly configured. Please check that WORDPRESS_CLIENT_ID and WORDPRESS_CLIENT_SECRET environment variables are set with valid WordPress.com application credentials."
|
||||
)
|
||||
|
||||
return WordPressOAuthResponse(**auth_data)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating WordPress OAuth URL: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to generate WordPress OAuth URL."
|
||||
)
|
||||
|
||||
@router.get("/callback")
|
||||
async def handle_wordpress_callback(
|
||||
code: str = Query(..., description="Authorization code from WordPress"),
|
||||
state: str = Query(..., description="State parameter for security"),
|
||||
error: Optional[str] = Query(None, description="Error from WordPress OAuth")
|
||||
):
|
||||
"""Handle WordPress OAuth2 callback."""
|
||||
    # Headers that let the popup script message its opener and close itself.
    popup_headers = {
        "Cross-Origin-Opener-Policy": "unsafe-none",
        "Cross-Origin-Embedder-Policy": "unsafe-none"
    }
    try:
        if error:
            logger.error(f"WordPress OAuth error: {error}")
            # NOTE: `error` comes from the query string and is spliced into the
            # script unescaped; see the helper sketch below for a safer pattern.
            html_content = f"""
            <!DOCTYPE html>
            <html>
            <head>
                <title>WordPress.com Connection Failed</title>
                <script>
                    // Send error message to opener/parent window (matches the other branches)
                    window.onload = function() {{
                        (window.opener || window.parent).postMessage({{
                            type: 'WPCOM_OAUTH_ERROR',
                            success: false,
                            error: '{error}'
                        }}, '*');
                        window.close();
                    }};
                </script>
            </head>
            <body>
                <h1>Connection Failed</h1>
                <p>There was an error connecting to WordPress.com.</p>
                <p>You can close this window and try again.</p>
            </body>
            </html>
            """
            return HTMLResponse(content=html_content, headers=popup_headers)

        if not code or not state:
            logger.error("Missing code or state parameter in WordPress OAuth callback")
            # Plain (non f-string) template, so braces must be single here;
            # doubled braces would reach the browser verbatim and break the script.
            html_content = """
            <!DOCTYPE html>
            <html>
            <head>
                <title>WordPress.com Connection Failed</title>
                <script>
                    // Send error message to opener/parent window
                    window.onload = function() {
                        (window.opener || window.parent).postMessage({
                            type: 'WPCOM_OAUTH_ERROR',
                            success: false,
                            error: 'Missing parameters'
                        }, '*');
                        window.close();
                    };
                </script>
            </head>
            <body>
                <h1>Connection Failed</h1>
                <p>Missing required parameters.</p>
                <p>You can close this window and try again.</p>
            </body>
            </html>
            """
            return HTMLResponse(content=html_content, headers=popup_headers)

        # Exchange code for token
        result = oauth_service.handle_oauth_callback(code, state)

        if not result or not result.get('success'):
            logger.error("Failed to exchange WordPress OAuth code for token")
            html_content = """
            <!DOCTYPE html>
            <html>
            <head>
                <title>WordPress.com Connection Failed</title>
                <script>
                    // Send error message to opener/parent window
                    window.onload = function() {
                        (window.opener || window.parent).postMessage({
                            type: 'WPCOM_OAUTH_ERROR',
                            success: false,
                            error: 'Token exchange failed'
                        }, '*');
                        window.close();
                    };
                </script>
            </head>
            <body>
                <h1>Connection Failed</h1>
                <p>Failed to exchange authorization code for access token.</p>
                <p>You can close this window and try again.</p>
            </body>
            </html>
            """
            # Include the same headers as the other branches so this popup can
            # also message its opener and close itself.
            return HTMLResponse(content=html_content, headers=popup_headers)

        # Return success page with postMessage script. blog_id is pulled out of
        # the f-string below: reusing single quotes inside an f-string expression
        # is a syntax error before Python 3.12.
        blog_url = result.get('blog_url', '')
        blog_id = result.get('blog_id', '')
        html_content = f"""
        <!DOCTYPE html>
        <html>
        <head>
            <title>WordPress.com Connection Successful</title>
            <script>
                // Send success message to opener/parent window
                window.onload = function() {{
                    (window.opener || window.parent).postMessage({{
                        type: 'WPCOM_OAUTH_SUCCESS',
                        success: true,
                        blogUrl: '{blog_url}',
                        blogId: '{blog_id}'
                    }}, '*');
                    window.close();
                }};
            </script>
        </head>
        <body>
            <h1>Connection Successful!</h1>
            <p>Your WordPress.com site has been connected successfully.</p>
            <p>You can close this window now.</p>
        </body>
        </html>
        """

        return HTMLResponse(content=html_content, headers=popup_headers)

    except Exception as e:
        logger.error(f"Error handling WordPress OAuth callback: {e}")
        html_content = """
        <!DOCTYPE html>
        <html>
        <head>
            <title>WordPress.com Connection Failed</title>
            <script>
                // Send error message to opener/parent window
                window.onload = function() {
                    (window.opener || window.parent).postMessage({
                        type: 'WPCOM_OAUTH_ERROR',
                        success: false,
                        error: 'Callback error'
                    }, '*');
                    window.close();
                };
            </script>
        </head>
        <body>
            <h1>Connection Failed</h1>
            <p>An unexpected error occurred during connection.</p>
            <p>You can close this window and try again.</p>
        </body>
        </html>
        """
        return HTMLResponse(content=html_content, headers=popup_headers)

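# --- Illustrative sketch (not wired into the routes) ---------------------------
# The callback above renders four near-identical postMessage pages. A shared
# helper like this sketch would remove the duplication and, by serializing the
# payload with json.dumps, avoid splicing raw values such as `error` or
# `blog_url` into the inline script. `title`, `heading`, and `message` are
# expected to be trusted literals, not user input.
import json

def _sketch_postmessage_page(title: str, heading: str, message: str, payload: dict) -> str:
    # json.dumps yields a valid JS object literal; escaping '<' also prevents a
    # '</script>' sequence inside payload values from terminating the script tag.
    payload_js = json.dumps(payload).replace("<", "\\u003c")
    return f"""
    <!DOCTYPE html>
    <html>
    <head>
        <title>{title}</title>
        <script>
            window.onload = function() {{
                (window.opener || window.parent).postMessage({payload_js}, '*');
                window.close();
            }};
        </script>
    </head>
    <body>
        <h1>{heading}</h1>
        <p>{message}</p>
        <p>You can close this window now.</p>
    </body>
    </html>
    """
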
@router.get("/status", response_model=WordPressStatusResponse)
|
||||
async def get_wordpress_oauth_status(
|
||||
user: Dict[str, Any] = Depends(get_current_user)
|
||||
):
|
||||
"""Get WordPress OAuth connection status."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User ID not found.")
|
||||
|
||||
status_data = oauth_service.get_connection_status(user_id)
|
||||
return WordPressStatusResponse(**status_data)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting WordPress OAuth status: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to get WordPress connection status."
|
||||
)
|
||||
|
||||
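# --- Illustrative sketch (not wired into the routes) ---------------------------
# oauth_service.handle_oauth_callback, used in the callback route above,
# presumably validates `state` and then exchanges the code at WordPress.com's
# token endpoint. A minimal sketch of that exchange, assuming the `requests`
# library is available; the function name, arguments, and return shape are
# assumptions for illustration.
import requests

def _sketch_exchange_code_for_token(client_id: str, client_secret: str,
                                    redirect_uri: str, code: str) -> dict:
    resp = requests.post(
        "https://public-api.wordpress.com/oauth2/token",
        data={
            "client_id": client_id,
            "client_secret": client_secret,
            "redirect_uri": redirect_uri,
            "grant_type": "authorization_code",
            "code": code,
        },
        timeout=30,
    )
    resp.raise_for_status()
    # The WordPress.com response typically carries access_token, token_type,
    # blog_id, and blog_url.
    return {"success": True, **resp.json()}
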
@router.delete("/disconnect/{token_id}")
|
||||
async def disconnect_wordpress_site(
|
||||
token_id: int,
|
||||
user: Dict[str, Any] = Depends(get_current_user)
|
||||
):
|
||||
"""Disconnect a WordPress site."""
|
||||
try:
|
||||
user_id = user.get('id')
|
||||
if not user_id:
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="User ID not found.")
|
||||
|
||||
success = oauth_service.revoke_token(user_id, token_id)
|
||||
if not success:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_404_NOT_FOUND,
|
||||
detail="WordPress token not found or could not be disconnected."
|
||||
)
|
||||
|
||||
return {"success": True, "message": f"WordPress site disconnected successfully."}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error disconnecting WordPress site: {e}")
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
detail="Failed to disconnect WordPress site."
|
||||
)
|
||||
|
||||
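# --- Illustrative usage (belongs in a test module, not this router) ------------
# A quick way to exercise the authenticated routes above with FastAPI's
# TestClient. The `main` import, the dependency override, and the route prefix
# are assumptions about how this app is assembled.
def _sketch_exercise_routes():
    from fastapi.testclient import TestClient
    from main import app  # assumption: the FastAPI app object lives in main.py

    # Bypass real auth for the sketch by overriding the dependency.
    app.dependency_overrides[get_current_user] = lambda: {"id": "user-123"}
    client = TestClient(app)

    status_resp = client.get("/api/wordpress/oauth/status")  # prefix is an assumption
    print(status_resp.status_code, status_resp.json())

    disconnect_resp = client.delete("/api/wordpress/oauth/disconnect/1")
    print(disconnect_resp.status_code)
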
@router.get("/health")
|
||||
async def wordpress_oauth_health():
|
||||
"""WordPress OAuth health check."""
|
||||
return {
|
||||
"status": "healthy",
|
||||
"service": "wordpress_oauth",
|
||||
"timestamp": "2024-01-01T00:00:00Z",
|
||||
"version": "1.0.0"
|
||||
}
|
||||