Base code
backend/utils/asset_tracker.py (new file, 158 lines)
@@ -0,0 +1,158 @@
"""
Asset Tracker Utility
Helper utility for modules to easily save generated content to the unified asset library.
"""

from typing import Dict, Any, Optional
from sqlalchemy.orm import Session
from services.content_asset_service import ContentAssetService
from models.content_asset_models import AssetType, AssetSource
import logging
import re
from urllib.parse import urlparse

logger = logging.getLogger(__name__)

# Maximum file size (100MB)
MAX_FILE_SIZE = 100 * 1024 * 1024

# Allowed URL schemes for absolute URLs; relative paths starting with '/'
# are accepted separately in validate_file_url()
ALLOWED_URL_SCHEMES = ['http', 'https']


def validate_file_url(file_url: str) -> bool:
    """Validate file URL format."""
    if not file_url or not isinstance(file_url, str):
        return False

    # Allow relative paths
    if file_url.startswith('/'):
        return True

    # Validate absolute URLs
    try:
        parsed = urlparse(file_url)
        return parsed.scheme in ALLOWED_URL_SCHEMES and bool(parsed.netloc)
    except Exception:
        return False


def save_asset_to_library(
    db: Session,
    user_id: str,
    asset_type: str,
    source_module: str,
    filename: str,
    file_url: str,
    file_path: Optional[str] = None,
    file_size: Optional[int] = None,
    mime_type: Optional[str] = None,
    title: Optional[str] = None,
    description: Optional[str] = None,
    prompt: Optional[str] = None,
    tags: Optional[list] = None,
    asset_metadata: Optional[Dict[str, Any]] = None,
    provider: Optional[str] = None,
    model: Optional[str] = None,
    cost: Optional[float] = None,
    generation_time: Optional[float] = None,
) -> Optional[int]:
    """
    Helper function to save a generated asset to the unified asset library.

    This can be called from any module (story writer, image studio, etc.)
    to automatically track generated content.

    Args:
        db: Database session
        user_id: Clerk user ID
        asset_type: 'text', 'image', 'video', or 'audio'
        source_module: 'story_writer', 'image_studio', 'main_text_generation', etc.
        filename: Original filename
        file_url: Public URL to access the asset
        file_path: Server file path (optional)
        file_size: File size in bytes (optional)
        mime_type: MIME type (optional)
        title: Asset title (optional)
        description: Asset description (optional)
        prompt: Generation prompt (optional)
        tags: List of tags (optional)
        asset_metadata: Additional metadata (optional)
        provider: AI provider used (optional)
        model: Model used (optional)
        cost: Generation cost (optional)
        generation_time: Generation time in seconds (optional)

    Returns:
        Asset ID if successful, None otherwise
    """
    try:
        # Validate inputs
        if not user_id or not isinstance(user_id, str):
            logger.error("Invalid user_id provided")
            return None

        if not filename or not isinstance(filename, str):
            logger.error("Invalid filename provided")
            return None

        if not validate_file_url(file_url):
            logger.error(f"Invalid file_url format: {file_url}")
            return None

        if file_size and file_size > MAX_FILE_SIZE:
            logger.warning(f"File size {file_size} exceeds maximum {MAX_FILE_SIZE}")
            # Don't fail, just log a warning

        # Convert string enums to enum types
        try:
            asset_type_enum = AssetType(asset_type.lower())
        except ValueError:
            logger.warning(f"Invalid asset type: {asset_type}, defaulting to 'text'")
            asset_type_enum = AssetType.TEXT

        try:
            source_module_enum = AssetSource(source_module.lower())
        except ValueError:
            logger.warning(f"Invalid source module: {source_module}, defaulting to 'story_writer'")
            source_module_enum = AssetSource.STORY_WRITER

        # Sanitize filename (remove path traversal attempts)
        filename = re.sub(r'[^\w\s\-_\.]', '', filename.split('/')[-1])
        if not filename:
            filename = f"asset_{asset_type}_{source_module}.{asset_type}"

        # Generate title from filename if not provided
        if not title:
            title = filename.replace('_', ' ').replace('-', ' ').title()
            # Limit title length
            if len(title) > 200:
                title = title[:197] + '...'

        service = ContentAssetService(db)
        asset = service.create_asset(
            user_id=user_id,
            asset_type=asset_type_enum,
            source_module=source_module_enum,
            filename=filename,
            file_url=file_url,
            file_path=file_path,
            file_size=file_size,
            mime_type=mime_type,
            title=title,
            description=description,
            prompt=prompt,
            tags=tags or [],
            asset_metadata=asset_metadata or {},
            provider=provider,
            model=model,
            cost=cost,
            generation_time=generation_time,
        )

        logger.info(f"✅ Asset saved to library: {asset.id} ({asset_type} from {source_module})")
        return asset.id

    except Exception as e:
        logger.error(f"❌ Error saving asset to library: {str(e)}", exc_info=True)
        return None
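A minimal usage sketch (illustrative, not part of the commit; assumes a caller that already has a SQLAlchemy Session and a Clerk user ID in scope, and the file names are made up):

from sqlalchemy.orm import Session
from utils.asset_tracker import save_asset_to_library

def on_image_generated(db: Session, clerk_user_id: str) -> None:
    # Hypothetical values; in practice these come from the generation pipeline.
    asset_id = save_asset_to_library(
        db=db,
        user_id=clerk_user_id,
        asset_type="image",
        source_module="image_studio",
        filename="sunset_over_lake.png",
        file_url="/api/images/sunset_over_lake.png",
        prompt="a sunset over a mountain lake",
        tags=["image_studio", "landscape"],
    )
    if asset_id is None:
        # The helper swallows errors and returns None; the caller decides what to do.
        print("asset tracking failed; content was still generated")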
backend/utils/file_storage.py (new file, 246 lines)
@@ -0,0 +1,246 @@
"""
File Storage Utility
Robust file storage helper for saving generated content assets.
"""

import os
import uuid
from pathlib import Path
from typing import Optional, Tuple
import logging

logger = logging.getLogger(__name__)

# Maximum filename length
MAX_FILENAME_LENGTH = 255

# Allowed characters in filenames (alphanumeric, dash, underscore, dot)
ALLOWED_FILENAME_CHARS = set('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789-_.')


def sanitize_filename(filename: str, max_length: int = 100) -> str:
    """
    Sanitize filename to be filesystem-safe.

    Args:
        filename: Original filename
        max_length: Maximum length for filename

    Returns:
        Sanitized filename
    """
    if not filename:
        return f"file_{uuid.uuid4().hex[:8]}"

    # Replace path separators and other disallowed characters with underscores
    sanitized = "".join(c if c in ALLOWED_FILENAME_CHARS else '_' for c in filename)

    # Remove leading/trailing dots and spaces
    sanitized = sanitized.strip('. ')

    # Ensure it's not empty
    if not sanitized:
        sanitized = f"file_{uuid.uuid4().hex[:8]}"

    # Truncate if too long, preserving the extension
    if len(sanitized) > max_length:
        name, ext = os.path.splitext(sanitized)
        max_name_length = max_length - len(ext) - 1
        sanitized = name[:max_name_length] + ext

    return sanitized
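For illustration (not part of the commit), path separators and spaces become underscores while the extension survives truncation; inputs here are hypothetical:

print(sanitize_filename("../secret/passwd"))     # '/' -> '_', leading dots stripped
print(sanitize_filename("my report (v2).pdf"))   # -> "my_report__v2_.pdf"
print(sanitize_filename("x" * 300 + ".png", max_length=20))  # name truncated, ".png" kept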

def ensure_directory_exists(directory: Path) -> bool:
    """
    Ensure directory exists, creating it if necessary.

    Args:
        directory: Path to directory

    Returns:
        True if directory exists or was created, False otherwise
    """
    try:
        directory.mkdir(parents=True, exist_ok=True)
        return True
    except Exception as e:
        logger.error(f"Failed to create directory {directory}: {e}")
        return False


def save_file_safely(
    content: bytes,
    directory: Path,
    filename: str,
    max_file_size: int = 100 * 1024 * 1024  # 100MB default
) -> Tuple[Optional[Path], Optional[str]]:
    """
    Safely save file content to disk.

    Args:
        content: File content as bytes
        directory: Directory to save file in
        filename: Filename (will be sanitized)
        max_file_size: Maximum allowed file size in bytes

    Returns:
        Tuple of (file_path, error_message). file_path is None on error.
    """
    try:
        # Validate file size
        if len(content) > max_file_size:
            return None, f"File size {len(content)} exceeds maximum {max_file_size}"

        if len(content) == 0:
            return None, "File content is empty"

        # Ensure directory exists
        if not ensure_directory_exists(directory):
            return None, f"Failed to create directory: {directory}"

        # Sanitize filename
        safe_filename = sanitize_filename(filename)

        # Construct full path
        file_path = directory / safe_filename

        # Check if file already exists (unlikely with UUID, but check anyway)
        if file_path.exists():
            # Add UUID to make it unique
            name, ext = os.path.splitext(safe_filename)
            safe_filename = f"{name}_{uuid.uuid4().hex[:8]}{ext}"
            file_path = directory / safe_filename

        # Write file atomically (write to temp file first, then rename)
        temp_path = file_path.with_suffix(file_path.suffix + '.tmp')
        try:
            with open(temp_path, 'wb') as f:
                f.write(content)

            # Atomic rename
            temp_path.replace(file_path)

            logger.info(f"Successfully saved file: {file_path} ({len(content)} bytes)")
            return file_path, None

        except Exception as write_error:
            # Clean up temp file if it exists
            if temp_path.exists():
                try:
                    temp_path.unlink()
                except Exception:
                    pass
            raise write_error

    except Exception as e:
        logger.error(f"Error saving file: {e}", exc_info=True)
        return None, str(e)
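A short usage sketch (illustrative; the target directory and byte content are assumptions):

from pathlib import Path
from utils.file_storage import save_file_safely, generate_unique_filename

image_bytes = b"\x89PNG..."  # placeholder for real image content
filename = generate_unique_filename(prefix="studio_render", extension="png")
path, err = save_file_safely(image_bytes, Path("generated_images"), filename)
if err:
    print(f"save failed: {err}")
else:
    print(f"saved to {path}")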

def generate_unique_filename(
    prefix: str,
    extension: str = ".png",
    include_uuid: bool = True
) -> str:
    """
    Generate a unique filename.

    Args:
        prefix: Filename prefix
        extension: File extension (with or without dot)
        include_uuid: Whether to include UUID in filename

    Returns:
        Unique filename
    """
    if not extension.startswith('.'):
        extension = '.' + extension

    prefix = sanitize_filename(prefix, max_length=50)

    if include_uuid:
        unique_id = uuid.uuid4().hex[:8]
        return f"{prefix}_{unique_id}{extension}"
    else:
        return f"{prefix}{extension}"


def save_text_file_safely(
    content: str,
    directory: Path,
    filename: str,
    encoding: str = 'utf-8',
    max_file_size: int = 10 * 1024 * 1024  # 10MB default for text
) -> Tuple[Optional[Path], Optional[str]]:
    """
    Safely save text content to disk.

    Args:
        content: Text content as string
        directory: Directory to save file in
        filename: Filename (will be sanitized)
        encoding: Text encoding (default: utf-8)
        max_file_size: Maximum allowed file size in bytes

    Returns:
        Tuple of (file_path, error_message). file_path is None on error.
    """
    try:
        # Validate content
        if not content or not isinstance(content, str):
            return None, "Content must be a non-empty string"

        # Convert to bytes for size check
        content_bytes = content.encode(encoding)

        # Validate file size
        if len(content_bytes) > max_file_size:
            return None, f"File size {len(content_bytes)} exceeds maximum {max_file_size}"

        # Ensure directory exists
        if not ensure_directory_exists(directory):
            return None, f"Failed to create directory: {directory}"

        # Sanitize filename
        safe_filename = sanitize_filename(filename)

        # Ensure a text extension if none of the accepted ones is present
        if not safe_filename.endswith(('.txt', '.md', '.json')):
            safe_filename = os.path.splitext(safe_filename)[0] + '.txt'

        # Construct full path
        file_path = directory / safe_filename

        # Check if file already exists
        if file_path.exists():
            # Add UUID to make it unique
            name, ext = os.path.splitext(safe_filename)
            safe_filename = f"{name}_{uuid.uuid4().hex[:8]}{ext}"
            file_path = directory / safe_filename

        # Write file atomically (write to temp file first, then rename)
        temp_path = file_path.with_suffix(file_path.suffix + '.tmp')
        try:
            with open(temp_path, 'w', encoding=encoding) as f:
                f.write(content)

            # Atomic rename
            temp_path.replace(file_path)

            logger.info(f"Successfully saved text file: {file_path} ({len(content_bytes)} bytes, {len(content)} chars)")
            return file_path, None

        except Exception as write_error:
            # Clean up temp file if it exists
            if temp_path.exists():
                try:
                    temp_path.unlink()
                except Exception:
                    pass
            raise write_error

    except Exception as e:
        logger.error(f"Error saving text file: {e}", exc_info=True)
        return None, str(e)
backend/utils/logger_utils.py (new file, 53 lines)
@@ -0,0 +1,53 @@
"""
Logger utilities to prevent conflicts between different logging configurations.
"""

from typing import Optional

from loguru import logger
import sys


def safe_logger_config(format_string: str, level: str = "INFO"):
    """
    Safely configure the logger without removing existing handlers.
    This prevents conflicts with the main logging configuration.

    Args:
        format_string: Log format string
        level: Log level
    """
    try:
        # Only add a new handler if we don't already have one with this format.
        # Note: this inspects loguru's private internals (a dict of handler id
        # to Handler), which may change between loguru versions.
        existing_handlers = logger._core.handlers
        for handler in existing_handlers.values():
            if hasattr(handler, '_sink') and handler._sink == sys.stdout:
                # Check if the format matches to avoid duplicates
                if hasattr(handler, '_format') and handler._format == format_string:
                    return  # Handler already exists with this format

        # Add a new handler only if needed
        logger.add(
            sys.stdout,
            level=level,
            format=format_string,
            colorize=True
        )
    except Exception:
        # If there's any error, just use the existing logger configuration
        pass


def get_service_logger(service_name: str, format_string: Optional[str] = None):
    """
    Get a logger for a specific service without conflicting with the main configuration.

    Args:
        service_name: Name of the service
        format_string: Optional custom format string

    Returns:
        Logger instance
    """
    if format_string:
        safe_logger_config(format_string)

    return logger.bind(service=service_name)
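A usage sketch (illustrative; the format string is an example, not a project convention, and {extra[service]} is how loguru interpolates values added via .bind()):

from utils.logger_utils import get_service_logger

fmt = "{time:HH:mm:ss} | {level} | {extra[service]} | {message}"
log = get_service_logger("image_studio", format_string=fmt)
log.info("generation started")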
backend/utils/stability_utils.py (new file, 858 lines)
@@ -0,0 +1,858 @@
"""Utility functions for Stability AI operations."""

import base64
import io
import json
import mimetypes
import os
from typing import Dict, Any, Optional, List, Union, Tuple
from PIL import Image, ImageStat
import numpy as np
from fastapi import UploadFile, HTTPException
import aiofiles
import asyncio
from datetime import datetime
import hashlib


class ImageValidator:
    """Validator for image files and parameters."""

    @staticmethod
    def validate_image_file(file: UploadFile) -> Dict[str, Any]:
        """Validate uploaded image file.

        Args:
            file: Uploaded file

        Returns:
            Validation result with file info
        """
        if not file.content_type or not file.content_type.startswith('image/'):
            raise HTTPException(status_code=400, detail="File must be an image")

        # Check file extension
        allowed_extensions = ['.jpg', '.jpeg', '.png', '.webp']
        if file.filename:
            ext = '.' + file.filename.split('.')[-1].lower()
            if ext not in allowed_extensions:
                raise HTTPException(
                    status_code=400,
                    detail=f"Unsupported file format. Allowed: {allowed_extensions}"
                )

        return {
            "filename": file.filename,
            "content_type": file.content_type,
            "is_valid": True
        }

    @staticmethod
    async def analyze_image_content(content: bytes) -> Dict[str, Any]:
        """Analyze image content and characteristics.

        Args:
            content: Image bytes

        Returns:
            Image analysis results
        """
        try:
            img = Image.open(io.BytesIO(content))

            # Basic info
            info = {
                "format": img.format,
                "mode": img.mode,
                "size": img.size,
                "width": img.width,
                "height": img.height,
                "total_pixels": img.width * img.height,
                "aspect_ratio": round(img.width / img.height, 3),
                "file_size": len(content),
                "has_alpha": img.mode in ("RGBA", "LA") or "transparency" in img.info
            }

            # Color analysis
            if img.mode in ("RGB", "RGBA"):
                img_rgb = img.convert("RGB")
                stat = ImageStat.Stat(img_rgb)

                info.update({
                    "brightness": round(sum(stat.mean) / 3, 2),
                    "color_variance": round(sum(stat.stddev) / 3, 2),
                    "dominant_colors": _extract_dominant_colors(img_rgb)
                })

            # Quality assessment
            info["quality_assessment"] = _assess_image_quality(img)

            return info

        except Exception as e:
            raise HTTPException(status_code=400, detail=f"Error analyzing image: {str(e)}")

    @staticmethod
    def validate_dimensions(width: int, height: int, operation: str) -> None:
        """Validate image dimensions for a specific operation.

        Args:
            width: Image width
            height: Image height
            operation: Operation type
        """
        from config.stability_config import IMAGE_LIMITS

        limits = IMAGE_LIMITS.get(operation, IMAGE_LIMITS["generate"])
        total_pixels = width * height

        if "min_pixels" in limits and total_pixels < limits["min_pixels"]:
            raise HTTPException(
                status_code=400,
                detail=f"Image must have at least {limits['min_pixels']} pixels for {operation}"
            )

        if "max_pixels" in limits and total_pixels > limits["max_pixels"]:
            raise HTTPException(
                status_code=400,
                detail=f"Image must have at most {limits['max_pixels']} pixels for {operation}"
            )

        if "min_dimension" in limits:
            min_dim = limits["min_dimension"]
            if width < min_dim or height < min_dim:
                raise HTTPException(
                    status_code=400,
                    detail=f"Both dimensions must be at least {min_dim} pixels for {operation}"
                )

class AudioValidator:
    """Validator for audio files and parameters."""

    @staticmethod
    def validate_audio_file(file: UploadFile) -> Dict[str, Any]:
        """Validate uploaded audio file.

        Args:
            file: Uploaded file

        Returns:
            Validation result with file info
        """
        if not file.content_type or not file.content_type.startswith('audio/'):
            raise HTTPException(status_code=400, detail="File must be an audio file")

        # Check file extension
        allowed_extensions = ['.mp3', '.wav']
        if file.filename:
            ext = '.' + file.filename.split('.')[-1].lower()
            if ext not in allowed_extensions:
                raise HTTPException(
                    status_code=400,
                    detail=f"Unsupported audio format. Allowed: {allowed_extensions}"
                )

        return {
            "filename": file.filename,
            "content_type": file.content_type,
            "is_valid": True
        }

    @staticmethod
    async def analyze_audio_content(content: bytes) -> Dict[str, Any]:
        """Analyze audio content and characteristics.

        Args:
            content: Audio bytes

        Returns:
            Audio analysis results
        """
        try:
            # Basic info
            info = {
                "file_size": len(content),
                "format": "unknown"  # would need an audio library to detect
            }

            # For a full implementation, use a library like librosa or pydub
            # to analyze characteristics such as duration and sample rate.

            return info

        except Exception as e:
            raise HTTPException(status_code=400, detail=f"Error analyzing audio: {str(e)}")


class PromptOptimizer:
    """Optimizer for text prompts."""

    @staticmethod
    def analyze_prompt(prompt: str) -> Dict[str, Any]:
        """Analyze prompt structure and content.

        Args:
            prompt: Text prompt

        Returns:
            Prompt analysis
        """
        words = prompt.split()

        analysis = {
            "length": len(prompt),
            "word_count": len(words),
            "sentence_count": len([s for s in prompt.split('.') if s.strip()]),
            "has_style_descriptors": _has_style_descriptors(prompt),
            "has_quality_terms": _has_quality_terms(prompt),
            "has_technical_terms": _has_technical_terms(prompt),
            "complexity_score": _calculate_complexity_score(prompt)
        }

        return analysis

    @staticmethod
    def optimize_prompt(
        prompt: str,
        target_model: str = "ultra",
        target_style: Optional[str] = None,
        quality_level: str = "high"
    ) -> Dict[str, Any]:
        """Optimize prompt for better results.

        Args:
            prompt: Original prompt
            target_model: Target model
            target_style: Target style
            quality_level: Desired quality level

        Returns:
            Optimization results
        """
        optimizations = []
        optimized_prompt = prompt.strip()

        # Add style if not present
        if target_style and not _has_style_descriptors(prompt):
            optimized_prompt += f", {target_style} style"
            optimizations.append(f"Added style: {target_style}")

        # Add quality terms if needed
        if quality_level == "high" and not _has_quality_terms(prompt):
            optimized_prompt += ", high quality, detailed, sharp"
            optimizations.append("Added quality enhancers")

        # Model-specific optimizations
        if target_model == "ultra":
            if len(prompt.split()) < 10:
                optimized_prompt += ", professional photography, detailed composition"
                optimizations.append("Added detail for Ultra model")
        elif target_model == "core":
            # Keep concise for Core model
            if len(prompt.split()) > 30:
                optimizations.append("Consider shortening prompt for Core model")

        return {
            "original_prompt": prompt,
            "optimized_prompt": optimized_prompt,
            "optimizations_applied": optimizations,
            "improvement_estimate": len(optimizations) * 15  # rough percentage
        }

    @staticmethod
    def generate_negative_prompt(
        prompt: str,
        style: Optional[str] = None
    ) -> str:
        """Generate appropriate negative prompt.

        Args:
            prompt: Original prompt
            style: Target style

        Returns:
            Suggested negative prompt
        """
        base_negative = "blurry, low quality, distorted, deformed, pixelated"

        # Add style-specific negatives
        if style:
            if "photographic" in style.lower():
                base_negative += ", cartoon, anime, illustration"
            elif "anime" in style.lower():
                base_negative += ", realistic, photographic"
            elif "art" in style.lower():
                base_negative += ", photograph, realistic"

        # Add content-specific negatives based on prompt
        if "person" in prompt.lower() or "human" in prompt.lower():
            base_negative += ", extra limbs, malformed hands, duplicate"

        return base_negative

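An illustrative call (the prompt is a made-up example): with no style or quality descriptors in the input and fewer than 10 words, all three optimizations fire.

result = PromptOptimizer.optimize_prompt(
    "a castle on a cliff",
    target_model="ultra",
    target_style="photographic",
    quality_level="high",
)
print(result["optimized_prompt"])       # original plus style/quality/detail suffixes
print(result["optimizations_applied"])  # e.g. ["Added style: photographic", ...]
negative = PromptOptimizer.generate_negative_prompt("a castle on a cliff", style="photographic")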

class FileManager:
    """Manager for file operations and caching."""

    @staticmethod
    async def save_result(
        content: bytes,
        filename: str,
        operation: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> str:
        """Save generation result to file.

        Args:
            content: File content
            filename: Filename
            operation: Operation type
            metadata: Optional metadata

        Returns:
            File path
        """
        # Create directory structure: generated_content/<operation>/YYYY/MM/DD
        base_dir = "generated_content"
        operation_dir = os.path.join(base_dir, operation)
        date_dir = os.path.join(operation_dir, datetime.now().strftime("%Y/%m/%d"))

        os.makedirs(date_dir, exist_ok=True)

        # Generate unique filename
        timestamp = datetime.now().strftime("%H%M%S")
        file_hash = hashlib.md5(content).hexdigest()[:8]
        unique_filename = f"{timestamp}_{file_hash}_{filename}"

        file_path = os.path.join(date_dir, unique_filename)

        # Save file
        async with aiofiles.open(file_path, 'wb') as f:
            await f.write(content)

        # Save metadata if provided
        if metadata:
            metadata_path = file_path + ".json"
            async with aiofiles.open(metadata_path, 'w') as f:
                await f.write(json.dumps(metadata, indent=2))

        return file_path

    @staticmethod
    def generate_cache_key(operation: str, parameters: Dict[str, Any]) -> str:
        """Generate cache key for operation and parameters.

        Args:
            operation: Operation type
            parameters: Operation parameters

        Returns:
            Cache key
        """
        # Create a deterministic hash from the operation and parameters
        key_data = f"{operation}:{json.dumps(parameters, sort_keys=True)}"
        return hashlib.sha256(key_data.encode()).hexdigest()
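Because the parameters are serialized with sort_keys=True, the key is stable regardless of dict ordering; a quick illustration with made-up parameters:

k1 = FileManager.generate_cache_key("generate_core", {"prompt": "cat", "seed": 7})
k2 = FileManager.generate_cache_key("generate_core", {"seed": 7, "prompt": "cat"})
assert k1 == k2  # same operation + parameters -> same cache key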

class ResponseFormatter:
    """Formatter for API responses."""

    @staticmethod
    def format_image_response(
        content: bytes,
        output_format: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Format image response with metadata.

        Args:
            content: Image content
            output_format: Output format
            metadata: Optional metadata

        Returns:
            Formatted response
        """
        response = {
            "image": base64.b64encode(content).decode(),
            "format": output_format,
            "size": len(content),
            "timestamp": datetime.utcnow().isoformat()
        }

        if metadata:
            response["metadata"] = metadata

        return response

    @staticmethod
    def format_audio_response(
        content: bytes,
        output_format: str,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Format audio response with metadata.

        Args:
            content: Audio content
            output_format: Output format
            metadata: Optional metadata

        Returns:
            Formatted response
        """
        response = {
            "audio": base64.b64encode(content).decode(),
            "format": output_format,
            "size": len(content),
            "timestamp": datetime.utcnow().isoformat()
        }

        if metadata:
            response["metadata"] = metadata

        return response

    @staticmethod
    def format_3d_response(
        content: bytes,
        metadata: Optional[Dict[str, Any]] = None
    ) -> Dict[str, Any]:
        """Format 3D model response with metadata.

        Args:
            content: 3D model content (GLB)
            metadata: Optional metadata

        Returns:
            Formatted response
        """
        response = {
            "model": base64.b64encode(content).decode(),
            "format": "glb",
            "size": len(content),
            "timestamp": datetime.utcnow().isoformat()
        }

        if metadata:
            response["metadata"] = metadata

        return response


class ParameterValidator:
    """Validator for operation parameters."""

    @staticmethod
    def validate_seed(seed: Optional[int]) -> int:
        """Validate and normalize seed parameter.

        Args:
            seed: Seed value

        Returns:
            Valid seed value
        """
        if seed is None:
            return 0

        if not isinstance(seed, int) or seed < 0 or seed > 4294967294:
            raise HTTPException(
                status_code=400,
                detail="Seed must be an integer between 0 and 4294967294"
            )

        return seed

    @staticmethod
    def validate_strength(strength: Optional[float], operation: str) -> Optional[float]:
        """Validate strength parameter for different operations.

        Args:
            strength: Strength value
            operation: Operation type

        Returns:
            Valid strength value
        """
        if strength is None:
            return None

        if not isinstance(strength, (int, float)) or strength < 0 or strength > 1:
            raise HTTPException(
                status_code=400,
                detail="Strength must be a float between 0 and 1"
            )

        # Operation-specific validation
        if operation == "audio_to_audio" and strength < 0.01:
            raise HTTPException(
                status_code=400,
                detail="Minimum strength for audio-to-audio is 0.01"
            )

        return float(strength)

    @staticmethod
    def validate_creativity(creativity: Optional[float], operation: str) -> Optional[float]:
        """Validate creativity parameter.

        Args:
            creativity: Creativity value
            operation: Operation type

        Returns:
            Valid creativity value
        """
        if creativity is None:
            return None

        # Different operations have different creativity ranges
        ranges = {
            "upscale": (0.1, 0.5),
            "outpaint": (0, 1),
            "conservative_upscale": (0.2, 0.5)
        }

        min_val, max_val = ranges.get(operation, (0, 1))

        if not isinstance(creativity, (int, float)) or creativity < min_val or creativity > max_val:
            raise HTTPException(
                status_code=400,
                detail=f"Creativity for {operation} must be between {min_val} and {max_val}"
            )

        return float(creativity)

class WorkflowManager:
    """Manager for complex workflows and pipelines."""

    @staticmethod
    def validate_workflow(workflow: List[Dict[str, Any]]) -> List[str]:
        """Validate workflow steps.

        Args:
            workflow: List of workflow steps

        Returns:
            List of validation errors
        """
        errors = []
        supported_operations = [
            "generate_ultra", "generate_core", "generate_sd3",
            "upscale_fast", "upscale_conservative", "upscale_creative",
            "inpaint", "outpaint", "erase", "search_and_replace",
            "control_sketch", "control_structure", "control_style"
        ]

        for i, step in enumerate(workflow):
            if "operation" not in step:
                errors.append(f"Step {i+1}: Missing 'operation' field")
                continue

            operation = step["operation"]
            if operation not in supported_operations:
                errors.append(f"Step {i+1}: Unsupported operation '{operation}'")

            # Validate step dependencies
            if i > 0 and operation.startswith("generate_"):
                errors.append(f"Step {i+1}: Generate operations should be first in workflow")

        return errors

    @staticmethod
    def optimize_workflow(workflow: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        """Optimize workflow for better performance.

        Args:
            workflow: Original workflow

        Returns:
            Optimized workflow
        """
        optimized = workflow.copy()

        # Remove redundant operations (generate steps are kept even if repeated)
        operations_seen = set()
        filtered_workflow = []

        for step in optimized:
            operation = step["operation"]
            if operation not in operations_seen or operation.startswith("generate_"):
                filtered_workflow.append(step)
                operations_seen.add(operation)

        # Reorder for optimal execution:
        # generation operations first, then modifications, then upscaling
        order_priority = {
            "generate": 0,
            "control": 1,
            "edit": 2,
            "upscale": 3
        }

        def get_priority(step):
            operation = step["operation"]
            for key, priority in order_priority.items():
                if operation.startswith(key):
                    return priority
            return 999

        filtered_workflow.sort(key=get_priority)

        return filtered_workflow

# ==================== HELPER FUNCTIONS ====================

def _extract_dominant_colors(img: Image.Image, num_colors: int = 5) -> List[Tuple[int, int, int]]:
    """Extract dominant colors from image.

    Args:
        img: PIL Image
        num_colors: Number of dominant colors to extract

    Returns:
        List of RGB tuples
    """
    # Resize image for faster processing
    img_small = img.resize((150, 150))

    # Convert to numpy array of RGB pixels
    img_array = np.array(img_small)
    pixels = img_array.reshape(-1, 3)

    # Use k-means clustering to find dominant colors
    # (imported lazily so sklearn is only required on this analysis path)
    from sklearn.cluster import KMeans

    kmeans = KMeans(n_clusters=num_colors, random_state=42, n_init=10)
    kmeans.fit(pixels)

    colors = kmeans.cluster_centers_.astype(int)
    return [tuple(color) for color in colors]


def _assess_image_quality(img: Image.Image) -> Dict[str, Any]:
    """Assess image quality metrics.

    Args:
        img: PIL Image

    Returns:
        Quality assessment
    """
    # Convert to grayscale for quality analysis
    gray = img.convert('L')
    gray_array = np.array(gray)

    # Estimate sharpness from gradient variance (a rough Laplacian-style proxy)
    laplacian_var = np.var(np.gradient(gray_array))
    sharpness_score = min(100, laplacian_var / 100)

    # Estimate noise level from intensity standard deviation
    noise_level = np.std(gray_array)

    # Overall quality score
    overall_score = (sharpness_score + max(0, 100 - noise_level)) / 2

    return {
        "sharpness_score": round(sharpness_score, 2),
        "noise_level": round(noise_level, 2),
        "overall_score": round(overall_score, 2),
        "needs_enhancement": overall_score < 70
    }


def _has_style_descriptors(prompt: str) -> bool:
    """Check if prompt contains style descriptors."""
    style_keywords = [
        "photorealistic", "realistic", "anime", "cartoon", "digital art",
        "oil painting", "watercolor", "sketch", "illustration", "3d render",
        "cinematic", "artistic", "professional"
    ]
    return any(keyword in prompt.lower() for keyword in style_keywords)


def _has_quality_terms(prompt: str) -> bool:
    """Check if prompt contains quality terms."""
    quality_keywords = [
        "high quality", "detailed", "sharp", "crisp", "clear",
        "professional", "masterpiece", "award winning"
    ]
    return any(keyword in prompt.lower() for keyword in quality_keywords)


def _has_technical_terms(prompt: str) -> bool:
    """Check if prompt contains technical photography terms."""
    technical_keywords = [
        "bokeh", "depth of field", "macro", "wide angle", "telephoto",
        "iso", "aperture", "shutter speed", "lighting", "composition"
    ]
    return any(keyword in prompt.lower() for keyword in technical_keywords)


def _calculate_complexity_score(prompt: str) -> float:
    """Calculate prompt complexity score.

    Args:
        prompt: Text prompt

    Returns:
        Complexity score (0-100)
    """
    words = prompt.split()

    # Base score from word count
    base_score = min(len(words) * 2, 50)

    # Add points for descriptive elements
    if _has_style_descriptors(prompt):
        base_score += 15
    if _has_quality_terms(prompt):
        base_score += 10
    if _has_technical_terms(prompt):
        base_score += 15

    # Add points for specific details
    if any(word in prompt.lower() for word in ["color", "lighting", "composition"]):
        base_score += 10

    return min(base_score, 100)


def create_batch_manifest(
    operation: str,
    files: List[UploadFile],
    parameters: Dict[str, Any]
) -> Dict[str, Any]:
    """Create manifest for batch processing.

    Args:
        operation: Operation type
        files: List of files to process
        parameters: Operation parameters

    Returns:
        Batch manifest
    """
    return {
        "batch_id": f"batch_{datetime.utcnow().strftime('%Y%m%d_%H%M%S')}",
        "operation": operation,
        "file_count": len(files),
        "files": [{"filename": f.filename, "size": f.size} for f in files],
        "parameters": parameters,
        "created_at": datetime.utcnow().isoformat(),
        "estimated_duration": len(files) * 30,  # rough estimate: 30 seconds per file
        "estimated_cost": len(files) * _get_operation_cost(operation)
    }


def _get_operation_cost(operation: str) -> float:
    """Get estimated cost for operation.

    Args:
        operation: Operation type

    Returns:
        Estimated cost in credits
    """
    from config.stability_config import MODEL_PRICING

    # Map operation to pricing category
    if operation.startswith("generate_"):
        return MODEL_PRICING["generate"].get("core", 3)  # default to core
    elif operation.startswith("upscale_"):
        upscale_type = operation.replace("upscale_", "")
        return MODEL_PRICING["upscale"].get(upscale_type, 5)
    elif operation.startswith("control_"):
        return MODEL_PRICING["control"].get("sketch", 5)  # default
    else:
        return 5  # default cost


def validate_file_size(file: UploadFile, max_size: int = 10 * 1024 * 1024) -> None:
    """Validate file size.

    Args:
        file: Uploaded file
        max_size: Maximum allowed size in bytes
    """
    if file.size and file.size > max_size:
        raise HTTPException(
            status_code=413,
            detail=f"File size ({file.size} bytes) exceeds maximum allowed size ({max_size} bytes)"
        )


async def convert_image_format(content: bytes, target_format: str) -> bytes:
    """Convert image to target format.

    Args:
        content: Image content
        target_format: Target format (jpeg, png, webp)

    Returns:
        Converted image bytes
    """
    try:
        img = Image.open(io.BytesIO(content))

        # Convert to RGB if saving as JPEG (JPEG has no alpha channel)
        if target_format.lower() == "jpeg" and img.mode in ("RGBA", "LA"):
            img = img.convert("RGB")

        output = io.BytesIO()
        img.save(output, format=target_format.upper())
        return output.getvalue()

    except Exception as e:
        raise HTTPException(status_code=400, detail=f"Error converting image: {str(e)}")


def estimate_processing_time(
    operation: str,
    file_size: int,
    complexity: Optional[Dict[str, Any]] = None
) -> float:
    """Estimate processing time for operation.

    Args:
        operation: Operation type
        file_size: File size in bytes
        complexity: Optional complexity metrics

    Returns:
        Estimated time in seconds
    """
    # Base times by operation (in seconds)
    base_times = {
        "generate_ultra": 15,
        "generate_core": 5,
        "generate_sd3": 10,
        "upscale_fast": 2,
        "upscale_conservative": 30,
        "upscale_creative": 60,
        "inpaint": 10,
        "outpaint": 15,
        "control_sketch": 8,
        "control_structure": 8,
        "control_style": 10,
        "3d_fast": 10,
        "3d_point_aware": 20,
        "audio_text": 30,
        "audio_transform": 45
    }

    base_time = base_times.get(operation, 10)

    # Adjust for file size
    size_factor = max(1, file_size / (1024 * 1024))  # size in MB
    adjusted_time = base_time * size_factor

    # Adjust for complexity if provided
    if complexity and complexity.get("complexity_score", 0) > 80:
        adjusted_time *= 1.5

    return round(adjusted_time, 1)
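For example, a 2 MB input to the creative upscaler is estimated as below (the numbers are only as good as the hard-coded base times above):

eta = estimate_processing_time("upscale_creative", file_size=2 * 1024 * 1024)
# base 60s * size factor 2 (MB) = 120.0 seconds
print(eta)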
backend/utils/text_asset_tracker.py (new file, 133 lines)
@@ -0,0 +1,133 @@
"""
Text Asset Tracker Utility
Helper utility for saving and tracking text content as files in the asset library.
"""

from typing import Dict, Any, Optional
from pathlib import Path
from sqlalchemy.orm import Session
from utils.asset_tracker import save_asset_to_library
from utils.file_storage import save_text_file_safely, generate_unique_filename, sanitize_filename
import logging

logger = logging.getLogger(__name__)


def save_and_track_text_content(
    db: Session,
    user_id: str,
    content: str,
    source_module: str,
    title: str,
    description: Optional[str] = None,
    prompt: Optional[str] = None,
    tags: Optional[list] = None,
    asset_metadata: Optional[Dict[str, Any]] = None,
    base_dir: Optional[Path] = None,
    subdirectory: Optional[str] = None,
    file_extension: str = ".txt"
) -> Optional[int]:
    """
    Save text content to disk and track it in the asset library.

    Args:
        db: Database session
        user_id: Clerk user ID
        content: Text content to save
        source_module: Source module name (e.g., "linkedin_writer", "facebook_writer")
        title: Title for the asset
        description: Description of the content
        prompt: Original prompt used for generation
        tags: List of tags for search/filtering
        asset_metadata: Additional metadata
        base_dir: Base directory for file storage (defaults to backend/{module}_text)
        subdirectory: Optional subdirectory (e.g., "posts", "articles")
        file_extension: File extension (.txt, .md, etc.)

    Returns:
        Asset ID if successful, None otherwise
    """
    try:
        if not content or not isinstance(content, str) or len(content.strip()) == 0:
            logger.warning("Empty or invalid content provided")
            return None

        if not user_id or not isinstance(user_id, str):
            logger.error("Invalid user_id provided")
            return None

        # Determine output directory
        if base_dir is None:
            # Default to backend/{module}_text
            base_dir = Path(__file__).parent.parent
            module_name = source_module.replace('_', '')
            output_dir = base_dir / f"{module_name}_text"
        else:
            output_dir = base_dir

        # Add subdirectory if specified
        if subdirectory:
            output_dir = output_dir / subdirectory

        # Generate safe filename from title
        safe_title = sanitize_filename(title, max_length=80)
        filename = generate_unique_filename(
            prefix=safe_title,
            extension=file_extension,
            include_uuid=True
        )

        # Save text file
        file_path, save_error = save_text_file_safely(
            content=content,
            directory=output_dir,
            filename=filename,
            encoding='utf-8',
            max_file_size=10 * 1024 * 1024  # 10MB for text
        )

        if not file_path or save_error:
            logger.error(f"Failed to save text file: {save_error}")
            return None

        # Generate file URL
        relative_path = file_path.relative_to(base_dir)
        file_url = f"/api/text-assets/{relative_path.as_posix()}"

        # Prepare metadata
        final_metadata = asset_metadata or {}
        final_metadata.update({
            "status": "completed",
            "character_count": len(content),
            "word_count": len(content.split())
        })

        # Save to asset library
        asset_id = save_asset_to_library(
            db=db,
            user_id=user_id,
            asset_type="text",
            source_module=source_module,
            filename=filename,
            file_url=file_url,
            file_path=str(file_path),
            file_size=len(content.encode('utf-8')),
            mime_type="text/plain" if file_extension == ".txt" else "text/markdown",
            title=title,
            description=description or f"Generated {source_module.replace('_', ' ')} content",
            prompt=prompt,
            tags=tags or [source_module, "text"],
            asset_metadata=final_metadata
        )

        if asset_id:
            logger.info(f"✅ Text asset saved to library: ID={asset_id}, filename={filename}")
        else:
            logger.warning(f"Asset tracking returned None for {filename}")

        return asset_id

    except Exception as e:
        logger.error(f"❌ Error saving and tracking text content: {str(e)}", exc_info=True)
        return None
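A usage sketch (illustrative, not part of the commit; assumes a route handler with a SQLAlchemy Session, a Clerk user ID, and the writer's generated text in scope):

from utils.text_asset_tracker import save_and_track_text_content

asset_id = save_and_track_text_content(
    db=db,                    # Session from the request scope
    user_id=clerk_user_id,
    content=generated_post,   # text produced by the writer module
    source_module="linkedin_writer",
    title="Q3 launch announcement",
    prompt="Write a LinkedIn post announcing our Q3 launch",
    file_extension=".md",
)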