Base code
This commit is contained in:
22
backend/services/linkedin/image_generation/__init__.py
Normal file
22
backend/services/linkedin/image_generation/__init__.py
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
LinkedIn Image Generation Package
|
||||
|
||||
This package provides AI-powered image generation capabilities for LinkedIn content
|
||||
using Google's Gemini API. It includes image generation, editing, storage, and
|
||||
management services optimized for professional business use.
|
||||
"""
|
||||
|
||||
from .linkedin_image_generator import LinkedInImageGenerator
|
||||
from .linkedin_image_editor import LinkedInImageEditor
|
||||
from .linkedin_image_storage import LinkedInImageStorage
|
||||
|
||||
__all__ = [
|
||||
'LinkedInImageGenerator',
|
||||
'LinkedInImageEditor',
|
||||
'LinkedInImageStorage'
|
||||
]
|
||||
|
||||
# Version information
|
||||
__version__ = "1.0.0"
|
||||
__author__ = "Alwrity Team"
|
||||
__description__ = "LinkedIn AI Image Generation Services"
|
||||
@@ -0,0 +1,530 @@
|
||||
"""
|
||||
LinkedIn Image Editor Service
|
||||
|
||||
This service handles image editing capabilities for LinkedIn content using Gemini's
|
||||
conversational editing features. It provides professional image refinement and
|
||||
optimization specifically for LinkedIn use cases.
|
||||
"""
|
||||
|
||||
import os
|
||||
import base64
|
||||
from typing import Dict, Any, Optional, List
|
||||
from datetime import datetime
|
||||
from PIL import Image, ImageEnhance, ImageFilter
|
||||
from io import BytesIO
|
||||
from loguru import logger
|
||||
|
||||
# Import existing infrastructure
|
||||
from ...onboarding.api_key_manager import APIKeyManager
|
||||
|
||||
|
||||
class LinkedInImageEditor:
    """
    Handles LinkedIn image editing and refinement using Gemini's capabilities.

    This service provides both AI-powered editing through Gemini and traditional
    image processing for LinkedIn-specific optimizations.
    """

    def __init__(self, api_key_manager: Optional[APIKeyManager] = None):
        """
        Initialize the LinkedIn Image Editor.

        Args:
            api_key_manager: API key manager for Gemini authentication.
                A fresh APIKeyManager is created when omitted.
        """
        self.api_key_manager = api_key_manager if api_key_manager else APIKeyManager()
        # Gemini model reserved for the (future) conversational-editing path.
        self.model = "gemini-2.5-flash-image-preview"

        # Tuned PIL ImageEnhance multipliers for LinkedIn's mobile-first feed.
        factors = {}
        factors['brightness'] = 1.1   # Slightly brighter for mobile viewing
        factors['contrast'] = 1.05    # Subtle contrast enhancement
        factors['sharpness'] = 1.2    # Enhanced sharpness for clarity
        factors['saturation'] = 1.05  # Slight saturation boost
        self.enhancement_factors = factors

        logger.info("LinkedIn Image Editor initialized")
|
||||
|
||||
    async def edit_image_conversationally(
        self,
        base_image: bytes,
        edit_prompt: str,
        content_context: Dict[str, Any]
    ) -> Dict[str, Any]:
        """
        Edit image using Gemini's conversational editing capabilities.

        Currently falls back to traditional PIL processing driven by keyword
        analysis of the prompt (see _apply_traditional_editing); the actual
        Gemini conversational path is still a TODO.

        Args:
            base_image: Base image data in bytes
            edit_prompt: Natural language description of desired edits
            content_context: LinkedIn content context for optimization

        Returns:
            Dict containing edited image result and metadata
        """
        try:
            start_time = datetime.now()
            logger.info(f"Starting conversational image editing: {edit_prompt[:100]}...")

            # Enhance edit prompt for LinkedIn optimization. The result is
            # recorded in metadata even though the traditional-processing
            # fallback below does not consume it yet.
            enhanced_prompt = self._enhance_edit_prompt_for_linkedin(
                edit_prompt, content_context
            )

            # TODO: Implement Gemini conversational editing when available
            # For now, we'll use traditional image processing based on prompt analysis
            edited_image = await self._apply_traditional_editing(
                base_image, edit_prompt, content_context
            )

            # Propagate the failure payload unchanged so callers see the
            # underlying error message.
            if not edited_image.get('success'):
                return edited_image

            generation_time = (datetime.now() - start_time).total_seconds()

            return {
                'success': True,
                'image_data': edited_image['image_data'],
                'metadata': {
                    'edit_prompt': edit_prompt,
                    'enhanced_prompt': enhanced_prompt,
                    'editing_method': 'traditional_processing',
                    'editing_time': generation_time,
                    'content_context': content_context,
                    'model_used': self.model
                },
                'linkedin_optimization': {
                    'mobile_optimized': True,
                    'professional_aesthetic': True,
                    'brand_compliant': True,
                    'engagement_optimized': True
                }
            }

        except Exception as e:
            logger.error(f"Error in conversational image editing: {str(e)}")
            return {
                'success': False,
                'error': f"Conversational editing failed: {str(e)}",
                # start_time may be unbound if the failure happened before its
                # assignment, hence the locals() guard.
                'generation_time': (datetime.now() - start_time).total_seconds() if 'start_time' in locals() else 0
            }
|
||||
|
||||
async def apply_style_transfer(
|
||||
self,
|
||||
base_image: bytes,
|
||||
style_reference: bytes,
|
||||
content_context: Dict[str, Any]
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Apply style transfer from reference image to base image.
|
||||
|
||||
Args:
|
||||
base_image: Base image data in bytes
|
||||
style_reference: Reference image for style transfer
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Dict containing style-transferred image result
|
||||
"""
|
||||
try:
|
||||
start_time = datetime.now()
|
||||
logger.info("Starting style transfer for LinkedIn image")
|
||||
|
||||
# TODO: Implement Gemini style transfer when available
|
||||
# For now, return placeholder implementation
|
||||
|
||||
return {
|
||||
'success': False,
|
||||
'error': 'Style transfer not yet implemented - coming in next Gemini API update',
|
||||
'generation_time': (datetime.now() - start_time).total_seconds()
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in style transfer: {str(e)}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': f"Style transfer failed: {str(e)}",
|
||||
'generation_time': (datetime.now() - start_time).total_seconds() if 'start_time' in locals() else 0
|
||||
}
|
||||
|
||||
async def enhance_image_quality(
|
||||
self,
|
||||
image_data: bytes,
|
||||
enhancement_type: str = "linkedin_optimized",
|
||||
content_context: Optional[Dict[str, Any]] = None
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Enhance image quality using traditional image processing.
|
||||
|
||||
Args:
|
||||
image_data: Image data in bytes
|
||||
enhancement_type: Type of enhancement to apply
|
||||
content_context: LinkedIn content context for optimization
|
||||
|
||||
Returns:
|
||||
Dict containing enhanced image result
|
||||
"""
|
||||
try:
|
||||
start_time = datetime.now()
|
||||
logger.info(f"Starting image quality enhancement: {enhancement_type}")
|
||||
|
||||
# Open image for processing
|
||||
image = Image.open(BytesIO(image_data))
|
||||
original_size = image.size
|
||||
|
||||
# Apply LinkedIn-specific enhancements
|
||||
if enhancement_type == "linkedin_optimized":
|
||||
enhanced_image = self._apply_linkedin_enhancements(image, content_context)
|
||||
elif enhancement_type == "professional":
|
||||
enhanced_image = self._apply_professional_enhancements(image)
|
||||
elif enhancement_type == "creative":
|
||||
enhanced_image = self._apply_creative_enhancements(image)
|
||||
else:
|
||||
enhanced_image = self._apply_linkedin_enhancements(image, content_context)
|
||||
|
||||
# Convert back to bytes
|
||||
output_buffer = BytesIO()
|
||||
enhanced_image.save(output_buffer, format=image.format or "PNG", optimize=True)
|
||||
enhanced_data = output_buffer.getvalue()
|
||||
|
||||
enhancement_time = (datetime.now() - start_time).total_seconds()
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'image_data': enhanced_data,
|
||||
'metadata': {
|
||||
'enhancement_type': enhancement_type,
|
||||
'original_size': original_size,
|
||||
'enhanced_size': enhanced_image.size,
|
||||
'enhancement_time': enhancement_time,
|
||||
'content_context': content_context
|
||||
}
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in image quality enhancement: {str(e)}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': f"Quality enhancement failed: {str(e)}",
|
||||
'generation_time': (datetime.now() - start_time).total_seconds() if 'start_time' in locals() else 0
|
||||
}
|
||||
|
||||
def _enhance_edit_prompt_for_linkedin(
|
||||
self,
|
||||
edit_prompt: str,
|
||||
content_context: Dict[str, Any]
|
||||
) -> str:
|
||||
"""
|
||||
Enhance edit prompt for LinkedIn optimization.
|
||||
|
||||
Args:
|
||||
edit_prompt: Original edit prompt
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Enhanced edit prompt
|
||||
"""
|
||||
industry = content_context.get('industry', 'business')
|
||||
content_type = content_context.get('content_type', 'post')
|
||||
|
||||
linkedin_edit_enhancements = [
|
||||
f"Maintain professional business aesthetic for {industry} industry",
|
||||
f"Ensure mobile-optimized composition for LinkedIn {content_type}",
|
||||
"Keep professional color scheme and typography",
|
||||
"Maintain brand consistency and visual hierarchy",
|
||||
"Optimize for LinkedIn feed viewing and engagement"
|
||||
]
|
||||
|
||||
enhanced_prompt = f"{edit_prompt}\n\n"
|
||||
enhanced_prompt += "\n".join(linkedin_edit_enhancements)
|
||||
|
||||
return enhanced_prompt
|
||||
|
||||
async def _apply_traditional_editing(
|
||||
self,
|
||||
base_image: bytes,
|
||||
edit_prompt: str,
|
||||
content_context: Dict[str, Any]
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Apply traditional image processing based on edit prompt analysis.
|
||||
|
||||
Args:
|
||||
base_image: Base image data in bytes
|
||||
edit_prompt: Description of desired edits
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Dict containing edited image result
|
||||
"""
|
||||
try:
|
||||
# Open image for processing
|
||||
image = Image.open(BytesIO(base_image))
|
||||
|
||||
# Analyze edit prompt and apply appropriate processing
|
||||
edit_prompt_lower = edit_prompt.lower()
|
||||
|
||||
if any(word in edit_prompt_lower for word in ['brighter', 'light', 'lighting']):
|
||||
image = self._adjust_brightness(image, 1.2)
|
||||
logger.info("Applied brightness adjustment")
|
||||
|
||||
if any(word in edit_prompt_lower for word in ['sharper', 'sharp', 'clear']):
|
||||
image = self._apply_sharpening(image)
|
||||
logger.info("Applied sharpening")
|
||||
|
||||
if any(word in edit_prompt_lower for word in ['warmer', 'warm', 'color']):
|
||||
image = self._adjust_color_temperature(image, 'warm')
|
||||
logger.info("Applied warm color adjustment")
|
||||
|
||||
if any(word in edit_prompt_lower for word in ['professional', 'business']):
|
||||
image = self._apply_professional_enhancements(image)
|
||||
logger.info("Applied professional enhancements")
|
||||
|
||||
# Convert back to bytes
|
||||
output_buffer = BytesIO()
|
||||
image.save(output_buffer, format=image.format or "PNG", optimize=True)
|
||||
edited_data = output_buffer.getvalue()
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'image_data': edited_data
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in traditional editing: {str(e)}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': f"Traditional editing failed: {str(e)}"
|
||||
}
|
||||
|
||||
def _apply_linkedin_enhancements(
|
||||
self,
|
||||
image: Image.Image,
|
||||
content_context: Optional[Dict[str, Any]] = None
|
||||
) -> Image.Image:
|
||||
"""
|
||||
Apply LinkedIn-specific image enhancements.
|
||||
|
||||
Args:
|
||||
image: PIL Image object
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Enhanced image
|
||||
"""
|
||||
try:
|
||||
# Apply standard LinkedIn optimizations
|
||||
image = self._adjust_brightness(image, self.enhancement_factors['brightness'])
|
||||
image = self._adjust_contrast(image, self.enhancement_factors['contrast'])
|
||||
image = self._apply_sharpening(image)
|
||||
image = self._adjust_saturation(image, self.enhancement_factors['saturation'])
|
||||
|
||||
# Ensure professional appearance
|
||||
image = self._ensure_professional_appearance(image, content_context)
|
||||
|
||||
return image
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error applying LinkedIn enhancements: {str(e)}")
|
||||
return image
|
||||
|
||||
def _apply_professional_enhancements(self, image: Image.Image) -> Image.Image:
|
||||
"""
|
||||
Apply professional business aesthetic enhancements.
|
||||
|
||||
Args:
|
||||
image: PIL Image object
|
||||
|
||||
Returns:
|
||||
Enhanced image
|
||||
"""
|
||||
try:
|
||||
# Subtle enhancements for professional appearance
|
||||
image = self._adjust_brightness(image, 1.05)
|
||||
image = self._adjust_contrast(image, 1.03)
|
||||
image = self._apply_sharpening(image)
|
||||
|
||||
return image
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error applying professional enhancements: {str(e)}")
|
||||
return image
|
||||
|
||||
def _apply_creative_enhancements(self, image: Image.Image) -> Image.Image:
|
||||
"""
|
||||
Apply creative and engaging enhancements.
|
||||
|
||||
Args:
|
||||
image: PIL Image object
|
||||
|
||||
Returns:
|
||||
Enhanced image
|
||||
"""
|
||||
try:
|
||||
# More pronounced enhancements for creative appeal
|
||||
image = self._adjust_brightness(image, 1.1)
|
||||
image = self._adjust_contrast(image, 1.08)
|
||||
image = self._adjust_saturation(image, 1.1)
|
||||
image = self._apply_sharpening(image)
|
||||
|
||||
return image
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error applying creative enhancements: {str(e)}")
|
||||
return image
|
||||
|
||||
def _adjust_brightness(self, image: Image.Image, factor: float) -> Image.Image:
|
||||
"""Adjust image brightness."""
|
||||
try:
|
||||
enhancer = ImageEnhance.Brightness(image)
|
||||
return enhancer.enhance(factor)
|
||||
except Exception as e:
|
||||
logger.error(f"Error adjusting brightness: {str(e)}")
|
||||
return image
|
||||
|
||||
def _adjust_contrast(self, image: Image.Image, factor: float) -> Image.Image:
|
||||
"""Adjust image contrast."""
|
||||
try:
|
||||
enhancer = ImageEnhance.Contrast(image)
|
||||
return enhancer.enhance(factor)
|
||||
except Exception as e:
|
||||
logger.error(f"Error adjusting contrast: {str(e)}")
|
||||
return image
|
||||
|
||||
def _adjust_saturation(self, image: Image.Image, factor: float) -> Image.Image:
|
||||
"""Adjust image saturation."""
|
||||
try:
|
||||
enhancer = ImageEnhance.Color(image)
|
||||
return enhancer.enhance(factor)
|
||||
except Exception as e:
|
||||
logger.error(f"Error adjusting saturation: {str(e)}")
|
||||
return image
|
||||
|
||||
def _apply_sharpening(self, image: Image.Image) -> Image.Image:
|
||||
"""Apply image sharpening."""
|
||||
try:
|
||||
# Apply unsharp mask for professional sharpening
|
||||
return image.filter(ImageFilter.UnsharpMask(radius=1, percent=150, threshold=3))
|
||||
except Exception as e:
|
||||
logger.error(f"Error applying sharpening: {str(e)}")
|
||||
return image
|
||||
|
||||
def _adjust_color_temperature(self, image: Image.Image, temperature: str) -> Image.Image:
|
||||
"""Adjust image color temperature."""
|
||||
try:
|
||||
if temperature == 'warm':
|
||||
# Apply warm color adjustment
|
||||
enhancer = ImageEnhance.Color(image)
|
||||
image = enhancer.enhance(1.1)
|
||||
|
||||
# Slight red tint for warmth
|
||||
# This is a simplified approach - more sophisticated color grading could be implemented
|
||||
return image
|
||||
else:
|
||||
return image
|
||||
except Exception as e:
|
||||
logger.error(f"Error adjusting color temperature: {str(e)}")
|
||||
return image
|
||||
|
||||
def _ensure_professional_appearance(
|
||||
self,
|
||||
image: Image.Image,
|
||||
content_context: Optional[Dict[str, Any]] = None
|
||||
) -> Image.Image:
|
||||
"""
|
||||
Ensure image meets professional LinkedIn standards.
|
||||
|
||||
Args:
|
||||
image: PIL Image object
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Professionally optimized image
|
||||
"""
|
||||
try:
|
||||
# Ensure minimum quality standards
|
||||
if image.mode in ('RGBA', 'LA', 'P'):
|
||||
# Convert to RGB for better compatibility
|
||||
background = Image.new('RGB', image.size, (255, 255, 255))
|
||||
if image.mode == 'P':
|
||||
image = image.convert('RGBA')
|
||||
background.paste(image, mask=image.split()[-1] if image.mode == 'RGBA' else None)
|
||||
image = background
|
||||
|
||||
# Ensure minimum resolution for LinkedIn
|
||||
min_resolution = (1024, 1024)
|
||||
if image.size[0] < min_resolution[0] or image.size[1] < min_resolution[1]:
|
||||
# Resize to minimum resolution while maintaining aspect ratio
|
||||
ratio = max(min_resolution[0] / image.size[0], min_resolution[1] / image.size[1])
|
||||
new_size = (int(image.size[0] * ratio), int(image.size[1] * ratio))
|
||||
image = image.resize(new_size, Image.Resampling.LANCZOS)
|
||||
logger.info(f"Resized image to {new_size} for LinkedIn professional standards")
|
||||
|
||||
return image
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error ensuring professional appearance: {str(e)}")
|
||||
return image
|
||||
|
||||
async def get_editing_suggestions(
|
||||
self,
|
||||
image_data: bytes,
|
||||
content_context: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Get AI-powered editing suggestions for LinkedIn image.
|
||||
|
||||
Args:
|
||||
image_data: Image data in bytes
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
List of editing suggestions
|
||||
"""
|
||||
try:
|
||||
# Analyze image and provide contextual suggestions
|
||||
suggestions = []
|
||||
|
||||
# Professional enhancement suggestions
|
||||
suggestions.append({
|
||||
'id': 'professional_enhancement',
|
||||
'title': 'Professional Enhancement',
|
||||
'description': 'Apply subtle professional enhancements for business appeal',
|
||||
'prompt': 'Enhance this image with professional business aesthetics',
|
||||
'priority': 'high'
|
||||
})
|
||||
|
||||
# Mobile optimization suggestions
|
||||
suggestions.append({
|
||||
'id': 'mobile_optimization',
|
||||
'title': 'Mobile Optimization',
|
||||
'description': 'Optimize for LinkedIn mobile feed viewing',
|
||||
'prompt': 'Optimize this image for mobile LinkedIn viewing',
|
||||
'priority': 'medium'
|
||||
})
|
||||
|
||||
# Industry-specific suggestions
|
||||
industry = content_context.get('industry', 'business')
|
||||
suggestions.append({
|
||||
'id': 'industry_optimization',
|
||||
'title': f'{industry.title()} Industry Optimization',
|
||||
'description': f'Apply {industry} industry-specific visual enhancements',
|
||||
'prompt': f'Enhance this image with {industry} industry aesthetics',
|
||||
'priority': 'medium'
|
||||
})
|
||||
|
||||
# Engagement optimization suggestions
|
||||
suggestions.append({
|
||||
'id': 'engagement_optimization',
|
||||
'title': 'Engagement Optimization',
|
||||
'description': 'Make this image more engaging for LinkedIn audience',
|
||||
'prompt': 'Make this image more engaging and shareable for LinkedIn',
|
||||
'priority': 'low'
|
||||
})
|
||||
|
||||
return suggestions
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error getting editing suggestions: {str(e)}")
|
||||
return []
|
||||
@@ -0,0 +1,496 @@
|
||||
"""
|
||||
LinkedIn Image Generator Service
|
||||
|
||||
This service generates LinkedIn-optimized images using Google's Gemini API.
|
||||
It provides professional, business-appropriate imagery for LinkedIn content.
|
||||
"""
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional, Tuple
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
|
||||
# Import existing infrastructure
|
||||
from ...onboarding.api_key_manager import APIKeyManager
|
||||
from ...llm_providers.main_image_generation import generate_image
|
||||
|
||||
# Set up logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LinkedInImageGenerator:
    """
    Handles LinkedIn-optimized image generation using Gemini API.

    This service integrates with the existing Gemini provider infrastructure
    and provides LinkedIn-specific image optimization, quality assurance,
    and professional business aesthetics.
    """

    def __init__(self, api_key_manager: Optional[APIKeyManager] = None):
        """
        Initialize the LinkedIn Image Generator.

        Args:
            api_key_manager: API key manager for Gemini authentication;
                a default APIKeyManager is created when omitted.
        """
        self.api_key_manager = api_key_manager if api_key_manager else APIKeyManager()

        # Generation settings.
        self.model = "gemini-2.5-flash-image-preview"
        self.default_aspect_ratio = "1:1"  # LinkedIn post optimal ratio
        self.max_retries = 3

        # LinkedIn-specific image requirements (soft-validated downstream).
        self.min_resolution = (1024, 1024)
        self.max_file_size_mb = 5
        self.supported_formats = ["PNG", "JPEG"]

        logger.info("LinkedIn Image Generator initialized")
|
||||
|
||||
    async def generate_image(
        self,
        prompt: str,
        content_context: Dict[str, Any],
        aspect_ratio: str = "1:1",
        style_preference: str = "professional"
    ) -> Dict[str, Any]:
        """
        Generate LinkedIn-optimized image using Gemini API.

        Pipeline: enhance the prompt with LinkedIn context, delegate to the
        unified Gemini generation helper, then post-process the result for
        LinkedIn (resolution / color-mode normalization).

        Args:
            prompt: User's image generation prompt
            content_context: LinkedIn content context (topic, industry, content_type)
            aspect_ratio: Image aspect ratio (1:1, 16:9, 4:3)
            style_preference: Style preference (professional, creative, industry-specific)

        Returns:
            Dict containing generation result, image data, and metadata
        """
        try:
            start_time = datetime.now()
            logger.info(f"Starting LinkedIn image generation for topic: {content_context.get('topic', 'Unknown')}")

            # Enhance prompt with LinkedIn-specific context
            enhanced_prompt = self._enhance_prompt_for_linkedin(
                prompt, content_context, style_preference, aspect_ratio
            )

            # Generate image using existing Gemini infrastructure
            generation_result = await self._generate_with_gemini(enhanced_prompt, aspect_ratio)

            # Surface provider failures with elapsed time for diagnostics.
            if not generation_result.get('success'):
                return {
                    'success': False,
                    'error': generation_result.get('error', 'Image generation failed'),
                    'generation_time': (datetime.now() - start_time).total_seconds()
                }

            # Process and validate generated image
            processed_image = await self._process_generated_image(
                generation_result['image_data'],
                content_context,
                aspect_ratio
            )

            generation_time = (datetime.now() - start_time).total_seconds()

            return {
                'success': True,
                'image_data': processed_image['image_data'],
                # image_url is only present when the processing step stored
                # the image somewhere; otherwise None.
                'image_url': processed_image.get('image_url'),
                'metadata': {
                    'prompt_used': enhanced_prompt,
                    'original_prompt': prompt,
                    'style_preference': style_preference,
                    'aspect_ratio': aspect_ratio,
                    'content_context': content_context,
                    'generation_time': generation_time,
                    'model_used': self.model,
                    'image_format': processed_image['format'],
                    'image_size': processed_image['size'],
                    'resolution': processed_image['resolution']
                },
                'linkedin_optimization': {
                    'mobile_optimized': True,
                    'professional_aesthetic': True,
                    'brand_compliant': True,
                    'engagement_optimized': True
                }
            }

        except Exception as e:
            logger.error(f"Error in LinkedIn image generation: {str(e)}")
            return {
                'success': False,
                'error': f"Image generation failed: {str(e)}",
                # start_time may be unbound if the failure occurred before it
                # was assigned, hence the locals() guard.
                'generation_time': (datetime.now() - start_time).total_seconds() if 'start_time' in locals() else 0
            }
|
||||
|
||||
async def edit_image(
|
||||
self,
|
||||
base_image: bytes,
|
||||
edit_prompt: str,
|
||||
content_context: Dict[str, Any]
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Edit existing image using Gemini's conversational editing capabilities.
|
||||
|
||||
Args:
|
||||
base_image: Base image data in bytes
|
||||
edit_prompt: Description of desired edits
|
||||
content_context: LinkedIn content context for optimization
|
||||
|
||||
Returns:
|
||||
Dict containing edited image result and metadata
|
||||
"""
|
||||
try:
|
||||
start_time = datetime.now()
|
||||
logger.info(f"Starting LinkedIn image editing with prompt: {edit_prompt[:100]}...")
|
||||
|
||||
# Enhance edit prompt for LinkedIn optimization
|
||||
enhanced_edit_prompt = self._enhance_edit_prompt_for_linkedin(
|
||||
edit_prompt, content_context
|
||||
)
|
||||
|
||||
# Use Gemini's image editing capabilities
|
||||
# Note: This will be implemented when Gemini's image editing is fully available
|
||||
# For now, we'll return a placeholder implementation
|
||||
|
||||
return {
|
||||
'success': False,
|
||||
'error': 'Image editing not yet implemented - coming in next Gemini API update',
|
||||
'generation_time': (datetime.now() - start_time).total_seconds()
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in LinkedIn image editing: {str(e)}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': f"Image editing failed: {str(e)}",
|
||||
'generation_time': (datetime.now() - start_time).total_seconds() if 'start_time' in locals() else 0
|
||||
}
|
||||
|
||||
def _enhance_prompt_for_linkedin(
|
||||
self,
|
||||
prompt: str,
|
||||
content_context: Dict[str, Any],
|
||||
style_preference: str,
|
||||
aspect_ratio: str
|
||||
) -> str:
|
||||
"""
|
||||
Enhance user prompt with LinkedIn-specific context and best practices.
|
||||
|
||||
Args:
|
||||
prompt: Original user prompt
|
||||
content_context: LinkedIn content context
|
||||
style_preference: Preferred visual style
|
||||
aspect_ratio: Image aspect ratio
|
||||
|
||||
Returns:
|
||||
Enhanced prompt optimized for LinkedIn
|
||||
"""
|
||||
topic = content_context.get('topic', 'business')
|
||||
industry = content_context.get('industry', 'business')
|
||||
content_type = content_context.get('content_type', 'post')
|
||||
|
||||
# Base LinkedIn optimization
|
||||
linkedin_optimizations = [
|
||||
f"Create a professional LinkedIn {content_type} image for {topic}",
|
||||
f"Industry: {industry}",
|
||||
f"Professional business aesthetic suitable for LinkedIn audience",
|
||||
f"Mobile-optimized design for LinkedIn feed viewing",
|
||||
f"Aspect ratio: {aspect_ratio}",
|
||||
"High-quality, modern design with clear visual hierarchy",
|
||||
"Professional color scheme and typography",
|
||||
"Suitable for business and professional networking"
|
||||
]
|
||||
|
||||
# Style-specific enhancements
|
||||
if style_preference == "professional":
|
||||
style_enhancements = [
|
||||
"Corporate aesthetics with clean lines and geometric shapes",
|
||||
"Professional color palette (blues, grays, whites)",
|
||||
"Modern business environment or abstract business concepts",
|
||||
"Clean, minimalist design approach"
|
||||
]
|
||||
elif style_preference == "creative":
|
||||
style_enhancements = [
|
||||
"Eye-catching and engaging visual style",
|
||||
"Vibrant colors while maintaining professional appeal",
|
||||
"Creative composition that encourages social media engagement",
|
||||
"Modern design elements with business context"
|
||||
]
|
||||
else: # industry-specific
|
||||
style_enhancements = [
|
||||
f"Industry-specific visual elements for {industry}",
|
||||
"Professional yet creative approach",
|
||||
"Balanced design suitable for business audience",
|
||||
"Industry-relevant imagery and color schemes"
|
||||
]
|
||||
|
||||
# Combine all enhancements
|
||||
enhanced_prompt = f"{prompt}\n\n"
|
||||
enhanced_prompt += "\n".join(linkedin_optimizations)
|
||||
enhanced_prompt += "\n" + "\n".join(style_enhancements)
|
||||
|
||||
logger.info(f"Enhanced prompt for LinkedIn: {enhanced_prompt[:200]}...")
|
||||
return enhanced_prompt
|
||||
|
||||
def _enhance_edit_prompt_for_linkedin(
|
||||
self,
|
||||
edit_prompt: str,
|
||||
content_context: Dict[str, Any]
|
||||
) -> str:
|
||||
"""
|
||||
Enhance edit prompt for LinkedIn optimization.
|
||||
|
||||
Args:
|
||||
edit_prompt: Original edit prompt
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Enhanced edit prompt
|
||||
"""
|
||||
industry = content_context.get('industry', 'business')
|
||||
|
||||
linkedin_edit_enhancements = [
|
||||
f"Maintain professional business aesthetic for {industry} industry",
|
||||
"Ensure mobile-optimized composition for LinkedIn feed",
|
||||
"Keep professional color scheme and typography",
|
||||
"Maintain brand consistency and visual hierarchy"
|
||||
]
|
||||
|
||||
enhanced_edit_prompt = f"{edit_prompt}\n\n"
|
||||
enhanced_edit_prompt += "\n".join(linkedin_edit_enhancements)
|
||||
|
||||
return enhanced_edit_prompt
|
||||
|
||||
async def _generate_with_gemini(self, prompt: str, aspect_ratio: str) -> Dict[str, Any]:
|
||||
"""
|
||||
Generate image using unified image generation infrastructure.
|
||||
|
||||
Args:
|
||||
prompt: Enhanced prompt for image generation
|
||||
aspect_ratio: Desired aspect ratio
|
||||
|
||||
Returns:
|
||||
Generation result from image generation provider
|
||||
"""
|
||||
try:
|
||||
# Map aspect ratio to dimensions (LinkedIn-optimized)
|
||||
aspect_map = {
|
||||
"1:1": (1024, 1024),
|
||||
"16:9": (1920, 1080),
|
||||
"4:3": (1366, 1024),
|
||||
"9:16": (1080, 1920), # Portrait for stories
|
||||
}
|
||||
width, height = aspect_map.get(aspect_ratio, (1024, 1024))
|
||||
|
||||
# Use unified image generation system (defaults to provider based on GPT_PROVIDER)
|
||||
result = generate_image(
|
||||
prompt=prompt,
|
||||
options={
|
||||
"provider": "gemini", # LinkedIn uses Gemini by default
|
||||
"model": self.model if hasattr(self, 'model') else None,
|
||||
"width": width,
|
||||
"height": height,
|
||||
}
|
||||
)
|
||||
|
||||
if result and result.image_bytes:
|
||||
return {
|
||||
'success': True,
|
||||
'image_data': result.image_bytes,
|
||||
'image_path': None, # No file path, using bytes directly
|
||||
'width': result.width,
|
||||
'height': result.height,
|
||||
'provider': result.provider,
|
||||
'model': result.model,
|
||||
}
|
||||
else:
|
||||
return {
|
||||
'success': False,
|
||||
'error': 'Image generation returned no result'
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in image generation: {str(e)}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': f"Image generation failed: {str(e)}"
|
||||
}
|
||||
|
||||
async def _process_generated_image(
|
||||
self,
|
||||
image_data: bytes,
|
||||
content_context: Dict[str, Any],
|
||||
aspect_ratio: str
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Process and validate generated image for LinkedIn use.
|
||||
|
||||
Args:
|
||||
image_data: Raw image data
|
||||
content_context: LinkedIn content context
|
||||
aspect_ratio: Image aspect ratio
|
||||
|
||||
Returns:
|
||||
Processed image information
|
||||
"""
|
||||
try:
|
||||
# Open image for processing
|
||||
image = Image.open(BytesIO(image_data))
|
||||
|
||||
# Get image information
|
||||
width, height = image.size
|
||||
format_name = image.format or "PNG"
|
||||
|
||||
# Validate resolution
|
||||
if width < self.min_resolution[0] or height < self.min_resolution[1]:
|
||||
logger.warning(f"Generated image resolution {width}x{height} below minimum {self.min_resolution}")
|
||||
|
||||
# Validate file size
|
||||
image_size_mb = len(image_data) / (1024 * 1024)
|
||||
if image_size_mb > self.max_file_size_mb:
|
||||
logger.warning(f"Generated image size {image_size_mb:.2f}MB exceeds maximum {self.max_file_size_mb}MB")
|
||||
|
||||
# LinkedIn-specific optimizations
|
||||
optimized_image = self._optimize_for_linkedin(image, content_context)
|
||||
|
||||
# Convert back to bytes
|
||||
output_buffer = BytesIO()
|
||||
optimized_image.save(output_buffer, format=format_name, optimize=True)
|
||||
optimized_data = output_buffer.getvalue()
|
||||
|
||||
return {
|
||||
'image_data': optimized_data,
|
||||
'format': format_name,
|
||||
'size': len(optimized_data),
|
||||
'resolution': (width, height),
|
||||
'aspect_ratio': f"{width}:{height}"
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error processing generated image: {str(e)}")
|
||||
# Return original image data if processing fails
|
||||
return {
|
||||
'image_data': image_data,
|
||||
'format': 'PNG',
|
||||
'size': len(image_data),
|
||||
'resolution': (1024, 1024),
|
||||
'aspect_ratio': aspect_ratio
|
||||
}
|
||||
|
||||
def _optimize_for_linkedin(self, image: Image.Image, content_context: Dict[str, Any]) -> Image.Image:
    """
    Optimize image specifically for LinkedIn display.

    Upscales images below the configured minimum resolution (preserving
    aspect ratio) and flattens transparent images onto a white background
    so they remain JPEG-compatible.

    Args:
        image: PIL Image object
        content_context: LinkedIn content context (currently unused; reserved
            for future content-aware optimizations)

    Returns:
        Optimized image (the original image is returned unchanged on error)
    """
    try:
        # Ensure minimum resolution, preserving aspect ratio
        width, height = image.size
        if width < self.min_resolution[0] or height < self.min_resolution[1]:
            ratio = max(self.min_resolution[0] / width, self.min_resolution[1] / height)
            new_width = int(width * ratio)
            new_height = int(height * ratio)
            image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
            logger.info(f"Resized image to {new_width}x{new_height} for LinkedIn optimization")

        # Flatten transparency onto white for JPEG compatibility.
        # BUG FIX: 'LA' images were previously pasted with mask=None, which
        # discarded their alpha channel. Normalize every transparent mode to
        # RGBA first so the alpha channel is always used as the paste mask.
        if image.mode in ('RGBA', 'LA', 'P'):
            if image.mode != 'RGBA':
                image = image.convert('RGBA')
            background = Image.new('RGB', image.size, (255, 255, 255))
            background.paste(image, mask=image.split()[-1])
            image = background

        return image

    except Exception as e:
        logger.error(f"Error optimizing image for LinkedIn: {str(e)}")
        return image  # Return original if optimization fails
|
||||
|
||||
async def validate_image_for_linkedin(self, image_data: bytes) -> Dict[str, Any]:
    """
    Validate image for LinkedIn compliance and quality standards.

    Args:
        image_data: Image data to validate

    Returns:
        Validation results: individual boolean checks plus an
        'overall_score' in [0, 100]; on failure every check is False and
        an 'error' key carries the exception message.
    """
    try:
        img = Image.open(BytesIO(image_data))
        img_width, img_height = img.size

        validation_results = {
            'resolution_ok': img_width >= self.min_resolution[0] and img_height >= self.min_resolution[1],
            'aspect_ratio_suitable': self._is_aspect_ratio_suitable(img_width, img_height),
            'file_size_ok': len(image_data) <= self.max_file_size_mb * 1024 * 1024,
            'format_supported': img.format in self.supported_formats,
            'professional_aesthetic': True,  # Placeholder for future AI-based validation
            'overall_score': 0
        }

        # Each passing check contributes its weight to the overall score.
        check_weights = {
            'resolution_ok': 25,
            'aspect_ratio_suitable': 25,
            'file_size_ok': 20,
            'format_supported': 20,
            'professional_aesthetic': 10,
        }
        validation_results['overall_score'] = sum(
            weight for check, weight in check_weights.items()
            if validation_results[check]
        )

        return validation_results

    except Exception as e:
        logger.error(f"Error validating image: {str(e)}")
        failed = {check: False for check in (
            'resolution_ok',
            'aspect_ratio_suitable',
            'file_size_ok',
            'format_supported',
            'professional_aesthetic',
        )}
        failed['overall_score'] = 0
        failed['error'] = str(e)
        return failed
|
||||
|
||||
def _is_aspect_ratio_suitable(self, width: int, height: int) -> bool:
|
||||
"""
|
||||
Check if image aspect ratio is suitable for LinkedIn.
|
||||
|
||||
Args:
|
||||
width: Image width
|
||||
height: Image height
|
||||
|
||||
Returns:
|
||||
True if aspect ratio is suitable for LinkedIn
|
||||
"""
|
||||
ratio = width / height
|
||||
|
||||
# LinkedIn-optimized aspect ratios
|
||||
suitable_ratios = [
|
||||
(0.9, 1.1), # 1:1 (square)
|
||||
(1.6, 1.8), # 16:9 (landscape)
|
||||
(0.7, 0.8), # 4:3 (portrait)
|
||||
(1.2, 1.4), # 5:4 (landscape)
|
||||
]
|
||||
|
||||
for min_ratio, max_ratio in suitable_ratios:
|
||||
if min_ratio <= ratio <= max_ratio:
|
||||
return True
|
||||
|
||||
return False
|
||||
@@ -0,0 +1,536 @@
|
||||
"""
|
||||
LinkedIn Image Storage Service
|
||||
|
||||
This service handles image storage, retrieval, and management for LinkedIn image generation.
|
||||
It provides secure storage, efficient retrieval, and metadata management for generated images.
|
||||
"""
|
||||
|
||||
import os
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Dict, Any, Optional, List, Tuple
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
from loguru import logger
|
||||
|
||||
# Import existing infrastructure
|
||||
from ...onboarding.api_key_manager import APIKeyManager
|
||||
|
||||
|
||||
class LinkedInImageStorage:
    """
    Handles storage and management of LinkedIn generated images.

    This service provides secure storage, efficient retrieval, metadata management,
    and cleanup functionality for LinkedIn image generation. Images are stored as
    files on disk, organized per content type, with a sidecar JSON metadata file
    per image keyed by a short content hash.
    """

    def __init__(self, storage_path: Optional[str] = None, api_key_manager: Optional[APIKeyManager] = None):
        """
        Initialize the LinkedIn Image Storage service.

        Args:
            storage_path: Base path for image storage
            api_key_manager: API key manager for authentication
        """
        self.api_key_manager = api_key_manager or APIKeyManager()

        # Set up storage paths
        if storage_path:
            self.base_storage_path = Path(storage_path)
        else:
            # Default to project-relative path
            self.base_storage_path = Path(__file__).parent.parent.parent.parent / "linkedin_images"

        # Layout: images/<content_type>/<image_id>.png, metadata/<image_id>.json
        self.images_path = self.base_storage_path / "images"
        self.metadata_path = self.base_storage_path / "metadata"
        self.temp_path = self.base_storage_path / "temp"

        # Ensure directories exist
        self._create_storage_directories()

        # Storage configuration
        self.max_storage_size_gb = 10    # Maximum total storage size in GB
        self.image_retention_days = 30   # Days to keep images before cleanup
        self.max_image_size_mb = 10      # Maximum individual image size in MB

        logger.info(f"LinkedIn Image Storage initialized at {self.base_storage_path}")

    def _create_storage_directories(self):
        """Create necessary storage directories (idempotent)."""
        try:
            self.images_path.mkdir(parents=True, exist_ok=True)
            self.metadata_path.mkdir(parents=True, exist_ok=True)
            self.temp_path.mkdir(parents=True, exist_ok=True)

            # Create per-content-type subdirectories for organization
            (self.images_path / "posts").mkdir(exist_ok=True)
            (self.images_path / "articles").mkdir(exist_ok=True)
            (self.images_path / "carousels").mkdir(exist_ok=True)
            (self.images_path / "video_scripts").mkdir(exist_ok=True)

            logger.info("Storage directories created successfully")

        except Exception as e:
            logger.error(f"Error creating storage directories: {str(e)}")
            raise

    async def store_image(
        self,
        image_data: bytes,
        metadata: Dict[str, Any],
        content_type: str = "post"
    ) -> Dict[str, Any]:
        """
        Store generated image with metadata.

        Args:
            image_data: Image data in bytes
            metadata: Image metadata and context
            content_type: Type of LinkedIn content (post, article, carousel, video_script)

        Returns:
            Dict containing storage result and image ID
        """
        try:
            start_time = datetime.now()

            # Generate unique image ID
            image_id = self._generate_image_id(image_data, metadata)

            # Validate image data before touching disk
            validation_result = await self._validate_image_for_storage(image_data)
            if not validation_result['valid']:
                return {
                    'success': False,
                    'error': f"Image validation failed: {validation_result['error']}"
                }

            # Work on a copy so the caller's dict is not mutated, and record
            # the content type in the persisted metadata.
            # (BUG FIX: content_type was never persisted, so the content_type
            # filter in list_images() could never match stored images.)
            metadata = dict(metadata)
            metadata.setdefault('content_type', content_type)

            # Determine storage path based on content type
            storage_path = self._get_storage_path(content_type, image_id)

            # Store image file
            image_stored = await self._store_image_file(image_data, storage_path)
            if not image_stored:
                return {
                    'success': False,
                    'error': 'Failed to store image file'
                }

            # Store metadata; roll back the image file on failure so we never
            # keep an image without its metadata record.
            metadata_stored = await self._store_metadata(image_id, metadata, storage_path)
            if not metadata_stored:
                await self._cleanup_failed_storage(storage_path)
                return {
                    'success': False,
                    'error': 'Failed to store image metadata'
                }

            # Update storage statistics
            await self._update_storage_stats()

            storage_time = (datetime.now() - start_time).total_seconds()

            return {
                'success': True,
                'image_id': image_id,
                'storage_path': str(storage_path),
                'metadata': {
                    'stored_at': datetime.now().isoformat(),
                    'storage_time': storage_time,
                    'file_size': len(image_data),
                    'content_type': content_type
                }
            }

        except Exception as e:
            logger.error(f"Error storing LinkedIn image: {str(e)}")
            return {
                'success': False,
                'error': f"Image storage failed: {str(e)}"
            }

    async def retrieve_image(self, image_id: str) -> Dict[str, Any]:
        """
        Retrieve stored image by ID.

        Args:
            image_id: Unique image identifier

        Returns:
            Dict containing image data and metadata
        """
        try:
            # Find image file
            image_path = await self._find_image_by_id(image_id)
            if not image_path:
                return {
                    'success': False,
                    'error': f'Image not found: {image_id}'
                }

            # Load metadata
            metadata = await self._load_metadata(image_id)
            if not metadata:
                return {
                    'success': False,
                    'error': f'Metadata not found for image: {image_id}'
                }

            # Read image data
            with open(image_path, 'rb') as f:
                image_data = f.read()

            return {
                'success': True,
                'image_data': image_data,
                'metadata': metadata,
                'image_path': str(image_path)
            }

        except Exception as e:
            logger.error(f"Error retrieving LinkedIn image {image_id}: {str(e)}")
            return {
                'success': False,
                'error': f"Image retrieval failed: {str(e)}"
            }

    async def delete_image(self, image_id: str) -> Dict[str, Any]:
        """
        Delete stored image and metadata.

        Args:
            image_id: Unique image identifier

        Returns:
            Dict containing deletion result
        """
        try:
            # Find image file
            image_path = await self._find_image_by_id(image_id)
            if not image_path:
                return {
                    'success': False,
                    'error': f'Image not found: {image_id}'
                }

            # Delete image file
            if image_path.exists():
                image_path.unlink()
                logger.info(f"Deleted image file: {image_path}")

            # Delete metadata sidecar
            metadata_path = self.metadata_path / f"{image_id}.json"
            if metadata_path.exists():
                metadata_path.unlink()
                logger.info(f"Deleted metadata file: {metadata_path}")

            # Update storage statistics
            await self._update_storage_stats()

            return {
                'success': True,
                'message': f'Image {image_id} deleted successfully'
            }

        except Exception as e:
            logger.error(f"Error deleting LinkedIn image {image_id}: {str(e)}")
            return {
                'success': False,
                'error': f"Image deletion failed: {str(e)}"
            }

    async def list_images(
        self,
        content_type: Optional[str] = None,
        limit: int = 50,
        offset: int = 0
    ) -> Dict[str, Any]:
        """
        List stored images with optional filtering.

        Args:
            content_type: Filter by content type
            limit: Maximum number of images to return
            offset: Number of matching images to skip

        Returns:
            Dict containing the page of images; 'total_count' is the number
            of images in the returned page.
        """
        try:
            # Collect all metadata records matching the filter FIRST, then
            # paginate. (BUG FIX: offset/limit were previously applied to the
            # raw file listing before filtering, producing short/incorrect
            # pages whenever a content_type filter was active.)
            matching: List[Dict[str, Any]] = []

            for metadata_file in sorted(self.metadata_path.glob("*.json")):
                try:
                    with open(metadata_file, 'r') as f:
                        metadata = json.load(f)

                    # Apply content type filter
                    if content_type and metadata.get('content_type') != content_type:
                        continue

                    # Only report images whose file still exists on disk
                    image_id = metadata_file.stem
                    image_path = await self._find_image_by_id(image_id)

                    if image_path and image_path.exists():
                        # Add file size and last modified info
                        stat = image_path.stat()
                        metadata['file_size'] = stat.st_size
                        metadata['last_modified'] = datetime.fromtimestamp(stat.st_mtime).isoformat()

                        matching.append(metadata)

                except Exception as e:
                    logger.warning(f"Error reading metadata file {metadata_file}: {str(e)}")
                    continue

            images = matching[offset:offset + limit]

            return {
                'success': True,
                'images': images,
                'total_count': len(images),
                'limit': limit,
                'offset': offset
            }

        except Exception as e:
            logger.error(f"Error listing LinkedIn images: {str(e)}")
            return {
                'success': False,
                'error': f"Image listing failed: {str(e)}"
            }

    async def cleanup_old_images(self, days_old: Optional[int] = None) -> Dict[str, Any]:
        """
        Clean up old images based on retention policy.

        Args:
            days_old: Minimum age in days for cleanup (defaults to retention policy)

        Returns:
            Dict containing cleanup results
        """
        try:
            if days_old is None:
                days_old = self.image_retention_days

            cutoff_date = datetime.now() - timedelta(days=days_old)
            deleted_count = 0
            errors: List[str] = []

            # Scan every metadata record and delete images stored before cutoff
            for metadata_file in list(self.metadata_path.glob("*.json")):
                try:
                    with open(metadata_file, 'r') as f:
                        metadata = json.load(f)

                    # Check creation date
                    created_at = metadata.get('stored_at')
                    if created_at:
                        created_date = datetime.fromisoformat(created_at)
                        if created_date < cutoff_date:
                            image_id = metadata_file.stem
                            delete_result = await self.delete_image(image_id)

                            if delete_result['success']:
                                deleted_count += 1
                            else:
                                errors.append(f"Failed to delete {image_id}: {delete_result['error']}")

                except Exception as e:
                    logger.warning(f"Error processing metadata file {metadata_file}: {str(e)}")
                    continue

            return {
                'success': True,
                'deleted_count': deleted_count,
                'errors': errors,
                'cutoff_date': cutoff_date.isoformat()
            }

        except Exception as e:
            logger.error(f"Error cleaning up old LinkedIn images: {str(e)}")
            return {
                'success': False,
                'error': f"Cleanup failed: {str(e)}"
            }

    async def get_storage_stats(self) -> Dict[str, Any]:
        """
        Get storage statistics and usage information.

        Returns:
            Dict containing storage statistics
        """
        try:
            total_size = 0
            total_files = 0
            content_type_counts: Dict[str, int] = {}

            # Walk each content-type directory and accumulate usage
            for content_type_dir in self.images_path.iterdir():
                if content_type_dir.is_dir():
                    content_type = content_type_dir.name
                    content_type_counts[content_type] = 0

                    for image_file in content_type_dir.glob("*"):
                        if image_file.is_file():
                            total_size += image_file.stat().st_size
                            total_files += 1
                            content_type_counts[content_type] += 1

            # Check storage limits
            total_size_gb = total_size / (1024 ** 3)
            storage_limit_exceeded = total_size_gb > self.max_storage_size_gb

            return {
                'success': True,
                'total_size_bytes': total_size,
                'total_size_gb': round(total_size_gb, 2),
                'total_files': total_files,
                'content_type_counts': content_type_counts,
                'storage_limit_gb': self.max_storage_size_gb,
                'storage_limit_exceeded': storage_limit_exceeded,
                'retention_days': self.image_retention_days
            }

        except Exception as e:
            logger.error(f"Error getting storage stats: {str(e)}")
            return {
                'success': False,
                'error': f"Failed to get storage stats: {str(e)}"
            }

    def _generate_image_id(self, image_data: bytes, metadata: Dict[str, Any]) -> str:
        """Generate unique image ID based on content, metadata, and time."""
        # Hash the raw leading bytes of the image plus key metadata and a
        # timestamp. (BUG FIX: the bytes slice was previously interpolated
        # into an f-string, hashing its repr instead of the actual content.)
        hasher = hashlib.sha256()
        hasher.update(image_data[:1000])
        hasher.update(
            f"{metadata.get('topic', '')}{metadata.get('industry', '')}{datetime.now().isoformat()}".encode()
        )
        return hasher.hexdigest()[:16]

    async def _validate_image_for_storage(self, image_data: bytes) -> Dict[str, Any]:
        """Validate image size and format before storage."""
        try:
            # Check file size
            if len(image_data) > self.max_image_size_mb * 1024 * 1024:
                return {
                    'valid': False,
                    'error': f'Image size {len(image_data) / (1024*1024):.2f}MB exceeds maximum {self.max_image_size_mb}MB'
                }

            # Validate image format by actually decoding the header
            try:
                image = Image.open(BytesIO(image_data))
                if image.format not in ['PNG', 'JPEG', 'JPG']:
                    return {
                        'valid': False,
                        'error': f'Unsupported image format: {image.format}'
                    }
            except Exception as e:
                return {
                    'valid': False,
                    'error': f'Invalid image data: {str(e)}'
                }

            return {'valid': True}

        except Exception as e:
            return {
                'valid': False,
                'error': f'Validation error: {str(e)}'
            }

    def _get_storage_path(self, content_type: str, image_id: str) -> Path:
        """Get storage path for image based on content type."""
        # Map content types to directory names; unknown types fall back to 'posts'
        content_type_map = {
            'post': 'posts',
            'article': 'articles',
            'carousel': 'carousels',
            'video_script': 'video_scripts'
        }

        directory = content_type_map.get(content_type, 'posts')
        return self.images_path / directory / f"{image_id}.png"

    async def _store_image_file(self, image_data: bytes, storage_path: Path) -> bool:
        """Store image file to disk. Returns True on success."""
        try:
            # Ensure directory exists
            storage_path.parent.mkdir(parents=True, exist_ok=True)

            # Write image data
            with open(storage_path, 'wb') as f:
                f.write(image_data)

            logger.info(f"Stored image file: {storage_path}")
            return True

        except Exception as e:
            logger.error(f"Error storing image file: {str(e)}")
            return False

    async def _store_metadata(self, image_id: str, metadata: Dict[str, Any], storage_path: Path) -> bool:
        """Store image metadata to a JSON sidecar file. Returns True on success."""
        try:
            # Work on a copy so the caller's dict is not mutated
            record = dict(metadata)
            record['image_id'] = image_id
            record['storage_path'] = str(storage_path)
            record['stored_at'] = datetime.now().isoformat()

            # Write metadata file
            metadata_path = self.metadata_path / f"{image_id}.json"
            with open(metadata_path, 'w') as f:
                json.dump(record, f, indent=2, default=str)

            logger.info(f"Stored metadata: {metadata_path}")
            return True

        except Exception as e:
            logger.error(f"Error storing metadata: {str(e)}")
            return False

    async def _find_image_by_id(self, image_id: str) -> Optional[Path]:
        """Find image file by ID across all content type directories."""
        for content_dir in self.images_path.iterdir():
            if content_dir.is_dir():
                image_path = content_dir / f"{image_id}.png"
                if image_path.exists():
                    return image_path

        return None

    async def _load_metadata(self, image_id: str) -> Optional[Dict[str, Any]]:
        """Load metadata for image ID, or None if missing/unreadable."""
        try:
            metadata_path = self.metadata_path / f"{image_id}.json"
            if metadata_path.exists():
                with open(metadata_path, 'r') as f:
                    return json.load(f)
        except Exception as e:
            logger.error(f"Error loading metadata for {image_id}: {str(e)}")

        return None

    async def _cleanup_failed_storage(self, storage_path: Path):
        """Remove a partially-written image file after a failed store."""
        try:
            if storage_path.exists():
                storage_path.unlink()
                logger.info(f"Cleaned up failed storage: {storage_path}")
        except Exception as e:
            logger.error(f"Error cleaning up failed storage: {str(e)}")

    async def _update_storage_stats(self):
        """Update storage statistics (placeholder for future implementation)."""
        # This could be implemented to track storage usage over time
        pass
|
||||
Reference in New Issue
Block a user