Base code
This commit is contained in:
53
backend/services/linkedin/__init__.py
Normal file
53
backend/services/linkedin/__init__.py
Normal file
@@ -0,0 +1,53 @@
|
||||
"""
|
||||
LinkedIn Services Package
|
||||
|
||||
This package provides comprehensive LinkedIn content generation and management services
|
||||
including content generation, image generation, and various LinkedIn-specific utilities.
|
||||
"""
|
||||
|
||||
# Import existing services
|
||||
from .content_generator import ContentGenerator
|
||||
from .content_generator_prompts import (
|
||||
PostPromptBuilder,
|
||||
ArticlePromptBuilder,
|
||||
CarouselPromptBuilder,
|
||||
VideoScriptPromptBuilder,
|
||||
CommentResponsePromptBuilder,
|
||||
CarouselGenerator,
|
||||
VideoScriptGenerator
|
||||
)
|
||||
|
||||
# Import new image generation services
|
||||
from .image_generation import (
|
||||
LinkedInImageGenerator,
|
||||
LinkedInImageEditor,
|
||||
LinkedInImageStorage
|
||||
)
|
||||
from .image_prompts import LinkedInPromptGenerator
|
||||
|
||||
__all__ = [
|
||||
# Content Generation
|
||||
'ContentGenerator',
|
||||
|
||||
# Prompt Builders
|
||||
'PostPromptBuilder',
|
||||
'ArticlePromptBuilder',
|
||||
'CarouselPromptBuilder',
|
||||
'VideoScriptPromptBuilder',
|
||||
'CommentResponsePromptBuilder',
|
||||
|
||||
# Specialized Generators
|
||||
'CarouselGenerator',
|
||||
'VideoScriptGenerator',
|
||||
|
||||
# Image Generation Services
|
||||
'LinkedInImageGenerator',
|
||||
'LinkedInImageEditor',
|
||||
'LinkedInImageStorage',
|
||||
'LinkedInPromptGenerator'
|
||||
]
|
||||
|
||||
# Version information
|
||||
__version__ = "2.0.0"
|
||||
__author__ = "Alwrity Team"
|
||||
__description__ = "LinkedIn Content and Image Generation Services"
|
||||
593
backend/services/linkedin/content_generator.py
Normal file
593
backend/services/linkedin/content_generator.py
Normal file
@@ -0,0 +1,593 @@
|
||||
"""
|
||||
Content Generator for LinkedIn Content Generation
|
||||
|
||||
Handles the main content generation logic for posts and articles.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
from loguru import logger
|
||||
from models.linkedin_models import (
|
||||
LinkedInPostRequest, LinkedInArticleRequest, LinkedInPostResponse, LinkedInArticleResponse,
|
||||
PostContent, ArticleContent, GroundingLevel, ResearchSource
|
||||
)
|
||||
from services.linkedin.quality_handler import QualityHandler
|
||||
from services.linkedin.content_generator_prompts import (
|
||||
PostPromptBuilder,
|
||||
ArticlePromptBuilder,
|
||||
CarouselPromptBuilder,
|
||||
VideoScriptPromptBuilder,
|
||||
CommentResponsePromptBuilder,
|
||||
CarouselGenerator,
|
||||
VideoScriptGenerator
|
||||
)
|
||||
from services.persona_analysis_service import PersonaAnalysisService
|
||||
import time
|
||||
|
||||
|
||||
class ContentGenerator:
|
||||
"""Handles content generation for all LinkedIn content types."""
|
||||
|
||||
def __init__(self, citation_manager=None, quality_analyzer=None, gemini_grounded=None, fallback_provider=None):
|
||||
self.citation_manager = citation_manager
|
||||
self.quality_analyzer = quality_analyzer
|
||||
self.gemini_grounded = gemini_grounded
|
||||
self.fallback_provider = fallback_provider
|
||||
|
||||
# Persona caching
|
||||
self._persona_cache: Dict[str, Dict[str, Any]] = {}
|
||||
self._cache_timestamps: Dict[str, float] = {}
|
||||
self._cache_duration = 300 # 5 minutes cache duration
|
||||
|
||||
# Initialize specialized generators
|
||||
self.carousel_generator = CarouselGenerator(citation_manager, quality_analyzer)
|
||||
self.video_script_generator = VideoScriptGenerator(citation_manager, quality_analyzer)
|
||||
|
||||
def _get_cached_persona_data(self, user_id: int, platform: str) -> Optional[Dict[str, Any]]:
|
||||
"""
|
||||
Get persona data with caching for LinkedIn platform.
|
||||
|
||||
Args:
|
||||
user_id: User ID to get persona for
|
||||
platform: Platform type (linkedin)
|
||||
|
||||
Returns:
|
||||
Persona data or None if not available
|
||||
"""
|
||||
cache_key = f"{platform}_persona_{user_id}"
|
||||
current_time = time.time()
|
||||
|
||||
# Check cache first
|
||||
if cache_key in self._persona_cache and cache_key in self._cache_timestamps:
|
||||
cache_age = current_time - self._cache_timestamps[cache_key]
|
||||
if cache_age < self._cache_duration:
|
||||
logger.debug(f"Using cached persona data for user {user_id} (age: {cache_age:.1f}s)")
|
||||
return self._persona_cache[cache_key]
|
||||
else:
|
||||
# Cache expired, remove it
|
||||
logger.debug(f"Cache expired for user {user_id}, refreshing...")
|
||||
del self._persona_cache[cache_key]
|
||||
del self._cache_timestamps[cache_key]
|
||||
|
||||
# Fetch fresh data
|
||||
try:
|
||||
persona_service = PersonaAnalysisService()
|
||||
persona_data = persona_service.get_persona_for_platform(user_id, platform)
|
||||
|
||||
# Cache the result
|
||||
if persona_data:
|
||||
self._persona_cache[cache_key] = persona_data
|
||||
self._cache_timestamps[cache_key] = current_time
|
||||
logger.debug(f"Cached persona data for user {user_id}")
|
||||
|
||||
return persona_data
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f"Could not load persona data for {platform} content generation: {e}")
|
||||
return None
|
||||
|
||||
def _clear_persona_cache(self, user_id: int = None):
|
||||
"""
|
||||
Clear persona cache for a specific user or all users.
|
||||
|
||||
Args:
|
||||
user_id: User ID to clear cache for, or None to clear all
|
||||
"""
|
||||
if user_id is None:
|
||||
self._persona_cache.clear()
|
||||
self._cache_timestamps.clear()
|
||||
logger.info("Cleared all persona cache")
|
||||
else:
|
||||
# Clear cache for all platforms for this user
|
||||
keys_to_remove = [key for key in self._persona_cache.keys() if key.endswith(f"_{user_id}")]
|
||||
for key in keys_to_remove:
|
||||
del self._persona_cache[key]
|
||||
del self._cache_timestamps[key]
|
||||
logger.info(f"Cleared persona cache for user {user_id}")
|
||||
|
||||
def _transform_gemini_sources(self, gemini_sources):
|
||||
"""Transform Gemini sources to ResearchSource format."""
|
||||
transformed_sources = []
|
||||
for source in gemini_sources:
|
||||
transformed_source = ResearchSource(
|
||||
title=source.get('title', 'Unknown Source'),
|
||||
url=source.get('url', ''),
|
||||
content=f"Source from {source.get('title', 'Unknown')}",
|
||||
relevance_score=0.8, # Default relevance score
|
||||
credibility_score=0.7, # Default credibility score
|
||||
domain_authority=0.6, # Default domain authority
|
||||
source_type=source.get('type', 'web'),
|
||||
publication_date=datetime.now().strftime('%Y-%m-%d')
|
||||
)
|
||||
transformed_sources.append(transformed_source)
|
||||
return transformed_sources
|
||||
|
||||
    async def generate_post(
        self,
        request: LinkedInPostRequest,
        research_sources: List,
        research_time: float,
        content_result: Dict[str, Any],
        grounding_enabled: bool
    ) -> LinkedInPostResponse:
        """Generate LinkedIn post with all processing steps.

        Assembles the final LinkedInPostResponse from already-generated
        content: attaches citations/sources (preferring those returned by
        Gemini grounding over the separately researched ones), runs the
        optional quality analysis, and packages generation metadata and
        grounding status.

        Args:
            request: The original post generation request.
            research_sources: Sources gathered during the research phase.
            research_time: Seconds spent on research (reported in metadata).
            content_result: Raw generation output; expects a 'content' key,
                and optionally 'citations'/'sources' (from Gemini grounding),
                'hashtags', 'call_to_action', 'engagement_prediction',
                'search_queries'.
            grounding_enabled: Whether grounded generation was used.

        Returns:
            LinkedInPostResponse with success=True and the built post, or
            success=False and an error message if anything raises.
        """
        try:
            start_time = datetime.now()

            # Debug: Log what we received
            logger.info(f"ContentGenerator.generate_post called with:")
            logger.info(f" - research_sources count: {len(research_sources) if research_sources else 0}")
            logger.info(f" - research_sources type: {type(research_sources)}")
            logger.info(f" - content_result keys: {list(content_result.keys()) if content_result else 'None'}")
            logger.info(f" - grounding_enabled: {grounding_enabled}")
            logger.info(f" - include_citations: {request.include_citations}")

            # Debug: Log content_result details
            if content_result:
                logger.info(f" - content_result has citations: {'citations' in content_result}")
                logger.info(f" - content_result has sources: {'sources' in content_result}")
                if 'citations' in content_result:
                    logger.info(f" - citations count: {len(content_result['citations']) if content_result['citations'] else 0}")
                if 'sources' in content_result:
                    logger.info(f" - sources count: {len(content_result['sources']) if content_result['sources'] else 0}")

            if research_sources:
                logger.info(f" - First research source: {research_sources[0] if research_sources else 'None'}")
                logger.info(f" - Research sources types: {[type(s) for s in research_sources[:3]]}")

            # Step 3: Add citations if requested - POST METHOD
            citations = []
            source_list = None
            final_research_sources = research_sources  # Default to passed research_sources

            # Use sources and citations from content_result if available (from Gemini grounding).
            # Grounding output takes precedence over the separate research pass.
            if content_result.get('citations') and content_result.get('sources'):
                logger.info(f"Using citations and sources from Gemini grounding: {len(content_result['citations'])} citations, {len(content_result['sources'])} sources")
                citations = content_result['citations']
                # Transform Gemini sources to ResearchSource format
                gemini_sources = self._transform_gemini_sources(content_result['sources'])
                source_list = self.citation_manager.generate_source_list(gemini_sources) if self.citation_manager else None
                # Use transformed sources for the response
                final_research_sources = gemini_sources
            elif request.include_citations and research_sources and self.citation_manager:
                try:
                    logger.info(f"Processing citations for content length: {len(content_result['content'])}")
                    citations = self.citation_manager.extract_citations(content_result['content'])
                    logger.info(f"Extracted {len(citations)} citations from content")
                    source_list = self.citation_manager.generate_source_list(research_sources)
                    logger.info(f"Generated source list: {source_list[:200] if source_list else 'None'}")
                except Exception as e:
                    # Citation failure is non-fatal: the post ships without them.
                    logger.warning(f"Citation processing failed: {e}")
            else:
                logger.info(f"Citation processing skipped: include_citations={request.include_citations}, research_sources={len(research_sources) if research_sources else 0}, citation_manager={self.citation_manager is not None}")

            # Step 4: Analyze content quality (only meaningful when grounded)
            quality_metrics = None
            if grounding_enabled and self.quality_analyzer:
                try:
                    quality_handler = QualityHandler(self.quality_analyzer)
                    quality_metrics = quality_handler.create_quality_metrics(
                        content=content_result['content'],
                        sources=final_research_sources,  # Use final_research_sources
                        industry=request.industry,
                        grounding_enabled=grounding_enabled
                    )
                except Exception as e:
                    # Quality analysis failure is non-fatal.
                    logger.warning(f"Quality analysis failed: {e}")

            # Step 5: Build response
            post_content = PostContent(
                content=content_result['content'],
                character_count=len(content_result['content']),
                hashtags=content_result.get('hashtags', []),
                call_to_action=content_result.get('call_to_action'),
                engagement_prediction=content_result.get('engagement_prediction'),
                citations=citations,
                source_list=source_list,
                quality_metrics=quality_metrics,
                grounding_enabled=grounding_enabled,
                search_queries=content_result.get('search_queries', [])
            )

            generation_time = (datetime.now() - start_time).total_seconds()

            # Build grounding status (None when grounding was disabled)
            grounding_status = {
                'status': 'success' if grounding_enabled else 'disabled',
                'sources_used': len(final_research_sources),  # Use final_research_sources
                'citation_coverage': len(citations) / max(len(final_research_sources), 1) if final_research_sources else 0,
                'quality_score': quality_metrics.overall_score if quality_metrics else 0.0
            } if grounding_enabled else None

            return LinkedInPostResponse(
                success=True,
                data=post_content,
                research_sources=final_research_sources,  # Use final_research_sources
                generation_metadata={
                    'model_used': 'gemini-2.0-flash-001',
                    'generation_time': generation_time,
                    'research_time': research_time,
                    'grounding_enabled': grounding_enabled
                },
                grounding_status=grounding_status
            )

        except Exception as e:
            logger.error(f"Error generating LinkedIn post: {str(e)}")
            return LinkedInPostResponse(
                success=False,
                error=f"Failed to generate LinkedIn post: {str(e)}"
            )
    async def generate_article(
        self,
        request: LinkedInArticleRequest,
        research_sources: List,
        research_time: float,
        content_result: Dict[str, Any],
        grounding_enabled: bool
    ) -> LinkedInArticleResponse:
        """Generate LinkedIn article with all processing steps.

        Mirrors generate_post: attaches citations/sources (preferring Gemini
        grounding output), runs the optional quality analysis, and packages
        the ArticleContent plus metadata into a LinkedInArticleResponse.

        Args:
            request: The original article generation request.
            research_sources: Sources gathered during the research phase.
            research_time: Seconds spent on research (reported in metadata).
            content_result: Raw generation output; expects 'title' and
                'content', and optionally 'citations'/'sources',
                'sections', 'seo_metadata', 'image_suggestions',
                'reading_time', 'search_queries'.
            grounding_enabled: Whether grounded generation was used.

        Returns:
            LinkedInArticleResponse with success=True and the built article,
            or success=False and an error message if anything raises.
        """
        try:
            start_time = datetime.now()

            # Step 3: Add citations if requested - ARTICLE METHOD
            citations = []
            source_list = None
            final_research_sources = research_sources  # Default to passed research_sources

            # Use sources and citations from content_result if available (from Gemini grounding)
            if content_result.get('citations') and content_result.get('sources'):
                logger.info(f"Using citations and sources from Gemini grounding: {len(content_result['citations'])} citations, {len(content_result['sources'])} sources")
                citations = content_result['citations']
                # Transform Gemini sources to ResearchSource format
                gemini_sources = self._transform_gemini_sources(content_result['sources'])
                source_list = self.citation_manager.generate_source_list(gemini_sources) if self.citation_manager else None
                # Use transformed sources for the response
                final_research_sources = gemini_sources
            elif request.include_citations and research_sources and self.citation_manager:
                try:
                    citations = self.citation_manager.extract_citations(content_result['content'])
                    source_list = self.citation_manager.generate_source_list(research_sources)
                except Exception as e:
                    # Citation failure is non-fatal: the article ships without them.
                    logger.warning(f"Citation processing failed: {e}")

            # Step 4: Analyze content quality (only meaningful when grounded)
            quality_metrics = None
            if grounding_enabled and self.quality_analyzer:
                try:
                    quality_handler = QualityHandler(self.quality_analyzer)
                    quality_metrics = quality_handler.create_quality_metrics(
                        content=content_result['content'],
                        sources=final_research_sources,  # Use final_research_sources
                        industry=request.industry,
                        grounding_enabled=grounding_enabled
                    )
                except Exception as e:
                    # Quality analysis failure is non-fatal.
                    logger.warning(f"Quality analysis failed: {e}")

            # Step 5: Build response
            article_content = ArticleContent(
                title=content_result['title'],
                content=content_result['content'],
                word_count=len(content_result['content'].split()),
                sections=content_result.get('sections', []),
                seo_metadata=content_result.get('seo_metadata'),
                image_suggestions=content_result.get('image_suggestions', []),
                reading_time=content_result.get('reading_time'),
                citations=citations,
                source_list=source_list,
                quality_metrics=quality_metrics,
                grounding_enabled=grounding_enabled,
                search_queries=content_result.get('search_queries', [])
            )

            generation_time = (datetime.now() - start_time).total_seconds()

            # Build grounding status (None when grounding was disabled)
            grounding_status = {
                'status': 'success' if grounding_enabled else 'disabled',
                'sources_used': len(final_research_sources),  # Use final_research_sources
                'citation_coverage': len(citations) / max(len(final_research_sources), 1) if final_research_sources else 0,
                'quality_score': quality_metrics.overall_score if quality_metrics else 0.0
            } if grounding_enabled else None

            return LinkedInArticleResponse(
                success=True,
                data=article_content,
                research_sources=final_research_sources,  # Use final_research_sources
                generation_metadata={
                    'model_used': 'gemini-2.0-flash-001',
                    'generation_time': generation_time,
                    'research_time': research_time,
                    'grounding_enabled': grounding_enabled
                },
                grounding_status=grounding_status
            )

        except Exception as e:
            logger.error(f"Error generating LinkedIn article: {str(e)}")
            return LinkedInArticleResponse(
                success=False,
                error=f"Failed to generate LinkedIn article: {str(e)}"
            )
async def generate_carousel(
|
||||
self,
|
||||
request,
|
||||
research_sources: List,
|
||||
research_time: float,
|
||||
content_result: Dict[str, Any],
|
||||
grounding_enabled: bool
|
||||
):
|
||||
"""Generate LinkedIn carousel using the specialized CarouselGenerator."""
|
||||
return await self.carousel_generator.generate_carousel(
|
||||
request, research_sources, research_time, content_result, grounding_enabled
|
||||
)
|
||||
|
||||
async def generate_video_script(
|
||||
self,
|
||||
request,
|
||||
research_sources: List,
|
||||
research_time: float,
|
||||
content_result: Dict[str, Any],
|
||||
grounding_enabled: bool
|
||||
):
|
||||
"""Generate LinkedIn video script using the specialized VideoScriptGenerator."""
|
||||
return await self.video_script_generator.generate_video_script(
|
||||
request, research_sources, research_time, content_result, grounding_enabled
|
||||
)
|
||||
|
||||
async def generate_comment_response(
|
||||
self,
|
||||
request,
|
||||
research_sources: List,
|
||||
research_time: float,
|
||||
content_result: Dict[str, Any],
|
||||
grounding_enabled: bool
|
||||
):
|
||||
"""Generate LinkedIn comment response with all processing steps."""
|
||||
try:
|
||||
start_time = datetime.now()
|
||||
|
||||
generation_time = (datetime.now() - start_time).total_seconds()
|
||||
|
||||
# Build grounding status
|
||||
grounding_status = {
|
||||
'status': 'success' if grounding_enabled else 'disabled',
|
||||
'sources_used': len(research_sources),
|
||||
'citation_coverage': 0, # Comments typically don't have citations
|
||||
'quality_score': 0.8 # Default quality for comments
|
||||
} if grounding_enabled else None
|
||||
|
||||
return {
|
||||
'success': True,
|
||||
'response': content_result['response'],
|
||||
'alternative_responses': content_result.get('alternative_responses', []),
|
||||
'tone_analysis': content_result.get('tone_analysis'),
|
||||
'generation_metadata': {
|
||||
'model_used': 'gemini-2.0-flash-001',
|
||||
'generation_time': generation_time,
|
||||
'research_time': research_time,
|
||||
'grounding_enabled': grounding_enabled
|
||||
},
|
||||
'grounding_status': grounding_status
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating LinkedIn comment response: {str(e)}")
|
||||
return {
|
||||
'success': False,
|
||||
'error': f"Failed to generate LinkedIn comment response: {str(e)}"
|
||||
}
|
||||
|
||||
# Grounded content generation methods
|
||||
async def generate_grounded_post_content(self, request, research_sources: List) -> Dict[str, Any]:
|
||||
"""Generate grounded post content using the enhanced Gemini provider with native grounding."""
|
||||
try:
|
||||
if not self.gemini_grounded:
|
||||
logger.error("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
raise Exception("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
|
||||
# Build the prompt for grounded generation using persona if available (DB vs session override)
|
||||
# Beta testing: Force user_id=1 for all requests
|
||||
user_id = 1
|
||||
persona_data = self._get_cached_persona_data(user_id, 'linkedin')
|
||||
if getattr(request, 'persona_override', None):
|
||||
try:
|
||||
# Merge shallowly: override core and platform adaptation parts
|
||||
override = request.persona_override
|
||||
if persona_data:
|
||||
core = persona_data.get('core_persona', {})
|
||||
platform_adapt = persona_data.get('platform_adaptation', {})
|
||||
if 'core_persona' in override:
|
||||
core.update(override['core_persona'])
|
||||
if 'platform_adaptation' in override:
|
||||
platform_adapt.update(override['platform_adaptation'])
|
||||
persona_data['core_persona'] = core
|
||||
persona_data['platform_adaptation'] = platform_adapt
|
||||
else:
|
||||
persona_data = override
|
||||
except Exception:
|
||||
pass
|
||||
prompt = PostPromptBuilder.build_post_prompt(request, persona=persona_data)
|
||||
|
||||
# Generate grounded content using native Google Search grounding
|
||||
result = await self.gemini_grounded.generate_grounded_content(
|
||||
prompt=prompt,
|
||||
content_type="linkedin_post",
|
||||
temperature=0.7,
|
||||
max_tokens=request.max_length
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating grounded post content: {str(e)}")
|
||||
logger.info("Attempting fallback to standard content generation...")
|
||||
|
||||
# Fallback to standard content generation without grounding
|
||||
try:
|
||||
if not self.fallback_provider:
|
||||
raise Exception("No fallback provider available")
|
||||
|
||||
# Build a simpler prompt for fallback generation
|
||||
prompt = PostPromptBuilder.build_post_prompt(request)
|
||||
|
||||
# Generate content using fallback provider (it's a dict with functions)
|
||||
if 'generate_text' in self.fallback_provider:
|
||||
result = await self.fallback_provider['generate_text'](
|
||||
prompt=prompt,
|
||||
temperature=0.7,
|
||||
max_tokens=request.max_length
|
||||
)
|
||||
else:
|
||||
raise Exception("Fallback provider doesn't have generate_text method")
|
||||
|
||||
# Return result in the expected format
|
||||
return {
|
||||
'content': result.get('content', '') if isinstance(result, dict) else str(result),
|
||||
'sources': [],
|
||||
'citations': [],
|
||||
'grounding_enabled': False,
|
||||
'fallback_used': True
|
||||
}
|
||||
|
||||
except Exception as fallback_error:
|
||||
logger.error(f"Fallback generation also failed: {str(fallback_error)}")
|
||||
raise Exception(f"Failed to generate content: {str(e)}. Fallback also failed: {str(fallback_error)}")
|
||||
|
||||
async def generate_grounded_article_content(self, request, research_sources: List) -> Dict[str, Any]:
|
||||
"""Generate grounded article content using the enhanced Gemini provider with native grounding."""
|
||||
try:
|
||||
if not self.gemini_grounded:
|
||||
logger.error("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
raise Exception("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
|
||||
# Build the prompt for grounded generation using persona if available (DB vs session override)
|
||||
# Beta testing: Force user_id=1 for all requests
|
||||
user_id = 1
|
||||
persona_data = self._get_cached_persona_data(user_id, 'linkedin')
|
||||
if getattr(request, 'persona_override', None):
|
||||
try:
|
||||
override = request.persona_override
|
||||
if persona_data:
|
||||
core = persona_data.get('core_persona', {})
|
||||
platform_adapt = persona_data.get('platform_adaptation', {})
|
||||
if 'core_persona' in override:
|
||||
core.update(override['core_persona'])
|
||||
if 'platform_adaptation' in override:
|
||||
platform_adapt.update(override['platform_adaptation'])
|
||||
persona_data['core_persona'] = core
|
||||
persona_data['platform_adaptation'] = platform_adapt
|
||||
else:
|
||||
persona_data = override
|
||||
except Exception:
|
||||
pass
|
||||
prompt = ArticlePromptBuilder.build_article_prompt(request, persona=persona_data)
|
||||
|
||||
# Generate grounded content using native Google Search grounding
|
||||
result = await self.gemini_grounded.generate_grounded_content(
|
||||
prompt=prompt,
|
||||
content_type="linkedin_article",
|
||||
temperature=0.7,
|
||||
max_tokens=request.word_count * 10 # Approximate character count
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating grounded article content: {str(e)}")
|
||||
raise Exception(f"Failed to generate grounded article content: {str(e)}")
|
||||
|
||||
async def generate_grounded_carousel_content(self, request, research_sources: List) -> Dict[str, Any]:
|
||||
"""Generate grounded carousel content using the enhanced Gemini provider with native grounding."""
|
||||
try:
|
||||
if not self.gemini_grounded:
|
||||
logger.error("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
raise Exception("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
|
||||
# Build the prompt for grounded generation using the new prompt builder
|
||||
prompt = CarouselPromptBuilder.build_carousel_prompt(request)
|
||||
|
||||
# Generate grounded content using native Google Search grounding
|
||||
result = await self.gemini_grounded.generate_grounded_content(
|
||||
prompt=prompt,
|
||||
content_type="linkedin_carousel",
|
||||
temperature=0.7,
|
||||
max_tokens=2000
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating grounded carousel content: {str(e)}")
|
||||
raise Exception(f"Failed to generate grounded carousel content: {str(e)}")
|
||||
|
||||
async def generate_grounded_video_script_content(self, request, research_sources: List) -> Dict[str, Any]:
|
||||
"""Generate grounded video script content using the enhanced Gemini provider with native grounding."""
|
||||
try:
|
||||
if not self.gemini_grounded:
|
||||
logger.error("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
raise Exception("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
|
||||
# Build the prompt for grounded generation using the new prompt builder
|
||||
prompt = VideoScriptPromptBuilder.build_video_script_prompt(request)
|
||||
|
||||
# Generate grounded content using native Google Search grounding
|
||||
result = await self.gemini_grounded.generate_grounded_content(
|
||||
prompt=prompt,
|
||||
content_type="linkedin_video_script",
|
||||
temperature=0.7,
|
||||
max_tokens=1500
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating grounded video script content: {str(e)}")
|
||||
raise Exception(f"Failed to generate grounded video script content: {str(e)}")
|
||||
|
||||
async def generate_grounded_comment_response(self, request, research_sources: List) -> Dict[str, Any]:
|
||||
"""Generate grounded comment response using the enhanced Gemini provider with native grounding."""
|
||||
try:
|
||||
if not self.gemini_grounded:
|
||||
logger.error("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
raise Exception("Gemini Grounded Provider not available - cannot generate content without AI provider")
|
||||
|
||||
# Build the prompt for grounded generation using the new prompt builder
|
||||
prompt = CommentResponsePromptBuilder.build_comment_response_prompt(request)
|
||||
|
||||
# Generate grounded content using native Google Search grounding
|
||||
result = await self.gemini_grounded.generate_grounded_content(
|
||||
prompt=prompt,
|
||||
content_type="linkedin_comment_response",
|
||||
temperature=0.7,
|
||||
max_tokens=2000
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error generating grounded comment response: {str(e)}")
|
||||
raise Exception(f"Failed to generate grounded comment response: {str(e)}")
|
||||
@@ -0,0 +1,24 @@
|
||||
"""
|
||||
Content Generator Prompts Package
|
||||
|
||||
This package contains all the prompt templates and generation logic used by the ContentGenerator class
|
||||
for generating various types of LinkedIn content.
|
||||
"""
|
||||
|
||||
from .post_prompts import PostPromptBuilder
|
||||
from .article_prompts import ArticlePromptBuilder
|
||||
from .carousel_prompts import CarouselPromptBuilder
|
||||
from .video_script_prompts import VideoScriptPromptBuilder
|
||||
from .comment_response_prompts import CommentResponsePromptBuilder
|
||||
from .carousel_generator import CarouselGenerator
|
||||
from .video_script_generator import VideoScriptGenerator
|
||||
|
||||
__all__ = [
|
||||
'PostPromptBuilder',
|
||||
'ArticlePromptBuilder',
|
||||
'CarouselPromptBuilder',
|
||||
'VideoScriptPromptBuilder',
|
||||
'CommentResponsePromptBuilder',
|
||||
'CarouselGenerator',
|
||||
'VideoScriptGenerator'
|
||||
]
|
||||
@@ -0,0 +1,88 @@
|
||||
"""
|
||||
LinkedIn Article Generation Prompts
|
||||
|
||||
This module contains prompt templates and builders for generating LinkedIn articles.
|
||||
"""
|
||||
|
||||
from typing import Any, Optional, Dict
|
||||
|
||||
|
||||
class ArticlePromptBuilder:
    """Builder class for LinkedIn article generation prompts."""

    @staticmethod
    def build_article_prompt(request: Any, persona: Optional[Dict[str, Any]] = None) -> str:
        """
        Build prompt for article generation.

        Args:
            request: LinkedInArticleRequest object containing generation parameters
                (reads topic, industry, tone, target_audience, word_count,
                key_sections).
            persona: Optional persona dict; when present, a PERSONA CONTEXT
                section is prepended using its core_persona / linguistic
                fingerprint fields. Malformed persona data is silently
                ignored.

        Returns:
            Formatted prompt string for article generation
        """
        persona_block = ""
        if persona:
            try:
                # Persona may be flat or nested under 'core_persona'.
                core = persona.get('core_persona', persona)
                # NOTE(review): platform_adaptation is computed but never
                # used below — confirm whether it should feed the prompt.
                platform_adaptation = persona.get('platform_adaptation', persona.get('platform_persona', {}))
                linguistic = core.get('linguistic_fingerprint', {})
                sentence_metrics = linguistic.get('sentence_metrics', {})
                lexical_features = linguistic.get('lexical_features', {})
                tonal_range = core.get('tonal_range', {})
                persona_block = f"""
PERSONA CONTEXT:
- Persona Name: {core.get('persona_name', 'N/A')}
- Archetype: {core.get('archetype', 'N/A')}
- Core Belief: {core.get('core_belief', 'N/A')}
- Default Tone: {tonal_range.get('default_tone', request.tone)}
- Avg Sentence Length: {sentence_metrics.get('average_sentence_length_words', 18)} words
- Go-to Words: {', '.join(lexical_features.get('go_to_words', [])[:5])}
""".rstrip()
            except Exception:
                # Best-effort: a malformed persona falls back to no persona block.
                persona_block = ""

        prompt = f"""
You are a senior content strategist and industry expert specializing in {request.industry}. Create a comprehensive, thought-provoking LinkedIn article that establishes authority, drives engagement, and provides genuine value to professionals in this field.

TOPIC: {request.topic}
INDUSTRY: {request.industry}
TONE: {request.tone}
TARGET AUDIENCE: {request.target_audience or 'Industry professionals, executives, and thought leaders'}
WORD COUNT: {request.word_count} words

{persona_block}

CONTENT STRUCTURE:
- Compelling headline that promises specific value
- Engaging introduction with a hook and clear value proposition
- 3-5 main sections with actionable insights and examples
- Data-driven insights with proper citations
- Practical takeaways and next steps
- Strong conclusion with a call-to-action

CONTENT QUALITY REQUIREMENTS:
- Include current industry statistics and trends (2024-2025)
- Provide real-world examples and case studies
- Address common challenges and pain points
- Offer actionable strategies and frameworks
- Use industry-specific terminology appropriately
- Include expert quotes or insights when relevant

SEO & ENGAGEMENT OPTIMIZATION:
- Use relevant keywords naturally throughout the content
- Include engaging subheadings for scannability
- Add bullet points and numbered lists for key insights
- Include relevant hashtags for discoverability
- End with thought-provoking questions to encourage comments

VISUAL ELEMENTS:
- Suggest 2-3 relevant images or graphics
- Recommend data visualization opportunities
- Include pull quotes for key insights

KEY SECTIONS TO COVER: {', '.join(request.key_sections) if request.key_sections else 'Industry overview, current challenges, emerging trends, practical solutions, future outlook'}

REMEMBER: This article should position the author as a thought leader while providing actionable insights that readers can immediately apply in their professional lives.
"""
        return prompt.strip()
||||
@@ -0,0 +1,112 @@
|
||||
"""
|
||||
LinkedIn Carousel Generation Module
|
||||
|
||||
This module handles the generation of LinkedIn carousels with all processing steps.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List
|
||||
from datetime import datetime
|
||||
from loguru import logger
|
||||
from services.linkedin.quality_handler import QualityHandler
|
||||
|
||||
|
||||
class CarouselGenerator:
    """Handles LinkedIn carousel generation with all processing steps.

    Orchestrates the post-generation pipeline for carousel content:
    citation extraction, quality analysis, and response assembly.
    """

    def __init__(self, citation_manager=None, quality_analyzer=None):
        """Initialize with optional collaborator services.

        Args:
            citation_manager: Optional service exposing extract_citations()
                and generate_source_list(); citations are skipped when absent.
            quality_analyzer: Optional analyzer handed to QualityHandler;
                quality analysis is skipped when absent.
        """
        self.citation_manager = citation_manager
        self.quality_analyzer = quality_analyzer

    async def generate_carousel(
        self,
        request,
        research_sources: List,
        research_time: float,
        content_result: Dict[str, Any],
        grounding_enabled: bool
    ):
        """Generate LinkedIn carousel with all processing steps.

        Args:
            request: Carousel request; reads include_citations and industry.
            research_sources: Sources gathered during the research step.
            research_time: Seconds spent on research (echoed into metadata).
            content_result: Generated content with 'title' and 'slides'
                (each slide is a dict with at least 'title' and 'content').
            grounding_enabled: Whether grounded generation was used.

        Returns:
            Dict with 'success' plus either carousel 'data', generation
            metadata and grounding status, or an 'error' message on failure.
        """
        # Hoisted out of the try block so the except path can always
        # reference it without a fragile locals() check.
        start_time = datetime.now()
        try:
            # Step 3: Add citations if requested.
            # citation_manager is checked in the condition itself, matching
            # VideoScriptGenerator, instead of inline per-call guards.
            citations = []
            source_list = None
            if request.include_citations and research_sources and self.citation_manager:
                all_content = " ".join([slide['content'] for slide in content_result['slides']])
                citations = self.citation_manager.extract_citations(all_content)
                source_list = self.citation_manager.generate_source_list(research_sources)

            # Step 4: Analyze content quality (best-effort; failures only warn).
            quality_metrics = None
            if grounding_enabled and self.quality_analyzer:
                try:
                    all_content = " ".join([slide['content'] for slide in content_result['slides']])
                    quality_handler = QualityHandler(self.quality_analyzer)
                    quality_metrics = quality_handler.create_quality_metrics(
                        content=all_content,
                        sources=research_sources,
                        industry=request.industry,
                        grounding_enabled=grounding_enabled
                    )
                except Exception as e:
                    logger.warning(f"Quality analysis failed: {e}")

            # Step 5: Build per-slide payloads (slide numbers are 1-indexed).
            slides = []
            for i, slide_data in enumerate(content_result['slides']):
                slide_citations = []
                if request.include_citations and research_sources and self.citation_manager:
                    slide_citations = self.citation_manager.extract_citations(slide_data['content'])

                slides.append({
                    'slide_number': i + 1,
                    'title': slide_data['title'],
                    'content': slide_data['content'],
                    'visual_elements': slide_data.get('visual_elements', []),
                    'design_notes': slide_data.get('design_notes'),
                    'citations': slide_citations
                })

            carousel_content = {
                'title': content_result['title'],
                'slides': slides,
                'cover_slide': content_result.get('cover_slide'),
                'cta_slide': content_result.get('cta_slide'),
                'design_guidelines': content_result.get('design_guidelines', {}),
                'citations': citations,
                'source_list': source_list,
                'quality_metrics': quality_metrics,
                'grounding_enabled': grounding_enabled
            }

            generation_time = (datetime.now() - start_time).total_seconds()

            # Grounding status is only reported when grounding was enabled.
            grounding_status = {
                'status': 'success' if grounding_enabled else 'disabled',
                'sources_used': len(research_sources),
                'citation_coverage': len(citations) / max(len(research_sources), 1) if research_sources else 0,
                'quality_score': quality_metrics.overall_score if quality_metrics else 0.0
            } if grounding_enabled else None

            return {
                'success': True,
                'data': carousel_content,
                'research_sources': research_sources,
                'generation_metadata': {
                    'model_used': 'gemini-2.0-flash-001',
                    'generation_time': generation_time,
                    'research_time': research_time,
                    'grounding_enabled': grounding_enabled
                },
                'grounding_status': grounding_status
            }

        except Exception as e:
            logger.error(f"Error generating LinkedIn carousel: {str(e)}")
            return {
                'success': False,
                'error': f"Failed to generate LinkedIn carousel: {str(e)}"
            }
|
||||
@@ -0,0 +1,63 @@
|
||||
"""
|
||||
LinkedIn Carousel Generation Prompts
|
||||
|
||||
This module contains prompt templates and builders for generating LinkedIn carousels.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
class CarouselPromptBuilder:
    """Builder class for LinkedIn carousel generation prompts."""

    @staticmethod
    def build_carousel_prompt(request: Any) -> str:
        """
        Build prompt for carousel generation.

        Args:
            request: LinkedInCarouselRequest object containing generation
                parameters (reads topic, industry, tone, target_audience,
                number_of_slides, include_cover_slide, include_cta_slide,
                key_points).

        Returns:
            Formatted prompt string for carousel generation (stripped of
            surrounding whitespace).
        """
        # Single f-string template; interpolations read straight off the
        # request object, with inline fallbacks for optional fields.
        prompt = f"""
You are a visual content strategist and {request.industry} industry expert. Create a compelling LinkedIn carousel that tells a cohesive story and drives engagement through visual storytelling and valuable insights.

TOPIC: {request.topic}
INDUSTRY: {request.industry}
TONE: {request.tone}
TARGET AUDIENCE: {request.target_audience or 'Industry professionals and decision-makers'}
NUMBER OF SLIDES: {request.number_of_slides}
INCLUDE COVER SLIDE: {request.include_cover_slide}
INCLUDE CTA SLIDE: {request.include_cta_slide}

CAROUSEL STRUCTURE & DESIGN:
- Cover Slide: Compelling headline with visual hook and clear value proposition
- Content Slides: Each slide should focus on ONE key insight with supporting data
- Visual Flow: Create a logical progression that builds understanding
- CTA Slide: Clear next steps and engagement prompts

CONTENT REQUIREMENTS PER SLIDE:
- Maximum 3-4 bullet points per slide for readability
- Include relevant statistics, percentages, or data points
- Use action-oriented language and specific examples
- Each slide should be self-contained but contribute to the overall narrative

VISUAL DESIGN GUIDELINES:
- Suggest color schemes that match the industry (professional yet engaging)
- Recommend icon styles and visual elements for each slide
- Include layout suggestions (text placement, image positioning)
- Suggest data visualization opportunities (charts, graphs, infographics)

ENGAGEMENT STRATEGY:
- Include thought-provoking questions on key slides
- Suggest interactive elements (polls, surveys, comment prompts)
- Use storytelling elements to create emotional connection
- End with clear call-to-action and hashtag suggestions

KEY INSIGHTS TO COVER: {', '.join(request.key_points) if request.key_points else 'Industry trends, challenges, solutions, and opportunities'}

REMEMBER: Each slide should be visually appealing, informative, and encourage the viewer to continue reading. The carousel should provide immediate value while building anticipation for the next slide.
"""
        return prompt.strip()
|
||||
@@ -0,0 +1,64 @@
|
||||
"""
|
||||
LinkedIn Comment Response Generation Prompts
|
||||
|
||||
This module contains prompt templates and builders for generating LinkedIn comment responses.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
class CommentResponsePromptBuilder:
    """Builder class for LinkedIn comment response generation prompts."""

    @staticmethod
    def build_comment_response_prompt(request: Any) -> str:
        """
        Build prompt for comment response generation.

        Args:
            request: LinkedInCommentResponseRequest object containing
                generation parameters (reads original_comment, post_context,
                industry, tone, response_length, include_questions).

        Returns:
            Formatted prompt string for comment response generation
            (stripped of surrounding whitespace).
        """
        # Single f-string template; the original comment is quoted verbatim
        # so the model can reference it directly.
        prompt = f"""
You are a {request.industry} industry expert and LinkedIn engagement specialist. Create a thoughtful, professional comment response that adds genuine value to the conversation and encourages further engagement.

ORIGINAL COMMENT: "{request.original_comment}"
POST CONTEXT: {request.post_context}
INDUSTRY: {request.industry}
TONE: {request.tone}
RESPONSE LENGTH: {request.response_length}
INCLUDE QUESTIONS: {request.include_questions}

RESPONSE STRATEGY:
- Acknowledge the commenter's perspective or question
- Provide specific, actionable insights or examples
- Share relevant industry knowledge or experience
- Encourage further discussion and engagement
- Maintain professional yet conversational tone

CONTENT REQUIREMENTS:
- Start with appreciation or acknowledgment of the comment
- Include 1-2 specific insights that add value
- Use industry-specific examples when relevant
- End with a thought-provoking question or invitation to continue
- Keep the tone consistent with the original post

ENGAGEMENT TECHNIQUES:
- Ask follow-up questions that encourage response
- Share relevant statistics or data points
- Include personal experiences or case studies
- Suggest additional resources or next steps
- Use inclusive language that welcomes others to join

PROFESSIONAL GUIDELINES:
- Always be respectful and constructive
- Avoid controversial or polarizing statements
- Focus on building relationships, not just responding
- Demonstrate expertise without being condescending
- Use appropriate emojis and formatting for warmth

REMEMBER: This response should feel like a natural continuation of the conversation, not just a reply. It should encourage the original commenter and others to engage further.
"""
        return prompt.strip()
|
||||
@@ -0,0 +1,86 @@
|
||||
"""
|
||||
LinkedIn Post Generation Prompts
|
||||
|
||||
This module contains prompt templates and builders for generating LinkedIn posts.
|
||||
"""
|
||||
|
||||
from typing import Any, Optional, Dict
|
||||
|
||||
|
||||
class PostPromptBuilder:
    """Builder class for LinkedIn post generation prompts."""

    @staticmethod
    def build_post_prompt(request: Any, persona: Optional[Dict[str, Any]] = None) -> str:
        """
        Build prompt for post generation.

        Args:
            request: LinkedInPostRequest object containing generation
                parameters (reads topic, industry, tone, target_audience,
                max_length, key_points).
            persona: Optional persona dict shaped like the output of
                persona_service.get_persona_for_platform; when provided, a
                PERSONA CONTEXT section is embedded in the prompt.

        Returns:
            Formatted prompt string for post generation (stripped of
            surrounding whitespace).
        """
        persona_block = ""
        if persona:
            try:
                # Expecting structure similar to persona_service.get_persona_for_platform
                # output; fall back to the top-level dict when 'core_persona' is absent.
                core = persona.get('core_persona', persona)
                linguistic = core.get('linguistic_fingerprint', {})
                sentence_metrics = linguistic.get('sentence_metrics', {})
                lexical_features = linguistic.get('lexical_features', {})
                rhetorical_devices = linguistic.get('rhetorical_devices', {})
                tonal_range = core.get('tonal_range', {})

                persona_block = f"""
PERSONA CONTEXT:
- Persona Name: {core.get('persona_name', 'N/A')}
- Archetype: {core.get('archetype', 'N/A')}
- Core Belief: {core.get('core_belief', 'N/A')}
- Tone: {tonal_range.get('default_tone', request.tone)}
- Sentence Length (avg): {sentence_metrics.get('average_sentence_length_words', 15)} words
- Preferred Sentence Type: {sentence_metrics.get('preferred_sentence_type', 'simple and compound')}
- Go-to Words: {', '.join(lexical_features.get('go_to_words', [])[:5])}
- Avoid Words: {', '.join(lexical_features.get('avoid_words', [])[:5])}
- Rhetorical Style: {rhetorical_devices.get('summary','balanced rhetorical questions and examples')}
""".rstrip()
            except Exception:
                # Persona enrichment is best-effort: a malformed persona dict
                # falls back to a generic (persona-free) prompt.
                persona_block = ""

        prompt = f"""
You are an expert LinkedIn content strategist with 10+ years of experience in the {request.industry} industry. Create a highly engaging, professional LinkedIn post that drives meaningful engagement and establishes thought leadership.

TOPIC: {request.topic}
INDUSTRY: {request.industry}
TONE: {request.tone}
TARGET AUDIENCE: {request.target_audience or 'Industry professionals, decision-makers, and thought leaders'}
MAX LENGTH: {request.max_length} characters

{persona_block}

CONTENT REQUIREMENTS:
- Start with a compelling hook that addresses a pain point or opportunity
- Include 2-3 specific, actionable insights or data points
- Use storytelling elements to make it relatable and memorable
- Include industry-specific examples or case studies when relevant
- End with a thought-provoking question or clear call-to-action
- Use professional yet conversational language that encourages discussion

ENGAGEMENT STRATEGY:
- Include 3-5 highly relevant, trending hashtags (mix of broad and niche)
- Use line breaks and emojis strategically for readability
- Encourage comments by asking for opinions or experiences
- Make it shareable by providing genuine value

KEY POINTS TO COVER: {', '.join(request.key_points) if request.key_points else 'Current industry trends, challenges, and opportunities'}

FORMATTING:
- Use bullet points or numbered lists for key insights
- Include relevant emojis to enhance visual appeal
- Break text into digestible paragraphs (2-3 lines max)
- Leave space for engagement (don't fill the entire character limit)

REMEMBER: This post should position the author as a knowledgeable industry expert while being genuinely helpful to the audience.
"""
        return prompt.strip()
|
||||
@@ -0,0 +1,97 @@
|
||||
"""
|
||||
LinkedIn Video Script Generation Module
|
||||
|
||||
This module handles the generation of LinkedIn video scripts with all processing steps.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, List
|
||||
from datetime import datetime
|
||||
from loguru import logger
|
||||
from services.linkedin.quality_handler import QualityHandler
|
||||
|
||||
|
||||
class VideoScriptGenerator:
    """Handles LinkedIn video script generation with all processing steps.

    Runs the post-generation pipeline: citation extraction, best-effort
    quality analysis, and final response assembly.
    """

    def __init__(self, citation_manager=None, quality_analyzer=None):
        # Optional collaborators; each processing step degrades gracefully
        # when its collaborator is absent.
        self.citation_manager = citation_manager
        self.quality_analyzer = quality_analyzer

    async def generate_video_script(
        self,
        request,
        research_sources: List,
        research_time: float,
        content_result: Dict[str, Any],
        grounding_enabled: bool
    ):
        """Generate LinkedIn video script with all processing steps."""
        try:
            started = datetime.now()

            def full_text() -> str:
                # Flatten hook, scene bodies and conclusion into one string
                # for citation extraction and quality scoring.
                scenes = ' '.join(scene['content'] for scene in content_result['main_content'])
                return f"{content_result['hook']} {scenes} {content_result['conclusion']}"

            # Step 3: citations (only when requested and a manager exists).
            extracted_citations = []
            sources_listing = None
            if request.include_citations and research_sources and self.citation_manager:
                extracted_citations = self.citation_manager.extract_citations(full_text())
                sources_listing = self.citation_manager.generate_source_list(research_sources)

            # Step 4: quality metrics (best-effort; failures only warn).
            metrics = None
            if grounding_enabled and self.quality_analyzer:
                try:
                    metrics = QualityHandler(self.quality_analyzer).create_quality_metrics(
                        content=full_text(),
                        sources=research_sources,
                        industry=request.industry,
                        grounding_enabled=grounding_enabled
                    )
                except Exception as exc:
                    logger.warning(f"Quality analysis failed: {exc}")

            # Step 5: assemble the response payload.
            script_payload = {
                'hook': content_result['hook'],
                'main_content': content_result['main_content'],
                'conclusion': content_result['conclusion'],
                'captions': content_result.get('captions'),
                'thumbnail_suggestions': content_result.get('thumbnail_suggestions', []),
                'video_description': content_result.get('video_description', ''),
                'citations': extracted_citations,
                'source_list': sources_listing,
                'quality_metrics': metrics,
                'grounding_enabled': grounding_enabled
            }

            elapsed = (datetime.now() - started).total_seconds()

            # Grounding status is only attached when grounding was enabled.
            status = None
            if grounding_enabled:
                coverage = len(extracted_citations) / max(len(research_sources), 1) if research_sources else 0
                status = {
                    'status': 'success',
                    'sources_used': len(research_sources),
                    'citation_coverage': coverage,
                    'quality_score': metrics.overall_score if metrics else 0.0
                }

            return {
                'success': True,
                'data': script_payload,
                'research_sources': research_sources,
                'generation_metadata': {
                    'model_used': 'gemini-2.0-flash-001',
                    'generation_time': elapsed,
                    'research_time': research_time,
                    'grounding_enabled': grounding_enabled
                },
                'grounding_status': status
            }

        except Exception as e:
            logger.error(f"Error generating LinkedIn video script: {str(e)}")
            return {
                'success': False,
                'error': f"Failed to generate LinkedIn video script: {str(e)}"
            }
|
||||
@@ -0,0 +1,75 @@
|
||||
"""
|
||||
LinkedIn Video Script Generation Prompts
|
||||
|
||||
This module contains prompt templates and builders for generating LinkedIn video scripts.
|
||||
"""
|
||||
|
||||
from typing import Any
|
||||
|
||||
|
||||
class VideoScriptPromptBuilder:
    """Builder class for LinkedIn video script generation prompts."""

    @staticmethod
    def build_video_script_prompt(request: Any) -> str:
        """
        Build prompt for video script generation.

        Args:
            request: LinkedInVideoScriptRequest object containing generation
                parameters (reads topic, industry, tone, target_audience,
                video_duration, include_captions,
                include_thumbnail_suggestions, key_points).

        Returns:
            Formatted prompt string for video script generation (stripped
            of surrounding whitespace).
        """
        # Single f-string template. Note the computed timing bound below:
        # the main-content window ends 5 seconds before the total duration
        # to leave room for the conclusion.
        prompt = f"""
You are a video content strategist and {request.industry} industry expert. Create a compelling LinkedIn video script that captures attention in the first 3 seconds and maintains engagement throughout the entire duration.

TOPIC: {request.topic}
INDUSTRY: {request.industry}
TONE: {request.tone}
TARGET AUDIENCE: {request.target_audience or 'Industry professionals and decision-makers'}
DURATION: {request.video_duration} seconds
INCLUDE CAPTIONS: {request.include_captions}
INCLUDE THUMBNAIL SUGGESTIONS: {request.include_thumbnail_suggestions}

VIDEO STRUCTURE & TIMING:
- Hook (0-3 seconds): Compelling opening that stops the scroll
- Introduction (3-8 seconds): Establish credibility and preview value
- Main Content (8-{request.video_duration-5} seconds): 2-3 key insights with examples
- Conclusion (Last 5 seconds): Clear call-to-action and engagement prompt

CONTENT REQUIREMENTS:
- Start with a surprising statistic, question, or bold statement
- Include specific examples and case studies from the industry
- Use conversational, engaging language that feels natural when spoken
- Include 2-3 actionable takeaways viewers can implement immediately
- End with a question that encourages comments and discussion

VISUAL & AUDIO GUIDELINES:
- Suggest background music style and mood
- Recommend visual elements (text overlays, graphics, charts)
- Include specific camera angle and movement suggestions
- Suggest props or visual aids that enhance the message

CAPTION OPTIMIZATION:
- Write captions that are engaging even without audio
- Include emojis and formatting for visual appeal
- Ensure captions complement the spoken content
- Make captions scannable and easy to read

THUMBNAIL DESIGN:
- Suggest compelling thumbnail text and imagery
- Recommend color schemes that match the industry
- Include specific design elements that increase click-through rates

ENGAGEMENT STRATEGY:
- Include moments that encourage viewers to pause and think
- Suggest interactive elements (polls, questions, challenges)
- Create emotional connection through storytelling
- End with clear next steps and hashtag suggestions

KEY INSIGHTS TO COVER: {', '.join(request.key_points) if request.key_points else 'Industry trends, challenges, solutions, and opportunities'}

REMEMBER: This video should provide immediate value while building the creator's authority. Every second should count toward engagement and viewer retention.
"""
        return prompt.strip()
|
||||
22
backend/services/linkedin/image_generation/__init__.py
Normal file
22
backend/services/linkedin/image_generation/__init__.py
Normal file
@@ -0,0 +1,22 @@
|
||||
"""
|
||||
LinkedIn Image Generation Package
|
||||
|
||||
This package provides AI-powered image generation capabilities for LinkedIn content
|
||||
using Google's Gemini API. It includes image generation, editing, storage, and
|
||||
management services optimized for professional business use.
|
||||
"""
|
||||
|
||||
from .linkedin_image_generator import LinkedInImageGenerator
|
||||
from .linkedin_image_editor import LinkedInImageEditor
|
||||
from .linkedin_image_storage import LinkedInImageStorage
|
||||
|
||||
# Public API of the image_generation package.
__all__ = [
    'LinkedInImageGenerator',
    'LinkedInImageEditor',
    'LinkedInImageStorage'
]

# Version information
__version__ = "1.0.0"
__author__ = "Alwrity Team"
__description__ = "LinkedIn AI Image Generation Services"
|
||||
@@ -0,0 +1,530 @@
|
||||
"""
|
||||
LinkedIn Image Editor Service
|
||||
|
||||
This service handles image editing capabilities for LinkedIn content using Gemini's
|
||||
conversational editing features. It provides professional image refinement and
|
||||
optimization specifically for LinkedIn use cases.
|
||||
"""
|
||||
|
||||
import os
|
||||
import base64
|
||||
from typing import Dict, Any, Optional, List
|
||||
from datetime import datetime
|
||||
from PIL import Image, ImageEnhance, ImageFilter
|
||||
from io import BytesIO
|
||||
from loguru import logger
|
||||
|
||||
# Import existing infrastructure
|
||||
from ...onboarding.api_key_manager import APIKeyManager
|
||||
|
||||
|
||||
class LinkedInImageEditor:
|
||||
"""
|
||||
Handles LinkedIn image editing and refinement using Gemini's capabilities.
|
||||
|
||||
This service provides both AI-powered editing through Gemini and traditional
|
||||
image processing for LinkedIn-specific optimizations.
|
||||
"""
|
||||
|
||||
def __init__(self, api_key_manager: Optional[APIKeyManager] = None):
    """
    Initialize the LinkedIn Image Editor.

    Args:
        api_key_manager: API key manager for Gemini authentication; a
            fresh manager is created when none is supplied.
    """
    # Fall back to a default manager when the caller does not inject one.
    if api_key_manager:
        self.api_key_manager = api_key_manager
    else:
        self.api_key_manager = APIKeyManager()

    self.model = "gemini-2.5-flash-image-preview"

    # LinkedIn-specific editing parameters (tuned for feed/mobile viewing).
    self.enhancement_factors = {
        'brightness': 1.1,   # Slightly brighter for mobile viewing
        'contrast': 1.05,    # Subtle contrast enhancement
        'sharpness': 1.2,    # Enhanced sharpness for clarity
        'saturation': 1.05,  # Slight saturation boost
    }

    logger.info("LinkedIn Image Editor initialized")
|
||||
|
||||
async def edit_image_conversationally(
    self,
    base_image: bytes,
    edit_prompt: str,
    content_context: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Edit image using Gemini's conversational editing capabilities.

    Args:
        base_image: Base image data in bytes
        edit_prompt: Natural language description of desired edits
        content_context: LinkedIn content context for optimization

    Returns:
        Dict containing edited image result and metadata, or a failure
        dict with 'error' and 'generation_time'.
    """
    # Hoisted above the try block so the except path can always compute
    # the elapsed time (replaces the fragile 'start_time' in locals() check).
    start_time = datetime.now()
    try:
        logger.info(f"Starting conversational image editing: {edit_prompt[:100]}...")

        # Enhance edit prompt for LinkedIn optimization (recorded in metadata).
        enhanced_prompt = self._enhance_edit_prompt_for_linkedin(
            edit_prompt, content_context
        )

        # TODO: Implement Gemini conversational editing when available.
        # For now, use traditional image processing based on prompt analysis.
        edited_image = await self._apply_traditional_editing(
            base_image, edit_prompt, content_context
        )

        if not edited_image.get('success'):
            return edited_image

        generation_time = (datetime.now() - start_time).total_seconds()

        return {
            'success': True,
            'image_data': edited_image['image_data'],
            'metadata': {
                'edit_prompt': edit_prompt,
                'enhanced_prompt': enhanced_prompt,
                'editing_method': 'traditional_processing',
                'editing_time': generation_time,
                'content_context': content_context,
                'model_used': self.model
            },
            'linkedin_optimization': {
                'mobile_optimized': True,
                'professional_aesthetic': True,
                'brand_compliant': True,
                'engagement_optimized': True
            }
        }

    except Exception as e:
        logger.error(f"Error in conversational image editing: {str(e)}")
        return {
            'success': False,
            'error': f"Conversational editing failed: {str(e)}",
            'generation_time': (datetime.now() - start_time).total_seconds()
        }
|
||||
|
||||
async def apply_style_transfer(
    self,
    base_image: bytes,
    style_reference: bytes,
    content_context: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Apply style transfer from reference image to base image.

    Currently a placeholder: always returns a not-implemented failure
    until the Gemini API exposes style transfer.

    Args:
        base_image: Base image data in bytes
        style_reference: Reference image for style transfer
        content_context: LinkedIn content context

    Returns:
        Dict containing style-transferred image result (currently always
        a failure dict with 'error' and 'generation_time').
    """
    # Hoisted above the try block so the except path can always compute
    # the elapsed time (replaces the fragile 'start_time' in locals() check).
    start_time = datetime.now()
    try:
        logger.info("Starting style transfer for LinkedIn image")

        # TODO: Implement Gemini style transfer when available.
        return {
            'success': False,
            'error': 'Style transfer not yet implemented - coming in next Gemini API update',
            'generation_time': (datetime.now() - start_time).total_seconds()
        }

    except Exception as e:
        logger.error(f"Error in style transfer: {str(e)}")
        return {
            'success': False,
            'error': f"Style transfer failed: {str(e)}",
            'generation_time': (datetime.now() - start_time).total_seconds()
        }
|
||||
|
||||
async def enhance_image_quality(
    self,
    image_data: bytes,
    enhancement_type: str = "linkedin_optimized",
    content_context: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
    """
    Enhance image quality using traditional image processing.

    Args:
        image_data: Image data in bytes
        enhancement_type: One of "linkedin_optimized" (default),
            "professional", or "creative"; unknown values fall back to
            the LinkedIn-optimized pipeline.
        content_context: LinkedIn content context for optimization

    Returns:
        Dict containing enhanced image result, or a failure dict with
        'error' and 'generation_time'.
    """
    # Hoisted above the try block so the except path can always compute
    # the elapsed time (replaces the fragile 'start_time' in locals() check).
    start_time = datetime.now()
    try:
        logger.info(f"Starting image quality enhancement: {enhancement_type}")

        # Open image for processing.
        image = Image.open(BytesIO(image_data))
        original_size = image.size

        # Dispatch on enhancement type. The original duplicated the
        # "linkedin_optimized" branch in the else clause; both collapse
        # into the fallback here with identical behavior.
        if enhancement_type == "professional":
            enhanced_image = self._apply_professional_enhancements(image)
        elif enhancement_type == "creative":
            enhanced_image = self._apply_creative_enhancements(image)
        else:
            enhanced_image = self._apply_linkedin_enhancements(image, content_context)

        # Convert back to bytes, defaulting to PNG when the format is unknown.
        output_buffer = BytesIO()
        enhanced_image.save(output_buffer, format=image.format or "PNG", optimize=True)
        enhanced_data = output_buffer.getvalue()

        enhancement_time = (datetime.now() - start_time).total_seconds()

        return {
            'success': True,
            'image_data': enhanced_data,
            'metadata': {
                'enhancement_type': enhancement_type,
                'original_size': original_size,
                'enhanced_size': enhanced_image.size,
                'enhancement_time': enhancement_time,
                'content_context': content_context
            }
        }

    except Exception as e:
        logger.error(f"Error in image quality enhancement: {str(e)}")
        return {
            'success': False,
            'error': f"Quality enhancement failed: {str(e)}",
            'generation_time': (datetime.now() - start_time).total_seconds()
        }
|
||||
|
||||
def _enhance_edit_prompt_for_linkedin(
|
||||
self,
|
||||
edit_prompt: str,
|
||||
content_context: Dict[str, Any]
|
||||
) -> str:
|
||||
"""
|
||||
Enhance edit prompt for LinkedIn optimization.
|
||||
|
||||
Args:
|
||||
edit_prompt: Original edit prompt
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Enhanced edit prompt
|
||||
"""
|
||||
industry = content_context.get('industry', 'business')
|
||||
content_type = content_context.get('content_type', 'post')
|
||||
|
||||
linkedin_edit_enhancements = [
|
||||
f"Maintain professional business aesthetic for {industry} industry",
|
||||
f"Ensure mobile-optimized composition for LinkedIn {content_type}",
|
||||
"Keep professional color scheme and typography",
|
||||
"Maintain brand consistency and visual hierarchy",
|
||||
"Optimize for LinkedIn feed viewing and engagement"
|
||||
]
|
||||
|
||||
enhanced_prompt = f"{edit_prompt}\n\n"
|
||||
enhanced_prompt += "\n".join(linkedin_edit_enhancements)
|
||||
|
||||
return enhanced_prompt
|
||||
|
||||
async def _apply_traditional_editing(
    self,
    base_image: bytes,
    edit_prompt: str,
    content_context: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Apply traditional image processing based on edit prompt analysis.

    Args:
        base_image: Base image data in bytes
        edit_prompt: Description of desired edits
        content_context: LinkedIn content context

    Returns:
        Dict containing edited image result
    """
    try:
        img = Image.open(BytesIO(base_image))
        requested = edit_prompt.lower()

        # Ordered (trigger words, operation, log message) table; each
        # operation fires when any trigger word appears in the prompt,
        # applied in this fixed order.
        operations = (
            (('brighter', 'light', 'lighting'),
             lambda im: self._adjust_brightness(im, 1.2),
             "Applied brightness adjustment"),
            (('sharper', 'sharp', 'clear'),
             self._apply_sharpening,
             "Applied sharpening"),
            (('warmer', 'warm', 'color'),
             lambda im: self._adjust_color_temperature(im, 'warm'),
             "Applied warm color adjustment"),
            (('professional', 'business'),
             self._apply_professional_enhancements,
             "Applied professional enhancements"),
        )
        for triggers, operation, message in operations:
            if any(word in requested for word in triggers):
                img = operation(img)
                logger.info(message)

        # Serialize back to bytes, defaulting to PNG when format is unknown.
        out = BytesIO()
        img.save(out, format=img.format or "PNG", optimize=True)

        return {
            'success': True,
            'image_data': out.getvalue()
        }

    except Exception as e:
        logger.error(f"Error in traditional editing: {str(e)}")
        return {
            'success': False,
            'error': f"Traditional editing failed: {str(e)}"
        }
|
||||
|
||||
def _apply_linkedin_enhancements(
    self,
    image: Image.Image,
    content_context: Optional[Dict[str, Any]] = None
) -> Image.Image:
    """Run the standard LinkedIn optimization pipeline on an image.

    Brightness, contrast, sharpening and saturation are tuned from
    ``self.enhancement_factors``, then the result is normalized for a
    professional appearance. On failure the last successfully processed
    image (or the original) is returned.
    """
    try:
        factors = self.enhancement_factors
        pipeline = [
            lambda img: self._adjust_brightness(img, factors['brightness']),
            lambda img: self._adjust_contrast(img, factors['contrast']),
            self._apply_sharpening,
            lambda img: self._adjust_saturation(img, factors['saturation']),
            lambda img: self._ensure_professional_appearance(img, content_context),
        ]
        for step in pipeline:
            image = step(image)
        return image

    except Exception as e:
        logger.error(f"Error applying LinkedIn enhancements: {str(e)}")
        return image
|
||||
|
||||
def _apply_professional_enhancements(self, image: Image.Image) -> Image.Image:
    """Apply subtle brightness/contrast/sharpen tweaks for a business look.

    Args:
        image: PIL Image object.

    Returns:
        Enhanced image; on failure, the last successfully processed image.
    """
    try:
        # Deliberately mild factors: professional imagery should not look
        # visibly "filtered".
        for step in (
            lambda img: self._adjust_brightness(img, 1.05),
            lambda img: self._adjust_contrast(img, 1.03),
            self._apply_sharpening,
        ):
            image = step(image)
        return image

    except Exception as e:
        logger.error(f"Error applying professional enhancements: {str(e)}")
        return image
|
||||
|
||||
def _apply_creative_enhancements(self, image: Image.Image) -> Image.Image:
    """Apply more pronounced enhancements for creative/engaging visuals.

    Args:
        image: PIL Image object.

    Returns:
        Enhanced image; on failure, the last successfully processed image.
    """
    try:
        # Stronger factors than the professional pass, for visual pop.
        for step in (
            lambda img: self._adjust_brightness(img, 1.1),
            lambda img: self._adjust_contrast(img, 1.08),
            lambda img: self._adjust_saturation(img, 1.1),
            self._apply_sharpening,
        ):
            image = step(image)
        return image

    except Exception as e:
        logger.error(f"Error applying creative enhancements: {str(e)}")
        return image
|
||||
|
||||
def _adjust_brightness(self, image: Image.Image, factor: float) -> Image.Image:
    """Scale image brightness by *factor* (1.0 = unchanged); input returned on error."""
    try:
        return ImageEnhance.Brightness(image).enhance(factor)
    except Exception as e:
        logger.error(f"Error adjusting brightness: {str(e)}")
        return image
|
||||
|
||||
def _adjust_contrast(self, image: Image.Image, factor: float) -> Image.Image:
    """Scale image contrast by *factor* (1.0 = unchanged); input returned on error."""
    try:
        return ImageEnhance.Contrast(image).enhance(factor)
    except Exception as e:
        logger.error(f"Error adjusting contrast: {str(e)}")
        return image
|
||||
|
||||
def _adjust_saturation(self, image: Image.Image, factor: float) -> Image.Image:
    """Scale color saturation by *factor* (1.0 = unchanged); input returned on error."""
    try:
        return ImageEnhance.Color(image).enhance(factor)
    except Exception as e:
        logger.error(f"Error adjusting saturation: {str(e)}")
        return image
|
||||
|
||||
def _apply_sharpening(self, image: Image.Image) -> Image.Image:
    """Sharpen via unsharp mask (radius=1, 150%, threshold=3); input returned on error."""
    try:
        unsharp = ImageFilter.UnsharpMask(radius=1, percent=150, threshold=3)
        return image.filter(unsharp)
    except Exception as e:
        logger.error(f"Error applying sharpening: {str(e)}")
        return image
|
||||
|
||||
def _adjust_color_temperature(self, image: Image.Image, temperature: str) -> Image.Image:
    """Shift color temperature; only 'warm' is currently supported.

    Args:
        image: PIL Image object.
        temperature: Temperature keyword; anything other than 'warm' is a no-op.

    Returns:
        Adjusted image, or the input unchanged on error / unsupported value.
    """
    try:
        if temperature != 'warm':
            return image
        # Warmth is approximated by a mild saturation boost; proper
        # per-channel color grading could replace this later.
        return ImageEnhance.Color(image).enhance(1.1)
    except Exception as e:
        logger.error(f"Error adjusting color temperature: {str(e)}")
        return image
|
||||
|
||||
def _ensure_professional_appearance(
    self,
    image: Image.Image,
    content_context: Optional[Dict[str, Any]] = None
) -> Image.Image:
    """Normalize an image to LinkedIn professional standards.

    Flattens transparency/palette modes onto a white background and
    upsizes anything below 1024x1024 while preserving the aspect ratio.

    Args:
        image: PIL Image object.
        content_context: LinkedIn content context (currently unused here).

    Returns:
        Normalized image, or the input unchanged on failure.
    """
    try:
        # Flatten alpha/palette modes onto white for consistent display.
        if image.mode in ('RGBA', 'LA', 'P'):
            canvas = Image.new('RGB', image.size, (255, 255, 255))
            if image.mode == 'P':
                image = image.convert('RGBA')
            mask = image.split()[-1] if image.mode == 'RGBA' else None
            canvas.paste(image, mask=mask)
            image = canvas

        # Upscale to the LinkedIn minimum resolution when needed.
        min_w, min_h = 1024, 1024
        w, h = image.size
        if w < min_w or h < min_h:
            scale = max(min_w / w, min_h / h)
            target = (int(w * scale), int(h * scale))
            image = image.resize(target, Image.Resampling.LANCZOS)
            logger.info(f"Resized image to {target} for LinkedIn professional standards")

        return image

    except Exception as e:
        logger.error(f"Error ensuring professional appearance: {str(e)}")
        return image
|
||||
|
||||
async def get_editing_suggestions(
    self,
    image_data: bytes,
    content_context: Dict[str, Any]
) -> List[Dict[str, Any]]:
    """Return context-aware editing suggestions for a LinkedIn image.

    Args:
        image_data: Image bytes (currently unused; reserved for future
            AI-based analysis of the actual image content).
        content_context: LinkedIn content context; 'industry' tailors one
            of the suggestions.

    Returns:
        List of suggestion dicts with keys: id, title, description,
        prompt, priority. Empty list on failure.
    """
    try:
        industry = content_context.get('industry', 'business')
        return [
            {
                'id': 'professional_enhancement',
                'title': 'Professional Enhancement',
                'description': 'Apply subtle professional enhancements for business appeal',
                'prompt': 'Enhance this image with professional business aesthetics',
                'priority': 'high'
            },
            {
                'id': 'mobile_optimization',
                'title': 'Mobile Optimization',
                'description': 'Optimize for LinkedIn mobile feed viewing',
                'prompt': 'Optimize this image for mobile LinkedIn viewing',
                'priority': 'medium'
            },
            {
                'id': 'industry_optimization',
                'title': f'{industry.title()} Industry Optimization',
                'description': f'Apply {industry} industry-specific visual enhancements',
                'prompt': f'Enhance this image with {industry} industry aesthetics',
                'priority': 'medium'
            },
            {
                'id': 'engagement_optimization',
                'title': 'Engagement Optimization',
                'description': 'Make this image more engaging for LinkedIn audience',
                'prompt': 'Make this image more engaging and shareable for LinkedIn',
                'priority': 'low'
            },
        ]

    except Exception as e:
        logger.error(f"Error getting editing suggestions: {str(e)}")
        return []
|
||||
@@ -0,0 +1,496 @@
|
||||
"""
|
||||
LinkedIn Image Generator Service
|
||||
|
||||
This service generates LinkedIn-optimized images using Google's Gemini API.
|
||||
It provides professional, business-appropriate imagery for LinkedIn content.
|
||||
"""
|
||||
|
||||
import os
|
||||
import asyncio
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from typing import Dict, Any, Optional, Tuple
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
|
||||
# Import existing infrastructure
|
||||
from ...onboarding.api_key_manager import APIKeyManager
|
||||
from ...llm_providers.main_image_generation import generate_image
|
||||
|
||||
# Set up logging
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LinkedInImageGenerator:
    """Generates LinkedIn-optimized images via the Gemini API.

    Integrates with the existing Gemini provider infrastructure and adds
    LinkedIn-specific optimization, quality assurance, and professional
    business aesthetics.
    """

    def __init__(self, api_key_manager: Optional[APIKeyManager] = None):
        """Initialize the generator.

        Args:
            api_key_manager: API key manager for Gemini authentication;
                a default APIKeyManager is created when omitted.
        """
        self.api_key_manager = api_key_manager or APIKeyManager()
        self.model = "gemini-2.5-flash-image-preview"
        self.default_aspect_ratio = "1:1"  # LinkedIn post optimal ratio
        self.max_retries = 3

        # LinkedIn-specific image requirements.
        self.min_resolution = (1024, 1024)
        self.max_file_size_mb = 5
        self.supported_formats = ["PNG", "JPEG"]

        logger.info("LinkedIn Image Generator initialized")
|
||||
|
||||
async def generate_image(
    self,
    prompt: str,
    content_context: Dict[str, Any],
    aspect_ratio: str = "1:1",
    style_preference: str = "professional"
) -> Dict[str, Any]:
    """Generate a LinkedIn-optimized image via the Gemini pipeline.

    Args:
        prompt: User's image generation prompt.
        content_context: LinkedIn content context (topic, industry, content_type).
        aspect_ratio: Image aspect ratio (1:1, 16:9, 4:3).
        style_preference: Style preference (professional, creative, industry-specific).

    Returns:
        Dict with 'success' plus image data, metadata and LinkedIn
        optimization flags, or an 'error' message with timing.
    """
    # Capture the start time BEFORE the try block so the except handler can
    # always report a duration (replaces the fragile
    # "'start_time' in locals()" guard in the previous version).
    start_time = datetime.now()
    try:
        logger.info(f"Starting LinkedIn image generation for topic: {content_context.get('topic', 'Unknown')}")

        # Enrich the raw prompt with LinkedIn-specific guidance.
        enhanced_prompt = self._enhance_prompt_for_linkedin(
            prompt, content_context, style_preference, aspect_ratio
        )

        # Generate the image through the unified Gemini infrastructure.
        generation_result = await self._generate_with_gemini(enhanced_prompt, aspect_ratio)

        if not generation_result.get('success'):
            return {
                'success': False,
                'error': generation_result.get('error', 'Image generation failed'),
                'generation_time': (datetime.now() - start_time).total_seconds()
            }

        # Validate/resize/flatten the raw output for LinkedIn display.
        processed_image = await self._process_generated_image(
            generation_result['image_data'],
            content_context,
            aspect_ratio
        )

        generation_time = (datetime.now() - start_time).total_seconds()

        return {
            'success': True,
            'image_data': processed_image['image_data'],
            'image_url': processed_image.get('image_url'),
            'metadata': {
                'prompt_used': enhanced_prompt,
                'original_prompt': prompt,
                'style_preference': style_preference,
                'aspect_ratio': aspect_ratio,
                'content_context': content_context,
                'generation_time': generation_time,
                'model_used': self.model,
                'image_format': processed_image['format'],
                'image_size': processed_image['size'],
                'resolution': processed_image['resolution']
            },
            'linkedin_optimization': {
                'mobile_optimized': True,
                'professional_aesthetic': True,
                'brand_compliant': True,
                'engagement_optimized': True
            }
        }

    except Exception as e:
        logger.error(f"Error in LinkedIn image generation: {str(e)}")
        return {
            'success': False,
            'error': f"Image generation failed: {str(e)}",
            'generation_time': (datetime.now() - start_time).total_seconds()
        }
|
||||
|
||||
async def edit_image(
    self,
    base_image: bytes,
    edit_prompt: str,
    content_context: Dict[str, Any]
) -> Dict[str, Any]:
    """Edit an existing image via Gemini's conversational editing.

    Currently a stub: the enhanced prompt is prepared, but no editing call
    is made until Gemini's image editing API becomes available.

    Args:
        base_image: Base image data in bytes.
        edit_prompt: Description of desired edits.
        content_context: LinkedIn content context for optimization.

    Returns:
        Dict with 'success' (always False for now), 'error' and timing.
    """
    # Capture the start time BEFORE the try block so the except handler can
    # always report a duration (replaces the fragile
    # "'start_time' in locals()" guard in the previous version).
    start_time = datetime.now()
    try:
        logger.info(f"Starting LinkedIn image editing with prompt: {edit_prompt[:100]}...")

        # Prepared now so the future Gemini editing call can use it directly.
        enhanced_edit_prompt = self._enhance_edit_prompt_for_linkedin(
            edit_prompt, content_context
        )

        return {
            'success': False,
            'error': 'Image editing not yet implemented - coming in next Gemini API update',
            'generation_time': (datetime.now() - start_time).total_seconds()
        }

    except Exception as e:
        logger.error(f"Error in LinkedIn image editing: {str(e)}")
        return {
            'success': False,
            'error': f"Image editing failed: {str(e)}",
            'generation_time': (datetime.now() - start_time).total_seconds()
        }
|
||||
|
||||
def _enhance_prompt_for_linkedin(
|
||||
self,
|
||||
prompt: str,
|
||||
content_context: Dict[str, Any],
|
||||
style_preference: str,
|
||||
aspect_ratio: str
|
||||
) -> str:
|
||||
"""
|
||||
Enhance user prompt with LinkedIn-specific context and best practices.
|
||||
|
||||
Args:
|
||||
prompt: Original user prompt
|
||||
content_context: LinkedIn content context
|
||||
style_preference: Preferred visual style
|
||||
aspect_ratio: Image aspect ratio
|
||||
|
||||
Returns:
|
||||
Enhanced prompt optimized for LinkedIn
|
||||
"""
|
||||
topic = content_context.get('topic', 'business')
|
||||
industry = content_context.get('industry', 'business')
|
||||
content_type = content_context.get('content_type', 'post')
|
||||
|
||||
# Base LinkedIn optimization
|
||||
linkedin_optimizations = [
|
||||
f"Create a professional LinkedIn {content_type} image for {topic}",
|
||||
f"Industry: {industry}",
|
||||
f"Professional business aesthetic suitable for LinkedIn audience",
|
||||
f"Mobile-optimized design for LinkedIn feed viewing",
|
||||
f"Aspect ratio: {aspect_ratio}",
|
||||
"High-quality, modern design with clear visual hierarchy",
|
||||
"Professional color scheme and typography",
|
||||
"Suitable for business and professional networking"
|
||||
]
|
||||
|
||||
# Style-specific enhancements
|
||||
if style_preference == "professional":
|
||||
style_enhancements = [
|
||||
"Corporate aesthetics with clean lines and geometric shapes",
|
||||
"Professional color palette (blues, grays, whites)",
|
||||
"Modern business environment or abstract business concepts",
|
||||
"Clean, minimalist design approach"
|
||||
]
|
||||
elif style_preference == "creative":
|
||||
style_enhancements = [
|
||||
"Eye-catching and engaging visual style",
|
||||
"Vibrant colors while maintaining professional appeal",
|
||||
"Creative composition that encourages social media engagement",
|
||||
"Modern design elements with business context"
|
||||
]
|
||||
else: # industry-specific
|
||||
style_enhancements = [
|
||||
f"Industry-specific visual elements for {industry}",
|
||||
"Professional yet creative approach",
|
||||
"Balanced design suitable for business audience",
|
||||
"Industry-relevant imagery and color schemes"
|
||||
]
|
||||
|
||||
# Combine all enhancements
|
||||
enhanced_prompt = f"{prompt}\n\n"
|
||||
enhanced_prompt += "\n".join(linkedin_optimizations)
|
||||
enhanced_prompt += "\n" + "\n".join(style_enhancements)
|
||||
|
||||
logger.info(f"Enhanced prompt for LinkedIn: {enhanced_prompt[:200]}...")
|
||||
return enhanced_prompt
|
||||
|
||||
def _enhance_edit_prompt_for_linkedin(
|
||||
self,
|
||||
edit_prompt: str,
|
||||
content_context: Dict[str, Any]
|
||||
) -> str:
|
||||
"""
|
||||
Enhance edit prompt for LinkedIn optimization.
|
||||
|
||||
Args:
|
||||
edit_prompt: Original edit prompt
|
||||
content_context: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
Enhanced edit prompt
|
||||
"""
|
||||
industry = content_context.get('industry', 'business')
|
||||
|
||||
linkedin_edit_enhancements = [
|
||||
f"Maintain professional business aesthetic for {industry} industry",
|
||||
"Ensure mobile-optimized composition for LinkedIn feed",
|
||||
"Keep professional color scheme and typography",
|
||||
"Maintain brand consistency and visual hierarchy"
|
||||
]
|
||||
|
||||
enhanced_edit_prompt = f"{edit_prompt}\n\n"
|
||||
enhanced_edit_prompt += "\n".join(linkedin_edit_enhancements)
|
||||
|
||||
return enhanced_edit_prompt
|
||||
|
||||
async def _generate_with_gemini(self, prompt: str, aspect_ratio: str) -> Dict[str, Any]:
    """Generate an image through the unified image-generation layer.

    Args:
        prompt: Fully enhanced generation prompt.
        aspect_ratio: Requested ratio ("1:1", "16:9", "4:3", "9:16");
            unknown values fall back to 1024x1024.

    Returns:
        Dict with 'success' plus image bytes/dimensions/provider info,
        or an 'error' message.
    """
    try:
        # LinkedIn-optimized pixel dimensions per aspect ratio.
        width, height = {
            "1:1": (1024, 1024),
            "16:9": (1920, 1080),
            "4:3": (1366, 1024),
            "9:16": (1080, 1920),  # portrait for stories
        }.get(aspect_ratio, (1024, 1024))

        # Delegate to the unified generation system; LinkedIn defaults to
        # the Gemini provider.
        outcome = generate_image(
            prompt=prompt,
            options={
                "provider": "gemini",
                "model": getattr(self, 'model', None),
                "width": width,
                "height": height,
            }
        )

        if outcome and outcome.image_bytes:
            return {
                'success': True,
                'image_data': outcome.image_bytes,
                'image_path': None,  # bytes used directly; no file written
                'width': outcome.width,
                'height': outcome.height,
                'provider': outcome.provider,
                'model': outcome.model,
            }

        return {
            'success': False,
            'error': 'Image generation returned no result'
        }

    except Exception as e:
        logger.error(f"Error in image generation: {str(e)}")
        return {
            'success': False,
            'error': f"Image generation failed: {str(e)}"
        }
|
||||
|
||||
async def _process_generated_image(
    self,
    image_data: bytes,
    content_context: Dict[str, Any],
    aspect_ratio: str
) -> Dict[str, Any]:
    """Validate and optimize a freshly generated image for LinkedIn.

    Resolution/size violations are logged as warnings rather than
    rejected, then the LinkedIn optimization pass runs and the image is
    re-encoded. Falls back to the raw input bytes if anything fails.

    Args:
        image_data: Raw generated image bytes.
        content_context: LinkedIn content context.
        aspect_ratio: Requested aspect ratio (used only in the fallback).

    Returns:
        Dict with image_data, format, size, resolution and aspect_ratio.
    """
    try:
        source = Image.open(BytesIO(image_data))
        width, height = source.size
        format_name = source.format or "PNG"

        # Soft validation: warn but do not fail, so callers still get an image.
        if width < self.min_resolution[0] or height < self.min_resolution[1]:
            logger.warning(f"Generated image resolution {width}x{height} below minimum {self.min_resolution}")
        size_mb = len(image_data) / (1024 * 1024)
        if size_mb > self.max_file_size_mb:
            logger.warning(f"Generated image size {size_mb:.2f}MB exceeds maximum {self.max_file_size_mb}MB")

        optimized = self._optimize_for_linkedin(source, content_context)

        # Re-encode the optimized image in the original format.
        buffer = BytesIO()
        optimized.save(buffer, format=format_name, optimize=True)
        payload = buffer.getvalue()

        # NOTE(review): 'resolution' reports the pre-optimization size even
        # when _optimize_for_linkedin resized the image — confirm intended.
        return {
            'image_data': payload,
            'format': format_name,
            'size': len(payload),
            'resolution': (width, height),
            'aspect_ratio': f"{width}:{height}"
        }

    except Exception as e:
        logger.error(f"Error processing generated image: {str(e)}")
        # Fall back to the unprocessed bytes so generation still succeeds.
        return {
            'image_data': image_data,
            'format': 'PNG',
            'size': len(image_data),
            'resolution': (1024, 1024),
            'aspect_ratio': aspect_ratio
        }
|
||||
|
||||
def _optimize_for_linkedin(self, image: Image.Image, content_context: Dict[str, Any]) -> Image.Image:
    """Resize below-minimum images and flatten transparency for LinkedIn.

    Args:
        image: PIL Image object.
        content_context: LinkedIn content context (currently unused here).

    Returns:
        Optimized image, or the input unchanged on failure.
    """
    try:
        width, height = image.size
        min_w, min_h = self.min_resolution

        # Upscale to the minimum resolution, preserving the aspect ratio.
        if width < min_w or height < min_h:
            scale = max(min_w / width, min_h / height)
            new_width = int(width * scale)
            new_height = int(height * scale)
            image = image.resize((new_width, new_height), Image.Resampling.LANCZOS)
            logger.info(f"Resized image to {new_width}x{new_height} for LinkedIn optimization")

        # Flatten transparent/palette images onto white for JPEG safety.
        if image.mode in ('RGBA', 'LA', 'P'):
            background = Image.new('RGB', image.size, (255, 255, 255))
            if image.mode == 'P':
                image = image.convert('RGBA')
            background.paste(image, mask=image.split()[-1] if image.mode == 'RGBA' else None)
            image = background

        return image

    except Exception as e:
        logger.error(f"Error optimizing image for LinkedIn: {str(e)}")
        return image  # return original if optimization fails
|
||||
|
||||
async def validate_image_for_linkedin(self, image_data: bytes) -> Dict[str, Any]:
    """Score an image against LinkedIn compliance and quality checks.

    Args:
        image_data: Image bytes to validate.

    Returns:
        Dict of boolean checks plus 'overall_score' (0-100); on failure
        every check is False, score is 0 and 'error' holds the reason.
    """
    try:
        image = Image.open(BytesIO(image_data))
        width, height = image.size

        checks = {
            'resolution_ok': width >= self.min_resolution[0] and height >= self.min_resolution[1],
            'aspect_ratio_suitable': self._is_aspect_ratio_suitable(width, height),
            'file_size_ok': len(image_data) <= self.max_file_size_mb * 1024 * 1024,
            'format_supported': image.format in self.supported_formats,
            'professional_aesthetic': True,  # placeholder for future AI-based validation
        }

        # Weighted score: resolution/aspect 25 each, size/format 20 each,
        # aesthetics 10 — totals 100 when everything passes.
        weights = {
            'resolution_ok': 25,
            'aspect_ratio_suitable': 25,
            'file_size_ok': 20,
            'format_supported': 20,
            'professional_aesthetic': 10,
        }
        checks['overall_score'] = sum(points for key, points in weights.items() if checks[key])

        return checks

    except Exception as e:
        logger.error(f"Error validating image: {str(e)}")
        return {
            'resolution_ok': False,
            'aspect_ratio_suitable': False,
            'file_size_ok': False,
            'format_supported': False,
            'professional_aesthetic': False,
            'overall_score': 0,
            'error': str(e)
        }
|
||||
|
||||
def _is_aspect_ratio_suitable(self, width: int, height: int) -> bool:
|
||||
"""
|
||||
Check if image aspect ratio is suitable for LinkedIn.
|
||||
|
||||
Args:
|
||||
width: Image width
|
||||
height: Image height
|
||||
|
||||
Returns:
|
||||
True if aspect ratio is suitable for LinkedIn
|
||||
"""
|
||||
ratio = width / height
|
||||
|
||||
# LinkedIn-optimized aspect ratios
|
||||
suitable_ratios = [
|
||||
(0.9, 1.1), # 1:1 (square)
|
||||
(1.6, 1.8), # 16:9 (landscape)
|
||||
(0.7, 0.8), # 4:3 (portrait)
|
||||
(1.2, 1.4), # 5:4 (landscape)
|
||||
]
|
||||
|
||||
for min_ratio, max_ratio in suitable_ratios:
|
||||
if min_ratio <= ratio <= max_ratio:
|
||||
return True
|
||||
|
||||
return False
|
||||
@@ -0,0 +1,536 @@
|
||||
"""
|
||||
LinkedIn Image Storage Service
|
||||
|
||||
This service handles image storage, retrieval, and management for LinkedIn image generation.
|
||||
It provides secure storage, efficient retrieval, and metadata management for generated images.
|
||||
"""
|
||||
|
||||
import os
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Dict, Any, Optional, List, Tuple
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
from io import BytesIO
|
||||
from loguru import logger
|
||||
|
||||
# Import existing infrastructure
|
||||
from ...onboarding.api_key_manager import APIKeyManager
|
||||
|
||||
|
||||
class LinkedInImageStorage:
    """Stores and manages LinkedIn generated images.

    Provides secure storage, efficient retrieval, metadata management, and
    cleanup functionality for LinkedIn image generation.
    """

    def __init__(self, storage_path: Optional[str] = None, api_key_manager: Optional[APIKeyManager] = None):
        """Initialize the storage service.

        Args:
            storage_path: Base directory for image storage; defaults to a
                'linkedin_images' directory at the project root.
            api_key_manager: API key manager for authentication.
        """
        self.api_key_manager = api_key_manager or APIKeyManager()

        # Resolve the base storage location.
        if storage_path:
            self.base_storage_path = Path(storage_path)
        else:
            # Project-relative default: four levels up from this module.
            self.base_storage_path = Path(__file__).parent.parent.parent.parent / "linkedin_images"

        # Sub-paths for images, metadata sidecars, and temporary files.
        self.images_path = self.base_storage_path / "images"
        self.metadata_path = self.base_storage_path / "metadata"
        self.temp_path = self.base_storage_path / "temp"

        # Ensure the directory tree exists up front.
        self._create_storage_directories()

        # Storage policy limits.
        self.max_storage_size_gb = 10   # maximum total storage, GB
        self.image_retention_days = 30  # days to keep images
        self.max_image_size_mb = 10     # maximum individual image size, MB

        logger.info(f"LinkedIn Image Storage initialized at {self.base_storage_path}")
|
||||
|
||||
def _create_storage_directories(self):
|
||||
"""Create necessary storage directories."""
|
||||
try:
|
||||
self.images_path.mkdir(parents=True, exist_ok=True)
|
||||
self.metadata_path.mkdir(parents=True, exist_ok=True)
|
||||
self.temp_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Create subdirectories for organization
|
||||
(self.images_path / "posts").mkdir(exist_ok=True)
|
||||
(self.images_path / "articles").mkdir(exist_ok=True)
|
||||
(self.images_path / "carousels").mkdir(exist_ok=True)
|
||||
(self.images_path / "video_scripts").mkdir(exist_ok=True)
|
||||
|
||||
logger.info("Storage directories created successfully")
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error creating storage directories: {str(e)}")
|
||||
raise
|
||||
|
||||
async def store_image(
    self,
    image_data: bytes,
    metadata: Dict[str, Any],
    content_type: str = "post"
) -> Dict[str, Any]:
    """Store a generated image together with its metadata.

    Args:
        image_data: Image bytes to persist.
        metadata: Image metadata and generation context.
        content_type: LinkedIn content type (post, article, carousel,
            video_script); selects the storage subdirectory.

    Returns:
        Dict with 'success', the generated 'image_id' and storage details,
        or an 'error' message.
    """
    try:
        start_time = datetime.now()

        # Content-derived unique ID for the image + its metadata.
        image_id = self._generate_image_id(image_data, metadata)

        # Reject invalid images before touching the filesystem.
        validation_result = await self._validate_image_for_storage(image_data)
        if not validation_result['valid']:
            return {
                'success': False,
                'error': f"Image validation failed: {validation_result['error']}"
            }

        # Storage location depends on the LinkedIn content type.
        storage_path = self._get_storage_path(content_type, image_id)

        if not await self._store_image_file(image_data, storage_path):
            return {
                'success': False,
                'error': 'Failed to store image file'
            }

        if not await self._store_metadata(image_id, metadata, storage_path):
            # Roll back the orphaned image file so storage stays consistent.
            await self._cleanup_failed_storage(storage_path)
            return {
                'success': False,
                'error': 'Failed to store image metadata'
            }

        await self._update_storage_stats()

        elapsed = (datetime.now() - start_time).total_seconds()

        return {
            'success': True,
            'image_id': image_id,
            'storage_path': str(storage_path),
            'metadata': {
                'stored_at': datetime.now().isoformat(),
                'storage_time': elapsed,
                'file_size': len(image_data),
                'content_type': content_type
            }
        }

    except Exception as e:
        logger.error(f"Error storing LinkedIn image: {str(e)}")
        return {
            'success': False,
            'error': f"Image storage failed: {str(e)}"
        }
|
||||
|
||||
async def retrieve_image(self, image_id: str) -> Dict[str, Any]:
    """Load a stored image and its metadata by ID.

    Args:
        image_id: Unique image identifier.

    Returns:
        Dict with 'success', 'image_data', 'metadata' and 'image_path',
        or an 'error' message when the image or metadata is missing.
    """
    try:
        image_path = await self._find_image_by_id(image_id)
        if not image_path:
            return {
                'success': False,
                'error': f'Image not found: {image_id}'
            }

        metadata = await self._load_metadata(image_id)
        if not metadata:
            return {
                'success': False,
                'error': f'Metadata not found for image: {image_id}'
            }

        # Image bytes are read only after both lookups succeed.
        image_data = image_path.read_bytes()

        return {
            'success': True,
            'image_data': image_data,
            'metadata': metadata,
            'image_path': str(image_path)
        }

    except Exception as e:
        logger.error(f"Error retrieving LinkedIn image {image_id}: {str(e)}")
        return {
            'success': False,
            'error': f"Image retrieval failed: {str(e)}"
        }
|
||||
|
||||
async def delete_image(self, image_id: str) -> Dict[str, Any]:
    """
    Delete a stored image and its metadata record.

    Args:
        image_id: Unique image identifier

    Returns:
        Dict with 'success' and either a confirmation 'message' or an
        'error' string. Never raises.
    """
    try:
        found_path = await self._find_image_by_id(image_id)
        if not found_path:
            return {
                'success': False,
                'error': f'Image not found: {image_id}'
            }

        # Remove the image file and its metadata JSON; each is skipped
        # silently if already gone.
        targets = (
            (found_path, 'image file'),
            (self.metadata_path / f"{image_id}.json", 'metadata file'),
        )
        for target, label in targets:
            if target.exists():
                target.unlink()
                logger.info(f"Deleted {label}: {target}")

        # Keep aggregate statistics in sync with the new state on disk.
        await self._update_storage_stats()

        return {
            'success': True,
            'message': f'Image {image_id} deleted successfully'
        }

    except Exception as e:
        logger.error(f"Error deleting LinkedIn image {image_id}: {str(e)}")
        return {
            'success': False,
            'error': f"Image deletion failed: {str(e)}"
        }
|
||||
|
||||
async def list_images(
    self,
    content_type: Optional[str] = None,
    limit: int = 50,
    offset: int = 0
) -> Dict[str, Any]:
    """
    List stored images with optional filtering and pagination.

    Args:
        content_type: Filter by content type (e.g. 'post', 'article')
        limit: Maximum number of images to return
        offset: Number of matching images to skip

    Returns:
        Dict with 'success', the page of 'images' (metadata dicts with
        'file_size' and 'last_modified' attached), 'total_count' (the
        TOTAL number of matching images, not the page size), 'limit'
        and 'offset'. Never raises.

    Fix: the original sliced the metadata file list BEFORE applying the
    content_type filter, so filtered pages could come back short (or
    empty) even when more matches existed, and 'total_count' was just
    the page length. We now filter first, then paginate, and sort the
    glob result so page boundaries are deterministic across calls.
    """
    try:
        matching: list = []

        # Collect ALL matching images first (sorted for stable paging).
        for metadata_file in sorted(self.metadata_path.glob("*.json")):
            try:
                with open(metadata_file, 'r') as f:
                    metadata = json.load(f)

                # Apply content type filter before any pagination.
                if content_type and metadata.get('content_type') != content_type:
                    continue

                # Skip records whose image file has disappeared.
                image_id = metadata_file.stem
                image_path = await self._find_image_by_id(image_id)
                if image_path and image_path.exists():
                    stat = image_path.stat()
                    metadata['file_size'] = stat.st_size
                    metadata['last_modified'] = datetime.fromtimestamp(stat.st_mtime).isoformat()
                    matching.append(metadata)

            except Exception as e:
                # A single corrupt metadata file must not abort the listing.
                logger.warning(f"Error reading metadata file {metadata_file}: {str(e)}")
                continue

        # Paginate the filtered result set.
        page = matching[offset:offset + limit]

        return {
            'success': True,
            'images': page,
            'total_count': len(matching),
            'limit': limit,
            'offset': offset
        }

    except Exception as e:
        logger.error(f"Error listing LinkedIn images: {str(e)}")
        return {
            'success': False,
            'error': f"Image listing failed: {str(e)}"
        }
|
||||
|
||||
async def cleanup_old_images(self, days_old: Optional[int] = None) -> Dict[str, Any]:
    """
    Clean up old images based on the retention policy.

    Scans every metadata JSON file, parses its 'stored_at' timestamp,
    and deletes (via delete_image) any image older than the cutoff.
    Records without a 'stored_at' field are left untouched.

    Args:
        days_old: Minimum age in days for cleanup (defaults to
            self.image_retention_days when None)

    Returns:
        Dict with 'success', 'deleted_count', a list of per-image
        'errors', and the ISO 'cutoff_date'. Never raises; per-file
        failures are logged and skipped.
    """
    try:
        if days_old is None:
            days_old = self.image_retention_days

        # Anything stored strictly before this instant is eligible.
        cutoff_date = datetime.now() - timedelta(days=days_old)
        deleted_count = 0
        errors = []

        # Scan metadata directory; metadata, not the image files,
        # drives the retention decision.
        metadata_files = list(self.metadata_path.glob("*.json"))

        for metadata_file in metadata_files:
            try:
                with open(metadata_file, 'r') as f:
                    metadata = json.load(f)

                # Check creation date; 'stored_at' is written by
                # _store_metadata as datetime.now().isoformat().
                created_at = metadata.get('stored_at')
                if created_at:
                    created_date = datetime.fromisoformat(created_at)
                    if created_date < cutoff_date:
                        # Delete old image (removes file + metadata and
                        # refreshes stats via delete_image).
                        image_id = metadata_file.stem
                        delete_result = await self.delete_image(image_id)

                        if delete_result['success']:
                            deleted_count += 1
                        else:
                            errors.append(f"Failed to delete {image_id}: {delete_result['error']}")

            except Exception as e:
                # One bad record (unreadable JSON, malformed timestamp)
                # must not stop the sweep.
                logger.warning(f"Error processing metadata file {metadata_file}: {str(e)}")
                continue

        return {
            'success': True,
            'deleted_count': deleted_count,
            'errors': errors,
            'cutoff_date': cutoff_date.isoformat()
        }

    except Exception as e:
        logger.error(f"Error cleaning up old LinkedIn images: {str(e)}")
        return {
            'success': False,
            'error': f"Cleanup failed: {str(e)}"
        }
|
||||
|
||||
async def get_storage_stats(self) -> Dict[str, Any]:
    """
    Summarize on-disk storage usage.

    Walks each content-type subdirectory under self.images_path,
    totalling file sizes and counts, and compares usage against the
    configured limit.

    Returns:
        Dict with 'success', byte/GB totals, 'total_files', per-type
        counts, the configured limit, whether it is exceeded, and the
        retention window. Never raises.
    """
    try:
        total_size = 0
        total_files = 0
        per_type_counts: Dict[str, int] = {}

        # Tally usage per content-type directory.
        for type_dir in self.images_path.iterdir():
            if not type_dir.is_dir():
                continue
            count = 0
            for entry in type_dir.glob("*"):
                if entry.is_file():
                    total_size += entry.stat().st_size
                    count += 1
            per_type_counts[type_dir.name] = count
            total_files += count

        # Compare against the configured quota (in GB).
        size_gb = total_size / (1024 ** 3)

        return {
            'success': True,
            'total_size_bytes': total_size,
            'total_size_gb': round(size_gb, 2),
            'total_files': total_files,
            'content_type_counts': per_type_counts,
            'storage_limit_gb': self.max_storage_size_gb,
            'storage_limit_exceeded': size_gb > self.max_storage_size_gb,
            'retention_days': self.image_retention_days
        }

    except Exception as e:
        logger.error(f"Error getting storage stats: {str(e)}")
        return {
            'success': False,
            'error': f"Failed to get storage stats: {str(e)}"
        }
|
||||
|
||||
def _generate_image_id(self, image_data: bytes, metadata: Dict[str, Any]) -> str:
|
||||
"""Generate unique image ID based on content and metadata."""
|
||||
# Create hash from image data and key metadata
|
||||
hash_input = f"{image_data[:1000]}{metadata.get('topic', '')}{metadata.get('industry', '')}{datetime.now().isoformat()}"
|
||||
return hashlib.sha256(hash_input.encode()).hexdigest()[:16]
|
||||
|
||||
async def _validate_image_for_storage(self, image_data: bytes) -> Dict[str, Any]:
|
||||
"""Validate image data before storage."""
|
||||
try:
|
||||
# Check file size
|
||||
if len(image_data) > self.max_image_size_mb * 1024 * 1024:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Image size {len(image_data) / (1024*1024):.2f}MB exceeds maximum {self.max_image_size_mb}MB'
|
||||
}
|
||||
|
||||
# Validate image format
|
||||
try:
|
||||
image = Image.open(BytesIO(image_data))
|
||||
if image.format not in ['PNG', 'JPEG', 'JPG']:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Unsupported image format: {image.format}'
|
||||
}
|
||||
except Exception as e:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Invalid image data: {str(e)}'
|
||||
}
|
||||
|
||||
return {'valid': True}
|
||||
|
||||
except Exception as e:
|
||||
return {
|
||||
'valid': False,
|
||||
'error': f'Validation error: {str(e)}'
|
||||
}
|
||||
|
||||
def _get_storage_path(self, content_type: str, image_id: str) -> Path:
|
||||
"""Get storage path for image based on content type."""
|
||||
# Map content types to directory names
|
||||
content_type_map = {
|
||||
'post': 'posts',
|
||||
'article': 'articles',
|
||||
'carousel': 'carousels',
|
||||
'video_script': 'video_scripts'
|
||||
}
|
||||
|
||||
directory = content_type_map.get(content_type, 'posts')
|
||||
return self.images_path / directory / f"{image_id}.png"
|
||||
|
||||
async def _store_image_file(self, image_data: bytes, storage_path: Path) -> bool:
    """
    Write raw image bytes to storage_path, creating parent directories.

    Returns:
        True on success; False (after logging) on any failure.
    """
    try:
        # Make sure the content-type subdirectory exists before writing.
        storage_path.parent.mkdir(parents=True, exist_ok=True)
        storage_path.write_bytes(image_data)
        logger.info(f"Stored image file: {storage_path}")
        return True
    except Exception as e:
        logger.error(f"Error storing image file: {str(e)}")
        return False
|
||||
|
||||
async def _store_metadata(self, image_id: str, metadata: Dict[str, Any], storage_path: Path) -> bool:
    """
    Persist image metadata as <metadata_path>/<image_id>.json.

    Note: mutates the caller's metadata dict in place, adding
    'image_id', 'storage_path', and 'stored_at'.

    Returns:
        True on success; False (after logging) on any failure.
    """
    try:
        # Stamp the record with storage provenance before writing.
        metadata.update(
            image_id=image_id,
            storage_path=str(storage_path),
            stored_at=datetime.now().isoformat(),
        )

        target = self.metadata_path / f"{image_id}.json"
        with open(target, 'w') as fh:
            # default=str lets non-JSON types (e.g. datetimes) serialize.
            json.dump(metadata, fh, indent=2, default=str)

        logger.info(f"Stored metadata: {target}")
        return True

    except Exception as e:
        logger.error(f"Error storing metadata: {str(e)}")
        return False
|
||||
|
||||
async def _find_image_by_id(self, image_id: str) -> Optional[Path]:
|
||||
"""Find image file by ID across all content type directories."""
|
||||
for content_dir in self.images_path.iterdir():
|
||||
if content_dir.is_dir():
|
||||
image_path = content_dir / f"{image_id}.png"
|
||||
if image_path.exists():
|
||||
return image_path
|
||||
|
||||
return None
|
||||
|
||||
async def _load_metadata(self, image_id: str) -> Optional[Dict[str, Any]]:
|
||||
"""Load metadata for image ID."""
|
||||
try:
|
||||
metadata_path = self.metadata_path / f"{image_id}.json"
|
||||
if metadata_path.exists():
|
||||
with open(metadata_path, 'r') as f:
|
||||
return json.load(f)
|
||||
except Exception as e:
|
||||
logger.error(f"Error loading metadata for {image_id}: {str(e)}")
|
||||
|
||||
return None
|
||||
|
||||
async def _cleanup_failed_storage(self, storage_path: Path):
    """
    Best-effort removal of a partially written image file.

    Used after a failed store operation; errors are logged, never raised.
    """
    try:
        if not storage_path.exists():
            return
        storage_path.unlink()
        logger.info(f"Cleaned up failed storage: {storage_path}")
    except Exception as e:
        logger.error(f"Error cleaning up failed storage: {str(e)}")
|
||||
|
||||
async def _update_storage_stats(self):
|
||||
"""Update storage statistics (placeholder for future implementation)."""
|
||||
# This could be implemented to track storage usage over time
|
||||
pass
|
||||
18
backend/services/linkedin/image_prompts/__init__.py
Normal file
18
backend/services/linkedin/image_prompts/__init__.py
Normal file
@@ -0,0 +1,18 @@
|
||||
"""
|
||||
LinkedIn Image Prompts Package
|
||||
|
||||
This package provides AI-powered image prompt generation for LinkedIn content
|
||||
using Google's Gemini API. It creates three distinct prompt styles optimized
|
||||
for professional business image generation.
|
||||
"""
|
||||
|
||||
from .linkedin_prompt_generator import LinkedInPromptGenerator
|
||||
|
||||
__all__ = [
|
||||
'LinkedInPromptGenerator'
|
||||
]
|
||||
|
||||
# Version information
|
||||
__version__ = "1.0.0"
|
||||
__author__ = "Alwrity Team"
|
||||
__description__ = "LinkedIn AI Image Prompt Generation Services"
|
||||
@@ -0,0 +1,812 @@
|
||||
"""
|
||||
LinkedIn Image Prompt Generator Service
|
||||
|
||||
This service generates AI-optimized image prompts for LinkedIn content using Gemini's
|
||||
capabilities. It creates three distinct prompt styles (professional, creative, industry-specific)
|
||||
following best practices for image generation.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
from typing import Dict, Any, List, Optional
|
||||
from datetime import datetime
|
||||
from loguru import logger
|
||||
|
||||
# Import existing infrastructure
|
||||
from ...onboarding.api_key_manager import APIKeyManager
|
||||
from ...llm_providers.gemini_provider import gemini_text_response
|
||||
|
||||
|
||||
class LinkedInPromptGenerator:
|
||||
"""
|
||||
Generates AI-optimized image prompts for LinkedIn content.
|
||||
|
||||
This service creates three distinct prompt styles following Gemini API best practices:
|
||||
1. Professional Style - Corporate aesthetics, clean lines, business colors
|
||||
2. Creative Style - Engaging visuals, vibrant colors, social media appeal
|
||||
3. Industry-Specific Style - Tailored to specific business sectors
|
||||
"""
|
||||
|
||||
def __init__(self, api_key_manager: Optional[APIKeyManager] = None):
    """
    Initialize the LinkedIn Prompt Generator.

    Args:
        api_key_manager: API key manager for Gemini authentication.
            NOTE: `or` means any falsy value (not only None) is replaced
            with a freshly constructed APIKeyManager.
    """
    self.api_key_manager = api_key_manager or APIKeyManager()
    # Gemini model identifier used for prompt-generation requests.
    self.model = "gemini-2.0-flash-exp"

    # Prompt generation configuration
    # Intended cap on generated prompt length; not referenced elsewhere
    # in this chunk — TODO confirm where/whether it is enforced.
    self.max_prompt_length = 500
    # Keyword hints describing each supported prompt style.
    self.style_variations = {
        'professional': 'corporate, clean, business, professional',
        'creative': 'engaging, vibrant, creative, social media',
        'industry_specific': 'industry-tailored, specialized, contextual'
    }

    logger.info("LinkedIn Prompt Generator initialized")
|
||||
|
||||
async def generate_three_prompts(
    self,
    linkedin_content: Dict[str, Any],
    aspect_ratio: str = "1:1"
) -> List[Dict[str, Any]]:
    """
    Generate three AI-optimized image prompts for LinkedIn content.

    Args:
        linkedin_content: LinkedIn content context (topic, industry,
            content_type, content)
        aspect_ratio: Desired image aspect ratio

    Returns:
        List of three prompt objects with style, prompt, and description.
        Never raises: on any failure the static fallback prompt set is
        returned instead.
    """
    try:
        start_time = datetime.now()
        logger.info(f"Generating image prompts for LinkedIn content: {linkedin_content.get('topic', 'Unknown')}")

        # Generate prompts using Gemini
        prompts = await self._generate_prompts_with_gemini(linkedin_content, aspect_ratio)

        # Fewer than 3 usable prompts counts as a failure: the whole
        # result is replaced by the fallback set (no partial padding).
        if not prompts or len(prompts) < 3:
            logger.warning("Gemini prompt generation failed, using fallback prompts")
            prompts = self._get_fallback_prompts(linkedin_content, aspect_ratio)

        # Ensure exactly 3 prompts
        prompts = prompts[:3]

        # Validate and enhance prompts (index i lets the enhancer vary
        # per-slot behavior).
        enhanced_prompts = []
        for i, prompt in enumerate(prompts):
            enhanced_prompt = self._enhance_prompt_for_linkedin(
                prompt, linkedin_content, aspect_ratio, i
            )
            enhanced_prompts.append(enhanced_prompt)

        generation_time = (datetime.now() - start_time).total_seconds()
        logger.info(f"Generated {len(enhanced_prompts)} image prompts in {generation_time:.2f}s")

        return enhanced_prompts

    except Exception as e:
        logger.error(f"Error generating LinkedIn image prompts: {str(e)}")
        # NOTE(review): this error path returns fallback prompts WITHOUT
        # running _enhance_prompt_for_linkedin, unlike the success path —
        # confirm the asymmetry is intended.
        return self._get_fallback_prompts(linkedin_content, aspect_ratio)
|
||||
|
||||
async def _generate_prompts_with_gemini(
    self,
    linkedin_content: Dict[str, Any],
    aspect_ratio: str
) -> List[Dict[str, Any]]:
    """
    Generate image prompts using Gemini AI.

    Args:
        linkedin_content: LinkedIn content context
        aspect_ratio: Image aspect ratio

    Returns:
        List of generated prompt dicts; an empty list on any failure so
        the caller can fall back to static prompts.
    """
    try:
        # Build the instruction prompt that asks Gemini for 3 styled
        # image prompts in JSON form.
        gemini_prompt = self._build_gemini_prompt(linkedin_content, aspect_ratio)

        # Generate response using Gemini.
        # NOTE(review): gemini_text_response is invoked synchronously
        # inside an async method — if it performs blocking network I/O
        # it will stall the event loop; confirm whether it should be
        # awaited or offloaded to a thread.
        response = gemini_text_response(
            prompt=gemini_prompt,
            temperature=0.7,
            top_p=0.8,
            n=1,
            max_tokens=1000,
            system_prompt="You are an expert AI image prompt engineer specializing in LinkedIn content optimization."
        )

        if not response:
            logger.warning("No response from Gemini prompt generation")
            return []

        # Parse Gemini response into structured prompts (JSON first,
        # manual line-based parsing as fallback).
        prompts = self._parse_gemini_response(response, linkedin_content)

        return prompts

    except Exception as e:
        logger.error(f"Error in Gemini prompt generation: {str(e)}")
        return []
|
||||
|
||||
def _build_gemini_prompt(
    self,
    linkedin_content: Dict[str, Any],
    aspect_ratio: str
) -> str:
    """
    Build the comprehensive instruction prompt sent to Gemini.

    The template asks for exactly three styled prompts (Professional,
    Creative, Industry-Specific) returned as a JSON array — the format
    _parse_gemini_response expects.

    Args:
        linkedin_content: LinkedIn content context
        aspect_ratio: Image aspect ratio

    Returns:
        Formatted prompt string (leading/trailing whitespace stripped).
    """
    # Defaults keep the template well-formed when context keys are missing.
    topic = linkedin_content.get('topic', 'business')
    industry = linkedin_content.get('industry', 'business')
    content_type = linkedin_content.get('content_type', 'post')
    content = linkedin_content.get('content', '')

    # Extract key content elements (themes, tone, audience, purpose)
    # so the template can ground the image prompts in the actual text.
    content_analysis = self._analyze_content_for_image_context(content, content_type)

    # Double braces ({{ }}) below are f-string escapes producing literal
    # braces in the JSON example.
    prompt = f"""
As an expert AI image prompt engineer specializing in LinkedIn content, generate 3 distinct image generation prompts for the following LinkedIn {content_type}:

TOPIC: {topic}
INDUSTRY: {industry}
CONTENT TYPE: {content_type}
ASPECT RATIO: {aspect_ratio}

GENERATED CONTENT:
{content}

CONTENT ANALYSIS:
- Key Themes: {content_analysis['key_themes']}
- Tone: {content_analysis['tone']}
- Visual Elements: {content_analysis['visual_elements']}
- Target Audience: {content_analysis['target_audience']}
- Content Purpose: {content_analysis['content_purpose']}

Generate exactly 3 image prompts that directly relate to and enhance the generated content above:

1. PROFESSIONAL STYLE:
- Corporate aesthetics with clean lines and geometric shapes
- Professional color palette (blues, grays, whites)
- Modern business environment or abstract business concepts
- Clean, minimalist design approach
- Suitable for B2B and professional networking
- MUST directly relate to the specific content themes and industry context above

2. CREATIVE STYLE:
- Eye-catching and engaging visual style
- Vibrant colors while maintaining professional appeal
- Creative composition that encourages social media engagement
- Modern design elements with business context
- Optimized for LinkedIn feed visibility
- MUST visually represent the key themes and messages from the content above

3. INDUSTRY-SPECIFIC STYLE:
- Tailored specifically to the {industry} industry
- Industry-relevant imagery, colors, and visual elements
- Professional yet creative approach
- Balanced design suitable for business audience
- Industry-specific symbolism and aesthetics
- MUST incorporate visual elements that directly support the content's industry context

Each prompt should:
- Be specific and detailed (50-100 words)
- Include visual composition guidance
- Specify color schemes and lighting
- Mention LinkedIn optimization
- Follow image generation best practices
- Be suitable for the {aspect_ratio} aspect ratio
- DIRECTLY reference and visualize the key themes, messages, and context from the generated content above
- Create images that would naturally accompany and enhance the specific LinkedIn content provided

Return the prompts in this exact JSON format:
[
    {{
        "style": "Professional",
        "prompt": "Detailed prompt description that directly relates to the content above...",
        "description": "Brief description of the visual style and how it relates to the content"
    }},
    {{
        "style": "Creative",
        "prompt": "Detailed prompt description that directly relates to the content above...",
        "description": "Brief description of the visual style and how it relates to the content"
    }},
    {{
        "style": "Industry-Specific",
        "prompt": "Detailed prompt description that directly relates to the content above...",
        "description": "Brief description of the visual style and how it relates to the content"
    }}
]

Focus on creating prompts that will generate high-quality, LinkedIn-optimized images that directly enhance and complement the specific content provided above.
"""

    return prompt.strip()
|
||||
|
||||
def _analyze_content_for_image_context(self, content: str, content_type: str) -> Dict[str, Any]:
    """
    Analyze generated LinkedIn content and extract image-generation context.

    Delegates to the keyword-based helpers for themes, tone, visual
    elements, audience, and purpose.

    Args:
        content: The generated LinkedIn content
        content_type: Type of content (post, article, carousel, etc.)

    Returns:
        Dict of analysis fields; on any error a generic business-themed
        fallback is returned instead of raising.
    """
    try:
        lowered = content.lower()
        return {
            'key_themes': ', '.join(self._extract_key_themes(lowered, content_type)),
            'tone': self._determine_content_tone(lowered),
            'visual_elements': ', '.join(self._identify_visual_elements(lowered, content_type)),
            'target_audience': self._determine_target_audience(lowered, content_type),
            'content_purpose': self._determine_content_purpose(lowered, content_type),
            'word_count': len(content.split()),
            'content_type': content_type,
        }

    except Exception as e:
        logger.error(f"Error analyzing content for image context: {str(e)}")
        # Generic fallback so prompt building can still proceed.
        return {
            'key_themes': 'business, professional',
            'tone': 'professional',
            'visual_elements': 'business concepts',
            'target_audience': 'professionals',
            'content_purpose': 'informational',
            'word_count': len(content.split()) if content else 0,
            'content_type': content_type,
        }
|
||||
|
||||
def _extract_key_themes(self, content_lower: str, content_type: str) -> List[str]:
|
||||
"""Extract key themes from the content for image generation context."""
|
||||
themes = []
|
||||
|
||||
# Industry and business themes
|
||||
if any(word in content_lower for word in ['ai', 'artificial intelligence', 'machine learning']):
|
||||
themes.append('AI & Technology')
|
||||
if any(word in content_lower for word in ['marketing', 'branding', 'advertising']):
|
||||
themes.append('Marketing & Branding')
|
||||
if any(word in content_lower for word in ['leadership', 'management', 'strategy']):
|
||||
themes.append('Leadership & Strategy')
|
||||
if any(word in content_lower for word in ['innovation', 'growth', 'transformation']):
|
||||
themes.append('Innovation & Growth')
|
||||
if any(word in content_lower for word in ['data', 'analytics', 'insights']):
|
||||
themes.append('Data & Analytics')
|
||||
if any(word in content_lower for word in ['customer', 'user experience', 'engagement']):
|
||||
themes.append('Customer Experience')
|
||||
if any(word in content_lower for word in ['team', 'collaboration', 'workplace']):
|
||||
themes.append('Team & Collaboration')
|
||||
if any(word in content_lower for word in ['sustainability', 'environmental', 'green']):
|
||||
themes.append('Sustainability')
|
||||
if any(word in content_lower for word in ['finance', 'investment', 'economy']):
|
||||
themes.append('Finance & Economy')
|
||||
if any(word in content_lower for word in ['healthcare', 'medical', 'wellness']):
|
||||
themes.append('Healthcare & Wellness')
|
||||
|
||||
# Content type specific themes
|
||||
if content_type == 'post':
|
||||
if any(word in content_lower for word in ['tip', 'advice', 'insight']):
|
||||
themes.append('Tips & Advice')
|
||||
if any(word in content_lower for word in ['story', 'experience', 'journey']):
|
||||
themes.append('Personal Story')
|
||||
if any(word in content_lower for word in ['trend', 'future', 'prediction']):
|
||||
themes.append('Trends & Future')
|
||||
|
||||
elif content_type == 'article':
|
||||
if any(word in content_lower for word in ['research', 'study', 'analysis']):
|
||||
themes.append('Research & Analysis')
|
||||
if any(word in content_lower for word in ['case study', 'example', 'success']):
|
||||
themes.append('Case Studies')
|
||||
if any(word in content_lower for word in ['guide', 'tutorial', 'how-to']):
|
||||
themes.append('Educational Content')
|
||||
|
||||
elif content_type == 'carousel':
|
||||
if any(word in content_lower for word in ['steps', 'process', 'framework']):
|
||||
themes.append('Process & Framework')
|
||||
if any(word in content_lower for word in ['comparison', 'vs', 'difference']):
|
||||
themes.append('Comparison & Analysis')
|
||||
if any(word in content_lower for word in ['checklist', 'tips', 'best practices']):
|
||||
themes.append('Checklists & Best Practices')
|
||||
|
||||
# Default theme if none identified
|
||||
if not themes:
|
||||
themes.append('Business & Professional')
|
||||
|
||||
return themes[:3] # Limit to top 3 themes
|
||||
|
||||
def _determine_content_tone(self, content_lower: str) -> str:
|
||||
"""Determine the tone of the content for appropriate image styling."""
|
||||
if any(word in content_lower for word in ['excited', 'amazing', 'incredible', 'revolutionary']):
|
||||
return 'Enthusiastic & Dynamic'
|
||||
elif any(word in content_lower for word in ['challenge', 'problem', 'issue', 'difficult']):
|
||||
return 'Thoughtful & Analytical'
|
||||
elif any(word in content_lower for word in ['success', 'achievement', 'win', 'victory']):
|
||||
return 'Celebratory & Positive'
|
||||
elif any(word in content_lower for word in ['guide', 'tutorial', 'how-to', 'steps']):
|
||||
return 'Educational & Helpful'
|
||||
elif any(word in content_lower for word in ['trend', 'future', 'prediction', 'forecast']):
|
||||
return 'Forward-looking & Innovative'
|
||||
else:
|
||||
return 'Professional & Informative'
|
||||
|
||||
def _identify_visual_elements(self, content_lower: str, content_type: str) -> List[str]:
|
||||
"""Identify visual elements that could be represented in images."""
|
||||
visual_elements = []
|
||||
|
||||
# Technology and digital elements
|
||||
if any(word in content_lower for word in ['ai', 'robot', 'computer', 'digital']):
|
||||
visual_elements.extend(['Digital interfaces', 'Technology symbols', 'Abstract tech patterns'])
|
||||
|
||||
# Business and professional elements
|
||||
if any(word in content_lower for word in ['business', 'corporate', 'office', 'meeting']):
|
||||
visual_elements.extend(['Business environments', 'Professional settings', 'Corporate aesthetics'])
|
||||
|
||||
# Growth and progress elements
|
||||
if any(word in content_lower for word in ['growth', 'progress', 'improvement', 'success']):
|
||||
visual_elements.extend(['Growth charts', 'Progress indicators', 'Success symbols'])
|
||||
|
||||
# Data and analytics elements
|
||||
if any(word in content_lower for word in ['data', 'analytics', 'charts', 'metrics']):
|
||||
visual_elements.extend(['Data visualizations', 'Charts and graphs', 'Analytics dashboards'])
|
||||
|
||||
# Team and collaboration elements
|
||||
if any(word in content_lower for word in ['team', 'collaboration', 'partnership', 'network']):
|
||||
visual_elements.extend(['Team dynamics', 'Collaboration symbols', 'Network connections'])
|
||||
|
||||
# Industry-specific elements
|
||||
if 'healthcare' in content_lower:
|
||||
visual_elements.extend(['Medical symbols', 'Healthcare imagery', 'Wellness elements'])
|
||||
elif 'finance' in content_lower:
|
||||
visual_elements.extend(['Financial symbols', 'Money concepts', 'Investment imagery'])
|
||||
elif 'education' in content_lower:
|
||||
visual_elements.extend(['Learning symbols', 'Educational elements', 'Knowledge imagery'])
|
||||
|
||||
# Default visual elements
|
||||
if not visual_elements:
|
||||
visual_elements = ['Professional business concepts', 'Modern design elements', 'Corporate aesthetics']
|
||||
|
||||
return visual_elements[:4] # Limit to top 4 elements
|
||||
|
||||
def _determine_target_audience(self, content_lower: str, content_type: str) -> str:
|
||||
"""Determine the target audience for the content."""
|
||||
if any(word in content_lower for word in ['ceo', 'executive', 'leader', 'manager']):
|
||||
return 'C-Suite & Executives'
|
||||
elif any(word in content_lower for word in ['entrepreneur', 'startup', 'founder', 'business owner']):
|
||||
return 'Entrepreneurs & Business Owners'
|
||||
elif any(word in content_lower for word in ['marketer', 'sales', 'business development']):
|
||||
return 'Marketing & Sales Professionals'
|
||||
elif any(word in content_lower for word in ['developer', 'engineer', 'technical', 'it']):
|
||||
return 'Technical Professionals'
|
||||
elif any(word in content_lower for word in ['student', 'learner', 'aspiring', 'career']):
|
||||
return 'Students & Career Changers'
|
||||
else:
|
||||
return 'General Business Professionals'
|
||||
|
||||
def _determine_content_purpose(self, content_lower: str, content_type: str) -> str:
|
||||
"""Determine the primary purpose of the content."""
|
||||
if any(word in content_lower for word in ['tip', 'advice', 'how-to', 'guide']):
|
||||
return 'Educational & Instructional'
|
||||
elif any(word in content_lower for word in ['story', 'experience', 'journey', 'case study']):
|
||||
return 'Storytelling & Experience Sharing'
|
||||
elif any(word in content_lower for word in ['trend', 'prediction', 'future', 'insight']):
|
||||
return 'Trend Analysis & Forecasting'
|
||||
elif any(word in content_lower for word in ['challenge', 'problem', 'solution', 'strategy']):
|
||||
return 'Problem Solving & Strategy'
|
||||
elif any(word in content_lower for word in ['success', 'achievement', 'result', 'outcome']):
|
||||
return 'Success Showcase & Results'
|
||||
else:
|
||||
return 'Informational & Awareness'
|
||||
|
||||
def _parse_gemini_response(
|
||||
self,
|
||||
response: str,
|
||||
linkedin_content: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Parse Gemini response into structured prompt objects.
|
||||
|
||||
Args:
|
||||
response: Raw response from Gemini
|
||||
linkedin_content: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
List of parsed prompt objects
|
||||
"""
|
||||
try:
|
||||
# Try to extract JSON from response
|
||||
import json
|
||||
import re
|
||||
|
||||
# Look for JSON array in the response
|
||||
json_match = re.search(r'\[.*\]', response, re.DOTALL)
|
||||
if json_match:
|
||||
json_str = json_match.group(0)
|
||||
prompts = json.loads(json_str)
|
||||
|
||||
# Validate prompt structure
|
||||
if isinstance(prompts, list) and len(prompts) >= 3:
|
||||
return prompts[:3]
|
||||
|
||||
# Fallback: parse response manually
|
||||
return self._parse_response_manually(response, linkedin_content)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error parsing Gemini response: {str(e)}")
|
||||
return self._parse_response_manually(response, linkedin_content)
|
||||
|
||||
def _parse_response_manually(
|
||||
self,
|
||||
response: str,
|
||||
linkedin_content: Dict[str, Any]
|
||||
) -> List[Dict[str, Any]]:
|
||||
"""
|
||||
Manually parse response if JSON parsing fails.
|
||||
|
||||
Args:
|
||||
response: Raw response from Gemini
|
||||
linkedin_content: LinkedIn content context
|
||||
|
||||
Returns:
|
||||
List of parsed prompt objects
|
||||
"""
|
||||
try:
|
||||
prompts = []
|
||||
lines = response.split('\n')
|
||||
|
||||
current_style = None
|
||||
current_prompt = []
|
||||
current_description = None
|
||||
|
||||
for line in lines:
|
||||
line = line.strip()
|
||||
|
||||
if 'professional' in line.lower() and 'style' in line.lower():
|
||||
if current_style and current_prompt:
|
||||
prompts.append({
|
||||
'style': current_style,
|
||||
'prompt': ' '.join(current_prompt),
|
||||
'description': current_description or f'{current_style} style for LinkedIn'
|
||||
})
|
||||
current_style = 'Professional'
|
||||
current_prompt = []
|
||||
current_description = None
|
||||
|
||||
elif 'creative' in line.lower() and 'style' in line.lower():
|
||||
if current_style and current_prompt:
|
||||
prompts.append({
|
||||
'style': current_style,
|
||||
'prompt': ' '.join(current_prompt),
|
||||
'description': current_description or f'{current_style} style for LinkedIn'
|
||||
})
|
||||
current_style = 'Creative'
|
||||
current_prompt = []
|
||||
current_description = None
|
||||
|
||||
elif 'industry' in line.lower() and 'specific' in line.lower():
|
||||
if current_style and current_prompt:
|
||||
prompts.append({
|
||||
'style': current_style,
|
||||
'prompt': ' '.join(current_prompt),
|
||||
'description': current_description or f'{current_style} style for LinkedIn'
|
||||
})
|
||||
current_style = 'Industry-Specific'
|
||||
current_prompt = []
|
||||
current_description = None
|
||||
|
||||
elif line and not line.startswith('-') and current_style:
|
||||
current_prompt.append(line)
|
||||
|
||||
elif line.startswith('description:') and current_style:
|
||||
current_description = line.replace('description:', '').strip()
|
||||
|
||||
# Add the last prompt
|
||||
if current_style and current_prompt:
|
||||
prompts.append({
|
||||
'style': current_style,
|
||||
'prompt': ' '.join(current_prompt),
|
||||
'description': current_description or f'{current_style} style for LinkedIn'
|
||||
})
|
||||
|
||||
# Ensure we have exactly 3 prompts
|
||||
while len(prompts) < 3:
|
||||
style_name = ['Professional', 'Creative', 'Industry-Specific'][len(prompts)]
|
||||
prompts.append({
|
||||
'style': style_name,
|
||||
'prompt': f"Create a {style_name.lower()} LinkedIn image for {linkedin_content.get('topic', 'business')}",
|
||||
'description': f'{style_name} style for LinkedIn content'
|
||||
})
|
||||
|
||||
return prompts[:3]
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error in manual response parsing: {str(e)}")
|
||||
return self._get_fallback_prompts(linkedin_content, "1:1")
|
||||
|
||||
def _enhance_prompt_for_linkedin(
|
||||
self,
|
||||
prompt: Dict[str, Any],
|
||||
linkedin_content: Dict[str, Any],
|
||||
aspect_ratio: str,
|
||||
prompt_index: int
|
||||
) -> Dict[str, Any]:
|
||||
"""
|
||||
Enhance individual prompt with LinkedIn-specific optimizations.
|
||||
|
||||
Args:
|
||||
prompt: Individual prompt object
|
||||
linkedin_content: LinkedIn content context
|
||||
aspect_ratio: Image aspect ratio
|
||||
prompt_index: Index of the prompt (0-2)
|
||||
|
||||
Returns:
|
||||
Enhanced prompt object
|
||||
"""
|
||||
try:
|
||||
topic = linkedin_content.get('topic', 'business')
|
||||
industry = linkedin_content.get('industry', 'business')
|
||||
content_type = linkedin_content.get('content_type', 'post')
|
||||
|
||||
# Get the base prompt text
|
||||
base_prompt = prompt.get('prompt', '')
|
||||
style = prompt.get('style', 'Professional')
|
||||
|
||||
# LinkedIn-specific enhancements based on style
|
||||
if style == 'Professional':
|
||||
enhancements = [
|
||||
f"Professional LinkedIn {content_type} image for {topic}",
|
||||
"Corporate aesthetics with clean lines and geometric shapes",
|
||||
"Professional color palette (blues, grays, whites)",
|
||||
"Modern business environment or abstract business concepts",
|
||||
f"Aspect ratio: {aspect_ratio}",
|
||||
"Mobile-optimized for LinkedIn feed viewing",
|
||||
"High-quality, professional business aesthetic"
|
||||
]
|
||||
elif style == 'Creative':
|
||||
enhancements = [
|
||||
f"Creative LinkedIn {content_type} image for {topic}",
|
||||
"Eye-catching and engaging visual style",
|
||||
"Vibrant colors while maintaining professional appeal",
|
||||
"Creative composition that encourages social media engagement",
|
||||
f"Aspect ratio: {aspect_ratio}",
|
||||
"Optimized for LinkedIn feed visibility and sharing",
|
||||
"Modern design elements with business context"
|
||||
]
|
||||
else: # Industry-Specific
|
||||
enhancements = [
|
||||
f"{industry} industry-specific LinkedIn {content_type} image for {topic}",
|
||||
f"Industry-relevant imagery and colors for {industry}",
|
||||
"Professional yet creative approach",
|
||||
"Balanced design suitable for business audience",
|
||||
f"Aspect ratio: {aspect_ratio}",
|
||||
f"Industry-specific symbolism and {industry} aesthetics",
|
||||
"Professional business appeal for LinkedIn"
|
||||
]
|
||||
|
||||
# Combine base prompt with enhancements
|
||||
enhanced_prompt_text = f"{base_prompt}\n\n"
|
||||
enhanced_prompt_text += "\n".join(enhancements)
|
||||
|
||||
# Ensure prompt length is within limits
|
||||
if len(enhanced_prompt_text) > self.max_prompt_length:
|
||||
enhanced_prompt_text = enhanced_prompt_text[:self.max_prompt_length] + "..."
|
||||
|
||||
return {
|
||||
'style': style,
|
||||
'prompt': enhanced_prompt_text,
|
||||
'description': prompt.get('description', f'{style} style for LinkedIn'),
|
||||
'prompt_index': prompt_index,
|
||||
'enhanced_at': datetime.now().isoformat(),
|
||||
'linkedin_optimized': True
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Error enhancing prompt: {str(e)}")
|
||||
return prompt
|
||||
|
||||
def _get_fallback_prompts(
    self,
    linkedin_content: Dict[str, Any],
    aspect_ratio: str
) -> List[Dict[str, Any]]:
    """
    Generate fallback prompts if AI generation fails.

    Builds three deterministic, template-based prompts (Professional,
    Creative, Industry-Specific) from the content context so image
    generation can still proceed when Gemini prompt generation errors out.
    Each prompt object is tagged with 'fallback': True and carries the
    content analysis under 'content_context'.

    Args:
        linkedin_content: LinkedIn content context
        aspect_ratio: Image aspect ratio

    Returns:
        List of fallback prompt objects
    """
    # Pull core context fields, defaulting to generic business values.
    topic = linkedin_content.get('topic', 'business')
    industry = linkedin_content.get('industry', 'business')
    content_type = linkedin_content.get('content_type', 'post')
    content = linkedin_content.get('content', '')

    # Analyze content for better context
    # NOTE(review): assumes the analysis dict exposes 'key_themes', 'tone',
    # 'visual_elements', 'content_purpose' and 'target_audience' keys — all
    # are interpolated into the templates below; confirm against
    # _analyze_content_for_image_context.
    content_analysis = self._analyze_content_for_image_context(content, content_type)

    # Create context-aware fallback prompts
    fallback_prompts = [
        {
            'style': 'Professional',
            'prompt': f"""Create a professional LinkedIn {content_type} image for {topic} in the {industry} industry.

Key Content Themes: {content_analysis['key_themes']}
Content Tone: {content_analysis['tone']}
Visual Elements: {content_analysis['visual_elements']}

Corporate aesthetics with clean lines and geometric shapes
Professional color palette (blues, grays, whites)
Modern business environment or abstract business concepts
Aspect ratio: {aspect_ratio}
Mobile-optimized for LinkedIn feed viewing
High-quality, professional business aesthetic
Directly represents the content themes: {content_analysis['key_themes']}""",
            'description': f'Clean, business-appropriate visual for LinkedIn {content_type} about {topic}',
            'prompt_index': 0,
            # Marks this prompt as template-generated rather than AI-generated.
            'fallback': True,
            'content_context': content_analysis
        },
        {
            'style': 'Creative',
            'prompt': f"""Generate a creative LinkedIn {content_type} image for {topic} in {industry}.

Key Content Themes: {content_analysis['key_themes']}
Content Purpose: {content_analysis['content_purpose']}
Target Audience: {content_analysis['target_audience']}

Eye-catching and engaging visual style
Vibrant colors while maintaining professional appeal
Creative composition that encourages social media engagement
Aspect ratio: {aspect_ratio}
Optimized for LinkedIn feed visibility and sharing
Modern design elements with business context
Visually represents: {content_analysis['visual_elements']}""",
            'description': f'Eye-catching, shareable design for LinkedIn {content_type} about {topic}',
            'prompt_index': 1,
            'fallback': True,
            'content_context': content_analysis
        },
        {
            'style': 'Industry-Specific',
            'prompt': f"""Design a {industry} industry-specific LinkedIn {content_type} image for {topic}.

Key Content Themes: {content_analysis['key_themes']}
Content Tone: {content_analysis['tone']}
Visual Elements: {content_analysis['visual_elements']}

Industry-relevant imagery and colors for {industry}
Professional yet creative approach
Balanced design suitable for business audience
Aspect ratio: {aspect_ratio}
Industry-specific symbolism and {industry} aesthetics
Professional business appeal for LinkedIn
Incorporates visual elements: {content_analysis['visual_elements']}""",
            'description': f'Industry-tailored professional design for {industry} {content_type} about {topic}',
            'prompt_index': 2,
            'fallback': True,
            'content_context': content_analysis
        }
    ]

    logger.info(f"Using context-aware fallback prompts for LinkedIn {content_type} about {topic}")
    return fallback_prompts
|
||||
|
||||
async def validate_prompt_quality(
    self,
    prompt: Dict[str, Any]
) -> Dict[str, Any]:
    """
    Validate the quality of a generated prompt.

    Scores the prompt text on three axes — length, specificity, and
    LinkedIn optimization — averages them, and flags the prompt valid
    when the average reaches 0.7.

    Args:
        prompt: Prompt object to validate

    Returns:
        Validation results dict with 'valid', 'overall_score', 'metrics'
        and 'recommendations' keys (or 'error' on failure).
    """
    try:
        text = prompt.get('prompt', '')

        # Length score saturates at 1.0 around 100 characters.
        length_score = min(len(text) / 100, 1.0)
        specificity = self._calculate_specificity_score(text)
        linkedin_score = self._calculate_linkedin_optimization_score(text)

        # Unweighted mean of the three axes.
        overall = (length_score + specificity + linkedin_score) / 3

        return {
            'valid': overall >= 0.7,
            'overall_score': round(overall, 2),
            'metrics': {
                'length_score': round(length_score, 2),
                'specificity_score': round(specificity, 2),
                'linkedin_optimization_score': round(linkedin_score, 2)
            },
            'recommendations': self._get_quality_recommendations(overall, text)
        }

    except Exception as e:
        logger.error(f"Error validating prompt quality: {str(e)}")
        return {
            'valid': False,
            'overall_score': 0.0,
            'error': str(e)
        }
|
||||
|
||||
def _calculate_specificity_score(self, prompt_text: str) -> float:
|
||||
"""Calculate how specific and detailed the prompt is."""
|
||||
# Count specific visual elements, colors, styles mentioned
|
||||
specific_elements = [
|
||||
'wide-angle', 'close-up', 'low-angle', 'aerial',
|
||||
'blue', 'gray', 'white', 'red', 'green', 'yellow',
|
||||
'modern', 'minimalist', 'corporate', 'professional',
|
||||
'geometric', 'clean lines', 'sharp focus', 'soft lighting'
|
||||
]
|
||||
|
||||
element_count = sum(1 for element in specific_elements if element.lower() in prompt_text.lower())
|
||||
return min(element_count / 8, 1.0) # Normalize to 0-1
|
||||
|
||||
def _calculate_linkedin_optimization_score(self, prompt_text: str) -> float:
|
||||
"""Calculate how well the prompt is optimized for LinkedIn."""
|
||||
linkedin_keywords = [
|
||||
'linkedin', 'professional', 'business', 'corporate',
|
||||
'mobile', 'feed', 'social media', 'engagement',
|
||||
'networking', 'professional audience'
|
||||
]
|
||||
|
||||
keyword_count = sum(1 for keyword in linkedin_keywords if keyword.lower() in prompt_text.lower())
|
||||
return min(keyword_count / 5, 1.0) # Normalize to 0-1
|
||||
|
||||
def _get_quality_recommendations(self, score: float, prompt_text: str) -> List[str]:
|
||||
"""Get recommendations for improving prompt quality."""
|
||||
recommendations = []
|
||||
|
||||
if score < 0.7:
|
||||
if len(prompt_text) < 100:
|
||||
recommendations.append("Add more specific visual details and composition guidance")
|
||||
|
||||
if 'linkedin' not in prompt_text.lower():
|
||||
recommendations.append("Include LinkedIn-specific optimization terms")
|
||||
|
||||
if 'aspect ratio' not in prompt_text.lower():
|
||||
recommendations.append("Specify the desired aspect ratio")
|
||||
|
||||
if 'professional' not in prompt_text.lower() and 'business' not in prompt_text.lower():
|
||||
recommendations.append("Include professional business aesthetic guidance")
|
||||
|
||||
return recommendations
|
||||
62
backend/services/linkedin/quality_handler.py
Normal file
62
backend/services/linkedin/quality_handler.py
Normal file
@@ -0,0 +1,62 @@
|
||||
"""
|
||||
Quality Handler for LinkedIn Content Generation
|
||||
|
||||
Handles content quality analysis and metrics conversion.
|
||||
"""
|
||||
|
||||
from typing import Dict, Any, Optional
|
||||
from models.linkedin_models import ContentQualityMetrics
|
||||
from loguru import logger
|
||||
|
||||
|
||||
class QualityHandler:
    """Handles content quality analysis and metrics conversion."""

    def __init__(self, quality_analyzer=None):
        # The analyzer is optional; without one no metrics are produced.
        self.quality_analyzer = quality_analyzer

    def create_quality_metrics(
        self,
        content: str,
        sources: list,
        industry: str,
        grounding_enabled: bool = False
    ) -> Optional[ContentQualityMetrics]:
        """
        Create ContentQualityMetrics object from quality analysis.

        Args:
            content: Content to analyze
            sources: Research sources used
            industry: Target industry
            grounding_enabled: Whether grounding was used

        Returns:
            ContentQualityMetrics object, or None when grounding is off,
            no analyzer is configured, or the analysis fails.
        """
        # Quality metrics only apply to grounded content with an analyzer.
        if not (grounding_enabled and self.quality_analyzer):
            return None

        try:
            analysis = self.quality_analyzer.analyze_content_quality(
                content=content,
                sources=sources,
                industry=industry
            )

            # Map the raw analysis dict onto the typed metrics model;
            # per-dimension scores live under the nested 'metrics' key.
            scores = analysis.get('metrics', {})
            return ContentQualityMetrics(
                overall_score=analysis.get('overall_score', 0.0),
                factual_accuracy=scores.get('factual_accuracy', 0.0),
                source_verification=scores.get('source_verification', 0.0),
                professional_tone=scores.get('professional_tone', 0.0),
                industry_relevance=scores.get('industry_relevance', 0.0),
                citation_coverage=scores.get('citation_coverage', 0.0),
                content_length=analysis.get('content_length', 0),
                word_count=analysis.get('word_count', 0),
                analysis_timestamp=analysis.get('analysis_timestamp', ''),
                recommendations=analysis.get('recommendations', [])
            )
        except Exception as e:
            logger.warning(f"Quality metrics creation failed: {e}")
            return None
|
||||
76
backend/services/linkedin/research_handler.py
Normal file
76
backend/services/linkedin/research_handler.py
Normal file
@@ -0,0 +1,76 @@
|
||||
"""
|
||||
Research Handler for LinkedIn Content Generation
|
||||
|
||||
Handles research operations and timing for content generation.
|
||||
"""
|
||||
|
||||
from typing import List
|
||||
from datetime import datetime
|
||||
from loguru import logger
|
||||
from models.linkedin_models import ResearchSource
|
||||
|
||||
|
||||
class ResearchHandler:
    """Handles research operations and timing for LinkedIn content."""

    def __init__(self, linkedin_service):
        # Service that actually performs the research calls.
        self.linkedin_service = linkedin_service

    async def conduct_research(
        self,
        request,
        research_enabled: bool,
        search_engine: str,
        max_results: int = 10
    ) -> tuple[List[ResearchSource], float]:
        """
        Conduct research if enabled and return sources with timing.

        Returns:
            Tuple of (research_sources, research_time); ([], 0) when
            research is disabled.
        """
        if not research_enabled:
            return [], 0

        # Debug: Log the search engine value being passed
        logger.info(f"ResearchHandler: search_engine='{search_engine}' (type: {type(search_engine)})")

        started = datetime.now()
        sources = await self.linkedin_service._conduct_research(
            topic=request.topic,
            industry=request.industry,
            search_engine=search_engine,
            max_results=max_results
        )
        elapsed = (datetime.now() - started).total_seconds()
        logger.info(f"Research completed in {elapsed:.2f}s, found {len(sources)} sources")

        return sources, elapsed

    def determine_grounding_enabled(self, request, research_sources: List[ResearchSource]) -> bool:
        """Determine if grounding should be enabled based on request and research results."""
        # Normalize grounding level from a possible Enum or plain string.
        try:
            raw_level = getattr(request, 'grounding_level', 'enhanced')
            grounding_level = (getattr(raw_level, 'value', raw_level) or '').strip().lower()
        except Exception:
            grounding_level = 'enhanced'

        # Normalize the search engine name the same way.
        try:
            raw_engine = getattr(request, 'search_engine', 'google')
            engine_name = str(getattr(raw_engine, 'value', raw_engine)).split('.')[-1].strip().lower()
        except Exception:
            engine_name = 'google'

        if not bool(getattr(request, 'research_enabled', True)) or grounding_level == 'none':
            return False

        # For Google native grounding, Gemini returns sources in the generation
        # metadata, so we should not require pre-fetched research_sources.
        if engine_name == 'google':
            return True

        # For other engines, require that research actually returned sources.
        return bool(research_sources)
|
||||
Reference in New Issue
Block a user