Base code

This commit is contained in:
Kunthawat Greethong
2026-01-08 22:39:53 +07:00
parent 697115c61a
commit c35fa52117
2169 changed files with 626670 additions and 0 deletions

View File

@@ -0,0 +1,184 @@
#####################################################
#
# Alwrity, AI essay writer - Essay_Writing_with_Prompt_Chaining
#
#####################################################
import os
from pathlib import Path
from dotenv import load_dotenv
from pprint import pprint
from loguru import logger
import sys
from ..gpt_providers.text_generation.main_text_generation import llm_text_gen
def generate_with_retry(prompt, system_prompt=None, max_retries=3):
    """
    Generates content using the llm_text_gen function, retrying on errors.
    Parameters:
        prompt (str): The prompt to generate content from.
        system_prompt (str, optional): Custom system prompt to use instead of the default one.
        max_retries (int, optional): Number of attempts before giving up.
    Returns:
        str: The generated content, or an empty string if all attempts fail.
    """
    for attempt in range(1, max_retries + 1):
        try:
            # Use llm_text_gen instead of directly calling the model
            return llm_text_gen(prompt, system_prompt)
        except Exception as e:
            logger.error(f"Error generating content (attempt {attempt}/{max_retries}): {e}")
    return ""
def ai_essay_generator(essay_title, selected_essay_type, selected_education_level, selected_num_pages):
"""
Write an Essay using prompt chaining and iterative generation.
Parameters:
essay_title (str): The title or topic of the essay.
selected_essay_type (str): The type of essay to write.
selected_education_level (str): The education level of the target audience.
selected_num_pages (int): The number of pages or words for the essay.
"""
logger.info(f"Starting to write Essay on {essay_title}..")
try:
# Define persona and writing guidelines
guidelines = f'''\
Writing Guidelines:
As an expert essay writer and academic researcher, demonstrate your world-class essay writing skills.
Follow these guidelines when writing your essay:
1). You specialize in {selected_essay_type} essay writing.
2). Your target audience is readers at the {selected_education_level} level.
3). The title of the essay is {essay_title}.
4). The final essay should be {selected_num_pages} words/pages long.
5). Plant the seeds of sub-arguments and supporting themes that can be expanded later.
Remember, your main goal is to write as much as you can. If you get through
the essay too fast, that is bad. Expand, never summarize.
'''
# Generate prompts
premise_prompt = f'''\
You are an expert essay writer specializing in {selected_essay_type} essay writing.
Write an essay title for the given keywords: {essay_title}.
The title should appeal to an audience at the {selected_education_level} level.
'''
outline_prompt = f'''\
You are an expert essay writer specializing in {selected_essay_type} essay writing.
Your Essay title is:
{{premise}}
Write an outline for the essay.
'''
starting_prompt = f'''\
You are an expert essay writer specializing in {selected_essay_type} essay writing.
Your essay title is:
{{premise}}
The outline of the Essay is:
{{outline}}
First, silently review the outline and the essay title. Consider how to start the Essay.
Start to write the very beginning of the Essay. You are not expected to finish
the whole Essay now. Your writing should be detailed enough that you are only
scratching the surface of the first bullet of your outline. Try to write AT
MINIMUM 1000 WORDS.
{guidelines}
'''
continuation_prompt = f'''\
You are an expert essay writer specializing in {selected_essay_type} essay writing.
Your essay title is:
{{premise}}
The outline of the Essay is:
{{outline}}
You've begun writing the essay and will now continue it.
Here's what you've written so far:
{{story_text}}
=====
First, silently review the outline and the essay so far.
Identify the single next part of your outline that you should write.
Your task is to continue where you left off and write the next part of the essay.
You are not expected to finish the whole essay now. Your writing should be
detailed enough that you are only scratching the surface of the next part of
your outline. Try to write AT MINIMUM 1000 WORDS. However, only once the essay
is COMPLETELY finished, write IAMDONE. Remember, do NOT write the whole
remaining essay right now.
{guidelines}
'''
# Generate the essay title (premise)
try:
premise = generate_with_retry(premise_prompt)
logger.info(f"The title of the Essay is: {premise}")
except Exception as err:
logger.error(f"Essay title Generation Error: {err}")
return
outline = generate_with_retry(outline_prompt.format(premise=premise))
logger.info(f"The Outline of the essay is: {outline}\n\n")
if not outline:
logger.error("Failed to generate Essay outline. Exiting...")
return
try:
starting_draft = generate_with_retry(
starting_prompt.format(premise=premise, outline=outline))
pprint(starting_draft)
except Exception as err:
logger.error(f"Failed to Generate Essay draft: {err}")
return
try:
    draft = starting_draft
    continuation = generate_with_retry(
        continuation_prompt.format(premise=premise, outline=outline, story_text=draft))
    pprint(continuation)
except Exception as err:
    logger.error(f"Failed to generate the first continuation: {err}")
    return
# Append each continuation to the draft; keep building the essay until we see 'IAMDONE'
draft += '\n\n' + continuation
while 'IAMDONE' not in continuation:
try:
continuation = generate_with_retry(
continuation_prompt.format(premise=premise, outline=outline, story_text=draft))
draft += '\n\n' + continuation
except Exception as err:
logger.error(f"Failed to continually write the Essay: {err}")
return
# Remove 'IAMDONE' and print the final essay
final = draft.replace('IAMDONE', '').strip()
pprint(final)
return final
except Exception as e:
logger.error(f"Main Essay writing: An error occurred: {e}")
return ""

View File

@@ -0,0 +1,102 @@
######################################################
#
# Alwrity, as an AI news writer, will have to be factually correct.
# We will do multiple rounds of web research and cite our sources.
# 'include_urls' will focus news articles only from well known sources.
# Choosing a country will help us get better results.
#
######################################################
import sys
import os
import json
from textwrap import dedent
from pathlib import Path
from datetime import datetime
from dotenv import load_dotenv
load_dotenv(Path('../../.env'))
from loguru import logger
logger.remove()
logger.add(sys.stdout,
colorize=True,
format="<level>{level}</level>|<green>{file}:{line}:{function}</green>| {message}"
)
from ..gpt_providers.text_generation.main_text_generation import llm_text_gen
from ..ai_web_researcher.google_serp_search import perform_serper_news_search
def ai_news_generation(news_keywords, news_country, news_language):
""" Generate news aritcle based on given keywords. """
# Used to store the blog in a string, to save into a *.md file.
blog_markdown_str = ""
logger.info(f"Researching and Writing News Article on keywords: {news_keywords}")
# Call the gpt-researcher and Tavily APIs for this. Do a Google search for organic competition.
try:
google_news_result = perform_serper_news_search(news_keywords, news_country, news_language)
blog_markdown_str = write_news_google_search(news_keywords, news_country, news_language, google_news_result)
#print(blog_markdown_str)
except Exception as err:
logger.error(f"Failed in Google News web research: {err}")
logger.info("\n######### Draft1: Finished News article from Google web search: ###########\n\n")
return blog_markdown_str
def write_news_google_search(news_keywords, news_country, news_language, search_results):
"""Combine the given online research and gpt blog content"""
news_language = get_language_name(news_language)
news_country = get_country_name(news_country)
prompt = f"""
As an experienced {news_language} news journalist and editor,
I will provide you with my 'News keywords' and its 'google search results'.
Your goal is to write a News report, backed by given google search results.
Important, as a news report, its imperative that your content is factually correct and cited.
Follow below guidelines:
1). Understand and utilize the provided google search result json.
2). Always provide in-line citations and provide referance links.
3). Understand the given news item and adapt your tone accordingly.
4). Always include the dates when then news was reported.
6). Do not explain, describe your response.
7). Your blog should be highly formatted in markdown style and highly readable.
8). Important: Please read the entire prompt before writing anything. Follow the prompt exactly as I instructed.
\n\nNews Keywords: "{news_keywords}"\n\n
Google search Result: "{search_results}"
"""
logger.info("Generating blog and FAQs from Google web search results.")
try:
response = llm_text_gen(prompt)
return response
except Exception as err:
logger.error(f"Exit: Failed to get response from LLM: {err}")
sys.exit(1)
def get_language_name(language_code):
languages = {
"es": "Spanish",
"vn": "Vietnamese",
"en": "English",
"ar": "Arabic",
"hi": "Hindi",
"de": "German",
"zh-cn": "Chinese (Simplified)"
# Add more language codes and corresponding names as needed
}
return languages.get(language_code, "Unknown")
def get_country_name(country_code):
countries = {
"es": "Spain",
"vn": "Vietnam",
"pk": "Pakistan",
"in": "India",
"de": "Germany",
"cn": "China"
# Add more country codes and corresponding names as needed
}
return countries.get(country_code, "Unknown")
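# Example usage (a minimal sketch; the country/language codes shown are drawn from the maps above):
# article_md = ai_news_generation("semiconductor exports", news_country="in", news_language="en")
# get_language_name("en")   # -> "English"; unknown codes fall back to "Unknown"
# get_country_name("in")    # -> "India"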

View File

@@ -0,0 +1,115 @@
import streamlit as st
import json
from loguru import logger
from ..gpt_providers.text_generation.main_text_generation import llm_text_gen
def generate_product_description(title, details, audience, tone, length, keywords):
"""
Generates a product description using the configured LLM provider.
Args:
title (str): The title of the product.
details (list): A list of product details (features, benefits, etc.).
audience (list): A list of target audience segments.
tone (str): The desired tone of the description (e.g., "Formal", "Informal").
length (str): The desired length of the description (e.g., "short", "medium", "long").
keywords (str): Keywords related to the product (comma-separated).
Returns:
str: The generated product description.
"""
prompt = f"""
Write a compelling product description for {title}.
Highlight these key features: {', '.join(details)}
Emphasize the benefits of these features for the target audience ({audience}).
Maintain a {tone} tone and aim for a length of approximately {length} words.
Use these keywords naturally throughout the description: {', '.join(keywords)}.
Remember to be persuasive and focus on the value proposition.
"""
try:
    response = llm_text_gen(prompt)
    return response
except Exception as err:
    logger.error(f"Failed to get response from LLM: {err}")
    st.error(f"Failed to generate product description: {err}")
    return None
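# Example call (illustrative values; mirrors how write_ai_prod_desc() invokes this below):
# generate_product_description(
#     "Wireless Bluetooth Headphones",
#     ["Noise Cancellation", "30-hour Battery"],
#     ["Music Lovers", "Travelers"],
#     "Informal",
#     "medium",
#     "wireless headphones, bluetooth",
# )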
def display_inputs():
st.title("📝 AI Product Description Writer 🚀")
st.markdown("**Generate compelling and accurate product descriptions with AI.**")
col1, col2 = st.columns(2)
with col1:
product_title = st.text_input("🏷️ **Product Title**", placeholder="Enter the product title (e.g., Wireless Bluetooth Headphones)")
with col2:
product_details = st.text_area("📄 **Product Details**", placeholder="Enter features, benefits, specifications, materials, etc. (e.g., Noise Cancellation, Long Battery Life, Water Resistant, Comfortable Design)")
col3, col4 = st.columns(2)
with col3:
keywords = st.text_input("🔑 **Keywords**", placeholder="Enter keywords, comma-separated (e.g., wireless headphones, noise cancelling, Bluetooth 5.0)")
with col4:
target_audience = st.multiselect(
"🎯 **Target Audience**",
["Teens", "Adults", "Seniors", "Music Lovers", "Fitness Enthusiasts", "Tech Savvy", "Busy Professionals", "Travelers", "Casual Users"],
placeholder="Select target audience (optional)"
)
col5, col6 = st.columns(2)
with col5:
description_length = st.selectbox(
"📏 **Desired Description Length**",
["Short (1-2 sentences)", "Medium (3-5 sentences)", "Long (6+ sentences)"],
help="Select the desired length of the product description"
)
with col6:
brand_tone = st.selectbox(
"🎨 **Brand Tone**",
["Formal", "Informal", "Fun & Energetic"],
help="Select the desired tone for the description"
)
return product_title, product_details, target_audience, brand_tone, description_length, keywords
def display_output(description, product_title, target_audience, keywords):
    if description:
        st.subheader("✨ Generated Product Description:")
        st.write(description)
        # Build JSON-LD structured data so the description can be embedded for SEO.
        json_ld = {
            "@context": "https://schema.org",
            "@type": "Product",
            "name": product_title,
            "description": description,
            "audience": target_audience,
            "brand": {
                "@type": "Brand",
                "name": "Your Brand Name"
            },
            "keywords": [kw.strip() for kw in keywords.split(",")]
        }
        with st.expander("📦 JSON-LD structured data"):
            st.json(json_ld)
def write_ai_prod_desc():
product_title, product_details, target_audience, brand_tone, description_length, keywords = display_inputs()
if st.button("Generate Product Description 🚀"):
with st.spinner("Generating description..."):
description = generate_product_description(
product_title,
product_details.split(", "), # Split details into a list
target_audience,
brand_tone,
description_length.split(" ")[0].lower(), # Extract length from selectbox
keywords
)
display_output(description, product_title, target_audience, keywords)

View File

@@ -0,0 +1,220 @@
import streamlit as st
from lib.utils.alwrity_utils import (essay_writer, ai_news_writer, ai_finance_ta_writer)
from lib.ai_writers.ai_story_writer.story_writer import story_input_section
from lib.ai_writers.ai_product_description_writer import write_ai_prod_desc
from lib.ai_writers.ai_copywriter.copywriter_dashboard import copywriter_dashboard
from lib.ai_writers.linkedin_writer import LinkedInAIWriter
from lib.ai_writers.blog_rewriter_updater.ai_blog_rewriter import write_blog_rewriter
from lib.ai_writers.ai_blog_faqs_writer.faqs_ui import main as faqs_generator
from lib.ai_writers.ai_blog_writer.ai_blog_generator import ai_blog_writer_page
from lib.ai_writers.ai_outline_writer.outline_ui import main as outline_generator
from lib.alwrity_ui.dashboard_styles import apply_dashboard_style, render_dashboard_header, render_category_header, render_card
from loguru import logger
# Try to import AI Content Performance Predictor (AI-first approach)
try:
from lib.content_performance_predictor.ai_performance_predictor import render_ai_predictor_ui as render_content_performance_predictor
AI_PREDICTOR_AVAILABLE = True
logger.info("AI Content Performance Predictor loaded successfully")
except ImportError:
logger.warning("AI Content Performance Predictor not available")
render_content_performance_predictor = None
AI_PREDICTOR_AVAILABLE = False
# Try to import Bootstrap AI Competitive Suite
try:
from lib.ai_competitive_suite.bootstrap_ai_suite import render_bootstrap_ai_suite
BOOTSTRAP_SUITE_AVAILABLE = True
logger.info("Bootstrap AI Competitive Suite loaded successfully")
except ImportError:
logger.warning("Bootstrap AI Competitive Suite not available")
render_bootstrap_ai_suite = None
BOOTSTRAP_SUITE_AVAILABLE = False
def list_ai_writers():
"""Return a list of available AI writers with their metadata (no UI rendering)."""
writers = []
# Add Content Performance Predictor if available
if render_content_performance_predictor:
# AI-first approach description
if AI_PREDICTOR_AVAILABLE:
description = "🎯 AI-powered content performance prediction with competitive intelligence - perfect for solo entrepreneurs"
name = "AI Content Performance Predictor"
else:
description = "Predict content success before publishing with AI-powered performance analysis"
name = "Content Performance Predictor"
writers.append({
"name": name,
"icon": "🎯",
"description": description,
"category": "⭐ Featured",
"function": render_content_performance_predictor,
"path": "performance_predictor",
"featured": True
})
# Add Bootstrap AI Competitive Suite if available
if render_bootstrap_ai_suite:
writers.append({
"name": "Bootstrap AI Competitive Suite",
"icon": "🚀",
"description": "🥷 Complete AI-powered competitive toolkit: content performance prediction + competitive intelligence for solo entrepreneurs",
"category": "⭐ Featured",
"function": render_bootstrap_ai_suite,
"path": "bootstrap_ai_suite",
"featured": True
})
# Add existing writers
writers.extend([
{
"name": "AI Blog Writer",
"icon": "📝",
"description": "Generate comprehensive blog posts from keywords, URLs, or uploaded content",
"category": "Content Creation",
"function": ai_blog_writer_page,
"path": "ai_blog_writer"
},
{
"name": "AI Blog Rewriter",
"icon": "🔄",
"description": "Rewrite and update existing blog content with improved quality and SEO optimization",
"category": "Content Creation",
"function": write_blog_rewriter,
"path": "blog_rewriter"
},
{
"name": "Story Writer",
"icon": "📚",
"description": "Create engaging stories and narratives with AI assistance",
"category": "Creative Writing",
"function": story_input_section,
"path": "story_writer"
},
{
"name": "Essay writer",
"icon": "✍️",
"description": "Generate well-structured essays on any topic",
"category": "Academic",
"function": essay_writer,
"path": "essay_writer"
},
{
"name": "Write News reports",
"icon": "📰",
"description": "Create professional news articles and reports",
"category": "Journalism",
"function": ai_news_writer,
"path": "news_writer"
},
{
"name": "Write Financial TA report",
"icon": "📊",
"description": "Generate technical analysis reports for financial markets",
"category": "Finance",
"function": ai_finance_ta_writer,
"path": "financial_writer"
},
{
"name": "AI Product Description Writer",
"icon": "🛍️",
"description": "Create compelling product descriptions that drive sales",
"category": "E-commerce",
"function": write_ai_prod_desc,
"path": "product_writer"
},
{
"name": "AI Copywriter",
"icon": "✒️",
"description": "Generate persuasive copy for marketing and advertising",
"category": "Marketing",
"function": copywriter_dashboard,
"path": "copywriter"
},
{
"name": "LinkedIn AI Writer",
"icon": "💼",
"description": "Create professional LinkedIn content that engages your network",
"category": "Professional",
"function": lambda: LinkedInAIWriter().run(),
"path": "linkedin_writer"
},
{
"name": "FAQ Generator",
"icon": "",
"description": "Generate comprehensive, well-researched FAQs from any content source with customizable options",
"category": "Content Creation",
"function": faqs_generator,
"path": "faqs_generator"
},
{
"name": "Blog Outline Generator",
"icon": "📋",
"description": "Create detailed blog outlines with AI-powered content generation and image integration",
"category": "Content Creation",
"function": outline_generator,
"path": "outline_generator"
}
])
return writers
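# Each entry returned above is a plain dict, e.g.:
# {"name": "AI Blog Writer", "icon": "📝", "description": "...",
#  "category": "Content Creation", "function": ai_blog_writer_page,
#  "path": "ai_blog_writer"}   # "featured": True appears only on featured writers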
def get_ai_writers():
"""Main function to display AI writers dashboard with premium glassmorphic design."""
logger.info("Starting AI Writers Dashboard")
# Apply common dashboard styling
apply_dashboard_style()
# Render dashboard header
render_dashboard_header(
"🤖 AI Content Writers",
"Choose from our collection of specialized AI writers, each designed for specific content types and industries. Create engaging, high-quality content with just a few clicks."
)
writers = list_ai_writers()
logger.info(f"Found {len(writers)} AI writers")
# Group writers by category for better organization
categories = {}
for writer in writers:
category = writer["category"]
if category not in categories:
categories[category] = []
categories[category].append(writer)
# Render writers by category with common cards
for category_name, category_writers in categories.items():
render_category_header(category_name)
# Create columns for this category
cols = st.columns(min(len(category_writers), 3))
for idx, writer in enumerate(category_writers):
with cols[idx % 3]:
# Use the common card renderer
if render_card(
icon=writer['icon'],
title=writer['name'],
description=writer['description'],
category=writer['category'],
key_suffix=f"{writer['path']}_{category_name}",
help_text=f"Launch {writer['name']} - {writer['description']}"
):
logger.info(f"Selected writer: {writer['name']} with path: {writer['path']}")
st.session_state.selected_writer = writer
st.query_params["writer"] = writer['path']
logger.info(f"Updated query params with writer: {writer['path']}")
st.rerun()
# Add spacing between categories
st.markdown('<div class="category-spacer"></div>', unsafe_allow_html=True)
logger.info("Finished rendering AI Writers Dashboard")
return writers
# Remove the old ai_writers function since it's now integrated into get_ai_writers
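# Routing sketch (an assumed caller, not part of this module): dispatch the writer
# selected via the "writer" query param that get_ai_writers() sets above.
#
# writers_by_path = {w["path"]: w for w in list_ai_writers()}
# selected = st.query_params.get("writer")
# if selected in writers_by_path:
#     writers_by_path[selected]["function"]()  # render the chosen writer's UI
# else:
#     get_ai_writers()  # no selection yet: show the dashboard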

View File

@@ -0,0 +1,247 @@
#####################################################
#
# Alwrity, AI Long form writer - Writing_with_Prompt_Chaining
# and generative AI.
#
#####################################################
import os
import re
import time
import sys
import yaml
from pathlib import Path
from dotenv import load_dotenv
from configparser import ConfigParser
import streamlit as st
from pprint import pprint
from textwrap import dedent
from loguru import logger
logger.remove()
logger.add(sys.stdout,
colorize=True,
format="<level>{level}</level>|<green>{file}:{line}:{function}</green>| {message}"
)
from ..utils.read_main_config_params import read_return_config_section
from ..ai_web_researcher.gpt_online_researcher import do_metaphor_ai_research
from ..ai_web_researcher.gpt_online_researcher import do_google_serp_search, do_tavily_ai_search
from ..blog_metadata.get_blog_metadata import get_blog_metadata_longform
from ..blog_postprocessing.save_blog_to_file import save_blog_to_file
from ..gpt_providers.text_generation.main_text_generation import llm_text_gen
def generate_with_retry(prompt, system_prompt=None, max_retries=3):
    """
    Generates content from the model, retrying on errors.
    Parameters:
        prompt (str): The prompt to generate content from.
        system_prompt (str, optional): Custom system prompt to use instead of the default one.
        max_retries (int, optional): Number of attempts before giving up.
    Returns:
        str: The generated content, or an empty string if all attempts fail.
    """
    # FIXME: Need a progress bar here.
    for attempt in range(1, max_retries + 1):
        try:
            return llm_text_gen(prompt, system_prompt)
        except Exception as e:
            logger.error(f"Error generating content (attempt {attempt}/{max_retries}): {e}")
            st.error(f"Error generating content (attempt {attempt}/{max_retries}): {e}")
    return ""
def long_form_generator(keywords, search_params=None, blog_params=None):
"""
Generate a long-form blog post based on the given keywords
Args:
keywords (str): Topic or keywords for the blog post
search_params (dict, optional): Search parameters for research
blog_params (dict, optional): Blog content characteristics
"""
# Initialize default parameters if not provided
if blog_params is None:
blog_params = {
"blog_length": 3000, # Default longer for long-form content
"blog_tone": "Professional",
"blog_demographic": "Professional",
"blog_type": "Informational",
"blog_language": "English"
}
else:
# Ensure we have a higher word count for long-form content
if blog_params.get("blog_length", 0) < 2500:
blog_params["blog_length"] = max(3000, blog_params.get("blog_length", 0))
# Extract parameters with defaults
blog_length = blog_params.get("blog_length", 3000)
blog_tone = blog_params.get("blog_tone", "Professional")
blog_demographic = blog_params.get("blog_demographic", "Professional")
blog_type = blog_params.get("blog_type", "Informational")
blog_language = blog_params.get("blog_language", "English")
st.subheader(f"Long-form {blog_type} Blog ({blog_length}+ words)")
with st.status("Generating comprehensive long-form content...", expanded=True) as status:
# Step 1: Generate outline
status.update(label="Creating detailed content outline...")
# Use a customized prompt based on the blog parameters
outline_prompt = f"""
As an expert content strategist writing in a {blog_tone} tone for {blog_demographic} audience,
create a detailed outline for a comprehensive {blog_type} blog post about "{keywords}"
that will be approximately {blog_length} words in {blog_language}.
The outline should include:
1. An engaging headline
2. 5-7 main sections with descriptive headings
3. 2-3 subsections under each main section
4. Key points to cover in each section
5. Ideas for relevant examples or case studies
6. Suggestions for data points or statistics to include
Format the outline in markdown with proper headings and bullet points.
"""
try:
outline = llm_text_gen(outline_prompt)
st.markdown("### Content Outline")
st.markdown(outline)
status.update(label="Outline created successfully ✓")
# Step 2: Research the topic using the search parameters
status.update(label="Researching topic details...")
research_results = research_topic(keywords, search_params)
status.update(label="Research completed ✓")
# Step 3: Generate the full content
status.update(label=f"Writing {blog_length}+ word {blog_tone} {blog_type} content...")
full_content_prompt = f"""
You are a professional content writer who specializes in {blog_type} content with a {blog_tone} tone
for {blog_demographic} audiences. Write a comprehensive, in-depth blog post in {blog_language} about:
"{keywords}"
Use this outline as your structure:
{outline}
And incorporate these research findings where relevant:
{research_results}
The blog post should:
- Be approximately {blog_length} words
- Include an engaging introduction and strong conclusion
- Use appropriate subheadings for all sections in the outline
- Include examples, data points, and actionable insights
- Be formatted in markdown with proper headings, bullet points, and emphasis
- Maintain a {blog_tone} tone throughout
- Address the needs and interests of a {blog_demographic} audience
Do not include phrases like "according to research" or "based on the outline" in your content.
"""
full_content = llm_text_gen(full_content_prompt)
status.update(label="Long-form content generated successfully! ✓", state="complete")
# Display the full content
st.markdown("### Your Complete Long-form Blog Post")
st.markdown(full_content)
return full_content
except Exception as e:
status.update(label=f"Error generating long-form content: {str(e)}", state="error")
st.error(f"Failed to generate long-form content: {str(e)}")
return None
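# Example call (a sketch; the parameter keys mirror the defaults handled above):
# long_form_generator(
#     "renewable energy storage",
#     search_params={"max_results": 5, "search_depth": "advanced", "time_range": "year"},
#     blog_params={"blog_length": 4000, "blog_tone": "Conversational",
#                  "blog_demographic": "General", "blog_type": "Informational",
#                  "blog_language": "English"},
# )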
def research_topic(keywords, search_params=None):
"""
Research a topic using search parameters and return a summary
Args:
keywords (str): Topic to research
search_params (dict, optional): Search parameters
Returns:
str: Research summary
"""
# Display a placeholder for research results
placeholder = st.empty()
placeholder.info("Researching topic... Please wait.")
try:
from .ai_blog_writer.keywords_to_blog_streamlit import do_tavily_ai_search
# Use provided search params or defaults
if search_params is None:
search_params = {
"max_results": 10,
"search_depth": "advanced",
"time_range": "year"
}
# Conduct research using Tavily
tavily_results = do_tavily_ai_search(
keywords,
max_results=search_params.get("max_results", 10),
search_depth=search_params.get("search_depth", "advanced"),
include_domains=search_params.get("include_domains", []),
time_range=search_params.get("time_range", "year")
)
# Extract research data
research_data = ""
if tavily_results and len(tavily_results) == 3:
results, titles, answer = tavily_results
if answer and len(answer) > 50:
research_data += f"Summary: {answer}\n\n"
if results and 'results' in results and len(results['results']) > 0:
research_data += "Key Sources:\n"
for i, result in enumerate(results['results'][:7], 1):
title = result.get('title', 'Untitled Source')
content_snippet = result.get('content', '')[:300] + "..."
research_data += f"{i}. {title}\n{content_snippet}\n\n"
# If research data is empty or too short, provide a generic response
if not research_data or len(research_data) < 100:
research_data = f"No specific research data found for '{keywords}'. Please provide more specific information in your content."
placeholder.success("Research completed successfully!")
return research_data
except Exception as e:
placeholder.error(f"Research failed: {str(e)}")
return f"Unable to gather research for '{keywords}'. Please continue with the content based on your knowledge."
finally:
# Remove the placeholder after a short delay
time.sleep(1)
placeholder.empty()
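# Example search_params (keys mirror the defaults above; the domains are illustrative):
# research_topic("ai in healthcare", {
#     "max_results": 5,
#     "search_depth": "basic",
#     "time_range": "month",
#     "include_domains": ["nature.com", "who.int"],
# })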
def generate_long_form_content(content_keywords):
"""
Main function to generate long-form content based on the provided keywords.
Parameters:
content_keywords (str): The main keywords or topic for the long-form content.
Returns:
str: The generated long-form content.
"""
return long_form_generator(content_keywords)
# Example usage
if __name__ == "__main__":
# Example usage of the function
content_keywords = "artificial intelligence in healthcare"
generated_content = generate_long_form_content(content_keywords)
print(f"Generated content: {generated_content[:100]}...")

View File

@@ -0,0 +1,202 @@
import sys
import os
import datetime
import tiktoken
from .arxiv_schlorly_research import fetch_arxiv_data, create_dataframe, get_arxiv_main_content
from .arxiv_schlorly_research import arxiv_bibtex, scrape_images_from_arxiv, download_image
from .arxiv_schlorly_research import read_written_ids, extract_arxiv_ids_from_line, append_id_to_file
from .write_research_review_blog import review_research_paper
from .combine_research_and_blog import blog_with_research
from .write_blog_scholar_paper import write_blog_from_paper
from .gpt_providers.gemini_pro_text import gemini_text_response
from .generate_image_from_prompt import generate_image
from .convert_content_to_markdown import convert_tomarkdown_format
from .get_blog_metadata import blog_metadata
from .get_code_examples import gemini_get_code_samples
from .save_blog_to_file import save_blog_to_file
from .take_url_screenshot import screenshot_api
from loguru import logger
logger.remove()
logger.add(sys.stdout,
colorize=True,
format="<level>{level}</level>|<green>{file}:{line}:{function}</green>| {message}"
)
def blog_arxiv_keyword(query):
""" Write blog on given arxiv paper."""
arxiv_id = None
arxiv_url = None
bibtex = None
research_review = None
column_names = ['Title', 'Date', 'Id', 'Summary', 'PDF URL']
papers = fetch_arxiv_data(query)
df = create_dataframe(papers, column_names)
for paper in papers:
# Extracting the arxiv_id
arxiv_id = paper[2].split('/')[-1]
arxiv_url = "https://browse.arxiv.org/html/" + arxiv_id
bibtex = arxiv_bibtex(arxiv_id)
logger.info(f"Get research paper text from the url: {arxiv_url}")
research_content = get_arxiv_main_content(arxiv_url)
num_tokens = num_tokens_from_string(research_content, "cl100k_base")
logger.info(f"Number of tokens sent: {num_tokens}")
# If the token count is within the processing range, write and print the review
if 1000 < num_tokens < 30000:
logger.info(f"Writing research review on {paper[0]}")
research_review = review_research_paper(research_content)
research_review = f"\n{research_review}\n\n" + f"```{bibtex}```"
#research_review = research_review + "\n\n\n" + f"{df.to_markdown()}"
research_review = convert_tomarkdown_format(research_review, "gemini")
break
else:
# Skip to the next iteration if the condition is not met
continue
logger.info(f"Final scholar article: \n\n{research_review}\n")
# TBD: Scrape images from research reports and pass to vision to get conclusions out of it.
#image_urls = scrape_images_from_arxiv(arxiv_url)
#print("Downloading images found on the page:")
#for img_url in image_urls:
# download_image(img_url, arxiv_url)
try:
blog_postprocessing(arxiv_id, research_review)
except Exception as err:
logger.error(f"Failed in blog post processing: {err}")
sys.exit(1)
logger.info(f"\n\n ################ Finished writing Blog for : #################### \n")
def blog_arxiv_url_list(file_path):
""" Write blogs on all the arxiv links given in a file. """
extracted_ids = []
try:
with open(file_path, 'r', encoding="utf-8") as file:
for line in file:
arxiv_id = extract_arxiv_ids_from_line(line)
if arxiv_id:
extracted_ids.append(arxiv_id)
except FileNotFoundError:
logger.error(f"File not found: {file_path}")
raise FileNotFoundError
except Exception as e:
logger.error(f"Error while reading the file: {e}")
raise e
# Read already written IDs
written_ids = read_written_ids('papers_already_written_on.txt')
# Loop through extracted IDs
for arxiv_id in extracted_ids:
if arxiv_id not in written_ids:
# This ID has not been written on yet
arxiv_url = "https://browse.arxiv.org/html/" + arxiv_id
logger.info(f"Get research paper text from the url: {arxiv_url}")
research_content = get_arxiv_main_content(arxiv_url)
try:
num_tokens = num_tokens_from_string(research_content, "cl100k_base")
except Exception as err:
logger.error(f"Failed in counting tokens: {err}")
sys.exit(1)
logger.info(f"Number of tokens sent: {num_tokens}")
# If the token count is within the processing range, write and print the review
# FIXME: Docs over 30k tokens, need to be chunked and summarized.
if 1000 < num_tokens < 30000:
try:
logger.info(f"Getting bibtex for arxiv ID: {arxiv_id}")
bibtex = arxiv_bibtex(arxiv_id)
except Exception as err:
logger.error(f"Failed to get Bibtex: {err}")
try:
logger.info(f"Writing a research review..")
research_review = review_research_paper(research_content, "gemini")
logger.info(f"Research Review: \n{research_review}\n\n")
except Exception as err:
logger.error(f"Failed to write review on research paper: {arxiv_id}{err}")
research_blog = write_blog_from_paper(research_content, "gemini")
logger.info(f"\n\nResearch Blog: {research_blog}\n\n")
#research_review = blog_with_research(research_review, research_blog, "gemini")
#logger.info(f"\n\n\nBLOG_WITH_RESEARCH: {research_review}\n\n\n")
research_review = convert_tomarkdown_format(research_review, "gemini")
# Append the BibTeX citation once, after the markdown conversion.
research_review = f"\n{research_review}\n\n" + f"```\n{bibtex}\n```"
logger.info(f"Final blog from research paper: \n\n{research_review}\n\n\n")
try:
blog_postprocessing(arxiv_id, research_review)
except Exception as err:
logger.error(f"Failed in blog post processing: {err}")
sys.exit(1)
logger.info(f"\n\n ################ Finished writing Blog for : #################### \n")
else:
# Skip to the next iteration if the condition is not met
logger.error("FIXME: Docs over 30k tokens, need to be chunked and summarized.")
continue
else:
logger.warning(f"Already written, skip writing on Arxiv paper ID: {arxiv_id}")
def blog_postprocessing(arxiv_id, research_review):
""" Common function to do blog postprocessing. """
try:
append_id_to_file(arxiv_id, "papers_already_written_on.txt")
except Exception as err:
logger.error(f"Failed to write/append ID to papers_already_written_on.txt: {err}")
raise err
try:
blog_title, blog_meta_desc, blog_tags, blog_categories = blog_metadata(research_review)
except Exception as err:
logger.error(f"Failed to get blog metadata: {err}")
raise err
try:
arxiv_url_scrnsht = f"https://arxiv.org/abs/{arxiv_id}"
generated_image_filepath = take_paper_screenshot(arxiv_url_scrnsht)
except Exception as err:
logger.error(f"Failed to tsk paper screenshot: {err}")
raise err
try:
save_blog_to_file(research_review, blog_title, blog_meta_desc, blog_tags,
                  blog_categories, generated_image_filepath)
except Exception as err:
logger.error(f"Failed to save blog to a file: {err}")
sys.exit(1)
def take_paper_screenshot(arxiv_url):
""" Common function to take paper screenshot. """
# FIXME: Remove the hardcoding; add an option or move this to config?
image_dir = os.path.join(os.getcwd(), "blog_images")
generated_image_name = f"generated_image_{datetime.datetime.now():%Y-%m-%d-%H-%M-%S}.png"
generated_image_filepath = os.path.join(image_dir, generated_image_name)
if arxiv_url:
try:
generated_image_filepath = screenshot_api(arxiv_url, generated_image_filepath)
except Exception as err:
logger.error(f"Failed in taking url screenshot: {err}")
return generated_image_filepath
def num_tokens_from_string(string, encoding_name):
"""Returns the number of tokens in a text string."""
try:
encoding = tiktoken.get_encoding(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
except Exception as err:
logger.error(f"Failed to count tokens: {err}")
sys.exit(1)
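# Quick check (assumes tiktoken is installed; "cl100k_base" is a valid encoding name):
# num_tokens_from_string("Attention is all you need.", "cl100k_base")  # -> a small int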

View File

@@ -0,0 +1,49 @@
import sys
from .gpt_providers.openai_chat_completion import openai_chatgpt
from .gpt_providers.gemini_pro_text import gemini_text_response
from loguru import logger
logger.remove()
logger.add(sys.stdout,
colorize=True,
format="<level>{level}</level>|<green>{file}:{line}:{function}</green>| {message}"
)
def write_blog_from_paper(paper_content, gpt_providers="gemini"):
    """ Write a blog post from the given research paper content. """
prompt = f"""As an expert in NLP and AI, I will provide you with a content of a research paper.
Your task is to write a highly detailed blog(at least 2000 words), breaking down complex concepts for beginners.
Take your time and do not rush to respond.
Do not provide explanations, suggestions in your response.
Include the sections below in your blog:
Highlights: Include a list of 5 most important and unique claims of the given research paper.
Abstract: Start by reading the abstract, which provides a concise summary of the research, including its purpose, methodology, and key findings.
Introduction: This section will give you background information and set the context for the research. It often ends with a statement of the research question or hypothesis.
Methodology: Include description of how authors conducted the research. This can include data sources, experimental setup, analytical techniques, etc.
Results: This section presents the data or findings of the research. Pay attention to figures, tables, and any statistical analysis provided.
Discussion/Analysis: In this section, explain how the research paper answers the research questions and how the findings fit with existing knowledge.
Conclusion: This part summarizes the main findings and their implications. It might also suggest areas for further research.
References: The cited works can provide additional context or background reading.
Remember, please use MLA format and markdown syntax.
Using the blog structure above, write a detailed and original blog post on the given research paper: \n'{paper_content}'\n\n"""
if 'gemini' in gpt_providers:
try:
response = gemini_text_response(prompt)
return response
except Exception as err:
logger.error(f"Failed to get response from gemini: {err}")
raise err
elif 'openai' in gpt_providers:
try:
logger.info("Calling OpenAI LLM.")
response = openai_chatgpt(prompt)
return response
except Exception as err:
logger.error(f"failed to get response from Openai: {err}")
raise err

View File

@@ -0,0 +1,89 @@
import sys
from .gpt_providers.openai_chat_completion import openai_chatgpt
from .gpt_providers.gemini_pro_text import gemini_text_response
from .gpt_providers.mistral_chat_completion import mistral_text_response
from loguru import logger
logger.remove()
logger.add(sys.stdout,
colorize=True,
format="<level>{level}</level>|<green>{file}:{line}:{function}</green>| {message}"
)
def review_research_paper(research_blog, gpt_providers="gemini"):
    """ Write a detailed review report on the given research paper content. """
prompt = f"""As world's top researcher and academician, I will provide you with research paper.
Your task is to write a highly detailed review report.
Important, your report should be factual, original and demostrate your expertise.
Review guidelines:
1). Read the Abstract and Introduction Carefully:
Begin by thoroughly reading the abstract and introduction of the paper.
Try to understand the research question, the objectives, and the background information.
Identify the central argument or hypothesis that the study is examining.
2). Examine the Methodology and Methods:
Look closely at the research design, whether it is experimental, observational, qualitative, or a combination of methods.
Check the sampling strategy and the size of the sample.
Review the methods of data collection and the instruments used for this purpose.
Think about any ethical issues and possible biases in the study.
3). Analyze the Results and Discussion:
Review how the results are presented, including any tables, graphs, and statistical analysis.
Evaluate the findings' validity and reliability.
Analyze whether the results support or contradict the research question and hypothesis.
Read the discussion section where the authors interpret their findings and their significance.
4). Consider the Limitations and Strengths:
Spot any limitations or potential weaknesses in the study.
Evaluate the strengths and contributions that the research makes.
Think about how generalizable the findings are to other populations or situations.
5). Assess the Writing and Organization:
Judge the clarity and structure of the report.
Consider the use of language, grammar, and the overall formatting.
Assess how well the arguments are logically organized and how coherent the report is.
6). Evaluate the Literature Review:
Examine how comprehensive and relevant the literature review is.
Consider how the study adds to or builds upon existing research.
Evaluate the timeliness and quality of the sources cited in the research.
7). Review the Conclusion and Implications:
Look at the conclusions drawn from the study and how well they align with the findings.
Think about the practical implications and potential applications of the research.
Evaluate the suggestions for further research or policy actions.
8). Overall Assessment:
Formulate an overall opinion about the research report's quality and thoroughness.
Consider the significance and impact of the findings.
Evaluate how the study contributes to its field of research.
9). Provide Constructive Feedback:
Offer constructive criticism and suggestions for improvement, where necessary.
Think about possible biases or alternative ways to interpret the findings.
Suggest ideas for future research or for replicating the study.
Do not include descriptions or explanations of your response.
Using the above review guidelines, write a detailed review report on the below research paper.
Research Paper: '{research_blog}'
"""
if 'gemini' in gpt_providers:
try:
response = gemini_text_response(prompt)
return response
except Exception as err:
logger.error(f"Failed to get response from gemini: {err}")
response = mistral_text_response(prompt)
return response
elif 'openai' in gpt_providers:
try:
logger.info("Calling OpenAI LLM.")
response = openai_chatgpt(prompt)
return response
except Exception as err:
SystemError(f"Failed to get response from Openai: {err}")