# Scrape news headlines from a website and post them to a Blogger.com blog.

import requests
from bs4 import BeautifulSoup
from googleapiclient.discovery import build


# URL of the news page to scrape. The placeholder domain must be replaced
# with a real news site before running the script.
news_url = "https://example.com/news"  # Replace with the actual URL


# Function to scrape news headlines from the website

def scrape_news_headlines(url):
    """Fetch *url* and return a list of stripped headline strings.

    Returns an empty list if the request fails, the server responds with
    an HTTP error status, or no elements match the selector.
    """
    try:
        # Timeout keeps the script from hanging forever on an unresponsive host.
        response = requests.get(url, timeout=10)
        # Surface 4xx/5xx responses as errors instead of silently
        # parsing an error page for headlines.
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # NOTE: 'headline_selector' is a placeholder — replace it with the
        # real CSS selector for the target site's headline elements.
        return [headline.text.strip()
                for headline in soup.select('headline_selector')]
    except Exception as e:
        # Broad catch is deliberate: any scraping failure degrades to
        # "no headlines" so the caller can decide how to proceed.
        print(f"Error: {e}")
        return []


# Function to post news headlines to a Blogger.com blog

def post_to_blogger(blog_id, api_key, headlines):
    """Publish each headline in *headlines* as a post on a Blogger.com blog.

    Args:
        blog_id: Identifier of the target Blogger.com blog.
        api_key: Google API key used to build the Blogger v3 client.
        headlines: Iterable of headline strings; one post is created per item.

    Errors are printed rather than raised; a failure to create one post
    no longer aborts the remaining headlines (the original wrapped the
    whole loop in a single try, so the first failure stopped the batch).
    """
    try:
        service = build('blogger', 'v3', developerKey=api_key)
    except Exception as e:
        # Without a client there is nothing more to do.
        print(f"Error: {e}")
        return

    for headline in headlines:
        post_body = {
            "kind": "blogger#post",
            "title": headline,
            # You may customize the content and labels as needed
            "content": f"<p>{headline}</p>",
            "labels": ["News"]
        }
        try:
            service.posts().insert(blogId=blog_id, body=post_body).execute()
            print(f"Posted: {headline}")
        except Exception as e:
            # Per-post handling: log and continue with the next headline.
            print(f"Error: {e}")


if __name__ == "__main__":
    # Credentials and target blog — replace the placeholders before running.
    target_blog_id = "your_blog_id"  # Replace with your Blogger.com blog ID
    api_key = "your_api_key"  # Replace with your Google API Key

    scraped = scrape_news_headlines(news_url)

    if not scraped:
        print("No headlines to post.")
    else:
        post_to_blogger(target_blog_id, api_key, scraped)

