# Script prologue for the "old_tweets" job: path setup, logging, and
# SQLite connection helpers for the fairfrog_tweets database.
import os  # BUGFIX: `os` is used below (os.path, os.environ) but was never imported
import sys
import html
import sqlite3 as sql
from logging import DEBUG
from random import randint, shuffle, random, seed
from time import sleep

import arrow

# Make the sibling utils package importable before the project imports below.
cur_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_dir, "../utils"))
from twitter_api import setup_api
from set_logger import get_logger

home = os.environ.get("HOME", "/home/fairfrog/")
logger = get_logger(home, "old_tweets")
# logger.setLevel(DEBUG)
now = arrow.now()


def get_conn():
    """Open a connection to the local fairfrog_tweets SQLite database.

    Rows come back as sqlite3.Row mappings (name-addressable) and text
    passes through the `str` text factory.

    Returns:
        sqlite3.Connection: an open connection; pass it to close_conn()
        when done.
    """
    conn = sql.connect(cur_dir + "/fairfrog_tweets.db")
    conn.row_factory = sql.Row
    conn.text_factory = str
    return conn


def close_conn(conn):
    """Commit any pending transaction on *conn*, then close it."""
    conn.commit()
    conn.close()
"fairtrade_india", "ECOCENT_NL", "EnergieOverheid", "GroeneCourant", "TrouwGroen", "bnrduurzaam", "DuurzaamNieuws", "DuurzaamBV", "NatGeoPhotos", "SpaceX", "NatGeo", "NASA", ] if __name__ == "__main__": logger = get_logger(home, "retweeter") logger.info("Setting up api connection...") api = setup_api(home) user = api.me() logger.info( "Name: {0}\nLocation: {1}\nFriends: {2}\nFollowers: {3}\n".format( user.name, user.location, user.friends_count, user.followers_count)) while 1: now = arrow.now() new_tweets = get_tweets(api, relevant_accounts, logger) for tweet in new_tweets: try: logger.info("Retweeting: " + tweet.text) tweet_message = tweet.retweet() break
# follow/unfollow job: path setup, logging, and Twitter follower helpers.
import os
import sys
from tweepy import Cursor
from time import sleep

# Make the sibling utils package importable before the project imports below.
cur_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_dir, "../utils"))
from twitter_api import setup_api
from set_logger import get_logger
from csv import DictReader

home = os.environ.get("HOME", "/home/fairfrog/")
logger = get_logger(home, "follow_unfollow")


def get_followers(api):
    # Collect the id of every account following the authenticated user,
    # paging through the follower list; returns a set of id strings.
    me = api.me()
    followers = set()
    for page in Cursor(api.followers_ids, screen_name=me.screen_name).pages():
        followers.update([str(fol) for fol in page])
    return followers


def follow_desired_divas(api):
    # Follow the accounts listed in diva_list.csv (columns: Id, Screen_Name).
    # NOTE(review): the file is opened in "rb" yet fed to csv.DictReader —
    # that only works on Python 2; confirm which interpreter runs this job.
    me = api.me()
    cntr = 0
    with open(os.path.join(cur_dir, "diva_list.csv"), "rb") as f:
        csvreader = DictReader(f)
        new_friends = {row["Id"]: row["Screen_Name"] for row in csvreader}
    # NOTE(review): chunk truncated here — the loop body is outside this
    # view, and whether this loop sits inside the `with` block is a guess.
    for item in Cursor(api.friends_ids, screen_name=me.screen_name).items():
#!/usr/bin/env python3 import os import sys cur_dir = os.path.abspath(os.path.dirname(__file__)) sys.path.append(os.path.join(cur_dir, "../utils/")) from tweepy import Cursor from twitter_api import setup_api from set_logger import get_logger from csv import writer from operator import attrgetter home = os.environ.get("HOME", "/home/kush/") logger = get_logger(home, "get_tweets") api = setup_api(home) me = api.me() with open(cur_dir + "/fairfrog_tweets2.csv", "wt") as f: csvwriter = writer(f) for my_tweet in Cursor(api.user_timeline, screen_name=me.screen_name, tweet_mode="extended").items(): if not hasattr(my_tweet, "retweeted_status"): try: csvwriter.writerow([ my_tweet.id, my_tweet.created_at.strftime("%Y-%m-%d %H:%M"), my_tweet.retweet_count, my_tweet.favorite_count, my_tweet.full_text, ",".join([
# Parrot job: module-level constants plus API/image helpers.
# NOTE(review): fragment — the import block (ET, dt, os, lru_cache,
# requests, get_logger) precedes this chunk and is outside this view.
PARSER = ET.XMLParser(ns_clean=True, remove_blank_text=True,
                      remove_comments=True, strip_cdata=True, encoding='utf-8')
TODAY = dt.datetime.now()
MAX_NUM_BLOGS = 3
TOTAL_NUM_TWEETS = 6
TWEET_LENGTH = 280
NUM_HOURS = 22 - 8  # presumably an 08:00-22:00 posting window — confirm
HOME = os.getenv('HOME', '/home/fairfrog')
INSERT_BLOG_QUERY = """INSERT INTO FairFrog_Blogs (Title, Url, Publish_Date, Description, Image, Tags, Author, Last_Update) VALUES (?, ?, ?, ?, ?, ?, ?, ?) """
logger = get_logger(HOME, f'parrot_{TODAY.date()}', 'Parrot')
PRODUCT_INTROS = [
    'Check out dit mooie product uit onze collectie',
]


@lru_cache(8)
def get_api_config_settings(api):
    # Cached lookup of the Twitter configuration values used for tweet
    # length budgeting: (characters reserved per media, https short-url
    # length).
    config = api.configuration()
    return config['characters_reserved_per_media'], config[
        'short_url_length_https']


def download_image_to_temp(image_url: str) -> str:
    try:
        response = requests.get(image_url, allow_redirects=True)
        # NOTE(review): chunk truncated here — the rest of this function is
        # outside this view.
# Streaming retweeter for the "heelholland" feed: retweets and favorites
# incoming statuses unless they contain a forbidden word.
from tweepy import Stream, StreamListener
from datetime import datetime
from time import sleep
import os
import sys

# Make the sibling utils package importable before the project imports below.
cur_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_dir, "../utils/"))
from twitter_api import setup_api
from set_logger import get_logger

# Statuses containing any of these substrings are skipped.
forbidden_word_list = ["job", "factuur"]
home = os.environ.get("HOME", "/home/fairfrog/")
logger = get_logger(home, "streaming_heelholland")


class MyStreamListener(StreamListener):
    def on_connect(self):
        # Called by tweepy once the stream connection is established.
        logger.info("Connecting the stream...")

    def on_status(self, status):
        # Retweet and favorite each incoming status unless it mentions a
        # forbidden word.
        # NOTE(review): .encode() yields bytes; the `word in tweet` test and
        # the "Retweeted: " + tweet concatenation below only work on
        # Python 2 — confirm which interpreter runs this job.
        tweet = status.text.encode("UTF-8")
        if not any(word in tweet for word in forbidden_word_list):
            try:
                # `api` is presumably a module-level handle created by a
                # __main__ section outside this view — verify.
                api.retweet(status.id)
                status.favorite()
            except Exception as e:
                print(e)
            finally:
                # NOTE(review): this logs "Retweeted" even when the retweet
                # raised — possibly intended as best-effort logging.
                logger.info("Retweeted: " + tweet)
        now_hour = datetime.now().hour
        # NOTE(review): chunk truncated here — the remainder of this method
        # is outside this view, and the placement of now_hour is a guess.
# Streaming bot for the fairfrog account.
import os
import sys
from tweepy import Stream, StreamListener
from time import sleep

# Make the sibling utils package importable before the project imports below.
cur_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(cur_dir, "../utils/"))
from twitter_api import setup_api
from set_logger import get_logger

home = os.environ.get("HOME", "/home/kush/")
logger = get_logger(home, "fairfrog_streamer")


class MyStreamListener(StreamListener):
    def __init__(self, api):
        # Keep the authenticated API handle for use in the callbacks.
        # NOTE(review): does not call super().__init__(), so any state the
        # tweepy base listener sets up is skipped — confirm this is intended.
        self.api = api

    def on_connect(self):
        # On connect: log the authenticated account's details, then retweet
        # the latest fairfrogNL timeline entries that are neither replies
        # nor themselves retweets.
        user = self.api.me()
        logger.debug(
            "Name: {0}\nLocation: {1}\nFriends: {2}\nFollowers: {3}\n".format(
                user.name, user.location, user.friends_count, user.followers_count
            )
        )
        last_updates = self.api.user_timeline("fairfrogNL", count=10)
        for status in last_updates:
            logger.debug(status.text.encode("UTF-8"))
            if not status.in_reply_to_user_id and not hasattr(status, "retweeted_status"):
                try:
                    status.retweet()
                    # NOTE(review): chunk truncated here — the except handler
                    # for this try is outside this view.