def put_object(filename, bucket, backend, debug):
    """Upload a local file to a storage bucket under the file's own name.

    Args:
        filename: path of the local file; also used as the object key.
        bucket: destination bucket name.
        backend: storage backend identifier passed through to Storage.
        debug: when truthy, enable DEBUG-level logging instead of INFO.
    """
    # Pick verbosity first so every subsequent step is logged at the right level.
    level = logging.DEBUG if debug else logging.INFO
    setup_lithops_logger(level)

    storage = Storage(backend=backend)
    logger.info('Uploading file {} to bucket {}'.format(filename, bucket))

    # Stream the file handle straight to the backend; the context manager
    # guarantees the descriptor is closed even if the upload raises.
    with open(filename, 'rb') as in_file:
        storage.put_object(bucket, filename, in_file)

    logger.info('File uploaded successfully')
def get_tweets(keyword, location):
    """Download up to 500 recent tweets for a keyword/location pair and
    archive them as a JSON object in cloud object storage.

    The search is restricted to Catalan or Spanish tweets and excludes
    retweets. The resulting object key is ``keyword + location + ".json"``
    in the bucket named by the module-level ``STORAGEBUCKET``.

    Args:
        keyword: exact phrase to search for (quoted in the query).
        location: exact phrase for the place name (quoted in the query).
            NOTE(review): this is matched as query text; the stored
            "Ubicacion" field is the *author's* profile location, not the
            tweet's geotag.
    """
    auth = tweepy.OAuthHandler(CONSUMERKEY, SECRETKEY)
    auth.set_access_token(TWITTERKEY, TWITTERSECRET)
    # wait_on_rate_limit makes tweepy sleep through rate-limit windows
    # instead of raising, so the 500-item cursor can complete.
    twitterAPI = tweepy.API(auth, wait_on_rate_limit=True)

    # Only look for tweets in Catalan or Spanish and exclude retweets.
    # Fix: a separating space was missing before the language filter,
    # which glued 'lang:ca' onto the closing quote of the location term
    # and produced a malformed query.
    searchstr = ('"' + keyword + '"' + " " + '"' + location + '"' + " "
                 + "lang:ca OR lang:es -filter:retweets")

    # Each tweet is packed into a dict; the list collects them all.
    list_tweets = []

    # Iterate over the Twitter search API, downloading up to 500 tweets.
    for tweet in tweepy.Cursor(
            twitterAPI.search, q=searchstr,
            tweet_mode="extended").items(500):
        packed_tweet = {
            "Texto tweet": tweet.full_text,
            "URL": "https://twitter.com/twitter/statuses/" + str(tweet.id),
            "Fecha": tweet.created_at.strftime("%m/%d/%Y %H:%M:%S"),
            # Author's profile location, not the searched topic location.
            "Ubicacion": str(tweet.user.location)
        }
        list_tweets.append(packed_tweet)

    # Wrap the list under a single top-level key for the JSON document.
    packed_tweets = {"tweets": list_tweets}

    # Upload the archive to cloud object storage.
    storage = Storage()
    storage.put_object(bucket=STORAGEBUCKET,
                       key=keyword + location + ".json",
                       body=json.dumps(packed_tweets))