def add_tweet(self, tweet_meta):
    '''Add the tweet into the database'''
    text = tweet_meta.get('text')
    author = tweet_meta.get('author')
    created_at = tweet_meta.get('created_at')
    entities = tweet_meta.get('entities')
    retweet = tweet_meta.get('retweet')
    retweet_author = tweet_meta.get('retweet_author')
    retweeted = tweet_meta.get('retweeted')

    # check if tweet already exists
    tw = Tweet.objects(text=text, author=author, created_at=created_at) \
        .first()
    if tw:
        # self.logger.info('Tweet Already Exists, id: {0}'.format(tw.id))
        return tw

    tw = Tweet(text=text, entities=entities, author=author,
               retweeted=retweeted, retweet_author=retweet_author,
               retweet=retweet, created_at=created_at)
    tw.save()
    author.tweets.append(tw)
    author.save()
    return tw
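# A minimal sketch (an assumption, not shown in the source) of the MongoEngine
# documents that add_tweet above appears to rely on. The field names are taken
# from the call sites; the field types are guesses.
from mongoengine import (Document, StringField, DateTimeField, DictField,
                         BooleanField, ReferenceField, ListField)


class Author(Document):
    name = StringField()
    tweets = ListField(ReferenceField('Tweet'))  # appended to in add_tweet


class Tweet(Document):
    text = StringField()
    entities = DictField()
    author = ReferenceField(Author)
    retweeted = BooleanField()
    retweet_author = StringField()
    retweet = StringField()
    created_at = DateTimeField()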
def __init__(self):
    tweet = Tweet()
    self.stream = tweet.start_stream(async=True)
    self.root = widgets.RootWidget()
    self.current_tweet = None
    self.next_tweet = None
    self.tweets = tweet.get_tweets(count=7)
    self.index = 0
    self.stream.add_watcher(self.on_update)
def of(d: dict) -> Optional[CollectTweetsMessage]:
    try:
        tweet = Tweet(d['tweet'])
        options = TweetHandleOptions.of(d['options'])
        return CollectTweetsMessage(tweet, options)
    except KeyError:
        return None
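# Hypothetical call site for the factory above, assuming it is exposed as a
# staticmethod on CollectTweetsMessage. raw_tweet_dict, raw_options_dict and
# handle_malformed_payload are placeholders, not names from the source.
payload = {'tweet': raw_tweet_dict, 'options': raw_options_dict}
message = CollectTweetsMessage.of(payload)
if message is None:
    # a missing 'tweet' or 'options' key is reported as None, not an exception
    handle_malformed_payload(payload)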
def getValue():
    clear_session()
    if (request.form['submit_btn'] == "Submit"):
        text = request.form['user_str']
        length = request.form['user_len']
    elif (request.form['submit_btn'] == "Generate"):
        text = " "
        length = random.randint(1, 40)

    print('Load model from checkpoint...')
    model = load_trained_model_from_checkpoint(config_path, checkpoint_path)
    print('Load BPE from files...')
    bpe = get_bpe_from_files(encoder_path, vocab_path)
    # if(text!=None):
    print('Generate text...')
    # output = generate(model, bpe, ['From the day forth, my arm'], length=20, top_k=1)
    output = generate(model, bpe, [str(text)], length=int(length), top_k=2)
    # print(output)

    # keep only the text before the last newline of the generated output
    ind = output[0].rfind("\n")
    temp = output[0]
    temp = temp[0:ind]
    # print(temp)
    output[0] = temp
    # print(output)

    try:
        if (request.form['tweet'] == "post"):
            Tweet(str(output[0]))
    except:
        print("")
    return render_template('index.html', t=output)
def twit(content):
    user = TwitterUser()
    try:
        tweet = Tweet(content)
        logging.info('twitting {} ...'.format(tweet._raw))
        result = user.twit(tweet)
        return result
    except Exception as e:
        print(e)
def collect_tweets(api: TwitterAPI, twitter_list: TwitterList,
                   count: int) -> List[Tweet]:
    res = api.request(
        'lists/statuses',
        {
            'owner_screen_name': twitter_list.owner_screen_name,
            'slug': twitter_list.slug,
            'count': count,
            'tweet_mode': 'extended',
        })
    return [Tweet(x) for x in res]
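# A possible call site for collect_tweets above. TwitterAPI here is the
# third-party `TwitterAPI` client; TwitterList is assumed to be a small value
# object carrying owner_screen_name and slug, and the credential variables
# are placeholders.
from TwitterAPI import TwitterAPI

api = TwitterAPI(consumer_key, consumer_secret,
                 access_token_key, access_token_secret)
news_list = TwitterList(owner_screen_name='some_user', slug='some-list')
latest = collect_tweets(api, news_list, count=50)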
def delete(content):
    urls = Tweet.extractURL(content)
    if not urls:
        return 'Deletion only works with a pasted link 😯'
    else:
        result = []
        for url in urls:
            username = url.split('/')[-3]
            user = TwitterUser(username)
            result.append(user.delete(link=url))
        return result
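# Illustration (not from the source) of why url.split('/')[-3] yields the
# username: a status URL has the form https://twitter.com/<username>/status/<id>.
url = 'https://twitter.com/alice/status/1234567890'
parts = url.split('/')
# parts == ['https:', '', 'twitter.com', 'alice', 'status', '1234567890']
assert parts[-3] == 'alice'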
def twit(bot, update, content):
    user = TwitterUser()
    try:
        tweet = Tweet(content)
        logging.info('twitting {} ...'.format(tweet._raw))
        result = user.twit(tweet)
        logging.info('post success, now clearing staging')
        if result:
            clearStaging(bot, update, mode='after_post')
        return result
    except TooManyHyperLinks:
        return TooManyHyperLinks.msg
def get_tweet_data(self) -> None:
    """Gathers all tweets from the data set"""
    f = open("data/Climate_Tweets.txt", encoding="utf-8")
    for tweet_json in f.readlines():
        tweet_json = eval(tweet_json)  # turns the dict text into a dict
        hashtags = {hashtag['text'].lower()
                    for hashtag in tweet_json['entities']['hashtags']}
        tweet = Tweet(
            text=tweet_json['full_text'],
            hashtags=hashtags,
            date=self.parse_date(tweet_json['created_at']),
            location=tweet_json['user']['location'],
            user=tweet_json['user']['name']
        )
        # annoyingly, retweets have truncated text
        # if a tweet is a retweet, take the text from the original
        if "retweeted_status" in tweet_json:
            tweet.text = tweet_json['retweeted_status']['full_text']
        self.tweets.append(tweet)
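# The eval() call above executes whatever is in the file. If each line really
# is a Python dict literal (an assumption based on the comment in the loop),
# ast.literal_eval is a safer drop-in, sketched here:
import ast

with open("data/Climate_Tweets.txt", encoding="utf-8") as f:
    for line in f:
        tweet_json = ast.literal_eval(line)  # parses literals only, no code execution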
def photo(bot, update):
    ruser = RabonaUser(update.effective_user)
    user = TwitterUser()
    user.dir = ruser.dir
    if update.message.document:
        photo_file_obj = bot.get_file(update.message.document.file_id)
    else:
        photo_file_obj = bot.get_file(update.message.photo[-1].file_id)
    local_file_name = user.savePhoto(bot, photo_file_obj)
    try:
        update.message.reply_text('Oh, a photo to post 😯')
        tweet = Tweet(local_file_name)
        logging.info('twitting photo ...')
        update.message.reply_text('Posting it now 😯')
        result = user.twit(tweet)
        update.message.reply_text('Posted 😯')
        return result
    except Exception as e:
        print(e)
def generateTweets(recipe):
    tweets = getTweet("" + recipe.name + " -politics -filter:retweets",
                      3, recipe.foodKeyWords)
    if len(tweets) == 0:
        tweets.append(Tweet("", "No tweets found", ""))
    return tweets
def tearDown(self):
    self.tp = None
    self.data = None
    TwitterAccount.drop_collection()
    Tweet.drop_collection()
#!/usr/bin/python
# -*- coding: utf-8 -*-

from twitter import Tweet, TwitterOAuth
import sys
from bucket import Bucket

'''Main module of the system'''

keywords = list(sys.argv)[1:len(sys.argv)]

try:
    if keywords:
        print("TWITTER SEARCH PARAMETERS = %s" % keywords)
        twitter = TwitterOAuth()
        tweet = Tweet(twitter, keywords)
        tweet.save_tweets()
    else:
        print("NO KEYWORDS")
except Exception as e:
    print(e)
test_text_0 = 'Telegram has recently shut down its highly-publicized Initial Coin Offering (ICO). While reasons are presently unclear, tightening regulations are likely a major cause. Others are claiming Telegram’s already raised the money they once wanted. https://www.bitsonline.com/telegram-cancels-ico/'

test_text_1 = "['https://github.com/codysoyland/surlex0', 'https://github.com/codysoyland/surlex1', 'https://github.com/codysoyland/surlex2', 'https://github.com/codysoyland/surlex3', 'https://github.com/codysoyland/surlex4', 'https://github.com/codysoyland/surlex5', 'https://github.com/codysoyland/surlex6', 'https://github.com/codysoyland/surlex7', 'https://github.com/codysoyland/surlex8', 'https://github.com/codysoyland/surlex9']"

test_text_2 = "['https://github.com/codysoyland/surlex0', 'https://github.com/codysoyland/surlex1', 'https://github.com/codysoyland/surlex2', 'https://github.com/codysoyland/surlex3', 'https://github.com/codysoyland/surlex4']"

test_text_3 = '''The new governor of South Korea’s Financial Supervisory Service, Yoon Suk-heun said FSS will consider easing cryptocurrency regulations. “The FSC inspects policies, while the FSS examines and supervises financial institutions but with the oversight of the FSC.” #DailyFAS https://coinjournal.net/s-korea-new-fss-governor-to-ease-cryptocurrency-regulation/'''

test_text_4 = '''The new governor of South Korea’s Financial Supervisory Service, The new governor of South Korea’s Financial Supervisory Service, The new governor of South Korea’s Financial Supervisory Service, The new governor of South Korea’s Financial Supervisory Service, The new governor of South Korea’s Financial Supervisory Service, Yoon Suk-heun said FSS will consider easing cryptocurrency regulations. “The FSC inspects policies, while the FSS examines and supervises financial institutions but with the oversight of the FSC.” #DailyFAS https://coinjournal.net/s-korea-new-fss-governor-to-ease-cryptocurrency-regulation/'''

t3 = Tweet(test_text_3)
# t4 = Tweet(test_text_4)


class TestTwitterUser(unittest.TestCase):
    def test_init(self):
        print('test0')
        self.user = TwitterUser()


class Test3(unittest.TestCase):
    def test_init(self):
        t = t3
        self.maxDiff = None
        print('test1')
        self.assertTrue(isinstance(t, Tweet))
def main(args):
    # For class 0, use lines [GID x 5500 ... (GID+1) x 5500 -1 ]
    # For class 4, use lines 800,000 + [GID x 5500 ... (GID+1) x 5500 -1]
    # my group_id = 100
    LINES_BTW_CLASS = 800000
    c0start = -1
    c0end = -1
    c4start = -1
    c4end = -1
    is_group_exist = False
    #print len(args)

    ## arguments checking
    if(len(args) == 4):
        input_filename = args[1]
        output_filename = args[3]
        try:
            group_id = int(args[2])
            c0start = (group_id * 5500)
            c0end = ((group_id+1) * 5500)-1
            c4start = LINES_BTW_CLASS + c0start
            c4end = LINES_BTW_CLASS + c0end
            is_group_exist = True
        except ValueError:
            print "Parameter (%s) is not a numeric" % args[2]
    elif(len(args) == 3):
        # variables must be strings: input and output
        input_filename = args[1]
        output_filename = args[2]
        group_id = -1
    else:
        print "Wrong number of arguments"
        print "Usage: python twtt.py <input_filename> <group_number> <output_filename>"
        sys.exit()

    print 'Number of arguments:', len(args), 'arguments.'
    print 'Input csv filename: ', input_filename, len(input_filename)
    if(group_id != -1):
        print 'Group ID: ', group_id
    print 'Output filename: ', output_filename

    ####
    # Read CSV file and Write the preprocessing results
    ####
    tagger = NLPlib.NLPlib()  # init tagger
    wfp = open(output_filename, "w")  # file pointer for writing result into output file
    count = 0
    with open(input_filename, 'r+') as f:
        reader = csv.reader(f)
        if(group_id != -1):  # group id is provided
            try:
                for i, row in enumerate(reader):
                    if(i >= c0start and i <= c0end):
                        count = count + 1
                        tweet = Tweet(row)
                        tweet.do_preprocess()
                        tweet.tagging(tagger)
                        result = tweet.printable_tweet()
                        #print result
                        wfp.write(result + "\n")
                    elif(i >= c4start and i <= c4end):
                        count = count + 1
                        tweet = Tweet(row)
                        tweet.do_preprocess()
                        tweet.tagging(tagger)
                        result = tweet.printable_tweet()
                        #print result
                        wfp.write(result + "\n")
            except csv.Error as e:
                sys.exit(" file %s, line %d: %s" % (input_filename, reader.line_num, e))
        else:  # group_id is not provided, use all data
            try:
                for i, row in enumerate(reader):
                    tweet = Tweet(row)
                    tweet.do_preprocess()
                    tweet.tagging(tagger)
                    result = tweet.printable_tweet()
                    #print result
                    wfp.write(result + "\n")
            except csv.Error as e:
                sys.exit(" file %s, line %d: %s" % (input_filename, reader.line_num, e))
    print "Count is %s" % count
    wfp.close()
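# A plausible entry point for the script above (an assumption; the original
# __main__ guard is not shown). args mirrors sys.argv, so it would be run as:
#   python twtt.py <input_filename> <group_number> <output_filename>
if __name__ == "__main__":
    main(sys.argv)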
def _url_detection_needed(tweet: Tweet, has_related_keyword: bool,
                          evaluate_image: TweetEvaluateOption) -> bool:
    if len(tweet.get_urls()) == 0 or evaluate_image == TweetEvaluateOption.NONE:
        return False
    return not has_related_keyword or evaluate_image == TweetEvaluateOption.ALWAYS
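# Reading of the predicate above as a decision table (illustrative only;
# TweetEvaluateOption.NONE and .ALWAYS come from the snippet itself, and
# tweet_with_url is a placeholder fixture that contains at least one URL):
#   no URLs in the tweet             -> False
#   evaluate_image == NONE           -> False
#   no related keyword found         -> True
#   evaluate_image == ALWAYS         -> True
#   otherwise (keyword, not ALWAYS)  -> False
assert _url_detection_needed(tweet_with_url, has_related_keyword=False,
                             evaluate_image=TweetEvaluateOption.ALWAYS) is True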