def add(rq):
    """Create a tweet from the request form and return to the tweet list."""
    author = current_user(rq)
    payload = rq.form()
    payload['user_id'] = author.id
    Tweet(payload).add()
    return redirect('/tweet')
def tweet_command(opts, args):
    """Build a tweet from the CLI args, optionally analyze tone, confirm, and push.

    -y skips the confirmation prompt, -s/--stats attaches a tone analysis,
    -v/--verbose is forwarded to push_tweet.
    """
    confirmed = False
    verbose = False
    cache = io_service.read_cache()
    text = ' '.join(args)
    tweet = Tweet(cache['index'], text, None, cache['last_id'])
    for opt, _arg in opts:
        if opt == "-y":
            confirmed = True
        if opt in ("-s", "--stats"):
            tweet.tone_analysis = tone_analyzer.analyze(text)
        if opt in ("-v", "--verbose"):
            verbose = True
    rendered = tweet.to_string()
    log_tweet(rendered)
    if not confirmed:
        logger.color = Color.Red
        logger.log("Do you want to tweet? (y/n)")
        choice = input()
        if choice not in ("y", "Y"):
            return
    push_tweet(rendered, tweet.in_reply_to_id, verbose)
def update():
    """Apply a tweet edit from the posted form, then return to the index."""
    if Tweet.check_token():
        form = request.form
        Tweet.check_id(form)
        Tweet.update(form)
    # TODO: after the update, should we land on the tweet index page
    # or the detail page?
    return redirect(url_for('.index'))
async def add_tweet(user, msg, tags):
    """
    Persist a user's tweet with its tags inside one transaction, then
    mirror the tweet into redis under each tag name for later fetching.

    :param user: login user json data
    :param msg: tweet message
    :param tags: list of tags
    :return: (tweet json, None) on success, (None, exception) on failure
    """
    try:
        async with db.transaction():
            tweet = Tweet(msg=msg, user_id=user.get('user_id'))
            await tweet.create()
            for name in tags:
                # Reuse an existing Tag row or create one on the fly.
                tag = await get_or_create(Tag, Tag.tag_name == name,
                                          tag_name=name)
                link = TagTweet(tag_id=tag.id, tweet_id=tweet.id)
                await link.create()
                if link:
                    await redis.append_value(tag.tag_name, tweet.as_json())
        return tweet.as_json(), None
    except Exception as ex:
        global_logger.write_log('error', f"error: {ex}")
        return None, ex
def delike(tweet_id):
    """Withdraw the current user's like from a tweet and reload its detail page."""
    viewer = current_user()
    tweet = Tweet.find(tweet_id)
    if Tweet.check_token():
        tweet.delike(viewer.id)
        viewer.delike_tweet(tweet_id)
    return redirect(
        url_for('.detail', tweet_id=tweet_id, token=gg.token[viewer.id]))
def update():
    """Validate the CSRF token, apply the posted tweet edit, then show the index."""
    token = request.args.get('token')
    if Tweet.check_token(token, gg.csrf_tokens):
        form = request.form
        Tweet.check_id(form)
        Tweet.update(form)
    # is a query string really needed on this redirect?
    return redirect(url_for('.index'))
def edit(tweet_id):
    """Render the edit form for one tweet, gated by a CSRF token check."""
    viewer = current_user()
    if Tweet.check_token():
        tweet = Tweet.find(tweet_id)
        Tweet.check_id(id=tweet_id)
        return render_template('tweet/tweet_edit.html',
                               t=tweet,
                               token=gg.token[viewer.id],
                               user=viewer)
def main():
    """Scrape yesterday's tweets for each configured account and index them.

    Builds a one-day [yesterday, today] date window, fetches each account's
    timeline for that window, and saves every tweet as a Tweet document.
    Per-tweet failures are logged and skipped so one bad row does not abort
    the whole run.
    """
    now = datetime.utcnow()
    yesterday = now - timedelta(days=1)
    start_date_str = yesterday.strftime('%Y-%m-%d')
    end_date_str = now.strftime('%Y-%m-%d')
    tweet_accounts = util.read_tweet_accounts()
    for screen_name in tweet_accounts:
        tweets = TweetScraper.get_tweets_from_user_timeline(
            screen_name, start_date_str, end_date_str)
        Tweet.init()
        print("Tweet account name: %s" % str(screen_name))
        print("Total length of tweets: %s" % str(len(tweets)))
        for tweet in tweets:
            try:
                obj = Tweet(meta={'id': tweet['id']})
                obj.screen_name = tweet['screen_name']
                obj.full_text = tweet['full_text']
                obj.created_at = tweet['created_at']
                obj.save()
            except Exception as ex:
                # Was a bare `except: pass`, which also swallowed
                # KeyboardInterrupt/SystemExit and hid every failure.
                # Keep the best-effort behavior but narrow and log it.
                print("Failed to index tweet %s: %s" % (tweet.get('id'), ex))
def add():
    """Create a tweet for the current user after a CSRF token check."""
    author = current_user()
    token = request.args.get('token')
    if Tweet.check_token(token, gg.csrf_tokens):
        form = request.form
        Tweet.new(form, user_id=author.id, user_name=author.username)
    # is a query string really needed on this redirect?
    return redirect(url_for('.index'))
def edit(tweet_id):
    """Show the edit form for a tweet the current user owns; otherwise go home."""
    viewer = current_user()
    token = request.args.get('token')
    if Tweet.check_token(token, gg.csrf_tokens):
        tweet = Tweet.find(tweet_id)
        if viewer.id == tweet.user_id:
            page = render_template('tweet_edit.html',
                                   tweet_id=tweet.id,
                                   tweet_content=tweet.content,
                                   token=token)
            return make_response(page)
    return redirect(url_for('.index'))
def tweets_and_boards(board_id, current_page):
    """Return (one page of tweets, all boards, total page count).

    A board_id of -1 means "every board"; each page holds 4 tweets.
    """
    import math
    if board_id == -1:
        tweets = Tweet.find_all()
    else:
        tweets = Tweet.find_all(board_id=board_id)
    per_page = 4
    pages = math.ceil(len(tweets) / per_page)
    start = (current_page - 1) * per_page
    if current_page < pages:
        tweets = tweets[start:start + per_page]
    else:
        # Last (or out-of-range) page: take everything that remains.
        tweets = tweets[start:]
    boards = Board.find_all()
    return tweets, boards, pages
def all_tweets():
    """
    This method handles All Tweets View.
    :return: If "GET" rendering template "all_tweets",
             If "POST" adding new Tweet to db and redirecting back to
             all_tweets.
    """
    # .get() avoids a KeyError when the session has no 'logged_in' key yet
    # (e.g. a first visit before any login); the original indexed it directly.
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    if request.method == "GET":
        cnx = connect_db()
        tweets = Tweet.load_all_tweets(cnx.cursor())
        return render_template('all_tweets.html', tweets=tweets)
    elif request.method == "POST":
        tweet = Tweet()
        tweet.user_id = session['user_id']
        tweet.text = request.form['new_tweet']
        tweet.creation_date = datetime.now()
        cnx = connect_db()
        tweet.add_tweet(cnx.cursor())
        cnx.commit()
        return redirect(url_for('all_tweets'))
def rerank_results(self, results, user_vector, user_gender, user_location,
                   user_sentiment):
    """
    reranks the results of a query by using the similarity between the user
    thematic vector and the vector from the tweets
    :param results: the documents resulting from a query
    :param user_vector: the thematic vector of a user
    :param user_gender: the gender of a user
    :param user_location: the location of a user
    :param user_sentiment: the sentiment of a user
    :return: the reranked list of documents
    """
    reranked = []
    # Encode the user profile once; reshape(1, -1) makes it a single-row
    # matrix as cosine_similarity expects.
    user_vec = ProfileOneHotEncoder.add_info_to_vec(
        user_vector, user_gender, user_location,
        user_sentiment).reshape(1, -1)
    # Iterate the results directly instead of `for i in range(len(results))`.
    for result in results:
        doc_infos = Tweet.load(int(result['TweetID']))
        if doc_infos is None:
            # Unknown tweet: keep the raw result, ranked last (similarity 0).
            reranked.append({'doc': result, 'sim': 0.})
        else:
            doc_vector = ProfileOneHotEncoder.add_info_to_vec(
                doc_infos.vector, doc_infos.gender, doc_infos.country,
                doc_infos.sentiment).reshape(1, -1)
            sim = cosine_similarity(user_vec, doc_vector)
            reranked.append({'doc': doc_infos, 'sim': sim[0][0]})
    reranked.sort(key=lambda k: k['sim'], reverse=True)
    return [x['doc'] for x in reranked]
def tweet_by_id(tweet_id):
    """
    This method handles Tweet by its Id View.
    :param tweet_id: Id of Tweet for which we want to display Comments.
    :return: If "GET" rendering template "tweet_by_id" and displays all
             comments for that Tweet, If "POST" adds new Comment to db and
             redirecting back to tweet_by_id.
    """
    # .get() avoids a KeyError when 'logged_in' was never set on the session
    # (first visit before login); the original indexed the key directly.
    if not session.get('logged_in'):
        return redirect(url_for('login'))
    if request.method == "GET":
        cnx = connect_db()
        tweet = Tweet.load_tweet_by_id(cnx.cursor(), tweet_id)
        user = User.load_user_by_id(cnx.cursor(), tweet.user_id)
        comments = Comment.load_comments_by_tweet_id(cnx.cursor(), tweet_id)
        return render_template('tweet_by_id.html', tweet=tweet, user=user,
                               comments=comments)
    elif request.method == "POST":
        comment = Comment()
        comment.user_id = session['user_id']
        comment.tweet_id = tweet_id
        comment.text = request.form['new_comment']
        comment.creation_date = datetime.now()
        cnx = connect_db()
        comment.add_comment(cnx.cursor())
        cnx.commit()
        # NOTE(review): relative redirect target, unlike the url_for()
        # used elsewhere — kept as-is.
        return redirect(('tweet_by_id/{}'.format(tweet_id)))
def index():
    """
    Show every non-deleted tweet belonging to the requested (or current) user.
    :return: the rendered tweet index page
    """
    user_id = int(request.args.get('user_id', -1))
    if user_id == -1:
        user_id = current_user().id
    user = User.find(user_id)
    if user is None:
        return redirect(url_for('user.login'))
    # Reset the per-request CSRF state so every render of the index
    # hands out a fresh token mapped to this user's id.
    gg.delete_value()
    gg.set_value(user.id)
    log('from tweet', gg.csrf_tokens, gg.token)
    tweets = Tweet.find_all(user_id=user.id, deleted=False)
    page = render_template('tweet_index.html',
                           tweets=tweets,
                           user=user,
                           token=gg.token)
    return make_response(page)
def suicide():
    """Kill one random living player and tweet about it.

    Requires at least 5 players alive; in district matches the victim's
    district may be destroyed as a consequence. Returns True if a player
    died, False otherwise.
    """
    alive = get_alive_players()
    if len(alive) < 5:
        return False
    victim = random.choice(alive)
    kill_player(victim)
    tweet = Tweet()
    tweet.type = TweetType.somebody_suicided
    tweet.place = victim.location
    tweet.player = victim
    write_tweet(tweet)
    if config.general.match_type == MatchType.districts:
        destroy_tweet = destroy_district_if_needed(victim.district)
        if destroy_tweet is not None:
            write_tweet(destroy_tweet)
    return True
def main_page():
    """Render a bare-bones page listing every tweet (GET only).

    NOTE(review): the DB credentials are hard-coded placeholders; move
    them to configuration before deploying.
    """
    if request.method != "GET":
        # The original fell through a bare `pass` here, returning None.
        return None
    cnx = connect(user="******", password="******",
                  host="localhost", database="twitter")
    print("Connected")
    cursor = cnx.cursor()
    all_tweets = Tweet.load_all_tweets(cursor)
    # Build the rows first, then splice them INSIDE the document body —
    # the original appended them after </html>, producing invalid markup.
    rows = "".join(
        """
                {} {} {}
                <br>""".format(tweet.user_id, tweet.text, tweet.creation_date)
        for tweet in all_tweets)
    return """
    <!doctype html>
    <html>
        <head>
            <meta charset="utf-8">
        </head>
        <body>
            <form method="POST">
                Lista tweetow:
                <br>{}
            </form>
        </body>
    </html>
    """.format(rows)
def post(self):
    """Create a tweet for an authenticated, token-authorized user.

    Expects a JSON body with 'username', 'token', 'content' and 'tag'.
    Aborts with 400 on a non-JSON body, 401 when the user or their
    authorization record is missing, and 403 on a token mismatch.
    """
    if not request.is_json:
        abort(400, message="Could not parse JSON")
    content = request.get_json()
    try:
        user = User.query.filter_by(username=content['username']).one()
    except NoResultFound:
        abort(401, message="No user found")
    try:
        auth = UserAuthorization.query.filter_by(user_id=user.id).one()
    except NoResultFound:
        abort(401, message="No user authorization found")
    # NOTE(review): `!=` on a secret token is not constant-time; consider
    # hmac.compare_digest if timing side channels matter here.
    if auth.token != content['token']:
        abort(403, message="Invalid token found")
    tweet = Tweet(
        user=user,
        content=content['content'],
        tag=content['tag'],
    )
    db.session.add(tweet)
    db.session.commit()
    return {'tweet': repr(tweet)}
def add(request):
    """Create a tweet for the logged-in user and return to their tweet index."""
    author = current_user(request)
    form = request.form()
    Tweet.new(form, user_id=author.id, user_name=author.username)
    # is a query string really needed on this redirect?
    return redirect('/tweet/index?user_id={}'.format(author.id))
def get50lastTweetsOfUserIn(listId):
    """return the last 50 tweets (chronologicaly) of a list of users"""
    # Filter on authorId membership in the given id list, newest first.
    query = Tweet.query(Tweet.authorId.IN(listId))
    return query.order(-Tweet.dateTime).fetch(50)
def delete(tweet_id):
    """Delete a tweet owned by the current user and soft-delete its comments."""
    viewer = current_user()
    token = request.args.get('token')
    if Tweet.check_token(token, gg.csrf_tokens):
        tweet = Tweet.find(tweet_id)
        if viewer.id == tweet.user_id:
            # Removing only the tweet would leave its comments visible,
            # so flag every attached comment as deleted as well.
            tweet.remove(tweet_id)
            for comment in tweet.comments():
                comment.deleted = True
                comment.save()
    return redirect(url_for('.index'))
def pick_special(player, item):
    """Apply a special item's effect to `player`, share it with district
    neighbours in district matches, consume the item, and tweet about it.

    Always returns True.
    """
    # Map each special type to the player attribute it switches on; the
    # four duplicated if-chains of the original collapse into one lookup.
    effect_attr = {
        SpecialType.injure_immunity: 'injure_immunity',
        SpecialType.monster_immunity: 'monster_immunity',
        SpecialType.infection_immunity: 'infection_immunity',
        SpecialType.movement_boost: 'movement_boost',
    }.get(item.special)
    if effect_attr is not None:
        setattr(player, effect_attr, True)
    if config.general.match_type == MatchType.districts:
        others_in_district = [
            x for x in get_alive_players()
            if x.district.name == player.district.name
            and x.get_name() != player.get_name()
        ]
        if effect_attr is not None:
            # Everyone alive in the same district inherits the effect.
            for neighbour in others_in_district:
                setattr(neighbour, effect_attr, getattr(player, effect_attr))
    location_items = player.location.items
    location_items.pop(location_items.index(item))
    tweet = Tweet()
    tweet.type = TweetType.somebody_got_special
    tweet.place = player.location
    tweet.item = item
    tweet.player = player
    if (config.general.match_type == MatchType.districts
            and len(others_in_district) > 0):
        tweet.player_list = others_in_district
    write_tweet(tweet)
    return True
def revive():
    """Bring one random dead player back, placing them somewhere intact.

    In district matches a destroyed home district is rebuilt for the
    revival; otherwise the player respawns at a random intact place.
    Falls back to suicide() when nobody is dead.
    """
    dead = get_dead_players()
    if not dead:
        # NOTE(review): the original drops suicide()'s boolean result, so
        # this path returns None; that behavior is preserved.
        suicide()
        return
    player = random.choice(dead)
    player.is_alive = True
    rebuild_district = (config.general.match_type == MatchType.districts
                        and player.district.destroyed)
    if rebuild_district:
        place = player.district
        place.destroyed = False
    else:
        place = player.location
        while place.destroyed:
            place = random.choice(place_list)
    tweet = Tweet()
    player.location = place
    # Revived players catch any infection already present at the place.
    for occupant in place.players:
        if occupant.infected:
            player.infected = True
            tweet.there_was_infection = True
    place.players.append(player)
    tweet.type = TweetType.somebody_revived
    tweet.place = player.location
    tweet.player = player
    tweet.double = rebuild_district
    write_tweet(tweet)
    return True
def wrapper(rq):
    """Run route_func only when the tweet named by ?id= belongs to the caller."""
    tweet_id = int(rq.query.get('id', -1))
    tweet = Tweet.find(tweet_id)
    visitor = current_user(rq)
    if tweet is not None and tweet.user_id == visitor.id:
        return route_func(rq)
    return redirect('/login')
def load_data(mabed_file):
    """Load MABED event results, matching each event against stored tweets.

    Results are cached with pickle under a per-file tmp path; on a cache
    hit the pickle is returned directly, otherwise the events JSON is
    matched against Tweet documents and the result is cached.
    """
    event_results = None
    mabed_filename = get_file_name(mabed_file)
    # NOTE(review): get_file_name is applied twice (once above, once here)
    # — confirm whether that double application is intentional.
    tmp_file = TMP_EVENT_DATA_TEMPLATE % get_file_name(mabed_filename)
    if os.path.isfile(tmp_file):
        # Cache hit: reuse the previously pickled results.
        with open(tmp_file, 'rb') as tmp_result:
            event_results = pickle.load(tmp_result)
    else:
        with open(mabed_file, 'r') as events_in:
            events = json.load(events_in)
        if not events:
            sys.exit(1)
        event_results = []
        for event_no, event in enumerate(events):
            print('event number %s ' % event_no)
            start_ts = _get_timestamp_from_date(event['start_date'])
            end_ts = _get_timestamp_from_date(event['end_date'])
            # NOTE(review): the window extends 61 days past end_date
            # (86400 * 61 seconds) — confirm this offset is intended.
            tweets = Tweet.objects(
                created_at__gte=datetime.datetime.fromtimestamp(start_ts),
                created_at__lte=datetime.datetime.fromtimestamp(
                    end_ts + 86400 * 61))
            matched_tweets = []
            for tweet in tweets:
                main_words_found = []
                related_words_found = []
                for main_word in event['main_words']:
                    if main_word in tweet.text:
                        main_words_found.append(main_word)
                for related_word in event['related_words']:
                    if related_word['word'] in tweet.text:
                        related_words_found.append(related_word['word'])
                # Keep the tweet when more than 20% of the event's words
                # (main + related) occur in its text.
                if len(event['main_words'] + event['related_words']
                       ) * 0.2 < len(related_words_found + main_words_found):
                    matched_tweets.append({
                        'main_word': main_words_found,
                        'related_word': related_words_found,
                        'tweet_id': tweet.twitter_id
                    })
            event_results.append({
                'event': event,
                'matched_tweets': matched_tweets
            })
        # Cache the computed results for the next call.
        with open(tmp_file, 'wb') as tmp_result:
            pickle.dump(event_results, tmp_result)
    return event_results
def index(rq):
    """Render the home page with all tweets; anonymous visitors get a placeholder name."""
    user = current_user(rq)
    username = '******' if user is None else user.username
    tweets = Tweet.get_all()
    body = template(env, 'index.html', username=username, tweets=tweets)
    return make_response_msg(body=body)
def edit(request):
    """Show the edit page for a tweet the requester owns, else bounce to their index."""
    viewer = current_user(request)
    tweet = Tweet.find(int(request.query.get('id', -1)))
    if viewer.id != tweet.user_id:
        return redirect('/tweet/index?user_id={}'.format(viewer.id))
    body = template('tweet_edit.html', tweet_id=tweet.id,
                    tweet_content=tweet.content)
    return http_response(body)
def test_command(opts, args):
    """Dry-run variant of tweeting: log the rendered tweet; push only with -t.

    -s/--stats attaches a tone analysis, -v/--verbose is forwarded to
    push_tweet when -t is given.
    """
    push_flag = False
    verbose = False
    cache = io_service.read_cache()
    text = ' '.join(args)
    tweet = Tweet(cache['index'], text, None, cache['last_id'])
    for opt, _arg in opts:
        if opt in ("-s", "--stats"):
            tweet.tone_analysis = tone_analyzer.analyze(text)
        elif opt == "-t":
            push_flag = True
        elif opt in ("-v", "--verbose"):
            verbose = True
    rendered = tweet.to_string()
    log_tweet(rendered)
    if push_flag:
        push_tweet(rendered, tweet.in_reply_to_id, verbose)
def add():
    """Create a tweet when the form carries a title; otherwise show the form.

    Gated by a CSRF token check.
    """
    author = current_user()
    # board_id only decides which index page we bounce back to; the form
    # itself is expected to carry its own board_id next to title/content.
    board_id = int(request.args.get('board_id', -1))
    if Tweet.check_token():
        form = request.form
        if form.get('title'):
            Tweet.new(form, user_id=author.id, user_name=author.username)
            return redirect(url_for('.index', board_id=board_id))
        else:
            boards = Board.find_all()
            return render_template('tweet/tweet_new.html',
                                   token=gg.token[author.id],
                                   bs=boards,
                                   board_id=board_id,
                                   user=author)
def delete(request):
    """Delete a tweet the requester owns, soft-deleting all of its comments."""
    viewer = current_user(request)
    tweet_id = int(request.query.get('id'))
    tweet = Tweet.find(tweet_id)
    if viewer.id == tweet.user_id:
        # Removing only the tweet would leave its comments visible, so
        # flag every attached comment as deleted as well.
        tweet.remove(tweet_id)
        for comment in tweet.comments():
            comment.deleted = True
            comment.save()
    # is a query string really needed on this redirect?
    return redirect('/tweet/index?user_id={}'.format(viewer.id))
# Fragment of a CLI argument-dispatch chain (the opening `if` on l_argv is
# outside this view). Three-argument form: <account_id> <instrument_id> —
# builds a demo promoted-tweet campaign ending three days from now.
elif ( l_argv == 3 ):
    # Campaign end time: three days out, formatted as ISO-8601 with time.
    date = datetime.date.today() + datetime.timedelta(days=3)
    date = date.strftime('%Y-%m-%dT%XZ')
    account_id = sys.argv[1]
    instrument_id = sys.argv[2]
    account = Account.read( id=account_id )
    instrument = account.read_funding_instrument( id=instrument_id )
    # NOTE(review): 'campaing_test' looks like a typo for 'campaign_test';
    # left untouched since it may be an existing external identifier.
    campaign = instrument.init_campaign( 'campaing_test', date, 500000000, 50000000 )
    print( campaign )
    line = campaign.init_line( 1500000, 'PROMOTED_TWEET', 'ALL_ON_TWITTER' )
    print( line )
    location = Target_location.get_city( 'San Francisco' )[0]
    # NOTE(review): `m` is not defined in this fragment — presumably a mock
    # from the enclosing test scope; confirm before running standalone.
    print ( m.mock_calls )
    phrase = Target_phrase( value='grumpy cat' )
    line.add_target( location )
    line.add_target( phrase )
    tweet = Tweet( account=account )
    tweet.status = 'Hola mundo'
    tweet.save()
    line.add_tweet_promoted( tweet )
# --- Tweet-harvesting script fragment (Python 2: statement prints and
# `except Exc, e` syntax). OAUTH_TOKEN / OAUTH_SECRET come from elsewhere
# in the file; the loop body is truncated at the end of this view. ---
CONSUMER_KEY = "YOUR_CONSUMER_KEY"        # placeholder credential
CONSUMER_SECRET = "YOUR_CONSUMER_SECRET"  # placeholder credential
t = Twitter(
    auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
)
employees = TwitterEmployee.find()
num_employees = employees.count()
num_tweets = 0
curr_employee = 0
# Per-screen-name pagination cursor (max_id for the next timeline page).
max_id_dict = {}

# Clear out all existing tweets.
Tweet.remove()

print "Starting Process at: ", time.localtime()
while num_tweets < 100000:
    # cycle over employees
    employee = employees[curr_employee % num_employees]
    sn = employee['screen_name']
    max_id = max_id_dict.get(sn,None)
    try:
        # First page has no max_id; later pages resume from the cursor.
        if max_id:
            tweets = t.statuses.user_timeline(screen_name=sn,count=200,max_id=max_id)
        else:
            tweets = t.statuses.user_timeline(screen_name=sn, count=200)
    except TwitterHTTPError, e:
        print "Error: ", e
        # NOTE(review): placed inside the except per the flattened source —
        # skips to the next employee on an HTTP error; confirm it is not the
        # loop-end increment from the truncated remainder of the loop body.
        curr_employee += 1
def extract_and_store_tweets(csvfile, nlp, minetweet):
    # Parse a CSV export of tweets, extract entities and keywords for each
    # row, and persist every UTF-8-valid row as a Tweet document, printing
    # summary statistics at the end. (Python 2: statement prints, str.decode.)
    print
    print "Start processing %s ..." % csvfile
    print "*" * 20
    start = time()  # measure time

    # LOGGING counters for the end-of-run stats block below
    tweets_count = 0
    mentions_count = 0
    urls_count = 0
    hashtags_count = 0
    tags_count = 0
    unvalid_tweets = 0
    i = 1  # iterator to remember row number on csv
    # NOTE(review): `i` is never incremented in this view, so every tweet
    # gets row == 1 — confirm whether an increment was lost.

    with open(csvfile, 'r') as f:
        # print 'Processing data...'
        next(f)  # skip csv header
        data = csv.reader(f)
        # one row at a time
        for row in data:
            # create Tweet object
            t = Tweet()
            # Populate Tweet from the export's fixed column order.
            t.mid = row[0]
            t.retweetFromPostId = row[1]
            t.userId = row[2]
            t.retweetFromUserId = row[3]
            t.source = row[4]
            t.hasImage = row[5]
            t.txt = row[6]
            t.geo = row[7]
            t.created_at = row[8]
            t.deleted_last_seen = row[9]
            t.permission_denied = row[10]

            # Extract tweet entities
            mentions, urls, hashtags, clean = minetweet.extract_tweet_entities(t.txt)
            # add to Tweet
            t.mentions = mentions
            t.urls = urls
            t.hashtags = hashtags
            clean = clean  # text-only version of the tweet for NLP

            # Extract keywords
            dico = nlp.extract_dictionary(clean)
            # remove stopwords and store clean dico
            t.dico = nlp.remove_stopwords(dico)

            # extract entities
            # TODO : ignore stopwords
            # t.entities=nlp.extract_named_entities_from_dico(t.dico)

            # Some count for stats
            mentions_count += len(mentions)
            urls_count += len(urls)
            hashtags_count += len(hashtags)
            # NOTE(review): t.entities is only set by the commented-out call
            # above — assumes the Tweet model provides a default; confirm.
            tags_count += len(t.entities)
            t.row = i

            # Only save rows whose text is valid UTF-8.
            valid_utf8 = True
            try:
                t.txt.decode('utf-8')
            except UnicodeDecodeError:
                unvalid_tweets += 1
                valid_utf8 = False
                print ' bad encoding : tweet ', t.mid
                # pprint(t)

            if valid_utf8 is True:
                try:
                    t.save()
                    tweets_count += 1
                except bson.errors.InvalidStringData:
                    # Mongo/BSON rejected the string despite the UTF-8 check.
                    print ' bad encoding : tweet ', t.mid
                    # pprint(t)

    # LOG: end-of-run summary statistics
    print
    print "-" * 10
    print " mentions_count : %d " % mentions_count
    print " urls_count : %d " % urls_count
    print " hashtags_count : %d " % hashtags_count
    print " unvalid tweets : %d " % unvalid_tweets
    print " TOTAL tweet entities : %d " % (mentions_count + urls_count + hashtags_count)
    print " TOTAL named entities (NER): %d " % tags_count
    print
    print "-" * 10
    print "TOTAL tweets processed : %d" % tweets_count
    print " done in \n%.3fs" % (time() - start)
    print