def tweet(request):
    """Tweet page: save a new tweet on POST; on GET show the user's latest
    tweet plus an empty form.

    Fixes: render_to_response takes (template, dictionary,
    context_instance=...) — the original passed a RequestContext as the data
    dict and another as a positional third argument. Also filters tweets in
    the database instead of scanning Tweet.objects.all() in Python.
    """
    if request.method == 'POST':
        form = TweetForm(request.POST)
        if form.is_valid():
            twit = Tweet()
            status = twit.tweet = form.cleaned_data['tweet']
            twit.name = request.user.username
            twit.save()
            return render_to_response('tweet.html', {'status': status},
                                      context_instance=RequestContext(request))
        return render_to_response('tweet.html',
                                  {'request': RequestContext(request)})
    # GET: let the database do the filtering by author.
    status = ''
    for item in Tweet.objects.filter(name=request.user.username):
        # Preserves original semantics: keep the *last* matching tweet.
        status = item.tweet
    form = TweetForm()
    form.name = request.user
    return render_to_response('tweet.html',
                              {'form': form, 'status': status},
                              context_instance=RequestContext(request))
def add_tweet():
    """Create a Tweet from the posted form fields and return it as JSON."""
    tweet = Tweet()
    tweet.text = request.form['text']
    tweet.created_at = request.form['created_at']
    db.session.add(tweet)
    db.session.commit()
    return tweet_schema.jsonify(tweet)
def post(self):
    """Create a tweet for an authenticated user.

    Validates the payload, checks credentials, enforces the 140-character
    limit and rejects a couple of banned hashtags.

    Bug fix: the "user does not exist" / "incorrect password" responses
    claimed 400/403 in the body but were sent with HTTP 200; the real
    status codes are now returned as well.
    """
    data = request.json
    validation_error = validate_user(data)
    if validation_error is not None:
        return validation_error
    user = get_user(data['user']['username'])
    if user is None:
        return jsonify(status=400, message="user does not exist"), 400
    # NOTE(review): plaintext password comparison — should use a hash.
    if user.password != data['user']['password']:
        return jsonify(status=403, message="incorrect password"), 403
    text = data['text']
    if len(text) > 140:
        return jsonify(status=400, message="tweet too long"), 400
    if len(text) == 0:
        return jsonify(status=400, message="empty tweet"), 400
    for word in [u"#arsenal", u"#denfølelsen"]:
        if word in text.lower():
            abort(418)  # I am a teapot
    tweet = Tweet(user, text)
    db['tweets'].append(tweet)
    return jsonify(tweet.serialize()), 200
def post(self):
    """Create a tweet for an authenticated user (duplicate of the sibling
    handler; kept in sync).

    Bug fix: the "user does not exist" / "incorrect password" responses
    claimed 400/403 in the body but were sent with HTTP 200.
    """
    data = request.json
    validation_error = validate_user(data)
    if validation_error is not None:
        return validation_error
    user = get_user(data['user']['username'])
    if user is None:
        return jsonify(status=400, message="user does not exist"), 400
    # NOTE(review): plaintext password comparison — should use a hash.
    if user.password != data['user']['password']:
        return jsonify(status=403, message="incorrect password"), 403
    text = data['text']
    if len(text) > 140:
        return jsonify(status=400, message="tweet too long"), 400
    if len(text) == 0:
        return jsonify(status=400, message="empty tweet"), 400
    for word in [u"#arsenal", u"#denfølelsen"]:
        if word in text.lower():
            abort(418)  # I am a teapot
    tweet = Tweet(user, text)
    db['tweets'].append(tweet)
    return jsonify(tweet.serialize()), 200
def feed(request, username):
    """Build an HTML digest of a user's latest 5 tweets (via a twitrss.me
    RSS feed) and persist any unseen users/tweets to the database.

    NOTE(review): this function was redacted before check-in — the `******`
    below is not valid Python, and neither `out` nor `dicc` is defined here
    (the `out = ""` initialiser and the feedparser.parse(url) call were
    presumably scrubbed along with the URL tail). Restore before use.
    """
    url = "https://twitrss.me/twitter_user_to_rss/?user="******""
    for number in range(5):
        # Tweet title text.
        out += dicc.entries[number].title + "<br>"
        # Collect the tweet's URLs, if any.
        urls = dicc.entries[number].title.split()
        for i in urls:
            if i.startswith("http://") or i.startswith("https://"):
                i = i.split('&')[0]
                out += "<li><a href=" + i + ">" + i + "</a></li>"
                # First <p> element of the linked page.
                soup = BeautifulSoup(urllib.urlopen(i).read())
                out += str(soup.p).decode('utf8')
                # First <img> element of the linked page.
                out += str(soup.img).decode('utf8') + "<br><br>"
        # Check for and store the tweet's author.
        user = dicc.entries[number].title.split(':')[0]
        try:
            p = User.objects.get(name=user)
        except ObjectDoesNotExist:
            p = User(name=user)
            p.save()
        # Check for and store the tweet itself.
        try:
            t = Tweet.objects.get(content=dicc.entries[number].title)
        except ObjectDoesNotExist:
            t = Tweet(content=dicc.entries[number].title, url= dicc.entries[number].link, name=p)
            t.save()
    return HttpResponse(out)
def tweet(request):
    """Tweet page: save a new tweet on POST; on GET show the user's latest
    tweet plus an empty form.

    Fixes: render_to_response takes (template, dictionary,
    context_instance=...) — the original passed a RequestContext as the data
    dict and another as a positional third argument. Also filters by author
    in the database instead of scanning Tweet.objects.all() in Python.
    """
    if request.method == 'POST':
        form = TweetForm(request.POST)
        if form.is_valid():
            twit = Tweet()
            status = twit.tweet = form.cleaned_data['tweet']
            twit.name = request.user.username
            twit.save()
            return render_to_response('tweet.html', {'status': status},
                                      context_instance=RequestContext(request))
        return render_to_response('tweet.html',
                                  {'request': RequestContext(request)})
    # GET: database-side filter by author.
    status = ''
    for item in Tweet.objects.filter(name=request.user.username):
        # Preserves original semantics: keep the *last* matching tweet.
        status = item.tweet
    form = TweetForm()
    form.name = request.user
    return render_to_response('tweet.html',
                              {'form': form, 'status': status},
                              context_instance=RequestContext(request))
def search_tweets(search_query):
    """Return AND- and OR-matched tweets for *search_query* as JSON.

    Performance fix: the original ran Tweet.search() twice with identical
    arguments; the search now runs once and both result sets are reused.
    """
    terms = search_query.replace("&", " ").split()
    search_results = Tweet.search(terms)
    return jsonify({
        search_query: raw_to_json(search_results[0]),
        search_query + "_or": raw_to_json(search_results[1]),
    })
def get(self): self.response.write(self.request.get("keyword")) # Prompt for login credentials and setup stream object consumer_key = "" consumer_secret = "" access_token = "" access_token_secret = "" auth = tweepy.auth.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) keyword = self.request.get("keyword") count =0 search_results = api.search(q=keyword, result_type = "recent", count=100) for tweet in search_results: if tweet.place != None: count+=1 tweet.text.replace("\n", " "); text = tweet.text.encode('utf8') print text #logging.info(tweet.text) latitude = tweet.place.bounding_box.coordinates[0][0][0] longtitude = tweet.place.bounding_box.coordinates[0][0][1] e = Tweet(search_key=keyword, text=tweet.text, latitude= latitude, longtitude= longtitude) e.put()
def fromUser(self, screen_name, tweets_number=10, is_bot=False):
    """Fetch a user's recent timeline and persist each status as a Tweet."""
    user = self.createUser(screen_name, is_bot)
    timeline = self.twitter_client.user_timeline(screen_name=screen_name,
                                                 count=tweets_number)
    for status in timeline:
        raw = status._json
        # Screen names of every account mentioned in the tweet.
        mentioned = [m['screen_name']
                     for m in raw['entities']['user_mentions']]
        Tweet.create(
            user=user,
            # ASCII-fold text and date for storage.
            text=unicodedata.normalize('NFKD', raw['text']).encode('ascii', 'ignore'),
            date=unicodedata.normalize('NFKD', raw['created_at']).encode('ascii', 'ignore'),
            source=status.source,
            mentions=",".join(mentioned),
        )
def saveTweet(self, username, tweetContent, publishedDate):
    """Persist one tweet row (author, text, publish date) via the Tweet model."""
    Tweet(username=username, text=tweetContent, date=publishedDate).save()
def fromUser(self, screen_name, tweets_number=10, is_bot=False):
    """Download *tweets_number* recent statuses for *screen_name* and store
    each one as a Tweet record."""
    author = self.createUser(screen_name, is_bot)
    statuses = self.twitter_client.user_timeline(
        screen_name=screen_name, count=tweets_number)
    for status in statuses:
        payload = status._json
        mention_names = []
        for mention in payload['entities']['user_mentions']:
            mention_names.append(mention['screen_name'])
        # ASCII-fold unicode text/date before storing.
        folded_text = unicodedata.normalize(
            'NFKD', payload['text']).encode('ascii', 'ignore')
        folded_date = unicodedata.normalize(
            'NFKD', payload['created_at']).encode('ascii', 'ignore')
        Tweet.create(user=author,
                     text=folded_text,
                     date=folded_date,
                     source=status.source,
                     mentions=",".join(mention_names))
def get_context_data(self, *args, **kwargs):
    """Train a classifier on already-verified tweets, then classify and
    store a sample of fresh geotagged tweets for later verification."""
    context = super(TestTweetsView, self).get_context_data(*args, **kwargs)
    # Training data: tweets whose actual classification was verified in
    # previous test rounds.
    tourism_train = Tweet.objects.filter(
        classified=True, actual_classification='tourism')
    nontourism_train = Tweet.objects.filter(
        classified=True, actual_classification='nontourism')
    classifier = train_db(tourism_train, nontourism_train)['classifier']
    # Pull a random sample of raw tweets from the data file.
    sample = self.randomize_tweets(
        'classifier/data/2015-03-06.happydata.txt', 10)
    tested = []
    for raw in sample:
        # Only keep tweets we have not stored yet and that carry coordinates.
        if Tweet.objects.filter(tweet_id=raw['id']) or not raw['coordinates']:
            continue
        tweet_id = raw['id']
        user = raw['user']['name'].encode('utf-8')
        lat = raw['coordinates']['coordinates'][1]
        lng = raw['coordinates']['coordinates'][0]
        text = raw['text'].encode('utf-8')
        if not (tweet_id and user and lat and lng and text):
            continue
        # Classify, then persist with the (unverified) classification.
        label = classifier.classify(
            feature_extractor_lda_tripadvisor_top_words_weights(text))
        record = Tweet(tweet_id=tweet_id, user=user, lat=lat, lng=lng,
                       text=text, classification=label)
        record.save()
        tested.append(record)
    context['tweets'] = tested
    return context
def extract_tweet_and_user(self, fetched_tweet):
    """Copy matching attributes from a fetched tweepy status onto a Tweet
    model and build a User model from the embedded tweepy.User.

    Returns (tweet, user); user is None when no tweepy.User was found.

    Bug fix: the original compared the attribute *value* to the string
    "extended_tweet"; it is the attribute *name* that identifies the
    extended payload carrying 'full_text'.
    """
    tweet = Tweet()
    user = None
    for attribute_string in dir(fetched_tweet):
        if attribute_string.startswith("__"):
            continue
        fetched_tweet_field = getattr(fetched_tweet, attribute_string)
        if attribute_string == "extended_tweet":
            setattr(tweet, "extended_text", fetched_tweet_field["full_text"])
        try:
            # Only copy attributes the Tweet model actually declares.
            getattr(Tweet, attribute_string)
            setattr(tweet, attribute_string, fetched_tweet_field)
        except AttributeError:
            pass
        if isinstance(fetched_tweet_field, tweepy.User) and not user:
            user = User()
            tweet.user_id = fetched_tweet_field.id
            user.twitter_user_id = fetched_tweet_field.id
            user.name = fetched_tweet_field.name
            user.screen_name = fetched_tweet_field.screen_name
            user.statuses_count = fetched_tweet_field.statuses_count
            user.followers_count = fetched_tweet_field.followers_count
            user.friends_count = fetched_tweet_field.friends_count
            user.location = fetched_tweet_field.location
    return tweet, user
def do(self):
    """Cronjob: scan our own home-timeline posts for everycolorbot retweets,
    mark the matching colors as tweeted, and record unseen tweets.

    Bug fix: the failure log formatted `self.screen_name` — an attribute
    that does not exist here (screen_name is a local set inside the try) —
    into a message with a single placeholder, which also dropped the
    captured traceback.
    """
    logger.info("Initiating cronjob: {}".format(self.code))
    try:
        auth = tweepy.OAuthHandler(self._tak, self._tas)
        auth.set_access_token(self._tat, self._tats)
        api = tweepy.API(auth)
        screen_name = api.me().screen_name
        timeline = api.home_timeline()
    except Exception:
        e = traceback.format_exc()
        logger.error("Could not get home timeline. Error: {}".format(e))
        return False
    for t in timeline:
        # Only act on our own tweets.
        if t.user.screen_name == screen_name:
            msg = t.text
            if msg.startswith('RT @everycolorbot'):
                # Third token is the color hex, quoted.
                chex = msg.split()[2].strip("\"")
                evcinst = EveryColorBotTweet.objects.get_or_none(color__hex=chex)
                if evcinst is not None:
                    evcinst.tweeted = True
                    evcinst.save()
                inst = Tweet.objects.get_or_none(message=msg)
                if inst is None:
                    logger.info('Cronjob encountered unsaved tweet: "{}" Saving it to database.'.format(msg))
                    inst = Tweet(color_code=chex, message=msg)
                    inst.save()
def schedule(request):
    """Store a tweet for later posting; callers must supply the shared
    auth key in the POST data."""
    if request.POST.get('auth') != settings.TWITGHOST_AUTH_KEY:
        logging.error("Unauthorized access")
        return HttpResponse('forbidden', status=403)
    Tweet(tweet=request.POST['tweet']).save()
    return HttpResponse('ok', mimetype="text/xml")
def create(self, status):
    """Insert *status* as a tweet; bump the insert counter on success."""
    inserted = Tweet().create(status)
    # Count inserted tweet and store to variable.
    if inserted is True:
        self.count += 1
def update_tweet():
    """Update a tweet's text by id and echo the new values as JSON."""
    updated = Tweet()
    updated.id = request.form['id']
    updated.text = request.form['text']
    # Apply the change with a direct UPDATE query.
    db.session.query(Tweet).filter_by(id=updated.id).update(
        {"text": updated.text})
    db.session.commit()
    return tweet_schema.jsonify(updated)
def add(request):
    """Create a tweet owned by the current user, then return to their feed."""
    author = current_user(request)
    # Build the tweet from the submitted form.
    tweet = Tweet(request.form())
    tweet.user_id = author.id
    tweet.save()
    return redirect('/tweet/index?user_id={}'.format(author.id))
def create_tweet():
    """Create a tweet from the posted form for the logged-in user."""
    # Require a logged-in session.
    if not 'userid' in session:
        return redirect('/')
    # NOTE(review): this branching looks inverted — the tweet is only added
    # when validate_tweet() returns falsy. That is correct only if
    # validate_tweet returns the *errors* (truthy when invalid); confirm its
    # contract before changing anything here.
    if Tweet.validate_tweet(request.form):
        return redirect('/dashboard')
    else:
        Tweet.add_tweet(request.form)
        return redirect('/dashboard')
def create_tweet():
    """Create a tweet from the JSON body; created_at is the current
    time-of-day. Returns the new tweet as JSON with HTTP 201."""
    new_tweet = Tweet()
    new_tweet.text = request.json['text']
    new_tweet.created_at = datetime.now().time()
    db.session.add(new_tweet)
    db.session.commit()
    return tweet_schema.jsonify(new_tweet), 201
def search(search_query):
    """Search tweets and hashtags (AND and OR matching) and return JSON.

    Performance fix: the original ran Tweet.search() and Hashtag.search()
    twice each with identical arguments; each search now runs once.
    """
    search_query_strings = search_query.replace("&", " ").split()
    tweet_results = Tweet.search(search_query_strings)
    hashtag_results = Hashtag.search(search_query_strings)
    json_data = {
        "tweets": raw_to_json(tweet_results[0]),
        "tweets_or": raw_to_json(tweet_results[1]),
        "hashtags": raw_to_json(hashtag_results[0]),
        "hashtags_or": raw_to_json(hashtag_results[1]),
    }
    return jsonify(json_data)
def buscar_en_todos_los_tweets(busqueda, cantidad_de_tweets=50, partir_del_tweet=None): ## App keys APP_KEY = 'IR5sQyceHa34Cxxm2hAw' APP_SECRET = 'y5Nti0zdCFfzY1ifokC6iHIZFZ14Z2GAjTI6VFx2mg' ## User keys OAUTH_TOKEN = '202826188-6OCQEmzi7JCu8S6UERP5nhjUo4NHirk0Kd4VRRvl' OAUTH_TOKEN_SECRET = '1wmkXUkqkDhBnbugzaNIzf07VKeCddjEPynVG2gayU' # requires authentication as of Twitter API v1.1 twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) try: search_results = twitter.search(q=busqueda, count=cantidad_de_tweets) print '===============', busqueda, '===============' except TwythonError as e: print e # Initialize array of tweet objects tweets = [] for tweet in search_results['statuses']: # Parameters of a tweet tweet_id = tweet['id_str'] tweet_user = tweet['user']['screen_name'].encode('utf-8') tweet_created_at = tweet['created_at'] tweet_text = tweet['text'].encode('utf-8') tweet_reply_to_status_id = tweet['in_reply_to_user_id_str'] tweet_retweet_count = tweet['retweet_count'] try: Tweet.objects.get(pk=tweet_id) print "Tweet %s already in the db." % tweet_id except: # Print in console print 'Added %s.- Tweet from @%s Date: %s' % (tweet_id, tweet_user, tweet_created_at) print tweet_text, '\n', 'reply to %s' % tweet_reply_to_status_id ## Change twitter api time to python datetime ## ej. Wed Jan 16 22:08:18 +0000 2013 tweet_created_at = datetime.strptime(tweet_created_at, "%a %b %d %H:%M:%S +0000 %Y").replace(tzinfo=utc) tmp_tweet = Tweet(id=tweet_id, user=tweet_user, text=tweet_text, created_at=tweet_created_at, retweet_count=tweet_retweet_count) add_tweet_search = TwitterSearch(search=busqueda, tweet=tmp_tweet) # extract, save and link hastags with tweet extract_hash_tags(tmp_tweet) # save newly created objects tmp_tweet.save() add_tweet_search.save() ## Tests are to be performed here # buscar_en_todos_los_tweets('#uabc')
def index():
    """List tweets on GET; create one from the posted form on POST."""
    if request.method == 'POST':
        text = request.form['text']
        # Ignore empty submissions.
        if text:
            Tweet.create(text=text)
        return redirect('/')
    return render_template('index.html', tweets=Tweet.select())
def create_tweet():
    """Create a tweet for the user named in the JSON body.

    Robustness fix: the original dereferenced the user lookup
    unconditionally, so an unknown username crashed with an
    AttributeError (HTTP 500); it now returns 404.
    """
    username = request.json.get('username')
    author = db.session.query(User).filter(User.username == username).first()
    if author is None:
        return 'User not found', 404
    tweet = Tweet()
    tweet.created_by = author.id
    tweet.text = request.json.get('text')
    db.session.add(tweet)
    db.session.commit()
    return 'Created', 201
def post(self, request, *args, **kwargs):
    """Slack event endpoint.

    Verifies the workspace token, answers Slack's URL-verification
    challenge, greets users whose message contains 'go', and syncs new
    home-timeline tweets into the database.

    Idiom fix: the timeline loop variable shadowed the builtin iter();
    dead commented-out debug code removed.
    """
    slack_request_msg = request.data
    # Reject requests that do not come from our workspace.
    if slack_request_msg.get('token') != VeriFication_Token:
        return Response(status=status.HTTP_403_FORBIDDEN)
    # Slack URL-verification challenge: echo the payload back.
    if slack_request_msg.get('type') == 'url_verification':
        return Response(data=slack_request_msg, status=status.HTTP_200_OK)
    # Event listening.
    if 'event' in slack_request_msg:
        msg_event = slack_request_msg.get('event')
        # Ignore messages generated by bots (including ourselves).
        if msg_event.get('subtype') == 'bot_message':
            return Response(status=status.HTTP_200_OK)
        # Process the user's message.
        user = msg_event.get('user')
        text = msg_event.get('text')
        channel = msg_event.get('channel')
        # Respond with a hello message.
        bot_text = 'Hi <@{}> :wave:'.format(user)
        if 'go' in text.lower():
            slack_client.api_call(method='chat.postMessage',
                                  channel=channel, text=bot_text)
        # Sync the home timeline: store tweets until we hit one we already
        # have (the timeline is returned newest-first).
        for timeline_tweet in tweepy.Cursor(api.home_timeline).items():
            if not Tweet.objects(tweet_id=timeline_tweet.id):
                tweet_obj = Tweet(tweet_id=timeline_tweet.id,
                                  tweet_text=timeline_tweet.text,
                                  tweet_time=timeline_tweet.created_at)
                tweet_obj.save()
            else:
                break
        return Response(status=status.HTTP_200_OK)
    return Response(status=status.HTTP_200_OK)
def on_success(self, data):
    """Streaming callback: persist each status payload as a Tweet document."""
    if 'text' not in data:
        return
    # Annotate the payload before storing it.
    data['fetched_timestamp'] = datetime.datetime.now()
    data['fresh_tweet'] = True
    record = Tweet()
    record.tweet_id = data['id_str']
    record.tweets.append(data)
    record.save()
    print("saved", self.i)
    self.i += 1
def delete_tweet():
    """Delete the tweet given by form field 'tweet_id', flashing the outcome.

    Fix: narrowed the bare `except:` (which also swallowed SystemExit and
    KeyboardInterrupt) to `except Exception`; still best-effort so the
    user always gets feedback.
    """
    try:
        tweet_id = request.form['tweet_id']
        Tweet.delete_tweet(tweet_id)
        flash('Your Tweet was successfully deleted', 'success')
        return redirect('/profile')
    except Exception:
        flash(app.config['ERROR_MSG'].format('Could not delete tweet!'), 'danger')
        return redirect('/profile')
def add_tweet():
    """Post a tweet for the logged-in user; reject empty messages."""
    if "cur_user" not in session:
        flash("Please Log In")
        return redirect("/")
    new_tweet = Tweet(message=request.form['tweet'],
                      author_id=int(session['cur_user']['id']))
    if not new_tweet.message:
        flash("need more tweet length yo!")
    else:
        Tweet.add_new_tweet(new_tweet)
    return redirect("/twitter")
def tweet_add():
    """Create a tweet authored by the current user (login required)."""
    author = current_user()
    if author is None:
        return redirect(url_for('login_view'))
    tweet = Tweet(request.form)
    # Record who posted it.
    tweet.user = author
    # Persist to the database.
    tweet.save()
    return redirect(url_for('timeline_view', username=author.username))
def save_raw(results: Generator) -> Generator:
    """Save the results and yields the id values """
    for result in results:
        attrs = result.__dict__
        id_str = attrs.get('id_str')
        # Persist the raw JSON payload under the tweet's id.
        Tweet(id_str).save(attrs.get('_json'))
        yield id_str
def parse_tweet(json_data):
    """Parse the raw output data we get from Twitter's search results."""
    parsed_tweets = []
    if not json_data['items_html'].strip():
        return parsed_tweets
    tweet_nodes = PyQuery(json_data['items_html'])('div.js-stream-tweet')
    if len(tweet_nodes) == 0:
        return parsed_tweets
    for node in tweet_nodes:
        pq = PyQuery(node)
        username = pq("span.username b").text()
        # Re-join the tweet text, undoing Twitter's spaced '#'/'@' markup.
        text = re.sub(
            r"\s+", " ",
            pq("p.js-tweet-text").text().replace('# ', '#').replace('@ ', '@'))
        retweets = int(
            pq("span.ProfileTweet-action--retweet span.ProfileTweet-actionCount")
            .attr("data-tweet-stat-count").replace(",", ""))
        favorites = int(
            pq("span.ProfileTweet-action--favorite span.ProfileTweet-actionCount")
            .attr("data-tweet-stat-count").replace(",", ""))
        date_sec = int(
            pq("small.time span.js-short-timestamp").attr("data-time"))
        tweet_id = pq.attr("data-tweet-id")
        permalink = pq.attr("data-permalink-path")
        user_id = int(pq("a.js-user-profile-link").attr("data-user-id"))
        geo = ''
        geo_span = pq('span.Tweet-geo')
        if len(geo_span) > 0:
            geo = geo_span.attr('title')
        urls = []
        for link in pq("a"):
            try:
                urls.append(link.attrib["data-expanded-url"])
            except KeyError:
                pass
        # NOTE: only username/text/date are copied onto the model; the
        # remaining fields are parsed (and int()-validated) but unused here.
        t = Tweet()
        t.username = username
        t.txt = text
        t.date = datetime.datetime.fromtimestamp(date_sec)
        parsed_tweets.append(t)
    return parsed_tweets
def tweets(handle):
    """Return the user's recent timeline as serialized JSON tweets."""
    timeline = api.GetUserTimeline(screen_name=handle)
    serialized = []
    for status in timeline:
        # First media URL when the status carries media, else None.
        picture = (None if status.media == []
                   else status.media[0].get("media_url", None))
        record = Tweet(id=status.id,
                       text=status.text,
                       retweet_count=status.retweet_count,
                       created_at=convert_datetime(status.created_at),
                       influence_score=influence_score_calc(status.text),
                       picture=picture)
        serialized.append(record.serialize())
    return jsonify(tweets=serialized)
def post(self, request, username):
    """Save a tweet for *username* and register any #hashtags it contains.

    Robustness fix: `word[0]` raised IndexError on the empty strings
    produced by consecutive spaces in the text; `startswith` is safe.
    """
    form = TweetForm(self.request.POST)
    if form.is_valid():
        user = User.objects.get(username=username)
        tweet = Tweet(text=form.cleaned_data['text'], user=user,
                      country=form.cleaned_data['country'])
        tweet.save()
        for word in form.cleaned_data['text'].split(" "):
            # Words starting with '#' become HashTag records.
            if word.startswith("#"):
                hashtag, created = HashTag.objects.get_or_create(name=word[1:])
                hashtag.tweet.add(tweet)
    return HttpResponseRedirect('/user/' + username)
def parse_tweet_media(tweet: Tweet, extended_entities: list):
    """Strip media short-links from the tweet text and attach each media
    entity to the tweet (highest-bitrate variant for videos)."""
    for entity in extended_entities['media']:
        # Remove the t.co link that stands in for this media item.
        tweet.text = tweet.text.replace(entity['url'], '')
        if 'video_info' in entity:
            candidates = [v for v in entity['video_info']['variants']
                          if 'bitrate' in v]
            best = max(candidates, key=lambda v: v['bitrate'])
            tweet.add_media(Media('video', best['url']))
        else:
            tweet.add_media(Media('photo', entity['media_url_https']))
def load(filename):
    """Load tweets from a JSON file and upsert each into the session."""
    with open(filename) as handle:
        records = json.load(handle)
        for record in records:
            tweet = Tweet()
            tweet.twitterId = record['twitterId']
            tweet.text = record['text']
            find_or_add_tweet(session, tweet)
        session.commit()
def post(self):
    """Create a tweet (at least 3 chars after tag stripping), attach any
    uploaded images, and answer with JSON (AJAX) or a flash + redirect."""
    if not self.has_permission:
        return
    user = self.current_user
    content = self.get_argument('content', None)
    image_ids = self.get_argument('image_ids', None)
    images = []
    if content and len(strip_tags(content)) >= 3:
        # Sanitize the content against XSS before saving.
        tweet = Tweet(content=strip_xss_tags(content), user_id=user.id).save()
        tweet.put_notifier()
        if image_ids:
            # image_ids arrives as a comma-separated string of integer ids.
            image_ids = image_ids.split(',')
            for image_id in image_ids:
                image_id = int(image_id)
                image = Image.get(id=image_id)
                if image:
                    # Link the uploaded image to the new tweet.
                    image.tweet_id = tweet.id
                    images.append({
                        'id': image.id,
                        'path': image.path,
                        'width': image.width,
                        'height': image.height,
                    })
        if images != []:
            # NOTE(review): has_img is set after save(); confirm the model
            # persists this mutation — no further save() is visible here.
            tweet.has_img = 'true'
        result = {
            'status' : 'success',
            'message' : '推文创建成功',  # "tweet created successfully"
            'content' : tweet.content,
            'name' : tweet.author.name,
            'nickname' : tweet.author.nickname,
            'author_avatar' : tweet.author.get_avatar(size=48),
            'author_url' : tweet.author.url,
            'author_name' : tweet.author.name,
            'author_nickname' : tweet.author.nickname,
            'tweet_url' : tweet.url,
            'created' : tweet.created,
            'id' : tweet.id,
            'images' : images,
        }
        if self.is_ajax:
            return self.write(result)
        self.flash_message(result)
        return self.redirect('/timeline')
    # Content missing or too short.
    result = {
        'status': 'error',
        'message': '推文内容至少 3 字符'  # "tweet must be at least 3 chars"
    }
    if self.is_ajax:
        return self.write(result)
    self.flash_message(result)
    return self.redirect('/timeline')
def add(request):
    """Create a tweet for the current user and redirect to their page."""
    headers = {
        'Content-Type': 'text/html',
    }
    uid = current_user(request)
    # NOTE: `header` is built but never used afterwards; the call is kept
    # to preserve behavior exactly.
    header = response_with_headers(headers)
    author = User.find(uid)
    # Create the tweet from the submitted form.
    tweet = Tweet(request.form())
    tweet.user_id = author.id
    tweet.save()
    return redirect('/tweet?user_id={}'.format(author.id))
def post(self, request, username):
    """Save a tweet for *username* and register its #hashtags.

    Bug fix (consistency with the sibling handlers): the original created a
    HashTag from *every* word, chopping each word's first character; only
    words starting with '#' are hashtags. `startswith` also avoids the
    IndexError `word[0]` raises on empty tokens.
    """
    form = TweetForm(self.request.POST)
    if form.is_valid():
        user = User.objects.get(username=username)
        # TODO(review): country is hard-coded (sic "Vietname") instead of
        # using form.cleaned_data['country'] — preserved from the original.
        tweet = Tweet(text=form.cleaned_data['text'], user=user,
                      country="Vietname")
        tweet.save()
        for word in form.cleaned_data['text'].split(" "):
            if word.startswith("#"):
                hashtag, created = HashTag.objects.get_or_create(name=word[1:])
                hashtag.tweet.add(tweet)
        return HttpResponseRedirect('/tweets/user/%s' % username)
    return redirect(Profile.as_view())
def get(self):
    """Search Twitter near a named place and store tweets not yet in the
    datastore.

    Bug fix: an unknown 'place' printed the usage message but fell through
    with `search_url` undefined, crashing with a NameError at the fetch —
    it now returns early.
    """
    self.response.headers["Content-Type"] = "text/plain"
    myplace = self.request.get('place')
    urlbase = 'http://search.twitter.com/search.json'
    # Supported places and their geocode query strings.
    geocodes = {
        'Oxford': '?geocode=51.751944%2C-1.257778%2C10km',
        'London': '?geocode=51.511676%2C-0.133209%2C10km',
        'York': '?geocode=53.958333%2C-1.080278%2C10km',
        'Margate': '?geocode=51.382681%2C1.3664245%2C10km',
        'Bristol': '?geocode=51.45%2C-2.583333%2C10km',
    }
    if myplace not in geocodes:
        self.response.out.write(
            ("Please provide a 'place' parameter:"
             "Oxford, London, York, Bristol or Margate."))
        return
    search_url = urlbase + geocodes[myplace]
    self.response.out.write('About to access: %s\n\n' % search_url)
    f = urlfetch.fetch(url=search_url)
    if f.status_code == 200:
        j = json.loads(f.content)
        for result in j['results']:
            self.response.out.write(result)
            self.response.out.write('\n')
            if result['id']:
                # Skip tweets we already stored.
                same_id = Tweet.gql('WHERE tweet_id = :1', result['id'])
                if same_id.count() > 0:
                    self.response.out.write(
                        'Tweet id #%s exists in the datastore\n' % result['id'])
                else:
                    self.response.out.write(
                        'Adding tweet id #%s to datastore\n' % result['id'])
                    # Twitter's RFC-2822 date -> datetime.
                    created_at = datetime.fromtimestamp(
                        mktime_tz(parsedate_tz(result['created_at'])))
                    tweet = Tweet(tweet_id=result['id'],
                                  tweet_content=result['text'],
                                  place=myplace,
                                  created_at=created_at)
                    tweet.put()
    elif f.status_code == 400:
        self.response.out.write('UHOH! You have hit the rate limit...')
def GetPastTweets(app, searchStrings): with app.app_context(): auth = OAuthHandler( current_app.config['TWITTER_CONSUMER_KEY'], current_app.config['TWITTER_CONSUMER_SECRET']) auth.set_access_token(current_app.config['TWITTER_ACCESS_TOKEN'], current_app.config['TWITTER_ACCESS_TOKEN_SECRET']) api = API(auth) deleteEntries = connect(current_app.config['MONGODB_SETTINGS']['DB']) deleteEntries.drop_database(current_app.config['MONGODB_SETTINGS']['DB']) listTweetIDs = [] user_query = session['userSentence'] session['completeTweetFetch'] = False session['completedMetaModel'] = False for searchString in searchStrings: # Sleep for 28 secs to avoid Twitter download rate restrictions searchTweets = [status for status in Cursor(api.search, q=searchString).items(current_app.config['MAX_FETCH_TWEETS'])] for tweet in searchTweets: tweet_id = tweet.id if listTweetIDs.__contains__(tweet_id): continue if len(Tweet.objects(tweet_id=tweet_id)) > 1: continue tweet_message = tweet.text.encode("utf-8") tweet_userhandle = tweet.user.screen_name tweet_retweet_count = tweet.retweet_count tweet_createtime = tweet.created_at tweet_location = None tweet_geo = None tweet_favoritecount = tweet.favorite_count tweet_username = tweet.user.name tweet_user_no_of_following = tweet.user.friends_count tweet_user_no_of_followers = tweet.user.followers_count tweet_positiveOrnegative = 0 tweet_polarOrneutral = 0 tweet_isRetweet = 0 oneTweet = Tweet(tweet_id=tweet_id, tweet_msg=tweet_message, tweet_likes=tweet_favoritecount, tweet_retweets=tweet_retweet_count, tweet_search_category=searchString, tweet_user_search_query=user_query, tweet_positiveOrnegative=tweet_positiveOrnegative, tweet_polarOrneutral=tweet_polarOrneutral, tweet_user_handle=tweet_userhandle, tweet_user_name=tweet_username, tweet_user_followers=tweet_user_no_of_followers, tweet_user_following=tweet_user_no_of_following, tweet_isretweet=tweet_isRetweet, tweet_time=tweet_createtime, tweet_location=tweet_location, tweet_geo=tweet_geo) 
oneTweet.save() listTweetIDs.append(tweet_id) if session['metamodelThread'] is None: session['metamodelThread'] = thread.start_new_thread(twittermetamodelBuilding, ()) print 'completed tweet fetch' session['completeTweetFetch'] = True pass
def main():
    """Fetch the account's recent tweets, extract words/hashtags, and store
    any tweets not already known to the session."""
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_secret)
    client = tweepy.API(auth)
    statuses = client.user_timeline(screen_name=ACCOUNT, count=COUNT,
                                    tweet_mode="extended")
    for status in statuses:
        save_word_from_tweet(status.full_text, status.entities['hashtags'])
        record = Tweet()
        record.twitterId = status.id
        record.text = status.full_text
        find_or_add_tweet(session, record)
def search_anime_tweet(search_query):
    """Search the anime DB for titles matching every query term and attach
    AND/OR tweet search results to each match; returns pretty-printed JSON.

    Performance fix: Tweet.search() ran twice with identical arguments for
    every matching anime; it now runs once per match and both result sets
    are reused.
    """
    import requests
    terms = search_query.replace("&", " ").split()
    anime_json = requests.get(
        "http://animedb.me/search/" + search_query).json()["searchResults"]
    result = []
    for anime in anime_json:
        anime_result = {"title": anime["title"], "id": anime["id"]}
        if all(term in anime["title"].lower() for term in terms):
            search_results = Tweet.search(terms)
            anime_result['tweets'] = raw_to_json(search_results[0])
            anime_result['tweets_or'] = raw_to_json(search_results[1])
        # Only keep entries that actually got tweet results.
        if "tweets" in anime_result or "tweets_or" in anime_result:
            result.append(anime_result)
    return json.dumps(result, ensure_ascii=False, sort_keys=True, indent=4,
                      separators=(',', ': '))
def homepage(request):
    """Renders homepage, with public timeline.

    Bug fix: render_to_response takes the template name first — the
    original passed `request` as the template argument, which raises a
    TypeError at render time.
    """
    if request.method == 'POST':  # The form has been submitted...
        form = TweetForm(request.POST)  # A form bound to the POST data
        if form.is_valid():  # All validation rules pass
            t = Tweet(text=form.cleaned_data['text'], owner=request.user)
            t.save()
            return HttpResponseRedirect('/')  # Redirect after POST
    else:
        form = TweetForm()  # An unbound form
    return render_to_response('homepage.html', {
        'form': form,
    })
def get(self):
    """Search Twitter near a named place and store tweets not yet in the
    datastore (duplicate of the sibling handler; kept in sync).

    Bug fix: an unknown 'place' printed the usage message but fell through
    with `search_url` undefined, crashing with a NameError at the fetch —
    it now returns early.
    """
    self.response.headers["Content-Type"] = "text/plain"
    myplace = self.request.get('place')
    urlbase = 'http://search.twitter.com/search.json'
    geocodes = {
        'Oxford': '?geocode=51.751944%2C-1.257778%2C10km',
        'London': '?geocode=51.511676%2C-0.133209%2C10km',
        'York': '?geocode=53.958333%2C-1.080278%2C10km',
        'Margate': '?geocode=51.382681%2C1.3664245%2C10km',
        'Bristol': '?geocode=51.45%2C-2.583333%2C10km',
    }
    if myplace not in geocodes:
        self.response.out.write(("Please provide a 'place' parameter:"
                                 "Oxford, London, York, Bristol or Margate."))
        return
    search_url = urlbase + geocodes[myplace]
    self.response.out.write('About to access: %s\n\n' % search_url)
    f = urlfetch.fetch(url=search_url)
    if f.status_code == 200:
        j = json.loads(f.content)
        for result in j['results']:
            self.response.out.write(result)
            self.response.out.write('\n')
            if result['id']:
                same_id = Tweet.gql('WHERE tweet_id = :1', result['id'])
                if same_id.count() > 0:
                    self.response.out.write(
                        'Tweet id #%s exists in the datastore\n' % result['id'])
                else:
                    self.response.out.write(
                        'Adding tweet id #%s to datastore\n' % result['id'])
                    created_at = datetime.fromtimestamp(
                        mktime_tz(parsedate_tz(result['created_at'])))
                    tweet = Tweet(
                        tweet_id=result['id'],
                        tweet_content=result['text'],
                        place=myplace,
                        created_at=created_at
                    )
                    tweet.put()
    elif f.status_code == 400:
        self.response.out.write('UHOH! You have hit the rate limit...')
def post(self, request, username):
    """Save a tweet for *username* and register any #hashtags it contains
    (duplicate of the sibling handler; kept in sync).

    Robustness fix: `word[0]` raised IndexError on the empty strings
    produced by consecutive spaces; `startswith` is safe.
    """
    form = TweetForm(self.request.POST)
    if form.is_valid():
        user = User.objects.get(username=username)
        tweet = Tweet(text=form.cleaned_data['text'], user=user,
                      country=form.cleaned_data['country'])
        tweet.save()
        for word in form.cleaned_data['text'].split(" "):
            if word.startswith("#"):
                hashtag, created = HashTag.objects.get_or_create(name=word[1:])
                hashtag.tweet.add(tweet)
    return HttpResponseRedirect('/user/' + username)
def on_data(self, feed): #print feed try: if feed != []: tweet = json.loads(feed) tweet_id = tweet['id'] tweet_message = tweet['text'].encode("utf-8") tweet_userhandle = tweet['user']['screen_name'] tweet_retweet_count = tweet['retweet_count'] tweet_createtime = datetime.datetime.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y') tweet_location = None tweet_geo = None tweet_favoritecount = tweet['favorite_count'] tweet_username = tweet['user']['name'] tweet_user_no_of_following = tweet['user']['friends_count'] tweet_user_no_of_followers = tweet['user']['followers_count'] tweet_positiveOrnegative = 0 tweet_polarOrneutral = 0 tweet_isRetweet = 0 oneTweet = Tweet(tweet_id=tweet_id, tweet_msg=tweet_message, tweet_likes=tweet_favoritecount, tweet_retweets=tweet_retweet_count, tweet_search_category=None, tweet_user_search_query=session['userSentence'], tweet_positiveOrnegative=tweet_positiveOrnegative, tweet_polarOrneutral=tweet_polarOrneutral, tweet_user_handle=tweet_userhandle, tweet_user_name=tweet_username, tweet_user_followers=tweet_user_no_of_followers, tweet_user_following=tweet_user_no_of_following, tweet_isretweet=tweet_isRetweet, tweet_time=tweet_createtime, tweet_location=tweet_location, tweet_geo=tweet_geo) oneTweet.save() if session['metamodelThread'] is None: print 'starting metamodel building' session['metamodelThread'] = thread.start_new_thread(twittermetamodelBuilding, ()) self.num_tweets += 1 if self.num_tweets < current_app.config['MAX_LIVE_TWEETS']: return True else: print 'completed tweet fetch' session['completeTweetFetch'] = True return False else: return True except: print 'Unexpected error', sys.exc_info()[0] return True
def tweets(handle):
    """Serialize a user's recent timeline to a JSON response."""
    def build(status):
        # First media URL when the status carries media, else None.
        media_url = (None if status.media == []
                     else status.media[0].get("media_url", None))
        return Tweet(id=status.id,
                     text=status.text,
                     retweet_count=status.retweet_count,
                     created_at=convert_datetime(status.created_at),
                     influence_score=influence_score_calc(status.text),
                     picture=media_url)

    tweet_list = [build(s) for s in api.GetUserTimeline(screen_name=handle)]
    return jsonify(tweets=[tweet.serialize() for tweet in tweet_list])
def index(request):
    """Home view: GET renders the timeline; a valid POST creates a tweet.

    Follows post/redirect/get — a successful submission redirects to
    ``twitter_home`` so refreshing cannot double-post. An invalid POST
    falls through and re-renders with the bound (error-carrying) form.
    """
    if request.method == "POST":
        form = TweetForm(request.POST)
        if form.is_valid():
            Tweet.create_post(text=form.cleaned_data["text"],
                              uid=request.session['uid'])
            return HttpResponseRedirect(reverse("twitter_home"))
    else:
        form = TweetForm()
    return render(request, "twitter/index.html", {
        'username': request.session['username'],
        'tweet_form': form,
        'timeline': Tweet.get_timeline(request.session['uid']),
    })
def test_twitter_search_gets_processed():
    """End-to-end: a twitter search is enqueued, fetched, and stored raw.

    Enqueue one search, drain the source queue, verify the expected number of
    tweets were stored, then drain the process queue.
    """
    with Connection(connection=redis_db):
        source_queue = Queue(name='source')
        process_queue = Queue(name='process')

        expected = 2
        twitter.search(input='Test Query', size=expected)

        SimpleWorker([source_queue]).work(burst=True)
        assert len(Tweet.keys()) == expected

        SimpleWorker([process_queue]).work(burst=True)
        # TODO: assert on the processed state ("pixels averaged out") once
        # that result is observable here.
def tweet_add(user_id):
    """Create a tweet for user *user_id* from the POSTed form.

    Allowed only when the currently logged-in user is that same user;
    otherwise redirect to the login view. Unknown user ids yield a 404.
    """
    user = User.query.filter_by(id=user_id).first()
    if user is not None:
        log('有这个人注册过')
        u = current_user()
        # NOTE(review): the original statement here was destroyed by
        # secret-scrubbing residue ("log('debug current_user:'******'当前有人登陆
        # 且登陆的人是这个人')" is not valid Python). Reconstructed as a debug log
        # plus the "someone is logged in and it is this user" guard implied by
        # the else branch below — confirm against version history.
        log('debug current_user:', u)
        if u is not None and u.id == user.id:
            tweet = Tweet(request.form)
            tweet.user = u
            tweet.save()
            return redirect(url_for('tweet_view', user_id=u.id))
        else:
            return redirect(url_for('login_view'))
    else:
        abort(404)
def createStreamingContext():
    """Build and return the StreamingContext wired with the tweet pipeline."""
    # Local StreamingContext against the cluster master, 2-second batches.
    spark = SparkContext("spark://%s:7077" % MASTER_NAME, appName="GlutenTweet", pyFiles=PYFILES)
    ssc = StreamingContext(spark, 2)

    # Raw JSON lines arrive over a plain socket and become Tweet models.
    tweetStream = ssc.socketTextStream(MASTER_IP, 9999).map(lambda r: Tweet(raw_json=r))
    tweetStream.foreachRDD(storeTweetsRDD)

    # Hashtag counts over a 20-minute window recomputed every 30 seconds.
    streamTop(analysisHahtagCount(tweetStream.window(20 * 60, 30))).pprint()

    # Keyword extraction yields a derived stream (tweetStream is immutable);
    # its results are written back via updateTweetsRDD.
    withKeywords = tweetStream.map(lambda t: keywordExtraction(t))
    withKeywords.foreachRDD(updateTweetsRDD)

    # Same sliding-window analysis, but over extracted keywords.
    streamTop(analysisKeywordCount(withKeywords.window(20 * 60, 30))).pprint()

    ssc.checkpoint(CHECKPOINT_DIR)
    return ssc
def on_data(self, raw_data):
    """Parse one raw stream payload and dispatch real tweets to on_tweet.

    Rate-limit notices (payloads shaped {'limit': {'track': ...}}) are
    silently dropped, as are payloads Tweet.from_raw_tweet rejects.
    """
    payload = json.loads(raw_data)
    is_limit_notice = 'limit' in payload and 'track' in payload['limit']
    if is_limit_notice:
        return
    parsed = Tweet.from_raw_tweet(payload)
    if not parsed:
        return
    return self.on_tweet(parsed)
def tweet_list(cls, request):
    """Return every stored Tweet as a TweetList message.

    The request token is verified first; a decode failure maps to code -1,
    an expired token to code -2, and success to code 1 with the data list.
    """
    try:
        # Verify the token; NOTE(review): 'secret' is hard-coded — move to config.
        token = jwt.decode(request.tokenint, 'secret')
        # Resolve the user behind the token (raises via jwt above if invalid).
        user = Usuarios.get_by_id(token['user_id'])
        message = TweetList(code=1)
        # Project each datastore entity onto the outgoing message type.
        message.data = [
            TweetUpdate(
                token='',
                entityKey=row.entityKey,
                title=row.title,
                description=row.description,
                urlImage=row.urlImage)
            for row in Tweet.query().fetch()
        ]
    except jwt.DecodeError:
        message = TweetList(code=-1, data=[])
    except jwt.ExpiredSignatureError:
        message = TweetList(code=-2, data=[])
    return message
def put(self, tweet_id):
    """Apply an action (up/down/collect/thank/report) to a tweet.

    Raises HTTP 404 when the tweet does not exist. Voting on one's own
    tweet is rejected with an info result; a missing or unknown ``action``
    returns an error result. All outcomes go through send_result.
    """
    tweet = Tweet.get(id=int(tweet_id))
    if not tweet:
        raise tornado.web.HTTPError(404)
    action = self.get_argument('action', None)
    user = self.current_user
    if not action:
        return self.send_result({'status': 'error', 'message': '缺少参数'})
    if action in ('up', 'down'):
        if tweet.user_id == user.id:
            # Users may not vote on their own tweets.
            result = {'status': 'info', 'message': '不能为自己的推文投票'}
        else:
            vote = user.up if action == 'up' else user.down
            result = vote(tweet_id=tweet.id)
    elif action == 'collect':
        result = user.collect(tweet_id=tweet.id)
    elif action == 'thank':
        result = user.thank(tweet_id=tweet.id)
    elif action == 'report':
        result = user.report(tweet_id=tweet.id)
    else:
        # BUG FIX: an unrecognized action previously left ``result`` unbound,
        # crashing with NameError; report it as an error instead.
        result = {'status': 'error', 'message': '未知操作'}
    return self.send_result(result)
def removeTweets():
    """Cron endpoint: purge tweets older than 30 days for one topic.

    Reads the topic from POST form data or GET query string (400 when
    absent), deletes matching old tweets, then chains a follow-up task
    that deletes the associated URLs.
    """
    if request.method == 'POST':
        app.logger.info('request form: {}'.format(request.form))
        topic = request.form.get('topic')
    elif request.method == 'GET':
        app.logger.info('request args: {}'.format(request.args))
        topic = request.args.get('topic')
    # NOTE(review): `topic` is unbound for any other HTTP method — presumably
    # the route restricts methods to GET/POST; confirm.
    if not topic:
        abort(400)
    app.logger.info('Topic param received: {}'.format(topic))
    # delete old tweets (> 30 days; the comment previously said "> 1 year",
    # but the timedelta below is the source of truth)
    cnt = Tweet.removeOld(datetime.datetime.utcnow() - datetime.timedelta(days=30), topic)
    # continue with deleting urls
    taskqueue.add(url='/cron/delete/urls', params={'topic': topic})
    # mail.send_mail(
    #     sender='*****@*****.**',
    #     to='*****@*****.**',
    #     subject='Remove tweets {}'.format(topic),
    #     body='{} tweets deleted for topic {}'.format(cnt, topic),
    # )
    app.logger.info('{} tweets deleted for topic {}'.format(cnt, topic))
    return Response('OK')
def specific_tweet_fact_check(fake_news_url):
    """Pick the next tweet to fact-check for *fake_news_url*.

    Among tweets matching the URL that have not been fact-checked yet,
    returns the one whose author has the most followers, projected to
    (id_str, fake_news_url, user__screen_name); None when nothing matches.
    """
    from mongoengine.queryset.visitor import Q
    unchecked_for_url = Q(fake_news_url=fake_news_url) & Q(fact_checked=False)
    candidates = Tweet.objects(unchecked_for_url).order_by('-user__followers_count')
    return candidates.values_list('id_str', 'fake_news_url', 'user__screen_name').first()