def feed(request, username):
    url = "https://twitrss.me/twitter_user_to_rss/?user=" + username
    # The two lines below reconstruct a span redacted ("******") in the source:
    # the feed must be parsed into `dicc` and the output buffer initialized.
    dicc = feedparser.parse(url)
    out = ""
    for number in range(5):
        out += dicc.entries[number].title + "<br>"
        # Collect the tweet's URLs, if any
        urls = dicc.entries[number].title.split()
        for i in urls:
            if i.startswith("http://") or i.startswith("https://"):
                i = i.split('&')[0]
                out += "<li><a href=" + i + ">" + i + "</a></li>"
                # Grab the first <p> element of the linked page
                soup = BeautifulSoup(urllib.urlopen(i).read())
                out += str(soup.p).decode('utf8')
                # And the first <img> element
                out += str(soup.img).decode('utf8') + "<br><br>"
        # Look up or create the tweet's author
        user = dicc.entries[number].title.split(':')[0]
        try:
            p = User.objects.get(name=user)
        except ObjectDoesNotExist:
            p = User(name=user)
            p.save()
        # Look up or create the tweet itself
        try:
            t = Tweet.objects.get(content=dicc.entries[number].title)
        except ObjectDoesNotExist:
            t = Tweet(content=dicc.entries[number].title,
                      url=dicc.entries[number].link,
                      name=p)
            t.save()
    return HttpResponse(out)
def do(self):
    logger.info("Initiating cronjob: {}".format(self.code))
    try:
        auth = tweepy.OAuthHandler(self._tak, self._tas)
        auth.set_access_token(self._tat, self._tats)
        api = tweepy.API(auth)
        screen_name = api.me().screen_name
        timeline = api.home_timeline()
    except Exception:
        e = traceback.format_exc()
        logger.error("Could not get home timeline. Error: {}".format(e))
        return False
    for t in timeline:
        if t.user.screen_name == screen_name:
            msg = t.text
            if msg.startswith('RT @everycolorbot'):
                chex = msg.split()[2].strip("\"")
                evcinst = EveryColorBotTweet.objects.get_or_none(color__hex=chex)
                if evcinst is not None:
                    evcinst.tweeted = True
                    evcinst.save()
                # `chex` only exists for everycolorbot retweets, so the
                # unsaved-tweet bookkeeping has to stay in this branch
                inst = Tweet.objects.get_or_none(message=msg)
                if inst is None:
                    logger.info('Cronjob encountered unsaved tweet: "{}" '
                                'Saving it to database.'.format(msg))
                    inst = Tweet(color_code=chex, message=msg)
                    inst.save()
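# Tweet.objects.get_or_none(...) above (and in save() further down) is not a
# stock Django manager method. A minimal sketch of a custom manager that would
# provide it; the class name GetOrNoneManager is an assumption, not from the
# source:
from django.db import models

class GetOrNoneManager(models.Manager):
    def get_or_none(self, **kwargs):
        # Return the matching row, or None instead of raising DoesNotExist
        try:
            return self.get(**kwargs)
        except self.model.DoesNotExist:
            return None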
def tweet(request):
    if request.method == 'POST':
        form = TweetForm(request.POST)
        if form.is_valid():
            twit = Tweet()
            status = twit.tweet = form.cleaned_data['tweet']
            twit.name = request.user.username
            twit.save()
            return render_to_response('tweet.html', {'status': status},
                                      context_instance=RequestContext(request))
        else:
            return render_to_response('tweet.html',
                                      context_instance=RequestContext(request))
    else:
        # Collect this user's tweets; `status` ends up holding the last one
        tweets = []
        queryset = Tweet.objects.all()
        for query in queryset:
            if request.user.username == query.name:
                tweets.append(query)
        status = ''
        for tweet in tweets:
            status = tweet.tweet
        form = TweetForm()
        form.name = request.user
        return render_to_response('tweet.html', {'form': form, 'status': status},
                                  context_instance=RequestContext(request))
def load_tweets(profile, api):
    tweets = []
    for status in tweepy.Cursor(api.user_timeline).items():
        print str(status.id)
        # Use the last 9 digits of the status id as a temporary key
        tweet = Tweet(temp=str(status.id)[-9:])
        tweet.text = unicode(status.text)
        tweet.created = timezone.make_aware(status.created_at,
                                            timezone.get_current_timezone())
        tweet.is_retweet = status.retweeted
        tweet.retweet_count = status.retweet_count
        tweet.is_quote = status.is_quote_status
        if status.in_reply_to_status_id is not None:
            tweet.is_reply = True
        for media in status.entities.get("media", [{}]):
            if media.get("type", None):
                tweet.media_url = media.get('media_url')
                if 'video' in tweet.media_url:
                    tweet.is_video = True
                else:
                    tweet.is_image = True
        # tweets.append(tweet)
        tweet.save()
        profile.tweets.add(tweet)
    # profile.tweets.bulk_create(tweets)
    profile.save()
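# None of these snippets include the Django `Tweet` model they save to. As a
# point of reference only, a minimal sketch consistent with the fields
# load_tweets() sets above; every field definition is an assumption, not the
# original schema.
from django.db import models

class Tweet(models.Model):
    temp = models.CharField(max_length=16)
    text = models.TextField()
    created = models.DateTimeField(null=True)
    is_retweet = models.BooleanField(default=False)
    retweet_count = models.IntegerField(default=0)
    is_quote = models.BooleanField(default=False)
    is_reply = models.BooleanField(default=False)
    media_url = models.URLField(blank=True)
    is_video = models.BooleanField(default=False)
    is_image = models.BooleanField(default=False)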
def saveTweet(self, username, tweetContent, publishedDate):
    # Creates an object to be saved in the Tweet model in the database.
    tweetObject = Tweet(username=username, text=tweetContent, date=publishedDate)
    tweetObject.save()
def schedule(request):
    if request.POST.get('auth') != settings.TWITGHOST_AUTH_KEY:
        logging.error("Unauthorized access")
        return HttpResponse('forbidden', status=403)
    new_tweet = Tweet(tweet=request.POST['tweet'])
    new_tweet.save()
    return HttpResponse('ok', mimetype="text/xml")
def add(request):
    user = current_user(request)
    # Create the tweet
    form = request.form()
    w = Tweet(form)
    w.user_id = user.id
    w.save()
    return redirect('/tweet/index?user_id={}'.format(user.id))
def buscar_en_todos_los_tweets(busqueda, cantidad_de_tweets=50, partir_del_tweet=None):
    ## App keys
    APP_KEY = 'IR5sQyceHa34Cxxm2hAw'
    APP_SECRET = 'y5Nti0zdCFfzY1ifokC6iHIZFZ14Z2GAjTI6VFx2mg'
    ## User keys
    OAUTH_TOKEN = '202826188-6OCQEmzi7JCu8S6UERP5nhjUo4NHirk0Kd4VRRvl'
    OAUTH_TOKEN_SECRET = '1wmkXUkqkDhBnbugzaNIzf07VKeCddjEPynVG2gayU'
    # requires authentication as of Twitter API v1.1
    twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
    try:
        search_results = twitter.search(q=busqueda, count=cantidad_de_tweets)
        print '===============', busqueda, '==============='
    except TwythonError as e:
        print e
        return  # bail out: search_results is undefined on failure
    # Initialize array of tweet objects
    tweets = []
    for tweet in search_results['statuses']:
        # Parameters of a tweet
        tweet_id = tweet['id_str']
        tweet_user = tweet['user']['screen_name'].encode('utf-8')
        tweet_created_at = tweet['created_at']
        tweet_text = tweet['text'].encode('utf-8')
        tweet_reply_to_status_id = tweet['in_reply_to_user_id_str']
        tweet_retweet_count = tweet['retweet_count']
        try:
            Tweet.objects.get(pk=tweet_id)
            print "Tweet %s already in the db." % tweet_id
        except Tweet.DoesNotExist:
            # Print in console
            print 'Added %s.- Tweet from @%s Date: %s' % (tweet_id, tweet_user, tweet_created_at)
            print tweet_text, '\n', 'reply to %s' % tweet_reply_to_status_id
            ## Convert Twitter API time to a Python datetime,
            ## e.g. "Wed Jan 16 22:08:18 +0000 2013"
            tweet_created_at = datetime.strptime(
                tweet_created_at, "%a %b %d %H:%M:%S +0000 %Y").replace(tzinfo=utc)
            tmp_tweet = Tweet(id=tweet_id, user=tweet_user, text=tweet_text,
                              created_at=tweet_created_at,
                              retweet_count=tweet_retweet_count)
            add_tweet_search = TwitterSearch(search=busqueda, tweet=tmp_tweet)
            # extract, save and link hashtags with tweet
            extract_hash_tags(tmp_tweet)
            # save newly created objects
            tmp_tweet.save()
            add_tweet_search.save()

## Tests are to be performed here
# buscar_en_todos_los_tweets('#uabc')
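# Several snippets parse Twitter's created_at timestamps by hand. A standalone
# check of the strptime pattern used above, on the example date from the
# original comment:
from datetime import datetime

created = datetime.strptime("Wed Jan 16 22:08:18 +0000 2013",
                            "%a %b %d %H:%M:%S +0000 %Y")
assert created.year == 2013 and created.hour == 22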
def post(self, request, *args, **kwargs):
    # print "in post method"
    slack_request_msg = request.data
    # Check that the request comes from the correct workspace
    if slack_request_msg.get('token') != VeriFication_Token:
        return Response(status=status.HTTP_403_FORBIDDEN)
    # Verification challenge
    if slack_request_msg.get('type') == 'url_verification':
        return Response(data=slack_request_msg, status=status.HTTP_200_OK)
    # Event listening
    if 'event' in slack_request_msg:
        msg_event = slack_request_msg.get('event')
        # Ignore messages posted by the Slack bot itself
        if msg_event.get('subtype') == 'bot_message':
            return Response(status=status.HTTP_200_OK)
        # Process the user's message
        user = msg_event.get('user')
        text = msg_event.get('text')
        channel = msg_event.get('channel')
        # Respond with a hello message
        bot_text = 'Hi <@{}> :wave:'.format(user)
        if 'go' in text.lower():
            slack_client.api_call(method='chat.postMessage', channel=channel, text=bot_text)
        # tweet_obj = Tweet(tweet_id=1, tweet_text="Hi Hesham", tweet_time=datetime.datetime.now())
        # tweet_obj.save()
        # x = Tweet.objects()
        # for t in x:
        #     print t.tweet_id, t.tweet_text
        # Fetch all tweets in the home timeline
        all_tweets = tweepy.Cursor(api.home_timeline).items()
        for item in all_tweets:
            existing = Tweet.objects(tweet_id=item.id)
            # Save until we reach a tweet that is already stored
            if not existing:
                tweet_obj = Tweet(tweet_id=item.id, tweet_text=item.text,
                                  tweet_time=item.created_at)
                tweet_obj.save()
            else:
                break
        return Response(status=status.HTTP_200_OK)
    return Response(status=status.HTTP_200_OK)
def on_success(self, data):
    if 'text' in data:
        data['fetched_timestamp'] = datetime.datetime.now()
        data['fresh_tweet'] = True
        tweet = Tweet()
        tweet.tweet_id = data['id_str']
        tweet.tweets.append(data)
        tweet.save()
        print("saved", self.i)
        self.i += 1
def save_raw(results: Generator) -> Generator:
    """Save the results and yield the id values."""
    for result in results:
        id_str = result.__dict__.get('id_str')
        data = result.__dict__.get('_json')
        tweet = Tweet(id_str)
        tweet.save(data)
        yield id_str
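# save_raw() is lazy: nothing is saved until the generator is consumed. A
# usage sketch, assuming a tweepy cursor as the source of result objects
# (the Cursor call is illustrative, not from the snippet):
saved_ids = list(save_raw(tweepy.Cursor(api.user_timeline).items(100)))
print("saved {} tweets".format(len(saved_ids)))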
def tweet_add():
    user = current_user()
    if user is None:
        return redirect(url_for('login_view'))
    else:
        t = Tweet(request.form)
        # Record who posted it
        t.user = user
        # Persist to the database
        t.save()
        return redirect(url_for('timeline_view', username=user.username))
def post(self, request, username):
    form = TweetForm(self.request.POST)
    if form.is_valid():
        user = User.objects.get(username=username)
        tweet = Tweet(text=form.cleaned_data['text'], user=user,
                      country=form.cleaned_data['country'])
        tweet.save()
        words = form.cleaned_data['text'].split(" ")
        for word in words:
            # For every word of the tweet that starts with "#", create
            # (or fetch) a hashtag and link it to this tweet
            if word.startswith("#"):
                hashtag, created = HashTag.objects.get_or_create(name=word[1:])
                hashtag.tweet.add(tweet)
    return HttpResponseRedirect('/user/' + username)
def add(request):
    headers = {
        'Content-Type': 'text/html',
    }
    uid = current_user(request)
    header = response_with_headers(headers)
    user = User.find(uid)
    # Create the tweet
    form = request.form()
    w = Tweet(form)
    w.user_id = user.id
    w.save()
    return redirect('/tweet?user_id={}'.format(user.id))
def post(self, request, username):
    form = TweetForm(self.request.POST)
    if form.is_valid():
        user = User.objects.get(username=username)
        tweet = Tweet(text=form.cleaned_data['text'], user=user,
                      country="Vietname")  # form.cleaned_data['country']
        tweet.save()
        words = form.cleaned_data['text'].split(" ")
        for word in words:
            # Only words starting with "#" become hashtags
            if word.startswith("#"):
                hashtag, created = HashTag.objects.get_or_create(name=word[1:])
                hashtag.tweet.add(tweet)
        return HttpResponseRedirect('/tweets/user/%s' % username)
    return redirect(Profile.as_view())
def GetPastTweets(app, searchStrings):
    with app.app_context():
        auth = OAuthHandler(current_app.config['TWITTER_CONSUMER_KEY'],
                            current_app.config['TWITTER_CONSUMER_SECRET'])
        auth.set_access_token(current_app.config['TWITTER_ACCESS_TOKEN'],
                              current_app.config['TWITTER_ACCESS_TOKEN_SECRET'])
        api = API(auth)
        # Drop any previously fetched tweets
        deleteEntries = connect(current_app.config['MONGODB_SETTINGS']['DB'])
        deleteEntries.drop_database(current_app.config['MONGODB_SETTINGS']['DB'])
        listTweetIDs = []
        user_query = session['userSentence']
        session['completeTweetFetch'] = False
        session['completedMetaModel'] = False
        for searchString in searchStrings:
            searchTweets = [status for status in
                            Cursor(api.search, q=searchString).items(
                                current_app.config['MAX_FETCH_TWEETS'])]
            for tweet in searchTweets:
                tweet_id = tweet.id
                # Skip tweets already seen in this run or already stored
                if tweet_id in listTweetIDs:
                    continue
                if len(Tweet.objects(tweet_id=tweet_id)) > 0:
                    continue
                tweet_message = tweet.text.encode("utf-8")
                tweet_userhandle = tweet.user.screen_name
                tweet_retweet_count = tweet.retweet_count
                tweet_createtime = tweet.created_at
                tweet_location = None
                tweet_geo = None
                tweet_favoritecount = tweet.favorite_count
                tweet_username = tweet.user.name
                tweet_user_no_of_following = tweet.user.friends_count
                tweet_user_no_of_followers = tweet.user.followers_count
                tweet_positiveOrnegative = 0
                tweet_polarOrneutral = 0
                tweet_isRetweet = 0
                oneTweet = Tweet(tweet_id=tweet_id,
                                 tweet_msg=tweet_message,
                                 tweet_likes=tweet_favoritecount,
                                 tweet_retweets=tweet_retweet_count,
                                 tweet_search_category=searchString,
                                 tweet_user_search_query=user_query,
                                 tweet_positiveOrnegative=tweet_positiveOrnegative,
                                 tweet_polarOrneutral=tweet_polarOrneutral,
                                 tweet_user_handle=tweet_userhandle,
                                 tweet_user_name=tweet_username,
                                 tweet_user_followers=tweet_user_no_of_followers,
                                 tweet_user_following=tweet_user_no_of_following,
                                 tweet_isretweet=tweet_isRetweet,
                                 tweet_time=tweet_createtime,
                                 tweet_location=tweet_location,
                                 tweet_geo=tweet_geo)
                oneTweet.save()
                listTweetIDs.append(tweet_id)
        if session['metamodelThread'] is None:
            session['metamodelThread'] = thread.start_new_thread(
                twittermetamodelBuilding, ())
        print 'completed tweet fetch'
        session['completeTweetFetch'] = True
def homepage(request):
    """Renders the homepage, with the public timeline."""
    if request.method == 'POST':
        # The form has been submitted: bind a form to the POST data
        form = TweetForm(request.POST)
        if form.is_valid():
            # All validation rules pass; process the data in form.cleaned_data
            t = Tweet(text=form.cleaned_data['text'], owner=request.user)
            t.save()
            return HttpResponseRedirect('/')  # Redirect after POST
    else:
        form = TweetForm()  # An unbound form
    return render(request, 'homepage.html', {
        'form': form,
    })
def on_data(self, feed):
    # print feed
    try:
        if feed != []:
            tweet = json.loads(feed)
            tweet_id = tweet['id']
            tweet_message = tweet['text'].encode("utf-8")
            tweet_userhandle = tweet['user']['screen_name']
            tweet_retweet_count = tweet['retweet_count']
            tweet_createtime = datetime.datetime.strptime(
                tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y')
            tweet_location = None
            tweet_geo = None
            tweet_favoritecount = tweet['favorite_count']
            tweet_username = tweet['user']['name']
            tweet_user_no_of_following = tweet['user']['friends_count']
            tweet_user_no_of_followers = tweet['user']['followers_count']
            tweet_positiveOrnegative = 0
            tweet_polarOrneutral = 0
            tweet_isRetweet = 0
            oneTweet = Tweet(tweet_id=tweet_id,
                             tweet_msg=tweet_message,
                             tweet_likes=tweet_favoritecount,
                             tweet_retweets=tweet_retweet_count,
                             tweet_search_category=None,
                             tweet_user_search_query=session['userSentence'],
                             tweet_positiveOrnegative=tweet_positiveOrnegative,
                             tweet_polarOrneutral=tweet_polarOrneutral,
                             tweet_user_handle=tweet_userhandle,
                             tweet_user_name=tweet_username,
                             tweet_user_followers=tweet_user_no_of_followers,
                             tweet_user_following=tweet_user_no_of_following,
                             tweet_isretweet=tweet_isRetweet,
                             tweet_time=tweet_createtime,
                             tweet_location=tweet_location,
                             tweet_geo=tweet_geo)
            oneTweet.save()
            if session['metamodelThread'] is None:
                print 'starting metamodel building'
                session['metamodelThread'] = thread.start_new_thread(
                    twittermetamodelBuilding, ())
            self.num_tweets += 1
            if self.num_tweets < current_app.config['MAX_LIVE_TWEETS']:
                return True
            else:
                print 'completed tweet fetch'
                session['completeTweetFetch'] = True
                return False
        else:
            return True
    except:
        print 'Unexpected error', sys.exc_info()[0]
        return True
def tweet_add(user_id):
    user = User.query.filter_by(id=user_id).first()
    if user is not None:
        log('this user has registered before')
        u = current_user()
        log('debug current_user:', u)
        # The check between the two log calls was redacted ("******") in the
        # source; it presumably verified that the logged-in user is this user.
        if u is not None and u.id == user.id:
            log('someone is logged in and it is this user')
            tweet = Tweet(request.form)
            tweet.user = u
            tweet.save()
            return redirect(url_for('tweet_view', user_id=u.id))
        else:
            return redirect(url_for('login_view'))
    else:
        abort(404)
def create_tweet(payload, user):
    errors = validate_tweet_payload(payload)
    new_tweet = None
    if not errors:
        new_tweet = Tweet(text=payload.get("text"), user=user)
        new_tweet.save()
        # Link the reply only once the new tweet exists
        if payload.get("reply_to", None):
            tweet_being_replied_to = get_tweet(payload.get("reply_to"))
            reply_to = TweetReplies(tweet=new_tweet,
                                    reply_to=tweet_being_replied_to)
            reply_to.save()
    return new_tweet, errors
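# validate_tweet_payload() is not shown in the source. A hypothetical sketch
# of what such a validator might check; the rules and limits here are
# assumptions, not the project's actual implementation:
def validate_tweet_payload(payload):
    errors = []
    text = payload.get("text", "")
    if not text:
        errors.append("text is required")        # assumed rule
    elif len(text) > 280:
        errors.append("text exceeds 280 chars")  # assumed rule
    return errors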
def tweet_add():
    user = current_user()
    if user is None:
        return redirect(url_for('login_view'))
    else:
        # form = request.form
        form = request.get_json()
        t = Tweet(form)
        # Record who posted it
        t.user = user
        # Persist to the database
        t.save()
        j = t.json()
        print('debug', j)
        r = {
            'success': True,
            'data': j,
        }
        return jsonify(r)
def get_context_data(self, *args, **kwargs):
    context = super(TestTweetsView, self).get_context_data(*args, **kwargs)
    # retrieve tweets from db whose actual classifications have been set
    # (already verified from previous tests)
    tweets_train_tourism = Tweet.objects.filter(
        classified=True, actual_classification='tourism')
    tweets_train_nontourism = Tweet.objects.filter(
        classified=True, actual_classification='nontourism')
    # train classifier using tourism and nontourism files and tweets from db
    train_result = train_db(tweets_train_tourism, tweets_train_nontourism)
    classifier = train_result['classifier']
    # retrieve random tweets from a file
    tweets = self.randomize_tweets('classifier/data/2015-03-06.happydata.txt', 10)
    tweets_test = []
    for tweet in tweets:
        if not Tweet.objects.filter(tweet_id=tweet['id']) and tweet['coordinates']:
            tweet_id = tweet['id']
            user = tweet['user']['name'].encode('utf-8')
            lat = tweet['coordinates']['coordinates'][1]
            lng = tweet['coordinates']['coordinates'][0]
            text = tweet['text'].encode('utf-8')
            if tweet_id and user and lat and lng and text:
                # classify tweet
                classification = classifier.classify(
                    feature_extractor_lda_tripadvisor_top_words_weights(text))
                # save tweet with classification (to be verified later)
                tweet_obj = Tweet(tweet_id=tweet_id, user=user, lat=lat,
                                  lng=lng, text=text,
                                  classification=classification)
                tweet_obj.save()
                tweets_test.append(tweet_obj)
    context['tweets'] = tweets_test
    return context
def tweet(self, include, reply, data):
    tweet = data.get("text", "")
    log.info("In tweet(), ")
    h = data.get('handler', None)
    wrap = TW_API.get(h, None)
    try:
        if include:
            inObj = Includable(include)
            tweet = tweet + " " + str(inObj)
        if reply:
            q = reply.get('topic', None)
            sr_list = SearchResult.objects.filter(query=q, autoPostStatus=None)[:1]
            log.info("length sr_list: " + str(sr_list))
            if len(sr_list) < 1:
                search = Searching()
                search.search(data.get('handler'), reply.get('topic'))
            log.info("reply input: " + str(reply))
            r = Reply(reply)
            tweetId = r.getTweetId()
            screenName = r.getScreenName()
            tweet = wrap.replyTweet(tweet, screenName, tweetId)
            reply = {"screenName": screenName, "tweetId": tweetId}
        else:
            log.info("about to post a tweet...")
            try:
                wrap.postTweet(tweet)
            except Exception as e:
                log.info("error in posting tweet, " + str(e))
            log.info("came here")
            # Record the posted tweet
            tw = Tweet()
            secret = TwitterSecret.objects.get(handler=h)
            tw.handler = secret
            tw.state = "updated"
            tw.uuid = uuid.uuid4()
            tw.text = tweet
            tw.save()
    except Exception as e:
        log.info("An error occurred while posting the tweet: " + str(e))
    return Response({'tweet': tweet, "reply": reply})
def get_tweets_data_for_hash_tag_from_twitter(hash_tag):
    """
    Get tweets by hash tag data from twitter
    :param hash_tag:
    :return:
    """
    hash_tag = HashTag.objects.get(hash_tag=hash_tag)
    tweets = []
    new_tweets = 0
    tweet_objs = []
    base_url = 'https://api.twitter.com/1.1/search/tweets.json'
    next_url = '?q=%23' + hash_tag.hash_tag + '&lang=en'
    logger.info("Getting Tweet data for hashtag: %s" % hash_tag.hash_tag)
    # Follow the search API's pagination until there is no next_results page
    while next_url:
        url = base_url + next_url
        page_data = client.request(url)
        tweets += page_data.get('statuses')
        next_url = page_data.get('search_metadata').get('next_results')
    for tweet in tweets:
        try:
            tweet_obj = Tweet.objects.get(id_str=tweet.get('id_str'))
        except ObjectDoesNotExist:
            tweet_obj = Tweet()
            tweet_obj.id_str = tweet.get('id_str')
            tweet_obj.created_at = dateutil.parser.parse(tweet.get('created_at'))
            tweet_obj.text = tweet.get('text')
            tweet_obj.hash_tag = hash_tag
            # get the tweet typos
            get_tweet_typos(tweet_obj)
            tweet_obj.save()
            new_tweets += 1
        tweet_objs.append(tweet_obj)
    logger.info("Found %s tweets for hashtag: %s" % (len(tweet_objs), hash_tag.hash_tag))
    logger.info("Found %s new tweets for hashtag: %s" % (str(new_tweets), hash_tag.hash_tag))
    return tweet_objs
def import_tweets(request):
    consumer = oauth.Consumer(settings.TWITTER_CONSUMER_KEY,
                              settings.TWITTER_CONSUMER_SECRET)
    token = oauth.Token(settings.TWITTER_ACCESS_TOKEN,
                        settings.TWITTER_ACCESS_TOKEN_SECRET)
    client = oauth.Client(consumer, token)
    # GET YOUR PROFILE
    response, content = client.request(
        'https://api.twitter.com/1/statuses/user_timeline.json', 'GET')
    tweets = []
    if response.status == 200:
        tweets = simplejson.loads(content)
    for tweet in tweets:
        if Tweet.objects.filter(tweet_id=long(tweet['id'])).count() < 1:
            db_tweet = Tweet()
            db_tweet.text = tweet['text']
            db_tweet.created_at = time.strftime(
                '%Y-%m-%d %H:%M:%S',
                time.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'))
            db_tweet.tweet_id = tweet['id']
            db_tweet.save()
    # put the logging stuff here
    return render_to_response('templates/import_tweets.html', locals())
def update_tweets(request):
    twitter_auth = request.user.social_auth.filter(provider='twitter')[0]
    oauth_token = twitter_auth.tokens['oauth_token']
    oauth_secret = twitter_auth.tokens['oauth_token_secret']
    twitter = Twython(settings.TWITTER_CONSUMER_KEY,
                      settings.TWITTER_CONSUMER_SECRET,
                      oauth_token, oauth_secret)
    tweets = twitter.get_home_timeline(count=200)
    for tweet in tweets:
        # Only keep tweets that carry at least one URL, skipping any
        # already stored for this user
        if len(tweet['entities']['urls']) != 0:
            if not Tweet.objects.filter(tweet_id=tweet['id'], user=request.user):
                tweet_object = Tweet()
                tweet_object.tweet_id = tweet['id']
                tweet_object.text = tweet['text']
                tweet_object.source_url = tweet['entities']['urls'][0]['expanded_url']
                tweet_object.display_url = tweet['entities']['urls'][0]['display_url']
                tweet_object.host_url = urlparse.urlparse(
                    tweet['entities']['urls'][0]['expanded_url']).hostname
                tweet_object.created_at = time.strftime(
                    '%Y-%m-%d %H:%M:%S',
                    time.strptime(tweet['created_at'], '%a %b %d %H:%M:%S +0000 %Y'))
                tweet_object.tweeted_by = tweet['user']['screen_name']
                tweet_object.user = request.user
                tweet_object.save()
def save(self):
    '''Save tweet to database.

    :returns: bool -- True if the save succeeded, False if the tweet was
        empty or an exception happened during the save
    '''
    from models import EveryColorBotTweet, Tweet, ReTweet
    if self.tweet == "":
        logger.info("Saving called for empty tweet. Skipping.")
        return False
    try:
        twinst = Tweet(message=self.tweet, value=self.appreciation,
                       muse=self.muse_classname,
                       context=self.context_classname,
                       color_code=self.color_code,
                       color_name=self.color_name)
        twinst.save()
        if self.retweet:
            screen_name = self.screen_name
            if screen_name == 'everycolorbot':
                inst = EveryColorBotTweet.objects.get_or_none(url=self.retweet_url)
                if inst:
                    inst.tweeted = True
                    inst.save()
            reinst = ReTweet(tweet_url=self.retweet_url,
                             screen_name=screen_name, tweet=twinst)
            reinst.save()
        logger.info("Tweet saved to database: {}".format(self.tweet))
    except Exception:
        e = traceback.format_exc()
        logger.error("Could not save tweet to database, because of error: {}".format(e))
        return False
    return True
def mifeed(request, username):
    url = "http://twitrss.me/twitter_user_to_rss/?user=" + username
    # The two lines below reconstruct a span redacted ("******") in the
    # source, mirroring feed() above: parse the feed and init the buffer.
    dicc = feedparser.parse(url)
    salida = ""
    for number in range(5):
        salida += dicc.entries[number].title + "<br>"
        urls = dicc.entries[number].title.split()
        for word in urls:
            if word.startswith("http"):
                word = word.split('&')[0]
                salida += "<li><a href=" + word + ">" + word + "</a></li>"
                bSoup = BeautifulSoup(urllib.urlopen(word).read())
                if bSoup.p is not None:
                    salida += str(bSoup.p).decode('utf8') + "</br>"
                if bSoup.img is not None:
                    salida += str(bSoup.img).decode('utf8') + "</br>"
        salida += "</br>"
        try:
            fila = Persona.objects.get(name=username)
        except Persona.DoesNotExist:
            fila = Persona(name=username)
            fila.save()
        try:
            f = Tweet.objects.get(content=dicc.entries[number].title)
        except Tweet.DoesNotExist:
            f = Tweet(content=dicc.entries[number].title,
                      url=dicc.entries[number].link,
                      name=fila)
            f.save()
    return HttpResponse(salida)
def handler(request, username):
    url = 'http://twitrss.me/twitter_user_to_rss/?user=' + username
    # ****** most of this function was redacted in the source. It presumably
    # fetched and parsed the feed page into `soup` and started the HTML page
    # accumulated in `salida`; only the image loop and the footer survive.
    for img in soup.find_all('img'):
        salida += str(img) + "<br>\n\t\t\t"
    salida += "<br>\n\t\t"
    salida += "\n\t</body>\n</html>"
    return HttpResponse(salida)
def mapper(page, params):
    """The mapper function for the MapReduce which analyzes tweets.

    We do a word N-gram analysis after stripping accents, decoding to UTF-8
    and then removing stop words.

    Args:
        page: The number of the twitter search page for the query that this
            mapper has to fetch and analyze.
        params: The parameter object that holds the map reduce initialization
            parameters. This contains the query string!
    """
    import datetime
    import json
    import requests
    from email import utils
    from mongoengine.base import ValidationError
    from models import Tweet
    from models import GeoLocation
    from models import FetchMetaData

    trained_vectorizer = params.trained_vectorizer
    fetch_meta_data = FetchMetaData(
        query_data={
            'query_terms': params.query,
            'page': page
        },
        searched_at=datetime.datetime.now(),
        tweets=[]
    )
    r = requests.get(
        'http://search.twitter.com/search.json',
        params={'q': params.query, 'rpp': 100, 'page': page, 'lang': 'en'})
    page_of_tweets = json.loads(
        r.text.decode(trained_vectorizer.charset,
                      trained_vectorizer.charset_error))
    tweets = page_of_tweets.get('results')
    if not tweets:
        print ('No tweets were fetched in this mapper with page '
               'number %s.' % (page))
        print 'HTTP status was: ', r.status_code
        return
    # List of tweet database objects to write to the metadata object.
    tweets_saved = []
    for tweet in tweets:
        tweet_text = tweet['text']
        tweet_id = tweet['id']
        # Save it in the database.
        try:
            tweet_inserted = Tweet(**tweet)
            tweet_inserted.save()
            fetch_meta_data.tweets.append(tweet_inserted)
            analyze = trained_vectorizer.build_analyzer()
            tokens = analyze(tweet_text)
            for token in tokens:
                yield (token, (tweet_id, 1))
        except ValidationError as e:
            print e
    fetch_meta_data.save()
def route_add(request):
    form = request.form()
    t = Tweet(form)
    t.user_id = int(current_user(request))
    t.save()
    return redirect('/')
def get_tweets(request):
    consumer_key = keys.CONSUMER_KEY
    consumer_secret = keys.CONSUMER_SECRET
    access_token = keys.ACCESS_TOKEN
    access_token_secret = keys.ACCESS_TOKEN_SECRET
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    coxinha_words = [
        'AvanteTemer', 'ForaDilma', 'petralha', 'ForaLula', 'TchauQuerida',
        'Petista', 'Esquerdista'
    ]
    petralha_words = [
        'NãoVaiTerGolpe', 'ForaTemer', 'GloboGolpista', 'DilmaFica',
        'FicaDilma', 'TemerJamais', 'VoltaDilma', 'VaiTerLuta', 'Golpista'
    ]
    #potential_words = ['Dilma', 'Lula', 'Temer', 'Cunha', 'Aécio', 'Moro', 'Golpe', 'Oposição', 'Impeachment', 'governo', 'democracia', 'político', 'política', 'operação', 'Corrupção', 'Corrupto', 'Jucá', 'cpiminc', 'Esquerda', 'Sarney']
    #potential_words = ['Comunista', 'Comuna', 'esquerdopata', 'Jucá', 'Direita', 'Ministro']
    #potential_words = ['Petralhas', 'PassaDilma', 'Protesto', 'Protestos', 'Ministros']

    # is_Petralha = False
    # for query in coxinha_words:
    #     for status in tweepy.Cursor(api.search, q=query + '&place:d9d978b087a92583').items():
    #         if status.geo != None:
    #             print "Tweet:", status.text.encode('utf8')
    #             print "Geo:", status.geo['coordinates']
    #             print "//////////////////"
    #             try:
    #                 tweet = Tweet()
    #                 tweet.is_Petralha = is_Petralha
    #                 tweet.text = status.text.encode('utf-8')
    #                 tweet.lat = status.geo['coordinates'][0]
    #                 tweet.lng = status.geo['coordinates'][1]
    #                 tweet.save()
    #                 print "ADDED"
    #             except IntegrityError as e:
    #                 print e.message

    is_Petralha = True
    for query in petralha_words:
        for status in tweepy.Cursor(api.search, q=query + '&place:d9d978b087a92583').items():
            if status.geo != None:
                print "Tweet:", status.text.encode('utf8')
                print "Geo:", status.geo['coordinates']
                print "//////////////////"
                try:
                    tweet = Tweet()
                    tweet.is_Petralha = is_Petralha
                    tweet.text = status.text.encode('utf-8')
                    tweet.lat = status.geo['coordinates'][0]
                    tweet.lng = status.geo['coordinates'][1]
                    tweet.save()
                    print "ADDED"
                except IntegrityError as e:
                    print e.message

    # for query in potential_words:
    #     for status in tweepy.Cursor(api.search, q=query + '&place:d9d978b087a92583').items():
    #         if status.geo != None:
    #             print "Tweet:", status.text.encode('utf8')
    #             print "Geo:", status.geo['coordinates']
    #             print "//////////////////"
    #             try:
    #                 tweet = Tweets()
    #                 tweet.text = status.text.encode('utf-8')
    #                 tweet.lat = status.geo['coordinates'][0]
    #                 tweet.lng = status.geo['coordinates'][1]
    #                 tweet.save()
    #                 print "ADDED POTENTIAL"
    #             except IntegrityError as e:
    #                 print e.message

    print "That's the end"
    return render(request, 'coxinhaoupetralha/index.html', {})
def save_tweets(tweets):
    print("save %s tweets" % len(tweets))
    for item in tweets:
        tweet = Tweet(**item)
        tweet.save()
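# save_tweets() expects an iterable of dicts whose keys match the Tweet
# model's fields. A usage sketch; the field names here are assumptions:
save_tweets([
    {"tweet_id": "1", "text": "hello"},
    {"tweet_id": "2", "text": "world"},
])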