def get_tweets_by_user_id(request):
    """Return a JSON payload with the 40 most recent tweets for the
    ``user_id`` given in the request's query string."""
    api = get_api_with_auth(request)
    recent = Tweet.get_recent_tweets(api, request.GET['user_id'], 40)
    return JsonResponse({'tweets': recent})
def get_tweets(search_term, username, date, phrase, search_phrase):
    """Query the Twitter v1.1 search API and build unsaved Tweet objects.

    Searches by ``search_term`` if given, otherwise by ``from:username``.
    ``date`` is an upper bound (``until``); defaults to today's local date.
    ``phrase`` is accepted for interface compatibility but unused here.
    Returns a list of Tweet model instances (not persisted) tagged with
    ``search_phrase``.
    """
    base_url = 'https://api.twitter.com/'
    auth = OAuth1(API_KEY, API_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)

    # Free-text search takes priority; otherwise restrict to the author.
    if search_term:
        search_keyword = search_term
    else:
        search_keyword = 'from:' + str(username)

    if not date:
        date = datetime.now().strftime("%Y-%m-%d")

    search_params = {
        'q': search_keyword,
        'result_type': 'recent',
        'until': date,
        'count': 100,
    }
    search_url = '{}1.1/search/tweets.json'.format(base_url)
    tweets = requests.get(search_url, params=search_params, auth=auth)

    # Twitter's created_at format -> plain YYYY-MM-DD date string.
    # Hoisted out of the loop; build the result as a comprehension.
    f1 = '%a %b %d %H:%M:%S +0000 %Y'
    f2 = '%Y-%m-%d'
    return [
        Tweet(search_phrase=search_phrase,
              date=datetime.strptime(i['created_at'], f1).strftime(f2),
              user=i['user']['name'],
              post=i['text'].encode("utf-8"))
        for i in tweets.json()['statuses']
    ]
def populate_tweets(_, __):
    """Load the Sentiment140-style training CSV and persist every row as a
    Tweet record.

    The two positional parameters are ignored (kept for the caller's
    signature, e.g. a Django migration's ``(apps, schema_editor)``).
    """
    cols = ['sentiment', 'id', 'date', 'query_string', 'user', 'content']
    dataframe = pandas.read_csv(
        './data_processing/data/training_data.16000.csv',
        header=None, names=cols, encoding="ISO-8859-1", low_memory=False)
    # Remove columns the Tweet model does not store.
    dataframe.drop(['date', 'query_string', 'user'], axis=1, inplace=True)
    # itertuples() visits every row at C speed and avoids both the per-cell
    # .loc lookups and the fragile df['id'].count() row count (count()
    # silently skips NaN ids, which would truncate the import).
    for row in dataframe.itertuples(index=False):
        Tweet(
            id=row.id,
            content=row.content,
            created_at=datetime.now(),
            updated_at=datetime.now(),
        ).save()
def tweets(uid, min_date=None, max_date=None, limit=100):
    """Return dicts for all of a user's tweets created between
    ``min_date`` and ``max_date``, sorted by id, up to ``limit`` rows
    (``limit`` falsy means no cap)."""
    window_start = parse_date(min_date)
    window_end = parse_date(max_date)
    limit = int(limit) if limit else None
    query = (Tweet.user_id == int(uid)) & Tweet.created_at.range(window_start, window_end)
    found = Tweet.find(query, limit=limit, sort=Tweet._id)
    return [tweet.to_d() for tweet in found]
def get_tweets(search_term, tweet_count):
    """Fetch up to ``tweet_count`` recent English tweets matching
    ``search_term`` via tweepy, score each with get_sentiment(), and save
    them; duplicate rows (IntegrityError) are skipped with a message.

    Credentials are read from the TWITTER_* environment variables.
    """
    consumer_key = str(os.environ.get('TWITTER_API_CONSUMER_KEY'))
    consumer_secret = str(os.environ.get('TWITTER_CONSUMER_SECRET'))
    access_token = str(os.environ.get('TWITTER_ACCESS_TOKEN'))
    access_token_secret = str(os.environ.get('TWITTER_ACCESS_SECRET'))
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    api = tweepy.API(auth)
    # tweet_mode='extended' exposes the untruncated text as .full_text.
    tweets = api.search(search_term, count=tweet_count, lang='en',
                        show_user=True, tweet_mode='extended')
    for tweet in tweets:
        try:
            formatted_tweet = Tweet(
                tweet_id=tweet.id,
                date=tweet.created_at,
                name=tweet.user.name,
                twitter_name=tweet.user.screen_name,
                text=tweet.full_text,
                url='https://twitter.com/' + tweet.user.screen_name +
                    '/status/' + str(tweet.id),
                sentiment_score=get_sentiment(tweet.full_text))
            formatted_tweet.save()
            print('Tweet ' + str(tweet.id) + ' saved!')
        except IntegrityError:
            # Most likely a duplicate tweet_id already in the database;
            # log and move on to the next tweet. (Fixed typo; removed the
            # dead `pass` that followed the print.)
            print('An error occurred...')
def create_tweet(request):
    """
    Create a tweet from POSTed form data and persist it.

    Reads ``id_user`` and ``content`` from request.POST (each defaulting
    to 0 when absent) and returns a plain confirmation response.

    NOTE(review): a default of 0 for the text field ``content`` looks
    odd — confirm whether '' was intended.
    """
    new_tweet = Tweet()
    new_tweet.id_user = request.POST.get('id_user', 0)
    new_tweet.content = request.POST.get('content', 0)
    new_tweet.save()
    return HttpResponse('Tweet created')
def make_graph(time, users, tri_edges):
    """Build a GraphSnapshot of mention counts among ``users`` for the
    window [time, time + TIME_DELAY).

    Counts directed (author, mentioned) edges from tweets in the window,
    keeping only self-free edges whose endpoints are in ``users`` and whose
    undirected form appears in ``tri_edges``.
    """
    tweets = Tweet.find(Tweet.created_at.range(time, time + TIME_DELAY))
    edges = ((tweet.user_id, at)
             for tweet in tweets
             for at in tweet.mentions
             if tweet.user_id != at and at in users)
    graph = defaultdict(int)
    for e in edges:
        # tri_edges holds undirected edges as sorted tuples.
        if tuple(sorted(e)) in tri_edges:
            graph[e] += 1
    # Fixed: iterkeys()/itervalues() are Python-2-only and raise
    # AttributeError on Python 3; iterating the dict / values() works on
    # both and preserves a consistent key/value ordering.
    return GraphSnapshot(
        _id=time,
        from_ids=(k[0] for k in graph),
        to_ids=(k[1] for k in graph),
        weights=graph.values(),
    )