def _get_tweets_based_on_request(request):
    """Fetch tweets according to the request's query parameters.

    Checks, in priority order: 'topic' (keyword search), 'tweet_url'
    (replies to a specific tweet), 'user' (a user's timeline).  'limit'
    (default 50) caps the number of tweets fetched.

    Returns:
        A list of tweets for the first matching parameter, an error-page
        HttpResponse when the tweet URL cannot be parsed, or None when no
        recognized parameter is present.

    NOTE(review): the `error` value returned by data_fetch_public is
    discarded here; callers receive whatever tweets were fetched.
    """
    search_term = request.GET.get('topic')
    tweet_url = request.GET.get('tweet_url')
    username = request.GET.get('user')
    limit = int(request.GET.get('limit', 50))
    if search_term:
        tweets, error = data_fetch_public.get_tweets_from_search(
            search_term, limit, data_fetch_constants.DATA_SOURCE_TWEEPY
        )
        return tweets
    elif tweet_url:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt / SystemExit.
        try:
            user, tweet_id = utils.get_user_tweet_id_from_tweet_url(tweet_url)
        except Exception:
            return render(request, 'clientapp/error.html', {})
        tweets, error = data_fetch_public.get_replies_of_tweet(
            tweet_id, user, limit, data_fetch_constants.DATA_SOURCE_TWEEPY
        )
        return tweets
    elif username:
        tweets, error = data_fetch_public.get_tweets_of_user(
            username, limit, data_fetch_constants.DATA_SOURCE_TWEEPY
        )
        return tweets
    return None
def _get_tweets_from_user_scores(user_scores, limit):
    """Fetch tweets for at most MAX_DEPTH users from (username, score) pairs.

    Args:
        user_scores: iterable of (username, score) pairs; only the first
            MAX_DEPTH usernames are queried (the score is not used here).
        limit: per-user tweet count passed to the fetcher.

    Returns:
        A flat list of all fetched tweets.  Fetch errors are ignored
        (best-effort aggregation).
    """
    all_tweets = []
    for index, (username, _score) in enumerate(user_scores):
        # `break`, not `continue`: once the cap is reached every later
        # iteration would be skipped anyway, so stop walking the rest.
        if index >= MAX_DEPTH:
            break
        tweets, error = data_fetch_public.get_tweets_of_user(
            username, limit, data_fetch_constants.DATA_SOURCE_TWEEPY)
        all_tweets.extend(tweets)
    return all_tweets
def get_graph_for_user(user, limit=50):
    """Build graph data points for a user plus their top mentions/hashtags.

    Fetches the user's tweets, summarizes them to find the top mentioned
    users and hashtags, fetches tweets for those as well, and converts the
    combined pool into data points.

    Args:
        user: the username whose graph is being built.
        limit: per-source tweet count (default 50).

    Returns:
        A list of data points, or [] when the initial fetch reported an
        error.
    """
    tweets, error = data_fetch_public.get_tweets_of_user(
        user, limit, data_fetch_constants.DATA_SOURCE_TWEEPY)
    if error:
        return []
    summary = summarizer.get_results_summary(tweets, TOXICITY_THRESHOLD)
    # Copy instead of aliasing: the original assigned `all_tweets = tweets`
    # and then extended it, mutating the fetcher's list in place.
    all_tweets = list(tweets)
    all_tweets.extend(
        _get_tweets_from_user_scores(summary.top_mentioned_users.items(),
                                     limit))
    all_tweets.extend(
        _get_tweets_from_hashtag_scores(summary.top_hashtags.items(), limit))
    # list(...) over the generator, rather than an identity comprehension.
    return list(yield_data_points_from_tweets(all_tweets))
def summary(request):
    """Render the summary page for a topic, tweet conversation, or user.

    Dispatches on the same query parameters as _get_tweets_based_on_request
    ('topic' > 'tweet_url' > 'user'); with none present, renders the home
    view with top trends and searches.  Renders the error page when the
    tweet URL cannot be parsed or the fetch reports an error.
    """
    search_term = request.GET.get('topic')
    tweet_url = request.GET.get('tweet_url')
    username = request.GET.get('user')
    limit = int(request.GET.get('limit', 50))
    error = False
    tweets = []
    about = None
    analysis_type = TYPE_USER
    if search_term:
        tweets, error = data_fetch_public.get_tweets_from_search(
            search_term, limit, data_fetch_constants.DATA_SOURCE_TWEEPY
        )
        about = 'Topic: %s' % search_term
        analysis_type = TYPE_TOPIC
    elif tweet_url:
        # Narrowed from a bare `except:`, which would also swallow
        # KeyboardInterrupt / SystemExit.
        try:
            user, tweet_id = utils.get_user_tweet_id_from_tweet_url(tweet_url)
        except Exception:
            return render(request, 'clientapp/error.html', {})
        tweets, error = data_fetch_public.get_replies_of_tweet(
            tweet_id, user, limit, data_fetch_constants.DATA_SOURCE_TWEEPY
        )
        about = 'User: %s; Tweet: %s' % (user, tweet_id)
        analysis_type = TYPE_CONVERSATION
    elif username:
        tweets, error = data_fetch_public.get_tweets_of_user(
            username, limit, data_fetch_constants.DATA_SOURCE_TWEEPY
        )
        about = 'User: %s' % username
        analysis_type = TYPE_USER
    else:
        # No query parameters: show the landing content instead.
        context = {
            'homeresult': {
                'top_trends': _get_top_trends_data(),
                'top_searches': _get_top_searches_data(),
            }
        }
        return render(request, 'clientapp/summary.html', context)
    if error:
        return render(request, 'clientapp/error.html', {})
    return _render_results_with_summary(request, about, analysis_type,
                                        search_term, tweets)
def analyze_user(request):
    """Return a JSON list of up to 100 recent tweet dicts for a user.

    Fetches tweets for the cleaned username and serializes those authored
    by that user.  On fetch failure, returns a JSON object with the error.
    """
    user = request.GET.get('user')
    user_clean = utils.get_clean_username(user)
    tweets, error = data_fetch_public.get_tweets_of_user(
        user_clean, 100, data_fetch_constants.DATA_SOURCE_TWEEPY
    )
    if error:
        return JsonResponse({'error': error}, safe=False)
    logger.info('Analyzing toxicity of results')
    # perf_counter() is monotonic — preferred over time() for intervals.
    start = time.perf_counter()
    # Filter against the cleaned name: tweets were fetched for user_clean,
    # so comparing to the raw `user` (e.g. with a leading '@') could
    # otherwise drop every result.  NOTE(review): assumes tweet.user holds
    # the cleaned form — confirm against the fetcher.
    result = [
        tweet.to_dict() for tweet in tweets if tweet.user == user_clean
    ]
    end = time.perf_counter()
    # Lazy %-style args avoid formatting when the level is disabled.
    logger.info('Took %s seconds', end - start)
    return JsonResponse(result, safe=False)