def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) # TODO positive, negative, neutral = 0.0, 0.0, 0.0 positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") analyzer = Analyzer(positives, negatives) tweets = helpers.get_user_timeline(screen_name, 100) if tweets == None: return redirect(url_for("index")) for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
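The routes in this collection all lean on an Analyzer class with the same small interface: a constructor that takes paths to positive and negative word lists, and an analyze() method that returns a score greater than, less than, or equal to zero. As a reference point, here is a minimal sketch of that interface; the tokenizer choice (nltk's TweetTokenizer) mirrors the variant further down that tokenizes tweets explicitly, and the rest is an illustrative assumption rather than any one submission's actual class.

import nltk


class Analyzer():
    """Sketch of the sentiment analyzer these routes assume."""

    def __init__(self, positives, negatives):
        """Load positive and negative word lists from the given file paths."""
        self.positives = set()
        self.negatives = set()
        with open(positives) as f:
            for line in f:
                # the common word lists ship with ';' comment lines and blanks
                if line.strip() and not line.startswith(";"):
                    self.positives.add(line.strip().lower())
        with open(negatives) as f:
            for line in f:
                if line.strip() and not line.startswith(";"):
                    self.negatives.add(line.strip().lower())

    def analyze(self, text):
        """Return a score: +1 per positive word, -1 per negative word."""
        tokenizer = nltk.tokenize.TweetTokenizer()
        score = 0
        for token in tokenizer.tokenize(text.lower()):
            if token in self.positives:
                score += 1
            elif token in self.negatives:
                score -= 1
        return score
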
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, count=100) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) # calculate the number of positive, negative and neutral tweets positive, negative, neutral = 0.0, 0.0, 0.0 for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) if tweets == None: return redirect(url_for("index")) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer and sentiment values analyzer = Analyzer(positives, negatives) positive, negative, neutral = 0.0, 0.0, 0.0 # for each tweet, determine overall sentiment and increase sentiment score accordingly for i in range(len(tweets)): score = analyzer.analyze(tweets[i]) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) # TODO positive, negative, neutral = 0.0, 0.0, 0.0 analyzer = Analyzer('positive-words.txt', 'negative-words.txt') for tweet in tweets: score = analyzer.analyze(tweet) if score > 0: positive += 1 elif score < 0: negative += 1 else: neutral += 1 positive /= len(tweets) negative /= len(tweets) neutral /= len(tweets) # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def visualisation():

    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name, 200)

    # word lists (German SentiWS plus English)
    positives = "static/SentiWS_v18c_Positive.txt"
    negatives = "static/SentiWS_v18c_Negative.txt"
    poENG = "static/positive-words.txt"
    neENG = "static/negative-words.txt"

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives, poENG, neENG)

    # accumulate positive and negative score totals across all tweets
    positive = 0
    negative = 0
    for tweet in tweets:
        tweet['score'] = analyzer.analyze(tweet['tweet'])
        if tweet['score'] > 0.0:
            positive += tweet['score']
        elif tweet['score'] < 0.0:
            negative -= tweet['score']

    # generate chart
    chart = helpers.chart(positive, negative)

    # render results
    return render_template("visualisation.html", chart=chart, screen_name=screen_name, tweets=tweets)

def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) # classifies tweets positive, negative, neutral = 0, 0, 0 for tweet in tweets: score = analyzer.analyze(tweet) # if score is neutral if (score == 0): neutral += 1 elif (score > 0): positive += 1 else: negative += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) # TODO positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) positive, negative, neutral = 0.0, 0.0, 0.0 # analyze text for i in range(0, 200): score = analyzer.analyze(tweets[i]) if score > 0.0: print(colored(str(tweets[i]), "green")) positive = positive + 1 elif score < 0.0: print(colored(str(tweets[i]), "red")) negative = negative + 1 else: print(colored(str(tweets[i]), "yellow")) neutral = neutral + 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") # If user doesn't exist / has private account - redirect to start page. if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) if not tweets: return redirect(url_for("index")) positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # Instantiate analyzer. analyzer = Analyzer(positives, negatives) positive, negative, neutral = 0.0, 0.0, 100.0 # Get percentage of sentiment for each word in a tweet. for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: positive += 100 / len(tweets) elif score < 0.0: negative += 100 / len(tweets) else: neutral += 100 / len(tweets) # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) if tweets == None: sys.exit(colored("Unexpected error!", "red")) positive, negative, neutral = 0.0, 0.0, 0.0 for tweet in tweets: score = analyzer.analyze(tweet) if score < 0.0: negative +=1.0 elif score > 0.0: positive += 1.0 else: neutral += 1.0 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return render_template("error index.html") # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) if tweets == []: return render_template("error index.html") # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) positive, negative, neutral = 0.0, 0.0, 0.0 for tweet in tweets: temp = analyzer.analyze(tweet) if temp > 0: positive += 1 elif temp < 0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive / (positive + negative + neutral), negative / (positive + negative + neutral), neutral / (positive + negative + neutral)) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") analyzer = Analyzer(positives, negatives) analyzer.getTweetTokens(screen_name) positive = analyzer.classifications["positives"] / analyzer.total_tweets negative = analyzer.classifications["negatives"] / analyzer.total_tweets neutral = analyzer.classifications["neutrals"] / analyzer.total_tweets # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, count=100) positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") anz = Analyzer(positives, negatives) po = ne = nu = 0 for tweet in tweets: if anz.analyze(tweet) > 0: po += 1 elif anz.analyze(tweet) < 0: ne += 1 else: nu += 1 positive, negative, neutral = po / len(tweets) * 100, ne / len( tweets) * 100, nu / len(tweets) * 100 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) # initialise percentages positive, negative, neutral = 0.0, 0.0, 0.0 # classify each tweet and add to percentages for tweet in tweets: score = analyzer.analyze(tweet) if score == 1: positive += 1 if score == -1: negative += 1 if score == 0: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) if (tweets == None): sys.exit( "Screen name doesn't exist or a screen name's tweets are private") # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") analyzer = Analyzer(positives, negatives) positive, negative, neutral = 0.0, 0.0, 0 for tweet in tweets: if (analyzer.analyze(tweet) > 0): positive += 1 elif (analyzer.analyze(tweet) < 0): negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, count=100) # the main part positive, negative, neutral = 0.0, 0.0, 0.0 positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: positive = positive + 1 elif score < 0.0: negative = negative + 1 else: neutral = neutral + 1 # generate chart chart = helpers.chart(positive / len(tweets), negative / len(tweets), neutral / len(tweets)) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) if not tweets: return redirect(url_for("index")) positives = os.path.join(os.path.dirname(os.path.realpath(__file__)), "positive-words.txt") negatives = os.path.join(os.path.dirname(os.path.realpath(__file__)), "negative-words.txt") analyzer = Analyzer(positives, negatives) positive, negative, neutral = 0.0, 0.0, 100.0 for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, count=100) # get analyzer object and set counter to 0 positive, negative, neutral = 0.0, 0.0, 0.0 analyzer = Analyzer(os.path.join(sys.path[0], "positive-words.txt"), os.path.join(sys.path[0], "negative-words.txt")) # iterate through all the tweets for i in tweets: # calculate score and then add to counter if analyzer.analyze(i) > 0.0: positive += 1.0 elif analyzer.analyze(i) < 0.0: negative += 1.0 else: neutral += 1.0 # map all to percentage positive = (positive / (positive + negative + neutral)) * 100.0 negative = (negative / (positive + negative + neutral)) * 100.0 neutral = (neutral / (positive + negative + neutral)) * 100.0 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) positive_total = 0 negative_total = 0 neutral_total = 0 # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) if tweets == None: return redirect(url_for("index")) # for each tweet for tweet in tweets: analyzer = Analyzer(positives, negatives) score = analyzer.analyze(tweet) if (score > 0.0): positive_total += 1 elif (score < 0.0): negative_total += 1 else: neutral_total += 1 # generate chart chart = helpers.chart(positive_total, negative_total, neutral_total) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweetss = helpers.get_user_timeline(screen_name) # TODO positive, negative, neutral = 0.0, 0.0, 0.0 positives, negatives = [], [] analyzer = Analyzer(positives, negatives) for tweet in tweetss: score = analyzer.analyze(tweet) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 total = (positive + negative + neutral) # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) positive, negative, neutral = 0.0, 0.0, 100.0 for i in range(len(tweets)): tally = analyzer.analyze(tweets[i]) if tally > 0.0: positive = positive + 1.0 neutral = neutral - 1.0 elif tally < 0.0: negative = negative + 1.0 neutral = neutral - 1.0 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def get_tweets(screen_name):

    # validate screen_name
    if not screen_name:
        return jsonify({'Response': '404'})

    # absolute paths to the word lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)
    if tweets is None:
        return jsonify({'Response': '404'})

    # score each tweet and pair tweets with their scores
    scores = [analyzer.analyze(tweet) for tweet in tweets]
    tweets_with_scores = dict(zip(tweets, scores))

    return jsonify({
        'Response': '200',
        'tweets_with_scores': tweets_with_scores
    })

def search(): # validate screen_name screen_name = request.args.get("screen_name") if not screen_name: redirect(url_for("/")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) positive, negative, neutral = 0.0, 0.0, 0.0 for tweet in tweets: score = Analyzer().analyze(tweet) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def main():

    # ensure proper usage
    if len(sys.argv) != 2:
        sys.exit("Usage: ./smile @username")

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # instantiate analyzer
    analyzer = Analyzer(positives, negatives)

    # get latest 50 tweets of the user
    tweets = get_user_timeline(sys.argv[1].strip('@'), count=50)
    if tweets is None:
        sys.exit("User doesn't exist or is private")

    # analyze tweets, printing each in a colour that matches its sentiment
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            print(colored(tweet, "green"))
        elif score < 0.0:
            print(colored(tweet, "red"))
        else:
            print(colored(tweet, "yellow"))

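For the command-line variant above, a typical entry point would look like the sketch below; the __main__ guard and the example invocation are assumptions about how the script is wired up rather than part of the submission itself.

# assumed entry point for the CLI variant above
if __name__ == "__main__":
    main()

# example invocation (colours come from termcolor):
#   $ ./smile @username
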
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) # absolute paths to lists for analysis positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") positive, negative, neutral = 0.0, 0.0, 0.0 analyzer = Analyzer(positives, negatives) if tweets != None: for tweet in tweets: score = analyzer.analyze(tweet.lower()) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 else: sys.exit("Error") # TODO # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # classify tweets positive, negative, neutral = 0.0, 0.0, 100.0 positive_count, negative_count, neutral_count = 0, 0, 0 if tweets is not None and len(tweets) != 0: # instantiate analyzer analyzer = Analyzer(positives, negatives) # analyze tweets for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: positive_count += 1 elif score < 0.0: negative_count += 1 else: neutral_count += 1 # get percentages positive, negative, neutral = positive_count / len(tweets), \ negative_count / len(tweets), \ neutral_count / len(tweets) # generate chart chart = helpers.chart(positive, negative, neutral) # render results if tweets is not None and len(tweets) != 0: return render_template("search.html", chart=chart, screen_name=screen_name, tweets=str(len(tweets)), tweet=tweets[0]) elif len(tweets) == 0: return render_template("search.html", chart=chart, screen_name=screen_name, tweets=0, tweet="No tweets") else: return render_template("search.html", chart=chart, screen_name=screen_name, tweets="N/A", tweet="Not an account")
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name,100) # TODO totaltweets = len(tweets) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) positive, negative, neutral = 0.0, 0.0, 0.0 for tweet in tweets: if analyzer.analyze(tweet)>0: positive = positive + 1 elif analyzer.analyze(tweet)<0: negative = negative + 1 else: neutral = neutral + 1 positive = positive/totaltweets*100 negative = negative/totaltweets*100 neutral = neutral/totaltweets*100 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name,50) # TODO if tweets==None: return redirect(url_for("index")) obj=Analyzer() positive, negative, neutral =0,0,0 for tweet in tweets: tokeni=nltk.tokenize.TweetTokenizer() tokens=tokeni.tokenize(tweet) score=0 for word in tokens: score=obj.analyze(word)+score if score>0: positive=positive+1 elif score < 0: negative=negative+1 else: neutral=neutral+1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search():

    # validate screen_name (this works because the form is submitted with GET, not POST)
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # absolute paths to lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # create analyzer object by passing the word-list paths
    analyzer = Analyzer(positives, negatives)

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)

    # initialise counters
    positive, negative, neutral = 0.0, 0.0, 0.0

    # count the number of positive, negative and neutral tweets
    for tweet in tweets:
        score = analyzer.analyze(tweet)
        if score > 0.0:
            positive += 1
        elif score < 0.0:
            negative += 1
        else:
            neutral += 1

    # generate chart
    chart = helpers.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)

def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's last 50 tweets tweets = helpers.get_user_timeline(screen_name, 50) if not tweets: sys.exit("sorry no tweets found") # set up for analyzing and counting positive, negative, neutral = 0.0, 0.0, 0.0 positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") analyzer = Analyzer(positives, negatives) # analyze and count each tweet for tweet in tweets: curScore = analyzer.analyze(tweet) if curScore > 0.0: positive += 1 elif curScore < 0.0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) if tweets == None: return redirect(url_for("index")) # TODO positive, negative, neutral = 0.0, 0.0, 100.0 analyzer = Analyzer(positive, negative) for tweet in tweets: c = analyzer.analyze(tweet) if c > 0: positive += 1 neutral -= 1 elif c < 0: negative += 1 neutral -= 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) # Check if tweets array contains None if tweets is None: sys.exit("Error: No tweets was returned!") # Absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # Initialize an Analyze object analyzer = Analyzer(positives, negatives) # Initialize sentiment analysis counts for chart values positive, negative, neutral = 0.0, 0.0, 0.0 # Iterate through tweets for tweet in tweets: # Return score analysis for tweet score = analyzer.analyze(tweet) # Increment respective sentiment analysis counts if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 # Set sentiment analysis counts to percentages num_tweets = positive + negative + neutral positive = positive / num_tweets negative = negative / num_tweets neutral = neutral / num_tweets # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name) # TODO positive, negative, neutral = 0.0, 0.0, 100.0 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "") if not screen_name: return redirect(url_for("index")) # get screen_name's tweets tweets = helpers.get_user_timeline(screen_name, 100) # handle get_user_timeline errors if tweets == None: return redirect(url_for("index")) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) # counts for sentiment categories pos_count, neg_count, neut_count = 0, 0, 0 # score and assign sentiment category to each tweet for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: pos_count += 1 elif score < 0.0: neg_count += 1 else: neut_count += 1 whole = pos_count + neg_count + neut_count positive, negative, neutral = (pos_count / whole), (neg_count / whole), (neut_count / whole) # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)
def search(): # validate screen_name screen_name = request.args.get("screen_name", "").lstrip("@") if not screen_name: return redirect(url_for("index")) # absolute paths to lists positives = os.path.join(sys.path[0], "positive-words.txt") negatives = os.path.join(sys.path[0], "negative-words.txt") # instantiate analyzer analyzer = Analyzer(positives, negatives) # get screen_name's most recent 100 tweets tweets = helpers.get_user_timeline(screen_name, 100) # return to index if screen_name doesn't exist if tweets == None: return redirect(url_for("index")) # create positive, negative and neutral count positive, negative, neutral = 0, 0, 0 # analyze each tweet & increase corresponding sentimen count for tweet in tweets: score = analyzer.analyze(tweet) if score > 0.0: positive += 1 elif score < 0.0: negative += 1 else: neutral += 1 # generate chart chart = helpers.chart(positive, negative, neutral) # render results return render_template("search.html", chart=chart, screen_name=screen_name)