def search():
    """Analyze the sentiment of a user's tweets and render a chart.

    Reads ``screen_name`` from the query string. Redirects to the index
    page when the name is missing or when no tweets can be fetched;
    otherwise tokenizes the timeline, scores it with ``Analyzer``, and
    renders ``search.html`` with a positive/negative/neutral chart.
    """
    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # absolute paths to the sentiment word lists
    positives = os.path.join(sys.path[0], "positive-words.txt")
    negatives = os.path.join(sys.path[0], "negative-words.txt")

    # get screen_name's tweets; bail out if the lookup yielded nothing
    # (mirrors the guard in search2 — presumably None/[] on API failure)
    tweets = helper.get_user_timeline(screen_name)
    if not tweets:
        return redirect(url_for("index"))

    analyzer = Analyzer(positives, negatives)

    # tokenize the stringified tweet list and score the tokens
    tokens = TweetTokenizer().tokenize(str(tweets))
    score = analyzer.analyze2(tokens)

    positive = float(score[0])
    # analyze2 may report the negative count with a negative sign;
    # the chart needs its magnitude
    negative = abs(float(score[1]))
    neutral = score[2]

    # generate chart
    chart = helper.chart(positive, negative, neutral)

    # render results
    return render_template("search.html", chart=chart, screen_name=screen_name)
def search2():
    """Classify a user's tweets by interest and render a chart.

    Reads ``screen_name`` from the query string; redirects to the index
    page when it is missing or when no tweets come back. Each tweet is
    labelled by ``Analyzer.analyze2`` as classics, arthistory, tech, or
    neutral, and the tallies are rendered via ``search2.html``.
    """
    # validate screen_name
    screen_name = request.args.get("screen_name", "")
    if not screen_name:
        return redirect(url_for("index"))

    # get screen_name's tweets
    tweets = helpers.get_user_timeline(screen_name)
    if not tweets:
        return redirect(url_for("index"))

    # absolute paths to the word lists, in the order Analyzer expects
    base = sys.path[0]
    wordlists = [
        os.path.join(base, filename)
        for filename in (
            "positive-words.txt",
            "negative-words.txt",
            "classics-words.txt",
            "arthistory-words.txt",
            "tech-words.txt",
        )
    ]

    # instantiate analyzer
    analyzer = Analyzer(*wordlists)

    # tally each tweet's interest label; anything unrecognized is neutral
    counts = {"classics": 0.0, "arthistory": 0.0, "tech": 0.0, "neutral": 0.0}
    for tweet in tweets:
        interest = analyzer.analyze2(tweet)
        if interest not in ("classics", "arthistory", "tech"):
            interest = "neutral"
        counts[interest] += 1

    # generate chart
    chart = helpers.chart2(
        counts["classics"], counts["arthistory"], counts["tech"], counts["neutral"]
    )

    # render results
    return render_template("search2.html", chart=chart, screen_name=screen_name)