def post(self):
    K.clear_session()
    model, graph = init_model()
    hashtag = session.get('hashtag')
    tokenized = session.get('tokenized')

    # fetch the original tweets for table display
    tweets = TweetsModel.find_by_hashtag(hashtag=hashtag)
    tweets = [element.tweet for element in tweets]

    # apply model
    predictions = []
    with graph.as_default():
        for element in tokenized:
            element = pad_sequences(element, maxlen=23, dtype='int32', value=0)
            prediction = model.predict(element, batch_size=1, verbose=2)[0]
            if np.argmax(prediction) == 0:
                prediction = 'Negative'
            elif np.argmax(prediction) == 1:
                prediction = 'Neutral'
            else:
                prediction = 'Positive'
            predictions.append(prediction)

    # zip predictions with the ORIGINAL tweets for better clarity
    sentiment_predictions = list(zip(tweets, predictions))
    sentiment_counter = dict(Counter(predictions))

    return {'sentiment_predictions': sentiment_predictions,
            'sentiment_counter': sentiment_counter,
            'hashtag': hashtag}
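The view above assumes an init_model() helper that loads the trained LSTM and captures the TensorFlow graph so model.predict() can run inside a Flask request handler. The helper itself is not shown; the following is a minimal sketch of what it could look like, assuming a TF1-era Keras setup (which the graph.as_default() call implies) and a hypothetical model path.

# minimal sketch, not the app's actual helper; the model path is an assumption
from keras.models import load_model
import tensorflow as tf

def init_model(model_path='models/lstm_sentiment.h5'):
    """Load the trained LSTM once and keep a handle to its graph, so that
    predictions can later be wrapped in `with graph.as_default():`."""
    model = load_model(model_path)
    graph = tf.get_default_graph()
    return model, graph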
def manage_db():
    """Manage Databases View"""
    mongo_hashtags = Collection.hashtags()
    sqlite_hashtags = TweetsModel.distinct_hashtags()
    return render_template('manage_db/dashboard.html',
                           mongo_hashtags=mongo_hashtags,
                           sqlite_hashtags=sqlite_hashtags)
def manage_db():
    """Manage Databases View"""
    # retry the MongoDB call a few times in case of a transient AutoReconnect;
    # fall back to an empty list if all attempts fail
    mongo_hashtags = []
    for attempt in range(0, 3):
        try:
            mongo_hashtags = Collection.hashtags()
            break
        except AutoReconnect:
            time.sleep(2)
    sqlite_hashtags = TweetsModel.distinct_hashtags()
    return render_template('manage_db/dashboard.html',
                           mongo_hashtags=mongo_hashtags,
                           sqlite_hashtags=sqlite_hashtags)
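The retry loop catches AutoReconnect, which, assuming pymongo is the MongoDB driver in use here, is importable from pymongo.errors; the loop also needs the standard time module.

# assumed imports for the retry version of manage_db
import time
from pymongo.errors import AutoReconnect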
def post(self):
    hashtag = request.form.get('hashtag_cleaning')
    if hashtag is None:
        # hashtag was not specified, send the user back to the trend search page
        return redirect('trendsearch')

    # Prevent overpopulating the table with duplicates of the same hashtag
    TweetsModel.delete_by_hashtag(hashtag=hashtag)

    # Get the chosen hashtag's tweets
    hashtag_tweets = Collection.find_by_hashtag(hashtag=hashtag)

    # Preprocess text for future sentiment analysis
    text = [element['text'] for element in hashtag_tweets]
    corpus = []
    ps = PorterStemmer()
    for i in range(0, len(text)):
        try:
            tweet = re.sub('[^a-zA-Z]', ' ', text[i])  # keep letters only
            tweet = tweet.lower()
            tweet = tweet.split()
            tweet = [ps.stem(word) for word in tweet if word not in stopwords]
            tweet = ' '.join(tweet)
            corpus.append(tweet)
            new_tweet = TweetsModel(hashtag=hashtag, tweet=tweet)
            new_tweet.save_to_db()
        except Exception:
            pass
    return redirect('manage_db')
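The cleaning loop relies on NLTK's Porter stemmer and a stopwords collection that is defined elsewhere in the project. A minimal sketch of that setup, assuming NLTK's English stop word list is the one intended:

# minimal sketch of the assumed preprocessing setup
import re
import nltk
from nltk.corpus import stopwords as nltk_stopwords
from nltk.stem.porter import PorterStemmer

nltk.download('stopwords', quiet=True)             # one-time download of the corpus
stopwords = set(nltk_stopwords.words('english'))   # set membership test is O(1) per word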
def post(self):
    hashtag = request.form.get('hashtag')

    # fetch the cleaned tweets for backend sentiment analysis
    hashtag_tweets = TweetsModel.find_by_hashtag(hashtag=hashtag)
    hashtag_tweets = [element.tweet for element in hashtag_tweets]

    # load the pickled tokenizer
    with open(tokenizer_path, 'rb') as handle:
        tokenizer = pickle.load(handle)

    # preprocess data for prediction: texts_to_sequences on a list of words
    # returns one integer sequence per word of the tweet
    tokenized = []
    for element in hashtag_tweets:
        element = element.split()
        element = tokenizer.texts_to_sequences(element)
        tokenized.append(element)

    session['tokenized'] = tokenized
    session['hashtag'] = hashtag
    return hashtag
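The view unpickles a Keras Tokenizer from tokenizer_path, so that tokenizer has to be fitted and saved beforehand, typically as part of model training. A minimal sketch of that step; the training corpus, vocabulary size, and file name below are assumptions, not taken from this project.

# minimal sketch of how the tokenizer could have been created and pickled;
# training_tweets, num_words and the file name are hypothetical
import pickle
from keras.preprocessing.text import Tokenizer

tokenizer = Tokenizer(num_words=5000)
tokenizer.fit_on_texts(training_tweets)   # training_tweets: list of cleaned tweet strings

with open('tokenizer.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)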
def sentiment_analysis():
    """Apply LSTM View"""
    sqlite_hashtags = TweetsModel.distinct_hashtags()
    return render_template('sentiment_analysis/dashboard.html',
                           sqlite_hashtags=sqlite_hashtags)
def post(self):
    hashtag = request.form.get('hashtag_table')
    # drop all stored tweets for the selected hashtag
    TweetsModel.delete_by_hashtag(hashtag=hashtag)
    return redirect('manage_db')
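These views lean on several TweetsModel helpers (find_by_hashtag, distinct_hashtags, delete_by_hashtag, save_to_db) that are not shown in this excerpt. Assuming the model is a plain Flask-SQLAlchemy model, which the code here does not confirm, the helpers could look roughly like this minimal sketch; the table name, column names, and sizes are assumptions.

# minimal sketch assuming Flask-SQLAlchemy; schema details are assumptions
class TweetsModel(db.Model):
    __tablename__ = 'tweets'

    id = db.Column(db.Integer, primary_key=True)
    hashtag = db.Column(db.String(140))
    tweet = db.Column(db.Text)

    def save_to_db(self):
        db.session.add(self)
        db.session.commit()

    @classmethod
    def find_by_hashtag(cls, hashtag):
        return cls.query.filter_by(hashtag=hashtag).all()

    @classmethod
    def distinct_hashtags(cls):
        # one value per stored hashtag, used to populate the dropdowns
        return [row[0] for row in db.session.query(cls.hashtag).distinct()]

    @classmethod
    def delete_by_hashtag(cls, hashtag):
        cls.query.filter_by(hashtag=hashtag).delete()
        db.session.commit()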