def on_data(self, data):
    """Stream-listener callback: reply to an incoming tweet with a generated sentence.

    Skips tweets that are not confidently English, tweets whose own sentiment
    is already high, and generated replies whose sentiment is too low.
    Returns True in every skip/error path so the stream keeps running.
    """
    try:
        maintain_log = {}
        # Parse once and reuse — the original parsed `data` a second time
        # just to read the screen name.
        tweet_data = json.loads(data)
        tweet = tweet_data['text'].replace('RT ', '')
        tweet_id = str(tweet_data['id'])
        user_id = str(tweet_data['user']['id'])
        maintain_log['tweet'] = tweet

        user = tweet_data['user']['screen_name']
        # 138 = 140-char tweet limit minus the "@" and the space after the handle.
        status = "@" + user + " " + text_model.make_short_sentence(138 - len(user))
        maintain_log['response'] = status

        print('----------' * 5)
        print("tweet: " + maintain_log['tweet'])

        # Only reply to tweets confidently detected as English.
        tweet_language, tweet_language_score = get_language(maintain_log['tweet'])
        if tweet_language_score < 0.80 or tweet_language != "English":
            print("NR :: language_score_is_low :: " + str(tweet_language_score))
            return True

        tweet_sentiment = get_sentiment(maintain_log['tweet'])
        response_sentiment = get_sentiment(maintain_log['response'])
        # Already-happy tweets don't need a reply.
        if tweet_sentiment > 0.75:
            print("NR :: tweet_sentiment_is_high ::" + str(tweet_sentiment))
            return True
        # Don't post a reply that itself reads negative.
        if response_sentiment < 0.65:
            print("NR :: response_sentiment_is_low :: " + str(response_sentiment))
            return True

        print("response: " + maintain_log['response'])
        print('----------' * 5)
        api.update_status(maintain_log['response'] + " https://twitter.com/" + user_id + "/status/" + tweet_id)
        # Random pause so replies don't look machine-gunned / hit rate limits.
        time.sleep(randint(50, 150))
    except Exception as e:
        # Bug fix: the old handler did `e.errno`/`e.strerror`, attributes most
        # exceptions (KeyError, JSONDecodeError, ...) lack — so the handler
        # itself raised AttributeError. Also narrowed from BaseException so
        # KeyboardInterrupt/SystemExit still propagate.
        print("Error in on_data: {0!r}".format(e))
        return True
def get_input():
    """Read sentences from stdin and print three sentiment scores for each.

    Typing "end" (or pressing Ctrl-C) terminates the program via sys.exit().
    """
    line = ""
    try:
        while line != "end":
            line = input("\r\nEnter a sentence: ")
            if line == "end":
                print("Ending Program ...")
                sys.exit()
            # Print each analyzer's score, blank-line separated,
            # in the same order every time.
            for label, scorer in (
                ("Afinn Sentiment: ", sa.get_afinn_score),
                ("NLTK Vader Sentiment: ", sa.get_nltk_score),
                ("Consolidated Sentiment:", sa.get_sentiment),
            ):
                print("")
                print(label)
                print(scorer(line))
    except KeyboardInterrupt:
        sys.exit()
def ServerMain():
    ''' The main server logic which connects to the webserver and redis.

    Subscribes to the 'sentiment_analysis_py' pub/sub channel and loops
    forever, dispatching on messages of the form "<command>:<argument>":
    "param" publishes a welcome banner, "file" runs sentiment analysis on
    the named file and publishes the result. Any exception logs and exits
    the loop.
    '''
    try:
        r = redis.StrictRedis.from_url(environ['REDIS_URI'])
        p = r.pubsub()
        p.subscribe('sentiment_analysis_py')
        while True:
            message = p.get_message()
            # get_message() returns None when nothing is queued; data == 1
            # is the subscribe-confirmation message, which carries no command.
            if message and message['data'] != 1:
                command = message['data'].decode('utf-8').split(':')
                if command[0] == "param":
                    # NOTE(review): this logs command[0] (always "param") under
                    # the label "Got file name" — command[1] looks intended;
                    # confirm against the publisher before changing.
                    r.publish("diarization_node_log", f"Got file name: {command[0]}")
                    print("sending hello " + command[1])
                    r.publish(
                        "diarization_node",
                        "Welcome to RE:VERB api server. Upload a file to /upload in order to get diarization results (use HTTP POST request)!")
                elif command[0] == "file":
                    r.publish("diarization_node_log", f"Got file name: {command[1]}")
                    r.publish("diarization_node_log", f"Got os dir: {os.listdir()}")
                    print(f"Got file name: {command[1]}")
                    print(os.listdir())
                    sentiment_analysis_result = get_sentiment(command[1])
                    r.publish("diarization_node", f"{sentiment_analysis_result}")
    except Exception as e:
        # Top-level boundary: log and fall through (server stops on error).
        print(e)
def test_consolidated_neutral(self):
    """ Test for the consolidation function with a neutral result """
    sentence = "I have seen better movies than this."
    self.assertEqual(get_sentiment(sentence), 'Neutral')
def test_consolidated_negative(self):
    """ Test for the consolidation function with a negative result """
    sentence = "Don't go to that restaurant, it's the worst ever."
    self.assertEqual(get_sentiment(sentence), 'Negative')
def test_consolidated_positive(self):
    """ Test for the consolidation function with a positive result """
    sentence = "That was the best book I have read in a long time."
    self.assertEqual(get_sentiment(sentence), 'Positive')
def sentiment_analysis():
    """Render the sentiment-analysis page for the file named in the query string.

    Reads ?filename=..., runs get_sentiment on it, and passes score, result,
    and the pre-generated score-graph image path to the template.
    """
    file_name = request.args.get('filename')
    print(file_name)
    sentiment_score, result = get_sentiment(file_name)
    print(sentiment_score, result)
    graph_path = f'static/images/{file_name}_score_graph.png'
    return render_template(
        'sentiment_analysis.html',
        score=sentiment_score,
        result=result,
        score_graph=graph_path,
    )
def download_photo(self, media_id, path='photos/', filename=None, description=True):
    """Inspect a media post's caption/image sentiment; comment on sad posts.

    Low-sentiment captions (< 0.3) are commented on directly. Mid-sentiment
    captions (< 0.5) trigger a photo download and facial-emotion check, and a
    comment if the dominant emotion is not happy. Returns True for every
    handled/skipped case, False when nothing was downloaded.

    NOTE(review): reconstructed from whitespace-mangled source — the exact
    nesting of the photo branch should be confirmed against history.
    """
    delay.small_delay(self)
    # exist_ok avoids the check-then-create race of the old
    # `if not os.path.exists(path): os.makedirs(path)` pair.
    os.makedirs(path, exist_ok=True)

    media = self.get_media_info(media_id)[0]
    caption = media['caption']['text']

    # Bug fix: both file handles were opened and never closed.
    with open('model.json', 'r') as model_file:
        model_json = json.load(model_file)
    reconstituted_model = POSifiedText.from_json(model_json)

    with open('links.txt', 'a') as urls_save:
        if caption:
            caption_sentiment = get_sentiment(caption)
            language, language_score = get_language(caption)
            # Only handle confidently-English captions.
            if language_score < 0.80 or language != "English":
                return True
            if caption_sentiment < 0.3:
                # Clearly sad caption: comment without downloading.
                comment_and_log(self, caption, reconstituted_model, media_id, urls_save)
                return True
            elif caption_sentiment < 0.5:
                photo = super(self.__class__, self).downloadPhoto(
                    media_id, filename, False, path)
                if photo:
                    sad_sentiment, max_key = get_image_sentiment(photo)
                    # Bug fix: `== None` -> `is None`; also removed a dead
                    # `return photo` that sat unreachable after `return True`.
                    if sad_sentiment is None:
                        return True
                    happy_array = ['happiness', 'surprise']
                    # Comment only when the dominant emotion is not happy
                    # and the sadness score is convincing.
                    if (max_key not in happy_array) and sad_sentiment > 0.5:
                        comment_and_log(self, caption, reconstituted_model,
                                        media_id, urls_save)
                    return True
                else:
                    return True

    self.logger.info("Media with %s is not %s ." % (media_id, 'downloaded'))
    return False
def run(args):
    """Generate lyrics for the requested genre, then classify and score them.

    args.genre selects the generation model/data ('pop', 'rock', or 'rap');
    anything else prints a message and returns without generating.
    """
    genre = args.genre
    if genre == 'pop':
        model_path = pop_model
        data_path = pop_save
    elif genre == 'rock':
        model_path = rock_model
        data_path = rock_save
    elif genre == 'rap':
        model_path = rap_model
        data_path = rap_save
    else:
        # Bug fix: the original fell through here with model_path/data_path
        # unbound and crashed with NameError at the open() below.
        print("Unexpected input!")
        return

    with open(data_path, 'rb') as f:
        data = pickle.load(f)

    print('generating...')
    lyrics = generate_model(data['wordNum'], data['wordToID'], data['words'],
                            model_path=model_path)
    print('\n\n')

    # Clear the generation graph before building the classifier graph.
    tf.reset_default_graph()
    predicted = classify_model(lyrics[0], classify_save, genre,
                               model_path=classify_model_path)
    print("\n\nOur classification model predict it to be: ")
    print(predicted)
    print("\nAnd the sentiment analysis result of the generated lyric is:")
    get_sentiment(lyrics[0])
def inference(event, context):
    """Serverless inference entry point.

    Looks up the request token in the inference config, dispatches to either
    the classification or the sentiment-analysis model, and returns an HTTP
    response dict. Unknown tokens yield an error response; any unexpected
    failure is reported as a 500.
    """
    try:
        data = fetch_post_data(event)
        infer_config = fetch_inference_json()
        print('post data and inference config fetched')

        token = data['token']
        if token not in infer_config:
            print(f'Token {data["token"]} not found')
            return create_response({
                'result': 'error',
                'message': 'No such token found.'
            })

        task_config = infer_config[token]
        if task_config['task_type'] == 'classification':
            model = fetch_classification_model(task_config['model_filename'])
            output = classify(model, data['input'], task_config['classes'])
        else:
            # Anything that is not classification is sentiment analysis.
            model_path, model_metadata_path = fetch_sa_data(
                task_config['model_filename'],
                task_config['metadata_filename'],
            )
            output = get_sentiment(data['input'], model_path, model_metadata_path)

        return create_response({
            'result': 'success',
            'prediction': output,
        })
    except Exception as e:
        print(repr(e))
        return create_response(
            {
                'result': 'internal_error',
                'message': repr(e),
            },
            status_code=500)
def website_sentiment(url):
    """Fetch *url* and return the sentiment score of its visible text."""
    site = requests.get(url)
    # Bug fix: name the parser explicitly. Omitting it makes bs4 emit a
    # GuessedAtParserWarning and pick whichever parser happens to be
    # installed, so text extraction could differ between machines.
    soup = BeautifulSoup(site.text, "html.parser")
    return get_sentiment(soup.get_text())
def getStatuses(username):
    """Return the sentiment of a user's recent timeline, taken as one text."""
    timeline = t.GetUserTimeline(username)
    # Concatenate every status text (no separator) before scoring.
    combined = "".join(status.text for status in timeline)
    return sentiment_analysis.get_sentiment(combined)
def updateIsAngry(self, tweetText):
    """Run sentiment analysis on *tweetText* and return element [1] of the result."""
    result = sentiment_analysis.get_sentiment(tweetText)
    # Index 1 holds the value the caller uses as the "is angry" signal
    # (exact meaning depends on get_sentiment's return shape).
    return result[1]