def del_a_comment(insta_username):
    """Fetch the comments on the user's latest post and delete the negative ones.

    Each comment is classified with TextBlob's NaiveBayesAnalyzer; a comment
    is deleted (via the Instagram API) when its negative probability exceeds
    its positive probability. Relies on module-level BASE_URL, ACCESS_TOKEN,
    requests, and the sibling helper get_post_id().
    """
    media_id = get_post_id(insta_username)
    # BUG FIX: the original URL contained the literal text "media-id" instead
    # of the resolved media id, so the comments endpoint was never valid.
    url = BASE_URL + "/media/{}/comments?access_token={}".format(
        media_id, ACCESS_TOKEN)
    print(url)
    comment_info = requests.get(url).json()
    if comment_info['meta']['code'] == 200:
        if len(comment_info['data']):
            # Naive implementation: classify every comment, delete negatives.
            for comment in comment_info['data']:
                comment_id = comment['id']
                comment_text = comment['text']
                blob = TextBlob(comment_text, analyzer=NaiveBayesAnalyzer())
                if blob.sentiment.p_neg > blob.sentiment.p_pos:
                    # BUG FIX: print('...').format(x) raised AttributeError
                    # (print returns None) — format the string, then print.
                    print('Negative comment : {}'.format(comment_text))
                    delete_url = BASE_URL + "/media/{}/comments/{}/?access_token={}".format(
                        media_id, comment_id, ACCESS_TOKEN)
                    # BUG FIX: '%s' placeholder mixed with .format() never
                    # substituted anything; use a '{}' placeholder instead.
                    print('DELETE request url : {}'.format(delete_url))
                    delete_info = requests.delete(delete_url).json()
                    if delete_info['meta']['code'] == 200:
                        print('Comment successfully deleted!\n')
                    else:
                        print('Unable to delete comment!')
                else:
                    print('Positive comment : {}\n'.format(comment_text))
        else:
            print('There are no existing comments on the post!')
    else:
        print('Status code other than 200 received!')
def get_sentiment_for_text(text):
    """Score *text* with TextBlob's Naive Bayes sentiment analyzer.

    Returns a (classification, p_pos, p_neg) tuple, where classification is
    the analyzer's label and p_pos/p_neg are the class probabilities.
    """
    result = TextBlob(text, analyzer=NaiveBayesAnalyzer()).sentiment
    return (result.classification, result.p_pos, result.p_neg)
def __init__(self, chunk: ('page', 'sentence'), verbose=True):
    """Configure the analyzer.

    chunk   -- granularity to score at ('page' or 'sentence').
    verbose -- when True, enable verbose behavior downstream.
    """
    self.chunk = chunk
    self.verbose = verbose
    self.logger = self.get_logger()
    # Registry of the sentiment backends this instance can score with.
    # NOTE(review): pipeline('sentiment-analysis') loads a HuggingFace model
    # at construction time — presumably acceptable here; confirm with callers.
    scorers = {}
    scorers['vader'] = SentimentIntensityAnalyzer()
    scorers['pattern'] = PatternAnalyzer()
    scorers['huggingface'] = pipeline('sentiment-analysis')
    scorers['naive'] = Blobber(analyzer=NaiveBayesAnalyzer())
    self.scorers = scorers
def __init__(self, load_classifier=False):
    """Set up the TextBlob analyzer and French/English stopword sets.

    load_classifier -- when True, load the persisted classifier first.
    """
    if load_classifier:
        self._load_classifier()
        print("classifier loaded !")
    self.tb = Blobber(analyzer=NaiveBayesAnalyzer())
    # Stopword sets for the two supported languages.
    self.stop, self.stop_en = (
        set(stopwords.words(lang)) for lang in ('french', 'english')
    )
def get_tweet_sentiment_NBA(tweet):
    """Classify the sentiment of a tweet with TextBlob's Naive Bayes analyzer.

    The tweet text is first normalized via the sibling clean_tweet() helper;
    the analyzer's sentiment result is returned unchanged.
    """
    cleaned = clean_tweet(tweet)
    blob = TextBlob(cleaned, analyzer=NaiveBayesAnalyzer())
    return blob.sentiment
def calculate_and_persist_subs_sentiment():
    """Score each row's subtitle text with both TextBlob analyzers and
    persist the scores on the matching Video record.

    Relies on module-level `rows` (records where row[1] is the video id and
    row[2] the subtitle text) and an open database `cursor`.
    """
    for row in rows:
        video_id, subs_text = row[1], row[2]
        print(video_id, subs_text)
        pattern_polarity = TextBlob(subs_text).sentiment.polarity
        naive_bayes_p_pos = TextBlob(
            subs_text, analyzer=NaiveBayesAnalyzer()).sentiment.p_pos
        # SECURITY FIX: the original interpolated row values directly into the
        # SQL string (injection risk; also breaks on ids containing quotes).
        # Use a parameterized query instead.
        # NOTE(review): '?' is the sqlite3 paramstyle — confirm the driver;
        # use '%s' placeholders for MySQL/psycopg2.
        cursor.execute(
            'UPDATE Video SET PA = ?, NBA = ? WHERE VIDEO_ID = ?',
            (pattern_polarity, naive_bayes_p_pos, video_id),
        )
def sentiment_analysis(text, nb=False):
    """Run sentiment analysis on *text* with TextBlob.

    With nb=True, uses the Naive Bayes analyzer and returns
    {"class", "p_pos", "p_neg"}; otherwise uses the default (pattern)
    analyzer and returns {"class", "polarity", "subjectivity"}, where class
    is "pos" for non-negative polarity and "neg" otherwise.
    """
    if not nb:
        scores = TextBlob(text).sentiment
        label = "pos" if scores.polarity >= 0 else "neg"
        return {
            "class": label,
            "polarity": scores.polarity,
            "subjectivity": scores.subjectivity
        }
    scores = TextBlob(text, analyzer=NaiveBayesAnalyzer()).sentiment
    return {
        "class": scores.classification,
        "p_pos": scores.p_pos,
        "p_neg": scores.p_neg
    }
naive = load_naiveclassifier() else: naive = NaiveBayesClassifier(train) save_naiveclassifier(naive) print "Naive Bayes Trained" if os.path.exists('/home/lakeesh10/Documents/projectdemo/decisiontree_classifier.pickle'): decision = load_decisionclassifier() else: decision = DecisionTreeClassifier(train) save_decisionclassifier(decision) print "Decision Tree Trained" print("Naive Bayes : ",naive.classify("fried chip good and crunchy dig thattaco tropical omg so eyeopening")) #print(decision.classify("fried chip good and crunchy dig thattaco tropical omg so eyeopening")) cl=NaiveBayesAnalyzer() print (cl.analyze("fried chip good and crunchy dig thattaco tropical omg so eyeopening")) blob = TextBlob("fried chip good and crunchy dig thattaco tropical omg so eyeopening") polarity=0 i=0 for sentence in blob.sentences: polarity=polarity+sentence.sentiment.polarity i=i+1 polarity=polarity/i print(polarity) negids = movie_reviews.fileids('neg') posids = movie_reviews.fileids('pos') negfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'neg') for f in negids] posfeats = [(word_feats(movie_reviews.words(fileids=[f])), 'pos') for f in posids]
tweet_text) return map_user_to_tweets if __name__ == '__main__': text_tweet = "This is Rachel Crooks. In 2005 she accused Donald Trump of kissing her on the mouth without her permission. Now she is running for the state legislature in Ohio. https://t.co/BiFKQe6WVh" normal = "RT @realDonaldTrump: Justice Ginsburg of the U.S. Supreme Court has embarrassed all by making very dumb political statements about me. Her…" text = "RT @gitagatubixi #Trump Sing along with us: 🎶Better not do us wrong!🎺 https://t.co/NPhXbfZ92g" pp = preprocess_tweet_text_advanced(text) # re.sub(r'RT @[^ ]*?:', '', text, count=1, flags=re.MULTILINE) tt = preprocess_tweet_text_advanced(text_tweet) blobber = Blobber(analyzer=NaiveBayesAnalyzer()) words_number, polarity, subjectivity, tag = stanford_nltk_blob_analysis( text_tweet, blobber) x = 5 tweets_texts_list = [ 'RT Trump Sing along with us: 🎶Better not do us wrong!🎺', 'RT Trump Sing along with us: 🎶Better not do us wrong!🎺', 'RT LorettoRegina Trump Sing along with us: 🎶We honor our veterans!🎺', 'RT WilliamRolar Trump Sing along with us: 🎶Be bad, you’ll get banned!🎺' ] session, embedded_placeholder, placeholder = prepare_tensorflow_graph_and_session( ) check_message_similatiry(session, embedded_placeholder, placeholder, tweets_texts_list, 0.8)