def initialize_analysers(config):
    """Initialise analysers."""
    if 'skyttle' in ANALYZERS_TO_USE:
        skyttle = Skyttle(mashape_auth=config['mashape_auth'],
                          language=config['language'])
        ANALYZERS.append(skyttle)
    if 'chatterbox' in ANALYZERS_TO_USE:
        chatterbox = Chatterbox(mashape_auth=config['mashape_auth'],
                                language=config['language'])
        ANALYZERS.append(chatterbox)
    if 'datumbox' in ANALYZERS_TO_USE:
        datumbox = Datumbox(api_key=config['datumbox_key'])
        ANALYZERS.append(datumbox)
    if 'repustate' in ANALYZERS_TO_USE:
        repustate = Repustate(api_key=config['repustate_key'])
        ANALYZERS.append(repustate)
    if 'bitext' in ANALYZERS_TO_USE:
        bitext = Bitext(user=config['bitext_user'],
                        password=config['bitext_pwd'],
                        language=config['language'])
        ANALYZERS.append(bitext)
    if 'semantria' in ANALYZERS_TO_USE:
        semantria = Semantria(
            consumer_key=config['semantria_consumer_key'],
            consumer_secret=config['semantria_consumer_secret'])
        ANALYZERS.append(semantria)
    if 'viralheat' in ANALYZERS_TO_USE:
        viralheat = Viralheat(api_key=config['viralheat_key'])
        ANALYZERS.append(viralheat)
    if 'lymbix' in ANALYZERS_TO_USE:
        lymbix = Lymbix(api_key=config['lymbix_key'])
        ANALYZERS.append(lymbix)
    if 'aiapplied' in ANALYZERS_TO_USE:
        aiapplied = AIApplied(api_key=config['aiapplied_key'],
                              language=config['language'])
        ANALYZERS.append(aiapplied)
    if 'sentigem' in ANALYZERS_TO_USE:
        sentigem = Sentigem(api_key=config['sentigem_key'])
        ANALYZERS.append(sentigem)
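
# Illustrative wiring sketch (an assumption, not part of the original module):
# initialize_analysers() fills the module-level ANALYZERS list based on the
# names listed in ANALYZERS_TO_USE, and expects the analyser classes
# (Skyttle, Chatterbox, Datumbox, ...) to be imported in the same module.
# The config keys below mirror the ones read above; the values are placeholders.
ANALYZERS_TO_USE = ['datumbox', 'lymbix']   # assumed shape: list of analyser names
ANALYZERS = []                              # populated by initialize_analysers()

example_config = {
    'mashape_auth': '<mashape-auth-token>',
    'language': 'en',
    'datumbox_key': '<datumbox-api-key>',
    'lymbix_key': '<lymbix-api-key>',
}
initialize_analysers(example_config)
# ANALYZERS now holds one configured client per enabled analyser name
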
from classifiers.SentimentPolarityEmotion import SentimentPolarityEmotionClassifier
from pprint import pprint
from lymbix import Lymbix

apikey = "8a77503e17148fb5f3d5fd7d1e80ce86c23b6133"
# a few other api keys to use if this one has reached its limit
#apikey = "06bb0ef22cc21896451d7d9ed0f53eff6c99cd93"
#apikey = "1321dd7e3e05237514db9ae7812202c23f6b2f5e"

#SPEC = SentimentPolarityEmotionClassifier(apikey)
#result = SPEC.analyse("I love this movie")
#results = SPEC.analyseAll(["I love this movie", "I hate this movie"])
#pprint(results)
#print result["dominant_emotion"]
#print result["article_sentiment"]["sentiment"]

l = Lymbix(apikey)
r = l.tonalize_multiple(["I love you", "I hate you"])
print [i["article_sentiment"] for i in r]
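
# Hypothetical key-rotation helper (an assumption -- the original script just
# swaps the hard-coded apikey by hand once a key hits its rate limit).  It
# retries the same request with each spare key in turn until one succeeds.
SPARE_API_KEYS = [
    "8a77503e17148fb5f3d5fd7d1e80ce86c23b6133",
    "06bb0ef22cc21896451d7d9ed0f53eff6c99cd93",
    "1321dd7e3e05237514db9ae7812202c23f6b2f5e",
]

def tonalize_with_fallback(texts):
    for key in SPARE_API_KEYS:
        try:
            return Lymbix(key).tonalize_multiple(texts)
        except Exception, e:  # the client's exact rate-limit error type is unknown
            print "key %s failed (%s), trying the next one" % (key, e)
    raise RuntimeError("all Lymbix API keys exhausted")
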
from lymbix import Lymbix

f = open('../test set.txt', 'r')
lymbix = Lymbix('c6a2c5af610c650d2db35a98ab922741618b734e')

#rdata = lymbix.tonalize("My name is Paul.")
#print rdata['article_sentiment']

total = 0
count = 0
for line in f:
    total += 1
    tweet = line.split(' .:. ')
    sentiment = tweet[0]
    result = lymbix.tonalize(tweet[1])
#    if sentiment == result['article_sentiment']['sentiment'].lower().replace('negative', 'neg').replace('positive', 'pos'):
#        count += 1
#    else:
#        print total, result['article_sentiment']
    score = result['article_sentiment']['score']
    if abs(score) < 0.25:
        sent_temp = 'neutral'
    elif score > 0:
        sent_temp = 'pos'
    else:
        sent_temp = 'neg'
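    # (assumption: the file is truncated above; judging by the commented-out
    # comparison earlier, the loop presumably ends by checking the thresholded
    # label against the gold label and reporting the tally)
    if sent_temp == sentiment:
        count += 1

print '%d / %d correct' % (count, total)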
from models.FinalDBUsers import FinalDBUsers
from models.FinalDBTweets import FinalDBTweets
import re, string, pprint
from lymbix import Lymbix


def _processPunctuation(text):
    # strip URLs, newlines and apostrophes, then pad the remaining
    # punctuation with spaces so each mark becomes its own token
    t = re.sub("https?:\/\/.*(\s+|$)", '', text)
    t = t.replace("\n", "").replace("'", "")
    for p in string.punctuation:
        t = t.replace(p, " " + p + " ")
    return t


apikey = "8a77503e17148fb5f3d5fd7d1e80ce86c23b6133"
l = Lymbix(apikey)

fdbu = FinalDBUsers()
fdbt = FinalDBTweets()

users = fdbu._db.keys()
done = 0
total = len(users)
for i, uid in enumerate(users):
    u = fdbu.get(uid)
    texts = [_processPunctuation(t["text"]) for t in u["tweets"]]
    if not fdbt.hasTweetsOfUser(uid) and len(texts) > 0:
        r = l.tonalize_multiple(texts)
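        # (assumption: the script is truncated here; the tonalized results in r
        # would presumably be stored against uid -- that call is not shown)
    done += 1
    print "processed %d / %d users" % (done, total)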