import networkx as nx

import senticnet
import SenticParser


def get_sentics_of_sentence(self, sentence):
    """Extract candidate concepts from a sentence and look up their sentics."""
    words = sentence.split()
    list_concepts = []
    to_add = ""
    # Collect maximal runs of consecutive words that are nodes of the
    # concept graph; each run is one candidate multi-word concept.
    for word in words:
        if word in self.G:
            to_add += word + " "
        elif to_add != "":
            list_concepts.append(to_add[:-1])
            to_add = ""
    if to_add != "":
        list_concepts.append(to_add[:-1])

    # Merge in the concepts returned by the parser and drop empty strings.
    parser_list = SenticParser.getOutputConcepts(sentence)
    list_concepts = list(filter(bool, set(list_concepts) | set(parser_list)))

    to_search = []
    for phrase in list_concepts:
        concepts = phrase.split()
        to_search += concepts
        # When a contiguous run of words is itself a shortest path in the
        # graph, replace the individual words with the fused "_"-joined
        # multi-word concept.
        for i in range(len(concepts) - 1):
            for j in range(i + 1, len(concepts)):
                try:
                    k = nx.dijkstra_path(self.G, concepts[i], concepts[j])
                except (nx.NetworkXNoPath, KeyError):
                    continue  # no path, or a word missing from the graph
                if len(k) == j - i + 1 and k == concepts[i:j + 1]:
                    to_search = list(set(to_search) - set(k))
                    to_search.append("_".join(k))

    # Deduplicate and look shorter concepts up first.
    to_search = sorted(set(to_search), key=lambda c: len(c.split("_")))
    return [s for s in (self.sn.lookup(concept) for concept in to_search)
            if s is not None]
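# Usage sketch (an assumption: the class enclosing the method above is not
# shown in this fragment). Any instance whose `G` is the concept graph and
# whose `sn` is a senticnet.Senticnet()-style wrapper exposing lookup()
# should work, along the lines of:
#
#     extractor.G = nx.read_gpickle("test.gpickle")
#     extractor.sn = senticnet.Senticnet()
#     sentics = extractor.get_sentics_of_sentence("a beautiful sunny day")
#
# `extractor` here is a hypothetical instance of the unshown class; the call
# returns the non-None SenticNet entries for every concept it finds.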
def process_sentence(sentence):
    """Return the accumulated polarity and sentic features of a sentence."""
    G = nx.read_gpickle("test.gpickle")
    sentence = sentence.lower()
    words = sentence.split()
    list_concepts = []
    to_add = ""
    # Collect maximal runs of consecutive words that are nodes of the graph.
    for word in words:
        if word in G:
            to_add = to_add + word + " "
        elif to_add != "":
            list_concepts.append(to_add[:-1])
            to_add = ""
    if to_add != "":
        list_concepts.append(to_add[:-1])

    # Merge in the parser's concepts and drop empty strings.
    parser_list = SenticParser.getOutputConcepts(sentence)
    list_concepts = list(filter(bool, set(list_concepts) | set(parser_list)))

    sn = senticnet.Senticnet()
    to_search = []
    for phrase in list_concepts:
        concepts = phrase.split()
        to_search = to_search + concepts
        # Fuse a contiguous word run into one "_"-joined concept when the
        # run is itself a shortest path in the concept graph.
        for i in range(len(concepts) - 1):
            for j in range(i + 1, len(concepts)):
                try:
                    k = nx.dijkstra_path(G, concepts[i], concepts[j])
                except (nx.NetworkXNoPath, KeyError):
                    continue  # no path, or a word missing from the graph
                if len(k) == j - i + 1 and k == concepts[i:j + 1]:
                    to_search = list(set(to_search) - set(k))
                    to_search.append("_".join(k))

    # Deduplicate and process shorter concepts first.
    to_search = sorted(set(to_search), key=lambda c: len(c.split("_")))

    # Accumulate the six features over every concept known to SenticNet.
    pos = 0
    neg = 0
    pleasan = 0
    atten = 0
    sensi = 0
    apti = 0
    for concept in to_search:
        try:
            pol = sn.polarity(concept)
            sentics = sn.sentics(concept)
        except Exception:
            continue  # concept not present in SenticNet
        if pol > 0:
            pos += pol
        else:
            neg += pol
        pleasan += sentics.get("pleasantness")
        atten += sentics.get("attention")
        sensi += sentics.get("sensitivity")
        apti += sentics.get("aptitude")

    return {"positivity": pos, "negativity": neg,
            "pleasantness": pleasan, "attention": atten,
            "sensitivity": sensi, "aptitude": apti}
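# Minimal driver sketch, not part of the original module: it assumes the
# concept graph pickle "test.gpickle" sits next to this script, exactly as
# process_sentence() itself expects.
if __name__ == "__main__":
    features = process_sentence("i love this beautiful sunny day")
    for name in sorted(features):
        print("%s: %s" % (name, features[name]))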