def analyse_entry(self, entry, activity):
    """Annotate *entry* with PAD (pleasure/arousal/dominance) dimensions.

    The entry text is preprocessed, scored against the per-language
    lexicon selected by ``activity.params['language']``, and the resulting
    V/A/D values are attached as a single EmotionSet with provenance.
    Yields the annotated entry.
    """
    params = activity.params
    raw_text = entry.text
    preprocessed = self._my_preprocessor(raw_text)
    lexicon = self._dictionary[params['language']]
    features = self._extract_features(preprocessed, lexicon, params['language'])

    emotion_set = EmotionSet()
    emotion_set.id = "Emotions0"

    pad = Emotion(id="Emotion0")
    pad["emoml:pad-dimensions_pleasure"] = features['V']
    pad["emoml:pad-dimensions_arousal"] = features['A']
    pad["emoml:pad-dimensions_dominance"] = features['D']

    # Record provenance on both the emotion and its containing set.
    pad.prov(activity)
    emotion_set.prov(activity)

    emotion_set.onyx__hasEmotion.append(pad)
    entry.emotions = [emotion_set]
    yield entry
def analyse(self, **params):
    """Analyse text with the hashtag-trained SVM emotion classifiers.

    Params (via **params):
      input     -- text to analyse.
      estimator -- 'LinearSVC' (default) or 'SVC'; with 'SVC' the per-category
                   scores are emitted as intensities on a 0-100 scale.

    Returns a Results object with one Entry carrying an EmotionSet that
    holds a VAD centroid emotion plus one emotion per fired category.
    """
    logger.debug("Hashtag SVM Analysing with params {}".format(params))

    text_input = params.get("input", None)
    self.ESTIMATOR = params.get("estimator", 'LinearSVC')

    # EXTRACTING FEATURES
    text = self._text_preprocessor(text_input)
    X = self._convert_text_to_vector(text=text, text_input=text_input,
                                     Dictionary=self._Dictionary)
    feature_text = self._extract_features(X=X,
                                          classifiers=self._classifiers,
                                          estimator=self.ESTIMATOR)

    # GENERATING RESPONSE
    response = Results()
    entry = Entry()
    entry.nif__isString = text_input

    emotionSet = EmotionSet()
    emotionSet.id = "Emotions"
    if self.ESTIMATOR == 'SVC':
        emotionSet.onyx__maxIntensityValue = float(100.0)

    emotion1 = Emotion()
    # 'surprise' has no VAD centroid, so it is excluded from the average.
    # FIX: the weights list does not depend on the axis, so build it once
    # instead of rebuilding it for each of the three dimensions.
    categories = [c for c in feature_text if c != 'surprise']
    weights = [feature_text[c] for c in categories]
    for dimension in ('V', 'A', 'D'):
        if not all(v == 0 for v in weights):
            value = np.average([self.centroids[c][dimension] for c in categories],
                               weights=weights)
        else:
            # Neutral midpoint when no category fired at all.
            value = 5.0
        emotion1[self.centroid_mappings[dimension]] = value
    emotionSet.onyx__hasEmotion.append(emotion1)

    for category in feature_text:
        if self.ESTIMATOR == 'SVC':
            # SVC: always emit the category together with its intensity.
            emotionSet.onyx__hasEmotion.append(Emotion(
                onyx__hasEmotionCategory=self.wnaffect_mappings[category],
                onyx__hasEmotionIntensity=feature_text[category]))
        elif feature_text[category] > 0:
            # LinearSVC: emit only categories that actually fired.
            emotionSet.onyx__hasEmotion.append(Emotion(
                onyx__hasEmotionCategory=self.wnaffect_mappings[category]))

    entry.emotions = [emotionSet]
    response.entries.append(entry)
    return response
def analyse(self, **params):
    """Run the emotion service on a file named by the 'i' parameter.

    A URL input is downloaded into the storage folder; any other value is
    resolved relative to it. Raises Error if the resolved path is not a
    file. Returns a Results object whose single Entry carries the filename
    and a VAD EmotionSet.
    """
    logger.debug("emotionService with params {}".format(params))
    filename = params.get("i", None)

    # Resolve the input to a local path, downloading when it is a URL.
    if validators.url(filename):
        filename = self._download_file(saveFolder=self._storage_path,
                                       url=filename)
    else:
        filename = os.path.join(self._storage_path, filename)
    logger.info("{} {}".format(datetime.now(), filename))
    if not os.path.isfile(filename):
        raise Error("File %s does not exist" % filename)

    # Acoustic feature extraction.
    feature_set = self._extract_features(filename, convert=True)
    # self._remove_file(filename)

    # Build the response.
    response = Results()
    entry = Entry()
    entry['filename'] = os.path.basename(filename)

    emotion_set = EmotionSet()
    emotion_set.id = "Emotions"
    vad = Emotion()
    for dim in self._dimensions:
        # Each raw score is remapped as 5 * (1 + score).
        vad[self._centroid_mappings[dim]] = 5 * (1 + feature_set[dim])
    emotion_set.onyx__hasEmotion.append(vad)

    entry.emotions = [emotion_set]
    response.entries.append(entry)
    return response
def analyse_entry(self, entry, params):
    """Annotate *entry* with an ANEW-derived emotion category and VAD scores.

    Loads the ANEW lexicon for the requested language ('es' uses the
    Spanish file, anything else the English one), extracts features from
    the preprocessed entry text, and attaches an EmotionSet holding the
    mapped category plus valence/arousal/dominance values. Yields the
    annotated entry.
    """
    text_input = entry.get("text", None)
    text = self._my_preprocessor(text_input)

    lang = params.get("language", "auto")
    # Column layout differs between the two ANEW files:
    # (path, word column, V column, A column, D column).
    if lang == 'es':
        path, word_col, v_col, a_col, d_col = self.anew_path_es, 2, 3, 5, 7
    else:
        path, word_col, v_col, a_col, d_col = self.anew_path_en, 0, 2, 4, 6

    dictionary = {}
    # BUG FIX: the file was opened in binary mode ('rb'), which breaks
    # csv.reader under Python 3 — the csv module requires text mode with
    # newline=''. Values are kept as strings, exactly as before.
    with open(path, newline='') as tabfile:
        for row in csv.reader(tabfile, delimiter='\t'):
            dictionary[row[word_col]] = {
                'V': row[v_col],
                'A': row[a_col],
                'D': row[d_col],
            }

    feature_set = self._extract_features(text, dictionary, lang)

    emotions = EmotionSet()
    emotions.id = "Emotions0"
    emotion1 = Emotion(id="Emotion0")
    emotion1["onyx:hasEmotionCategory"] = self.emotions_ontology[feature_set['emotion']]
    emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence"] = feature_set['V']
    emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal"] = feature_set['A']
    emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance"] = feature_set['D']

    emotions.onyx__hasEmotion.append(emotion1)
    entry.emotions = [emotions]
    yield entry
def analyse(self, **params):
    """Analyse text with the WASSA regression models.

    Params (via **params):
      input     -- text to analyse.
      estimator -- 'LSTM' (default), 'averaged' (mean of the LSTM and SVR
                   predictions per emotion), or anything else for SVR only.

    Returns a Results object with one Entry whose EmotionSet holds a VAD
    centroid emotion plus one intensity-bearing emotion per category,
    intensities scaled by onyx__maxIntensityValue (100).
    """
    logger.debug(
        "wassaRegression LSTM Analysing with params {}".format(params))
    st = datetime.now()

    text_input = params.get("input", None)
    text = self._text_preprocessor(text_input)
    self.ESTIMATOR = params.get("estimator", 'LSTM')

    if self.ESTIMATOR == 'LSTM':
        X_lstm = self._lists_to_vectors(text=text)
        feature_text = self._extract_features(X_lstm)
    elif self.ESTIMATOR == 'averaged':
        # Blend the two model families per emotion name.
        X_lstm = self._lists_to_vectors(text=text)
        X_svr = self._convert_text_to_vector(text=text, text_input=text_input)
        feature_text_lstm = self._extract_features(X_lstm)
        feature_text_svr = self._extract_features_svr(X_svr)
        feature_text = {
            emo: np.mean([feature_text_lstm[emo], feature_text_svr[emo]])
            for emo in self._emoNames
        }
    else:
        X_svr = self._convert_text_to_vector(text=text, text_input=text_input)
        feature_text = self._extract_features_svr(X_svr)

    logger.info("{} {}".format(datetime.now() - st, "string analysed"))

    response = Results()
    entry = Entry()
    entry.nif__isString = text_input

    emotionSet = EmotionSet()
    emotionSet.id = "Emotions"
    emotionSet.onyx__maxIntensityValue = float(100.0)

    emotion1 = Emotion()
    # FIX: the weights list does not depend on the axis, so compute it
    # once instead of rebuilding it for each of the three dimensions.
    weights = [feature_text[i] for i in feature_text]
    for dimension in ['V', 'A', 'D']:
        if not all(v == 0 for v in weights):
            value = np.average(
                [self.centroids[i][dimension] for i in feature_text],
                weights=weights)
        else:
            # Neutral midpoint when every predicted intensity is zero.
            value = 5.0
        emotion1[self.centroid_mappings[dimension]] = value
    emotionSet.onyx__hasEmotion.append(emotion1)

    for i in feature_text:
        emotionSet.onyx__hasEmotion.append(
            Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[i],
                    onyx__hasEmotionIntensity=float(feature_text[i]) *
                    emotionSet.onyx__maxIntensityValue))

    entry.emotions = [emotionSet]
    response.entries.append(entry)
    return response
def analyse(self, **params):
    """Analyse text with the hashtag-trained LSTM emotion model.

    Params (via **params):
      input      -- text to analyse.
      estimation -- 'Probabilities' (default) emits every category with a
                    0-100 intensity; 'Classes' emits only fired categories.

    Returns a Results object with one Entry carrying an EmotionSet that
    holds a VAD centroid emotion plus the per-category emotions.
    """
    logger.debug("Hashtag LSTM Analysing with params {}".format(params))

    text_input = params.get("input", None)
    self._ESTIMATION = params.get("estimation", 'Probabilities')

    # Feature extraction.
    clean_text = self._text_preprocessor(text_input)
    vectors = self._lists_to_vectors(text=clean_text)
    scores = self._extract_features(X=vectors)

    # Response assembly.
    response = Results()
    entry = Entry()
    entry.nif__isString = text_input

    emotion_set = EmotionSet()
    emotion_set.id = "Emotions"
    probabilistic = self._ESTIMATION == 'Probabilities'
    if probabilistic:
        emotion_set.onyx__maxIntensityValue = float(100.0)

    vad_emotion = Emotion()
    for axis in ['V', 'A', 'D']:
        # 'surprise' has no centroid, so it is excluded from the average.
        axis_weights = [scores[name] for name in scores if name != 'surprise']
        if any(w != 0 for w in axis_weights):
            centroid_values = [
                self.centroids[name][axis]
                for name in scores if name != 'surprise'
            ]
            axis_value = np.average(centroid_values, weights=axis_weights)
        else:
            axis_value = 5.0
        vad_emotion[self.centroid_mappings[axis]] = axis_value
    emotion_set.onyx__hasEmotion.append(vad_emotion)

    for name in scores:
        if probabilistic:
            emotion_set.onyx__hasEmotion.append(
                Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[name],
                        onyx__hasEmotionIntensity=float(scores[name]) * 100))
        elif self._ESTIMATION == 'Classes' and scores[name] > 0:
            emotion_set.onyx__hasEmotion.append(
                Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[name]))

    entry.emotions = [emotion_set]
    response.entries.append(entry)
    return response