Example no. 1
    def analyse(self, **params):
        lang = params.get("language", "auto")
        res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
                            json.dumps({"language": lang,
                                        "data": [{"text": params["input"]}]
                                        }
                                       )
                            )

        p = params.get("prefix", None)
        response = Results(prefix=p)
        polarity_value = self.maxPolarityValue*int(res.json()["data"][0]
                                                   ["polarity"]) * 0.25
        polarity = "marl:Neutral"
        neutral_value = self.maxPolarityValue / 2.0
        if polarity_value > neutral_value:
            polarity = "marl:Positive"
        elif polarity_value < neutral_value:
            polarity = "marl:Negative"

        entry = Entry(id="Entry0",
                      nif__isString=params["input"])
        sentiment = Sentiment(id="Sentiment0",
                            prefix=p,
                            marl__hasPolarity=polarity,
                            marl__polarityValue=polarity_value)
        sentiment.prov__wasGeneratedBy = self.id
        entry.sentiments = []
        entry.sentiments.append(sentiment)
        entry.language = lang
        response.entries.append(entry)
        return response
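The snippet above hinges on one arithmetic detail: Sentiment140 reports polarity as 0 (negative), 2 (neutral) or 4 (positive), so multiplying by 0.25 rescales it to a fraction of maxPolarityValue. A minimal standalone sketch of that mapping, assuming a hypothetical maxPolarityValue of 1.0:

def to_marl(api_polarity, max_polarity_value=1.0):
    # Sentiment140 convention: 0 -> 0.0, 2 -> mid-scale, 4 -> full scale
    polarity_value = max_polarity_value * int(api_polarity) * 0.25
    neutral_value = max_polarity_value / 2.0
    if polarity_value > neutral_value:
        return "marl:Positive", polarity_value
    if polarity_value < neutral_value:
        return "marl:Negative", polarity_value
    return "marl:Neutral", polarity_value

assert to_marl(4)[0] == "marl:Positive"
assert to_marl(2)[0] == "marl:Neutral"
assert to_marl(0)[0] == "marl:Negative"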
Example no. 2
 def analyse(self, *args, **kwargs):
     self.sh['counter'] = self.sh['counter'] + 1
     e = Entry()
     e.nif__isString = self.sh['counter']
     r = Results()
     r.entries.append(e)
     return r
Example no. 3
    def analyse(self, **params):
        lang = params.get("language", "auto")
        res = requests.post(
            "http://www.sentiment140.com/api/bulkClassifyJson",
            json.dumps({
                "language": lang,
                "data": [{
                    "text": params["input"]
                }]
            }))

        p = params.get("prefix", None)
        response = Response(prefix=p)
        polarity_value = self.maxPolarityValue * int(
            res.json()["data"][0]["polarity"]) * 0.25
        polarity = "marl:Neutral"
        neutral_value = self.maxPolarityValue / 2.0
        if polarity_value > neutral_value:
            polarity = "marl:Positive"
        elif polarity_value < neutral_value:
            polarity = "marl:Negative"
        entry = Entry(id="Entry0", text=params["input"], prefix=p)
        opinion = Opinion(id="Opinion0",
                          prefix=p,
                          hasPolarity=polarity,
                          polarityValue=polarity_value)
        opinion["prov:wasGeneratedBy"] = self.id
        entry.opinions.append(opinion)
        entry.language = lang
        response.entries.append(entry)
        return response
Example no. 4
 def analyse(self, *args, **kwargs):
     self.sh['counter'] = self.sh['counter'] + 1
     e = Entry()
     e.nif__isString = self.sh['counter']
     r = Results()
     r.entries.append(e)
     return r
Example no. 5
    def analyse(self, **params):
        logger.debug(
            "SentimentAnalysisDL Analysing with params {}".format(params))

        text_input = params.get("input", None)

        # st = datetime.now()
        text = self.cleanTweet(text_input)
        # logger.info("{} {}".format(datetime.now() - st, "tweet cleaned"))

        X_test = self.convert_text_to_vector([text], self._tokenizer)
        y_pred = self.classify(X_test)

        response = Results()
        entry = Entry()

        _mapping_labels = {0: 'positive', 1: 'negative', 2: 'neutral'}
        _mapping_values = {0: "1", 1: "-1", 2: "0"}

        for sentence, y_i in zip([text], y_pred):
            sentiment = Sentiment()
            sentiment['marl:hasPolarity'] = _mapping_labels[y_i]
            sentiment["marl:polarityValue"] = _mapping_values[y_i]
            entry.sentiments.append(sentiment)

        entry.nif__isString = text_input
        response.entries.append(entry)

        return response
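A standalone sketch of the label mapping used above: the classifier's integer predictions are converted to polarity labels and string values before being attached to each Sentiment (the sample prediction below is hypothetical):

_mapping_labels = {0: 'positive', 1: 'negative', 2: 'neutral'}
_mapping_values = {0: "1", 1: "-1", 2: "0"}

y_pred = [0, 2]  # hypothetical classifier output
for y_i in y_pred:
    print(_mapping_labels[y_i], _mapping_values[y_i])  # positive 1, then neutral 0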
Example no. 6
    def analyse(self, **params):
        logger.debug("SuggestionMiningDL Analysing with params {}".format(params))

        text_input = params.get("input", None)

        st = datetime.now()
        # Note: sentence-level splitting (via self.split_into_sentences) is
        # disabled here; the whole cleaned tweet is classified at once.
        text_sentences = self.cleanTweet(text_input)

        X_test = self.convert_text_to_vector([text_sentences], self._tokenizer)

        y_pred = self.classify(X_test)
        logger.info("{} {}".format(datetime.now() - st,
                                   "tweet analysed, predicted: " + str(y_pred)))

        response = Results()

        entry = Entry()
        entry.nif__isString = text_input

        _mapping_labels = {0: False, 1: True}

        for sentence, y_i in zip([text_sentences], y_pred):
            suggestion = Suggestion()
            suggestion['hasSuggestion'] = _mapping_labels[y_i]
            suggestion["nif:beginIndex"] = 0
            suggestion["nif:endIndex"] = len(sentence)
            suggestion["nif:anchorOf"] = sentence
            entry.suggestions.append(suggestion)

        response.entries.append(entry)

        return response
Example no. 7
    def analyse(self, **params):
        lang = params.get("language", "auto")
        res = requests.post("http://www.sentiment140.com/api/bulkClassifyJson",
                            json.dumps({"language": lang,
                                        "data": [{"text": params["input"]}]
                                        }
                                       )
                            )

        p = params.get("prefix", None)
        response = Response(prefix=p)
        polarity_value = self.maxPolarityValue*int(res.json()["data"][0]
                                                   ["polarity"]) * 0.25
        polarity = "marl:Neutral"
        neutral_value = self.maxPolarityValue / 2.0
        if polarity_value > neutral_value:
            polarity = "marl:Positive"
        elif polarity_value < neutral_value:
            polarity = "marl:Negative"
        entry = Entry(id="Entry0",
                      text=params["input"],
                      prefix=p)
        opinion = Opinion(id="Opinion0",
                          prefix=p,
                          hasPolarity=polarity,
                          polarityValue=polarity_value)
        opinion["prov:wasGeneratedBy"] = self.id
        entry.opinions.append(opinion)
        entry.language = lang
        response.entries.append(entry)
        return response
Example no. 8
 def test_results(self):
     r = Results()
     e = Entry()
     e.nif__isString = "Results test"
     r.entries.append(e)
     r.id = ":test_results"
     r.validate()
Example no. 9
    def analyse(self, **params):
        lang = params.get("language", "auto")
        res = requests.post(
            "http://www.sentiment140.com/api/bulkClassifyJson",
            json.dumps({
                "language": lang,
                "data": [{
                    "text": params["input"]
                }]
            }))

        p = params.get("prefix", None)
        response = Results(prefix=p)
        polarity_value = self.maxPolarityValue * int(
            res.json()["data"][0]["polarity"]) * 0.25
        polarity = "marl:Neutral"
        neutral_value = self.maxPolarityValue / 2.0
        if polarity_value > neutral_value:
            polarity = "marl:Positive"
        elif polarity_value < neutral_value:
            polarity = "marl:Negative"

        entry = Entry(id="Entry0", nif__isString=params["input"])
        sentiment = Sentiment(id="Sentiment0",
                              prefix=p,
                              marl__hasPolarity=polarity,
                              marl__polarityValue=polarity_value)
        sentiment.prov__wasGeneratedBy = self.id
        entry.sentiments = []
        entry.sentiments.append(sentiment)
        entry.language = lang
        response.entries.append(entry)
        return response
Example no. 10
 def test_results(self):
     r = Results()
     e = Entry()
     e.nif__isString = "Results test"
     r.entries.append(e)
     r.id = ":test_results"
     r.validate()
Example no. 11
 def test_id(self):
     """ Adding the id after creation should overwrite the automatic ID
     """
     r = Entry(_auto_id=True)
     j = r.jsonld()
     assert '@id' in j
     r.id = "test"
     j2 = r.jsonld()
     assert j2['@id'] == 'test'
     assert 'id' not in j2
Example no. 12
 def test_id(self):
     """ Adding the id after creation should overwrite the automatic ID
     """
     r = Entry()
     j = r.jsonld()
     assert '@id' in j
     r.id = "test"
     j2 = r.jsonld()
     assert j2['@id'] == 'test'
     assert 'id' not in j2
Example no. 13
    def analyse(self, **params):
        logger.debug("Hashtag SVM Analysing with params {}".format(params))

        text_input = params.get("input", None)
        self.ESTIMATOR = params.get("estimator", 'LinearSVC')

        # EXTRACTING FEATURES

        text = self._text_preprocessor(text_input)
        X = self._convert_text_to_vector(text=text, text_input=text_input,
                                         Dictionary=self._Dictionary)
        feature_text = self._extract_features(X=X, classifiers=self._classifiers,
                                              estimator=self.ESTIMATOR)

        # GENERATING RESPONSE

        response = Results()
        entry = Entry()
        entry.nif__isString = text_input

        emotionSet = EmotionSet()
        emotionSet.id = "Emotions"

        if self.ESTIMATOR == 'SVC':
            emotionSet.onyx__maxIntensityValue = float(100.0)

        emotion1 = Emotion()

        for dimension in ['V', 'A', 'D']:
            weights = [feature_text[i] for i in feature_text if i != 'surprise']
            if not all(v == 0 for v in weights):
                value = np.average([self.centroids[i][dimension]
                                    for i in feature_text if i != 'surprise'],
                                   weights=weights)
            else:
                value = 5.0
            emotion1[self.centroid_mappings[dimension]] = value

        emotionSet.onyx__hasEmotion.append(emotion1)

        for i in feature_text:
            if self.ESTIMATOR == 'SVC':
                emotionSet.onyx__hasEmotion.append(Emotion(
                    onyx__hasEmotionCategory=self.wnaffect_mappings[i],
                    onyx__hasEmotionIntensity=feature_text[i]))
            else:
                if feature_text[i] > 0:
                    emotionSet.onyx__hasEmotion.append(Emotion(
                        onyx__hasEmotionCategory=self.wnaffect_mappings[i]))

        entry.emotions = [emotionSet]
        response.entries.append(entry)

        return response
Example no. 14
 def test_template(self):
     r = Results()
     e = Entry()
     e.nif__isString = 'testing the template'
     sent = Sentiment()
     sent.polarity = 'marl:Positive'
     r.entries.append(e)
     e.sentiments.append(sent)
     template = ('{% for entry in entries %}'
                 '{{ entry["nif:isString"] | upper }}'
                 ',{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}'
                 '{% endfor %}')
     res = r.serialize(template=template)
     assert res == 'TESTING THE TEMPLATE,Positive'
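The serialize(template=...) call renders the results through a template; the {% for %} and {{ }} markers are Jinja2 syntax, so the same output can be reproduced with plain Jinja2. A minimal sketch, assuming the entries are exposed as plain dicts:

from jinja2 import Template

entries = [{'nif:isString': 'testing the template',
            'sentiments': [{'marl:hasPolarity': 'marl:Positive'}]}]
template = Template('{% for entry in entries %}'
                    '{{ entry["nif:isString"] | upper }}'
                    ',{{ entry.sentiments[0]["marl:hasPolarity"].split(":")[1] }}'
                    '{% endfor %}')
assert template.render(entries=entries) == 'TESTING THE TEMPLATE,Positive'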
Example no. 15
    def analyse(self, **params):

        logger.debug("Analysing with params {}".format(params))

        text_input = params.get("input", None)

        text = self._my_preprocessor(text_input)
        dictionary = {}
        lang = params.get("language", "auto")
        if lang == 'es':
            with open(self.local_path + self.anew_path_es, 'rb') as tabfile:
                reader = csv.reader(tabfile, delimiter='\t')
                for row in reader:
                    dictionary[row[2]] = {}
                    dictionary[row[2]]['V'] = row[4]
                    dictionary[row[2]]['A'] = row[6]
                    dictionary[row[2]]['D'] = row[8]
        else:
            with open(self.local_path + self.anew_path_en, 'rb') as tabfile:
                reader = csv.reader(tabfile, delimiter='\t')
                for row in reader:
                    dictionary[row[0]] = {}
                    dictionary[row[0]]['V'] = row[2]
                    dictionary[row[0]]['A'] = row[4]
                    dictionary[row[0]]['D'] = row[6]

        feature_set = self._extract_features(text, dictionary, lang)

        p = params.get("prefix", None)
        response = Response(prefix=p)

        entry = Entry(id="Entry",
                      text=text_input,
                      prefix=p)
        emotions = EmotionSet(id="Emotions0")
        emotion1 = Emotion(id="Emotion0")

        emotion1["onyx:hasEmotionCategory"] = self.emotions_ontology[feature_set['emotion']]
        emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#valence"] = feature_set['V']
        emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#arousal"] = feature_set['A']
        emotion1["http://www.gsi.dit.upm.es/ontologies/onyx/vocabularies/anew/ns#dominance"] = feature_set['D']

        emotions.emotions.append(emotion1)

        entry.emotionSets.append(emotions)
        entry.language = lang
        response.entries.append(entry)
        return response
Example no. 16
 def test_template(self):
     r = Results()
     e = Entry()
     e.nif__isString = 'testing the template'
     sent = Sentiment()
     sent.polarity = 'marl:Positive'
     r.entries.append(e)
     e.sentiments.append(sent)
     template = (
         '{% for entry in entries %}'
         '{{ entry["nif:isString"] | upper }}'
         ',{{entry.sentiments[0]["marl:hasPolarity"].split(":")[1]}}'
         '{% endfor %}')
     res = r.serialize(template=template)
     assert res == 'TESTING THE TEMPLATE,Positive'
Example no. 17
 def analyse_entry(self, entry, params):
     chunker_type = params["delimiter"]
     original_text = entry['nif:isString']
     if chunker_type == "sentence":
         tokenizer = PunktSentenceTokenizer()
     if chunker_type == "paragraph":
         tokenizer = LineTokenizer()
     chars = list(tokenizer.span_tokenize(original_text))
     for i, chunk in enumerate(tokenizer.tokenize(original_text)):
         print(chunk)
         e = Entry()
         e['nif:isString'] = chunk
         if entry.id:
             e.id = entry.id + "#char={},{}".format(chars[i][0], chars[i][1])
         yield e
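The chunker relies on span_tokenize to recover character offsets, which become RFC 5147-style "#char=start,end" fragment identifiers on the new entries. A standalone sketch with NLTK:

from nltk.tokenize import PunktSentenceTokenizer

text = "First sentence. Second one."
tokenizer = PunktSentenceTokenizer()
for start, end in tokenizer.span_tokenize(text):
    # e.g. "#char=0,15 First sentence."
    print("#char={},{}".format(start, end), text[start:end])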
Example no. 18
    def test_turtle(self):
        """Any model should be serializable as a turtle file"""
        ana = EmotionAnalysis()
        res = Results()
        res.analysis.append(ana)
        entry = Entry(text='Just testing')
        eSet = EmotionSet()
        emotion = Emotion()
        entry.emotions.append(eSet)
        res.entries.append(entry)
        eSet.onyx__hasEmotion.append(emotion)
        eSet.prov__wasGeneratedBy = ana.id
        triples = ('ana a :Analysis',
                   'entry a :entry',
                   '      nif:isString "Just testing"',
                   '      onyx:hasEmotionSet eSet',
                   'eSet a onyx:EmotionSet',
                   '     prov:wasGeneratedBy ana',
                   '     onyx:hasEmotion emotion',
                   'emotion a onyx:Emotion',
                   'res a :results',
                   '    me:AnalysisInvolved ana',
                   '    prov:used entry')

        t = res.serialize(format='turtle')
        print(t)
        g = rdflib.Graph().parse(data=t, format='turtle')
        assert len(g) == len(triples)
Example no. 19
 def test(self):
     params = dict()
     results = list()
     for i in range(100):
         res = next(self.analyse_entry(Entry(nif__isString="Hello"), params))
         res.validate()
         results.append(res.emotions[0]['onyx:hasEmotion'][0]['onyx:hasEmotionCategory'])
Example no. 20
 def test_serializable(self):
     r = Results()
     e = Entry()
     r.entries.append(e)
     d = r.serializable()
     assert d
     assert d['entries']
Example no. 21
    def test(self, *args, **kwargs):
        results = list()
        params = {
            'algo': 'emotion-wnaffect',
            'intype': 'direct',
            'expanded-jsonld': 0,
            'informat': 'text',
            'prefix': '',
            'plugin_type': 'analysisPlugin',
            'urischeme': 'RFC5147String',
            'outformat': 'json-ld',
            'i': 'Hello World',
            'input': 'Hello World',
            'conversion': 'full',
            'language': 'en',
            'algorithm': 'emotion-wnaffect'
        }

        self.activate()
        texts = {
            'I hate you': 'anger',
            'i am sad': 'sadness',
            'i am happy with my marks': 'joy',
            'This movie is scary': 'negative-fear'
        }

        for text in texts:
            response = next(
                self.analyse_entry(Entry(nif__isString=text),
                                   self.activity(params)))
            expected = texts[text]
            emotionSet = response.emotions[0]
            max_emotion = max(emotionSet['onyx:hasEmotion'],
                              key=lambda x: x['onyx:hasEmotionIntensity'])
            assert max_emotion['onyx:hasEmotionCategory'] == expected
Example no. 22
    def test_jsonld(self):
        prueba = {"id": "test", "analysis": [], "entries": []}
        r = Results(**prueba)
        print("Response's context: ")
        pprint(r._context)

        assert r.id == "test"

        j = r.jsonld(with_context=True)
        print("As JSON:")
        pprint(j)
        assert ("@context" in j)
        assert ("marl" in j["@context"])
        assert ("entries" in j["@context"])
        assert (j["@id"] == "test")
        assert "id" not in j

        r6 = Results(**prueba)
        e = Entry({"@id": "ohno", "nif:isString": "Just testing"})
        r6.entries.append(e)
        logging.debug("Reponse 6: %s", r6)
        assert ("marl" in r6._context)
        assert ("entries" in r6._context)
        j6 = r6.jsonld(with_context=True)
        logging.debug("jsonld: %s", j6)
        assert ("@context" in j6)
        assert ("entries" in j6)
        assert ("analysis" in j6)
        resp = r6.flask()
        received = json.loads(resp.data.decode())
        logging.debug("Response: %s", j6)
        assert (received["entries"])
        assert (received["entries"][0]["nif:isString"] == "Just testing")
        assert (received["entries"][0]["nif:isString"] != "Not testing")
Example no. 23
    def analyse(self, **params):
        logger.debug("emotionService with params {}".format(params))

        filename = params.get("i", None)

        # FILE MANIPULATIONS ------------------------------

        if validators.url(filename):
            filename = self._download_file(saveFolder=self._storage_path,
                                           url=filename)
        else:
            filename = os.path.join(self._storage_path, filename)

        logger.info("{} {}".format(datetime.now(), filename))

        if not os.path.isfile(filename):
            raise Error("File %s does not exist" % filename)

        # EXTRACTING FEATURES ------------------------------

        feature_set = self._extract_features(filename, convert=True)
        # self._remove_file(filename)

        # GENERATING OUTPUT --------------------------------

        response = Results()
        entry = Entry()
        entry['filename'] = os.path.basename(filename)

        emotionSet = EmotionSet()
        emotionSet.id = "Emotions"

        emotion1 = Emotion()

        for dimension in self._dimensions:
            emotion1[self._centroid_mappings[dimension]] = 5 * (1 + feature_set[dimension])

        emotionSet.onyx__hasEmotion.append(emotion1)

        entry.emotions = [emotionSet]
        response.entries.append(entry)

        return response
Example no. 24
 def test_entries(self):
     e = Entry()
     self.assertRaises(jsonschema.ValidationError, e.validate)
     e.nif__isString = "this is a test"
     e.nif__beginIndex = 0
     e.nif__endIndex = 10
     e.validate()
Example no. 25
 def test_serializable(self):
     r = Results()
     e = Entry()
     ent = Entity()
     e.entities.append(ent)
     r.entries.append(e)
     d = r.serializable()
     assert d
     assert d['entries']
     assert d['entries'][0]['entities']
Example no. 26
 def analyse_entry(self, entry, activity):
     yield entry
     chunker_type = activity.params["delimiter"]
     original_text = entry['nif:isString']
     if chunker_type == "sentence":
         tokenizer = PunktSentenceTokenizer()
     if chunker_type == "paragraph":
         tokenizer = LineTokenizer()
     chars = list(tokenizer.span_tokenize(original_text))
     if len(chars) == 1:
         # This sentence was already split
         return
     for i, chunk in enumerate(chars):
         start, end = chunk
         e = Entry()
         e['nif:isString'] = original_text[start:end]
         if entry.id:
             e.id = entry.id + "#char={},{}".format(start, end)
         yield e
Example no. 27
 def analyse_entry(self, entry, params):
     yield entry
     chunker_type = params["delimiter"]
     original_text = entry['nif:isString']
     if chunker_type == "sentence":
         tokenizer = PunktSentenceTokenizer()
     if chunker_type == "paragraph":
         tokenizer = LineTokenizer()
     chars = list(tokenizer.span_tokenize(original_text))
     if len(chars) == 1:
         # This sentence was already split
         return
     for i, chunk in enumerate(chars):
         start, end = chunk
         e = Entry()
         e['nif:isString'] = original_text[start:end]
         if entry.id:
             e.id = entry.id + "#char={},{}".format(start, end)
         yield e
Example no. 28
 def test(self):
     params = dict()
     results = list()
     for i in range(100):
         res = next(self.analyse_entry(Entry(nif__isString="Hello"),
                                       params))
         res.validate()
         results.append(res.sentiments[0]['marl:hasPolarity'])
     assert 'marl:Positive' in results
     assert 'marl:Negative' in results
Example no. 29
    def analyse(self, *args, **kwargs):
        logger.warn('Analysing with the example.')
        logger.warn('The answer to this response is: %s.' %
                    kwargs['parameter'])
        resp = Response()
        ent = Entry(kwargs['input'])
        ent['example:reversed'] = kwargs['input'][::-1]
        ent['example:the_answer'] = kwargs['parameter']
        resp.entries.append(ent)

        return resp
Example no. 30
    def analyse(self, **params):
        lang = params.get("language", "auto")

        response = Results()
        polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
        polarity = "marl:Neutral"
        if polarity_value > 0:
            polarity = "marl:Positive"
        elif polarity_value < 0:
            polarity = "marl:Negative"
        entry = Entry({"id":":Entry0",
                       "nif:isString": params["input"]})
        sentiment = Sentiment({"id": ":Sentiment0",
                               "marl:hasPolarity": polarity,
                               "marl:polarityValue": polarity_value})
        sentiment["prov:wasGeneratedBy"] = self.id
        entry.sentiments = []
        entry.sentiments.append(sentiment)
        entry.language = lang
        response.entries.append(entry)
        return response
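This mock plugin draws its polarity value from a Gaussian centred slightly on the positive side and clamps it to the marl range; a one-line sketch of that clamp:

import random

polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))  # clamp the sample to [-1, 1]
assert -1 <= polarity_value <= 1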
Example no. 31
 def test_entries(self):
     e = Entry()
     self.assertRaises(jsonschema.ValidationError, e.validate)
     e.nif__isString = "this is a test"
     e.nif__beginIndex = 0
     e.nif__endIndex = 10
     e.validate()
Example no. 32
    def analyse(self, **params):
        lang = params.get("language", "auto")

        response = Results()
        polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
        polarity = "marl:Neutral"
        if polarity_value > 0:
            polarity = "marl:Positive"
        elif polarity_value < 0:
            polarity = "marl:Negative"
        entry = Entry({"id": ":Entry0", "nif:isString": params["input"]})
        sentiment = Sentiment({
            "id": ":Sentiment0",
            "marl:hasPolarity": polarity,
            "marl:polarityValue": polarity_value
        })
        sentiment["prov:wasGeneratedBy"] = self.id
        entry.sentiments = []
        entry.sentiments.append(sentiment)
        entry.language = lang
        response.entries.append(entry)
        return response
Example no. 33
    def test_response(self):
        r = Response(context=os.path.normpath(
            os.path.join(__file__, "..", "..", "context.jsonld")))
        assert("@context" in r)
        assert(r._frame)
        logging.debug("Default frame: %s", r._frame)
        assert("marl" in r.context)
        assert("entries" in r.context)

        r2 = Response(context=json.loads('{"test": "roger"}'))
        assert("test" in r2.context)

        r3 = Response(context=None)
        del r3.context
        assert("@context" not in r3)
        assert("entries" in r3)
        assert("analysis" in r3)

        r4 = Response()
        assert("@context" in r4)
        assert("entries" in r4)
        assert("analysis" in r4)

        dummy = SenpyPlugin({"name": "dummy", "version": 0})
        r5 = Response({"dummy": dummy}, context=None, frame=None)
        logging.debug("Response 5: %s", r5)
        assert("dummy" in r5)
        assert(r5["dummy"].name == "dummy")
        js = r5.jsonld(context={}, frame={})
        logging.debug("jsonld 5: %s", js)
        assert("dummy" in js)
        assert(js["dummy"].name == "dummy")

        r6 = Response()
        r6.entries.append(Entry(text="Just testing"))
        logging.debug("Reponse 6: %s", r6)
        assert("@context" in r6)
        assert("marl" in r6.context)
        assert("entries" in r6.context)
        js = r6.jsonld()
        logging.debug("jsonld: %s", js)
        assert("entries" in js)
        assert("entries" in js)
        assert("analysis" in js)
        resp = r6.flask()
        received = json.loads(resp.data)
        logging.debug("Response: %s", js)
        assert(received["entries"])
        assert(received["entries"][0]["text"] == "Just testing")
        assert(received["entries"][0]["text"] != "Not testing")
Example no. 34
    def analyse(self, **params):
        lang = params.get("language", "auto")

        p = params.get("prefix", None)
        response = Response(prefix=p)
        polarity_value = max(-1, min(1, random.gauss(0.2, 0.2)))
        polarity = "marl:Neutral"
        if polarity_value > 0:
            polarity = "marl:Positive"
        elif polarity_value < 0:
            polarity = "marl:Negative"
        entry = Entry(id="Entry0",
                      text=params["input"],
                      prefix=p)
        opinion = Opinion(id="Opinion0",
                          prefix=p,
                          hasPolarity=polarity,
                          polarityValue=polarity_value)
        opinion["prov:wasGeneratedBy"] = self.id
        entry.opinions.append(opinion)
        entry.language = lang
        response.entries.append(entry)
        return response
Example no. 35
    def analyse(self, **params):

        logger.debug("Analysing with params {}".format(params))

        text_input = params.get("input", None)

        text = self._my_preprocessor(text_input)

        feature_text = self._extract_features(text)

        response = Results()

        entry = Entry(id="Entry",
                      text=text_input)
        emotionSet = EmotionSet(id="Emotions0")
        emotions = emotionSet.onyx__hasEmotion

        for i in feature_text:
            emotions.append(Emotion(onyx__hasEmotionCategory=self._wnaffect_mappings[i],
                                    onyx__hasEmotionIntensity=feature_text[i]))

        entry.emotions = [emotionSet]
        response.entries.append(entry)
        return response
Example no. 36
 def analyse(self, **params):
     classifier = params.get("classifier", "polyglot-es")
     p = params.get("prefix", None)
     response = Results(prefix=p)
     (entities, types, startIndexes,
      endIndexes) = wrapper.service(params.get("input"), classifier)
     print(entities, types, startIndexes, endIndexes)
     for x in range(0, len(entities)):
         entry = Entry(id="Entry" + str(x),
                       prefix=p,
                       anchorOf=entities[x],
                       taClassRef="dbo:" + types[x],
                       startIndex=startIndexes[x],
                       endIndex=endIndexes[x])
         response.entries.append(entry)
     return response
Example no. 37
 def test_convert_emotions(self):
     self.senpy.activate_all(sync=True)
     plugin = Plugin({
         'id': 'imaginary',
         'onyx:usesEmotionModel': 'emoml:fsre-dimensions'
     })
     eSet1 = EmotionSet()
     activity = plugin.activity()
     eSet1.prov(activity)
     eSet1['onyx:hasEmotion'].append(
         Emotion({
             'emoml:arousal': 1,
             'emoml:potency': 0,
             'emoml:valence': 0
         }))
     response = Results({
         'activities': [activity],
         'entries': [
             Entry({
                 'nif:isString': 'much ado about nothing',
                 'onyx:hasEmotionSet': [eSet1]
             })
         ]
     })
     params = {
         'emotion-model': 'emoml:big6',
         'algorithm': ['conversion'],
         'conversion': 'full'
     }
     r1 = deepcopy(response)
     r1.parameters = params
     self.senpy.analyse(r1)
     assert len(r1.entries[0].emotions) == 2
     params['conversion'] = 'nested'
     r2 = deepcopy(response)
     r2.parameters = params
     self.senpy.analyse(r2)
     assert len(r2.entries[0].emotions) == 1
     assert r2.entries[0].emotions[0]['prov:wasDerivedFrom'] == eSet1
     params['conversion'] = 'filtered'
     r3 = deepcopy(response)
     r3.parameters = params
     self.senpy.analyse(r3)
     assert len(r3.entries[0].emotions) == 1
     r3.jsonld()
Example no. 38
 def test_convert_emotions(self):
     self.senpy.activate_all(sync=True)
     plugin = Plugin({
         'id': 'imaginary',
         'onyx:usesEmotionModel': 'emoml:fsre-dimensions'
     })
     eSet1 = EmotionSet()
     eSet1.prov__wasGeneratedBy = plugin['id']
     eSet1['onyx:hasEmotion'].append(
         Emotion({
             'emoml:arousal': 1,
             'emoml:potency': 0,
             'emoml:valence': 0
         }))
     response = Results({
         'entries':
         [Entry({
             'text': 'much ado about nothing',
             'emotions': [eSet1]
         })]
     })
     params = {'emotionModel': 'emoml:big6', 'conversion': 'full'}
     r1 = deepcopy(response)
     self.senpy.convert_emotions(r1, [
         plugin,
     ], params)
     assert len(r1.entries[0].emotions) == 2
     params['conversion'] = 'nested'
     r2 = deepcopy(response)
     self.senpy.convert_emotions(r2, [
         plugin,
     ], params)
     assert len(r2.entries[0].emotions) == 1
     assert r2.entries[0].emotions[0]['prov:wasDerivedFrom'] == eSet1
     params['conversion'] = 'filtered'
     r3 = deepcopy(response)
     self.senpy.convert_emotions(r3, [
         plugin,
     ], params)
     assert len(r3.entries[0].emotions) == 1
Example no. 39
    def analyse(self, **params):
        logger.debug(
            "wassaRegression LSTM Analysing with params {}".format(params))

        st = datetime.now()

        text_input = params.get("input", None)
        text = self._text_preprocessor(text_input)

        self.ESTIMATOR = params.get("estimator", 'LSTM')

        if self.ESTIMATOR == 'LSTM':
            X_lstm = self._lists_to_vectors(text=text)
            feature_text = self._extract_features(X_lstm)

        elif self.ESTIMATOR == 'averaged':
            X_lstm = self._lists_to_vectors(text=text)
            X_svr = self._convert_text_to_vector(text=text,
                                                 text_input=text_input)

            feature_text_lstm = self._extract_features(X_lstm)
            feature_text_svr = self._extract_features_svr(X_svr)

            feature_text = {
                emo: np.mean([feature_text_lstm[emo], feature_text_svr[emo]])
                for emo in self._emoNames
            }

        else:
            X_svr = self._convert_text_to_vector(text=text,
                                                 text_input=text_input)
            feature_text = self._extract_features_svr(X_svr)

        logger.info("{} {}".format(datetime.now() - st, "string analysed"))

        response = Results()

        entry = Entry()
        entry.nif__isString = text_input

        emotionSet = EmotionSet()
        emotionSet.id = "Emotions"

        emotionSet.onyx__maxIntensityValue = float(100.0)

        emotion1 = Emotion()
        for dimension in ['V', 'A', 'D']:
            weights = [feature_text[i] for i in feature_text]
            if not all(v == 0 for v in weights):
                value = np.average(
                    [self.centroids[i][dimension] for i in feature_text],
                    weights=weights)
            else:
                value = 5.0
            emotion1[self.centroid_mappings[dimension]] = value

        emotionSet.onyx__hasEmotion.append(emotion1)

        for i in feature_text:
            emotionSet.onyx__hasEmotion.append(
                Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[i],
                        onyx__hasEmotionIntensity=float(feature_text[i]) *
                        emotionSet.onyx__maxIntensityValue))

        entry.emotions = [
            emotionSet,
        ]

        response.entries.append(entry)

        return response
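The V/A/D values above are intensity-weighted averages of per-emotion centroids. A standalone sketch with numpy, using hypothetical centroids and intensities in place of the plugin's trained data:

import numpy as np

centroids = {'anger': {'V': 2.5}, 'joy': {'V': 8.2}, 'sadness': {'V': 2.2}}  # hypothetical
feature_text = {'anger': 0.1, 'joy': 0.8, 'sadness': 0.1}  # hypothetical intensities

weights = [feature_text[e] for e in feature_text]
if not all(v == 0 for v in weights):
    value = np.average([centroids[e]['V'] for e in feature_text], weights=weights)
else:
    value = 5.0  # fall back to the scale midpoint when no emotion fires
print(value)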
Example no. 40
    def analyse(self, **params):
        logger.debug("Analysing with params {}".format(params))

        text = params.get("input", None)
        tokens = self._tokenize(text)
        tokens = self._pos(tokens)

        for i in tokens:
            tokens[i]['lemmas'] = {}
            for w in tokens[i]['tokens']:
                lemmas = wn.lemmas(w[0], lang='spa')
                if len(lemmas) == 0:
                    continue
                tokens[i]['lemmas'][w[0]] = lemmas
        logger.debug("Tokens: {}".format(tokens))

        trans = TextBlob(unicode(text)).translate(from_lang='es',to='en')
        useful_synsets = {}
        for s_i, t_s in enumerate(trans.sentences):
            useful_synsets[s_i] = {}
            for w_i, t_w in enumerate(trans.sentences[s_i].words):
                synsets = wn.synsets(trans.sentences[s_i].words[w_i])
                if len(synsets) == 0:
                    continue
                eq_synset = self._compare_synsets(synsets, tokens, s_i)
                useful_synsets[s_i][t_w] = eq_synset
        logger.debug("Synsets used for analysis: {}".format(useful_synsets))

        scores = {}
        for i in tokens:
            scores[i] = {}
            if useful_synsets[i] is not None:
                for word in useful_synsets[i]:
                    if useful_synsets[i][word] is None:
                        continue
                    temp_scores = self._swn.get_score(useful_synsets[i][word].name().split('.')[0].replace('_', ' '))
                    for score in temp_scores:
                        if score['synset'] == useful_synsets[i][word]:
                            t_score = score['pos'] - score['neg']
                            f_score = 'neu'
                            if t_score > 0:
                                f_score = 'pos'
                            elif t_score < 0:
                                f_score = 'neg'
                            score['score'] = f_score
                            scores[i][word] = score
                            break
        logger.debug("All scores (some not used): {}".format(scores))


        lang = params.get("language", "auto")
        p = params.get("prefix", None)
        response = Results()

        for i in scores:
            n_pos = 0.0
            n_neg = 0.0
            for w in scores[i]:
                if scores[i][w]['score'] == 'pos':
                    n_pos += 1.0
                elif scores[i][w]['score'] == 'neg':
                    n_neg += 1.0

            inter = interp1d([-1.0, 1.0], [0.0, 1.0])
            try:
                g_score = (n_pos - n_neg) / (n_pos + n_neg)
                g_score = float(inter(g_score))
            except ZeroDivisionError:
                # n_pos + n_neg == 0: no scored words, default to neutral
                g_score = 0.5

            polarity = 'marl:Neutral'
            if g_score > 0.5:
                polarity = 'marl:Positive'
            elif g_score < 0.5:
                polarity = 'marl:Negative'

            entry = Entry(id="Entry"+str(i),
                      nif_isString=tokens[i]['sentence'])

            opinion = Sentiment(id="Opinion0"+'_'+str(i),
                          marl__hasPolarity=polarity,
                          marL__polarityValue=float("{0:.2f}".format(g_score)))

            opinion["prov:wasGeneratedBy"] = self.id

            entry.sentiments = []
            entry.sentiments.append(opinion)
            entry.language = lang
            response.entries.append(entry)
        return response
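The per-sentence score above is rescaled with scipy: interp1d maps the ratio (n_pos - n_neg) / (n_pos + n_neg) from [-1, 1] onto [0, 1], so 0.5 is the neutral midpoint. A minimal sketch:

from scipy.interpolate import interp1d

inter = interp1d([-1.0, 1.0], [0.0, 1.0])
n_pos, n_neg = 3.0, 1.0
g_score = float(inter((n_pos - n_neg) / (n_pos + n_neg)))
print(g_score)  # 0.75: more positive than negative words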
Example no. 41
    def analyse(self, **params):

        logger.debug("Hashtag LSTM Analysing with params {}".format(params))

        text_input = params.get("input", None)
        self._ESTIMATION = params.get("estimation", 'Probabilities')

        # EXTRACTING FEATURES

        text = self._text_preprocessor(text_input)

        X = self._lists_to_vectors(text=text)
        feature_text = self._extract_features(X=X)

        # GENERATING RESPONSE

        response = Results()

        entry = Entry()
        entry.nif__isString = text_input

        emotionSet = EmotionSet()
        emotionSet.id = "Emotions"

        if self._ESTIMATION == 'Probabilities':
            emotionSet.onyx__maxIntensityValue = float(100.0)

        emotion1 = Emotion()
        for dimension in ['V', 'A', 'D']:
            weights = [
                feature_text[i] for i in feature_text if (i != 'surprise')
            ]
            if not all(v == 0 for v in weights):
                value = np.average([
                    self.centroids[i][dimension]
                    for i in feature_text if (i != 'surprise')
                ],
                                   weights=weights)
            else:
                value = 5.0
            emotion1[self.centroid_mappings[dimension]] = value

        emotionSet.onyx__hasEmotion.append(emotion1)

        for i in feature_text:
            if self._ESTIMATION == 'Probabilities':
                emotionSet.onyx__hasEmotion.append(
                    Emotion(onyx__hasEmotionCategory=self.wnaffect_mappings[i],
                            onyx__hasEmotionIntensity=float(feature_text[i]) *
                            100))
            elif self._ESTIMATION == 'Classes':
                if feature_text[i] > 0:
                    emotionSet.onyx__hasEmotion.append(
                        Emotion(onyx__hasEmotionCategory=self.
                                wnaffect_mappings[i]))
                    #onyx__hasEmotionIntensity=int(feature_text[i])))

        entry.emotions = [
            emotionSet,
        ]
        response.entries.append(entry)

        return response