Example #1
    def analyzeWithTargets(self, text, name, symbol):
        if text is None or text == "":
            raise ValueError(
                "The function analyzeWithTargets() received a None or empty text parameter."
            )

        try:  # With name and symbol targets
            response = Watson.natural_language_understanding.analyze(
                text=text,
                features=Features(
                    emotion=EmotionOptions(targets=[name, "$" + symbol]),
                    sentiment=SentimentOptions(targets=[name, "$" + symbol])),
                #language="en",
                return_analyzed_text=True)
        except Exception as e:
            print(e)
            try:  # Just with name targets
                response = Watson.natural_language_understanding.analyze(
                    text=text,
                    features=Features(
                        emotion=EmotionOptions(targets=[name]),
                        sentiment=SentimentOptions(targets=[name])),
                    #language="en",
                    return_analyzed_text=True)
            except Exception as e:
                print(e)
                try:  # Just with symbol targets
                    response = Watson.natural_language_understanding.analyze(
                        text=text,
                        features=Features(
                            emotion=EmotionOptions(targets=[symbol]),
                            sentiment=SentimentOptions(targets=[symbol])),
                        #language="en",
                        return_analyzed_text=True)
                except Exception as e:
                    print(e)
                    try:  # Without targets
                        response = Watson.natural_language_understanding.analyze(
                            text=text,
                            features=Features(emotion=EmotionOptions(),
                                              sentiment=SentimentOptions()),
                            #language="en",
                            return_analyzed_text=True)
                    except Exception as e:
                        print(e)
                        return False

        print(json.dumps(response, indent=4))

        return response
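Most of the snippets in this collection call a module-level client named natural_language_understanding (or naturalLanguageUnderstanding) that is constructed elsewhere. A minimal setup sketch, assuming the legacy watson_developer_cloud SDK that these examples appear to use; the API key and URL below are placeholders, not values from the original source:

import json

from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 import (
    Features, SentimentOptions, EmotionOptions)

# Placeholder credentials; substitute your own service key and endpoint.
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16',
    iam_apikey='YOUR_IAM_APIKEY',
    url='https://gateway.watsonplatform.net/natural-language-understanding/api')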
Example #2
def sentiment_analysis(data):
    try:
        response = natural_language_understanding.analyze(
            text=data, features=Features(sentiment=SentimentOptions()))
        return response['sentiment']['document']['label']
    except Exception:
        return 'unknown'
Example #3
def main(dict):
    try:
        model_id=""
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-04-05',
            iam_apikey="",
            url='')        
        resposta = natural_language_understanding.analyze(
            text=dict['texto'],
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True, model=model_id),
                sentiment=SentimentOptions())).get_result()
        retorno = {}

        # Save the entities in the result
        if 'entities' in resposta:
            for entity in resposta['entities']:
                retorno[entity['type']] = entity['text']

        # Save the sentiment in the result
        if 'sentiment' in resposta:
            retorno['sentiment'] = resposta['sentiment']['document']['label']

        dict['err'] = False
        dict['resposta'] = retorno
        return dict

    except Exception:
        dict['err'] = True
        dict['resposta'] = "Error in the NLU call."
        return dict
Example #4
def IntelligentCrawlUrl(URL):
    """
	This Function uses IBM Watson's Natural Language Understanding API to crawl the links and get company or person names based on a 	 knowledge graph it already has.
	This Function also return Company/Person names based on relevance score by IBM Natural Language Cognitive API.
	"""
    ListOfEntityOutput = []
    try:
        response = NaturalLanguageUnderstanding.analyze(
            url=URL,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=250),
                              sentiment=SentimentOptions(),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=250)))

    except Exception:
        response = {}

    if response:
        for EveryEntity in response["entities"]:
            if EveryEntity["type"] == "Company":
                if EveryEntity["relevance"] > 0.25:
                    ListOfEntityOutput.append(EveryEntity["text"])
    print(ListOfEntityOutput)
    return ListOfEntityOutput
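A usage sketch for the function above; the URL is a hypothetical placeholder, not one from the original source:

# Hypothetical usage; example.com stands in for a real page to crawl.
names = IntelligentCrawlUrl('https://example.com/press-release')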
Example #5
def IBMNonTran(df):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12',
        iam_apikey='v8j7M76fx4hOFr35AhLUso35qgmsocV5_WM-Ag0IdKg5',
        url=
        'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/dbf791a6-366c-48d9-81ac-9a08ac7f130c'
    )

    ibm = []

    for i in range(len(df)):
        try:
            response = natural_language_understanding.analyze(
                language=df['langCode'][i],
                text=str(df['review_body'][i]),
                features=Features(sentiment=SentimentOptions())).get_result()
            res = response.get('sentiment').get('document').get('score')

            ibm.append(res)
        except Exception:
            ibm.append('NA')

    df['ibm1'] = ibm
    return df
Example #6
    def analyze_text(self, text):
        if len(text) > 15:
            response = self.natural_language_understanding.analyze(
                text=text,
                features=Features(sentiment=SentimentOptions(),
                                  emotion=EmotionOptions())).get_result()

            print(json.dumps(response, indent=2))
        else:
            response = {
                "usage": {
                    "text_units": 1,
                    "text_characters": 65,
                    "features": 2
                },
                "sentiment": {
                    "document": {
                        "score": 0.0,
                        "label": "neutral"
                    }
                },
                "language": "en",
                "emotion": {
                    "document": {
                        "emotion": {
                            "sadness": 0.0,
                            "joy": 0.0,
                            "fear": 0.0,
                            "disgust": 0.0,
                            "anger": 0.0
                        }
                    }
                }
            }
        return response
Example #7
def SentimentClassify(target_text):
    response_senti = natural_language_understanding.analyze(
        text=target_text,
        features=Features(sentiment=SentimentOptions()),
        language=TextBlob(target_text).detect_language()).get_result()
    sentiscore = response_senti["sentiment"]["document"]["score"]
    return sentiscore
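TextBlob's detect_language() calls an online translation service and raises an exception on very short or ambiguous input, so a more defensive variant might fall back to a default language. A sketch under that assumption; SentimentClassifySafe and the 'en' default are illustrative, not from the original source:

def SentimentClassifySafe(target_text):
    # Assumption: default to English when language detection fails.
    try:
        lang = TextBlob(target_text).detect_language()
    except Exception:
        lang = 'en'
    response_senti = natural_language_understanding.analyze(
        text=target_text,
        features=Features(sentiment=SentimentOptions()),
        language=lang).get_result()
    return response_senti["sentiment"]["document"]["score"]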
Example #8
def getres(file_loc):
    with open(file_loc, 'r+') as f:
        head = f.readline()
        content = f.read()
        req = '<html><body><h2>{0}</h2>{1}</body></html>'.format(head, content)
        text = head + content
    tone_res = tone_analyzer.tone(req, content_type='text/html').get_result()

    res = natural_language_understanding.analyze(
        html=req,
        features=Features(
            categories=CategoriesOptions(limit=1),
            concepts=ConceptsOptions(limit=5),
            keywords=KeywordsOptions(limit=5, sentiment=True, emotion=True),
            sentiment=SentimentOptions(),
            # entities=EntitiesOptions(limit=5, mentions=True, sentiment=True, emotion=True),
        ),
    ).get_result()
    sentiment = res["sentiment"]["document"]["score"]
    concepts = [(concepts["text"], concepts["relevance"])
                for concepts in res["concepts"]]
    categories = (res["categories"][0]["label"].split("/"),
                  res["categories"][0]["score"])
    keywords = [(keywords["text"], keywords["relevance"])
                for keywords in res["keywords"]]
    tones = [(tone["tone_id"], tone["score"])
             for tone in tone_res["document_tone"]["tones"]]
    return (sentiment, concepts, keywords, tones, text)
Example #9
    def concepts(self):
        IBM_dict = {}
        IBM_response = self.naturalLanguageUnderstanding.analyze(
            text=self.text,
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True, limit=10),
                keywords=KeywordsOptions(emotion=True, sentiment=True, limit=10),
                sentiment=SentimentOptions(),
                categories=CategoriesOptions()
                )).get_result()

        sent_dict = {'sentiment': IBM_response['sentiment']['document']['score']}
        IBM_dict['sentiment'] = sent_dict
        
        ent_result = []
        ents = IBM_response['entities']
        for e in ents:
            ent_result.append(e['text'].lower())
        ent_result.sort()
        IBM_dict['entities'] = ent_result
        
        kws = []
        for keyword in IBM_response['keywords']:
            kws.append(keyword['text'].lower())
        kws.sort()
        IBM_dict['keywords'] = kws
        
        cats = []
        for category in IBM_response['categories']:
            cats.append(category['label'])
        IBM_dict['categories'] = cats
        
        return IBM_dict
Example #10
def get_average_sentiment():
    print(request.get_json(force=True))
    company = request.get_json()['company']
    print(company_dict[company])
    url = ('https://newsapi.org/v2/everything?'
        'q="' + company_dict[company] + '"&'
        'from=2018-10-10&'
        'sortBy=relevancy&'
        'apiKey=accb9b5d018348ff86fd7f9565673758&'
        'language=en'
        '&pageSize=5')
    response = requests.get(url)

    response = response.json()
    #print(response)
    if 'articles' in response:
        articles = response['articles']
    else:
        articles = []
    avg = 0
    count = 0

    for article in articles:
        try:
            response = natural_language_understanding.analyze(
                url=article['url'],
                features=Features(sentiment=SentimentOptions())).get_result()

            avg += response['sentiment']['document']['score']
            count += 1
        except Exception:
            print("Error")

    # Average over the articles that were successfully analyzed rather than a
    # hard-coded 5, so failed requests do not skew the score.
    return jsonify({ "sentiment": avg / count if count else 0 })
Example #11
    def process_text(self, conv):
        # Keep the normalized result; previously the ASCII-folded text was
        # computed and then discarded.
        conv = unicodedata.normalize('NFKD', conv).encode(
            'ascii', 'ignore').decode('ascii')
        nlp = self.nlp_api.analyze(
            text=conv,
            language='es',
            features=Features(keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True),
                              categories=CategoriesOptions(),
                              sentiment=SentimentOptions(targets=self.flags)))

        if self.log_file is not None:
            print('INFO: logging NLP to %s\n' % self.log_file)
            with open(self.log_file, 'w') as outfile:
                json.dump(nlp, outfile)

        doc_score = 0
        target_score = []
        try:
            doc_score = nlp['sentiment']['document']['score']
            for target in nlp['sentiment']['targets']:
                target_score.append({
                    'text': target['text'],
                    'score': target['score']
                })
        except KeyError:
            print('INFO: no target found')

        return doc_score, target_score
Example #12
def watson_nlp_analysis(text):

    if text == '':
        return text

    max_limit_one = 10
    max_limit_two = 30

    naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=os.environ['WATSON'],
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    response = naturalLanguageUnderstanding.analyze(
        text=text,
        features=Features(concepts=ConceptsOptions(limit=max_limit_one),
                          categories=CategoriesOptions(limit=max_limit_one),
                          sentiment=SentimentOptions(document=True),
                          emotion=EmotionOptions(document=True),
                          entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=max_limit_two),
                          keywords=KeywordsOptions(
                              emotion=True,
                              sentiment=True,
                              limit=max_limit_two))).get_result()
    return response
Example #13
    def understanding(self):
        if not self.transcription:
            self.transcript()

        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2017-02-27',
            username=os.environ['UNDERSTANDING_USERNAME'],
            password=os.environ['UNDERSTANDING_PASSWORD'])

        self.analysis = natural_language_understanding.analyze(
            text=self.transcription['results'][0]['alternatives'][0]
            ['transcript'],
            features=Features(categories=CategoriesOptions(),
                              concepts=ConceptsOptions(),
                              emotion=EmotionOptions(),
                              entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       mentions=True),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True),
                              relations=RelationsOptions(),
                              sentiment=SentimentOptions()))

        logger.info('Completed analysis of recorded file')
        return self.analysis
Example #14
def sentiment(input):
    try:
        # using IBM Watson
        response = naturalLanguageUnderstanding.analyze(
            text=input,
            # an object that is part of the Watson API
            features=Features(
                sentiment=SentimentOptions(document=None, targets=None))
            ).get_result()

        # The json.dumps/json.loads round trip is redundant; the response is
        # already a plain dict that could be used directly.
        parsed_json = json.loads(json.dumps(response, indent=2))

        # sentiment: negative, 0, or positive
        sentiment = parsed_json['sentiment']
        document = sentiment['document']
        score = document['score']
        sentiment_value = float(score)

    except Exception:
        # fall back to NLTK's VADER analyzer instead
        sentiment_value = sid().polarity_scores(input)['compound']

    print(sentiment_value)
    react_with_sound(sentiment_value)
    return 6
Example #15
def handleMoodLogging():
    # Display the form
    if request.method == 'GET':
        return render_template('index.html')
    else:
        # Validate using credentials
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username=secret_dictionary['username'],
            password=secret_dictionary['password'],
            version='2018-03-16')

        # Grab the text from the user
        journal_contents = request.form['journal_content']
        #print('journal contents: ', journal_contents.encode('ascii', 'ignore'))

        # Make a call to the API with the text passed in
        alchemy_results = natural_language_understanding.analyze(
            text=journal_contents.encode('ascii', 'ignore'),
            features=Features(emotion=EmotionOptions(),
                              sentiment=SentimentOptions()))

        #print 'Writing results to a file:'
        with open('static/mockresponses/emotion_response.json', 'w') as fo:
            fo.write(json.dumps(alchemy_results, indent=2))
        return render_template('gauge.html')
Example #16
    def get_sentiment(self, song, artist):
        warnings.filterwarnings('ignore')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2017-02-27', username="******", password="******")
        response = natural_language_understanding.analyze(
            text=PyLyrics.getLyrics(artist, song),
            features=Features(emotion=EmotionOptions(),
                              sentiment=SentimentOptions()))
        return response.get('sentiment').get('document').get('score')
Example #17
    def __init__(self):
        self.naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
            version='2018-09-21',
            iam_apikey='z22B_pFOjawl36w4CwyWGRs55jVnXE4y464VlyY7o-67',
            url='https://gateway-syd.watsonplatform.net/natural-language-understanding/api'
        )
        self.def_features = Features(sentiment=SentimentOptions(document=True),
                                     emotion=EmotionOptions(document=True))
Example #18
def analizarTexto(USER):
  #Open the file with the name of the user.
  resultfile = open("result_"+USER+".json", "w+")

  targets = [
      'vida', 'Guatemala', 'amor', 'sexo', 'politico',
      'poliltica','Yo', 'sonrisa', 'pais','novio','novia',
      'enojo', 'hermano', 'hermana','mama','papa','familia',
      'deporte', 'relacion'
    ]
  #Get the configuration file
  with open('config.json', 'r') as f:
      config = json.load(f)

  #reading the data file
  datafile = open("data_" +USER+".txt","r")
  data = datafile.read()
  
  #print the data... remove this..
  print(data)

  #Authentication
  natural_language_understanding = NaturalLanguageUnderstandingV1(
      version=config["version"],
      username=config["username"],
      password=config["password"]
  )

  response = natural_language_understanding.analyze(
    text=data,
    features=Features(
      entities=EntitiesOptions(
        emotion=True,
        sentiment=True,
        limit=2),
      keywords=KeywordsOptions(
        emotion=True,
        sentiment=True,
        limit=2),
      sentiment=SentimentOptions(
        targets=targets
      )
      #Doesn't support spanish language yet
      # ,
      # emotion=EmotionOptions(
      #   targets=targets
      # )
    )
  )

  result = str(response)
  print(result)
  resultfile.write(result)
  resultfile.close()
  datafile.close()
Example #19
def get_label(text):
    try:
        response = natural_language_understanding.analyze(
            text=text,
            features=Features(sentiment=SentimentOptions())).get_result()

        return response['sentiment']['document']['label']

    except Exception:
        return 'neutral'
Example #20
def analyzeSentiment(absUrl):
    """
        Analyzes content for general sentiment/tone
    :param absUrl: Article Url
    :returns: floating point value between -1 and 1. -1 being most negative and
              1 being most positive.
    """
    response = natural_language_understanding.analyze(
        url=absUrl, features=Features(sentiment=SentimentOptions()))
    return response['sentiment']['document']['score']
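A usage sketch, assuming the module-level client is configured as above; the URL is a placeholder:

# Hypothetical usage; the URL is a placeholder.
score = analyzeSentiment('https://example.com/news/some-article')
print('Document sentiment: %.2f' % score)  # value in [-1, 1]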
Example #21
def get_ibm_sent(text_sample):
    '''
    Analyze sentiment using IBM nlp API
    '''
    response = natural_language_understanding.analyze(
        text=text_sample, features=Features(sentiment=SentimentOptions()))

    score = response["sentiment"]["document"]["score"]
    #print(json.dumps(response, indent=2))
    return score
Example #22
def analyze_sentiment(news):
    if news['title']:
        analysis = natural_language_understanding.analyze(
            text=news['title'],
            features=Features(sentiment=SentimentOptions())).get_result()

        analyzed = json.dumps(analysis['sentiment']['document']['score'])
        return analyzed
    else:
        return ' '
Example #23
def sentiment(text):
    try:
        response = nlu.analyze(text=text,
                               features=Features(sentiment=SentimentOptions()))

        sentiment = json.dumps(response['sentiment']['document']['label'])
    except Exception as e:
        print(e)
        sentiment = "No Enough Text"

    return sentiment.strip('"')
Example #24
def get_news_sentiment(request):
    try:
        response = natural_language_understanding.analyze(
            url=request.GET.get('url'),
            features=Features(sentiment=SentimentOptions(),
                              emotion=EmotionOptions(),
                              concepts=ConceptsOptions(limit=5),
                              categories=CategoriesOptions()))

        return Response(response)
    except Exception:
        return Response({"error": 'problem retrieving'})
Example #25
    def analyze_sentiment(self, tweet):
        if 'Samsung' in tweet['text']:
            analysis = natural_language_understanding.analyze(
                text=tweet['text'],
                features=Features(sentiment=SentimentOptions(
                    targets=['Samsung']))).get_result()

            analyzed = json.dumps(analysis['sentiment']['targets'][0]['score'])

            return analyzed
        else:
            return ' '
Example #26
def getSentient(data):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')

    response = natural_language_understanding.analyze(
        text=data,
        features=Features(entities=EntitiesOptions(), sentiment=SentimentOptions()))

    print(json.dumps(response, indent=2))
    return json.dumps(response, indent=2)
Example #27
    def get_keywords(self, sentence):
        response = self.natural_language_understanding.analyze(
            text=sentence,
            return_analyzed_text=True,  # boolean, not the string 'True'
            features=Features(concepts=ConceptsOptions(),
                              categories=CategoriesOptions(),
                              relations=RelationsOptions(),
                              semantic_roles=SemanticRolesOptions(),
                              sentiment=SentimentOptions(),
                              entities=EntitiesOptions(),
                              keywords=KeywordsOptions())).get_result()
        # Materialize the map object so Python 3 callers get a reusable list
        # of (text, type) pairs drawn from the detected entities.
        keywords = list(map(lambda x: (x['text'], x['type']), response['entities']))
        return keywords
Example #28
    def getScore(self, articleurl):

        try:
            response = natural_language_understanding.analyze(
                url=articleurl,
                features=Features(sentiment=SentimentOptions()),
                language='en')

            #print(json.dumps(response, indent=2))
            return response['sentiment']['document']['score']
        except Exception:
            return 0
Example #29
def analyze_using_NLU(analysistext):
    res = dict()
    response = natural_language_understanding.analyze(
        text=analysistext,
        features=Features(sentiment=SentimentOptions(),
                          entities=EntitiesOptions(),
                          keywords=KeywordsOptions(),
                          emotion=EmotionOptions(),
                          concepts=ConceptsOptions(),
                          categories=CategoriesOptions(),
                          semantic_roles=SemanticRolesOptions()))
    res['results'] = response
    return json.dumps(res)
Example #30
    def test_text_analyze(self):
        nlu_url = "http://bogus.com/v1/analyze"
        responses.add(responses.POST,
                      nlu_url,
                      body="{\"resulting_key\": true}",
                      status=200,
                      content_type='application/json')
        nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                             url='http://bogus.com',
                                             username='******',
                                             password='******')
        nlu.analyze(Features(sentiment=SentimentOptions()),
                    text="hello this is a test")
        assert len(responses.calls) == 1