def get_api_response(self):
    # Query Watson NLU according to the selected analysis mode.
    if self.API_options == "aspects":
        # Aspect level: entities and keywords, each with emotion and sentiment.
        self.current_api_response = self.nlu.analyze(
            text=self.current_review_text,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=10000),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=10000))).get_result()
    elif self.API_options == "overall":
        # Document level: overall sentiment only.
        self.current_api_response = self.nlu.analyze(
            text=self.current_review_text,
            features=Features(sentiment=SentimentOptions(
                document=True))).get_result()
    else:
        # Default: entities, keywords, and document sentiment in one request.
        self.current_api_response = self.nlu.analyze(
            text=self.current_review_text,
            features=Features(
                entities=EntitiesOptions(emotion=True,
                                         sentiment=True,
                                         limit=10000),
                keywords=KeywordsOptions(emotion=True,
                                         sentiment=True,
                                         limit=10000),
                sentiment=SentimentOptions(document=True))).get_result()
def get_sentiment(self, service, sentence):
    """Run the IBM NLU API and get the sentiment label for a given sentence."""
    try:
        response = service.analyze(
            text=sentence,
            features=Features(sentiment=SentimentOptions())).get_result()
    except Exception as e:
        print(e)  # e.message does not exist in Python 3
        return "neutral"  # If unable to categorize, fall back to "neutral"
    return response['sentiment']['document']['label']
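Most snippets in this collection assume a pre-built NLU client bound to a module-level name such as service or natural_language_understanding. A minimal setup sketch, assuming placeholder credentials for your own IBM Cloud instance:

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import Features, SentimentOptions

# Placeholders: substitute the API key and URL of your own NLU instance.
authenticator = IAMAuthenticator('YOUR_API_KEY')
service = NaturalLanguageUnderstandingV1(version='2020-08-01',
                                         authenticator=authenticator)
service.set_service_url('YOUR_SERVICE_URL')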
Example #3
def analyze_tweet_sentiment(texts):
    tweet_and_sentiment = {}  # was undefined; collect per-tweet labels here
    negative = 0
    neutral = 0
    positive = 0
    for text in texts:
        response = natural_language_understanding.analyze(
            features=Features(sentiment=SentimentOptions(targets=[text])),
            text=text).get_result()
        final = response['sentiment']['targets'][0]['label']
        if final == 'negative':
            negative += 1
            tweet_and_sentiment[text] = 'negative'
        elif final == 'positive':
            positive += 1
            tweet_and_sentiment[text] = 'positive'
        else:
            neutral += 1
            tweet_and_sentiment[text] = 'neutral'
    print("Positive tweets: " + str(positive) + ". Neutral tweets: " +
          str(neutral) + ". Negative tweets: " + str(negative) + ".")
    percent_positivity = positive / len(texts) * 100  # a true percentage
    print("The percentage of positive Tweets is: " + str(percent_positivity))
    return tweet_and_sentiment
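For reference, when targets are requested the sentiment block of the response carries one entry per target, which is why targets[0]['label'] yields the tweet's label. A representative (not real) payload:

# Illustrative shape of response['sentiment'] with targeted sentiment:
sample_sentiment = {
    "targets": [{"text": "some tweet", "score": 0.52, "label": "positive"}],
    "document": {"score": 0.52, "label": "positive"},
}
assert sample_sentiment["targets"][0]["label"] == "positive"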
def main(request):
    query = request.GET.get("q")
    args = {}
    if query:
        content = get_content(query)
        algo = client.algo('SummarAI/Summarizer/0.1.3')
        algo.set_options(timeout=300)  # optional
        summary = algo.pipe(content).result  # pass content directly; no need for an f-string copy
        args['summary'] = summary['summarized_data']
        authenticator = IAMAuthenticator(
            'o48P8tGlhPPecmxPmu_autXoYp4U13mnb7dggkkiyk22')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)
        natural_language_understanding.set_service_url(
            "https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/972bec20-f75a-46fd-bdbc-9840fb7f7b16"
        )

        response = natural_language_understanding.analyze(
            url=query,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=5),
                              categories=CategoriesOptions(limit=1),
                              sentiment=SentimentOptions(
                                  targets=summary['auto_gen_ranked_keywords'])
                              )).get_result()
        args['category'] = response['categories'][0]['label'].replace("/", ", ")
        args['category_score'] = f"{response['categories'][0]['score'] * 100:.1f}%"
        args['targets'] = response['sentiment']['targets']
        args['content_sentiment'] = response['sentiment']['document']['label']

    return render(request, 'index.html', args)
Example #5
def sentiment(text):
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(sentiment=SentimentOptions(
            document=True))).get_result()

    return json.dumps(response["sentiment"])
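The function serializes only the sentiment block of the response; an illustrative call and output (values invented for the example):

print(sentiment("I love this product"))
# e.g. {"document": {"score": 0.94, "label": "positive"}}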
def understand(news):
    # Analyze up to `maxresults` article URLs from a NewsAPI-style response.
    urllist = [
        news["articles"][i]["url"]
        for i in range(0, min(maxresults, len(news["articles"])))
    ]
    values = []
    for j, url in enumerate(urllist):
        value = {}
        response = natural_language_understanding.analyze(
            url=url,
            features=Features(sentiment=SentimentOptions(),
                              emotion=EmotionOptions())).get_result()
        value['url'] = response['retrieved_url']
        value['sentiment'] = response["sentiment"]["document"]["score"]
        emotion = response["emotion"]["document"]["emotion"]
        value['sadness'] = emotion["sadness"]
        value['joy'] = emotion["joy"]
        value['fear'] = emotion["fear"]
        value['disgust'] = emotion["disgust"]
        value['anger'] = emotion["anger"]
        values.append(value)
        print("Analyzed and saved article " + str(j + 1))

    return values
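understand() assumes a NewsAPI-style payload and a module-level maxresults cap defined elsewhere; a hypothetical invocation to illustrate the expected input shape:

maxresults = 5  # assumed global cap on articles analyzed
news = {"articles": [{"url": "https://example.com/story-1"},
                     {"url": "https://example.com/story-2"}]}
for row in understand(news):
    print(row['url'], row['sentiment'], row['joy'])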
Example #7
def sentimentKeywordsWatson(file):

    df = pd.read_csv(file, sep=';')
    df['score'] = None
    df['sentiment'] = None
    df['keywords'] = None

    authenticator = IAMAuthenticator(config('apikey'))
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(config('url'))

    for i in range(len(df)):
        print('Applying AI... fetching keywords for row {}'.format(i))
        tweet = df.loc[i, 'text_sanitized']

        try:
            response = natural_language_understanding.analyze(
                text=tweet,
                features=Features(keywords=KeywordsOptions(
                    limit=1))).get_result()

            # get_result() already returns a dict; no json round-trip needed
            keywords = [k['text'].lower() for k in response['keywords']]
            df.loc[i, 'keywords'] = keywords

        except Exception:
            pass  # leave the row's keywords as None

    for i in range(len(df)):
        print('Applying AI... computing score and sentiment for row {}'.format(i))
        text_sanitized = df.loc[i, 'text_sanitized']

        try:
            response = natural_language_understanding.analyze(
                text=text_sanitized,
                features=Features(sentiment=SentimentOptions())).get_result()

            df.loc[i, 'score'] = response['sentiment']['document']['score']
            df.loc[i, 'sentiment'] = response['sentiment']['document']['label']

        except Exception:
            pass  # leave the row's score/sentiment as None

    file_name = 'dataSet_final.csv'
    df.to_csv(file_name, sep=';')

    print('File {} generated successfully!'.format(file_name))
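config() here presumably comes from python-decouple, reading the credentials from a .env file; a hypothetical invocation under that assumption:

from decouple import config  # assumed source of config()
# .env (assumed): apikey=<your NLU api key> and url=<your NLU service url>
sentimentKeywordsWatson('dataSet.csv')  # hypothetical CSV with a text_sanitized column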
Example #8
def extractFeatures(text, followers, friends, verified):
    text = text.replace("-", " ")
    sentiment = 0.0
    entity_num = 0.0
    word_count = len(text.split())
    char_count = len(text)
    avg_word_len = char_count / word_count if word_count else 0.0  # avoid ZeroDivisionError on empty text
    follower_count = float(followers)
    anger = 0.0
    disgust = 0.0
    fear = 0.0
    joy = 0.0
    sadness = 0.0
    is_quote = 0.0
    friends = float(friends)
    verified = 1.0 if verified == "true" else 0.0

    try:
        # sentiment analysis
        sentiment_response = natural_language_understanding.analyze(
            text=text,
            features=Features(sentiment=SentimentOptions())).get_result()
        sentiment = sentiment_response['sentiment']['document']['score']

        # entity analysis
        entities_response = natural_language_understanding.analyze(
            text=text,
            features=Features(entities=EntitiesOptions(
                sentiment=True, emotion=True))).get_result()
        sentiment_sum = 0
        for entity in entities_response['entities']:
            sentiment_sum += entity['sentiment']['score'] * entity['relevance']
            entity_num += 1
            anger += entity['emotion']['anger'] * entity['relevance']
            disgust += entity['emotion']['disgust'] * entity['relevance']
            fear += entity['emotion']['fear'] * entity['relevance']
            joy += entity['emotion']['joy'] * entity['relevance']
            sadness += entity['emotion']['sadness'] * entity['relevance']

        sentiment = sentiment + sentiment_sum / 2
    except Exception:
        pass  # keep the zeroed defaults if an API call fails

    features = {  # renamed from `dict` to avoid shadowing the builtin
        "sentiment": sentiment,
        "entity_num": entity_num,
        "word_count": word_count,
        "char_count": char_count,
        "avg_word_len": avg_word_len,
        "follower_count": follower_count,
        "anger": anger,
        "disgust": disgust,
        "fear": fear,
        "joy": joy,
        "sadness": sadness,
        "is_quote": is_quote,
        "friends": friends,
        "verified": verified
    }
    return jsonify(features)
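jsonify() only works inside an active Flask application context, so extractFeatures is meant to be called from a route; a minimal sketch with a hypothetical endpoint:

from flask import Flask, request, jsonify

app = Flask(__name__)

@app.route('/features')
def features_endpoint():  # hypothetical route wrapping extractFeatures above
    return extractFeatures(request.args.get('text', ''),
                           request.args.get('followers', '0'),
                           request.args.get('friends', '0'),
                           request.args.get('verified', 'false'))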
Example #9
def sn(rr):
    # Category analysis of the page
    pagina = rr
    print(pagina)
    response = natural_language_understanding.analyze(
        url=pagina,
        features=Features(categories=CategoriesOptions(limit=3))).get_result()
    print(json.dumps(response, indent=2))

    # Concepts analysis
    response2 = natural_language_understanding.analyze(
        url=pagina,
        features=Features(concepts=ConceptsOptions(limit=3))).get_result()

    print(json.dumps(response2, indent=2))

    # Emotion (disabled)
    # response3 = natural_language_understanding.analyze(
    #     url=pagina,
    #     features=Features(emotion=EmotionOptions())).get_result()
    # print(json.dumps(response3, indent=2))

    # Sentiment
    response4 = natural_language_understanding.analyze(
        url=pagina,
        features=Features(sentiment=SentimentOptions())).get_result()

    print(json.dumps(response4, indent=2))
Example #10
def get_sentiments(cls, review):
    response = cls.natural_language_understanding.analyze(
        text=review,
        features=Features(sentiment=SentimentOptions()),
        language='en',
    ).get_result()
    return response
Example #11
def analyse_sentiment(sentence):
    """Calculates the compound index of the sentence using IBM Watson Natural Language Understanding API"""
    response = nlu.analyze(
        text=sentence,
        language="en",
        features=Features(sentiment=SentimentOptions())).get_result()
    return float(response["sentiment"]["document"]["score"])
Example #12
def set_response(self, tweet_text):
    response = self.natural_language_understanding.analyze(
        text=tweet_text,
        features=Features(sentiment=SentimentOptions(document=True),
                          entities=EntitiesOptions(sentiment=True)),
        language='en').get_result()
    return response
Example #13
def watson(input_text):
    response = service.analyze(
        text=input_text,
        features=Features(sentiment=SentimentOptions(),
                          keywords=KeywordsOptions())).get_result()
    print(json.dumps(response, indent=2))
    return json.dumps(response, indent=2)
Example #14
    def extractSentiment(self, text, keywords):
        try:
            if isinstance(keywords, str):
                if ',' in keywords:
                    keywords = keywords.split(',')
                else:
                    keywords = [keywords]
            else:
                # print(type(keywords))
                # print(keywords.keys())
                keywords = [
                    word['text'] for word in keywords['keywords']
                    if (word['relevance'] > 0.50)
                ]

            response = self.natural_language_understanding.analyze(
                text=text,
                features=Features(sentiment=SentimentOptions(targets=keywords,
                                                             document=True)),
                language='en').get_result()
            # print(response)
        except Exception:
            return {
                'message': traceback.format_exc(),
                'status': 400,
                'success': False
            }

        return {'data': response, 'success': True}
Example #15
def analyze_review_sentiments(dealerreview):

    api_key = 'aeTwGNkBiaWt6EX-PagfARYlLsmNPq_k7IhPr0XUxETj'
    url = 'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/0642a3b4-3ca7-4e5d-80c3-d642010acbd2'
    '''
    params = dict()
    params["text"] = kwargs["text"]
    params["version"] = kwargs["version"]
    params["features"] = kwargs["features"]
    params["return_analyzed_text"] = kwargs["return_analyzed_text"]
    response = requests.get(url, params=params, headers={'Content-Type': 'application/json'},
                                    auth=HTTPBasicAuth('apikey', api_key))
    '''
    authenticator = IAMAuthenticator(api_key)

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2020-08-01', authenticator=authenticator)

    natural_language_understanding.set_service_url(url)

    try:
        response = natural_language_understanding.analyze(
            text=dealerreview,
            features=Features(sentiment=SentimentOptions())).get_result()
        return response['sentiment']['document']['label']
    except Exception:
        return "neutral"  # fall back if the API call fails
Example #16
    def one(self):
        import json
        from ibm_watson import NaturalLanguageUnderstandingV1
        from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
        from ibm_watson.natural_language_understanding_v1 import Features, EmotionOptions, SentimentOptions

        authenticator = IAMAuthenticator(
            '-GEDGacgnI36ctk77Aa4X5k3PAXBA_AaRQIxp6G71sOP')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)
        natural_language_understanding.set_service_url(
            'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/b61e5fb9-726b-4cba-8b4b-12f1403ed4a1'
        )

        # ii = "Hello, I'm having a problem with your service. Nothing is working well. The service here is very bad. I am really very upset. I was expecting better than that. And my service has been stopped since yesterday. I have been suffering from this problem for a long time and cannot find a solution. The service here is bad most of times. Why you do not solve these problems. Some had left your service for this reason. The network is weak all the time, and it stops at the call. why this happen!? I wait. I'm fed up with complaining from the service."
        ii = "Hello, I need some help. I've subscribed to some news services and want to cancel them.They were not helpful with me plus they used a lot of balance. I feel bad because I used this service. Please remove it and try to improve these services. It has more harm than good. I hope to improve some services and offer some offers soon. I have another problem. My service has been disabled since yesterday. I have been suffering from this problem for a different times and cannot find a solution. It affects my work and communication in some important times."
        response1 = natural_language_understanding.analyze(
            text=ii,
            features=Features(emotion=EmotionOptions(
                targets=[ii.split()[1]]))).get_result()

        response2 = natural_language_understanding.analyze(
            text=ii,
            features=Features(sentiment=SentimentOptions(
                targets=[ii.split()[1]]))).get_result()
        global sad, joy, fear, disgust, anger, sentiment_label, sentiment
        sad = response1['emotion']['document']['emotion']['sadness']
        joy = response1['emotion']['document']['emotion']['joy']
        fear = response1['emotion']['document']['emotion']['fear']
        disgust = response1['emotion']['document']['emotion']['disgust']
        anger = response1['emotion']['document']['emotion']['anger']
        sentiment_label = response2['sentiment']['document']['label']
        sentiment = response2['sentiment']['document']['score']

        print(sad, joy, fear, disgust, anger, sentiment_label, sentiment)
Example #17
def analyze_review_sentiments(text):
    """
    url = 'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/56571563-69dc-42a1-be00-f8358659585c/v1/analyze?version=2019-07-12'
    api_key= '8F95_SowNELZg2RrQBm3BuwZf3R0TVdFW3-qDxctbJte'
    params = dict()
    print(text)
    params["text"] = text  
    params["features"] = {
        "sentiment": { "targets": ["apples", "oranges", "broccoli"] },
        "keywords": {
        "emotion": True
        }
    }  
    response = requests.post(url, headers={'Content-Type': 'application/json'},
                            auth=HTTPBasicAuth('apikey', api_key), data=params)
    print(response)
    """
    authenticator = IAMAuthenticator('8F95_SowNELZg2RrQBm3BuwZf3R0TVdFW3-qDxctbJte')
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2020-08-01',
        authenticator=authenticator
    )

    natural_language_understanding.set_service_url('https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/56571563-69dc-42a1-be00-f8358659585c')
    try:
        response = natural_language_understanding.analyze(
            text=text,
            features=Features(sentiment=SentimentOptions())).get_result()

        print(json.dumps(response, indent=2))
        return response['sentiment']['document']['label']
    except Exception:
        return 'neutral'
def analise(texto):
    lista = []
    for t in texto:
        response = services.analyze(
            text=t,
            language='pt',
            features=Features(sentiment=SentimentOptions())).get_result()
        lista.append(response)
    return lista
Example #19
def getWatsonNLP(text):

    response = watsonService.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(),
                          keywords=KeywordsOptions(),
                          sentiment=SentimentOptions())).get_result()

    return response
Example #20
def get_nlu_tone_analysis(tweet):
    """
    This function takes input as a tweet and
    returns their sentiment (Positive, Neutral or Negative),
    concepts (high level concepts or ideas),
    emotions (anger, disgust, fear, joy, or sadness),
    and tones (emotional and language tone)
    """
    ## Encode ASCII
    tweet = tweet.encode(encoding='ASCII', errors='ignore').decode('ASCII')
    ## Remove URLs
    tweet_cleaned = re.sub(r'http\S+', '', tweet)
    if tweet_cleaned:

        ## Call NLU API
        nlu_analysis = natural_language_understanding.analyze(
            text=tweet_cleaned,
            language='en',
            features=Features(concepts=ConceptsOptions(limit=2),
                              sentiment=SentimentOptions(),
                              emotion=EmotionOptions())).get_result()

        concepts = ', '.join(
            [concept['text'] for concept in nlu_analysis['concepts']])
        sentiment = nlu_analysis['sentiment']['document']['label']
        emotions = nlu_analysis['emotion']['document']['emotion']
        dominant_emotion = max(emotions, key=emotions.get)

        ## Call tone analyzer API (a dict input needs content_type='application/json')
        tone_analysis = tone_analyzer.tone(
            {'text': tweet_cleaned},
            content_type='application/json').get_result()

        tones = ', '.join([
            tone['tone_name']
            for tone in tone_analysis['document_tone']['tones']
        ])

        ## Create result table
        result = {
            'tweet': tweet,
            'sentiment': sentiment,
            "emotion": dominant_emotion,
            'concepts': concepts,
            'tones': tones
        }
    else:
        result = {
            'tweet': tweet,
            'sentiment': '',
            "emotion": '',
            'concepts': '',
            'tones': ''
        }

    return result
Example #21
def test_text_analyze(self):
    nlu_url = "https://gateway.watsonplatform.net/natural-language-understanding/api/v1/analyze"
    responses.add(responses.POST, nlu_url,
                  body="{\"resulting_key\": true}", status=200,
                  content_type='application/json')
    authenticator = BasicAuthenticator('username', 'password')
    nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                         authenticator=authenticator)
    nlu.analyze(Features(sentiment=SentimentOptions()),
                text="hello this is a test")
    assert len(responses.calls) == 1
def sentiment_analysis(data):
    service = NaturalLanguageUnderstandingV1(
        version='2018-03-16',
        url='https://gateway-syd.watsonplatform.net/natural-language-understanding/api',
        iam_apikey='HFjq-HFR7KeRxS09ZVWkO4mAofmcvJeU2OHxmzJ35jTN')

    response = service.analyze(
        text=data,
        features=Features(sentiment=SentimentOptions())).get_result()
    return response["sentiment"]["document"]["score"]
Example #23
    def analyze(self, text):
        if text == "empty" or text == "":
            return 5.0

        response = self.analyzer.analyze(
            text=text,
            language='en',
            features=Features(sentiment=SentimentOptions(
                document=True))).get_result()
        # Map the [-1, 1] sentiment score onto a [0, 10] scale.
        return (float(response["sentiment"]["document"]["score"]) + 1) * 5
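A quick sanity check of the mapping: -1 maps to 0, 0 maps to the neutral midpoint 5, and 1 maps to 10.

for score in (-1.0, 0.0, 0.4, 1.0):
    print(score, '->', (score + 1) * 5)  # e.g. 0.4 -> 7.0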
Example #24
def getNews(publisher, keyword):
    # URL PARAMETERS
    url = "https://newsapi.org/v2/everything?"

    keyword = keyword.replace(" ", "%20")  # Make it URL friendly
    url += "q=" + keyword

    if publisher != "all":
        url += "&sources=" + publisher

    url += "&language=en&sortBy=publishedAt&apiKey=8916674ee011411aae3f5d83992abd18"

    # Get the returned json and pull the info we need
    jsonurl = urlopen(url)
    fulltext = json.loads(jsonurl.read())

    if fulltext["totalResults"] == 0:
        return "none"

    counter = 0  # Used in HTML for helping with the favourite feature
    sentiments = []  # For calculating the average overall sentiment
    results = {}  # Dictionary to return to POST for fancy formatting
    for article in fulltext["articles"]:
        title = article["title"]
        description = article["description"]
        url = article["url"]
        author = article["author"]
        date = article["publishedAt"]
        response = natural_language_understanding.analyze(
            text=description,
            features=Features(sentiment=SentimentOptions())).get_result()

        sentimentLabel = response['sentiment']['document']['label']
        sentimentScore = response['sentiment']['document']['score']
        sentiments.append(sentimentScore)
        sentiment = sentimentLabel + ": " + str(sentimentScore)

        results[title] = [url, sentiment, author, date,
                          counter]  # Add dictionary entry
        counter += 1
        # Throttle between calls; otherwise IBM rejects rapid successive requests.
        time.sleep(0.5)

    sentimentAverage = round(sum(sentiments) / len(sentiments), 5)

    os = ""  # Overall Sentiment
    if sentimentAverage > 0.0:
        os = ("Overall Sentiment: POSITIVE (" + str(sentimentAverage) + ")")
    elif sentimentAverage < 0.0:
        os = ("Overall Sentiment: NEGATIVE (" + str(sentimentAverage) + ")")
    else:
        os = ("Overall Sentiment: NEUTRAL (0)")

    return [results, os]
Example #25
def analyze(txt):
    # Retry until the API call succeeds; note this loops forever on a permanent error.
    response = None
    while response is None:
        try:
            response = natural_language_understanding.analyze(
                text=txt,
                features=Features(sentiment=SentimentOptions(
                    document=False, targets=[""]))).get_result()
        except Exception:
            pass
    return response
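The loop above never gives up, so a permanent failure (for example, an unsupported language) would spin forever. A bounded-retry variant, my own sketch rather than the original author's design:

import time

def analyze_with_retries(txt, max_retries=3):
    # Give up after max_retries failed attempts instead of looping forever.
    for attempt in range(max_retries):
        try:
            return natural_language_understanding.analyze(
                text=txt,
                features=Features(sentiment=SentimentOptions())).get_result()
        except Exception:
            time.sleep(2 ** attempt)  # simple exponential backoff
    return None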
Example #26
def sentiment_and_keyword(st, service=service):
    """
    Make a request to the IBM server to obtain keywords and the sentiment of the text to be analyzed.
    return: JSON with the analyses of the given text
    """
    return service.analyze(text=st,
                           features=Features(
                               keywords=KeywordsOptions(sentiment=True,
                                                        emotion=True,
                                                        limit=3),
                               sentiment=SentimentOptions())).get_result()
Example #27
def get_sentiment_emotions(input_text):
    response = natural_language_understanding.analyze(
        text=input_text,
        features=Features(
            emotion=EmotionOptions(document=True),
            sentiment=SentimentOptions(document=True))).get_result()
    #print(json.dumps(response, indent=2))
    data = {
        'sentiment': response["sentiment"]["document"],
        'emotion': response["emotion"]["document"]["emotion"]
    }
    return data
    def senti(self, keywords, text):
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-11-16',
            iam_apikey=aggregation_api.key,
            url=aggregation_api.url)

        response = natural_language_understanding.analyze(
            text=text,
            features=Features(sentiment=SentimentOptions(
                targets=keywords))).get_result()

        print(json.dumps(response, indent=2))
Example #29
def test_url_analyze(self):
    nlu_url = "https://gateway.watsonplatform.net/natural-language-understanding/api/v1/analyze"
    responses.add(responses.POST, nlu_url,
                  body="{\"resulting_key\": true}", status=200,
                  content_type='application/json')
    authenticator = BasicAuthenticator('username', 'password')
    nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                         authenticator=authenticator)
    nlu.analyze(Features(sentiment=SentimentOptions(),
                         emotion=EmotionOptions(document=False)),
                url="http://cnn.com",
                xpath="/bogus/xpath", language="en")
    assert len(responses.calls) == 1
Example #30
def sentiment_analysis(text):
    print('Performing sentiment analysis')
    # initialize IBM NLU client (older SDK style with iam_apikey/url kwargs)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=ibmConfig.iam_apikey,
        url='https://gateway.watsonplatform.net/natural-language-understanding/api')
    # send text to IBM Cloud to fetch the analysis result
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(sentiment=SentimentOptions())).get_result()
    return response