def understand(news):
    urllist = [
        news["articles"][i]["url"]
        for i in range(min(maxresults, len(news["articles"])))
    ]
    values = []
    for idx, url in enumerate(urllist):
        value = {}
        response = natural_language_understanding.analyze(
            url=url,
            features=Features(sentiment=SentimentOptions(),
                              emotion=EmotionOptions())).get_result()
        value['url'] = response['retrieved_url']
        value['sentiment'] = response["sentiment"]["document"]["score"]
        # Copy each document-level emotion score into the record
        emotions = response["emotion"]["document"]["emotion"]
        for emotion in ('sadness', 'joy', 'fear', 'disgust', 'anger'):
            value[emotion] = emotions[emotion]
        values.append(value)
        print("Analyzed and saved article " + str(idx + 1))

    return values
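
The function above relies on two module-level names it never defines: maxresults and an authenticated natural_language_understanding client. A minimal setup sketch (the API key and service URL are placeholders, and the `news` shape is assumed to match a NewsAPI-style response):

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import (
    Features, SentimentOptions, EmotionOptions)

maxresults = 10
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12', authenticator=IAMAuthenticator('<api key>'))
natural_language_understanding.set_service_url('<service URL>')

# `news` is expected to look like:
# {"articles": [{"url": "https://example.com/story"}, ...]}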
Example #2
def result(request):
    auth = tweepy.OAuthHandler('', '')
    auth.set_access_token('', '')
    api = tweepy.API(auth)
    query = request.GET['query']
    data = ''
    for tweet in tweepy.Cursor(api.search, q=query).items(500):
        data = data + tweet.text
        print(tweet.text)
    authenticator = IAMAuthenticator('')
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url('')
    data = re.sub(r'\w+:\/{2}[\d\w-]+(\.[\d\w-]+)*(?:(?:\/[^\s/]*))*', '',
                  data)
    data = re.sub(r'[^A-Za-z0-9\s]+', '', data)
    data = data.lower()
    print(data)
    response = natural_language_understanding.analyze(
        text=data, features=Features(emotion=EmotionOptions()),
        language='en').get_result()
    s = response['emotion']['document']['emotion']
    for i in s:
        s[i] = "{:.2f}".format(abs(float(s[i]) * 100))
    return render(request, 'result.html', s)
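
The view above leaves its imports implicit; a sketch of what it likely needs (exact module layout may differ):

import re
import tweepy
from django.shortcuts import render
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson.natural_language_understanding_v1 import Features, EmotionOptions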
def main():

    #NLU service setup
    server = 'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com'
    IMAGE_API_KEY = os.getenv('NARURALANALYSERAPI')
    naturalLanguageAnalyser = NaturalLanguageUnderstandingV1(
        version='2018-03-19', authenticator=IAMAuthenticator(IMAGE_API_KEY))
    naturalLanguageAnalyser.set_service_url(server)

    #Example text
    text = 'Team, I know that times are tough! Product '\
           'sales have been disappointing for the past three '\
           'quarters. We have a competitive product, but we '\
           'need to do a better job of selling it!'

    response = naturalLanguageAnalyser.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(mentions=True,
                                                   emotion=True,
                                                   sentiment=True,
                                                   limit=10),
                          emotion=EmotionOptions(),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=10),
                          relations=RelationsOptions(),
                          syntax=SyntaxOptions(sentences=True))).get_result()
    print("Start")
    print(json.dumps(response, indent=2))
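
A usage note: invoking the script directly only needs the standard entry-point guard (plus imports for os, json, the NLU client, and the Features option classes used above):

if __name__ == '__main__':
    main()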
Example #4
def analyzeFunc():
    if 'data' in request.args:
        text = request.args['data']

        authenticatorNLU = IAMAuthenticator(
            'rW0-13R2RqRbko3bNzOaz1E8toSIy2qH1019AWiHkMZ9')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticatorNLU)
        natural_language_understanding.set_service_url(
            'https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
        )
        emotions = natural_language_understanding.analyze(
            text=text,
            features=Features(emotion=EmotionOptions(
                document=True))).get_result()

        authenticatorPI = IAMAuthenticator(
            'uA6GpdKCXJyJCqJyhQEqwH9jSJxqlJhgYq7-uBBfPYL5')
        personality_insights = PersonalityInsightsV3(
            version='2017-10-13', authenticator=authenticatorPI)
        personality_insights.set_service_url(
            'https://gateway-lon.watsonplatform.net/personality-insights/api')
        personality = personality_insights.profile(
            text.encode('utf-8'), 'application/json').get_result()

        return jsonify(emotions=emotions, personality=personality)
Example #5
def getEmotions(text):
    try:
        return nlu.analyze(text=text,
                           features=Features(emotion=EmotionOptions(
                               document=True))).get_result()
    except Exception as err:
        return {'err': True, 'errMsg': str(err)}
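
Hypothetical usage, assuming `nlu` is an authenticated NaturalLanguageUnderstandingV1 client defined at module level; the error dict lets callers branch without their own try/except:

result = getEmotions("I am thrilled with how this turned out!")
if result.get('err'):
    print('NLU call failed:', result['errMsg'])
else:
    print(result['emotion']['document']['emotion'])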
Example #6
def libreria(request):
    querySet = request.GET.get("buscar")
    print('Searching: ', querySet)
    car = []
    if querySet:
        datos = pd.read_csv('static/book_data1.csv', encoding='ISO-8859-1')
        descripcion = datos['book_title']
        dfs = None
        # enumerate keeps `pos` in step with the row being analyzed
        for pos, i in enumerate(descripcion):
            try:
                dfs = service.analyze(html=i,
                                      features=Features(emotion=EmotionOptions(
                                          targets=[querySet]))).get_result()
                car.append(datos['book_title'][pos])
                car.append(datos['book_desc'][pos])
                car.append(datos['image_url'][pos])
                print(datos['book_title'][pos])
            except Exception:
                print('No match found')
                if dfs is None:
                    car.append(datos['book_title'][pos])
                    car.append(datos['book_desc'][pos])
                    car.append(datos['image_url'][pos])

        print(car)
        descripciones = Libro.objects.filter(book_title=querySet)

    return render(request, 'libreria.html', {'lib': car})
def annotate_file(nlu, in_file, offset=0, n_steps=10000):
    with open(in_file, encoding='utf8') as f:
        lines = f.readlines()

    with open(in_file, 'w', encoding='utf8') as f:
        f.write(lines[0].strip() + '\tfear\tsadness\tjoy\tanger\tdisgust\n')
        for idx, line in enumerate(lines[1:]):
            if idx < offset or idx >= offset + n_steps:
                f.write(line)
                continue
            text = line.split('\t')[4]
            try:
                scores = nlu.analyze(
                    text=text,
                    language='en',
                    features=Features(emotion=EmotionOptions()))
                scores = scores.get_result()['emotion']['document']['emotion']
                f.write(line.strip())
                for e in ['fear', 'sadness', 'joy', 'anger', 'disgust']:
                    f.write('\t' + str(scores[e]))
                f.write('\n')
                if idx % 250 == 0:
                    print(idx, text, scores)
            except ApiException as err:
                print('ApiException:', err)
                print('Successfully added emotion scores to instances ' +
                      str(offset) + '-' + str(idx - 1) + '.')
                print('Will copy the lines for this instance and the remaining'
                      ' ones, so you can rerun this method on the same file'
                      ' after fixing the bug.')
                f.write(line)
                # Stop analyzing; remaining lines will be copied through verbatim
                n_steps = idx - offset
Example #8
def get_emotions(df, nlu_api_key, nlu_base_url):
    """ Detects anger, disgust, fear, joy, or sadness that is conveyed in the contents
    from a Pandas DataFrame's 'text' column.
    
    :param DataFrame df: DataFrame containing an 'id', 'text' and 'text_en' column
    :param str subscription_key: MS Azure subscription key
    :param str text_analytics_base_url: endpoint for the Text Analytics REST API
    :return: Values of the outputs to the API calls (list of tuples)
    :rtype: list
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16', iam_apikey=nlu_api_key, url=nlu_base_url)

    value_list = []
    progress_bar = tqdm(df.itertuples(), total=df.shape[0])
    progress_bar.set_description('Updating emotions')
    for row in progress_bar:
        try:
            response = natural_language_understanding.analyze(
                text=row.text,
                features=Features(emotion=EmotionOptions())).get_result()
            emotions = response['emotion']['document']['emotion']
            values = (emotions['anger'], emotions['disgust'], emotions['fear'],
                      emotions['joy'], emotions['sadness'], row.id)
            value_list.append(values)
        except Exception:
            values = ('N/A', 'N/A', 'N/A', 'N/A', 'N/A', row.id)
            value_list.append(values)
    return value_list
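
A sketch of a call with placeholder credentials; the DataFrame needs at least 'id' and 'text' columns:

import pandas as pd

df = pd.DataFrame({'id': [1, 2],
                   'text': ['What a wonderful day!', 'This is dreadful.']})
rows = get_emotions(df, '<nlu api key>', '<nlu base url>')
# each tuple: (anger, disgust, fear, joy, sadness, id)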
def analyze(text):
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(
            entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),
            # keywords=KeywordsOptions(emotion=True, sentiment=True, limit=2),
            emotion=EmotionOptions())).get_result()

    print(json.dumps(response, indent=2))
    # get_result() already returns a dict, so no json round-trip is needed
    emotions = response['emotion']['document']['emotion']

    # Pick the emotion with the highest score
    max_score = 0
    emo = ""
    for key, value in emotions.items():
        if value > max_score:
            max_score = value
            emo = key

    return emo

#mystring = "I feel hungry and cold"
#analyze(mystring)
Example #10
    def one(self):
        import json
        from ibm_watson import NaturalLanguageUnderstandingV1
        from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
        from ibm_watson.natural_language_understanding_v1 import Features, EmotionOptions, SentimentOptions

        authenticator = IAMAuthenticator(
            '-GEDGacgnI36ctk77Aa4X5k3PAXBA_AaRQIxp6G71sOP')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)
        natural_language_understanding.set_service_url(
            'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/b61e5fb9-726b-4cba-8b4b-12f1403ed4a1'
        )

        # ii = "Hello, I'm having a problem with your service. Nothing is working well. The service here is very bad. I am really very upset. I was expecting better than that. And my service has been stopped since yesterday. I have been suffering from this problem for a long time and cannot find a solution. The service here is bad most of times. Why you do not solve these problems. Some had left your service for this reason. The network is weak all the time, and it stops at the call. why this happen!? I wait. I'm fed up with complaining from the service."
        ii = "Hello, I need some help. I've subscribed to some news services and want to cancel them.They were not helpful with me plus they used a lot of balance. I feel bad because I used this service. Please remove it and try to improve these services. It has more harm than good. I hope to improve some services and offer some offers soon. I have another problem. My service has been disabled since yesterday. I have been suffering from this problem for a different times and cannot find a solution. It affects my work and communication in some important times."
        response1 = natural_language_understanding.analyze(
            text=ii,
            features=Features(emotion=EmotionOptions(
                targets=[ii.split()[1]]))).get_result()

        response2 = natural_language_understanding.analyze(
            text=ii,
            features=Features(sentiment=SentimentOptions(
                targets=[ii.split()[1]]))).get_result()
        global sad, joy, fear, disgust, anger, sentiment_label, sentiment
        sad = response1['emotion']['document']['emotion']['sadness']
        joy = response1['emotion']['document']['emotion']['joy']
        fear = response1['emotion']['document']['emotion']['fear']
        disgust = response1['emotion']['document']['emotion']['disgust']
        anger = response1['emotion']['document']['emotion']['anger']
        sentiment_label = response2['sentiment']['document']['label']
        sentiment = response2['sentiment']['document']['score']

        print(sad, joy, fear, disgust, anger, sentiment_label, sentiment)
def extract(filename, row):
    reviews = row["Review"]
    user_contrib = row["Contribution"]
    hotel_name = row["Hotel Name"]
    recency = row["Recency"]
    for sent in nlp(str(reviews)).sents:
        sent_lemma = lemmatize(sent, nlp)
        try:
            response = natural_language_understanding.analyze(
                text=str(sent_lemma),
                features=Features(keywords=KeywordsOptions(
                    sentiment=True, limit=10))).get_result()
        except Exception as e:
            # print(e, sent_lemma)
            continue
        for i in response["keywords"]:
            keyword = i["text"]
            sentiment = i["sentiment"]["score"]
            if sentiment >= 0:  # Skip keywords with non-negative sentiment
                continue
            category = which_bucket(keyword, nlp, domain)
            # and (not connection[category]["Review"].str.contains(sent).any()):
            if category:
                response_emo = natural_language_understanding.analyze(
                    text=str(sent_lemma),
                    features=Features(emotion=EmotionOptions(
                        targets=[keyword]))).get_result()
                # All five scores live under the first (only) emotion target
                target_emotion = response_emo["emotion"]["targets"][0]["emotion"]
                try:
                    connection[category] = connection[category].append(
                        {
                            "Hotel Name": hotel_name,
                            "Review": str(sent),
                            "Review_Lemma": sent_lemma,
                            "Keyword": keyword,
                            "Sentiment": sentiment,
                            "User Contribution": user_contrib,
                            "Recency": recency,
                            "joy": target_emotion["joy"],
                            "sadness": target_emotion["sadness"],
                            "anger": target_emotion["anger"],
                            "disgust": target_emotion["disgust"],
                            "fear": target_emotion["fear"]
                        },
                        ignore_index=True)
                except Exception:
                    # print("Error")
                    pass
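
extract leans on several names defined elsewhere in its module; a sketch of the assumed setup (the bucket vocabulary and DataFrame store are placeholders):

import spacy

nlp = spacy.load('en_core_web_sm')   # spaCy pipeline for sentence splitting
domain = ['room', 'staff', 'food']   # hypothetical bucket vocabulary
connection = {}                      # maps category -> DataFrame of flagged reviews
# lemmatize(sent, nlp) and which_bucket(keyword, nlp, domain) are helpers
# assumed to exist alongside this function.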
Example #12
def get_nlu_tone_analysis(tweet):
    """
    This function takes input as a tweet and
    returns their sentiment (Positive, Neutral or Negative),
    concepts (high level concepts or ideas),
    emotions (anger, disgust, fear, joy, or sadness),
    and tones (emotional and language tone)
    """
    ## Encode ASCII
    tweet = tweet.encode(encoding='ASCII', errors='ignore').decode('ASCII')
    ## Remove URLs
    tweet_cleaned = re.sub(r'http\S+', '', tweet)
    if tweet_cleaned:

        ## Call NLU API
        nlu_analysis = natural_language_understanding.analyze(
            text=tweet_cleaned,
            language='en',
            features=Features(concepts=ConceptsOptions(limit=2),
                              sentiment=SentimentOptions(),
                              emotion=EmotionOptions())).get_result()

        concepts = ', '.join(
            [concept['text'] for concept in nlu_analysis['concepts']])
        sentiment = nlu_analysis['sentiment']['document']['label']
        emotions = nlu_analysis['emotion']['document']['emotion']
        dominant_emotion = max(emotions, key=emotions.get)

        ## Call tone analyzer API ('application/json' matches the dict input)
        tone_analysis = tone_analyzer.tone(
            {'text': tweet_cleaned},
            content_type='application/json').get_result()

        tones = ', '.join([
            tone['tone_name']
            for tone in tone_analysis['document_tone']['tones']
        ])

        ## Create result table
        result = {
            'tweet': tweet,
            'sentiment': sentiment,
            "emotion": dominant_emotion,
            'concepts': concepts,
            'tones': tones
        }
    else:
        result = {
            'tweet': tweet,
            'sentiment': '',
            "emotion": '',
            'concepts': '',
            'tones': ''
        }

    return result
Example #13
    def processString(self, string_tweet=""):
        string_tweet = string_tweet.replace("\\u", " ")
        self.natural_language_understanding.set_service_url('<service URL>')
        response = self.natural_language_understanding.analyze(
            html=string_tweet,
            features=Features(emotion=EmotionOptions(
                targets=['biden', 'trump']))).get_result()

        return response
Example #14
def analyze(natural_language_understanding, input_text):
    response = natural_language_understanding.analyze(
        text=input_text,
        features=Features(emotion=EmotionOptions(),
                          categories=CategoriesOptions(limit=3),
                          concepts=ConceptsOptions(limit=3),
                          keywords=KeywordsOptions(limit=2))).get_result()

    return response
Example #15
def is_fearful(input_text, nlu):
    """ Classifies the text based on the fear level. Above a threshold is fearful.
    Returns:
        True if the fear score exceeds the threshold, False otherwise
    """
    tweet_score = nlu.analyze(text=input_text,
                              features=Features(emotion=EmotionOptions()),
                              language='en').get_result()
    return tweet_score["emotion"]["document"]["emotion"]["fear"] > 0.4
Example #16
def get_sentiment_emotions(input_text):
    response = natural_language_understanding.analyze(
        text=input_text,
        features=Features(
            emotion=EmotionOptions(document=True),
            sentiment=SentimentOptions(document=True))).get_result()
    #print(json.dumps(response, indent=2))
    data = {
        'sentiment': response["sentiment"]["document"],
        'emotion': response["emotion"]["document"]["emotion"]
    }
    return data
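
For reference, the returned dict has roughly this shape (values are illustrative, not real API output):

sample = {
    'sentiment': {'label': 'positive', 'score': 0.97},
    'emotion': {'sadness': 0.01, 'joy': 0.85, 'fear': 0.02,
                'disgust': 0.01, 'anger': 0.02}
}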
Example #17
    def test_html_analyze(self):
        nlu_url = "https://gateway.watsonplatform.net/natural-language-understanding/api/v1/analyze"
        responses.add(responses.POST, nlu_url,
                      body="{\"resulting_key\": true}", status=200,
                      content_type='application/json')
        authenticator = BasicAuthenticator('username', 'password')
        nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                             authenticator=authenticator)
        nlu.analyze(Features(sentiment=SentimentOptions(),
                             emotion=EmotionOptions(document=False)),
                    html="<span>hello this is a test</span>")
        assert len(responses.calls) == 1
Example #18
def extract_watson_features(natural_language_understanding, comment, feats):

    raw_text = comment["raw_text"]

    try:
        # If there are more than ten words, get the most common category for this text
        if feats[16] * feats[14] > 5:
            response = natural_language_understanding.analyze(
                text=raw_text,
                features=Features(categories=CategoriesOptions(limit=3),
                                  emotion=EmotionOptions(),
                                  sentiment=SentimentOptions())).get_result()

            # Save the category score to feats if it belongs to the watson_categories dict
            if len(response["categories"]) > 0:
                categories = {}
                for category in response["categories"]:
                    label = category["label"]
                    label = label.strip("/")
                    label = label[:label.find("/")] if label.rfind("/") > 0 else label
                    score = category["score"]
                    categories[label] = score

                for i in range(len(watson_categories)):
                    j = i + 29
                    category = watson_categories[i]
                    feats[j] = categories.get(category, 0.0)

            # Save emotional scores to feats
            emotions = response["emotion"]["document"]["emotion"]
            for i in range(len(watson_emotions)):
                j = i + 49
                emotion = watson_emotions[i]
                feats[j] = emotions[emotion]

            # Save sentiment scores to feats
            sentiment = response["sentiment"]["document"]["label"]
            score = response["sentiment"]["document"]["score"]
            for i in range(len(watson_sentiments)):
                j = i + 54
                if sentiment == watson_sentiments[i]:
                    feats[j] = score

    except ApiException as err:
        print(err)

    return feats
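
The function indexes into three module-level lists that are not shown; a plausible sketch (the category labels especially are assumptions):

watson_emotions = ['anger', 'disgust', 'fear', 'joy', 'sadness']
watson_sentiments = ['negative', 'neutral', 'positive']
watson_categories = ['technology and computing', 'news', 'sports']  # hypothetical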
Example #19
def analyze_using_NLU(analysistext):
    """ Extract results from Watson Natural Language Understanding for each news item
    """
    res = dict()
    response = natural_language_understanding.analyze(
        text=analysistext,
        features=Features(sentiment=SentimentOptions(),
                          entities=EntitiesOptions(),
                          keywords=KeywordsOptions(),
                          emotion=EmotionOptions(),
                          concepts=ConceptsOptions(),
                          categories=CategoriesOptions(),
                          semantic_roles=SemanticRolesOptions())).get_result()
    res['results'] = response
    return res
Example #20
    def test_html_analyze(self):
        nlu_url = "http://bogus.com/v1/analyze"
        responses.add(responses.POST,
                      nlu_url,
                      body="{\"resulting_key\": true}",
                      status=200,
                      content_type='application/json')
        nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                             url='http://bogus.com',
                                             username='******',
                                             password='******')
        nlu.analyze(Features(sentiment=SentimentOptions(),
                             emotion=EmotionOptions(document=False)),
                    html="<span>hello this is a test</span>")
        assert len(responses.calls) == 1
    def annotateEmotions(self, text):
        try:
            response = self.nlu.analyze(
                text=text,
                features=Features(emotion=EmotionOptions(
                    document=True))).get_result()
            return response['emotion']['document']['emotion']
        except ApiException:
            return {
                'sadness': 0,
                'joy': 0,
                'fear': 0,
                'disgust': 0,
                'anger': 0
            }
Example #22
def natural_language_understanding(text):
    authenticator = IAMAuthenticator(api_keys["ibm-watson-nl"]["key"])
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2020-08-01', authenticator=authenticator)
    natural_language_understanding.set_service_url(
        api_keys["ibm-watson-nl"]["url"])

    response = natural_language_understanding.analyze(
        text=text,
        features=Features(
            categories=CategoriesOptions(limit=3),
            emotion=EmotionOptions(),
            sentiment=SentimentOptions(document=True))).get_result()

    return response
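
api_keys is assumed to be a mapping loaded elsewhere, e.g. from a JSON config; a placeholder sketch:

api_keys = {
    'ibm-watson-nl': {
        'key': '<api key>',
        'url': '<service URL>'
    }
}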
Example #23
def get_score(request, twitter_handle=None):
    # initial data
    identity_score = False
    financial_footprint = False
    social_score = False

    # dict.update() returns None, so apply the update before serializing
    DEFAULT.update({
        "identity_score": identity_score,
        "financial_footprint": financial_footprint,
        'social_score': social_score
    })
    BAD_RESPONSE = HttpResponse(json.dumps(DEFAULT))

    if (twitter_handle is None or not twitter_handle
            or not twitter_handle.isalnum()):
        return BAD_RESPONSE

    # tweet = list(query_tweets(f"{twitter_handle} -user", 5))
    text = "I ordered just once from TerribleCo, they screwed up, never used the app again."

    try:
        result = natural_language_understanding.analyze(
            text=text,
            features=Features(
                # categories=CategoriesOptions(limit=3),
                # concepts=ConceptsOptions(limit=3),
                # entities=EntitiesOptions(sentiment=True, limit=1),
                # keywords=KeywordsOptions(sentiment=True,emotion=True,limit=2),
                sentiment=SentimentOptions(),
                emotion=EmotionOptions())).get_result()

        social_score = dict(result["sentiment"]["document"])
        social_score.update(result["emotion"]["document"]["emotion"])

        return HttpResponse(
            json.dumps(
                {
                    "identity_score": 1 / len(twitter_handle),
                    "financial_footprint": financial_footprint,
                    'social_score': social_score
                },
                indent=2))

    except Exception as e:
        print(e)

    return BAD_RESPONSE
Example #24
def getScore(comment):
    authenticator = IAMAuthenticator('YOUR API KEY')
    service = NaturalLanguageUnderstandingV1(version='2018-03-16',
                                             authenticator=authenticator)
    service.set_service_url(
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    response = service.analyze(text=comment,
                               features=Features(
                                   sentiment=SentimentOptions(),
                                   keywords=KeywordsOptions(),
                                   emotion=EmotionOptions())).get_result()
    # get_result() already returns a parsed dict
    final = response["sentiment"]["document"]["score"] * 100
    return final
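
A hypothetical call; the return value is the document sentiment score scaled to -100..100:

print(getScore('The staff were friendly and the room was spotless.'))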
Example #25
def prospection_nlu():

    namefile, sheet, Line_of_first_article_to_be_treated_in_the_excel_file, Line_of_last_article_to_be_treated_in_the_excel_file = input_file_to_treat_with_cells(
    )
    list_occurences_keywords = []
    list_all_keywords = []
    list_occurences_keywords.append(["initialisation", 1])
    for text_index in range(
            Line_of_first_article_to_be_treated_in_the_excel_file,
            Line_of_last_article_to_be_treated_in_the_excel_file + 1):
        print("\nArticle number %d in the file about to be prospected\n" %
              text_index)
        # Start reading the excel file
        print("Analysis of text number %d is starting. \n" % text_index)
        text = sheet.cell_value(text_index, 0).replace("\n", "")
        text = identification_language_and_translation(text)

        # Send the text to NLU
        response_nlu = naturalLanguageUnderstanding.analyze(
            text=text,
            features=Features(
                concepts=ConceptsOptions(limit=5),
                entities=EntitiesOptions(emotion=True, sentiment=True),
                keywords=KeywordsOptions(emotion=True, sentiment=True),
                sentiment=SentimentOptions(document=True),
                emotion=EmotionOptions(document=True))).get_result()
        for l in range(len(response_nlu["keywords"])):
            split_it = response_nlu["keywords"][l]["text"].split()
            for keyword in split_it:
                list_all_keywords.append(keyword)


#            Manual way of storing the list of already appeared keywords, but counter module does the same faster below
#            flag_keyword_already_appeared=0
#            index_already_stored_keywords=0
#            length=len(list_occurences_keywords)
#            while flag_keyword_already_appeared==0 and index_already_stored_keywords<length:
#                if response_nlu["keywords"][l]["text"]==list_occurences_keywords[index_already_stored_keywords][0]:
#                    list_occurences_keywords[index_already_stored_keywords][1]+=1
#                    flag_keyword_already_appeared=1
#                index_already_stored_keywords+=1
#            if flag_keyword_already_appeared==0:
#                list_occurences_keywords.append([response_nlu["keywords"][l]["text"],1])
    count_intermediate = Counter(map(str.lower, list_all_keywords))
    most_occur = count_intermediate.most_common(10)

    return most_occur
Example #26
    def __init__(self, reviews):
        authenticator = IAMAuthenticator(apikey=IBM_TTS_API_KEY)
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2020-08-01',
            authenticator=authenticator
        )

        natural_language_understanding.set_service_url('https://api.us-south.natural-language-understanding.watson'
                                                       '.cloud.ibm.com/instances/d8f62821-c5c5-49d0-8ac0-25b31f59e257')

        sentiment_response = natural_language_understanding.analyze(text=reviews, features=Features(
            sentiment=SentimentOptions(targets=['book']))).get_result()
        sentiment_res = sentiment_response["sentiment"]["document"]
        self.sentiment = sentiment_res['label'] + ": " + str(sentiment_res['score'])

        response = natural_language_understanding.analyze(text=reviews, features=Features(
            emotion=EmotionOptions(targets=['book']))).get_result()
        self.emotion_doc = response["emotion"]["document"]["emotion"]
Example #27
def get_moods(text):
    apikey = config('nlp_apikey')
    url = config('nlp_url')

    authenticator = IAMAuthenticator(apikey)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2020-08-01', authenticator=authenticator)

    natural_language_understanding.set_service_url(url)

    response = natural_language_understanding.analyze(
        html=text,
        features=Features(emotion=EmotionOptions(document=True))).get_result()

    # temp = response["emotion"]["document"]["emotion"]
    # emotion = max(temp, key=temp.get)
    # print(emotion)

    return response["emotion"]["document"]["emotion"]
Example #28
def get_results():
    authenticator = IAMAuthenticator()  # enter your IBM IAM API key for authentication
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)
    natural_language_understanding.set_service_url(
        'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/'
    )  #Add the instance id here

    if request.method == "POST":

        data = request.form.get('text')
        tweets = get_tweets(data, 5)
        results = []
        response = []
        for tw in tweets:
            #            for each in tweets:
            #each_tokens = remove_noise(word_tokenize(tw))
            #results.append(model.classify(dict([token, True] for token in each_tokens)))
            respo = (natural_language_understanding.analyze(
                html=tw,
                features=Features(emotion=EmotionOptions(targets=[
                    ' ', 'nation', 'WHO', 'Stay', 'home', 'sick', 'wear',
                    'safety', 'Clean', 'clean', 'Mask', 'India', 'condition',
                    'masks', 'The', 'is', 'a', 'the', 'covid', 'quarantine',
                    'lockdown', 'COVID-19'
                ]))).get_result())
            respoarr = [
                respo['emotion']['document']['emotion']['anger'],
                respo['emotion']['document']['emotion']['sadness'],
                respo['emotion']['document']['emotion']['fear'],
                respo['emotion']['document']['emotion']['joy'],
                respo['emotion']['document']['emotion']['disgust']
            ]
            response.append(respoarr)

        return render_template("index.html",
                               token=response[0],
                               token1=response[1],
                               token2=response[2],
                               token3=response[3],
                               token4=response[4],
                               handle=data,
                               temp=response)
Example #29
def main(args):
    # Authentication via IAM
    authenticator = IAMAuthenticator(
        'ewfuHONkTNZwuU4iEi9V1dMc_5zj5jFIVPV2bnIIVS9a')
    service = NaturalLanguageUnderstandingV1(version='2018-05-01',
                                             authenticator=authenticator)
    service.set_service_url(
        'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/473c30e7-ae76-4297-9ce2-dd439323e43e'
    )
    response = service.analyze(text=args["mydata"],
                               features=Features(
                                   categories=CategoriesOptions(limit=1),
                                   emotion=EmotionOptions(document=True),
                                   keywords=KeywordsOptions(limit=2,
                                                            sentiment=False,
                                                            emotion=True),
                                   sentiment=SentimentOptions()),
                               return_analyzed_text=True).get_result()
    return {"msg": json.dumps(response, indent=2)}
Example #30
    def smiley(self,event):
        message = event['message']
        
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-11-16',
            iam_apikey='gRWFjDNlBUzpm-p9AknzexWMklizrvr7lRZ0343PYxDS',
            url='https://gateway-lon.watsonplatform.net/natural-language-understanding/api'
        )
        
        if len(message)>14:
            response = natural_language_understanding.analyze(
                text=str(message),
                features=Features(emotion=EmotionOptions())).get_result()

            print(json.dumps(response, indent=2))
            emo_joy = (response['emotion']['document']['emotion']['joy'])
            emo_anger = (response['emotion']['document']['emotion']['anger'])
            emo_fear = (response['emotion']['document']['emotion']['fear'])
            emo_sadness = (response['emotion']['document']['emotion']['sadness'])
            emo_disgust = (response['emotion']['document']['emotion']['disgust'])
            msg_emotion = ''
            # `a > (b and c)` compares against only one operand, so use max()
            if emo_joy > max(emo_anger, emo_sadness):
                msg_emotion = 'emo_joy'
                message = message + ' :-)'
            elif emo_anger > max(emo_joy, emo_sadness):
                msg_emotion = "emo_anger"
                message = message + ' >:@'
            elif emo_sadness > max(emo_anger, emo_joy):
                msg_emotion = "emo_sadness"
                message = message + ' :-('
        else:
            pass
        print(message,"XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")
        content = {
            'message':message,
            'from': event['from'],
            'roomId':event['roomId']
        }
        print(content)
        self.new_message(content)