Example #1
 def calculate_extensions(self):
     return  # skip for now: this API call costs money that we don't have
     tweet_text = self.tweet.text
     response = "Languange not supported"
     try:
         if self.tweet.lang == 'ar':
             response = NLU.analyze(text=tweet_text, features=Features(categories=CategoriesOptions(limit=1)),
                                    language='ar').get_result()
             if len(response['categories']) > 0:
                 self.category = response['categories'][0]['label']
             translated = NLT.translate(text=tweet_text, model_id='ar-en', source='ar', target='en').get_result()
             translated = translated['translations'][0]['translation']
             response = NLU.analyze(text=translated, features=Features(concepts=ConceptsOptions(limit=1),
                                                                       entities=EntitiesOptions(limit=1, sentiment=True),
                                                                       keywords=KeywordsOptions(limit=1, sentiment=True),
                                                                       ), language='en').get_result()
             self.extract_englishonly_catagories(response)
         elif self.tweet.lang == 'en':
             response = NLU.analyze(text=tweet_text, features=Features(concepts=ConceptsOptions(limit=1),
                                                                       entities=EntitiesOptions(limit=1, sentiment=True),
                                                                       keywords=KeywordsOptions(limit=1, sentiment=True),
                                                                       categories=CategoriesOptions(limit=1),
                                                                       ), language='en').get_result()
             if len(response['categories']) > 0:
                 self.category = response['categories'][0]['label']
             self.extract_englishonly_catagories(response)
     except ApiException as ex:
         print("error in calculate_AI_things")
         print(exc)
         return
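
Many of the snippets below call a module-level natural_language_understanding (or NLU) client without showing its setup. A minimal setup sketch with placeholder credentials, following the IAMAuthenticator pattern used elsewhere on this page:

from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import (
    Features, CategoriesOptions, ConceptsOptions, EntitiesOptions, KeywordsOptions)
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# Placeholder credentials: substitute your own IBM Cloud API key and service URL.
authenticator = IAMAuthenticator('<your-api-key>')
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2019-07-12', authenticator=authenticator)
natural_language_understanding.set_service_url('<your-service-url>')
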
def sn(rr):
    # Response categories
    pagina = rr
    print(pagina)
    response = natural_language_understanding.analyze(
        url=pagina,
        features=Features(categories=CategoriesOptions(limit=3))).get_result()
    print(json.dumps(response, indent=2))

    # Concepts response

    response2 = natural_language_understanding.analyze(
        url=pagina,
        features=Features(concepts=ConceptsOptions(limit=3))).get_result()

    print(json.dumps(response2, indent=2))

    # Emotion
    # response3 = natural_language_understanding.analyze(
    #     url=pagina,
    #     features=Features(emotion=EmotionOptions())).get_result()

    # print(json.dumps(response3, indent=2))

    # Sentiment
    response4 = natural_language_understanding.analyze(
        url=pagina,
        features=Features(sentiment=SentimentOptions())).get_result()

    print(json.dumps(response4, indent=2))
def main(request):
    query = request.GET.get("q")
    args = {}
    if query:
        content = get_content(query)
        summarizer_input = f'{content}'  # avoid shadowing the built-in input()
        algo = client.algo('SummarAI/Summarizer/0.1.3')
        algo.set_options(timeout=300)  # optional
        summary = algo.pipe(summarizer_input).result
        args['summary'] = summary['summarized_data']
        authenticator = IAMAuthenticator(
            'o48P8tGlhPPecmxPmu_autXoYp4U13mnb7dggkkiyk22')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)
        natural_language_understanding.set_service_url(
            "https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/972bec20-f75a-46fd-bdbc-9840fb7f7b16"
        )

        response = natural_language_understanding.analyze(
            url=f'{query}',
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=5),
                              categories=CategoriesOptions(limit=1),
                              sentiment=SentimentOptions(
                                  targets=summary['auto_gen_ranked_keywords'])
                              )).get_result()
        args['category'] = response['categories'][0]['label']
        args['category'] = args['category'].replace("/", ", ")
        args['category_score'] = response['categories'][0]['score']
        args['category_score'] = f"{(args['category_score']*100)}%"
        args['targets'] = response['sentiment']['targets']
        args['content_sentiment'] = response['sentiment']['document']['label']

    return render(request, 'index.html', args)
Example #4
def extractEntities(input_filepath, output_filepath):
    df = pd.read_csv(input_filepath)
    (rows, _) = df.shape
    for idx in range(0, rows, 1):
        hotline_url = df["URL"][idx]
        nlu_categories = natural_language_understanding.analyze(
            url=hotline_url,
            features=Features(categories=CategoriesOptions())).get_result()
        nlu_keywords = natural_language_understanding.analyze(
            url=hotline_url,
            features=Features(
                keywords=KeywordsOptions(sentiment=True, emotion=True)),
        ).get_result()
        nlu_concepts = natural_language_understanding.analyze(
            url=hotline_url,
            features=Features(concepts=ConceptsOptions())).get_result()
        categories_list = list(
            map(lambda x: x["label"], nlu_categories["categories"]))
        keywords_list = list(map(lambda x: x["text"],
                                 nlu_keywords["keywords"]))
        concepts_list = list(map(lambda x: x["text"],
                                 nlu_concepts["concepts"]))
        categories_list_extracted = list(
            map(lambda x: x.split("/")[1:], categories_list))
        categories_list_flattened = list(
            set(list(itertools.chain(*categories_list_extracted))))
        # If there are not enough concepts add keywords to the list
        if len(concepts_list) < 3:
            concepts_list = concepts_list + keywords_list
        df["Concepts"][idx] = concepts_list
        df["Subject"][idx] = categories_list_flattened
    df.to_csv(output_filepath, index=False)
    return df
def nlp_watson(url):
    print(url)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='',
        url='https://gateway.watsonplatform.net/natural-language-understanding/api'
    )
    try:
        response = natural_language_understanding.analyze(
            url=url,
            features=Features(categories=CategoriesOptions(limit=15),
                              concepts=ConceptsOptions(limit=10),
                              entities=EntitiesOptions(sentiment=True,
                                                       limit=20),
                              keywords=KeywordsOptions(sentiment=True,
                                                       emotion=True,
                                                       limit=5),
                              metadata=MetadataOptions())
            #relations=RelationsOptions()),
        ).get_result()

        data = json.dumps(response, indent=2)
        # new = json.loads(response)
        # print(data)
        db = client.asalvo
        news = db.news
        new_id = news.insert_one(response).inserted_id
        # print(new_id)
    except Exception as ex:
        print('Error occurred:', ex)

    return 0
Example #6
def watsonNlpApiCall(txtOrUrl: str, isText: bool):
    if (isText):
        return getWatsonServiceInstance(0).analyze(
            text=txtOrUrl,
            language='en',
            features=Features(entities=EntitiesOptions(),
                              categories=CategoriesOptions(),
                              keywords=KeywordsOptions(
                                  sentiment=True, emotion=True))).get_result()
    else:
        return getWatsonServiceInstance(0).analyze(
            url=txtOrUrl,
            language='en',
            features=Features(entities=EntitiesOptions(),
                              categories=CategoriesOptions(),
                              keywords=KeywordsOptions(
                                  sentiment=True, emotion=True))).get_result()
Example #7
def analyze(natural_language_understanding, input_text):
    response = natural_language_understanding.analyze(
        text=input_text,
        features=Features(emotion=EmotionOptions(),
                          categories=CategoriesOptions(limit=3),
                          concepts=ConceptsOptions(limit=3),
                          keywords=KeywordsOptions(limit=2))).get_result()

    return response
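
A hypothetical caller for the helper above, assuming the natural_language_understanding client from the setup sketch under Example #1 and an illustrative sample text:

# Hypothetical usage; the client and sample text are assumptions, not part of the original snippet.
result = analyze(natural_language_understanding,
                 "IBM Watson Natural Language Understanding assigns text to a hierarchical category taxonomy.")
for category in result['categories']:
    print(category['label'], category['score'])
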
def get_tags_from_fav(user):
    try:
        auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
        auth.set_access_token(access_token, access_token_secret)
        
        api = tweepy.API(auth)
        
        favorites = api.favorites(user, 3)
        
        index = 0
        dic_tweets = dict()
        
        for tweets in favorites:
            
            text_of_tweet = tweets.text.encode("ascii", "ignore").decode("ascii", "ignore")
        
            dic_tweets[index] = text_of_tweet
            
            index += 1
            
        
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-11-16',
            iam_apikey='zq_JRbDtCInoaWml-ZAjfGFn2Vj2b9wvZzfRE1O5U_wJ',
            url='https://gateway.watsonplatform.net/natural-language-understanding/api/v1/analyze?version=2018-11-16'
        )
        
        tags = []
        
        for keys in dic_tweets.keys():
            try:
            
                response_categories = natural_language_understanding.analyze(
                    text=dic_tweets[keys],
                    features=Features(categories=CategoriesOptions(limit=5))).get_result()
                
                response_concepts = natural_language_understanding.analyze(
                    text=dic_tweets[keys],
                    features=Features(concepts=ConceptsOptions(limit=5))).get_result()
            
            
                if len(response_concepts["concepts"]) != 0:
                    for i in range(len(response_concepts["concepts"])):
                        tags.append(response_concepts["concepts"][i]["text"])
                        print('     '+str(response_concepts["concepts"][i]["text"]))
                    
                tags.append(response_categories["categories"][0]["label"].split("/")[-1])
                print('     '+str(response_categories["categories"][0]["label"].split("/")[-1]))
            except:
                continue
        
        return tags
    
    except:
        return None
Example #9
def analyzeFrame(text):
    try:
        return nlu.analyze(text=text,
                           features=Features(
                               categories=CategoriesOptions(limit=3),
                               concepts=ConceptsOptions(limit=3),
                               entities=EntitiesOptions(limit=5),
                               keywords=KeywordsOptions(limit=10),
                               relations=RelationsOptions())).get_result()
    except (Exception, ApiException) as err:
        return {'err': True, 'errMsg': err.__str__()}
Example #10
def get_nlu_reponse(text):
    authenticator = IAMAuthenticator(f'{api_key}')
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12',
        authenticator=authenticator,
    )
    natural_language_understanding.set_service_url(f'{base_url}')
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(categories=CategoriesOptions(limit=3, model=model_id))).get_result()
    print(response['categories'])
    return response['categories']
Example #11
    def analyze_categories(url):
        authenticator = IAMAuthenticator(
            'Wa3GWgIQ4n-T4ULUGv7W9nSTMSFdkeIgrwcDyGa1BY-q')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)
        natural_language_understanding.set_service_url(
            'https://api.us-east.natural-language-understanding.watson.cloud.ibm.com/instances/c0d30a3e-6ca3-4b31-a8dc-4e1c8a160e73'
        )

        response = natural_language_understanding.analyze(
            url=url, features=Features(categories=CategoriesOptions(
                limit=3))).get_result()

        return response
Example #12
def extract_watson_features(natural_language_understanding, comment, feats):

    raw_text = comment["raw_text"]

    try:
        # If there are more than ten words, get the most common category for this text
        if feats[16] * feats[14] > 5:
            response = natural_language_understanding.analyze(
                text=raw_text,
                features=Features(categories=CategoriesOptions(limit=3),
                                  emotion=EmotionOptions(),
                                  sentiment=SentimentOptions())).get_result()

            # Save the category score to feats if it belongs to the watson_categories dict
            if len(response["categories"]) > 0:
                categories = {}
                for category in response["categories"]:
                    label = category["label"]
                    label = label.strip("/")
                    label = label[:label.find("/")] if label.rfind("/") > 0 else label
                    score = category["score"]
                    categories[label] = score

                for i in range(len(watson_categories)):
                    j = i + 29
                    category = watson_categories[i]
                    feats[j] = categories[
                        category] if category in categories else 0.0

            # Save emotional scores to feats
            emotions = response["emotion"]["document"]["emotion"]
            for i in range(len(watson_emotions)):
                j = i + 49
                emotion = watson_emotions[i]
                feats[j] = emotions[emotion]

            # Save sentiment scores to feats
            sentiment = response["sentiment"]["document"]["label"]
            score = response["sentiment"]["document"]["score"]
            for i in range(len(watson_sentiments)):
                j = i + 54
                if sentiment == watson_sentiments[i]:
                    feats[j] = score

    except ApiException as ex:
        print(ex)

    return feats
Example #13
def analizarTexto(texto):
    if texto:
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-11-16', iam_apikey=NLU_KEY, url=NLU_URL)

        response = natural_language_understanding.analyze(
            text=texto,
            language="es",
            features=Features(categories=CategoriesOptions(
                limit=5))).get_result()

        return json.dumps(response, indent=2)
    else:
        return {"error": "No se encontro texto"}
Example #14
def analyze_using_NLU(analysistext):
    """ Extract results from Watson Natural Language Understanding for each news item
    """
    res = dict()
    response = natural_language_understanding.analyze(
        text=analysistext,
        features=Features(sentiment=SentimentOptions(),
                          entities=EntitiesOptions(),
                          keywords=KeywordsOptions(),
                          emotion=EmotionOptions(),
                          concepts=ConceptsOptions(),
                          categories=CategoriesOptions(),
                          semantic_roles=SemanticRolesOptions())).get_result()
    res['results'] = response
    return res
Example #15
def natural_language_understanding(text):
    authenticator = IAMAuthenticator(api_keys["ibm-watson-nl"]["key"])
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2020-08-01', authenticator=authenticator)
    natural_language_understanding.set_service_url(
        api_keys["ibm-watson-nl"]["url"])

    response = natural_language_understanding.analyze(
        text=text,
        features=Features(
            categories=CategoriesOptions(limit=3),
            emotion=EmotionOptions(),
            sentiment=SentimentOptions(document=True))).get_result()

    return response
Example #16
def classification(user_input):
    splitted = user_input.split()
    if len(splitted) > 3:
        response = natural_language_understanding.analyze(
            text=user_input,
            features=Features(categories=CategoriesOptions(limit=1))).get_result()
        categories = response["categories"]
        try:
            category = categories[0]
            label = category["label"]
            label = label.split("/")
            topic = label[1]
            return topic
        except:
            return "None"
    else:
        return "None"
def disease_tagger(text):
    authenticator = IAMAuthenticator(API_KEY)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(URL)

    response = natural_language_understanding.analyze(
        text=text, features=Features(categories=CategoriesOptions(
            limit=10))).get_result()

    diseases = []
    for category in response['categories']:

        if category['label'].startswith('/health and fitness/disease/'):
            disease = category['label'].split('/')[-1]
            diseases.append({'score': category['score'], 'disease': disease})

    return diseases
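
A hypothetical call to disease_tagger, assuming the API_KEY and URL constants it reads are configured elsewhere in the module:

# Hypothetical usage; API_KEY and URL are assumed to be set.
for hit in disease_tagger("The patient was recently diagnosed with type 2 diabetes and hypertension."):
    print(hit['disease'], hit['score'])
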
Example #18
def main(args):
    # Authentication via IAM
    authenticator = IAMAuthenticator(
        'ewfuHONkTNZwuU4iEi9V1dMc_5zj5jFIVPV2bnIIVS9a')
    service = NaturalLanguageUnderstandingV1(version='2018-05-01',
                                             authenticator=authenticator)
    service.set_service_url(
        'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/473c30e7-ae76-4297-9ce2-dd439323e43e'
    )
    response = service.analyze(text=args["mydata"],
                               features=Features(
                                   categories=CategoriesOptions(limit=1),
                                   emotion=EmotionOptions(document=True),
                                   keywords=KeywordsOptions(limit=2,
                                                            sentiment=False,
                                                            emotion=True),
                                   sentiment=SentimentOptions()),
                               return_analyzed_text=True).get_result()
    return {"msg": json.dumps(response, indent=2)}
Example #19
def analyze(url):
    service = NaturalLanguageUnderstandingV1(
        version=config.Config.IBM_VERSION,
        ## url is optional, and defaults to the URL below. Use the correct URL for your region.
        url=config.Config.IBM_URL,
        iam_apikey=config.Config.IBM_API_KEY)

    response = service.analyze(
        url=url,
        # text='what is the application of NLP in web page search?',
        features=Features(categories=CategoriesOptions(),
                          concepts=ConceptsOptions(limit=10),
                          entities=EntitiesOptions(),
                          relations=RelationsOptions(),
                          semantic_roles=SemanticRolesOptions(),
                          keywords=KeywordsOptions()
                          ),
        return_analyzed_text=True,
        clean=True
    ).get_result()

    return response
    def aspect_fn(self):
        with open("C:\\Users\\Kripa\\Desktop\\convo.csv") as file:
            data = list(csv.reader(file))

        strin = " ".join(str(x) for x in data)

        natural_language_understanding.set_service_url(
            'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com/instances/c70c1850-5873-495c-b449-d84d30415f06'
        )
        natural_language_understanding.set_disable_ssl_verification(True)
        response = natural_language_understanding.analyze(
            text=strin,
            features=Features(
                categories=CategoriesOptions(limit=3), )).get_result()

        cat1 = response['categories']

        di1 = cat1[0]
        di2 = cat1[1]
        di3 = cat1[2]

        str1 = di1['label']
        str11 = str(di1['score'])
        str2 = di2['label']
        str21 = str(di2['score'])
        str3 = di3['label']
        str31 = str(di3['score'])

        screen = Builder.load_string(screen_helper3)
        screen.ids.aspectlist.add_widget(
            TwoLineListItem(text=str1, secondary_text=str11))
        screen.ids.aspectlist.add_widget(
            TwoLineListItem(text=str2, secondary_text=str21))
        screen.ids.aspectlist.add_widget(
            TwoLineListItem(text=str3, secondary_text=str31))

        sm.add_widget(screen)
Example #21
def Watson_categories(article):

    # If service instance provides API key authentication
    service = NaturalLanguageUnderstandingV1(
        version='2018-03-16',
        ## url is optional, and defaults to the URL below. Use the correct URL for your region.
        url='https://gateway.watsonplatform.net/natural-language-understanding/api',
        iam_apikey='VP6Axcp-Hfx_NMyaQpg-imNVEiwyw6E8rznikt1Virxg')

    # service = NaturalLanguageUnderstandingV1(
    #     version='2018-03-16',
    #     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
    #     # url='https://gateway.watsonplatform.net/natural-language-understanding/api',
    #     username='******',
    #     password='******')

    response = service.analyze(
        #text='In the rugged Colorado Desert of California, there lies buried a treasure ship sailed there hundreds of years ago by either Viking or Spanish explorers. Some say this is legend; others insist it is fact. A few have even claimed to have seen the ship, its wooden remains poking through the sand like the skeleton of a prehistoric beast. Among those who say they’ve come close to the ship is small-town librarian Myrtle Botts. In 1933, she was hiking with her husband in the Anza-Borrego Desert, not far from the border with Mexico. It was early March, so the desert would have been in bloom, its washed-out yellows and grays beaten back by the riotous invasion of wildflowers. Those wildflowers were what brought the Bottses to the desert, and they ended up near a tiny settlement called Agua Caliente. Surrounding place names reflected the strangeness and severity of the land: Moonlight Canyon, Hellhole Canyon, Indian Gorge. To enter the desert is to succumb to the unknowable. One morning, a prospector appeared in the couple’s camp with news far more astonishing than a new species of desert flora: He’d found a ship lodged in the rocky face of Canebrake Canyon. The vessel was made of wood, and there was a serpentine figure carved into its prow. There were also impressions on its flanks where shields had been attached—all the hallmarks of a Viking craft. Recounting the episode later, Botts said she and her husband saw the ship but couldn’t reach it, so they vowed to return the following day, better prepared for a rugged hike. That wasn’t to be, because, several hours later, there was a 6.4 magnitude earthquake in the waters off Huntington Beach, in Southern California. Botts claimed it dislodged rocks that buried her Viking ship, which she never saw again.There are reasons to doubt her story, yet it is only one of many about sightings of the desert ship. By the time Myrtle and her husband had set out to explore, amid the blooming poppies and evening primrose, the story of the lost desert ship was already about 60 years old. By the time I heard it, while working on a story about desert conservation, it had been nearly a century and a half since explorer Albert S. Evans had published the first account. Traveling to San Bernardino, Evans came into a valley that was “the grim and silent ghost of a dead sea,” presumably Lake Cahuilla. “The moon threw a track of shimmering light,” he wrote, directly upon “the wreck of a gallant ship, which may have gone down there centuries ago.” The route Evans took came nowhere near Canebrake Canyon, and the ship Evans claimed to see was Spanish, not Norse. Others have also seen this vessel, but much farther south, in Baja California, Mexico. Like all great legends, the desert ship is immune to its contradictions: It is fake news for the romantic soul, offering passage into some ancient American dreamtime when blood and gold were the main currencies of civic life. The legend does seem, prima facie, bonkers: a craft loaded with untold riches, sailed by early-European explorers into a vast lake that once stretched over much of inland Southern California, then run aground, abandoned by its crew and covered over by centuries of sand and rock and creosote bush as that lake dried out…and now it lies a few feet below the surface, in sight of the chicken-wire fence at the back of the Desert Dunes motel, $58 a night and HBO in most rooms. Totally insane, right? Let us slink back to our cubicles and never speak of the desert ship again. 
        # Let us only believe that which is shared with us on Facebook. Let us banish forever all traces of wonder from our lives. Yet there are believers who insist that, using recent advances in archaeology, the ship can be found. They point, for example, to a wooden sloop from the 1770s unearthed during excavations at the World Trade Center site in lower Manhattan, or the more than 40 ships, dating back perhaps 800 years, discovered in the Black Sea earlier this year.',
        text=article,
        features=Features(entities=EntitiesOptions(),
                          categories=CategoriesOptions())).get_result()

    return json.dumps(response, indent=2)
Example #22
 def test_categories(self):
     c = Features(categories=CategoriesOptions())
     assert c._to_dict() == {'categories': {}}
    else:
        userId.add(result.user.id)

    statuses = tweepy.Cursor(api.user_timeline,
                             user_id=result.user.id,
                             tweet_mode="extended").items(5)
    csvRow3 = [0] * len(csvRow2)
    for status in statuses:
        if (not status.retweeted) and ('RT @' not in status.full_text) and (not status.in_reply_to_user_id)\
                and result.id != status.id:
            try:
                response = service.analyze(
                    text=status.full_text,
                    features=Features(
                        sentiment=SentimentOptions(),
                        categories=CategoriesOptions(),
                        keywords=KeywordsOptions())).get_result()
                print(status.full_text)
                doc = nlp("u" + deEmojify(status.full_text))
                # sentiment analysis here
                for keyword in response['keywords']:
                    if searcher.searchTaxMap(keyword['text'].lower()):
                        try:
                            index = csvRow2.index(keyword['text'])
                            value = csvRow3[index]
                            score = response['sentiment']['document']['score']
                            print(index, score + value)
                            csvRow3.insert(index, score + value)
                        except:
                            csvRow2.append(keyword['text'])
                            index = csvRow2.index(keyword['text'])
Example #24
import json
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, CategoriesOptions

NLU_KEY = "7AGk8HyuLvEairikmsN1hkUwo9Xz7TzmB4ggaPxUOtvd"
NLU_URL = "https://gateway.watsonplatform.net/natural-language-understanding/api"
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-11-16', iam_apikey=NLU_KEY, url=NLU_URL)

response = natural_language_understanding.analyze(
    text="""
       Hola amigo, se que me necesitas""",
    language="es",
    features=Features(categories=CategoriesOptions(limit=5))).get_result()

print(json.dumps(response, indent=2))
Example #25
]

for row in big_list:

    twitter_screen_name = row[0]
    twitter_id = row[1]
    target_tweet = row[2]
    csvRow = [twitter_screen_name, twitter_id]
    targets = []

    try:
        response = service.analyze(
            text=target_tweet,
            features=Features(sentiment=SentimentOptions(),
                              keywords=KeywordsOptions(),
                              categories=CategoriesOptions())).get_result()

        print(response)
        csvRow.append(" ")

        for target in response['keywords']:
            if target['relevance'] > 0.5 and searcher.searchTaxMap(target['text'].lower()) \
                    and target['text'].lower() != "gift":
                targets.append(target['text'].lower())

    except Exception as e:
        print(e)

    try:
        doc = npl(target_tweet)
Example #26
for j in range(len(data)):
    text_data = data['free_data'][j]
    dev_id = data['device_survey_id'][j]

    try:
        response = natural_language_understanding.analyze(
            text=text_data,
            features=Features(
                entities=EntitiesOptions(emotion=True,
                                         sentiment=True,
                                         limit=10),
                keywords=KeywordsOptions(emotion=True, sentiment=True,
                                         limit=3),
                concepts=ConceptsOptions(limit=1),
                emotion=EmotionOptions(document=True),
                categories=CategoriesOptions(limit=3),
                sentiment=SentimentOptions(document=True))).get_result()

        sentiment_p = json.dumps(response['sentiment']['document']['label'])

        sentiment_score = json.dumps(
            response['sentiment']['document']['score'])

        # print('sentiment done')

        keyword_1 = ""
        keyword_2 = ""
        keyword_3 = ""
        # print(len(response['keywords']))
        if 'keywords' in response:
            for i in range(len(response['keywords'])):
Example #27
def categories(text):
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(categories=CategoriesOptions(limit=1))).get_result()

    return json.dumps(response["categories"])
def analyseText():
    options = NluOptions
    fileName = NluOptions["file"].split('.')[0]+'.txt'
    filename_converted = fileName.replace(
        " ", "-").replace("'", "").lower()
    
    ''' Prepare the text for Analysis'''
    
    with open(app.config["TRANSCRIPT_UPLOAD"]+filename_converted, 'r') as text_file:
        text = text_file.read()
        text = text.replace('%HESITATION', '')

    print(text)

    ''' Initialize a return variable '''

    myJsonDict = {}

    ''' Extract Category with NLU '''

    if options.get('category') == "True":
        response = natural_language_understanding.analyze(
            language='en',
            text=text,
            features=Features(categories=CategoriesOptions(limit=1))).get_result()

        category = response['categories'][0]

        # Return category ['label'] ['score']
        myJsonDict.update({"category": category})
    else:
        pass

    ''' Extract Concepts with NLU '''

    if options.get('concepts') == "True":
        response = natural_language_understanding.analyze(
            language='en',
            text=text,
            features=Features(concepts=ConceptsOptions(limit=3))).get_result()

        concepts = sorted(response['concepts'],
                            key=itemgetter('relevance'), reverse=True)

        myJsonDict.update({"concepts": concepts})
        # Return concepts ['text'] ['relevence'] ['dbpedia_resource']
    else:
        pass

    ''' Extract Entity with NLU '''

    if options.get('entity') == "True":
        response = natural_language_understanding.analyze(
            language='en',
            text=text,
            features=Features(entities=EntitiesOptions(limit=1))).get_result()

        entity = sorted(response['entities'],
                        key=itemgetter('relevance'), reverse=True)

        myJsonDict.update({"entity": entity[0]})
        # Return entity[0] ['type'] ['text'] ['relevance']
    else:
        pass

    ''' Extract Sentiments and Emotions with NLU '''

    if options.get('sentiments') == "True":
        response = natural_language_understanding.analyze(
            language='en',
            text=text,
            features=Features(keywords=KeywordsOptions(sentiment=True, emotion=True, limit=10))).get_result()

        keywords = sorted(response['keywords'],
                            key=itemgetter('relevance'), reverse=True)

        keywords_sentiments_emotions = []

        for i in keywords:

            keywords_sentiments_emotions_buffer = {
                'keyword': i['text'],
                'sentiment': i['sentiment']['label'],
                'emotion': ''
            }
            # Track the highest-scoring emotion; compare every emotion independently
            maximum = i['emotion']['sadness']
            keywords_sentiments_emotions_buffer['emotion'] = 'sadness'

            if i['emotion']['joy'] > maximum:
                maximum = i['emotion']['joy']
                keywords_sentiments_emotions_buffer['emotion'] = 'joy'

            if i['emotion']['fear'] > maximum:
                maximum = i['emotion']['fear']
                keywords_sentiments_emotions_buffer['emotion'] = 'fear'

            if i['emotion']['disgust'] > maximum:
                maximum = i['emotion']['disgust']
                keywords_sentiments_emotions_buffer['emotion'] = 'disgust'

            if i['emotion']['anger'] > maximum:
                maximum = i['emotion']['anger']
                keywords_sentiments_emotions_buffer['emotion'] = 'anger'

            keywords_sentiments_emotions.append(
                keywords_sentiments_emotions_buffer)

        myJsonDict.update({"sentiments": keywords_sentiments_emotions})
        # Return keywords_sentiments_emotions ['keyword'] ['sentiment'] ['emotion']
    else:
        pass

    ''' Analyse tone to get top 5 positive sentences '''

    if options.get('positiveSentences') == "True":
        tone_analysis = tone_analyzer.tone(
            {'text': text},
            content_type='application/json'
        ).get_result()

        sentences_with_joy = []
        print(json.dumps(tone_analysis, indent=2))

        try:
            for tone in tone_analysis['sentences_tone']:
                try:
                    if tone['tones'][0]['tone_name'] == "Joy":
                        tempDict = {"sentence_id": tone['sentence_id'],
                                    "text": tone['text'],
                                    "score": tone['tones'][0]['score']}
                        sentences_with_joy.append(tempDict)
                except:
                    continue

            sentences_with_joy = sorted(
                sentences_with_joy, key=itemgetter('score'), reverse=True)

            myJsonDict.update(
                {"positiveSentences": sentences_with_joy[:5]})
        except:
            tempDict = {"sentence_id": '',
                        "text": 'Text file too small to get positive sentences, please try again with a bigger document.',
                        "score": '100'}
            myJsonDict.update(
                {"positiveSentences": [tempDict]})
        # return sentences_with_joy[:5] ['text'] ['score']
    else:
        pass

    ''' Pre-Processing parts of speech to plot Word Cloud '''

    response = natural_language_understanding.analyze(
        language='en',
        text=text,
        features=Features(
            syntax=SyntaxOptions(
                sentences=True,
                tokens=SyntaxOptionsTokens(
                    lemma=True,
                    part_of_speech=True,
                )))).get_result()

    verbs = []
    for i in response['syntax']['tokens']:
        if i['part_of_speech'] == 'VERB':
            verbs.append(i['text'])

    nouns = []
    for i in response['syntax']['tokens']:
        if i['part_of_speech'] == 'NOUN':
            nouns.append(i['text'])

    adj = []
    for i in response['syntax']['tokens']:
        if i['part_of_speech'] == 'ADJ':
            adj.append(i['text'])

    nouns_adjectives = []
    for x in nouns:
        nouns_adjectives.append(x)

    for y in adj:
        nouns_adjectives.append(y)

    comment_words_verbs = ' '
    comment_words_nouns_adj = ' '
    stopwords = set(STOPWORDS)

    for val in verbs:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        for words in tokens:
            comment_words_verbs = comment_words_verbs + words + ' '

    for val in nouns_adjectives:
        val = str(val)
        tokens = val.split()
        for i in range(len(tokens)):
            tokens[i] = tokens[i].lower()
        for words in tokens:
            comment_words_nouns_adj = comment_words_nouns_adj + words + ' '

    wordcloud_verbs = WordCloud(width=800, height=800,
                                background_color='white',
                                stopwords=stopwords,
                                min_font_size=10,
                                max_font_size=150,
                                random_state=42).generate(comment_words_verbs)

    wordcloud_nouns_adj = WordCloud(width=800, height=800,
                                    background_color='white',
                                    colormap="Dark2",
                                    stopwords=stopwords,
                                    min_font_size=10,
                                    max_font_size=150,
                                    random_state=42).generate(comment_words_nouns_adj)

    todayDate = datetime.today().strftime('%m-%d-%Y-%s')

    verbsWC = app.config["VERBS"]+todayDate+'.png'
    plt.switch_backend('Agg')
    plt.figure(figsize=(5, 5), facecolor=None)
    plt.imshow(wordcloud_verbs)
    plt.axis("off")
    plt.tight_layout(pad=0)
    plt.title("Verbs")
    plt.savefig(verbsWC)  # savefig takes no title kwarg; the title is set above

    nounsAdjWC = app.config["NOUNS_ADJECTIVES"]+todayDate+'.png'
    plt.switch_backend('Agg')
    plt.figure(figsize=(5, 5), facecolor=None)
    plt.imshow(wordcloud_nouns_adj)
    plt.axis("off")
    plt.tight_layout(pad=0)
    plt.title("Nouns & Adjectives")
    plt.savefig(nounsAdjWC)

    wordclouds = [nounsAdjWC, verbsWC]

    myJsonDict.update({"wordclouds": wordclouds})
    # print(json.dumps(options, indent=2))
    return jsonify(myJsonDict)
Example #29
    def __perform(self, df_try, API_KEY, URL):
        data = pd.DataFrame(columns=[
            "Data", "Language", "Sentiment", "Emotion", "Keyword", "Category"
        ])
        try:
            service = self.authenticate(API_KEY=API_KEY, URL=URL)
        except Exception:
            print("ERROR IN AUTHENTICATION. Please verify your credentials.")
            return data

        for tweet in df_try["Data"]:
            try:
                tw = tweet
                response = service.analyze(
                    text=tweet,
                    features=Features(
                        sentiment=SentimentOptions(),
                        emotion=EmotionOptions(),
                        keywords=KeywordsOptions(),
                        categories=CategoriesOptions())).get_result()
            except:
                print("Error in analysing data: ", tw)
                continue

            try:
                lan = response["language"]
                sent = response["sentiment"]["document"]["label"]
            except:
                lan = 'en'
                sent = 'neutral'

            ans = -1
            place = -1
            emotion = []
            try:
                for i in response["emotion"]["document"]["emotion"]:
                    emotion.append(
                        response["emotion"]["document"]["emotion"][i])
                for j in range(len(emotion)):
                    if emotion[j] > ans:
                        ans = emotion[j]
                        place = j
                if (place == 0):
                    emot = 'sadness'
                elif (place == 1):
                    emot = 'joy'
                elif (place == 2):
                    emot = 'fear'
                elif (place == 3):
                    emot = 'disgust'
                else:
                    emot = 'anger'

            except:
                emot = "neutral"

            try:
                word = response["keywords"][0]["text"]
            except:
                word = '----'

            try:
                cat = response['categories'][0]['label']
            except:
                cat = 'Unknown'

            final = {
                "Data": tw,
                "Language": lan,
                "Sentiment": sent,
                "Emotion": emot,
                "Keyword": word,
                "Category": cat
            }
            data = data.append(final, ignore_index=True)
        return data
Example #30
# service = NaturalLanguageUnderstandingV1(
#     version='2018-03-16',
#     ## url is optional, and defaults to the URL below. Use the correct URL for your region.
#     # url='https://gateway.watsonplatform.net/natural-language-understanding/api',
#     username='******',
#     password='******')
texto="El presente informe abarca un rápido análisis de lo sucedido en el primer mes luego de haber ocurrido el Terremoto de Pedernales. Toda la información presentada ha sido tomada de cada una de las Mesas Técnicas de Trabajo del COE Nacional. En cuatro semanas luego de un terremoto es muy difícil tener todo el detalle de daños, pérdidas y afectaciones, pero el esfuerzo que han hecho todas y cada una de las entidades gubernamentales que son parte del Sistema Nacional Descentralizado de Gestión de Riesgos han hecho posible el tener este acercamiento a lo sucedido ese trágico 16 de abril de 2016"
texto1='La noche del sábado 16 de abril de 2016 todo el territorio ecuatoriano fue sorprendido por un sismo muy fuerte, que alarmó a toda la población. La primera información emitida por el Instituto Geofísico mencionaba que fue un terremoto de magnitud 7.8 con epicentro en la costa ecuatoriana entre las provincias de Esmeraldas y Manabí.'
response = service.analyze(
    text=texto,
    features=Features(entities=EntitiesOptions(sentiment=True,limit=50),
                      #keywords=KeywordsOptions(),
#                        metadata=MetadataOptions(), # only for URLs and HTML
#                        relations=RelationsOptions(),
                      categories=CategoriesOptions(limit=5))).get_result()

resp_json=json.dumps(response, indent=2)
for entidad in response["entities"]:
    print (entidad["type"])
    print(entidad["text"])
    #print(entidad["disambiguation"]["subtype"])
    print("")

for categorias in response["categories"]:
    categoria=categorias["label"]
    categoria=categoria.split('/')
    #print (categoria)
    for item in categoria:
        print (item)
    print ("")