Example #1
 def calculate_extensions(self):
     return  # bail out early: these API calls cost money that we don't have
     tweet_text = self.tweet.text
     response = "Languange not supported"
     try:
         if self.tweet.lang == 'ar':
             response = NLU.analyze(text=tweet_text, features=Features(categories=CategoriesOptions(limit=1)),
                                    language='ar').get_result()
             if len(response['categories']) > 0:
                 self.category = response['categories'][0]['label']
             translated = NLT.translate(text=tweet_text, model_id='ar-en', source='ar', target='en').get_result()
             translated = translated['translations'][0]['translation']
             response = NLU.analyze(text=translated, features=Features(concepts=ConceptsOptions(limit=1),
                                                                       entities=EntitiesOptions(limit=1, sentiment=True),
                                                                       keywords=KeywordsOptions(limit=1, sentiment=True),
                                                                       ), language='en').get_result()
             self.extract_englishonly_catagories(response)
         elif self.tweet.lang == 'en':
             response = NLU.analyze(text=tweet_text, features=Features(concepts=ConceptsOptions(limit=1),
                                                                       entities=EntitiesOptions(limit=1, sentiment=True),
                                                                       keywords=KeywordsOptions(limit=1, sentiment=True),
                                                                       categories=CategoriesOptions(limit=1),
                                                                       ), language='en').get_result()
             if len(response['categories']) > 0:
                 self.category = response['categories'][0]['label']
             self.extract_englishonly_catagories(response)
     except ApiException as ex:
         print("error in calculate_extensions")
         print(ex)
         return
Example #2
 def get_api_response(self):
     if self.API_options == "aspects":
         self.current_api_response = self.nlu.analyze(
             text=self.current_review_text,
             features=Features(entities=EntitiesOptions(emotion=True,
                                                        sentiment=True,
                                                        limit=10000),
                               keywords=KeywordsOptions(
                                   emotion=True,
                                   sentiment=True,
                                   limit=10000))).get_result()
     elif self.API_options == "overall":
         self.current_api_response = self.nlu.analyze(
             text=self.current_review_text,
             features=Features(sentiment=SentimentOptions(
                 document=True))).get_result()
     else:
         self.current_api_response = self.nlu.analyze(
             text=self.current_review_text,
             features=Features(
                 entities=EntitiesOptions(emotion=True,
                                          sentiment=True,
                                          limit=10000),
                 keywords=KeywordsOptions(emotion=True,
                                          sentiment=True,
                                          limit=10000),
                 sentiment=SentimentOptions(document=True))).get_result()
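
Every example on this page assumes a preconfigured client object (variously named NLU, nlu, natural_language_understanding, or watsonService). A minimal setup sketch using the ibm-watson SDK; the API key and service URL below are placeholders, not values taken from any example:

from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import (
    Features, CategoriesOptions, ConceptsOptions, EntitiesOptions,
    KeywordsOptions, SentimentOptions)

# Placeholder credentials; substitute your own key and regional URL.
authenticator = IAMAuthenticator('YOUR_API_KEY')
nlu = NaturalLanguageUnderstandingV1(version='2019-07-12',
                                     authenticator=authenticator)
nlu.set_service_url(
    'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com')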
Example #3
def twitter_crawler():
    pre_time = datetime.datetime.now() + datetime.timedelta(days=-7)
    pre_time = str(pre_time)
    send_data = {}
    end_data2 = []
    cursor = tweepy.Cursor(
        api.search,
        q='코로나',
        since=pre_time[:10],  # only tweets written on or after this date (e.g. 2020-09-08)
        count=2  # number of tweets to return per page
    ).items(100)  # cap at 100 tweets in total
    for i, tweet in enumerate(cursor):
        #print("{}: {}".format(i, tweet.text))
        send_data['location'] = "Unknown"
        send_data['categorized'] = ""
        send_data['score'] = 0

        response = natural_language_understanding.analyze(
            text=tweet.text,
            features=Features(entities=EntitiesOptions(emotion=False),
                              # use the options class that matches each feature
                              categories=CategoriesOptions(),
                              semantic_roles=SemanticRolesOptions(),
                              keywords=KeywordsOptions(
                                  emotion=False,
                                  sentiment=False,
                              ))).get_result()

        for entity in response['entities']:
            if entity['type'] == "Location":
                send_data['location'] = entity['text']
                break  # keep the first location; "Unknown" was already set above

        for category in response['categories']:
            send_data['categorized'] = category['label']
            send_data['score'] = category['score']

        send_data['author'] = tweet.author.name
        send_data['title'] = tweet.author.id
        send_data['contents'] = tweet.text
        send_data['created'] = tweet.created_at
        send_data['published'] = datetime.datetime.now()
        send_data['imageurl'] = ""
        try:
            send_data['imageurl'] = tweet.entities['media'][0]['media_url']
        except (KeyError, IndexError):
            # tweets without media simply keep the empty imageurl set above
            pass
        dictionary_copy = send_data.copy()
        end_data2.append(dictionary_copy)

    return end_data2
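
twitter_crawler() additionally relies on a global tweepy api handle. A minimal sketch of that assumption; all four credentials are placeholders:

import tweepy

# Placeholder Twitter credentials assumed by twitter_crawler() above.
auth = tweepy.OAuthHandler('CONSUMER_KEY', 'CONSUMER_SECRET')
auth.set_access_token('ACCESS_TOKEN', 'ACCESS_TOKEN_SECRET')
api = tweepy.API(auth)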
Example #4
def callback():
    global call_results
    add_ons = json.loads(request.values['AddOns'])

    if 'ibm_watson_speechtotext' not in add_ons['results']:
        return 'Add Watson Speech to Text add-on in your Twilio console'

    payload_url = add_ons["results"]["ibm_watson_speechtotext"]["payload"][0]["url"]

    account_sid = twilio_account_sid
    auth_token = twilio_auth_token

    resp = requests.get(payload_url, auth=(account_sid, auth_token)).json()
    results = resp['results'][0]['results']

    # use a list (not a map iterator) so transcripts can be joined twice below
    transcripts = [res['alternatives'][0]['transcript'] for res in results]

    call_results = ''.join(transcripts)
    response = natural_language_understanding.analyze(
                text=call_results,
                features=Features(
                    entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),
                    keywords=KeywordsOptions(emotion=True, sentiment=True,
                                             limit=2))).get_result()
    print(json.dumps(response, indent=2))

    return ''.join(transcripts)
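
callback() reads Flask's request, so it must be registered as a webhook route. A hypothetical wiring sketch (the app object and route path are assumptions):

from flask import Flask, request  # the handler above reads request.values

app = Flask(__name__)
# Route path is an assumption; point the Twilio recording webhook at this URL.
app.add_url_rule('/twilio/callback', 'callback', callback, methods=['POST'])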
Example #5
def demo():

    global call_results
    data = natural_language_understanding.analyze(
        text=call_results,
        features=Features(
            entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),
            keywords=KeywordsOptions(emotion=True, sentiment=True, limit=2))).get_result()
    callData = {'incident_no': 201907061, 'name': [], 'location': [], 'disaster_type': [], 'sentiment': None, 'remarks': []}

    for entity in data['entities']:
        if entity['type'] == "Person":
            callData["name"].append(entity['text'])
        elif entity['type'] == "Quantity":
            callData["remarks"].append(entity['text'])
        else:
            callData["location"].append(entity['text'])

    # NOTE: sentiment is a hardcoded placeholder; the analyze() call above does
    # not request document sentiment, so this branch always picks "horrified".
    sentiment = -1

    if sentiment < 0:
        callData["sentiment"] = "horrified"
    else:
        callData["sentiment"] = "scared"

    keywordList = []
    for keyword in data['keywords']:
        keywordList.append(keyword['text'])

    keywordList = ' '.join(keywordList)  # space-separate so the replace below can strip 'HESITATION '
    keywordList = keywordList.replace('HESITATION ', '')
    callData['remarks'] = keywordList

    return callData
Example #6
def analyze(text):
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(
            entities=EntitiesOptions(emotion=True, sentiment=True, limit=2),
            emotion=EmotionOptions())).get_result()

    print(json.dumps(response, indent=2))
    # response is already a dict, so no json.dumps/json.loads round-trip is needed
    emotions = response['emotion']['document']['emotion']

    # pick the emotion with the highest document-level score
    max_score = 0
    emo = ""
    for key, value in emotions.items():
        if value > max_score:
            max_score = value
            emo = key

    return emo

if __name__ == '__main__':
    print(analyze("I feel hungry and cold"))
Example #7
 def set_response(self, tweet_text):
     response = self.natural_language_understanding.analyze(
         text=tweet_text,
         features=Features(sentiment=SentimentOptions(document=True),
                           entities=EntitiesOptions(sentiment=True)),
         language='en').get_result()
     return response
Example #8
def main():

    # Watson NLU service setup
    server = 'https://api.eu-gb.natural-language-understanding.watson.cloud.ibm.com'
    NLU_API_KEY = os.getenv('NARURALANALYSERAPI')
    naturalLanguageAnalyser = NaturalLanguageUnderstandingV1(
        version='2018-03-19', authenticator=IAMAuthenticator(NLU_API_KEY))
    naturalLanguageAnalyser.set_service_url(server)

    #Example text
    text = 'Team, I know that times are tough! Product '\
           'sales have been disappointing for the past three '\
           'quarters. We have a competitive product, but we '\
           'need to do a better job of selling it!'

    response = naturalLanguageAnalyser.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(mentions=True,
                                                   emotion=True,
                                                   sentiment=True,
                                                   limit=10),
                          emotion=EmotionOptions(),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=10),
                          relations=RelationsOptions(),
                          syntax=SyntaxOptions(sentences=True))).get_result()
    print("Start")
    print(json.dumps(response, indent=2))
Example #9
def nlp_watson(url):
    print(url)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='',
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )
    try:
        response = natural_language_understanding.analyze(
            url=url,
            features=Features(categories=CategoriesOptions(limit=15),
                              concepts=ConceptsOptions(limit=10),
                              entities=EntitiesOptions(sentiment=True,
                                                       limit=20),
                              keywords=KeywordsOptions(sentiment=True,
                                                       emotion=True,
                                                       limit=5),
                              metadata=MetadataOptions())
            #relations=RelationsOptions()),
        ).get_result()

        data = json.dumps(response, indent=2)  # pretty-printed copy for debugging
        db = client.asalvo
        news = db.news
        new_id = news.insert_one(response).inserted_id
    except Exception as err:
        print('Error occurred:', err)

    return 0
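
nlp_watson() persists the response through a global MongoDB client (db = client.asalvo). A sketch of that assumption using pymongo; the connection string is a placeholder:

from pymongo import MongoClient

# Placeholder connection string for the global `client` used in nlp_watson().
client = MongoClient('mongodb://localhost:27017/')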
Example #10
def extractFeatures(text, followers, friends, verified):
    text = text.replace("-", " ")
    sentiment = 0.0
    entity_num = 0.0
    word_count = len(text.split())
    char_count = len(text)
    avg_word_len = char_count / word_count if word_count else 0.0  # guard against empty text
    follower_count = float(followers)
    anger = 0.0
    disgust = 0.0
    fear = 0.0
    joy = 0.0
    sadness = 0.0
    is_quote = 0.0
    friends = float(friends)
    verified = 1.0 if verified == "true" else 0.0

    try:
        # sentiment analysis
        sentiment_response = natural_language_understanding.analyze(
            text=text,
            features=Features(sentiment=SentimentOptions())).get_result()
        sentiment = sentiment_response['sentiment']['document']['score']

        # entity analysis
        entities_response = natural_language_understanding.analyze(
            text=text,
            features=Features(entities=EntitiesOptions(
                sentiment=True, emotion=True))).get_result()
        sentiment_sum = 0
        for entity in entities_response['entities']:
            sentiment_sum += entity['sentiment']['score'] * entity['relevance']
            entity_num += 1
            anger += entity['emotion']['anger'] * entity['relevance']
            disgust += entity['emotion']['disgust'] * entity['relevance']
            fear += entity['emotion']['fear'] * entity['relevance']
            joy += entity['emotion']['joy'] * entity['relevance']
            sadness += entity['emotion']['sadness'] * entity['relevance']

        sentiment = sentiment + sentiment_sum / 2
    except Exception:
        # if any Watson call fails, keep the neutral defaults set above
        pass

    features_dict = {  # renamed from `dict` to avoid shadowing the built-in
        "sentiment": sentiment,
        "entity_num": entity_num,
        "word_count": word_count,
        "char_count": char_count,
        "avg_word_len": avg_word_len,
        "follower_count": follower_count,
        "anger": anger,
        "disgust": disgust,
        "fear": fear,
        "joy": joy,
        "sadness": sadness,
        "is_quote": is_quote,
        "friends": friends,
        "verified": verified
    }
    return jsonify(features_dict)
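
Because extractFeatures() returns flask.jsonify(...), it needs an active Flask application context. A hypothetical standalone call, with made-up sample arguments:

from flask import Flask

app = Flask(__name__)
with app.app_context():
    # Sample arguments are invented for illustration.
    resp = extractFeatures("Markets rallied sharply today", "120", "80", "true")
    print(resp.get_json())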
Example #11
 def test_analyze(self):
     response = self.natural_language_understanding.analyze(
         text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
         'Superman fears not Banner, but Wayne.',
         features=Features(entities=EntitiesOptions(),
                           keywords=KeywordsOptions())).get_result()
     assert response is not None
Example #12
def processML(input_text):
    authenticator = IAMAuthenticator('kMIg5T92Ts2PAsmwEQCt7fE8WyBFiUUt-QCyAuD1Ng60')
    natural_language_understanding = NaturalLanguageUnderstandingV1(version='2019-07-12',authenticator=authenticator)
    natural_language_understanding.set_service_url('https://api.eu-de.natural-language-understanding.watson.cloud.ibm.com/instances/19f9b49e-7732-45c5-8b30-0a9be587dcb0')
    response = natural_language_understanding.analyze(
        text=str(input_text),
        features=Features(entities=EntitiesOptions(
            sentiment=True, limit=10,
            model='57cbfcd3-ed32-4cc1-b5f9-72ce8eff4e15'))).get_result()
    #print(json.dumps(response, indent=2))
    max_confidence=0
    entity_type=''
    for entity in response['entities']:
        if entity['type'] in ('EMERGENCY','REQUEST','VOLUNTEER'):
            if entity['confidence'] > max_confidence:
                max_confidence = float(entity['confidence'])
                entity_type = entity['type']
    
    # if nothing matched, entity_type stays '' and the final else returns ''
    
    if entity_type == 'EMERGENCY':
        return 'P1'
    elif entity_type == 'REQUEST':
        return 'P2'
    elif entity_type == 'VOLUNTEER':
        return 'P3'
    else:
        return ''
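
A minimal usage sketch for processML(); the input message is invented for illustration:

if __name__ == '__main__':
    # Maps the strongest detected entity type to a priority bucket.
    priority = processML('We urgently need medical volunteers downtown')
    print(priority or 'unclassified')  # 'P1', 'P2', 'P3', or '' -> 'unclassified'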
Example #13
def analyzeSampleMessages_Custom(questions_problems_text, nlu,
                                 custom_model_id):
    results_list = []
    for message in questions_problems_text:
        result = nlu.analyze(text=message,
                             features=Features(entities=EntitiesOptions(
                                 model=custom_model_id))).get_result()
        result_entities = {
            "action": [],
            "docs": [],
            "obj": [],
            "persona": [],
            "tech": []
        }
        if ("entities" in result):
            for entity in result["entities"]:
                entity_type = entity["type"]
                result_entities[entity_type].append(entity["text"])
        results_list.append({
            "header":
            "-------------------------------------------------------------",
            "message": message,
            "actions": result_entities["action"],
            "objects": result_entities["obj"],
            "tech": result_entities["tech"],
            "docs": result_entities["docs"],
            "persona": result_entities["persona"],
            "spacer": ""
        })
    return results_list
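
A usage sketch for analyzeSampleMessages_Custom(); the message and custom model ID are placeholders, and nlu is a client configured as in the setup sketch near the top:

messages = ['How do I reset my password in the admin console?']
for row in analyzeSampleMessages_Custom(messages, nlu, 'YOUR_CUSTOM_MODEL_ID'):
    print(row['message'], '->', row['actions'], row['objects'])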
Example #14
def main(request):
    query = request.GET.get("q")
    args = {}
    if query:
        content = get_content(query)
        article_text = str(content)  # avoid shadowing the built-in input()
        algo = client.algo('SummarAI/Summarizer/0.1.3')
        algo.set_options(timeout=300)  # optional
        summary = algo.pipe(article_text).result
        args['summary'] = summary['summarized_data']
        authenticator = IAMAuthenticator(
            'o48P8tGlhPPecmxPmu_autXoYp4U13mnb7dggkkiyk22')
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2019-07-12', authenticator=authenticator)
        natural_language_understanding.set_service_url(
            "https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/972bec20-f75a-46fd-bdbc-9840fb7f7b16"
        )

        response = natural_language_understanding.analyze(
            url=query,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=5),
                              categories=CategoriesOptions(limit=1),
                              sentiment=SentimentOptions(
                                  targets=summary['auto_gen_ranked_keywords'])
                              )).get_result()
        args['category'] = response['categories'][0]['label']
        args['category'] = args['category'].replace("/", ", ")
        args['category_score'] = response['categories'][0]['score']
        args['category_score'] = f"{(args['category_score']*100)}%"
        args['targets'] = response['sentiment']['targets']
        args['content_sentiment'] = response['sentiment']['document']['label']

    return render(request, 'index.html', args)
Example #15
def main():
    #import packages
    from datetime import date
    from ibm_watson.natural_language_understanding_v1 import Features, EntitiesOptions

    #Example URL
    URL = [
        'https://www.cnbc.com/2019/03/22/reuters-america-update-4-german-bund-yield-crashes-below-zero-for-first-time-since-2016-as-bleak-data-rattles-markets.html'
    ]

    # NLP processing begins
    # get an authenticated NLU client object
    natural_language_understanding = NLP_Authentication()

    url = []
    company = []
    relevance = []
    current_date = []
    count = 0

    for u in URL:
        count += 1

        print("Processing " + str(count))
        try:
            response = natural_language_understanding.analyze(
                url=u,
                features=Features(entities=EntitiesOptions(
                    sentiment=True, limit=50))).get_result()
            entities = response['entities']
            company_list = []

            for i in entities:
                if i['type'] == 'Company':
                    company_list.append(i)

            if len(company_list) > 0:  # guard on company_list so max() never sees an empty list
                relevance_score = max([i['relevance'] for i in company_list])
                for i in company_list:
                    if i['relevance'] == relevance_score:
                        company_name = i['text']
            else:
                relevance_score = 'NA'
                company_name = 'NA'

            url.append(u)
            relevance.append(relevance_score)
            company.append(company_name)
            current_date.append(date.today().strftime('%m/%d/%Y'))

        except Exception:
            continue

    Processed_USA_Entity_DF = EntityDataframe(url, company, relevance,
                                              current_date)

    if len(Processed_USA_Entity_DF) > 0:
        Processed_USA_Entity_DF.to_csv('<Enter CSV file name>')
    else:
        return
Example #16
def watsonNlpApiCall(txtOrUrl: str, isText: bool):
    if (isText):
        return getWatsonServiceInstance(0).analyze(
            text=txtOrUrl,
            language='en',
            features=Features(entities=EntitiesOptions(),
                              categories=CategoriesOptions(),
                              keywords=KeywordsOptions(
                                  sentiment=True, emotion=True))).get_result()
    else:
        return getWatsonServiceInstance(0).analyze(
            url=txtOrUrl,
            language='en',
            features=Features(entities=EntitiesOptions(),
                              categories=CategoriesOptions(),
                              keywords=KeywordsOptions(
                                  sentiment=True, emotion=True))).get_result()
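
The two branches above differ only in whether txtOrUrl is passed as text or as url. A behavior-preserving sketch that builds the shared feature set once (same assumed getWatsonServiceInstance helper):

def watsonNlpApiCallCompact(txtOrUrl: str, isText: bool):
    # Shared feature set for both input kinds.
    features = Features(entities=EntitiesOptions(),
                        categories=CategoriesOptions(),
                        keywords=KeywordsOptions(sentiment=True, emotion=True))
    kwargs = {'text': txtOrUrl} if isText else {'url': txtOrUrl}
    return getWatsonServiceInstance(0).analyze(
        language='en', features=features, **kwargs).get_result()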
Example #17
def get_entities(input_text):
    response = natural_language_understanding.analyze(
        text=input_text,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2), )).get_result()
    #print(json.dumps(response, indent=2))
    return response["entities"]
Example #18
def fetch_meta_create_df(titles, dates, contents, url, current_date, params):

    ibmapikey = params.get('IBM_API_KEY')
    authenticator = IAMAuthenticator(ibmapikey)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    organizations = []
    locations = []
    persons = []
    extraction_date = []
    news_url = []
    for i in range(len(contents)):
        response = natural_language_understanding.analyze(
            text=contents[i],
            language="de",
            features=Features(entities=EntitiesOptions(
                sentiment=False, limit=15))).get_result()
        organization = ""
        location = ""
        person = ""
        for item in response["entities"]:
            if item["type"] == "Organization":
                organization += item["text"] + " "
            if item["type"] == "Location":
                location += item["text"] + " "
            if item["type"] == "Person":
                person += item["text"] + " "
        organizations.append(organization)
        locations.append(location)
        persons.append(person)
        extraction_date.append(datetime.now().strftime('%Y-%m-%d'))
        news_url.append(url)

    datetimes = []
    for date in dates:
        try:
            dt_temp = datetime.strptime(date[:-4], '%d.%m.%Y %H:%M')
        except ValueError:
            dt_temp = datetime.strptime(current_date, '%d.%m.%Y')
        datetimes.append(dt_temp)


    table_columns = [
        datetimes, titles, contents, locations, organizations, persons,
        extraction_date, news_url
    ]

    #df = pd.DataFrame(list(zip(datetimes, titles, contents, locations, organizations, persons, extraction_date, news_url)),
    #           columns =['date','title', 'content', 'locations', 'organizations', 'persons', 'extraction_date', 'news_url']).iloc[1:,:]
    print("--- constructed the columns ---")
    return table_columns
Example #19
def getWatsonNLP(text):

    response = watsonService.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(),
                          keywords=KeywordsOptions(),
                          sentiment=SentimentOptions())).get_result()

    return response
Example #20
def nlpResponse(sitename):
    try:
        response = natural_language_understanding.analyze(
            url=sitename,
            features=Features(entities=EntitiesOptions(sentiment=True,
                                                       limit=1))).get_result()
        return response['entities']
    except Exception:
        return "Something went wrong. Please try with https:// or http://"
Example #21
def analyze_text(text):
    return natural_language_understanding.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2))).get_result()
Example #22
def NLU(df1):
    # Entity values
    df1['INTENTS_NLU'] = ''
    df1['ENTITIES_NLU'] = ''
    df1['ENTITIES_SPACY'] = ''
    # fetch up to 3 important keywords per intent example using NLU
    authenticator = IAMAuthenticator(
        'FMUFQJHtKvAqukIATHrBQqnVy8GP_5lvN5Iq0JzokZgn')
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(
        'https://api.us-south.natural-language-understanding.watson.cloud.ibm.com/instances/f9f3fbd1-3de7-44e6-a53a-dc93992d6627'
    )
    for i in range(len(df1)):
        try:
            response = natural_language_understanding.analyze(
                text=df1['INTENTEXAMPLES'][i],
                features=Features(keywords=KeywordsOptions(
                    sentiment=False, emotion=False, limit=3))).get_result()
            # join up to three keyword texts with underscores; df1.at avoids
            # pandas chained-assignment pitfalls
            keywords = [kw['text'] for kw in response['keywords']]
            df1.at[i, 'INTENTS_NLU'] = '_'.join(keywords)
        except Exception:
            df1.at[i, 'INTENTS_NLU'] = ''
    # fetch entities from NLU
    for i in range(len(df1)):
        try:
            response = natural_language_understanding.analyze(
                text=df1['INTENTEXAMPLES'][i],
                features=Features(entities=EntitiesOptions(
                    sentiment=False, limit=2))).get_result()
            if response['entities']:
                df1.at[i, 'ENTITIES_NLU'] = response['entities'][0]['text']
            else:
                df1.at[i, 'ENTITIES_NLU'] = ''
        except Exception:
            df1.at[i, 'ENTITIES_NLU'] = ''
    return df1
Example #23
def textanalysis(tweet):
	response = natural_language_understanding.analyze(
		text=tweet,
		features=Features(
			entities=EntitiesOptions(emotion=True, sentiment=True, limit=4),
			keywords=KeywordsOptions(emotion=True, sentiment=True,
									 limit=4))).get_result()

	jsonformat(json.dumps(response, indent=2))
	print(json.dumps(response, indent=2))
Example #24
    def ask_watson(self):
        input_text = self.cleaned_data['comment']

        response = service.analyze(
            text=input_text,
            features=Features(entities=EntitiesOptions(),
                              keywords=KeywordsOptions())).get_result()

        #response = service.analyze(text="isso eh um teste", features=Features(entities=EntitiesOptions(), keywords=KeywordsOptions())).get_result()
        return json.dumps(response, indent=2)
Example #25
def extractEntities(transcriptText):

    authenticator = IAMAuthenticator(apikey)
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2019-07-12', authenticator=authenticator)

    natural_language_understanding.set_service_url(nlu_url)

    response = natural_language_understanding.analyze(
        text=transcriptText.get("transcript"),
        features=Features(entities=EntitiesOptions(
            model=wks_model_id))).get_result()

    name = ''
    address = ''
    phone = ''
    orders = ''

    for entity in response['entities']:
        if entity['type'] == "ADDRESS":
            address = entity['text']

        if entity['type'] == "CUSTOMER_NAME":
            name = entity['text']

        if entity['type'] == "ORDER_ITEMS":
            orders = entity['text']

        if entity['type'] == "CUSTOMER_PHONE":
            phone = entity['text']

    response.update({'filepath': transcriptText.get('filepath')})
    print('WKS ENTITIES DETECTED: ')
    print('(NAME): ' + name, '(PHONE): ' + phone, '(ORDERS): ' + orders,
          '(ADDRESS): ' + address,
          sep="\n")

    try:
        ids = getIDs() + 1
    except Exception:
        ids = 1  # assumption: start at 1 if getIDs() fails, so the INSERT below still works

    a = "\'"
    n = a + name + a
    o = a + orders + a
    p = a + phone + a
    add = a + address + a
    insert = 'INSERT INTO RVB49192.ORDERS VALUES(%d, %s, %s, %s, %s)' % (
        ids, n, p, o, add)
    try:
        ibm_db.exec_immediate(conn, insert)
    except:
        pass

    return jsonify(response)
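
extractEntities() also assumes a global ibm_db connection conn. A connection sketch; every DSN field is a placeholder:

import ibm_db

# Placeholder DSN for the global `conn` used by extractEntities().
dsn = ('DATABASE=BLUDB;HOSTNAME=<hostname>;PORT=50000;PROTOCOL=TCPIP;'
       'UID=<username>;PWD=<password>;SECURITY=SSL')
conn = ibm_db.connect(dsn, '', '')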
Example #26
    def nlp_understanding(self, text=None, url=None):
        service = NaturalLanguageUnderstandingV1(version='2018-03-16',
                                                 authenticator=self.NLP_AUTH)

        service.set_service_url(
            'https://gateway.watsonplatform.net/natural-language-understanding/api'
        )

        if url:
            response = service.analyze(
                url=url,
                features=Features(entities=EntitiesOptions(),
                                  keywords=KeywordsOptions())).get_result()
        elif text:
            response = service.analyze(
                text=text,
                features=Features(entities=EntitiesOptions(),
                                  keywords=KeywordsOptions())).get_result()
        else:
            raise ValueError('nlp_understanding needs either text or url')

        return response
Example #27
 def analyze_url(self, url_p: str) -> dict:
     """
     É passado um texto para o Watson analizar e ele devolve um dicionário,
     contendo as entidades e palavras chaves do texto.
     """
     response = self.service.analyze(
         url=url_p,
         features=Features(entities=EntitiesOptions(),
                           keywords=KeywordsOptions())
     ).get_result()
     return response
Example #28
def analyzeFrame(text):
    try:
        return nlu.analyze(text=text,
                           features=Features(
                               categories=CategoriesOptions(limit=3),
                               concepts=ConceptsOptions(limit=3),
                               entities=EntitiesOptions(limit=5),
                               keywords=KeywordsOptions(limit=10),
                               relations=RelationsOptions())).get_result()
    except Exception as err:
        # ApiException subclasses Exception, so a single clause covers both
        return {'err': True, 'errMsg': str(err)}
Example #29
def recommendation():
    json_final = {"recommendation": "", "entities": []}
    carro = None
    text = None
    audio_file = None
    restructured_json_entities_list = None
    worst_entity_type = None
    worst_entity_text = None
    worst_score = None

    if "car" in request.form.keys():
        carro = request.form.get("car").upper()
        if not isinstance(carro, str):
            return jsonify(json_final), 200

    if "text" in request.form.keys():
        text = request.form.get("text")

    elif "audio" in request.files.keys():
        audio_file = request.files.get("audio").read()  # Bytes
        stt_response = stt_service.recognize(audio=audio_file,
                                             content_type='audio/flac',
                                             model=stt_entity_model,
                                             timestamps=False,
                                             word_confidence=False)
        stt_response = stt_response.get_result()
        text = stt_response['results'][0]['alternatives'][0]['transcript']

    if text:
        nlu_response = nlu_service.analyze(
            text=text,
            features=Features(entities=EntitiesOptions(model=nlu_entity_model,
                                                       sentiment=True)),
            language='pt')
        nlu_response = nlu_response.get_result()

        restructured_json_entities_list = restructure_nlu_json_result(
            nlu_response)
        worst_entity_type, worst_entity_text, worst_score = get_worst_entity_data(
            nlu_response)

    if restructured_json_entities_list:
        recommendation = get_recommendation(carro, worst_entity_type,
                                            restructured_json_entities_list)
        json_final['recommendation'] = recommendation
        json_final['entities'] = restructured_json_entities_list

    #if worst_entity_type:
    #json_final['piorEntity'] = worst_entity_text
    #json_final['piorScore'] = worst_score
    #json_final['prioridadeDeMelhora'] = worst_entity_type

    return jsonify(json_final), 200
Example #30
def relevancy_dict(chunk):
    service = NLU(
        version='2018-03-16',
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api',
        iam_apikey='#####')
    response = service.analyze(text=chunk,
                               features=Features(
                                   entities=EntitiesOptions(),
                                   keywords=KeywordsOptions())).get_result()
    # the SDK already returns a plain dict; no JSON round-trip needed
    return response
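
Finally, a usage sketch for relevancy_dict(); the sample chunk is invented:

if __name__ == '__main__':
    result = relevancy_dict('IBM reported strong quarterly earnings today.')
    print([kw['text'] for kw in result.get('keywords', [])])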