    def understanding(self):
        if not self.transcription:
            self.transcript()

        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2017-02-27',
            username=os.environ['UNDERSTANDING_USERNAME'],
            password=os.environ['UNDERSTANDING_PASSWORD'])

        self.analysis = natural_language_understanding.analyze(
            text=self.transcription['results'][0]['alternatives'][0]['transcript'],
            features=Features(categories=CategoriesOptions(),
                              concepts=ConceptsOptions(),
                              emotion=EmotionOptions(),
                              entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       mentions=True),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True),
                              relations=RelationsOptions(),
                              sentiment=SentimentOptions()))

        logger.info('Completed analysis of recorded file')
        return self.analysis
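A minimal sketch of the same kind of call against the current ibm-watson package (the successor to watson_developer_cloud used throughout these examples); the environment variable names and the version date are placeholders, not values taken from the example above.

import os
from ibm_watson import NaturalLanguageUnderstandingV1
from ibm_watson.natural_language_understanding_v1 import Features, CategoriesOptions
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator

# IAM API-key auth replaces the username/password pair used above.
authenticator = IAMAuthenticator(os.environ['NLU_APIKEY'])  # placeholder env var
nlu = NaturalLanguageUnderstandingV1(version='2021-08-01',
                                     authenticator=authenticator)
nlu.set_service_url(os.environ['NLU_URL'])  # placeholder env var

response = nlu.analyze(
    text='IBM Watson groups text into a hierarchical category taxonomy.',
    features=Features(categories=CategoriesOptions(limit=3))).get_result()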
Example #2
def topic_ibm(content):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        # The API key belongs in the environment, not in source;
        # NLU_APIKEY is a placeholder name.
        iam_apikey=os.environ['NLU_APIKEY'],
        url="https://gateway-lon.watsonplatform.net/natural-language-understanding/api"
    )
    response = natural_language_understanding.analyze(
        text=content,
        features=Features(categories=CategoriesOptions(limit=3))).get_result()

    fichier = json.dumps(response, indent=2)
    print(fichier)
    # response is already a dict; re-parsing the dumped JSON is redundant.
    data_dict_02 = response

    # Initialize before the branch so the return below cannot raise
    # UnboundLocalError when fewer than two categories come back.
    resultat = []
    if len(data_dict_02["categories"]) >= 2:
        category1 = data_dict_02["categories"][0]["label"]
        category2 = data_dict_02["categories"][1]["label"]
        # Labels look like "/technology/computing", so index 1 of the
        # split is the top-level category.
        resultat1 = category1.split("/")
        resultat2 = category2.split("/")
        print('resultat1 ', resultat1)
        print('resultat2 ', resultat2)
        resultat.append(resultat1[1])
        resultat.append(resultat2[1])
        print(resultat)
    return resultat
Example #3
def ibmContent(text):

    response1 = natural_language_understanding.analyze(
        text=text,
        features=Features(keywords=KeywordsOptions(limit=10))).get_result()

    # get_result() already returns a dict, so the json.dumps/json.loads
    # round-trip is unnecessary.
    jData = response1  # keyword extraction

    response2 = natural_language_understanding.analyze(
        text=text,
        features=Features(categories=CategoriesOptions(limit=3))).get_result()

    jData2 = response2  # category extraction

    keywords = ''
    relevance = ''
    categories = ''

    # A single pass over the keywords fills both '#'-delimited strings.
    for i in jData['keywords']:
        keywords += i['text'] + '#'
        relevance += str(i['relevance']) + '#'

    for i in jData2['categories']:
        categories += i['label'] + '#'

    return keywords, relevance, categories
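The '#'-joined strings above force every caller to split them and to hope no keyword contains '#'; a sketch of the same extraction returning plain lists instead, assuming the same global client:

def ibm_content_lists(text):
    # Same two analyze() calls, but returning lists rather than
    # '#'-delimited strings the caller would have to split again.
    kw = natural_language_understanding.analyze(
        text=text,
        features=Features(keywords=KeywordsOptions(limit=10))).get_result()
    cat = natural_language_understanding.analyze(
        text=text,
        features=Features(categories=CategoriesOptions(limit=3))).get_result()
    keywords = [k['text'] for k in kw['keywords']]
    relevance = [k['relevance'] for k in kw['keywords']]
    categories = [c['label'] for c in cat['categories']]
    return keywords, relevance, categories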
Example #4
def watson_nlp_analysis(text):

    if text == '': return text

    max_limit_one = 10
    max_limit_two = 30

    naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=os.environ['WATSON'],
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    response = naturalLanguageUnderstanding.analyze(
        text=text,
        features=Features(concepts=ConceptsOptions(limit=max_limit_one),
                          categories=CategoriesOptions(limit=max_limit_one),
                          sentiment=SentimentOptions(document=True),
                          emotion=EmotionOptions(document=True),
                          entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=max_limit_two),
                          keywords=KeywordsOptions(
                              emotion=True,
                              sentiment=True,
                              limit=max_limit_two))).get_result()
    return response
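None of these snippets guards the network call. The ibm-watson SDK raises ApiException on HTTP errors; a sketch of a wrapper around the function above (the wrapper name and the error-dict shape are invented):

from ibm_watson import ApiException

def watson_nlp_analysis_safe(text):
    # Surface quota, auth, and bad-input errors as data instead of a crash.
    try:
        return watson_nlp_analysis(text)
    except ApiException as err:
        return {'error': err.message, 'code': err.code}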
Example #5
    def process_text(self, conv):
        # normalize() returns a new string; keep the result, otherwise
        # the ASCII-stripping is a no-op.
        conv = unicodedata.normalize('NFKD', conv).encode(
            'ascii', 'ignore').decode('ascii')
        nlp = self.nlp_api.analyze(
            text=conv,
            language='es',
            features=Features(keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True),
                              categories=CategoriesOptions(),
                              sentiment=SentimentOptions(targets=self.flags)))

        if self.log_file is not None:
            print('INFO: logging NLP to %s\n' % self.log_file)
            with open(self.log_file, 'w') as outfile:
                json.dump(nlp, outfile)

        doc_score = 0
        target_score = []
        try:
            doc_score = nlp['sentiment']['document']['score']
            for target in nlp['sentiment']['targets']:
                target_score.append({
                    'text': target['text'],
                    'score': target['score']
                })
        except KeyError:
            print('INFO: no target found')

        return doc_score, target_score
Example #6
def nlu():
    # Context managers close all three files, and iterating the handle
    # avoids re-opening the file just to count its lines.
    with open(november_tweets, 'r', encoding='UTF8') as f, \
            open(november_gt, 'a') as g, \
            open(november_opinion, 'a') as e:
        for newline in f:
            if len(newline) > 15:
                response2 = natural_language_understanding.analyze(
                    text=newline,
                    language='en',
                    features=Features(keywords=KeywordsOptions(emotion=True,
                                                               sentiment=True,
                                                               limit=2),
                                      categories=CategoriesOptions()))
                print(json.dumps(response2, indent=2))
                # The response is already a dict; no dump/parse round-trip.
                g.write(str(response2['categories']) + '\n')
                e.write(str(response2['keywords']) + '\n')
            else:
                g.write("Not enough data" + '\n')
                e.write("Not enough data" + '\n')
    return 0
Example #7
    def concepts(self):
        IBM_dict = {}
        IBM_response = self.naturalLanguageUnderstanding.analyze(
            text=self.text,
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True, limit=10),
                keywords=KeywordsOptions(emotion=True, sentiment=True, limit=10),
                sentiment=SentimentOptions(),
                categories=CategoriesOptions()
                )).get_result()

        sent_dict = {'sentiment': IBM_response['sentiment']['document']['score']}
        IBM_dict['sentiment'] = sent_dict
        
        ent_result = []
        ents = IBM_response['entities']
        for e in ents:
            ent_result.append(e['text'].lower())
        ent_result.sort()
        IBM_dict['entities'] = ent_result
        
        kws = []
        for keyword in IBM_response['keywords']:
            kws.append(keyword['text'].lower())
        kws.sort()
        IBM_dict['keywords'] = kws
        
        cats = []
        for category in IBM_response['categories']:
            cats.append(category['label'])
        IBM_dict['categories'] = cats
        
        return IBM_dict
Example #8
def get_keywords_from_url(url):
    text = None
    if 'reddit' in url:
        submission = _reddit.submission(url=url)
        if submission.selftext:  # selftext is '' (not None) for link posts
            text = submission.selftext
            url = None
        else:
            return []
    res = _watson_nlu.analyze(
        url=url,
        text=text,
        features=Features(
            categories=CategoriesOptions(),
            concepts=ConceptsOptions(limit=5)
        )).get_result()
    kwds = set()
    if 'categories' in res:
        categories = res['categories']
        categories = sorted(categories, key=extract_relevancy, reverse=True)[:10]
        for category in categories:
            labels = re.split(',| |/', category['label'])
            for label in labels:
                kwds.add(label)
    if 'concepts' in res:
        for concept in res['concepts']:
            kwds.add(concept['text'])
    for stopword in _stopwords:
        if stopword in kwds:
            kwds.remove(stopword)
    return list(kwds)
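analyze() accepts exactly one of text, url, or html; the SDK drops the None parameter above, but making the choice explicit avoids relying on that. A sketch (the helper name is invented):

def _analyze_source(text, url, features):
    # Pass only the source that is actually set; the service requires
    # exactly one of text/url/html.
    source = {'text': text} if text is not None else {'url': url}
    return _watson_nlu.analyze(features=features, **source).get_result()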
Example #9
def getres(file_loc):
    with open(file_loc, 'r+') as f:
        head = f.readline()
        content = f.read()
        req = '<html><body><h2>{0}</h2>{1}</body></html>'.format(head, content)
        text = head + content
    tone_res = tone_analyzer.tone(req, content_type='text/html').get_result()

    res = natural_language_understanding.analyze(
        html=req,
        features=Features(
            categories=CategoriesOptions(limit=1),
            concepts=ConceptsOptions(limit=5),
            keywords=KeywordsOptions(limit=5, sentiment=True, emotion=True),
            sentiment=SentimentOptions(),
            # entities=EntitiesOptions(limit=5, mentions=True, sentiment=True, emotion=True),
        ),
    ).get_result()
    sentiment = res["sentiment"]["document"]["score"]
    concepts = [(concepts["text"], concepts["relevance"])
                for concepts in res["concepts"]]
    categories = (res["categories"][0]["label"].split("/"),
                  res["categories"][0]["score"])
    keywords = [(keywords["text"], keywords["relevance"])
                for keywords in res["keywords"]]
    tones = [(tone["tone_id"], tone["score"])
             for tone in tone_res["document_tone"]["tones"]]
    return (sentiment, concepts, keywords, tones, text)
Example #10
def getAPIResponse(textToConvert):
    response = understandingObj.analyze(
        text=str(textToConvert),
        features=Features(
            categories=CategoriesOptions()
        )
    )
    return response
Example #11
    def __init__(self, nlu_details: dict) -> None:
        self.version = nlu_details["version"]
        self.url = nlu_details["url"]
        self.apikey = nlu_details["apikey"]

        self.nlu = NaturalLanguageUnderstandingV1(
            version=self.version, url=self.url, iam_apikey=self.apikey)

        self.features = Features(
            categories=CategoriesOptions(),
            entities=EntitiesOptions(emotion=True, sentiment=True),
            keywords=KeywordsOptions(emotion=True, sentiment=True))
Example #12
def get_news_sentiment(request):
    try:
        response = natural_language_understanding.analyze(
            url=request.GET.get('url'),
            features=Features(sentiment=SentimentOptions(),
                              emotion=EmotionOptions(),
                              concepts=ConceptsOptions(limit=5),
                              categories=CategoriesOptions()))

        return Response(response)
    except Exception:  # narrowed from a bare except
        return Response({"error": 'problem retrieving'})
Example #13
def get_categories(text):
    try:
        response = natural_language_understanding.analyze(
            text=text, features=Features(categories=CategoriesOptions()))

        categories = []
        for category in response["categories"]:
            categories.append(category["label"])

        return categories
    except Exception:  # narrowed from a bare except; API errors yield []
        return []
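A variant that distinguishes a response without categories from a failed call, instead of mapping both to an empty list (a sketch, same global client assumed):

def get_categories_strict(text):
    # Missing keys default to an empty list; API errors still propagate,
    # so the caller can tell them apart from "no categories found".
    response = natural_language_understanding.analyze(
        text=text, features=Features(categories=CategoriesOptions()))
    return [c["label"] for c in response.get("categories", [])]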
Example #14
def queryWatson(headline, story):
    return nlu.analyze(text=headline + "; " + story,
                       language="en",
                       features=Features(
                           entities=EntitiesOptions(emotion=False,
                                                    sentiment=False,
                                                    limit=60),
                           keywords=KeywordsOptions(emotion=False,
                                                    sentiment=False,
                                                    limit=60),
                           categories=CategoriesOptions(limit=60),
                           concepts=ConceptsOptions(limit=50)))
Example #15
    def call_api(self, content):
        try:
            response = self.client.analyze(
                text=content,
                features=Features(entities=EntitiesOptions(),
                                  keywords=KeywordsOptions(),
                                  categories=CategoriesOptions(),
                                  concepts=ConceptsOptions())).get_result()
        except WatsonException as exception:
            print(exception)
            response = {"ibm_exception": str(exception)}
        return json.dumps(response, sort_keys=True, indent=4)
Example #16
def analyze_using_NLU(analysistext):
    res = dict()
    response = natural_language_understanding.analyze(
        text=analysistext,
        features=Features(sentiment=SentimentOptions(),
                          entities=EntitiesOptions(),
                          keywords=KeywordsOptions(),
                          emotion=EmotionOptions(),
                          concepts=ConceptsOptions(),
                          categories=CategoriesOptions(),
                          semantic_roles=SemanticRolesOptions()))
    res['results'] = response
    return json.dumps(res)
Example #17
    def get_analysis(self, text):
        """
        Returns a dictionary containing categories and entities
        """
        response = self.user.analyze(
            text=text,
            clean=True,
            language="pt",
            features=Features(categories=CategoriesOptions(),
                              entities=EntitiesOptions()),
        )

        return response
Example #18
    def get_keywords(self, sentence):
        response = self.natural_language_understanding.analyze(
            text=sentence,
            return_analyzed_text=True,
            features=Features(concepts=ConceptsOptions(),
                              categories=CategoriesOptions(),
                              relations=RelationsOptions(),
                              semantic_roles=SemanticRolesOptions(),
                              sentiment=SentimentOptions(),
                              entities=EntitiesOptions(),
                              keywords=KeywordsOptions())).get_result()
        # A list is friendlier than the lazy map object Python 3 returns;
        # note these are entity (text, type) pairs, despite the name.
        keywords = [(x['text'], x['type']) for x in response['entities']]
        return keywords
Example #19
def analyze(links):
    # Collect each response; the original discarded them, so the loop
    # had no observable effect.
    responses = []
    for link in links:
        response = natural_language_understanding.analyze(
            url=link,
            features=Features(
                entities=EntitiesOptions(emotion=True,
                                         sentiment=True,
                                         limit=15),
                emotion=EmotionOptions(targets=['keyword1', 'keyword2']),
                keywords=KeywordsOptions(emotion=True, sentiment=True,
                                         limit=2),
                concepts=ConceptsOptions(limit=5),
                sentiment=SentimentOptions(targets=['stocks']),
                categories=CategoriesOptions()))
        responses.append(response)
    return responses
Example #20
def analyze_using_NLU(analysistext):
    """ Extract results from Watson Natural Language Understanding for each news item
    """
    res = dict()
    response = natural_language_understanding.analyze(
        text=analysistext,
        features=Features(
            sentiment=SentimentOptions(),
            entities=EntitiesOptions(),
            keywords=KeywordsOptions(),
            emotion=EmotionOptions(),
            concepts=ConceptsOptions(),
            categories=CategoriesOptions(),
        ))
    res['results'] = response
    return res
Example #21
    def __init__(self, nlu_vcap):
        self.version = nlu_vcap["version"]
        self.url = nlu_vcap["url"]
        self.username = nlu_vcap["username"]
        self.password = nlu_vcap["password"]

        self.nlu = NaturalLanguageUnderstandingV1(version=self.version,
                                                  url=self.url,
                                                  username=self.username,
                                                  password=self.password)

        self.features = Features(categories=CategoriesOptions(),
                                 entities=EntitiesOptions(emotion=True,
                                                          sentiment=True),
                                 keywords=KeywordsOptions(emotion=True,
                                                          sentiment=True))
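Neither __init__ example shows the stored features in use; a hypothetical method on the same class might reuse them like this:

    def analyze_text(self, text):
        # Reuse the Features object built in __init__ for every call.
        return self.nlu.analyze(text=text, features=self.features)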
Example #22
def interface(Purl):
    response = naturalLanguageUnderstanding.analyze(
        url=Purl,
        features=Features(sentiment=SentimentOptions(),
                          categories=CategoriesOptions(limit=1),
                          concepts=ConceptsOptions(limit=1),
                          entities=EntitiesOptions(limit=1),
                          metadata=MetadataOptions())).get_result()

    #Sentiment
    vLabel = response['sentiment']['document']['label']
    score = response['sentiment']['document']['score']

    mdTitle = response['metadata']['title']
    mdDate = response['metadata']['publication_date']
    vScore = math.ceil(float(score) * 100)

    #category
    try:
        # Each of these loops keeps only the last element's values.
        for category in response['categories']:
            categoryLabel = category['label']

        for concept in response['concepts']:
            conceptText = concept['text']
            conceptRelevance = str(concept['relevance'])
            conceptSource = concept['dbpedia_resource']

        for entity in response['entities']:
            entityText = entity['text']
            entityType = entity['type']

    except Exception:  # narrowed from a bare except
        vScore = "ERROR"
        vLabel = "ERROR"
        categoryLabel = "ERROR"
        conceptText = "ERROR"
        conceptRelevance = "ERROR"
        conceptSource = "ERROR"
        entityText = "ERROR"
        entityType = "ERROR"
        mdTitle = "ERROR"
        mdDate = "ERROR"

    print(response)

    return vLabel, vScore, categoryLabel, conceptText, conceptRelevance, conceptSource, entityText, entityType, mdTitle, mdDate
Example #23
def analyze_category_test(input_folder_path, user_id, output_folder_path):
    error_list = list()
    file_path = input_folder_path + user_id + ".txt"
    with open(file_path, 'r', encoding='utf-8') as myfile:
        data = myfile.read()
    try:
        response = natural_language_understanding.analyze(
            text=data, features=Features(categories=CategoriesOptions()))
    except Exception:  # narrowed from a bare except
        print(user_id)
        error_list.append(user_id)  # record the failing user id
        response = {'categories': 'unknown'}
    #output
    output_path = output_folder_path + user_id + ".json"
    if not os.path.exists(output_folder_path):
        os.makedirs(output_folder_path)
    with open(output_path, 'w') as outfile:
        json.dump(response, outfile)
Example #24
    def process_batch(self, collection):
        target_score = []
        doc_score = 0
        found = 0
        for conv in collection:
            conv = conv['sentence']
            # Keep the normalized result; discarding it made this a no-op.
            conv = unicodedata.normalize('NFKD', conv).encode(
                'ascii', 'ignore').decode('ascii')
            nlp = self.nlp_api.analyze(
                text=conv,
                language='es',
                features=Features(
                    keywords=KeywordsOptions(emotion=True, sentiment=True),
                    categories=CategoriesOptions(),
                    sentiment=SentimentOptions(targets=self.flags)))

            try:
                doc_score += nlp['sentiment']['document']['score']
                for target in nlp['sentiment']['targets']:
                    target_score.append({
                        'text': target['text'],
                        'score': target['score']
                    })
                found += 1
            except KeyError:
                print('INFO: no target found in sentence')
        if found == 0:
            doc_score = 0
        else:
            doc_score /= found

        json_out = {
            'sentiment': {
                'document': {
                    'score': doc_score
                },
                'targets': target_score
            }
        }

        if self.log_file is not None:
            print('INFO: logging NLP to %s\n' % self.log_file)
            with open(self.log_file, 'w') as outfile:
                json.dump(json_out, outfile)

        return doc_score, target_score
Example #25
def insights(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******',
        password='******',
        version='2018-03-16')

    response = natural_language_understanding.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          categories=CategoriesOptions(),
                          relations=RelationsOptions(),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2)))

    return response
Example #26
    def generate(self, duvida=8):
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            username='******', password='******', version='2018-03-16')

        vr_text = duvida

        response = natural_language_understanding.analyze(
            text=vr_text,
            language='pt',
            features=Features(categories=CategoriesOptions(),
                              entities=EntitiesOptions(model='xxxxxxxxxxxx')))

        # str.join returns a new string, so the original loop built
        # nothing; collect the entity texts and join them once instead.
        resposta = [i['text'] for i in response['entities']]

        return ''.join(resposta)
Example #27
def resultjson():

    if request.method == 'POST':
        #text1 = request.form
        text1 = request.get_json(force=True)
        user = natural_language_understanding.analyze(
            text=str(text1),
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2),
                              relations=RelationsOptions(),
                              categories=CategoriesOptions(),
                              semantic_roles=SemanticRolesOptions(),
                              concepts=ConceptsOptions(limit=3),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2)))
        return jsonify(user)
Example #28
def understand_transcript(transcription):
    nlp_client = NaturalLanguageUnderstandingV1(
        version="2017-02-27",
        username=os.environ["WATSON_UNDERSTANDING_USERNAME"],
        password=os.environ["WATSON_UNDERSTANDING_PASSWORD"],
    )

    return nlp_client.analyze(
        text=transcription["results"][0]["alternatives"][0]["transcript"],
        features=Features(
            categories=CategoriesOptions(),
            concepts=ConceptsOptions(),
            emotion=EmotionOptions(),
            entities=EntitiesOptions(),
            keywords=KeywordsOptions(),
            sentiment=SentimentOptions(),
        ),
    )
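Speech to Text returns one result per detected utterance, so indexing ["results"][0] above keeps only the first; a sketch that joins the top alternative of every utterance instead (field names per the STT response schema):

def full_transcript(transcription):
    # Concatenate the best alternative of each utterance before analysis.
    return ' '.join(r["alternatives"][0]["transcript"]
                    for r in transcription["results"])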
Example #29
def analyseVideo(text):
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(
            categories=CategoriesOptions(),
            entities=EntitiesOptions(emotion=True,
                                     sentiment=True,
                                     limit=3),
            keywords=KeywordsOptions(emotion=True,
                                     sentiment=True,
                                     limit=3)))

    #json.dumps(response, indent=2)
    print("Classification: ")
    for cat in response["categories"]:
        print(str(cat["label"]) + "(" + str(round(cat["score"]*100)) + "%)")
    print("\n")
Example #30
def watson_nlc():

    import json
    from watson_developer_cloud import NaturalLanguageUnderstandingV1
    from watson_developer_cloud.natural_language_understanding_v1 import Features, CategoriesOptions

    # Python 3: configparser replaced the ConfigParser module, and
    # ConfigParser replaced the deprecated SafeConfigParser.
    from configparser import ConfigParser
    config = ConfigParser()
    config.read('/etc/watson_cfg.ini')

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=config.get('watson', 'username'),
        password=config.get('watson', 'password'),
        version=config.get('watson', 'version'))

    response = natural_language_understanding.analyze(
        url='https://fr.wikipedia.org/wiki/Gen%C3%A8ve',
        features=Features(categories=CategoriesOptions()))

    return json.dumps(response)