Example #1
def fetch_watson_and_return_keywords(sentence):
    """ Use IBM Watson IA for applying tags

        Provides IBM Watson a sentece and returns a list of tags
    """

    NLU = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=settings.WATSON_API_KEY,
        # base service URL; the SDK appends the /v1/analyze path itself
        url='https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    try:
        response = NLU.analyze(
            text=sentence,
            features=Features(keywords=KeywordsOptions(limit=5))).get_result()

        # Collect the text of every keyword in the response into a list
        refined_response = response.get("keywords")
        keywords = [x.get("text") for x in refined_response]

        return keywords
    except Exception as error:
        print(error)
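A minimal usage sketch for the function above, assuming settings.WATSON_API_KEY is configured; the sample sentence and printed result are illustrative only:

    # Hypothetical call; the actual keywords depend on the Watson service
    tags = fetch_watson_and_return_keywords("IBM Watson analyzes natural language text.")
    print(tags)  # e.g. ['IBM Watson', 'natural language text']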
Example #2
def get_nlu_data(samples):
    """Query IBM NLU to get keyword data for each sample."""
    data = {}
    nlu = NaturalLanguageUnderstandingV1(
        version='2018-03-16',
        username='******',
        password='******')
    for s in samples:
        response = nlu.analyze(
            text=s,
            language='en',
            features=Features(
                keywords=KeywordsOptions(
                    emotion=True,
                    limit=5),
                entities=EntitiesOptions(
                    emotion=True,
                    limit=5)
            ))
        data[s] = {'key' : {}, 'ent' : {}}
        for kwd_data in response.result['keywords']:
            if ('relevance' not in kwd_data or 'emotion' not in kwd_data):
                continue # skip this one, it doesn't have full data?
            data[s]['key'][kwd_data['text']] = kwd_data
        for ent_data in response.result['entities']:
            if ('relevance' not in ent_data or 'emotion' not in ent_data):
                continue # skip entities that lack relevance/emotion data
            data[s]['ent'][ent_data['text']] = ent_data
    return data
Example #3
    def understanding(self):
        if not self.transcription:
            self.transcript()

        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2017-02-27',
            username=os.environ['UNDERSTANDING_USERNAME'],
            password=os.environ['UNDERSTANDING_PASSWORD'])

        self.analysis = natural_language_understanding.analyze(
            text=self.transcription['results'][0]['alternatives'][0]
            ['transcript'],
            features=Features(categories=CategoriesOptions(),
                              concepts=ConceptsOptions(),
                              emotion=EmotionOptions(),
                              entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       mentions=True),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True),
                              relations=RelationsOptions(),
                              sentiment=SentimentOptions()))

        logger.info('Completed analysis of recorded file')
        return self.analysis
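Most of these snippets assume the SDK's Features option classes are already imported at module level; a typical import block for the watson_developer_cloud SDK (exact names vary slightly across SDK versions) is:

    # Assumed module-level imports covering the classes used in these examples
    from watson_developer_cloud import NaturalLanguageUnderstandingV1
    from watson_developer_cloud.natural_language_understanding_v1 import (
        Features, CategoriesOptions, ConceptsOptions, EmotionOptions,
        EntitiesOptions, KeywordsOptions, RelationsOptions,
        SemanticRolesOptions, SentimentOptions)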
Example #4
def nlu():
    f = open(november_tweets, 'r', encoding='UTF8')
    g = open(november_gt, 'a')
    e = open(november_opinion, 'a')
    num_lines = sum(1 for line in open(november_tweets, encoding='UTF8'))
    while num_lines != 0:
        nline = f.readline()
        newline = str(nline)
        if len(newline) > 15:
            response2 = natural_language_understanding.analyze(
                text=newline,
                language='en',
                features=Features(keywords=KeywordsOptions(emotion=True,
                                                           sentiment=True,
                                                           limit=2),
                                  categories=CategoriesOptions()))
            raw_json = json.dumps(response2, indent=2)
            print(raw_json)
            parsed = json.loads(raw_json)
            single_thing = str(parsed['categories'])
            single_sentiment = str(parsed['keywords'])
            g.write(single_thing + '\n')
            e.write(single_sentiment + '\n')
        else:
            g.write("Not enough data" + '\n')
            e.write("Not enough data" + '\n')
        num_lines -= 1
    return num_lines
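Like several snippets here, this one relies on a module-level natural_language_understanding client that is not shown; a plausible setup, assuming IAM authentication (the key and URL are placeholders):

    # Assumed module-level NLU client; the key is a placeholder
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='YOUR_IAM_APIKEY',
        url='https://gateway.watsonplatform.net/natural-language-understanding/api')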
Example #5
def getAnalysis(review):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******',
        password='******',
        version='2018-03-16')
    response = natural_language_understanding.analyze(
        text=review,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2))).get_result()
    keywords = response["keywords"]
    numKeywords = len(keywords)
    if numKeywords == 0:
        print("no keywords")
        return np.zeros(6)
    sentiments = np.array([
        keyword["sentiment"]["score"] * keyword["relevance"]
        for keyword in keywords
    ])
    totalSentiment = np.sum(sentiments) / numKeywords

    emotionNames = ['sadness', 'joy', 'fear', 'disgust', 'anger']
    emotions = np.array([
        np.array([keyword["emotion"][name] for name in emotionNames])
        for keyword in keywords
    ])
    totalEmotions = np.sum(emotions, 0) / numKeywords

    features = np.insert(totalEmotions, 0, totalSentiment, axis=0)
    return features
Example #6
def getres(file_loc):
    with open(file_loc, 'r+') as f:
        head = f.readline()
        content = f.read()
        req = '<html><body><h2>{0}</h2>{1}</body></html>'.format(head, content)
        text = head + content
    tone_res = tone_analyzer.tone(req, content_type='text/html').get_result()

    res = natural_language_understanding.analyze(
        html=req,
        features=Features(
            categories=CategoriesOptions(limit=1),
            concepts=ConceptsOptions(limit=5),
            keywords=KeywordsOptions(limit=5, sentiment=True, emotion=True),
            sentiment=SentimentOptions(),
            # entities=EntitiesOptions(limit=5, mentions=True, sentiment=True, emotion=True),
        ),
    ).get_result()
    sentiment = res["sentiment"]["document"]["score"]
    concepts = [(concept["text"], concept["relevance"])
                for concept in res["concepts"]]
    categories = (res["categories"][0]["label"].split("/"),
                  res["categories"][0]["score"])
    keywords = [(keyword["text"], keyword["relevance"])
                for keyword in res["keywords"]]
    tones = [(tone["tone_id"], tone["score"])
             for tone in tone_res["document_tone"]["tones"]]
    return (sentiment, concepts, keywords, tones, text)
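Both tone_analyzer and natural_language_understanding are assumed module-level clients here; a sketch of the Tone Analyzer side, with placeholder credentials:

    from watson_developer_cloud import ToneAnalyzerV3

    # Assumed module-level Tone Analyzer client; the key is a placeholder
    tone_analyzer = ToneAnalyzerV3(
        version='2017-09-21',
        iam_apikey='YOUR_IAM_APIKEY',
        url='https://gateway.watsonplatform.net/tone-analyzer/api')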
Example #7
def analyze_audio():
    print(request.json['texto'])

    ''' Translate the text to English '''
    language_translator = LanguageTranslator(
        version='2018-03-16',
        iam_api_key='XmyHrVcLnTgWC3Ou33zGB989tcrOxocykZeZDUJxdlP6',
        url='https://gateway.watsonplatform.net/language-translator/api')

    translation = language_translator.translate(
        text=request.json['texto'],
        model_id='es-en')

    ''' Extract insights from the text '''
    natural_language_understanding = NaturalLanguageUnderstandingV1(
      username='******',
      password='******',
      version='2018-03-16')

    response = natural_language_understanding.analyze(
      text=translation["translations"][0]["translation"],
      features=Features(
        entities=EntitiesOptions(
          emotion=True,
          sentiment=True,
          limit=2),
        keywords=KeywordsOptions(
          emotion=True,
          sentiment=True,
          limit=2)))

    return jsonify(response)
Example #8
def keywords(text):
    json_output = natural_language_understanding.analyze(
        text=text,
        # url='https://gateway.watsonplatform.net/natural-language-understanding/api',
        features=Features(
            keywords=KeywordsOptions(sentiment=True, emotion=True, limit=2)))
    return json_output
Example #9
def IntelligentCrawlUrl(URL):
    """
	This Function uses IBM Watson's Natural Language Understanding API to crawl the links and get company or person names based on a 	 knowledge graph it already has.
	This Function also return Company/Person names based on relevance score by IBM Natural Language Cognitive API.
	"""
    ListOfEntityOutput = []
    try:
        response = NaturalLanguageUnderstanding.analyze(
            url=URL,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=250),
                              sentiment=SentimentOptions(),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=250)))

    except Exception as e:
        response = {}

    if response:
        for EveryEntity in response["entities"]:
            if EveryEntity["type"] == "Company":
                if EveryEntity["relevance"] > 0.25:
                    ListOfEntityOutput.append(EveryEntity["text"])
    print(ListOfEntityOutput)
    return ListOfEntityOutput
Example #10
def posting_generator():
    jobs = []
    #job_title = "software engineer"
    #job_location = "Pittsburgh, PA"
    search_url = 'https://www.careerbuilder.com/jobs-software-engineer-in-pittsburgh,pa?keywords=Software+Engineer&location=Pittsburgh%2C+PA'
    base_url = 'https://www.careerbuilder.com'

    next_page = urllib.request.urlopen(search_url, None, None)

    nlu = NLU(
        iam_apikey='BU11gy3frJMRMKz4XQ_sPJ_HGF3p-qEr74xUlEVTWvsY',
        version='2018-03-19'
    )

    def nextPage(soup):
        print("BREAK 1")
        next_link = soup.find("a", class_="Next Page")

        if next_link is not None:
            print("BREAK 2")
            next_url = next_link.find_parent("a")['href']
            next_page = next_url
            return next_page

        else:
            print("BREAK 3")
            return 0

    while True:
        soup = BeautifulSoup(next_page, 'html.parser')

        #next_page = nextPage(soup)

        for job in soup.find_all('h2'):
            # BeautifulSoup exposes the class attribute as a list of names
            if job.get('class') == ['job-title', 'show-for-medium-up']:
                url = base_url + job.a['href']
                response = nlu.analyze(
                    url=url,
                    features=Features(
                        entities=EntitiesOptions(limit=1000),
                        keywords=KeywordsOptions(limit=1000),
                    )
                ).get_result()
                jobs.append(response)
                # jsonprinter(response)
                yield response

        next_url = nextPage(soup)

        if next_url == 0:
            break
        else:
            next_page = urllib.request.urlopen(next_url, None, None)


    print("END OF PROGRAM!")
Example #11
    def process_text(self, conv):
        # normalize to ASCII, dropping accented characters
        conv = unicodedata.normalize('NFKD', conv).encode(
            'ascii', 'ignore').decode('ascii')
        nlp = self.nlp_api.analyze(
            text=conv,
            language='es',
            features=Features(keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True),
                              categories=CategoriesOptions(),
                              sentiment=SentimentOptions(targets=self.flags)))

        if self.log_file is not None:
            print('INFO: logging NLP to %s\n' % self.log_file)
            with open(self.log_file, 'w') as outfile:
                json.dump(nlp, outfile)

        doc_score = 0
        target_score = []
        try:
            doc_score = nlp['sentiment']['document']['score']
            for target in nlp['sentiment']['targets']:
                target_score.append({
                    'text': target['text'],
                    'score': target['score']
                })
        except KeyError:
            print('INFO: no target found')

        return doc_score, target_score
Example #12
def nlu_data():
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')
    r = open("C:\\Users\\kishan.sampat\\Desktop\\user_input.txt", "r")
    extracted = r.read().splitlines()
    #print(extracted)
    mainArray = []

    for elements in extracted:
        array = []
        if len(elements) > 0:
            #print(elements)
            response = natural_language_understanding.analyze(
                text=elements,
                features=Features(entities=EntitiesOptions(sentiment=True,
                                                           limit=3),
                                  keywords=KeywordsOptions()))

            # stem every keyword from this line and collect the results
            for each in response['keywords']:
                tex = each['text']
                lemet = porter_stemmer.stem(tex)
                array.append(lemet)
            mainArray.append(array)
        else:
            break

    # write all rows once, instead of reopening and truncating the
    # file on every iteration
    with open('C:\\Users\\kishan.sampat\\Desktop\\user_input.csv',
              'w',
              newline='') as outfile:
        csv.writer(outfile).writerows(mainArray)
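porter_stemmer here (and in Example #19) is an assumed module-level stemmer; with NLTK it would be created as:

    from nltk.stem.porter import PorterStemmer

    # Assumed module-level stemmer shared by Examples #12 and #19
    porter_stemmer = PorterStemmer()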
Example #13
def findKeywords(filename):
    file = open(filename, "r")
    string = file.read()
    outputfile = open("jsonOutput.json", "w")
    keywords = []

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='m620e2y3lML5qG_oRJy9JERrlR0-159j3vJVrtPJkhJg',
        url=
        'https://gateway-wdc.watsonplatform.net/natural-language-understanding/api'
    )

    try:
        response = natural_language_understanding.analyze(
            text=string,
            features=Features(keywords=KeywordsOptions(
                sentiment=False, emotion=False))).get_result()
    except Exception:
        return []
    #print(json.dumps(response, indent=2))
    file.close()
    outputfile.write(json.dumps(response, indent=2))
    outputfile.close()
    with open("jsonOutput.json", "r") as read_file:
        data = json.load(read_file)
    keywordslist = data['keywords']
    my_dict = {}
    for x in keywordslist:
        keywords.append(x['text'])
        my_dict.update({x['text']: x['relevance']})
    #for i in keywords:
    #print(i)
    return createRelavantKeywordsList(my_dict, keywords)
Example #14
def ibmContent(text):

    response1 = natural_language_understanding.analyze(
        text=text,
        features=Features(keywords=KeywordsOptions(limit=10))).get_result()

    jData = json.loads(json.dumps(response1, indent=2,
                                  ensure_ascii=False))  # extract keywords

    response2 = natural_language_understanding.analyze(
        text=text,
        features=Features(categories=CategoriesOptions(limit=3))).get_result()

    jData2 = json.loads(json.dumps(response2, indent=2,
                                   ensure_ascii=False))  # extract categories

    keywords = ''
    relevance = ''
    categories = ''

    for i in jData['keywords']:
        keywords += i['text']
        keywords += '#'

    for i in jData['keywords']:
        relevance += str(i['relevance'])
        relevance += '#'

    for i in jData2['categories']:
        categories += i['label']
        categories += '#'

    return keywords, relevance, categories
Example #15
 def send_for_analysis(self, tweets, word):
     return self.text_analyzer.analyze(
         text=tweets,
         features=Features(
             keywords=KeywordsOptions(emotion=True, limit=2),
             emotion=EmotionOptions(targets=[word],
                                    document=True))).get_result()
Example #16
def get_sentiment(target_text):
	from watson_developer_cloud import NaturalLanguageUnderstandingV1
	from watson_developer_cloud.natural_language_understanding_v1 \
	import Features, EntitiesOptions, KeywordsOptions

	natural_language_understanding = NaturalLanguageUnderstandingV1(
	 username="******",
	 password="******",
	 version="2018-03-16")

	""" Opening the file and reading the text contained """
	file = open("messages_to_be_analyzed.txt",'r')
	target_text = file.read()

	response = natural_language_understanding.analyze(
	 text= target_text,
	 features=Features(
	   entities=EntitiesOptions(
	     emotion=True,
	     sentiment=True,
	     limit=2),
	   keywords=KeywordsOptions(
	     emotion=True,
	     sentiment=True,
	     limit=2)))

	emotion_scores = response.result.get("keywords")[0].get("emotion")
	return [emotion_scores.get(emotion) for emotion in emotion_scores]
Example #17
def watson_analyze_text_understanding(text):
    """
    Input:
        text to be analyzed
    Output:
        response from the watson API

    Taken from the watson API docs.
    """
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=settings.WATSON_UNDERSTANDING_USERNAME,
        password=settings.WATSON_UNDERSTANDING_PASSWORD,
        version="2017-02-27",
    )

    response = {}
    try:
        response = natural_language_understanding.analyze(
            text=text,
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True,
                                         limit=2),
                keywords=KeywordsOptions(emotion=True, sentiment=True,
                                         limit=2),
            ),
        )
    except Exception as e:
        print(e)
        print("Proceeding without the watson data")

    return response
Example #18
def process(key, text):
    naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=key,
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    # t = 'IBM is an American multinational technology company '
    # 'headquartered in Armonk, New York, United States, '
    # 'with operations in over 170 countries.'

    t = text

    try:
        response = naturalLanguageUnderstanding.analyze(
            text=t,
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2))).get_result()
    except Exception:
        return False

    print(json.dumps(response, indent=2))
    return (json.dumps(response, indent=2))
Example #19
def nlu_fact():
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')
    r = open("C:\\Users\\kishan.sampat\\Desktop\\fact.txt", "r")
    extracted = r.read()

    response = natural_language_understanding.analyze(
        text=extracted,
        features=Features(entities=EntitiesOptions(sentiment=True, limit=3),
                          keywords=KeywordsOptions()))
    #print(response)
   # print(json.dumps(response, indent=5))
    #json_parsed = json.dumps(response)
    #print(json_parsed)
    with open('C:\\Users\\kishan.sampat\\Desktop\\fact_data.csv', 'w') as outfile:
        #json.dump(response , outfile)
        for each in response['keywords']:
            tex = each['text']
            lemet = porter_stemmer.stem(tex)
            #print(lemet)
            json.dump(lemet, outfile)
            outfile.write('\n')
        for ent in response['entities']:
            ents = ent['text']
            lemet_ent = porter_stemmer.stem(ents)
            #print(lemet_ent)
            json.dump(lemet_ent, outfile)
            outfile.write('\n')
Example #20
    def identifyKeyworkdsAndEntities(self, data):
        self.app.logger.info(
            'Preparing to invoke Natural Language Understanding service')
        txt = data.encode("utf-8", "replace")
        nlu = self.getNLUService()

        results = nlu.analyze(text=data,
                              return_analyzed_text=True,
                              features=Features(entities=EntitiesOptions(),
                                                keywords=KeywordsOptions()))

        self.app.logger.info(json.dumps(results, indent=2))

        primeEntity = None
        primeKeyword = None

        if 'entities' in results:
            entities = results['entities']
            if 0 < len(entities):
                primeEntity = entities[0].get('text', None)

        if 'keywords' in results:
            keywords = results['keywords']
            if 0 < len(keywords):
                primeKeyword = keywords[0].get('text', None)

        retData = {"prime_entity": primeEntity, "prime_keyword": primeKeyword}
        return retData
Example #21
def requestWatsonSentence(sentences):

    nlu = NaturalLanguageUnderstandingV1(version='2017-02-27',
                                         username=credentials.nluKey,
                                         password=credentials.nluId)

    keywords = dict()
    entities = dict()

    for i in range(len(sentences)):
        tmp = nlu.analyze(text=sentences[i],
                          features=Features(
                              entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2)))
        keywords[len(keywords)] = [
            item.get('text', {}) for item in tmp.get('keywords', {})
            if item.get("relevance") > 0.1
        ]
        entities[len(entities)] = [
            item.get('text', {}) for item in tmp.get('entities', {})
            if item.get("relevance") > 0.75
        ]

    # return keywords, tones #'rtype': dictionary
    return entities, keywords
Example #22
    def concepts(self):
        IBM_dict = {}
        IBM_response = self.naturalLanguageUnderstanding.analyze(
            text=self.text,
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True, limit=10),
                keywords=KeywordsOptions(emotion=True, sentiment=True,limit=10),
                sentiment=SentimentOptions(),
                categories=CategoriesOptions()
                )).get_result()

        sent_dict = {'sentiment': IBM_response['sentiment']['document']['score']}
        IBM_dict['sentiment'] = sent_dict
        
        ent_result = []
        ents = IBM_response['entities']
        for e in ents:
            ent_result.append(e['text'].lower())
        ent_result.sort()
        IBM_dict['entities'] = ent_result
        
        kws = []
        for keyword in IBM_response['keywords']:
            kws.append(keyword['text'].lower())
        kws.sort()
        IBM_dict['keywords'] = kws
        
        cats = []
        for category in IBM_response['categories']:
            cats.append(category['label'])
        IBM_dict['categories'] = cats
        
        return IBM_dict
Example #23
    def makeWikiSection(self, sectiontitle):
        print("Accessing IBM Watson for NLP understanding on " + sectiontitle +
              " (subtopic of " + self._topic + ")")

        response = self.watsonobj.analyze(
            text=self._page.section(sectiontitle),
            features=Features(concepts=ConceptsOptions(limit=3),
                              entities=EntitiesOptions(limit=3),
                              keywords=KeywordsOptions(limit=5),
                              relations=RelationsOptions(),
                              semantic_roles=SemanticRolesOptions(limit=3)))

        if sectiontitle in wikipedia.search(
                sectiontitle) and sectiontitle != "See also":
            return Node("Section",
                        title=sectiontitle,
                        content=self._page.section(sectiontitle),
                        concepts=json.dumps(response["concepts"]),
                        entities=json.dumps(response["entities"]),
                        keywords=json.dumps(response["keywords"]),
                        relations=json.dumps(response["relations"]),
                        semantic_roles=json.dumps(response["semantic_roles"]),
                        mainarticleurl=wikipedia.page(self._topic).url)

        return Node("Section",
                    title=sectiontitle,
                    content=self._page.section(sectiontitle),
                    concepts=json.dumps(response["concepts"]),
                    entities=json.dumps(response["entities"]),
                    keywords=json.dumps(response["keywords"]),
                    relations=json.dumps(response["relations"]),
                    semantic_roles=json.dumps(response["semantic_roles"]))
Example #24
def watson_nlp_analysis(text):

    if text == '': return text

    max_limit_one = 10
    max_limit_two = 30

    naturalLanguageUnderstanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey=os.environ['WATSON'],
        url=
        'https://gateway.watsonplatform.net/natural-language-understanding/api'
    )

    response = naturalLanguageUnderstanding.analyze(
        text=text,
        features=Features(concepts=ConceptsOptions(limit=max_limit_one),
                          categories=CategoriesOptions(limit=max_limit_one),
                          sentiment=SentimentOptions(document=True),
                          emotion=EmotionOptions(document=True),
                          entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=max_limit_two),
                          keywords=KeywordsOptions(
                              emotion=True,
                              sentiment=True,
                              limit=max_limit_two))).get_result()
    return response
Example #25
def get_keywords(content):
    response = service.analyze(text=content,
                               features=Features(
                                   entities=EntitiesOptions(),
                                   keywords=KeywordsOptions())).get_result()

    kword_list = [keyword["text"] for keyword in response["keywords"]]
    return json.dumps(kword_list)
Example #26
def analyze_using_NLU(analysistext):
    """ Call Watson Natural Language Understanding service to obtain analysis results.
    """
    response = natural_language_understanding.analyze( 
        text=analysistext,
        features=Features(keywords=KeywordsOptions()))
    response = [r['text'] for r in response['keywords']]
    return response
Example #27
    def analyze(self, sentence):
        response_places = []
        response = self.natural_language_understanding.analyze(
            text=sentence['input'],
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=1),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=1)))

        # print(json.dumps(response, indent=2))
        '''
        places
        '''
        if len(response['entities']) > 0 and str(
                response['entities'][0]['type']).lower() in place_entity_type:
            new_input = {}
            destination_name = response['keywords'][0]['text']
            new_input['current_lat'] = sentence['current_lat']
            new_input['current_lng'] = sentence['current_lng']
            new_input['destination_name'] = destination_name
            location_request = requests.get(
                url=config.GOOGLE_GET_LOCATION_GEO.format(
                    destination_name, config.GOOGLE_API_KEY))
            location_geo = json.loads(
                location_request.content)['results'][0]['geometry']['location']
            new_input['destination_lat'] = location_geo['lat']
            new_input['destination_lng'] = location_geo['lng']
            return get_recommended_trip(new_input)
        else:
            keyword = response['keywords'][0]['text']
            message_res = WatsonWatsonAssistantAdapter().message(keyword)
            places = message_res['output']['text']
            for place in places:
                google_places_response = requests.get(
                    url=config.GOOGLE_PLACE_URL.format(place,
                                                       config.GOOGLE_API_KEY))
                google_place = json.loads(
                    google_places_response.content)['candidates'][0]
                place_element = {}
                place_element['name'] = google_place['name']
                place_element['image'] = config.GOOGLE_IMAGE_URL.format(
                    google_place['photos'][0]['photo_reference'],
                    config.GOOGLE_API_KEY)
                place_element['rating'] = google_place['rating']
                place_element['lat'] = google_place['geometry']['location'][
                    'lat']
                place_element['lng'] = google_place['geometry']['location'][
                    'lng']
                response_places.append(place_element)

            print(response_places)
            response_dict = {}
            response_dict['places'] = response_places
            response_dict['trips'] = {}
        return response_dict
Example #28
def tweetEmotion(tweet):
    # *** WATSON ANALYSIS ***
    #returns emotion: joy, anger, disgust, sadness, fear

    # ----- WATSON CLOUD LOGIN -----
    natural_language_understanding = NaturalLanguageUnderstandingV1(
      username="******",
      password="******",
      version="2017-02-27")

    try:
        response = natural_language_understanding.analyze(
          text=tweet,
          features=Features(entities=EntitiesOptions(
                                  emotion=True, sentiment=True,limit=1),
                           keywords=KeywordsOptions(
                                  emotion=True, sentiment=True,limit=1
                                            ))
        )

    except watson_developer_cloud.watson_service.WatsonApiException as e:
        response = []

    # print "response", response

    json_data = json.dumps(response, indent=2)          # serialize the response to a JSON string
    json_data = json.loads(json_data)                   # parse it back into a dictionary

    # print "jason_data", json_data

    if len(json_data) > 0:
        # check where emotions located: keywords or entities
        # print len(json_data["keywords"])
        # print len(json_data["entities"])

        #check if emotion data exists
        if (len(json_data["keywords"]) > 0 and "emotion" in json_data["keywords"][0]):
            result = json_data["keywords"][0]["emotion"]
            print(result)
        elif (len(json_data["entities"]) > 0 and "emotion" in json_data["entities"][0]):
            result = json_data["entities"][0]["emotion"]
            print(result)
        else:
            result = "undef"
            print(result)
            # return result
    else:
        result = "undef"
        # print result
        # return result

    if result == "undef":
        return "undef"
    else:
        maxEmotion = max(result, key=result.get)

    print(maxEmotion)
    return maxEmotion
Example #29
def analizarTexto(USER):
  #Open the file with the name of the user.
  resultfile = open("result_"+USER+".json", "w+")

  targets =[
      'vida', 'Guatemala', 'amor', 'sexo', 'politico',
      'poliltica','Yo', 'sonrisa', 'pais','novio','novia',
      'enojo', 'hermano', 'hermana','mama','papa','familia',
      'deporte', 'relacion'
    ]
  #Get the configuration file
  with open('config.json', 'r') as f:
      config = json.load(f)

  #reading the data file
  datafile = open("data_" +USER+".txt","r")
  data = datafile.read()
  
  #debug: print the raw data
  print(data)

  #Authentication
  natural_language_understanding = NaturalLanguageUnderstandingV1(
      version=config["version"],
      username=config["username"],
      password=config["password"]
  )

  response = natural_language_understanding.analyze(
    text=data,
    features=Features(
      entities=EntitiesOptions(
        emotion=True,
        sentiment=True,
        limit=2),
      keywords=KeywordsOptions(
        emotion=True,
        sentiment=True,
        limit=2),
      sentiment=SentimentOptions(
        targets=targets
      )
      #Doesn't support spanish language yet
      # ,
      # emotion=EmotionOptions(
      #   targets=targets
      # )
    )
  )

  result = str(response)
  print(result)
  resultfile.write(result)
  resultfile.close()
  datafile.close()
Example #30
    def __init__(self, nlu_details: dict) -> None:
        self.version = nlu_details["version"]
        self.url = nlu_details["url"]
        self.apikey = nlu_details["apikey"]

        self.nlu = NaturalLanguageUnderstandingV1(
            version=self.version, url=self.url, iam_apikey=self.apikey)

        self.features = Features(categories=CategoriesOptions(), entities=EntitiesOptions(
            emotion=True, sentiment=True), keywords=KeywordsOptions(emotion=True, sentiment=True))
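A minimal usage sketch for this constructor fragment; the enclosing class name NLUWrapper and all credential values are placeholders:

    # Hypothetical usage; NLUWrapper stands in for the unnamed enclosing class
    details = {"version": "2018-11-16",
               "url": "https://gateway.watsonplatform.net/natural-language-understanding/api",
               "apikey": "YOUR_IAM_APIKEY"}
    wrapper = NLUWrapper(details)
    response = wrapper.nlu.analyze(text="IBM Watson analyzes natural language.",
                                   features=wrapper.features).get_result()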