Example #1
File: watson.py  Project: assembl/assembl
def prepare_computation(id):
    """Prepare computation parameters according to discussion preferences"""
    from assembl.models import Content
    post = Content.get(id)
    active = any([post.discussion.preferences['watson_' + x]
                  for x in watson_languages.keys()])
    if active and post.body:
        api_version = config.get("watson_api_version", "2018-03-16")
        features = {}
        post_loc = post.body.first_original().locale.root_locale
        for feature_name, langs in watson_languages.items():
            if not post.discussion.preferences['watson_' + feature_name]:
                continue
            if post_loc not in langs:
                continue
            features[feature_name] = watson_feature_classes[feature_name]()
        if not features:
            return
        features = Features(**features)

        get_or_create_computation_on_post(
            post, "watson_" + api_version, features._to_dict())
        return True
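The features._to_dict() call stores the feature configuration as a plain dict so it can be rehydrated later with Features._from_dict (see do_watson_computation in Example #32 below). A minimal round-trip sketch, assuming the watson_developer_cloud SDK used throughout these examples:

from watson_developer_cloud.natural_language_understanding_v1 import (
    Features, KeywordsOptions)

features = Features(keywords=KeywordsOptions(limit=10))
params = features._to_dict()            # plain dict, safe to persist as JSON
restored = Features._from_dict(params)  # rebuilt before calling analyze()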
Example #2
def extractPopularityUrl(pytrends, natural_language_understanding, url, numberConcepts=4):

    # Extract the most relevant concepts from the page at `url`
    response = natural_language_understanding.analyze(
        url=url,
        features=Features(concepts=ConceptsOptions(limit=numberConcepts)))

    # Keep only high-confidence concepts
    concepts = response["concepts"]
    concepts = [k for k in concepts if k['relevance'] > 0.75]

    # Query Google Trends for interest in those concepts over the last month
    kw_list = [k['text'] for k in concepts]
    pytrends.build_payload(kw_list, cat=0, timeframe='today 1-m', geo='', gprop='')
    data = pytrends.interest_over_time()
    data = data.drop('isPartial', axis=1)

    # Average interest per concept, serialized as JSON
    average = data.mean()
    dict_averages = average.to_dict()
    result = json.dumps(dict_averages)
    print(result, '\n')
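This snippet assumes a json import plus pre-built pytrends and NLU clients. A plausible setup sketch (the API key is a placeholder, and the example URL is hypothetical; exact constructor arguments vary across watson_developer_cloud releases):

import json

from pytrends.request import TrendReq
from watson_developer_cloud import NaturalLanguageUnderstandingV1
from watson_developer_cloud.natural_language_understanding_v1 import (
    Features, ConceptsOptions)

pytrends = TrendReq(hl='en-US')
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2018-03-16',
    iam_apikey='<api_key>',
    url='https://gateway.watsonplatform.net/natural-language-understanding/api')

extractPopularityUrl(pytrends, natural_language_understanding,
                     'https://en.wikipedia.org/wiki/Machine_learning')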
Example #3
def getEntities_2(reference):
    """This function accepts the text to be processed and makes an API call to
    the Stage 2 NLU model. Returns detected entities in JSON format"""

    # Create a new client for the NLU service (credentials redacted)
    nlu_stage2 = NaturalLanguageUnderstandingV1(
        version='2018-08-11',
        iam_api_key='<api_key>',
        url='https://gateway-wdc.watsonplatform.net/natural-language-understanding/api'
    )

    # Send text to the API and return the detected entities
    response = nlu_stage2.analyze(
        text=reference,
        features=Features(entities=EntitiesOptions(
            model='7e0e13ae-1d62-4820-9564-46cd4c9a7f9e', mentions=True)))

    return response
Example #4
def analyseVideo(text):
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(
            categories=CategoriesOptions(),
            entities=EntitiesOptions(
                emotion=True,
                sentiment=True,
                limit=3),
            keywords=KeywordsOptions(
                emotion=True,
                sentiment=True,
                limit=3)))

    # json.dumps(response, indent=2)
    print("Classification: ")
    for cat in response["categories"]:
        print(str(cat["label"]) + "(" + str(round(cat["score"] * 100)) + "%)")
    print("\n")
Example #5
File: text.py  Project: ocoiel/sintetic
    def get_keywords(self, sentences):
        for sentence in sentences:
            try:
                response = self.natural_language_understanding.analyze(
                    text=sentence,
                    features=Features(keywords=KeywordsOptions(
                        emotion=False, sentiment=False,
                        limit=2))).get_result()
            except Exception:
                # error() is a project helper assumed to terminate the program
                print("Not enough text in summary. Press any key to close.")
                error()

            temp_list = []
            for keyword in response["keywords"]:
                temp_list.append(keyword["text"])

            self.keywords_list.append(temp_list)

        return self.keywords_list
Example #6
def sentiment(input):
    try:
        response = naturalLanguageUnderstanding.analyze(
            text=input,
            features=Features(sentiment=SentimentOptions(
                document=None, targets=None))).get_result()

        # get_result() returns a plain dict; no JSON round trip is needed
        sentiment_value = float(response['sentiment']['document']['score'])

    except Exception:
        # Fall back to NLTK's VADER compound score when the Watson call fails
        sentiment_value = sid().polarity_scores(input)['compound']

    print(sentiment_value)
    react_with_sound(sentiment_value)
    return 7
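The fallback path assumes NLTK's VADER analyzer is importable under the name sid. One plausible import matching the polarity_scores(...)['compound'] usage above (an assumption, not shown in the original):

# requires: pip install nltk, plus nltk.download('vader_lexicon')
from nltk.sentiment.vader import SentimentIntensityAnalyzer as sid

print(sid().polarity_scores("I love this!")['compound'])  # compound score in [-1, 1]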
Example #7
def getEntities_1(text):
    """This function accepts the text to be processed and makes an API call to
    the Stage 1 NLU model. Returns detected entities in JSON format"""

    # Create a new client for the NLU service (credentials redacted)
    nlu_stage1 = NaturalLanguageUnderstandingV1(
        version='2018-08-11',
        iam_api_key='<api_key>',
        url='https://gateway-wdc.watsonplatform.net/natural-language-understanding/api'
    )

    # Send text to the API and return the detected entities
    response = nlu_stage1.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(
            model='a0f0773d-d9c0-4b66-9880-be459ac0b3f7', mentions=True)))

    return response
Example #8
def understanding(request):
    if request.method == 'POST' and request.POST['text'] != '':
        service = NaturalLanguageUnderstandingV1(
            version='2018-03-16',
            ## url is optional, and defaults to the URL below. Use the correct URL for your region.
            # url='https://gateway.watsonplatform.net/natural-language-understanding/api',
            username=USERNAME,
            password=PASSWORD)
        text = request.POST['text']

        dados = service.analyze(
            text=text,
            features=Features(
                entities=EntitiesOptions(emotion=True, sentiment=True),
                keywords=KeywordsOptions(emotion=True,
                                         sentiment=True))).get_result()

        keywords = dados['keywords']
        entities = dados['entities']

        # Sentiment and emotion of the first (most relevant) keyword
        sentiments = dados['keywords'][0]['sentiment']
        print(sentiments)

        emotions = dados['keywords'][0]['emotion']
        print(emotions)

        return render(
            request, 'understanding.html', {
                'text': text,
                'keywords': keywords,
                'entities': entities,
                'sentiments': sentiments,
                'emotions': emotions
            })

    # GET requests (and empty POSTs) just render the bare template
    print('fell through to else')
    return render(request, 'understanding.html')
Example #9
def printSomething(t):
    # Returns a dict mapping each extracted keyword to its relevance score
    res = []
    rel = []
    #using IBM api
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******',
        password='******',
        version='2017-02-27')

    response = natural_language_understanding.analyze(
        text=t,
        features=Features(keywords=KeywordsOptions(
            emotion=False, sentiment=False, limit=30)))

    for key in response['keywords']:
        res.append(key['text'])
        rel.append(key['relevance'])

    result = dict(zip(res, rel))
    return result
Example #10
    def get_concepts(self, user_input, max_number_of_concepts=3):
        '''
        Extract concepts from a given string
        :param str user_input:
        :param int max_number_of_concepts:
        :return: a list of concepts(str)
        '''
        result = []
        response = self.nlu.analyze(text=user_input,
                                    features=Features(concepts=ConceptsOptions(
                                        limit=max_number_of_concepts)),
                                    language='en')
        if self.debug_mode:
            print(json.dumps(response, indent=2))

        concepts = response.get('concepts') or []
        for concept in concepts:
            result.append(concept.get('text'))
        return result
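A hypothetical usage sketch, assuming the surrounding class is instantiated as helper and its self.nlu client returns plain dicts (as the older watson_developer_cloud releases did):

concepts = helper.get_concepts(
    "IBM Watson is a question-answering computer system",
    max_number_of_concepts=2)
print(concepts)  # a list of up to two concept strings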
Example #11
File: views.py  Project: sjfke/Watson1
def watson_nlc():

    import json
    from watson_developer_cloud import NaturalLanguageUnderstandingV1
    from watson_developer_cloud.natural_language_understanding_v1 import Features, CategoriesOptions

    from ConfigParser import SafeConfigParser
    config = SafeConfigParser()
    config.read('/etc/watson_cfg.ini')

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=config.get('watson', 'username'),
        password=config.get('watson', 'password'),
        version=config.get('watson', 'version'))

    response = natural_language_understanding.analyze(
        url='https://fr.wikipedia.org/wiki/Gen%C3%A8ve',
        features=Features(categories=CategoriesOptions()))

    return json.dumps(response)
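SafeConfigParser is the Python 2 spelling. An equivalent Python 3 setup, assuming the same /etc/watson_cfg.ini layout:

from configparser import ConfigParser  # Python 3 replacement for SafeConfigParser

config = ConfigParser()
config.read('/etc/watson_cfg.ini')
username = config.get('watson', 'username')
password = config.get('watson', 'password')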
Example #12
    def negaposi_analyze(self, sentence: str):
        """
        sentence: conversational text
        return: list
        """
        result = []
        # Sentiment analysis of the string
        response = self.natural_language_understanding.analyze(
            text=sentence,
            # text='I like dog, but i don\'t ,nooooooooooooooooooooooo',
            features=Features(keywords=KeywordsOptions(
                sentiment=True, emotion=True, limit=50))).get_result()

        for sentiment in response["keywords"]:
            senti_dict = {
                "keyword": sentiment["text"],
                "label": sentiment["sentiment"]["label"]
            }
            result.append(senti_dict)
        return result
Example #13
def SentimentClassify(target_text):
    # Languages supported by the Watson sentiment feature (currently unused)
    supportinglist = [
        'ar', 'en', 'fr', 'de', 'it', 'ja', 'ko', 'pt', 'ru', 'es'
    ]
    try:
        response_senti = natural_language_understanding.analyze(
            text=target_text,
            features=Features(sentiment=SentimentOptions())).get_result()

        sentiscore = response_senti["sentiment"]["document"]["score"]
        if sentiscore == 0:
            global true0
            true0 = true0 + 1
        return sentiscore
    except Exception:
        # print("description: ", target_text)
        global err0
        err0 = err0 + 1
        print("a language error")
        return 24
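supportinglist is built but never consulted, so unsupported languages still reach the API and land in the except branch. A sketch of a pre-check that would use it, assuming the third-party langdetect package (not part of the original):

from langdetect import detect  # pip install langdetect; an added dependency

def is_supported(text, supported=('ar', 'en', 'fr', 'de', 'it',
                                  'ja', 'ko', 'pt', 'ru', 'es')):
    # detect() returns an ISO 639-1 code such as 'en'
    try:
        return detect(text) in supported
    except Exception:
        return False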
Example #14
def nlp_text_manager(text_path):
    txt = Path(text_path).read_text(encoding='cp1252')
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******',
        password='******',
        version='2018-03-16')

    response = natural_language_understanding.analyze(
        text=txt,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2)))
    # print(response)
    dict_response = dict(response)
    print(dict_response)
    return dict_response
Example #15
    def get(self, urlin):

        # Credentials redacted
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-03-16',
            iam_apikey='<api_key>',
            url='https://gateway.watsonplatform.net/natural-language-understanding/api'
        )

        response = natural_language_understanding.analyze(
            url=urlin,
            features=Features(keywords=KeywordsOptions(
                sentiment=False, emotion=False))).get_result()
        keywords = {}
        for keyword in response["keywords"]:
            keywords[keyword['text']] = keyword['relevance']

        #print(json.dumps(response, indent=2))

        return json.dumps(keywords)
Example #16
def call_watson(tweet):
    print "call watson called."
    try:
        response = natural_language_understanding.analyze(
            text=tweet["tweet"],
            features=Features(entities=EntitiesOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2),
                              keywords=KeywordsOptions(emotion=True,
                                                       sentiment=True,
                                                       limit=2)),
            language='en')
        response["tweet"] = tweet["tweet"]
        response["tweetId"] = tweet["id"]
        response["timeStamp"] = tweet["created_at"]
        response["userProfile"] = tweet["userProfile"]
        save_results(response)
    except Exception as e:
        print("Error in call watson")
        print(e)
Example #17
    def _parallel_NlU(self, text):
        # Call Watson Natural Language Understanding for one text and merge
        # the results into the shared concept/keyword lists
        if self.config['keywords']:
            keyword_option = KeywordsOptions(limit=self.config['keyword_limit'])
        else:
            keyword_option = None

        if self.config['concepts']:
            concepts_option = ConceptsOptions(
                limit=self.config['concept_limit'])
        else:
            concepts_option = None

        try:
            results = self.model.analyze(
                text=text,
                features=Features(
                    concepts=concepts_option,
                    keywords=keyword_option),
                language='en'
            )

            json_results = results.get_result()

            our_concepts = []
            for concept in json_results['concepts']:
                our_concepts.append(concept['text'])
            
            our_keywords = []
            for keyword in json_results['keywords']:
                our_keywords.append(keyword['text'])
            
            # `with` guarantees the lock is released even if an append raises
            with self.lock:
                self.concepts = self.concepts + our_concepts
                self.keywords = self.keywords + our_keywords
        
        except Exception as e:
            print(str(e))
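A hypothetical driver for the method above, assuming analyzer is an instance carrying the config, model, lock, concepts, and keywords attributes it uses, and texts is a list of input strings:

import threading

threads = [threading.Thread(target=analyzer._parallel_NlU, args=(text,))
           for text in texts]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(analyzer.concepts, analyzer.keywords)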
Example #18
def ibmIndex(text, summaries):

    response1 = natural_language_understanding.analyze(
        text=text,
        features=Features(keywords=KeywordsOptions(limit=2))).get_result()

    # get_result() already returned a plain dict, so no JSON round trip is
    # needed for keyword extraction
    jData = response1

    keywords = []
    for i in jData['keywords']:
        keywords.append(i['text'])

    # Return the first summary containing either of the top keywords;
    # keywords[:2] also guards against fewer than two keywords being returned
    for c in summaries:
        if any(k in c for k in keywords[:2]):
            return c
Example #19
    def analyze(self, text, name, symbol):

        if text is None or text == "":
            raise Exception(
                "The function analyze() received a None or empty text parameter."
            )

        try:
            response = Watson.natural_language_understanding.analyze(
                text=text,
                features=Features(emotion=EmotionOptions(),
                                  sentiment=SentimentOptions()),
                #language="en",
                return_analyzed_text=False)
        except BaseException as e:
            print(e)
            response = str(e)
        else:
            print(json.dumps(response, indent=4))

        return response
Example #20
def getEmotion(line, f):
  natural_language_understanding = NaturalLanguageUnderstandingV1(
    username='******',
    password='******',
    version='2018-03-16')

  wordCount = len(line.split(" "))
  response = natural_language_understanding.analyze(
    language="en",
    # url='https://www.time-to-change.org.uk/blog/school-i-never-wanted-anyone-know-about-my-anxiety',
    text = line,
    features=Features(
      emotion=EmotionOptions(),
      sentiment=SentimentOptions()))

  emotions = response['emotion']['document']['emotion']
  f.writerow([emotions['joy'],
              emotions['anger'],
              emotions['sadness'],
              emotions['fear'],
              response['sentiment']['document']['score'],
              wordCount])
Example #21
def getSentiment(id):

    with open("comments/" + id + ".txt", "r") as file:
        text = file.read()

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******',
        password='******',
        version='2017-02-27')

    response = natural_language_understanding.analyze(
        text=text,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=2)))

    return response
Example #22
def analyze(image_data):
    # Pass the URL as a list argument instead of shell=True string
    # concatenation, which was open to shell injection
    image_caption = subprocess.check_output(
        ['python', 'captioner.py', '--image=' + image_data['url']])
    # Cleaning Image Caption response: check_output returns bytes, and the
    # caption is wrapped in angle-bracket markup
    text = image_caption.decode('utf-8')
    text = text.split('>')[1]
    text = text.split('<')[0]
    print(text)
    # Adding Image Caption to Image Data
    image_data['caption'] = text
    # Calling NLU Analysis
    response = natural_language_understanding.analyze(
        text=text,
        features=Features(emotion=EmotionOptions(),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=10)))
    # Adding Emotion to Image Data
    image_data['emotion'] = response['emotion']['document']['emotion']
    return image_data
Example #23
def get_sentiment(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=ibm['u'], password=ibm['p'], version="2017-02-27")

    response = natural_language_understanding.analyze(
        text=text,
        features=Features(emotion=EmotionOptions(document=True),
                          sentiment=SentimentOptions(document=True)))

    return {
        # => valence
        'positivity': (response["sentiment"]["document"]["score"] + 1) / 2,
        # => loudness
        'anger': -60 * response["emotion"]["document"]["emotion"]["anger"],
        # => danceability
        'joy': response["emotion"]["document"]["emotion"]["joy"],
        # => bpm
        'fear': 200 * response["emotion"]["document"]["emotion"]["fear"],
        # => acousticness
        'sadness': response["emotion"]["document"]["emotion"]["sadness"]
    }
Example #24
def detect_emotion(sentence, APIKEY):  # IBM emotion detector
    try:
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2018-03-16',
            iam_apikey=APIKEY,
            url='https://gateway-syd.watsonplatform.net/natural-language-understanding/api'
        )

        response = natural_language_understanding.analyze(
            text=sentence,
            features=Features(emotion=EmotionOptions())).get_result()

        emotions_response = response["emotion"]["document"]["emotion"]

        max_likely_emotion = max(emotions_response.items(),
                                 key=operator.itemgetter(1))[0]
        print('max_likely_emotion:', max_likely_emotion)
        return max_likely_emotion
    except Exception:
        # Sentinel value returned when the API call fails
        return 25
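For reference, the operator.itemgetter pattern above is equivalent to keying max directly on the dict's lookup:

# same result without the operator module
max_likely_emotion = max(emotions_response, key=emotions_response.get)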
Example #25
def nluRun(txt):
    response = natural_language_understanding.analyze(
        text=txt,
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=3),
                          keywords=KeywordsOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=3),
                          concepts=ConceptsOptions(limit=3)))

    # Print each extracted concept with its relevance score (the tweets were
    # concatenated into a single string earlier, so there is one response)
    for concept in response["concepts"]:
        sendArray = {'Concept': concept["text"], 'Relevance': concept["relevance"]}
        print(sendArray)
Example #26
def analyse_tweet(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2018-11-16',
        iam_apikey='<key_value>',
        url='https://gateway-syd.watsonplatform.net/natural-language-understanding/api'
    )

    response = natural_language_understanding.analyze(
        text=text,
        # EmotionOptions expects a boolean, not a list of strings
        features=Features(emotion=EmotionOptions(document=True)),
        language='en').get_result()

    record = {}
    e = response['emotion']['document']['emotion']
    record['sadness'] = e['sadness']
    record['joy'] = e['joy']
    record['fear'] = e['fear']
    record['disgust'] = e['disgust']
    record['anger'] = e['anger']

    return record
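The five assignments can also be written as a dict comprehension over the fixed emotion keys; a behavior-equivalent alternative:

record = {k: e[k] for k in ('sadness', 'joy', 'fear', 'disgust', 'anger')}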
Example #27
def sentiment_analyze(speech_text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        url="https://gateway.watsonplatform.net/natural-language-understanding/api",
        username="******",
        password="******",
        version='2018-03-16')

    response = natural_language_understanding.analyze(
        text=speech_text,
        features=Features(
            #entities=EntitiesOptions(
            #emotion=True,
            #sentiment=True,
            #limit=2),
            keywords=KeywordsOptions(
                #emotion=True,
                sentiment=True,
                limit=2)))

    #print(json.dumps(response, indent=2))
    return json.dumps(response, indent=2)
Example #28
def ibm_nlu(data, filename):
    clean = [clean_tweet(tweet) for tweet in data['Text']]
    cleaned = [x for x in clean if x not in stopwords]
    # print(cleaned)
    response = natural_language_understanding.analyze(
        text=str(cleaned),
        features=Features(entities=EntitiesOptions(emotion=True,
                                                   sentiment=True,
                                                   limit=100),
                          categories=CategoriesOptions(),
                          keywords=KeywordsOptions(sentiment=True,
                                                   emotion=True,
                                                   limit=100),
                          relations=RelationsOptions(),
                          semantic_roles=SemanticRolesOptions(),
                          sentiment=SentimentOptions()))
    # print(json.dumps(response, indent=2))
    res = json.dumps(response, indent=2)
    filename = filename + '.json'
    # `with` ensures the file is actually closed (the original called
    # file.close without parentheses, which is a no-op)
    with open(filename, 'w') as file:
        file.write(res)
Example #29
def api(request, fileurl):
    print(fileurl)
    with open(fileurl, 'r') as f:
        text = f.read()

    # model_id is assumed to be a module-level custom entities model ID
    response = naturalLanguageUnderstanding.analyze(
        text=text,
        features=Features(
            entities=EntitiesOptions(model=model_id))).get_result()
    print(response)
    # print(json.dumps(response, indent=2))

    # Mask every detected entity; str.replace already substitutes all
    # occurrences, so no per-occurrence loop is needed
    for entity in response.get('entities', []):
        x = entity.get('text')
        text = text.replace(x, '*' * len(x))
    with open('./media/upload1.txt', 'w') as new:
        new.write(text)
    print("return")
    txt = "/media/upload1.txt"
    return txt
Example #30
def get_from_wdc(sentence):
    '''
        gets the sentiment score and entities in the sentence
    '''

    API_KEY = foo.API_KEY
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version=API_KEY['version'],
        username=API_KEY['username'],
        password=API_KEY['password'])

    try:
        response = natural_language_understanding.analyze(
            text=sentence,
            features=Features(entities=EntitiesOptions(),
                              sentiment=SentimentOptions()))
        # print(json.dumps(response, indent=2))
    except Exception:
        # Naive retry: this recurses indefinitely on a permanent failure
        print("entered exception")
        return get_from_wdc(sentence)

    return response["sentiment"]["document"]["score"], response["entities"]
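Because the except branch re-calls get_from_wdc unconditionally, a permanent failure (bad credentials, an unsupported language) recurses forever. A bounded variant, sketched under the assumption of a module-level natural_language_understanding client:

def get_from_wdc_bounded(sentence, max_retries=3):
    # Retry a fixed number of times, then fail loudly instead of recursing
    for _ in range(max_retries):
        try:
            response = natural_language_understanding.analyze(
                text=sentence,
                features=Features(entities=EntitiesOptions(),
                                  sentiment=SentimentOptions()))
            return response["sentiment"]["document"]["score"], response["entities"]
        except Exception:
            continue
    raise RuntimeError("NLU analyze failed after %d attempts" % max_retries)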
Example #31
def post(tweet):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username='******',
        password='******',
        version='2017-02-27')

    response = natural_language_understanding.analyze(
        text=tweet,
        features=Features(
            entities=EntitiesOptions(
                emotion=True,
                sentiment=True,
                limit=5),
            sentiment=SentimentOptions(
                document=True),
            keywords=KeywordsOptions(
                emotion=True,
                sentiment=True,
                limit=5),
            emotion=EmotionOptions(
                document=True)))
    return response
Example #32
File: watson.py  Project: assembl/assembl
def do_watson_computation(id):
    from ..models.generic import Content
    from ..models.langstrings import Locale
    from ..models.nlp import (
        DBPediaConcept,
        PostKeywordAnalysis,
        PostLocalizedConceptAnalysis,
        PostWatsonV1SentimentAnalysis,
        Tag,
    )
    with transaction.manager:
        post = waiting_get(Content, id)
        assert post
        discussion = post.discussion
        desired_locales = set(discussion.discussion_locales)
        desired_locales.add('en')  # always translate tags to English
        translator = discussion.translation_service()
        source_locale = post.body.first_original().locale.code
        if not translator.canTranslate(source_locale, "en", True):
            log.error("Not a real translation service")
        api_key = config.get("watson_api_key")
        assert api_key
        endpoint = get_endpoint(api_key)
        for computation in post.computations:
            if computation.status != "pending":
                log.debug('skipping computation %d in state %s' % (
                    computation.id, computation.status))
            else:
                features = Features._from_dict(computation.parameters)
                try:
                    lse = post.body.first_original()
                    lang = lse.locale.code
                    log.debug('watson analyzing %d' % post.id)
                    result = endpoint.analyze(
                        html=lse.value,
                        language=lang if lang != Locale.UNDEFINED else None,
                        clean=False,
                        return_analyzed_text=True,
                        features=features)
                    if result.get_status_code() != 200:
                        computation.status = "failure"
                        computation.result = result.get_headers()
                        continue
                    else:
                        result = result.get_result()
                    log.debug('watson analyzed %d' % post.id)
                    if lang == Locale.UNDEFINED:
                        lse.locale = Locale.get_or_create(result['language'])
                    computation.result = result
                    computation.status = "success"
                    for keyword in result.get('keywords', ()):
                        tag = Tag.getOrCreateTag(
                            keyword['text'], lse.locale, post.db)
                        tag.simplistic_unify(translator)
                        post.db.add(PostKeywordAnalysis(
                            post=post, source=computation,
                            value=tag, score=keyword['relevance']))
                    for category in result.get('categories', ()):
                        tag = Tag.getOrCreateTag(
                            category['label'], lse.locale, post.db)
                        tag.simplistic_unify(translator)
                        post.db.add(PostKeywordAnalysis(
                            post=post, source=computation, category=True,
                            value=tag, score=category['score']))
                    for concept in result.get('concepts', ()):
                        dbconcept = DBPediaConcept.get_or_create(
                            concept['dbpedia_resource'], post.db)
                        dbconcept.identify_languages(desired_locales, post.db)
                        post.db.add(PostLocalizedConceptAnalysis(
                            post=post, source=computation,
                            value=dbconcept, score=concept['relevance']))
                    sentiments = {}
                    if result.get('emotion', None):
                        emotion = result['emotion']['document']['emotion']
                        sentiments.update(dict(
                            anger=emotion['anger'],
                            disgust=emotion['disgust'],
                            fear=emotion['fear'],
                            joy=emotion['joy'],
                            sadness=emotion['sadness'],
                        ))
                    if result.get('sentiment', None):
                        sentiments['sentiment'] = \
                            result['sentiment']['document']['score']
                    if sentiments:
                        post.db.add(PostWatsonV1SentimentAnalysis(
                            post=post,
                            source=computation,
                            text_length=len(result['analyzed_text']),
                            **sentiments
                        ))
                except Exception:
                    capture_exception()
                    computation.result = traceback.format_exc()
                    computation.status = "failure"
                    computation.retries = (computation.retries or 0) + 1