Example 1
    def replace_words(self, newpoem, feelings):
        try:
            feelings = str(feelings)
            newpoem = str(newpoem)

            # Pull keywords out of the poem and out of the "feelings" text
            poem_nla = self.natural_language_understanding.analyze(
                text=newpoem, features=[features.Keywords()])
            feelings_nla = self.natural_language_understanding.analyze(
                text=feelings, features=[features.Keywords()])

            # Keep the keyword strings as str; encoding them to bytes would
            # make str.replace() fail on Python 3
            fw = [keyword['text'] for keyword in feelings_nla["keywords"]]
            pw = [keyword['text'] for keyword in poem_nla["keywords"]]

            # Swap one random poem keyword for one random feelings keyword
            # (requires: from random import choice)
            return newpoem.replace(choice(pw), choice(fw))
        except Exception:
            return None
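For reference, a minimal sketch of the NLU setup that replace_words depends on, assuming the 2017-era watson_developer_cloud import layout (the credentials below are placeholders, not values from the original):

from watson_developer_cloud import NaturalLanguageUnderstandingV1
import watson_developer_cloud.natural_language_understanding.features.v1 as features

nlu = NaturalLanguageUnderstandingV1(
    version='2017-02-27', username='<username>', password='<password>')

# analyze() returns a plain dict; "keywords" maps to a list of
# {"text": ..., "relevance": ...} entries
result = nlu.analyze(text='joy and wonder', features=[features.Keywords()])
print([kw['text'] for kw in result['keywords']])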
Example 2
def eval_default():
    response = nlu.analyze(
        text='Bruce Banner is the Hulk and Bruce Wayne is BATMAN! '
        'Superman fears not Banner, but Wayne.',
        features=[features.Entities(),
                  features.Keywords()])
    return jsonify(response)
Example 3
    def parse_foods(path, format='mp3'):
        """
        Parses the audio file and returns the foods contained in the speech.
        :param path: path to the audio file
        :param format: audio format, used to build the content type
        :return: the list of foods, JSON-encoded
        """
        with open(path, 'rb') as audio:
            # Transcribe the audio with Watson Speech to Text
            stt_result = SpeechAnalysis.speech_to_text(
                '6d736cd4-1e01-4d6c-9c61-78ce7f803024',
                'SBE03B3o5PTP').recognize(audio,
                                          content_type='audio/' + format,
                                          timestamps=True,
                                          word_confidence=True)
            text = stt_result['results'][0]['alternatives'][0]['transcript']

            # Extract keywords from the transcript with Watson NLU
            keywords = SpeechAnalysis.nlp_parse(
                '991ee8cd-25f9-4b5a-a1f8-cfe5834dfcb9',
                'Jtera8xCDVup').analyze(text=text,
                                        features=[features.Keywords()])
            keyword_list = [k['text'] for k in keywords['keywords']]

            # food = wn.synset('food.n.02')
            with open("./nltk_food2.p", "rb") as f:
                food = pickle.load(f)

            # Keep only keywords present in the pickled food lexicon
            foods = [k for k in keyword_list if k in food]
            return json.dumps(foods)
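A hedged invocation sketch: parse_foods takes no self, so it is presumably a @staticmethod on the SpeechAnalysis class its body references (the audio path is a placeholder):

# assumes parse_foods is a @staticmethod on SpeechAnalysis
print(SpeechAnalysis.parse_foods("meal_recording.mp3", format="mp3"))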
Example 4
def run_watson_nlu():
    files = glob.glob('work/bug-*.json')
    (user, passwd) = get_auth()
    # One client can be reused across all files
    nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
        version='2017-02-27', username=user, password=passwd)
    for fname in files:
        with open(fname) as f:
            LOG.debug("Processing %s" % fname)
            bug = json.load(f)
            num = bug["link"].split("/")[-1]
            with open("work/res-%s.json" % num, "w") as out:
                res = nlu.analyze(text=bug["comments"],
                                  features=[
                                      features.Concepts(),
                                      features.Keywords(),
                                      features.Emotion(),
                                      features.Sentiment(),
                                  ])
                output = {
                    "link": bug["link"],
                    "tags": bug["tags"],
                    "importance": bug["importance"],
                    "length": len(bug["comments"]),
                    "results": res
                }
                out.write(json.dumps(output, indent=4))
Example 5
def get_keywords(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username="******",
        password="******",
        version="2017-02-27")

    response = natural_language_understanding.analyze(
        text=text,
        features=[
            Features.Entities(emotion=True, sentiment=True, limit=10),
            Features.Keywords(emotion=True, sentiment=True, limit=10)
        ])

    # print(json.dumps(response, indent=2))

    to_ret = {}

    # Keywords carry no linked resource, so use '#' as a placeholder
    for a in response["keywords"]:
        to_ret[a['text']] = '#'

    # Entities may carry a DBpedia link under "disambiguation"
    for a in response["entities"]:
        try:
            to_ret[a['text']] = a['disambiguation']['dbpedia_resource']
        except KeyError:
            to_ret[a['text']] = '#'

    return to_ret
Example 6
def watsonanalyze(stringinput):
    # analyze() already returns a dict, so no JSON round-trip is needed
    response = natural_language_understanding.analyze(
        text=stringinput,
        features=[
            Features.Entities(emotion=True, sentiment=True, limit=2),
            Features.Keywords(emotion=True, sentiment=True, limit=20)
        ])
    return response
Example 7
def alexa_analyze():
    if request.method == 'POST':
        content = request.get_json(force=True)
        name = content["request"]["intent"]["name"]
        # query stays None for intents this handler does not know about;
        # the original referenced it before assignment in that case
        query = None
        if name == "IsRecyclable":
            query = content["request"]["intent"]["slots"]["recycle"]["value"]
        elif name == "IsCompostable":
            query = content["request"]["intent"]["slots"]["compost"]["value"]

        # Default to a negative answer; flipped below if MeaningCloud
        # classifies a keyword as Recyclable/Compostable
        desc = ("No, you cannot recycle a " + str(query)) if (
            name == "IsRecyclable") else ("No, you cannot compost a " +
                                          str(query))
        if query:
            NLU_response = NLU.analyze(text=query,
                                       features=[features.Keywords()])
            for i in NLU_response["keywords"]:
                payload = "key={}&txt={}&model={}".format(
                    MC_KEY, i["text"], MC_MODEL)
                MC_response = requests.request("POST",
                                               url=MC_URL,
                                               data=payload,
                                               headers=MC_HEADERS)
                MC_response = json.loads(MC_response.text)
                if len(MC_response["category_list"]) > 0:
                    label = MC_response["category_list"][0]["label"]
                    if label == "Compostable" and name == "IsCompostable":
                        desc = "Yes, you can compost a {}".format(query)
                        break
                    elif label == "Recyclable" and name == "IsRecyclable":
                        desc = "Yes, you can recycle a {}".format(query)
                        break
        alexa_response = {
            "version": "1.0",
            "response": {
                "outputSpeech": {
                    "type": "PlainText",
                    "text": desc,
                }
            }
        }
        return jsonify(alexa_response)
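For local testing, a hedged sketch of the minimal Alexa-style payload this handler expects; the field layout is inferred from the handler above, and the route path is a guess since it is not shown in the source:

import json
import requests

payload = {
    "request": {
        "intent": {
            "name": "IsRecyclable",
            "slots": {"recycle": {"value": "plastic bottle"}}
        }
    }
}
# '/alexa' is a placeholder; adjust to the app's actual URL map
r = requests.post("http://localhost:5000/alexa", data=json.dumps(payload))
print(r.json()["response"]["outputSpeech"]["text"])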
Example 8
def nl_processing(reqd_text):
    response = natural_language_understanding.analyze(text=reqd_text,
                                                      features=[
                                                          features.Entities(),
                                                          features.Keywords(),
                                                          features.Emotion(),
                                                          features.Concepts(),
                                                          features.Sentiment()
                                                      ])
    return response
Example 9

def nlp(input_stuff):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username="******",
        password="******")

    response = natural_language_understanding.analyze(
        text=input_stuff, features=[features.Entities(),
                                    features.Keywords()])
    return response["entities"]
Example 10
def analyze(s):
    response = natural_language_understanding.analyze(text=s,
                                                      features=[
                                                          Features.Keywords(
                                                              emotion=True,
                                                              sentiment=True,
                                                              limit=2),
                                                          Features.Sentiment()
                                                      ])
    return response
Example 11
def nlp(input_stuff):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username='******',
        password='******')

    response = natural_language_understanding.analyze(
        text=input_stuff, features=[features.Entities(),
                                    features.Keywords()])
    return response["entities"]
Example 12
def _execute_request(text):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username=constants.WATSON_USER,
        password=constants.WATSON_PASS,
        version="2017-02-27")

    response = natural_language_understanding.analyze(
        text=text, features=[features.Keywords()])

    return response
Example 13
    def analyze_sentiment(self, answer):
        result = self.nlu.analyze(
            text=answer, features=[features.Keywords(),
                                   features.Sentiment()])
        # Take the top-ranked keyword, if any were found
        if result['keywords']:
            keyword = result['keywords'][0]['text']
        else:
            keyword = None
        sentiment = result['sentiment']['document']['score']
        return sentiment, keyword
Example 14
def main():
    with open("compost.txt", "r") as f:
        strings = f.read().split()
    # Analyze one whitespace-separated token at a time
    while strings:
        response = NLU.analyze(text=strings.pop(0),
                               features=[
                                   features.Keywords(),
                                   features.Categories(),
                                   features.Concepts()
                               ])
        print(response["keywords"][0]["text"])
Example 15
def analyze_url(url):
    try:
        response = natural_language_understanding.analyze(
            url=url, features=[features.Keywords(),
                               features.Categories()])
    except WatsonException as e:
        print("analyze_url: WatsonException")
        if 'Error: limit exceeded for free plan, Code: 403' in e.args:
            raise e
        return None

    return response
Example 16
def worker():
    global q
    print('Worker Initialized')
    attributes = ['id', 'text', 'time', 'lat', 'lon']
    while True:
        responses = q.receive_messages(MessageAttributeNames=attributes)
        if len(responses) != 0:
            for response in responses:
                if response.message_attributes is None:
                    response.delete()
                    continue
                id = response.message_attributes.get('id').get('StringValue')
                text = response.message_attributes.get('text').get(
                    'StringValue')
                time = response.message_attributes.get('time').get(
                    'StringValue')
                lat = response.message_attributes.get('lat').get('StringValue')
                lon = response.message_attributes.get('lon').get('StringValue')
                try:
                    natural_language_understanding = NaturalLanguageUnderstandingV1(
                        version='2017-02-27',
                        username=nlu_creds['username'],
                        password=nlu_creds['password']
                    )

                    nlu_response = natural_language_understanding.analyze(
                        text=text,
                        features=[features.Entities(), features.Keywords(),
                                  features.Sentiment()]
                    )

                    sentiment = nlu_response['sentiment']['document']['label']
                except Exception as e:
                    print('Error:', e)
                    sentiment = 'neutral'

                # Send to AWS SNS
                notification = {
                    'id': id,
                    'text': text,
                    'time': time,
                    'lat': lat,
                    'lon': lon,
                    'sentiment': sentiment
                }
                try:
                    print(notification)
                    sns.publish(TargetArn=sns_arn,
                                Message=json.dumps(
                                    {'default': json.dumps(notification)}))
                    response.delete()
                except Exception as e:
                    print('Error:', e)
        sleep(2)
Example 17
def get_keywords(text):
    try:
        response = natural_language_understanding.analyze(
            text=text, features=[features.Keywords()])

        keywords = [k['text'] for k in response['keywords']]

    except Exception as e:
        print(e)
        return []

    return keywords
Example 18
def open_request():
    data = request.get_json()
    text = data['transcription']
    patient_id = data['patient_id']
    if text not in watson_cache:
        try:
            watson_cache[text] = natural_language_understanding.analyze(
                text=text,
                features=[features.Keywords(), features.Sentiment()])
        except WatsonException as err:
            print(err)
            # If the call failed, text is still absent from the cache and
            # the lookup below will raise KeyError
    enqueue(patient_id, text, watson_cache[text])
    return jsonify({'result': watson_cache[text]})
Example 19
def extract_sentiment_ner_trec_full(src, dst_dir):
    """
    Extracts tweet overall sentiment, sentiment per NER, NERs,
    keywords, and sentiment per keyword for the full dataset, which is
    read from a .txt file.

    Parameters
    ----------
    src: str - path to dataset.
    dst_dir: str - directory in which results will be stored.

    """
    tweets = read_txt(src)
    # Since tweets are ordered according to topic, label them in a
    # random order
    keys = list(tweets.keys())
    random.shuffle(keys)

    for idx, tid in enumerate(keys):
        fname = "{}.json".format(tid)
        dst = os.path.join(dst_dir, fname)
        # If the file already exists, data was extracted before and, due
        # to rate-limiting, the rest couldn't be extracted
        if not os.path.isfile(dst):
            try:
                # Extract features for a tweet via Watson
                response = natural_language_understanding.analyze(
                    text=tweets[tid]["text"],
                    features=[
                        # Overall tweet sentiment
                        Features.Sentiment(),
                        # NER detection and sentiment per NER
                        Features.Entities(sentiment=False),
                        Features.Keywords(sentiment=False),
                    ])

                # Store results in UTF-8 encoding
                with codecs.open(dst, "w", encoding="utf-8") as f:
                    # https://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence
                    f.write(json.dumps(response, ensure_ascii=False))
            # Illegal language
            except watson_developer_cloud.watson_developer_cloud_service.\
                    WatsonException:
                pass
        print("Finished extraction for {} tweets".format(idx + 1))
Example 20
def get_text_data(text, language):
    username = os.environ.get("BLUEMIX-NLU-USERNAME")
    password = os.environ.get("BLUEMIX-NLU-PASSWORD")

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version="2017-02-27",
        username=username,
        password=password
    )
    return natural_language_understanding.analyze(
        text=text,
        features=[features.Emotion(), features.Sentiment(), features.Keywords()],
        language=language
    )
Example 21
def map_feature(name):
    feature_name_mappings = {
        'keywords': features.Keywords(),
        'entities': features.Entities(),
        'concepts': features.Concepts(),
        'categories': features.Categories(),
        'sentiment': features.Sentiment(),
        'emotion': features.Emotion()
    }
    if name in feature_name_mappings:
        return feature_name_mappings[name]
    else:
        print("Invalid feature name")
        return None
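A short usage sketch for map_feature, building a feature list from user-supplied names and dropping unknown ones (map_feature prints a warning and returns None for those; the name list is illustrative):

requested = ['keywords', 'sentiment', 'not-a-feature']
feature_list = [f for f in (map_feature(n) for n in requested) if f is not None]
# feature_list can be passed straight to analyze(text=..., features=feature_list)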
Example 22
def watson(user_url):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        username="******",
        password="******",
        version="2017-02-27")

    response = natural_language_understanding.analyze(
        url=user_url,
        features=[Features.Keywords(emotion=False, sentiment=False, limit=15)])
    keywords = []
    # Keep up to 8 sufficiently relevant keywords; leave them as str so
    # callers don't receive bytes on Python 3
    for keyword in response['keywords']:
        if keyword['relevance'] > 0.80 and len(keywords) < 8:
            keywords.append(keyword['text'])
    return keywords
Example 23

def nlu(text):
    response = n.analyze(text=text,
                         features=[
                             features.Emotion(),
                             features.Concepts(),
                             features.Categories(),
                             features.Entities(),
                             features.Keywords(),
                             features.SemanticRoles(),
                             features.Relations(),
                             features.Sentiment()
                         ],
                         language='en')
    return json.dumps(response, indent=2)
Example 24
def extract_data(text):
    # Use Watson's NLU API to extract the keywords, entities and concepts from a text
    bm_username = "******"
    bm_password = "******"

    nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
        version='2017-02-27', username=bm_username, password=bm_password)
    ents = nlu.analyze(text=text,
                       features=[
                           features.Entities(),
                           features.Keywords(),
                           features.Concepts()
                       ])

    ents["tweet"] = text
    return ents
Example 25
def test_model():

    model = request.forms.get('model')

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username="******",
        password='******')

    response = natural_language_understanding.analyze(text=model,
                                                      features=[
                                                          features.Entities(),
                                                          features.Keywords(),
                                                          features.Concepts()
                                                      ])

    return json.dumps(response, indent=2)
Example 26
def extract_sentiment_ner_twitter(cleaned, min_annos, dst_dir):
    """
    Extracts tweet overall sentiment, sentiment per NER, NERs,
    keywords, and sentiment per keyword.

    Parameters
    ----------
    cleaned: bool - True if only cleaned data should be used (i.e. any
    additional labels (and their annotation times) assigned to tweets
    considered "Irrelevant" are ignored).
    min_annos: int - minimum number of annotators who must have labeled
    a tweet for it to be considered.
    dst_dir: str - directory in which results will be stored.

    """
    tweets = read_twitter(cleaned, min_annos)
    for idx, tid in enumerate(tweets):
        tweet = tweets[tid]
        fname = "{}.json".format(tid)
        dst = os.path.join(dst_dir, fname)
        print(tweet["text"])
        # If the file already exists, data was extracted before and, due
        # to rate-limiting, the rest couldn't be extracted
        if not os.path.isfile(dst):
            # Extract features for a tweet via Watson
            response = natural_language_understanding.analyze(
                text=tweet["text"],
                features=[
                    # Overall tweet sentiment
                    Features.Sentiment(),
                    # NER detection and sentiment per NER
                    Features.Entities(sentiment=True),
                    Features.Keywords(sentiment=True),
                ])

            # Store results in UTF-8 encoding
            with codecs.open(dst, "w", encoding="utf-8") as f:
                # https://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence
                f.write(json.dumps(response, ensure_ascii=False))
        print("Finished extraction for {} tweets".format(idx + 1))
Example 27
def respond_chat(request):

    print("respond_chat got called")
    global response
    global inp

    # Getting the last context for the concerned user
    last_context = Mess.objects.filter(user=request.user).last().context

    # Sending the message to the bot and fetching a response
    print("INP------------")
    print(inp)

    nlu_response = nlu.analyze(
        text=inp, features=[features.Entities(),
                            features.Keywords()])

    if last_context == "":
        response = conversation.message(workspace_id=workspace_id,
                                        message_input={'text': inp})
    else:
        # The context was stored with repr() below, so eval() restores
        # the dict; note that eval() on stored text is unsafe in general
        response = conversation.message(workspace_id=workspace_id,
                                        message_input={'text': inp},
                                        context=eval(last_context))

    action = identifier(response, nlu_response)

    print("ACTION REQUESTED")
    print(action)

    new_mess = Mess(text=response['output']['text'][0],
                    user=request.user,
                    created=timezone.now(),
                    reality_coefficient=False,
                    context=repr(response['context']))
    new_mess.save()
    response_text = serializers.serialize('json', Mess.objects.all())
    return HttpResponse(response_text, content_type='application/json')
Example 28
def clasificarDescripcion(pDescripcion):
    # if 'VCAP_SERVICES' in os.environ:
    #     services = json.loads(os.getenv('VCAP_SERVICES'))
    with open('AutosClasificados\core\config.json') as json_data_file:
        vConfig = json.load(json_data_file)
        vAPIUserNLU = vConfig["watsonNLU"]["vAPIUser"]
        vAPIPassNLU = vConfig["watsonNLU"]["vAPIPass"]
        vAPIVersionNLU = vConfig["watsonNLU"]["vAPIVersion"]
        vUmbralMinScore_WNLU = vConfig["watsonNLU"]["vUmbralMinScore_WNLU"]
        vUmbralMinDescripcion = vConfig["otros"]["vUmbralMinDescripcion"]
    vWatson_NLU = NaturalLanguageUnderstandingV1(
        username=vAPIUserNLU, password=vAPIPassNLU, version=vAPIVersionNLU)
    vListaKeywords = list()
    try:
        # Only classify descriptions longer than the configured minimum
        if len(pDescripcion) > vUmbralMinDescripcion:
            vResultado_NLU = vWatson_NLU.analyze(
                text=pDescripcion,
                features=[
                    Features.Entities(emotion=True, sentiment=True, limit=6),
                    Features.Keywords(emotion=True, sentiment=True, limit=6)
                ],
                language="en")

            if vResultado_NLU['keywords']:
                for vEntity in vResultado_NLU['entities']:
                    print(vEntity)
                # Keep only keywords above the minimum relevance score
                for vResultado in vResultado_NLU['keywords']:
                    print(vResultado)
                    if vResultado['relevance'] > vUmbralMinScore_WNLU:
                        vListaKeywords.append(vResultado['text'])
                return vListaKeywords
    except Exception:
        vListaKeywords.append('No keywords available')
        return vListaKeywords
Example 29
def get_sentiment(txt):
    response = natural_language_understanding.analyze(
        text=txt,
        features=[
            Features.Entities(emotion=True, sentiment=True, limit=2),
            Features.Keywords(emotion=True, sentiment=True, limit=2)
        ])

    print(json.dumps(response, indent=2))
    return response
Example 30
    def report(self, text):
        """
        Returns the Watson Data for a specific text.
        """

        # Real Call
        payload = self.natural_language_understanding.analyze(
            text=text,
            features=[
                features.Entities(),
                features.Keywords(),
                features.Emotion()
            ])

        # Fake Call, since we only have limited access to IBM
        # payload = self.mock_watson(text)

        return payload
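A hedged sketch of what the commented-out mock_watson fallback might look like; the real implementation is not shown, so the body below is an assumption that merely mirrors the dict shape analyze() returns for these features:

    def mock_watson(self, text):
        # Canned placeholder payload shaped like an analyze() response
        first_word = text.split()[0] if text else ""
        return {
            "entities": [],
            "keywords": [{"text": first_word, "relevance": 1.0}],
            "emotion": {},
        }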