def getSentimentAnalysis(url):
    """
    Get sentiment analysis of a given URL.

    :param url: URL to get sentiment analysis for
    :returns: Sentiment magnitude and emotion analysis in JSON format
    """
    html = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(html, 'html.parser')
    text = textFromHtml(html)
    natural_language_understanding = nl.enableWatsonNatLang()
    response = natural_language_understanding.analyze(
        text=text,
        features=[
            # Get the overall sentiment of the text
            Features.Sentiment(document=True),
            # Get emotion towards relevant entities (max: 3)
            Features.Entities(emotion=True, limit=3)
        ]
    )
    return json.dumps(response, indent=2)
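Example #1 calls a textFromHtml helper that is not shown. A minimal sketch of what it plausibly does, assuming it strips non-visible elements and returns the page's plain text (the behavior is inferred, not confirmed by the source):

from bs4 import BeautifulSoup

def textFromHtml(html):
    # Hypothetical helper: extract the visible text from raw HTML.
    soup = BeautifulSoup(html, 'html.parser')
    # Remove elements that never contribute visible text.
    for tag in soup(['script', 'style']):
        tag.decompose()
    return soup.get_text(separator=' ', strip=True)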
Example #2
    def on_data(self, data):
        try:
            #json_data = status._json
            tweet = json.loads(data)
            textdata = tweet['text']
            # print textdata
            if tweet['coordinates']:
                print(tweet['coordinates'])
            #response = queue.send_message(MessageBody=tweet['text'])
            # print (tweet)
            producer = KafkaProducer(
                value_serializer=lambda v: json.dumps(v).encode('utf-8'))
            producer.send('fizzbuzz', textdata)

            consumer = KafkaConsumer(bootstrap_servers='localhost:9092',
                                     auto_offset_reset='earliest')

            consumer.subscribe(['fizzbuzz'])
            for message in consumer:
                print(message)

            nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
                version='2017-02-27', username='', password='')
            nlu.analyze(text=textdata, features=[features.Sentiment()])

        except Exception as e:
            #print("exception: "+e)
            pass

    def on_error(self, status):
        print(status)
        return True
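Note that Example #2 constructs a new KafkaProducer (and a consumer) for every incoming tweet, which is expensive. A minimal sketch of hoisting the producer to module scope instead, assuming the same 'fizzbuzz' topic and a broker on localhost:9092:

import json
from kafka import KafkaProducer

# Create the producer once, not per message.
producer = KafkaProducer(
    bootstrap_servers='localhost:9092',
    value_serializer=lambda v: json.dumps(v).encode('utf-8'))

def publish_tweet_text(textdata):
    # send() is asynchronous; .get() blocks until the broker acknowledges delivery.
    producer.send('fizzbuzz', textdata).get(timeout=10)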
Example #3
def run_watson_nlu():
    files = glob.glob('work/bug-*.json')
    (user, passwd) = get_auth()
    for fname in files:
        with open(fname) as f:
            LOG.debug("Processing %s" % fname)
            bug = json.loads(f.read())
            num = bug["link"].split("/")[-1]
            with open("work/res-%s.json" % num, "w") as out:
                nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(
                    version='2017-02-27', username=user, password=passwd)
                res = nlu.analyze(text=bug["comments"],
                                  features=[
                                      features.Concepts(),
                                      features.Keywords(),
                                      features.Emotion(),
                                      features.Sentiment(),
                                  ])
                output = {
                    "link": bug["link"],
                    "tags": bug["tags"],
                    "importance": bug["importance"],
                    "length": len(bug["comments"]),
                    "results": res
                }
                out.write(json.dumps(output, indent=4))
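Example #3 calls a get_auth helper that is not shown. A minimal sketch, assuming the credentials live in environment variables (the variable names are illustrative):

import os

def get_auth():
    # Hypothetical helper: return (username, password) for the NLU service.
    return os.environ['NLU_USERNAME'], os.environ['NLU_PASSWORD']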
Example #4
def worker_main(queue):
    while True:
        messages = queue.receive_messages(MessageAttributeNames=['Id', 'Tweet', 'Latitude', 'Longitude'])
        print("Received messages")
        if len(messages)>0:
            for message in messages:
                # Get the custom author message attribute if it was set
                if message.message_attributes is not None:
                    id = message.message_attributes.get('Id').get('StringValue')
                    tweet = message.message_attributes.get('Tweet').get('StringValue')
                    lat = message.message_attributes.get('Latitude').get('StringValue')
                    lng = message.message_attributes.get('Longitude').get('StringValue')
                    try:
                        response = natural_language_understanding.analyze(
                            text=tweet, features=[features.Sentiment()])
                        senti = response["sentiment"]["document"]["label"]
                        print(senti)
                        # senti = response.get('docSentiment').get('type')
                    except Exception as e:
                        print("ERROR: "+str(e))
                        senti = "neutral"
                    # Using SNS
                    sns_message = {"id":id, "tweet":tweet, "lat":lat, "lng": lng, "sentiment":senti}
                    print("SNS messsage: "+str(sns_message))
                    sns.publish(TargetArn=arn, Message=json.dumps({'default':json.dumps(sns_message)}))
                # print('Id: {0}; Tweet: {1}; Latitude: {2}; Longitude: {3}; sentiment: {4}'.format(id,tweet,lat,lng,senti))
                # Let the queue know that the message is processed
                message.delete()
        else:
            time.sleep(1)
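A caveat on the SNS publish in Example #4 (and in similar examples below): for SNS to interpret the {'default': ...} envelope per protocol, boto3's publish call needs MessageStructure='json'; otherwise the whole envelope is delivered verbatim. A sketch of the variant, using the same arn and sns_message names:

sns.publish(
    TargetArn=arn,
    MessageStructure='json',  # tells SNS to unpack the {'default': ...} envelope
    Message=json.dumps({'default': json.dumps(sns_message)}))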
Example #5
def analyse(queue, sns_topic):
    messages = queue.receive_messages(MessageAttributeNames=['All'],
                                      VisibilityTimeout=30,
                                      MaxNumberOfMessages=10)
    for message in messages:
        if message.body is not None and message.message_attributes is not None:
            #print(message.body)
            sns_message = {}
            nlp = NaturalLanguageUnderstandingV1(
                version='2017-02-27',
                url="https://gateway.watsonplatform.net/natural-language-understanding/api",
                username=configure.ibm_username,
                password=configure.ibm_password)
            response = nlp.analyze(text=message.body,
                                   features=[features.Sentiment()])
            print(response["sentiment"]["document"]["label"])
            if response["sentiment"]["document"]["label"] is not None:
                # Each SQS message attribute is a dict such as
                # {'StringValue': ..., 'DataType': ...}, so unwrap StringValue.
                username = message.message_attributes["username"]["StringValue"]
                sentiment = response["sentiment"]["document"]["label"]
                lat = message.message_attributes["lat"]["StringValue"]
                long = message.message_attributes["long"]["StringValue"]
                timestamp_ms = message.message_attributes["timestamp_ms"]["StringValue"]
                sns_message["username"] = username
                sns_message["content"] = message.body
                sns_message["lat"] = lat
                sns_message["long"] = long
                sns_message["sentiment"] = sentiment
                sns_message["timestamp_ms"] = timestamp_ms
                message_for_send = json.dumps(sns_message)
                response1 = sns_topic.publish(Message=message_for_send)
                print(response1)
            else:
                print("sentiment analysis error!")
Example #6
    def on_status(self, status):
        natural_language_understanding = NaturalLanguageUnderstandingV1(
            version='2017-02-27',
            username='******',
            password='******')
        twitts = status.text
        coordinates = status.coordinates
        language = status.user.lang

        if status.place and language == 'en':
            if coordinates is not None and len(coordinates) > 0:
                coordinates = status.coordinates['coordinates']
                print('coordinates: ', coordinates)
                print('twitts: ', twitts)

                try:
                    response = natural_language_understanding.analyze(
                        text=twitts, features=[features.Sentiment()])
                    sentiment = response['sentiment']['document']['label']
                except Exception as e:
                    sentiment = "neutral"
                print(sentiment)

                upload_data = {
                    "twitts": twitts,
                    "coordinates": coordinates,
                    "sentiment": sentiment
                }

                print(requests.post(
                    'https://search-trends-pnoxxtizp4zrbmwnvgsifem74y.us-east-1.es.amazonaws.com/twittmap/data',
                    json=upload_data))

        return True
Example #7
def emotionRank(url):
    response = natural_language_understanding.analyze(
        url=url,
        features=[
            Features.Sentiment()
        ])
    return response
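Examples #7, #12, #13 and several others below call a module-level natural_language_understanding client that is never constructed in the snippets. A minimal sketch of what that shared client plausibly looks like, mirroring the constructors shown elsewhere on this page (credentials are placeholders):

from watson_developer_cloud import NaturalLanguageUnderstandingV1

# Hypothetical module-level client shared by the snippets below.
natural_language_understanding = NaturalLanguageUnderstandingV1(
    version='2017-02-27',
    username='******',
    password='******')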
Example #8
def get_sentiment(content):
    try:
        resp = nlp.analyze(text=content, features=[features.Sentiment()])
        response = resp["sentiment"]["document"]["score"]
        print(str(response) + ": " + content)
        return response
    except Exception:
        print("Error getting sentiment from Watson NLU")
        return None
Example #9
def sentiment(tips):
    # Helper function to return text sentiment analysis
    # Load Watson credentials
    username = os.environ.get('NLU_USERNAME')
    password = os.environ.get('NLU_PASSWORD')
    nlu = watson_developer_cloud.NaturalLanguageUnderstandingV1(version='2017-02-27',
        username=username, password=password)
    output = nlu.analyze(text=tips, features=[features.Sentiment()])
    return output['sentiment']['document']['score']
Example #10
def main():
    load_dotenv(find_dotenv())
    nlu_username = os.environ.get('NLU_USERNAME')
    nlu_password = os.environ.get('NLU_PASSWORD')
    nlu = NLU(username=nlu_username, password=nlu_password, version='2017-02-27')

    result = nlu.analyze(text='I hate galvanize', features=[features.Sentiment()])['sentiment']['document']

    print(result['label'], result['score'])
Example #11
    def test_analyze_throws(self):
        nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                             url='http://bogus.com',
                                             username='******',
                                             password='******')
        with pytest.raises(ValueError):
            nlu.analyze([features.Sentiment()])
        with pytest.raises(ValueError):
            nlu.analyze([], text="this will not work")
Example #12
def nl_processing(reqd_text):
    response = natural_language_understanding.analyze(text=reqd_text,
                                                      features=[
                                                          features.Entities(),
                                                          features.Keywords(),
                                                          features.Emotion(),
                                                          features.Concepts(),
                                                          features.Sentiment()
                                                      ])
    return response
Example #13
def analyze(s):
    response = natural_language_understanding.analyze(text=s,
                                                      features=[
                                                          Features.Keywords(
                                                              emotion=True,
                                                              sentiment=True,
                                                              limit=2),
                                                          Features.Sentiment()
                                                      ])
    return response
Example #14
def getData(user, pw, doc):
    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-02-27',
        username=user,
        password=pw,
        url='https://gateway.watsonplatform.net/natural-language-understanding/api')
    response = natural_language_understanding.analyze(
        text=doc,
        features=[features.Sentiment(), features.Emotion()])
    return json.dumps(response)
Example #15
    def analyse_emotions(self, tweet):
        # print('Analysis Text:',tweet)
        response = natural_language_understanding.analyze(
            text=tweet, features=[Features.Emotion(),
                                  Features.Sentiment()])

        list_emotion = list(
            response['emotion']['document']['emotion'].values())
        list_emotion.append(response['sentiment']['document']['score'])
        print('List emotion is : ', list_emotion)
        return list_emotion
Example #16
    def analyze_sentiment(self, answer):
        result = self.nlu.analyze(
            text=answer, features=[features.Keywords(),
                                   features.Sentiment()])
        if result['keywords']:
            keywords = result['keywords'][0]
            keyword = keywords['text']
        else:
            keyword = None
        sentiment = result['sentiment']['document']['score']
        return sentiment, keyword
Example #17
    def test_text_analyze(self):
        nlu_url = "http://bogus.com/v1/analyze"
        responses.add(responses.POST, nlu_url,
                      body="{\"resulting_key\": true}", status=200,
                      content_type='application/json')
        nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                             url='http://bogus.com',
                                             username='******',
                                             password='******')
        nlu.analyze([features.Sentiment()], text="hello this is a test")
        assert len(responses.calls) == 1
Example #18
def worker():
    global q
    print('Worker Initialized')
    attributes = ['id', 'text', 'time', 'lat', 'lon']
    while True:
        responses = q.receive_messages(MessageAttributeNames=attributes)
        if len(responses) != 0:
            for response in responses:
                if response.message_attributes is None:
                    response.delete()
                    continue
                id = response.message_attributes.get('id').get('StringValue')
                text = response.message_attributes.get('text').get(
                    'StringValue')
                time = response.message_attributes.get('time').get(
                    'StringValue')
                lat = response.message_attributes.get('lat').get('StringValue')
                lon = response.message_attributes.get('lon').get('StringValue')
                try:
                    natural_language_understanding = NaturalLanguageUnderstandingV1(
                        version='2017-02-27',
                        username=nlu_creds['username'],
                        password=nlu_creds['password'])

                    nlu_response = natural_language_understanding.analyze(
                        text=text,
                        features=[features.Entities(), features.Keywords(),
                                  features.Sentiment()])

                    sentiment = nlu_response['sentiment']['document']['label']
                except Exception as e:
                    print('Error:', e)
                    sentiment = 'neutral'

                # Send to AWS SNS
                notification = {
                    'id': id,
                    'text': text,
                    'time': time,
                    'lat': lat,
                    'lon': lon,
                    'sentiment': sentiment
                }
                try:
                    print(notification)
                    sns.publish(TargetArn=sns_arn,
                                Message=json.dumps(
                                    {'default': json.dumps(notification)}))
                    response.delete()
                except Exception as e:
                    print('Error:', e)
        sleep(2)
Example #19
    def analyze(self, content, keyword):
        response = self.natural_language_understanding.analyze(
            text=content,
            features=[
                # Target the sentiment analysis at the given keyword
                Features.Sentiment(targets=[keyword])
            ]
        )
        score = response["sentiment"]["document"]["score"]
        return score
Example #20
    def test_url_analyze(self):
        nlu_url = "http://bogus.com/v1/analyze"
        responses.add(responses.POST, nlu_url,
                      body="{\"resulting_key\": true}", status=200,
                      content_type='application/json')
        nlu = NaturalLanguageUnderstandingV1(version='2016-01-23',
                                             url='http://bogus.com',
                                             username='******',
                                             password='******')
        nlu.analyze([features.Sentiment(),
                     features.Emotion(document=False)], url="http://cnn.com",
                    xpath="/bogus/xpath", language="en")
        assert len(responses.calls) == 1
Example #21
def open_request():
    data = request.get_json()
    text = data['transcription']
    patient_id = data['patient_id']
    if text not in watson_cache:
        try:
            watson_cache[text] = natural_language_understanding.analyze(
                text=text,
                features=[features.Keywords(), features.Sentiment()])
        except WatsonException as err:
            print(err)
    enqueue(patient_id, text, watson_cache[text])
    return jsonify({'result': watson_cache[text]})
Example #22
def extract_sentiment_ner_trec_full(src, dst_dir):
    """
    Extracts tweet overall sentiment, sentiment per NER, NERs,
    keywords, sentiment per keyword for the full dataset that's read from a
    .txt file.

    Parameters
    ----------
    src: str - path to dataset.
    dst_dir: str - directory in which results will be stored.

    """
    tweets = read_txt(src)
    # Since tweets are ordered according to topic, label them in a
    # random order
    keys = list(tweets.keys())
    random.shuffle(keys)

    for idx, tid in enumerate(keys):
        fname = "{}.json".format(tid)
        dst = os.path.join(dst_dir, fname)
        # If file already exists, data was extracted before and due to
        # rate-limiting the rest couldn't be extracted
        if not os.path.isfile(dst):
            try:
                # Extract features for a tweet via Watson
                response = natural_language_understanding.analyze(
                    text=tweets[tid]["text"],
                    # Get entities and keywords for the tweet
                    features=[
                        # Overall tweet sentiment
                        Features.Sentiment(),
                        # NER detection (per-NER sentiment disabled here)
                        Features.Entities(sentiment=False),
                        Features.Keywords(sentiment=False),
                    ])

                # Store results in UTF-8 encoding
                fname = "{}.json".format(tid)
                dst = os.path.join(dst_dir, fname)
                with codecs.open(dst, "w", encoding="utf-8") as f:
                    # https://stackoverflow.com/questions/18337407/saving-utf-8-texts-in-json-dumps-as-utf8-not-as-u-escape-sequence
                    data = json.dumps(response, ensure_ascii=False)
                    f.write(data)
            # Unsupported language raises a WatsonException
            except watson_developer_cloud.watson_developer_cloud_service.\
                    WatsonException:
                pass
    print("Finished extraction for {} tweets".format(idx + 1))
Example #23
def map_feature(name):
    feature_name_mappings = {
        'keywords': features.Keywords(),
        'entities': features.Entities(),
        'concepts': features.Concepts(),
        'categories': features.Categories(),
        'sentiment': features.Sentiment(),
        'emotion': features.Emotion()
    }
    if name in feature_name_mappings:
        return feature_name_mappings[name]
    else:
        print("Invalid feature name")
        return None
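A hypothetical usage of map_feature (the requested names below are illustrative), building a feature list while skipping anything it does not recognize:

requested = ['sentiment', 'emotion', 'bogus']
# map_feature returns None for unknown names, so filter those out.
selected = [f for f in (map_feature(name) for name in requested) if f is not None]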
Example #24
def get_text_data(text, language):
    username = os.environ.get("BLUEMIX-NLU-USERNAME")
    password = os.environ.get("BLUEMIX-NLU-PASSWORD")

    # Assumes NaturalLanguageUnderstandingV1 was imported under this alias
    natural_language_understanding = NaturalLanguageUnderstanding(
        version="2017-02-27",
        username=username,
        password=password
    )
    return natural_language_understanding.analyze(
        text=text,
        features=[features.Emotion(), features.Sentiment(), features.Keywords()],
        language=language
    )
Example #25
def nlu(text):
    response = n.analyze(text=text,
                         features=[
                             features.Emotion(),
                             features.Concepts(),
                             features.Categories(),
                             features.Entities(),
                             features.Keywords(),
                             features.SemanticRoles(),
                             features.Relations(),
                             features.Sentiment()
                         ],
                         language='en')
    return json.dumps(response, indent=2)
Example #26
    def featureList(self, tags):
        f_list = []
        for tag in tags:
            if tag == "sentiment":
                f_list.append(features.Sentiment())
            elif tag == "categories":
                f_list.append(features.Categories())
            elif tag == "concepts":
                f_list.append(features.Concepts())
            elif tag == "emotion":
                f_list.append(features.Emotion())
            elif tag == "entities":
                f_list.append(features.Entities())
        return f_list
Example #27
def worker_task():
    while True:
        msgs = queue.receive_messages(
            MessageAttributeNames=['Id', 'Tweet', 'Latitude', 'Longitude'])

        sns.subscribe(
            TopicArn=arn,
            Protocol='http',
            Endpoint='http://twitter-senti-env.2mvfwajxvd.us-east-1.elasticbeanstalk.com/sns_test_handler/')
        if len(msgs) > 0:
            for message in msgs:
                id = message.message_attributes.get('Id').get('StringValue')
                tweet = message.message_attributes.get('Tweet').get(
                    'StringValue')
                lat = message.message_attributes.get('Latitude').get(
                    'StringValue')
                lon = message.message_attributes.get('Longitude').get(
                    'StringValue')
                #print(tweet)
                try:
                    response = nlu.analyze(text=tweet,
                                           features=[features.Sentiment()])
                    print(response)
                    s = json.dumps(response)
                    r = json.loads(s)
                    sentiment = r['sentiment']['document']['label']
                    senti_score = r['sentiment']['document']['score']
                    #sentiment = "pos"
                    #senti_score = 0.5
                    sns_message = {
                        'id': id,
                        'tweet': tweet,
                        'lat': lat,
                        'lon': lon,
                        'senti': sentiment,
                        'score': senti_score
                    }
                    print("SNS message:" + str(sns_message))
                    sns.publish(TopicArn=arn,
                                Message=json.dumps(
                                    {'default': json.dumps(sns_message)}))

                except Exception as e:
                    print("ERROR:" + str(e))

        else:
            time.sleep(1)
Example #28
    def IBMWatsonSentimentAnalyzer(self, text):
        # IBM Watson sentiment analysis; returns score and label (float, str pair)
        try:
            natural_language_understanding = NaturalLanguageUnderstandingV1(
                username=WATSON_USERNAME,
                password=WATSON_PASSWORD,
                version=WATSON_VERSION,
                url=WATSON_APIURL)
            response = natural_language_understanding.analyze(
                text=text, features=[Features.Sentiment()])
            return float(response["sentiment"]["document"]["score"]), str(
                response["sentiment"]["document"]["label"])
        except Exception:
            # Fall back to a sentinel score and a neutral label on any failure
            return -1000, "neutral"
Example #29
def _addAnalysis(text, sentimentArray, emotionsDict):
    try:
        result = natural_language_understanding.analyze(
            text=text, features=[Features.Sentiment(),
                                 Features.Emotion()])
        # Probe: raises KeyError if the response has no emotion data
        result['emotion']
    except Exception:
        return
    sentiment_score = result['sentiment']['document']['score']
    sentimentArray.append(sentiment_score)
    emotions = result['emotion']['document']['emotion']
    for emo in emotions:
        if emo not in emotionsDict:
            emotionsDict[emo] = []
        emotionsDict[emo].append(emotions[emo])
Example #30
def sentimentMining():
    score = 0
    totalCount = 0
    negativeCount = 0
    positiveCount = 0
    inputText = ''

    natural_language_understanding = NaturalLanguageUnderstandingV1(
        version='2017-04-14',
        username='******',
        password='******')

    # the input file to be used for sentiment analysis
    inputFile = ''
    with open(inputFile, 'r') as f:
        reader = csv.reader(f)
        cnt = 0
        for row in reader:
            # skipping the first row which is the heading
            if cnt == 0:
                cnt += 1
            else:
                if len(row) == 0:
                    pass
                    # if empty review
                else:
                    inputText = row[0]
                    cnt += 1
                    totalCount += 1
                    # sentiment analysis of every review
                    response = natural_language_understanding.analyze(
                        text=inputText,
                        features=[features.Sentiment(),
                                  features.Emotion()],
                        language='en')
                    print(totalCount)

                    # Count positive and negative reviews and accumulate the score
                    label = response["sentiment"]["document"]["label"]
                    if label == 'positive':
                        positiveCount += 1
                        score += response["sentiment"]["document"]["score"]
                    elif label == 'negative':
                        negativeCount += 1
                        score += response["sentiment"]["document"]["score"]
    return positiveCount, negativeCount, score, totalCount
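A hypothetical usage of sentimentMining's return values, averaging the accumulated score over the reviews Watson labeled positive or negative (illustrative only; the input CSV path in the example is left blank):

positives, negatives, total_score, total = sentimentMining()
scored = positives + negatives
if scored:
    # Neutral reviews contribute no score, so average over scored reviews only.
    print("Average sentiment over {} scored reviews: {:.3f}".format(
        scored, total_score / scored))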