Example #1
from paralleldots import *  # these helpers all come from the paralleldots SDK


def test():
    set_api_key("write your API key here")
    similarity("Sachin is the greatest batsman",
               "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    ner("Narendra Modi is the prime minister of India")
    keywords(
        "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
    )
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent(
        "Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")
    batch_intent([
        "drugs are fun", "don't do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_abuse([
        "drugs are fun", "don't do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_ner([
        "drugs are fun", "don't do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_sentiment([
        "drugs are fun", "don't do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
    batch_phrase_extractor([
        "drugs are fun", "don't do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ])
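
Each helper above wraps a single ParallelDots REST endpoint and returns the decoded
JSON response as a Python dict. A minimal standalone sketch (the placeholder key is
hypothetical, and the response shape shown is the one the later examples rely on, so
treat it as an assumption rather than a guarantee for every SDK version):

import paralleldots

paralleldots.set_api_key("YOUR_API_KEY")  # hypothetical placeholder, not a real key

texts = ["drugs are fun", "I have a throat infection"]
response = paralleldots.batch_sentiment(texts)

# Assumed shape: {"sentiment": [{"negative": ..., "neutral": ..., "positive": ...}, ...]}
for text, scores in zip(texts, response["sentiment"]):
    print(text, "->", scores)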
Example #2
import csv

import paralleldots


def main():
    # API_KEY and generareRaport() are assumed to be defined elsewhere in this module
    paralleldots.set_api_key(API_KEY)
    messages = []
    with open("messages.csv") as csv_file:
        reader = csv.reader(csv_file, delimiter='|')
        for row in reader:
            messages.append(row[2])  # message text sits in the third pipe-delimited column

    messages = messages[1:]  # drop the CSV header row

    emotions = []
    response = paralleldots.batch_emotion(messages)
    for result in response['batch']:
        emotions.append(result['emotion']['emotion'])

    languages = []
    response = paralleldots.batch_language_detection(messages)
    for result in response['batch']:
        languages.append(result['output'])

    sentiments = []
    response = paralleldots.batch_sentiment(messages)
    for result in response['batch']:
        sentiments.append(result['sentiment'])

    data = {}

    data['languages'] = languages
    data['emotions'] = emotions
    data['sentiments'] = sentiments

    generareRaport(data)
Example #3
import nltk.data

import paralleldots


def analyze_entry(raw_text):
    # Split the entry into sentences with NLTK's pretrained Punkt tokenizer
    sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
    text_sentences = sent_detector.tokenize(raw_text.strip())
    emotions_overall = paralleldots.emotion(raw_text)
    sentiment_overall = paralleldots.sentiment(raw_text)
    emotions_sentences = paralleldots.batch_emotion(text_sentences)
    sentiment_sentences = paralleldots.batch_sentiment(text_sentences)
    #print("type of emotions_overall: ", type(emotions_overall))
    overall = {}
    overall.update(emotions_overall)
    overall.update(sentiment_overall)
    sentences = {}
    sentences.update(emotions_sentences)
    sentences.update(sentiment_sentences)
    data = {
        'Overall': overall,
        'Sentences': sentences,
        'Source Text': raw_text
    }
    #print("type of data: ", type(data))
    #data = json.dumps(data)
    #print("type of data: ",type(data))
    # data['Overall'].append(emotions_overall)
    # data['Overall'].append(sentiment_overall)
    # data['Sentences'] = []
    # data['Sentences'].append(emotions_sentences)
    # data['Sentences'].append(sentiment_sentences)
    #print(type(data))
    return data
Example #4
import paralleldots


def make_request_pd(api, text):
    # Dispatch a batch of texts to the requested ParallelDots endpoint
    if api == 'sentiment':
        response = paralleldots.batch_sentiment(text)
    elif api == 'emotion':
        response = paralleldots.batch_emotion(text)
    elif api == 'abuse':
        response = paralleldots.batch_abuse(text)
    else:
        raise ValueError("unsupported api: {}".format(api))
    return response
Example #5
import paralleldots


def getNetSentiments(commentArray):
    jsonResponse = paralleldots.batch_sentiment(commentArray)
    print(jsonResponse)
    netSentimentScores = []
    for sentimentScores in jsonResponse['sentiment']:
        # Subtract the negative probability from the positive probability to get a net
        # sentiment score. Neutral needs no handling: it only dilutes the extremity of
        # results by taking probability mass away from the pos and neg scores.
        netScore = float(sentimentScores['positive']) - float(
            sentimentScores['negative'])
        netSentimentScores.append(netScore)
    return netSentimentScores
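
The positive-minus-negative reduction above collapses each probability dict into a
single signed score in [-1, 1]. A quick offline check on a hand-built response of the
same shape (no API call is made; the numbers are invented for illustration):

# Hand-built response mimicking the shape getNetSentiments() expects
fake_response = {
    "sentiment": [
        {"negative": 0.25, "neutral": 0.0, "positive": 0.75},
        {"negative": 0.625, "neutral": 0.25, "positive": 0.125},
    ]
}
net_scores = [float(s["positive"]) - float(s["negative"])
              for s in fake_response["sentiment"]]
print(net_scores)  # [0.5, -0.5]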
Example #6
import paralleldots


def call_sentiment_api(database_filename, conn, cur):

    # Print opening message for this program action
    print("""
    ================================================================================
    = Collect text sentiment values for headlines/abstracts from Parallel Dots API =
    ================================================================================
    """)

    # Create the negative, neutral, and positive sentiment tables in the sentiment database
    cur.execute(
        'CREATE TABLE IF NOT EXISTS "Negative Sentiment Per Headline and Abstract" '
        +
        '(id INTEGER PRIMARY KEY, "Headline and Abstract" TEXT, "Negative Sentiment" REAL)'
    )
    cur.execute(
        'CREATE TABLE IF NOT EXISTS "Neutral Sentiment Per Headline and Abstract" '
        +
        '(id INTEGER PRIMARY KEY, "Headline and Abstract" TEXT, "Neutral Sentiment" REAL)'
    )
    cur.execute(
        'CREATE TABLE IF NOT EXISTS "Positive Sentiment Per Headline and Abstract" '
        +
        '(id INTEGER PRIMARY KEY, "Headline and Abstract" TEXT, "Positive Sentiment" REAL)'
    )

    # Select the headlines joined with their matching abstracts
    cur.execute(
        "SELECT Headlines.Headline, Abstracts.Abstract FROM Headlines INNER JOIN Abstracts ON Headlines.id"
        + " = Abstracts.id")
    call_list = []
    id_count = 0

    for row in cur.fetchall():
        headline = row[0]
        abstract = row[1]
        headline_and_abstract = headline + " " + abstract

        # A headline absent from one sentiment table is absent from the others as well
        cur.execute(
            'SELECT "Headline and Abstract" FROM "Negative Sentiment Per Headline and Abstract" WHERE '
            + '"Headline and Abstract" = ?', (headline_and_abstract, ))

        # If this headline is not in the sentiment database, add it to the call list
        if cur.fetchone() is None:
            call_list.append(headline_and_abstract)

        id_count += 1

        # Once call_list reaches 20 new items, move on to calling the API
        if len(call_list) == 20:
            break

    # Call API if there are any new items in call list
    if len(call_list) > 0:

        # Call Paralleldots API on list of new items
        english = "en"
        response = paralleldots.batch_sentiment(call_list, english)
        sentiment_list = response["sentiment"]

        # Loop through each headline and add the headline and three sentiment values
        # to sentiment database
        try:
            for ix in range(len(sentiment_list)):
                headline_and_abstract = call_list[ix]
                neg_sent = sentiment_list[ix]["negative"]
                neut_sent = sentiment_list[ix]["neutral"]
                pos_sent = sentiment_list[ix]["positive"]

                # Derive the row id from this headline's position within the current batch
                id_val = id_count - len(sentiment_list) + 1 + ix

                cur.execute(
                    'INSERT INTO "Negative Sentiment Per Headline and Abstract" VALUES (?,?,?)',
                    (id_val, headline_and_abstract, neg_sent))
                cur.execute(
                    'INSERT INTO "Neutral Sentiment Per Headline and Abstract" VALUES (?,?,?)',
                    (id_val, headline_and_abstract, neut_sent))
                cur.execute(
                    'INSERT INTO "Positive Sentiment Per Headline and Abstract" VALUES (?,?,?)',
                    (id_val, headline_and_abstract, pos_sent))

            conn.commit()

            print("\tAdded {} rows to the sentiment tables in \"{}\".".format(
                len(call_list), database_filename))
            cur.execute(
                'SELECT "Headline and Abstract" FROM "Negative Sentiment Per Headline and Abstract"'
            )
            print(
                "\tThere are now {} total rows for each sentiment table in \"{}\"."
                .format(len(cur.fetchall()), database_filename))

            get_more = input(
                "\tWould you like to calculate 20 more values? Yes or No: ")

            if get_more == "Yes":
                call_sentiment_api(database_filename, conn, cur)
        except Exception:
            print(
                "\tThe API returned a call-limit-exceeded message. Please wait a moment and try again, or restart the program."
            )
Example #7
import nltk.data

import paralleldots

text = """Jews were forbidden to be out on the streets between 8 P.M. and 6 A.M.; Jews were
forbidden to attend theaters, movies or any other forms of entertainment; Jews were
forbidden to use swimming pools, tennis courts, hockey fields or any other athletic
fields; Jews were forbidden to go rowing; Jews were forbidden to take part in any
athletic activity in public; Jews were forbidden to sit in their gardens or those of their
friends after 8 P.M.; Jews were forbidden to visit Christians in their homes; Jews
were required to attend Jewish schools, etc. You couldn't do this and you couldn't do
that, but life went on. Jacque always said to me, "I don't dare do anything anymore,
'cause I'm afraid it's not allowed."""

sent_detector = nltk.data.load('tokenizers/punkt/english.pickle')
text_sentences = sent_detector.tokenize(text.strip())
emotions_overall = paralleldots.emotion(text)
sentiment_overall = paralleldots.sentiment(text)
emotions_sentences = paralleldots.batch_emotion(text_sentences)
sentiment_sentences = paralleldots.batch_sentiment(text_sentences)
# print("Emotions Overall: ", emotions_overall)
# print("Sentiment Overall: ", sentiment_overall)
# print("Emotions Sentences: ", emotions_sentences)
# print("Sentiment Sentences: ", sentiment_overall)


data = {}
data['Overall'] = []
data['Overall'].append(emotions_overall)
data['Overall'].append(sentiment_overall)
data['Sentences'] = []
data['Sentences'].append(emotions_sentences)
data['Sentences'].append(sentiment_sentences)
# with open('data.txt','w') as outfile:
#     json.dump(data,outfile)
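
The commented-out lines above hint at the intended final step, persisting the collected
results; made live, it amounts to:

import json

with open('data.txt', 'w') as outfile:
    json.dump(data, outfile)  # both the overall and per-sentence results are plain dicts/lists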
Example #8
import paralleldots
import requests

# setUpDatabase and the setUpSpotify*/setUpSentiment/setUpEmotion/setUpEmotify
# helpers are assumed to be defined elsewhere in this module


def main():

    # https://developer.spotify.com/documentation/web-api/reference/browse/get-recommendations/
    recommendationToken = 'BQBHOYwD4SvHekv_FpdRoFLgWNGW7_dirrgPd2xjFrEBZy-tNy7RsLOQlQjNc15SliWzfeo_fJa3jdRx06JqBEhxRnF2dV9-3gc4bmHHz-WBiWFSi57VWOVG1HHbpeqZibXDTAXkveB92Uc'

    # https://developer.spotify.com/documentation/web-api/reference/tracks/ choose the link to /v1/audio-features/{id}
    featureToken = 'BQDRAn4Ip5H0T86o3b9hgMAB-H-IK5OUq8FlYfkWiNHNILf2HXrev4GYzPSzaoHLIYXFsyQ-3zWZzwetGYJz8DSR7X5Du23CUWhgAqbzjMHteTtcbMdSkIvWPP-SxvFMVZp7xvYCdDVMI2k'

    paralleldots.set_api_key("CWTPMu1Z9kaCUVeghKKecMyXLbfZPpfUWEnjytlHh4Q")

    cur, conn = setUpDatabase('emotify.db')

    input_text = [
        "I am counting my calories, yet I really want dessert.",
        "If you like tuna and tomato sauce- try combining the two. It’s really not as bad as it sounds.",
        "I would have gotten the promotion, but my attendance wasn’t good enough.",
        "I was very proud of my nickname throughout high school but today- I couldn’t be any different to what my nickname was.",
        "I really want to go to work, but I am too sick to drive."
    ]

    cur.execute("DROP TABLE IF EXISTS Valence")
    cur.execute("DROP TABLE IF EXISTS Energy")
    cur.execute("DROP TABLE IF EXISTS Sentiment")
    cur.execute("DROP TABLE IF EXISTS Emotion")
    conn.commit()

    sentiment_text = paralleldots.batch_sentiment(input_text)

    emotion_text = paralleldots.batch_emotion(input_text)

    negative_valence = []
    bored_energy = []

    for item in sentiment_text['sentiment']:
        negative_valence.append(str(item['negative']))

    for item in emotion_text['emotion']:
        bored_energy.append(str(item['Bored']))

    for i in range(len(negative_valence)):

        recommendation_headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'Authorization': 'Bearer ' + recommendationToken,
        }

        recommendation_params = (
            ('limit', '20'),
            ('market', 'US'),
            ('seed_tracks', '0c6xIDDpzE81m2q797ordA'),
            ('target_energy', bored_energy[i]),
            ('target_valence', negative_valence[i]),
            ('min_popularity', '0'),
        )

        spotifyRecommendations = requests.get(
            'https://api.spotify.com/v1/recommendations',
            headers=recommendation_headers,
            params=recommendation_params)
        spotifyRecs = spotifyRecommendations.json()

        setUpSpotifyValence(featureToken, spotifyRecs, cur, conn)
        setUpSpotifyEnergy(featureToken, spotifyRecs, cur, conn)
        conn.commit()

    conn.commit()

    # spotifyRecs still holds the response from the final loop iteration here
    setUpSentiment(sentiment_text, spotifyRecs, cur, conn)

    setUpEmotion(emotion_text, spotifyRecs, cur, conn)

    setUpEmotify(cur, conn)

    conn.close()
Example #9
from paralleldots import *  # these helpers all come from the paralleldots SDK


def test():
    set_api_key("Put your API key here")
    category = {
        "finance": ["markets", "economy", "shares"],
        "world politics": ["diplomacy", "UN", "war"],
        "india": ["congress", "india", "bjp"]
    }
    print(
        similarity("Sachin is the greatest batsman",
                   "Tendulkar is the finest cricketer"))
    print(sentiment("Come on, lets play together"))
    print(ner("Narendra Modi is the prime minister of India", "en"))
    print(
        taxonomy(
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ))
    print(
        keywords(
            "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
        ))
    print(
        phrase_extractor(
            "Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University."
        ))
    print(
        emotion(
            "Did you hear the latest Porcupine Tree song ? It's rocking !"))
    print(
        intent(
            "Finance ministry calls banks to discuss new facility to drain cash"
        ))
    print(abuse("you f**king a$$hole"))
    print(
        custom_classifier("Narendra Modi is the prime minister of India",
                          category))
    print(
        batch_intent([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(batch_abuse(["drugs are fun", "dont do drugs, stay in school"]))
    print(
        batch_sentiment([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(
        batch_phrase_extractor([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(
        batch_taxonomy([
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ]))
    print(
        batch_ner([
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019",
            "Michael Jordan of the Chicago Bulls is getting a 10-hour Netflix documentary in 2019"
        ]))
    print(
        batch_emotion([
            "drugs are fun", "don\'t do drugs, stay in school",
            "lol you a f*g son", "I have a throat infection"
        ]))
    print(facial_emotion_url("https://i.imgur.com/klb812s.jpg"))
    print(object_recognizer_url("https://i.imgur.com/klb812s.jpg"))
    print(
        sarcasm(
            "The movie that i watched last night is so funny that i get rolled out with laughter"
        ))
    print(
        batch_sarcasm([
            "The movie that i watched last night is so funny that i get rolled out with laughter",
            "I want to spend my life alone"
        ]))