def test():
    """Smoke-test the ParallelDots single-text and batch text endpoints."""
    set_api_key("write your api key here")

    # One call per single-text endpoint (results are intentionally discarded —
    # this only checks that the calls go through).
    similarity("Sachin is the greatest batsman", "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    ner("Narendra Modi is the prime minister of India")
    keywords("Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University.")
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent("Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")

    # Every batch endpoint gets the same sample sentences; a fresh list is
    # passed each time so the endpoints cannot share state.
    samples = [
        "drugs are fun",
        "don't do drugs, stay in school",
        "lol you a f*g son",
        "I have a throat infection",
    ]
    for batch_endpoint in (batch_intent, batch_abuse, batch_ner,
                           batch_sentiment, batch_phrase_extractor):
        batch_endpoint(list(samples))
def test():
    """Run each ParallelDots text-analysis endpoint once on a sample sentence."""
    modi_text = "Narendra Modi is the prime minister of India"

    similarity("Sachin is the greatest batsman", "Tendulkar is the finest cricketer")
    sentiment("Come on, lets play together")
    # taxonomy and ner are exercised with the same sentence.
    taxonomy(modi_text)
    ner(modi_text)
    keywords("Prime Minister Narendra Modi tweeted a link to the speech Human Resource Development Minister Smriti Irani made in the Lok Sabha during the debate on the ongoing JNU row and the suicide of Dalit scholar Rohith Vemula at the Hyderabad Central University.")
    emotion("Did you hear the latest Porcupine Tree song ? It's rocking !")
    intent("Finance ministry calls banks to discuss new facility to drain cash")
    abuse("you f**king a$$hole")
def search_tweet():
    """Search Twitter for a user-supplied query and print each result with
    ParallelDots sentiment, emotion and abuse analysis.

    Relies on module-level names: `api` (tweepy client), `pd` (paralleldots)
    and `main_menu()`; hands control back to `main_menu()` when finished.
    """
    query = input("Your Query: ")
    max_search = int(input("Maximum Results: "))
    print('\n')
    searched_tweets = [
        status
        for status in tweepy.Cursor(api.search, q=query).items(max_search)
    ]
    for i, status in enumerate(searched_tweets, start=1):
        tweet_json = status._json
        json_user = tweet_json['user']
        user = json_user['name']
        twitter_id = json_user['screen_name']
        created_at = tweet_json['created_at']
        tweet = tweet_json['text']
        loc = json_user['location']
        lang = json_user['lang']
        t_zone = json_user['time_zone']
        sentiment = pd.sentiment(tweet)['sentiment']
        # BUG FIX: max() over a dict compares its *keys* alphabetically, so
        # the original reported the alphabetically-last emotion label, not the
        # most probable one.  Pick the label with the highest probability.
        # (Assumes 'probabilities' maps label -> score — TODO confirm against
        # the ParallelDots response schema.)
        probabilities = pd.emotion(tweet)['emotion']['probabilities']
        emotion = max(probabilities, key=probabilities.get)
        abuse = pd.abuse(tweet)['sentence_type']
        # BUG FIX: the user's display name had been masked out as '******',
        # leaving a syntax error; restore it between the two literals.
        print(str(i) + '.\tUser: ' + user + ' (@' + twitter_id + ')')
        print('\tTweet Created: ' + created_at)
        print('\tLocation: ' + loc)
        print('\tLanguage: ' + lang)
        print('\tTime Zone: ', t_zone)
        print('\tTweet: ' + tweet)
        print('\n\tSentiment Analysis:\n')
        print('\t\tSentiment: ' + sentiment)
        print('\t\tEmotion: ' + emotion)
        print('\t\tAbuse: ' + abuse)
        print('-------------------------------------------------------------------------------------------------------')
        time.sleep(0.2)  # small pause between results for readability
    time.sleep(1)
    input("\nPress Enter to Continue...")
    main_menu()
def comment_view(request):
    """Django view: create a comment on a post, rejecting abusive text.

    Requires a logged-in user (``check_validation``) and a POST carrying a
    valid ``CommentForm``.  The comment text is screened with the ParallelDots
    ``abuse`` endpoint; abusive comments render an error page, clean ones are
    stored and the post's author is notified by e-mail.  Every other path
    redirects to the feed or the login page.
    """
    user = check_validation(request)
    if user and request.method == 'POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            post_id = form.cleaned_data.get('post').id
            comment_text = str(form.cleaned_data.get('comment_text'))
            abusive_content = abuse(comment_text)
            print(abusive_content)  # debug: raw ParallelDots response
            if abusive_content['sentence_type'] == "Abusive":
                # BUG FIX: the user-facing message misspelled "inappropriate".
                error_message = "You are trying to add an inappropriate comment!!"
                # Show an error page instead of saving the abusive comment.
                return render(
                    request, 'error.html', {'error_message': error_message}
                )
            else:
                # BUG FIX: objects.create() already persists the row; the
                # original's extra comment.save() issued a redundant UPDATE.
                comment = CommentModel.objects.create(
                    user=user, post_id=post_id, comment_text=comment_text)
                post = PostModel.objects.get(id=post_id)
                recipient_mail = post.user.email
                recipient_name = comment.user.username
                # Notify the post's author about the new comment.
                sending_mail(recipient_mail,
                             content_text=recipient_name +
                             " has commented on your post")
                return redirect('/feed/')
        else:
            return redirect('/feed/')
    else:
        return redirect('/login/')
def scoreAPI(text):
    """Score *text* with ParallelDots and return human-readable feedback.

    Checks abuse/hate-speech likelihood, the Sad/Bored/Excited emotion scores
    and sarcasm; prints each triggered warning and returns all of them joined
    into one newline-terminated string (empty string when nothing triggers).
    """
    feedback = []

    def warn(message, score):
        # Echo the warning (two-arg print, matching the original output
        # exactly) and record the same line for the returned summary.
        print(message, score)
        feedback.append(message + str(score) + "\n")

    abuse_scores = paralleldots.abuse(text)
    if abuse_scores['neither'] < 0.95:
        warn("Your text may be seen as abuse or hate speech: ",
             abuse_scores['neither'])

    emotion_scores = paralleldots.emotion(text)['emotion']
    if emotion_scores['Sad'] > 0.3:
        warn("Your text may be seen as sad: ", emotion_scores['Sad'])
    if emotion_scores['Bored'] > 0.2:
        warn("Your text may be seen as boring: ", emotion_scores['Bored'])
    if emotion_scores['Excited'] < 0.1:
        warn("Try to sound more excited!: ", emotion_scores['Excited'])

    sarcasm_scores = paralleldots.sarcasm(text)
    if sarcasm_scores['Sarcastic'] > 0.5:
        warn("You may sound sarcastic: ", sarcasm_scores['Sarcastic'])

    return "".join(feedback)
def get_large_audio_transcription(path):
    """Transcribe every pre-split audio chunk in directory *path* and score it.

    Each file in *path* is run through Google speech recognition (module-level
    recognizer ``r``), the capitalized sentences are concatenated, and the
    combined transcript is sent to the ParallelDots ``abuse`` endpoint.

    Returns the ParallelDots abuse response for the whole transcript.
    """
    # BUG FIX: os.listdir() returns entries in arbitrary order, which
    # scrambled the transcript; sorting makes the chunk order deterministic.
    allfiles = sorted(os.listdir(path))
    print("SRC: ", allfiles)
    whole_text = ""
    # process each chunk
    for entry in allfiles:
        # BUG FIX: join path components properly instead of naive string
        # concatenation, which broke whenever *path* lacked a trailing
        # separator.
        with sr.AudioFile(os.path.join(path, entry)) as source:
            audio_listened = r.record(source)
            # try converting it to text; unrecognized audio is reported and
            # skipped rather than aborting the whole transcription.
            try:
                text = r.recognize_google(audio_listened)
            except sr.UnknownValueError as e:
                print("Error:", str(e))
            else:
                text = f"{text.capitalize()}. "
                print(entry, ":", text)
                whole_text += text
    # Score the combined transcript for abusive content.
    abuse = paralleldots.abuse(whole_text)
    print(abuse)
    return abuse
def test():
    """Quick manual check of a few ParallelDots endpoints.

    NOTE(review): the API key below is hard-coded in source control; it should
    be rotated and loaded from configuration or an environment variable.
    """
    set_api_key("tEgayBzxTpAZZNppX62n8niYYoHeTna20DqQw8S9TQU")

    # Only these three endpoints are currently enabled; the others
    # (similarity, ner, taxonomy, keywords, phrase_extractor, intent,
    # custom categories) were commented out in the original version.
    sentiment_result = sentiment("Come on, lets play together")
    print(sentiment_result)

    emotion_result = emotion("i have nothing else to do. life is so boring man.")
    print(emotion_result)

    abuse_result = abuse("you f**king a$$hole")
    print(abuse_result)
def get_abuse(string):
    """Return the dominant abuse label and its score for *string*.

    Calls the ParallelDots ``abuse`` endpoint and picks the (label, score)
    pair with the highest score (ties broken by label, exactly as the previous
    sorted-descending selection did).

    Returns:
        pd.Series([label, score]) on success, or None (after printing the
        offending input) when the API call or lookup fails.
    """
    try:
        scores = abuse(string)
        # max() with this key is equivalent to
        # sorted(items, key=..., reverse=True)[0]: highest score first,
        # label as tie-breaker.
        label, score = max(scores.items(), key=lambda kv: (kv[1], kv[0]))
        return pd.Series([label, score])
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt.  Still best-effort: report and return None.
        print("Error in ", string)
def subnote_add():
    """Flask view: add a reply ("sub-note") under an existing note.

    GET returns a placeholder string.  POST expects a JSON body with the note
    text plus ids, screens the text for abuse via ParallelDots, persists the
    reply, bumps the parent note's reply counter, and queues a reply
    notification for the parent's author.
    """
    if request.method == "GET":
        return "i get a paper_uplodad Get http"
    if request.method == "POST":
        DBsession = sessionmaker(bind=db.engine)
        data = request.get_data()
        json_data = json.loads(data.decode('utf-8'))
        # Payload fields.  NOTE(review): `sid` is apparently a mapping keyed
        # by the string '0' (see sid['0'] below) — confirm against the client.
        sid = json_data.get('sid')
        uid = json_data.get('uid')
        note = json_data.get('note')
        pid = json_data.get('pid')
        parentid = json_data.get('parentid')
        paraentUserID = json_data.get('paraentUserID')  # NOTE(review): unused
        senderAccount = json_data.get('senderAccount')
        paraentUserAccount = json_data.get('paraentUserAccount')
        # print('sid:'+str(sid['0']))
        # Reject the note outright when ParallelDots scores it as likely
        # abusive or hate speech.
        result = paralleldots.abuse(note)
        if (result['abusive'] > 0.9 or result['hate_speech'] > 0.9):
            return {"state": "badcomment"}
        messagequeue = MessageQueue(parentid)
        new_note = notes(sid=sid['0'], notes=note, uid=uid, pid=pid,
                         parentid=parentid)
        dbsession = DBsession()
        try:
            # Session.add() returns None; the assignment is kept as-is.
            inserded = dbsession.add(new_note)
            dbsession.flush()
            # refresh() pulls the DB-generated primary key into new_note.
            dbsession.refresh(new_note)
            new_id = new_note._id
            new_notes = dbsession.query(notes).filter(
                notes._id == parentid).all()
            # Increment the parent note's reply counter.
            new_notes[0]._numOfnotes += 1
            dbsession.commit()
            dbsession.close()
            print('uid:' + str(senderAccount) + ' parentid:' +
                  str(paraentUserAccount))
            # Notify the parent's author about the reply.
            messagequeue.enqueue('reply', senderAccount, paraentUserAccount)
        except Exception as e:
            print('log' + str(e))
            dbsession.rollback()
            dbsession.close()
            return ({"state": "failed"})
        # NOTE(review): the session was already closed on the success path;
        # this second close() looks like a harmless no-op in SQLAlchemy.
        dbsession.close()
        return jsonify({"state": "success", "new_id": new_id})
    # Any other HTTP method falls through and implicitly returns None.
def hello_world():
    """Flask view: score submitted video comments for non-abusiveness.

    On POST, reads a JSON list of {"video-id", "videoComments"} objects from
    the 'test' form field, sums each video's ParallelDots 'neither' scores,
    tracks the best (highest) total, collects the distinct video ids, and
    renders 'final.html'.  Any other method renders 'newproject.html'.
    """
    if request.method != "POST":
        return render_template('newproject.html')

    payload = json.loads(request.form.get('test'))
    videoId = []
    isGood = 0
    for entry in payload:
        # Total "neither" (i.e. non-abusive) probability over this video's
        # comments; increments are non-negative, so taking the max of the
        # totals matches the original running comparison exactly.
        total_neither = sum(
            paralleldots.abuse(comment)['neither']
            for comment in entry["videoComments"]
        )
        isGood = max(isGood, total_neither)
        if entry["video-id"] not in videoId:
            videoId.append(entry["video-id"])
    print(isGood)
    return render_template('final.html', videoId=videoId)
# NOTE(review): this excerpt begins mid-statement — the opening of the MySQL
# connect call (e.g. `db = mysql.connector.connect(host=..., user=...,
# password=...,`) lies above this chunk, so only its last keyword argument
# is visible here.
database="hackathon")

# Score every stored user comment for abusiveness with ParallelDots and write
# the score back into the `v` column of `usercomments`.
mycursor = db.cursor()
## defining the Query
query = "SELECT comment FROM usercomments"
## getting records from the table
mycursor.execute(query)
## fetching all records from the 'cursor' object
records = mycursor.fetchall()
## Showing the data
for record in records:
    r = record
    # NOTE(review): `str` shadows the builtin for the rest of this script.
    str = ''.join(r)
    #text="she is ugly"
    # Abuse scores for this row.  NOTE(review): `r` is presumably a 1-tuple
    # row — confirm that paralleldots.abuse accepts it rather than the
    # joined string.
    response = paralleldots.abuse(r)
    #print(r)
    #rint(response)
    #print(type(str))
    v1 = response['abusive']
    #if(v>0.5):
    #print(r)
    #s="Jyothi"
    # Persist the abuse score for this comment (parameterized query).
    query1 = "update usercomments set v=%s where comment=%s"
    val = (v1, str)
    mycursor.execute(query1, val)
    #print(v);
db.commit()
def get_opinion(text):
    """Analyse *text* with ParallelDots.

    Returns a 4-tuple of the raw API responses, in this order:
    (intent, sentiment, abuse, emotion).
    """
    return (
        paralleldots.intent(text),
        paralleldots.sentiment(text),
        paralleldots.abuse(text),
        paralleldots.emotion(text),
    )
def extract_info():
    """Pull recent tweets and extract disaster-relief info from the first one.

    Pipeline for the single tweet examined: abuse/spam screening via
    ParallelDots, flood-relatedness via WordNet Wu-Palmer similarity, place
    entities via NER, need-vs-offer and resource-category classification,
    then contact number, quantity and geo-coordinate extraction.

    Returns a 3-element list: [coordinates as 'lat, lon' (or '0, 0'),
    contact number, quantity (or '0')]; abusive/spam tweets return
    ['0, 0', '0', '0'] immediately.

    NOTE(review): the indentation below was reconstructed from a collapsed
    one-line source; the branch nesting flagged with TODOs should be
    double-checked against the original repository.
    """
    twitter_client = TwitterClient()
    tweet_analyzer = TweetAnalyzer()
    api = twitter_client.get_twitter_client_api()
    tknzr = TweetTokenizer(strip_handles=True, reduce_len=True)
    tweets = api.user_timeline(count=10, lang='en')
    df = tweet_analyzer.tweets_to_data_frame(tweets)
    # Only the first tweet is processed (range(1)).
    for i in range(1):
        text = df['text'][
            i]  # e.g. "need food for 6 people at mesra contact no. 9932356934 email = [email protected]"
        temp = ''
        print(paralleldots.abuse(text))
        response1 = paralleldots.abuse(text)
        print(response1)
        # Find intent of the user input
        response2 = paralleldots.intent(text)
        print(response2)
        # Drop abusive or spammy tweets immediately with a sentinel answer.
        if response1['sentence_type'] == 'Abusive' or response2[
                'probabilities']['spam/junk'] > 0.5:
            flag = False
            ans = ['0, 0', '0', '0']
            return ans
        else:
            flag = True
        # print(flag)
        if flag:
            flag1 = False
            # WordNet synsets for the flood vocabulary vs the tweet's tokens.
            allsyns1 = set(ss for word in flood_related_words
                           for ss in wordnet.synsets(word))
            allsyns2 = set(ss for word in tknzr.tokenize(text)
                           for ss in wordnet.synsets(word))
            # Best Wu-Palmer similarity over the cross product; `or 0` guards
            # against wup_similarity returning None for unrelated synsets.
            best = max((wordnet.wup_similarity(s1, s2) or 0, s1, s2)
                       for s1, s2 in product(allsyns1, allsyns2))
            print(best)
            if best[0] > 0.6:
                flag1 = True
            if flag1:
                # Find place entities with reasonable confidence.
                response = paralleldots.ner(text)
                print(response)
                for j in range(len(response['entities'])):
                    if (response['entities'][j]['category'] == 'place' and
                            response['entities'][j]['confidence_score'] > 0.6):
                        print(response['entities'][j]['name'])
                        # get_location(response['entities'][i]['name'])
                # Classify the tweet as a request ("need") or an offer.
                category = {
                    "need": ['need', 'require', 'want', 'lack'],
                    "offer": [
                        'send', 'have', 'give', 'donate', 'transfer',
                        'distribute', 'aid', 'help', 'procure'
                    ]
                }
                response = paralleldots.custom_classifier(text, category)
                print(response)
                if response['taxonomy'][0]['confidence_score'] > response[
                        'taxonomy'][1]['confidence_score']:
                    temp = "need"
                else:
                    temp = "offer"
                num = get_contact(text, tweets[0])
                if temp == "need":
                    # Resource category for a request.
                    category = {
                        "food": [],
                        "water": [],
                        "shelter": [],
                        "first-aid": [],
                        "help": []
                    }
                    response = paralleldots.custom_classifier(text, category)
                    print(response)
                    x = 0
                    # NOTE(review): indexes with `i` (always 0) instead of the
                    # loop variable `j`, and `x` is never updated — this looks
                    # buggy; TODO verify intended behavior before changing.
                    for j in range(5):
                        if response['taxonomy'][i]['confidence_score'] > x:
                            cat = response['taxonomy'][i]['tag']
                else:
                    # Resource category for an offer (no "help" bucket).
                    category = {
                        "food": [],
                        "water": [],
                        "shelter": [],
                        "first-aid": []
                    }
                    response = paralleldots.custom_classifier(text, category)
                    print(response)
                    x = 0
                    # NOTE(review): same `i` vs `j` concern as above.
                    for j in range(4):
                        if response['taxonomy'][i]['confidence_score'] > x:
                            cat = response['taxonomy'][i]['tag']
                # Extract plausible quantities (short matches of quant_no).
                quantity = re.findall(quant_no, text)
                qnt = []
                for j in quantity:
                    if len(j) < 10:
                        qnt.append(j)
                print(qnt)
                s = tweets[0]
                loc1 = False
                if s.coordinates is None:
                    # No geotag: publicly ask the author to share location.
                    sn = s.user.screen_name
                    m = "@%s Hello! please share your location while tweeting" % (
                        sn)
                    s = api.update_status(m, s.id)
                else:
                    loc1 = True
                # Assemble the answer: [coords, contact, quantity].
                ans = []
                if loc1:
                    ans.append(
                        str(tweets[0].coordinates['coordinates'][1]) + ', ' +
                        str(tweets[0].coordinates['coordinates'][0]))
                else:
                    ans.append('0, 0')
                ans.append(num)
                print(len(qnt))
                if len(qnt) > 0:
                    ans.append(qnt[0])
                else:
                    ans.append('0')
                print(ans)
                return ans
# Demo: ParallelDots abuse detection in single-sentence and batch mode.
import paralleldots

paralleldots.set_api_key("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")

# Single sentence.
text = "you f**king a$$hole"
response = paralleldots.abuse(text)
print(response)

# Multiple sentences as an array.
text = ["you f**king a$$hole", "f**k this shit"]
response = paralleldots.batch_abuse(text)
print(response)
def test():
    """End-to-end smoke test: call every ParallelDots endpoint once and print
    the raw responses."""
    set_api_key("Put your Api key here")

    # Shared fixtures, named once instead of repeated inline.
    category = {
        "finance": ["markets", "economy", "shares"],
        "world politics": ["diplomacy", "UN", "war"],
        "india": ["congress", "india", "bjp"]
    }
    news_text = ("Prime Minister Narendra Modi tweeted a link to the speech "
                 "Human Resource Development Minister Smriti Irani made in "
                 "the Lok Sabha during the debate on the ongoing JNU row and "
                 "the suicide of Dalit scholar Rohith Vemula at the Hyderabad "
                 "Central University.")
    jordan_text = ("Michael Jordan of the Chicago Bulls is getting a 10-hour "
                   "Netflix documentary in 2019")
    samples = [
        "drugs are fun", "don't do drugs, stay in school",
        "lol you a f*g son", "I have a throat infection"
    ]
    image_url = "https://i.imgur.com/klb812s.jpg"
    sarcastic_text = ("The movie that i watched last night is so funny that "
                      "i get rolled out with laughter")

    # Single-text endpoints.
    print(similarity("Sachin is the greatest batsman",
                     "Tendulkar is the finest cricketer"))
    print(sentiment("Come on, lets play together"))
    print(ner("Narendra Modi is the prime minister of India", "en"))
    print(taxonomy(jordan_text))
    print(keywords(news_text))
    print(phrase_extractor(news_text))
    print(emotion("Did you hear the latest Porcupine Tree song ? It's rocking !"))
    print(intent("Finance ministry calls banks to discuss new facility to drain cash"))
    print(abuse("you f**king a$$hole"))
    print(custom_classifier("Narendra Modi is the prime minister of India",
                            category))

    # Batch endpoints (fresh list per call).
    print(batch_intent(list(samples)))
    print(batch_abuse(["drugs are fun", "dont do drugs, stay in school"]))
    print(batch_sentiment(list(samples)))
    print(batch_phrase_extractor(list(samples)))
    print(batch_taxonomy([jordan_text, jordan_text]))
    print(batch_ner([jordan_text, jordan_text]))
    print(batch_emotion(list(samples)))

    # Image endpoints.
    print(facial_emotion_url(image_url))
    print(object_recognizer_url(image_url))

    # Sarcasm endpoints.
    print(sarcasm(sarcastic_text))
    print(batch_sarcasm([sarcastic_text, "I want to spend my life alone"]))