def translate(sentence, to_sen='bn', to_file=False, file_name=None):
    """Translate *sentence* into the target language *to_sen*.

    input: sentence to convert, and language to convert
    returns: the translated sentence, or the apology message on failure

    Fixes vs. original: removed the nested ``detect`` helper that was defined
    but never called (the same logic was duplicated inline), and replaced
    ``answer != None`` with the idiomatic ``is not None``.
    """
    err_sentence = "Sir, I can't translate this. Make sure you have said translate when you start detecting."
    try:
        # Detect the source language first; translation needs `from_lang`.
        answer = blob.detect_language(blob(sentence))
        if answer is not None:
            translated = str(
                blob.translate(blob(sentence), from_lang=answer, to=to_sen))
        else:
            return err_sentence
        if to_file:
            # Optionally persist the translation via the project helper.
            saveFile(translated, file_name)
            print("File written on {file}".format(file=file_name))
        return translated
    except Exception as e:
        # Network / translation-service errors fall back to the apology text.
        print(e)
        return err_sentence
def make_textblob_object(self):
    """Build a TextBlob from the article's parsed soup text and cache it
    on ``self.blob``.

    NOTE(review): the original author flagged that text extraction and
    string conversion should become a separate method — still TODO.
    """
    raw_text = str(self.article.soup.text)
    self.blob = blob(raw_text)
def file_translate(file, to_sen='bn', sfile="tranlated.txt"):
    """Translate every sentence of *file* and save the result to *sfile*.

    input: file to translate, language to translate and a file name to save
    returns: nothing; writes the translated text via ``saveFile``

    Fixes vs. original: the ``to_sen`` argument was accepted but never
    forwarded to ``translate``, so the caller's target language was silently
    ignored; the ``file`` parameter was also shadowed by the ``with`` target.
    """
    paragraph = ''
    print("File translating Start....")
    print("File Reading begin...")
    try:
        with open(file, 'r') as fh:  # `fh` avoids shadowing the argument
            sentences = blob(fh.read()).sentences
        print("Translating: ", end='')
        progress = tqdm(total=len(sentences), desc="Translating:",
                        unit="sentence/s")
        for item in sentences:
            progress.update(1)
            # Bug fix: pass the requested target language through.
            paragraph += translate(str(item), to_sen=to_sen) + '\n'
            time.sleep(0.2)  # throttle requests to the translation service
        progress.close()
        print("\nTranslating Done! Saving your file")
        saveFile(paragraph, sfile)
    except Exception as e:
        print(e)
def comment(movie_id):
    """Handle a submitted movie review: score it with TextBlob sentiment,
    persist it, and refresh the movie's average rating."""
    form = Comment_Form()
    if form.validate_on_submit():
        user_id = session['user_id']
        # Map sentiment polarity [-1, 1] onto a 1-5 star rating.
        polarity = round(blob(form.comment.data).sentiment.polarity, 1)
        if polarity <= -0.7:
            stars = 1
        elif -0.7 < polarity <= -0.2:
            stars = 2
        elif -0.2 < polarity <= 0.1:
            stars = 3
        elif 0.1 < polarity <= 0.6:
            stars = 4
        else:
            stars = 5
        review = Reviews(comment=form.comment.data,
                         rating=stars,
                         user_id=user_id,
                         movie_id=movie_id)
        db.session.add(review)
        db.session.commit()
        # Recompute and store the movie's running average rating.
        avg_rating = get_avg_rating(movie_id)
        movie = Movies.query.filter_by(id=movie_id).first_or_404()
        movie.avg_rating = avg_rating
        db.session.commit()
    return redirect(url_for('home.movie', movie_id=movie_id))
def sentiment():
    """Read my.txt, run TextBlob sentiment analysis on its contents,
    and write the result to sentiment.txt."""
    with open('my.txt') as src:
        text = src.read()
    # TextBlob's .sentiment yields (polarity, subjectivity).
    result = str(blob(text).sentiment)
    with open('sentiment.txt', 'w') as out:
        out.write(result)
def detect(sentence):
    """Detect the language of *sentence*.

    input: sentence or string
    returns: the detected language code, or None if detection fails
    """
    try:
        # Bound-call form of the original blob.detect_language(blob(sentence)).
        return blob(sentence).detect_language()
    except Exception as exc:
        # Best-effort: report the problem and signal failure with None.
        print(exc)
        return None
def textblob_sentiment_analysis(self):
    """Fill ``self.sentiment``: index 0 holds the whole-article sentiment,
    followed by one entry per period-delimited sentence."""
    # Overall article sentiment goes first (index 0).
    self.sentiment.append(self.blob.sentiment)
    # Score each '.'-separated chunk individually and append in order.
    per_sentence = [blob(chunk).sentiment for chunk in self.blob.split(".")]
    self.sentiment.extend(per_sentence)
def getsentiment(text):
    """Take input as text and return its sentiment label
    ("Positive", "Negative", or "Neutral") based on TextBlob polarity."""
    polarity = blob(text).sentiment.polarity
    print({'polarity': polarity, 'text': text})
    if polarity == 0:
        return "Neutral"
    return "Positive" if polarity > 0 else "Negative"
def handleFileUpload():
    """Accept an uploaded audio file, transcribe it with Google speech
    recognition, score the transcript's sentiment, save the file, and
    render the combined result."""
    extra_line = ''
    if request.method == "POST":
        # Guard clauses: reject requests without a usable file part.
        if "file" not in request.files:
            flash("No file part")
            return redirect(request.url)
        upload = request.files["file"]
        if upload.filename == '':
            flash("No selected file")
            return redirect(request.url)
        if upload:
            # Speech -> text via Google's free recognizer endpoint.
            recognizer = sr.Recognizer()
            with sr.AudioFile(upload) as source:
                audio_data = recognizer.record(source)
            text = recognizer.recognize_google(audio_data, show_all=False)
            # Sentiment of the transcript: polarity sign -> label.
            scores = blob(str(text)).sentiment
            if scores[0] > 0:
                label = 'Positive'
            elif scores[0] < 0:
                label = 'Negative'
            else:
                label = 'Neutral'
            extra_line = str(text) + ": " + str(scores) + ": " + label
            # Persist the upload under a sanitized filename.
            filename = secure_filename(upload.filename)
            filepath = os.path.join(app.config["UPLOAD_FOLDER"], filename)
            upload.save(filepath)
            extra_line += f"<br>File saved to {filepath}"
    return render_template(
        'index.html',
        prediction_text='Voice to Text: {} \n'.format(extra_line))
Original file is located at https://colab.research.google.com/drive/1gvA21zINvGSUN4ieDfbxhD0FfAQg1RdE
"""

# NOTE: lines beginning with '!' are IPython/Colab shell magics, not plain
# Python — this file is a notebook export and only runs in that environment.
!pip install textblob

import nltk
# Tokenizer and POS-tagger corpora required by TextBlob.
nltk.download('punkt')
nltk.download("averaged_perceptron_tagger")

from textblob import TextBlob as blob

# Quick smoke test of TextBlob sentiment and POS tagging.
tb = blob("Am unhappy ")
tb.sentiment
tb.tags

# Speech-recognition stack: PyAudio needs the system audio libraries below.
!pip install SpeechRecognition
!apt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg
!pip install pyaudio

import speech_recognition as sr
t = sr.Recognizer()
if __name__ == "__main__":
    # CLI flags for the OSC (Open Sound Control) server endpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default="127.0.0.1",
                        help="The ip of the OSC server")
    parser.add_argument("--port", type=int, default=12000,
                        help="The port the OSC server is listening on")
    args = parser.parse_args()
    client = udp_client.SimpleUDPClient(args.ip, args.port)

    # Create an object from the text blob; the blob carries
    # part-of-speech tagging information for the sentence.
    tb = blob('Hi, here is my sentiment analysis of speech!')
    # Speech-recognition object used to capture microphone input.
    r = sr.Recognizer()

    iter_num = 10
    index = 0
    # The loop below runs for 10 captured utterances.
    while (index < iter_num):
        with sr.Microphone() as source:
            print('Say Something')
            # If there is no sound for 5 seconds, stop recording and
            # move on to the next speech message.
            audio = r.listen(source, timeout=5)
            # NOTE(review): this block is truncated here in the visible
            # source — the try body continues beyond this chunk.
            try:
elif "whatsapp to friend" in query:
    # Drive WhatsApp Web through Selenium to send a message interactively.
    driver = webdriver.Chrome('C:\\Users\\DANISH\\PycharmProject\\Examples\\venv\\Lib\\site-packages\\selenium\\webdriver\\chrome\\chromedriver.exe')
    driver.get('https://web.whatsapp.com/')
    time.sleep(15)  # give the user time to scan the WhatsApp Web QR code
    sender_name = input("Enter the name to whom you want to send message: ")
    # Locate the chat entry by the contact's title attribute.
    user = driver.find_element_by_xpath('//span[@title="{}"]'.format(sender_name))
    user.click()
    message = input("Enter th message: ")
    # NOTE(review): obfuscated class-name locators like these break whenever
    # WhatsApp redeploys its frontend — confirm they still match.
    mess_box = driver.find_element_by_class_name("_3u328 copyable-text selectable-text")
    mess_box.click()
    mess_box.send_keys(message)
    button = driver.find_element_by_class_name('_3M-N-')
    button.click()
    speak('message sent successfully')
elif 'run sentiment analysis' in query:
    speak('running the sentiment analysis')
    # Capture and score ten consecutive voice commands.
    for _ in range(10):
        data = takeCommand()
        tb = blob(data)
        a = tb.sentiment
        print(a)
        speak(a)
elif 'terminate' in query:
    speak("Thanks for using my service. happy to help you")
    break
nouns = []  # collects every noun occurring in the sentences

# Pick out nouns (singular, plural, proper) via NLTK POS tags.
noun_tags = ('NN', 'NNP', 'NNS', 'NNPS')
for sentence in sentences:
    for word, pos in nltk.pos_tag(nltk.word_tokenize(str(sentence))):
        if pos in noun_tags:
            nouns.append(word)

# Stem the nouns, then de-duplicate them.
p_stemmer = PorterStemmer()
nouns = list(set(p_stemmer.stem(word) for word in nouns))
print(nouns)

# One TextBlob per student document.
stu1, stu2, stu3, stu4, stu5 = (
    blob(doc) for doc in (doc_a, doc_b, doc_c, doc_d, doc_e))
blob_set = [stu1, stu2, stu3, stu4, stu5]

# opinion[i] accumulates the polarity of every document mentioning nouns[i].
opinion = [0] * len(nouns)
for i, noun in enumerate(nouns):
    for doc_blob in blob_set:
        if noun in doc_blob:
            opinion[i] += doc_blob.sentiment[0]