def met():
    """Classify the dominant facial emotion in the first uploaded photo.

    Sends the first file found in the photos directory to the ParallelDots
    facial-emotion API, clears the directory, and renders ``success.html``
    with the highest-scoring emotion tag — or ``error.html`` when no face
    (or no emotion payload) is returned.
    """
    import paralleldots

    photos_dir = '/home/ganesh/Desktop/Projects/exp/photos'

    def _reset_photos_dir():
        # Wipe all uploaded photos and recreate an empty directory so the
        # next request starts clean (original repeated this in 3 places).
        shutil.rmtree(photos_dir)
        os.mkdir(photos_dir)

    # SECURITY NOTE(review): API key is hard-coded; move to env/config.
    api_key = "zIAZOZfZvvLW6luxNluGHa0Pvt623evzdR42paLpWNY"
    paralleldots.set_api_key(api_key)

    entries = os.listdir(photos_dir)
    image_path = photos_dir + "/" + str(entries[0])
    result = paralleldots.facial_emotion(image_path)

    # The API signals failure either with a "No face detected." key or by
    # omitting the 'facial_emotion' payload; both were separate branches
    # with identical bodies in the original.
    if "No face detected." in result or 'facial_emotion' not in result:
        _reset_photos_dir()
        return render_template('error.html')

    # Pick the tag with the highest score ("p" is the original fallback
    # value when every score is <= 0).
    best_score = 0
    best_tag = "p"
    for entry in result['facial_emotion']:
        if entry['score'] > best_score:
            best_tag = entry['tag']
            best_score = entry['score']
    _reset_photos_dir()
    return render_template('success.html', emotion=best_tag)
def song():
    """Recommend a song matching the facial emotion in an uploaded image.

    On POST: validates the upload, saves it, detects the dominant facial
    emotion via ParallelDots, then picks a random song with that mood from
    the database and renders ``song.html``. Non-POST requests get a 500.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # BUG FIX: the original tested `file == ''`, comparing the
        # FileStorage object to a string (always False). An empty form
        # submission arrives with an empty filename.
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file:
            filename = secure_filename(file.filename)
            print(
                os.path.abspath(
                    os.path.join(app.config['UPLOAD_FOLDER'], filename)))
            # NOTE(review): this saves into the current working directory,
            # not UPLOAD_FOLDER — confirm that is intended.
            file.save(filename)
            path = filename
            results = paralleldots.facial_emotion(path)
            # print(results['facial_emotion'][0])
            genre = results['facial_emotion'][0]['tag']
            # cur = mysql.connection.cursor()
            cursor.execute("select * from songs where mood = %s;", (genre, ))
            candidates = []
            # FIX: loop variable was named `song`, shadowing this function.
            for row in cursor:
                candidates.append(row[0])
                print(row[0])
                print(row[1])
            result = random.choice(candidates)
            print(result)
            # cursor.close()
            # mysql.connection.commit()
            # cur.close()
            return render_template("song.html", songname=result)
    else:
        # FIX: Response(500) set the *body* to "500" with status 200;
        # the status keyword sends an actual HTTP 500.
        return Response(status=500)
def find_mood(path):
    """Return the top facial-emotion tag for the image at *path*.

    Queries the ParallelDots facial-emotion API and takes the first
    entry's tag; falls back to 'Neutral' when the response carries no
    'facial_emotion' payload (e.g. no face detected).
    """
    # Using ParallelDots API to find the Facial Emotion in the Image
    results = paralleldots.facial_emotion(path)
    mood = (results['facial_emotion'][0]['tag']
            if 'facial_emotion' in results else 'Neutral')
    print('Found user_mood', mood)
    return mood
def get_frame(self):
    """Grab one webcam frame and return it JPEG-encoded.

    Loops reading from device 0 until a frame arrives; ESC breaks out,
    SPACE snapshots the frame to disk and sends it to the Sightengine
    celebrity check and the ParallelDots facial-emotion API (results are
    only printed). Returns the current frame as JPEG bytes.
    """
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.video = cv2.VideoCapture(0)
    # NOTE(review): img_counter is never incremented, so every snapshot
    # overwrites opencv_frame_0.png — confirm whether that is intended.
    img_counter = 0
    while True:
        ret, frame = self.video.read()
        #cv2.imshow("test", frame)
        if not ret:
            break
        k = cv2.waitKey(1)
        if k%256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif keyboard.is_pressed('space'):
            # SPACE pressed: save the frame and run both external APIs on
            # the saved file (absolute path matches img_counter == 0).
            img_name = "opencv_frame_{}.png".format(img_counter)
            cv2.imwrite(img_name, frame)
            print("{} written!".format(img_name))
            # SECURITY NOTE(review): Sightengine and ParallelDots
            # credentials are hard-coded in source.
            client = SightengineClient('523702522', 'SoMh4T2mBCTB848RmhqS')
            output = client.check('celebrities').set_file('/Users/jeffrosal1/Desktop/NUhomework/project3/combined/flasktest/opencv_frame_0.png')
            print(output)
            set_api_key("i0bqh0wRTlMqHMTHXZPxXFumRAcETw698GaIqBN9vuM")
            # when sending a image file
            path = "/Users/jeffrosal1/Desktop/NUhomework/project3/combined/flasktest/opencv_frame_0.png"
            emoout = paralleldots.facial_emotion(path)
            print(emoout)
        # Encode and return the frame for the caller (one frame per call,
        # as expected by an MJPEG-style streaming generator).
        rete, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
def get_frame(self):
    """Grab one webcam frame and return it JPEG-encoded.

    Loops reading from device 0 until a frame arrives; ESC breaks out,
    SPACE snapshots the frame to disk and calls the Sightengine celebrity
    check and ParallelDots facial-emotion API. Credentials and paths are
    blank placeholders in this variant, so those calls will fail until
    filled in. Returns the current frame as JPEG bytes.
    """
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    self.video = cv2.VideoCapture(0)
    # NOTE(review): img_counter is never incremented, so every snapshot
    # overwrites the same file name.
    img_counter = 0
    while True:
        ret, frame = self.video.read()
        #cv2.imshow("test", frame)
        if not ret:
            break
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif keyboard.is_pressed('space'):
            # SPACE pressed: save the frame and run both external APIs.
            img_name = "opencv_frame_{}.png".format(img_counter)
            cv2.imwrite(img_name, frame)
            print("{} written!".format(img_name))
            # Placeholder credentials/paths: must be configured before use.
            client = SightengineClient('', '')
            output = client.check('celebrities').set_file('')
            print(output)
            set_api_key("")
            # when sending a image file
            path = ""
            emoout = paralleldots.facial_emotion(path)
            print(emoout)
        # Encode and return the frame for the caller (one frame per call).
        rete, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
def take_picture_with_camera():
    """Capture a Pi-camera photo, detect the facial emotion, and react.

    Takes a picture, classifies it with the ParallelDots facial-emotion
    API, plays the matching sound file, and signals the emotion in Morse
    code via CODE(). When no face is detected, sounds the buzzer and
    plays/signals a "no face" alert instead.
    """
    # SECURITY NOTE(review): API key is hard-coded; move to env/config.
    paralleldots.set_api_key("br13ubwK9UvtgVahL09oDrw2KxLtRGKygrgonAmLqjY")
    paralleldots.get_api_key()
    camera.start_preview()
    #camera.rotation(180)
    sleep(0.5)
    camera.capture('/home/pi/Desktop/image.jpg')
    camera.stop_preview()
    path = "/home/pi/Desktop/image.jpg"
    ans = dict(paralleldots.facial_emotion(path))
    if "facial_emotion" in ans:
        # First entry is the dominant emotion.
        top = dict(list(ans['facial_emotion'])[0])
        emotions = top['tag']
        print(emotions)
        # FIX: replaced an eight-level nested if/else ladder with a lookup
        # table. Both Neutral and Normal map to Normal.mp3, exactly as the
        # original ladder did.
        sound_files = {
            "Angry": "/home/pi/Sounds/Angry.mp3",
            "Disgust": "/home/pi/Sounds/Disgust.mp3",
            "Fear": "/home/pi/Sounds/Fear.mp3",
            "Happy": "/home/pi/Sounds/Happy.mp3",
            "Neutral": "/home/pi/Sounds/Normal.mp3",
            "Normal": "/home/pi/Sounds/Normal.mp3",
            "Sad": "/home/pi/Sounds/Sad.mp3",
            "Surprise": "/home/pi/Sounds/Surprise.mp3",
        }
        if emotions in sound_files:
            pygame.mixer.music.load(sound_files[emotions])
            pygame.mixer.music.play()
        # Preserves the original control flow: Neutral additionally
        # signals "Normal" first, then the raw tag is signalled.
        if emotions == "Neutral":
            CODE("Normal")
        CODE(emotions)  # morse code for the emotions
    else:
        print("Face is not detected clearly :( ")
        GPIO.output(buzzer, GPIO.HIGH)
        sleep(1)
        GPIO.output(buzzer, GPIO.LOW)
        # Play NoFace
        pygame.mixer.music.load("/home/pi/Sounds/NoFace.mp3")
        pygame.mixer.music.play()
        CODE("No Face")
def video_recorder(request):
    """Persist a base64-posted recording and score its facial emotions.

    On POST: decodes the base64 payload into a media file, stores a
    ``newtable`` row for the session's user, runs the ParallelDots
    facial-emotion API once, and writes the seven emotion scores (zeros
    when the API call fails). GET re-renders the page with the session's
    ``video_filled`` flag. Any other failure returns a plain error response.
    """
    try:
        if request.method == "POST":
            get_random_value = get_random_string(length=6)
            request.session['video_filled'] = "True"
            video_data = request.POST.get('image')
            format, imgstr = video_data.split(';base64,')
            ext = format.split('/')[-1]
            value = str(f'profile_pics/{get_random_value}.{ext}')
            video_data = ContentFile(
                base64.b64decode(imgstr), name=get_random_value + '.' + ext)
            # You can save this as file instance.
            user_code = request.session['Invitation_code']
            user_id = request.session['job_user_fetch']
            video = newtable(user_id=user_id,
                             Code_id=user_code,
                             profile_video=video_data)
            video.save()
            data = newtable.objects.get(profile_video=value)
            url = str(f"media/{data.profile_video}")
            # Default every score to 0 so a failed API call still records
            # a complete row (matches the original fallback branch).
            keys = ('happy', 'fear', 'surprise', 'sad',
                    'neutral', 'disgust', 'angry')
            scores = {key: 0 for key in keys}
            try:
                # PERF FIX: the original issued SEVEN identical API calls,
                # one per emotion index; a single call returns all entries.
                emotions = paralleldots.facial_emotion(url).get(
                    "facial_emotion")
                # NOTE(review): positional mapping (index 0..6 -> happy,
                # fear, surprise, sad, neutral, disgust, angry) is carried
                # over from the original; confirm the API guarantees order.
                for key, entry in zip(keys, emotions):
                    scores[key] = entry.get("score")
            except Exception:
                # Best-effort: keep the zero scores when the API fails.
                pass
            newtable.objects.update_or_create(
                profile_video=value,
                defaults=scores,
            )
            return render(request,
                          'firstapp/interviewr_portal/videorecorder.html',
                          {'video_filled': 'True'})
        else:
            video_filled = request.session['video_filled']
            return render(request,
                          'firstapp/interviewr_portal/videorecorder.html',
                          {'video_filled': video_filled})
    except Exception:
        return HttpResponse('Can,t Access this template Directly')
def get_frame(self):
    """Grab one webcam frame, optionally run the doppelganger pipeline,
    and return the frame JPEG-encoded.

    Loops reading from device 0 until a frame arrives; ESC breaks out.
    SPACE snapshots the frame, sends it to the Sightengine celebrity
    check and ParallelDots facial-emotion API, then renders a bar chart
    of celebrity-match probabilities to the static image directory.
    """
    # Using OpenCV to capture from device 0. If you have trouble capturing
    # from a webcam, comment the line below out and use a video file
    # instead.
    from app import buttons
    self.video = cv2.VideoCapture(0)
    img_counter = 0
    while True:
        ret, frame = self.video.read()
        #cv2.imshow("test", frame)
        if not ret:
            break
        k = cv2.waitKey(1)
        if k % 256 == 27:
            # ESC pressed
            print("Escape hit, closing...")
            break
        elif keyboard.is_pressed('space'):
            # SPACE pressed: snapshot the frame under a fixed name.
            path_img = "/Users/jeffrosal1/Desktop/NUhomework/project3/Project3/dopple/static/img/"
            img_name = "dopplegang.png"
            #cv2.imwrite(img_name, frame)
            cv2.imwrite(str(path_img) + 'dopplegang.png', frame)
            print("{} written!".format(img_name))
            # SECURITY NOTE(review): Sightengine and ParallelDots
            # credentials are hard-coded in source.
            client = SightengineClient('523702522', 'SoMh4T2mBCTB848RmhqS')
            output = client.check('celebrities').set_file(
                '/Users/jeffrosal1/Desktop/NUhomework/project3/Project3/dopple/static/img/dopplegang.png'
            )
            print(output)
            set_api_key("i0bqh0wRTlMqHMTHXZPxXFumRAcETw698GaIqBN9vuM")
            # when sending a image file
            path = "/Users/jeffrosal1/Desktop/NUhomework/project3/Project3/dopple/static/img/dopplegang.png"
            emoout = paralleldots.facial_emotion(path)
            print(emoout)
            #start matplotlib
            # Collect (name, probability) pairs for the first detected
            # face's celebrity matches.
            faces = output["faces"]
            celebrity = faces[0]
            name_set = celebrity["celebrity"]
            index = 0
            totalprob = 0  # NOTE(review): assigned but never used
            names = []
            probability = []
            while index < len(name_set):
                x = name_set[index]
                name = x['name']
                names.append(name)
                prob = x['prob']
                probability.append(prob)
                index = index + 1
            print(names)
            print(probability)
            from matplotlib import rcParams
            rcParams.update({'figure.autolayout': True})
            # Bar chart of match probabilities, saved next to the snapshot.
            plt.bar(names,
                    probability,
                    color='yellow',
                    alpha=0.5,
                    align="center",
                    edgecolor="black")
            plt.xticks(rotation=45)
            plt.ylim(0, 1)
            plt.title("Celebrity Doppelganger")
            plt.xlabel("Celebrity Name")
            plt.ylabel("Probability")
            plt.savefig(str(path_img) + "prob_bar_graph.png")
            plt.close()
            #end matplotlib
        # Encode and return the frame for the caller (one frame per call).
        rete, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tobytes()
"""Demo script: query ParallelDots facial emotion for a local image and
collect the Happy/Neutral scores."""
import json
from pprint import pprint

import paralleldots

# SECURITY NOTE(review): API key is hard-coded; move to env/config.
paralleldots.set_api_key("PBgqFCR9T70FVQh7AJ688gugecn5doufgAiSmz3137A")
print(paralleldots.get_api_key())

#path= "/try.jpg"
path = "D:\HackRU/try.jpg"
print("\nFacial Emotion")
values = paralleldots.facial_emotion(path)
print(values)

#values[""]
#print(values['facial_emotion'][0]['tag'])  #OUTPUTS HAPPY. WORKING
significantemotions_andvalues = {}
emotions = []
# Possible tags: Happy, Neutral, Surprise, Sad, Fear, Disgust, Angry
# FIX: the original read fixed positions ([0] must be Happy, [1] must be
# Neutral) and silently skipped the score whenever the API ordered the
# results differently; scanning every entry is order-independent.
for entry in values['facial_emotion']:
    if entry['tag'] in ('Happy', 'Neutral'):
        emotions.append(entry['score'])
"""Minimal ParallelDots smoke test: one image call, one text call."""
import paralleldots

# NOTE(review): API key is hard-coded; prefer an environment variable.
api_key = "NubaG0ojxvvFFXBRhPFXWnVuoDRubxNGzzyg1KVZJgY"
paralleldots.set_api_key(api_key)

# Facial emotion for a local image file, then text emotion for a sample
# sentence; both raw API responses are printed.
face_result = paralleldots.facial_emotion('1.jpg')
print(face_result)
text_result = paralleldots.emotion("This good thing is bad")
print(text_result)