Example No. 1
    def main_loop(self):
        """
		Single iteration of the application's main loop.
		"""
        # Get current image frame from the video
        success, frame = self.vidcap.read()
        #ret, jpeg = cv2.imencode('.jpg', image)
        #frame = jpeg.tobytes()
        self.h, self.w, _c = frame.shape

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run()
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
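
Note on Example No. 1: cv2.VideoCapture.read() returns (False, None) once the video is exhausted, but the success flag above is never checked, so frame.shape will raise on the last iteration. A minimal guard sketch, assuming the same self.vidcap attribute:

    # hedged sketch: skip the iteration when the read fails (end of video or camera error)
    success, frame = self.vidcap.read()
    if not success or frame is None:
        return
    self.h, self.w, _c = frame.shape
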
Example No. 2
 def main_loop(self):
     """
     Single iteration of the application's main loop.
     """
     # Get current image frame from the camera
     frame = self.camera.get_frame()
     self.h,self.w,_c = frame.shape
     
     #display unaltered frame
     #imshow("Original",frame)
     
     #set current image frame to the processor's input
     self.processor.frame_in = frame
     #process the image frame to perform all needed analysis
     self.processor.run()
     #collect the output frame for display
     output_frame = self.processor.frame_out
     
     #show the processed/annotated output frame
     imshow("Processed",output_frame)
     
     #create and/or update the raw data display if needed
     if self.bpm_plot:
         self.make_bpm_plot()
     
     #handle any key presses
     self.key_handler()
Example No. 3
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.cameras[self.selected_cam].get_frame()
        self.h, self.w, _c = frame.shape
        self.numofframe = self.numofframe + 1
        self.processor.num_of_frames = self.processor.num_of_frames + 1
        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        if len(self.processor.all_frames) <= 200 and self.processor.flag == 0:
            self.processor.find_face(frame)

        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
Example No. 4
    def main_loop(self):
        i = 0
        while i<100:
            frame = self.cameras[self.selected_cam].get_frame()
            self.h, self.w, _c = frame.shape

            # set current image frame to the processor's input
            self.processor.frame_in = frame
            # process the image frame to perform all needed analysis
            self.processor.run(self.selected_cam)
            # collect the output frame for display
            output_frame = self.processor.frame_out

            # show the processed/annotated output frame
            imshow("Processed", output_frame)
            
            # create and/or update the raw data display if needed
            if self.send_serial:
                self.serial.write(str(self.processor.bpm) + "\r\n")

            if self.send_udp:
                self.sock.sendto(str(self.processor.bpm), self.udp)

            # handle any key presses
            if i == 15:
                self.toggle_search()
            i += 1
            self.key_handler()

        for cam in self.cameras:
            cam.cam.release()
        if self.send_serial:
            self.serial.close()
        if self.send_udp:
            self.sock.close()
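
Note on Example No. 4: under Python 3, both pyserial's write() and socket.sendto() expect bytes rather than str, so the two sends above would raise TypeError there. A minimal sketch with explicit encoding, assuming self.serial is a serial.Serial instance and self.udp is a (host, port) tuple:

    if self.send_serial:
        # pyserial requires a bytes-like payload under Python 3
        self.serial.write((str(self.processor.bpm) + "\r\n").encode("ascii"))

    if self.send_udp:
        # socket.sendto() likewise takes bytes plus the destination address
        self.sock.sendto(str(self.processor.bpm).encode("ascii"), self.udp)
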
Example No. 5
    def main_loop(self):

        frame = self.cameras[self.selected_cam].get_frame()
        self.h, self.w, _c = frame.shape

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
Example No. 6
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        ret, frame = self.vidcap.read()
        #self.h, self.w, _c = frame.shape
        self.h, self.w, _c = (480, 640, 3)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        bpm = self.processor.run(self.vidcap)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        # handle any key presses
        self.key_handler()

        return bpm
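
Note on Example No. 6: the frame size is hard-coded as (480, 640, 3) and the real measurement is commented out. A hedged alternative is to query the capture itself so the resolution is never assumed:

    # sketch: read the actual dimensions from the VideoCapture properties
    self.w = int(self.vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    self.h = int(self.vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
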
Example No. 7
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.camera.get_frame()
        self.h, self.w, _c = frame.shape

        #display unaltered frame
        #imshow("Original",frame)

        #set current image frame to the processor's input
        self.processor.frame_in = frame
        #process the image frame to perform all needed analysis
        self.processor.run()
        #collect the output frame for display
        output_frame = self.processor.frame_out

        #show the processed/annotated output frame
        imshow("Processed", output_frame)

        #create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        #handle any key presses
        self.key_handler()
Example No. 8
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.cameras[self.selected_cam].get_frame()
        self.h, self.w, _c = frame.shape

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Heart Rate Measurement", output_frame)  # previous : Processed

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
Example No. 9
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.camera.get_frame()
        self.h,self.w,_c = frame.shape
        

        #display unaltered frame
        #imshow("Original",frame)

        #set current image frame to the processor's input
        self.processor.frame_in = frame
        #process the image frame to perform all needed analysis
        self.processor.run(frame)
        #collect the output frame for display
        output_frame = self.processor.frame_out

        #show the processed/annotated output frame
        imshow("Processed",output_frame)

        #create and/or update the raw data display if needed
        global smileneighbour, mqLoop, smilecount, eyetot
        #if self.bpm_plot:
            #self.make_bpm_plot()
        if mqLoop >= 1:
            x = str(datetime.datetime.now())
            sm.write(str(md.datestr2num(x)) + " " + str(smileneighbour) + "\n")
            e.write(str(md.datestr2num(x)) + " " + str(eyetot) + "\n")
            #hr.write(str(md.datestr2num(x)) + " " + str(self.processor.show_bpm_text.bpm) + "\n")
            hr.write(str(md.datestr2num(x)) + " " + str(self.processor.bpm) + "\n")
            pulse_estimation_log.write(str(int(round(time.time() * 1000))) + " " + str(self.processor.bpm) + "\n")
            smileneighbour += 2 * eyetot
            smileneighbour /= 100
            print "bpm: " + str(self.processor.bpm)
            #if (self.processor.show_bpm_text.bpm) > dhr:
            if (self.processor.bpm) > dhr:
                #print (self.processor.fft.samples[-1]/2, self.processor.fft.samples[-1]-dhr/2)
                #overbeat = (self.processor.fft.samples[-1]-dhr)*(self.processor.fft.samples[-1]-dhr)
                #smileneighbour += (self.processor.show_bpm_text.bpm-dhr)
                smileneighbour += (self.processor.bpm - dhr)
            
            
            f.write(str(md.datestr2num(x)) + " " + str(smileneighbour) + "\n")
            mqLoop = 0
        else:
            mqLoop+= 0.9    
        img = cv.QueryFrame(capture)    
        smileneighbour = 0
        eyetot = 0
        #if img:
        #    image = DetectRedEyes(img, faceCascade, smileCascade, eyeCascade)
        #    cv.ShowImage("camera", image)
        #handle any key presses
        self.key_handler()
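
Note on Example No. 9: cv.QueryFrame and cv.ShowImage belong to the legacy cv API that was removed in OpenCV 3. A rough cv2 equivalent of that tail section (including the commented-out display), assuming capture is replaced by a cv2.VideoCapture object, would be:

    ret, img = capture.read()          # cv2 replacement for cv.QueryFrame(capture)
    if ret:
        image = DetectRedEyes(img, faceCascade, smileCascade, eyeCascade)
        cv2.imshow("camera", image)    # cv2 replacement for cv.ShowImage
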
Example No. 10
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """

        self.video = cv2.VideoCapture('video1.avi')
        videoframe = self.video.read()

        #This is where we feed the video
        if self.video_flag:

            # Update .csv
            #data = np.vstack((self.processor.times, self.processor.samples)).T
            data = np.vstack((self.processor.ttimes, self.processor.bpms)).T
            np.savetxt(self.csvn + ".csv", data, delimiter=',')
            ret, frame = self.video.read()

            if (ret == False):
                self.video = cv2.VideoCapture('video1.avi')
                ret, frame = self.video.read()

        else:
            # Get current image frame from the camera
            frame = self.irframe
            #frame = self.cameras[self.selected_cam].get_frame()

        if self.callbackFlag:
            frame = self.irframe

        self.h, self.w, _c = frame.shape

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
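
Note on Example No. 10: when ret comes back False the code re-opens 'video1.avi' from scratch to loop playback. Rewinding the existing capture achieves the same loop without constructing a new VideoCapture; a minimal sketch with the same attributes:

    if not ret:
        self.video.set(cv2.CAP_PROP_POS_FRAMES, 0)   # seek back to the first frame
        ret, frame = self.video.read()
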
Example No. 11
    def main_loop(self):
        # Get current image frame from the camera
        # frame = self.cameras[self.selected_cam].get_frame()
        # print("[SELF]:",self)
        ret, frame = cap.read()
        # self.h, self.w, _c = frame.shape

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        fhRect = self.processor.forehead

        faRect = self.processor.face_rect
        if faRect[2] > 2:
            lcRect = self.processor.get_leftCheekRect()
            rcRect = self.processor.get_rightCheekRect()
            cv2.putText(self.processor.frame_out, "Left Cheek",
                        (lcRect[0], lcRect[1] - 5), cv2.FONT_HERSHEY_PLAIN, 1,
                        (0, 255, 0))
            self.processor.draw_rect(lcRect)
            self.processor.displayColorInfo(
                self.processor.get_subface_means(lcRect), [335, 360, 385],
                "left cheek")
            cv2.putText(self.processor.frame_out, "Right Cheek",
                        (rcRect[0], rcRect[1] - 5), cv2.FONT_HERSHEY_PLAIN, 1,
                        (0, 255, 0))
            self.processor.draw_rect(rcRect)
            self.processor.displayColorInfo(
                self.processor.get_subface_means(rcRect), [410, 435, 460],
                "right cheek")
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
Example No. 12
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.cameras[self.selected_cam].get_frame()
        self.h, self.w, _c = frame.shape

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        
        self.processor.run(self.selected_cam)
        
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.writeCSV:
            data = np.array([self.processor.actualTime, 
                         self.processor.bpm, 
                         self.processor.RRvalue]).T
            print(data)
            # , self.processor.bpm , self.processor.RRvalue
            self.fileName.write("%s" % self.processor.actualTime + " ")
            self.fileName.write("%s" % self.processor.bpm + " ")
            self.fileName.write("%s" % self.processor.RRvalue + "\n")
            # np.savetxt("./CSV_FILE/"+ self.fileName + ".csv", data , delimiter=',')
        # data = np.array([self.processor.bpms , self.processor.RR]).T                     
        # print(data)
        # handle any key presses
        self.key_handler()
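
Note on Example No. 12: the three consecutive write() calls emit one space-separated record (time, BPM, RR) per frame. The same record can be written in a single call; a small sketch assuming self.fileName is an open text-mode file handle:

    if self.writeCSV:
        self.fileName.write("{} {} {}\n".format(self.processor.actualTime,
                                                self.processor.bpm,
                                                self.processor.RRvalue))
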
Example No. 13
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.cameras[self.selected_cam].get_frame()
        self.h, self.w, _c = frame.shape

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()

        #data = np.vstack((self.processor.times, self.processor.samples)).T
        if len(self.processor.times) == 250:
            #print(len(self.processor.times), self.processor.times[len(self.processor.times)-1], self.processor.samples[len(self.processor.times)-1])
            self.data_acc[self.v_flg, 0] = (self.processor.bpm)
            self.data_acc[self.v_flg, 1] = (
                self.processor.times[len(self.processor.times) - 1])
            self.data_acc[self.v_flg, 2] = (
                self.processor.samples[len(self.processor.times) - 1])
            self.v_flg += 1
Example No. 14
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.cameras[self.selected_cam].get_frame()
        self.h, self.w, _c = frame.shape

        #output the frame to disk, only with green channel
        g = frame.copy()
        # g[:, :, 0] = 0
        # g[:, :, 2] = 0
        success = cv2.imwrite('output/img' + str(self.nFrame) + '.png', g)

        print(success)
        self.nFrame = self.nFrame + 1

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
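
Note on Example No. 14: the comment promises a frame written "only with green channel", but the two assignments that zero the other channels are commented out, so the full BGR frame is saved. A sketch of what the comment describes (OpenCV frames are ordered B, G, R, so index 1 is green):

    g = frame.copy()
    g[:, :, 0] = 0   # blue
    g[:, :, 2] = 0   # red
    success = cv2.imwrite('output/img' + str(self.nFrame) + '.png', g)

Writing frame[:, :, 1] directly would instead save a single-channel grayscale PNG of the green values.
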
Example No. 15
    def main_loop(self):
        """
            Single iteration of the application's main loop.
            """

        # input videos
        #cap = cv2.VideoCapture('BeforeExercise.mov')
        #cap = cv2.VideoCapture('AfterExercise.mov')
        cap = cv2.VideoCapture('Yuan_Before.mov')
        #cap = cv2.VideoCapture('Yuan_after.mov')

        self.selected_video = 0

        while (cap.isOpened()):

            ret, frame = cap.read()
            if not ret:
                # stop once the video is exhausted so cap.release() is reached
                break

            # set current image frame to the processor's input
            self.processor.frame_in = frame
            # process the image frame to perform all needed analysis
            self.processor.run(self.selected_video)
            # collect the output frame for display
            output_frame = self.processor.frame_out

            # show the processed/annotated output frame
            imshow("Processed", output_frame)

            # create and/or update the raw data display if needed
            if self.bpm_plot:
                self.make_bpm_plot()


            # if self.send_serial:
            #     self.serial.write(str(self.processor.bpm) + "\r\n")

            # if self.send_udp:
            #     self.sock.sendto(str(self.processor.bpm), self.udp)

            # handle any key presses
            self.key_handler()

        cap.release()
Example No. 16
 def main_loop(self):
     """
     Single iteration of the application's main loop.
     """
     # Get current image frame from the camera
     frame = self.camera.get_frame()
     self.h,self.w,_c= frame.shape
     
     self.current_centroid = centroid(np.sum(frame,2)/3)
     
     #display unaltered frame
     #imshow("Original",frame)
     
     #collect the output frame for display
     output_frame = process_image(frame,self.current_centroid,self.centroid_1,self.centroid_2,self.centroid_1_active,self.centroid_2_active)
     
     #show the processed/annotated output frame
     imshow("Processed",output_frame)
     
     #handle any key presses
     self.key_handler()
Example No. 17
    def MY_main_loop(self):
        vid = r"Samples\Videos_Pulse\v6.mp4"  # can also use '/' without the raw flag...

        cap = cv2.VideoCapture(vid)

        while True:
            if self.flag_end is True:  # [x] OR Ctrl+e pressed in the GUI
                exit(0)

            _, frame = cap.read()

            if frame is None:
                print("Done running " + vid)
                exit(0)

            frame = cv2.rotate(frame, cv2.ROTATE_90_CLOCKWISE)

            self.h, self.w, _c = frame.shape
            # set current image frame to the processor's input
            self.processor.frame_in = frame
            # process the image frame to perform all needed analysis
            self.processor.run()
            # collect the output frame for display
            output_frame = self.processor.frame_out

            # show the processed/annotated output frame
            imshow("Processed", output_frame)

            # create and/or update the raw data display if needed
            if self.bpm_plot:
                self.make_bpm_plot()

            # handle any key presses
            self.key_handler()

            if cv2.waitKey(40) == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
Example No. 18
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        try:
            frame = self.camera.get_frame()
            if ( frame is None ):
                print ("EOF")
                return False
            self.h,self.w,_c = frame.shape
        except AttributeError:
            print ("ERR01 yo")
            return False
        

        #display unaltered frame
        imshow("Original",frame)

        #set current image frame to the processor's input
        self.processor.frame_in = frame
        #process the image frame to perform all needed analysis
        self.processor.run()
        #collect the output frame for display
        output_frame = self.processor.frame_out

        #show the processed/annotated output frame
        imshow("Processed",output_frame)
        self.capture_bpms()

        #create and/or update the raw data display if needed
        #if self.bpm_plot:
        #    testfk = self.make_bpm_plot()
        #    print(testfk)

        #handle any key presses
        #self.key_handler()
        return True
Example No. 19
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.cameras[self.selected_cam].get_frame()
        self.h, self.w, _c = frame.shape

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  #DD
        rects = self.detector(gray, 0)
        for rect in rects:
            self.eye_ratio(rect, gray, frame)
        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
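
Note on Example No. 19: the self.detector(gray, 0) call matches dlib's frontal face detector signature, although the setup is not visible in the excerpt. A hedged sketch of how such a detector is typically constructed (the landmark-model filename is a common choice, not something confirmed by this code):

    import dlib
    self.detector = dlib.get_frontal_face_detector()
    # a landmark predictor is usually loaded alongside it for per-eye measurements
    self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
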
Example No. 20
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        try:
            frame = self.camera.get_frame()
            if frame is None:
                print ("EOF")
                return False
            self.h, self.w, _c = frame.shape
        except AttributeError:
            print ("ERR01 yo")
            return False

        # display unaltered frame
        imshow("Original", frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run()
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)
        self.capture_bpms()

        # create and/or update the raw data display if needed
        # if self.bpm_plot:
        #    testfk = self.make_bpm_plot()
        #    print(testfk)

        # handle any key presses
        # self.key_handler()
        return True
Example No. 21
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.camera.get_frame()
        self.h,self.w,_c = frame.shape
        

        #display unaltered frame
        #imshow("Original",frame)

        #set current image frame to the processor's input
        self.processor.frame_in = frame
        #process the image frame to perform all needed analysis
        self.processor.run()
        #collect the output frame for display
        output_frame = self.processor.frame_out

        #show the processed/annotated output frame
        imshow("Processed",output_frame)

        #create and/or update the raw data display if needed
        #if self.bpm_plot:
            #self.make_bpm_plot() 
        #print(self.processor.fft.samples) 
        if self.processor.fft.ready:
            print "Ready to hit escape"
        else:
            print ".",
        
        img = cv.QueryFrame(capture)    
        if img:
            image = DetectRedEyes(img, faceCascade)
            cv.ShowImage("camera", image)
        #handle any key presses
        self.key_handler()
Example No. 22
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.camera.get_frame()
        self.h, self.w, _c = frame.shape

        #display unaltered frame
        #imshow("Original",frame)

        #set current image frame to the processor's input
        self.processor.frame_in = frame
        #process the image frame to perform all needed analysis
        self.processor.run()
        #collect the output frame for display
        output_frame = self.processor.frame_out

        #show the processed/annotated output frame
        imshow("Processed", output_frame)

        #create and/or update the raw data display if needed
        #if self.bpm_plot:
        #self.make_bpm_plot()
        #print(self.processor.fft.samples)
        if (self.processor.fft.ready):
            print "Ready to hit escape"
        else:
            print ".",

        img = cv.QueryFrame(capture)
        if img:
            image = DetectRedEyes(img, faceCascade)
            cv.ShowImage("camera", image)
        #handle any key presses
        self.key_handler()
Example No. 23
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """

        #self.video = cv2.VideoCapture('video1.avi')

        #This is where we feed the video
        if self.video_flag:

            plt.figure(1)
            plt.plot(self.processor.ttimes, self.processor.bpms)

            # Update .csv
            #data = np.vstack((self.processor.times, self.processor.samples)).T
            data = np.vstack((self.processor.ttimes, self.processor.bpms)).T
            #data = np.vstack((self.videoTimes, self.processor.bpms)).T
            np.savetxt(self.csvn + ".csv", data, delimiter=',')

            ret, frame = self.video.read()

            if self.processor.saveBpm:

                timestamp = self.video.get(cv2.CAP_PROP_POS_MSEC) / 1000.0
                bpmCount = np.size(self.processor.bpms)

                #Means the BPM acquisition has started, so we should keep timestamps of frames.
                if np.size(self.processor.bpms) > 0:
                    self.videoTimes.append(timestamp)

                timestampCount = np.size(self.videoTimes)

                #print("Frame timestamps count: " + format(timestampCount))
                #print("BPMs saved count: " + format(bpmCount))
                gTruth = np.vstack((self.videoTimes, self.processor.bpms)).T
                np.savetxt("test.csv", gTruth, delimiter=',')

            if ret == False:
                self.video = cv2.VideoCapture('video1.avi')
                ret, frame = self.video.read()

        else:
            # Get current image frame from the camera
            frame = self.cameras[self.selected_cam].get_frame()

        self.h, self.w, _c = frame.shape

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run(self.selected_cam)
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
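
Note on Example No. 23: np.vstack requires both rows to have the same length, so the periodic CSV dumps will raise ValueError whenever ttimes (or videoTimes) and bpms fall out of step, which the commented-out count prints suggest was already a concern. A small guard sketch that trims to the shorter sequence before stacking:

    n = min(len(self.processor.ttimes), len(self.processor.bpms))
    data = np.vstack((self.processor.ttimes[:n], self.processor.bpms[:n])).T
    np.savetxt(self.csvn + ".csv", data, delimiter=',')
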
Example No. 24
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """

        # Get current image frame from the camera
        frame = self.camera.get_frame()
        self.h, self.w, _c = frame.shape

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        self.processor.run()
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        global smileneighbour, mqLoop, smilecount, eyetot
        # if self.bpm_plot:
        # self.make_bpm_plot()
        x = str(datetime.datetime.now())

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        rects = self.detector(gray, 0)
        #####Record self-report#####
        key = cv2.waitKey(1) & 0xFF
        if key == ord("."):
            c.write(str(md.datestr2num(x)) + " " + str(500) + "\n")
            ############################

        for rect in rects:
            shape = self.predictor(gray, rect)
            shape = face_utils.shape_to_np(shape)

            shape2 = shape.copy()
            newline = ""
            for (w,z) in shape2:
                cv2.circle(frame, (w, z), 1, (0, 0, 255), -1)
                new = str((w,z)) + " "
                newline += new

            #######################
            ########Raw Data#######
            #######################

            raw.write(str(md.datestr2num(x)) + newline + "\n")

            #######################
            ######New Things#######

            leftBrow = shape[21]
            rightBrow = shape[22]
            distance = dist.euclidean(leftBrow, rightBrow)

            upper1 = shape[37]
            lower1 = shape[41]
            right1 = shape[38]
            upper2 = shape[43]
            lower2 = shape[47]
            right2 = shape[44]
            average = (dist.euclidean(upper1, lower1) * dist.euclidean(upper1, right1) + dist.euclidean(upper2,
                                                                                                        lower2) * dist.euclidean(
                upper2, right2))

            leftmouth = shape[48]
            rightmouth = shape[54]

            upperlip = shape[62]
            lowerlip = shape[66]

            ##########End##########
            #######################
            #Time, Frown, Eye size, Mouth width, Mouth height, Brow Raise, Length of face, and Width of face
            all.write(str(md.datestr2num(x)) + " " + str(distance) + " " + str(average) + " " + str(
                dist.euclidean(leftmouth, rightmouth)) + " " + str(dist.euclidean(upperlip, lowerlip)) + " " + str(
                (dist.euclidean(shape[24], shape[44]) + dist.euclidean(shape[19], shape[37]))) +  " " + str(dist.euclidean(shape[27], shape[8])) + " " + str(dist.euclidean(shape[2], shape[14])) + "\n")
            #######################
            ######Single File######
        cv2.imshow("Frame", frame)

        if mqLoop >= 1:

            sm.write(str(md.datestr2num(x)) + " " + str(smileneighbour) + "\n")
            e.write(str(md.datestr2num(x)) + " " + str(eyetot) + "\n")
            hr.write(str(md.datestr2num(x)) + " " + str(self.processor.show_bpm_text.bpm) + "\n")
            smileneighbour += 2 * eyetot
            smileneighbour /= 100
            if (self.processor.show_bpm_text.bpm) > dhr:
                # print (self.processor.fft.samples[-1]/2, self.processor.fft.samples[-1]-dhr/2)
                # overbeat = (self.processor.fft.samples[-1]-dhr)*(self.processor.fft.samples[-1]-dhr)
                smileneighbour += (self.processor.show_bpm_text.bpm - dhr)

            f.write(str(md.datestr2num(x)) + " " + str(smileneighbour) + "\n")
            mqLoop = 0
        else:
            mqLoop += 0.9
        img = cv.QueryFrame(capture)
        smileneighbour = 0
        eyetot = 0
        if img:
            image = DetectRedEyes(img, faceCascade, smileCascade, eyeCascade)
            cv.ShowImage("camera", image)
        # handle any key presses
        self.key_handler()
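
Note on Example No. 24: the snippet relies on several modules that are not visible in the excerpt, plus module-level file handles (sm, e, hr, f, raw, all, c) and the legacy cv/capture globals. Judging from the call names, the imports below are what the file assumes; they are listed here as an inference, not a confirmed header of the original:

    import datetime
    import cv2
    import matplotlib.dates as md                 # md.datestr2num(...)
    from scipy.spatial import distance as dist    # dist.euclidean(...)
    from imutils import face_utils                # face_utils.shape_to_np(...)
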
Example No. 25
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera
        frame = self.cameras[self.selected_cam].get_frame()

        if frame is None:
            if args.video_dir is None:
                self.kill = True
                return

            else:
                pos = self.video_names[self.selected_cam].find("_q")
                self.question_number = self.video_names[
                    self.selected_cam][pos + 2:pos + 4]
                self.stop_record()
                # Change to the next video (in the next camera)
                self.selected_cam += 1

                if self.selected_cam >= len(self.video_names):
                    self.kill = True
                    return

                self.cameras[self.selected_cam].reset_video()
                print('Changing to next video...{}'.format(
                    self.video_names[self.selected_cam]))
                self.start_record()
                return

        self.h, self.w, _c = frame.shape

        # if self.record:
        # self.out.write(frame)

        # else: self.out.release()

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        try:
            self.processor.run(self.selected_cam)
        except:
            pass
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
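
Note on Example No. 25 (the identical pattern recurs in the next example): the bare except: pass around processor.run() also swallows KeyboardInterrupt and hides real failures. A narrower sketch:

    try:
        self.processor.run(self.selected_cam)
    except Exception as exc:   # lets Ctrl+C through but still survives a bad frame
        print("processor.run failed: {}".format(exc))
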
Example No. 26
    def main_loop(self):
        """
        Single iteration of the application's main loop.
        """
        # Get current image frame from the camera

        if len(self.processor.temps) and self.Tx:
            self.ser.SerialWrite(self.processor.temps[-1])
            read = self.ser.SerialReadString()
            self.Tx = False

        # recordRx(read)

        try:
            glob_time = self.processor.ttimes[-1]
            glob_bpm = self.processor.bpms[-1]
            glob_temp = self.processor.temps[-1]
            np.save("body_data.npy", np.array([glob_time, glob_bpm,
                                               glob_temp]))
        except:
            pass

        try:
            glob_env_temp = read.split('_')[0]
            glob_env_humid = read.split('_')[1]
            print('receiving env temp: ', glob_env_temp)
            print('receiving env humid: ', glob_env_humid)

            np.save('env_data.npy', np.array([glob_env_temp, glob_env_humid]))

        except:
            pass
        frame = self.cameras[self.selected_cam].get_frame()

        if frame is None:
            if args.video_dir is None:
                self.kill = True
                return

            else:
                pos = self.video_names[self.selected_cam].find("_q")
                self.question_number = self.video_names[
                    self.selected_cam][pos + 2:pos + 4]
                self.stop_record()
                # Change to the next video (in the next camera)
                self.selected_cam += 1

                if self.selected_cam >= len(self.video_names):
                    self.kill = True
                    return

                self.cameras[self.selected_cam].reset_video()
                print('Changing to next video...{}'.format(
                    self.video_names[self.selected_cam]))
                self.start_record()
                return

        self.h, self.w, _c = frame.shape

        # if self.record:
        # self.out.write(frame)

        # else: self.out.release()

        # display unaltered frame
        # imshow("Original",frame)

        # set current image frame to the processor's input
        self.processor.frame_in = frame
        # process the image frame to perform all needed analysis
        try:
            self.processor.run(self.selected_cam)
        except:
            pass
        # collect the output frame for display
        output_frame = self.processor.frame_out

        # show the processed/annotated output frame
        imshow("Processed", output_frame)

        # create and/or update the raw data display if needed
        if self.bpm_plot:
            self.make_bpm_plot()

        if self.send_serial:
            self.serial.write(str(self.processor.bpm) + "\r\n")

        if self.send_udp:
            self.sock.sendto(str(self.processor.bpm), self.udp)

        # handle any key presses
        self.key_handler()
Example No. 27
    def main_loop(self, lock, is_face_im_data_ready, is_face_coord_detected,
                  terminate_flag, coord_detected, img_data_share):
        #def main_loop(self):
        """
        application's main loop.
        """

        # variable to reduce the sample rate for motion detection
        sample_counts_thre = 15
        # counter for the samples; once it exceeds sample_counts_thre,
        # the main program starts providing data to the child process
        sample_counts = 0

        while True:
            # Get current image frame from the camera
            frame = self.cameras[self.selected_cam].get_frame()
            self.h, self.w, _c = frame.shape

            # set current image frame to the processor's input
            self.processor.frame_in = frame
            # process the image frame to perform all needed analysis
            self.processor.run(self.selected_cam)
            # collect the output frame for display
            output_frame = self.processor.frame_out

            sample_counts = sample_counts + 1

            ################
            # providing data to the other process for face recognition
            # print('getting data from camera')
            # check if the face recognition result has been fully processed
            if (not is_face_im_data_ready.value
                ) and sample_counts > sample_counts_thre:
                #if (not is_face_im_data_ready.value) :
                lock.acquire()
                sample_counts = 0
                # print('preparing data for the other process')

                #print( frame.shape)

                try:
                    gray = cv2.equalizeHist(
                        cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
                    #print(frame.shape)
                    #img_data_share[:] = gray.flatten().copy()
                    img_data_share[:] = gray.flatten()
                    #print(len(coord_detected) )
                    #print( coord_detected )
                    #print( is_face_coord_detected)

                    if is_face_coord_detected.value:
                        #self.processor.detected = np.copy( np.frombuffer(coord_detected.get_obj() ))
                        self.processor.detected = np.array(coord_detected)
                        self.processor.detected = self.processor.detected.astype(
                            np.int32)
                    else:
                        self.processor.detected = np.array([])

                finally:
                    is_face_im_data_ready.value = True
                    lock.release()

            # show the processed/annotated output frame
            imshow("Processed", output_frame)

            # create and/or update the raw data display if needed
            if self.bpm_plot:
                self.make_bpm_plot()

            if self.send_serial:
                self.serial.write(str(self.processor.bpm) + "\r\n")

            if self.send_udp:
                self.sock.sendto(str(self.processor.bpm), self.udp)

            # handle any key presses
            self.key_handler()
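
Note on Example No. 27: the loop hands a flattened grayscale frame to a face-detection child process through img_data_share, guarded by lock and the two is_face_* flags. The construction of those shared objects is not shown; the sketch below is a parent-side setup consistent with the .value, .get_obj(), and slice-assignment usage above (the 480x640 resolution and the 4-element rectangle are assumptions):

    import multiprocessing as mp
    import numpy as np

    lock = mp.Lock()
    is_face_im_data_ready = mp.Value('b', False)
    is_face_coord_detected = mp.Value('b', False)
    terminate_flag = mp.Value('b', False)
    coord_detected = mp.Array('d', 4)            # x, y, w, h of the detected face
    img_data_share = mp.Array('B', 480 * 640)    # flattened 8-bit grayscale frame

    # the detection process can view the shared buffer as an image without copying:
    gray_view = np.frombuffer(img_data_share.get_obj(), dtype=np.uint8).reshape(480, 640)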