def warp_face_in_video(facial_mask_fn, video_in_fn, video_out_fn, show_video=False):
    """Replace the first face found in each frame of a video with the face
    from a still image and write the processed frames to a new video file.

    :param facial_mask_fn: path to the still image with a face
    :param video_in_fn: path to the input video file
    :param video_out_fn: path to the video file which will have 'replaced' face
    :param show_video: bool flag to show window with processed video frames
    """
    facial_mask = cv2.imread(facial_mask_fn)
    facial_mask = cv2.cvtColor(facial_mask, cv2.COLOR_BGR2GRAY)
    facial_mask_lm = faceWarp.find_landmarks(facial_mask, faceWarp.predictor)
    video_in = cv2.VideoCapture(video_in_fn)
    # Preserve the source frame rate instead of hard-coding 25 fps (which
    # made the output play at the wrong speed for any other input). Some
    # containers report 0 for CAP_PROP_FPS, so keep 25.0 as the fallback.
    fps = video_in.get(cv2.CAP_PROP_FPS)
    if not fps or fps <= 0:
        fps = 25.0
    video_out = cv2.VideoWriter(
        filename=video_out_fn,
        fourcc=cv2.VideoWriter_fourcc('m', '2', 'v', '1'),
        frameSize=(int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH)),
                   int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))),
        fps=fps,
        isColor=True)
    total_frames_in = video_in.get(cv2.CAP_PROP_FRAME_COUNT)
    try:
        while True:
            ret, frame_in = video_in.read()
            if not ret:  # end of stream or decode error
                break
            curr_frame = video_in.get(cv2.CAP_PROP_POS_FRAMES)
            frame_in = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
            if show_video:
                cv2.imshow('video_in', frame_in)
            elif total_frames_in:
                # Progress indicator; sys.stdout.write is portable across
                # Python 2 and 3 (the original used the py2-only `print expr,`).
                # Guarded against total_frames_in == 0 for headerless streams.
                sys.stdout.write('{:.2%}\r'.format(curr_frame / total_frames_in))
                sys.stdout.flush()
            frame_out = faceWarp.face_warp(facial_mask, facial_mask_lm, frame_in)
            frame_out = cv2.cvtColor(frame_out, cv2.COLOR_GRAY2BGR)
            video_out.write(frame_out)
            if show_video:
                cv2.imshow('video_out', frame_out)
                cv2.waitKey(1)
    finally:
        # Release codec/file handles even if a frame fails to process;
        # the original only released them on the clean end-of-stream path.
        video_in.release()
        video_out.release()
        cv2.destroyAllWindows()
def warp_face_in_video(facial_mask_fn, video_in_fn, video_out_fn, show_video=False):
    """Replace the first face found in each frame of a video with the face
    from a still image and write the processed frames to a new video file.

    NOTE(review): this is a duplicate definition of warp_face_in_video; being
    defined later in the file, it is the one actually in effect at import time.

    :param facial_mask_fn: path to the still image with a face
    :param video_in_fn: path to the input video file
    :param video_out_fn: path to the video file which will have 'replaced' face
    :param show_video: bool flag to show window with processed video frames
    """
    facial_mask = cv2.imread(facial_mask_fn)
    facial_mask = cv2.cvtColor(facial_mask, cv2.COLOR_BGR2GRAY)
    facial_mask_lm = faceWarp.find_landmarks(facial_mask, faceWarp.predictor)
    video_in = cv2.VideoCapture(video_in_fn)
    # The cv2.cv namespace was removed in OpenCV 3: CV_FOURCC is now
    # cv2.VideoWriter_fourcc and CV_CAP_PROP_* constants live on cv2 itself.
    video_out = cv2.VideoWriter(
        filename=video_out_fn,
        fourcc=cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),
        frameSize=(int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH)),
                   int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))),
        fps=25.0,
        isColor=True)
    total_frames_in = video_in.get(cv2.CAP_PROP_FRAME_COUNT)
    try:
        while True:
            ret, frame_in = video_in.read()
            if not ret:  # end of stream or decode error
                break
            curr_frame = video_in.get(cv2.CAP_PROP_POS_FRAMES)
            frame_in = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
            if show_video:
                cv2.imshow('video_in', frame_in)
            elif total_frames_in:
                # Portable progress output (the original used the Python 2-only
                # `print expr,` statement); guarded against a zero frame count.
                sys.stdout.write('{:.2%}\r'.format(curr_frame / total_frames_in))
                sys.stdout.flush()
            frame_out = faceWarp.face_warp(facial_mask, facial_mask_lm, frame_in)
            frame_out = cv2.cvtColor(frame_out, cv2.COLOR_GRAY2BGR)
            video_out.write(frame_out)
            if show_video:
                cv2.imshow('video_out', frame_out)
                cv2.waitKey(1)
    finally:
        # Always release codec/file handles, even if a frame fails to process.
        video_in.release()
        video_out.release()
        cv2.destroyAllWindows()
def warp_face_from_webcam(facial_mask_fn, video_out_fn):
    """Read frames from the webcam, replace the first found face with the face
    from a still image, show the processed frames in a window, and save them
    to a video file.

    :param facial_mask_fn: path to the still image with a face
    :param video_out_fn: path to the video file which will have 'replaced' face
    """
    facial_mask = cv2.cvtColor(cv2.imread(facial_mask_fn), cv2.COLOR_BGR2GRAY)
    facial_mask_lm = faceWarp.find_landmarks(facial_mask, faceWarp.predictor)
    cam = cv2.VideoCapture(0)
    frame_size = (420, 240)  # downsample size, without downsampling too many frames dropped
    video_out = cv2.VideoWriter(
        filename=video_out_fn,
        fourcc=cv2.VideoWriter_fourcc('m', '2', 'v', '1'),  # works good on OSX, for other OS maybe try other codecs
        frameSize=frame_size,
        fps=25.0,
        isColor=True)
    try:
        while True:
            ret, frame_in = cam.read()
            if not ret:
                # Camera grab failed (device busy/unplugged); the original
                # crashed in cv2.resize on a None frame here.
                break
            # Downsample frame - otherwise processing is too slow
            frame_in = cv2.resize(frame_in, dsize=frame_size)
            frame_in = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
            frame_out = faceWarp.face_warp(facial_mask, facial_mask_lm, frame_in)
            frame_out = cv2.cvtColor(frame_out, cv2.COLOR_GRAY2BGR)
            video_out.write(frame_out)
            faceWarp.draw_str(frame_out, (20, 20), 'ESC: stop recording Space: stop & save video')
            cv2.imshow('webcam', frame_out)
            ch = 0xFF & cv2.waitKey(1)
            if ch == 27 or ch == ord(' '):  # ESC or Space stops recording
                break
    finally:
        cam.release()
        # Was missing in the original: without release() the writer may never
        # flush its buffers and the output file can be left unfinalized.
        video_out.release()
        cv2.destroyAllWindows()
def warp_face_from_webcam(facial_mask_fn, video_out_fn):
    """Read frames from the webcam, replace the first found face with the face
    from a still image, show the processed frames in a window, and save them
    to a video file.

    NOTE(review): this is a duplicate definition of warp_face_from_webcam;
    it shadows the earlier one (and is itself shadowed by any later one).

    :param facial_mask_fn: path to the still image with a face
    :param video_out_fn: path to the video file which will have 'replaced' face
    """
    facial_mask = cv2.cvtColor(cv2.imread(facial_mask_fn), cv2.COLOR_BGR2GRAY)
    facial_mask_lm = faceWarp.find_landmarks(facial_mask, faceWarp.predictor)
    cam = cv2.VideoCapture(0)
    frame_size = (420, 240)  # downsample size, without downsampling too many frames dropped
    # The cv2.cv namespace was removed in OpenCV 3: CV_FOURCC is now
    # cv2.VideoWriter_fourcc.
    video_out = cv2.VideoWriter(
        filename=video_out_fn,
        fourcc=cv2.VideoWriter_fourcc('m', 'p', '4', 'v'),  # works good on OSX, for other OS maybe try other codecs
        frameSize=frame_size,
        fps=25.0,
        isColor=True)
    try:
        while True:
            ret, frame_in = cam.read()
            if not ret:
                # Camera grab failed; the original crashed in cv2.resize on
                # a None frame here.
                break
            # Downsample frame - otherwise processing is too slow
            frame_in = cv2.resize(frame_in, dsize=frame_size)
            frame_in = cv2.cvtColor(frame_in, cv2.COLOR_BGR2GRAY)
            frame_out = faceWarp.face_warp(facial_mask, facial_mask_lm, frame_in)
            frame_out = cv2.cvtColor(frame_out, cv2.COLOR_GRAY2BGR)
            video_out.write(frame_out)
            faceWarp.draw_str(frame_out, (20, 20), 'ESC: stop recording Space: stop & save video')
            cv2.imshow('webcam', frame_out)
            ch = 0xFF & cv2.waitKey(1)
            if ch == 27 or ch == ord(' '):  # ESC or Space stops recording
                break
    finally:
        cam.release()
        # Was missing in the original: without release() the output file can
        # be left unfinalized.
        video_out.release()
        cv2.destroyAllWindows()
def warp_face_from_webcam(self):
    """Webcam demo loop: warp the currently selected filter face onto the live
    webcam feed, while simultaneously classifying a gesture from an HSV skin
    mask of the frame and triggering sounds for recognized gestures.

    Reads ``self.current_filter`` (filename of the face image under ./demo/)
    and ``self.recording`` (bool: whether frames are written to the output
    video). Runs until ESC, 'q', or Space is pressed.
    """
    # Debounce state for the gesture classifier: previousNum is the last
    # predicted class index, newNumCount counts consecutive repeats of it.
    previousNum = 0
    newNumCount = 0
    video_out_fn = './demo/demo_arni.mov'
    facial_mask_fn = './demo/' + self.current_filter
    #facial_mask = cv2.cvtColor(cv2.imread(facial_mask_fn), cv2.COLOR_HSV2RGB)
    facial_mask = cv2.imread(facial_mask_fn)
    facial_mask_lm = faceWarp.find_landmarks(facial_mask, faceWarp.predictor)
    cam = cv2.VideoCapture(0)
    frame_size = (640, 480)  # downsample size, without downsampling too many frames dropped
    # NOTE(review): frame_width/frame_height are computed but never used;
    # the writer uses frame_size instead. 3/4 are the legacy numeric ids of
    # CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
    frame_width = int(cam.get(3))
    frame_height = int(cam.get(4))
    # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.
    #out = cv2.VideoWriter('a.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 20, (frame_width,frame_height))
    video_out = cv2.VideoWriter(
        filename=video_out_fn,
        fourcc=cv2.VideoWriter_fourcc('m', '2', 'v', '1'),  # works good on OSX, for other OS maybe try other codecs
        frameSize=frame_size,
        fps=50.0,
        isColor=True)

    def nothing():
        # No-op trackbar callback; values are polled with getTrackbarPos.
        # NOTE(review): OpenCV invokes trackbar callbacks with one argument,
        # so this zero-arg signature would raise if the callback ever fired
        # — confirm against the cv2 version in use.
        pass

    # Sliders window: erosion strength, threshold, and lower HSV skin bound.
    cv2.namedWindow('Sliders')
    cv2.createTrackbar("erode", "Sliders", 2, 10, nothing)
    cv2.createTrackbar("thresh", "Sliders", 70, 249, nothing)
    cv2.createTrackbar('R', 'Sliders', 0, 255, nothing)
    cv2.createTrackbar('G', 'Sliders', 90, 255, nothing)
    cv2.createTrackbar('B', 'Sliders', 0, 255, nothing)
    while True:
        # Poll current slider values each frame.
        erode = cv2.getTrackbarPos("erode", "Sliders")
        thresh = cv2.getTrackbarPos("thresh", "Sliders")
        r = cv2.getTrackbarPos("R", "Sliders")
        g = cv2.getTrackbarPos("G", "Sliders")
        b = cv2.getTrackbarPos("B", "Sliders")
        # NOTE(review): ret is not checked; a failed grab leaves frame_in as
        # None and cv2.flip would raise.
        ret, frame_in = cam.read()
        frame_in = cv2.flip(frame_in, 1)  # mirror for a selfie-style view
        kernel = np.ones((3, 3), np.uint8)
        roi = frame_in[0:900, 0:900]
        #cv2.rectangle(frame_in,(0,0),(900,900),(0,255,0),0)
        # Skin segmentation in HSV space; the trackbars set the lower bound.
        # NOTE(review): despite the R/G/B slider names, [r, g, b] is used as
        # an HSV lower bound here — verify the intended channel order.
        hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
        lower_skin = np.array([r, g, b], dtype=np.uint8)
        upper_skin = np.array([20, 255, 255], dtype=np.uint8)
        mask = cv2.inRange(hsv, lower_skin, upper_skin)
        mask = cv2.erode(mask, kernel, iterations=erode)
        mask = cv2.GaussianBlur(mask, (5, 5), 200)
        # NOTE(review): the thresholded image is assigned to `thresh`
        # (clobbering the slider value) and then never used — the pipeline
        # continues from the blurred `mask`. Possibly dead code.
        ret, thresh = cv2.threshold(mask, thresh, 255, 0)
        # Shrink the mask to the classifier's expected input resolution.
        mask = cv2.resize(mask, (80, 60))
        #mask = cv2.cvtColor(mask,cv2.COLOR_GRAY2RGB)
        cv2.namedWindow("mask", cv2.WINDOW_NORMAL)
        cv2.resizeWindow('mask', 320, 240)
        cv2.moveWindow('mask', 980, 0)
        cv2.imshow('mask', mask)
        # Normalize to [0, 1] and add a batch dimension for model.predict.
        mask = mask.astype("float") / 255.0
        mask = img_to_array(mask)
        mask = np.expand_dims(mask, axis=0)
        # Ten-way gesture classifier; each name is the score of one gesture.
        (none, dabL, dabR, hitL, hitR, whipL, whipR, tPose, shoot,
         keke) = model.predict(mask)[0]
        values = [none, dabL, dabR, hitL, hitR, whipL, whipR, tPose, shoot,
                  keke]
        currentNum = values.index(max(values))  # index of the top-scoring gesture
        # Debounce: count consecutive frames with the same prediction
        # (capped at 7) before acting on it.
        if (currentNum == previousNum):
            if (newNumCount < 7):
                newNumCount = newNumCount + 1
                print(str(newNumCount) + " occurences of " + str(currentNum))
        else:
            newNumCount = 0
            previousNum = currentNum
            print("New number: " + str(currentNum) + ", " + str(previousNum))
        # After exactly 3 consecutive matches, play the sound mapped to the
        # gesture (class index N -> sound N-1; class 0 = "none" plays nothing).
        if (newNumCount == 3):
            print("Play")
            if currentNum == 1:
                mMusicPlayer.PlaySound(0)
            if currentNum == 2:
                mMusicPlayer.PlaySound(1)
            if currentNum == 3:
                mMusicPlayer.PlaySound(2)
            if currentNum == 4:
                mMusicPlayer.PlaySound(3)
            if currentNum == 5:
                mMusicPlayer.PlaySound(4)
            if currentNum == 6:
                mMusicPlayer.PlaySound(5)
            if currentNum == 7:
                mMusicPlayer.PlaySound(6)
            if currentNum == 8:
                mMusicPlayer.PlaySound(7)
            if currentNum == 9:
                mMusicPlayer.PlaySound(8)
        # Downsample frame - otherwise processing is too slow
        frame_in = cv2.resize(frame_in, dsize=frame_size)
        #frame_in = cv2.cvtColor(frame_in, cv2.COLOR_HSV2RGB)
        frame_out = faceWarp.face_warp(facial_mask, facial_mask_lm, frame_in)
        if self.recording:
            video_out.write(frame_out)
        cv2.namedWindow("Recording", cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Recording', 700, 525)
        cv2.moveWindow("Recording", 280, 0)
        cv2.imshow('Recording', frame_out)
        # ESC, 'q', or Space ends the loop.
        # NOTE(review): waitKey is called twice per iteration, so the 'q'
        # check consumes a separate key event from the ESC/Space check —
        # a key press can be missed by one of the two reads.
        ch = 0xFF & cv2.waitKey(1)
        if ch == 27:
            break
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        if ch == ord(' '):
            break
    cam.release()
    video_out.release()
    cv2.destroyAllWindows()