def VideoFrame(videos, detector, predictor):
    """Play a video and overlay 68-point facial landmark polylines on each frame.

    Args:
        videos: Path or device index passed to ``cv2.VideoCapture``.
        detector: dlib-style face detector; called as ``detector(img, 1)``.
        predictor: dlib-style shape predictor; called as ``predictor(img, face)``.

    Each frame is converted to grayscale, then transposed and flipped
    (i.e. rotated 90 degrees) before detection. Landmarks are drawn only
    when exactly one face is found. Press 'q' in the window to stop early.
    """
    print(videos)
    cap = cv2.VideoCapture(videos)
    fps = FPS().start()
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            # BUG FIX: original never checked `ret`; at end-of-stream `frame`
            # is None and cv2.cvtColor raised instead of exiting cleanly.
            if not ret:
                break

            vid = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            vid = cv2.transpose(vid)
            vid = cv2.flip(vid, 1)

            faces = detector(vid, 1)
            if len(faces) == 1:
                for face in faces:
                    landmark = predictor(vid, face).parts()
                    landmarks = [[p.x, p.y] for p in landmark]

                    # Standard dlib 68-point landmark index ranges.
                    jaw = reshape_for_polyline(landmarks[0:17])
                    left_eyebrow = reshape_for_polyline(landmarks[22:27])
                    right_eyebrow = reshape_for_polyline(landmarks[17:22])
                    nose_bridge = reshape_for_polyline(landmarks[27:31])
                    lower_nose = reshape_for_polyline(landmarks[30:35])
                    left_eye = reshape_for_polyline(landmarks[42:48])
                    right_eye = reshape_for_polyline(landmarks[36:42])
                    outer_lip = reshape_for_polyline(landmarks[48:60])
                    inner_lip = reshape_for_polyline(landmarks[60:68])

                    color = (255, 255, 255)
                    thickness = 2

                    # Open curves for jaw/brows/bridge; closed loops for the
                    # nose tip, eyes and lips (isClosed flag differs).
                    cv2.polylines(vid, [jaw], False, color, thickness)
                    cv2.polylines(vid, [left_eyebrow], False, color, thickness)
                    cv2.polylines(vid, [right_eyebrow], False, color, thickness)
                    cv2.polylines(vid, [nose_bridge], False, color, thickness)
                    cv2.polylines(vid, [lower_nose], True, color, thickness)
                    cv2.polylines(vid, [left_eye], True, color, thickness)
                    cv2.polylines(vid, [right_eye], True, color, thickness)
                    cv2.polylines(vid, [outer_lip], True, color, thickness)
                    cv2.polylines(vid, [inner_lip], True, color, thickness)

            fps.update()
            cv2.imshow('file', vid)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
    finally:
        # Always release resources, even if a frame raises mid-loop.
        # BUG FIX: original called out.release() on a VideoWriter that was
        # commented out (NameError) and fps.close(), which does not exist on
        # imutils' FPS class (the API is stop()).
        fps.stop()
        cap.release()
        cv2.destroyAllWindows()