def app(image_path):
    image = cv.imread(image_path)
    assert image is not None

    # initialize Face Detection and Face Landmark nets
    face_detector = FaceDetector()
    face_landmarker = FaceLandmarker()

    # detect faces, then estimate landmarks for each face crop
    scores, bboxes = face_detector.getFaces(image, def_score=0.5)
    landmarks = []
    for i, bbox in enumerate(bboxes):
        x1, y1, x2, y2 = bbox
        face_img = image[y1:y2, x1:x2]
        landmark = face_landmarker.getLandmark(face_img)
        # shift landmark coordinates from crop space to image space
        landmark[:, :] += np.array([x1, y1])
        landmarks.append(landmark)

    # align and show each detected face in its own window
    for i, landmark in enumerate(landmarks):
        aligned_img = align(image, bboxes[i], landmark)
        cv.imshow(str(i), aligned_img)

    # draw landmarks and bounding boxes on the original image
    if len(bboxes):
        for i, landmark in enumerate(landmarks):
            for j, point in enumerate(landmark):
                cv.circle(image, tuple(point), 3, (0, 255, 0), -1)
        image = vis.plotBBoxes(image, bboxes)
    # vis.* helpers may return a PIL (RGB) image; convert back to a BGR ndarray
    image = cv.cvtColor(np.asarray(image), cv.COLOR_BGR2RGB)

    cv.imshow(image_path, image)
    cv.waitKey(0)
def app(video_link, video_name, show, flip_hor, flip_ver):
    # initialize Face Detection net
    face_detector = FaceDetector()
    LOG.info('Face Detector initialization done')

    # initialize Face Landmark net
    face_landmarker = FaceLandmarker()
    LOG.info('Face Landmarker initialization done')

    # initialize Video Capturer
    cap = cv.VideoCapture(video_link)
    (W, H), FPS = imgproc.cameraCalibrate(cap, size=720, by_height=True)
    LOG.info('Camera Info: ({}, {}) - {:.3f}'.format(W, H, FPS))

    while cap.isOpened():
        _, frm = cap.read()
        if not _:
            LOG.info('Reached the end of Video source')
            break

        if flip_ver:
            frm = cv.flip(frm, 0)
        if flip_hor:
            frm = cv.flip(frm, 1)
        frm = imgproc.resizeByHeight(frm, 720)

        _start_t = time.time()
        scores, bboxes = face_detector.getFaces(frm, def_score=0.5)
        landmarks = []
        for i, bbox in enumerate(bboxes):
            x1, y1, x2, y2 = bbox
            face_img = frm[y1:y2, x1:x2]
            landmark = face_landmarker.getLandmark(face_img)
            # shift landmark coordinates from crop space to frame space
            landmark[:, :] += np.array([x1, y1])
            landmarks.append(landmark)
        _prx_t = time.time() - _start_t

        if len(bboxes):
            for i, landmark in enumerate(landmarks):
                for j, point in enumerate(landmark):
                    cv.circle(frm, tuple(point), 3, (0, 255, 0), -1)
            frm = vis.plotBBoxes(frm, bboxes, len(bboxes) * ['face'], scores)
        frm = vis.plotInfo(frm, 'Raspberry Pi - FPS: {:.3f}'.format(1 / _prx_t))
        # vis.* helpers may return a PIL (RGB) image; convert back to a BGR ndarray
        frm = cv.cvtColor(np.asarray(frm), cv.COLOR_BGR2RGB)

        if show:
            cv.imshow(video_name, frm)
            key = cv.waitKey(1)
            if key in [27, ord('q')]:
                LOG.info('Interrupted by Users')
                break

    cap.release()
    cv.destroyAllWindows()
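# imgproc.resizeByHeight is not shown in this snippet. Below is a minimal
# sketch of what it is assumed to do: scale a frame to a target height while
# preserving the aspect ratio. The name matches the calls above; the real
# implementation in imgproc may differ.
import cv2 as cv

def resizeByHeight(image, height=720):
    h, w = image.shape[:2]
    new_w = int(round(w * height / h))
    return cv.resize(image, (new_w, height), interpolation=cv.INTER_AREA)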
def app(image_path):
    # initialize Face Detection net
    face_detector = FaceDetector()
    LOG.info('Face Detector initialization done')

    # initialize Face Landmark net
    face_landmarker = FaceLandmarker()
    LOG.info('Face Landmarker initialization done')

    # initialize Video Capturer on the default webcam
    cap = cv.VideoCapture(0)

    while cap.isOpened():
        _, image = cap.read()
        if not _:
            LOG.info('Reached the end of Video source')
            break
        image = imgproc.resizeByHeight(image, 720)

        _start_t = time.time()
        scores, bboxes = face_detector.getFaces(image, def_score=0.5)
        landmarks = []
        for i, bbox in enumerate(bboxes):
            x1, y1, x2, y2 = bbox
            face_img = image[y1:y2, x1:x2]
            landmark = face_landmarker.getLandmark(face_img)
            # align the face crop and show it in its own window
            aligned_img = alignFace(face_img, landmark)
            cv.imshow('aligned-faces' + str(i), aligned_img)
            # shift landmark coordinates from crop space to frame space
            landmark[:, :] += np.array([x1, y1])
            landmarks.append(landmark)
        _prx_t = time.time() - _start_t

        if len(bboxes):
            for i, landmark in enumerate(landmarks):
                for j, point in enumerate(landmark):
                    cv.circle(image, tuple(point), 3, (0, 255, 0), -1)
            image = vis.plotBBoxes(image, bboxes, len(bboxes) * ['face'], scores)
        image = vis.plotInfo(image, 'Raspberry Pi - FPS: {:.3f}'.format(1 / _prx_t))
        image = cv.cvtColor(np.asarray(image), cv.COLOR_BGR2RGB)

        cv.imshow(image_path, image)
        key = cv.waitKey(1)
        if key in [27, ord('q')]:
            LOG.info('Interrupted by Users')
            break

    cap.release()
    cv.destroyAllWindows()
def app(image_path):
    # initialize Face Detection net
    face_detector = FaceDetector()
    LOG.info('Face Detector initialization done')

    # initialize Face Landmark net
    face_landmarker = FaceLandmarker()
    LOG.info('Face Landmarker initialization done')

    # initialize Face Alignment class
    face_aligner = FaceAligner()
    LOG.info('Face Aligner initialization done')

    # read the input image
    image = cv.imread(image_path)
    assert image is not None
    image = imgproc.resizeByHeight(image, 720)

    _start_t = time.time()
    scores, bboxes = face_detector.getFaces(image, def_score=0.5)
    landmarks = []
    for i, bbox in enumerate(bboxes):
        x1, y1, x2, y2 = bbox
        face_img = image[y1:y2, x1:x2]
        landmark = face_landmarker.getLandmark(face_img)
        # align the face crop and show it in its own window
        aligned_img = face_aligner.align(face_img, landmark)
        cv.imshow('aligned-faces' + str(i), aligned_img)
        # shift landmark coordinates from crop space to image space
        landmark[:, :] += np.array([x1, y1])
        landmarks.append(landmark)
    _prx_t = time.time() - _start_t

    if len(bboxes):
        for i, landmark in enumerate(landmarks):
            for j, point in enumerate(landmark):
                cv.circle(image, tuple(point), 3, (0, 255, 0), -1)
        image = vis.plotBBoxes(image, bboxes, len(bboxes) * ['face'], scores)
    image = vis.plotInfo(image, 'Raspberry Pi - FPS: {:.3f}'.format(1 / _prx_t))
    image = cv.cvtColor(np.asarray(image), cv.COLOR_BGR2RGB)

    cv.imshow(image_path, image)
    key = cv.waitKey(0)
    cv.destroyAllWindows()
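# FaceAligner is not defined in this snippet. Below is a minimal sketch of a
# possible implementation, assuming a landmark layout whose first two points
# are the left and right eye centers (as in common 5-point landmark models):
# the crop is rotated so the eye line becomes horizontal. The real class may
# use a different landmark layout or a full similarity transform.
import numpy as np
import cv2 as cv

class FaceAligner:
    def align(self, face_img, landmark):
        left_eye, right_eye = landmark[0], landmark[1]
        # angle of the line joining the two eyes, in degrees
        dy = float(right_eye[1] - left_eye[1])
        dx = float(right_eye[0] - left_eye[0])
        angle = np.degrees(np.arctan2(dy, dx))
        # rotate the crop about the midpoint between the eyes
        center = ((float(left_eye[0]) + float(right_eye[0])) / 2.0,
                  (float(left_eye[1]) + float(right_eye[1])) / 2.0)
        M = cv.getRotationMatrix2D(center, angle, 1.0)
        h, w = face_img.shape[:2]
        return cv.warpAffine(face_img, M, (w, h), flags=cv.INTER_LINEAR)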
def app(image1_path, image2_path):
    # initialize Face Detector net
    detector = FaceDetector()
    # initialize Face Landmarker net
    landmarker = FaceLandmarker()
    # initialize Face Aligner
    aligner = FaceAligner()
    # initialize Face Embedder
    embedder = FaceEmbedder()

    # ================================================================
    image1 = cv.imread(image1_path)
    image2 = cv.imread(image2_path)
    assert image1 is not None and image2 is not None

    # ================================================================
    _, faces_1 = detector.getFaces(image1)
    _, faces_2 = detector.getFaces(image2)
    assert len(faces_1) and len(faces_2)

    # ================================================================
    x1, y1, x2, y2 = faces_1[0]
    face_image1 = image1[y1:y2, x1:x2]
    lm1 = landmarker.getLandmark(face_image1)
    aligned_face1 = aligner.align(face_image1, lm1)

    x1, y1, x2, y2 = faces_2[0]
    face_image2 = image2[y1:y2, x1:x2]
    lm2 = landmarker.getLandmark(face_image2)
    aligned_face2 = aligner.align(face_image2, lm2)

    # ================================================================
    # NOTE: embeddings are computed on the raw crops here;
    # aligned_face1 and aligned_face2 are not used further
    emb1 = embedder.getEmb(face_image1)
    LOG.info('emb1 shape: {}'.format(emb1.shape))
    emb2 = embedder.getEmb(face_image2)
    LOG.info('emb2 shape: {}'.format(emb2.shape))

    dist = getDistance(emb1, emb2)
    LOG.info('distance: {:.4}'.format(dist))
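# getDistance is not defined in this snippet. Below is a minimal sketch,
# assuming the embeddings are 1-D feature vectors compared by Euclidean
# distance after L2-normalization (a common choice for face embeddings);
# the real helper may use cosine distance or skip the normalization.
import numpy as np

def getDistance(emb1, emb2):
    # flatten and L2-normalize both embeddings
    v1 = emb1.ravel() / np.linalg.norm(emb1)
    v2 = emb2.ravel() / np.linalg.norm(emb2)
    # Euclidean distance between the normalized vectors
    return float(np.linalg.norm(v1 - v2))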
def app(video_link, video_name, show, flip_hor, flip_ver):
    video_links = [
        '/home/pi/Videos/crowd-6582.mp4',
        '/home/pi/Videos/india-444.mp4',
        '/home/pi/Videos/paris-2174.mp4',
        '/home/pi/Videos/scotland-21847.mp4',
    ]

    # initialize Face Detection net
    face_detector = FaceDetector()
    LOG.info('Face Detector initialization done')

    # initialize Face Landmark net
    face_landmarker = FaceLandmarker()
    LOG.info('Face Landmarker initialization done')

    # initialize Video Capturers, one per source
    cap0 = cv.VideoCapture(video_links[0])
    cap1 = cv.VideoCapture(video_links[1])
    cap2 = cv.VideoCapture(video_links[2])
    cap3 = cv.VideoCapture(video_links[3])
    # (W, H), FPS = imgproc.cameraCalibrate(cap, size=720, by_height=True)
    # LOG.info('Camera Info: ({}, {}) - {:.3f}'.format(W, H, FPS))

    time_str = time.strftime(cfg.TIME_FM)
    saved_path = 'output.avi'
    writer = cv.VideoWriter(saved_path, cv.VideoWriter_fourcc(*'XVID'), 24, (1280, 720))

    cnt_frm = 0
    while cap0.isOpened() and cap1.isOpened() and cap2.isOpened() and cap3.isOpened():
        _0, frm0 = cap0.read()
        _1, frm1 = cap1.read()
        _2, frm2 = cap2.read()
        _3, frm3 = cap3.read()
        if not _0 or not _1 or not _2 or not _3:
            LOG.info('Reached the end of Video source')
            break
        cnt_frm += 1

        frm0 = imgproc.resizeByHeight(frm0, 360)
        frm1 = imgproc.resizeByHeight(frm1, 360)
        frm2 = imgproc.resizeByHeight(frm2, 360)
        frm3 = imgproc.resizeByHeight(frm3, 360)

        frm0 = processFrame(frm0, face_detector, face_landmarker)
        frm1 = processFrame(frm1, face_detector, face_landmarker)
        frm2 = processFrame(frm2, face_detector, face_landmarker)
        frm3 = processFrame(frm3, face_detector, face_landmarker)

        # tile the four processed streams into a 2x2 grid
        # (assumes 16:9 sources, i.e. 640x360 after resizing)
        frm = np.zeros((720, 1280, 3), dtype=np.uint8)
        frm[:360, :640] = frm0
        frm[:360, 640:] = frm1
        frm[360:, :640] = frm2
        frm[360:, 640:] = frm3
        LOG.info('frm shape: {}'.format(frm.shape))

        cv.imwrite(str(cnt_frm) + '.jpg', frm)
        writer.write(frm)
        LOG.info('Frames processed: {}'.format(cnt_frm))

        if show:
            cv.imshow('output', frm)
            key = cv.waitKey(1)
            if key in [27, ord('q')]:
                LOG.info('Interrupted by Users')
                break

    writer.release()
    cap0.release()
    cap1.release()
    cap2.release()
    cap3.release()
    cv.destroyAllWindows()
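# processFrame is not defined in this snippet. Below is a minimal sketch that
# mirrors the per-frame logic of the single-stream apps above: detect faces,
# estimate landmarks, draw them, and plot the bounding boxes. It relies on the
# same module-level imports (cv, np, vis) as the functions above; the cast
# back to an ndarray assumes vis.plotBBoxes may return a PIL image, as the
# np.asarray calls elsewhere in this code suggest.
def processFrame(frm, face_detector, face_landmarker):
    scores, bboxes = face_detector.getFaces(frm, def_score=0.5)
    landmarks = []
    for bbox in bboxes:
        x1, y1, x2, y2 = bbox
        face_img = frm[y1:y2, x1:x2]
        landmark = face_landmarker.getLandmark(face_img)
        # shift landmark coordinates from crop space to frame space
        landmark[:, :] += np.array([x1, y1])
        landmarks.append(landmark)
    if len(bboxes):
        for landmark in landmarks:
            for point in landmark:
                cv.circle(frm, tuple(point), 3, (0, 255, 0), -1)
        frm = np.asarray(vis.plotBBoxes(frm, bboxes, len(bboxes) * ['face'], scores))
    return frm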