# Example #1
    def preprocess_frame(self, frame, frame_number, labels, projection):
        """Crop the face from one video frame and save it as an image file.

        Args:
            frame: raw frame as a numpy array (wrapped via Image.fromarray
                before cropping).
            frame_number: zero-based frame index; used to look up the eye
                landmarks row and to name the output file.
            labels: per-frame label sequence; a truthy last entry marks a
                human-labeled tracker failure.
            projection: unused here; kept so the caller-facing signature is
                unchanged.

        Returns:
            Tuple (success, message, image). On failure `success` is False,
            `message` gives the reason, and `image` is None. On success the
            cropped image re-read from disk is returned.
        """
        frame_file = self.__frame_filename(frame_number)

        # Guard: no landmark row exists for this frame.
        if frame_number >= self.landmarks_df.shape[0]:
            return False, 'Frame number > number of landmark file rows', None

        # Columns 0-1 feed the left eye and 2-3 the right eye (presumably
        # (x, y) pairs — confirm against the landmark file format). Pure
        # positional .iloc[row, col] replaces the original
        # iloc[[n]][col][n] chain, which mixed positional and label lookup
        # and only worked with a default RangeIndex.
        left_eye = (self.landmarks_df.iloc[frame_number, 0],
                    self.landmarks_df.iloc[frame_number, 1])
        right_eye = (self.landmarks_df.iloc[frame_number, 2],
                     self.landmarks_df.iloc[frame_number, 3])

        # All-zero coordinates mean the tracker produced no landmarks.
        if all(v == 0 for v in (left_eye + right_eye)):
            return False, 'Tracker fail: no landmarks available', None

        if labels[-1]:
            return False, 'Tracker fail: human labeled failure', None

        cropped = np.array(
            crop_face(Image.fromarray(frame),
                      eye_left=left_eye,
                      eye_right=right_eye,
                      offset_pct=(0.25, 0.25),
                      dest_sz=(64, 64)))

        cv2.imwrite(frame_file, cropped)  # save frame as PNG file
        return True, 'Success', cv2.imread(frame_file)
# Example #2
    def preprocess_frame(self, frame, frame_number, labels):
        """Crop the face from one video frame and save it as an image file.

        Args:
            frame: raw frame as a numpy array (wrapped via Image.fromarray
                before cropping).
            frame_number: zero-based frame index; used to look up the eye
                landmarks row and to name the output file.
            labels: per-frame label sequence; a truthy last entry marks a
                human-labeled tracker failure.

        Returns:
            Tuple (success, message, image). On failure `success` is False,
            `message` gives the reason, and `image` is None. On success the
            cropped image re-read from disk is returned.
        """
        frame_file = self.__frame_filename(frame_number)

        # Guard: no landmark row exists for this frame.
        if frame_number >= self.landmarks_df.shape[0]:
            return False, 'Frame number > number of landmark file rows', None

        # Columns 0-1 feed the left eye and 2-3 the right eye (presumably
        # (x, y) pairs — confirm against the landmark file format). Pure
        # positional .iloc[row, col] replaces the original
        # iloc[[n]][col][n] chain, which mixed positional and label lookup
        # and only worked with a default RangeIndex.
        left_eye = (self.landmarks_df.iloc[frame_number, 0],
                    self.landmarks_df.iloc[frame_number, 1])
        right_eye = (self.landmarks_df.iloc[frame_number, 2],
                     self.landmarks_df.iloc[frame_number, 3])

        # All-zero coordinates mean the tracker produced no landmarks.
        if all(v == 0 for v in (left_eye + right_eye)):
            return False, 'Tracker fail: no landmarks available', None

        if labels[-1]:
            return False, 'Tracker fail: human labeled failure', None

        cropped = np.array(crop_face(Image.fromarray(frame),
                                     eye_left=left_eye,
                                     eye_right=right_eye,
                                     offset_pct=(0.25, 0.25),
                                     dest_sz=(64, 64)))

        # Optionally convert to grayscale before writing to disk.
        to_save = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY) if self.grayscale else cropped

        cv2.imwrite(frame_file, to_save)  # save frame as PNG file
        return True, 'Success', cv2.imread(frame_file)
# Example #3
def representative_dataset_gen(
        wflw_dir="/home/robin/data/facial-marks/wflw/WFLW_images",
        num_samples=100,
        input_size=256):
    """Yield preprocessed image batches for TFLite quantization calibration.

    Args:
        wflw_dir: directory containing the WFLW images. The default is the
            path that was previously hard-coded, so zero-argument calls
            behave exactly as before.
        num_samples: number of samples to draw from the dataset.
        input_size: square side length the cropped face is resized to.

    Yields:
        A single-element list holding one normalized image batch of shape
        (1, input_size, input_size, 3).
    """
    ds_wflw = fmd.wflw.WFLW(False, "wflw_test")
    ds_wflw.populate_dataset(wflw_dir)

    for _ in range(num_samples):
        sample = ds_wflw.pick_one()

        # Crop the face region out of the full image using its marks; the
        # border and bbox outputs of crop_face are not needed here.
        image_cropped, _, _ = crop_face(sample.read_image(), sample.marks,
                                        scale=1.2)

        # Resize, convert BGR -> RGB, and normalize for the model input.
        image_cropped = cv2.resize(image_cropped, (input_size, input_size))
        img_rgb = cv2.cvtColor(image_cropped, cv2.COLOR_BGR2RGB)
        img_input = normalize(np.array(img_rgb, dtype=np.float32))

        yield [np.expand_dims(img_input, axis=0)]
def evaluate(dataset: fmd.mark_dataset.dataset, model, n_points):
    """Evaluate the model on the dataset. The evaluation method should be
    the same with the official code.

    Args:
        dataset: a FMD dataset; iterating it yields samples exposing
            `read_image()` and `marks`.
        model: any model having a `predict` method; its output is reshaped
            to (-1, 2) normalized mark coordinates.
        n_points: number of landmark points to evaluate per sample.

    Returns:
        A summary string with the mean NME (scaled by 100) and the failure
        rates at the 0.08 and 0.10 NME thresholds.

    Raises:
        ValueError: if the dataset yields no samples.
    """
    # Accumulators for the normalized mean error (NME) statistics.
    nme_count = 0
    nme_sum = 0
    count_failure_008 = 0
    count_failure_010 = 0

    # Loop though the dataset samples.
    for sample in tqdm(dataset):
        image = sample.read_image()
        marks = sample.marks[:n_points]

        # Crop the face out of the image; bbox is in original-image
        # coordinates (the border output is unused).
        image_cropped, _, bbox = crop_face(image, marks, scale=1)

        # The model expects a 128x128 RGB input.
        image_cropped = cv2.resize(image_cropped, (128, 128))
        img_rgb = cv2.cvtColor(image_cropped, cv2.COLOR_BGR2RGB)

        # Predict normalized marks, then scale by the bbox width and shift
        # back into original-image coordinates.
        heatmaps = model.predict(tf.expand_dims(img_rgb, 0))
        marks_prediction = np.reshape(heatmaps, (-1, 2))
        marks_prediction *= (bbox[2] - bbox[0])
        marks_prediction[:, 0] += bbox[0]
        marks_prediction[:, 1] += bbox[1]

        # Compute NME against the ground-truth (x, y) coordinates.
        nme_temp = compute_nme(marks_prediction, marks[:, :2], n_points)

        if nme_temp > 0.08:
            count_failure_008 += 1
        if nme_temp > 0.10:
            count_failure_010 += 1

        nme_sum += nme_temp
        nme_count += 1

    # Guard against division by zero on an empty dataset.
    if nme_count == 0:
        raise ValueError("Cannot evaluate: the dataset yielded no samples.")

    nme = nme_sum / nme_count
    failure_008_rate = count_failure_008 / nme_count
    failure_010_rate = count_failure_010 / nme_count

    return "NME:{:.4f}, [008]:{:.4f}, [010]:{:.4f}".format(
        100 * nme, failure_008_rate, failure_010_rate)