# Example 1 (votes: 0)
def test_align():
    """Smoke test: detect, align, and pose-estimate the face in a sample image.

    Reads a hard-coded test image, detects face landmarks, crops an aligned
    face chip, re-detects landmarks on the crop, computes head-pose angles,
    and writes the aligned crop to ``test.jpg``.

    Raises:
        Exception: if no aligned face crop could be extracted.
    """
    # Bind the path once so error messages can reference it (previously the
    # exception below used an undefined `image_path` -> NameError).
    image_path = "timg4.jpg"
    image = cv2.imread(image_path)
    bounds, lmarks = gen_face(MTCNN_DETECT, image)
    # Aligned face chip with padding 0.4, as used in the paper.
    crops = MTCNN_DETECT.extract_image_chips(image, lmarks, padding=0.4)
    if not crops:
        raise Exception("no crops~~ %s" % image_path)
    # Landmark positions shift after cropping, so detect again on the crop.
    bounds, lmarks = gen_face(MTCNN_DETECT, crops[0])
    org_box, first_lmarks = bounds[0], lmarks[0]
    trible_box = gen_boundbox(org_box, first_lmarks)
    pitch, yaw, roll = get_rotation_angle(image, first_lmarks)
    print(pitch, yaw, roll)
    cv2.imwrite("test.jpg", crops[0])
# Example 2 (votes: 0)
    def crop_and_trans_images(self, detector, series):
        """Detect, align, and serialize a single face image described by `series`.

        IMDB rows may contain multiple faces; any row with more than one
        detected/annotated face is rejected (its fields become empty/NaN).

        Args:
            detector: face detector exposing `extract_image_chips`.
            series: pandas Series with at least `full_path`, `name`, `age`,
                and `second_face_score` fields; mutated in place.

        Returns:
            The same `series`, with `image`, `org_box`, `landmarks`,
            `trible_box`, `yaw`, `pitch`, and `roll` fields filled in
            (empty arrays / NaN on failure).
        """
        image_path = os.path.join(self.data_dir, series.full_path[0])
        # Bind `image` before the try: previously, if reading failed early,
        # the encode below raised NameError instead of degrading gracefully.
        image = None
        try:
            print(image_path)
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            # second_face_score is NaN iff only one face was annotated.
            if not np.isnan(series.second_face_score):
                raise Exception("more than one face~---%s~-%s- %s" %
                                (series.name, series.age, image_path))
            bounds, lmarks = gen_face(detector, image, image_path)
            # Aligned face chip with padding 0.4, as used in the paper.
            crops = detector.extract_image_chips(image, lmarks, padding=0.4)
            if len(crops) == 0:
                raise Exception("no crops~~ %s---%s" %
                                (image_path, series.age))
            if len(crops) > 1:
                raise Exception("more than one face~---%s~-- %s" %
                                (series.name, image_path))
            # Landmark positions shift after cropping, so detect again.
            bounds, lmarks = gen_face(detector, crops[0], image_path)
            org_box, first_lmarks = bounds[0], lmarks[0]
            trible_box = gen_boundbox(org_box, first_lmarks)
            # Head-pose angles, used downstream for filtering.
            pitch, yaw, roll = get_rotation_angle(crops[0], first_lmarks)
            image = crops[0]  # keep only the aligned face
        except Exception as ee:
            logging.info("exception as ee: %s" % ee)
            # Empty/NaN placeholders so the row can still be serialized.
            trible_box = np.array([])
            org_box, first_lmarks = np.array([]), np.array([])
            pitch, yaw, roll = np.nan, np.nan, np.nan
        if image is None:
            # Read failed before any image was bound; store empty payload.
            series["image"] = b""
        else:
            status, buf = cv2.imencode(".jpg", image)
            # tobytes(): tostring() was removed in NumPy 2.0.
            series["image"] = buf.tobytes()
        series["org_box"] = org_box.dumps()  # xmin, ymin, xmax, ymax (pickled)
        series["landmarks"] = first_lmarks.dumps()  # y1..y5, x1..x5 (pickled)
        series["trible_box"] = trible_box.dumps()
        series["yaw"] = yaw
        series["pitch"] = pitch
        series["roll"] = roll

        return series
# Example 3 (votes: 0)
def loadData_preprocessData_and_makeDataFrame():
    """Load the dataset's meta CSV, align/crop every face, and return a DataFrame.

    For each image listed in ``<dataset_name>_meta.csv``: detect exactly one
    face, crop an upright-aligned chip, re-detect landmarks on the chip,
    build the three nested face boxes required by the paper, and compute
    head-pose angles. Failed images become all-NaN rows and are dropped at
    the end, along with implausible ages (outside [0, 100]).

    Relies on module-level globals: `dataset_base_path`, `dataset_name`,
    `extra_padding`, `detect_faces_and_landmarks`, `gen_boundbox`,
    `get_rotation_angle`, and `dlib`.

    Returns:
        pandas.DataFrame with columns image_path, age, gender, image (JPEG
        bytes), org_box / trible_box / landmarks (pickled ndarrays), and
        yaw / pitch / roll.
    """
    meta_dataframe = pd.read_csv(
        dataset_base_path.joinpath(dataset_name + '_meta.csv'))
    properties_list = []  # one 10-column row per image
    for index, series in meta_dataframe.iterrows():
        image_path = series.full_path
        try:
            image = cv2.imread(image_path, cv2.IMREAD_COLOR)
            face_count, _, lmarks_list = detect_faces_and_landmarks(image)
            if face_count != 1:
                raise Exception("more than 1 or no face found in image ",
                                image_path)
            # Crop and upright-align the face; padding 0.4 as in the paper.
            cropped_faces = dlib.get_face_chips(
                image, lmarks_list, padding=extra_padding)
            image = cropped_faces[0]  # exactly one face (checked above)
            # Landmark positions shift after cropping, so detect again.
            _, face_rect_box, lmarks_list = detect_faces_and_landmarks(image)
            first_lmarks = lmarks_list[0]
            # Three nested face boxes required as network input by the paper.
            trible_box = gen_boundbox(face_rect_box, first_lmarks)
            if (trible_box < 0).any():
                print(index, 'Some part of face is out of image ',
                      series.full_path)
                raise Exception("more than 1 or no face found in image ",
                                image_path)
            # Head-pose angles, used downstream for filtering.
            face_pitch, face_yaw, face_roll = get_rotation_angle(
                image, first_lmarks)
        except Exception:
            # Placeholder row; removed later by dropna().
            properties_list.append([np.nan] * 10)
            continue

        # Everything succeeded: serialize values for storage.
        status, buf = cv2.imencode(".jpg", image)
        image_buffer = buf.tobytes()  # tostring() was removed in NumPy 2.0
        # ndarray.dumps() pickles to bytes (decode with pickle.loads /
        # numpy.loads); much faster than JSON for these arrays.
        face_rect_box_serialized = face_rect_box.dumps()  # xmin,ymin,xmax,ymax
        trible_boxes_serialized = trible_box.dumps()
        # Convert the dlib landmark object to a plain array so it pickles.
        landmarks_array = np.array(
            [[point.x, point.y] for point in first_lmarks.parts()])
        face_landmarks_serialized = landmarks_array.dumps()

        properties_list.append([
            image_path, series.age, series.gender, image_buffer,
            face_rect_box_serialized, trible_boxes_serialized, face_yaw,
            face_pitch, face_roll, face_landmarks_serialized
        ])
        if index % 500 == 0:
            print(index, 'images preprocessed')
    processed_dataset_df = pd.DataFrame(properties_list,
                                        columns=[
                                            'image_path', 'age', 'gender',
                                            'image', 'org_box', 'trible_box',
                                            'yaw', 'pitch', 'roll', 'landmarks'
                                        ])
    # Drop failed (all-NaN) rows and keep plausible ages only.
    processed_dataset_df = processed_dataset_df.dropna()
    processed_dataset_df = processed_dataset_df[
        (processed_dataset_df.age >= 0) & (processed_dataset_df.age <= 100)]
    return processed_dataset_df