Example #1
def load_images(reference_file_paths, nreference_file_paths):
    """ Load the images """
    retval = dict()
    for fpath in reference_file_paths:
        retval[fpath] = {"image": cv2_read_img(fpath, raise_error=True),
                         "type": "filter"}
    for fpath in nreference_file_paths:
        retval[fpath] = {"image": cv2_read_img(fpath, raise_error=True),
                         "type": "nfilter"}
    logger.debug("Loaded filter images: %s", {k: v["type"] for k, v in retval.items()})
    return retval
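
Every example on this page calls `cv2_read_img`, whose definition is not shown. A minimal sketch of what such a helper could look like, assuming it simply wraps `cv2.imread` and optionally raises when decoding fails (the project's real helper may differ, for example to handle non-ASCII paths):

import cv2

def cv2_read_img(filename, raise_error=False):
    """ Minimal stand-in: read an image, optionally raising if it cannot be decoded """
    image = cv2.imread(filename)  # cv2.imread returns None on failure instead of raising
    if image is None and raise_error:
        raise ValueError("Image could not be read: '{}'".format(filename))
    return image
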
Example #2
    def reload_images(self, group_method, img_list):
        """
        Reloads the image list by replacing the comparative values with those
        that the chosen grouping method expects.
        :param group_method: str name of the grouping method that will be used.
        :param img_list: image list that has been sorted by one of the sort
        methods.
        :return: img_list but with the comparative values that the chosen
        grouping method expects.
        """
        input_dir = self.args.input_dir
        logger.info("Preparing to group...")
        if group_method == 'group_blur':
            temp_list = [
                [img, self.estimate_blur(cv2_read_img(img, raise_error=True))]
                for img in tqdm(self.find_images(input_dir),
                                desc="Reloading", file=sys.stdout)]
        elif group_method == 'group_face_cnn':
            self.launch_aligner()
            temp_list = []
            for img in tqdm(self.find_images(input_dir),
                            desc="Reloading",
                            file=sys.stdout):
                landmarks = self.get_landmarks(img)
                temp_list.append([
                    img,
                    np.array(landmarks) if landmarks else np.zeros((68, 2))
                ])
        elif group_method == 'group_face_yaw':
            self.launch_aligner()
            temp_list = []
            for img in tqdm(self.find_images(input_dir),
                            desc="Reloading",
                            file=sys.stdout):
                landmarks = self.get_landmarks(img)
                temp_list.append(
                    [img,
                     self.calc_landmarks_face_yaw(np.array(landmarks))])
        elif group_method == 'group_hist':
            temp_list = [
                [img, cv2.calcHist([cv2_read_img(img, raise_error=True)],
                                   [0], None, [256], [0, 256])]
                for img in tqdm(self.find_images(input_dir),
                                desc="Reloading", file=sys.stdout)]
        else:
            raise ValueError("{} group_method not found.".format(group_method))

        return self.splice_lists(img_list, temp_list)
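
Each branch above produces `[image_path, comparative_value]` pairs which `splice_lists` (not shown) merges back into the sorted list. As a purely hypothetical illustration of how such pairs could later be split into groups for the scalar cases such as `group_blur` or `group_face_yaw` (not the project's own grouping code):

def bin_by_value(pairs, num_bins=5):
    """ Split [path, score] pairs into equal-width bins by score (illustrative only) """
    scores = [score for _, score in pairs]
    low, high = min(scores), max(scores)
    width = (high - low) / num_bins or 1.0
    bins = [[] for _ in range(num_bins)]
    for path, score in pairs:
        index = min(int((score - low) / width), num_bins - 1)
        bins[index].append(path)
    return bins
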
Example #3
    def process_face(self, filename, side, is_display):
        """ Load an image and perform transformation and warping """
        logger.trace(
            "Process face: (filename: '%s', side: '%s', is_display: %s)",
            filename, side, is_display)
        image = cv2_read_img(filename, raise_error=True)
        if self.mask_class or self.training_opts["warp_to_landmarks"]:
            src_pts = self.get_landmarks(filename, image, side)
        if self.mask_class:
            image = self.mask_class(src_pts, image, channels=4).mask

        image = self.processing.color_adjust(
            image, self.training_opts["augment_color"], is_display)

        if not is_display:
            image = self.processing.random_transform(image)
            if not self.training_opts["no_flip"]:
                image = self.processing.do_random_flip(image)
        sample = image.copy()[:, :, :3]

        if self.training_opts["warp_to_landmarks"]:
            dst_pts = self.get_closest_match(filename, side, src_pts)
            processed = self.processing.random_warp_landmarks(
                image, src_pts, dst_pts)
        else:
            processed = self.processing.random_warp(image)

        processed.insert(0, sample)
        logger.trace(
            "Processed face: (filename: '%s', side: '%s', shapes: %s)",
            filename, side, [img.shape for img in processed])
        return processed
Example #4
    def sort_hist_dissim(self):
        """ Sort by histigram of face dissimilarity """
        input_dir = self.args.input_dir

        logger.info("Sorting by histogram dissimilarity...")

        img_list = [
            [img,
             cv2.calcHist([cv2_read_img(img, raise_error=True)], [0], None, [256], [0, 256]), 0]
            for img in
            tqdm(self.find_images(input_dir), desc="Loading", file=sys.stdout)
        ]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len), desc="Sorting", file=sys.stdout):
            score_total = 0
            for j in range(0, img_list_len):
                if i == j:
                    continue
                score_total += cv2.compareHist(img_list[i][1],
                                               img_list[j][1],
                                               cv2.HISTCMP_BHATTACHARYYA)

            img_list[i][2] = score_total

        logger.info("Sorting...")
        img_list = sorted(img_list, key=operator.itemgetter(2), reverse=True)

        return img_list
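
For context, `cv2.HISTCMP_BHATTACHARYYA` returns 0.0 for identical histograms and larger values for more dissimilar ones, so summing the pairwise scores and sorting in reverse puts the most atypical faces first. A small standalone check of the comparison itself, assuming two hypothetical image files `a.png` and `b.png`:

import cv2

img_a = cv2.imread("a.png")  # hypothetical sample files
img_b = cv2.imread("b.png")
hist_a = cv2.calcHist([img_a], [0], None, [256], [0, 256])
hist_b = cv2.calcHist([img_b], [0], None, [256], [0, 256])
# 0.0 means identical histograms; larger values mean greater dissimilarity
print(cv2.compareHist(hist_a, hist_b, cv2.HISTCMP_BHATTACHARYYA))
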
Example #5
    def sort_hist(self):
        """ Sort by histogram of face similarity """
        input_dir = self.args.input_dir

        logger.info("Sorting by histogram similarity...")

        img_list = [
            [img, cv2.calcHist([cv2_read_img(img, raise_error=True)], [0], None, [256], [0, 256])]
            for img in
            tqdm(self.find_images(input_dir), desc="Loading", file=sys.stdout)
        ]

        img_list_len = len(img_list)
        for i in tqdm(range(0, img_list_len - 1), desc="Sorting",
                      file=sys.stdout):
            min_score = float("inf")
            j_min_score = i + 1
            for j in range(i + 1, len(img_list)):
                score = cv2.compareHist(img_list[i][1],
                                        img_list[j][1],
                                        cv2.HISTCMP_BHATTACHARYYA)
                if score < min_score:
                    min_score = score
                    j_min_score = j
            (img_list[i + 1],
             img_list[j_min_score]) = (img_list[j_min_score],
                                       img_list[i + 1])
        return img_list
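
The loop above is a greedy nearest-neighbour ordering: for each position `i`, the remaining image with the lowest Bhattacharyya distance is swapped into position `i + 1`, so visually similar faces end up adjacent. The same idea on plain numbers, as a minimal sketch:

def greedy_chain(values):
    """ Order values so each element is followed by its nearest remaining neighbour """
    values = list(values)
    for i in range(len(values) - 1):
        j_min = min(range(i + 1, len(values)),
                    key=lambda j: abs(values[j] - values[i]))
        values[i + 1], values[j_min] = values[j_min], values[i + 1]
    return values

print(greedy_chain([5, 1, 9, 2, 8]))  # [5, 2, 1, 8, 9]
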
Example #6
def get_landmarks(filename):
    """ Extract the face from a frame (if no alignments file is found) """
    image = cv2_read_img(filename, raise_error=True)
    queue_manager.get_queue("in").put(Sort.alignment_dict(image))
    face = queue_manager.get_queue("out").get()
    landmarks = face["landmarks"][0]
    return landmarks
Example #7
    def load_disk_frames(self):
        """ Load frames from disk """
        logger.debug("Input is separate Frames. Loading images")
        for filename in self.input_images:
            image = cv2_read_img(filename, raise_error=False)
            if image is None:
                continue
            yield filename, image
Example #8
    def load_image(self, filename):
        """ Load an image """
        if self.is_video:
            image = self.load_video_frame(filename)
        else:
            src = os.path.join(self.folder, filename)
            logger.trace("Loading image: '%s'", src)
            image = cv2_read_img(src, raise_error=True)
        return image
Example #9
def get_landmarks(filename):
    """ Extract the face from a frame (if no alignments file is found) """
    image = cv2_read_img(filename, raise_error=True)
    feed = Sort.alignment_dict(image)
    feed["filename"] = filename
    queue_manager.get_queue("in").put(feed)
    face = queue_manager.get_queue("out").get()
    landmarks = face["detected_faces"][0].landmarks_xy
    return landmarks
Example #10
def estimate_blur(image_file):
    """
    Estimate the amount of blur an image has with the variance of the Laplacian.
    Normalize by pixel count to offset the effect of image size on pixel gradients & variance
    """
    image = cv2_read_img(image_file, raise_error=True)
    if image.ndim == 3:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur_map = cv2.Laplacian(image, cv2.CV_32F)
    score = np.var(blur_map) / np.sqrt(image.shape[0] * image.shape[1])
    return score
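
As a quick sanity check of the score, blurring an image should lower it, since the Laplacian responds to fine detail. A self-contained demo on synthetic data (not part of the project):

import cv2
import numpy as np

def laplacian_score(gray):
    """ Variance of the Laplacian, normalised by the square root of the pixel count """
    blur_map = cv2.Laplacian(gray, cv2.CV_32F)
    return np.var(blur_map) / np.sqrt(gray.shape[0] * gray.shape[1])

sharp = np.random.randint(0, 256, (256, 256), dtype=np.uint8)  # random noise is "sharp"
blurred = cv2.GaussianBlur(sharp, (9, 9), 0)                   # smoothing removes fine detail
print(laplacian_score(sharp) > laplacian_score(blurred))       # expected: True
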
Example #11
    def load_one_image(self, filename):
        """ Load the requested image """
        logger.trace("Loading image: '%s'", filename)
        if self.is_video:
            if filename.isdigit():
                frame_no = filename
            else:
                frame_no = os.path.splitext(filename)[0][filename.rfind("_") + 1:]
                logger.trace("Extracted frame_no %s from filename '%s'", frame_no, filename)
            retval = self.load_one_video_frame(int(frame_no))
        else:
            retval = cv2_read_img(filename, raise_error=True)
        return retval
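
The frame number extraction above relies on video-frame filenames that end in an underscore followed by the frame index. A quick illustration with a hypothetical filename (the index computed on the full name still works on the stem because `splitext` only removes the suffix):

import os

filename = "video_000123.png"  # hypothetical frame filename
frame_no = os.path.splitext(filename)[0][filename.rfind("_") + 1:]
print(frame_no, int(frame_no))  # 000123 123
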
Example #12
    def sort_face(self):
        """ Sort by face similarity """
        input_dir = self.args.input_dir

        logger.info("Sorting by face similarity...")

        images = np.array(self.find_images(input_dir))
        preds = np.array([self.vgg_face.predict(cv2_read_img(img, raise_error=True))
                          for img in tqdm(images, desc="loading", file=sys.stdout)])
        logger.info("Sorting. Depending on ths size of your dataset, this may take a few "
                    "minutes...")
        indices = self.vgg_face.sorted_similarity(preds, method="ward")
        img_list = images[indices]
        return img_list
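
`self.vgg_face.sorted_similarity` belongs to the project's VGG-Face wrapper and is not shown here. One plausible way such an ordering could be derived from embedding vectors is hierarchical clustering; a sketch using SciPy (an assumption, not the project's implementation):

import numpy as np
from scipy.cluster.hierarchy import linkage, leaves_list

def sorted_similarity_sketch(predictions, method="ward"):
    """ Return indices that place similar embedding vectors next to each other """
    tree = linkage(predictions, method=method)  # agglomerative clustering of the embeddings
    return leaves_list(tree)                    # dendrogram leaf order as a flat index array

embeddings = np.random.rand(10, 128)  # stand-in for VGG-Face descriptors
print(sorted_similarity_sketch(embeddings))
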
Example #13
        face = DetectedFace(x=0, w=width, y=0, h=height)
        face = face.to_bounding_box_dict()
        return {"image": image,
                "detected_faces": [face]}

    @staticmethod
    def get_landmarks(filename):
        """ Extract the face from a frame (If not alignments file found) """
<<<<<<< HEAD
        image = cv2.imread(filename)
=======
        image = cv2_read_img(filename, raise_error=True)
>>>>>>> upstream/master
        queue_manager.get_queue("in").put(Sort.alignment_dict(image))
        face = queue_manager.get_queue("out").get()
        landmarks = face["landmarks"][0]
        return landmarks

    def sort_process(self):
        """
        This method dynamically assigns the functions that will be used to run
        the core process of sorting, optional grouping, and renaming/moving
        into folders. After the functions are assigned, they are executed.
        """
        sort_method = self.args.sort_method.lower()
        group_method = self.args.group_method.lower()
        final_method = self.args.final_process.lower()
Example #14
    def image_size(self):
        """ Get the training set image size for storing in model data """
        image = cv2_read_img(self.images["a"][0], raise_error=True)
        size = image.shape[0]
        logger.debug("Training image size: %s", size)
        return size