Example #1
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)
Example #2
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True
Example #3
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True
        self.save_interval = self.args.save_interval if hasattr(
            self.args, "save_interval") else None
Example #4
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)
Example #5
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True
Example #6
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True
        self.save_interval = self.args.save_interval if hasattr(self.args, "save_interval") else None
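The hasattr guard above lets the same constructor work with argument namespaces that may or may not expose save_interval (for example, when only some subcommands add that option to the parser). A minimal sketch of the pattern, using argparse.Namespace as a stand-in for the real parsed arguments:

from argparse import Namespace

# Hypothetical namespaces standing in for parsed CLI arguments.
args_with_interval = Namespace(save_interval=100)
args_without_interval = Namespace()

def read_save_interval(args):
    # Same guard as in the constructor: fall back to None when the
    # parser never added the attribute.
    return args.save_interval if hasattr(args, "save_interval") else None

print(read_save_interval(args_with_interval))     # 100
print(read_save_interval(args_without_interval))  # None

An equivalent, slightly terser spelling is getattr(args, "save_interval", None).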
Example #7
class Convert(object):
    """ The convert process. """
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)

    def process(self):
        """ Original & LowMem models go with Adjust or Masked converter

            Note: GAN prediction outputs a mask + an image, while the
            other models predict only an image. """
        Utils.set_verbosity(self.args.verbose)

        if not self.alignments.have_alignments_file:
            self.generate_alignments()

        self.faces.faces_detected = self.alignments.read_alignments()

        model = self.load_model()
        converter = self.load_converter(model)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        Utils.finalize(self.images.images_found,
                       self.faces.num_faces_detected,
                       self.faces.verify_output)

    def generate_alignments(self):
        """ Generate an alignments file if one does not already
        exist. Does not save extracted faces """
        print('Alignments file not found. Generating at default values...')
        extract = Extract(self.args)
        extract.export_face = False
        extract.process()

    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print("Model Not Found! A valid model "
                  "must be provided to continue!")
            exit(1)

        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(
            model.converter(False),
            trainer=args.trainer,
            blur_size=args.blur_size,
            seamless_clone=args.seamless_clone,
            sharpen_image=args.sharpen_image,
            mask_type=args.mask_type,
            erosion_kernel_size=args.erosion_kernel_size,
            match_histogram=args.match_histogram,
            smooth_mask=args.smooth_mask,
            avg_color_adjust=args.avg_color_adjust)

        return converter

    def prepare_images(self):
        """ Prepare the images for conversion """
        filename = ""
        for filename in tqdm(self.images.input_images, file=sys.stdout):
            if not self.check_alignments(filename):
                continue
            image = Utils.cv2_read_write('read', filename)
            faces = self.faces.get_faces_alignments(filename, image)
            if not faces:
                continue

            yield filename, image, faces

    def check_alignments(self, filename):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.faces.have_face(filename)
        if not have_alignments:
            tqdm.write("No alignment found for {}, "
                       "skipping".format(os.path.basename(filename)))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for idx, face in faces:
                    image = self.convert_one_face(converter,
                                                  (filename, image, idx, face))
            if skip != "discard":
                filename = str(self.output_dir / Path(filename).name)
                Utils.cv2_read_write('write', filename, image)
        except Exception as err:
            print("Failed to convert image: {}. "
                  "Reason: {}".format(filename, err))

    def convert_one_face(self, converter, imagevars):
        """ Perform the conversion on the given frame for a single face """
        filename, image, idx, face = imagevars

        if self.opts.check_skipface(filename, idx):
            return image

        image = self.images.rotate_image(image, face.r)
        # TODO: This switch between 64 and 128 is a hack for now.
        # We should have a separate cli option for size

        size = 128 if (self.args.trainer.strip().lower()
                       in ('gan128', 'originalhighres')) else 64

        image = converter.patch_image(image,
                                      face,
                                      size)
        image = self.images.rotate_image(image, face.r, reverse=True)
        return image
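Taken together, Convert only needs an arguments object exposing the attributes read above (output_dir, model_dir, trainer, gpus, swap_model, converter and the converter tuning options, plus whatever Images, Faces, Alignments and OptionalActions consume). A shape sketch of driving it directly with argparse.Namespace; the option names mirror the attribute accesses in this example, the values are purely illustrative, and the real CLI parser defines further options, so treat this as an outline rather than a guaranteed-runnable entry point:

from argparse import Namespace

args = Namespace(
    input_dir="input_frames",        # assumed to be read by Images
    output_dir="converted_frames",
    model_dir="models",
    trainer="Original",
    gpus=1,
    swap_model=False,
    converter="Masked",
    blur_size=2,
    seamless_clone=False,
    sharpen_image=None,
    mask_type="facehullandrect",
    erosion_kernel_size=None,
    match_histogram=False,
    smooth_mask=True,
    avg_color_adjust=True,
    verbose=False,
)

Convert(args).process()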
Example #8
class Extract(object):
    """ The extract process. """
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True

    def process(self):
        """ Perform the extraction process """
        print('Starting, this may take a while...')
        Utils.set_verbosity(self.args.verbose)

        if hasattr(self.args, 'processes') and self.args.processes > 1:
            self.extract_multi_process()
        else:
            self.extract_single_process()

        self.alignments.write_alignments(self.faces.faces_detected)

        images, faces = Utils.finalize(self.images.images_found,
                                       self.faces.num_faces_detected,
                                       self.faces.verify_output)
        self.images.images_found = images
        self.faces.num_faces_detected = faces

    def extract_single_process(self):
        """ Run extraction in a single process """
        for filename in tqdm(self.images.input_images):
            filename, faces = self.process_single_image(filename)
            self.faces.faces_detected[os.path.basename(filename)] = faces

    def extract_multi_process(self):
        """ Run the extraction on the correct number of processes """
        for filename, faces in tqdm(pool_process(
                self.process_single_image,
                self.images.input_images,
                processes=self.args.processes),
                                    total=self.images.images_found):
            self.faces.num_faces_detected += 1
            self.faces.faces_detected[os.path.basename(filename)] = faces

    def process_single_image(self, filename):
        """ Detect faces in an image. Rotate the image the specified amount
            until at least one face is found, or until image rotations are
            depleted.
            Once at least one face has been detected, pass to process_single_face
            to process the individual faces """
        retval = filename, list()
        try:
            image = Utils.cv2_read_write('read', filename)

            for angle in self.images.rotation_angles:
                image = Utils.rotate_image_by_angle(image, angle)
                faces = self.faces.get_faces(image, angle)
                process_faces = [(idx, face) for idx, face in faces]
                if process_faces and angle != 0 and self.args.verbose:
                    print("found face(s) by rotating image {} degrees".format(
                        angle))
                if process_faces:
                    break

            final_faces = [
                self.process_single_face(idx, face, filename, image)
                for idx, face in process_faces
            ]

            retval = filename, final_faces
        except Exception as err:
            if self.args.verbose:
                print("Failed to extract from image: {}. Reason: {}".format(
                    filename, err))
        return retval

    def process_single_face(self, idx, face, filename, image):
        """ Perform processing on found faces """
        output_file = self.output_dir / Path(
            filename).stem if self.export_face else None

        self.faces.draw_landmarks_on_face(face, image)

        resized_face, t_mat = self.faces.extractor.extract(
            image, face, 256, self.faces.align_eyes)

        blurry_file = self.faces.detect_blurry_faces(face, t_mat, resized_face,
                                                     filename)
        output_file = blurry_file if blurry_file else output_file

        if self.export_face:
            filename = "{}_{}{}".format(str(output_file), str(idx),
                                        Path(filename).suffix)
            Utils.cv2_read_write('write', filename, resized_face)

        return {
            "r": face.r,
            "x": face.x,
            "w": face.w,
            "y": face.y,
            "h": face.h,
            "landmarksXY": face.landmarksAsXY()
        }
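As the return statement above shows, each detected face is serialised to a small dict of rotation, bounding box and landmark data, and process_single_image collects those dicts per frame, keyed by the frame's basename, before write_alignments persists them. An illustrative sketch of the in-memory structure for one frame with two faces (coordinates made up, landmark lists truncated):

# Shape of faces.faces_detected as handed to write_alignments().
faces_detected = {
    "frame_0001.png": [
        {"r": 0, "x": 132, "w": 256, "y": 64, "h": 256,
         "landmarksXY": [(150, 120), (162, 118)]},   # full landmark set in practice
        {"r": 90, "x": 500, "w": 180, "y": 210, "h": 180,
         "landmarksXY": [(520, 240), (531, 239)]},
    ],
}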
Example #9
class Convert(object):
    """ The convert process. """
    def __init__(self, arguments):
        self.args = arguments
        self.output_dir = get_folder(self.args.output_dir)

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.opts = OptionalActions(self.args, self.images.input_images)

    def process(self):
        """ Original & LowMem models go with Adjust or Masked converter

            Note: GAN prediction outputs a mask + an image, while the
            other models predict only an image. """
        Utils.set_verbosity(self.args.verbose)

        if not self.alignments.have_alignments_file:
            self.generate_alignments()

        self.faces.faces_detected = self.alignments.read_alignments()

        model = self.load_model()
        converter = self.load_converter(model)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        for item in batch.iterator():
            self.convert(converter, item)

        Utils.finalize(self.images.images_found, self.faces.num_faces_detected,
                       self.faces.verify_output)

    def generate_alignments(self):
        """ Generate an alignments file if one does not already
        exist. Does not save extracted faces """
        print('Alignments file not found. Generating at default values...')
        extract = Extract(self.args)
        extract.export_face = False
        extract.process()

    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print(
                "Model Not Found! A valid model must be provided to continue!")
            exit(1)

        return model

    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(
            model.converter(False),
            trainer=args.trainer,
            blur_size=args.blur_size,
            seamless_clone=args.seamless_clone,
            sharpen_image=args.sharpen_image,
            mask_type=args.mask_type,
            erosion_kernel_size=args.erosion_kernel_size,
            match_histogram=args.match_histogram,
            smooth_mask=args.smooth_mask,
            avg_color_adjust=args.avg_color_adjust)
        return converter

    def prepare_images(self):
        """ Prepare the images for conversion """
        filename = ""
        for filename in tqdm(self.images.input_images, file=sys.stdout):
            if not self.check_alignments(filename):
                continue
            image = Utils.cv2_read_write('read', filename)
            faces = self.faces.get_faces_alignments(filename, image)
            if not faces:
                continue

            yield filename, image, faces

    def check_alignments(self, filename):
        """ If we have no alignments for this image, skip it """
        have_alignments = self.faces.have_face(filename)
        if not have_alignments:
            tqdm.write("No alignment found for {}, skipping".format(
                os.path.basename(filename)))
        return have_alignments

    def convert(self, converter, item):
        """ Apply the conversion transferring faces onto frames """
        try:
            filename, image, faces = item
            skip = self.opts.check_skipframe(filename)

            if not skip:
                for idx, face in faces:
                    image = self.convert_one_face(converter,
                                                  (filename, image, idx, face))
            if skip != "discard":
                filename = str(self.output_dir / Path(filename).name)
                Utils.cv2_read_write('write', filename, image)
        except Exception as err:
            print("Failed to convert image: {}. Reason: {}".format(
                filename, err))

    def convert_one_face(self, converter, imagevars):
        """ Perform the conversion on the given frame for a single face """
        filename, image, idx, face = imagevars

        if self.opts.check_skipface(filename, idx):
            return image

        image = self.images.rotate_image(image, face.r)
        # TODO: This switch between 64 and 128 is a hack for now.
        # We should have a separate cli option for size
        image = converter.patch_image(
            image, face, 64 if "128" not in self.args.trainer else 128)
        image = self.images.rotate_image(image, face.r, reverse=True)
        return image
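Note that this size heuristic is not equivalent to the tuple check in Example #7: for a trainer named 'OriginalHighRes' the tuple check selects 128, while the substring test here falls back to 64 because the name does not contain "128". A quick comparison of the two expressions:

def size_by_tuple(trainer):
    # Heuristic from Example #7
    return 128 if trainer.strip().lower() in ('gan128', 'originalhighres') else 64

def size_by_substring(trainer):
    # Heuristic from this example
    return 64 if "128" not in trainer else 128

for name in ("Original", "GAN128", "OriginalHighRes"):
    print(name, size_by_tuple(name), size_by_substring(name))
# Original 64 64
# GAN128 128 128
# OriginalHighRes 128 64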
Example #10
class Extract(object):
    """ The extract process. """
    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True
        self.save_interval = self.args.save_interval if hasattr(
            self.args, "save_interval") else None

    def process(self):
        """ Perform the extraction process """
        print('Starting, this may take a while...')
        Utils.set_verbosity(self.args.verbose)

        if (hasattr(self.args, 'multiprocess') and self.args.multiprocess
                and GPUStats().device_count == 0):
            # TODO Checking that there is no available GPU is not
            # necessarily an indicator of whether the user is actually
            # using the CPU. Maybe look to implement further checks on
            # dlib/tensorflow compilations
            self.extract_multi_process()
        else:
            self.extract_single_process()

        self.write_alignments()
        images, faces = Utils.finalize(self.images.images_found,
                                       self.faces.num_faces_detected,
                                       self.faces.verify_output)
        self.images.images_found = images
        self.faces.num_faces_detected = faces

    def write_alignments(self):
        self.alignments.write_alignments(self.faces.faces_detected)

    def extract_single_process(self):
        """ Run extraction in a single process """
        frame_no = 0
        for filename in tqdm(self.images.input_images, file=sys.stdout):
            filename, faces = self.process_single_image(filename)
            self.faces.faces_detected[os.path.basename(filename)] = faces
            frame_no += 1
            if frame_no == self.save_interval:
                self.write_alignments()
                frame_no = 0

    def extract_multi_process(self):
        """ Run the extraction on the correct number of processes """
        frame_no = 0
        for filename, faces in tqdm(pool_process(self.process_single_image,
                                                 self.images.input_images),
                                    total=self.images.images_found,
                                    file=sys.stdout):
            self.faces.num_faces_detected += 1
            self.faces.faces_detected[os.path.basename(filename)] = faces
            frame_no += 1
            if frame_no == self.save_interval:
                self.write_alignments()
                frame_no = 0

    def process_single_image(self, filename):
        """ Detect faces in an image. Rotate the image the specified amount
            until at least one face is found, or until image rotations are
            depleted.
            Once at least one face has been detected, pass to
            process_single_face to process the individual faces """
        retval = filename, list()
        try:
            image = Utils.cv2_read_write('read', filename)

            for angle in self.images.rotation_angles:
                currentimage = Utils.rotate_image_by_angle(image, angle)
                faces = self.faces.get_faces(currentimage, angle)
                process_faces = [(idx, face) for idx, face in faces]
                if process_faces and angle != 0 and self.args.verbose:
                    print("found face(s) by rotating image "
                          "{} degrees".format(angle))
                if process_faces:
                    break

            final_faces = [
                self.process_single_face(idx, face, filename, currentimage)
                for idx, face in process_faces
            ]

            retval = filename, final_faces
        except Exception as err:
            if self.args.verbose:
                print("Failed to extract from image: "
                      "{}. Reason: {}".format(filename, err))
        return retval

    def process_single_face(self, idx, face, filename, image):
        """ Perform processing on found faces """
        output_file = self.output_dir / Path(
            filename).stem if self.export_face else None

        self.faces.draw_landmarks_on_face(face, image)

        resized_face, t_mat = self.faces.extractor.extract(
            image, face, 256, self.faces.align_eyes)

        blurry_file = self.faces.detect_blurry_faces(face, t_mat, resized_face,
                                                     filename)
        output_file = blurry_file if blurry_file else output_file

        if self.export_face:
            filename = "{}_{}{}".format(str(output_file), str(idx),
                                        Path(filename).suffix)
            Utils.cv2_read_write('write', filename, resized_face)

        return {
            "r": face.r,
            "x": face.x,
            "w": face.w,
            "y": face.y,
            "h": face.h,
            "landmarksXY": face.landmarks_as_xy()
        }
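When export_face is enabled, process_single_face writes each aligned face as <frame stem>_<face index><original suffix> inside output_dir (or inside the blurry path returned by detect_blurry_faces). A small sketch of that filename arithmetic with pathlib, using made-up paths:

from pathlib import Path

output_dir = Path("faces_out")      # hypothetical extractor output folder
frame = "frames/video_0001.png"     # hypothetical input frame
idx = 0

output_file = output_dir / Path(frame).stem
face_filename = "{}_{}{}".format(str(output_file), str(idx), Path(frame).suffix)
print(face_filename)  # faces_out/video_0001_0.png (separator depends on OS)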
Example #11
class Extract(object):
    """ The extract process. """

    def __init__(self, arguments):
        self.args = arguments

        self.images = Images(self.args)
        self.faces = Faces(self.args)
        self.alignments = Alignments(self.args)

        self.output_dir = self.faces.output_dir

        self.export_face = True

    def process(self):
        """ Perform the extraction process """
        print('Starting, this may take a while...')
        Utils.set_verbosity(self.args.verbose)

        if hasattr(self.args, 'processes') and self.args.processes > 1:
            self.extract_multi_process()
        else:
            self.extract_single_process()

        self.alignments.write_alignments(self.faces.faces_detected)

        images, faces = Utils.finalize(self.images.images_found,
                                       self.faces.num_faces_detected,
                                       self.faces.verify_output)
        self.images.images_found = images
        self.faces.num_faces_detected = faces

    def extract_single_process(self):
        """ Run extraction in a single process """
        for filename in tqdm(self.images.input_images, file=sys.stdout):
            filename, faces = self.process_single_image(filename)
            self.faces.faces_detected[os.path.basename(filename)] = faces

    def extract_multi_process(self):
        """ Run the extraction on the correct number of processes """
        for filename, faces in tqdm(pool_process(self.process_single_image,
                                                 self.images.input_images,
                                                 processes=self.args.processes),
                                    total=self.images.images_found,
                                    file=sys.stdout):
            self.faces.num_faces_detected += 1
            self.faces.faces_detected[os.path.basename(filename)] = faces

    def process_single_image(self, filename):
        """ Detect faces in an image. Rotate the image the specified amount
            until at least one face is found, or until image rotations are
            depleted.
            Once at least one face has been detected, pass to
            process_single_face to process the individual faces """
        retval = filename, list()
        try:
            image = Utils.cv2_read_write('read', filename)

            for angle in self.images.rotation_angles:
                currentimage = Utils.rotate_image_by_angle(image, angle)
                faces = self.faces.get_faces(currentimage, angle)
                process_faces = [(idx, face) for idx, face in faces]
                if process_faces and angle != 0 and self.args.verbose:
                    print("found face(s) by rotating image {} degrees".format(angle))
                if process_faces:
                    break

            final_faces = [self.process_single_face(idx, face, filename, currentimage)
                           for idx, face in process_faces]

            retval = filename, final_faces
        except Exception as err:
            if self.args.verbose:
                print("Failed to extract from image: {}. Reason: {}".format(filename, err))
        return retval

    def process_single_face(self, idx, face, filename, image):
        """ Perform processing on found faces """
        output_file = self.output_dir / Path(filename).stem if self.export_face else None

        self.faces.draw_landmarks_on_face(face, image)

        resized_face, t_mat = self.faces.extractor.extract(image,
                                                           face,
                                                           256,
                                                           self.faces.align_eyes)

        blurry_file = self.faces.detect_blurry_faces(face, t_mat, resized_face, filename)
        output_file = blurry_file if blurry_file else output_file

        if self.export_face:
            filename = "{}_{}{}".format(str(output_file), str(idx), Path(filename).suffix)
            Utils.cv2_read_write('write', filename, resized_face)

        return {"r": face.r,
                "x": face.x,
                "w": face.w,
                "y": face.y,
                "h": face.h,
                "landmarksXY": face.landmarksAsXY()}