Example #1
    def processThread(self):
        variant = "Original"  # TODO Pass as argument

        print('Loading data, this may take a while...')
        model = PluginLoader.get_model(variant)(self.arguments.model_dir)
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)
        trainer = PluginLoader.get_trainer(variant)(model, images_A, images_B)

        try:
            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, 1000000):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(epoch,
                                       self.show if save_iteration else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
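Note: across these examples, PluginLoader.get_model(...), get_trainer(...) and get_converter(...) return plugin classes, which the trailing call then instantiates. A minimal sketch of the two-step call, unrolled (the "models" directory is a hypothetical value):

    # What PluginLoader.get_model(variant)(self.arguments.model_dir) does:
    ModelClass = PluginLoader.get_model("Original")  # look up the plugin class by name
    model = ModelClass("models")                     # instantiate it with the model directory
    model.load(swapped=False)                        # then load the saved weights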
Example #2
    def process(self):
        # Original & LowMem models go with Adjust or Masked converter
        # GAN converter & model must go together
        # Note: GAN prediction outputs a mask + an image, while other predicts only an image
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        if conv_name.startswith("GAN"):
            assert model_name.startswith(
                "GAN"
            ) is True, "GAN converter can only be used with GAN model!"
        else:
            assert model_name.startswith(
                "GAN"
            ) is False, "GAN model can only be used with GAN converter!"

        model = PluginLoader.get_model(model_name)(get_folder(
            self.arguments.model_dir))
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax.keys() else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliably hacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
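Note: the frame-range parsing above maps the literals "min" and "max" to 0 and infinity and everything else to an integer, while the regex picks out the last run of digits in a filename. A self-contained sketch with hypothetical inputs:

    import re

    minmax = {"min": 0, "max": float("inf")}
    frame_ranges = ["10-50", "90-max"]  # hypothetical --frame-ranges values
    parsed = [tuple(minmax[q] if q in minmax else int(q) for q in v.split("-"))
              for v in frame_ranges]
    print(parsed)  # [(10, 50), (90, inf)]

    # (\d+)(?!.*\d) matches the final number, so the frame index is found
    # even when other digits appear earlier in the filename.
    imageidxre = re.compile(r'(\d+)(?!.*\d)')
    print(imageidxre.search("scene1_frame_0042.png").group(1))  # '0042'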
Example #3
    def processThread(self):
        try:
            if self.arguments.allow_growth:
                self.set_tf_allow_growth()

            print('Loading data, this may take a while...')
            # this is so that you can enter case insensitive values for trainer
            trainer = self.arguments.trainer
            trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
            model = PluginLoader.get_model(trainer)(get_folder(
                self.arguments.model_dir))
            model.load(swapped=False)

            images_A = get_image_paths(self.arguments.input_A)
            images_B = get_image_paths(self.arguments.input_B)
            trainer = PluginLoader.get_trainer(trainer)
            trainer = trainer(model, images_A, images_B,
                              self.arguments.batch_size,
                              self.arguments.perceptual_loss)

            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, self.arguments.epochs):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(
                    epoch, self.show if
                    (save_iteration or self.save_now) else None,
                    self.arguments.save_interval)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
        except Exception as e:
            print(e)
            exit(1)
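Note: set_tf_allow_growth() is called here but not shown. Judging by the inline TensorFlow setup in Examples #33-#35, it presumably looks like the sketch below (TensorFlow 1.x API; a reconstruction, not the project's verbatim code):

    import tensorflow as tf
    import keras.backend.tensorflow_backend as K

    def set_tf_allow_growth():
        """Let TensorFlow allocate GPU memory on demand rather than all at once."""
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))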
Example #4
    def process_image(self, filename):
        # TODO move the model load and the converter creation in a method called on init, but after the arg parsing
        if not self.arguments.swap_model:
            face_A, face_B = '/decoder_A.h5', '/decoder_B.h5'
        else:
            face_A, face_B = '/decoder_B.h5', '/decoder_A.h5'

        model_dir = self.arguments.model_dir
        encoder.load_weights(model_dir + "/encoder.h5")
        decoder_A.load_weights(model_dir + face_A)
        decoder_B.load_weights(model_dir + face_B)

        converter = PluginLoader.get_converter("Masked")(autoencoder_B)

        try:
            image = cv2.imread(filename)
            for (idx, face) in enumerate(detect_faces(image)):
                if idx > 0 and self.arguments.verbose:
                    print('- Found more than one face!')
                    self.verify_output = True

                image = converter.patch_image(image, face)
                self.faces_detected = self.faces_detected + 1

            output_file = self.output_dir / Path(filename).name
            cv2.imwrite(str(output_file), image)
        except Exception as e:
            print('Failed to convert image: {}. Reason: {}'.format(
                filename, e))
Example #5
 def process(self):
     extractor_name = "Align"  # TODO Pass as argument
     self.extractor = PluginLoader.get_extractor(extractor_name)()
     processes = self.arguments.processes
     try:
         if processes != 1:
             files = list(self.read_directory())
             for filename, faces in tqdm(pool_process(self.processFiles,
                                                      files,
                                                      processes=processes),
                                         total=len(files)):
                 self.num_faces_detected += 1
                 self.faces_detected[os.path.basename(filename)] = faces
         else:
             try:
                 for filename in tqdm(self.read_directory()):
                     image = cv2.imread(filename)
                     self.faces_detected[os.path.basename(
                         filename)] = self.handleImage(image, filename)
             except Exception as e:
                 print(
                     'Failed to extract from image: {}. Reason: {}'.format(
                         filename, e))
     finally:
         self.write_alignments()
Example #6
    def load_extractor():
        """ Load the requested extractor for extraction """
        # TODO Pass as argument
        extractor_name = "Align"
        extractor = PluginLoader.get_extractor(extractor_name)()

        return extractor
Example #7
    def load_model(self):
        """ Load the model requested for training """
        model_dir = get_folder(self.args.model_dir)
        model = PluginLoader.get_model(self.trainer_name)(model_dir, self.args.gpus)

        model.load(swapped=False)
        return model
Example #8
    def process(self):
        # Original model goes with Adjust or Masked converter
        model_name = "Original"  # TODO Pass as argument
        conv_name = "Masked"  # TODO Pass as argument

        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False))

        batch = BackgroundGenerator(self.prepare_images(), 1)
        for item in batch.iterator():
            self.convert(converter, item)
Example #9
    def load_trainer(self, model):
        """ Load the trainer requested for training """
        images_a, images_b = self.images

        trainer = PluginLoader.get_trainer(self.trainer_name)
        trainer = trainer(model, images_a, images_b, self.args.batch_size,
                          self.args.perceptual_loss)
        return trainer
Example #10
    def processThread(self):
        print("Loading Data..! This may take a while")

        trainer = self.arguments.trainer
        trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
        model = PluginLoader.get_model(trainer)(get_folder(
            self.arguments.model_dir))
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)

        trainer = PluginLoader.get_trainer(trainer)
        trainer = trainer(model,
                          images_A,
                          images_B,
                          batch_size=self.arguments.batch_size)

        try:

            print("Starting. Press Enter to stop Training and Save model")

            for epoch in range(0, 100000):
                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(
                    epoch, self.show if
                    (save_iteration or self.save_now) else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print("Saving model weights has been cancelled...!")
            exit(0)
Example #11
    def process(self):
        # Original model goes with Adjust or Masked converter
        # does the LowMem one work with only one?
        # seems to work with both in testing - although Adjust with LowMem
        # looks a real mess - you can see that it is "working"
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax.keys() else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliably hacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example #12
File: train.py  Project: nhu2000/faceswap
    def processThread(self):
        if self.arguments.allow_growth:
            self.set_tf_allow_growth()
        
        print('Loading data, this may take a while...')
        # this is so that you can enter case insensitive values for trainer
        trainer = self.arguments.trainer
        trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
        model = PluginLoader.get_model(trainer)(get_folder(self.arguments.model_dir))
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)
        trainer = PluginLoader.get_trainer(trainer)
        trainer = trainer(model, images_A, images_B, batch_size=self.arguments.batch_size)

        try:
            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, self.arguments.epochs):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(epoch, self.show if (save_iteration or self.save_now) else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
        except Exception as e:
            print(e)
            exit(1)
Example #13
    def process(self):
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter

        if conv_name.startswith("GAN"):
            assert model_name.startswith("GAN"), \
                "GAN converter can only be used with GAN model"
        else:
            assert not model_name.startswith("GAN"), \
                "GAN model can only be used with GAN Converter"

        model = PluginLoader.get_model(model_name)(get_folder(self.arguments.model_dir))

        if not model.load(self.arguments.swap_model):
            print("Model Not Found! A valid model must be provided to continue!")
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff
        self.frame_ranges = None

        minmax = {
            "min": 0,
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(map(lambda q: minmax[q] if q in minmax else int(q),
                          v.split("-")))
                for v in self.arguments.frame_ranges]

        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example #14
    def load_trainer(self, model):
        """ Load the trainer requested for training """
        images_a, images_b = self.images

        trainer = PluginLoader.get_trainer(self.trainer_name)
        trainer = trainer(model,
                          images_a,
                          images_b,
                          self.args.batch_size,
                          self.args.perceptual_loss)
        return trainer
Example #15
    def processThread(self):
        print('Loading data, this may take a while...')
        # this is so that you can enter case insensitive values for trainer
        trainer = self.arguments.trainer
        trainer = "LowMem" if trainer.lower() == "lowmem" else trainer
        model = PluginLoader.get_model(trainer)(self.arguments.model_dir)
        model.load(swapped=False)

        images_A = get_image_paths(self.arguments.input_A)
        images_B = get_image_paths(self.arguments.input_B)
        trainer = PluginLoader.get_trainer(trainer)(
            model, images_A, images_B, batch_size=self.arguments.batch_size)

        try:
            print('Starting. Press "Enter" to stop training and save model')

            for epoch in range(0, 1000000):

                save_iteration = epoch % self.arguments.save_interval == 0

                trainer.train_one_step(
                    epoch, self.show if
                    (save_iteration or self.save_now) else None)

                if save_iteration:
                    model.save_weights()

                if self.stop:
                    model.save_weights()
                    exit()

                if self.save_now:
                    model.save_weights()
                    self.save_now = False

        except KeyboardInterrupt:
            try:
                model.save_weights()
            except KeyboardInterrupt:
                print('Saving model weights has been cancelled!')
            exit(0)
Example #16
    def __init__(self, frames, alignments, size=256,
                 padding=48, align_eyes=False):
        self.size = size
        self.padding = padding
        self.align_eyes = align_eyes
        self.extractor = PluginLoader.get_extractor("Align")()
        self.alignments = alignments
        self.frames = frames

        self.current_frame = None
        self.faces = list()
        self.matrices = list()
Example #17
    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print("Model Not Found! A valid model must be provided to continue!")
            exit(1)

        return model
Example #18
    def process_arguments(self, arguments):
        if not arguments.swap_model:
            self.face_A, self.face_B = ('/decoder_A.h5', '/decoder_B.h5')
        else:
            self.face_A, self.face_B = ('/decoder_B.h5', '/decoder_A.h5')

        model_dir = arguments.model_dir
        encoder.load_weights(model_dir + "/encoder.h5")

        decoder_A.load_weights(model_dir + self.face_A)
        decoder_B.load_weights(model_dir + self.face_B)
        self.converter = PluginLoader.get_converter("Masked")(autoencoder_B)
        super().process_arguments(arguments)
Example #19
    def load_model(self):
        """ Load the model requested for conversion """
        model_name = self.args.trainer
        model_dir = get_folder(self.args.model_dir)
        num_gpus = self.args.gpus

        model = PluginLoader.get_model(model_name)(model_dir, num_gpus)

        if not model.load(self.args.swap_model):
            print("Model Not Found! A valid model must be provided to continue!")
            exit(1)

        return model
Example #20
    def process(self):
        extractor_name = "Align" # TODO Pass as argument
        extractor = PluginLoader.get_extractor(extractor_name)()

        try:
            for filename in self.read_directory():
                image = cv2.imread(filename)
                for idx, face in self.get_faces(image):
                    resized_image = extractor.extract(image, face, 256)
                    output_file = self.output_dir / Path(filename).stem
                    cv2.imwrite(str(output_file) + str(idx) + Path(filename).suffix, resized_image)
                
        except Exception as e:
            print('Failed to extract from image: {}. Reason: {}'.format(filename, e))
Example #21
    def process(self):
        # Original model goes with Adjust or Masked converter
        # does the LowMem one work with only one?
        model_name = "Original" # TODO Pass as argument
        conv_name = self.arguments.converter
        
        model = PluginLoader.get_model(model_name)(self.arguments.model_dir)
        if not model.load(self.arguments.swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        converter = PluginLoader.get_converter(conv_name)(model.converter(False),
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust
        )

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None
        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0, # never any frames less than 0
            "max": float("inf")
        }
        if self.arguments.frame_ranges:
            self.frame_ranges = [tuple(map(lambda q: minmax[q] if q in minmax.keys() else int(q), v.split("-"))) for v in self.arguments.frame_ranges]

        # last number regex. I know regex is hacky, but it's reliably hacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example #22
    def process(self):
        extractor_name = "Align" # TODO Pass as argument
        self.extractor = PluginLoader.get_extractor(extractor_name)()
        self.faces_detected = 0
        processes = self.arguments.processes
        if processes != 1:
            files = list(self.read_directory())
            for _ in tqdm(pool_process(self.processFiles, files, processes=processes), total=len(files)):
                self.faces_detected += 1
        else:
            try:
                for filename in tqdm(self.read_directory()):
                    self.handleImage(filename)

            except Exception as e:
                print('Failed to extract from image: {}. Reason: {}'.format(filename, e))
Example #23
    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(model.converter(False),
                                                     trainer=args.trainer,
                                                     blur_size=args.blur_size,
                                                     seamless_clone=args.seamless_clone,
                                                     sharpen_image=args.sharpen_image,
                                                     mask_type=args.mask_type,
                                                     erosion_kernel_size=args.erosion_kernel_size,
                                                     match_histogram=args.match_histogram,
                                                     smooth_mask=args.smooth_mask,
                                                     avg_color_adjust=args.avg_color_adjust)
        return converter
Example #24
    def load_converter(self, model):
        """ Load the requested converter for conversion """
        args = self.args
        conv = args.converter

        converter = PluginLoader.get_converter(conv)(model.converter(False),
                                                     trainer=args.trainer,
                                                     blur_size=args.blur_size,
                                                     seamless_clone=args.seamless_clone,
                                                     sharpen_image=args.sharpen_image,
                                                     mask_type=args.mask_type,
                                                     erosion_kernel_size=args.erosion_kernel_size,
                                                     match_histogram=args.match_histogram,
                                                     smooth_mask=args.smooth_mask,
                                                     avg_color_adjust=args.avg_color_adjust)
        return converter
Example #25
    def process(self):
        extractor_name = "Align"  # TODO Pass as argument
        extractor = PluginLoader.get_extractor(extractor_name)()

        try:
            for filename in self.read_directory():
                image = cv2.imread(filename)
                for idx, face in self.get_faces(image):
                    resized_image = extractor.extract(image, face, 256)
                    output_file = self.output_dir / Path(filename).stem
                    cv2.imwrite(
                        str(output_file) + str(idx) + Path(filename).suffix,
                        resized_image)

        except Exception as e:
            print('Failed to extract from image: {}. Reason: {}'.format(
                filename, e))
Example #26
    def process_image(self, filename):
        extractor = PluginLoader.get_extractor("Align")()

        try:
            image = cv2.imread(filename)
            for (idx, face) in enumerate(detect_faces(image)):
                if idx > 0 and self.arguments.verbose:
                    print('- Found more than one face!')
                    self.verify_output = True

                resized_image = extractor.extract(image, face, 256)
                output_file = self.output_dir / Path(filename).stem
                cv2.imwrite(
                    str(output_file) + str(idx) + Path(filename).suffix,
                    resized_image)
                self.faces_detected = self.faces_detected + 1
        except Exception as e:
            print('Failed to extract from image: {}. Reason: {}'.format(
                filename, e))
Example #27
 def process(self):
     extractor_name = "Align" # TODO Pass as argument
     self.extractor = PluginLoader.get_extractor(extractor_name)()
     processes = self.arguments.processes
     try:
         if processes != 1:
             files = list(self.read_directory())
             for filename, faces in tqdm(pool_process(self.processFiles, files, processes=processes), total = len(files)):
                 self.num_faces_detected += 1
                 self.faces_detected[os.path.basename(filename)] = faces
         else:
             try:
                 for filename in tqdm(self.read_directory()):
                     image = cv2.imread(filename)
                     self.faces_detected[os.path.basename(filename)] = self.handleImage(image, filename)
             except Exception as e:
                 print('Failed to extract from image: {}. Reason: {}'.format(filename, e))
     finally:
         self.write_alignments()
Example #28
def extract(input_path, output_path):
    files = os.listdir(input_path)
    if not len(files):
        raise Exception("no files inside {0}!!!".format(input_path))
    extractor = PluginLoader.get_extractor("Align")()
    for n, _file in enumerate(files):
        _file_id = os.path.join(input_path, _file)
        #print (_file_id)
        output = os.path.join(
            output_path, _file)  #"{0}.{1}".format(n, _file.split(".")[1]))
        if not os.path.exists(output):
            if os.path.isfile(_file_id):
                print("file {0}/{1}".format(n, len(files)), _file_id)
                image = cv2.imread(_file_id)
                try:
                    for (idx, face) in enumerate(detect_faces(image)):
                        resized_image = extractor.extract(image, face, 256)
                        cv2.imwrite(output, resized_image)
                except Exception as e:
                    print(
                        'Failed to extract from image: {}. Reason: {}'.format(
                            _file, e))
        else:
            print("Jump ", output)
Example #29
    def process(self):
        # Original & LowMem models go with Adjust or Masked converter
        # Note: GAN prediction outputs a mask + an image, while other predicts only an image
        model_name = self.arguments.trainer
        conv_name = self.arguments.converter
        self.input_aligned_dir = None

        model = PluginLoader.get_model(model_name)(get_folder(
            self.arguments.model_dir))
        if not model.load(self.arguments.swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        input_aligned_dir = Path(self.arguments.input_dir) / Path('aligned')
        if self.arguments.input_aligned_dir is not None:
            input_aligned_dir = self.arguments.input_aligned_dir
        try:
            self.input_aligned_dir = [
                Path(path) for path in get_image_paths(input_aligned_dir)
            ]
            if len(self.input_aligned_dir) == 0:
                print(
                    'Aligned directory is empty, no faces will be converted!')
            elif len(self.input_aligned_dir) <= len(self.input_dir) / 3:
                print(
                    'Aligned directory contains far fewer images than the input, '
                    'are you sure this is the right directory?'
                )
        except Exception:
            print(
                'Aligned directory not found. All faces listed in the alignments file will be converted.'
            )

        converter = PluginLoader.get_converter(conv_name)(
            model.converter(False),
            trainer=self.arguments.trainer,
            blur_size=self.arguments.blur_size,
            seamless_clone=self.arguments.seamless_clone,
            mask_type=self.arguments.mask_type,
            erosion_kernel_size=self.arguments.erosion_kernel_size,
            match_histogram=self.arguments.match_histogram,
            smooth_mask=self.arguments.smooth_mask,
            avg_color_adjust=self.arguments.avg_color_adjust)

        batch = BackgroundGenerator(self.prepare_images(), 1)

        # frame ranges stuff...
        self.frame_ranges = None

        # split out the frame ranges and parse out "min" and "max" values
        minmax = {
            "min": 0,  # never any frames less than 0
            "max": float("inf")
        }

        if self.arguments.frame_ranges:
            self.frame_ranges = [
                tuple(
                    map(lambda q: minmax[q] if q in minmax.keys() else int(q),
                        v.split("-"))) for v in self.arguments.frame_ranges
            ]

        # last number regex. I know regex is hacky, but it's reliably hacky(tm).
        self.imageidxre = re.compile(r'(\d+)(?!.*\d)')

        for item in batch.iterator():
            self.convert(converter, item)
Example #30
 def get_argument_list():
     """ Put the arguments in a list so that they are accessible from both argparse and gui """
     argument_list = []
     argument_list.append({"opts": ("-A", "--input-A"),
                           "action": FullPaths,
                           "dest": "input_A",
                           "default": "input_A",
                           "help": "Input directory. A directory containing training images "
                                   "for face A. Defaults to 'input'"})
     argument_list.append({"opts": ("-B", "--input-B"),
                           "action": FullPaths,
                           "dest": "input_B",
                           "default": "input_B",
                           "help": "Input directory. A directory containing training images "
                                   "for face B Defaults to 'input'"})
     argument_list.append({"opts": ("-m", "--model-dir"),
                           "action": FullPaths,
                           "dest": "model_dir",
                           "default": "models",
                           "help": "Model directory. This is where the training data will "
                                   "be stored. Defaults to 'model'"})
     argument_list.append({"opts": ("-s", "--save-interval"),
                           "type": int,
                           "dest": "save_interval",
                           "default": 100,
                           "help": "Sets the number of iterations before saving the model"})
     argument_list.append({"opts": ("-t", "--trainer"),
                           "type": str,
                           "choices": PluginLoader.get_available_models(),
                           "default": PluginLoader.get_default_model(),
                           "help": "Select which trainer to use, Use LowMem for cards with "
                                   " less than 2GB of VRAM"})
     argument_list.append({"opts": ("-bs", "--batch-size"),
                           "type": int,
                           "default": 64,
                           "help": "Batch size, as a power of 2 (64, 128, 256, etc)"})
     argument_list.append({"opts": ("-ep", "--epochs"),
                           "type": int,
                           "default": 1000000,
                           "help": "Length of training in epochs"})
     argument_list.append({"opts": ("-g", "--gpus"),
                           "type": int,
                           "default": 1,
                           "help": "Number of GPUs to use for training"})
     argument_list.append({"opts": ("-p", "--preview"),
                           "action": "store_true",
                           "dest": "preview",
                           "default": False,
                           "help": "Show preview output. If not specified, write progress "
                                   "to file"})
     argument_list.append({"opts": ("-w", "--write-image"),
                           "action": "store_true",
                           "dest": "write_image",
                           "default": False,
                           "help": "Writes the training result to a file even on "
                                   "preview mode"})
     argument_list.append({"opts": ("-pl", "--use-perceptual-loss"),
                           "action": "store_true",
                           "dest": "perceptual_loss",
                           "default": False,
                           "help": "Use perceptual loss while training"})
     argument_list.append({"opts": ("-ag", "--allow-growth"),
                           "action": "store_true",
                           "dest": "allow_growth",
                           "default": False,
                           "help": "Sets allow_growth option of Tensorflow to spare memory "
                                   "on some configs"})
     argument_list.append({"opts": ("-v", "--verbose"),
                           "action": "store_true",
                           "dest": "verbose",
                           "default": False,
                           "help": "Show verbose output"})
     # This is a hidden argument to indicate that the GUI is being used,
     # so the preview window should be redirected accordingly
     argument_list.append({"opts": ("-gui", "--gui"),
                           "action": "store_true",
                           "dest": "redirect_gui",
                           "default": False,
                           "help": argparse.SUPPRESS})
     return argument_list
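Note: each entry above is a dict whose "opts" key holds the flag names and whose remaining keys become keyword arguments. A minimal sketch of how such a list feeds argparse (assuming get_argument_list and the custom FullPaths action class are importable as shown):

    import argparse

    parser = argparse.ArgumentParser()
    for option in get_argument_list():
        kwargs = {key: val for key, val in option.items() if key != "opts"}
        parser.add_argument(*option["opts"], **kwargs)
    arguments = parser.parse_args()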
Example #31
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both argparse and gui """
     argument_list = []
     argument_list.append({"opts": ("-m", "--model-dir"),
                           "action": FullPaths,
                           "dest": "model_dir",
                           "default": "models",
                           "help": "Model directory. A directory containing the trained model "
                                   "you wish to process. Defaults to 'models'"})
     argument_list.append({"opts": ("-a", "--input-aligned-dir"),
                           "action": FullPaths,
                           "dest": "input_aligned_dir",
                           "default": None,
                           "help": "Input \"aligned directory\". A directory that should "
                                   "contain the aligned faces extracted from the input files. "
                                   "If you delete faces from this folder, they'll be skipped "
                                   "during conversion. If no aligned dir is specified, all "
                                   "faces will be converted"})
     argument_list.append({"opts": ("-t", "--trainer"),
                           "type": str,
                           # case sensitive because this is used to load a plug-in.
                           "choices": PluginLoader.get_available_models(),
                           "default": PluginLoader.get_default_model(),
                           "help": "Select the trainer that was used to create the model"})
     argument_list.append({"opts": ("-c", "--converter"),
                           "type": str,
                           # case sensitive because this is used to load a plugin.
                           "choices": ("Masked", "Adjust"),
                           "default": "Masked",
                           "help": "Converter to use"})
     argument_list.append({"opts": ("-b", "--blur-size"),
                           "type": int,
                           "default": 2,
                           "help": "Blur size. (Masked converter only)"})
     argument_list.append({"opts": ("-e", "--erosion-kernel-size"),
                           "dest": "erosion_kernel_size",
                           "type": int,
                           "default": None,
                           "help": "Erosion kernel size. Positive values apply erosion "
                                   "which reduces the edge of the swapped face. Negative "
                                   "values apply dilation which allows the swapped face "
                                   "to cover more space. (Masked converter only)"})
     argument_list.append({"opts": ("-M", "--mask-type"),
                           # lowercase this, because it's just a string later on.
                           "type": str.lower,
                           "dest": "mask_type",
                           "choices": ["rect", "facehull", "facehullandrect"],
                           "default": "facehullandrect",
                           "help": "Mask to use to replace faces. (Masked converter only)"})
     argument_list.append({"opts": ("-sh", "--sharpen"),
                           "type": str.lower,
                           "dest": "sharpen_image",
                           "choices": ["bsharpen", "gsharpen"],
                           "default": None,
                           "help": "Use Sharpen Image.bsharpen for Box Blur, gsharpen for "
                                   "Gaussian Blur (Masked converter only)"})
     argument_list.append({"opts": ("-g", "--gpus"),
                           "type": int,
                           "default": 1,
                           "help": "Number of GPUs to use for conversion"})
     argument_list.append({"opts": ("-fr", "--frame-ranges"),
                           "nargs": "+",
                           "type": str,
                           "help": "frame ranges to apply transfer to e.g. For frames 10 to "
                                   "50 and 90 to 100 use --frame-ranges 10-50 90-100. Files "
                                   "must have the frame-number as the last number in the "
                                   "name!"})
     argument_list.append({"opts": ("-d", "--discard-frames"),
                           "action": "store_true",
                           "dest": "discard_frames",
                           "default": False,
                           "help": "When used with --frame-ranges discards frames that are "
                                   "not processed instead of writing them out unchanged"})
     argument_list.append({"opts": ("-s", "--swap-model"),
                           "action": "store_true",
                           "dest": "swap_model",
                           "default": False,
                           "help": "Swap the model. Instead of A -> B, swap B -> A"})
     argument_list.append({"opts": ("-S", "--seamless"),
                           "action": "store_true",
                           "dest": "seamless_clone",
                           "default": False,
                           "help": "Use cv2's seamless clone. (Masked converter only)"})
     argument_list.append({"opts": ("-mh", "--match-histogram"),
                           "action": "store_true",
                           "dest": "match_histogram",
                           "default": False,
                           "help": "Use histogram matching. (Masked converter only)"})
     argument_list.append({"opts": ("-sm", "--smooth-mask"),
                           "action": "store_true",
                           "dest": "smooth_mask",
                           "default": True,
                           "help": "Smooth mask (Adjust converter only)"})
     argument_list.append({"opts": ("-aca", "--avg-color-adjust"),
                           "action": "store_true",
                           "dest": "avg_color_adjust",
                           "default": True,
                           "help": "Average color adjust. (Adjust converter only)"})
     return argument_list
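Note: the last two options combine action="store_true" with default=True, which makes the flags no-ops: passing them can only set an already-True value to True. A quick demonstration:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-sm", "--smooth-mask", action="store_true",
                        dest="smooth_mask", default=True)
    print(parser.parse_args([]).smooth_mask)       # True
    print(parser.parse_args(["-sm"]).smooth_mask)  # True; the flag changes nothing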
Example #32
File: cli.py  Project: Nioy/faceswap
 def get_optional_arguments():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = []
     argument_list.append({"opts": ("-m", "--model-dir"),
                           "action": DirFullPaths,
                           "dest": "model_dir",
                           "default": "models",
                           "help": "Model directory. A directory "
                                   "containing the trained model you wish "
                                   "to process. Defaults to 'models'"})
     argument_list.append({"opts": ("-a", "--input-aligned-dir"),
                           "action": DirFullPaths,
                           "dest": "input_aligned_dir",
                           "default": None,
                           "help": "Input \"aligned directory\". A "
                                   "directory that should contain the "
                                   "aligned faces extracted from the input "
                                   "files. If you delete faces from this "
                                   "folder, they'll be skipped during "
                                   "conversion. If no aligned dir is "
                                   "specified, all faces will be "
                                   "converted"})
     argument_list.append({"opts": ("-t", "--trainer"),
                           "type": str,
                           # case sensitive because this is used to
                           # load a plug-in.
                           "choices": PluginLoader.get_available_models(),
                           "default": PluginLoader.get_default_model(),
                           "help": "Select the trainer that was used to "
                                   "create the model"})
     argument_list.append({"opts": ("-c", "--converter"),
                           "type": str,
                           # case sensitive because this is used
                           # to load a plugin.
                           "choices": ("Masked", "Adjust"),
                           "default": "Masked",
                           "help": "Converter to use"})
     argument_list.append({"opts": ("-b", "--blur-size"),
                           "type": int,
                           "default": 2,
                           "help": "Blur size. (Masked converter only)"})
     argument_list.append({"opts": ("-e", "--erosion-kernel-size"),
                           "dest": "erosion_kernel_size",
                           "type": int,
                           "default": None,
                           "help": "Erosion kernel size. Positive values "
                                   "apply erosion which reduces the edge "
                                   "of the swapped face. Negative values "
                                   "apply dilation which allows the "
                                   "swapped face to cover more space. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ("-M", "--mask-type"),
                           # lowercase this, because it's just a
                           # string later on.
                           "type": str.lower,
                           "dest": "mask_type",
                           "choices": ["rect",
                                       "facehull",
                                       "facehullandrect"],
                           "default": "facehullandrect",
                           "help": "Mask to use to replace faces. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ("-sh", "--sharpen"),
                           "type": str.lower,
                           "dest": "sharpen_image",
                           "choices": ["bsharpen", "gsharpen"],
                           "default": None,
                           "help": "Use Sharpen Image. bsharpen for Box "
                                   "Blur, gsharpen for Gaussian Blur "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ("-g", "--gpus"),
                           "type": int,
                           "default": 1,
                           "help": "Number of GPUs to use for conversion"})
     argument_list.append({"opts": ("-fr", "--frame-ranges"),
                           "nargs": "+",
                           "type": str,
                           "help": "frame ranges to apply transfer to e.g. "
                                   "For frames 10 to 50 and 90 to 100 use "
                                   "--frame-ranges 10-50 90-100. Files "
                                   "must have the frame-number as the last "
                                   "number in the name!"})
     argument_list.append({"opts": ("-d", "--discard-frames"),
                           "action": "store_true",
                           "dest": "discard_frames",
                           "default": False,
                           "help": "When used with --frame-ranges discards "
                                   "frames that are not processed instead "
                                   "of writing them out unchanged"})
     argument_list.append({"opts": ("-s", "--swap-model"),
                           "action": "store_true",
                           "dest": "swap_model",
                           "default": False,
                           "help": "Swap the model. Instead of A -> B, "
                                   "swap B -> A"})
     argument_list.append({"opts": ("-S", "--seamless"),
                           "action": "store_true",
                           "dest": "seamless_clone",
                           "default": False,
                           "help": "Use cv2's seamless clone. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ("-mh", "--match-histogram"),
                           "action": "store_true",
                           "dest": "match_histogram",
                           "default": False,
                           "help": "Use histogram matching. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ("-sm", "--smooth-mask"),
                           "action": "store_true",
                           "dest": "smooth_mask",
                           "default": True,
                           "help": "Smooth mask (Adjust converter only)"})
     argument_list.append({"opts": ("-aca", "--avg-color-adjust"),
                           "action": "store_true",
                           "dest": "avg_color_adjust",
                           "default": True,
                           "help": "Average color adjust. "
                                   "(Adjust converter only)"})
     return argument_list
Example #33
    def convert(self, video_file, swap_model=False, duration=None,
                start_time=None, use_gan=False, face_filter=False,
                photos=True, crop_x=None, width=None, side_by_side=False,
                live=False, webcam=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list="0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        if not model.load(swap_model):
            print('Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors = True):
            if convert_colors:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    if (not live and not webcam):
                        frame = frame.astype(numpy.float32)
            if convert_colors:                    
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Swap RGB to BGR to work with OpenCV
            return frame
        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        if (live):
            # generate dummy content for testing /dev/video1
            #ffmpeg -f x11grab -s 640x480 -i :0.0+10,20 -vf format=pix_fmts=yuv420p -f v4l2 /dev/video1
            print("Staring live mode. Capturing video from webcam!")
            print("Press q to Quit")
            # connect to webcam
            video_capture = cv2.VideoCapture(0)
            time.sleep(1)

            width = video_capture.get(3)  # float
            height = video_capture.get(4) # float
            print("webcam dimensions = {} x {}".format(width,height))
            
            #video_capture = cv2.VideoCapture('./data/videos/ale.mp4')
            if (webcam):
                # create fake webcam device
                camera = pyfakewebcam.FakeWebcam('/dev/video1', 640, 480)
                camera.print_capabilities()
                print("Fake webcam created, try using appear.in on Firefox or  ")
          
            # loop until user clicks 'q' to exit
            while True:
                ret, frame = video_capture.read()
                frame = cv2.resize(frame, (640, 480))
                # flip image, because webcam inverts it and we trained the model the other way! 
                frame = cv2.flip(frame,1)
                image = _convert_frame(frame, convert_colors = False)
                # flip it back
                image = cv2.flip(image,1)

                
                if (webcam):
                    time.sleep(1/30.0)
Example #34
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False):
        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"
        model = PluginLoader.get_model(model_name)(Path(
            self._model_path(use_gan)))
        if not model.load(swap_model):
            print(
                'Model Not Found! A valid model must be provided to continue!')
            exit(1)

        # Load converter
        converter = PluginLoader.get_converter(converter_name)
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
            for face in detect_faces(frame, "cnn"):
                if (not face_filter) or (face_filter and filter.check(face)):
                    frame = converter.patch_image(frame, face)
                    frame = frame.astype(numpy.float32)
            if convert_colors:
                frame = cv2.cvtColor(
                    frame,
                    cv2.COLOR_BGR2RGB)  # Swap RGB to BGR to work with OpenCV
            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, audio=True)  # keep audio track

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
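Note: the ConfigProto/Session block at the top of this snippet is TensorFlow 1.x API. On TensorFlow 2.x, the equivalent per-GPU memory-growth setting would look roughly like the sketch below (an assumption about the modern API, not code from this project):

    import tensorflow as tf

    # Let TF2 grow GPU memory on demand instead of reserving it all upfront.
    # This must run before any GPU tensors are allocated.
    for gpu in tf.config.list_physical_devices('GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)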
Example #35
    def convert(self,
                video_file,
                swap_model=False,
                duration=None,
                start_time=None,
                use_gan=False,
                face_filter=False,
                photos=True,
                crop_x=None,
                width=None,
                side_by_side=False,
                live=False):

        # Magic incantation to not have tensorflow blow up with an out of memory error.
        import tensorflow as tf
        import keras.backend.tensorflow_backend as K
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.gpu_options.visible_device_list = "0"
        K.set_session(tf.Session(config=config))

        # Load model
        model_name = "Original"
        converter_name = "Masked"
        if use_gan:
            model_name = "GAN"
            converter_name = "GAN"

        # -----------------------------------------------------------
        # Workaround for a model-loading bug:
        #   model = PluginLoader.get_model(model_name)(Path(self._model_path(use_gan)))
        # crashes with "TypeError: __init__() takes exactly 3 arguments (2 given)",
        # because get_model() resolves to plugins.Model_Original.Model.Model
        # here, while the faceit_live module resolves it to
        # plugins.Model_Original.Model. Importing the live plugin directly
        # avoids the crash (a crutch, but it works).
        # -----------------------------------------------------------
        model = PluginLoader._import("Model_LIVE", "Model_LIVE")(Path(
            self._model_path(use_gan)))

        if not model.load(swap_model):
            print(
                'Model not found! A valid model must be provided to continue!')
            exit(1)

        print('Checkpoint_1 ... Model loaded')

        # -----------------------------------------------------------
        # Same workaround for converter loading:
        #   converter = PluginLoader.get_converter(converter_name)  # ==> crash
        # Import the live masked converter plugin directly instead.
        # -----------------------------------------------------------
        converter = PluginLoader._import("Convert", "Convert_Masked_LIVE")
        converter = converter(model.converter(False),
                              blur_size=8,
                              seamless_clone=True,
                              mask_type="facehullandrect",
                              erosion_kernel_size=None,
                              smooth_mask=True,
                              avg_color_adjust=True)

        print('Checkpoint_2 ... Converter loaded')

        # Load face filter
        filter_person = self._person_a
        if swap_model:
            filter_person = self._person_b
        filter = FaceFilter_LIVE(self._people[filter_person]['faces'])

        # Define conversion method per frame
        def _convert_frame(frame, convert_colors=True):
            # Color conversion is disabled on the live path: frames stay
            # BGR so they can be shown directly with OpenCV.
            DEBUG_MODE = 0
            for face in detect_faces_LIVE(frame, "cnn"):

                if DEBUG_MODE:
                    print('Got face!')
                    # face exposes: image, x, y, w, h, landmarks
                    print('Face geometry: ({},{},{},{})'.format(
                        face.x, face.y, face.w, face.h))
                    print('Face landmarks: {}'.format(face.landmarks))
                    cv2.imshow('Face', face.image)
                    continue

                if (not face_filter) or filter.check(face):
                    frame = converter.patch_image(frame, face)
                    if not live:
                        frame = frame.astype(numpy.float32)

            return frame

        def _convert_helper(get_frame, t):
            return _convert_frame(get_frame(t))

        # ===================================================
        if live:

            print('Starting live mode ...')
            print('Press "Q" to Quit')

            PATH_TO_VIDEO = './data/videos/emma_360_cut.mp4'

            if TEST_2_FACES_FLAG:
                # PATH_TO_VIDEO = './_data/videos/pair_360p_original.mp4'
                PATH_TO_VIDEO = './data/videos/pair_360p_cut.mp4'

            video_capture = cv2.VideoCapture(PATH_TO_VIDEO)

            width = video_capture.get(cv2.CAP_PROP_FRAME_WIDTH)    # float
            height = video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)  # float
            print("video dimensions = {} x {}".format(width, height))

            while True:

                ret, frame = video_capture.read()
                if not ret:
                    print("FRAME GRAB FAILED ... QUITTING")
                    video_capture.release()
                    break

                print('HANDLING NEW FRAME ...')

                if CROP_HALF_OF_FRAME == 'left':
                    # black out the left half of the frame
                    frame[:, 0:frame.shape[1] // 2] = 0

                try:  # with flip:

                    # flip the image, because the webcam mirrors it and the
                    # model was trained the other way!
                    frame = cv2.flip(frame, 1)

                    image = _convert_frame(frame, convert_colors=False)
                    print('GOT AN IMAGE!')

                    # flip it back
                    frame = cv2.flip(frame, 1)
                    image = cv2.flip(image, 1)

                except Exception:

                    try:  # without flip:

                        image = _convert_frame(frame, convert_colors=False)
                        print('GOT AN IMAGE!')

                    except Exception:

                        print("CONVERSION FAILED ... SKIPPING FRAME")
                        continue

                cv2.imshow('Video', image)
                cv2.imshow('Original', frame)

                # Hit 'q' on the keyboard to quit!
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    print("KEYBOARD INTERRUPT ... I'M QUIT")
                    video_capture.release()
                    break

            cv2.destroyAllWindows()
            exit()
        # ===================================================

        media_path = self._video_path({'name': video_file})
        if not photos:
            # Process video; start loading the video clip
            video = VideoFileClip(media_path)

            # If a duration is set, trim clip
            if duration:
                video = video.subclip(start_time, start_time + duration)

            # Resize clip before processing
            if width:
                video = video.resize(width=width)

            # Crop clip if desired
            if crop_x:
                video = video.fx(crop, x2=video.w / 2)

            # Kick off convert frames for each frame
            new_video = video.fl(_convert_helper)

            # Stack clips side by side
            if side_by_side:

                def add_caption(caption, clip):
                    text = (TextClip(caption,
                                     font='Amiri-regular',
                                     color='white',
                                     fontsize=80).margin(40).set_duration(
                                         clip.duration).on_color(
                                             color=(0, 0, 0), col_opacity=0.6))
                    return CompositeVideoClip([clip, text])

                video = add_caption("Original", video)
                new_video = add_caption("Swapped", new_video)
                final_video = clips_array([[video], [new_video]])
            else:
                final_video = new_video

            # Resize clip after processing
            #final_video = final_video.resize(width = (480 * 2))

            # Write video
            if not os.path.exists(self.OUTPUT_PATH):
                os.makedirs(self.OUTPUT_PATH)
            output_path = os.path.join(self.OUTPUT_PATH, video_file)
            final_video.write_videofile(output_path, audio=True)  # keep audio track

            # Clean up
            del video
            del new_video
            del final_video
        else:
            # Process a directory of photos
            for face_file in os.listdir(media_path):
                face_path = os.path.join(media_path, face_file)
                image = cv2.imread(face_path)
                image = _convert_frame(image, convert_colors=False)
                cv2.imwrite(os.path.join(self.OUTPUT_PATH, face_file), image)
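The PluginLoader._import calls above bypass get_model/get_converter to dodge the double-resolution bug described in the comments. In faceswap-style loaders, _import is typically a thin importlib wrapper; a minimal sketch under that assumption (the helper name and module layout are illustrative, not the project's actual implementation):

    import importlib

    def plugin_import(attr, module):
        # e.g. plugin_import("Model_LIVE", "Model_LIVE") imports
        # plugins.Model_LIVE and returns its Model_LIVE attribute
        plugin = importlib.import_module("plugins." + module)
        return getattr(plugin, attr)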
Example #36
 def process(self):
     """ Run extraction """
     print("\n[EXTRACT FACES]")  # Tidy up cli output
     self.check_folder()
     self.extractor = PluginLoader.get_extractor("Align")()
     self.export_faces()
Example #37
File: cli.py Project: Nioy/faceswap
 def get_argument_list():
     """ Put the arguments in a list so that they are accessible from both
     argparse and gui """
     argument_list = list()
     argument_list.append({"opts": ("-A", "--input-A"),
                           "action": DirFullPaths,
                           "dest": "input_A",
                           "default": "input_A",
                           "help": "Input directory. A directory "
                                   "containing training images for face A. "
                                   "Defaults to 'input'"})
     argument_list.append({"opts": ("-B", "--input-B"),
                           "action": DirFullPaths,
                           "dest": "input_B",
                           "default": "input_B",
                           "help": "Input directory. A directory "
                                   "containing training images for face B. "
                                   "Defaults to 'input'"})
     argument_list.append({"opts": ("-m", "--model-dir"),
                           "action": DirFullPaths,
                           "dest": "model_dir",
                           "default": "models",
                           "help": "Model directory. This is where the "
                                   "training data will be stored. "
                                   "Defaults to 'model'"})
     argument_list.append({"opts": ("-s", "--save-interval"),
                           "type": int,
                           "dest": "save_interval",
                           "default": 100,
                           "help": "Sets the number of iterations before "
                                   "saving the model"})
     argument_list.append({"opts": ("-t", "--trainer"),
                           "type": str,
                           "choices": PluginLoader.get_available_models(),
                           "default": PluginLoader.get_default_model(),
                           "help": "Select which trainer to use, Use "
                                   "LowMem for cards with less than 2GB of "
                                   "VRAM"})
     argument_list.append({"opts": ("-bs", "--batch-size"),
                           "type": int,
                           "default": 64,
                           "help": "Batch size, as a power of 2 "
                                   "(64, 128, 256, etc)"})
     argument_list.append({"opts": ("-it", "--iterations"),
                           "type": int,
                           "default": 1000000,
                           "help": "Length of training in iterations"})
     argument_list.append({"opts": ("-g", "--gpus"),
                           "type": int,
                           "default": 1,
                           "help": "Number of GPUs to use for training"})
     argument_list.append({"opts": ("-p", "--preview"),
                           "action": "store_true",
                           "dest": "preview",
                           "default": False,
                           "help": "Show preview output. If not specified, "
                                   "write progress to file"})
     argument_list.append({"opts": ("-w", "--write-image"),
                           "action": "store_true",
                           "dest": "write_image",
                           "default": False,
                           "help": "Writes the training result to a file "
                                   "even on preview mode"})
     argument_list.append({"opts": ("-pl", "--use-perceptual-loss"),
                           "action": "store_true",
                           "dest": "perceptual_loss",
                           "default": False,
                           "help": "Use perceptual loss while training"})
     argument_list.append({"opts": ("-ag", "--allow-growth"),
                           "action": "store_true",
                           "dest": "allow_growth",
                           "default": False,
                           "help": "Sets allow_growth option of Tensorflow "
                                   "to spare memory on some configs"})
     argument_list.append({"opts": ("-v", "--verbose"),
                           "action": "store_true",
                           "dest": "verbose",
                           "default": False,
                           "help": "Show verbose output"})
     # This is a hidden argument to indicate that the GUI is being used,
     # so the preview window should be redirected accordingly
     argument_list.append({"opts": ("-gui", "--gui"),
                           "action": "store_true",
                           "dest": "redirect_gui",
                           "default": False,
                           "help": argparse.SUPPRESS})
     return argument_list
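Because each entry keeps its flags under "opts" and the remaining keys are plain add_argument keyword arguments, feeding the list to argparse is mechanical. A minimal sketch (build_parser is a hypothetical helper, not part of the project):

    import argparse

    def build_parser(argument_list):
        parser = argparse.ArgumentParser()
        for option in argument_list:
            flags = option["opts"]
            kwargs = {key: value for key, value in option.items()
                      if key != "opts"}
            parser.add_argument(*flags, **kwargs)
        return parser

    # arguments = build_parser(get_argument_list()).parse_args()

A GUI can walk the same list to build its widgets, which is why the options are stored as data instead of being added straight to a parser.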
Example #38
 def get_optional_arguments():
     ''' Put the arguments in a list so that they are accessible from both argparse and gui '''
     argument_list = []
     argument_list.append({"opts": ('-m', '--model-dir'),
                           "action": FullPaths,
                           "dest": "model_dir",
                           "default": "models",
                           "help": "Model directory. A directory containing "
                                   "the trained model you wish to process. "
                                   "Defaults to 'models'"})
     argument_list.append({"opts": ('-a', '--input-aligned-dir'),
                           "action": FullPaths,
                           "dest": "input_aligned_dir",
                           "default": None,
                           "help": "Input \"aligned directory\". A directory "
                                   "that should contain the aligned faces "
                                   "extracted from the input files. If you "
                                   "delete faces from this folder, they'll "
                                   "be skipped during conversion. If no "
                                   "aligned dir is specified, all faces will "
                                   "be converted."})
     argument_list.append({"opts": ('-t', '--trainer'),
                           "type": str,
                           # case sensitive because this is used to load a plug-in.
                           "choices": PluginLoader.get_available_models(),
                           "default": PluginLoader.get_default_model(),
                           "help": "Select the trainer that was used to "
                                   "create the model."})
     argument_list.append({"opts": ('-s', '--swap-model'),
                           "action": "store_true",
                           "dest": "swap_model",
                           "default": False,
                           "help": "Swap the model. Instead of A -> B, "
                                   "swap B -> A."})
     argument_list.append({"opts": ('-c', '--converter'),
                           "type": str,
                           # case sensitive because this is used to load a plugin.
                           "choices": ("Masked", "Adjust"),
                           "default": "Masked",
                           "help": "Converter to use."})
     argument_list.append({"opts": ('-D', '--detector'),
                           "type": str,
                           # case sensitive because this is used to load a plugin.
                           "choices": ("hog", "cnn"),
                           "default": "hog",
                           "help": "Detector to use. 'cnn' detects faces at "
                                   "many more angles but is far more "
                                   "resource intensive and may fail on "
                                   "large files."})
     argument_list.append({"opts": ('-fr', '--frame-ranges'),
                           "nargs": "+",
                           "type": str,
                           "help": "Frame ranges to apply transfer to, e.g. "
                                   "for frames 10 to 50 and 90 to 100 use "
                                   "--frame-ranges 10-50 90-100. Files must "
                                   "have the frame number as the last "
                                   "number in the name!"})
     argument_list.append({"opts": ('-d', '--discard-frames'),
                           "action": "store_true",
                           "dest": "discard_frames",
                           "default": False,
                           "help": "When used with --frame-ranges, discards "
                                   "frames that are not processed instead "
                                   "of writing them out unchanged."})
     argument_list.append({"opts": ('-l', '--ref_threshold'),
                           "type": float,
                           "dest": "ref_threshold",
                           "default": 0.6,
                           "help": "Threshold for positive face "
                                   "recognition"})
     argument_list.append({"opts": ('-n', '--nfilter'),
                           "type": str,
                           "dest": "nfilter",
                           "nargs": '+',
                           "default": "nfilter.jpg",
                           "help": "Reference image for the persons you do "
                                   "not want to process. Should be a front "
                                   "portrait"})
     argument_list.append({"opts": ('-f', '--filter'),
                           "type": str,
                           "dest": "filter",
                           "nargs": "+",
                           "default": "filter.jpg",
                           "help": "Reference images for the person you "
                                   "want to process. Should be a front "
                                   "portrait"})
     argument_list.append({"opts": ('-b', '--blur-size'),
                           "type": int,
                           "default": 2,
                           "help": "Blur size. (Masked converter only)"})
     argument_list.append({"opts": ('-S', '--seamless'),
                           "action": "store_true",
                           "dest": "seamless_clone",
                           "default": False,
                           "help": "Use cv2's seamless clone. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ('-M', '--mask-type'),
                           # lowercase this, because it's just a string later on.
                           "type": str.lower,
                           "dest": "mask_type",
                           "choices": ["rect", "facehull", "facehullandrect"],
                           "default": "facehullandrect",
                           "help": "Mask to use to replace faces. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ('-e', '--erosion-kernel-size'),
                           "dest": "erosion_kernel_size",
                           "type": int,
                           "default": None,
                           "help": "Erosion kernel size. (Masked converter "
                                   "only). Positive values apply erosion, "
                                   "which reduces the edge of the swapped "
                                   "face. Negative values apply dilation, "
                                   "which allows the swapped face to cover "
                                   "more space."})
     argument_list.append({"opts": ('-mh', '--match-histogram'),
                           "action": "store_true",
                           "dest": "match_histogram",
                           "default": False,
                           "help": "Use histogram matching. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ('-sh', ),
                           "type": str.lower,
                           "dest": "sharpen_image",
                           "choices": ["bsharpen", "gsharpen"],
                           "default": None,
                           "help": "Use Sharpen Image: bsharpen = Box Blur, "
                                   "gsharpen = Gaussian Blur. "
                                   "(Masked converter only)"})
     argument_list.append({"opts": ('-sm', '--smooth-mask'),
                           "action": "store_true",
                           "dest": "smooth_mask",
                           "default": True,
                           "help": "Smooth mask (Adjust converter only)"})
     argument_list.append({"opts": ('-aca', '--avg-color-adjust'),
                           "action": "store_true",
                           "dest": "avg_color_adjust",
                           "default": True,
                           "help": "Average color adjust. "
                                   "(Adjust converter only)"})
     argument_list.append({"opts": ('-g', '--gpus'),
                           "type": int,
                           "default": 1,
                           "help": "Number of GPUs to use for conversion"})
     return argument_list
Example #39
    def add_optional_arguments(self, parser):
        parser.add_argument(
            '-m',
            '--model-dir',
            action=FullPaths,
            dest="model_dir",
            default="models",
            help="Model directory. A directory containing the trained model \
                            you wish to process. Defaults to 'models'")

        parser.add_argument(
            '-a',
            '--input-aligned-dir',
            action=FullPaths,
            dest="input_aligned_dir",
            default=None,
            help=
            "Input \"aligned directory\". A directory that should contain the \
                            aligned faces extracted from the input files. If you delete faces from \
                            this folder, they'll be skipped during conversion. If no aligned dir is \
                            specified, all faces will be converted.")

        parser.add_argument(
            '-t',
            '--trainer',
            type=str,
            choices=PluginLoader.get_available_models(
            ),  # case sensitive because this is used to load a plug-in.
            default=PluginLoader.get_default_model(),
            help="Select the trainer that was used to create the model.")

        parser.add_argument(
            '-s',
            '--swap-model',
            action="store_true",
            dest="swap_model",
            default=False,
            help="Swap the model. Instead of A -> B, swap B -> A.")

        parser.add_argument(
            '-c',
            '--converter',
            type=str,
            choices=(
                "Masked", "Adjust"
            ),  # case sensitive because this is used to load a plugin.
            default="Masked",
            help="Converter to use.")

        parser.add_argument(
            '-D',
            '--detector',
            type=str,
            choices=(
                "hog", "cnn"
            ),  # case sensitive because this is used to load a plugin.
            default="hog",
            help="Detector to use. 'cnn' detects faces at many more angles "
            "but is far more resource intensive and may fail on large files.")

        parser.add_argument(
            '-fr',
            '--frame-ranges',
            nargs="+",
            type=str,
            help="Frame ranges to apply transfer to, e.g. for frames 10 to "
            "50 and 90 to 100 use --frame-ranges 10-50 90-100. Files must "
            "have the frame number as the last number in the name!")

        parser.add_argument(
            '-d',
            '--discard-frames',
            action="store_true",
            dest="discard_frames",
            default=False,
            help=
            "When used with --frame-ranges discards frames that are not processed instead of writing them out unchanged."
        )

        parser.add_argument('-l',
                            '--ref_threshold',
                            type=float,
                            dest="ref_threshold",
                            default=0.6,
                            help="Threshold for positive face recognition")

        parser.add_argument(
            '-n',
            '--nfilter',
            type=str,
            dest="nfilter",
            nargs='+',
            default="nfilter.jpg",
            help=
            "Reference image for the persons you do not want to process. Should be a front portrait"
        )

        parser.add_argument(
            '-f',
            '--filter',
            type=str,
            dest="filter",
            nargs="+",
            default="filter.jpg",
            help=
            "Reference images for the person you want to process. Should be a front portrait"
        )

        parser.add_argument('-b',
                            '--blur-size',
                            type=int,
                            default=2,
                            help="Blur size. (Masked converter only)")

        parser.add_argument(
            '-S',
            '--seamless',
            action="store_true",
            dest="seamless_clone",
            default=False,
            help="Use cv2's seamless clone. (Masked converter only)")

        parser.add_argument(
            '-M',
            '--mask-type',
            type=str.lower,  # lowercase this, because it's just a string later on.
            dest="mask_type",
            choices=["rect", "facehull", "facehullandrect"],
            default="facehullandrect",
            help="Mask to use to replace faces. (Masked converter only)")

        parser.add_argument(
            '-e',
            '--erosion-kernel-size',
            dest="erosion_kernel_size",
            type=int,
            default=None,
            help=
            "Erosion kernel size. (Masked converter only). Positive values apply erosion which reduces the edge of the swapped face. Negative values apply dilation which allows the swapped face to cover more space."
        )

        parser.add_argument(
            '-mh',
            '--match-histogram',
            action="store_true",
            dest="match_histogram",
            default=False,
            help="Use histogram matching. (Masked converter only)")

        parser.add_argument('-sm',
                            '--smooth-mask',
                            action="store_true",
                            dest="smooth_mask",
                            default=True,
                            help="Smooth mask (Adjust converter only)")

        parser.add_argument(
            '-aca',
            '--avg-color-adjust',
            action="store_true",
            dest="avg_color_adjust",
            default=True,
            help="Average color adjust. (Adjust converter only)")
        return parser
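The --erosion-kernel-size help above describes standard morphological operations on the face mask. In OpenCV terms the adjustment would look roughly like this sketch (adjust_mask is a hypothetical helper assuming a uint8 mask, not the project's converter code):

    import cv2

    def adjust_mask(mask, erosion_kernel_size):
        # Positive sizes erode (shrink) the mask edge; negative sizes
        # dilate it so the swapped face covers more space.
        if not erosion_kernel_size:
            return mask
        size = abs(erosion_kernel_size)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
        if erosion_kernel_size > 0:
            return cv2.erode(mask, kernel, iterations=1)
        return cv2.dilate(mask, kernel, iterations=1)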
Example #40
File: fsmedia.py Project: Nioy/faceswap
    def load_extractor(extractor_name="Align"):
        """ Load the requested extractor for extraction """
        extractor = PluginLoader.get_extractor(extractor_name)()

        return extractor