def onInitialize(self, batch_size=-1, **in_options):
    """Build a two-class FUNIT translator plus its data pipeline.

    In training mode, wires one src and one dst face generator.
    Otherwise, precomputes the average src class code for inference.
    """
    # Project convention: pull nnlib symbols (FUNIT, Keras/TF, ...) into scope.
    exec(nnlib.code_import_all, locals(), globals())
    self.set_vram_batch_requirements({2: 1, 3: 1, 4: 4, 5: 8, 6: 16})

    resolution = self.options['resolution']
    face_type = self.face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF

    # num_classes=2: src vs dst identity. tf_cpu_mode moves optimizer state
    # off-GPU depending on the user's optimizer_mode choice.
    self.model = FUNIT(face_type_str=FaceType.toString(face_type),
                       batch_size=self.batch_size,
                       encoder_nf=64,
                       encoder_downs=2,
                       encoder_res_blk=2,
                       class_downs=4,
                       class_nf=64,
                       class_latent=64,
                       mlp_nf=256,
                       mlp_blks=2,
                       dis_nf=64,
                       dis_res_blks=10,
                       num_classes=2,
                       subpixel_decoder=True,
                       initialize_weights=self.is_first_run(),
                       is_training=self.is_training_mode,
                       tf_cpu_mode=self.options['optimizer_mode'] - 1)

    if not self.is_first_run():
        self.load_weights_safe(self.model.get_model_filename_list())

    sp = SampleProcessor.Types
    face_type = sp.FACE_TYPE_FULL if self.options['face_type'] == 'f' else sp.FACE_TYPE_HALF

    if self.is_training_mode:
        sample_types = [{'types': (sp.IMG_TRANSFORMED, face_type, sp.MODE_BGR),
                         'resolution': resolution,
                         'normalize_tanh': True}]

        src_generator = SampleGeneratorFace(self.training_data_src_path,
                                            debug=self.is_debug(),
                                            batch_size=self.batch_size,
                                            sample_process_options=SampleProcessor.Options(random_flip=True),
                                            output_sample_types=sample_types)
        dst_generator = SampleGeneratorFace(self.training_data_dst_path,
                                            debug=self.is_debug(),
                                            batch_size=self.batch_size,
                                            sample_process_options=SampleProcessor.Options(random_flip=True),
                                            output_sample_types=sample_types)
        self.set_training_data_generators([src_generator, dst_generator])
    else:
        # Inference: walk every src sample once and average its class codes.
        generator = SampleGeneratorFace(self.training_data_src_path,
                                        batch_size=1,
                                        sample_process_options=SampleProcessor.Options(),
                                        output_sample_types=[{'types': (sp.IMG_SOURCE, face_type, sp.MODE_BGR),
                                                              'resolution': resolution,
                                                              'normalize_tanh': True}])
        io.log_info("Calculating average src face style...")
        codes = []
        for _ in io.progress_bar_generator(range(generator.get_total_sample_count())):
            codes += self.model.get_average_class_code(generator.generate_next())

        # Keep a leading batch axis so the code can be fed to the model directly.
        self.average_class_code = np.mean(np.array(codes), axis=0)[None, ...]
def onInitialize(self, batch_size=-1, **in_options):
    """Build a multi-class FUNIT model (one class per person id) and four
    person-id-aware training generators (two over src, two over dst).

    Fix: the generators previously hard-coded 'resolution': 128 while the
    user-configured self.options['resolution'] was computed and then ignored;
    they now honor the configured resolution.
    """
    # Project convention: pull nnlib symbols (FUNIT, Keras/TF, ...) into scope.
    exec(nnlib.code_import_all, locals(), globals())
    self.set_vram_batch_requirements({4: 16})

    resolution = self.options['resolution']
    face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF

    # One FUNIT class per distinct person id present in the src faceset.
    person_id_max_count = SampleGeneratorFace.get_person_id_max_count(self.training_data_src_path)

    self.model = FUNIT(face_type_str=FaceType.toString(face_type),
                       batch_size=self.batch_size,
                       encoder_nf=64,
                       encoder_downs=2,
                       encoder_res_blk=2,
                       class_downs=4,
                       class_nf=64,
                       class_latent=64,
                       mlp_nf=256,
                       mlp_blks=2,
                       dis_nf=64,
                       dis_res_blks=10,
                       num_classes=person_id_max_count,
                       subpixel_decoder=True,
                       initialize_weights=self.is_first_run(),
                       is_training=self.is_training_mode)

    if not self.is_first_run():
        self.load_weights_safe(self.model.get_model_filename_list())

    if self.is_training_mode:
        t = SampleProcessor.Types
        face_type = t.FACE_TYPE_FULL if self.options['face_type'] == 'f' else t.FACE_TYPE_HALF

        # Was hard-coded to 128; now follows the configured resolution.
        output_sample_types = [{'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR),
                                'resolution': resolution,
                                'normalize_tanh': True}]

        def _make_generator(path):
            # All four generators share identical options; only the faceset path differs.
            return SampleGeneratorFace(path,
                                       debug=self.is_debug(),
                                       batch_size=self.batch_size,
                                       sample_process_options=SampleProcessor.Options(random_flip=True),
                                       output_sample_types=output_sample_types,
                                       person_id_mode=True)

        self.set_training_data_generators([_make_generator(self.training_data_src_path),
                                           _make_generator(self.training_data_src_path),
                                           _make_generator(self.training_data_dst_path),
                                           _make_generator(self.training_data_dst_path)])
def onInitialize(self, batch_size=-1, **in_options):
    """Build the per-person FUNIT model and its four cached training generators.

    src generators yield augmented (IMG_TRANSFORMED) samples; dst generators
    yield unaugmented (IMG_SOURCE) samples.
    """
    # Project convention: pull nnlib symbols (FUNIT, Keras/TF, ...) into scope.
    exec(nnlib.code_import_all, locals(), globals())
    self.set_vram_batch_requirements({4: 16, 11: 24})

    resolution = self.options['resolution']
    face_type = FaceType.FULL if self.options['face_type'] == 'f' else FaceType.HALF
    person_id_max_count = SampleGeneratorFacePerson.get_person_id_max_count(self.training_data_src_path)

    # NOTE(review): unlike the sibling variants, mlp_nf is not passed here —
    # this relies on FUNIT's default; confirm that is intentional.
    self.model = FUNIT(face_type_str=FaceType.toString(face_type),
                       batch_size=self.batch_size,
                       encoder_nf=64,
                       encoder_downs=2,
                       encoder_res_blk=2,
                       class_downs=4,
                       class_nf=64,
                       class_latent=64,
                       mlp_blks=2,
                       dis_nf=64,
                       dis_res_blks=8,  # 10
                       num_classes=person_id_max_count,
                       subpixel_decoder=True,
                       initialize_weights=self.is_first_run(),
                       is_training=self.is_training_mode,
                       tf_cpu_mode=self.options['optimizer_mode'] - 1)

    if not self.is_first_run():
        self.load_weights_safe(self.model.get_model_filename_list())

    if self.is_training_mode:
        t = SampleProcessor.Types
        # Same semantics as the original if/elif chain: unknown values fall
        # through and face_type keeps its earlier (FaceType.*) value.
        face_type = {'h': t.FACE_TYPE_HALF,
                     'mf': t.FACE_TYPE_MID_FULL,
                     'f': t.FACE_TYPE_FULL}.get(self.options['face_type'], face_type)

        transformed_types = [{'types': (t.IMG_TRANSFORMED, face_type, t.MODE_BGR),
                              'resolution': resolution,
                              'normalize_tanh': True}]
        source_types = [{'types': (t.IMG_SOURCE, face_type, t.MODE_BGR),
                         'resolution': resolution,
                         'normalize_tanh': True}]

        def _build_generator(path, sample_types):
            # Shared generator settings; only path and sample types vary.
            return SampleGeneratorFacePerson(path,
                                             debug=self.is_debug(),
                                             batch_size=self.batch_size,
                                             sample_process_options=SampleProcessor.Options(random_flip=True,
                                                                                            rotation_range=[0, 0]),
                                             output_sample_types=sample_types,
                                             person_id_mode=1,
                                             use_caching=True,
                                             generators_count=1)

        self.set_training_data_generators([
            _build_generator(self.training_data_src_path, transformed_types),
            _build_generator(self.training_data_src_path, transformed_types),
            _build_generator(self.training_data_dst_path, source_types),
            _build_generator(self.training_data_dst_path, source_types),
        ])