def onInitialize(self, **in_options):
    """Wire up the full-face 128px masked autoencoder pair (src/dst).

    Builds one shared encoder and two decoders, restores stored weights when
    resuming, assembles the two trainable Keras models, and — in training
    mode — compiles them and installs the face-sample generators.
    """
    self.set_vram_batch_requirements(
        {4.5: 16, 5: 16, 6: 16, 7: 16, 8: 24, 9: 24, 10: 32, 11: 32, 12: 32, 13: 48})

    input_bgr = self.keras.layers.Input(shape=(128, 128, 3))
    input_mask = self.keras.layers.Input(shape=(128, 128, 1))  # same spatial size as the output

    self.encoder = self.Encoder(input_bgr)
    self.decoder_src = self.Decoder()
    self.decoder_dst = self.Decoder()

    # Resume from previously stored weights unless this is a fresh run.
    if not self.is_first_run():
        for network, weights_file in ((self.encoder, self.encoderH5),
                                      (self.decoder_src, self.decoder_srcH5),
                                      (self.decoder_dst, self.decoder_dstH5)):
            network.load_weights(self.get_strpath_storage_for_file(weights_file))

    # The mask tensor is an extra model input; it is consumed by the masked
    # DSSIM loss below rather than by the decoder graph itself.
    self.autoencoder_src = self.keras.models.Model(
        [input_bgr, input_mask], self.decoder_src(self.encoder(input_bgr)))
    self.autoencoder_dst = self.keras.models.Model(
        [input_bgr, input_mask], self.decoder_dst(self.encoder(input_bgr)))

    if self.is_training_mode:
        self.autoencoder_src, self.autoencoder_dst = self.to_multi_gpu_model_if_possible(
            [self.autoencoder_src, self.autoencoder_dst])

    adam = self.keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)
    masked_dssim = DSSIMMaskLossClass(self.tf)([input_mask])
    self.autoencoder_src.compile(optimizer=adam, loss=[masked_dssim, 'mse'])
    self.autoencoder_dst.compile(optimizer=adam, loss=[masked_dssim, 'mse'])

    if self.is_training_mode:
        from models import TrainingDataGenerator
        flags = TrainingDataGenerator.SampleTypeFlags

        def make_generator(data_path):
            # Each generator receives its own freshly-built sample-type list
            # (all samples at 128px, full-face, with a full mask channel).
            return TrainingDataGenerator(
                TrainingDataType.FACE, data_path,
                debug=self.is_debug(),
                batch_size=self.batch_size,
                output_sample_types=[
                    [flags.WARPED_TRANSFORMED | flags.FULL_FACE | flags.MODE_BGR, 128],
                    [flags.TRANSFORMED | flags.FULL_FACE | flags.MODE_BGR, 128],
                    [flags.TRANSFORMED | flags.FULL_FACE | flags.MODE_M | flags.MASK_FULL, 128]],
                random_flip=True)

        self.set_training_data_generators(
            [make_generator(self.training_data_src_path),
             make_generator(self.training_data_dst_path)])
def onInitialize(self, **in_options):
    """Build the half-face 128px masked autoencoder as one combined model.

    A single `self.ae` model carries both the src and dst branches (shared
    encoder, separate decoders) so one `compile`/`fit` drives both; separate
    `K.function` views give cheap inference access to each branch.
    """
    keras = self.keras
    K = keras.backend

    # NOTE: the original table listed the key 4 twice ({... 4:2, 4:4 ...});
    # in a dict literal the later entry wins, so only 4:4 was ever effective.
    # The dead 4:2 entry has been removed — runtime behavior is unchanged.
    self.set_vram_batch_requirements(
        {2.5: 2, 3: 2, 4: 4, 5: 8, 6: 8, 7: 16, 8: 16, 9: 24, 10: 24, 11: 32, 12: 32, 13: 48})

    bgr_shape, mask_shape, self.encoder, self.decoder_src, self.decoder_dst = \
        self.Build(self.created_vram_gb)

    # Resume from previously stored weights unless this is a fresh run.
    if not self.is_first_run():
        self.encoder.load_weights(self.get_strpath_storage_for_file(self.encoderH5))
        self.decoder_src.load_weights(self.get_strpath_storage_for_file(self.decoder_srcH5))
        self.decoder_dst.load_weights(self.get_strpath_storage_for_file(self.decoder_dstH5))

    input_src_bgr = keras.layers.Input(bgr_shape)
    input_src_mask = keras.layers.Input(mask_shape)
    input_dst_bgr = keras.layers.Input(bgr_shape)
    input_dst_mask = keras.layers.Input(mask_shape)

    # Each decoder yields (reconstructed bgr, reconstructed mask).
    rec_src_bgr, rec_src_mask = self.decoder_src(self.encoder(input_src_bgr))
    rec_dst_bgr, rec_dst_mask = self.decoder_dst(self.encoder(input_dst_bgr))

    self.ae = keras.models.Model(
        [input_src_bgr, input_src_mask, input_dst_bgr, input_dst_mask],
        [rec_src_bgr, rec_src_mask, rec_dst_bgr, rec_dst_mask])

    if self.is_training_mode:
        self.ae, = self.to_multi_gpu_model_if_possible([self.ae, ])

    # Masked DSSIM on the bgr outputs, plain MAE on the mask outputs.
    self.ae.compile(
        optimizer=keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999),
        loss=[DSSIMMaskLossClass(self.tf)([input_src_mask]), 'mae',
              DSSIMMaskLossClass(self.tf)([input_dst_mask]), 'mae'])

    # Inference views that bypass the combined training model.
    self.src_view = K.function([input_src_bgr], [rec_src_bgr, rec_src_mask])
    self.dst_view = K.function([input_dst_bgr], [rec_dst_bgr, rec_dst_mask])

    if self.is_training_mode:
        from models import TrainingDataGenerator
        f = TrainingDataGenerator.SampleTypeFlags
        # Per generator: warped input, unwarped target, full mask (all 128px).
        self.set_training_data_generators([
            TrainingDataGenerator(
                TrainingDataType.FACE, self.training_data_src_path,
                debug=self.is_debug(), batch_size=self.batch_size,
                output_sample_types=[
                    [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 128],
                    [f.TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 128],
                    [f.TRANSFORMED | f.HALF_FACE | f.MODE_M | f.MASK_FULL, 128]],
                random_flip=True),
            TrainingDataGenerator(
                TrainingDataType.FACE, self.training_data_dst_path,
                debug=self.is_debug(), batch_size=self.batch_size,
                output_sample_types=[
                    [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 128],
                    [f.TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 128],
                    [f.TRANSFORMED | f.HALF_FACE | f.MODE_M | f.MASK_FULL, 128]],
                random_flip=True)])
def onInitialize(self, **in_options):
    """Build the 128px model with shared intermediates and common decoders.

    One encoder feeds two Intermediate networks (A/B); their concatenated
    outputs drive shared "common" decoders whose (x, m) pairs are merged into
    BW/RGB image heads plus a mask head. Three models are assembled:
    src (BW), src_RGB, and dst (RGB).
    """
    self.set_vram_batch_requirements({
        4.5: 4, 5: 4, 6: 8, 7: 12, 8: 16, 9: 20, 10: 24, 11: 24, 12: 32, 13: 48
    })
    ae_input_layer = self.keras.layers.Input(shape=(128, 128, 3))
    mask_layer = self.keras.layers.Input(shape=(128, 128, 1))  # same as output
    self.encoder = self.Encoder(ae_input_layer)
    self.decoderMask = self.DecoderMask()
    self.decoderCommonA = self.DecoderCommon()
    self.decoderCommonB = self.DecoderCommon()
    self.decoderRGB = self.DecoderRGB()
    self.decoderBW = self.DecoderBW()
    self.inter_A = self.Intermediate()
    self.inter_B = self.Intermediate()
    # Resume from previously stored weights unless this is a fresh run.
    if not self.is_first_run():
        self.encoder.load_weights(
            self.get_strpath_storage_for_file(self.encoderH5))
        self.decoderMask.load_weights(
            self.get_strpath_storage_for_file(self.decoderMaskH5))
        self.decoderCommonA.load_weights(
            self.get_strpath_storage_for_file(self.decoderCommonAH5))
        self.decoderCommonB.load_weights(
            self.get_strpath_storage_for_file(self.decoderCommonBH5))
        self.decoderRGB.load_weights(
            self.get_strpath_storage_for_file(self.decoderRGBH5))
        self.decoderBW.load_weights(
            self.get_strpath_storage_for_file(self.decoderBWH5))
        self.inter_A.load_weights(
            self.get_strpath_storage_for_file(self.inter_AH5))
        self.inter_B.load_weights(
            self.get_strpath_storage_for_file(self.inter_BH5))
    code = self.encoder(ae_input_layer)
    A = self.inter_A(code)
    B = self.inter_B(code)
    inter_A_A = self.keras.layers.Concatenate()([A, A])
    inter_B_A = self.keras.layers.Concatenate()([B, A])
    # NOTE(review): both calls below are decoderCommonA on the same tensor,
    # so x1/x2 and m1/m2 are duplicate branches for the src (BW) model —
    # possibly intentional (keeps head input widths uniform with the mixed
    # A/B models below), but worth confirming against the original design.
    x1, m1 = self.decoderCommonA(inter_A_A)
    x2, m2 = self.decoderCommonA(inter_A_A)
    self.autoencoder_src = self.keras.models.Model(
        [ae_input_layer, mask_layer], [
            self.decoderBW(self.keras.layers.Concatenate()([x1, x2])),
            self.decoderMask(self.keras.layers.Concatenate()([m1, m2]))
        ])
    # src RGB variant: mixes the A and B common decoders on the A-A code.
    x1, m1 = self.decoderCommonA(inter_A_A)
    x2, m2 = self.decoderCommonB(inter_A_A)
    self.autoencoder_src_RGB = self.keras.models.Model(
        [ae_input_layer, mask_layer], [
            self.decoderRGB(self.keras.layers.Concatenate()([x1, x2])),
            self.decoderMask(self.keras.layers.Concatenate()([m1, m2]))
        ])
    # dst: same A+B decoder mix, but driven by the B-A code.
    x1, m1 = self.decoderCommonA(inter_B_A)
    x2, m2 = self.decoderCommonB(inter_B_A)
    self.autoencoder_dst = self.keras.models.Model(
        [ae_input_layer, mask_layer], [
            self.decoderRGB(self.keras.layers.Concatenate()([x1, x2])),
            self.decoderMask(self.keras.layers.Concatenate()([m1, m2]))
        ])
    if self.is_training_mode:
        self.autoencoder_src, self.autoencoder_dst = self.to_multi_gpu_model_if_possible(
            [self.autoencoder_src, self.autoencoder_dst])
    optimizer = self.keras.optimizers.Adam(lr=5e-5, beta_1=0.5, beta_2=0.999)
    # Masked DSSIM on the image head, MSE on the mask head.
    dssimloss = DSSIMMaskLossClass(self.tf)([mask_layer])
    self.autoencoder_src.compile(optimizer=optimizer, loss=[dssimloss, 'mse'])
    self.autoencoder_dst.compile(optimizer=optimizer, loss=[dssimloss, 'mse'])
    if self.is_training_mode:
        from models import TrainingDataGenerator
        f = TrainingDataGenerator.SampleTypeFlags
        # src trains on grayscale modes (GGG/G); dst trains on BGR —
        # presumably to match the BW vs RGB heads above. TODO confirm.
        self.set_training_data_generators([
            TrainingDataGenerator(
                TrainingDataType.FACE,
                self.training_data_src_path,
                debug=self.is_debug(),
                batch_size=self.batch_size,
                output_sample_types=[
                    [f.WARPED_TRANSFORMED | f.FULL_FACE | f.MODE_GGG, 128],
                    [f.TRANSFORMED | f.FULL_FACE | f.MODE_G, 128],
                    [
                        f.TRANSFORMED | f.FULL_FACE | f.MODE_M | f.MASK_FULL,
                        128
                    ], [f.TRANSFORMED | f.FULL_FACE | f.MODE_GGG, 128]
                ],
                random_flip=True),
            TrainingDataGenerator(
                TrainingDataType.FACE,
                self.training_data_dst_path,
                debug=self.is_debug(),
                batch_size=self.batch_size,
                output_sample_types=[[
                    f.WARPED_TRANSFORMED | f.FULL_FACE | f.MODE_BGR, 128
                ], [f.TRANSFORMED | f.FULL_FACE | f.MODE_BGR, 128],
                    [
                        f.TRANSFORMED | f.FULL_FACE | f.MODE_M | f.MASK_FULL,
                        128
                    ]],
                random_flip=True)
        ])
def onInitialize(self, **in_options):
    """Build the two-stage 64px→256px model.

    Stage 1 (`ae64`): a classic shared-encoder / dual-decoder autoencoder at
    64px for A (src) and B (dst). Stage 2 (`ae256`): an upscaler trained on A
    only; at inference a B face is first mapped through the A 64px decoder and
    then upscaled (`BA256_view`).
    """
    tf = self.tf  # NOTE(review): unused local — only self.tf is referenced below.
    keras = self.keras
    K = keras.backend
    self.set_vram_batch_requirements({
        3.5: 8, 4: 8, 5: 12, 6: 16, 7: 24, 8: 32, 9: 48
    })
    # Enforce a floor on batch size regardless of the VRAM table.
    if self.batch_size < 4:
        self.batch_size = 4
    img_shape64, img_shape256, self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256 = self.Build(
    )
    # Resume from previously stored weights unless this is a fresh run.
    if not self.is_first_run():
        self.encoder64.load_weights(
            self.get_strpath_storage_for_file(self.encoder64H5))
        self.decoder64_src.load_weights(
            self.get_strpath_storage_for_file(self.decoder64_srcH5))
        self.decoder64_dst.load_weights(
            self.get_strpath_storage_for_file(self.decoder64_dstH5))
        self.encoder256.load_weights(
            self.get_strpath_storage_for_file(self.encoder256H5))
        self.decoder256.load_weights(
            self.get_strpath_storage_for_file(self.decoder256H5))
    # NOTE(review): the sub-networks are converted for multi-GPU here AND the
    # assembled ae64/ae256 models are converted again below — confirm the
    # double conversion is intended.
    if self.is_training_mode:
        self.encoder64, self.decoder64_src, self.decoder64_dst, self.encoder256, self.decoder256 = self.to_multi_gpu_model_if_possible(
            [
                self.encoder64, self.decoder64_src, self.decoder64_dst,
                self.encoder256, self.decoder256
            ])
    input_A_warped64 = keras.layers.Input(img_shape64)
    input_B_warped64 = keras.layers.Input(img_shape64)
    A_rec64 = self.decoder64_src(self.encoder64(input_A_warped64))
    B_rec64 = self.decoder64_dst(self.encoder64(input_B_warped64))
    self.ae64 = self.keras.models.Model(
        [input_A_warped64, input_B_warped64], [A_rec64, B_rec64])
    if self.is_training_mode:
        self.ae64, = self.to_multi_gpu_model_if_possible([
            self.ae64,
        ])
    self.ae64.compile(
        optimizer=self.keras.optimizers.Adam(lr=5e-5, beta_1=0.5,
                                             beta_2=0.999),
        loss=[DSSIMLossClass(self.tf)(), DSSIMLossClass(self.tf)()])
    # Inference views for the 64px stage.
    self.A64_view = K.function([input_A_warped64], [A_rec64])
    self.B64_view = K.function([input_B_warped64], [B_rec64])
    # The 64px Input placeholders are deliberately rebound here: the 256px
    # stage gets its own fresh graph inputs.
    input_A_warped64 = keras.layers.Input(img_shape64)
    input_A_target256 = keras.layers.Input(img_shape256)  # NOTE(review): unused below — verify.
    A_rec256 = self.decoder256(self.encoder256(input_A_warped64))
    input_B_warped64 = keras.layers.Input(img_shape64)
    # B → A-decoder at 64px, then upscaled to 256px (the face-swap path).
    BA_rec64 = self.decoder64_src(self.encoder64(input_B_warped64))
    BA_rec256 = self.decoder256(self.encoder256(BA_rec64))
    # ae256 is trained on the A branch only; the BA path is inference-only.
    self.ae256 = self.keras.models.Model([input_A_warped64], [A_rec256])
    if self.is_training_mode:
        self.ae256, = self.to_multi_gpu_model_if_possible([
            self.ae256,
        ])
    self.ae256.compile(optimizer=self.keras.optimizers.Adam(lr=5e-5,
                                                            beta_1=0.5,
                                                            beta_2=0.999),
                       loss=[DSSIMLossClass(self.tf)()])
    # Inference views for the 256px stage.
    self.A256_view = K.function([input_A_warped64], [A_rec256])
    self.BA256_view = K.function([input_B_warped64], [BA_rec256])
    if self.is_training_mode:
        from models import TrainingDataGenerator
        f = TrainingDataGenerator.SampleTypeFlags
        # NOTE(review): unlike the other models in this file, no random_flip
        # is passed here (generator default applies) — confirm intended.
        self.set_training_data_generators([
            TrainingDataGenerator(
                TrainingDataType.FACE,
                self.training_data_src_path,
                debug=self.is_debug(),
                batch_size=self.batch_size,
                output_sample_types=[
                    [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
                    [f.TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
                    [f.TRANSFORMED | f.FULL_FACE | f.MODE_BGR, 256],
                    [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 64],
                    [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 256]
                ]),
            TrainingDataGenerator(
                TrainingDataType.FACE,
                self.training_data_dst_path,
                debug=self.is_debug(),
                batch_size=self.batch_size,
                output_sample_types=[
                    [f.WARPED_TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
                    [f.TRANSFORMED | f.HALF_FACE | f.MODE_BGR, 64],
                    [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 64],
                    [f.SOURCE | f.HALF_FACE | f.MODE_BGR, 256]
                ])
        ])