def __init__(self, opts):
    super(DRIT, self).__init__()

    # parameters
    lr = 0.0001
    lr_dcontent = lr / 2.5
    self.nz = 8
    self.concat = opts.concat
    self.no_ms = opts.no_ms

    # discriminators
    if opts.dis_scale > 1:
        self.disA = networks.MultiScaleDis(opts.input_dim_a, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB = networks.MultiScaleDis(opts.input_dim_b, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disA2 = networks.MultiScaleDis(opts.input_dim_a, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB2 = networks.MultiScaleDis(opts.input_dim_b, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
    else:
        self.disA = networks.Dis(opts.input_dim_a, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB = networks.Dis(opts.input_dim_b, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disA2 = networks.Dis(opts.input_dim_a, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB2 = networks.Dis(opts.input_dim_b, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
    self.disContent = networks.Dis_content()

    # encoders
    self.enc_c = networks.E_content(opts.input_dim_a, opts.input_dim_b)
    if self.concat:
        self.enc_a = networks.E_attr_concat(opts.input_dim_a, opts.input_dim_b, self.nz,
                                            norm_layer=None, nl_layer=networks.get_non_linearity(layer_type='lrelu'))
    else:
        self.enc_a = networks.E_attr(opts.input_dim_a, opts.input_dim_b, self.nz)

    # generator
    if self.concat:
        self.gen = networks.G_concat(opts.input_dim_a, opts.input_dim_b, nz=self.nz)
    else:
        self.gen = networks.G(opts.input_dim_a, opts.input_dim_b, nz=self.nz)

    # optimizers
    self.disA_opt = torch.optim.Adam(self.disA.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disB_opt = torch.optim.Adam(self.disB.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disA2_opt = torch.optim.Adam(self.disA2.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disB2_opt = torch.optim.Adam(self.disB2.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disContent_opt = torch.optim.Adam(self.disContent.parameters(), lr=lr_dcontent, betas=(0.5, 0.999), weight_decay=0.0001)
    self.enc_c_opt = torch.optim.Adam(self.enc_c.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.enc_a_opt = torch.optim.Adam(self.enc_a.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.gen_opt = torch.optim.Adam(self.gen.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)

    # Setup the loss function for training
    self.criterionL1 = torch.nn.L1Loss()
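
# --- Illustrative construction sketch (not part of the source) ---
# DRIT.__init__ above only reads a small set of fields from `opts`
# (input_dim_a/b, dis_scale, dis_norm, dis_spectral_norm, concat, no_ms),
# so an argparse-style namespace is enough to instantiate the model. The
# values below are placeholders, not the repository's defaults, and the
# sketch assumes the DRIT class and the `networks` module are importable.
from argparse import Namespace

drit_opts = Namespace(
    input_dim_a=3, input_dim_b=3,   # channels of the two image domains
    dis_scale=3,                    # >1 selects MultiScaleDis, otherwise Dis
    dis_norm='None',                # normalization used inside the discriminators
    dis_spectral_norm=False,        # toggle spectral normalization in the discriminators
    concat=1,                       # use the concat-style attribute encoder / generator
    no_ms=False,                    # stored on the model as self.no_ms
)
# model = DRIT(drit_opts)           # assumes DRIT and networks are defined/imported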
def __init__(self, opts):
    super(UID, self).__init__()

    # parameters
    lr = opts.lr
    self.nz = 8
    lr_dcontent = lr / 2.5
    self.concat = opts.concat
    self.lambdaB = opts.lambdaB
    self.lambdaI = opts.lambdaI
    self.silent_log = True

    # discriminators
    if opts.dis_scale > 1:
        self.disA = networks.MultiScaleDis(opts.input_dim_a, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB = networks.MultiScaleDis(opts.input_dim_b, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disA2 = networks.MultiScaleDis(opts.input_dim_a, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB2 = networks.MultiScaleDis(opts.input_dim_b, opts.dis_scale, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
    else:
        self.disA = networks.Dis(opts.input_dim_a, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB = networks.Dis(opts.input_dim_b, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disA2 = networks.Dis(opts.input_dim_a, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
        self.disB2 = networks.Dis(opts.input_dim_b, norm=opts.dis_norm, sn=opts.dis_spectral_norm)

    # discriminator for domain invariant content embedding
    # self.disContent = networks.Dis(opts.input_dim_a, n_layer=64, norm=opts.dis_norm, sn=opts.dis_spectral_norm)
    self.disContent = networks.Dis_content()

    # encoders
    self.enc_c = networks.E_content(opts.input_dim_a, opts.input_dim_b)
    if self.concat:
        self.enc_a = networks.E_attr_concat(opts.input_dim_b, self.nz,
                                            norm_layer=None, nl_layer=networks.get_non_linearity(layer_type='lrelu'))
    else:
        self.enc_a = networks.E_attr(opts.input_dim_a, opts.input_dim_b, self.nz)

    # generator
    if self.concat:
        self.gen = networks.G_concat(opts.input_dim_a, opts.input_dim_b, nz=self.nz)
    else:
        self.gen = networks.G(opts.input_dim_a, opts.input_dim_b, nz=self.nz)

    # optimizers
    self.disA_opt = torch.optim.Adam(self.disA.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disB_opt = torch.optim.Adam(self.disB.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disA2_opt = torch.optim.Adam(self.disA2.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disB2_opt = torch.optim.Adam(self.disB2.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.disContent_opt = torch.optim.Adam(self.disContent.parameters(), lr=lr_dcontent, betas=(0.5, 0.999), weight_decay=0.0001)
    self.enc_c_opt = torch.optim.Adam(self.enc_c.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.enc_a_opt = torch.optim.Adam(self.enc_a.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)
    self.gen_opt = torch.optim.Adam(self.gen.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.0001)

    # Setup the loss function for training
    self.criterionL1 = torch.nn.L1Loss()
    self.criterionL2 = torch.nn.MSELoss()
    if opts.percep == 'default':
        self.perceptualLoss = networks.PerceptualLoss(nn.MSELoss(), opts.gpu, opts.percp_layer)
    elif opts.percep == 'face':
        self.perceptualLoss = networks.PerceptualLoss16(nn.MSELoss(), opts.gpu, opts.percp_layer)
    else:
        self.perceptualLoss = networks.MultiPerceptualLoss(nn.MSELoss(), opts.gpu)
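
# --- Illustrative construction sketch (not part of the source) ---
# UID.__init__ above additionally reads lr, lambdaB, lambdaI, percep,
# percp_layer and gpu from `opts`. As before, this namespace is only a
# placeholder sketch with illustrative values, not the repository's
# defaults; it assumes the UID class and the `networks` module are
# importable.
from argparse import Namespace

uid_opts = Namespace(
    input_dim_a=3, input_dim_b=3,   # channels of the two image domains
    dis_scale=3,                    # >1 selects MultiScaleDis, otherwise Dis
    dis_norm='None',
    dis_spectral_norm=False,
    concat=1,                       # concat-style attribute encoder / generator
    lr=1e-4,                        # base learning rate; the content discriminator uses lr / 2.5
    lambdaB=1.0, lambdaI=1.0,       # loss weights stored on the model (placeholder values)
    percep='default',               # 'default' | 'face' | anything else -> MultiPerceptualLoss
    percp_layer=14,                 # forwarded to the perceptual loss (placeholder value)
    gpu=0,                          # device id handed to the perceptual loss
)
# model = UID(uid_opts)             # assumes UID and networks are defined/imported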