def __init__(self, input, conditions, labels, cfg):
    """Build the discriminator training program for a conditional GAN.

    Clones the default main program and, inside it, wires the
    discriminator network, its sigmoid cross-entropy loss, and an Adam
    optimizer restricted to discriminator parameters.

    Args:
        input: batch of images fed to the discriminator.
        conditions: conditioning inputs for the CGAN discriminator.
        labels: real/fake target tensor for the sigmoid loss.
        cfg: config object; this method reads cfg.batch_size and
            cfg.learning_rate.
    """
    self.program = fluid.default_main_program().clone()
    with fluid.program_guard(self.program):
        model = CGAN_model(cfg.batch_size)
        d_logit = model.network_D(input, conditions, name="D")
        # Mean binary cross-entropy between discriminator logits and
        # the supplied real/fake labels.
        self.d_loss = fluid.layers.reduce_mean(
            fluid.layers.sigmoid_cross_entropy_with_logits(
                x=d_logit, label=labels))
        # Restrict the optimizer to discriminator parameters only
        # (names prefixed "D"), so generator weights are untouched.
        # Renamed from `vars`, which shadowed the builtin.
        d_param_names = [
            var.name for var in self.program.list_vars()
            if fluid.io.is_parameter(var) and var.name.startswith("D")
        ]
        optimizer = fluid.optimizer.Adam(
            learning_rate=cfg.learning_rate, beta1=0.5, name="net_D")
        optimizer.minimize(self.d_loss, parameter_list=d_param_names)
def __init__(self, input, conditions, cfg):
    """Build the generator training program for a conditional GAN.

    Clones the default main program, builds the generator, keeps a
    separate inference-only clone (before the discriminator is
    attached), then wires the adversarial loss and an Adam optimizer
    restricted to generator parameters.

    Args:
        input: noise/input batch fed to the generator.
        conditions: conditioning inputs for the CGAN generator.
        cfg: config object; this method reads cfg.learning_rate.
            NOTE(review): CGAN_model() is constructed without
            cfg.batch_size here, unlike the discriminator trainer —
            confirm the constructor's default is intended.
    """
    self.program = fluid.default_main_program().clone()
    with fluid.program_guard(self.program):
        model = CGAN_model()
        self.fake = model.network_G(input, conditions, name="G")
        # Clone BEFORE attaching the discriminator so inference runs
        # only the generator graph.
        self.infer_program = self.program.clone()
        d_fake = model.network_D(self.fake, conditions, name="D")
        # The generator is trained against "real" (1.0) targets: it
        # succeeds when the discriminator scores its fakes as real.
        fake_labels = fluid.layers.fill_constant_batch_size_like(
            input=input, dtype='float32', shape=[-1, 1], value=1.0)
        self.g_loss = fluid.layers.reduce_mean(
            fluid.layers.sigmoid_cross_entropy_with_logits(
                x=d_fake, label=fake_labels))
        # Restrict the optimizer to generator parameters only (names
        # prefixed "G"), leaving discriminator weights frozen.
        # Renamed from `vars`, which shadowed the builtin.
        g_param_names = [
            var.name for var in self.program.list_vars()
            if fluid.io.is_parameter(var) and var.name.startswith("G")
        ]
        optimizer = fluid.optimizer.Adam(
            learning_rate=cfg.learning_rate, beta1=0.5, name="net_G")
        optimizer.minimize(self.g_loss, parameter_list=g_param_names)