def __init__(self, image_real, label_org, label_trg, cfg, step_per_epoch):
    self.program = fluid.default_main_program().clone()
    with fluid.program_guard(self.program):
        model = StarGAN_model()
        self.fake_img = model.network_G(
            image_real, label_trg, cfg, name="g_main")
        self.rec_img = model.network_G(
            self.fake_img, label_org, cfg, name="g_main")
        # Clone before losses and backward ops are added, so the clone can be
        # used for inference of the generator alone.
        self.infer_program = self.program.clone(for_test=False)
        # Cycle-reconstruction loss (L1 between real and reconstructed image).
        self.g_loss_rec = fluid.layers.reduce_mean(
            fluid.layers.abs(
                fluid.layers.elementwise_sub(x=image_real, y=self.rec_img)))
        self.pred_fake, self.cls_fake = model.network_D(
            self.fake_img, cfg, name="d_main")
        if cfg.gan_mode != 'wgan':
            raise NotImplementedError(
                "gan_mode {} is not supported! only wgan is supported".format(
                    cfg.gan_mode))
        # wgan adversarial loss for the generator.
        self.g_loss_fake = -1 * fluid.layers.mean(self.pred_fake)
        cls_shape = self.cls_fake.shape
        self.cls_fake = fluid.layers.reshape(
            self.cls_fake, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]])
        # Domain-classification loss on the generated images.
        self.g_loss_cls = fluid.layers.reduce_sum(
            fluid.layers.sigmoid_cross_entropy_with_logits(
                self.cls_fake, label_trg)) / cfg.batch_size
        self.g_loss = self.g_loss_fake + cfg.lambda_rec * self.g_loss_rec + self.g_loss_cls

        lr = cfg.g_lr
        vars = []
        for var in self.program.list_vars():
            if fluid.io.is_parameter(var) and var.name.startswith("g_"):
                vars.append(var.name)
        self.param = vars

        # Keep the learning rate constant for the first
        # (num_iters - num_iters_decay) iterations, then decay it every
        # lr_update_step steps.
        total_iters = step_per_epoch * cfg.epoch
        boundaries = [cfg.num_iters - cfg.num_iters_decay]
        values = [lr]
        for x in range(cfg.num_iters - cfg.num_iters_decay + 1, total_iters):
            if x % cfg.lr_update_step == 0:
                boundaries.append(x)
                lr -= (lr / float(cfg.num_iters_decay))
                values.append(lr)
        # piecewise_decay needs len(values) == len(boundaries) + 1.
        lr = values[-1]
        lr -= (lr / float(cfg.num_iters_decay))
        values.append(lr)

        optimizer = fluid.optimizer.Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=boundaries, values=values),
            beta1=0.5,
            beta2=0.999,
            name="net_G")
        optimizer.minimize(self.g_loss, parameter_list=vars)
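
# The piecewise-decay schedule above is rebuilt inline by each trainer. Below
# is a minimal sketch of a shared helper (hypothetical, not part of the
# original file) that produces the same (boundaries, values) pair from the
# cfg fields already used above (num_iters, num_iters_decay, lr_update_step).
def _build_lr_schedule(base_lr, total_iters, cfg):
    boundaries = [cfg.num_iters - cfg.num_iters_decay]
    values = [base_lr]
    lr = base_lr
    for step in range(cfg.num_iters - cfg.num_iters_decay + 1, total_iters):
        if step % cfg.lr_update_step == 0:
            boundaries.append(step)
            lr -= lr / float(cfg.num_iters_decay)
            values.append(lr)
    # piecewise_decay expects len(values) == len(boundaries) + 1,
    # so append one final decayed value.
    lr -= lr / float(cfg.num_iters_decay)
    values.append(lr)
    return boundaries, values

# Usage sketch:
#   boundaries, values = _build_lr_schedule(cfg.g_lr, step_per_epoch * cfg.epoch, cfg)
#   learning_rate = fluid.layers.piecewise_decay(boundaries=boundaries, values=values)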
def __init__(self, image_real, label_org, label_trg, cfg, step_per_epoch):
    self.program = fluid.default_main_program().clone()
    with fluid.program_guard(self.program):
        model = StarGAN_model()
        # Look up the 'image_real' variable inside the cloned program so the
        # gradient penalty is computed on the clone's own input variable.
        clone_image_real = []
        for b in self.program.blocks:
            if b.has_var('image_real'):
                clone_image_real = b.var('image_real')
                break
        self.fake_img = model.network_G(
            image_real, label_trg, cfg, name="g_main")
        self.pred_real, self.cls_real = model.network_D(
            image_real, cfg, name="d_main")
        self.pred_fake, _ = model.network_D(
            self.fake_img, cfg, name="d_main")
        cls_shape = self.cls_real.shape
        self.cls_real = fluid.layers.reshape(
            self.cls_real, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]])
        # Domain-classification loss on the real images.
        self.d_loss_cls = fluid.layers.reduce_sum(
            fluid.layers.sigmoid_cross_entropy_with_logits(
                self.cls_real, label_org)) / cfg.batch_size
        if cfg.gan_mode == "wgan":
            # wgan
            self.d_loss_fake = fluid.layers.mean(self.pred_fake)
            self.d_loss_real = -1 * fluid.layers.mean(self.pred_real)
            self.d_loss_gp = self.gradient_penalty(
                getattr(model, "network_D"),
                clone_image_real,
                self.fake_img,
                cfg=cfg,
                name="d_main")
            self.d_loss = self.d_loss_real + self.d_loss_fake + self.d_loss_cls + cfg.lambda_gp * self.d_loss_gp
            # d_loss_gp only exists in wgan mode, so mark it persistable here
            # rather than unconditionally below.
            self.d_loss_gp.persistable = True
        elif cfg.gan_mode == "lsgan":
            # lsgan
            ones = fluid.layers.fill_constant_batch_size_like(
                input=self.pred_real,
                shape=self.pred_real.shape,
                value=1,
                dtype='float32')
            self.d_loss_real = fluid.layers.mean(
                fluid.layers.square(
                    fluid.layers.elementwise_sub(
                        x=self.pred_real, y=ones)))
            self.d_loss_fake = fluid.layers.mean(
                fluid.layers.square(x=self.pred_fake))
            self.d_loss = self.d_loss_real + self.d_loss_fake + cfg.lambda_cls * self.d_loss_cls

        self.d_loss_real.persistable = True
        self.d_loss_fake.persistable = True
        self.d_loss_cls.persistable = True

        vars = []
        for var in self.program.list_vars():
            if fluid.io.is_parameter(var) and var.name.startswith("d_"):
                vars.append(var.name)
        self.param = vars

        # Piecewise learning-rate decay, same scheme as the generator trainer.
        total_iters = step_per_epoch * cfg.epoch
        boundaries = [cfg.num_iters - cfg.num_iters_decay]
        values = [cfg.d_lr]
        lr = cfg.d_lr
        for x in range(cfg.num_iters - cfg.num_iters_decay + 1, total_iters):
            if x % cfg.lr_update_step == 0:
                boundaries.append(x)
                lr -= (lr / float(cfg.num_iters_decay))
                values.append(lr)
        lr = values[-1]
        lr -= (lr / float(cfg.num_iters_decay))
        values.append(lr)

        optimizer = fluid.optimizer.Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=boundaries, values=values),
            beta1=0.5,
            beta2=0.999,
            name="net_D")
        optimizer.minimize(self.d_loss, parameter_list=vars)
def infer(args):
    data_shape = [-1, 3, args.image_size, args.image_size]
    input = fluid.layers.data(name='input', shape=data_shape, dtype='float32')
    label_org_ = fluid.layers.data(
        name='label_org_', shape=[args.c_dim], dtype='float32')
    label_trg_ = fluid.layers.data(
        name='label_trg_', shape=[args.c_dim], dtype='float32')
    image_name = fluid.layers.data(
        name='image_name', shape=[args.n_samples], dtype='int32')
    model_name = 'net_G'

    if args.model_net == 'CycleGAN':
        py_reader = fluid.io.PyReader(
            feed_list=[input, image_name],
            capacity=4,  ## batch_size * 4
            iterable=True,
            use_double_buffer=True)
        from network.CycleGAN_network import CycleGAN_model
        model = CycleGAN_model()
        if args.input_style == "A":
            fake = model.network_G(input, name="GA", cfg=args)
        elif args.input_style == "B":
            fake = model.network_G(input, name="GB", cfg=args)
        else:
            raise NotImplementedError(
                "Input with style [%s] is not supported." % args.input_style)
    elif args.model_net == 'Pix2pix':
        py_reader = fluid.io.PyReader(
            feed_list=[input, image_name],
            capacity=4,  ## batch_size * 4
            iterable=True,
            use_double_buffer=True)
        from network.Pix2pix_network import Pix2pix_model
        model = Pix2pix_model()
        fake = model.network_G(input, "generator", cfg=args)
    elif args.model_net == 'StarGAN':
        py_reader = fluid.io.PyReader(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)
        from network.StarGAN_network import StarGAN_model
        model = StarGAN_model()
        fake = model.network_G(input, label_trg_, name="g_main", cfg=args)
    elif args.model_net == 'STGAN':
        from network.STGAN_network import STGAN_model
        py_reader = fluid.io.PyReader(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)
        model = STGAN_model()
        fake, _ = model.network_G(
            input,
            label_org_,
            label_trg_,
            cfg=args,
            name='generator',
            is_test=True)
    elif args.model_net == 'AttGAN':
        from network.AttGAN_network import AttGAN_model
        py_reader = fluid.io.PyReader(
            feed_list=[input, label_org_, label_trg_, image_name],
            capacity=32,
            iterable=True,
            use_double_buffer=True)
        model = AttGAN_model()
        fake, _ = model.network_G(
            input,
            label_org_,
            label_trg_,
            cfg=args,
            name='generator',
            is_test=True)
    elif args.model_net == 'CGAN':
        noise = fluid.layers.data(
            name='noise', shape=[args.noise_size], dtype='float32')
        conditions = fluid.layers.data(
            name='conditions', shape=[1], dtype='float32')
        from network.CGAN_network import CGAN_model
        model = CGAN_model(args.n_samples)
        fake = model.network_G(noise, conditions, name="G")
    elif args.model_net == 'DCGAN':
        noise = fluid.layers.data(
            name='noise', shape=[args.noise_size], dtype='float32')
        from network.DCGAN_network import DCGAN_model
        model = DCGAN_model(args.n_samples)
        fake = model.network_G(noise, name="G")
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))

    def _compute_start_end(image_name):
        image_name_start = np.array(image_name)[0].astype('int32')
        image_name_end = image_name_start + args.n_samples - 1
        image_name_save = str(np.array(image_name)[0].astype('int32')) + '.jpg'
        print("read {}.jpg ~ {}.jpg".format(image_name_start, image_name_end))
        return image_name_save

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    for var in fluid.default_main_program().global_block().all_parameters():
        print(var.name)
    print(args.init_model + '/' + model_name)
    fluid.io.load_persistables(exe, os.path.join(args.init_model, model_name))
    print('load params done')
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    attr_names = args.selected_attrs.split(',')

    if args.model_net == 'AttGAN' or args.model_net == 'STGAN':
        test_reader = celeba_reader_creator(
            image_dir=args.dataset_dir,
            list_filename=args.test_list,
            args=args,
            mode="VAL")
        reader_test = test_reader.make_reader(return_name=True)
        py_reader.decorate_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        for data in py_reader():
            real_img, label_org, label_trg, image_name = data[0][
                'input'], data[0]['label_org_'], data[0]['label_trg_'], data[
                    0]['image_name']
            image_name_save = _compute_start_end(image_name)
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_trg))
                for j in range(len(label_trg_tmp)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                label_org_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, np.array(label_org)))
                label_trg_tmp = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, label_trg_tmp))
                if args.model_net == 'AttGAN':
                    for k in range(len(label_trg_tmp)):
                        label_trg_tmp[k][i] = label_trg_tmp[k][i] * 2.0
                tensor_label_org_ = fluid.LoDTensor()
                tensor_label_trg_ = fluid.LoDTensor()
                tensor_label_org_.set(label_org_tmp, place)
                tensor_label_trg_.set(label_trg_tmp, place)
                out = exe.run(
                    feed={
                        "input": real_img,
                        "label_org_": tensor_label_org_,
                        "label_trg_": tensor_label_trg_
                    },
                    fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(np.array(label_org)) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(
                os.path.join(args.output, "fake_img_" + image_name_save),
                ((images_concat + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'StarGAN':
        test_reader = celeba_reader_creator(
            image_dir=args.dataset_dir,
            list_filename=args.test_list,
            args=args,
            mode="VAL")
        reader_test = test_reader.make_reader(return_name=True)
        py_reader.decorate_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        for data in py_reader():
            real_img, label_org, label_trg, image_name = data[0][
                'input'], data[0]['label_org_'], data[0]['label_trg_'], data[
                    0]['image_name']
            image_name_save = _compute_start_end(image_name)
            real_img_temp = save_batch_image(np.array(real_img))
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(np.array(label_org))
                for j in range(len(np.array(label_org))):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                tensor_label_trg_ = fluid.LoDTensor()
                tensor_label_trg_.set(label_trg_tmp, place)
                out = exe.run(
                    feed={"input": real_img,
                          "label_trg_": tensor_label_trg_},
                    fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(np.array(label_org)) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(
                os.path.join(args.output, "fake_img_" + image_name_save),
                ((images_concat + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'Pix2pix' or args.model_net == 'CycleGAN':
        test_reader = reader_creator(
            image_dir=args.dataset_dir,
            list_filename=args.test_list,
            shuffle=False,
            batch_size=args.n_samples,
            mode="VAL")
        reader_test = test_reader.make_reader(args, return_name=True)
        py_reader.decorate_batch_generator(
            reader_test,
            places=fluid.cuda_places() if args.use_gpu else fluid.cpu_places())
        id2name = test_reader.id2name
        for data in py_reader():
            real_img, image_name = data[0]['input'], data[0]['image_name']
            image_name = id2name[np.array(image_name).astype('int32')[0]]
            print("read: ", image_name)
            fake_temp = exe.run(fetch_list=[fake.name],
                                feed={"input": real_img})
            fake_temp = np.squeeze(fake_temp[0]).transpose([1, 2, 0])
            input_temp = np.squeeze(np.array(real_img)[0]).transpose([1, 2, 0])
            imageio.imwrite(
                os.path.join(args.output, "fake_" + image_name),
                ((fake_temp + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'CGAN':
        noise_data = np.random.uniform(
            low=-1.0, high=1.0,
            size=[args.n_samples, args.noise_size]).astype('float32')
        label = np.random.randint(
            0, 9, size=[args.n_samples, 1]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        conditions_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        conditions_tensor.set(label, place)
        fake_temp = exe.run(
            fetch_list=[fake.name],
            feed={"noise": noise_tensor,
                  "conditions": conditions_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.n_samples, -1))
        fig = utility.plot(fake_image)
        plt.savefig(
            os.path.join(args.output, 'fake_cgan.png'), bbox_inches='tight')
        plt.close(fig)
    elif args.model_net == 'DCGAN':
        noise_data = np.random.uniform(
            low=-1.0, high=1.0,
            size=[args.n_samples, args.noise_size]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        fake_temp = exe.run(fetch_list=[fake.name],
                            feed={"noise": noise_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.n_samples, -1))
        fig = utility.plot(fake_image)
        plt.savefig(
            os.path.join(args.output, 'fake_dcgan.png'), bbox_inches='tight')
        plt.close(fig)
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))
def infer(args):
    data_shape = [-1, 3, args.image_size, args.image_size]
    input = fluid.layers.data(name='input', shape=data_shape, dtype='float32')
    label_org_ = fluid.layers.data(
        name='label_org_', shape=[args.c_dim], dtype='float32')
    label_trg_ = fluid.layers.data(
        name='label_trg_', shape=[args.c_dim], dtype='float32')
    model_name = 'net_G'

    if args.model_net == 'CycleGAN':
        from network.CycleGAN_network import CycleGAN_model
        model = CycleGAN_model()
        if args.input_style == "A":
            fake = model.network_G(input, name="GA", cfg=args)
        elif args.input_style == "B":
            fake = model.network_G(input, name="GB", cfg=args)
        else:
            raise NotImplementedError(
                "Input with style [%s] is not supported." % args.input_style)
    elif args.model_net == 'Pix2pix':
        from network.Pix2pix_network import Pix2pix_model
        model = Pix2pix_model()
        fake = model.network_G(input, "generator", cfg=args)
    elif args.model_net == 'StarGAN':
        from network.StarGAN_network import StarGAN_model
        model = StarGAN_model()
        fake = model.network_G(input, label_trg_, name="g_main", cfg=args)
    elif args.model_net == 'STGAN':
        from network.STGAN_network import STGAN_model
        model = STGAN_model()
        fake, _ = model.network_G(
            input,
            label_org_,
            label_trg_,
            cfg=args,
            name='generator',
            is_test=True)
    elif args.model_net == 'AttGAN':
        from network.AttGAN_network import AttGAN_model
        model = AttGAN_model()
        fake, _ = model.network_G(
            input,
            label_org_,
            label_trg_,
            cfg=args,
            name='generator',
            is_test=True)
    elif args.model_net == 'CGAN':
        noise = fluid.layers.data(
            name='noise', shape=[args.noise_size], dtype='float32')
        conditions = fluid.layers.data(
            name='conditions', shape=[1], dtype='float32')
        from network.CGAN_network import CGAN_model
        model = CGAN_model()
        fake = model.network_G(noise, conditions, name="G")
    elif args.model_net == 'DCGAN':
        noise = fluid.layers.data(
            name='noise', shape=[args.noise_size], dtype='float32')
        from network.DCGAN_network import DCGAN_model
        model = DCGAN_model()
        fake = model.network_G(noise, name="G")
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))

    # prepare environment
    place = fluid.CPUPlace()
    if args.use_gpu:
        place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    for var in fluid.default_main_program().global_block().all_parameters():
        print(var.name)
    print(args.init_model + '/' + model_name)
    fluid.io.load_persistables(exe, args.init_model + "/" + model_name)
    print('load params done')
    if not os.path.exists(args.output):
        os.makedirs(args.output)

    attr_names = args.selected_attrs.split(',')

    if args.model_net == 'AttGAN' or args.model_net == 'STGAN':
        test_reader = celeba_reader_creator(
            image_dir=args.dataset_dir,
            list_filename=args.test_list,
            batch_size=args.batch_size,
            drop_last=False,
            args=args)
        reader_test = test_reader.get_test_reader(
            args, shuffle=False, return_name=True)
        for data in zip(reader_test()):
            real_img, label_org, name = data[0]
            print("read {}".format(name))
            label_trg = copy.deepcopy(label_org)
            tensor_img = fluid.LoDTensor()
            tensor_label_org = fluid.LoDTensor()
            tensor_label_trg = fluid.LoDTensor()
            tensor_label_org_ = fluid.LoDTensor()
            tensor_label_trg_ = fluid.LoDTensor()
            tensor_img.set(real_img, place)
            tensor_label_org.set(label_org, place)
            real_img_temp = save_batch_image(real_img)
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(label_trg)
                for j in range(len(label_org)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg_tmp = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                label_org_ = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, label_org))
                label_trg_ = list(
                    map(lambda x: ((x * 2) - 1) * 0.5, label_trg_tmp))
                if args.model_net == 'AttGAN':
                    for k in range(len(label_org)):
                        label_trg_[k][i] = label_trg_[k][i] * 2.0
                tensor_label_org_.set(label_org_, place)
                tensor_label_trg.set(label_trg, place)
                tensor_label_trg_.set(label_trg_, place)
                out = exe.run(
                    feed={
                        "input": tensor_img,
                        "label_org_": tensor_label_org_,
                        "label_trg_": tensor_label_trg_
                    },
                    fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(label_org) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(args.output + "/fake_img_" + name[0],
                            ((images_concat + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'StarGAN':
        test_reader = celeba_reader_creator(
            image_dir=args.dataset_dir,
            list_filename=args.test_list,
            batch_size=args.batch_size,
            drop_last=False,
            args=args)
        reader_test = test_reader.get_test_reader(
            args, shuffle=False, return_name=True)
        for data in zip(reader_test()):
            real_img, label_org, name = data[0]
            print("read {}".format(name))
            tensor_img = fluid.LoDTensor()
            tensor_label_org = fluid.LoDTensor()
            tensor_img.set(real_img, place)
            tensor_label_org.set(label_org, place)
            real_img_temp = save_batch_image(real_img)
            images = [real_img_temp]
            for i in range(args.c_dim):
                label_trg_tmp = copy.deepcopy(label_org)
                for j in range(len(label_org)):
                    label_trg_tmp[j][i] = 1.0 - label_trg_tmp[j][i]
                    label_trg = check_attribute_conflict(
                        label_trg_tmp, attr_names[i], attr_names)
                tensor_label_trg = fluid.LoDTensor()
                tensor_label_trg.set(label_trg, place)
                out = exe.run(
                    feed={"input": tensor_img,
                          "label_trg_": tensor_label_trg},
                    fetch_list=[fake.name])
                fake_temp = save_batch_image(out[0])
                images.append(fake_temp)
            images_concat = np.concatenate(images, 1)
            if len(label_org) > 1:
                images_concat = np.concatenate(images_concat, 1)
            imageio.imwrite(args.output + "/fake_img_" + name[0],
                            ((images_concat + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'Pix2pix' or args.model_net == 'CycleGAN':
        for file in glob.glob(args.dataset_dir):
            print("read {}".format(file))
            image_name = os.path.basename(file)
            image = Image.open(file).convert('RGB')
            image = image.resize((256, 256), Image.BICUBIC)
            image = np.array(image).transpose([2, 0, 1]).astype('float32')
            # Normalize to [-1, 1].
            image = image / 255.0
            image = (image - 0.5) / 0.5
            data = image[np.newaxis, :]
            tensor = fluid.LoDTensor()
            tensor.set(data, place)
            fake_temp = exe.run(fetch_list=[fake.name], feed={"input": tensor})
            fake_temp = np.squeeze(fake_temp[0]).transpose([1, 2, 0])
            input_temp = np.squeeze(data).transpose([1, 2, 0])
            imageio.imwrite(args.output + "/fake_" + image_name,
                            ((fake_temp + 1) * 127.5).astype(np.uint8))
    elif args.model_net == 'CGAN':
        noise_data = np.random.uniform(
            low=-1.0, high=1.0,
            size=[args.batch_size, args.noise_size]).astype('float32')
        label = np.random.randint(
            0, 9, size=[args.batch_size, 1]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        conditions_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        conditions_tensor.set(label, place)
        fake_temp = exe.run(
            fetch_list=[fake.name],
            feed={"noise": noise_tensor,
                  "conditions": conditions_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.batch_size, -1))
        fig = utility.plot(fake_image)
        plt.savefig(args.output + '/fake_cgan.png', bbox_inches='tight')
        plt.close(fig)
    elif args.model_net == 'DCGAN':
        noise_data = np.random.uniform(
            low=-1.0, high=1.0,
            size=[args.batch_size, args.noise_size]).astype('float32')
        noise_tensor = fluid.LoDTensor()
        noise_tensor.set(noise_data, place)
        fake_temp = exe.run(fetch_list=[fake.name],
                            feed={"noise": noise_tensor})[0]
        fake_image = np.reshape(fake_temp, (args.batch_size, -1))
        fig = utility.plot(fake_image)
        plt.savefig(args.output + '/fake_dcgan.png', bbox_inches='tight')
        plt.close(fig)
    else:
        raise NotImplementedError("model_net {} is not supported".format(
            args.model_net))
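
# A minimal sketch (hypothetical helper, not in the original script) of how
# the loaded generator could also be exported with the Paddle 1.x inference
# API after fluid.io.load_persistables succeeds. It assumes the `fake` output
# variable built in infer() above; for the StarGAN/AttGAN/STGAN branches the
# feed_names list would also need the label layers ('label_org_', 'label_trg_').
def export_inference_model(exe, output_dir, fake_var, feed_names=("input", )):
    # Serializes the pruned inference graph plus parameters under
    # <output_dir>/inference_model for later deployment.
    fluid.io.save_inference_model(
        dirname=os.path.join(output_dir, "inference_model"),
        feeded_var_names=list(feed_names),
        target_vars=[fake_var],
        executor=exe,
        main_program=fluid.default_main_program())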
def __init__(self, image_real, label_org, label_trg, cfg, step_per_epoch):
    self.program = fluid.default_main_program().clone()
    with fluid.program_guard(self.program):
        model = StarGAN_model()
        # Declare the real-image input inside this program so the gradient
        # penalty is computed against the program's own variable.
        image_real = fluid.data(
            name='image_real', shape=image_real.shape, dtype='float32')
        self.fake_img = model.network_G(
            image_real, label_trg, cfg, name="g_main")
        self.pred_real, self.cls_real = model.network_D(
            image_real, cfg, name="d_main")
        self.pred_fake, _ = model.network_D(
            self.fake_img, cfg, name="d_main")
        cls_shape = self.cls_real.shape
        self.cls_real = fluid.layers.reshape(
            self.cls_real, [-1, cls_shape[1] * cls_shape[2] * cls_shape[3]])
        # Domain-classification loss on the real images.
        self.d_loss_cls = fluid.layers.reduce_sum(
            fluid.layers.sigmoid_cross_entropy_with_logits(
                self.cls_real, label_org)) / cfg.batch_size
        if cfg.gan_mode != 'wgan':
            raise NotImplementedError(
                "gan_mode {} is not supported! only wgan is supported".format(
                    cfg.gan_mode))
        # wgan adversarial losses plus gradient penalty.
        self.d_loss_fake = fluid.layers.mean(self.pred_fake)
        self.d_loss_real = -1 * fluid.layers.mean(self.pred_real)
        self.d_loss_gp = self.gradient_penalty(
            getattr(model, "network_D"),
            image_real,
            self.fake_img,
            cfg=cfg,
            name="d_main")
        self.d_loss = self.d_loss_real + self.d_loss_fake + self.d_loss_cls + cfg.lambda_gp * self.d_loss_gp

        vars = []
        for var in self.program.list_vars():
            if fluid.io.is_parameter(var) and var.name.startswith("d_"):
                vars.append(var.name)
        self.param = vars

        # Piecewise learning-rate decay, same scheme as the generator trainer.
        total_iters = step_per_epoch * cfg.epoch
        boundaries = [cfg.num_iters - cfg.num_iters_decay]
        values = [cfg.d_lr]
        lr = cfg.d_lr
        for x in range(cfg.num_iters - cfg.num_iters_decay + 1, total_iters):
            if x % cfg.lr_update_step == 0:
                boundaries.append(x)
                lr -= (lr / float(cfg.num_iters_decay))
                values.append(lr)
        lr = values[-1]
        lr -= (lr / float(cfg.num_iters_decay))
        values.append(lr)

        optimizer = fluid.optimizer.Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=boundaries, values=values),
            beta1=0.5,
            beta2=0.999,
            name="net_D")
        optimizer.minimize(self.d_loss, parameter_list=vars)
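
# The `gradient_penalty` helper called by the discriminator trainers above is
# not shown in this section. Below is a minimal sketch of a standard WGAN-GP
# penalty under the fluid static-graph API, written as an illustration rather
# than the repository's exact implementation: interpolate between real and
# fake samples, run the discriminator `f` on the interpolation, and penalize
# gradient norms that deviate from 1. Intended as a method of the same
# trainer class; `f` is `model.network_D` as passed at the call sites above.
def gradient_penalty(self, f, real, fake, cfg=None, name=None):
    # Per-sample interpolation weights in [0, 1], broadcast over the batch dim.
    alpha = fluid.layers.uniform_random_batch_size_like(
        input=real, shape=[real.shape[0]], min=0.0, max=1.0)
    inter = fluid.layers.elementwise_mul(fake - real, alpha, axis=0) + real
    inter.stop_gradient = False
    # Discriminator score on the interpolated images (classification head unused).
    pred, _ = f(inter, cfg, name=name)
    # d(pred)/d(inter): one gradient tensor with the same shape as `inter`.
    grad = fluid.gradients(pred, inter)[0]
    # L2 norm of the gradient per sample, then the (norm - 1)^2 penalty.
    norm = fluid.layers.sqrt(
        fluid.layers.reduce_sum(fluid.layers.square(grad), dim=[1, 2, 3]))
    return fluid.layers.reduce_mean(fluid.layers.square(norm - 1.0))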