def _get_optimizer(self):
    """Return an Adam optimizer driven by a non-trainable `learning_rate` variable.

    The variable form (rather than a constant) lets StatMonitorParamSetter
    halve the rate at runtime; betas 0.5/0.9 follow the GAN-training convention.
    """
    lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
    return tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=32, default_z_dim=64)
    if args.sample:
        # Sampling mode: generate images from a trained checkpoint.
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        logger.auto_set_dir()
        input_queue = QueueInput(DCGAN.get_data())  # avoid shadowing builtin `input`
        net = Model()
        num_towers = max(get_nr_gpu(), 1)
        if num_towers == 1:
            trainer = GANTrainer(input_queue, net)
        else:
            trainer = MultiGPUGANTrainer(num_towers, input_queue, net)
        trainer.train_with_defaults(
            callbacks=[
                ModelSaver(),
                # Halve the LR whenever the monitored 'measure' stat stalls.
                StatMonitorParamSetter(
                    'learning_rate', 'measure', lambda x: x * 0.5, 0, 10)
            ],
            session_init=SaverRestore(args.load) if args.load else None,
            steps_per_epoch=500,
            max_epoch=400)
# NOTE(review): the next two statements are the tail of an enclosing callback
# method (test-set visualization loop) whose `def` lies outside this chunk —
# their original indentation is unknown; confirm against the full file.
self.trainer.monitors.put_image('testB-{}'.format(idx), vizB)
idx += 1


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data', required=True,
        help='the image directory. should contain trainA/trainB/testA/testB')
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()

    logger.auto_set_dir()
    dataflow = get_data(args.data)
    dataflow = PrintData(dataflow)  # log datapoint shapes/types for sanity
    data = QueueInput(dataflow)

    GANTrainer(data, Model()).train_with_defaults(
        callbacks=[
            ModelSaver(),
            # Constant LR for 100 epochs, then linear decay to 0 by epoch 200.
            ScheduledHyperParamSetter(
                'learning_rate', [(100, 2e-4), (200, 0)], interp='linear'),
            PeriodicTrigger(VisualizeTestSet(), every_k_epochs=3),
        ],
        max_epoch=195,
        steps_per_epoch=data.size(),
        session_init=SaverRestore(args.load) if args.load else None
    )
# NOTE(review): this chunk begins mid-function — `parser` is constructed, and
# `sample`/`get_data`/`Model` are defined, outside this view. The `global args`
# statement implies these lines originally sit inside a function body.
parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
parser.add_argument('--load', help='load model')
parser.add_argument('--sample', action='store_true', help='run sampling')
parser.add_argument('--data', help='Image directory', required=True)
parser.add_argument('--mode', choices=['AtoB', 'BtoA'], default='AtoB')
parser.add_argument('-b', '--batch', type=int, default=1)
global args
args = parser.parse_args()

if args.gpu:
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
BATCH = args.batch

if args.sample:
    # Sampling requires a trained checkpoint.
    assert args.load
    sample(args.data, args.load)
else:
    logger.auto_set_dir()
    data = QueueInput(get_data())
    GANTrainer(data, Model()).train_with_defaults(
        callbacks=[
            PeriodicTrigger(ModelSaver(), every_k_epochs=3),
            # Drop LR to 1e-4 at epoch 200.
            ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)])
        ],
        steps_per_epoch=data.size(),
        max_epoch=300,
        session_init=SaverRestore(args.load) if args.load else None
    )
# NOTE(review): collapsed, whitespace-mangled chunk of a conditional-GAN
# sampling/training script. It begins mid-call — the keyword arguments and
# closing ')' belong to a PredictConfig(...) constructor whose opening lies
# outside this view — so the code cannot be safely reformatted in place.
# Content: builds a predictor over (label, z) pairs, tiles 10x10 generated
# patches into an 800x800 preview, then the __main__ block parses --gpu/
# --load/--sample flags and either samples or trains with GANTrainer.
input_names=['label', 'z'], output_names=['gen/gen']) ds = MapData(RandomZData((100, 100)), lambda dp: [np.arange(100) % 10, dp[0]]) pred = SimpleDatasetPredictor(pred, ds) for o in pred.get_result(): o = o[0] * 255.0 viz = stack_patches(o, nr_row=10, nr_col=10) viz = cv2.resize(viz, (800, 800)) interactive_imshow(viz) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') parser.add_argument('--sample', action='store_true') args = parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.sample: sample(args.load) else: logger.auto_set_dir() GANTrainer(QueueInput(get_data()), Model()).train_with_defaults( callbacks=[ModelSaver()], steps_per_epoch=500, max_epoch=100, session_init=SaverRestore(args.load) if args.load else None)
# NOTE(review): collapsed chunk of a DCGAN sampling/training script. It begins
# mid-call (keyword args + stray ')' of a PredictConfig(...) whose opening is
# outside this view), so in-place reformatting is unsafe. Content: predicts
# generator output from random z, rescales from [-1,1] to pixel range, flips
# BGR<->RGB via o[:, :, :, ::-1], and shows a 10x10 patch grid; the __main__
# block uses the older TrainConfig/get_config + GANTrainer(config) API.
# The `zs` unpacked from o[1] is never used afterwards in this view.
model=Model(), input_names=['z'], output_names=['gen/gen', 'z']) pred = SimpleDatasetPredictor(pred, RandomZData((100, 100))) for o in pred.get_result(): o, zs = o[0] + 1, o[1] o = o * 128.0 o = o[:, :, :, ::-1] viz = next(build_patch_list(o, nr_row=10, nr_col=10, viz=True)) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') parser.add_argument('--sample', action='store_true', help='run sampling') parser.add_argument( '--data', help='`image_align_celeba` directory of the celebA dataset') args = parser.parse_args() use_global_argument(args) if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.sample: sample(args.load) else: assert args.data config = get_config() if args.load: config.session_init = SaverRestore(args.load) GANTrainer(config).train()
# NOTE(review): collapsed chunk beginning mid-function — the bare `return opt`
# belongs to an optimizer method whose `def` (and the `lr` variable it uses)
# lie outside this view, so reformatting in place would be guesswork.
# Content: Adam(beta1=0.5, beta2=0.9) optimizer tail, then a __main__ block
# that builds a TrainConfig (ModelSaver + StatMonitorParamSetter halving the
# LR on a stalled 'measure' stat), picks GANTrainer or MultiGPUGANTrainer by
# GPU count, and calls trainer.train_with_config(config).
opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9) return opt if __name__ == '__main__': args = DCGAN.get_args() if args.sample: DCGAN.sample(Model(), args.load, 'gen/conv4.3/output') else: assert args.data logger.auto_set_dir() config = TrainConfig( callbacks=[ ModelSaver(), StatMonitorParamSetter('learning_rate', 'measure', lambda x: x * 0.5, 0, 10) ], steps_per_epoch=500, max_epoch=400, session_init=SaverRestore(args.load) if args.load else None, ) input = QueueInput(DCGAN.get_data(args.data)) model = Model() nr_tower = max(get_nr_gpu(), 1) if nr_tower == 1: trainer = GANTrainer(input, model) else: trainer = MultiGPUGANTrainer(nr_tower, input, model) trainer.train_with_config(config)
# Commented-out sanity check + visualization for latent-vector arithmetic
# (w_smile - w_neutral + m_neutral == m_smile), kept for reference:
# assert np.all(
#     dic['w_smile'] - dic['w_neutral']
#     + dic['m_neutral'] == dic['m_smile'])
# imgs = []
# for z in ['w_neutral', 'w_smile', 'm_neutral', 'm_smile']:
#     z = dic[z]
#     img = func([[z]])[0][0][:, :, ::-1]
#     img = (img + 1) * 128
#     imgs.append(img)
# viz = next(build_patch_list(imgs, nr_row=1, nr_col=4, viz=True))

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    parser.add_argument('--sample', action='store_true', help='run sampling')
    parser.add_argument(
        '--data', help='`image_align_celeba` directory of the celebA dataset')
    args = parser.parse_args()
    use_global_argument(args)
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.sample:
        sample(args.load)
    else:
        # Training requires the celebA image directory.
        assert args.data
        config = get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        # g_vs_d=1: one generator step per discriminator step.
        # NOTE(review): semantics of g_vs_d inferred from the name — confirm
        # against the GANTrainer definition.
        GANTrainer(config, g_vs_d=1).train()
# NOTE(review): collapsed chunk, near-duplicate of the earlier conditional-GAN
# script but using the newer SmartInit(args.load) API for session restore.
# It begins mid-call — `output_names=...)` closes a PredictConfig(...) whose
# opening lies outside this view — so in-place reformatting is unsafe.
# Content: predictor over (label, z), 10x10 patch grid resized to 800x800 and
# shown interactively; __main__ parses --gpu/--load/--sample and either
# samples or trains with GANTrainer.train_with_defaults.
output_names=['gen/gen']) ds = MapData(RandomZData((100, 100)), lambda dp: [np.arange(100) % 10, dp[0]]) pred = SimpleDatasetPredictor(pred, ds) for o in pred.get_result(): o = o[0] * 255.0 viz = stack_patches(o, nr_row=10, nr_col=10) viz = cv2.resize(viz, (800, 800)) interactive_imshow(viz) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.') parser.add_argument('--load', help='load model') parser.add_argument('--sample', action='store_true') args = parser.parse_args() if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu if args.sample: sample(args.load) else: logger.auto_set_dir() GANTrainer(QueueInput(get_data()), Model()).train_with_defaults( callbacks=[ModelSaver()], steps_per_epoch=500, max_epoch=100, session_init=SmartInit(args.load), )
# NOTE(review): collapsed chunk, near-duplicate of the first DCGAN script in
# this file. It begins mid-call — `initializer=1e-4, trainable=False)` closes
# a tf.get_variable('learning_rate', ...) call whose opening lies outside this
# view — and contains a bare `return opt` from the unseen enclosing method, so
# in-place reformatting is unsafe. Content: Adam(0.5, 0.9) optimizer tail,
# then a __main__ block choosing GANTrainer vs MultiGPUGANTrainer by GPU
# count and training with ModelSaver + stat-driven LR halving.
initializer=1e-4, trainable=False) opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9) return opt if __name__ == '__main__': args = DCGAN.get_args(default_batch=32, default_z_dim=64) if args.sample: DCGAN.sample(Model(), args.load, 'gen/conv4.3/output') else: logger.auto_set_dir() input = QueueInput(DCGAN.get_data()) model = Model() nr_tower = max(get_nr_gpu(), 1) if nr_tower == 1: trainer = GANTrainer(input, model) else: trainer = MultiGPUGANTrainer(nr_tower, input, model) trainer.train_with_defaults( callbacks=[ ModelSaver(), StatMonitorParamSetter('learning_rate', 'measure', lambda x: x * 0.5, 0, 10) ], session_init=SaverRestore(args.load) if args.load else None, steps_per_epoch=500, max_epoch=400)
def get_data():
    """Build the training dataflow: decode images from the LMDB and batch them.

    NOTE(review): the LMDB path is hard-coded to a site-specific location;
    consider making it a CLI argument.
    """
    lmdb = "/graphics/projects/scratch/student_datasets/cgpraktikum17/vat/resized_8.lmdb"
    flow = LMDBDataPoint(lmdb, shuffle=True)
    flow = ImageDecode(flow, index=0)
    flow = PrefetchDataZMQ(flow, 2)  # decode in 2 parallel processes
    flow = BatchData(flow, BATCH_SIZE)
    return flow


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()
    logger.auto_set_dir()
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    GANTrainer(input=QueueInput(get_data()),
               model=Model()).train_with_defaults(
        callbacks=[ModelSaver()],
        steps_per_epoch=1875,  # <- 30.000 / BATCH_SIZE
        max_epoch=20,  # <- 600.000 / 30.000
        session_init=SaverRestore(
            args.load, 'loaded', ignore=['global_step']) if args.load else None)
# NOTE(review): fragment — `self.collect_variables()` is the tail of a method
# whose `def` (and enclosing class) lies outside this chunk; its original
# indentation is unknown.
self.collect_variables()

def optimizer(self):
    """Adam optimizer with a non-trainable `learning_rate` variable.

    Keeping the rate in a TF variable lets StatMonitorParamSetter adjust it
    during training; betas 0.5/0.9 are the usual GAN settings.
    """
    lr = tf.get_variable('learning_rate', initializer=1e-4, trainable=False)
    return tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=32, default_z_dim=64)
    if args.sample:
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        logger.auto_set_dir()
        input_queue = QueueInput(DCGAN.get_data())
        net = Model()
        num_towers = max(get_num_gpu(), 1)
        # Newer API: GANTrainer handles multi-GPU itself via num_gpu.
        trainer = GANTrainer(input_queue, net, num_gpu=num_towers)
        trainer.train_with_defaults(
            callbacks=[
                ModelSaver(),
                # Halve the LR when the 'losses/measure' stat stalls.
                StatMonitorParamSetter('learning_rate', 'losses/measure',
                                       lambda x: x * 0.5, 0, 10)
            ],
            session_init=SmartInit(args.load),
            steps_per_epoch=500,
            max_epoch=400)
# NOTE(review): collapsed chunk beginning mid-function — the leading
# parser.add_argument / `return args` lines are the tail of an unseen
# `get_args()`-style function, so in-place reformatting is unsafe.
# Content: finishes CLI parsing (records args into a module-level `opt`,
# sets CUDA_VISIBLE_DEVICES), then the __main__ block loads DATA_INFO as
# JSON from the npz's 'info' entry, and either samples (args.sample > 0)
# or trains with GANTrainer using per-args steps_per_epoch/max_epoch.
parser.add_argument('--l2norm', type=float, default=0.00001) args = parser.parse_args() opt.use_argument(args) if args.gpu: os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu return args if __name__ == '__main__': args = get_args() assert args.data opt.DATA_INFO = json.loads(str(np.load(args.data)['info'])) if args.sample > 0: sample(args.sample, Model(), args.load, output_filename=args.output) else: logger.auto_set_dir(name=args.exp_name) GANTrainer( input=QueueInput(get_data(args.data)), model=Model()).train_with_defaults( callbacks=[ ModelSaver(), ], steps_per_epoch=args.steps_per_epoch, max_epoch=args.max_epoch, session_init=SaverRestore(args.load) if args.load else None )