Example #1
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.apply:
        apply(args.load, args.lowres, args.output)
    else:
        logger.auto_set_dir()

        if args.load:
            session_init = SaverRestore(args.load)
        else:
            assert os.path.isfile(args.vgg19)
            param_dict = dict(np.load(args.vgg19))
            param_dict = {'VGG19/' + name: value for name, value in six.iteritems(param_dict)}
            session_init = DictRestore(param_dict)

        nr_tower = max(get_num_gpu(), 1)
        data = QueueInput(get_data(args.data))
        model = Model()

        trainer = SeparateGANTrainer(data, model, d_period=3)

        trainer.train_with_defaults(
            callbacks=[
                ModelSaver(keep_checkpoint_every_n_hours=2)
            ],
            session_init=session_init,
            steps_per_epoch=len(data) // 4,
            max_epoch=300
        )
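Note: the d_period argument above controls how often the discriminator op runs relative to the generator op. Below is a minimal, self-contained sketch of one plausible reading (discriminator every d_period-th step, generator every g_period-th step); it is an illustration only, not tensorpack's SeparateGANTrainer implementation.

def alternating_schedule(num_steps, d_period=3, g_period=1):
    # Yield which ops would run at each global step under this assumption.
    for step in range(num_steps):
        ops = []
        if step % d_period == 0:
            ops.append('d_min')   # discriminator update
        if step % g_period == 0:
            ops.append('g_min')   # generator update
        yield step, ops

for step, ops in alternating_schedule(6, d_period=3):
    print(step, ops)   # D runs on steps 0 and 3; G runs on every step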
Example #2
        imgaug.Resize(64)]
    df = AugmentImageComponents(df, augs, (0, 1))
    df = BatchData(df, BATCH)
    df = PrefetchDataZMQ(df, 1)
    return df


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data', required=True,
        help='the img_align_celeba directory. should also contain list_attr_celeba.txt')
    parser.add_argument('--style-A', help='style of A', default='Male')
    parser.add_argument('--style-B', help='style of B, default to "not A"')
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()

    assert tf.test.is_gpu_available()
    logger.auto_set_dir()

    data = get_celebA_data(args.data, args.style_A, args.style_B)

    # train 1 D after 2 G
    SeparateGANTrainer(
        QueueInput(data), Model(), d_period=3).train_with_defaults(
        callbacks=[ModelSaver()],
        steps_per_epoch=300,
        max_epoch=250,
        session_init=SaverRestore(args.load) if args.load else None
    )
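Note: the head of this example (Resize, AugmentImageComponents, BatchData, PrefetchDataZMQ) is a standard tensorpack dataflow pipeline. A self-contained sketch with dummy image pairs in place of CelebA follows; BATCH and the dummy shapes are assumptions, while the dataflow classes are the ones used above.

import numpy as np
from tensorpack.dataflow import AugmentImageComponents, BatchData, DataFromList, imgaug

BATCH = 8  # assumed; the real value comes from the full script
pairs = [(np.random.randint(0, 255, (80, 80, 3), dtype='uint8'),
          np.random.randint(0, 255, (80, 80, 3), dtype='uint8')) for _ in range(32)]

df = DataFromList(pairs, shuffle=False)
df = AugmentImageComponents(df, [imgaug.Resize(64)], (0, 1))  # resize both components
df = BatchData(df, BATCH)
# PrefetchDataZMQ(df, 1) would add a background process, as in the example; omitted here.
df.reset_state()
for imgs_a, imgs_b in df:   # older tensorpack versions iterate via df.get_data() instead
    print(imgs_a.shape, imgs_b.shape)   # (8, 64, 64, 3) twice
    break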
Example #3
        gradients = tf.gradients(vec_interp, [interp])[0]
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def _get_optimizer(self):
        lr = symbolic_functions.get_scalar_var('learning_rate', 1e-4, summary=True)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


DCGAN.Model = Model


if __name__ == '__main__':
    args = DCGAN.get_args()
    if args.sample:
        DCGAN.sample(args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        config = DCGAN.get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        SeparateGANTrainer(config, g_period=6).train()
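Note: the top of this example computes the WGAN-GP penalty, i.e. the mean of (gradient-norm - 1)^2 added to the discriminator loss with weight 10. A NumPy illustration of the same arithmetic is below; the random array stands in for tf.gradients(vec_interp, [interp])[0] and the zero d_loss is a placeholder.

import numpy as np

gradients = np.random.randn(16, 64, 64, 3)                         # stand-in for dD(x_hat)/dx_hat
grad_norm = np.sqrt(np.sum(np.square(gradients), axis=(1, 2, 3)))  # per-sample L2 norm
gradient_penalty = np.mean(np.square(grad_norm - 1))               # mean((||grad|| - 1)^2)
gradients_rms = np.sqrt(np.mean(np.square(grad_norm)))             # the logged 'gradient_rms'
d_loss = 0.0                                                       # placeholder for the real d_loss
d_loss = d_loss + 10 * gradient_penalty                            # same weighting as above
print(gradient_penalty, gradients_rms, d_loss)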
Example #4
parser.add_argument(
    '--data', required=True,
    help='the img_align_celeba directory. should also contain list_attr_celeba.txt')
parser.add_argument('--load', help='load model')
args = parser.parse_args()

if __name__ == '__main__':
    logger.auto_set_dir()
    data = get_data(args.data)
    data = QueueInput(data)
    # train 1 D after 2 G
    SeparateGANTrainer(data, Model(), 2).train_with_defaults(
        callbacks=[
            PeriodicTrigger(ModelSaver(), every_k_epochs=20),
            PeriodicTrigger(VisualizeTestSet(args.data), every_k_epochs=3),
            ScheduledHyperParamSetter('learning_rate', [(150, 2e-4), (300, 0)],
                                      interp='linear')
        ],
        steps_per_epoch=1000,
        session_init=SaverRestore(args.load) if args.load else None)
    #SeparateGANTrainer(config, 2).train()
    # If you want to run across GPUs use code similar to below.
    #nr_gpu = get_nr_gpu()
    #config.nr_tower = max(get_nr_gpu(), 1)
    #if config.nr_tower == 1:
    #    GANTrainer(config).train()
    #else:
    #    MultiGPUGANTrainer(config).train()
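Note: ScheduledHyperParamSetter(..., interp='linear') above ramps the learning rate between the listed (epoch, value) points. The sketch below reproduces that interpolation in plain Python; it is a simplified stand-in, not tensorpack's implementation (in particular, clamping outside the listed range is an assumption of this sketch).

def linear_schedule(epoch, points):
    # points: sorted (epoch, value) pairs; clamp outside the listed range (assumption).
    if epoch <= points[0][0]:
        return points[0][1]
    if epoch >= points[-1][0]:
        return points[-1][1]
    for (e0, v0), (e1, v1) in zip(points, points[1:]):
        if e0 <= epoch <= e1:
            t = (epoch - e0) / float(e1 - e0)
            return v0 + t * (v1 - v0)

schedule = [(150, 2e-4), (300, 0)]
for e in (150, 225, 300):
    print(e, linear_schedule(e, schedule))   # 2e-4, 1e-4, 0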
Example #5
    # Running train or deploy
    if args.sample:
        # TODO
        # sample
        pass
    else:
        # Set up configuration
        # Set the logger directory
        logger.auto_set_dir()

        # SyncMultiGPUTrainer(config).train()
        nr_tower = max(get_nr_gpu(), 1)
        if nr_tower == 1:
            trainer = SeparateGANTrainer(data_set,
                                         model,
                                         g_period=4,
                                         d_period=1)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, data_set, model)
        trainer.train_with_defaults(
            callbacks=[
                # PeriodicTrigger(ModelSaver(), every_k_epochs=20),
                ClipCallback(),
                ScheduledHyperParamSetter('learning_rate', [(0, 2e-4),
                                                            (100, 1e-4),
                                                            (200, 2e-5),
                                                            (300, 1e-5),
                                                            (400, 2e-6),
                                                            (500, 1e-6)],
                                          interp='linear'),
                PeriodicTrigger(VisualizeRunner(), every_k_epochs=5),
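Note: the single- vs multi-GPU branch above keys off get_nr_gpu(). A hedged stand-in that merely counts devices listed in CUDA_VISIBLE_DEVICES is sketched below; it mimics the branching only and is not tensorpack's get_nr_gpu().

import os

def visible_gpu_count():
    devices = os.environ.get('CUDA_VISIBLE_DEVICES', '')
    return len([d for d in devices.split(',') if d.strip()])

nr_tower = max(visible_gpu_count(), 1)
trainer_name = 'SeparateGANTrainer' if nr_tower == 1 else 'MultiGPUGANTrainer'
print(nr_tower, trainer_name)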
Example #6
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = symbolic_functions.rms(gradients, 'gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1),
                                          name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty,
                           gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def optimizer(self):
        opt = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    assert get_tf_version_number() >= 1.4
    args = DCGAN.get_args(default_batch=64, default_z_dim=128)
    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()
        SeparateGANTrainer(QueueInput(
            DCGAN.get_data()), M, g_period=6).train_with_defaults(
                callbacks=[ModelSaver()],
                steps_per_epoch=300,
                max_epoch=200,
                session_init=SaverRestore(args.load) if args.load else None)
Example #7
            if not n.startswith('discrim/'):
                continue
            logger.info("Clip {}".format(n))
            ops.append(tf.assign(v, tf.clip_by_value(v, -0.01, 0.01)))
        self._op = tf.group(*ops, name='clip')

    def _trigger_step(self):
        self._op.run()


if __name__ == '__main__':
    args = DCGAN.get_args()

    if args.sample:
        DCGAN.sample(Model(), args.load)
    else:
        assert args.data
        logger.auto_set_dir()

        # The original code uses a different schedule, but this seems to work well.
        # Train 1 D after 2 G
        SeparateGANTrainer(
            input=QueueInput(DCGAN.get_data(args.data)),
            model=Model(),
            d_period=3).train_with_defaults(
            callbacks=[ModelSaver(), ClipCallback()],
            steps_per_epoch=500,
            max_epoch=200,
            session_init=SaverRestore(args.load) if args.load else None
        )
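Note: the ClipCallback shown at the top of this example implements WGAN weight clipping: every variable whose name starts with 'discrim/' is clipped to [-0.01, 0.01] after each step. A NumPy sketch of that operation with dummy parameters:

import numpy as np

params = {
    'discrim/conv0/W': np.random.randn(3, 3, 3, 64),
    'discrim/fc/W': np.random.randn(256, 1),
    'gen/deconv0/W': np.random.randn(3, 3, 64, 3),   # untouched: not under 'discrim/'
}
for name in params:
    if not name.startswith('discrim/'):
        continue
    params[name] = np.clip(params[name], -0.01, 0.01)  # same bounds as tf.clip_by_value above

print({n: round(float(np.abs(v).max()), 3) for n, v in params.items()})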
Example #8
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.apply:
        apply(args.load, args.lowres, args.output)
    else:
        logger.auto_set_dir()

        if args.load:
            session_init = SaverRestore(args.load)
        else:
            assert os.path.isfile(args.vgg19)
            param_dict = dict(np.load(args.vgg19))
            param_dict = {
                'VGG19/' + name: value
                for name, value in six.iteritems(param_dict)
            }
            session_init = DictRestore(param_dict)

        nr_tower = max(get_num_gpu(), 1)
        data = QueueInput(get_data(args.data))
        model = Model()

        trainer = SeparateGANTrainer(data, model, d_period=3)

        trainer.train_with_defaults(
            callbacks=[ModelSaver(keep_checkpoint_every_n_hours=2)],
            session_init=session_init,
            steps_per_epoch=data.size() // 4,
            max_epoch=300)
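Note: when no checkpoint is given, this example builds a DictRestore from a VGG19 .npz file, prefixing every array name with the 'VGG19/' scope so it matches the variables in the graph. A small sketch of that remapping (the key names here are made up; the real ones come from whatever --vgg19 points to):

import numpy as np
import six

# stand-in for dict(np.load(args.vgg19))
param_dict = {
    'conv1_1/W': np.zeros((3, 3, 3, 64), dtype='float32'),
    'conv1_1/b': np.zeros((64,), dtype='float32'),
}
param_dict = {'VGG19/' + name: value for name, value in six.iteritems(param_dict)}
print(sorted(param_dict))   # ['VGG19/conv1_1/W', 'VGG19/conv1_1/b']
# DictRestore(param_dict) then initializes the variables under the VGG19 scope.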
Example #9
    return df


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data', required=True,
        help='the img_align_celeba directory. should also contain list_attr_celeba.txt')
    parser.add_argument('--style-A', help='style of A', default='Male')
    parser.add_argument('--style-B', help='style of B')
    parser.add_argument('--load', help='load model')
    args = parser.parse_args()

    logger.auto_set_dir()

    data = get_celebA_data(args.data, args.style_A, args.style_B)

    config = TrainConfig(
        model=Model(),
        dataflow=data,
        callbacks=[ModelSaver()],
        steps_per_epoch=300,
        max_epoch=1000,
        session_init=SaverRestore(args.load) if args.load else None)

    # train 1 D after 2 G
    SeparateGANTrainer(config, 2).train()
Example #10
            n = v.op.name
            if not n.startswith('discrim/'):
                continue
            logger.info("Clip {}".format(n))
            ops.append(tf.assign(v, tf.clip_by_value(v, -0.01, 0.01)))
        self._op = tf.group(*ops, name='clip')

    def _trigger_step(self):
        self._op.run()


if __name__ == '__main__':
    args = DCGAN.get_args(default_batch=64)

    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()

        # The original code uses a different schedule, but this seems to work well.
        # Train 1 D after 2 G
        SeparateGANTrainer(input=QueueInput(DCGAN.get_data()),
                           model=M,
                           d_period=3).train_with_defaults(
                               callbacks=[ModelSaver(),
                                          ClipCallback()],
                               steps_per_epoch=500,
                               max_epoch=200,
                               session_init=SmartInit(args.load))
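Note: unlike the examples that write SaverRestore(args.load) if args.load else None, this one passes SmartInit(args.load) directly. The toy dispatcher below only illustrates the convenience such a helper provides; it is not tensorpack's SmartInit and may not match its exact behavior.

def smart_init_like(load):
    if not load:
        return None                           # nothing to restore; train from scratch
    if isinstance(load, dict):
        return ('DictRestore', sorted(load))  # a dict of name -> value
    return ('SaverRestore', load)             # assume a checkpoint path

print(smart_init_like(None))
print(smart_init_like('train_log/DCGAN/checkpoint'))   # hypothetical path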
Example #11
        # add clipping to D optimizer
        def clip(p):
            n = p.op.name
            if not n.startswith('discrim/'):
                return None
            logger.info("Clip {}".format(n))
            return tf.clip_by_value(p, -0.01, 0.01)

        return optimizer.VariableAssignmentOptimizer(opt, clip)


DCGAN.Model = Model

if __name__ == '__main__':
    args = DCGAN.get_args()

    if args.sample:
        DCGAN.sample(args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        config = DCGAN.get_config()
        config.steps_per_epoch = 500

        if args.load:
            config.session_init = SaverRestore(args.load)
        """
        The original code uses a different schedule.
        """
        SeparateGANTrainer(config, d_period=3).train()
Example #12
def main():
	np.random.seed(2018)
	tf.set_random_seed(2018)
	#https://docs.python.org/3/library/argparse.html
	parser = argparse.ArgumentParser()
	#
	parser.add_argument('--gpu',        help='comma separated list of GPU(s) to use.')
	parser.add_argument('--load',       help='load models for continue train or predict')
	parser.add_argument('--sample',     help='run sampling one instance')
	parser.add_argument('--imageDir',   help='Image directory', required=True)
	parser.add_argument('--maskDir',    help='Masks directory', required=False)
	parser.add_argument('--labelDir',   help='Label directory', required=True)
	parser.add_argument('-db', '--debug', type=int, default=0) # Debug one particular function in main flow
	global args
	args = parser.parse_args() # Create an object of parser
	if args.gpu:
		os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
		# os.environ['TENSORPACK_TRAIN_API'] = 'v2'
	if args.sample:
		sample(args.imageDir, args.maskDir, args.labelDir, args.load, args.sample)
	else:
		logger.auto_set_dir()
		ds_train, ds_valid = get_data(args.imageDir, args.maskDir, args.labelDir)

		ds_train = PrefetchDataZMQ(ds_train, nr_proc=4)
		ds_valid = PrefetchDataZMQ(ds_valid, nr_proc=4)

		ds_train.reset_state()
		ds_valid.reset_state() 

		nr_tower = max(get_nr_gpu(), 1)
		ds_train = QueueInput(ds_train)
		model = Model()
		if nr_tower == 1:
			trainer = SeparateGANTrainer(ds_train, model, g_period=1, d_period=1)
		else:
			trainer = MultiGPUGANTrainer(nr_tower, ds_train, model)
		trainer.train_with_defaults(
			callbacks=[
				PeriodicTrigger(ModelSaver(), every_k_epochs=20),
				PeriodicTrigger(MaxSaver('validation_PSNR_recon_A'), every_k_epochs=20),
				PeriodicTrigger(MaxSaver('validation_PSNR_boost_A'), every_k_epochs=20),
				VisualizeRunner(),
				InferenceRunner(ds_valid, [
										   ScalarStats('PSNR_zfill_A'), 
										   ScalarStats('PSNR_zfill_B'),
										   ScalarStats('PSNR_recon_A'),
										   ScalarStats('PSNR_recon_B'),
										   ScalarStats('PSNR_boost_A'), 
										   ScalarStats('PSNR_boost_B'),
										
										   ScalarStats('losses/Img/Zfill/zfill_img_MA'),
										   ScalarStats('losses/Img/Zfill/zfill_img_MB'),
											  
										   ScalarStats('losses/Frq/Recon/recon_frq_AA'),
										   ScalarStats('losses/Frq/Recon/recon_frq_BB'),
										   
										   ScalarStats('losses/Img/Recon/recon_img_AA'),
										   ScalarStats('losses/Img/Recon/recon_img_BB'),
										   
										   ScalarStats('losses/Frq/Boost/recon_frq_Aa'),
										   ScalarStats('losses/Frq/Boost/recon_frq_Bb'),
										   
										   ScalarStats('losses/Img/Boost/recon_img_Aa'),
										   ScalarStats('losses/Img/Boost/recon_img_Bb'),
					]),
				ClipCallback(),
				ScheduledHyperParamSetter('learning_rate', 
					[(0, 2e-4), (100, 1e-4), (200, 2e-5), (300, 1e-5), (400, 2e-6), (500, 1e-6)], interp='linear')
				
				],
			session_init=SaverRestore(args.load) if args.load else None, 
			steps_per_epoch=ds_train.size(),
			max_epoch=500
		)
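Note: the InferenceRunner above lists every ScalarStats by hand. Since each ScalarStats is constructed from a tensor name, the same callback can be built from a list of names, which is easier to keep in sync; the names below are exactly the ones in the example, and ds_valid refers to the validation dataflow it already creates.

from tensorpack.callbacks import ScalarStats

metric_names = [
    'PSNR_zfill_A', 'PSNR_zfill_B',
    'PSNR_recon_A', 'PSNR_recon_B',
    'PSNR_boost_A', 'PSNR_boost_B',
    'losses/Img/Zfill/zfill_img_MA', 'losses/Img/Zfill/zfill_img_MB',
    'losses/Frq/Recon/recon_frq_AA', 'losses/Frq/Recon/recon_frq_BB',
    'losses/Img/Recon/recon_img_AA', 'losses/Img/Recon/recon_img_BB',
    'losses/Frq/Boost/recon_frq_Aa', 'losses/Frq/Boost/recon_frq_Bb',
    'losses/Img/Boost/recon_img_Aa', 'losses/Img/Boost/recon_img_Bb',
]
stats = [ScalarStats(name) for name in metric_names]
# InferenceRunner(ds_valid, stats) would then replace the hand-written list above.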
Example #13
        gradients = tf.sqrt(tf.reduce_sum(tf.square(gradients), [1, 2, 3]))
        gradients_rms = tf.sqrt(tf.reduce_mean(tf.square(gradients)), name='gradient_rms')
        gradient_penalty = tf.reduce_mean(tf.square(gradients - 1), name='gradient_penalty')
        add_moving_summary(self.d_loss, self.g_loss, gradient_penalty, gradients_rms)

        self.d_loss = tf.add(self.d_loss, 10 * gradient_penalty)

        self.collect_variables()

    def optimizer(self):
        opt = tf.train.AdamOptimizer(1e-4, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    assert get_tf_version_tuple() >= (1, 4)
    args = DCGAN.get_args(default_batch=64, default_z_dim=128)
    M = Model(shape=args.final_size, batch=args.batch, z_dim=args.z_dim)
    if args.sample:
        DCGAN.sample(M, args.load)
    else:
        logger.auto_set_dir()
        SeparateGANTrainer(
            QueueInput(DCGAN.get_data()),
            M, g_period=5).train_with_defaults(
            callbacks=[ModelSaver()],
            steps_per_epoch=300,
            max_epoch=200,
            session_init=SmartInit(args.load)
        )
Example #14
def main():
    np.random.seed(2018)
    tf.set_random_seed(2018)
    #https://docs.python.org/3/library/argparse.html
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', help='comma separated list of GPU(s) to use.')
    parser.add_argument(
        '--data',
        required=True,
        help='Data directory, contain trainA/trainB/validA/validB')
    parser.add_argument('--load', help='Load the model path')
    parser.add_argument('--sample',
                        help='Run the deployment on an instance',
                        action='store_true')

    global args
    args = parser.parse_args()
    # python Exp_FusionNet2D_-VectorField.py --gpu='0' --data='arranged/'

    # Set the logger directory
    logger.auto_set_dir()

    train_ds = get_data(args.data, isTrain=True)
    valid_ds = get_data(args.data, isTrain=False)

    train_ds = PrintData(train_ds)
    valid_ds = PrintData(valid_ds)

    train_ds = PrefetchDataZMQ(train_ds, 8)
    valid_ds = PrefetchDataZMQ(valid_ds, 1)
    # Set the GPU
    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # Running train or deploy
    if args.sample:
        # TODO
        # sample
        pass
    else:
        SeparateGANTrainer(
            QueueInput(train_ds), Model(), g_period=2, d_period=1
        ).train_with_defaults(
            callbacks=[
                # PeriodicTrigger(DumpParamAsImage('viz'), every_k_epochs=10),
                PeriodicTrigger(ModelSaver(), every_k_epochs=100),
                PeriodicTrigger(InferenceRunner(valid_ds, [
                    ScalarStats('MAE_losses/loss_membr'),
                    ScalarStats('MAE_losses/loss_point')
                ]),
                                every_k_epochs=1),
                PeriodicTrigger(VisualizeRunner(), every_k_epochs=5),
                ClipCallback(),
                ScheduledHyperParamSetter('learning_rate', [(0, 2e-4),
                                                            (100, 1e-4),
                                                            (200, 2e-5),
                                                            (300, 1e-5),
                                                            (400, 2e-6),
                                                            (500, 1e-6)],
                                          interp='linear')
            ],
            # model=Model(),
            steps_per_epoch=train_ds.size(),
            max_epoch=500,
        )
Example #15
def get_config():
    return TrainConfig(
        model=Model(),
        # use the same data in the DCGAN example
        dataflow=DCGAN.get_data(args.data),
        callbacks=[ModelSaver()],
        steps_per_epoch=500,
        max_epoch=200,
    )


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', help='load model')
    parser.add_argument('--sample', action='store_true', help='view generated examples')
    parser.add_argument('--data', help='a jpeg directory')
    args = parser.parse_args()
    if args.sample:
        DCGAN.sample(args.load)
    else:
        assert args.data
        logger.auto_set_dir()
        config = get_config()
        if args.load:
            config.session_init = SaverRestore(args.load)
        """
        This is to be consistent with the original code, but I found just
        running them 1:1 (i.e. just using the existing GANTrainer) also works well.
        """
        SeparateGANTrainer(config, d_interval=5).train()