Example #1
0
    # Translation direction for the image-to-image model (A->B or B->A).
    parser.add_argument('--mode', choices=['AtoB', 'BtoA'], default='AtoB')
    parser.add_argument('-b', '--batch', type=int, default=1)
    args = parser.parse_args()
    if args.gpu:
        # Restrict TensorFlow to the GPUs listed on the command line.
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    BATCH = args.batch

    if args.sample:
        # Inference mode: --load (a checkpoint) is mandatory for sampling.
        assert args.load
        sample(args.data, args.load)
    else:
        # Training mode: pick a fresh log directory named after this script.
        logger.auto_set_dir()

        # Wrap the dataflow in a queue so data loading overlaps training.
        data = QueueInput(get_data())

        # One training tower per GPU; fall back to a single tower on CPU.
        nr_tower = max(get_num_gpu(), 1)
        if nr_tower == 1:
            trainer = GANTrainer(data, Model())
        else:
            trainer = MultiGPUGANTrainer(nr_tower, data, Model())

        trainer.train_with_defaults(
            callbacks=[
                # Save a checkpoint every 3 epochs instead of every epoch.
                PeriodicTrigger(ModelSaver(), every_k_epochs=3),
                # Drop the learning rate to 1e-4 at epoch 200.
                ScheduledHyperParamSetter('learning_rate', [(200, 1e-4)])
            ],
            steps_per_epoch=data.size(),
            max_epoch=300,
            # Resume from a checkpoint when --load is given.
            session_init=SaverRestore(args.load) if args.load else None)
Example #2
0
                             initializer=1e-4,
                             trainable=False)
        opt = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
        return opt


if __name__ == '__main__':
    # Parse the shared DCGAN command-line options (batch size, z-dim, etc.).
    args = DCGAN.get_args(default_batch=32, default_z_dim=64)
    if args.sample:
        # Inference mode: draw samples from the named generator output tensor.
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        # Training mode: pick a fresh log directory named after this script.
        logger.auto_set_dir()

        # Renamed `input` -> `inputs`: the original shadowed the `input` builtin.
        inputs = QueueInput(DCGAN.get_data())
        model = Model()
        # One training tower per GPU; fall back to a single tower on CPU.
        nr_tower = max(get_nr_gpu(), 1)
        if nr_tower == 1:
            trainer = GANTrainer(inputs, model)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, inputs, model)

        trainer.train_with_defaults(
            callbacks=[
                ModelSaver(),
                # Adjusts 'learning_rate' (halving via the lambda) based on the
                # monitored 'measure' statistic — presumably when it plateaus;
                # verify against StatMonitorParamSetter's docs.
                StatMonitorParamSetter('learning_rate', 'measure',
                                       lambda x: x * 0.5, 0, 10)
            ],
            # Resume from a checkpoint when --load is given.
            session_init=SaverRestore(args.load) if args.load else None,
            steps_per_epoch=500,
            max_epoch=400)
Example #3
0
        # Training branch: pick a fresh log directory named after this script.
        logger.auto_set_dir()

        # One training tower per GPU; fall back to a single tower on CPU.
        nr_tower = max(get_nr_gpu(), 1)
        # Wrap the LMDB-backed dataflow in a queue to overlap data loading.
        data = QueueInput(get_data(args.lmdb))
        model = Model()

        # Log the key run hyper-parameters up front for reproducibility.
        logger.info("run %i epochs", EPOCHS)
        logger.info("use %i blocks", BLOCKS)
        logger.info("use %i as batchsize", BATCH_SIZE)

        if nr_tower == 1:
            trainer = GANTrainer(data, model)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, data, model)

        callbacks = [
            ModelSaver(),
            MovingAverageSummary(),
            # Show the listed scalars in the training progress bar.
            ProgressBar(['d_loss', 'g_loss', 'alpha', 'loss-diff-g-d']),
            MergeAllSummaries(),
            RunUpdateOps()
        ]

        trainer.train_with_defaults(
            callbacks=callbacks,
            # When resuming, drop 'global_step' so the epoch counter restarts.
            session_init=SaverRestore(args.load, ignore=['global_step'])
            if args.load else None,
            steps_per_epoch=STEPS_PER_EPOCH,
            max_epoch=EPOCHS,
        )
Example #4
0
    def _get_optimizer(self):
        """Return an Adam optimizer driven by a non-trainable LR variable.

        The learning rate lives in a TF variable so callbacks can adjust
        it during training without rebuilding the optimizer.
        """
        learning_rate = tf.get_variable(
            'learning_rate', initializer=1e-4, trainable=False)
        return tf.train.AdamOptimizer(learning_rate, beta1=0.5, beta2=0.9)


if __name__ == '__main__':
    # Parse the shared DCGAN command-line options (batch size, z-dim, etc.).
    args = DCGAN.get_args(default_batch=32, default_z_dim=64)
    if args.sample:
        # Inference mode: draw samples from the named generator output tensor.
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        # Training mode: pick a fresh log directory named after this script.
        logger.auto_set_dir()

        # Renamed `input` -> `inputs`: the original shadowed the `input` builtin.
        inputs = QueueInput(DCGAN.get_data())
        model = Model()
        # One training tower per GPU; fall back to a single tower on CPU.
        nr_tower = max(get_nr_gpu(), 1)
        if nr_tower == 1:
            trainer = GANTrainer(inputs, model)
        else:
            trainer = MultiGPUGANTrainer(nr_tower, inputs, model)

        trainer.train_with_defaults(
            callbacks=[
                ModelSaver(),
                # Adjusts 'learning_rate' (halving via the lambda) based on the
                # monitored 'measure' statistic — presumably when it plateaus;
                # verify against StatMonitorParamSetter's docs.
                StatMonitorParamSetter(
                    'learning_rate', 'measure', lambda x: x * 0.5, 0, 10)
            ],
            # Resume from a checkpoint when --load is given.
            session_init=SaverRestore(args.load) if args.load else None,
            steps_per_epoch=500, max_epoch=400)
Example #5
0
        self.collect_variables()

    def optimizer(self):
        """Return an Adam optimizer driven by a non-trainable LR variable.

        The learning rate lives in a TF variable so callbacks can adjust
        it during training without rebuilding the optimizer.
        """
        learning_rate = tf.get_variable(
            'learning_rate', initializer=1e-4, trainable=False)
        return tf.train.AdamOptimizer(learning_rate, beta1=0.5, beta2=0.9)


if __name__ == '__main__':
    # Parse the shared DCGAN command-line options (batch size, z-dim, etc.).
    args = DCGAN.get_args(default_batch=32, default_z_dim=64)
    if args.sample:
        # Inference mode: draw samples from the named generator output tensor.
        DCGAN.sample(Model(), args.load, 'gen/conv4.3/output')
    else:
        # Training mode: pick a fresh log directory named after this script.
        logger.auto_set_dir()

        # Renamed `input` -> `inputs`: the original shadowed the `input` builtin.
        inputs = QueueInput(DCGAN.get_data())
        model = Model()
        # One training tower per GPU; fall back to a single tower on CPU.
        nr_tower = max(get_num_gpu(), 1)
        trainer = GANTrainer(inputs, model, num_gpu=nr_tower)
        # Reformatted the call to a uniform hanging indent (the original mixed
        # ragged alignment columns). Behavior is unchanged.
        trainer.train_with_defaults(
            callbacks=[
                ModelSaver(),
                # Adjusts 'learning_rate' (halving via the lambda) based on the
                # monitored 'losses/measure' statistic — presumably when it
                # plateaus; verify against StatMonitorParamSetter's docs.
                StatMonitorParamSetter('learning_rate', 'losses/measure',
                                       lambda x: x * 0.5, 0, 10)
            ],
            # SmartInit handles both fresh starts and checkpoint resumption.
            session_init=SmartInit(args.load),
            steps_per_epoch=500,
            max_epoch=400)