# Example #1
# 0
def train(args):
    """Train the style-based ALAE model on MNIST.

    Args:
        args: parsed CLI namespace; reads the attributes `name`,
            `summarydir`, `ckptdir`, `epochs` and `batch_size`.

    Returns:
        int: 0, used as the process exit code.
    """
    # Silence TensorFlow's INFO/WARNING chatter for the training run.
    tf.get_logger().setLevel(logging.ERROR)

    mnist = MNIST()
    stylealae = StyleMNIST()

    modelname = args.name
    # exist_ok=True replaces the check-then-create pattern, which was
    # racy (TOCTOU) and more verbose; behavior is otherwise identical.
    summary_path = os.path.join(args.summarydir, modelname)
    os.makedirs(summary_path, exist_ok=True)

    ckpt_path = os.path.join(args.ckptdir, modelname)
    os.makedirs(ckpt_path, exist_ok=True)

    # NOTE(review): presumably the controller steps the active layer/level
    # every EPOCHS_PER_LEVEL epochs (progressive growing) — confirm against
    # the LevelController implementation.
    controller = LevelController(NUM_LAYERS, EPOCHS_PER_LEVEL)
    trainer = Trainer(summary_path, ckpt_path, callback=controller)
    trainer.train(
        stylealae,
        args.epochs,
        mnist.datasets(
            args.batch_size, padding=2, flatten=False),
        mnist.datasets(
            args.batch_size, padding=2, flatten=False, train=False),
        trainlen=len(mnist.x_train) // args.batch_size)

    return 0
# Example #2
# 0
def train(args):
    """Train the MLP ALAE model on (conditioned, flattened) MNIST.

    Args:
        args: parsed CLI namespace; reads the attributes `name`,
            `summarydir`, `ckptdir`, `epochs` and `batch_size`.

    Returns:
        int: 0, used as the process exit code.
    """
    mnist = MNIST()
    mlpalae = MnistAlae()

    modelname = args.name
    # exist_ok=True replaces the check-then-create pattern, which was
    # racy (TOCTOU) and more verbose; behavior is otherwise identical.
    summary_path = os.path.join(args.summarydir, modelname)
    os.makedirs(summary_path, exist_ok=True)

    ckpt_path = os.path.join(args.ckptdir, modelname)
    os.makedirs(ckpt_path, exist_ok=True)

    trainer = Trainer(summary_path, ckpt_path)
    trainer.train(
        mlpalae,
        args.epochs,
        # flatten=True + condition=True: MLP input is a flat vector,
        # presumably with the class label appended — TODO confirm in MNIST.datasets.
        mnist.datasets(bsize=args.batch_size, flatten=True, condition=True),
        mnist.datasets(bsize=args.batch_size, flatten=True, condition=True, train=False),
        len(mnist.x_train) // args.batch_size)

    return 0