Example #1
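A unit test for `tk.layers.batch_norm_full_init`: it streams mini-batches of random data through a `BatchNorm` layer (with no loop, under an `mltk.TrainLoop`, and under an `mltk.TestLoop`) and checks that the momentum is preserved and that the accumulated running mean matches the mean of the full data.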
    def test_batch_norm_full_init(self):
        T.random.seed(1234)
        orig_input = T.random.randn([35, 4])
        inputs = T.split(orig_input, sections=[7] * 5, axis=0)
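        # ground-truth statistics computed over the full (un-split) data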
        mean, var = T.calculate_mean_and_var(orig_input, axis=[0])

        layer = tk.layers.Sequential([
            tk.layers.BatchNorm(4, momentum=0.125),
        ])

        def step_fn(input):
            _ = layer(input)

        def data_generator():
            for input in inputs:
                yield (input, )

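        # run full init twice without a loop, then under a TrainLoop and a TestLoop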
        for i in range(4):
            if i == 2:
                loop = mltk.TrainLoop()
            elif i == 3:
                loop = mltk.TestLoop()
            else:
                loop = None

            fn = lambda: tk.layers.batch_norm_full_init(
                layer, data_generator(), step_fn, loop=loop)
            tk.layers.set_train_mode(layer)

            if loop is not None:
                with loop:
                    fn()
            else:
                fn()
            tk.layers.set_eval_mode(layer)

            if T.backend_name == 'PyTorch':
                self.assertEqual(layer[0].momentum, 0.125)
                assert_allclose(layer[0].running_mean,
                                mean,
                                atol=1e-4,
                                rtol=1e-6)
            else:
                raise NotImplementedError()
Example #2
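An MNIST classifier built from 2-d residual blocks via `tk.layers.SequentialBuilder`, with data-dependent initialization, JIT compilation, and a test pass every 10 epochs.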
def main(exp: mltk.Experiment[Config]):
    # prepare the data
    train_stream, _, test_stream = utils.get_mnist_streams(
        batch_size=exp.config.batch_size,
        test_batch_size=exp.config.test_batch_size,
        val_batch_size=exp.config.test_batch_size,
        x_range=(-1., 1.),
    )

    tensorkit.utils.misc.print_experiment_summary(exp,
                                                  train_data=train_stream,
                                                  test_data=test_stream)

    # build the network
    net: T.Module = tk.layers.SequentialBuilder(train_stream.data_shapes[0]). \
        set_args('res_block2d',
                 kernel_size=3,
                 activation=tk.layers.LeakyReLU,
                 normalizer=tk.layers.BatchNorm2d,
                 dropout=0.5,
                 data_init=tk.init.StdDataInit()). \
        res_block2d(16). \
        res_block2d(32, stride=2). \
        res_block2d(32). \
        res_block2d(64, stride=2). \
        res_block2d(64). \
        global_avg_pool2d(). \
        linear(10). \
        log_softmax(). \
        build()
    params, param_names = tensorkit.utils.misc.get_params_and_names(net)
    tensorkit.utils.misc.print_parameters_summary(params, param_names)
    print('')
    mltk.print_with_time('Network constructed.')

    # initialize the network with the first few batches of training data
    init_x, _ = train_stream.get_arrays(max_batch=exp.config.init_batch_count)
    init_x = T.as_tensor(init_x)
    _ = net(init_x)  # trigger initialization
    net = tk.layers.jit_compile(net)
    _ = net(init_x)  # trigger JIT
    mltk.print_with_time('Network initialized')

    # the train, test and validate functions
    def train_step(x, y):
        logits = net(x)
        loss = T.nn.cross_entropy_with_logits(logits, y, reduction='mean')
        return {'loss': loss}

    def eval_step(x, y):
        with tk.layers.scoped_eval_mode(net), T.no_grad():
            logits = net(x)
            acc = utils.calculate_acc(logits, y)
        return {'acc': acc}

    # build the optimizer and the train loop
    loop = mltk.TrainLoop(max_epoch=exp.config.max_epoch)
    optimizer = tk.optim.Adam(tk.layers.iter_parameters(net))
    lr_scheduler = tk.optim.lr_scheduler.AnnealingLR(
        optimizer=optimizer,
        initial_lr=exp.config.initial_lr,
        ratio=exp.config.lr_anneal_ratio,
        epochs=exp.config.lr_anneal_epochs)
    lr_scheduler.bind(loop)

    # run test after every 10 epochs
    loop.run_after_every(
        lambda: loop.test().run(eval_step, test_stream),
        epochs=10,
    )

    # train the model
    tk.layers.set_train_mode(net, True)
    utils.fit_model(loop=loop,
                    optimizer=optimizer,
                    fn=train_step,
                    stream=train_stream)

    # do the final test
    results = mltk.TestLoop().run(eval_step, test_stream)
Example #3
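A variational auto-encoder trained on binarized MNIST with the SGVB estimator; evaluation reports the ELBO and an importance-sampling estimate of the negative log-likelihood, and a 10x10 grid of decoder samples is saved every 10 epochs.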
def main(exp: mltk.Experiment[Config]):
    # prepare the data
    train_stream, _, test_stream = utils.get_mnist_streams(
        batch_size=exp.config.batch_size,
        test_batch_size=exp.config.test_batch_size,
        flatten=True,
        x_range=(0., 1.),
        use_y=False,
        mapper=utils.BernoulliSampler().as_mapper(),
    )

    tensorkit.utils.misc.print_experiment_summary(exp,
                                                  train_data=train_stream,
                                                  test_data=test_stream)

    # build the network
    vae: VAE = VAE(train_stream.data_shapes[0][0], exp.config)
    params, param_names = tensorkit.utils.misc.get_params_and_names(vae)
    tensorkit.utils.misc.print_parameters_summary(params, param_names)
    print('')
    mltk.print_with_time('Network constructed.')

    # initialize the network with the first few batches of training data
    [init_x] = train_stream.get_arrays(max_batch=exp.config.init_batch_count)
    vae.initialize(init_x)
    mltk.print_with_time('Network initialized')

    # define the train and evaluate functions
    def train_step(x):
        chain = vae.get_chain(x)
        loss = chain.vi.training.sgvb(reduction='mean')
        return {'loss': loss}

    def eval_step(x, n_z=exp.config.test_n_z):
        with tk.layers.scoped_eval_mode(vae), T.no_grad():
            chain = vae.get_chain(x, n_z=n_z)
            loss = chain.vi.training.sgvb(reduction='mean')
            nll = -chain.vi.evaluation.is_loglikelihood(reduction='mean')
        return {'elbo': loss, 'nll': nll}

    def plot_samples(epoch=None):
        epoch = epoch or loop.epoch
        with tk.layers.scoped_eval_mode(vae), T.no_grad():
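            # sample 100 images from the prior and turn the Bernoulli logits
            # over `x` into uint8 pixel intensities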
            logits = vae.p(n_z=100)['x'].distribution.logits
            images = T.reshape(
                T.cast(T.clip(T.nn.sigmoid(logits) * 255., 0., 255.),
                       dtype=T.uint8),
                [-1, 28, 28],
            )
        utils.save_images_collection(
            images=T.to_numpy(images),
            filename=exp.abspath(f'plotting/{epoch}.png'),
            grid_size=(10, 10),
        )

    # build the optimizer and the train loop
    loop = mltk.TrainLoop(max_epoch=exp.config.max_epoch)
    loop.add_callback(mltk.callbacks.StopOnNaN())
    optimizer = tk.optim.Adam(tk.layers.iter_parameters(vae))
    lr_scheduler = tk.optim.lr_scheduler.AnnealingLR(
        optimizer=optimizer,
        initial_lr=exp.config.initial_lr,
        ratio=exp.config.lr_anneal_ratio,
        epochs=exp.config.lr_anneal_epochs)
    lr_scheduler.bind(loop)
    loop.run_after_every(
        lambda: loop.test().run(partial(eval_step, n_z=10), test_stream),
        epochs=10)
    loop.run_after_every(plot_samples, epochs=10)

    # train the model
    tk.layers.set_train_mode(vae, True)
    utils.fit_model(loop=loop,
                    optimizer=optimizer,
                    fn=train_step,
                    stream=train_stream)

    # do the final test
    results = mltk.TestLoop().run(eval_step, test_stream)
    plot_samples('final')
Example #4
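A dense MNIST classifier with a 20% validation split; early stopping tracks validation accuracy, and validation and test passes run every 10 epochs.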
def main(exp: mltk.Experiment[Config]):
    # prepare the data
    train_stream, val_stream, test_stream = utils.get_mnist_streams(
        batch_size=exp.config.batch_size,
        test_batch_size=exp.config.test_batch_size,
        val_batch_size=exp.config.test_batch_size,
        val_portion=0.2,
        flatten=True,
        x_range=(-1., 1.),
    )

    tensorkit.utils.misc.print_experiment_summary(exp,
                                                  train_data=train_stream,
                                                  val_data=val_stream,
                                                  test_data=test_stream)

    # build the network
    net: T.Module = tk.layers.SequentialBuilder(784). \
        set_args('dense',
                 activation=tk.layers.LeakyReLU,
                 data_init=tk.init.StdDataInit()). \
        dense(500). \
        dense(500). \
        linear(10). \
        log_softmax(). \
        build()
    params, param_names = tensorkit.utils.misc.get_params_and_names(net)
    tensorkit.utils.misc.print_parameters_summary(params, param_names)
    print('')
    mltk.print_with_time('Network constructed.')

    # initialize the network
    init_x, _ = train_stream.get_arrays(max_batch=exp.config.init_batch_count)
    init_x = T.as_tensor(init_x)
    _ = net(init_x)  # trigger initialization
    net = tk.layers.jit_compile(net)
    _ = net(init_x)  # trigger JIT
    mltk.print_with_time('Network initialized')

    # define the train and evaluate functions
    def train_step(x, y):
        logits = net(x)
        loss = T.nn.cross_entropy_with_logits(logits, y, reduction='mean')
        return {'loss': loss}

    def eval_step(x, y):
        with tk.layers.scoped_eval_mode(net), T.no_grad():
            logits = net(x)
            acc = utils.calculate_acc(logits, y)
        return {'acc': acc}

    # build the optimizer and the train loop
    loop = mltk.TrainLoop(max_epoch=exp.config.max_epoch)
    optimizer = tk.optim.Adam(tk.layers.iter_parameters(net))
    lr_scheduler = tk.optim.lr_scheduler.AnnealingLR(
        optimizer=optimizer,
        initial_lr=exp.config.initial_lr,
        ratio=exp.config.lr_anneal_ratio,
        epochs=exp.config.lr_anneal_epochs)
    lr_scheduler.bind(loop)

    # add a callback to do early-stopping on the network parameters
    # according to the validation metric.
    loop.add_callback(
        mltk.callbacks.EarlyStopping(
            checkpoint=tk.train.Checkpoint(net=net),
            root_dir=exp.abspath('./checkpoint/early-stopping'),
            # note: for `loop.validation()`, the prefix "val_" is
            # automatically prepended to every metric generated by the
            # `eval_step` function.
            metric_name='val_acc',
            smaller_is_better=False,
        ))

    # run validation after every 10 epochs
    if val_stream is not None:
        loop.run_after_every(
            lambda: loop.validation().run(eval_step, val_stream),
            epochs=10,
        )

    # run test after every 10 epochs
    loop.run_after_every(
        lambda: loop.test().run(eval_step, test_stream),
        epochs=10,
    )

    # train the model
    tk.layers.set_train_mode(net, True)
    utils.fit_model(loop=loop,
                    optimizer=optimizer,
                    fn=train_step,
                    stream=train_stream)

    # do the final test with the best network parameters (according to validation)
    results = mltk.TestLoop().run(eval_step, test_stream)
Example #5
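A plain PyTorch classifier driven by `mltk`: the data streams yield torch tensors with threaded prefetching, and `train_step` performs a standard zero_grad / forward / backward / step cycle, reporting loss and accuracy.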
    test_stream = mltk.DataStream.arrays(
            [test_x, test_y], batch_size=128). \
        to_torch_tensors().threaded(5)

    predict_stream = mltk.DataStream.arrays(
            [test_x], batch_size=128). \
        to_torch_tensors().threaded(5)

    # construct the model
    model: nn.Module = Net()

    # construct the optimizer
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # prepare for the train loop
    train_loop = mltk.TrainLoop(max_epoch=10)

    # set the model to `train` mode at the beginning of each epoch,
    # required by PyTorch
    train_loop.on_epoch_begin.do(model.train)

    def train_step(x, y):
        optimizer.zero_grad()
        out = model(x)
        loss = F.nll_loss(out, y)
        loss.backward()
        optimizer.step()

        acc = (torch.argmax(out, dim=-1) == y).to(torch.float32).mean()
        return {'loss': loss, 'acc': acc}
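
The snippet stops before the loop is executed. A minimal continuation might look like the sketch below; it assumes that a `train_stream` was built in the elided part of the function, and that `mltk.TrainLoop` offers the same `run(fn, stream)` interface that `TestLoop` uses in the examples above.

    # sketch (assumptions noted above): drive the training epochs
    with train_loop:
        train_loop.run(train_step, train_stream)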