Ejemplo n.º 1
0
def test_ComputeMetrics():
    """Smoke-test ComputeMetrics with per-metric ``divide_first`` flags.

    Trains for two epochs with loss/accuracy/constant metrics computed
    every second step and reduced to scalars.
    """
    setka.base.environment_setup()

    dataset = test_dataset.CIFAR10()
    net = tiny_model.TensorNet()

    # Plain SGD with momentum and weight decay, built once and reused below.
    sgd = setka.base.Optimizer(
        net, torch.optim.SGD, lr=0.1, momentum=0.9, weight_decay=5e-4)

    pipeline = [
        setka.pipes.DatasetHandler(dataset, batch_size=32, limits=10,
                                   shuffle=False),
        setka.pipes.ModelHandler(net),
        setka.pipes.LossHandler(loss),
        setka.pipes.OneStepOptimizers([sgd]),
        setka.pipes.ComputeMetrics(
            [loss, acc, const],
            divide_first=[True, False, True],
            steps_to_compute=2,
            reduce=True),
    ]

    trainer = setka.base.Trainer(pipes=pipeline)
    trainer.run_train(2)
Ejemplo n.º 2
0
def test_Logger_tensor():
    """Train with a Logger pipe and verify the expected artifacts on disk."""
    loss = tensor_loss
    acc = tensor_acc

    dataset = test_dataset.CIFAR10()
    net = tiny_model.TensorNet()

    def view_result(one_input, one_output):
        # Render the (min-max normalized) input image as a matplotlib figure.
        image = one_input[0]
        image = (image - image.min()) / (image.max() - image.min())
        truth = one_input[1]
        label = one_output[0]

        figure = plt.figure()
        plt.imshow(image.permute(2, 1, 0))
        plt.close()
        return {'figures': {'img.jpg': figure}}

    subset_limits = {
        'train': 3,
        'valid': 3,
        'test': 1
    }
    optimizer = setka.base.Optimizer(
        net, torch.optim.SGD, lr=0.01, momentum=0.9, weight_decay=5e-4)

    trainer = setka.base.Trainer(pipes=[
        setka.pipes.DatasetHandler(dataset, batch_size=4,
                                   limits=subset_limits),
        setka.pipes.ModelHandler(net),
        setka.pipes.LossHandler(loss),
        setka.pipes.OneStepOptimizers([optimizer]),
        setka.pipes.ComputeMetrics([loss, acc]),
        setka.pipes.Logger(f=view_result, full_snapshot_path=True,
                           name='my_experiment')
    ])

    trainer.run_train(2)
    trainer.run_epoch('test', 'test', n_iterations=2)

    experiment_dir = os.path.join('runs', 'my_experiment')
    assert os.path.exists(experiment_dir)
    assert len(os.listdir(experiment_dir)) > 0

    # The most recent run is the lexicographically last directory.
    last_run = sorted(os.listdir(experiment_dir))[-1]
    run_dir = os.path.join(experiment_dir, last_run)

    assert os.path.exists(
        os.path.join(run_dir, '2_figures', 'test_2047', 'img.jpg'))
    assert os.path.exists(os.path.join(run_dir, 'bash_command.txt'))
    assert os.path.exists(os.path.join(run_dir, 'epoch_log.json'))
    assert os.path.exists(os.path.join(run_dir, 'batch_log.json'))
Ejemplo n.º 3
0
def test_ComputeMetrics3():
    """Smoke-test ComputeMetrics with per-metric ``reduce`` flags plus
    per-iteration (CyclicLR) and per-epoch (ReduceLROnPlateau) schedulers.
    """
    setka.base.environment_setup()

    ds = test_dataset.CIFAR10()
    model = tiny_model.TensorNet()

    trainer = setka.base.Trainer(pipes=[
        setka.pipes.DatasetHandler(ds, batch_size=32, limits=9, shuffle=False),
        setka.pipes.ModelHandler(model),
        setka.pipes.LossHandler(loss),
        setka.pipes.ComputeMetrics([loss, acc, const],
                                   divide_first=True,
                                   steps_to_compute=2,
                                   reduce=[True, False, True]),
        setka.pipes.OneStepOptimizers([
            setka.base.Optimizer(
                model,
                torch.optim.SGD,
                lr=0.1,
                momentum=0.9,
                weight_decay=5e-4,
                iter_schedulers=[
                    setka.base.Scheduler(torch.optim.lr_scheduler.CyclicLR,
                                         base_lr=1.0e-3,
                                         max_lr=1.0e-1)
                ],
                epoch_schedulers=[
                    # ReduceLROnPlateau monitors the running validation
                    # accuracy: accumulated sum divided by accumulated count.
                    setka.base.Scheduler(
                        torch.optim.lr_scheduler.ReduceLROnPlateau,
                        monitor=lambda: trainer._metrics['valid']['tensor_acc']
                        [0] / trainer._metrics['valid']['tensor_acc'][1],
                        mode='max')
                ])
        ]),
    ])

    trainer.run_train(20)
Ejemplo n.º 4
0
def test_base():
    """Smoke-test the core Trainer API: pipe removal/re-addition, training
    and per-subset epoch runs, and the schedule/pipeline views.
    """
    setka.base.environment_setup()

    ds = test_dataset.CIFAR10()
    model = tiny_model.TensorNet()

    trainer = setka.base.Trainer(
        pipes=[
            setka.pipes.DatasetHandler(ds,
                                       batch_size=32,
                                       limits=2,
                                       shuffle=True),
            setka.pipes.ModelHandler(model),
            setka.pipes.LossHandler(loss),
            setka.pipes.OneStepOptimizers([
                setka.base.Optimizer(model,
                                     torch.optim.SGD,
                                     lr=0.1,
                                     momentum=0.9,
                                     weight_decay=5e-4)
            ]),
            setka.pipes.Pipe()  # no-op pipe; exercises the base class
        ],
        collection_op=setka.base.CollectionOperator(soft_collate_fn=True))

    # Pipes can be removed by class and re-added after construction.
    trainer.remove_pipe(setka.pipes.LossHandler)
    trainer.add_pipe(setka.pipes.LossHandler(loss))

    trainer.run_train(n_epochs=2)
    trainer.run_epoch(mode='train', subset='train', n_iterations=100)
    trainer.run_epoch(mode='valid', subset='valid', n_iterations=10)
    trainer.run_epoch(mode='test', subset='valid', n_iterations=10)

    print("=====  Training schedule =====")
    print(trainer.view_train())
    print("===== Epoch schedule =====")
    print(trainer.view_epoch())
    print("===== Batch schedule =====")
    print(trainer.view_batch())

    # Fixed typo in printed header: was "Trianer".
    print("===== Trainer =====")
    print(trainer.view_pipeline())
Ejemplo n.º 5
0
def test_WeightAveraging():
    """Train three epochs with weight averaging enabled from epoch 0."""
    dataset = test_dataset.CIFAR10()
    net = tiny_model.TensorNet()

    sgd = setka.base.Optimizer(
        net, torch.optim.SGD, lr=0.1, momentum=0.9, weight_decay=5e-4)

    pipeline = [
        setka.pipes.DatasetHandler(dataset, batch_size=32, limits=5),
        setka.pipes.ModelHandler(net),
        setka.pipes.LossHandler(loss),
        setka.pipes.OneStepOptimizers([sgd]),
        # Average weights every epoch, starting immediately.
        setka.pipes.WeightAveraging(epoch_start=0, interval=1),
    ]

    trainer = setka.base.Trainer(pipes=pipeline)
    trainer.run_train(3)
Ejemplo n.º 6
0
def test_Lambda():
    """Train two epochs with a Lambda pipe that prints at each batch start."""
    dataset = test_dataset.CIFAR10()
    net = tiny_model.TensorNet()

    def print_message():
        print("Message")

    sgd = setka.base.Optimizer(
        net, torch.optim.SGD, lr=0.1, momentum=0.9, weight_decay=5e-4)

    pipeline = [
        setka.pipes.DatasetHandler(dataset, batch_size=32, limits=2),
        setka.pipes.ModelHandler(net),
        setka.pipes.LossHandler(loss),
        setka.pipes.OneStepOptimizers([sgd]),
        setka.pipes.Lambda(on_batch_begin=print_message),
    ]

    trainer = setka.base.Trainer(pipes=pipeline)
    trainer.run_train(2)
Ejemplo n.º 7
0
def test_ProgressBar():
    """Exercise the ProgressBar pipe against long strings and extreme numbers."""
    dataset = test_dataset.CIFAR10()
    net = tiny_model.TensorNet()

    sgd = setka.base.Optimizer(
        net, torch.optim.SGD, lr=0.1, momentum=0.9, weight_decay=5e-4)

    pipeline = [
        setka.pipes.DatasetHandler(dataset, batch_size=32, limits=2),
        setka.pipes.ModelHandler(net),
        setka.pipes.LossHandler(loss),
        setka.pipes.OneStepOptimizers([sgd]),
        setka.pipes.ProgressBar(),
    ]
    trainer = setka.base.Trainer(pipes=pipeline)

    # Status entries the bar must render: an overlong unbroken string and
    # numbers spanning many orders of magnitude.
    trainer.status['Test'] = [
        'This is just a very-very long string that helps in testing the package. This string has no delimiters so it will be split in the middle of the string'
    ]
    trainer.status['Test2'] = [1.0e-12, 1.0e12, 1, 100000000000000000, 1000]
    trainer.run_train(1)
Ejemplo n.º 8
0
def test_SaveResult_tensor():
    """Train, then run a test epoch with SaveResult capturing (input, output)."""
    dataset = test_dataset.CIFAR10()
    net = tiny_model.TensorNet()

    def f(input, output):
        # Identity transform: persist inputs and outputs unchanged.
        return input, output

    sgd = setka.base.Optimizer(
        net, torch.optim.SGD, lr=0.1, momentum=0.9, weight_decay=5e-4)

    pipeline = [
        setka.pipes.DatasetHandler(dataset, batch_size=32, limits=2),
        setka.pipes.ModelHandler(net),
        setka.pipes.LossHandler(tensor_loss),
        setka.pipes.OneStepOptimizers([sgd]),
        setka.pipes.SaveResult(f=f),
    ]

    trainer = setka.base.Trainer(pipes=pipeline)
    trainer.run_train(1)
    trainer.run_epoch('test', 'test', n_iterations=2)
Ejemplo n.º 9
0
import torch

import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '..'))
import tiny_model
import test_dataset

import matplotlib.pyplot as plt

from test_metrics import tensor_loss as loss
from test_metrics import tensor_acc as acc

# Module-level fixtures shared by the tests in this file.
ds = test_dataset.CIFAR10()
model = tiny_model.TensorNet()


def view_result(one_input, one_output):
    """Render a dataset item's image as a matplotlib figure for logging.

    Args:
        one_input: one dataset item; index 0 is an image tensor
            (presumably CHW, given the ``permute(2, 1, 0)`` — TODO confirm).
        one_output: model outputs for that item (unused here).

    Returns:
        dict: ``{'figures': {'img': <matplotlib figure>}}``.
    """
    img = one_input[0]
    # Min-max normalize to [0, 1] so imshow renders regardless of scale.
    img = (img - img.min()) / (img.max() - img.min())

    fig = plt.figure()
    plt.imshow(img.permute(2, 1, 0))
    plt.close()
    return {'figures': {'img': fig}}
Ejemplo n.º 10
0
def test_MakeCheckpoints():
    """Train with two Checkpointer pipes and verify the files each writes.

    The first checkpointer keeps every per-epoch checkpoint
    (``keep_best_only=False``); the second keeps only best/latest. Both runs
    share the same timestamped run directory name, so ``last_exp`` found for
    one experiment is reused for the other.
    """
    ds = test_dataset.CIFAR10()
    model = tiny_model.TensorNet()

    trainer = setka.base.Trainer(pipes=[
        setka.pipes.DatasetHandler(ds, batch_size=32, limits=2),
        setka.pipes.ModelHandler(model),
        setka.pipes.LossHandler(loss),
        setka.pipes.OneStepOptimizers([
            setka.base.Optimizer(model,
                                 torch.optim.SGD,
                                 lr=0.01,
                                 momentum=0.9,
                                 weight_decay=5e-4)
        ]),
        setka.pipes.ComputeMetrics([loss, acc]),
        setka.pipes.Checkpointer('tensor_acc',
                                 max_mode=True,
                                 name='my_experiment',
                                 keep_best_only=False),
        setka.pipes.Checkpointer(
            'tensor_loss', max_mode=False, name='my_experiment_best_only')
    ])

    trainer.run_train(5)
    trainer.run_epoch('test', 'test', n_iterations=2)

    path = os.path.join('runs', 'my_experiment')
    last_exp = sorted(os.listdir(path))[-1]

    # keep_best_only=False: 8 checkpoint slots, each saved as both a full
    # trainer snapshot and a weights-only snapshot.
    assert (len(
        os.listdir(
            os.path.join('runs', 'my_experiment', last_exp,
                         'checkpoints'))) == 8 * 2)
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment', last_exp, 'checkpoints',
                     'my_experiment_best.pth.tar')))
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment', last_exp, 'checkpoints',
                     'my_experiment_latest.pth.tar')))
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment', last_exp, 'checkpoints',
                     'my_experiment_weights_best.pth.tar')))
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment', last_exp, 'checkpoints',
                     'my_experiment_weights_latest.pth.tar')))

    # One numbered snapshot pair per epoch (5 train epochs + 1 test epoch).
    for index in range(6):
        assert (os.path.exists(
            os.path.join('runs', 'my_experiment', last_exp, 'checkpoints',
                         'my_experiment_weights_' + str(index) + '.pth.tar')))
        assert (os.path.exists(
            os.path.join('runs', 'my_experiment', last_exp, 'checkpoints',
                         'my_experiment_' + str(index) + '.pth.tar')))

    # keep_best_only (default): just best/latest, full + weights-only.
    assert (len(
        os.listdir(
            os.path.join('runs', 'my_experiment_best_only', last_exp,
                         'checkpoints'))) == 4)
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment_best_only', last_exp,
                     'checkpoints', 'my_experiment_best_only_best.pth.tar')))
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment_best_only', last_exp,
                     'checkpoints', 'my_experiment_best_only_latest.pth.tar')))
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment_best_only', last_exp,
                     'checkpoints',
                     'my_experiment_best_only_weights_best.pth.tar')))
    assert (os.path.exists(
        os.path.join('runs', 'my_experiment_best_only', last_exp,
                     'checkpoints',
                     'my_experiment_best_only_weights_latest.pth.tar')))

    latest_weights = torch.load(
        os.path.join('runs', 'my_experiment_best_only', last_exp,
                     'checkpoints',
                     'my_experiment_best_only_weights_latest.pth.tar'))

    latest_trainer = torch.load(
        os.path.join('runs', 'my_experiment_best_only', last_exp,
                     'checkpoints', 'my_experiment_best_only_latest.pth.tar'))
    latest_trainer = latest_trainer['trainer']

    # State dicts are compared via their string form: same keys and values
    # imply the reloaded model matches the in-memory one.
    assert (latest_trainer._model.state_dict().__str__() ==
            trainer._model.state_dict().__str__())
    assert (latest_weights.__str__() == trainer._model.state_dict().__str__())