def test_model_train():
    model = Model(n_input=1, n_output=30, n_channel=32)
    inputs = Variable(torch.randn(4, 32, 32))  # Variable is a legacy no-op wrapper in PyTorch >= 0.4
    targets = Variable(torch.randint(0, 30, (4, )))  # integer class labels in [0, 30)
    assert_vars_change(model=model,
                       loss_fn=F.nll_loss,
                       optim=torch.optim.Adam(model.parameters()),
                       batch=[inputs, targets],
                       device='cpu')
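Every example on this page revolves around torchtest's assert_vars_change, which performs a single training step and asserts that every trainable parameter actually moved. A minimal sketch of that idea (illustrative only, not torchtest's actual implementation; vars_change_sketch is a made-up name):

import torch


def vars_change_sketch(model, loss_fn, optim, batch):
    """Snapshot every parameter, run one training step, then check that
    each trainable parameter actually changed. Illustrative only."""
    inputs, targets = batch
    before = [(name, p.detach().clone()) for name, p in model.named_parameters()]

    optim.zero_grad()
    loss = loss_fn(model(inputs), targets)
    loss.backward()
    optim.step()

    for (name, p0), (_, p1) in zip(before, model.named_parameters()):
        if p1.requires_grad:
            assert not torch.equal(p0, p1), f'{name} did not change'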
Example No. 2
def test_vars_change(model, loss, batch):
    """

    :param model:
    :param loss:
    :param batch:
    """
    assert_vars_change(model, loss, torch.optim.Adam(model.parameters()),
                       batch)
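A quick way to exercise this helper with a toy classifier (the model and shapes below are illustrative, not from the original test suite):

import torch
import torch.nn as nn
import torch.nn.functional as F

# Toy usage of the helper above: a linear classifier on random data.
model = nn.Linear(8, 3)
batch = [torch.randn(16, 8), torch.randint(0, 3, (16,))]
test_vars_change(model, F.cross_entropy, batch)  # raises only if some variable fails to change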
Example No. 3
def test_variables_change(model, inputs, outputs):
    try:
        assert_vars_change(model=model,
                           loss_fn=nn.MSELoss(),
                           optim=torch.optim.SGD(model.parameters(), lr=0.0001),
                           batch=[inputs, outputs],
                           device="cpu")
        print("SUCCESS: variables changed")
    except Exception as e:
        print("FAILED: ", e)
Example No. 4
def train_fashion_mnist(model, batch_size, lr, momentum, num_epochs,
                        optimization):
    """
    A function that trains a neural network with certain hyper-parameters.

    :param model: model's object (FashionConvNet in our case).
    :param batch_size: batch size of data to train on.
    :param lr: the learning rate for the optimizer algorithm.
    :param momentum: the momentum scalar for the optimizer algorithm (in case of SGD).
    :param num_epochs: number of epochs to run over all data points.
    :param optimization: optimization algorithm object.
    :return: Cross-Entropy loss.
    """

    model = model.to(device=torch.device("cpu"))  # run on cpu
    loader_train, loader_val, _ = load_FashionMNIST(
        batch_size=batch_size, ROOT='./data')  # load data.

    if optimization is SGD:  # SGD is the only optimizer here that takes momentum
        optimizer = optimization(model.parameters(), lr=lr, momentum=momentum)
    else:
        optimizer = optimization(model.parameters(), lr=lr)

    model.train()  # switch the model to training mode.
    loss = None
    print_every = 10  # log loss and validation accuracy every `print_every` batches.
    for e in range(num_epochs):
        for batch_idx, (X_tr, y_tr) in enumerate(loader_train):

            X_tr = X_tr.to(device=torch.device("cpu"), dtype=torch.float32)
            y_tr = y_tr.to(device=torch.device("cpu"), dtype=torch.long)

            # Assert that trainable parameters change during training.
            # Note: this check runs its own forward/backward pass and
            # optimizer step on this batch.
            assert_vars_change(model=model,
                               loss_fn=F.cross_entropy,
                               optim=optimizer,
                               batch=[X_tr, y_tr],
                               device="cpu")

            scores = model(X_tr)
            loss = F.cross_entropy(scores, y_tr)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if batch_idx % print_every == 0:
                print('Iteration %d, loss = %.4f' % (batch_idx, loss.item()))
                check_accuracy(loader_val, model)

    return loss
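A hypothetical invocation of this trainer (FashionConvNet and its constructor are assumptions about this codebase; the hyper-parameter values are illustrative):

# Hypothetical usage; FashionConvNet is assumed to be defined in this project.
model = FashionConvNet()
final_loss = train_fashion_mnist(model,
                                 batch_size=64,
                                 lr=1e-3,
                                 momentum=0.9,
                                 num_epochs=5,
                                 optimization=torch.optim.Adam)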
Example No. 5
    def test_all_variables_train(self):
        in_features = 10
        out_features = 1
        inputs = Variable(torch.randn(20, in_features))
        targets = Variable(torch.randn(20, out_features))
        batch = [inputs, targets]
        model = MLP(in_features=in_features, out_features=out_features,
                    n_hidden=32, num_layers=5)

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        assert_vars_change(
            model=model,
            loss_fn=F.mse_loss,
            optim=torch.optim.Adam(model.parameters()),
            batch=batch,
            device=device)
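MLP is not defined in this snippet; a minimal definition consistent with the constructor call above (an assumption, since the project's real MLP may differ):

import torch.nn as nn


class MLP(nn.Module):
    # Minimal MLP matching the constructor used above; illustrative only.
    def __init__(self, in_features, out_features, n_hidden, num_layers):
        super().__init__()
        layers = [nn.Linear(in_features, n_hidden), nn.ReLU()]
        for _ in range(num_layers - 2):
            layers += [nn.Linear(n_hidden, n_hidden), nn.ReLU()]
        layers.append(nn.Linear(n_hidden, out_features))
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        return self.net(x)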
Example No. 6
    def test_all_variables_train(self):
        in_features = 1
        out_features = 1
        inputs = Variable(torch.randn(100, in_features, 20))
        targets = Variable(torch.randn(100, out_features, 20))
        batch = [inputs, targets]
        model = TCN(
            in_channels=in_features,
            channels=[10, out_features],
            kernel_size=2,
        )
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        assert_vars_change(
            model=model,
            loss_fn=F.mse_loss,
            optim=torch.optim.Adam(model.parameters()),
            batch=batch,
            device=device)
Example No. 7
def test_variables_change_siren(rand_inputs, rand_targets):
    """Basic test just to make sure the graident is making it to all parts
    of the model.
    """

    batch = [rand_inputs, rand_targets]
    model = SIREN()

    # print("Our list of parameters", [np[0] for np in model.named_parameters()])

    # do they change after a training step?
    #  let's run a train step and see
    assert_vars_change(
        model=model,
        loss_fn=F.mse_loss,
        optim=torch.optim.Adam(model.parameters()),
        batch=batch,
        device="cuda:0",
    )
Example No. 8
def test_variables_change_fast_siren(rand_inputs, rand_targets):
    """Basic test just to make sure the graident is making it to all parts
    of the model.
    """

    # Note: this test builds its own tensors instead of using the fixtures.
    coords = torch.randn(3, 100, 2)
    rgb_vals = torch.randn(3, 100, 3)
    imgs = torch.randn(3, 3, 224, 224)

    batch = (coords, imgs), rgb_vals  # batch[0] is itself a (coords, imgs) tuple

    model = HyperSIRENPTL()

    # print("Our list of parameters", [np[0] for np in model.named_parameters()])

    # do they change after a training step?
    #  let's run a train step and see
    assert_vars_change(
        model=model,
        loss_fn=F.mse_loss,
        optim=torch.optim.Adam(model.parameters()),
        batch=batch,
        device="cuda:0",
    )
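Note the nested batch above: batch[0] is itself a (coords, imgs) tuple. Whether that works depends on the torchtest version handing batch[0] to the model unchanged; assuming it does, the model's forward has to unpack the tuple itself, which is presumably what HyperSIRENPTL does. A toy sketch of that pattern (TupleInputModel is hypothetical):

import torch
import torch.nn as nn


class TupleInputModel(nn.Module):
    # A model whose forward unpacks a (coords, imgs) tuple, matching the
    # batch layout used above. Illustrative only.
    def __init__(self):
        super().__init__()
        self.coord_net = nn.Linear(2, 3)
        self.img_net = nn.Conv2d(3, 3, kernel_size=1)

    def forward(self, inputs):
        coords, imgs = inputs                          # batch[0] arrives as one tuple
        rgb = self.coord_net(coords)                   # (3, 100, 3)
        bias = self.img_net(imgs).mean(dim=(1, 2, 3))  # (3,)
        return rgb + bias.view(-1, 1, 1)               # broadcast to (3, 100, 3)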
Example No. 9
import torch
from torch.autograd import Variable
from torchtest import assert_vars_change
import torch.nn.functional as F

import rl


state_size = 4
action_size = 2
batch_size = 20

states = Variable(torch.randn(batch_size, state_size))
q_values = Variable(torch.randn(batch_size, action_size))
batch = [states, q_values]
model = rl.DQNDense(state_size, action_size).to('cuda')

assert_vars_change(
    model=model,
    loss_fn=F.smooth_l1_loss,
    optim=torch.optim.Adam(model.parameters()),
    batch=batch,
    device='cuda')  # run the check on the same device the model was moved to
Example No. 10
"""
[1] Variables Change

"""
inputs = Variable(torch.randn(20, 20))
targets = Variable(torch.randint(0, 2, (20, ))).long()
batch = [inputs, targets]
model = nn.Linear(20, 2)

# what are the variables?
print('Our list of parameters', [np[0] for np in model.named_parameters()])

# do they change after a training step?
#  let's run a train step and see
tt.assert_vars_change(model=model,
                      loss_fn=F.cross_entropy,
                      optim=torch.optim.Adam(model.parameters()),
                      batch=batch)

# let's try to break this, so the test fails
params_to_train = [
    np[1] for np in model.named_parameters() if np[0] != 'bias'
]
# run test now
""" FAILURE
tt.assert_vars_change(
    model=model, 
    loss_fn=F.cross_entropy, 
    optim=torch.optim.Adam(params_to_train),
    batch=batch)
"""
Example No. 11
from torch.autograd import Variable
from torchtest import assert_vars_change
import torch.nn.functional as F
import torch
import torch.optim as optim
from FCC2 import FCC2

# assuming FCC2 expects a batch dimension and predicts 10 classes
inputs = Variable(torch.randn(1, 784))
targets = Variable(torch.randint(0, 10, (1, ))).long()
batch = [inputs, targets]
model = FCC2()

assert_vars_change(model=model,
                   loss_fn=F.nll_loss,
                   optim=optim.SGD(model.parameters(), lr=1e-4),
                   batch=batch,
                   device='cpu')
Example No. 12
"""
[1] Variables Change
"""
inputs = Variable(torch.randn(20, 20))
targets = Variable(torch.randint(0, 2, (20,))).long()
batch = [inputs, targets]
model = nn.Linear(20, 2)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# what are the variables?
print('Our list of parameters', [np[0] for np in model.named_parameters()])

# do they change after a training step?
#  let's run a train step and see
tt.assert_vars_change(
    model=model,
    loss_fn=F.cross_entropy,
    optim=torch.optim.Adam(model.parameters()),
    batch=batch,
    device=device)

# let's try to break this, so the test fails
params_to_train = [np[1] for np in model.named_parameters() if np[0] != 'bias']
# run test now: this call is expected to FAIL, because 'bias' is excluded
# from the optimizer (torchtest raises VariablesChangeException)
tt.assert_vars_change(
    model=model,
    loss_fn=F.cross_entropy,
    optim=torch.optim.Adam(params_to_train),
    batch=batch,
    device=device)
Example No. 13
    if args.resume:
        # Load checkpoint.
        print('==> Resuming from checkpoint..')
        assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
        checkpoint = torch.load('./checkpoint/ckpt.pth')
        net.load_state_dict(checkpoint['net'])
        best_acc = checkpoint['acc']
        start_epoch = checkpoint['epoch']

    if args.test:
        # testing model
        print('==> Testing model and train process...')

        torchtest.assert_vars_change(
            model=net,
            loss_fn=criterion,
            optim=optimizer,
            batch=test_ds,  # torchtest expects a single [inputs, targets] batch here
            device=device)

        torchtest.test_suite(
            model=net,
            loss_fn=criterion,
            optim=optimizer,
            batch=test_ds,
            device=device,
            test_nan_vals=True,
            test_vars_change=True,
            # non_train_vars=None,
            test_inf_vals=True
        )
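If test_ds here is a Dataset rather than a single [inputs, targets] pair, the batch argument may need one concrete batch. One way to produce it (an assumption about this codebase; the loader settings are illustrative):

# Hypothetical: draw one (inputs, targets) pair from the test dataset,
# since torchtest operates on a single batch rather than a whole Dataset.
loader = torch.utils.data.DataLoader(test_ds, batch_size=32, shuffle=False)
inputs, targets = next(iter(loader))
batch = [inputs, targets]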