Code Example #1
def test_linear_xeloss_backward():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20))
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    mytorch_criterion = CrossEntropyLoss()
    test_forward_backward(mytorch_mlp, mytorch_criterion=mytorch_criterion)
    return True
Code Example #2
File: mnist.py  Project: bekiekB1/11785CMU_Coursework
def mnist(train_x, train_y, val_x, val_y):
    """Problem 3.1: Initialize objects and start training
    You won't need to call this function yourself.
    (Data is provided by autograder)
    
    Args:
        train_x (np.array): training data (55000, 784) 
        train_y (np.array): training labels (55000,) 
        val_x (np.array): validation data (5000, 784)
        val_y (np.array): validation labels (5000,)
    Returns:
        val_accuracies (list(float)): List of accuracies per validation round
                                      (num_epochs,)
    """
    # TODO: Initialize an MLP, optimizer, and criterion
    mnist_model = Sequential(Linear(784, 20), ReLU(), Linear(20, 10))
    #mnist_model = Sequential(Linear(784,20),BatchNorm1d(20),ReLU(),Linear(20,10))
    criterion = CrossEntropyLoss()
    mdl_optimizer = SGD(mnist_model.parameters(), momentum=0.9, lr=0.1)

    # TODO: Call training routine (make sure to write it below)
    val_accuracies = train(mnist_model, mdl_optimizer, criterion, train_x,
                           train_y, val_x, val_y)

    return val_accuracies
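
The train routine called above is not shown on this page. Below is a minimal sketch of what it might look like, assuming the PyTorch-style mytorch API used throughout these examples (model.train()/model.eval(), optimizer.zero_grad(), loss.backward(), optimizer.step()) and that Tensor wraps NumPy arrays as in Code Example #4. The batch size, epoch count, and once-per-epoch validation schedule are assumptions, not taken from the source.

def train(model, optimizer, criterion, train_x, train_y, val_x, val_y,
          num_epochs=3, batch_size=100):
    """Hypothetical training loop. num_epochs, batch_size, and the
    once-per-epoch validation schedule are assumptions."""
    val_accuracies = []
    for epoch in range(num_epochs):
        model.train()
        # shuffle the training data each epoch
        order = np.random.permutation(len(train_x))
        shuffled_x, shuffled_y = train_x[order], train_y[order]
        for start in range(0, len(shuffled_x), batch_size):
            batch_x = Tensor(shuffled_x[start:start + batch_size])
            batch_y = Tensor(shuffled_y[start:start + batch_size])
            optimizer.zero_grad()
            out = model(batch_x)
            loss = criterion(out, batch_y)
            loss.backward()
            optimizer.step()
        # validate once per epoch and record accuracy
        model.eval()
        preds = np.argmax(model(Tensor(val_x)).data, axis=1)
        val_accuracies.append(float((preds == val_y).mean()))
    return val_accuracies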
Code Example #3
def test_linear_adam():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20))
    mytorch_optimizer = Adam(mytorch_mlp.parameters())
    mytorch_criterion = CrossEntropyLoss()
    return test_step(mytorch_mlp,
                     mytorch_optimizer,
                     5,
                     5,
                     mytorch_criterion=mytorch_criterion)
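
test_step is an autograder helper that is not shown on this page. A plausible reading is that the two 5s are the number of training steps and evaluation steps to run. The sketch below only illustrates that pattern using the names already defined in these examples; the real helper is assumed to also build an equivalent PyTorch model and compare outputs, gradients, and updated weights, which is omitted here.

def test_step(mytorch_model, mytorch_optimizer, train_steps, eval_steps,
              mytorch_criterion=None):
    """Illustrative sketch only: a few optimizer steps in train mode followed
    by a few forward passes in eval mode on one random batch. The comparison
    against a reference PyTorch model done by the real helper is omitted."""
    x, y = generate_dataset_for_mytorch_model(mytorch_model, 4)
    x, y = Tensor(x), Tensor(y)
    mytorch_model.train()
    for _ in range(train_steps):
        mytorch_optimizer.zero_grad()
        out = mytorch_model(x)
        loss = mytorch_criterion(out, y)
        loss.backward()
        mytorch_optimizer.step()
    mytorch_model.eval()
    for _ in range(eval_steps):
        mytorch_model(x)
    return True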
Code Example #4
File: test_dropout.py  Project: debdasghosh/mytorch
def test_big_model_step():
    np.random.seed(11785)
    
    # run a big model
    model = Sequential(Linear(10, 15), ReLU(), Dropout(p=0.2), 
                       Linear(15, 20), ReLU(), Dropout(p=0.1))
    x, y = generate_dataset_for_mytorch_model(model, 4)
    x, y = Tensor(x), Tensor(y)
    criterion = CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-08)
    
    # check output correct
    out = model(x)
    test_out = load_numpy_array('autograder/hw1_bonus_autograder/outputs/big_output.npy')

    if not assertions_all(out.data, test_out, "test_big_model_step_out", 1e-5, 1e-6):
        return False
    
    # run backward
    loss = criterion(out, y)
    loss.backward()
    
    # check params are correct (sorry this is ugly)
    assert model[0].weight.grad is not None, "Linear layer must have gradient."
    assert model[0].weight.grad.grad is None, "Final gradient tensor must not have its own gradient"
    assert model[0].weight.grad.grad_fn is None, "Final gradient tensor must not have its own grad function"
    assert model[0].weight.requires_grad, "Weight tensor must have requires_grad==True"
    assert model[0].weight.is_parameter, "Weight tensor must be marked as a parameter tensor"
    assert model[3].weight.grad is not None, "Linear layer must have gradient."
    assert model[3].weight.grad.grad is None, "Final gradient tensor must not have its own gradient"
    assert model[3].weight.grad.grad_fn is None, "Final gradient tensor must not have its own grad function"
    assert model[3].weight.requires_grad, "Weight tensor must have requires_grad==True"
    assert model[3].weight.is_parameter, "Weight tensor must be marked as a parameter tensor"
    
    # check gradient for linear layer at idx 0 is correct
    test_grad = load_numpy_array('autograder/hw1_bonus_autograder/outputs/big_grad.npy')
    if not assertions_all(model[0].weight.grad.data, test_grad, "test_big_model_grad_0", 1e-5, 1e-6):
        return False
    
    # check gradient for linear layer at idx 3 is correct
    test_grad = load_numpy_array('autograder/hw1_bonus_autograder/outputs/big_grad_3.npy')
    if not assertions_all(model[3].weight.grad.data, test_grad, "test_big_model_grad_3", 1e-5, 1e-6):
        return False

    # weight update with adam
    optimizer.step()
    
    # check updated weight values
    assert model[0].weight.requires_grad, "Weight tensor must have requires_grad==True"
    assert model[0].weight.is_parameter, "Weight tensor must be marked as a parameter tensor"
    test_weights_3 = load_numpy_array('autograder/hw1_bonus_autograder/outputs/big_weight_update_3.npy')
    test_weights_0 = load_numpy_array('autograder/hw1_bonus_autograder/outputs/big_weight_update_0.npy')
    
    return assertions_all(model[0].weight.data, test_weights_0, "test_big_weight_update_0", 1e-5, 1e-6) and \
        assertions_all(model[3].weight.data, test_weights_3, "test_big_weight_update_3", 1e-5, 1e-6)
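
Code Example #4 exercises an Adam step with lr=1e-3, betas=(0.9, 0.999), eps=1e-8. For reference, here is a self-contained NumPy sketch of the standard bias-corrected Adam rule that such an optimizer is assumed to implement for each parameter; whether the mytorch Adam matches this exactly is an assumption, and the values the test actually checks come from the saved .npy reference outputs.

import numpy as np

def adam_update(param, grad, m, v, t,
                lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    """One standard Adam step on a single parameter array (t counts steps from 1)."""
    m = beta1 * m + (1 - beta1) * grad          # first-moment estimate
    v = beta2 * v + (1 - beta2) * grad ** 2     # second-moment estimate
    m_hat = m / (1 - beta1 ** t)                # bias correction
    v_hat = v / (1 - beta2 ** t)
    param = param - lr * m_hat / (np.sqrt(v_hat) + eps)
    return param, m, v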
Code Example #5
def test_big_linear_relu_xeloss_momentum():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU(), Linear(20, 30), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters(), momentum=0.9)
    mytorch_criterion = CrossEntropyLoss()
    test_step(mytorch_mlp,
              mytorch_optimizer,
              5,
              5,
              mytorch_criterion=mytorch_criterion)
    return True
Code Example #6
def test_big_linear_bn_relu_xeloss_train_eval():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), BatchNorm1d(20), ReLU(),
                             Linear(20, 30), BatchNorm1d(30), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    mytorch_criterion = CrossEntropyLoss()
    test_step(mytorch_mlp,
              mytorch_optimizer,
              5,
              5,
              mytorch_criterion=mytorch_criterion)
    return True
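
Code Examples #6 and #8 check behavior across train and eval mode, and BatchNorm1d is the layer whose behavior differs between the two. For reference, a NumPy sketch of the standard BatchNorm1d forward, assuming the usual running-statistics bookkeeping; the momentum and eps values are assumptions.

import numpy as np

def batchnorm1d_forward(x, gamma, beta, running_mean, running_var,
                        training, momentum=0.1, eps=1e-5):
    """Standard BatchNorm1d forward: batch statistics in train mode,
    stored running statistics in eval mode."""
    if training:
        mean = x.mean(axis=0)
        var = x.var(axis=0)
        # update running statistics for later use in eval mode
        running_mean = (1 - momentum) * running_mean + momentum * mean
        running_var = (1 - momentum) * running_var + momentum * var
    else:
        mean, var = running_mean, running_var
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma * x_hat + beta, running_mean, running_var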
Code Example #7
def test_big_model_adam():
    np.random.seed(11785)
    # mytorch_mlp = Sequential(Linear(10, 20), BatchNorm1d(20), ReLU(),
    #  Linear(20, 30), BatchNorm1d(30), ReLU())
    mytorch_mlp = Sequential(Linear(10, 20), ReLU(), Linear(20, 30), ReLU())
    mytorch_optimizer = Adam(mytorch_mlp.parameters())
    mytorch_criterion = CrossEntropyLoss()
    return test_step(mytorch_mlp,
                     mytorch_optimizer,
                     5,
                     5,
                     mytorch_criterion=mytorch_criterion)
Code Example #8
def test_linear_batchnorm_relu_train_eval():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), BatchNorm1d(20), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    test_step(mytorch_mlp, mytorch_optimizer, 5, 5)
    return True
Code Example #9
def test_big_linear_relu_step():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU(), Linear(20, 30), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    test_step(mytorch_mlp, mytorch_optimizer, 5, 5)
    return True
Code Example #10
def test_linear_momentum():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters(), momentum=0.9)
    test_step(mytorch_mlp, mytorch_optimizer, 5, 0)
    return True
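
Code Examples #2, #5, and #10 all use SGD with momentum=0.9. For reference, a NumPy sketch of a PyTorch-style momentum update, which the mytorch SGD is assumed to follow; other formulations fold the learning rate into the velocity instead, so treat this as an illustration rather than the exact rule the autograder checks.

def sgd_momentum_update(param, grad, velocity, lr=0.1, momentum=0.9):
    """One PyTorch-style SGD-with-momentum step for a single parameter array."""
    velocity = momentum * velocity + grad   # accumulate velocity
    return param - lr * velocity, velocity  # descend along the velocity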