Code Example #1
File: test_mlp.py Project: loevlie/MyTorch
def test_big_linear_relu_forward():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU(), Linear(20, 30), ReLU())
    test_forward(mytorch_mlp)
    return True
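The test_forward helper is not shown on this page. As a rough idea of what such a forward-comparison check typically does, here is a minimal sketch that feeds the same random batch to the MyTorch model and a matching PyTorch reference and compares outputs; the helper name, the import path for Tensor, the assumption that the reference already carries identical weights, and the tolerances are all guesses, not the project's actual code.

import numpy as np
import torch

from mytorch.tensor import Tensor   # import path is a guess at the project layout

def forward_comparison_sketch(mytorch_mlp, pytorch_mlp, in_features, batch_size=4):
    # Hypothetical check: run one random batch through both models and
    # compare outputs element-wise. Assumes pytorch_mlp already holds the
    # same weights as mytorch_mlp (weight copying omitted here).
    x = np.random.randn(batch_size, in_features)
    mytorch_out = mytorch_mlp(Tensor(x))                          # MyTorch forward
    pytorch_out = pytorch_mlp(torch.tensor(x, dtype=torch.float32))
    assert np.allclose(mytorch_out.data, pytorch_out.detach().numpy(),
                       rtol=1e-5, atol=1e-6), "forward outputs do not match"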
Code Example #2
File: test_mlp.py Project: loevlie/MyTorch
def test_linear_momentum():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters(), momentum=0.9)
    test_step(mytorch_mlp, mytorch_optimizer, 5, 0)
    return True
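The momentum=0.9 argument is what this test exercises. For reference, a minimal NumPy sketch of one common SGD-with-momentum update (the formulation PyTorch uses by default, with zero dampening); variable names are illustrative and this is not MyTorch's implementation.

import numpy as np

def sgd_momentum_step(param, grad, velocity, lr=0.01, momentum=0.9):
    # v <- momentum * v + grad ;  param <- param - lr * v
    velocity = momentum * velocity + grad
    param = param - lr * velocity
    return param, velocity

# toy usage
w, v = np.zeros(3), np.zeros(3)
w, v = sgd_momentum_step(w, np.array([1.0, -2.0, 0.5]), v)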
Code Example #3
File: test_mlp.py Project: loevlie/MyTorch
def test_linear_backward():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20))
    test_forward_backward(mytorch_mlp)
    return True
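test_forward_backward presumably checks gradients as well as outputs. A heavily hedged sketch of a gradient comparison against PyTorch autograd, assuming matching weights and weight layout in both models, a MyTorch Tensor that supports .sum(), and a simple sum-based loss; every name below is illustrative.

import numpy as np
import torch

def backward_comparison_sketch(mytorch_mlp, pytorch_mlp, in_features, batch_size=4):
    x = np.random.randn(batch_size, in_features)
    # MyTorch side: forward, reduce to a scalar, backprop
    my_out = mytorch_mlp(Tensor(x))       # Tensor is MyTorch's autograd tensor
    my_out.sum().backward()               # .sum() on the MyTorch tensor is assumed
    # PyTorch side: the same computation through autograd
    pt_x = torch.tensor(x, dtype=torch.float32)
    pytorch_mlp(pt_x).sum().backward()
    # compare the gradient of the first Linear layer's weight
    assert np.allclose(mytorch_mlp[0].weight.grad.data,
                       pytorch_mlp[0].weight.grad.numpy(),
                       rtol=1e-5, atol=1e-6), "weight gradients do not match"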
Code Example #4
File: test_mlp.py Project: loevlie/MyTorch
def test_big_linear_batchnorm_relu_train_eval():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), BatchNorm1d(20), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    test_step(mytorch_mlp, mytorch_optimizer, 5, 5)
    return True
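The two trailing arguments to test_step here (5, 5) appear to be the number of training steps followed by the number of eval steps, which is exactly where BatchNorm1d must switch behavior: batch statistics in train mode, running statistics in eval mode. A simplified NumPy sketch of that distinction; variance conventions (biased vs. unbiased) differ between implementations, and this is not the project's code.

import numpy as np

def batchnorm1d_forward(x, gamma, beta, running_mean, running_var,
                        training, momentum=0.1, eps=1e-5):
    if training:
        mean, var = x.mean(axis=0), x.var(axis=0)
        # exponential moving average of batch statistics
        running_mean[:] = (1 - momentum) * running_mean + momentum * mean
        running_var[:] = (1 - momentum) * running_var + momentum * var
    else:
        mean, var = running_mean, running_var   # frozen statistics at eval time
    x_hat = (x - mean) / np.sqrt(var + eps)
    return gamma * x_hat + beta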
Code Example #5
def test_linear_batchnorm_relu_backward_train():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), BatchNorm1d(20), ReLU())
    test_forward_backward(mytorch_mlp)
    return True
Code Example #6
def test_big_linear_relu_step():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU(), Linear(20, 30), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    test_step(mytorch_mlp, mytorch_optimizer, 5, 5)
    return True
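The test_step helper itself is not shown. A minimal sketch of the train-then-eval driver such a helper could implement, assuming the MyTorch model exposes train()/eval() and the optimizer exposes zero_grad(); the names, signature, and batch format are guesses, not the project's actual API.

def test_step_sketch(model, optimizer, criterion, train_batches, eval_batches):
    # Hypothetical driver: a few optimizer steps in train mode,
    # then a few forward passes in eval mode.
    model.train()
    for x, y in train_batches:
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
    model.eval()
    for x, y in eval_batches:
        _ = model(x)          # eval-mode forward only; no parameter updates
    return True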
Code Example #7
def test_linear_forward():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20))
    test_forward(mytorch_mlp)
    check_model_param_settings(mytorch_mlp)
    return True
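check_model_param_settings is also not shown; judging from the assertions in code example #8 below, it likely verifies per-parameter flags on every layer that owns a weight. A hedged sketch of such a check; the "layers" attribute used to walk the Sequential is a guess at MyTorch's internals.

def check_param_settings_sketch(model):
    # Hypothetical re-creation of the flag check seen in example #8.
    for layer in model.layers:
        weight = getattr(layer, "weight", None)
        if weight is None:
            continue          # activation layers such as ReLU have no weight
        assert weight.requires_grad, "Weight tensor must have requires_grad==True"
        assert weight.is_parameter, "Weight tensor must be marked as a parameter tensor"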
Code Example #8
def test_big_model_step():
    np.random.seed(11785)

    # run a big model
    model = Sequential(Linear(10, 15), ReLU(), Dropout(p=0.2), Linear(15, 20),
                       ReLU(), Dropout(p=0.1))
    x, y = generate_dataset_for_mytorch_model(model, 4)
    x, y = Tensor(x), Tensor(y)
    criterion = CrossEntropyLoss()
    optimizer = Adam(model.parameters(),
                     lr=1e-3,
                     betas=(0.9, 0.999),
                     eps=1e-08)

    # check output correct
    out = model(x)
    test_out = load_numpy_array(
        'autograder/hw1_bonus_autograder/outputs/big_output.npy')

    if not assertions_all(out.data, test_out, "test_big_model_step_out", 1e-5,
                          1e-6):
        return False

    # run backward
    loss = criterion(out, y)
    loss.backward()

    # check params are correct (sorry this is ugly)
    assert model[0].weight.grad is not None, "Linear layer must have gradient."
    assert model[0].weight.grad.grad is None, "Final gradient tensor must not have its own gradient"
    assert model[0].weight.grad.grad_fn is None, "Final gradient tensor must not have its own grad function"
    assert model[0].weight.requires_grad, "Weight tensor must have requires_grad==True"
    assert model[0].weight.is_parameter, "Weight tensor must be marked as a parameter tensor"
    assert model[3].weight.grad is not None, "Linear layer must have gradient."
    assert model[3].weight.grad.grad is None, "Final gradient tensor must not have its own gradient"
    assert model[3].weight.grad.grad_fn is None, "Final gradient tensor must not have its own grad function"
    assert model[3].weight.requires_grad, "Weight tensor must have requires_grad==True"
    assert model[3].weight.is_parameter, "Weight tensor must be marked as a parameter tensor"

    # check gradient for linear layer at idx 0 is correct
    test_grad = load_numpy_array(
        'autograder/hw1_bonus_autograder/outputs/big_grad.npy')
    if not assertions_all(model[0].weight.grad.data, test_grad,
                          "test_big_model_grad_0", 1e-5, 1e-6):
        return False

    # check gradient for linear layer at idx 3 is correct
    test_grad = load_numpy_array(
        'autograder/hw1_bonus_autograder/outputs/big_grad_3.npy')
    if not assertions_all(model[3].weight.grad.data, test_grad,
                          "test_big_model_grad_3", 1e-5, 1e-6):
        return False

    # weight update with adam
    optimizer.step()

    # check updated weight values
    assert model[0].weight.requires_grad, "Weight tensor must have requires_grad==True"
    assert model[0].weight.is_parameter, "Weight tensor must be marked as a parameter tensor"

    test_weights_3 = load_numpy_array(
        'autograder/hw1_bonus_autograder/outputs/big_weight_update_3.npy')
    test_weights_0 = load_numpy_array(
        'autograder/hw1_bonus_autograder/outputs/big_weight_update_0.npy')

    return assertions_all(model[0].weight.data, test_weights_0, "test_big_weight_update_0", 1e-5, 1e-6) and \
        assertions_all(model[3].weight.data, test_weights_3, "test_big_weight_update_3", 1e-5, 1e-6)
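For reference, the update that optimizer.step() is expected to apply here is standard Adam. A plain NumPy sketch of the Kingma and Ba formulation with bias correction, using the same lr, betas, and eps defaults as the test; this is the textbook rule, not the project's implementation.

import numpy as np

def adam_step(param, grad, m, v, t, lr=1e-3, betas=(0.9, 0.999), eps=1e-8):
    b1, b2 = betas
    m = b1 * m + (1 - b1) * grad            # first-moment (mean) estimate
    v = b2 * v + (1 - b2) * grad ** 2       # second-moment (uncentered variance) estimate
    m_hat = m / (1 - b1 ** t)               # bias correction, t is the 1-indexed step count
    v_hat = v / (1 - b2 ** t)
    param = param - lr * m_hat / (np.sqrt(v_hat) + eps)
    return param, m, v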