# Imports assumed for the snippets below; module paths follow the usual mytorch
# handout layout and may need adjusting to your repo. Helpers such as test_step,
# test_forward_backward, CNN, and train come from the surrounding test harness.
import numpy as np
import torch
import torch.nn as nn

from mytorch.nn.activations import ReLU, Sigmoid, Tanh
from mytorch.nn.batchnorm import BatchNorm1d
from mytorch.nn.conv import Conv1d, Flatten
from mytorch.nn.linear import Linear
from mytorch.nn.loss import CrossEntropyLoss
from mytorch.nn.sequential import Sequential
from mytorch.optim.sgd import SGD


def test_linear_xeloss_backward():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20))
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    mytorch_criterion = CrossEntropyLoss()
    test_forward_backward(mytorch_mlp, mytorch_criterion=mytorch_criterion)
    return True
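
These tests score the model with a CrossEntropyLoss module. For reference, a minimal NumPy sketch of the softmax cross entropy they exercise, using the log-sum-exp trick for numerical stability (the function name and signature here are illustrative, not the handout's API):

def cross_entropy(logits, labels):
    """Mean softmax cross entropy; logits (N, C), labels (N,) integer class ids."""
    # log-sum-exp trick: subtract the row max so exp() cannot overflow
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # negative log-probability of the true class, averaged over the batch
    n = labels.shape[0]
    return -log_probs[np.arange(n), labels].mean()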
def mnist(train_x, train_y, val_x, val_y):
    """Problem 3.1: Initialize objects and start training
    You won't need to call this function yourself.
    (Data is provided by autograder)
    
    Args:
        train_x (np.array): training data (55000, 784) 
        train_y (np.array): training labels (55000,) 
        val_x (np.array): validation data (5000, 784)
        val_y (np.array): validation labels (5000,)
    Returns:
        val_accuracies (list(float)): List of accuracies per validation round
                                      (num_epochs,)
    """
    # TODO: Initialize an MLP, optimizer, and criterion
    mnist_model = Sequential(Linear(784, 20), ReLU(), Linear(20, 10))
    # mnist_model = Sequential(Linear(784, 20), BatchNorm1d(20), ReLU(), Linear(20, 10))
    criterion = CrossEntropyLoss()
    mdl_optimizer = SGD(mnist_model.parameters(), momentum=0.9, lr=0.1)

    # TODO: Call training routine (make sure to write it below)
    val_accuracies = train(mnist_model, mdl_optimizer, criterion, train_x,
                           train_y, val_x, val_y)

    return val_accuracies
Example #3
def mnist(train_x, train_y, val_x, val_y):
    """Problem 3.1: Initialize objects and start training
    You won't need to call this function yourself.
    (Data is provided by autograder)
    
    Args:
        train_x (np.array): training data (55000, 784) 
        train_y (np.array): training labels (55000,) 
        val_x (np.array): validation data (5000, 784)
        val_y (np.array): validation labels (5000,)
    Returns:
        val_accuracies (list(float)): List of accuracies per validation round
                                      (num_epochs,)
    """
    # TODO: Initialize an MLP, optimizer, and criterion
    model = sequential.Sequential(linear.Linear(784, 20), activations.ReLU(),
                                  linear.Linear(20, 10))
    optimizer = SGD(model.parameters(), lr=0.1)
    criterion = loss.CrossEntropyLoss()
    # TODO: Call training routine (make sure to write it below)
    val_accuracies = train(model, optimizer, criterion, tensor.Tensor(train_x),
                           tensor.Tensor(train_y), tensor.Tensor(val_x),
                           tensor.Tensor(val_y))

    return val_accuracies
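
Both mnist examples call a train routine that their TODO asks you to write below the function. A minimal sketch of that loop under the docstring's contract, assuming raw np.array inputs, a Tensor wrapper, and the PyTorch-style model.train()/model.eval()/optimizer.zero_grad() API; the batch size, epoch count, and once-per-epoch validation are assumptions, not the handout's spec:

def train(model, optimizer, criterion, train_x, train_y, val_x, val_y,
          num_epochs=3, batch_size=100):
    val_accuracies = []
    for epoch in range(num_epochs):
        model.train()
        order = np.random.permutation(train_x.shape[0])   # reshuffle each epoch
        for i in range(0, order.size, batch_size):
            batch = order[i:i + batch_size]
            optimizer.zero_grad()
            out = model(Tensor(train_x[batch]))
            loss = criterion(out, Tensor(train_y[batch]))
            loss.backward()
            optimizer.step()
        model.eval()                                       # use running BN stats
        preds = np.argmax(model(Tensor(val_x)).data, axis=1)
        val_accuracies.append(float((preds == val_y).mean()))
    return val_accuracies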
def test_big_linear_relu_xeloss_momentum():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU(), Linear(20, 30), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters(), momentum=0.9)
    mytorch_criterion = CrossEntropyLoss()
    test_step(mytorch_mlp,
              mytorch_optimizer,
              5,
              5,
              mytorch_criterion=mytorch_criterion)
    return True
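
The momentum tests exercise SGD's velocity update. A minimal sketch of the classic heavy-ball rule they check, v <- momentum * v - lr * grad followed by w <- w + v (attribute names here are illustrative, not the handout's):

class SGD:
    def __init__(self, params, lr=0.001, momentum=0.0):
        self.params = list(params)
        self.lr = lr
        self.momentum = momentum
        # one velocity buffer per parameter, zero-initialized
        self.velocities = [np.zeros_like(p.data) for p in self.params]

    def step(self):
        for p, v in zip(self.params, self.velocities):
            v *= self.momentum              # decay previous velocity
            v -= self.lr * p.grad.data      # fold in the current gradient step
            p.data += v                     # apply the update in place

    def zero_grad(self):
        for p in self.params:
            p.grad = None                   # convention assumed; some impls zero instead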
def test_big_linear_bn_relu_xeloss_train_eval():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), BatchNorm1d(20), ReLU(),
                             Linear(20, 30), BatchNorm1d(30), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    mytorch_criterion = CrossEntropyLoss()
    test_step(mytorch_mlp,
              mytorch_optimizer,
              5,
              5,
              mytorch_criterion=mytorch_criterion)
    return True
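
The *_train_eval tests toggle the model between training and inference; what they pin down is that BatchNorm1d normalizes with batch statistics in train mode but with running averages in eval mode. A minimal NumPy sketch of that forward pass (attribute names, the 0.1 momentum, and the biased-variance running update are assumptions; the handout may use an unbiased variance):

class BatchNorm1d:
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        self.eps = eps
        self.momentum = momentum
        self.gamma = np.ones(num_features)      # learnable scale
        self.beta = np.zeros(num_features)      # learnable shift
        self.running_mean = np.zeros(num_features)
        self.running_var = np.ones(num_features)
        self.is_train = True

    def forward(self, x):
        if self.is_train:
            mean, var = x.mean(axis=0), x.var(axis=0)
            # update running stats with an exponential moving average
            self.running_mean = (1 - self.momentum) * self.running_mean + self.momentum * mean
            self.running_var = (1 - self.momentum) * self.running_var + self.momentum * var
        else:
            mean, var = self.running_mean, self.running_var
        x_hat = (x - mean) / np.sqrt(var + self.eps)
        return self.gamma * x_hat + self.beta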
Example #6
def test_cnn_step():
    np.random.seed(11785)

    mytorch_cnn = CNN()
    mytorch_optimizer = SGD(mytorch_cnn.parameters())

    pytorch_cnn = nn.Sequential(nn.Conv1d(24, 56, 5, 1), nn.Tanh(),
                                nn.Conv1d(56, 28, 6, 2), nn.ReLU(),
                                nn.Conv1d(28, 14, 2, 2), nn.Sigmoid(),
                                nn.Flatten(), nn.Linear(13 * 14, 10))

    assert len(pytorch_cnn) == len(
        mytorch_cnn.layers.layers
    ), "Check number of modules in model; must be same as reference."

    # check that each mytorch layer matches the reference layer's type
    type_pairs = [(nn.Conv1d, Conv1d), (nn.Linear, Linear), (nn.ReLU, ReLU),
                  (nn.Sigmoid, Sigmoid), (nn.Flatten, Flatten), (nn.Tanh, Tanh)]
    for idx, layer in enumerate(pytorch_cnn):
        for pytorch_type, mytorch_type in type_pairs:
            if isinstance(layer, pytorch_type):
                assert isinstance(
                    mytorch_cnn.layers.layers[idx],
                    mytorch_type), "Incorrect layer type at index " + str(idx)

        # Copy mytorch's initialized weights into the reference model
        if isinstance(layer, (nn.Conv1d, nn.Linear)):
            my_layer = mytorch_cnn.layers.layers[idx]
            layer.weight = nn.Parameter(torch.tensor(my_layer.weight.data))
            layer.bias = nn.Parameter(torch.tensor(my_layer.bias.data))

    return test_step(mytorch_cnn.layers,
                     mytorch_optimizer,
                     5,
                     pytorch_model=pytorch_cnn)
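
The Linear(13 * 14, 10) at the end of pytorch_cnn is determined by the length that survives the three Conv1d layers, via the standard unpadded output-size rule (input_size - kernel_size) // stride + 1. A quick check, assuming an input length of 60 (the value consistent with the 13 * 14 flatten size):

def conv1d_output_size(input_size, kernel_size, stride):
    # standard formula for an unpadded, undilated 1-D convolution
    return (input_size - kernel_size) // stride + 1

length = 60                                    # assumed input length
length = conv1d_output_size(length, 5, 1)      # -> 56
length = conv1d_output_size(length, 6, 2)      # -> 26
length = conv1d_output_size(length, 2, 2)      # -> 13, so Flatten sees 13 * 14 features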
def test_linear_batchnorm_relu_train_eval():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), BatchNorm1d(20), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    test_step(mytorch_mlp, mytorch_optimizer, 5, 5)
    return True
def test_big_linear_relu_step():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU(), Linear(20, 30), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters())
    test_step(mytorch_mlp, mytorch_optimizer, 5, 5)
    return True
def test_linear_momentum():
    np.random.seed(11785)
    mytorch_mlp = Sequential(Linear(10, 20), ReLU())
    mytorch_optimizer = SGD(mytorch_mlp.parameters(), momentum=0.9)
    test_step(mytorch_mlp, mytorch_optimizer, 5, 0)
    return True