Example #1
import pickle
import random

import matplotlib.pyplot as plt
import numpy as np

import utils
from task2a import pre_process_images  # assumed location; defined as in Example #2


def predictAndDisplay():
    """Test function: predict a random handwritten digit from the dataset and display it."""

    X_train, Y_train, X_val, Y_val = utils.load_full_mnist()

    X_train = pre_process_images(X_train)

    # Pick a random training image; randint's upper bound is inclusive,
    # so subtract 1 to stay inside the array.
    index = random.randint(0, X_train.shape[0] - 1)

    # Batch of one image for the forward pass, and a flat copy for display.
    image = np.array([X_train[index]])
    printable = X_train[index]

    # Load the previously trained, pickled model.
    with open('model3a.sav', 'rb') as f:
        model = pickle.load(f)

    # Forward pass through the trained model; the prediction is the
    # class with the highest output activation.
    a = model.forward(image)

    predicted = np.argmax(a)

    # Drop the bias feature appended by pre_process_images, then reshape
    # the remaining 784 pixels back into a 28x28 image.
    image_2d = printable[:-1].reshape(28, 28)

    label = f"Predicted: {predicted}"

    plt.imshow(image_2d)
    plt.title(label)
    plt.show()
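
This example loads model3a.sav but never shows how it was written; a minimal sketch of the assumed save step (the filename matches the load above, trained_model is a hypothetical variable holding a fitted model):

import pickle

# Persist a trained model so predictAndDisplay can reload it later.
with open('model3a.sav', 'wb') as f:
    pickle.dump(trained_model, f)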
Example #2
import numpy as np

import utils


def pre_process_images(X: np.ndarray) -> np.ndarray:
    """
    Args:
        X: images of shape [batch size, 784] with pixel values in [0, 255]
    Returns:
        X: images of shape [batch size, 785], normalized as described in task2a
    """
    assert X.shape[1] == 784,\
        f"X.shape[1]: {X.shape[1]}, should be 784"
    # Compute mean and std over the full training set, so that training,
    # validation and test images are all normalized with the same statistics
    new_X = np.zeros((X.shape[0], X.shape[1] + 1))
    X_train, _, _, _ = utils.load_full_mnist()
    mean = np.mean(X_train)
    std = np.std(X_train)
    # Normalizing
    new_X[:, :-1] = (X - mean) / std
    new_X[:, -1] = 1.0  # Bias Trick
    return new_X
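
For intuition, a minimal standalone sketch (toy shapes and values, chosen for illustration; the real function expects 784 pixels per image) of what the normalization plus bias trick produces:

import numpy as np

# Toy batch: two "images" of four pixels each.
X = np.array([[0., 255., 128., 64.],
              [255., 0., 64., 128.]])
mean, std = X.mean(), X.std()
new_X = np.hstack([(X - mean) / std, np.ones((X.shape[0], 1))])
print(new_X.shape)  # (2, 5): four normalized pixels plus the bias column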
Example #3
        accuracy_train = calculate_accuracy(X_train, Y_train, self.model)
        accuracy_val = calculate_accuracy(X_val, Y_val, self.model)
        return loss, accuracy_train, accuracy_val


if __name__ == "__main__":
    # hyperparameters DO NOT CHANGE IF NOT SPECIFIED IN ASSIGNMENT TEXT
    num_epochs = 50
    learning_rate = 0.01
    batch_size = 128
    l2_reg_lambda = 0
    shuffle_dataset = True

    # Load dataset
    X_train, Y_train, X_val, Y_val = utils.load_full_mnist()
    X_train = pre_process_images(X_train)
    X_val = pre_process_images(X_val)
    Y_train = one_hot_encode(Y_train, 10)
    Y_val = one_hot_encode(Y_val, 10)

    # ANY PARTS OF THE CODE BELOW THIS CAN BE CHANGED.

    # Initialize model
    model = SoftmaxModel(l2_reg_lambda)
    # Train model
    trainer = SoftmaxTrainer(
        model,
        learning_rate,
        batch_size,
        shuffle_dataset,
        # The original snippet is truncated here; the trainer is assumed
        # to also receive the datasets loaded above.
        X_train, Y_train,
        X_val, Y_val,
    )

Example #4
import numpy as np
import utils
from task2a import one_hot_encode, pre_process_images, SoftmaxModel, gradient_approximation_test

if __name__ == "__main__":
    # Simple test on one-hot encoding
    Y = np.zeros((1, 1), dtype=int)
    Y[0, 0] = 3
    Y = one_hot_encode(Y, 10)
    assert Y[0, 3] == 1 and Y.sum() == 1, \
        f"Expected the vector to be [0,0,0,1,0,0,0,0,0,0], but got {Y}"

    X_train, Y_train, *_ = utils.load_full_mnist(0.1)
    mean = np.mean(X_train)
    std = np.std(X_train)
    X_train = pre_process_images(X_train, mean, std)
    Y_train = one_hot_encode(Y_train, 10)
    assert X_train.shape[1] == 785,\
        f"Expected X_train to have 785 elements per image. Shape was: {X_train.shape}"

    # Modify your network here
    neurons_per_layer = [64, 64, 10]
    use_improved_sigmoid = True
    use_improved_weight_init = True
    model = SoftmaxModel(neurons_per_layer, use_improved_sigmoid,
                         use_improved_weight_init)
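    # The two flags above toggle tricks from LeCun et al.'s "Efficient
    # BackProp" (assumed here to match the assignment's definitions):
    #   improved sigmoid:     f(z) = 1.7159 * tanh(2z / 3)
    #   improved weight init: w ~ N(0, 1 / sqrt(fan_in)) per layer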

    # Gradient approximation check for 100 images
    X_train = X_train[:100]
    Y_train = Y_train[:100]
    for layer_idx, w in enumerate(model.ws):
        # The original snippet ends mid-loop; the body is assumed to
        # randomize each weight matrix before the numerical check.
        model.ws[layer_idx] = np.random.uniform(-1, 1, size=w.shape)

    gradient_approximation_test(model, X_train, Y_train)
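
The one_hot_encode helper imported from task2a above is exercised only through the assertion near the top of this example; a minimal standalone sketch consistent with that test (hypothetical, not the assignment's actual implementation):

import numpy as np

def one_hot_encode_sketch(Y: np.ndarray, num_classes: int) -> np.ndarray:
    # Y holds integer labels with shape [batch size, 1].
    encoded = np.zeros((Y.shape[0], num_classes))
    encoded[np.arange(Y.shape[0]), Y[:, 0]] = 1.0
    return encoded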
Example #5
            if global_step % num_steps_per_val == 0:
                # Note: the validation loss is stubbed to 0 in this
                # snippet; a full implementation would evaluate the
                # cross-entropy loss on the validation set here.
                _val_loss = 0
                val_loss[global_step] = _val_loss

                train_accuracy[global_step] = calculate_accuracy(
                    X_train, Y_train, model)
                val_accuracy[global_step] = calculate_accuracy(
                    X_val, Y_val, model)

            global_step += 1
    return model, train_loss, val_loss, train_accuracy, val_accuracy
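
# Hypothetical sketch of the calculate_accuracy helper used above
# (assumed; not necessarily the assignment's implementation): the
# fraction of argmax predictions that match the one-hot targets.
def calculate_accuracy_sketch(X: np.ndarray, targets: np.ndarray, model) -> float:
    outputs = model.forward(X)
    return float(np.mean(outputs.argmax(axis=1) == targets.argmax(axis=1)))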


# Load dataset
validation_percentage = 0.1
X_train, Y_train, X_val, Y_val, X_test, Y_test = utils.load_full_mnist(
    validation_percentage)


# Hyperparameters
num_epochs = 50
learning_rate = 0.3
batch_size = 128
l2_reg_lambda = 0.001

model, train_loss, val_loss, train_accuracy, val_accuracy = train(
    num_epochs=num_epochs,
    learning_rate=learning_rate,
    batch_size=batch_size,
    l2_reg_lambda=l2_reg_lambda)

print("Final Train Cross Entropy Loss:",