Example #1
def calculate_accuracy(X: np.ndarray, targets: np.ndarray, model: SoftmaxModel) -> float:
    """
    Args:
        X: images of shape [batch size, 785]
        targets: one-hot encoded labels of shape [batch size, 10]
        model: model of class SoftmaxModel
    Returns:
        Accuracy (float)
    """
    # Forward pass: output has shape [batch size, 10]
    output = model.forward(X)
    # Turn the highest-scoring class of each row into a one-hot prediction
    predictions = one_hot_encode(np.argmax(output, axis=1).reshape(-1, 1), 10)
    # A prediction is correct exactly where its one-hot overlaps the one-hot target
    correct_pred = np.count_nonzero(targets * predictions)
    total_pred = output.shape[0]
    accuracy = correct_pred / total_pred
    
    return accuracy
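
The helper one_hot_encode is referenced here but not included in the snippet. A minimal sketch of what such a helper could look like, assuming it takes an integer label column of shape [batch size, 1] and a class count (the actual implementation in the original code base is not shown):

import numpy as np

def one_hot_encode(Y: np.ndarray, num_classes: int) -> np.ndarray:
    # Hypothetical helper: map integer labels of shape [batch size, 1]
    # to a one-hot matrix of shape [batch size, num_classes].
    encoded = np.zeros((Y.shape[0], num_classes))
    encoded[np.arange(Y.shape[0]), Y.flatten().astype(int)] = 1
    return encoded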
Example #2
    return model, train_loss, val_loss, train_accuracy, val_accuracy


# Load dataset
validation_percentage = 0.1
X_train, Y_train, X_val, Y_val, X_test, Y_test = utils.load_full_mnist(
    validation_percentage)

# Preprocessing
X_train = pre_process_images(X_train)
X_test = pre_process_images(X_test)
X_val = pre_process_images(X_val)

# One-hot encode the targets
Y_train = one_hot_encode(Y_train, 10)
Y_val = one_hot_encode(Y_val, 10)
Y_test = one_hot_encode(Y_test, 10)

# Hyperparameters
num_epochs = 50
learning_rate = 0.3
batch_size = 128
l2_reg_lambda = 0.001

# Training
model, train_loss, val_loss, train_accuracy, val_accuracy = train(
    num_epochs=num_epochs,
    learning_rate=learning_rate,
    batch_size=batch_size,
    l2_reg_lambda=l2_reg_lambda)
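
The snippet trains the model but does not report final accuracies. Since the fitted model and the preprocessed splits are all in scope at this point, a small follow-up (not part of the original code) could reuse calculate_accuracy from Example #1:

# Sketch only: report final accuracies with the helper from Example #1.
print("Final train accuracy:", calculate_accuracy(X_train, Y_train, model))
print("Final validation accuracy:", calculate_accuracy(X_val, Y_val, model))
print("Final test accuracy:", calculate_accuracy(X_test, Y_test, model))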
Example #3
            global_step += 1
    return model, train_loss, val_loss, train_accuracy, val_accuracy


# Load dataset
validation_percentage = 0.1
X_train, Y_train, X_val, Y_val, X_test, Y_test = utils.load_full_mnist(
    validation_percentage)

X_train = pre_process_images(X_train)
X_val = pre_process_images(X_val)
X_test = pre_process_images(X_test)

# One-hot encode our targets
Y_train = one_hot_encode(Y_train, NUM_OUTPUTS)
Y_val = one_hot_encode(Y_val, NUM_OUTPUTS)
Y_test = one_hot_encode(Y_test, NUM_OUTPUTS)

# Hyperparameters
num_epochs = 50
learning_rate = 0.3
batch_size = 128

# Training
model, train_loss, val_loss, train_accuracy, val_accuracy = train(
    num_epochs=num_epochs,
    learning_rate=learning_rate,
    batch_size=batch_size,
    l2_reg_lambda=0)
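
pre_process_images is likewise used without being shown. Given that calculate_accuracy in Example #1 documents its input as [batch size, 785] while raw MNIST images have 28 × 28 = 784 pixels, the preprocessing presumably rescales the pixels and appends a bias column; a minimal sketch under that assumption (the actual normalization used by the original code may differ):

import numpy as np

def pre_process_images(X: np.ndarray) -> np.ndarray:
    # Hypothetical preprocessing: scale raw pixel values to [0, 1] and
    # append a constant bias feature, giving shape [batch size, 785].
    X = X.astype(float) / 255.0
    bias = np.ones((X.shape[0], 1))
    return np.concatenate([X, bias], axis=1)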