# Example no. 1
# 0
def main():
    """Train a neural network with momentum gradient descent, evaluate it on
    the test set, and log metrics plus a confusion-matrix heatmap to wandb.

    Relies on module-level globals: NeuralNetwork, config, x_train, y_train,
    x_test, y_test, accuracy, crossEntropyLoss, MSEloss, np, pd, plt, sn,
    wandb, run.
    """
    nn = NeuralNetwork(config.numLayers, config.numClasses,
                       config.weightInitialisation, config.activationFn,
                       config.weightDecay)
    # Input layer size: flattened square image (side * side pixels).
    nn.initialiseParams(len(x_train[0]) * len(x_train[0]), config.numNeurons)

    # Sanity-check forward pass on one random training sample.
    # Use integer division: np.random.randint requires an integer bound
    # (3 * len(...) / 4 yields a float).
    sample = np.random.randint(3 * len(x_train) // 4)
    nn.forwardPropagate(x_train[sample])
    nn.momentumGradDesc(x_train, y_train, config.maxIterations,
                        config.learningRate, config.batchSize, config.gamma)

    # Collect the predicted class and the probability the network assigned
    # to that predicted class, for every test example.
    predictions = []
    predProbs = []
    for x in x_test:
        nn.forwardPropagate(x)
        predictions.append(nn.predictedClass)
        predProbs.append(nn.output[nn.predictedClass])

    test_acc = accuracy(y_test, predictions)
    # NOTE(review): crossEntropyLoss receives the *predicted*-class
    # probability, not the true-class probability — confirm this is intended.
    test_entropy = crossEntropyLoss(y_test, predProbs)
    test_mse = MSEloss(y_test, predictions)

    # Rows = predicted class, columns = true class.
    confusion_matrix = np.zeros((config.numClasses, config.numClasses))
    for pred, true in zip(predictions, y_test):
        confusion_matrix[pred][true] += 1

    # Axis labels are hard-coded to 10 classes ("0".."9"); assumes
    # config.numClasses == 10 — TODO confirm.
    df_cm = pd.DataFrame(confusion_matrix,
                         index=list("0123456789"),
                         columns=list("0123456789"))
    plt.figure(figsize=(10, 10))
    sn.heatmap(df_cm, annot=True)
    plt.title("Confusion Matrix")
    plt.xlabel("y_test")
    plt.ylabel("y_pred")
    wandb.log({"plot": wandb.Image(plt)})
    plt.show()

    # Log scalar metrics in wandb.
    metrics = {
        'test_acc': test_acc,
        # 'test_entropy': test_entropy,
        "test_mse": test_mse,
        # "confusion_matrix": confusion_matrix,
    }
    wandb.log(metrics)
    run.finish()
# Example no. 2
# 0
def main():
    """Train a neural network with the configured optimizer, evaluate it on
    the test and validation sets, and log metrics to wandb.

    Relies on module-level globals: NeuralNetwork, visualiseTrainingExamples,
    config, x_train, y_train, x_test, y_test, x_valid, y_valid, accuracy,
    crossEntropyLoss, MSEloss, np, wandb, run.
    """
    visualiseTrainingExamples()

    nn = NeuralNetwork(config.numLayers, config.numClasses,
                       config.weightInitialisation, config.activationFn,
                       config.weightDecay)
    # Input layer size: flattened square image (side * side pixels).
    nn.initialiseParams(len(x_train[0]) * len(x_train[0]), config.numNeurons)

    # Sanity-check forward pass on one random training sample.
    # Use integer division: np.random.randint requires an integer bound
    # (3 * len(...) / 4 yields a float).
    sample = np.random.randint(3 * len(x_train) // 4)
    nn.forwardPropagate(x_train[sample])

    # Dispatch table: optimizer name -> training call. Each entry trains
    # the network in place when invoked.
    optimizers = {
        "sgd": lambda: nn.stochasticGradDesc(
            x_train, y_train, config.maxIterations,
            config.learningRate, config.batchSize),
        "momentum": lambda: nn.momentumGradDesc(
            x_train, y_train, config.maxIterations,
            config.learningRate, config.batchSize, config.gamma),
        "nesterov": lambda: nn.nesterovAcceleratedGradDesc(
            x_train, y_train, config.maxIterations,
            config.learningRate, config.batchSize, config.gamma),
        "rmsprop": lambda: nn.rmsprop(
            x_train, y_train, config.maxIterations, config.learningRate,
            config.batchSize, config.eps, config.beta1),
        "adam": lambda: nn.adam(
            x_train, y_train, config.maxIterations, config.learningRate,
            config.batchSize, config.eps, config.beta1, config.beta2),
        "nadam": lambda: nn.nadam(
            x_train, y_train, config.maxIterations, config.learningRate,
            config.batchSize, config.eps, config.beta1, config.beta2),
    }
    train = optimizers.get(config.optimizer)
    if train is not None:
        train()
    else:
        # Matches the original behavior: warn and fall through to
        # evaluating the untrained network.
        print("No such optimizer available.")

    def evaluate(xs, ys):
        """Run the net over (xs, ys); return (accuracy, cross-entropy, MSE)."""
        preds = []
        probs = []
        for x in xs:
            nn.forwardPropagate(x)
            preds.append(nn.predictedClass)
            # NOTE(review): this is the *predicted*-class probability, not the
            # true-class probability, yet it feeds crossEntropyLoss — confirm.
            probs.append(nn.output[nn.predictedClass])
        return accuracy(ys, preds), crossEntropyLoss(ys, probs), MSEloss(ys, preds)

    test_acc, test_entropy, test_mse = evaluate(x_test, y_test)
    valid_acc, valid_entropy, valid_mse = evaluate(x_valid, y_valid)

    print(
        f"Test Set:\nAccuracy = {test_acc}\nLoss = {test_entropy}\nMSE = {test_mse}"
    )
    print(
        f"Validation Set:\nAccuracy = {valid_acc}\nLoss = {valid_entropy}\nMSE = {valid_mse}"
    )

    # Log scalar metrics in wandb.
    metrics = {
        'test_acc': test_acc,
        # 'test_entropy': test_entropy,
        "test_mse": test_mse,
        'valid_acc': valid_acc,
        # 'valid_entropy': valid_entropy,
        "valid_mse": valid_mse
    }
    wandb.log(metrics)
    run.finish()