Exemplo n.º 1
0
    def ExecuteSweep(sweepSpec, optSpec, plotString, plot=False):
        """Evaluate every candidate value of one hyperparameter in parallel.

        Builds one run specification per value in sweepSpec[optSpec] (carrying
        over 'maxDepth' from sweepSpec), evaluates each with
        ExecuteEvaluationRun via joblib, optionally charts the accuracies,
        and returns the list of evaluation results.
        """
        runSpecs = [
            {
                'optimizing': optSpec,
                'maxDepth': sweepSpec['maxDepth'],
                optSpec: candidate,
            }
            for candidate in sweepSpec[optSpec]
        ]

        # Parallel evaluation -- requires joblib to be installed.
        from joblib import Parallel, delayed
        evaluations = Parallel(n_jobs=4)(
            delayed(ExecuteEvaluationRun)(spec, xTrain, yTrain, folds)
            for spec in runSpecs)

        if plot:
            xValues = sweepSpec[optSpec]
            means = [e['crossValidationMean'] for e in evaluations]
            errorBars = [e['crossValidationErrorBound'] for e in evaluations]

            # folds == 1 means a single validation split rather than k-fold CV.
            if folds > 1:
                chartName = "Cross Validation Accuracy"
                fileName = "CrossValidationAccuracy"
            else:
                chartName = "Validation Accuracy"
                fileName = "ValidationAccuracy"

            # Title and output name vary with the featurization in use.
            if useNumeric:
                chartTitle = plotString + " " + chartName + " (w/ Numeric Features)"
                outputName = "2-" + plotString + fileName + "Numeric"
            else:
                chartTitle = plotString + " " + chartName
                outputName = "2-" + plotString + fileName

            Charting.PlotSeriesWithErrorBars(
                [means], [errorBars], [chartName],
                xValues,
                chartTitle=chartTitle,
                xAxisTitle=plotString + " Values",
                yAxisTitle=chartName,
                yBotLimit=0.7,
                outputDirectory=kOutputDirectory,
                fileName=outputName)
        return evaluations
Exemplo n.º 2
0
def ExecuteSweep(sweepSpec, optSpec, plotString, plot=False):
    """Evaluate every candidate value of one hyperparameter in parallel.

    Builds one run specification per value in sweepSpec[optSpec] (carrying
    over 'stepSize' and 'convergence' from sweepSpec), evaluates each with
    ExecuteEvaluationRun via joblib, optionally charts the accuracies, and
    returns the list of evaluation results.
    """
    runSpecs = [
        {
            'optimizing': optSpec,
            'stepSize': sweepSpec['stepSize'],
            'convergence': sweepSpec['convergence'],
            optSpec: candidate,
        }
        for candidate in sweepSpec[optSpec]
    ]

    # Parallel evaluation -- requires joblib to be installed.
    from joblib import Parallel, delayed
    evaluations = Parallel(n_jobs=4)(
        delayed(ExecuteEvaluationRun)(spec, xTrain, yTrain)
        for spec in runSpecs)

    if plot:
        means = [e['crossValidationMean'] for e in evaluations]
        errorBars = [e['crossValidationErrorBound'] for e in evaluations]
        Charting.PlotSeriesWithErrorBars(
            [means], [errorBars], ["Cross Validation Accuracy"],
            sweepSpec[optSpec],
            chartTitle=plotString + " Cross Validation Accuracy",
            xAxisTitle=plotString + " Values",
            yAxisTitle="Cross Valid. Accuracy",
            yBotLimit=0.7,
            outputDirectory=kOutputDirectory,
            fileName="1-" + plotString + "CrossValidationAccuracy")
    return evaluations
Exemplo n.º 3
0
    # ROC comparison: best numeric-feature decision tree vs. best
    # non-numeric one, each refit at its best-found depth and tabulated on
    # the corresponding test featurization.
    seriesFPRs = []
    seriesFNRs = []
    seriesLabels = []

    # Refit the best numeric-feature model on the numeric training data.
    model = DecisionTree.DecisionTree()
    model.fit(xTrainNumeric, yTrain, maxDepth=BestDepthNumeric)

    (modelFPRs, modelFNRs,
     thresholds) = TabulateModelPerformanceForROC(model, xTestNumeric, yTest)
    seriesFPRs.append(modelFPRs)
    seriesFNRs.append(modelFNRs)
    seriesLabels.append('best numeric model')

    # Same procedure on the non-numeric featurization.
    model = DecisionTree.DecisionTree()
    model.fit(xTrain, yTrain, maxDepth=BestDepth)

    (modelFPRs, modelFNRs,
     thresholds) = TabulateModelPerformanceForROC(model, xTest, yTest)
    seriesFPRs.append(modelFPRs)
    seriesFNRs.append(modelFNRs)
    seriesLabels.append('best non-numeric model')

    # NOTE(review): the axis titles look swapped relative to the series
    # order (FPRs passed first but x labeled "False Negative Rate") --
    # confirm against Charting.PlotROCs' expected argument order.  Also
    # note the typo "Decistion" in the output file name.
    Charting.PlotROCs(seriesFPRs,
                      seriesFNRs,
                      seriesLabels,
                      useLines=True,
                      chartTitle="ROC Comparison Features",
                      xAxisTitle="False Negative Rate",
                      yAxisTitle="False Positive Rate",
                      outputDirectory=kOutputDirectory,
                      fileName="2-DecistionTreeROC")
##
# Visualize Training run
##

# Chart the per-epoch train/validation losses collected above, then switch
# the model to inference mode for evaluation.
kOutputDirectory = "C:\\temp\\visualize"

import MachineLearningCourse.MLUtilities.Visualizations.Charting as Charting

# One x value per recorded epoch, starting at 1.
xValues = [i + 1 for i in range(len(trainLosses))]

Charting.PlotSeries([trainLosses, validationLosses],
                    ["Train Loss", "Validate Loss"],
                    xValues,
                    useMarkers=False,
                    chartTitle="Pytorch First Modeling Run",
                    xAxisTitle="Epoch",
                    yAxisTitle="Loss",
                    yBotLimit=0.0,
                    outputDirectory=kOutputDirectory,
                    fileName="PyTorch-Initial-TrainValidate")

##
# Evaluate the Model
##

import MachineLearningCourse.MLUtilities.Evaluations.EvaluateBinaryClassification as EvaluateBinaryClassification
import MachineLearningCourse.MLUtilities.Evaluations.ErrorBounds as ErrorBounds

# Put the torch model into inference mode before predicting on the test set.
model.train(mode=False)
yTestPredicted = model(xTest)
Exemplo n.º 5
0
    # Fragment: partitions evaluation results by which hyperparameter each
    # run was sweeping, then charts accuracy (with error bars) per sweep.
    # NOTE(review): `evaluations` and the learning_*/converg_* accumulator
    # lists are defined above this fragment -- not visible here.
    converg_series = []

    # log_convert = {0.1: 0, 0.01: 1, 0.001: 2, 0.0001: 3, 0.00001: 4}

    for evaluation in evaluations:
        if evaluation["optimizing"] == "convergence":
            converg_error_series.append(evaluation['50PercentBound'])
            converg_valid_series.append(evaluation['accuracy'])
            converg_series.append(evaluation['convergence'])
        elif evaluation["optimizing"] == "learning_rate":
            learning_error_series.append(evaluation['50PercentBound'])
            learning_valid_series.append(evaluation['accuracy'])
            learning_series.append(evaluation['learning_rate'])


    # One accuracy-vs-value chart per swept hyperparameter.
    Charting.PlotSeriesWithErrorBars([converg_valid_series], [converg_error_series], ["Accuracy"], [converg_series], chartTitle="<NN Accuracy on Validation Data>", xAxisTitle="<converg>", yAxisTitle="<Accuracy>", yBotLimit=0.65, outputDirectory=kOutputDirectory, fileName="converg_sweep")
    Charting.PlotSeriesWithErrorBars([learning_valid_series], [learning_error_series], ["Accuracy"], [learning_series], chartTitle="<NN Accuracy on Validation Data>", xAxisTitle="<learning>", yAxisTitle="<Accuracy>", yBotLimit=0.65, outputDirectory=kOutputDirectory, fileName="learning_sweep")


roccompare = False
if roccompare:
    
    # A helper function for calculating FN rate and FP rate across a range of thresholds
    def TabulateModelPerformanceForROC(model, xValidate, yValidate):
        pointsToEvaluate = 100
        thresholds = [ x / float(pointsToEvaluate) for x in range(pointsToEvaluate + 1)]
        FPRs = []
        FNRs = []

        try:
            for threshold in thresholds:
        'convergence': newBestParameters['convergence'],
        'numFrequentWords': newBestParameters['numFrequentWords'],
        'numMutualInformationWords':
        newBestParameters['numMutualInformationWords']
    }
    # Evaluate the best parameters found so far and record accuracy + bound.
    # NOTE(review): `bestParameters`, the validationSetAccuracy* lists, and
    # `sweep` are defined above this fragment.
    result = ExecuteEvaluationRun(bestParameters, xTrainRaw, yTrain, 1)
    validationSetAccuracy.append(result['accuracy'])
    validationSetAccuracyError.append(result['accuracyErrorBound'])

# Plot Validation Accuracy
# The final result is appended at x = lastSweep + 1 so it appears one tick
# past the sweep series.
lastSweep = sweep[len(sweep) - 1]
Charting.PlotSeriesWithErrorBars([validationSetAccuracy],
                                 [validationSetAccuracyError], ["Accuracy"],
                                 sweep + [lastSweep + 1],
                                 chartTitle="Validation Set Accuracy",
                                 xAxisTitle="Sweep #",
                                 yAxisTitle="Validation Set Accuracy",
                                 yBotLimit=0.8,
                                 outputDirectory=kOutputDirectory,
                                 fileName="7-ValidationSetAccuracy")
print("BestParameters: ", bestParameters)

# ROC of initial vs. best: start fresh series and define the initial
# (pre-tuning) hyperparameter configuration for comparison.
seriesFPRs = []
seriesFNRs = []
seriesLabels = []
init = {}
init['stepSize'] = 1.0
init['convergence'] = 0.005
init['numFrequentWords'] = 0
init['numMutualInformationWords'] = 20
Exemplo n.º 7
0
# Sweep the number of boosting rounds (k), tracking test accuracy and the
# model with the best bound.  NOTE(review): `kValues`, `maxDepth`, and
# `bestModel` (initialized to None) are defined above this fragment.
accuracies = []
errorBarsAccuracy = []
for kv in kValues:
    model = BoostedTree.BoostedTree()
    model.fit(xTrain, yTrain, maxDepth=maxDepth, k=kv)
    accuracy = EvaluateBinaryClassification.Accuracy(yTest, model.predict(xTest))
    # .5 confidence -> narrow bars; error bar is distance to the lower bound.
    lower, upper = ErrorBounds.GetAccuracyBounds(accuracy, len(yTest), .5)
    print(kv, ": ", accuracy)
    accuracies.append(accuracy)
    errorBarsAccuracy.append(accuracy-lower)
    # Keep the model whose lower bound beats the incumbent's stored upper bound.
    if bestModel is None:
        bestModel = (model, upper)
    elif lower > bestModel[1]:
        bestModel = (model, upper)

Charting.PlotSeriesWithErrorBars([accuracies], [errorBarsAccuracy], ["k-round tuning accuracy"], kValues, chartTitle="Line/Circle Concept Accuracy", xAxisTitle="Boosting Rounds", yAxisTitle="Test Accuracy", yBotLimit=0.5, outputDirectory=kOutputDirectory, fileName="4-BoostingTreeRoundTuning")

## you can use this to visualize what your model is learning.
# Report the best model's accuracy with a 95% bound.
accuracy = EvaluateBinaryClassification.Accuracy(yTest, bestModel[0].predict(xTest))
lower, upper = ErrorBounds.GetAccuracyBounds(accuracy, len(yTest), .95)
print("accuracy: ", lower, "-", upper)
visualize = Visualize2D.Visualize2D(kOutputDirectory, "4-My Boosted Tree")
# NOTE(review): this plots `model` (the last one fit in the loop), not
# bestModel[0] -- confirm which was intended.
visualize.PlotBinaryConcept(model)

# Or you can use it to visualize individual models that you learned, e.g.:
# visualize.PlotBinaryConcept(model->modelLearnedInRound[2])

## you might like to see the training or test data too, so you might prefer this to simply calling 'PlotBinaryConcept'
#visualize.Plot2DDataAndBinaryConcept(xTrain,yTrain,model)

# And remember to save
Exemplo n.º 8
0
# Compare ROC curves of logistic-regression models built on two 25-word
# vocabularies (frequency-based vs. mutual-information-based).
# NOTE(review): `featurizer`, `convergence`, `stepSize`, and the series*
# lists are defined above this fragment.
xTrain      = featurizer.Featurize(xTrainRaw)
xValidate   = featurizer.Featurize(xValidateRaw)
xTest       = featurizer.Featurize(xTestRaw)

model = LogisticRegression.LogisticRegression()
model.fit(xTrain,yTrain,convergence=convergence, stepSize=stepSize)

(modelFPRs, modelFNRs, thresholds) = TabulateModelPerformanceForROC(model, xValidate, yValidate)
seriesFPRs.append(modelFPRs)
seriesFNRs.append(modelFNRs)
seriesLabels.append('25 Frequent')

#### Learn a model with 25 features by mutual information
# A fresh featurizer/vocabulary is required for each featurization variant.
featurizer = SMSSpamFeaturize.SMSSpamFeaturize(useHandCraftedFeatures=False)
featurizer.CreateVocabulary(xTrainRaw, yTrain, numMutualInformationWords = 25)

xTrain      = featurizer.Featurize(xTrainRaw)
xValidate   = featurizer.Featurize(xValidateRaw)
xTest       = featurizer.Featurize(xTestRaw)

model = LogisticRegression.LogisticRegression()
model.fit(xTrain,yTrain,convergence=convergence, stepSize=stepSize)

(modelFPRs, modelFNRs, thresholds) = TabulateModelPerformanceForROC(model, xValidate, yValidate)
seriesFPRs.append(modelFPRs)
seriesFNRs.append(modelFNRs)
seriesLabels.append('25 Mutual Information')

Charting.PlotROCs(seriesFPRs, seriesFNRs, seriesLabels, useLines=True, chartTitle="ROC Comparison", xAxisTitle="False Negative Rate", yAxisTitle="False Positive Rate", outputDirectory=kOutputDirectory, fileName="Plot-SMSSpamROCs")
        # Fragment: trains one logistic-regression model per vocabulary size
        # inside a loop whose header is above this view, records losses, and
        # charts train vs. validate loss against the number of frequent words.
        logisticRegressionModel.fit(xTrain,
                                    yTrain,
                                    stepSize=1.0,
                                    convergence=0.001)
        trainLosses.append(logisticRegressionModel.loss(xTrain, yTrain))
        validationLosses.append(
            logisticRegressionModel.loss(xValidate, yValidate))

    import MachineLearningCourse.MLUtilities.Visualizations.Charting as Charting
    # trainLosses, validationLosses, and lossXLabels are parallel arrays with the losses you want to plot at the specified x coordinates
    Charting.PlotSeries(
        [trainLosses, validationLosses], ['Train', 'Validate'],
        lossXLabels,
        chartTitle="Num Frequent Words Logistic Regression",
        xAxisTitle="Num Frequent Words",
        yAxisTitle="Avg. Loss",
        outputDirectory=kOutputDirectory,
        fileName=
        "4-Logistic Regression Num Frequent Words Train vs Validate loss")

    # Reset the accumulators and repeat the experiment for these vocabulary
    # sizes.  (Fragment ends before the loop body completes.)
    trainLosses = []
    validationLosses = []
    lossXLabels = [1, 10, 20, 30, 40, 50]
    for freq in lossXLabels:
        # Now get into model training
        import MachineLearningCourse.MLUtilities.Learners.LogisticRegression as LogisticRegression

        # Remember to create a new featurizer object/vocabulary for each part of the assignment
        featurizer = SMSSpamFeaturize.SMSSpamFeaturize(
            useHandCraftedFeatures=False)
Exemplo n.º 10
0
    # Fragment: sweep the Q-learning discount rate on CartPole, plotting the
    # mean score (std as error bars) per rate.  NOTE(review):
    # `discountRateRange`, `accResults`, and `accError` are defined above
    # this view; trainModel here is the RL training entry point.
    for i in discountRateRange:
        results = trainModel(discountRate=i,
                             actionProbabilityBase=1.25,
                             randomActionRate=0.1,
                             learningRateScale=0.01,
                             binsPerDimension=8)
        acc = np.mean(results)
        err = np.std(results)
        accResults.append(acc)
        accError.append(err)

    Charting.PlotSeriesWithErrorBars(
        [accResults], [accError], ["CartPole Score (Mean) 20 trials"],
        discountRateRange,
        useMarkers=False,
        chartTitle="CartPole Discount Rate Effect on Learning",
        xAxisTitle="Discount Rate",
        yAxisTitle="Score",
        yBotLimit=20,
        outputDirectory=kOutputDirectory,
        fileName="1-DiscountRate")

# Optional second sweep over actionProbabilityBase with the discount rate
# fixed at 0.75.  (Fragment is cut off before the loop body completes.)
runActionProbability = False
if runActionProbability:
    accResults = []
    accError = []
    for i in actionProbabilityBaseRange:
        results = trainModel(discountRate=0.75,
                             actionProbabilityBase=i,
                             randomActionRate=0.1,
                             learningRateScale=0.01,
                             binsPerDimension=8)
Exemplo n.º 11
0
        # Fragment: inside a k-rounds sweep for a boosted tree; `model`,
        # `kv`, `validationAccuracy`, `lower`, and `upper` come from lines
        # above this view.  Records train/validation accuracies with .5
        # confidence bounds and tracks the best model.
        trainingAccuracy = EvaluateBinaryClassification.Accuracy(yTrain, model.predict(xTrain))
        lowerTrain, upperTrain = ErrorBounds.GetAccuracyBounds(trainingAccuracy, len(yTrain), .5)

        validationAccuracies.append(validationAccuracy)
        validationAccuracyErrorBounds.append(validationAccuracy-lower)
        trainingAccuracies.append(trainingAccuracy)
        trainingAccuracyErrorBounds.append(trainingAccuracy-lowerTrain)

        print("k: ", kv, " accuracy: ", lower, "-", upper)
        # NOTE(review): the tuple stores (model, lower, upper, kv), so
        # `lower > bestModelBT[2]` compares a new lower bound against the
        # incumbent's UPPER bound -- confirm this strict criterion is intended.
        if bestModelBT is None:
            bestModelBT = (model, lower, upper, kv)
        elif lower > bestModelBT[2]:
            bestModelBT = (model, lower, upper, kv)

    print("boosted tree - k-rounds: ", bestModelBT[3], " accuracy: ", bestModelBT[1], "-", bestModelBT[2])
    Charting.PlotSeriesWithErrorBars([validationAccuracies, trainingAccuracies], [validationAccuracyErrorBounds, trainingAccuracyErrorBounds], ["BT-validation", "BT-training"], kValues, chartTitle="Boosted Decision Tree k-Round Search", xAxisTitle="Boosting Rounds", yAxisTitle="Accuracy", yBotLimit=0.5, outputDirectory=kOutputDirectory, fileName=filename)

# Optional depth-tuning pass on the blink data with edge-filter features.
tuneDepth = False
if tuneDepth:

    featurizer = BlinkFeaturize.BlinkFeaturize()

    #featurizer.CreateFeatureSet(xTrainRaw, yTrain, includeEdgeFeatures=True)
    #filename = "1-BlinkBoostedTreeEdgeFilterFeaturesOnly-MaxDepth"
    featurizer.CreateFeatureSet(xTrainRaw, yTrain, includeEdgeFeatures=True, includeEdgeFeaturesMax=True)
    filename = "1-BlinkBoostedTreeEdgeFilterMaxAndAvg-MaxDepth"

    xTrain    = featurizer.Featurize(xTrainRaw)
    xValidate = featurizer.Featurize(xValidateRaw)
    xTest     = featurizer.Featurize(xTestRaw)
Exemplo n.º 12
0
        # Fragment: inside a logistic-regression step-size sweep; `model`,
        # `stepSize`, `validationAccuracy`, `trainingAccuracy`, `lower`,
        # `lowerTrain`, and `upper` come from lines above this view.
        validationAccuracyErrorBounds.append(validationAccuracy - lower)
        trainingAccuracies.append(trainingAccuracy)
        trainingAccuracyErrorBounds.append(trainingAccuracy - lowerTrain)

        print("stepSize: ", stepSize, " accuracy: ", lower, "-", upper)
        # NOTE(review): as elsewhere, `lower > bestModelLG[2]` compares a new
        # lower bound against the stored upper bound -- confirm intended.
        if bestModelLG is None:
            bestModelLG = (model, lower, upper, stepSize)
        elif lower > bestModelLG[2]:
            bestModelLG = (model, lower, upper, stepSize)

    Charting.PlotSeriesWithErrorBars(
        [validationAccuracies, trainingAccuracies],
        [validationAccuracyErrorBounds, trainingAccuracyErrorBounds],
        ["LR-validation", "LR-training"],
        stepSizes,
        chartTitle="Logistic Regression Step Size Search",
        xAxisTitle="Step Size",
        yAxisTitle="Accuracy",
        yBotLimit=0.5,
        outputDirectory=kOutputDirectory,
        fileName="5-LogisticRegressionStepSize")

# Next experiment: a max-depth sweep for a decision tree (fragment is cut
# off after the accumulators are reset).
bestModelDT = None
runDecisionTree = True
if runDecisionTree:
    import MachineLearningCourse.MLUtilities.Learners.DecisionTree as DecisionTree
    maxDepths = [1, 10, 50, 100, 500]
    validationAccuracies = []
    validationAccuracyErrorBounds = []
    trainingAccuracies = []
    trainingAccuracyErrorBounds = []
Exemplo n.º 13
0
def trainModel(m, op, maxE, patience=10, saveChartName=""):
    """Train torch model `m` with optimizer `op` using full-batch gradient
    descent and patience-based early stopping on the validation loss.

    Uses the module-level tensors xTrain/yTrain/xValidate/yValidate and the
    module-level Charting/kOutputDirectory/EvaluateBinaryClassification.

    Parameters:
        m             -- the torch model to train
        op            -- a torch optimizer bound to m's parameters
        maxE          -- epoch limit (loop runs while epoch < maxE)
        patience      -- consecutive validation-loss increases tolerated
                         before declaring convergence
        saveChartName -- when non-empty, a loss/epoch chart is written to
                         kOutputDirectory as "4-" + saveChartName

    Returns the model's accuracy on the validation set (0.5 threshold).
    """
    startTime = time.time()

    lossFunction = torch.nn.BCELoss(reduction='mean')

    trainLosses = []
    validationLosses = []

    converged = False
    epoch = 1
    lastValidationLoss = None
    currPatience = 0

    while not converged and epoch < maxE:
        # One full-batch gradient descent step (one step per epoch, unlike a
        # per-sample or mini-batch implementation).
        op.zero_grad()
        yTrainPredicted = m(xTrain)
        trainLoss = lossFunction(yTrainPredicted, yTrain)
        trainLoss.backward()
        op.step()

        # Evaluate in inference mode.
        m.train(mode=False)
        validationLoss = lossFunction(m(xValidate), yValidate)

        # Re-run the forward pass so the recorded training loss reflects the
        # weights *after* this epoch's update.
        trainLoss = lossFunction(m(xTrain), yTrain)
        # NOTE(review): BCELoss(reduction='mean') is already averaged; the
        # extra division by the sample count rescales the recorded curves.
        # Kept for parity with previously generated charts -- confirm intended.
        trainLosses.append(trainLoss.item() / len(yTrain))
        validationLosses.append(validationLoss.item() / len(yValidate))
        print("epoch {}: training loss {}, validation loss {}".format(
            epoch, trainLosses[-1], validationLosses[-1]))

        # Early stopping: tolerate up to `patience` consecutive epochs whose
        # validation loss exceeds the last recorded (non-increasing) loss.
        if lastValidationLoss is not None and validationLoss > lastValidationLoss:
            if currPatience < patience:
                currPatience += 1
            else:
                converged = True
        else:
            lastValidationLoss = validationLoss
            currPatience = 0
        epoch = epoch + 1
        m.train(mode=True)

    endTime = time.time()
    print("Runtime: %s" % (endTime - startTime))

    ##
    # Visualize Training run
    ##
    if saveChartName != "":
        xValues = [i + 1 for i in range(len(trainLosses))]
        Charting.PlotSeries([trainLosses, validationLosses],
                            ["Train Loss", "Validate Loss"],
                            xValues,
                            useMarkers=False,
                            chartTitle="Blink LeNet Model Loss/Epoch",
                            xAxisTitle="Epoch",
                            yAxisTitle="Loss",
                            yBotLimit=0.0,
                            outputDirectory=kOutputDirectory,
                            fileName="4-" + saveChartName)

    ##
    # Get the model accuracy on validation set
    ##
    # BUG FIX: this previously called .train(mode=False) on the module-level
    # global `model` instead of the `m` parameter being trained, so the
    # evaluated model could be left in training mode.
    m.train(mode=False)
    yValidatePredicted = m(xValidate)
    return EvaluateBinaryClassification.Accuracy(
        yValidate, [1 if pred > 0.5 else 0 for pred in yValidatePredicted])
                                           yTrain,
                                           maxSteps=0,
                                           stepSize=1.0,
                                           convergence=0.0001)
    # Fragment: trains a logistic-regression model in 100-step increments,
    # recording train/validation loss after each increment, then plots the
    # loss curves against total gradient-descent steps.  NOTE(review):
    # `logisticRegressionModel` is fit (with maxSteps=0) just above this view.
    import MachineLearningCourse.MLUtilities.Visualizations.Charting as Charting
    lossXLabels = [0]
    trainLosses = [logisticRegressionModel.loss(xTrain, yTrain)]
    validationLosses = [logisticRegressionModel.loss(xValidate, yValidate)]

    while not logisticRegressionModel.converged:
        # do 100 iterations of training
        logisticRegressionModel.incrementalFit(xTrain,
                                               yTrain,
                                               maxSteps=100,
                                               stepSize=1.0,
                                               convergence=0.0001)

        lossXLabels.append(logisticRegressionModel.totalGradientDescentSteps)
        trainLosses.append(logisticRegressionModel.loss(xTrain, yTrain))
        validationLosses.append(
            logisticRegressionModel.loss(xValidate, yValidate))

    # trainLosses, validationLosses, and lossXLabels are parallel arrays with the losses you want to plot at the specified x coordinates
    Charting.PlotSeries(
        [trainLosses, validationLosses], ['Train', 'Validate'],
        lossXLabels,
        chartTitle="Logistic Regression",
        xAxisTitle="Gradient Descent Steps",
        yAxisTitle="Avg. Loss",
        outputDirectory=kOutputDirectory,
        fileName="3-Logistic Regression Train vs Validate loss")
                             convergence=convergence,
                             momentum=momentum)
        # Fragment: inside a training loop (index `i` defined above this
        # view).  Every 100 epochs, dump an image of each first-layer
        # neuron's weights; after, record the epoch's train/validation loss.
        if (i + 1) % 100 == 0:
            for filterNumber in range(hiddenStructure[0]):
                ## update the first parameter based on your representation
                #VisualizeWeights([model.weight0[0][filterNumber]] + list(model.layers[0][filterNumber][:]), "%s/filters/epoch%d_neuron%d.jpg" % (kOutputDirectory, i+1, filterNumber), sampleStride=sampleStride)
                VisualizeWeights([model.weight0[0][filterNumber]] +
                                 list(model.layers[0][filterNumber][:]),
                                 "%s/filters2/epoch%d_neuron%d.jpg" %
                                 (kOutputDirectory, i + 1, filterNumber),
                                 sampleStride=sampleStride)
    tLoss = model.loss(xTrain, yTrain)
    vLoss = model.loss(xValidate, yValidate)
    trainingLosses.append(tLoss)
    validationLosses.append(vLoss)

# Plot the per-epoch loss curves collected above and report validation
# accuracy for the trained two-layer model.
import MachineLearningCourse.MLUtilities.Visualizations.Charting as Charting
#Charting.PlotSeries([trainingLosses, validationLosses], ["training loss", "validation loss"], list(range(maxEpochs)), chartTitle="Single Layer Loss", xAxisTitle="epochs", yAxisTitle="loss", outputDirectory=kOutputDirectory+"/visualize\\", fileName="2-SingleLayerModelLoss")
Charting.PlotSeries([trainingLosses, validationLosses],
                    ["training loss", "validation loss"],
                    list(range(maxEpochs)),
                    chartTitle="Two Layer Loss",
                    xAxisTitle="epochs",
                    yAxisTitle="loss",
                    outputDirectory=kOutputDirectory + "/visualize\\",
                    fileName="2-TwoLayerModelLoss")

# Evaluate things...
accuracy = EvaluateBinaryClassification.Accuracy(yValidate,
                                                 model.predict(xValidate))
print("Model Accuracy is:", accuracy)