import numpy as np

# Project-local modules referenced throughout this file; getDataValidation()
# and getInitialNetConf() are assumed to be defined elsewhere in the project.
import nn
import nnConf
import nnUtils


def validateTitanic():
    # Data
    X, Y = getDataValidation()

    # Network
    inputSize = 7
    layerCount = 2
    networkConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    networkConf.layerConf[0].neuronCount = 20
    networkConf.layerConf[0].activationFn = "relu"
    networkConf.layerConf[0].weightInitializerMethod = "random"
    networkConf.layerConf[1].neuronCount = 1
    networkConf.layerConf[1].activationFn = "sigmoid"
    networkConf.layerConf[1].weightInitializerMethod = "random"
    networkConf.Lambda = 0.00009  # 0.0001
    networkConf.maxIter = 500

    accuracyList = nnUtils.validateNN(X, Y, networkConf, 5, showLearning=False)
    print(accuracyList)
    print("Mean Accuracy: ", np.mean(accuracyList))

    # Store the neural network settings and state, then restore them
    # as a round-trip check.
    # print(nn.getGlobalConf())
    # print(networkConf)
    nnUtils.saveConfig(networkConf, nn.getGlobalConf())
    nnUtils.restoreConfig()
    return accuracyList
def runAndMeasure():
    # Data
    X, Y = getDataValidation()

    # Network
    inputSize = 7
    layerCount = 2
    netConf = nnConf.NeuralNetworkConf(inputSize, layerCount)
    netConf.layerConf[0].neuronCount = 20
    netConf.layerConf[0].activationFn = "relu"
    netConf.layerConf[0].weightInitializerMethod = "random"
    netConf.layerConf[1].neuronCount = 1
    netConf.layerConf[1].activationFn = "sigmoid"
    netConf.layerConf[1].weightInitializerMethod = "random"
    netConf.Lambda = 0.00009  # 0.0001
    netConf.maxIter = 500

    accuracyList = nnUtils.validateNN(X, Y, netConf, 5, showLearning=False)
    netConf.accuracy = np.mean(accuracyList)
    print(accuracyList)
    print("Mean Accuracy: ", netConf.accuracy)
    return (netConf, nn.getGlobalConf())
def loadAndValidate(trainX, trainY):
    netConf, weightsBias = nnUtils.restoreConfig("nn.json")
    accuracyList = nnUtils.validateNN(trainX, trainY, netConf, 5,
                                      showLearning=False, wtReuse=True,
                                      wtAndBias=weightsBias)
    print("Best accuracy on validation: ", np.mean(accuracyList))
def loadAndRun():
    # Data
    X, Y = getDataValidation()

    netConf, weightsBias = nnUtils.restoreConfig("nn.json")
    accuracyList = nnUtils.validateNN(X, Y, netConf, 5, showLearning=False,
                                      wtReuse=True, wtAndBias=weightsBias)
    print(accuracyList)
    print("Mean Accuracy: ", np.mean(accuracyList))
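# A minimal usage sketch (not in the original): runAndMeasure() returns the
# configuration and trained weights but nothing in this file persists that
# result, so this hypothetical helper saves them to "nn.json" (the filename
# restoreConfig() expects above) and then reloads them via loadAndRun().
def measureSaveAndReload():
    netConf, weightsBias = runAndMeasure()
    nnUtils.saveConfig(netConf, weightsBias, "nn.json")
    loadAndRun()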
def findAndStoreBestInitialWeights(X, Y, netConf, noOfPass):
    bestAccuracy = 0
    n_splits = 5
    for _ in range(noOfPass):
        accuracyList = nnUtils.validateNN(X, Y, netConf, n_splits,
                                          showLearning=False)
        if np.mean(accuracyList) > bestAccuracy:
            bestAccuracy = np.mean(accuracyList)
            # Store the neural network settings and state
            print("Got better accuracy: ", bestAccuracy)
            netConf.accuracy = bestAccuracy
            nnUtils.saveConfig(netConf, nn.getGlobalConf(), "nn.json")
def getBestMaxIter(X, Y, netConf):
    bestMaxIter = 0
    bestAccuracy = 0
    n_splits = 5
    for maxIter in range(100, 1000, 100):
        netConf.maxIter = maxIter
        accuracyList = nnUtils.validateNN(X, Y, netConf, n_splits,
                                          showLearning=False, wtReuse=True,
                                          wtAndBias=nn.getGlobalConf())
        if np.mean(accuracyList) > bestAccuracy:
            bestAccuracy = np.mean(accuracyList)
            bestMaxIter = maxIter
    return (bestMaxIter, bestAccuracy)
def getBestLambda(X, Y, netConf):
    bestLambda = 0
    bestAccuracy = 0
    n_splits = 5
    for Lambda in np.linspace(0.00001, 0.0001, 200):
        netConf.Lambda = Lambda
        accuracyList = nnUtils.validateNN(X, Y, netConf, n_splits,
                                          showLearning=False, wtReuse=True,
                                          wtAndBias=nn.getGlobalConf())
        if np.mean(accuracyList) > bestAccuracy:
            bestAccuracy = np.mean(accuracyList)
            bestLambda = Lambda
    return (bestLambda, bestAccuracy)
def getBestNeuronCount(X, Y, netConf):
    bestNeuronCount = 0
    bestAccuracy = 0
    n_splits = 5
    for neuronCount in range(2, 30):
        netConf.layerConf[0].neuronCount = neuronCount
        accuracyList = nnUtils.validateNN(X, Y, netConf, n_splits,
                                          showLearning=False, wtReuse=True,
                                          wtAndBias=nn.getGlobalConf())
        if np.mean(accuracyList) > bestAccuracy:
            bestAccuracy = np.mean(accuracyList)
            bestNeuronCount = neuronCount
    return (bestNeuronCount, bestAccuracy)
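# A hedged sketch (not in the original) chaining the three search helpers
# above into a single tuning pass; the call order and the idea of feeding
# each result back into netConf are assumptions, not the author's workflow.
def tuneHyperparameters(X, Y, netConf):
    netConf.Lambda, accuracy = getBestLambda(X, Y, netConf)
    netConf.maxIter, accuracy = getBestMaxIter(X, Y, netConf)
    netConf.layerConf[0].neuronCount, accuracy = getBestNeuronCount(X, Y, netConf)
    print("Tuned Mean Accuracy: ", accuracy)
    return netConf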
if __name__ == "__main__":
    # 1. Parse data and get the useful features
    trainX, trainY = getDataValidation()

    # 2. Get the best initial network configuration.
    #    * Decide and fix the network depth (getInitialNetConf).
    netConf = getInitialNetConf()

    #    * Run evaluation once with randomized initial weights and store
    #      the weights (evaluate-and-store function, takes a flag for
    #      random initialization).
    n_splits = 5
    accuracyList = nnUtils.validateNN(trainX, trainY, netConf, n_splits,
                                      showLearning=False)
    print("Initial Mean Accuracy: ", np.mean(accuracyList))

    #    * Run evaluation varying different network parameters with
    #      the fixed initial weights.
    # netConf.Lambda, accuracy = getBestLambda(trainX, trainY, netConf)
    # netConf.maxIter, accuracy = getBestMaxIter(trainX, trainY, netConf)
    netConf.Lambda = 0.0000946
    netConf.maxIter = 500
    print("Best Lambda: ", netConf.Lambda)
    print("Best MaxIter: ", netConf.maxIter)

    # Re-validate with the tuned configuration so the reported accuracy
    # reflects the updated Lambda/maxIter rather than the initial run.
    accuracyList = nnUtils.validateNN(trainX, trainY, netConf, n_splits,
                                      showLearning=False)
    print("Best Config Mean Accuracy: ", np.mean(accuracyList))

    # 3. With the best network configuration, get the best set of weights.
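    # A hedged sketch of step 3 (not in the original source): reuse
    # findAndStoreBestInitialWeights() above to search for and persist the
    # best-performing random initialization; the pass count of 10 is an
    # assumption, not a value from the original.
    findAndStoreBestInitialWeights(trainX, trainY, netConf, 10)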