def Net(dataReader, num_input, num_hidden, num_output, params):
    """Build and train a 5-layer fully-connected softmax classifier.

    Four equal-width ReLU hidden layers are followed by a linear layer and
    a softmax output; the net is trained on *dataReader* and the loss
    history is plotted afterwards.
    """
    net = NeuralNet(params)
    prev_size = num_input
    # Hidden stack: fc1/relu1 ... fc4/relu4, all num_hidden units wide.
    for idx in range(1, 5):
        net.add_layer(FcLayer(prev_size, num_hidden, params), "fc" + str(idx))
        net.add_layer(ActivatorLayer(Relu()), "relu" + str(idx))
        prev_size = num_hidden
    # Output projection + softmax classification.
    net.add_layer(FcLayer(num_hidden, num_output, params), "fc5")
    net.add_layer(ActivatorLayer(Softmax()), "softmax")
    net.train(dataReader, checkpoint=1, need_test=False)
    net.ShowLossHistory()
def model():
    """Train a one-hidden-layer softmax classifier on the chinabank data."""
    reader = LoadData()
    n_input = reader.num_feature
    n_hidden = 8
    n_output = 3
    hp = CParameters(
        0.1,      # learning rate
        5000,     # max epoch
        10,       # batch size
        0.06,     # loss threshold for early stop
        LossFunctionName.CrossEntropy3,
        InitialMethod.Xavier,
        OptimizerName.SGD)
    net = NeuralNet(hp, "chinabank")
    net.add_layer(FcLayer(n_input, n_hidden, hp), "fc1")
    net.add_layer(ActivatorLayer(Relu()), "Relu1")
    net.add_layer(FcLayer(n_hidden, n_output, hp), "fc2")
    net.add_layer(ClassificationLayer(Softmax()), "softmax1")
    net.train(reader, checkpoint=10, need_test=True)
    net.ShowLossHistory()
    ShowResult(net, hp.toString())
    ShowData(reader)
def DropoutNet(num_input, num_hidden1, num_hidden2, num_output, params):
    """Build and train a 2-hidden-layer softmax classifier with dropout.

    Architecture: fc -> Relu -> fc -> Relu -> dropout(0.5) -> fc ->
    dropout(0.5) -> softmax.

    NOTE(review): this function reads a module-level ``dataReader`` in
    ``net.train()`` — no reader argument is accepted; confirm the global
    is bound before calling.
    """
    net = NeuralNet(params)
    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    # Fixed misleading locals: both activations are Relu, not sigmoid/tanh.
    # The layer-label strings are kept unchanged so previously saved
    # parameters / loss plots keyed by those names still match.
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "sigmoid")
    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "tanh")
    dp1 = DropoutLayer(num_hidden2, 0.5)
    net.add_layer(dp1, "dp1")
    fc3 = FcLayer(num_hidden2, num_output, params)
    net.add_layer(fc3, "fc3")
    # NOTE(review): dropout on the output logits just before softmax is
    # unusual — confirm this is intentional.
    dp2 = DropoutLayer(num_output, 0.5)
    net.add_layer(dp2, "dp2")
    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")
    net.train(dataReader, checkpoint=10, need_test=True)
    net.ShowLossHistory(0, None, 0, 1)
def DropoutNet(dataReader, num_input, num_hidden, num_output, params):
    """Build and train a 784-128-32-16-10 net with dropout after each hidden ReLU.

    Bug fix: the third FC/ReLU/Dropout trio previously reused the second
    trio's variables AND registered layer names ("fc2"/"relu2"/"dp2"),
    creating duplicate layer names in the net; it is now fc3/relu3/dp3.

    NOTE(review): layer widths are hard-coded, so the num_input /
    num_hidden / num_output arguments are ignored — confirm intended.
    """
    net = NeuralNet(params)
    fc1 = FcLayer(784, 128, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")
    drop1 = DropoutLayer(128, 0.3)
    net.add_layer(drop1, "dp1")
    fc2 = FcLayer(128, 32, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")
    drop2 = DropoutLayer(32, 0.5)
    net.add_layer(drop2, "dp2")
    # Fixed: was fc2/relu2/drop2 registered again as "fc2"/"relu2"/"dp2".
    fc3 = FcLayer(32, 16, params)
    net.add_layer(fc3, "fc3")
    relu3 = ActivatorLayer(Relu())
    net.add_layer(relu3, "relu3")
    drop3 = DropoutLayer(16, 0.5)
    net.add_layer(drop3, "dp3")
    fc5 = FcLayer(16, 10, params)
    net.add_layer(fc5, "fc5")
    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")
    net.train(dataReader, checkpoint=1)
    net.ShowLossHistory()
def model():
    """Fit a 1-4-1 sigmoid curve-fitting net and plot the fitted curve."""
    reader = LoadData()
    hp = CParameters(
        0.5,      # learning rate
        10000,    # max epoch
        10,       # batch size
        0.001,    # loss threshold for early stop
        LossFunctionName.MSE,
        InitialMethod.Xavier,
        OptimizerName.SGD)
    net = NeuralNet(hp, "Level1_CurveFittingNet")
    net.add_layer(FcLayer(1, 4, hp), "fc1")
    net.add_layer(ActivatorLayer(Sigmoid()), "sigmoid1")
    net.add_layer(FcLayer(4, 1, hp), "fc2")
    net.train(reader, checkpoint=100, need_test=True)
    net.ShowLossHistory()
    ShowResult(net, reader)
def model():
    """Train the "Income" binary classifier: four ReLU hidden layers + logistic output."""
    dr = LoadData()
    hidden_widths = [64, 64, 32, 16]
    params = HyperParameters(
        0.1,      # learning rate
        100,      # max epoch
        16,       # batch size
        1e-3,     # loss threshold for early stop
        net_type=NetType.BinaryClassifier,
        init_method=InitialMethod.Xavier)
    net = NeuralNet(params, "Income")
    fan_in = dr.num_feature
    # Hidden stack fc1/relu1 ... fc4/relu4.
    for i, width in enumerate(hidden_widths, 1):
        net.add_layer(FcLayer(fan_in, width, params), "fc" + str(i))
        net.add_layer(ActivatorLayer(Relu()), "relu" + str(i))
        fan_in = width
    net.add_layer(FcLayer(fan_in, 1, params), "fc5")
    net.add_layer(ClassificationLayer(Logistic()), "logistic")
    net.train(dr, checkpoint=10, need_test=True)
    net.ShowLossHistory("epoch")
def model2():
    """Train the "Income" binary classifier with Adam and cross-entropy loss."""
    dr = LoadData()
    hidden_widths = [64, 64, 32, 16]
    params = CParameters(
        0.01,     # learning rate
        1000,     # max epoch
        16,       # batch size
        0.001,    # loss threshold for early stop
        LossFunctionName.CrossEntropy2,
        InitialMethod.Xavier,
        OptimizerName.Adam)
    net = NeuralNet(params, "Income")
    fan_in = dr.num_feature
    # Hidden stack fc1/relu1 ... fc4/relu4.
    for i, width in enumerate(hidden_widths, 1):
        net.add_layer(FcLayer(fan_in, width, params), "fc" + str(i))
        net.add_layer(ActivatorLayer(Relu()), "relu" + str(i))
        fan_in = width
    net.add_layer(FcLayer(fan_in, 1, params), "fc5")
    net.add_layer(ClassificationLayer(Sigmoid()), "sigmoid5")
    net.train(dr, checkpoint=10, need_test=True)
    net.ShowLossHistory()
def model_sigmoid(num_input, num_hidden, num_output, hp):
    """Train a one-hidden-layer (sigmoid) softmax classifier on chinabank.

    NOTE(review): reads a module-level ``dataReader`` in train/ShowData —
    no reader is passed in; confirm the global is bound before calling.
    """
    net = NeuralNet(hp, "chinabank_sigmoid")
    net.add_layer(FcLayer(num_input, num_hidden, hp), "fc1")
    net.add_layer(ActivatorLayer(Sigmoid()), "Sigmoid1")
    net.add_layer(FcLayer(num_hidden, num_output, hp), "fc2")
    net.add_layer(ClassificationLayer(Softmax()), "softmax1")
    net.train(dataReader, checkpoint=50, need_test=True)
    net.ShowLossHistory("epoch")
    ShowResult(net, hp.toString())
    ShowData(dataReader)
def DropoutNet(dataReader, num_input, num_hidden, num_output, params):
    """5-FC softmax net with dropout after every hidden ReLU.

    Bug fix: the third dropout slot was registered with the FIRST dropout
    layer (``net.add_layer(drop1, "dp3")``), so ``drop3`` was created but
    never used and ``drop1`` appeared twice; it now adds ``drop3``.
    """
    net = NeuralNet(params)
    fc1 = FcLayer(num_input, num_hidden, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")
    drop1 = DropoutLayer(num_hidden, 0.1)   # lighter dropout near the input
    net.add_layer(drop1, "dp1")
    fc2 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")
    drop2 = DropoutLayer(num_hidden, 0.3)
    net.add_layer(drop2, "dp2")
    fc3 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc3, "fc3")
    relu3 = ActivatorLayer(Relu())
    net.add_layer(relu3, "relu3")
    drop3 = DropoutLayer(num_hidden, 0.3)
    net.add_layer(drop3, "dp3")   # fixed: was net.add_layer(drop1, "dp3")
    fc4 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc4, "fc4")
    relu4 = ActivatorLayer(Relu())
    net.add_layer(relu4, "relu4")
    drop4 = DropoutLayer(num_hidden, 0.3)
    net.add_layer(drop4, "dp4")
    fc5 = FcLayer(num_hidden, num_output, params)
    net.add_layer(fc5, "fc5")
    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")
    net.train(dataReader, checkpoint=1)
    net.ShowLossHistory()
def OverFitNet(num_input, num_hidden1, num_hidden2, num_output, params):
    """Unregularized 2-hidden-layer softmax net (used to demonstrate overfitting).

    NOTE(review): relies on a module-level ``dataReader`` in ``net.train()``
    — no reader argument is accepted; confirm the global is bound before
    calling.
    """
    net = NeuralNet(params)
    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    # Fixed misleading locals: both activations are Relu, not sigmoid/tanh.
    # Layer-label strings kept unchanged for checkpoint/plot compatibility.
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "sigmoid")
    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "tanh")
    fc3 = FcLayer(num_hidden2, num_output, params)
    net.add_layer(fc3, "fc3")
    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")
    net.train(dataReader, checkpoint=10)
    net.ShowLossHistory(0, None, 0, 1)
# Fragment: builds and trains the "PM25" 3-FC regression net.
# NOTE(review): `num_input` and `dr` are defined earlier, outside this view.
num_hidden1 = 16
num_hidden2 = 4
num_output = 1
max_epoch = 1000
batch_size = 100
learning_rate = 0.1
eps = 0.001   # loss threshold for early stop
params = CParameters(learning_rate, max_epoch, batch_size, eps,
                     LossFunctionName.MSE,
                     InitialMethod.MSRA,
                     OptimizerName.Momentum)
net = NeuralNet(params, "PM25")
fc1 = FcLayer(num_input, num_hidden1, params)
net.add_layer(fc1, "fc1")
# NOTE(review): local is named `sigmoid1` but holds a Relu activator.
sigmoid1 = ActivatorLayer(Relu())
net.add_layer(sigmoid1, "sigmoid1")
fc2 = FcLayer(num_hidden1, num_hidden2, params)
net.add_layer(fc2, "fc2")
# NOTE(review): local is named `sigmoid2` but holds a Relu activator.
sigmoid2 = ActivatorLayer(Relu())
net.add_layer(sigmoid2, "sigmoid2")
# Final linear layer; no activation (regression output).
fc3 = FcLayer(num_hidden2, num_output, params)
net.add_layer(fc3, "fc3")
net.train(dr, checkpoint=10, need_test=True)
net.ShowLossHistory()
def model():
    """Train the "HouseSingle" price-fitting net and report test-set MSE."""
    dr = LoadData()
    hidden_widths = [32, 16, 8, 4]
    params = HyperParameters(
        0.01,     # learning rate
        1000,     # max epoch
        16,       # batch size
        1e-6,     # loss threshold for early stop
        net_type=NetType.Fitting,
        init_method=InitialMethod.Xavier)
    net = NeuralNet(params, "HouseSingle")
    fan_in = dr.num_feature
    # Hidden stack fc1/r1 ... fc4/r4 with ReLU activations.
    for i, width in enumerate(hidden_widths, 1):
        net.add_layer(FcLayer(fan_in, width, params), "fc" + str(i))
        net.add_layer(ActivatorLayer(Relu()), "r" + str(i))
        fan_in = width
    net.add_layer(FcLayer(fan_in, 1, params), "fc5")
    # Warm-start from previously saved weights, then continue training.
    net.load_parameters()
    net.train(dr, checkpoint=10, need_test=True)
    # Test MSE in de-normalized units, scaled down by 1e4.
    output = net.inference(dr.XTest)
    real_output = dr.DeNormalizeY(output)
    mse = np.sum((dr.YTestRaw - real_output) ** 2) / dr.YTest.shape[0] / 10000
    print("mse=", mse)
    net.ShowLossHistory("epoch")
    ShowResult(net, dr)
# Fragment: builds and trains a 2-hidden-layer softmax classifier.
# NOTE(review): `max_epoch`, `batch_size`, `num_input`, `num_hidden1`,
# `num_hidden2`, `num_output` and `dataReader` are defined earlier,
# outside this view.
learning_rate = 0.2
eps = 0.08   # loss threshold for early stop
params = CParameters(learning_rate, max_epoch, batch_size, eps,
                     LossFunctionName.CrossEntropy3,
                     InitialMethod.MSRA,
                     OptimizerName.SGD)
net = NeuralNet(params)
fc1 = FcLayer(num_input, num_hidden1, params)
net.add_layer(fc1, "fc1")
# NOTE(review): local is named `sigmoid` but holds a Relu activator.
sigmoid = ActivatorLayer(Relu())
net.add_layer(sigmoid, "sigmoid")
fc2 = FcLayer(num_hidden1, num_hidden2, params)
net.add_layer(fc2, "fc2")
# NOTE(review): local is named `tanh` but holds a Relu activator.
tanh = ActivatorLayer(Relu())
net.add_layer(tanh, "tanh")
fc3 = FcLayer(num_hidden2, num_output, params)
net.add_layer(fc3, "fc3")
softmax = ActivatorLayer(Softmax())
net.add_layer(softmax, "softmax")
net.train(dataReader, checkpoint=1, need_test=True)
net.ShowLossHistory(0, None, 0, 1)
# Fragment: builds and trains a 5-FC softmax classifier.
# NOTE(review): `net`, `params`, `num_input`, the `num_hidden*` sizes,
# `num_output` and `dataReader` are defined earlier, outside this view.
fc1 = FcLayer(num_input, num_hidden1, params)
net.add_layer(fc1, "fc1")
r1 = ActivatorLayer(Relu())
net.add_layer(r1, "r1")
fc2 = FcLayer(num_hidden1, num_hidden2, params)
net.add_layer(fc2, "fc2")
r2 = ActivatorLayer(Relu())
net.add_layer(r2, "r2")
fc3 = FcLayer(num_hidden2, num_hidden3, params)
net.add_layer(fc3, "fc3")
r3 = ActivatorLayer(Relu())
net.add_layer(r3, "r3")
fc4 = FcLayer(num_hidden3, num_hidden4, params)
net.add_layer(fc4, "fc4")
r4 = ActivatorLayer(Relu())
net.add_layer(r4, "r4")
fc5 = FcLayer(num_hidden4, num_output, params)
net.add_layer(fc5, "fc5")
softmax = ClassificationLayer(Softmax())
net.add_layer(softmax, "softmax")
#net.load_parameters()
# NOTE(review): checkpoint=0.5 — presumably a fraction of an epoch between
# checkpoints; confirm against NeuralNet.train.
net.train(dataReader, checkpoint=0.5, need_test=True)
net.ShowLossHistory("epoch")
def model_dropout():
    """Train the "HouseSingleDropout64" fitting net (L1-regularized) and report test MSE.

    Despite the name, all DropoutLayer insertions were commented out in the
    original; only L1 regularization is active.
    """
    dr = LoadData()
    hidden_widths = [64, 64, 64, 8]
    params = CParameters(
        0.01,     # learning rate
        2000,     # max epoch
        16,       # batch size
        1e-6,     # loss threshold for early stop
        LossFunctionName.MSE,
        InitialMethod.Xavier,
        OptimizerName.Momentum,
        RegularMethod.L1, 0.001)
    net = NeuralNet(params, "HouseSingleDropout64")
    fan_in = dr.num_feature
    # Hidden stack fc1/r1 ... fc4/r4 with ReLU activations.
    for i, width in enumerate(hidden_widths, 1):
        net.add_layer(FcLayer(fan_in, width, params), "fc" + str(i))
        net.add_layer(ActivatorLayer(Relu()), "r" + str(i))
        fan_in = width
    net.add_layer(FcLayer(fan_in, 1, params), "fc5")
    # Warm-start from previously saved weights, then continue training.
    net.load_parameters()
    net.train(dr, checkpoint=10, need_test=True)
    # Test MSE in de-normalized units, scaled down by 1e4.
    output = net.inference(dr.XTest)
    real_output = dr.DeNormalizeY(output)
    mse = np.sum((dr.YTestRaw - real_output) ** 2) / dr.YTest.shape[0] / 10000
    print("mse=", mse)
    net.ShowLossHistory()
    ShowResult(net, dr)