Example #1
def Net(dataReader, num_input, num_hidden, num_output, params):
    net = NeuralNet(params)

    fc1 = FcLayer(num_input, num_hidden, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    fc2 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    fc3 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc3, "fc3")
    relu3 = ActivatorLayer(Relu())
    net.add_layer(relu3, "relu3")

    fc4 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc4, "fc4")
    relu4 = ActivatorLayer(Relu())
    net.add_layer(relu4, "relu4")

    fc5 = FcLayer(num_hidden, num_output, params)
    net.add_layer(fc5, "fc5")
    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=1, need_test=False)

    net.ShowLossHistory()
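Note: the ActivatorLayer(Relu()) pairs above wrap an activation object that exposes a forward and a backward pass. As a point of reference, here is a minimal sketch of what such a Relu object could look like; the method names and signatures are assumptions for illustration, not the actual ai-edu API.

import numpy as np

class Relu(object):
    # Minimal ReLU sketch: forward zeroes negatives, backward masks gradients.
    def forward(self, z):
        self.mask = (z > 0)       # remember which units were active
        return z * self.mask

    def backward(self, delta):
        return delta * self.mask  # gradient flows only through active units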
Example #2
def Model(dataReader, num_input, num_hidden, num_output, params):
    net = NeuralNet41(params, "overfitting")

    fc1 = FcLayer(num_input, num_hidden, params)
    net.add_layer(fc1, "fc1")
    s1 = ActivatorLayer(Sigmoid())
    net.add_layer(s1, "s1")

    fc2 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc2, "fc2")
    t2 = ActivatorLayer(Tanh())
    net.add_layer(t2, "t2")

    fc3 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc3, "fc3")
    t3 = ActivatorLayer(Tanh())
    net.add_layer(t3, "t3")

    fc4 = FcLayer(num_hidden, num_output, params)
    net.add_layer(fc4, "fc4")

    net.train(dataReader, checkpoint=100, need_test=True)
    net.ShowLossHistory(XCoordinate.Epoch, ymin=0.8)

    return net
Example #3
def DropoutNet(dataReader, num_input, num_hidden, num_output, params):
    net = NeuralNet(params)

    fc1 = FcLayer(784, 128, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    drop1 = DropoutLayer(128, 0.3)
    net.add_layer(drop1, "dp1")

    fc2 = FcLayer(128, 32, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    drop2 = DropoutLayer(32, 0.5)
    net.add_layer(drop2, "dp2")

    fc3 = FcLayer(32, 16, params)
    net.add_layer(fc3, "fc3")
    relu3 = ActivatorLayer(Relu())
    net.add_layer(relu3, "relu3")

    drop3 = DropoutLayer(16, 0.5)
    net.add_layer(drop3, "dp3")

    fc5 = FcLayer(16, 10, params)
    net.add_layer(fc5, "fc5")
    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=1)

    net.ShowLossHistory()
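DropoutLayer(128, 0.3) above presumably drops each unit with the given probability during training. A minimal inverted-dropout sketch follows, assuming the second constructor argument is the drop ratio; the real ai-edu layer may scale differently or handle inference elsewhere.

import numpy as np

class DropoutLayer(object):
    # Inverted dropout: zero random units while training and scale the
    # survivors by 1/keep_prob so the expected activation is unchanged.
    def __init__(self, input_size, ratio):
        self.dropout_ratio = ratio   # probability of dropping a unit
        self.mask = None

    def forward(self, x, train=True):
        if train:
            keep_prob = 1.0 - self.dropout_ratio
            self.mask = (np.random.rand(*x.shape) < keep_prob) / keep_prob
            return x * self.mask
        return x                     # inference: identity

    def backward(self, delta):
        return delta * self.mask     # only kept units pass gradient back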
Example #4
def DropoutNet(dataReader, num_input, num_hidden1, num_hidden2, num_output, params):
    net = NeuralNet(params)

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")

    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")

    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    dp1 = DropoutLayer(num_hidden2, 0.5)
    net.add_layer(dp1, "dp1")

    fc3 = FcLayer(num_hidden2, num_output, params)
    net.add_layer(fc3, "fc3")

    dp2 = DropoutLayer(num_output, 0.5)
    net.add_layer(dp2, "dp2")


    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=10, need_test=True)
    
    net.ShowLossHistory(0, None, 0, 1)
Example #5
def model():
    dr = LoadData()

    num_input = dr.num_feature
    num_hidden1 = 64
    num_hidden2 = 64
    num_hidden3 = 32
    num_hidden4 = 16
    num_output = 1

    max_epoch = 100
    batch_size = 16
    learning_rate = 0.1
    eps = 1e-3

    params = HyperParameters40(learning_rate,
                               max_epoch,
                               batch_size,
                               eps,
                               net_type=NetType.BinaryClassifier,
                               init_method=InitialMethod.Xavier)

    net = NeuralNet40(params, "Income")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    a1 = ActivatorLayer(Relu())
    net.add_layer(a1, "relu1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    a2 = ActivatorLayer(Relu())
    net.add_layer(a2, "relu2")

    fc3 = FcLayer(num_hidden2, num_hidden3, params)
    net.add_layer(fc3, "fc3")
    a3 = ActivatorLayer(Relu())
    net.add_layer(a3, "relu3")

    fc4 = FcLayer(num_hidden3, num_hidden4, params)
    net.add_layer(fc4, "fc4")
    a4 = ActivatorLayer(Relu())
    net.add_layer(a4, "relu4")

    fc5 = FcLayer(num_hidden4, num_output, params)
    net.add_layer(fc5, "fc5")
    logistic = ClassificationLayer(Logistic())
    net.add_layer(logistic, "logistic")

    #net.load_parameters()

    net.train(dr, checkpoint=10, need_test=True)
    net.ShowLossHistory("epoch")
Example #6
def model2():
    dr = LoadData()

    num_input = dr.num_feature
    num_hidden1 = 64
    num_hidden2 = 64
    num_hidden3 = 32
    num_hidden4 = 16
    num_output = 1

    max_epoch = 1000
    batch_size = 16
    learning_rate = 0.01
    eps = 0.001

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                         LossFunctionName.CrossEntropy2, InitialMethod.Xavier,
                         OptimizerName.Adam)

    net = NeuralNet(params, "Income")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    a1 = ActivatorLayer(Relu())
    net.add_layer(a1, "relu1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    a2 = ActivatorLayer(Relu())
    net.add_layer(a2, "relu2")

    fc3 = FcLayer(num_hidden2, num_hidden3, params)
    net.add_layer(fc3, "fc3")
    a3 = ActivatorLayer(Relu())
    net.add_layer(a3, "relu3")

    fc4 = FcLayer(num_hidden3, num_hidden4, params)
    net.add_layer(fc4, "fc4")
    a4 = ActivatorLayer(Relu())
    net.add_layer(a4, "relu4")

    fc5 = FcLayer(num_hidden4, num_output, params)
    net.add_layer(fc5, "fc5")
    sigmoid5 = ClassificationLayer(Sigmoid())
    net.add_layer(sigmoid5, "sigmoid5")

    #net.load_parameters()

    net.train(dr, checkpoint=10, need_test=True)
    net.ShowLossHistory()
Example #7
File: Level1_ch09.py Project: z5wjz/ai-edu
def model():
    dataReader = LoadData()
    num_input = 1
    num_hidden1 = 4
    num_output = 1

    max_epoch = 10000
    batch_size = 10
    learning_rate = 0.5
    eps = 0.001

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                         LossFunctionName.MSE, InitialMethod.Xavier,
                         OptimizerName.SGD)

    net = NeuralNet(params, "Level1_CurveFittingNet")
    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    sigmoid1 = ActivatorLayer(Sigmoid())
    net.add_layer(sigmoid1, "sigmoid1")
    fc2 = FcLayer(num_hidden1, num_output, params)
    net.add_layer(fc2, "fc2")

    #net.load_parameters()
    #ShowResult(net, dataReader, params.toString())
    #ShowResult2(net, dataReader)

    net.train(dataReader, checkpoint=100, need_test=True)
    net.ShowLossHistory()

    #ShowResult(net, dataReader, params.toString())
    ShowResult(net, dataReader)
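LossFunctionName.MSE above selects a mean-squared-error objective for the curve-fitting net. A small sketch of one common form (whether ai-edu includes the 1/2 factor is an assumption; it merely cancels when differentiating, leaving the gradient (a - y) / m):

import numpy as np

def mse_loss(a, y):
    # Half mean squared error over a batch of m samples.
    m = y.shape[0]
    return np.sum((a - y) ** 2) / (2 * m)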
Example #8
def model():
    dataReader = LoadData()
    num_input = 1
    num_hidden1 = 4
    num_output = 1

    max_epoch = 10000
    batch_size = 10
    learning_rate = 0.5
    eps = 1e-5

    params = HyperParameters40(learning_rate,
                               max_epoch,
                               batch_size,
                               eps,
                               net_type=NetType.Fitting,
                               init_method=InitialMethod.Xavier)

    net = NeuralNet40(params, "Level1_CurveFittingNet")
    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    sigmoid1 = ActivatorLayer(Sigmoid())
    net.add_layer(sigmoid1, "sigmoid1")
    fc2 = FcLayer(num_hidden1, num_output, params)
    net.add_layer(fc2, "fc2")

    net.train(dataReader, checkpoint=100, need_test=True)

    net.ShowLossHistory("epoch")
    ShowResult(net, dataReader)
Example #9
File: Level3_ch11.py Project: z5wjz/ai-edu
def model():
    dataReader = LoadData()
    num_input = dataReader.num_feature
    num_hidden1 = 8
    num_output = 3

    max_epoch = 5000
    batch_size = 10
    learning_rate = 0.1
    eps = 0.06

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                         LossFunctionName.CrossEntropy3, InitialMethod.Xavier,
                         OptimizerName.SGD)

    net = NeuralNet(params, "chinabank")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    r1 = ActivatorLayer(Relu())
    net.add_layer(r1, "Relu1")

    fc2 = FcLayer(num_hidden1, num_output, params)
    net.add_layer(fc2, "fc2")
    softmax1 = ClassificationLayer(Softmax())
    net.add_layer(softmax1, "softmax1")

    net.train(dataReader, checkpoint=10, need_test=True)
    net.ShowLossHistory()

    ShowResult(net, params.toString())
    ShowData(dataReader)
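ClassificationLayer(Softmax()) with LossFunctionName.CrossEntropy3 is the usual multi-class pairing; their combined gradient simplifies to a - y, which is why the softmax sits in a classification layer rather than a plain activator. A numerically stable softmax sketch, assuming samples are rows (the actual ai-edu layout may be transposed):

import numpy as np

def softmax(z):
    # Subtract the row max before exponentiating to avoid overflow.
    shift = z - np.max(z, axis=1, keepdims=True)
    exp_z = np.exp(shift)
    return exp_z / np.sum(exp_z, axis=1, keepdims=True)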
Example #10
def DropoutNet(dataReader, num_input, num_hidden, num_output, params):
    net = NeuralNet(params)

    fc1 = FcLayer(num_input, num_hidden, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")
    
    drop1 = DropoutLayer(num_hidden, 0.1)
    net.add_layer(drop1, "dp1")
    
    fc2 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")
    
    drop2 = DropoutLayer(num_hidden, 0.3)
    net.add_layer(drop2, "dp2")
    
    fc3 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc3, "fc3")
    relu3 = ActivatorLayer(Relu())
    net.add_layer(relu3, "relu3")
    
    drop3 = DropoutLayer(num_hidden, 0.3)
    net.add_layer(drop3, "dp3")
    
    fc4 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc4, "fc4")
    relu4 = ActivatorLayer(Relu())
    net.add_layer(relu4, "relu4")
    
    drop4 = DropoutLayer(num_hidden, 0.3)
    net.add_layer(drop4, "dp4")
    

    fc5 = FcLayer(num_hidden, num_output, params)
    net.add_layer(fc5, "fc5")
    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=1)
    
    net.ShowLossHistory()
Example #11
def net(init_method, activator):

    max_epoch = 1
    batch_size = 5
    learning_rate = 0.02
    eps = 0.01

    params = HyperParameters41(learning_rate,
                               max_epoch,
                               batch_size,
                               eps,
                               net_type=NetType.Fitting,
                               init_method=init_method,
                               optimizer_name=OptimizerName.SGD)

    loss_history = CLossHistory()

    net = NeuralNet41(params)
    num_hidden = [128, 128, 128, 128, 128, 128, 128]
    count = len(num_hidden) - 1
    layers = []

    for i in range(count):
        fc = FcLayer(num_hidden[i], num_hidden[i + 1], params)
        net.add_layer(fc, "fc")
        layers.append(fc)

        ac = ActivatorLayer(activator)
        net.add_layer(ac, "activator")
        layers.append(ac)

    # Draw 1000 samples from a normal distribution; each sample has num_hidden[0] features.
    # Transpose so the matrix can be multiplied with w1.
    x = np.random.randn(1000, num_hidden[0]).T
    #x = np.random.normal(size=num_hidden[0]).T

    # List of activation output matrices
    a_value = []
    a_value.append(x)

    # Run the forward pass through all layers in order
    for i in range(len(layers)):
        a = layers[i].forward(a_value[i])
        a_value.append(a)

    for i in range(count):
        ax = plt.subplot(1, count, i + 1)
        ax.set_title("layer" + str(i + 1))
        plt.ylim(0, 10000)
        if i > 0:
            plt.yticks([])
        ax.hist(a_value[i + 1].flatten(), bins=25, range=[0, 1])
    #end for
    # super title
    plt.suptitle(init_method.name + " : " + activator.get_name())
    plt.show()
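The histograms contrast how each InitialMethod keeps (or fails to keep) the activation distribution alive across deep layers. For reference, one common form of the two initializers named in these examples (ai-edu may use the uniform variants instead; treat this as a sketch):

import numpy as np

def init_weights(n_in, n_out, method="Xavier"):
    # Xavier scales for tanh/sigmoid-like units; MSRA (He) doubles the
    # variance to compensate for ReLU zeroing half of its inputs.
    if method == "Xavier":
        return np.random.randn(n_in, n_out) * np.sqrt(1.0 / n_in)
    if method == "MSRA":
        return np.random.randn(n_in, n_out) * np.sqrt(2.0 / n_in)
    return np.random.randn(n_in, n_out) * 0.01  # naive small normal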
Example #12
def Net(subfolder,
        dataReader,
        num_input,
        num_hidden,
        num_output,
        params,
        show_history=True):
    net = NeuralNet_4_2(params, subfolder)

    fc1 = FcLayer_2_0(num_input, num_hidden, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    fc2 = FcLayer_2_0(num_hidden, num_hidden, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    fc3 = FcLayer_2_0(num_hidden, num_hidden, params)
    net.add_layer(fc3, "fc3")
    relu3 = ActivatorLayer(Relu())
    net.add_layer(relu3, "relu3")

    fc4 = FcLayer_2_0(num_hidden, num_hidden, params)
    net.add_layer(fc4, "fc4")
    relu4 = ActivatorLayer(Relu())
    net.add_layer(relu4, "relu4")

    fc5 = FcLayer_2_0(num_hidden, num_output, params)
    net.add_layer(fc5, "fc5")
    softmax = ClassificationLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=1, need_test=True)
    if show_history:
        net.ShowLossHistory(XCoordinate.Iteration)

    return net
Example #13
def Net(dataReader,
        num_input,
        num_hidden,
        num_output,
        params,
        show_history=True):
    net = NeuralNet41(params, "mnist_overfitting")

    fc1 = FcLayer(num_input, num_hidden, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    fc2 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    fc3 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc3, "fc3")
    relu3 = ActivatorLayer(Relu())
    net.add_layer(relu3, "relu3")

    fc4 = FcLayer(num_hidden, num_hidden, params)
    net.add_layer(fc4, "fc4")
    relu4 = ActivatorLayer(Relu())
    net.add_layer(relu4, "relu4")

    fc5 = FcLayer(num_hidden, num_output, params)
    net.add_layer(fc5, "fc5")
    softmax = ClassificationLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=1, need_test=True)
    if show_history:
        net.ShowLossHistory()

    return net
Example #14
def OverFitNet(dataReader, num_input, num_hidden1, num_hidden2, num_output, params):
    net = NeuralNet(params)

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")

    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")

    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    fc3 = FcLayer(num_hidden2, num_output, params)
    net.add_layer(fc3, "fc3")

    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=10)
    
    net.ShowLossHistory(0, None, 0, 1)
Example #15
def model_sigmoid(dataReader, num_input, num_hidden, num_output, hp):
    net = NeuralNet(hp, "chinabank_sigmoid")

    fc1 = FcLayer(num_input, num_hidden, hp)
    net.add_layer(fc1, "fc1")
    s1 = ActivatorLayer(Sigmoid())
    net.add_layer(s1, "Sigmoid1")

    fc2 = FcLayer(num_hidden, num_output, hp)
    net.add_layer(fc2, "fc2")
    softmax1 = ClassificationLayer(Softmax())
    net.add_layer(softmax1, "softmax1")

    net.train(dataReader, checkpoint=50, need_test=True)
    net.ShowLossHistory("epoch")
    
    ShowResult(net, hp.toString())
    ShowData(dataReader)
Example #16
    num_input = dr.num_feature
    num_hidden1 = 16
    num_hidden2 = 4
    num_output = 1

    max_epoch = 1000
    batch_size = 100
    learning_rate = 0.1
    eps = 0.001

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                         LossFunctionName.MSE, InitialMethod.MSRA,
                         OptimizerName.Momentum)

    net = NeuralNet(params, "PM25")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    fc3 = FcLayer(num_hidden2, num_output, params)
    net.add_layer(fc3, "fc3")

    net.train(dr, checkpoint=10, need_test=True)
    net.ShowLossHistory()
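OptimizerName.Momentum presumably updates each weight along an exponentially decayed velocity rather than the raw gradient. A minimal sketch of classic momentum (class and parameter names are illustrative, not the ai-edu API):

class MomentumOptimizer(object):
    # v accumulates past gradients; beta controls how quickly they decay.
    def __init__(self, lr=0.1, beta=0.9):
        self.lr = lr
        self.beta = beta
        self.v = 0.0

    def update(self, w, grad):
        self.v = self.beta * self.v - self.lr * grad
        return w + self.v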
Example #17
    max_epoch = 50
    batch_size = 32
    learning_rate = 0.01
    eps = 0.08

    params = CParameters(
        learning_rate, max_epoch, batch_size, eps,
        LossFunctionName.CrossEntropy3, 
        InitialMethod.MSRA, 
        OptimizerName.SGD)

    net = NeuralNet(params, "Cifar10")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    r1 = ActivatorLayer(Relu())
    net.add_layer(r1, "r1")
    
    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    r2 = ActivatorLayer(Relu())
    net.add_layer(r2, "r2")

    fc3 = FcLayer(num_hidden2, num_hidden3, params)
    net.add_layer(fc3, "fc3")
    r3 = ActivatorLayer(Relu())
    net.add_layer(r3, "r3")

    fc4 = FcLayer(num_hidden3, num_hidden4, params)
    net.add_layer(fc4, "fc4")
    r4 = ActivatorLayer(Relu())
    net.add_layer(r4, "r4")
Example #18
    num_hidden = 32
    num_output = 1

    max_epoch = 1000
    batch_size = 16
    learning_rate = 0.1
    eps = 0.001

    params = CParameters(
        learning_rate, max_epoch, batch_size, eps,
        LossFunctionName.CrossEntropy2,
        InitialMethod.Xavier, 
        OptimizerName.SGD)

    net = NeuralNet(params, "Income")

    fc1 = FcLayer(num_input, num_hidden, params)
    net.add_layer(fc1, "fc1")
    sigmoid1 = ActivatorLayer(Sigmoid())
    net.add_layer(sigmoid1, "sigmoid1")
    
    fc2 = FcLayer(num_hidden, num_output, params)
    net.add_layer(fc2, "fc2")
    sigmoid2 = ActivatorLayer(Sigmoid())
    net.add_layer(sigmoid2, "sigmoid2")

    #net.load_parameters()

    net.train(dr, checkpoint=10, need_test=True)
    net.ShowLossHistory()
Example #19
if __name__ == '__main__':
    dataReader = LoadData()
    num_input = dataReader.num_feature
    num_hidden1 = 8
    num_output = 3

    max_epoch = 1000
    batch_size = 10
    learning_rate = 0.1
    eps = 0.06

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                         LossFunctionName.CrossEntropy3, InitialMethod.Xavier,
                         OptimizerName.SGD)

    net = NeuralNet(params)
    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")
    fc2 = FcLayer(num_hidden1, num_output, params)
    net.add_layer(fc2, "fc2")
    softmax1 = ClassificationLayer(Softmax())
    net.add_layer(softmax1, "softmax1")

    net.train(dataReader, checkpoint=10, need_test=False)
    net.ShowLossHistory()

    ShowResult(net, params.toString())
    ShowData(dataReader)
Example #20
def model():
    dr = LoadData()

    num_input = dr.num_feature
    num_hidden1 = 32
    num_hidden2 = 16
    num_hidden3 = 8
    num_hidden4 = 4
    num_output = 1

    max_epoch = 1000
    batch_size = 16
    learning_rate = 0.01
    eps = 1e-6

    params = HyperParameters(learning_rate,
                             max_epoch,
                             batch_size,
                             eps,
                             net_type=NetType.Fitting,
                             init_method=InitialMethod.Xavier)

    net = NeuralNet(params, "HouseSingle")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    r1 = ActivatorLayer(Relu())
    net.add_layer(r1, "r1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    r2 = ActivatorLayer(Relu())
    net.add_layer(r2, "r2")

    fc3 = FcLayer(num_hidden2, num_hidden3, params)
    net.add_layer(fc3, "fc3")
    r3 = ActivatorLayer(Relu())
    net.add_layer(r3, "r3")

    fc4 = FcLayer(num_hidden3, num_hidden4, params)
    net.add_layer(fc4, "fc4")
    r4 = ActivatorLayer(Relu())
    net.add_layer(r4, "r4")

    fc5 = FcLayer(num_hidden4, num_output, params)
    net.add_layer(fc5, "fc5")

    #ShowResult(net, dr)

    net.load_parameters()
    #Inference(net, dr)
    #exit()
    #ShowResult(net, dr)

    net.train(dr, checkpoint=10, need_test=True)

    output = net.inference(dr.XTest)
    real_output = dr.DeNormalizeY(output)
    mse = np.sum((dr.YTestRaw - real_output)**2) / dr.YTest.shape[0] / 10000
    print("mse=", mse)

    net.ShowLossHistory("epoch")

    ShowResult(net, dr)
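dr.DeNormalizeY maps the network's normalized predictions back to the original price scale before the MSE is computed. Assuming simple min-max normalization of the labels (the actual scheme inside the data reader is an assumption), the inverse looks like:

def denormalize_y(y_norm, y_min, y_max):
    # Invert min-max scaling: y_norm in [0, 1] -> original units.
    return y_norm * (y_max - y_min) + y_min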
Example #21
    num_output = 10
    max_epoch = 20
    batch_size = 100
    learning_rate = 0.2
    eps = 0.08

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                         LossFunctionName.CrossEntropy3, InitialMethod.MSRA,
                         OptimizerName.SGD)

    net = NeuralNet(params)

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")

    relu1 = ActivatorLayer(Relu())
    net.add_layer(relu1, "relu1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")

    relu2 = ActivatorLayer(Relu())
    net.add_layer(relu2, "relu2")

    fc3 = FcLayer(num_hidden2, num_output, params)
    net.add_layer(fc3, "fc3")

    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=1, need_test=True)
Example #22
def model_dropout():
    dr = LoadData()

    num_input = dr.num_feature
    num_hidden1 = 64
    num_hidden2 = 64
    num_hidden3 = 64
    num_hidden4 = 8
    num_output = 1

    max_epoch = 2000
    batch_size = 16
    learning_rate = 0.01
    eps = 1e-6

    params = CParameters(
        learning_rate, max_epoch, batch_size, eps,
        LossFunctionName.MSE, 
        InitialMethod.Xavier, 
        OptimizerName.Momentum,
        RegularMethod.L1, 0.001)

    net = NeuralNet(params, "HouseSingleDropout64")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    r1 = ActivatorLayer(Relu())
    net.add_layer(r1, "r1")
    #d1 = DropoutLayer(num_hidden1, 0.2)
    #net.add_layer(d1, "d1")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    r2 = ActivatorLayer(Relu())
    net.add_layer(r2, "r2")
    #d2 = DropoutLayer(num_hidden2, 0.3)
    #net.add_layer(d2, "d2")

    fc3 = FcLayer(num_hidden2, num_hidden3, params)
    net.add_layer(fc3, "fc3")
    r3 = ActivatorLayer(Relu())
    net.add_layer(r3, "r3")
    #d3 = DropoutLayer(num_hidden3, 0.2)
    #net.add_layer(d3, "d3")

    fc4 = FcLayer(num_hidden3, num_hidden4, params)
    net.add_layer(fc4, "fc4")
    r4 = ActivatorLayer(Relu())
    net.add_layer(r4, "r4")
    #d4 = DropoutLayer(num_hidden4, 0.1)
    #net.add_layer(d4, "d4")
    
    fc5 = FcLayer(num_hidden4, num_output, params)
    net.add_layer(fc5, "fc5")


    #ShowResult(net, dr)

    net.load_parameters()
    #Inference(net, dr)
    #exit()
    #ShowResult(net, dr)

    net.train(dr, checkpoint=10, need_test=True)
    
    output = net.inference(dr.XTest)
    real_output = dr.DeNormalizeY(output)
    mse = np.sum((dr.YTestRaw - real_output)**2)/dr.YTest.shape[0]/10000
    print("mse=", mse)
    
    net.ShowLossHistory()

    ShowResult(net, dr)
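RegularMethod.L1 with factor 0.001 adds an L1 penalty to the loss, which drives small weights toward exactly zero. A hedged sketch of the penalty and its gradient contribution (helper names are illustrative):

import numpy as np

def l1_penalty(weights, lam=0.001):
    # lam * sum of absolute weights, summed over every weight matrix.
    return lam * sum(np.sum(np.abs(W)) for W in weights)

def l1_grad(dW, W, lam=0.001):
    # The subgradient of |W| is sign(W); add it to the data gradient.
    return dW + lam * np.sign(W)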
Example #23
    #ShowData(dataReader)
    num_input = dataReader.num_feature
    num_hidden1 = 8
    num_output = 3

    max_epoch = 1000
    batch_size = 10
    learning_rate = 0.1
    eps = 0.06

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                        LossFunctionName.CrossEntropy3, 
                        InitialMethod.Xavier, 
                        OptimizerName.SGD)

    net = NeuralNet(params)
    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    s1 = ActivatorLayer(Sigmoid())
    net.add_layer(s1, "sigmoid1")
    fc2 = FcLayer(num_hidden1, num_output, params)
    net.add_layer(fc2, "fc2")
    softmax1 = ClassificationLayer(Softmax())
    net.add_layer(softmax1, "softmax1")

    net.train(dataReader, checkpoint=10, need_test=False)
    net.ShowLossHistory()
    
    ShowResult(net, params.toString())
    ShowData(dataReader)
Example #24
    num_output = 10
    max_epoch = 50
    batch_size = 32
    learning_rate = 0.01
    eps = 1e-3

    params = HyperParameters(
        learning_rate, max_epoch, batch_size, eps,
        net_type=NetType.MultipleClassifier,
        init_method=InitialMethod.MSRA)

    net = NeuralNet(params, "Cifar10")

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")
    r1 = ActivatorLayer(Relu())
    net.add_layer(r1, "r1")
    
    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")
    r2 = ActivatorLayer(Relu())
    net.add_layer(r2, "r2")

    fc3 = FcLayer(num_hidden2, num_hidden3, params)
    net.add_layer(fc3, "fc3")
    r3 = ActivatorLayer(Relu())
    net.add_layer(r3, "r3")

    fc4 = FcLayer(num_hidden3, num_hidden4, params)
    net.add_layer(fc4, "fc4")
    r4 = ActivatorLayer(Relu())
    net.add_layer(r4, "r4")
Example #25
    num_output = 10
    max_epoch = 20
    batch_size = 100
    learning_rate = 0.2
    eps = 0.08

    params = CParameters(learning_rate, max_epoch, batch_size, eps,
                         LossFunctionName.CrossEntropy3, InitialMethod.Xavier,
                         OptimizerName.SGD)

    net = NeuralNet(params)

    fc1 = FcLayer(num_input, num_hidden1, params)
    net.add_layer(fc1, "fc1")

    sigmoid = ActivatorLayer(Sigmoid())
    net.add_layer(sigmoid, "sigmoid")

    fc2 = FcLayer(num_hidden1, num_hidden2, params)
    net.add_layer(fc2, "fc2")

    tanh = ActivatorLayer(Tanh())
    net.add_layer(tanh, "tanh")

    fc3 = FcLayer(num_hidden2, num_output, params)
    net.add_layer(fc3, "fc3")

    softmax = ActivatorLayer(Softmax())
    net.add_layer(softmax, "softmax")

    net.train(dataReader, checkpoint=1, need_test=True)
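For completeness, the Sigmoid and Tanh activators used in this last example have the classic derivatives that make deep stacks of them prone to vanishing gradients; both can be written in terms of the cached activation a. A small illustrative sketch (function names are assumptions, not the ai-edu API):

import numpy as np

def sigmoid_backward(a, delta):
    # d sigmoid(z)/dz = a * (1 - a), with a = sigmoid(z).
    return delta * a * (1.0 - a)

def tanh_backward(a, delta):
    # d tanh(z)/dz = 1 - a^2, with a = tanh(z).
    return delta * (1.0 - a * a)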