def WalkThroughAllOptimizers(option):
    """Train a two-layer fitting net with one optimizer configuration.

    option: presumably (OptimizerName, eta, n_hidden) — confirm against caller.
    """
    # load the raw data and keep the normalized views for plotting later
    reader = DataReader(x_data_name, y_data_name)
    raw_x, raw_y = reader.ReadData()
    norm_x = reader.NormalizeX()
    norm_y = reader.NormalizeY()

    # topology: num_feature inputs -> option[2] hidden units -> 1 output
    num_input, num_output = reader.num_feature, 1
    num_hidden = option[2]
    learning_rate, batch_size, max_epoch = option[1], 10, 10000
    stop_eps = 0.001

    hyper = CParameters(num_input, num_hidden, num_output, learning_rate,
                        max_epoch, batch_size, stop_eps,
                        InitialMethod.Xavier, option[0])

    history = CLossHistory()
    net = TwoLayerNet(NetType.Fitting)
    wbs = net.train(reader, hyper, history)

    # report and visualize the epoch with the smallest recorded loss
    best = history.GetMinimalLossData()
    print(best.toString())
    chart_title = history.ShowLossHistory(hyper)

    print("wait for 10 seconds...")

    ShowResult(net, norm_x, norm_y, chart_title, best.wb1, best.wb2)
# Example #2
def train(ne, batch, eta):
    """Train a two-layer fitting network and return its loss history.

    ne:    hidden-layer width
    batch: mini-batch size
    eta:   learning rate
    """
    reader = DataReader(x_data_name, y_data_name)
    raw_x, raw_y = reader.ReadData()
    norm_x = reader.NormalizeX(passthrough=True)
    norm_y = reader.NormalizeY()

    # 1 input feature -> ne hidden units -> 1 output; fixed epoch budget
    hyper = CParameters(1, ne, 1, eta, 10000, batch, 0.001,
                        LossFunctionName.MSE,
                        InitialMethod.Xavier)

    history = CLossHistory(hyper)
    net = TwoLayerFittingNet()
    wb1, wb2 = net.train(reader, hyper, history)
    return history
# Example #3
def train(init_method):
    """Train a 1-4-1 fitting network with the given weight-initialization
    method and plot the fit at the minimal-loss epoch."""
    reader = DataReader(x_data_name, y_data_name)
    raw_x, raw_y = reader.ReadData()
    norm_x = reader.NormalizeX(passthrough=True)
    norm_y = reader.NormalizeY()

    # fixed topology and schedule; only init_method varies between runs
    hyper = CParameters(1, 4, 1, 0.5, 30000, 10, 0.001,
                        init_method, OptimizerName.SGD)

    history = CLossHistory()
    net = TwoLayerNet(NetType.Fitting)
    net.train(reader, hyper, history)

    # plot the snapshot with the smallest recorded loss against raw labels
    best = history.GetMinimalLossData()
    print(best.toString())
    chart_title = history.ShowLossHistory(hyper)
    ShowResult(net, norm_x, raw_y, chart_title, best.wb1, best.wb2)
def WalkThroughAllOptimizers(option):
    """Train a two-layer classification net with one optimizer configuration.

    option: presumably (OptimizerName, eta) — option[0] is forwarded as the
    optimizer choice and option[1] as the learning rate; confirm with caller.
    """
    # read raw data, normalize features, one-hot encode the class labels
    dataReader = DataReader(x_data_name, y_data_name)
    XData,YData = dataReader.ReadData()
    X = dataReader.NormalizeX()
    Y = dataReader.ToOneHot()
    
    n_input, n_output = dataReader.num_feature,  dataReader.num_category
    n_hidden = 8
    eta, batch_size, max_epoch = option[1], 10, 10000
    eps = 0.06

    params = HyperParameters41(n_input, n_output, n_hidden,
                         eta, max_epoch, batch_size, eps, 
                         LossFunctionName.CrossEntropy3, 
                         InitialMethod.Xavier,
                         option[0])

    loss_history = CLossHistory()
    net = TwoLayerClassificationNet()

    #ShowData(XData, YData)

    net.train(dataReader, params, loss_history)

    # report the epoch with the smallest recorded loss
    trace = loss_history.GetMinimalLossData()
    print(trace.toString())
    title = loss_history.ShowLossHistory(params)

    print("wait for 10 seconds...")

    # rebuild the best weight/bias snapshot and visualize the decision area
    wbs_min = WeightsBias30(params)
    wbs_min.W1 = trace.dict_weights["W1"]
    wbs_min.W2 = trace.dict_weights["W2"]
    wbs_min.B1 = trace.dict_weights["B1"]
    wbs_min.B2 = trace.dict_weights["B2"]
    ShowAreaResult(X, wbs_min, net, title)
    ShowData(X, YData)
# Example #5
def WalkThroughAllOptimizers(option):
    """Train a two-layer fitting net with one optimizer configuration.

    option: presumably (OptimizerName, eta) — option[0] is forwarded as the
    optimizer choice and option[1] as the learning rate; confirm with caller.
    """
    # read raw data and keep normalized views for plotting
    dataReader = DataReader(x_data_name, y_data_name)
    XData,YData = dataReader.ReadData()
    X = dataReader.NormalizeX()
    Y = dataReader.NormalizeY()
    
    n_input, n_output = dataReader.num_feature, 1
    n_hidden = 4
    eta, batch_size, max_epoch = option[1], 10, 10000
    eps = 0.001

    # NOTE(review): argument order here is (n_input, n_output, n_hidden),
    # unlike other CParameters call sites in this file which pass
    # (n_input, n_hidden, n_output) — verify against the class definition.
    params = CParameters(n_input, n_output, n_hidden,
                         eta, max_epoch, batch_size, eps, 
                         LossFunctionName.MSE, 
                         InitialMethod.Xavier,
                         option[0])

    loss_history = CLossHistory()
    net = TwoLayerFittingNet()

    #ShowData(XData, YData)

    wbs = net.train(dataReader, params, loss_history)

    # report the epoch with the smallest recorded loss
    trace = loss_history.GetMinimalLossData()
    print(trace.toString())
    title = loss_history.ShowLossHistory(params)

    print("wait for 10 seconds...")

    # rebuild the best weight/bias snapshot and plot the fitted curve
    wbs_min = WeightsBias(params)
    wbs_min.W1 = trace.dict_weights["W1"]
    wbs_min.W2 = trace.dict_weights["W2"]
    wbs_min.B1 = trace.dict_weights["B1"]
    wbs_min.B2 = trace.dict_weights["B2"]
    ShowResult(X, Y, net, wbs_min, title)
def Train():
    """Train a 1-3-1 fitting net, plot the minimal-loss result, and save
    that snapshot's weights to disk."""
    reader = DataReader(x_data_name, y_data_name)
    reader.ReadData()
    reader.NormalizeX()
    reader.NormalizeY()

    # topology 1-3-1; eta=0.5, batch=10, up to 50000 epochs, stop at eps=0.001
    hyper = CParameters(1, 3, 1, 0.5, 50000, 10, 0.001)

    # SGD, MiniBatch, FullBatch
    history = CLossHistory()
    net = TwoLayerFittingNet()
    wb1, wb2 = net.train(reader, hyper, history)

    # pick the epoch with the smallest recorded loss and visualize it
    best = history.GetMinimalLossData()
    print(best.toString())
    chart_title = history.ShowLossHistory(hyper)

    ShowResult(net, reader.X, reader.Y, chart_title, best.wb1, best.wb2)
    # persist the best-epoch weights for later reuse
    best.wb1.Save("wb1")
    best.wb2.Save("wb2")
# Example #7
    plt.plot(X[0, :], Y[0, :], '.', c='b')
    # create and draw visualized validation data
    TX = np.linspace(0, 1, 100).reshape(1, 100)
    dict_cache = net.ForwardCalculationBatch(TX, wb1, wb2)
    TY = dict_cache["Output"]
    plt.plot(TX, TY, 'x', c='r')
    plt.title(title)
    plt.show()


#end def

# Script entry point: train a 1-3-1 fitting network on the normalized data.
if __name__ == '__main__':
    dataReader = DataReader(x_data_name, y_data_name)
    dataReader.ReadData()
    dataReader.NormalizeX()
    dataReader.NormalizeY()

    # topology: 1 input, 3 hidden units, 1 output
    n_input, n_hidden, n_output = 1, 3, 1
    eta, batch_size, max_epoch = 0.5, 10, 50000
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output, eta, max_epoch,
                         batch_size, eps)

    # SGD, MiniBatch, FullBatch
    loss_history = CLossHistory()
    net = TwoLayerFittingNet()
    wb1, wb2 = net.train(dataReader, params, loss_history)

    # NOTE(review): trace is assigned but never used below — this snippet
    # appears truncated by the extraction; confirm against the original file.
    trace = loss_history.GetMinimalLossData()
def ShowResult(net, X, Y, title, wb1, wb2):
    """Plot the training samples (blue dots) and the net's predictions on a
    uniform grid of 100 points in [0, 1] (red crosses)."""
    # training data
    plt.plot(X[0, :], Y[0, :], '.', c='b')
    # evaluate the trained net on evenly spaced validation inputs
    grid_x = np.linspace(0, 1, 100).reshape(1, 100)
    cache = net.ForwardCalculationBatch(grid_x, wb1, wb2)
    grid_y = cache["Output"]
    plt.plot(grid_x, grid_y, 'x', c='r')
    plt.title(title)
    plt.show()


if __name__ == '__main__':
    dataReader = DataReader(x_data_name, y_data_name)
    XData,YData = dataReader.ReadData()
    X = dataReader.NormalizeX(passthrough=True)
    Y = dataReader.NormalizeY()
    # 为了说明问题,我们用2个隐层单元和20批大小来做试验
    n_input, n_hidden, n_output = 1, 4, 1
    eta, batch_size, max_epoch = 0.1, 1, 10000
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output,
                         eta, max_epoch, batch_size, eps, 
                         LossFunctionName.MSE,
                         InitialMethod.Xavier)

    loss_history = CLossHistory(params)

    # 试验时,先注释掉try_2,运行try_1;然后注释掉try_1,运行try_2
    #lr_Searcher = try_1()