Example #1
def WalkThroughAllOptimizers(option):

    dataReader = DataReader(x_data_name, y_data_name)
    XData, YData = dataReader.ReadData()
    X = dataReader.NormalizeX()
    Y = dataReader.NormalizeY()

    n_input, n_output = dataReader.num_feature, 1
    n_hidden = option[2]                                 # hidden-layer width taken from the option tuple
    eta, batch_size, max_epoch = option[1], 10, 10000    # learning rate taken from the option tuple
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output, eta, max_epoch,
                         batch_size, eps, InitialMethod.Xavier, option[0])

    loss_history = CLossHistory()
    net = TwoLayerNet(NetType.Fitting)

    wbs = net.train(dataReader, params, loss_history)

    trace = loss_history.GetMinimalLossData()
    print(trace.toString())
    title = loss_history.ShowLossHistory(params)

    print("wait for 10 seconds...")

    ShowResult(net, X, Y, title, trace.wb1, trace.wb2)
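# A hypothetical driver for WalkThroughAllOptimizers above. The shape of the
# option tuple, (optimizer, eta, n_hidden), is inferred from how option[0],
# option[1] and option[2] are used inside the function; OptimizerName.SGD is
# the only optimizer named in these examples, so further entries are left out.
if __name__ == '__main__':
    options = [
        (OptimizerName.SGD, 0.1, 4),
    ]
    for option in options:
        WalkThroughAllOptimizers(option)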
Example #2
def train(ne, batch, eta):
    dataReader = DataReader(x_data_name, y_data_name)
    XData, YData = dataReader.ReadData()
    X = dataReader.NormalizeX(passthrough=True)
    Y = dataReader.NormalizeY()

    n_input, n_hidden, n_output = 1, ne, 1
    eta, batch_size, max_epoch = eta, batch, 10000
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output, eta, max_epoch,
                         batch_size, eps, LossFunctionName.MSE,
                         InitialMethod.Xavier)

    loss_history = CLossHistory(params)
    net = TwoLayerFittingNet()
    wb1, wb2 = net.train(dataReader, params, loss_history)
    return loss_history
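# A hedged usage sketch for train(ne, batch, eta) above: sweep a few hidden-layer
# sizes and report the minimal loss of each run. The particular values of ne,
# batch and eta are illustrative only; GetMinimalLossData() and toString() are
# used exactly as in the other examples.
if __name__ == '__main__':
    for ne in (2, 4, 8):
        loss_history = train(ne, batch=10, eta=0.5)
        trace = loss_history.GetMinimalLossData()
        print("n_hidden =", ne)
        print(trace.toString())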
Example #3
def train(init_method):
    dataReader = DataReader(x_data_name, y_data_name)
    XData, YData = dataReader.ReadData()
    X = dataReader.NormalizeX(passthrough=True)
    Y = dataReader.NormalizeY()

    n_input, n_hidden, n_output = 1, 4, 1
    eta, batch_size, max_epoch = 0.5, 10, 30000
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output, eta, max_epoch,
                         batch_size, eps, init_method, OptimizerName.SGD)

    loss_history = CLossHistory()
    net = TwoLayerNet(NetType.Fitting)
    net.train(dataReader, params, loss_history)

    trace = loss_history.GetMinimalLossData()
    print(trace.toString())
    title = loss_history.ShowLossHistory(params)
    ShowResult(net, X, YData, title, trace.wb1, trace.wb2)
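# A possible entry point for train(init_method) above. InitialMethod.Xavier is
# the only initializer named in these examples; comparing initializers would
# simply mean calling train() once per InitialMethod member.
if __name__ == '__main__':
    train(InitialMethod.Xavier)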
Example #4
def WalkThroughAllOptimizers(option):

    dataReader = DataReader(x_data_name, y_data_name)
    XData, YData = dataReader.ReadData()
    X = dataReader.NormalizeX()
    Y = dataReader.NormalizeY()
    
    n_input, n_output = dataReader.num_feature, 1
    n_hidden = 4
    eta, batch_size, max_epoch = option[1], 10, 10000
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output,
                         eta, max_epoch, batch_size, eps,
                         LossFunctionName.MSE,
                         InitialMethod.Xavier,
                         option[0])    # optimizer selected by the option tuple

    loss_history = CLossHistory()
    net = TwoLayerFittingNet()

    #ShowData(XData, YData)

    wbs = net.train(dataReader, params, loss_history)

    trace = loss_history.GetMinimalLossData()
    print(trace.toString())
    title = loss_history.ShowLossHistory(params)

    print("wait for 10 seconds...")

    wbs_min = WeightsBias(params)
    wbs_min.W1 = trace.dict_weights["W1"]
    wbs_min.W2 = trace.dict_weights["W2"]
    wbs_min.B1 = trace.dict_weights["B1"]
    wbs_min.B2 = trace.dict_weights["B2"]
    ShowResult(X, Y, net, wbs_min, title)
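# A hypothetical driver for this variant of WalkThroughAllOptimizers, which
# expects option to be (optimizer, eta); n_hidden is fixed at 4 inside the
# function. OptimizerName.SGD is the only optimizer named in these examples.
if __name__ == '__main__':
    for option in [(OptimizerName.SGD, 0.1)]:
        WalkThroughAllOptimizers(option)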
Example #5
def Train():
    dataReader = DataReader(x_data_name, y_data_name)
    dataReader.ReadData()
    dataReader.NormalizeX()
    dataReader.NormalizeY()

    n_input, n_hidden, n_output = 1, 3, 1
    eta, batch_size, max_epoch = 0.5, 10, 50000
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output, eta, max_epoch, batch_size, eps)

    # SGD, MiniBatch, FullBatch
    loss_history = CLossHistory()
    net = TwoLayerFittingNet()
    wb1, wb2 = net.train(dataReader, params, loss_history)

    trace = loss_history.GetMinimalLossData()
    print(trace.toString())
    title = loss_history.ShowLossHistory(params)

    ShowResult(net, dataReader.X, dataReader.Y, title, trace.wb1, trace.wb2)
    trace.wb1.Save("wb1")
    trace.wb2.Save("wb2")
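# Minimal entry point for Train() above; the saved "wb1"/"wb2" files hold the
# weights of the minimal-loss snapshot for later reuse.
if __name__ == '__main__':
    Train()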
Example #6
    # create and draw visualized validation data
    TX = np.linspace(0, 1, 100).reshape(1, 100)
    dict_cache = net.ForwardCalculationBatch(TX, wb1, wb2)
    TY = dict_cache["Output"]
    plt.plot(TX, TY, 'x', c='r')
    plt.title(title)
    plt.show()


#end def

if __name__ == '__main__':
    dataReader = DataReader(x_data_name, y_data_name)
    dataReader.ReadData()
    dataReader.NormalizeX()
    dataReader.NormalizeY()

    n_input, n_hidden, n_output = 1, 3, 1
    eta, batch_size, max_epoch = 0.5, 10, 50000
    eps = 0.001

    params = CParameters(n_input, n_hidden, n_output, eta, max_epoch,
                         batch_size, eps)

    # SGD, MiniBatch, FullBatch
    loss_history = CLossHistory()
    net = TwoLayerFittingNet()
    wb1, wb2 = net.train(dataReader, params, loss_history)

    trace = loss_history.GetMinimalLossData()
    print(trace.toString())