Example #1
def run_model(n,
              train_steps=50 * 1000,
              verbose=2,
              lr=0.001,
              anneal=True,
              plot=False,
              batch_size=32,
              epochs=1):
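    # Note: student, teach, power, and parent are module-level objects
    # assumed to be defined elsewhere in this script. The train_steps,
    # anneal, batch_size, and epochs arguments are accepted but unused;
    # lr is overridden by the hard-coded schedule below.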

    model = student.Model([20, n, n, 1],
                          softmax=False,
                          loss='power',
                          power=power)

    loc = 0

    # Stage 1: many steps with small batches at a higher learning rate
    bs = 200  # batch size
    tt = 200000  # number of training steps
    lr = 0.01
    history = model.train(teach.data_matrix_x[loc:loc + bs * tt],
                          teach.data_matrix_y[loc:loc + bs * tt],
                          learning_rate=lr,
                          batch_size=bs,
                          verbose=verbose,
                          epochs=1)
    loc += bs * tt

    # Stage 2: fewer steps with larger batches, same learning rate
    bs = 1000  # batch size
    tt = 20000  # number of training steps
    lr = 0.01
    history = model.train(teach.data_matrix_x[loc:loc + bs * tt],
                          teach.data_matrix_y[loc:loc + bs * tt],
                          learning_rate=lr,
                          batch_size=bs,
                          verbose=verbose,
                          epochs=1)
    loc += bs * tt

    # Stage 3: drop the learning rate for fine-tuning
    bs = 400  # batch size
    tt = 20000  # number of training steps
    lr = 0.001
    history = model.train(teach.data_matrix_x[loc:loc + bs * tt],
                          teach.data_matrix_y[loc:loc + bs * tt],
                          learning_rate=lr,
                          batch_size=bs,
                          verbose=verbose,
                          epochs=1)
    loc += bs * tt

    # Evaluate on a fresh batch drawn from the teacher
    x1, y1 = teach.predict_from_matrix(batch_size=10000)
    loss = model.evaluate(x1, y1)

    if plot:
        student.graph_sample(parent, model, folder='plot')

    # return [loss, model, model.count_params() - n - 1]
    return loss, model
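
run_model returns the evaluation loss together with the trained model, so a typical driver loop sweeps the hidden width n. A minimal usage sketch, assuming the module-level student, teach, power, and parent objects are already initialised and that model.evaluate returns a scalar loss (the widths below are illustrative, not from the original):

results = {}
for width in (16, 32, 64):  # illustrative hidden-layer widths
    loss, model = run_model(width, verbose=0)
    results[width] = loss
    print('width %d: loss %g' % (width, loss))

best = min(results, key=results.get)
print('best width:', best)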
Example #2

import numpy as np  # required by the np.concatenate calls below

def run_model(n,
              train_steps=50 * 1000,
              verbose=2,
              lr=0.001,
              anneal=True,
              plot=False,
              batch_size=32,
              epochs=1):

    model = student.Model([9, n, n, 1], softmax=False, loss='power')

    # Stitch a 9-feature input from three teachers (three columns from each)
    dm_x = np.concatenate(
        (teach1.data_matrix_x[:, 0:3], teach2.data_matrix_x[:, 3:6],
         teach3.data_matrix_x[:, 6:9]),
        axis=1)
    # The target is the sum of the three teachers' outputs
    dm_y = teach1.data_matrix_y + teach2.data_matrix_y + teach3.data_matrix_y

    loc = 0

    # Stage 1: many steps with small batches at a higher learning rate
    bs = 200  # batch size
    tt = 200000  # number of training steps
    lr = 0.01
    history = model.train(dm_x[loc:loc + bs * tt],
                          dm_y[loc:loc + bs * tt],
                          learning_rate=lr,
                          batch_size=bs,
                          verbose=verbose,
                          epochs=1)
    loc += bs * tt

    # Stage 2: fewer steps with larger batches, same learning rate
    bs = 1000  # batch size
    tt = 20000  # number of training steps
    lr = 0.01
    history = model.train(dm_x[loc:loc + bs * tt],
                          dm_y[loc:loc + bs * tt],
                          learning_rate=lr,
                          batch_size=bs,
                          verbose=verbose,
                          epochs=1)
    loc += bs * tt

    # Stage 3: largest batches at a reduced learning rate
    bs = 4000  # batch size
    tt = 20000  # number of training steps
    lr = 0.001
    history = model.train(dm_x[loc:loc + bs * tt],
                          dm_y[loc:loc + bs * tt],
                          learning_rate=lr,
                          batch_size=bs,
                          verbose=verbose,
                          epochs=1)
    loc += bs * tt

    # Evaluate on fresh teacher samples, stitched together the same way
    # as the training data
    x1, y1 = teach1.predict_from_matrix(batch_size=10000)
    x2, y2 = teach2.predict_from_matrix(batch_size=10000)
    x3, y3 = teach3.predict_from_matrix(batch_size=10000)
    x1 = np.concatenate((x1[:, 0:3], x2[:, 3:6], x3[:, 6:9]), axis=1)
    y1 = y1 + y2 + y3
    loss = model.evaluate(x1, y1)

    if plot:
        student.graph_sample(parent, model, folder='plot')

    # return [loss, model, model.count_params() - n - 1]
    return loss, model
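
The column-stitching that builds dm_x is the subtle step here. The following self-contained sketch reproduces the same slicing pattern on toy arrays (all names and shapes are illustrative, not from the original code):

import numpy as np

# Toy stand-ins for three teachers' 9-column input matrices.
rng = np.random.default_rng(0)
a, b, c = (rng.standard_normal((1000, 9)) for _ in range(3))

# Columns 0-2 from the first teacher, 3-5 from the second, and 6-8 from
# the third; the result is again (1000, 9), matching dm_x above.
combined = np.concatenate((a[:, 0:3], b[:, 3:6], c[:, 6:9]), axis=1)
assert combined.shape == (1000, 9)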