Example #1
def test_trigonometric():
    """Check that using trig functions work and that results differ"""

    est1 = SymbolicRegressor(random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(trigonometric=True, random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert_true(abs(est1 - est2) > 0.01)
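These test snippets are excerpts and rely on module-level names that are not shown here. A minimal setup they appear to assume (load_boston was removed in scikit-learn 1.2, so this only runs on older releases, and assert_true lived in sklearn.utils.testing before it was dropped):

from gplearn.genetic import SymbolicRegressor
from sklearn.datasets import load_boston           # removed in scikit-learn 1.2
from sklearn.metrics import mean_absolute_error
from sklearn.utils.testing import assert_true      # available only in older scikit-learn

boston = load_boston()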
Example #2
def test_subsample():
    """Check that subsample work and that results differ"""

    est1 = SymbolicRegressor(max_samples=1.0, random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(max_samples=0.7, random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert_true(abs(est1 - est2) > 0.01)
Example #3
def test_parsimony_coefficient():
    """Check that parsimony coefficients work and that results differ"""

    est1 = SymbolicRegressor(parsimony_coefficient=0.001, random_state=0)
    est1.fit(boston.data[:400, :], boston.target[:400])
    est1 = mean_absolute_error(est1.predict(boston.data[400:, :]),
                               boston.target[400:])

    est2 = SymbolicRegressor(parsimony_coefficient=0.1, random_state=0)
    est2.fit(boston.data[:400, :], boston.target[:400])
    est2 = mean_absolute_error(est2.predict(boston.data[400:, :]),
                               boston.target[400:])

    est3 = SymbolicRegressor(parsimony_coefficient='auto', random_state=0)
    est3.fit(boston.data[:400, :], boston.target[:400])
    est3 = mean_absolute_error(est3.predict(boston.data[400:, :]),
                               boston.target[400:])

    assert_true(abs(est1 - est2) > 0.01)
    assert_true(abs(est1 - est3) > 0.01)
    assert_true(abs(est2 - est3) > 0.01)
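A quick way to see what parsimony_coefficient does without the Boston data is to compare the evolved expressions on a synthetic problem. This is only a sketch: make_regression and the small generation count are assumptions, not part of the original tests. A larger coefficient penalizes program length more heavily, so the second expression is usually much shorter.

from gplearn.genetic import SymbolicRegressor
from sklearn.datasets import make_regression

X, y = make_regression(n_samples=200, n_features=3, noise=0.1, random_state=0)

# Weak vs. strong penalty on program length; compare the printed expressions.
small_penalty = SymbolicRegressor(parsimony_coefficient=0.001, generations=5,
                                  random_state=0).fit(X, y)
large_penalty = SymbolicRegressor(parsimony_coefficient=0.1, generations=5,
                                  random_state=0).fit(X, y)
print(small_penalty._program)
print(large_penalty._program)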
Example #4
def main():
    escalation = {
        Position.GOALKEEPER: 1,
        Position.DEFENDER: 2,
        Position.SIDE: 2,
        Position.MIDFIELD: 4,
        Position.ATTACKER: 2,
        Position.COACH: 1
    }

    print("Getting auth")
    auth = get_auth()

    print("Getting teams")
    teams = get_teams()

    print("Getting athletes")
    athletes = get_athletes(teams)

    print("Getting scores")
    scores = [athlete.get_row(auth) for athlete in athletes]
    max_length = 0
    for score in scores:
        if len(score) > max_length:
            max_length = len(score)
    fixed_score = []
    for score in scores:
        fixed_score.append([0.0] * (max_length - len(score)) + score)

    generations = 2000
    print("Training using " + str(generations) + " generations. It can take a long time to end")
    est_gp = SymbolicRegressor(
        population_size=5000,
        generations=generations,
        stopping_criteria=0.01,
        p_crossover=0.7,
        p_subtree_mutation=0.1,
        p_hoist_mutation=0.05,
        p_point_mutation=0.1,
        max_samples=0.9,
        verbose=1,
        parsimony_coefficient=0.01,
        random_state=0,
        const_range=(-50., 50.),
        function_set=(
            'add', 'sub', 'mul', 'div', 'sqrt', 'log', 'abs', 'neg', 'inv', 'max', 'min', 'sin', 'cos', 'tan'))
    est_gp.fit([x[:-1] for x in fixed_score], [x[-1] for x in fixed_score])
    predictions = est_gp.predict([x[:-1] for x in fixed_score])

    print("Getting results")
    results = [[athlete, prediction] for athlete, prediction in zip(athletes, predictions)]
    results.sort(key=lambda x: -x[1])
    print("\"Scale\",\"Name\",\"Team\",\"Position\",\"Status\",\"Price\",\"Prediction\"")
    for result in results:
        athlete = result[0]
        prediction = result[1]
        scale = athlete.status == Status.Probable and escalation[athlete.position] > 0
        if scale:
            escalation[athlete.position] = escalation[athlete.position] - 1
        print("\"" +
              ("*" if scale else " ") + "\",\"" +
              athlete.nick + "\",\"" +
              athlete.club.name + "\",\"" +
              str(athlete.position.name) + "\",\"" +
              str(athlete.status.name) + "\"," +
              str(athlete.price) + "," +
              str(prediction))

    print("Done")
Example #5
est_gp = SymbolicRegressor(  # preceding arguments are cut off in this excerpt
    p_hoist_mutation=0.0001,
    p_point_mutation=0.0001,
    max_samples=1.0,
    verbose=1,
    function_set=('add', 'sub', 'mul', 'div', gp_tanh, 'sqrt', 'log', 'abs',
                  'neg', 'inv', 'max', 'min', 'tan', 'cos', 'sin'),
    # function_set = (gp_tanh, 'add', 'sub', 'mul', 'div'),
    metric='mean absolute error',
    warm_start=True,
    n_jobs=1,
    parsimony_coefficient=0.0001,
    random_state=111)
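gp_tanh in the function_set above is not one of gplearn's built-in function names, so it has to be created with gplearn.functions.make_function somewhere before this call. A minimal sketch of how it could be defined (the display name is an assumption):

import numpy as np
from gplearn.functions import make_function

# A vectorized callable plus a display name and arity is all gplearn needs
# for a custom primitive.
gp_tanh = make_function(function=lambda x: np.tanh(x), name='tanh', arity=1)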

if os.path.exists('est_gp.pickle'):
    with open('est_gp.pickle', 'rb') as pickle_in:
        est_gp = pickle.load(pickle_in)
    print("Model Loaded")

# With warm_start=True, fit() reuses the loaded population and only runs the
# additional generations requested here.
est_gp.generations = est_gp.generations + 9
est_gp.fit(X_tr, y_tr)

with open('est_gp.pickle', 'wb') as f:
    pickle.dump(est_gp, f)
    print('Model Saved')

print("gpLearn Program:", est_gp._program)
y_gp = est_gp.predict(X_tr)
gpLearn_MAE = mean_absolute_error(y_tr, y_gp)
print("gpLearn MAE:", gpLearn_MAE)
print(y_tr, y_gp)
Example #6
est_gp = SymbolicRegressor(  # preceding arguments are cut off in this excerpt
                           verbose=1,
                           parsimony_coefficient=0.01,
                           random_state=0)

# The sample index (0..n-1) is used as the regression target here.
_ = [i for i in range(x_data.shape[0])]
est_gp.fit(x_data, _)

ts = int(time.time())

graph = pydotplus.graphviz.graph_from_dot_data(
    est_gp._program.export_graphviz())
graph.write_png("outputs/gp-{suffix}.png".format(suffix=ts))

res = cheating_func(x_arr_pointer, CONFIG_N_DIM, x_len)
y_truth = np.array([float(res[i]) for i in range(n_data)])
y_pred = np.array(est_gp.predict(x_data))

n_data_plot = 200
indicies_plot = sorted(np.random.choice(n_data, n_data_plot, replace=False))

canvas = gp_plot.GPCanvas()
canvas.draw_line_chart_2d(range(0, n_data_plot),
                          y_truth[indicies_plot],
                          color="blue",
                          label="y_truth",
                          line_style="solid")

canvas.draw_line_chart_2d(range(0, n_data_plot),
                          y_pred[indicies_plot],
                          color="red",
                          label="y_pred")
Example #7
def main():
    escalation = {
        Position.GOALKEEPER: 1,
        Position.DEFENDER: 2,
        Position.SIDE: 2,
        Position.MIDFIELD: 4,
        Position.ATTACKER: 2,
        Position.COACH: 1
    }

    print("Getting auth")
    auth = get_auth()

    print("Getting teams")
    teams = get_teams()

    print("Getting athletes")
    athletes = get_athletes(teams)

    print("Getting scores")
    scores = [athlete.get_row(auth) for athlete in athletes]
    max_length = 0
    for score in scores:
        if len(score) > max_length:
            max_length = len(score)
    fixed_score = []
    for score in scores:
        fixed_score.append([0.0] * (max_length - len(score)) + score)

    generations = 2000
    print("Training using " + str(generations) +
          " generations. It can take a long time to end")
    est_gp = SymbolicRegressor(
        population_size=5000,
        generations=generations,
        stopping_criteria=0.01,
        p_crossover=0.7,
        p_subtree_mutation=0.1,
        p_hoist_mutation=0.05,
        p_point_mutation=0.1,
        max_samples=0.9,
        verbose=1,
        parsimony_coefficient=0.01,
        random_state=0,
        const_range=(-50., 50.),
        function_set=('add', 'sub', 'mul', 'div', 'sqrt', 'log', 'abs', 'neg',
                      'inv', 'max', 'min', 'sin', 'cos', 'tan'))
    est_gp.fit([x[:-1] for x in fixed_score], [x[-1] for x in fixed_score])
    predictions = est_gp.predict([x[:-1] for x in fixed_score])

    print("Getting results")
    results = [[athlete, prediction]
               for athlete, prediction in zip(athletes, predictions)]
    results.sort(key=lambda x: -x[1])
    print(
        "\"Scale\",\"Name\",\"Team\",\"Position\",\"Status\",\"Price\",\"Prediction\""
    )
    for result in results:
        athlete = result[0]
        prediction = result[1]
        scale = athlete.status == Status.Probable and escalation[
            athlete.position] > 0
        if scale:
            escalation[athlete.position] = escalation[athlete.position] - 1
        print("\"" + ("*" if scale else " ") + "\",\"" + athlete.nick +
              "\",\"" + athlete.club.name + "\",\"" +
              str(athlete.position.name) + "\",\"" + str(athlete.status.name) +
              "\"," + str(athlete.price) + "," + str(prediction))

    print("Done")
Example #8
est_gp = SymbolicRegressor(  # preceding arguments are cut off in this excerpt
                           max_samples=0.9, verbose=1,
                           parsimony_coefficient=0.01, random_state=0,
                           function_set=('add', 'sub', 'mul', 'div', 'sqrt', 'log',
                                         'abs', 'neg', 'inv', 'max', 'min', 'sin',
                                         'cos', 'tan'))
est_gp.fit(trainSet, z_train)
print(est_gp._program)

score_gp = est_gp.score(testSet, z_test)
                   
#score_gp = est_gp.mean_absolute_error(testSet, z_test)

print(score_gp)
#19 generations required
#min(add(add(log(add(inv(div(sin(X0), mul(X0, 0.952))),neg(abs(X0)))), neg(inv(div(sin(X1), mul(X0, 0.952))))), min(add(log(add(min(mul(-0.020, X1), cos(X1)), neg(add(log(add(min(inv(div(sin(X1), mul(X0, 0.952))), cos(X1)), div(sin(X1), add(log(add(min(mul(-0.020, X1), cos(X1)), neg(add(log(cos(X1)), neg(0.952))))), add(tan(tan(sin(X1))), neg(inv(div(sin(X1), neg(div(X1, 0.794)))))))))), neg(0.952))))), add(tan(tan(sin(X1))), neg(inv(div(sin(X1), neg(div(X1, 0.794))))))), div(neg(cos(tan(X1))), inv(mul(add(X1, X1), log(X1)))))), div(neg(cos(inv(mul(add(X1, X1), log(X1))))), inv(mul(add(X1, X1), log(X1)))))

z_gp = est_gp.predict(np.c_[x.ravel(), y.ravel()]).reshape(x.shape)
#print(z_gp)
ax = plt.figure().add_subplot(projection='3d')  # gca(projection=...) no longer works in recent matplotlib
ax.set_xlim(-10, 10)
ax.set_ylim(-10, 10)
#surf = ax.plot_trisurf(x_test, y_test, z_gp, color='green')
surf = ax.plot_surface(x, y, z_gp, rstride=1, cstride=1, color='green', alpha=0.5)
points = ax.scatter(x_train, y_train, z_train)
score = ax.text(-.7, 1, .2, "$R^2 =\/ %.6f$" % score_gp, 'x', fontsize=14)
title = "TestId: 2; Symbolic Regressor"
plt.title(title)
plt.show()
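The surface and scatter calls above rely on grid arrays x, y and on training points x_train, y_train, z_train defined earlier in the original script. A minimal sketch of the grid construction they imply (the resolution and range here are assumptions):

import numpy as np

# Two-feature evaluation grid; est_gp.predict expects one row per grid point.
x, y = np.meshgrid(np.linspace(-10, 10, 50), np.linspace(-10, 10, 50))
grid_points = np.c_[x.ravel(), y.ravel()]     # shape (2500, 2)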


ax = plt.figure().add_subplot(projection='3d')
ax.set_xlim(-10, 10)