Example 1
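These snippets are test functions for the TigerForecast library; the excerpt omits their shared imports. A plausible preamble is sketched below (the tigerforecast module paths are an assumption; check them against your installed version):

import time

import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm

import tigerforecast
from tigerforecast.utils.optimizers import OGD, ONS, SGD, Adam  # assumed path
from tigerforecast.utils.optimizers.losses import mse  # assumed path; mse(y_pred, y_true) = mean squared error

If mse is not exported there, a drop-in replacement is np.mean((y_pred - y_true) ** 2).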
def test_adam(show=False):

    # ARMA(2, 0) problem: AR order 2, no moving-average component
    problem = tigerforecast.problem('ARMA-v0')
    x = problem.initialize(p=2, q=0)

    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, l=3, h=10, optimizer=Adam)  # initialize with the optimizer class
    method.predict(1.0)  # smoke-test predict/update before the real run
    method.update(1.0)

    optimizer = Adam(learning_rate=0.1)
    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, l=3, h=10, optimizer=optimizer)  # reinitialize with a configured instance

    # online loop: predict, record the loss, then update on the revealed value
    loss = []
    for t in range(1000):
        y_pred = method.predict(x)
        y_true = problem.step()
        loss.append(mse(y_pred, y_true))
        method.update(y_true)
        x = y_true

    if show:
        plt.plot(loss)
        plt.show(block=False)
        plt.pause(3)
        plt.close()
    print("test_adam passed")
Example 2
def test_ons(show=False):

    #tigerforecast.set_key(0) # consistent randomness

    problem = tigerforecast.problem('ARMA-v0')
    x, y_true = problem.initialize()

    methods = []
    labels = ['OGD', 'ONS']  # must match the methods appended below

    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, optimizer=OGD)  # initialize with class
    methods.append(method)

    #method = tigerforecast.method('AutoRegressor')
    #method.initialize(optimizer=Adagrad) # initialize with class
    #methods.append(method)

    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, optimizer=ONS)  # initialize with class
    methods.append(method)

    #method = tigerforecast.method('AutoRegressor')
    #method.initialize(optimizer=Adam) # initialize with class
    #methods.append(method)

    losses = [[] for _ in methods]
    update_time = [0.0 for _ in methods]
    for t in tqdm(range(2000)):
        for i, method in enumerate(methods):
            y_pred = method.predict(x)
            losses[i].append(mse(y_pred, y_true))

            start = time.time()  # time only the update step
            method.update(y_true)
            update_time[i] += time.time() - start
        x, y_true = problem.step()

    print("time taken:")
    for t, label in zip(update_time, labels):
        print(label + ": " + str(t))

    if show:
        plt.yscale('log')
        for l, label in zip(losses, labels):
            plt.plot(l, label=label)
            #plt.plot(avg_regret(l), label = label)
        plt.legend()
        plt.title("LSTM with OGD vs. ONS on ARMA-v0")
        plt.show(block=False)
        plt.pause(300)
        plt.close()

    print("test_ons passed")
Example 3
def test_sgd_autoregressor(show=False):
    problem = tigerforecast.problem('ARMA-v0')
    x = problem.initialize(p=2, q=0)

    optimizer = SGD(learning_rate=0.0003)
    method = tigerforecast.method('AutoRegressor')
    method.initialize(p=3, optimizer=optimizer)  # initialize with a configured instance

    loss = []
    for t in range(1000):
        y_pred = method.predict(x)
        y_true = problem.step()
        loss.append(mse(y_pred, y_true))
        method.update(y_true)
        x = y_true

    if show:
        plt.title("Test SGD on ARMA(2, 0) with AutoRegressor(p=3)")
        plt.plot(loss)
        plt.show(block=False)
        plt.pause(3)
        plt.close()
    print("test_sgd_autoregressor passed")
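The two boosting examples below plot an avg_regret helper that the excerpt never defines. A minimal sketch consistent with how it is called here (the running average of the per-step losses; the library's own definition may differ) is:

import numpy as np

def avg_regret(losses):
    # element t is the mean of losses[0..t]
    losses = np.asarray(losses, dtype=float)
    return np.cumsum(losses) / np.arange(1, len(losses) + 1)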
Example 4
def test_simple_boost_lstm(steps=500, show=True):
    # method initialize
    T = steps
    method_id = "LSTM"
    ogd = OGD(learning_rate=0.01)
    method_params = {'n': 1, 'm': 1, 'l': 5, 'h': 10, 'optimizer': ogd}
    methods = []
    Ns = [1, 3, 6]
    for n in Ns:  # number of weak learners
        method = tigerforecast.method("SimpleBoost")
        method.initialize(method_id, method_params, n,
                          reg=1.0)  # regularization
        methods.append(method)

    # regular AutoRegressor for comparison
    autoreg = tigerforecast.method("AutoRegressor")
    autoreg.initialize(p=4)  # lag order, matching the ARMA(4, 0) problem below

    # problem initialize
    p, q = 4, 0
    problem = tigerforecast.problem("ARMA-v0")
    y_true = problem.initialize(p, q, noise_magnitude=0.1)

    # run all boosting methods
    result_list = [[] for _ in Ns]
    last_value = []
    autoreg_loss = []
    for i in range(T):
        y_next = problem.step()

        # predictions for every boosting method
        for result_i, method_i in zip(result_list, methods):
            y_pred = method_i.predict(y_true)
            result_i.append(mse(y_next, y_pred))
            method_i.update(y_next)

        # last value and autoregressor predictions
        last_value.append(mse(y_true, y_next))
        autoreg_loss.append(mse(autoreg.predict(y_true), y_next))
        autoreg.update(y_next)
        y_true = y_next

    # plot performance
    if show:
        start = 100
        x = np.arange(start, steps)
        plt.figure(figsize=(12, 8))

        # plot every boosting method loss
        for n, results in zip(Ns, result_list):
            print("Mean loss for n={}: {}".format(
                n, np.mean(np.array(results[start:]))))
            plt.plot(x,
                     avg_regret(results[start:]),
                     label="SimpleBoost, n={}".format(n))

        # plot loss for last value and autoregressor methods
        print("Mean loss for LastValue: {}".format(
            np.mean(np.array(last_value[start:]))))
        plt.plot(x, avg_regret(last_value[start:]), label="Last value method")
        print("Mean loss for AutoRegressor: {}".format(
            np.mean(np.array(autoreg_loss[start:]))))
        plt.plot(x,
                 avg_regret(autoreg_loss[start:]),
                 label="AutoRegressor method")

        plt.title("SimpleBoost method on ARMA problem")
        plt.legend()
        plt.show(block=False)
        plt.pause(10)
        plt.close()
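For reference, both boosting examples construct SimpleBoost the same way:

method = tigerforecast.method("SimpleBoost")
method.initialize(method_id, method_params, n, reg=1.0)

where method_id names the weak learner, method_params is its initialization dict, n is the number of weak learners, and reg is the regularization strength (per the inline comments).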
Example 5
def test_simple_boost_arma(steps=500, show=True):
    # method initialize
    T = steps
    method_id = "AutoRegressor"
    method_params = {'p': 18, 'optimizer': OGD}
    Ns = [64]
    timelines = [6, 9, 12]

    # regular AutoRegressor baseline; initialized once, so it carries its
    # learned weights across all three timelines below
    autoreg = tigerforecast.method("AutoRegressor")
    autoreg.initialize(p=18, optimizer=OGD)

    fig, ax = plt.subplots(nrows=1, ncols=3)
    cur = 0

    # run the boosting methods once per ENSO forecast timeline
    for timeline in timelines:

        # problem initialize
        problem = tigerforecast.problem("ENSO-v0")
        x, y_true = problem.initialize(input_signals=['oni'],
                                       timeline=timeline)
        methods = []

        for n in Ns:  # number of weak learners
            method = tigerforecast.method("SimpleBoost")
            method.initialize(method_id, method_params, n,
                              reg=0.0)  # regularization
            methods.append(method)

        result_list = [[] for _ in Ns]
        autoreg_loss = []

        for i in tqdm(range(T)):

            # predictions for every boosting method
            for result_i, method_i in zip(result_list, methods):
                y_pred = method_i.predict(x)
                result_i.append(mse(y_true, y_pred))
                method_i.update(y_true)

            # autoregressor baseline prediction
            autoreg_loss.append(mse(autoreg.predict(x), y_true))
            autoreg.update(y_true)
            x, y_true = problem.step()

        # plot performance
        if show:

            start = T // 2

            # plot every boosting method loss
            for n, results in zip(Ns, result_list):
                print("Mean loss for n={}: {}".format(
                    n, np.mean(np.array(results))))
                ax[cur].plot(avg_regret(results[-start:]),
                             label="SimpleBoost, n={}".format(n))

            # plot loss for last value and autoregressor methods
            print("Mean loss for AutoRegressor: {}".format(
                np.mean(np.array(autoreg_loss))))
            ax[cur].plot(avg_regret(autoreg_loss[-start:]),
                         label="AutoRegressor method")
            ax[cur].legend(loc="upper right", fontsize=8)

        cur += 1

    fig.tight_layout()
    plt.show()
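To run everything as a script, a minimal entry point (assuming all five functions live in one module) could be:

if __name__ == "__main__":
    test_adam(show=True)
    test_ons(show=True)
    test_sgd_autoregressor(show=True)
    test_simple_boost_lstm(steps=500, show=True)
    test_simple_boost_arma(steps=500, show=True)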