# Example 1
def test_sgd_lstm(show=False):
    """Train an LSTM controller with SGD on an LDS environment.

    Initializes the controller twice — once with the optimizer passed as a
    class and once as an instance — runs 1000 prediction/update steps, and
    optionally plots the per-step MSE loss.
    """
    environment = tigercontrol.environment('LDS')
    x = environment.reset(p=2,q=0)

    # First round: hand the optimizer over as a class.
    controller = tigercontrol.controllers('LSTM')
    controller.initialize(n=1, m=1, l=3, h=10, optimizer=SGD)
    controller.predict(1.0)  # smoke-test the controller interface
    controller.update(1.0)

    # Second round: hand over a pre-built optimizer instance instead.
    sgd_instance = SGD(learning_rate=0.001)
    controller = tigercontrol.controllers('LSTM')
    controller.initialize(n=1, m=1, l=3, h=10, optimizer=sgd_instance)

    loss = []
    for _ in range(1000):
        prediction = controller.predict(x)
        observation = environment.step()
        loss.append(mse(prediction, observation))
        controller.update(observation)
        x = observation

    if show:
        plt.title("Test SGD on LQR(3) with LSTM controller")
        plt.plot(loss)
        plt.show(block=False)
        plt.pause(3)
        plt.close()
def test_ons(show=False):
    """Compare LSTM controllers trained with OGD vs. ONS on an LDS environment.

    Tracks per-controller MSE losses and cumulative update time over 2000
    steps, prints the timing results, and optionally plots the losses on a
    log scale.

    Args:
        show (bool): if True, display the loss comparison plot.
    """
    environment = tigercontrol.environment('LDS')
    x, y_true = environment.reset()

    controllers = []
    # NOTE: labels must stay in sync with the controllers appended below.
    # (The original list carried a stale 'Adam' entry with no matching
    # controller, which would mislabel results if the lists diverged.)
    labels = ['OGD', 'ONS']

    controller = tigercontrol.controllers('LSTM')
    controller.initialize(n = 1, m = 1, optimizer=OGD) # initialize with class
    controllers.append(controller)

    controller = tigercontrol.controllers('LSTM')
    controller.initialize(n = 1, m = 1, optimizer=ONS) # initialize with class
    controllers.append(controller)

    losses = [[] for _ in controllers]
    update_time = [0.0 for _ in controllers]
    for t in tqdm(range(2000)):
        for i, controller in enumerate(controllers):
            y_pred = controller.predict(x)
            losses[i].append(mse(y_pred, y_true))

            # Dedicated timer variable: the original reused `t`, shadowing
            # the outer loop index (harmless here, but confusing).
            start_time = time.time()
            controller.update(y_true)
            update_time[i] += time.time() - start_time
        x, y_true = environment.step()

    print("time taken:")
    for elapsed, label in zip(update_time, labels):
        print(label + ": " + str(elapsed))

    if show:
        plt.yscale('log')
        for l, label in zip(losses, labels):
            plt.plot(l, label = label)
        plt.legend()
        plt.title("Autoregressors on ENSO-T6")
        plt.show(block=False)
        plt.pause(300)
        plt.close()

    print("test_ons passed")
# Example 3
def test_sgd_autoregressor(show=False):
    """Train an AutoRegressor controller with an SGD optimizer instance on an
    LDS environment, running 1000 prediction/update steps and optionally
    plotting the per-step MSE loss.
    """
    environment = tigercontrol.environment('LDS')
    x = environment.reset(p=2,q=0)

    # Initialize with a pre-built optimizer instance rather than a class.
    sgd_instance = SGD(learning_rate=0.0003)
    controller = tigercontrol.controllers('AutoRegressor')
    controller.initialize(p=3, optimizer=sgd_instance)

    loss = []
    for _ in range(1000):
        prediction = controller.predict(x)
        observation = environment.step()
        loss.append(mse(prediction, observation))
        controller.update(observation)
        x = observation

    if show:
        plt.title("Test SGD on LQR(3) with AutoRegressor controller")
        plt.plot(loss)
        plt.show(block=False)
        plt.pause(3)
        plt.close()
# Example 4
def test_dynaboost_lstm(steps=500, show=True):
    """Compare DynaBoost-wrapped LSTM controllers (with varying numbers of
    weak learners) against a plain AutoRegressor and a last-value baseline
    on an LDS environment, then optionally plot average regret for each.

    Args:
        steps (int): number of environment steps to simulate.
        show (bool): if True, display the comparison plot for ~10 seconds.
    """
    # controller initialize
    T = steps
    controller_id = "LSTM"
    ogd = OGD(learning_rate=0.01)
    controller_params = {'n': 1, 'm': 1, 'l': 5, 'h': 10, 'optimizer': ogd}
    controllers = []
    Ns = [1, 3, 6]
    for n in Ns:  # number of weak learners
        controller = tigercontrol.controllers("DynaBoost")
        controller.initialize(controller_id, controller_params, n,
                              reg=1.0)  # regularization
        controllers.append(controller)

    # regular AutoRegressor for comparison
    autoreg = tigercontrol.controllers("AutoRegressor")
    autoreg.initialize(p=4)  # autoregressive order (matches the LDS order p below)

    # environment initialize
    p, q = 4, 0
    environment = tigercontrol.environment("LDS")
    y_true = environment.reset(p, q, noise_magnitude=0.1)

    # run all boosting controller
    result_list = [[] for n in Ns]  # one loss history per boosting controller
    last_value = []      # loss of the "predict previous observation" baseline
    autoreg_loss = []    # loss of the plain AutoRegressor
    for i in range(T):
        y_next = environment.step()

        # predictions for every boosting controller
        for result_i, controller_i in zip(result_list, controllers):
            y_pred = controller_i.predict(y_true)
            result_i.append(mse(y_next, y_pred))
            controller_i.update(y_next)

        # last value and autoregressor predictions
        last_value.append(mse(y_true, y_next))
        autoreg_loss.append(mse(autoreg.predict(y_true), y_next))
        autoreg.update(y_next)
        y_true = y_next

    # plot performance
    if show:
        start = 100  # skip the initial transient when plotting/averaging
        x = np.arange(start, steps)
        plt.figure(figsize=(12, 8))

        # plot every boosting controller loss
        for n, results in zip(Ns, result_list):
            print("Mean loss for n={}: {}".format(
                n, np.mean(np.array(results[start:]))))
            plt.plot(x,
                     avg_regret(results[start:]),
                     label="DynaBoost, n={}".format(n))

        # plot loss for last value and autoregressor controllers
        print("Mean loss for LastValue: {}".format(
            np.mean(np.array(last_value[start:]))))
        plt.plot(x,
                 avg_regret(last_value[start:]),
                 label="Last value controller")
        print("Mean loss for AutoRegressor: {}".format(
            np.mean(np.array(autoreg_loss[start:]))))
        plt.plot(x,
                 avg_regret(autoreg_loss[start:]),
                 label="AutoRegressor controller")

        plt.title("DynaBoost controller on LQR environment")
        plt.legend()
        plt.show(block=False)
        plt.pause(10)
        plt.close()
# Example 5
def test_dynaboost_arma(steps=500, show=True):
    """Compare a DynaBoost-wrapped AutoRegressor against a plain AutoRegressor
    on the ENSO environment, for several prediction timelines, plotting
    average regret for each timeline in a separate subplot.

    Args:
        steps (int): number of environment steps per timeline.
        show (bool): if True, draw the per-timeline loss curves into the
            subplot axes as the runs complete.
    """
    # controller initialize
    T = steps
    controller_id = "AutoRegressor"
    controller_params = {'p': 18, 'optimizer': OGD}
    Ns = [64]  # number(s) of weak learners to try
    timelines = [6, 9, 12]  # one subplot per timeline

    # regular AutoRegressor for comparison
    # NOTE(review): this comparison controller is shared across all
    # timelines and never re-initialized between them — confirm intended.
    autoreg = tigercontrol.controllers("AutoRegressor")
    autoreg.initialize(p=18, optimizer=OGD)

    fig, ax = plt.subplots(nrows=1, ncols=3)
    cur = 0  # index of the subplot for the current timeline

    # run all boosting controller
    for timeline in timelines:

        # environment initialize
        environment = tigercontrol.environment("ENSO")
        x, y_true = environment.reset(input_signals=['oni'], timeline=timeline)
        controllers = []

        for n in Ns:  # number of weak learners
            controller = tigercontrol.controllers("DynaBoost")
            controller.initialize(controller_id, controller_params, n,
                                  reg=0.0)  # regularization
            controllers.append(controller)

        result_list = [[] for n in Ns]  # one loss history per boosting controller
        autoreg_loss = []

        for i in tqdm(range(T)):

            # predictions for every boosting controller
            for result_i, controller_i in zip(result_list, controllers):
                y_pred = controller_i.predict(x)
                result_i.append(mse(y_true, y_pred))
                controller_i.update(y_true)

            # last value and autoregressor predictions
            autoreg_loss.append(mse(autoreg.predict(x), y_true))
            autoreg.update(y_true)
            x, y_true = environment.step()

        # plot performance
        if show:

            start = T // 2  # plot only the second half of each run

            # plot every boosting controller loss
            for n, results in zip(Ns, result_list):
                print("Mean loss for n={}: {}".format(
                    n, np.mean(np.array(results))))
                ax[cur].plot(avg_regret(results[-start:]),
                             label="DynaBoost, n={}".format(n))

            # plot loss for last value and autoregressor controllers
            print("Mean loss for AutoRegressor: {}".format(
                np.mean(np.array(autoreg_loss))))
            ax[cur].plot(avg_regret(autoreg_loss[-start:]),
                         label="AutoRegressor controller")
            ax[cur].legend(loc="upper right", fontsize=8)

        cur += 1

    fig.tight_layout()
    plt.show()