Example #1
def test_least_squares(steps=1000, show_plot=True):
    T = steps
    problem = tigerforecast.problem("ENSO-v0")
    x, y = problem.initialize(input_signals = ['nino12', 'nino34', 'nino4'])

    method = tigerforecast.method("LeastSquares")
    method.initialize(x, y, reg = 10.0 * steps)
    loss = lambda y_true, y_pred: np.sum((y_true - y_pred)**2)
 
    results = []

    for i in range(T):
        x, y_true = problem.step()
        y_pred = method.step(x, y_true)
        cur_loss = loss(y_true, y_pred)
        results.append(cur_loss)

    if show_plot:
        plt.plot(results)
        plt.title("LeastSquares method on ARMA problem")
        plt.show(block=False)
        plt.pause(3)
        plt.close()
    print("test_least_squares passed")
    return
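Note: these snippets are excerpted from the TigerForecast test suite and omit their module-level imports. A preamble along the following lines should cover most of the examples below; the exact optimizer module path is an assumption:

import time
import itertools

import jax.numpy as np           # the tests alias jax.numpy (or numpy) as np
import matplotlib.pyplot as plt
from tqdm import tqdm

import tigerforecast
from tigerforecast.utils.optimizers import Adam, Adagrad, ONS, OGD, SGD  # assumed path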
Example #2
def test_autoregressor(steps=100, show_plot=True):
    T = steps
    p, q = 3, 3
    n = 3
    problem = tigerforecast.problem("ARMA-v0")
    cur_x = problem.initialize(p, q, n=n)

    method = tigerforecast.method("AutoRegressor")
    #method.initialize(p, optimizer = ONS)
    method.initialize(p, optimizer=Adagrad)
    loss = lambda y_true, y_pred: np.sum((y_true - y_pred)**2)

    results = []

    for i in range(T):
        cur_y_pred = method.predict(cur_x)
        #print(cur_y_pred.shape)
        #method.forecast(cur_x, 3)
        cur_y_true = problem.step()
        cur_loss = loss(cur_y_true, cur_y_pred)
        method.update(cur_y_true)
        cur_x = cur_y_true
        results.append(cur_loss)

    if show_plot:
        plt.plot(results)
        plt.title("Autoregressive method on ARMA problem")
        plt.show(block=False)
        plt.pause(3)
        plt.close()
    print("test_autoregressor passed")
    return
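Note: for context, an AutoRegressor with parameter p predicts a linear combination of the p most recent observations. A minimal sketch of that prediction core (the real method also manages its own history buffer, a bias term, and the chosen optimizer, all omitted here):

import jax.numpy as np

def ar_predict(coeffs, history):
    # AR(p) prediction: inner product of the p learned coefficients
    # with the p most recent observations
    p = len(coeffs)
    return np.dot(coeffs, history[-p:])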
Example #3
def test_rnn_time_series(steps=1000, show_plot=False, verbose=False):
    T = steps
    n, m, d = 5, 3, 10
    problem = tigerforecast.problem("RNN-TimeSeries-v0")
    problem.initialize(n, m, d)

    x_output = []
    y_output = []
    for t in range(T):
        x, y = problem.step()
        x_output.append(x)
        y_output.append(y)

    info = problem.hidden()
    if verbose:
        print(info)

    if show_plot:
        plt.plot(x_output)
        plt.plot(y_output)
        plt.title("lds")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_rnn_time_series passed")
    return
Example #4
def test_lstm_time_series(steps=1000, show_plot=False, verbose=False):
    T = steps
    n, m, d = 5, 1, 10
    problem = tigerforecast.problem("LSTM-TimeSeries-v0")
    problem.initialize(n, m, d)

    x_output = []
    y_output = []
    for t in range(T):
        x, y = problem.step()
        x_output.append(x)
        y_output.append(y)

    info = problem.hidden()
    if verbose:
        print(info)

    if show_plot:
        #plt.plot(x_output)
        plt.figure(figsize=(10, 6))
        plt.plot(y_output)
        plt.title("Output of random LSTM")
        plt.show(block=False)
        plt.pause(5)
        plt.close()
    print("test_lstm_time_series passed")
    return
Example #5
def test_rnn(steps=100, show_plot=True):
    T = steps
    p, q = 3, 3
    n = 1
    problem = tigerforecast.problem("ARMA-v0")
    y_true = problem.initialize(p=p, q=q, n=1)
    method = tigerforecast.method("RNN")
    method.initialize(n=1, m=1, l=3, h=1)
    loss = lambda pred, true: np.sum((pred - true)**2)

    results = []
    for i in range(T):
        u = random.normal(generate_key(), (n, ))
        y_pred = method.predict(u)
        y_true = problem.step()
        results.append(loss(y_true, y_pred))
        method.update(y_true)

    if show_plot:
        plt.plot(results)
        plt.title("RNN method on LDS problem")
        plt.show(block=False)
        plt.pause(3)
        plt.close()
    print("test_rnn passed")
    return
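Note: random here is jax.random, and generate_key appears to be a TigerForecast utility that returns a fresh PRNG key on each call. A plausible stand-in, assuming that behavior:

from jax import random

_key = random.PRNGKey(0)  # global key, advanced on every call

def generate_key():
    # split the global key so successive calls yield independent randomness
    global _key
    _key, subkey = random.split(_key)
    return subkey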
Example #6
def test_arma(steps=1000, show_plot=False, verbose=False):
    T = steps
    p, q = 3, 3
    n = 1
    problem = tigerforecast.problem("ARMA-v0")
    problem.initialize(p, q, n=n)
    assert problem.T == 0

    test_output = []
    for t in range(T):
        test_output.append(problem.step())
        #print(float(test_output[t]))

    info = problem.hidden()
    if verbose:
        print(info)

    assert problem.T == T
    if verbose:
        print(problem.phi)
        print(problem.psi)
    if show_plot:
        plt.plot(test_output)
        plt.title("arma")
        plt.show(block=False)
        plt.pause(10)
        plt.close()
    print("test_arma passed")
    return
Example #7
def test_last_value(steps=1000, show_plot=True):
    T = steps
    p, q = 3, 3
    problem = tigerforecast.problem("ARMA-v0")
    cur_x = problem.initialize(p, q)
    method = tigerforecast.method("LastValue")
    method.initialize()
    loss = lambda y_true, y_pred: (y_true - y_pred)**2

    results = []
    for i in range(T):
        cur_y_pred = method.predict(cur_x)
        #print(method.forecast(cur_x, 3))
        cur_y_true = problem.step()
        cur_loss = loss(cur_y_true, cur_y_pred)
        results.append(cur_loss)
        method.update(cur_y_true)
        cur_x = cur_y_true

    if show_plot:
        plt.plot(results)
        plt.title("LastValue method on ARMA problem")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_last_value passed")
    return
Example #8
def test_adam(show=False):

    problem = tigerforecast.problem('ARMA-v0')
    x = problem.initialize(p=2,q=0)

    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, l=3, h=10, optimizer=Adam) # initialize with class
    method.predict(1.0) # call methods to verify it works
    method.update(1.0)

    optimizer = Adam(learning_rate=0.1)
    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, l=3, h=10, optimizer=optimizer) # reinitialize with instance

    loss = []
    for t in range(1000):
        y_pred = method.predict(x)
        y_true = problem.step()
        loss.append(mse(y_pred, y_true))
        method.update(y_true)
        x = y_true

    if show:
        plt.plot(loss)
        plt.show(block=False)
        plt.pause(3)
        plt.close()
    print("test_adam passed")
Example #9
def test_grid_search_lstm(show=False):
    problem_id = "SP500-v0"
    method_id = "LSTM"
    problem_params = {}  # {'p':4, 'q':1} # params for ARMA problem
    method_params = {'n': 1, 'm': 1}
    loss = lambda a, b: np.sum((a - b)**2)
    search_space = {
        'l': [3, 4, 5, 6],
        'h': [2, 5, 8],
        'optimizer': []
    }  # search space for the LSTM method
    opts = [Adam, Adagrad, ONS, OGD]
    lr_start, lr_stop = -1, -3  # search learning rates from 10^start to 10^stop
    learning_rates = np.logspace(lr_start, lr_stop,
                                 1 + 2 * np.abs(lr_start - lr_stop))
    for opt, lr in itertools.product(opts, learning_rates):
        search_space['optimizer'].append(
            opt(learning_rate=lr))  # create instance and append

    trials, min_steps = 10, 100
    hpo = GridSearch()  # hyperparameter optimizer
    optimal_params, optimal_loss = hpo.search(
        method_id,
        method_params,
        problem_id,
        problem_params,
        loss,
        search_space,
        trials=trials,
        smoothing=10,
        min_steps=min_steps,
        verbose=show)  # run each method for at least min_steps steps

    if show:
        print("optimal params: ", optimal_params)
        print("optimal loss: ", optimal_loss)

    # test resulting method params
    method = tigerforecast.method(method_id)
    method.initialize(**optimal_params)
    problem = tigerforecast.problem(problem_id)
    x = problem.initialize(**problem_params)
    losses = []  # distinct name, to avoid shadowing the loss function above
    if show:
        print("run final test with optimal parameters")
    for t in range(5000):
        y_pred = method.predict(x)
        y_true = problem.step()
        losses.append(mse(y_pred, y_true))
        method.update(y_true)
        x = y_true

    if show:
        print("plot results")
        plt.plot(losses)
        plt.show(block=False)
        plt.pause(10)
        plt.close()
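Note: search_space maps each method parameter to a list of candidate values, so a grid search presumably enumerates their Cartesian product. A hedged sketch of that enumeration (GridSearch's actual internals may differ):

import itertools

def iter_grid(search_space):
    # yield one {param: value} dict per point of the parameter grid
    names = list(search_space)
    for combo in itertools.product(*(search_space[n] for n in names)):
        yield dict(zip(names, combo))

# e.g. list(iter_grid({'l': [3, 4], 'h': [2, 5]})) gives 4 candidate configs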
Example #10
def test_ons(show=False):

    #tigerforecast.set_key(0) # consistent randomness

    problem = tigerforecast.problem('ENSO-v0')  # ENSO emits (x, y) pairs, matching the unpacking below and the plot title
    x, y_true = problem.initialize(input_signals=['oni'], timeline=6)

    methods = []
    labels = ['OGD', 'ONS']  # the Adam and Adagrad variants below are commented out

    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, optimizer=OGD)  # initialize with class
    methods.append(method)

    #method = tigerforecast.method('AutoRegressor')
    #method.initialize(optimizer=Adagrad) # initialize with class
    #methods.append(method)

    method = tigerforecast.method('LSTM')
    method.initialize(n=1, m=1, optimizer=ONS)  # initialize with class
    methods.append(method)

    #method = tigerforecast.method('AutoRegressor')
    #method.initialize(optimizer=Adam) # initialize with class
    #methods.append(method)

    losses = [[] for i in range(len(methods))]
    update_time = [0.0 for i in range(len(methods))]
    for t in tqdm(range(2000)):
        for i in range(len(methods)):
            l, method = losses[i], methods[i]
            y_pred = method.predict(x)
            l.append(mse(y_pred, y_true))

            start = time.time()  # avoid shadowing the tqdm loop variable t
            method.update(y_true)
            update_time[i] += time.time() - start
        x, y_true = problem.step()

    print("time taken:")
    for t, label in zip(update_time, labels):
        print(label + ": " + str(t))

    if show:
        plt.yscale('log')
        for l, label in zip(losses, labels):
            plt.plot(l, label=label)
            #plt.plot(avg_regret(l), label = label)
        plt.legend()
        plt.title("Autoregressors on ENSO-T6")
        plt.show(block=False)
        plt.pause(300)
        plt.close()

    print("test_ons passed")
Example #11
def test_custom_method(steps=1000, show_plot=True):
    # initial preparation
    T = steps
    p, q = 3, 3
    loss = lambda y_true, y_pred: (y_true - y_pred)**2
    problem = tigerforecast.problem("ARMA-v0")
    cur_x = problem.initialize(p, q)

    # simple LastValue custom method implementation
    class Custom(tigerforecast.CustomMethod):
        def initialize(self):
            self.x = 0.0
        def predict(self, x):
            self.x = x
            return self.x
        def update(self, y):
            pass

    # try registering and calling the custom method
    tigerforecast.register_custom_method(Custom, "TestCustomMethod")
    custom_method = tigerforecast.method("TestCustomMethod")
    custom_method.initialize()

    # regular LastValue method as sanity check
    reg_method = tigerforecast.method("LastValue")
    reg_method.initialize()
 
    results = []
    for i in range(T):
        cur_y_pred = custom_method.predict(cur_x)
        reg_y_pred = reg_method.predict(cur_x)
        assert cur_y_pred == reg_y_pred # check that CustomMethod outputs the correct thing
        cur_y_true = problem.step()
        custom_method.update(cur_y_true)
        reg_method.update(cur_y_true)
        results.append(loss(cur_y_true, cur_y_pred))
        cur_x = cur_y_true

    if show_plot:
        plt.plot(results)
        plt.title("Custom (last value) method on ARMA problem")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_custom_method passed")
    return
Example #12
def test_random(steps=1000, show_plot=False):
    T = steps
    problem = tigerforecast.problem("Random-v0")
    problem.initialize()
    assert problem.T == 0

    test_output = []
    for t in range(T):
        test_output.append(problem.step())

    assert problem.T == T
    if show_plot:
        plt.plot(test_output)
        plt.title("random")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_random passed")
    return
Example #13
def test_sp500(steps=1000, show_plot=False, verbose=False):
    T = steps
    problem = tigerforecast.problem("SP500-v0")
    problem.initialize()
    assert problem.T == 0

    test_output = []
    for t in range(T):
        test_output.append(problem.step())

    assert problem.T == T

    if show_plot:
        plt.plot(test_output)
        plt.title("S&P 500")
        plt.show(block=False)
        plt.pause(5)
        plt.close()
    print("test_sp500 passed")
    return
Example #14
def test_uci_indoor(steps=1000, show_plot=False, verbose=False):
    T = steps
    problem = tigerforecast.problem("UCI-Indoor-v0")
    problem.initialize()
    assert problem.T == 0

    test_output = []
    for t in range(T):
        test_output.append(problem.step())

    assert problem.T == T
    if verbose:
        print(problem.hidden())
    if show_plot:
        plt.plot(test_output)
        plt.title("UCI Indoor")
        plt.show(block=False)
        plt.pause(5)
        plt.close()
    print("test_uci_indoor passed")
    return
Example #15
    def _run_test(self, method_params, smoothing, min_steps, verbose=0):
        """ Run a single test with given method params, using median stopping rule """
        # initialize problem and method
        if verbose:
            print("Currently testing parameters: " + str(method_params))
        method = tigerforecast.method(self.method_id)
        method.initialize(**method_params)
        problem = tigerforecast.problem(self.problem_id)
        if problem.has_regressors:
            x, y_true = problem.initialize(**self.problem_params)
        else:
            x = problem.initialize(**self.problem_params)

        t = 0
        losses = []  # sorted losses, used to get median
        smooth_losses = np.zeros(
            smoothing)  # store previous losses to get smooth loss
        while True:  # run method until worse than median loss, for at least min_steps steps
            t += 1
            y_pred = method.predict(x)
            if problem.has_regressors:
                method.update(y_true)
                loss = self.loss(y_pred, y_true)
            else:
                x = problem.step()
                method.update(x)
                loss = self.loss(y_pred, x)
            if t == 1:  # fill all of smooth_losses with the first loss
                for i in range(smoothing):
                    smooth_losses = self._update_smoothing(smooth_losses, loss)
            else:  # else replace only the oldest loss
                smooth_losses = self._update_smoothing(smooth_losses, loss)
            smooth_loss = np.mean(smooth_losses)
            if t % smoothing == 0:
                self._add_to_list(losses, smooth_loss)
                if self._halting_rule(losses, smooth_loss) and t >= min_steps:
                    break
        if verbose:
            print("Time taken: {}, final loss: {}".format(t, smooth_loss))
        return smooth_loss
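Note: _run_test relies on three private helpers that the excerpt does not show. Free-function sketches consistent with the comments (sorted insertion, fixed-size smoothing window, median halting); the real implementation may differ:

import bisect
import numpy as np

def _add_to_list(losses, loss):
    # insert while keeping the list sorted, so the median is cheap to read off
    bisect.insort(losses, loss)

def _update_smoothing(smooth_losses, loss):
    # fixed-size window: drop the oldest value, append the newest
    return np.append(smooth_losses[1:], loss)

def _halting_rule(losses, loss):
    # halt once the current smoothed loss is worse than the running median
    return loss > losses[len(losses) // 2]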
Example #16
def test_rnn_lstm_arma(steps=100, show_plot=True):
    T = steps
    p, q = 3, 0
    problem = tigerforecast.problem("ARMA-v0")
    cur_x = problem.initialize(p=p, q=q, n=1)

    method_RNN = tigerforecast.method("RNN")
    method_RNN.initialize(1, 1, l=p, h=1)

    method_LSTM = tigerforecast.method("LSTM")
    method_LSTM.initialize(1, 1, l=p, h=1)

    loss = lambda pred, true: np.sum((pred - true)**2)

    results_RNN = []
    results_LSTM = []
    for i in range(T):
        y_pred_RNN = method_RNN.predict(cur_x)
        y_pred_LSTM = method_LSTM.predict(cur_x)
        if (i == 0):
            print(method_RNN.forecast(cur_x, timeline=10))
            print(method_LSTM.forecast(cur_x, timeline=10))
        y_true = problem.step()
        results_RNN.append(loss(y_true, y_pred_RNN))
        results_LSTM.append(loss(y_true, y_pred_LSTM))
        cur_x = y_true
        method_RNN.update(y_true)
        method_LSTM.update(y_true)

    if show_plot:
        plt.plot(results_RNN, label='RNN')
        plt.plot(results_LSTM, label='LSTM')
        plt.legend()
        plt.title("RNN vs. LSTM on ARMA problem")
        plt.show(block=False)
        plt.pause(3)
        plt.close()
    print("test_rnn_lstm_arma passed")
    return
Example #17
def test_sgd_autoregressor(show=False):
    problem = tigerforecast.problem('ARMA-v0')
    x = problem.initialize(p=2, q=0)

    optimizer = SGD(learning_rate=0.0003)
    method = tigerforecast.method('AutoRegressor')
    method.initialize(p=3, optimizer=optimizer)  # initialize with an optimizer instance

    loss = []
    for t in range(1000):
        y_pred = method.predict(x)
        y_true = problem.step()
        loss.append(mse(y_pred, y_true))
        method.update(y_true)
        x = y_true

    if show:
        plt.title("Test SGD on ARMA(3) with AutoRegressor method")
        plt.plot(loss)
        plt.show(block=False)
        plt.pause(3)
        plt.close()
Example #18
def test_crypto(steps=1000, show_plot=False, verbose=False):
    T = steps
    problem = tigerforecast.problem("Crypto-v0")
    problem.initialize()
    assert problem.T == 0

    test_output = []
    for t in range(T):
        test_output.append(problem.step())

    assert problem.T == T

    info = problem.hidden()
    if verbose:
        print(info)
    if show_plot:
        plt.plot(test_output)
        plt.title("Crypto")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_crypto passed")
    return
Example #19
def test_custom_problem(steps=1000, show=True):
    # initial preparation
    T = steps
    loss = lambda y_true, y_pred: (y_true - y_pred)**2
    method = tigerforecast.method("LastValue")
    method.initialize()

    # simple custom Problem that returns alternating +/- 1.0
    class Custom(tigerforecast.CustomProblem):
        def initialize(self):
            self.T = 0
            return -1

        def step(self):
            self.T += 1
            return 2 * (self.T % 2) - 1

    # try registering and calling the custom problem
    tigerforecast.register_custom_problem(Custom, "TestCustomProblem")
    custom_problem = tigerforecast.problem("TestCustomProblem")
    cur_x = custom_problem.initialize()

    results = []
    for i in range(T):
        cur_y_pred = method.predict(cur_x)
        cur_y_true = custom_problem.step()
        results.append(loss(cur_y_true, cur_y_pred))
        cur_x = cur_y_true

    if show:
        plt.plot(results)
        plt.title("LastValue method on custom alternating problem")
        plt.show(block=False)
        plt.pause(2)
        plt.close()
    print("test_custom_problem passed")
    return
Example #20
def test_enso(steps=1000, show_plot=False, verbose=False):
    T = steps
    problem = tigerforecast.problem("ENSO-v0")
    problem.initialize(input_signals=['oni'])
    assert problem.T == 0

    test_output = []
    for t in range(T):
        x_t, y_t = problem.step()
        test_output.append(y_t)

    assert problem.T == T

    info = problem.hidden()
    if verbose:
        print(info)
    if show_plot:
        plt.plot(test_output)
        plt.title("ONI of Nino34")
        plt.show(block=False)
        plt.pause(1)
        plt.close()
    print("test_enso passed")
    return
Example #21
def test_simple_boost_lstm(steps=500, show=True):
    # method initialize
    T = steps
    method_id = "LSTM"
    ogd = OGD(learning_rate=0.01)
    method_params = {'n': 1, 'm': 1, 'l': 5, 'h': 10, 'optimizer': ogd}
    methods = []
    Ns = [1, 3, 6]
    for n in Ns:  # number of weak learners
        method = tigerforecast.method("SimpleBoost")
        method.initialize(method_id, method_params, n,
                          reg=1.0)  # regularization
        methods.append(method)

    # regular AutoRegressor for comparison
    autoreg = tigerforecast.method("AutoRegressor")
    autoreg.initialize(p=4)

    # problem initialize
    p, q = 4, 0
    problem = tigerforecast.problem("ARMA-v0")
    y_true = problem.initialize(p, q, noise_magnitude=0.1)

    # run all boosting methods
    result_list = [[] for n in Ns]
    last_value = []
    autoreg_loss = []
    for i in range(T):
        y_next = problem.step()

        # predictions for every boosting method
        for result_i, method_i in zip(result_list, methods):
            y_pred = method_i.predict(y_true)
            result_i.append(mse(y_next, y_pred))
            method_i.update(y_next)

        # last value and autoregressor predictions
        last_value.append(mse(y_true, y_next))
        autoreg_loss.append(mse(autoreg.predict(y_true), y_next))
        autoreg.update(y_next)
        y_true = y_next

    # plot performance
    if show:
        start = 100
        x = np.arange(start, steps)
        plt.figure(figsize=(12, 8))

        # plot every boosting method loss
        for n, results in zip(Ns, result_list):
            print("Mean loss for n={}: {}".format(
                n, np.mean(np.array(results[start:]))))
            plt.plot(x,
                     avg_regret(results[start:]),
                     label="SimpleBoost, n={}".format(n))

        # plot loss for last value and autoregressor methods
        print("Mean loss for LastValue: {}".format(
            np.mean(np.array(last_value[start:]))))
        plt.plot(x, avg_regret(last_value[start:]), label="Last value method")
        print("Mean loss for AutoRegressor: {}".format(
            np.mean(np.array(autoreg_loss[start:]))))
        plt.plot(x,
                 avg_regret(autoreg_loss[start:]),
                 label="AutoRegressor method")

        plt.title("SimpleBoost method on ARMA problem")
        plt.legend()
        plt.show(block=False)
        plt.pause(10)
        plt.close()
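Note: avg_regret is undefined in the excerpt; given the plots it labels, it most likely turns a loss series into its running average (average regret up to time t). A plausible definition:

import numpy as np

def avg_regret(losses):
    # running mean of the losses: cumulative loss divided by elapsed steps
    losses = np.asarray(losses)
    return np.cumsum(losses) / np.arange(1, len(losses) + 1)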
Example #22
def test_simple_boost_arma(steps=500, show=True):
    # method initialize
    T = steps
    method_id = "AutoRegressor"
    method_params = {'p': 18, 'optimizer': OGD}
    Ns = [64]
    timelines = [6, 9, 12]

    # regular AutoRegressor for comparison
    autoreg = tigerforecast.method("AutoRegressor")
    autoreg.initialize(p=18, optimizer=OGD)

    fig, ax = plt.subplots(nrows=1, ncols=3)
    cur = 0

    # run all boosting methods
    for timeline in timelines:

        # problem initialize
        problem = tigerforecast.problem("ENSO-v0")
        x, y_true = problem.initialize(input_signals=['oni'],
                                       timeline=timeline)
        methods = []

        for n in Ns:  # number of weak learners
            method = tigerforecast.method("SimpleBoost")
            method.initialize(method_id, method_params, n,
                              reg=0.0)  # regularization
            methods.append(method)

        result_list = [[] for n in Ns]
        autoreg_loss = []

        for i in tqdm(range(T)):

            # predictions for every boosting method
            for result_i, method_i in zip(result_list, methods):
                y_pred = method_i.predict(x)
                result_i.append(mse(y_true, y_pred))
                method_i.update(y_true)

            # autoregressor predictions (no last-value baseline in this test)
            autoreg_loss.append(mse(autoreg.predict(x), y_true))
            autoreg.update(y_true)
            x, y_true = problem.step()

        # plot performance
        if show:

            start = T // 2

            # plot every boosting method loss
            for n, results in zip(Ns, result_list):
                print("Mean loss for n={}: {}".format(
                    n, np.mean(np.array(results))))
                ax[cur].plot(avg_regret(results[-start:]),
                             label="SimpleBoost, n={}".format(n))

            # plot loss for last value and autoregressor methods
            print("Mean loss for AutoRegressor: {}".format(
                np.mean(np.array(autoreg_loss))))
            ax[cur].plot(avg_regret(autoreg_loss[-start:]),
                         label="AutoRegressor method")
            ax[cur].legend(loc="upper right", fontsize=8)

        cur += 1

    fig.tight_layout()
    plt.show()
Example #23
def test_tigerforecast_problem():
    problem = tigerforecast.problem('Random-v0')
    assert problem.spec.id == 'Random-v0'
    return
Example #24
def run_experiment(problem,
                   method,
                   metric='mse',
                   lr_tuning=True,
                   key=0,
                   timesteps=None,
                   verbose=0):
    '''
    Description: Runs a single experiment, evaluating one method on one problem.

    Args:
        problem (tuple): problem id and parameters to initialize the specific problem instance with
        method (tuple): method id and parameters to initialize the specific method instance with
        metric (string): metric we are interested in computing for the current experiment
        lr_tuning (bool): whether to tune the method's learning rate before the run
        key (int): random seed, for reproducibility
        timesteps (int): number of time steps to run the experiment for
        verbose (int): verbosity level (2 enables a progress bar)
    Returns:
        loss (list): loss series for the specified metric over the entirety of the experiment
        time (float): time elapsed
        memory (float): memory used
    '''
    set_key(key)

    # extract specifications
    (problem_id, problem_params) = problem
    (method_id, method_params) = method

    loss_fn = metrics[metric]

    # initialize problem
    problem = tigerforecast.problem(problem_id)

    if (problem_params is None):
        init = problem.initialize()
    else:
        init = problem.initialize(**problem_params)

    # get first few x and y
    if (problem.has_regressors):
        x, y = init
    else:
        x, y = init, problem.step()

    # initialize method
    method = tigerforecast.method(method_id)

    if (method_params is None):
        method_params = {}
    try:
        method_params['n'] = x.shape[0]
    except (AttributeError, IndexError):  # x is a scalar
        method_params['n'] = 1
    try:
        method_params['m'] = y.shape[0]
    except (AttributeError, IndexError):  # y is a scalar
        method_params['m'] = 1

    if (lr_tuning):
        method_params = tune_lr(method_id, method_params, problem_id,
                                problem_params)

    method.initialize(**method_params)
    if (timesteps is None):
        if (problem.max_T == -1):
            print(
                "WARNING: On simulated problem, the number of timesteps should be specified. Will default to 5000."
            )
            timesteps = 5000
        else:
            timesteps = problem.max_T - method.p - 2
    elif (problem.max_T != -1):
        timesteps = min(timesteps, problem.max_T - method.p - 2)

    for i in range(method.p):
        method.predict(x)
        new = problem.step()
        #print('x:{0}, y:{1}'.format(x,y))
        if (problem.has_regressors):
            x, y = new
        else:
            x, y = y, new

    #print('history:{0}'.format(method.past))
    if (verbose and key == 0):
        print("Running %s on %s..." % (method_id, problem_id))

    loss = np.zeros(timesteps)
    time_start = time.time()
    memory = 0
    load_bar = False
    if (verbose == 2):
        load_bar = True
    # get loss series
    for i in tqdm(range(timesteps), disable=(not load_bar or key != 0)):
        # get loss and update method
        try:  # this avoids exceptions usually caused by ONS
            cur_loss = float(loss_fn(y, method.predict(x)))
            # .at[].set() is the modern replacement for the deprecated
            # jax.ops.index_update (assumes np is jax.numpy, so loss is a jax array)
            loss = loss.at[i].set(cur_loss)
            method.update(y)
            # get new pair of observation and label
            new = problem.step()
            if (problem.has_regressors):
                x, y = new
            else:
                x, y = y, new
        except Exception:
            loss = loss.at[i].set(float('nan'))

    return loss, time.time() - time_start, memory
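Note: run_experiment looks its loss function up in a metrics registry and calls set_key and tune_lr helpers that this excerpt omits. A minimal sketch of the registry, assuming its 'mse' entry matches the mse used in the other tests:

import jax.numpy as np

# loss registry; only the 'mse' entry is exercised by run_experiment above
metrics = {
    'mse': lambda y_true, y_pred: np.mean((y_true - y_pred) ** 2),
}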