Example #1
    def predict(self, future_points: int = 1) -> List[Prediction]:

        predictions = []

        train_X, train_Y, test_X = self.build_train_test()

        train_X = np.transpose(train_X, (0, 2, 1))

        # self.current_model is assumed to hold an already-compiled Keras LSTM,
        # e.g. Sequential([LSTM(n_hidden, input_shape=(1, train_window)), Dense(1)]).
        model = self.current_model

        model.fit(train_X, train_Y, epochs=training_iters, batch_size=10, verbose=0)

        for k in range(future_points):

            test_X = np.expand_dims(test_X, axis=2).transpose(0, 2, 1)
            result = model.predict(test_X)

            result = self.scaler.inverse_transform(result)

            predictions.append(result[0][0])

            self.add_observation(result)

            _, _, test_X = self.build_train_test(build_test_x_only=True)

        # K.clear_session()
        # tf.reset_default_graph()

        return [Prediction(p) for p in predictions]
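
Every example here assumes the same surrounding scaffolding: module-level hyperparameters (train_window, n_hidden, training_iters, learning_rate, num_features) referenced as free variables, and a Prediction value object. None of it appears in the snippets, so the following is a minimal, hypothetical sketch; the names are taken from the code, but the default values and the Prediction layout are assumptions:

    # Hypothetical scaffolding assumed by the examples -- the names come from
    # the snippets, the concrete values and the Prediction fields are guesses.
    from typing import Optional

    train_window = 10       # length of the sliding input window
    n_hidden = 32           # hidden units of the LSTM / RNN models
    training_iters = 100    # training epochs / iterations
    learning_rate = 0.001
    num_features = 1        # univariate series

    class Prediction:
        """Point forecast with an optional uncertainty estimate."""

        def __init__(self, value: float, stddev: Optional[float] = None):
            self.value = value
            self.stddev = stddev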
Example #2
    def __parse_kwargs__(self, **kwargs):
        def __extract_values__(values):
            if isinstance(values, str):
                return [float(x) for x in values.split(" ")]
            elif isinstance(values, list):
                return [float(x) for x in values]
            else:
                raise RuntimeError(
                    "Unrecognized type for values: {}".format(values))

        self.forecasting_window = int(kwargs.pop("forecasting_window", 1))
        self.minimum_observations = int(kwargs.pop("minimum_observations", 10))

        self.observations = __extract_values__(kwargs.pop("observations", []))

        predicted_values = __extract_values__(
            kwargs.pop("predicted_values", []))
        predicted_stddev = __extract_values__(
            kwargs.pop("predicted_stddev", []))
        assert len(predicted_values) == len(predicted_stddev), \
            "The length of predicted values ({}) and of the stddev ({}) are not equal.".format(
                len(predicted_values), len(predicted_stddev))
        self.predictions = [
            Prediction(value, stddev)
            for value, stddev in zip(predicted_values, predicted_stddev)
        ]

        for key, value in kwargs.items():
            setattr(self, key, value)
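
For reference, __parse_kwargs__ accepts the numeric fields either as space-separated strings or as plain lists, and any unrecognized keyword becomes an attribute. A hypothetical usage sketch (the owning class name TimeSeries is assumed; the method is presumably invoked from its constructor):

    # Hypothetical call; in practice __parse_kwargs__ would be fed from __init__.
    ts = TimeSeries()
    ts.__parse_kwargs__(
        forecasting_window=5,
        minimum_observations=10,
        observations="1.0 2.0 3.0",        # space-separated string...
        predicted_values=[1.1, 2.1, 3.1],  # ...or a plain list
        predicted_stddev="0.2 0.2 0.3",
    )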
Example #3

    def predict(self, future_points: int = 1) -> List[Prediction]:

        predictions = []
        sigmas = []

        train_X, train_Y, test_X = self.build_train_test()
        train_X = np.squeeze(train_X, axis=2)

        kernel = gpflow.kernels.RBF(1)
        model = gpflow.models.GPR(train_X, train_Y, kern=kernel)

        opt = gpflow.train.ScipyOptimizer()
        opt.minimize(model)

        for k in range(future_points):

            mean, var = model.predict_y(test_X)
            result = self.scaler.inverse_transform(mean)[0][0]
            variances = self.scaler.inverse_transform(var)[0][0]

            predictions.append(result)
            sigmas.append(variances)

            self.add_observation(result)
            _, _, test_X = self.build_train_test()

        return [
            Prediction(value, sigma)
            for value, sigma in zip(predictions, sigmas)
        ]
Example #4
    def predict(self, future_points: int = 1) -> List[Prediction]:

        train_X, train_Y, test_X = self.build_train_test(future_points)

        win_out = 1
        win_in = train_window
        model = autoreg.DeepAutoreg([0, win_out],
                                    train_Y,
                                    U=train_X,
                                    U_win=win_in,
                                    kernels=[
                                        GPy.kern.RBF(win_out,
                                                     ARD=True,
                                                     variance=0.8,
                                                     lengthscale=4),
                                        GPy.kern.RBF(win_in + win_out,
                                                     ARD=True,
                                                     variance=0.8,
                                                     lengthscale=4)
                                    ])

        model.optimize(messages=1, max_iters=70)

        posterior_pred = model.freerun(U=test_X)
        result = posterior_pred.mean
        pred_vars = posterior_pred.variance

        predictions = self.scaler.inverse_transform(result).flatten()
        variances = self.scaler.inverse_transform(pred_vars).flatten()

        return [
            Prediction(value, variance)
            for value, variance in zip(predictions, variances)
        ]
Example #5
    def run(self):
        with open(self.experiment_result_file_path, "w",
                  newline='') as csv_file:
            for ts in self.time_series:
                self.model.reset()

                for i in range(ts.minimum_observations):
                    ts.predictions.append(Prediction(ts.observations[i], 0))

                for value in ts.observations:
                    if len(self.model.get_observations()) >= ts.minimum_observations:
                        predictions = self.model.predict(ts.forecasting_window)
                        assert ts.forecasting_window == len(predictions), \
                            "Fewer predictions were returned than requested: {} vs {}".format(
                                len(predictions), ts.forecasting_window)
                        ts.predictions.extend(predictions)
                        if len(ts.predictions) >= len(ts.observations):
                            break
                    self.model.add_observation(value)

                self.__dump_result_on_csv__(csv_file, ts.to_csv())
Example #6
    def predict(self, future_points: int = 1) -> List[Prediction]:

        predictions = []

        train_X, train_Y, test_X = self.build_train_test()

        train_X = np.transpose(train_X, (0, 2, 1))

        model = Sequential()
        model.add(LSTM(n_hidden, input_shape=(1, train_window)))
        model.add(Dense(1))

        optimizer = optimizers.Adam(lr=0.001)
        model.compile(loss='mean_squared_error', optimizer=optimizer)

        model.fit(train_X,
                  train_Y,
                  epochs=training_iters,
                  batch_size=1,
                  verbose=1)

        for k in range(future_points):

            test_X = np.expand_dims(test_X, axis=2).transpose(0, 2, 1)
            result = model.predict(test_X)
            result = self.scaler.inverse_transform(result)

            predictions.append(result[0][0])

            self.add_observation(result)
            _, _, test_X = self.build_train_test()

        return [
            Prediction(predictions[i]) for i in np.arange(len(predictions))
        ]
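
Both Keras variants (Examples #1 and #6) reshape the training windows to match input_shape=(1, train_window): build_train_test presumably yields arrays of shape (samples, train_window, 1), and the transpose swaps the last two axes so each sample becomes a single timestep carrying train_window features. A standalone sketch of that reshaping, with the shapes assumed from the code:

    import numpy as np

    train_window = 10
    samples = 5

    # Assumed output of build_train_test(): one scaled window per sample.
    train_X = np.random.rand(samples, train_window, 1)

    # (samples, train_window, 1) -> (samples, 1, train_window), matching
    # LSTM(n_hidden, input_shape=(1, train_window)).
    train_X = np.transpose(train_X, (0, 2, 1))
    print(train_X.shape)  # (5, 1, 10)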
Example #7

    def predict(self, future_points: int = 1) -> List[Prediction]:

        neural_net = TimeSeriesNnet(hidden_layers=[50, 20],
                                    activation_functions=['sigmoid', 'sigmoid'])

        neural_net.fit(np.array(self.time_series_values),
                       lag=train_window,
                       epochs=1000,
                       verbose=0)
        predictions = neural_net.predict_ahead(n_ahead=future_points)

        return [Prediction(p) for p in predictions]
Example #8
    def predict(self, future_points: int = 1) -> List[Prediction]:
        model = ARIMA(self.time_series_values, order=(p, d, q))
        model_fit = model.fit(disp=0)

        predictions, std_errors, confidence_intervals = model_fit.forecast(steps=future_points)

        return [
            Prediction(value, stderr * np.sqrt(len(self.time_series_values)))
            for value, stderr in zip(predictions, std_errors)
        ]
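
The three-way unpacking of forecast() relies on the legacy statsmodels ARIMA (statsmodels.tsa.arima_model, removed in statsmodels 0.13), which returns the point forecasts, their standard errors, and the confidence intervals. A minimal sketch against that legacy API, with a toy series and an arbitrary (p, d, q):

    import numpy as np
    from statsmodels.tsa.arima_model import ARIMA  # legacy API, removed in statsmodels >= 0.13

    values = np.cumsum(np.random.randn(100))  # toy series
    model_fit = ARIMA(values, order=(1, 1, 0)).fit(disp=0)

    forecast, std_errors, conf_int = model_fit.forecast(steps=3)
    print(forecast.shape, std_errors.shape, conf_int.shape)  # (3,) (3,) (3, 2)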
Example #9
    def run(self, mp_logger: MultiProcessLogger = None):
        if mp_logger is not None:
            mp_logger.add_logger()
            logger = MultiProcessLogger.logger(self.model.name)
            logger.info("Experiment Started.")
        else:
            logger = None

        if self.csv_writing:
            csv_file = open(self.experiment_result_file_path, "w", newline='')
        else:
            csv_file = None

        if logger is not None:
            logger.debug(" Predicting {} time-series.".format(
                len(self.time_series)))

        for ts in self.time_series:
            self.model.reset()
            ts.reset()
            assert len(ts.predictions) == 0, \
                "There are already some predictions ({}) inside this TimeSeries object.".format(len(ts.predictions))

            if logger is not None:
                logger.debug("  Time-Series of {} observations.".format(
                    len(ts.observations)))

            for i in range(ts.minimum_observations):
                ts.predictions.append(Prediction(ts.observations[i], 0))

            for i, value in enumerate(ts.observations):
                if len(self.model.get_observations()) >= ts.minimum_observations:
                    # if logger is not None:
                    #     logger.debug("   Predicting the points from {} to {}.".format(i, i + ts.forecasting_window))
                    predictions = self.model.predict(ts.forecasting_window)
                    assert ts.forecasting_window == len(predictions), \
                        "Fewer predictions were returned than requested: {} vs {}".format(
                            len(predictions), ts.forecasting_window)
                    ts.predictions.extend(predictions)
                    if len(ts.predictions) >= len(ts.observations):
                        break
                self.model.add_observation(value)
            if csv_file is not None:
                self.__dump_result_on_csv__(csv_file, ts.to_csv())

        if csv_file is not None:
            csv_file.close()

        if logger is not None:
            logger.info("Experiment Finished.")

        return self.time_series
Example #10
    def predict(self, future_points: int = 1) -> List[Prediction]:

        exp_smooth = ExponentialSmoothing(np.array(self.time_series_values),
                                          trend="add")
        model = exp_smooth.fit()
        predictions = model.forecast(future_points)

        return [Prediction(p) for p in predictions]
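
ExponentialSmoothing here is presumably statsmodels' Holt-Winters implementation; with trend="add" it fits an additive-trend model whose forecasts extrapolate the estimated level and trend. A standalone sketch under that assumption:

    import numpy as np
    from statsmodels.tsa.holtwinters import ExponentialSmoothing

    values = np.arange(50, dtype=float) + np.random.randn(50)  # toy trending series
    model = ExponentialSmoothing(values, trend="add").fit()
    print(model.forecast(3))  # three steps ahead, following the fitted trend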
Example #11
    def predict(self, future_points: int = 1) -> List[Prediction]:
        predictions = []

        for i in range(1, future_points + 1):
            train_x, train_y, test_x = self.build_train_test(
                future_points=i, train_window=train_window)

            regr = RandomForestRegressor(max_depth=2)
            train_model = regr.fit(train_x, train_y.ravel())

            result = train_model.predict(test_x)[0]
            predictions.append(Prediction(result))

        return predictions
Example #12
    def predict(self, future_points: int = 1) -> List[Prediction]:
        predictions = []

        for i in range(1, future_points + 1):
            train_x, train_y, test_x = self.build_train_test(
                future_points=i, train_window=train_window)

            clf = GridSearchCV(SVR(), parameters)
            train_model = clf.fit(train_x, train_y.ravel())

            result = train_model.predict(test_x)[0]
            predictions.append(Prediction(result))

        return predictions
Example #13
    def predict(self, future_points: int = 1) -> List[Prediction]:

        train_X, train_Y, test_X = self.build_train_test(future_points)
        df = self.build_df(train_X, train_Y)

        model = Prophet(uncertainty_samples=2000)
        model.fit(df)

        future = pd.DataFrame({"ds": test_X})
        forecast = model.predictive_samples(future)

        posterior_pred = forecast["yhat"]
        pred_mean = np.mean(posterior_pred, axis=1)
        pred_var = np.var(posterior_pred, axis=1)

        return [
            Prediction(mean, var)
            for mean, var in zip(pred_mean, pred_var)
        ]
Example #14
    def predict(self, future_points: int = 1) -> List[Prediction]:
        if self.last_optimization == 0:
            self.model = self.auto_arima()
        else:
            try:
                self.model = self.model.fit(self.time_series_values)
            except Exception as e:
                # The incremental refit failed; rerun the full ARIMA search.
                # print("Error from the fitting, rerunning the ARIMA optimization. Error: {}".format(e))
                self.model = self.auto_arima()

        self.last_optimization = (self.last_optimization + 1) % OPTIMIZE_EVERY

        predictions = self.model.predict(n_periods=future_points)

        return [Prediction(p) for p in predictions]
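
The model.predict(n_periods=...) call matches pmdarima's fitted-model interface, so self.auto_arima() is presumably a thin wrapper around pmdarima.auto_arima that re-runs the (p, d, q) search on the current observations. A hypothetical sketch of that helper; the library choice and arguments are assumptions:

    import pmdarima as pm

    def auto_arima(self):
        # Hypothetical helper: search for a good (p, d, q) on the
        # observations collected so far and return the fitted model.
        return pm.auto_arima(self.time_series_values,
                             suppress_warnings=True,
                             error_action="ignore")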
Example #15
    def predict(self, future_points: int = 1) -> List[Prediction]:

        tf.reset_default_graph()

        train_X, train_Y, test_X = self.build_train_test()

        X = tf.placeholder(tf.float32, [None, train_window, num_features],
                           name="X")
        y = tf.placeholder(tf.float32, [None, num_features], name="y")

        weights = tf.Variable(tf.random_normal([n_hidden, num_features]))
        biases = tf.Variable(tf.random_normal([num_features]))

        inputs = tf.unstack(X, train_window, 1)

        rnn_output = self.build_RNN(inputs, weights, biases)

        loss = tf.losses.mean_squared_error(rnn_output, y)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        train_op = optimizer.minimize(loss)

        init = tf.global_variables_initializer()

        predictions = []

        with tf.Session() as sess:

            sess.run(init)

            for i in range(training_iters):
                sess.run(train_op, feed_dict={X: train_X, y: train_Y})

            for k in range(future_points):

                test_X = np.expand_dims(test_X, axis=2)
                result = sess.run(rnn_output, feed_dict={X: test_X})
                result = self.scaler.inverse_transform(result)

                predictions.append(result[0][0])

                self.add_observation(result)
                _, _, test_X = self.build_train_test()

        return [Prediction(p) for p in predictions]
Example #16

    def predict(self, future_points: int = 1) -> List[Prediction]:
        predictions = []

        train_x, train_y, test_x = self.build_train_test(
            train_window=train_window)

        regr = GradientBoostingRegressor(**params)
        # regr = GridSearchCV(GradientBoostingRegressor(), params)
        train_model = regr.fit(train_x, train_y.ravel())

        for i in range(future_points):
            result = train_model.predict(test_x)[0]
            predictions.append(Prediction(result))

            self.add_observation(result)
            _, _, test_x = self.build_train_test(train_window=train_window)

        return predictions
Example #17

    def predict(self, future_points: int = 1) -> List[Prediction]:

        train_X, train_Y, test_X = self.build_train_test()

        rnn = RNN(input_size, n_hidden, n_layers, num_features)

        # Loss and Optimizer
        criterion = nn.MSELoss()
        optimizer = optim.Adam(rnn.parameters(), lr=learning_rate)

        # Train the model
        for epoch in range(training_iters):
            train_X_tens = torch.from_numpy(train_X)
            train_Y_tens = torch.from_numpy(train_Y)
            train_inputs = Variable(
                train_X_tens.view(-1, train_window, input_size)).float()
            train_labels = Variable(train_Y_tens).float()

            # Forward + Backward + Optimize
            optimizer.zero_grad()
            outputs = rnn(train_inputs)
            loss = criterion(outputs, train_labels)
            loss.backward()
            optimizer.step()

        # Test the model
        predictions = []
        for i in range(future_points):
            test_X_tens = torch.from_numpy(test_X)
            test_inputs = Variable(
                test_X_tens.view(-1, train_window, input_size)).float()
            outputs = rnn(test_inputs)
            result = outputs.data.numpy()[0][0]
            predictions.append(result)

            self.add_observation(result)
            _, _, test_X = self.build_train_test()

        return [Prediction(p) for p in predictions]
Example #18

    def predict(self, future_points: int = 1) -> List[Prediction]:
        return [
            Prediction(np.mean(self.time_series_values))
            for _ in range(future_points)
        ]
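
Taken together, the examples imply a common model interface: reset(), add_observation(value), get_observations(), and predict(n) returning List[Prediction]. A hypothetical driver exercising it, with MeanModel standing in for the baseline of Example #18 (the class name and the Prediction.value field are assumptions):

    # Hypothetical driver for the shared model interface.
    model = MeanModel()
    model.reset()

    for value in [1.0, 2.0, 3.0, 2.5, 3.5]:
        model.add_observation(value)

    if len(model.get_observations()) >= 3:
        for prediction in model.predict(future_points=3):
            print(prediction.value)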