 def __init__(self, window_size=200):
     super().__init__()
     self.window_size = window_size
     self.mae = metrics.Rolling(metrics.MAE(), window_size=self.window_size)
     self.mse = metrics.Rolling(metrics.MSE(), window_size=self.window_size)
     self.r2 = metrics.Rolling(metrics.R2(), window_size=self.window_size)
     self.sample_count = 0
     self.last_true_label = None
     self.last_prediction = None

 def reset(self):
     self.mae = metrics.Rolling(metrics.MAE(), window_size=self.window_size)
     self.mse = metrics.Rolling(metrics.MSE(), window_size=self.window_size)
     self.r2 = metrics.Rolling(metrics.R2(), window_size=self.window_size)
     self.sample_count = 0
     self.last_true_label = None
     self.last_prediction = None
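A minimal sketch of how these rolling metrics might be fed, assuming the two methods above belong to an evaluator class and that River's `metrics.Rolling` exposes `update(y_true, y_pred)` and `get()`; the `update` method below is an assumption, not part of the original snippet:

 def update(self, y_true, y_pred):
     # Hypothetical driver: push the newest observation into every rolling
     # metric and remember it for later inspection.
     self.mae.update(y_true, y_pred)
     self.mse.update(y_true, y_pred)
     self.r2.update(y_true, y_pred)
     self.sample_count += 1
     self.last_true_label = y_true
     self.last_prediction = y_pred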
Example #3
def test_rolling_r2():
    def tail(iterable, n):
        return collections.deque(iterable, maxlen=n)

    r2 = metrics.Rolling(metric=metrics.R2(), window_size=3)
    n = r2.window_size
    sk_r2 = sk_metrics.r2_score
    y_true = [
        0.4656520648923188,
        0.5768996330715701,
        0.045385529424484594,
        0.31852843450357393,
        0.8344133739124894,
    ]
    y_pred = [
        0.5431172475992199,
        0.2436885541729249,
        0.20238076597257637,
        0.6173775443360237,
        0.9194776501054074,
    ]

    for i, (yt, yp) in enumerate(zip(y_true, y_pred)):

        r2.update(yt, yp)

        if i >= 2:
            assert math.isclose(
                r2.get(),
                sk_r2(tail(y_true[:i + 1], n), tail(y_pred[:i + 1], n)))
Example #4
def evaluate_model(model):

    metric = metrics.Rolling(metrics.MAE(), 12)

    # dates = []
    y_trues = []
    y_preds = []

    for x, y in timeonly_training_dataset(target_data):

        # Obtain the prior prediction and update the model in one go
        y_pred = model.predict_one(x)
        model.learn_one(x, y)

        # Update the error metric
        metric.update(y, y_pred)

        # Store the true value and the prediction
        # dates.append(x['secs_elapsed'])
        y_trues.append(y)
        y_preds.append(y_pred)

    # Plot the results
    fig, ax = plt.subplots(figsize=(10, 6))
    ax.grid(alpha=0.75)
    ax.plot(y_trues, lw=3, color='#2ecc71', alpha=0.8, label='Ground truth')
    ax.plot(y_preds, lw=3, color='#e74c3c', alpha=0.8, label='Prediction')
    ax.legend()
    ax.set_title(metric)
    plt.show()
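`timeonly_training_dataset(target_data)` is not defined in this snippet. A hypothetical stand-in that yields `(features, target)` pairs in the shape the loop above consumes might look as follows; the generator name and the `secs_elapsed` key are assumptions taken from the commented-out lines:

def timeonly_training_dataset(target_data):
    # Hypothetical generator: pair each target value with a minimal
    # time-only feature dict, matching the x['secs_elapsed'] access above.
    for secs_elapsed, y in enumerate(target_data):
        yield {'secs_elapsed': float(secs_elapsed)}, y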
 def __init__(self, cm: "metrics.ConfusionMatrix" = None, window_size=200):
     self.window_size = window_size
     self._rolling_cm = metrics.Rolling(
         metrics.MultiLabelConfusionMatrix() if cm is None else cm,
         window_size=self.window_size,
     )
     super().__init__(cm=self._rolling_cm.metric)
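A minimal sketch of how the rolling confusion matrix above might receive new samples; the `update` method is an assumption (only `__init__` is shown in this snippet):

 def update(self, y_true, y_pred):
     # Route each observation through the rolling wrapper so that derived
     # metrics only reflect the most recent `window_size` samples.
     self._rolling_cm.update(y_true, y_pred)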
Example #6
def test_rolling_multi_fbeta():
    def tail(iterable, n):
        return collections.deque(iterable, maxlen=n)

    fbeta = metrics.Rolling(
        metric=metrics.MultiFBeta(betas={0: 0.25, 1: 1, 2: 4}, weights={0: 1, 1: 1, 2: 2}),
        window_size=3,
    )
    n = fbeta.window_size
    sk_fbeta = sk_metrics.fbeta_score
    y_true = [0, 1, 2, 2, 2]
    y_pred = [0, 1, 0, 2, 1]

    for i, (yt, yp) in enumerate(zip(y_true, y_pred)):

        fbeta.update(yt, yp)

        if i >= 2:
            sk_y_true, sk_y_pred = tail(y_true[: i + 1], n), tail(y_pred[: i + 1], n)
            fbeta_0, _, _ = sk_fbeta(sk_y_true, sk_y_pred, beta=0.25, average=None)
            _, fbeta_1, _ = sk_fbeta(sk_y_true, sk_y_pred, beta=1, average=None)
            _, _, fbeta_2 = sk_fbeta(sk_y_true, sk_y_pred, beta=4, average=None)

            multi_fbeta = fbeta_0 * 1 + fbeta_1 * 1 + fbeta_2 * 2
            multi_fbeta /= 1 + 1 + 2

            assert math.isclose(fbeta.get(), multi_fbeta)
Example #7
def test_rolling_metric(metric, sk_metric):
    def tail(iterable, n):
        return collections.deque(iterable, maxlen=n)

    for n in (1, 2, 5, 10):
        for y_true, y_pred, _ in generate_test_cases(metric=metric, n=30):

            m = metrics.Rolling(metric=copy.deepcopy(metric), window_size=n)

            # Check str works
            str(m)

            for i, (yt, yp) in enumerate(zip(y_true, y_pred)):

                if isinstance(yp, list):
                    yp = dict(enumerate(yp))

                m.update(y_true=yt, y_pred=yp)

                if i >= 1:
                    assert (
                        abs(
                            m.get()
                            - sk_metric(
                                y_true=tail(y_true[: i + 1], n),
                                y_pred=tail(y_pred[: i + 1], n),
                            )
                        )
                        < 1e-10
                    )
Example #8
def test_rolling_pair_confusion():
    def tail(iterable, n):
        return collections.deque(iterable, maxlen=n)

    metric = metrics.PairConfusionMatrix()

    for n in (1, 2, 5, 10):
        for y_true, y_pred, _ in generate_test_cases(metric=metric, n=30):

            m = metrics.Rolling(metric=copy.deepcopy(metric), window_size=n)

            for i, (yt, yp) in enumerate(zip(y_true, y_pred)):

                m.update(y_true=yt, y_pred=yp)

                sk_pair_confusion_matrix = sk_metrics.cluster.pair_confusion_matrix(
                    labels_true=tail(y_true[:i + 1], n),
                    labels_pred=tail(y_pred[:i + 1], n),
                )

                if i >= 1:
                    for j in [0, 1]:
                        for k in [0, 1]:
                            assert m.get()[j][k] == sk_pair_confusion_matrix[j][k]
def elevate_model(model):
    metric = metrics.Rolling(metrics.MAE(), 12)

    dates = []
    y_trues = []
    dates_pred = []
    y_preds = []

    for x, y in datasets.AirlinePassengers():
        y_trues.append(y)
        dates.append(x['month'])

    n_seen = 0

    for x, y in datasets.AirlinePassengers():
        # Obtain the prior prediction and update the model in one go
        y_pred = model.predict_one(x)
        model.learn_one(x, y)

        # Prediction after the model update (unused below)
        y_pred_post = model.predict_one(x)

        # Update the error metric
        metric.update(y, y_pred)

        # Store the date and the prediction
        dates_pred.append(x['month'])
        y_preds.append(y_pred)

        n_seen += 1

    fig, ax = plt.subplots(figsize=(10, 6))
    ax.grid(alpha=0.5)
    ax.plot(dates,
            y_trues,
            lw=3,
            color='#2ecc71',
            alpha=0.8,
            label='Ground Truth')
    ax.plot(dates,
            y_preds,
            lw=3,
            color='#e74c3c',
            alpha=0.8,
            label='Prediction')
    ax.legend()
    ax.set_title("Airline Passenger Example")
    plt.show()

    print(metric)
Example #10
def get_metric(period):
    return metrics.Rolling(metrics.MAE(), period)
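For reference, a short usage sketch of the helper above; the values are made up for illustration:

metric = get_metric(12)
for y, y_pred in [(10.0, 9.5), (12.0, 11.0), (11.0, 12.5)]:
    metric.update(y, y_pred)  # rolling MAE over the last 12 updates
print(metric.get())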
Example #11
    def build_model_4snarimax(self):
        # If a model backup exists, load it and update the model from start1 to start2
        if os.path.exists(self.pck_filename):
            src_bck = pickle.load(open(self.pck_filename, 'rb'))
            model = src_bck.snarimax_model
            metric = src_bck.snarimax_metric
            self.snarimax_para = src_bck.snarimax_para
            self.snarimax_model = model
            self.snarimax_metric = metric

            start1 = src_bck.data.index[-1]
            start2 = self.data.index[-1]  # self.data.index[-self.data.index[-1].weekday()]

        else:  # if the model backup does not exist, rebuild the model from the start
            p, d, q, m, sp, sd, sq = self.snarimax_para
            extract_features = compose.TransformerUnion(get_ordinal_date)
            model = (
                extract_features
                | time_series.SNARIMAX(
                    p=p,
                    d=d,
                    q=q,
                    m=m,
                    sp=sp,
                    sd=sd,
                    sq=sq,
                    regressor=(
                        # preprocessing.Normalizer() |
                        preprocessing.AdaptiveStandardScaler(alpha=0.1)
                        | preprocessing.StandardScaler()
                        # | preprocessing.RobustScaler(with_scaling=True)
                        | linear_model.LinearRegression(
                            intercept_init=0,
                            optimizer=optim.SGD(0.0001),  # important parameter
                            # optimizer=optim.AdaDelta(0.8, 0.00001),  # important parameter
                            # optimizer=optim.AMSGrad(lr=0.01, beta_1=0.8, beta_2=0.1),
                            intercept_lr=0.001,
                        )
                    ),
                )
            )

            metric = metrics.Rolling(metrics.MSE(), self.dd_historic)
            #metric = metrics.MSE()

            start1 = self.data.index[0]
            start2 = self.data.index[-1]  # self.data.index[-self.data.index[-1].weekday()]

        if start1 < start2:
            for t in pd.date_range(start1, start2, freq='D'):
                x, y = self.snarimax_data.loc[t][['ds', 'temp']].values
                y_pred = model.forecast(horizon=1, xs=[x])
                #print(x,y,y_pred[0],y-y_pred[0])
                model = model.learn_one(x, y)
                metric = metric.update(y, y_pred[0])

            self.snarimax_model = model
            self.snarimax_metric = metric
            with open(self.pck_filename, 'wb') as fh:
                pickle.dump(self, fh)

            #for t in pd.date_range(start1, start2):
            #    x = self.snarimax_data.loc[pd.date_range(t-timedelta(self.dd_historic),t)][['ds']].values
            #    y = self.snarimax_data.loc[pd.date_range(t-timedelta(self.dd_historic),t)][['temp']].values
            #    x = np.hstack(x)
            #    y = np.hstack(y)
            #    y_pred = model.forecast(horizon=self.dd_historic+1, xs=x)
            #    for i in range(0,self.dd_historic):
            #        model = model.learn_one(x[i], y[i])
            #        metric = metric.update(y[i], y_pred[i])

        return