Example #1
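A unit test for creme's metric composition: metrics that apply to the same task can be chained with +, while mixing regression and classification metrics raises a ValueError.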
import pytest

from creme import metrics


def test_compose():

    # Metrics that work on the same kind of target compose freely.
    metrics.MAE() + metrics.MSE()
    metrics.Accuracy() + metrics.LogLoss()
    metrics.Accuracy() + metrics.ConfusionMatrix()

    # Mixing regression and classification metrics is rejected.
    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.LogLoss()

    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.MAE() + metrics.LogLoss()
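For context, composing two compatible metrics yields a single object that updates them all at once. A minimal usage sketch, assuming creme's streaming metric API:

from creme import metrics

metric = metrics.MAE() + metrics.MSE()

for y_true, y_pred in [(3.0, 2.5), (-0.5, 0.0), (7.0, 8.0)]:
    metric.update(y_true, y_pred)

print(metric)  # reports both metrics, e.g. "MAE: ..., MSE: ..."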
Example #2
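A trimmed-down variant of the previous test that keeps only the incompatible combinations; pytest and creme.metrics are imported as above.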
def test_compose():

    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.LogLoss()

    with pytest.raises(ValueError):
        _ = metrics.MSE() + metrics.MAE() + metrics.LogLoss()
Example #3
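A progressive-evaluation loop on creme's bike-sharing dataset: stream.simulate_qa replays the stream with a 30-minute label delay, so each sample first arrives as a question (no label yet, so the model predicts) and later as an answer (the label is available, so the metric and the model are updated).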
def main():
    import datetime as dt
    from creme import compose
    from creme import datasets
    from creme import feature_extraction
    from creme import linear_model
    from creme import metrics
    from creme import preprocessing
    from creme import stats
    from creme import stream

    X_y = datasets.Bikes()
    X_y = stream.simulate_qa(X_y,
                             moment='moment',
                             delay=dt.timedelta(minutes=30))

    def add_time_features(x):
        return {**x, 'hour': x['moment'].hour, 'day': x['moment'].weekday()}

    model = add_time_features
    model |= (
        compose.Select('clouds', 'humidity', 'pressure', 'temperature', 'wind') +
        feature_extraction.TargetAgg(by=['station', 'hour'], how=stats.Mean()) +
        feature_extraction.TargetAgg(by='station', how=stats.EWMean())
    )
    model |= preprocessing.StandardScaler()
    model |= linear_model.LinearRegression()

    metric = metrics.MAE()

    questions = {}

    for i, x, y in X_y:
        # Question
        is_question = y is None
        if is_question:
            y_pred = model.predict_one(x)
            questions[i] = y_pred

        # Answer
        else:
            metric.update(y, questions[i])
            model = model.fit_one(x, y)

            if i >= 30000 and i % 30000 == 0:
                print(i, metric)
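
Example #4

From a benchmark script: the same linear regression is built with PyTorch and Keras so both can be compared against creme's LinearRegression on a common stream.

    # n_features, torch_optim, keras_optim, creme_lin_reg and X_y are defined
    # in the part of the script that this excerpt leaves out.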
    torch_model = PyTorchNet(n_features=n_features)
    torch_lin_reg = PyTorchRegressor(network=torch_model,
                                     loss_fn=torch.nn.MSELoss(),
                                     optimizer=torch_optim(
                                         torch_model.parameters()))

    inputs = layers.Input(shape=(n_features, ))
    predictions = layers.Dense(1,
                               kernel_initializer='zeros',
                               bias_initializer='zeros')(inputs)
    keras_model = models.Model(inputs=inputs, outputs=predictions)
    keras_model.compile(optimizer=keras_optim, loss='mean_squared_error')
    keras_lin_reg = KerasRegressor(keras_model)

    creme_metric = metrics.MAE()
    torch_metric = metrics.MAE()
    keras_metric = metrics.MAE()

    scaler = preprocessing.StandardScaler()

    for x, y in X_y:

        x = scaler.fit_one(x).transform_one(x)

        creme_metric.update(y, creme_lin_reg.predict_one(x))
        creme_lin_reg.fit_one(x, y)

        torch_metric.update(y, torch_lin_reg.predict_one(x))
        torch_lin_reg.fit_one(x, y)
Example #5
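Loads a previously saved regression model with joblib, attaches fresh MAE and RMSE metrics, and dumps the bundle back to disk. The fragment before the main guard is the tail of a memory-check helper that was disabled by wrapping it in a triple-quoted string.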
        for i in mem:
            sline = i.split()
            if str(sline[0]) in ('MemFree:', 'Buffers:', 'Cached:'):
                free_memory += int(sline[1])
    return free_memory
"""

if __name__ == '__main__':
    # memory_limit()  # Limits maximum memory usage to half of free memory
    try:
        # Use t0 rather than `time` so the time module is not shadowed.
        t0 = dt.datetime.now()
        model = joblib.load("ARFRegressionModel")
        print("Took", dt.datetime.now() - t0, "to load model")

        dob = dt.datetime.now()
        # Named device_metrics so it does not shadow the creme metrics module.
        device_metrics = {
            "ESPsensormae": metrics.MAE(),
            "ESPsensorrmse": metrics.RMSE()
        }
        data = {
            "model": model,
            "date_of_birth": dob,
            "metrics": metrics
        }
        print("Inside save_device Process. Saving device")
        t0 = dt.datetime.now()
        joblib.dump(data, "Models\\teste")
        print("Took", dt.datetime.now() - t0, "to save model")
        
    except MemoryError:
        sys.stderr.write('\n\nERROR: Memory Exception\n')
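For reference, the disabled helper whose tail appears above most plausibly looked like this sketch (Linux-only, since it reads /proc/meminfo; the resource-based cap is an assumption based on the "half of memory" comment):

import resource

def get_free_memory_kb():
    # Sum the free, buffer and cache figures from /proc/meminfo (values in kB).
    free_memory = 0
    with open('/proc/meminfo') as mem:
        for line in mem:
            sline = line.split()
            if sline[0] in ('MemFree:', 'Buffers:', 'Cached:'):
                free_memory += int(sline[1])
    return free_memory

def memory_limit():
    # Cap this process's address space at half of the currently free memory.
    _, hard = resource.getrlimit(resource.RLIMIT_AS)
    resource.setrlimit(resource.RLIMIT_AS,
                       (get_free_memory_kb() * 1024 // 2, hard))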
Example #6
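Part of a parametrized test that pairs each creme metric with its scikit-learn counterpart; the excerpt opens midway through the TEST_CASES list of (metric, sk_metric) pairs.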
    (metrics.FBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5)),
    (metrics.MacroFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='macro')),
    (metrics.MicroFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='micro')),
    (metrics.WeightedFBeta(beta=.5),
     functools.partial(sk_metrics.fbeta_score, beta=.5, average='weighted')),
    (metrics.F1(), sk_metrics.f1_score),
    (metrics.MacroF1(), functools.partial(sk_metrics.f1_score,
                                          average='macro')),
    (metrics.MicroF1(), functools.partial(sk_metrics.f1_score,
                                          average='micro')),
    (metrics.WeightedF1(),
     functools.partial(sk_metrics.f1_score, average='weighted')),
    (metrics.MCC(), sk_metrics.matthews_corrcoef),
    (metrics.MAE(), sk_metrics.mean_absolute_error),
    (metrics.MSE(), sk_metrics.mean_squared_error),
]


@pytest.mark.parametrize('metric, sk_metric', TEST_CASES)
@pytest.mark.filterwarnings('ignore::RuntimeWarning')
@pytest.mark.filterwarnings(
    'ignore::sklearn.metrics.classification.UndefinedMetricWarning')
def test_metric(metric, sk_metric):

    # Check str works
    str(metric)

    for y_true, y_pred, sample_weights in generate_test_cases(metric=metric,
                                                              n=30):
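The loop body and the generate_test_cases helper are both outside this excerpt. A hypothetical stand-in, purely to illustrate the expected interface (the task check by class name is an assumption):

import random

def generate_test_cases(metric, n):
    # Hypothetical: Gaussian floats for regression metrics, binary labels otherwise.
    if metric.__class__.__name__ in ('MAE', 'MSE'):
        y_true = [random.gauss(0, 1) for _ in range(n)]
        y_pred = [random.gauss(0, 1) for _ in range(n)]
    else:
        y_true = [random.randint(0, 1) for _ in range(n)]
        y_pred = [random.randint(0, 1) for _ in range(n)]
    yield y_true, y_pred, [random.random() for _ in range(n)]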
Example #7
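Trains four identical online ensemble pipelines, one per target sensor, from a pandas data frame that is first cleaned of missing values and shuffled.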
    def __init__(self, data_collector):
        dc = data_collector
        data = dc.get_data_frame()
        metric = metrics.MAE()

        # delete NA examples
        data = data.dropna()

        # shuffle data
        X_y = data.sample(frac=1).reset_index(drop=True)

        data = X_y[['x', 'y', 'theta']].to_dict('records')
        target_1 = X_y[['sensor_1']]
        target_2 = X_y[['sensor_3']]
        target_3 = X_y[['sensor_5']]
        target_4 = X_y[['sensor_7']]

        print('constructing models')

        # The four targets share one architecture, so build the pipeline in a
        # single factory instead of repeating the definition four times.
        def make_model():
            return Pipeline([
                ("scale", StandardScaler()),
                ("learn",
                 ensemble.HedgeRegressor([
                     linear_model.LinearRegression(optim.SGD()),
                     linear_model.LinearRegression(optim.RMSProp()),
                     linear_model.LinearRegression(optim.Adam())
                 ]))
            ])

        model_1, model_2, model_3, model_4 = (make_model() for _ in range(4))

        print('start training')

        for x, y_1, y_2, y_3, y_4 in zip(
                data,
                target_1.values,
                target_2.values,
                target_3.values,
                target_4.values,
        ):
            model_1, y_pred_1 = self._update_model(model_1, x, y_1)
            model_2, y_pred_2 = self._update_model(model_2, x, y_2)
            model_3, y_pred_3 = self._update_model(model_3, x, y_3)
            model_4, y_pred_4 = self._update_model(model_4, x, y_4)

        self.models = [model_1, model_2, model_3, model_4]

        print('done...')
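The _update_model helper is not shown in this excerpt. Judging from the call sites and creme's predict_one/fit_one API, a plausible sketch (the float() cast for the 1-element target arrays is an assumption):

    def _update_model(self, model, x, y):
        # Score first so the prediction never sees the current label, then learn.
        y_pred = model.predict_one(x)
        model = model.fit_one(x, float(y))
        return model, y_pred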
Example #8
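An MQTT discovery callback: each newly announced sensor is registered on its device and its state topic subscribed to; topics containing 'mae' or 'rmse' are treated as rolling metrics instead of sensors.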
def on_discover_sensor(client, userdata, msg):
    if number_of_devices < MAX_DEVICES:
        parsed = json.loads(msg.payload.decode())
        logger.log('info', "Just discovered sensor: " + parsed["unique_id"])
        device_id = (parsed["device"]["name"] + "_" +
                     parsed["device"]["identifiers"])
        if device_id not in nodes:
            create_device(parsed, device_id)
        try:
            # Add Sensor to Device
            nodes[device_id]["device"].add_Sensor(Sensor(
                parsed["name"], parsed["unique_id"], parsed["state_topic"], parsed["unit_of_measurement"]))
            # Handle MQTT subscribe and callbacks
            nodes[device_id]["mqtt"].subscribe_to_topic(
                parsed["state_topic"], 1)
            nodes[device_id]["mqtt"].add_message_callback(
                parsed["state_topic"], nodes[device_id]["device"].on_sensor_state_change)
        except KeyError:
            # Metric payloads presumably lack "unit_of_measurement", so adding
            # them as a plain sensor fails; register them as metrics instead.
            if 'mae' in parsed["state_topic"]:
                nodes[device_id]["device"].add_Metrics(Metrics(
                    parsed["name"], parsed["unique_id"], parsed["state_topic"], metrics.Rolling(metrics.MAE(), int(10080/TRAIN_INTERVAL))))
            elif 'rmse' in parsed["state_topic"]:
                nodes[device_id]["device"].add_Metrics(Metrics(
                    parsed["name"], parsed["unique_id"], parsed["state_topic"], metrics.Rolling(metrics.RMSE(), int(10080/TRAIN_INTERVAL))))
        # Check if the device has all its components
        check_if_finished(device_id)
Example #9
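A hook that returns the default set of regression metrics to track: MAE, RMSE and SMAPE.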
    def default_metrics(self):
        return [metrics.MAE(), metrics.RMSE(), metrics.SMAPE()]
Example #10
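Replays creme's Taxis dataset in (optionally sped-up) real time against a model server assumed to listen at http://localhost:5000: each trip is sent as a question at pickup time and is meant to be scored with MAE once its true duration becomes known.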

if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('speed_up', type=int, nargs='?', default=1)
    args = parser.parse_args()

    def sleep(td: dt.timedelta):
        # td.seconds is only the clock component (always 0..86399); use
        # total_seconds() so negative delays are skipped and days count too.
        if td.total_seconds() >= 0:
            time.sleep(td.total_seconds() / args.speed_up)

    # Use the first trip's departure time as a reference time
    taxis = datasets.Taxis()
    now = next(iter(taxis))[0]['pickup_datetime']
    mae = metrics.MAE()
    host = 'http://localhost:5000'
    predictions = {}

    for trip_no, trip, duration in stream.simulate_qa(
        taxis,
        moment='pickup_datetime',
        delay=lambda _, duration: dt.timedelta(seconds=duration)
    ):

        trip_no = str(trip_no).zfill(len(str(taxis.n_samples)))

        # Taxi trip starts

        if duration is None:
Example #11
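From a 360° video viewport-prediction script: a passive-aggressive regressor is trained on the first five seconds of head-movement data, then alternates between predicting a chunk of frames and fitting on it, tracking MAE on both coordinates along with a Manhattan tile error.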
def build_model(data, frame_nos, max_frame, tot_objects, width, height,
                nrow_tiles, ncol_tiles, fps, pred_nframe):
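    # Note: data, learning_rate and rho are not constructor parameters of
    # creme's stock PARegressor (which takes C, mode and eps); this script
    # appears to rely on a locally modified version of the class.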
    model = linear_model.PARegressor(C=0.01,
                                     mode=2,
                                     eps=0.001,
                                     data=data,
                                     learning_rate=0.005,
                                     rho=0.99)
    metric_X = metrics.MAE()
    metric_Y = metrics.MAE()
    manhattan_error = []
    x_mae = []
    y_mae = []
    count = 0

    i = 0
    tile_manhattan_error = 0
    act_tiles, pred_tiles = [], []
    chunk_frames = []

    # Initial training on the first 5 seconds
    prev_frames = {0}
    while True:
        curr_frame = frame_nos[i]
        prev_frames.add(i)
        if curr_frame < 5 * fps:
            i = i + 1
            [inp_i, x, y] = data[curr_frame]
            model = model.fit_one(inp_i, x, y)
        else:
            break
    prev_frames = sorted(prev_frames)
    cnt = 0
    # Predict each chunk of frames, then update the model on it
    while True:
        curr_frame = frame_nos[i]
        nframe = min(pred_nframe, max_frame - frame_nos[i])

        if nframe < 1:
            break

        frames = {i}
        for k in range(i + 1, len(frame_nos)):
            if frame_nos[k] < curr_frame + nframe:
                frames.add(k)
            else:
                i = k
                break
        if i != k:
            i = k

        if i == len(frame_nos) - 1:
            break
        frames = sorted(frames)
        chunk_frames.append(frames)

        metric_X, metric_Y, tile_manhattan_error, count, act_tiles, pred_tiles = pred_frames(
            data, model, metric_X, metric_Y, frames, prev_frames,
            tile_manhattan_error, act_tiles, pred_tiles, count, width, height,
            nrow_tiles, ncol_tiles)
        model = model.fit_n(frames)

        prev_frames = prev_frames + frames
        manhattan_error.append(tile_manhattan_error * 1.0 / count)
        x_mae.append(metric_X.get())
        y_mae.append(metric_Y.get())

        print("Manhattan Tile Error: " +
              str(tile_manhattan_error * 1.0 / count))
        print(metric_X, metric_Y)
        print("\n")
        cnt = cnt + 1
        if cnt == 60:
            break

    return act_tiles, pred_tiles, chunk_frames, manhattan_error, x_mae, y_mae