Code example #1
import time

from cognite.v05.dto import Datapoint, TimeseriesWithDatapoints

# log() and post_multi_tag_datapoints() are project helpers defined elsewhere.


def sample(cities):
    # Sample bike availability for all cities
    for city, data in cities.items():
        log(' Sampling for %s ...' % city)
        try:
            datapoints = []
            for station in data['get_availability']():
                bysykkel_id = station.id
                num_bikes = station.bikes
                num_locks = station.locks

                if bysykkel_id in data['id_mapping']:
                    bikes_asset_name = data['id_mapping'][bysykkel_id][
                        'asset_name'] + '_bikes'
                    locks_asset_name = data['id_mapping'][bysykkel_id][
                        'asset_name'] + '_locks'
                    timestamp = int(time.time() * 1000)
                    datapoints.append(
                        TimeseriesWithDatapoints(
                            bikes_asset_name,
                            [Datapoint(timestamp, num_bikes)]))
                    datapoints.append(
                        TimeseriesWithDatapoints(
                            locks_asset_name,
                            [Datapoint(timestamp, num_locks)]))
            timestamp = int(time.time() * 1000)
            log('  Posting %d data points for %s at %d' %
                (len(datapoints), city, timestamp))
            post_multi_tag_datapoints(datapoints)
            log('  Data points posted to CDP.')
        except Exception as e:
            log('  Error fetching availability for %s: %s' % (city, str(e)))
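The shape of the `cities` argument is only implied by the body of `sample`: each entry needs a `get_availability` callable returning station objects with `id`, `bikes` and `locks`, plus an `id_mapping` from station id to a CDP asset name. A minimal sketch with hypothetical station data (the city, ids and asset names are made up for illustration):

from types import SimpleNamespace


def get_oslo_availability():
    # Hypothetical stand-in for the real city bike (bysykkel) API client.
    return [
        SimpleNamespace(id=42, bikes=7, locks=5),
        SimpleNamespace(id=43, bikes=0, locks=12),
    ]


cities = {
    'Oslo': {
        'get_availability': get_oslo_availability,
        'id_mapping': {
            42: {'asset_name': 'oslo_station_42'},
            43: {'asset_name': 'oslo_station_43'},
        },
    },
}

# sample(cities) would then post one *_bikes and one *_locks datapoint per mapped station.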
Code example #2
def post_multitag_datapoints(
        depthseries_with_datapoints: List[TimeseriesWithDatapoints], **kwargs):
    """Insert data into multiple depthseries.

        Args:
            depthseries_with_datapoints (List[v05.dto.DepthseriesWithDatapoints]): The depthseries with data to insert.

        Keyword Args:
            api_key (str): Your api-key.

            project (str): Project name.

        Returns:
            An empty response.
        """
    timeseries = []
    for depthseries in depthseries_with_datapoints:
        valueseries = TimeseriesWithDatapoints(depthseries.name, [])
        indexseries = TimeseriesWithDatapoints(
            _generateIndexName(depthseries.name), [])
        # Encode the depth index as a synthetic time axis: each successive
        # reading is placed MS_INCREMENT milliseconds after the previous one.
        offset: int = 0
        for datapoint in depthseries.datapoints:
            valueseries.datapoints.append(Datapoint(offset, datapoint.value))
            indexseries.datapoints.append(Datapoint(offset, datapoint.depth))
            offset += MS_INCREMENT
        timeseries.append(valueseries)
        timeseries.append(indexseries)

    return ts.post_multi_tag_datapoints(timeseries)
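A usage sketch for post_multitag_datapoints, assuming only that each depth datapoint exposes .depth and .value as read by the loop above; SimpleNamespace stands in for the real depth-datapoint DTO, and the series name and readings are hypothetical:

from types import SimpleNamespace

depth_readings = [
    SimpleNamespace(depth=1000.0, value=52.1),
    SimpleNamespace(depth=1000.5, value=53.4),
    SimpleNamespace(depth=1001.0, value=51.8),
]

gamma_series = TimeseriesWithDatapoints('well_A_gamma_ray', depth_readings)
post_multitag_datapoints([gamma_series], api_key='...', project='...')

Each input series produces two CDP timeseries: one carrying the values and one (named by _generateIndexName) carrying the depths, both keyed on the synthetic MS_INCREMENT time axis.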
Code example #3
def post_datapoints(name, depthdatapoints: List[DatapointDepth], **kwargs):
    """Insert a list of datapoints.

    Args:
        name (str): Name of the timeseries to insert to.

        depthdatapoints (List[DatapointDepth]): List of depth datapoint data transfer objects to insert.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.

    Returns:
        An empty response.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    offset = 0  # Synthetic start timestamp; depth order is encoded as time offsets

    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/timeseries/data".format(project)

    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }
    datapoints = []
    depthpoints = []
    # Split each depth reading into two parallel series, one carrying the
    # values and one carrying the depths, on a shared synthetic time axis.
    for datapoint in depthdatapoints:
        datapoints.append(Datapoint(offset, datapoint.value))
        depthpoints.append(Datapoint(offset, datapoint.depth))
        offset += MS_INCREMENT

    # Post in chunks of ul_dps_limit datapoints to stay under the API's
    # per-request limit; value and depth-index series are posted together.
    ul_dps_limit = 100000
    i = 0
    while i < len(datapoints):
        body = {
            "items": [
                {
                    "name": name,
                    "datapoints": [dp.__dict__ for dp in datapoints[i:i + ul_dps_limit]],
                },
                {
                    "name": _generateIndexName(name),
                    "datapoints": [dp.__dict__ for dp in depthpoints[i:i + ul_dps_limit]],
                },
            ]
        }
        _utils.post_request(url, body=body, headers=headers)
        i += ul_dps_limit

    return {}
Code example #4
def upload_datapoints_historical(logger, sensor_id, sensor_data, api_key,
                                 project_name, log):
    """Historical datapoints uploader, unique for Sauter Vision API
    Converts timestamp to unix and batches data if over 10000 and upload to RAW
    :param logger: Energima logger
    :param sensor_data: data from API client
    :param api_key: API key from CDP
    :param project_name: Energima
    :return None
    """
    sensor_values = sensor_data["HistoricalDataValues"]
    var1 = sensor_id["Name"].replace(".", "_")
    name = var1.replace(" ", "_")

    points = []
    for reading in sensor_values:
        # Timestamps arrive as ISO-8601 strings, with or without fractional seconds.
        t = reading["LocalTimestamp"]
        if '.' in t:
            timestamp = int(
                datetime.datetime.strptime(
                    t, "%Y-%m-%dT%H:%M:%S.%f").timestamp() * 1000)
        else:
            timestamp = int(
                datetime.datetime.strptime(t, "%Y-%m-%dT%H:%M:%S").timestamp()
                * 1000)
        # Values use comma as the decimal separator.
        val = float(reading["ReceivedValue"].replace(",", "."))
        points.append(Datapoint(timestamp, val))

        if len(points) >= 10000:  # Post in batches of 10K
            try:
                timeseries.post_datapoints(name,
                                           points,
                                           api_key=api_key,
                                           project=project_name)
                points = []
            except Exception as err:
                # ConnectionError and TimeoutError end up here too; all
                # failures are logged identically.
                logger.error("upload_datapoints_historical: " + str(err))
            else:
                log.info("batching datapoints: " + name)
    if points:  # Post any remaining datapoints from the final partial batch
        try:
            timeseries.post_datapoints(name,
                                       points,
                                       api_key=api_key,
                                       project=project_name)
        except Exception as err:
            logger.error("upload_datapoints_historical: " + str(err))
        else:
            log.info("Posting last datapoints: " + name)
Code example #5
def upload_datapoints_live(logger, sensor, api_key, project_name, log):
    """Live datapoints uploader, unique for Sauter Vision API
         Converts timestamp to unix and upload to RAW
        :param logger: Energima logger
        :param sensor_data: data from API client
        :param api_key: API key from CDP
        :param project_name: Energima
        :return None
        """
    sensor_values = sensor
    var1 = sensor_values["Name"].replace(".", "_")
    name = var1.replace(" ", "_")

    points = []
    t = sensor_values["LocalTimestamp"]

    if "," in (sensor_values["PresentValue"]):
        val = float(sensor_values["PresentValue"].replace(",", "."))
        points.append(Datapoint((int(t * 1000)), val))
    elif sensor_values["PresentValue"] == "inactive":
        val = (sensor_values["PresentValue"])
        points.append(Datapoint((int(t * 1000)), val))
    elif sensor_values["PresentValue"] == "active":
        val = (sensor_values["PresentValue"])
        points.append(Datapoint((int(t * 1000)), val))
    else:
        val = float(sensor_values["PresentValue"])
        points.append(Datapoint((int(t * 1000)), val))
    try:
        timeseries.post_datapoints(name,
                                   points,
                                   api_key=api_key,
                                   project=project_name)
    except Exception as err:
        # ConnectionError and TimeoutError end up here too.
        logger.error("upload_datapoints_live: " + str(err))
    else:
        log.info("Posting datapoints: " + name)
Code example #6
def test_split_TimeseriesWithDatapoints_if_over_limit():
    from cognite.v05.dto import TimeseriesWithDatapoints
    from cognite.v05.dto import Datapoint
    from cognite.v05.timeseries import _split_TimeseriesWithDatapoints_if_over_limit
    from typing import List

    timeseries_with_datapoints_over_limit = TimeseriesWithDatapoints(
        name="test", datapoints=[Datapoint(x, x) for x in range(1000)])

    result: List[TimeseriesWithDatapoints] = _split_TimeseriesWithDatapoints_if_over_limit(
        timeseries_with_datapoints_over_limit, 100)

    assert isinstance(result[0], TimeseriesWithDatapoints)
    assert len(result) == 10

    result = _split_TimeseriesWithDatapoints_if_over_limit(
        timeseries_with_datapoints_over_limit, 1000)

    assert isinstance(result[0], TimeseriesWithDatapoints)
    assert len(result) == 1
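The test above only pins down the observable behaviour of _split_TimeseriesWithDatapoints_if_over_limit: 1000 points split with a limit of 100 yield 10 chunks, and a limit of 1000 yields a single chunk. The SDK's actual implementation is not shown here, but a minimal sketch that would satisfy the test is:

def _split_TimeseriesWithDatapoints_if_over_limit(timeseries_with_datapoints, limit):
    # Chunk the datapoint list into pieces of at most `limit` points, wrapping
    # each chunk in its own TimeseriesWithDatapoints with the same name.
    dps = timeseries_with_datapoints.datapoints
    return [
        TimeseriesWithDatapoints(timeseries_with_datapoints.name, dps[i:i + limit])
        for i in range(0, len(dps), limit)
    ]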
Code example #7
import time
from datetime import datetime, timedelta

import numpy as np
import pandas as pd

# Datapoint, DataTransferService, generate_data_spec, models and
# post_datapoints are assumed to come from the project's Cognite SDK setup;
# they are not defined in this snippet.


def main():
    output_columns = [
        "SKAP_18FI381-VFlLGas/Y/10sSAMP|average",
        "SKAP_18FI381-VFlLH2O/Y/10sSAMP|average",
        "SKAP_18FI381-VFlLOil/Y/10sSAMP|average",
    ]
    router = "SKAP_18HV3806/BCH/10sSAMP|stepinterpolation"
    one_hour_ago = datetime.now() - timedelta(hours=1)
    last_processed_timestamp = int(one_hour_ago.timestamp() * 1e3)

    is_first = True

    while True:
        # Placeholder frame until the first fetch from DataTransferService succeeds.
        d2_inputs = pd.DataFrame([[np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]])
        d2_inputs.columns = ["hoho", "blaa", "hgi"] + output_columns
        input_has_nans = True
        while input_has_nans:
            ds = generate_data_spec(last_processed_timestamp)
            dts = DataTransferService(data_spec=ds)
            while True:
                try:
                    d2_inputs = dts.get_dataframes()["d2"]
                    break
                except Exception:
                    # Retry until the dataframe fetch succeeds.
                    time.sleep(2)
            any_nans_per_column = d2_inputs.drop(output_columns, axis=1).isna().any()
            all_nans_per_column = d2_inputs.drop(output_columns, axis=1).isna().all()

            print(any_nans_per_column)
            print(all_nans_per_column)

            if any_nans_per_column.any() and not all_nans_per_column.any():
                # Some (but not all) input columns have gaps: back the window
                # up 10 seconds and refetch.
                last_processed_timestamp -= 10000

            print(datetime.fromtimestamp(last_processed_timestamp * 1e-3))
            time.sleep(2)
            input_has_nans = d2_inputs.drop(output_columns, axis=1).isna().any().any()

        last_ts = d2_inputs["timestamp"].iloc[-1]

        print(d2_inputs[output_columns[0]].values.tolist())
        d2_inputs_formatted = (
            d2_inputs.drop("timestamp", axis=1).drop(router, axis=1).drop(output_columns, axis=1).values.tolist()
        )
        timestamps = d2_inputs["timestamp"]
        res = models.online_predict(
            model_id=3885574571413770, version_id=4299054386152423, instances=[d2_inputs_formatted]
        )

        predictions = res["predictions"][0]
        formatted_predictions = [int(pred[0]) for pred in predictions]
        last_processed_timestamp = int(last_ts)

        dps = [Datapoint(ts, value) for ts, value in zip(timestamps.values.tolist(), formatted_predictions)]
        print([dp.value for dp in dps])
        if is_first:
            post_datapoints(name="SKAP_18FI381-VFlLGas/Y/10sSAMP_calc_D02_2", datapoints=dps)
            is_first = False
        else:
            for dp in dps:
                post_datapoints(name="SKAP_18FI381-VFlLGas/Y/10sSAMP_calc_D02_2", datapoints=[dp])
                time.sleep(5)