Example #1
0
    def __call__(
        self,
        chunk_size: int = None,
        name: str = None,
        unit: str = None,
        is_string: bool = None,
        is_step: bool = None,
        asset_ids: List[int] = None,
        root_asset_ids: List[int] = None,
        metadata: Dict[str, Any] = None,
        external_id_prefix: str = None,
        created_time: Dict[str, Any] = None,
        last_updated_time: Dict[str, Any] = None,
        limit: int = None,
        include_metadata=True,
    ) -> Generator[Union[TimeSeries, TimeSeriesList], None, None]:
        """Iterate over time series

        Fetches time series as they are iterated over, so you keep a limited number of objects in memory.

        Args:
            chunk_size (int, optional): Number of time series to return in each chunk. Defaults to yielding one time series a time.
            name (str): Name of the time series. Often referred to as tag.
            unit (str): Unit of the time series.
            is_string (bool): Whether the time series is a string time series.
            is_step (bool): Whether the time series is a step (piecewise constant) time series.
            asset_ids (List[int], optional): List time series related to these assets.
            root_asset_ids (List[int], optional): List time series related to assets under these root assets.
            metadata (Dict[str, Any]): Custom, application specific metadata. String key -> String value
            external_id_prefix (str): Filter by this (case-sensitive) prefix for the external ID.
            created_time (Dict[str, Any]): Range between two timestamps
            last_updated_time (Dict[str, Any]): Range between two timestamps
            limit (int, optional): Maximum number of time series to return. Defaults to return all items.
            include_metadata (bool, optional): Ignored. Only present in parameter list for backward compatibility.

        Yields:
            Union[TimeSeries, TimeSeriesList]: yields TimeSeries one by one if chunk is not specified, else TimeSeriesList objects.
        """
        # Build the server-side filter payload; camelCase keys are what the API expects.
        # Named `ts_filter` (not `filter`) to avoid shadowing the builtin.
        ts_filter = TimeSeriesFilter(
            name=name,
            unit=unit,
            is_step=is_step,
            is_string=is_string,
            asset_ids=asset_ids,
            root_asset_ids=root_asset_ids,
            metadata=metadata,
            created_time=created_time,
            last_updated_time=last_updated_time,
            external_id_prefix=external_id_prefix,
        ).dump(camel_case=True)
        return self._list_generator(method="POST", chunk_size=chunk_size, filter=ts_filter, limit=limit)
Example #2
0
 def test_search_with_filter(self, mock_ts_response):
     # Search combining free-text terms with a structured filter; the mocked
     # response items must round-trip through the result's camelCase dump.
     result = TS_API.search(name="n", description="d", query="q", filter=TimeSeriesFilter(unit="bla"))
     assert result.dump(camel_case=True) == mock_ts_response.calls[0].response.json()["items"]
     # The request body must carry the filter and the search terms separately.
     request_body = jsgz_load(mock_ts_response.calls[0].request.body)
     assert request_body["filter"]["unit"] == "bla"
     assert request_body["search"] == {"name": "n", "description": "d", "query": "q"}
Example #3
0
 def test_search(self, mock_ts_response):
     # Search with only a filter: the request body should still contain a
     # "search" section with all terms defaulted to None.
     result = TS_API.search(filter=TimeSeriesFilter(is_string=True))
     assert result.dump(camel_case=True) == mock_ts_response.calls[0].response.json()["items"]
     expected_body = {
         "search": {"name": None, "description": None, "query": None},
         "filter": {"isString": True},
         "limit": None,
     }
     assert expected_body == jsgz_load(mock_ts_response.calls[0].request.body)
Example #4
0
 def test_search(self):
     # Integration check: searching by name with a created_time lower bound
     # of 0 (epoch) should match at least one existing time series.
     matches = COGNITE_CLIENT.time_series.search(
         name="test__timestamp_multiplied",
         filter=TimeSeriesFilter(created_time={"min": 0}),
     )
     assert len(matches) > 0
Example #5
0
 def test_aggregate(self, new_ts):
     # Aggregating with a name filter must report a positive count
     # (the new_ts fixture guarantees at least one series exists).
     aggregates = COGNITE_CLIENT.time_series.aggregate(filter=TimeSeriesFilter(name="any"))
     assert aggregates[0].count > 0
Example #6
0
    def list(
        self,
        name: str = None,
        unit: str = None,
        is_string: bool = None,
        is_step: bool = None,
        asset_ids: List[int] = None,
        asset_external_ids: List[str] = None,
        root_asset_ids: List[int] = None,
        asset_subtree_ids: List[int] = None,
        asset_subtree_external_ids: List[str] = None,
        data_set_ids: List[int] = None,
        data_set_external_ids: List[str] = None,
        metadata: Dict[str, Any] = None,
        external_id_prefix: str = None,
        created_time: Dict[str, Any] = None,
        last_updated_time: Dict[str, Any] = None,
        partitions: int = None,
        limit: int = 25,
        include_metadata=True,
    ) -> TimeSeriesList:
        """`List time series <https://docs.cognite.com/api/v1/#operation/listTimeSeries>`_

        Returns the matching time series as a single list (use iteration via
        ``__call__`` instead if you need to keep a limited number of objects in memory).

        Args:
            name (str): Name of the time series. Often referred to as tag.
            unit (str): Unit of the time series.
            is_string (bool): Whether the time series is a string time series.
            is_step (bool): Whether the time series is a step (piecewise constant) time series.
            asset_ids (List[int], optional): List time series related to these assets.
            asset_external_ids (List[str], optional): List time series related to these assets.
            root_asset_ids (List[int], optional): List time series related to assets under these root assets.
            asset_subtree_ids (List[int]): List of asset subtrees ids to filter on.
            asset_subtree_external_ids (List[str]): List of asset subtrees external ids to filter on.
            data_set_ids (List[int]): Return only time series in the specified data sets with these ids.
            data_set_external_ids (List[str]): Return only time series in the specified data sets with these external ids.
            metadata (Dict[str, Any]): Custom, application specific metadata. String key -> String value
            external_id_prefix (str): Filter by this (case-sensitive) prefix for the external ID.
            created_time (Union[Dict[str, int], TimestampRange]):  Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
            last_updated_time (Union[Dict[str, int], TimestampRange]):  Range between two timestamps. Possible keys are `min` and `max`, with values given as time stamps in ms.
            partitions (int): Retrieve time series in parallel using this number of workers. Also requires `limit=None` to be passed.
            limit (int, optional): Maximum number of time series to return.  Defaults to 25. Set to -1, float("inf") or None to return all items.
            include_metadata (bool, optional): Ignored. Only present in parameter list for backward compatibility.

        Returns:
            TimeSeriesList: The requested time series.

        Examples:

            List time series::

                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> res = c.time_series.list(limit=5)

            Iterate over time series::

                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> for ts in c.time_series:
                ...     ts # do something with the time_series

            Iterate over chunks of time series to reduce memory load::

                >>> from cognite.client import CogniteClient
                >>> c = CogniteClient()
                >>> for ts_list in c.time_series(chunk_size=2500):
                ...     ts_list # do something with the time_series
        """
        # Merge numeric and external ids into the wrapped-id form the API expects.
        if asset_subtree_ids or asset_subtree_external_ids:
            asset_subtree_ids = self._process_ids(asset_subtree_ids, asset_subtree_external_ids, wrap_ids=True)
        if data_set_ids or data_set_external_ids:
            data_set_ids = self._process_ids(data_set_ids, data_set_external_ids, wrap_ids=True)

        # Named `ts_filter` (not `filter`) to avoid shadowing the builtin.
        ts_filter = TimeSeriesFilter(
            name=name,
            unit=unit,
            is_step=is_step,
            is_string=is_string,
            asset_ids=asset_ids,
            asset_external_ids=asset_external_ids,
            root_asset_ids=root_asset_ids,
            asset_subtree_ids=asset_subtree_ids,
            metadata=metadata,
            data_set_ids=data_set_ids,
            created_time=created_time,
            last_updated_time=last_updated_time,
            external_id_prefix=external_id_prefix,
        ).dump(camel_case=True)
        return self._list(method="POST", filter=ts_filter, limit=limit, partitions=partitions)