def test_post_request_exception(self, mock_request):
        mock_request.return_value = MockReturnValue(status=500)
        mock_request.side_effect = Exception("Custom error")

        with pytest.raises(utils.APIError) as e:
            utils.post_request(url, RESPONSE)
        assert re.match("Custom error", str(e.value))
def post_datapoints(name, depthdatapoints: List[DatapointDepth], **kwargs):
    """Insert a list of datapoints.

    Args:
        name (str):       Name of timeseries to insert to.

        depthdatapoints (list[DatapointDepth]): List of depth datapoint data transfer objects to insert.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.

    Returns:
        An empty response.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    offset = 0  # Synthetic starting timestamp used to index the depth datapoints

    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/timeseries/data".format(project)

    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }
    datapoints = []
    depthpoints = []
    for datapoint in depthdatapoints:
        datapoints.append(Datapoint(offset, datapoint.value))
        depthpoints.append(Datapoint(offset, datapoint.depth))
        offset += MS_INCREMENT

    ul_dps_limit = 100000
    i = 0
    while i < len(datapoints):
        body = {
            "items": [
                {
                    "name":
                    name,
                    "datapoints":
                    [dp.__dict__ for dp in datapoints[i:i + ul_dps_limit]]
                },
                {
                    "name":
                    _generateIndexName(name),
                    "datapoints":
                    [dp.__dict__ for dp in depthpoints[i:i + ul_dps_limit]],
                },
            ]
        }
        _utils.post_request(url, body=body, headers=headers)
        i += ul_dps_limit

    return {}
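# Usage sketch (made-up series name and values). The DatapointDepth constructor shown here
# is an assumption; the snippet above only shows that it exposes .depth and .value.
depth_points = [
    DatapointDepth(depth=1000.0, value=42.1),
    DatapointDepth(depth=1000.5, value=42.3),
]
post_datapoints("my-depth-series", depth_points, api_key="my-key", project="my-project")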
Example #3
def delete_rows(
    database_name: str = None, table_name: str = None, rows: List[RawRow] = None, api_key=None, project=None
):
    """Deletes rows in the Raw API.

    Args:
        database_name (str):    The database containing the table.

        table_name (str):       The table to delete rows from.

        rows (list):            The rows to delete.

        api_key (str):          Your api-key.

        project (str):          Project name.

    Returns:
        An empty response.

    """
    api_key, project = config.get_config_variables(api_key, project)
    url = config.get_base_url(api_version=0.4) + "/projects/{}/raw/{}/{}/delete".format(
        project, database_name, table_name
    )
    body = {"items": [{"key": "{}".format(row.key), "columns": row.columns} for row in rows]}
    headers = {"api-key": api_key, "content-type": "*/*", "accept": "application/json"}
    res = _utils.post_request(url=url, body=body, headers=headers, cookies=config.get_cookies())
    return res.json()
Example #4
def delete_assets(asset_ids: List[int], **kwargs):
    """Delete a list of assets.

    Args:
        asset_ids (list[int]): List of IDs of assets to delete.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.

    Returns:
        An empty response.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/assets/delete".format(project)
    body = {"items": asset_ids}
    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }
    res = utils.post_request(url,
                             body=body,
                             headers=headers,
                             cookies=config.get_cookies())
    return res.json()
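# Usage sketch with made-up asset IDs; api_key/project can also be resolved from the
# config module, as in the function body above.
delete_assets([123456789, 987654321], api_key="my-key", project="my-project")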
def tag_matching(tag_ids, fuzzy_threshold=0, platform=None, **kwargs):
    """Returns a TagMatchingObject containing a list of matched tags for the given query.

    This method takes an arbitrary string as argument and performs fuzzy matching with a user defined threshold
    toward tag ids in the system.

    Args:
        tag_ids (list):         The tag_ids to retrieve matches for.

        fuzzy_threshold (int):  The threshold to use when searching for matches. A fuzzy threshold of 0 means you only
                                want to accept perfect matches. Must be >= 0.

        platform (str):         The platform to search on.

    Keyword Args:
        api_key (str):          Your api-key.

        project (str):          Project name.

    Returns:
        v04.dto.TagMatchingResponse: A data object containing the requested data with several getter methods with different
        output formats.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
    url = config.get_base_url(api_version=0.4) + "/projects/{}/tagmatching".format(project)
    body = {"tagIds": tag_ids, "metadata": {"fuzzyThreshold": fuzzy_threshold, "platform": platform}}
    headers = {"api-key": api_key, "content-type": "*/*", "accept": "application/json"}
    res = _utils.post_request(url=url, body=body, headers=headers, cookies=config.get_cookies())
    return TagMatchingResponse(res.json())
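# Usage sketch; the tag ids and platform are made up. Per the docstring above,
# fuzzy_threshold=0 accepts only exact matches.
matches = tag_matching(["21PT1019"], fuzzy_threshold=0, platform="my-platform",
                       api_key="my-key", project="my-project")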
Example #6
def delete_files(file_ids, **kwargs):
    """Delete

    Args:
        file_ids (list[int]):   List of IDs of files to delete.

    Keyword Args:
        api_key (str):          Your api key.

        project (str):          Your project.

    Returns:
        list: List of files deleted and files that failed to delete.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.4) + "/projects/{}/storage/delete".format(project)
    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }
    body = {"items": file_ids}
    res = _utils.post_request(url, body=body, headers=headers)
    return res.json()["data"]
def post_time_series(time_series: List[TimeSeries], **kwargs):
    """Create a new time series.

    Args:
        time_series (list[v05.dto.TimeSeries]):   List of time series data transfer objects to create.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.
    Returns:
        An empty response.
    """

    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/timeseries".format(project)

    body = {"items": [ts.__dict__ for ts in time_series]}

    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }

    res = _utils.post_request(url, body=body, headers=headers)
    return res.json()
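# Usage sketch; the TimeSeries constructor arguments (name, unit) are assumptions, not
# confirmed by the snippet above, which only serializes ts.__dict__.
post_time_series([TimeSeries(name="my-timeseries", unit="bar")],
                 api_key="my-key", project="my-project")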
Example #8
def post_assets(assets: List[Asset], **kwargs):
    """Insert a list of assets.

    Args:
        assets (list[v05.dto.Asset]): List of asset data transfer objects.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.

    Returns:
        v05.dto.AssetListResponse: A data object containing the posted assets with several getter methods with different
        output formats.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/assets".format(project)
    body = {"items": [asset.__dict__ for asset in assets]}
    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }
    res = utils.post_request(url,
                             body=body,
                             headers=headers,
                             cookies=config.get_cookies())
    return AssetListResponse(res.json())
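# Usage sketch; the Asset constructor arguments (name, description) are assumptions, not
# confirmed by the snippet above.
res = post_assets([Asset(name="pump-01", description="Feed pump")],
                  api_key="my-key", project="my-project")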
Example #9
def create_databases(database_names: list, api_key=None, project=None):
    """Creates databases in the Raw API and returns the created databases.

    Args:
        database_names (list):  A list of databases to create.

        api_key (str):          Your api-key.

        project (str):          Project name.

    Returns:
        v05.dto.RawResponse: A data object containing the requested data with several getter methods with different
        output formats.

    """
    api_key, project = config.get_config_variables(api_key, project)
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/raw/create".format(project)
    body = {
        "items": [{
            "dbName": "{}".format(database_name)
        } for database_name in database_names]
    }
    headers = {
        "api-key": api_key,
        "content-type": "*/*",
        "accept": "application/json"
    }
    res = _utils.post_request(url=url,
                              body=body,
                              headers=headers,
                              cookies=config.get_cookies())
    return RawResponse(res.json())
    def test_post_request_ok(self, mock_request):
        mock_request.return_value = MockReturnValue(json_data=RESPONSE)

        response = utils.post_request(url, RESPONSE)
        response_json = response.json()

        assert response.status_code == 200
        assert len(response_json['data']['items']) == len(RESPONSE)
def post_multi_tag_datapoints(
        timeseries_with_datapoints: List[TimeseriesWithDatapoints], **kwargs):
    """Insert data into multiple timeseries.

    Args:
        timeseries_with_datapoints (List[v05.dto.TimeseriesWithDatapoints]): The timeseries with data to insert.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.

        use_gzip (bool): Whether or not to gzip the request

    Returns:
        An empty response.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.4) + "/projects/{}/timeseries/data".format(project)

    use_gzip = kwargs.get("use_gzip", False)

    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }

    ul_dps_limit = 100000

    # Make sure we only work with TimeseriesWithDatapoints objects that have at most ul_dps_limit datapoints
    timeseries_with_datapoints_limited = []
    for entry in timeseries_with_datapoints:
        timeseries_with_datapoints_limited.extend(
            _split_TimeseriesWithDatapoints_if_over_limit(entry, ul_dps_limit))

    # Group these TimeseriesWithDatapoints if possible so that we upload as much as possible in each call to the API
    timeseries_to_upload_binned = _utils.first_fit(
        list_items=timeseries_with_datapoints_limited,
        max_size=ul_dps_limit,
        get_count=lambda x: len(x.datapoints))

    for bin in timeseries_to_upload_binned:
        body = {
            "items": [{
                "tagId":
                ts_with_data.name,
                "datapoints": [dp.__dict__ for dp in ts_with_data.datapoints]
            } for ts_with_data in bin]
        }
        res = _utils.post_request(url,
                                  body=body,
                                  headers=headers,
                                  use_gzip=use_gzip)

    return res.json()
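# Usage sketch; TimeseriesWithDatapoints(name=..., datapoints=[...]) and
# Datapoint(timestamp, value) are assumed constructors matching how the attributes are
# read above. Timestamps are ms since epoch.
ts_data = [
    TimeseriesWithDatapoints(name="my-timeseries",
                             datapoints=[Datapoint(1537152000000, 1.0),
                                         Datapoint(1537152001000, 2.0)])
]
post_multi_tag_datapoints(ts_data, api_key="my-key", project="my-project", use_gzip=True)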
    def test_post_request_gzip(self, mock_request):
        import json, gzip
        def check_gzip_enabled_and_return_mock(arg_url, data=None, headers=None, params=None, cookies=None):
            # URL is sent as is
            assert arg_url == url
            # gzip is added as Content-Encoding header
            assert headers["Content-Encoding"] == "gzip"
            # data is gzipped. Decompress and check if items size matches
            decompressed_assets = json.loads(gzip.decompress(data))
            assert len(decompressed_assets["data"]["items"]) == len(RESPONSE)
            # Return the mock response
            return MockReturnValue(json_data=RESPONSE)

        mock_request.side_effect = check_gzip_enabled_and_return_mock

        response = utils.post_request(
            url, RESPONSE,
            headers={},
            use_gzip=True)
        assert response.status_code == 200

        def check_gzip_disabled_and_return_mock(arg_url, data=None, headers=None, params=None, cookies=None):
            # URL is sent as is
            assert arg_url == url
            # gzip is not added as Content-Encoding header
            assert 'Content-Encoding' not in headers
            # data is not gzipped.
            assert len(json.loads(data)["data"]["items"]) == len(RESPONSE)
            # Return the mock response
            return MockReturnValue(json_data=RESPONSE)

        mock_request.side_effect = check_gzip_disabled_and_return_mock

        response = utils.post_request(
            url, RESPONSE,
            headers={},
            use_gzip=False)
        assert response.status_code == 200
Example #13
def create_rows(
    database_name: str = None,
    table_name: str = None,
    rows: List[RawRow] = None,
    api_key=None,
    project=None,
    ensure_parent=False,
    use_gzip=False,
):
    """Creates tables in the given Raw API database.

    Args:
        database_name (str):    The database to create rows in.

        table_name (str):       The table to create rows in.

        rows (list[v04.dto.RawRow]):            The rows to create.

        api_key (str):          Your api-key.

        project (str):          Project name.

        ensure_parent (bool):   Create database/table if it doesn't exist already

        use_gzip (bool):        Compress content using gzip

    Returns:
        An empty response

    """
    api_key, project = config.get_config_variables(api_key, project)
    url = config.get_base_url(api_version=0.4) + "/projects/{}/raw/{}/{}/create".format(
        project, database_name, table_name
    )

    headers = {"api-key": api_key, "content-type": "*/*", "accept": "application/json"}
    if ensure_parent:
        params = {"ensureParent": "true"}
    else:
        params = {}

    ul_row_limit = 1000
    i = 0
    while i < len(rows):
        body = {"items": [{"key": "{}".format(row.key), "columns": row.columns} for row in rows[i : i + ul_row_limit]]}
        res = _utils.post_request(
            url=url, body=body, headers=headers, params=params, cookies=config.get_cookies(), use_gzip=use_gzip
        )
        i += ul_row_limit
    return res.json()
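# Usage sketch; RawRow(key=..., columns=...) is an assumed constructor matching the
# row.key / row.columns attributes read above. Database and table names are made up.
rows = [RawRow(key="row-1", columns={"col1": "a", "col2": 1})]
create_rows(database_name="my-db", table_name="my-table", rows=rows,
            ensure_parent=True, api_key="my-key", project="my-project")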
    def test_post_request_args(self, mock_request):
        def check_args_to_post_and_return_mock(arg_url, data=None, headers=None, params=None, cookies=None):
            # URL is sent as is
            assert arg_url == url

            # cookies should be the same
            assert cookies == {"a-cookie": 'a-cookie-val'}

            # Return the mock response
            return MockReturnValue(json_data=RESPONSE)

        mock_request.side_effect = check_args_to_post_and_return_mock

        response = utils.post_request(
            url, RESPONSE,
            headers={"Existing-Header": "SomeValue"},
            cookies={"a-cookie": "a-cookie-val"},
            use_gzip=True)

        assert response.status_code == 200
Example #15
def delete_databases(database_names: list, recursive: bool = False, api_key=None, project=None):
    """Deletes databases in the Raw API.

    Args:
        database_names (list):  A list of databases to delete.

        recursive (bool):       Whether to also delete the tables in the databases.

        api_key (str):          Your api-key.

        project (str):          Project name.

    Returns:
        An empty response.

    """
    api_key, project = config.get_config_variables(api_key, project)
    url = config.get_base_url(api_version=0.4) + "/projects/{}/raw/delete".format(project)
    body = {"items": [{"dbName": "{}".format(database_name)} for database_name in database_names]}
    params = {"recursive": recursive}
    headers = {"api-key": api_key, "content-type": "*/*", "accept": "application/json"}
    res = _utils.post_request(url=url, body=body, params=params, headers=headers, cookies=config.get_cookies())
    return res.json()
Example #16
def post_events(events, **kwargs):
    """Adds a list of events and returns an EventListResponse object containing created events.

    Args:
        events (List[v05.dto.Event]):    List of events to create.

    Keyword Args:
        api_key (str):          Your api-key.
        project (str):          Project name.

    Returns:
        v05.dto.EventListResponse
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"), kwargs.get("project"))
    url = config.get_base_url(api_version=0.5) + "/projects/{}/events".format(project)

    headers = {"api-key": api_key, "content-type": "application/json", "accept": "application/json"}

    body = {"items": [event.__dict__ for event in events]}

    res = _utils.post_request(url, body=body, headers=headers)
    return EventListResponse(res.json())
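# Usage sketch; the Event constructor arguments (start_time, end_time, description) are
# assumptions, not confirmed by the snippet above. Timestamps are ms since epoch.
events = [Event(start_time=1537152000000, end_time=1537155600000, description="Pump maintenance")]
res = post_events(events, api_key="my-key", project="my-project")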
Example #17
def delete_tables(database_name: str = None,
                  table_names: list = None,
                  api_key=None,
                  project=None):
    """Deletes databases in the Raw API.

    Args:
        database_name (str):    The database to create tables in.

        table_names (list):     The table names to create.

        api_key (str):          Your api-key.

        project (str):          Project name.

    Returns:
        An empty response.

    """
    api_key, project = config.get_config_variables(api_key, project)
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/raw/{}/delete".format(
            project, database_name)
    body = {
        "items": [{
            "tableName": "{}".format(table_name)
        } for table_name in table_names]
    }
    headers = {
        "api-key": api_key,
        "content-type": "*/*",
        "accept": "application/json"
    }
    res = _utils.post_request(url=url,
                              body=body,
                              headers=headers,
                              cookies=config.get_cookies())
    return res.json()
    def test_post_request_failed(self, mock_request):
        mock_request.return_value = MockReturnValue(status=400, json_data={"error": "Client error"})

        with pytest.raises(utils.APIError) as e:
            utils.post_request(url, RESPONSE)
        assert re.match("Client error[\n]X-Request_id", str(e.value))

        mock_request.return_value = MockReturnValue(status=500, content="Server error")

        with pytest.raises(utils.APIError) as e:
            utils.post_request(url, RESPONSE)
        assert re.match("Server error[\n]X-Request_id", str(e.value))

        mock_request.return_value = MockReturnValue(status=500, json_data={"error": "Server error"})

        with pytest.raises(utils.APIError) as e:
            utils.post_request(url, RESPONSE)
        assert re.match("Server error[\n]X-Request_id", str(e.value))
def post_datapoints(tag_id, datapoints: List[Datapoint], **kwargs):
    """Insert a list of datapoints.

    Args:
        tag_id (str):       ID of timeseries to insert to.

        datapoints (list[v04.dto.Datapoint]): List of datapoint data transfer objects to insert.

    Keyword Args:
        api_key (str): Your api-key.

        project (str): Project name.

    Returns:
        An empty response.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.4) + "/projects/{}/timeseries/data/{}".format(
            project, quote(tag_id, safe=""))

    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }

    ul_dps_limit = 100000
    i = 0
    while i < len(datapoints):
        body = {
            "items": [dp.__dict__ for dp in datapoints[i:i + ul_dps_limit]]
        }
        res = _utils.post_request(url, body=body, headers=headers)
        i += ul_dps_limit
    return res.json()
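# Usage sketch; Datapoint is constructed positionally as (timestamp, value), matching its
# use in the depth-series example earlier. The tag id is made up.
post_datapoints("my-tag-id", [Datapoint(1537152000000, 99.5)],
                api_key="my-key", project="my-project")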
def _get_datapoints_frame_user_defined_limit(time_series, aggregates,
                                             granularity, start, end, limit,
                                             **kwargs):
    """Returns a DatapointsResponse object with the requested data.

    No paging or parallelizing is done.

    Args:
        time_series (list):      The list of timeseries names to retrieve data for. Each timeseries can be either a string containing the
                            ts name or a dictionary containing the ts name and a list of specific aggregate functions.

        aggregates (list):      The list of aggregate functions you wish to apply to the data. Valid aggregate functions
                                are: 'average/avg, max, min, count, sum, interpolation/int, stepinterpolation/step'.

        granularity (str):      The granularity of the aggregate values. Valid entries are : 'day/d, hour/h, minute/m,
                                second/s', or a multiple of these indicated by a number as a prefix e.g. '12hour'.

        start (Union[str, int, datetime]):    Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
                                    E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
                                    epoch or a datetime object which will be converted to ms since epoch UTC.

        end (Union[str, int, datetime]):      Get datapoints up to this time. Same format as for start.

        limit (int):            Max number of rows to retrieve. Max is 100,000.

    Keyword Arguments:
        api_key (str):          Your api-key. Obligatory in this helper method.

        project (str):          Project name. Obligatory in this helper method.
    Returns:
        v05.dto.DatapointsResponse: A data object containing the requested data with several getter methods with different
        output formats.
    """
    api_key, project = kwargs.get("api_key"), kwargs.get("project")
    cookies = kwargs.get("cookies")
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/timeseries/dataframe".format(project)
    body = {
        "items": [{
            "name": "{}".format(ts)
        } if isinstance(ts, str) else {
            "name": "{}".format(ts["name"]),
            "aggregates": ts.get("aggregates", [])
        } for ts in time_series],
        "aggregates":
        aggregates,
        "granularity":
        granularity,
        "start":
        start,
        "end":
        end,
        "limit":
        limit,
    }
    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "text/csv"
    }

    res = _utils.post_request(url=url,
                              body=body,
                              headers=headers,
                              cookies=cookies)
    df = pd.read_csv(
        io.StringIO(
            res.content.decode(
                res.encoding if res.encoding else res.apparent_encoding)))

    return df
def _get_datapoints_frame_helper(time_series,
                                 aggregates,
                                 granularity,
                                 start=None,
                                 end=None,
                                 **kwargs):
    """Returns a pandas dataframe of datapoints for the given timeseries all on the same timestamps.

    This method will automate paging for the user and return all data for the given time period.

    Args:
        time_series (list):     The list of timeseries names to retrieve data for. Each timeseries can be either a string containing the
                            ts name or a dictionary containing the ts name and a list of specific aggregate functions.

        aggregates (list):  The list of aggregate functions you wish to apply to the data for which you have not
                            specified an aggregate function. Valid aggregate functions are: 'average/avg, max, min,
                            count, sum, interpolation/int, stepinterpolation/step'.

        granularity (str):  The granularity of the aggregate values. Valid entries are : 'day/d, hour/h, minute/m,
                            second/s', or a multiple of these indicated by a number as a prefix e.g. '12hour'.

        start (Union[str, int, datetime]):    Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
                                    E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
                                    epoch or a datetime object which will be converted to ms since epoch UTC.

        end (Union[str, int, datetime]):      Get datapoints up to this time. Same format as for start.

    Keyword Arguments:
        api_key (str):                  Your api-key.

        project (str):                  Project name.

    Returns:
        pandas.DataFrame: A pandas dataframe containing the datapoints for the given timeseries. The datapoints for all the
        timeseries will all be on the same timestamps.

    Note:
        The ``timeseries`` parameter can take a list of strings and/or dicts on the following formats::

            Using strings:
                ['<timeseries1>', '<timeseries2>']

            Using dicts:
                [{'name': '<timeseries1>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']},
                {'name': '<timeseries2>', 'aggregates': []}]

            Using both:
                ['<timeseries1>', {'name': '<timeseries2>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']}]
    """
    api_key, project = kwargs.get("api_key"), kwargs.get("project")
    cookies = kwargs.get("cookies")
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/timeseries/dataframe".format(project)

    num_aggregates = 0
    for ts in time_series:
        if isinstance(ts, str) or ts.get("aggregates") is None:
            num_aggregates += len(aggregates)
        else:
            num_aggregates += len(ts["aggregates"])

    per_tag_limit = int(_constants.LIMIT / num_aggregates)

    body = {
        "items": [{
            "name": "{}".format(ts)
        } if isinstance(ts, str) else {
            "name": "{}".format(ts["name"]),
            "aggregates": ts.get("aggregates", [])
        } for ts in time_series],
        "aggregates":
        aggregates,
        "granularity":
        granularity,
        "start":
        start,
        "end":
        end,
        "limit":
        per_tag_limit,
    }
    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "text/csv"
    }
    dataframes = []
    while (not dataframes or dataframes[-1].shape[0]
           == per_tag_limit) and body["end"] > body["start"]:
        res = _utils.post_request(url=url,
                                  body=body,
                                  headers=headers,
                                  cookies=cookies)
        dataframes.append(
            pd.read_csv(
                io.StringIO(
                    res.content.decode(res.encoding if res.encoding else res.
                                       apparent_encoding))))
        if dataframes[-1].empty:
            break
        latest_timestamp = int(dataframes[-1].iloc[-1, 0])
        body["start"] = latest_timestamp + _utils.granularity_to_ms(
            granularity)
    return pd.concat(dataframes).reset_index(drop=True)
def get_multi_time_series_datapoints(datapoints_queries,
                                     aggregates=None,
                                     granularity=None,
                                     start=None,
                                     end=None,
                                     **kwargs):
    """Returns a list of DatapointsObjects each of which contains a list of datapoints for the given timeseries.

    This method will automate paging for the user and return all data for the given time period(s).

    Args:
        datapoints_queries (list[v05.dto.DatapointsQuery]): The list of DatapointsQuery objects specifying which
                                                                    timeseries to retrieve data for.

        aggregates (list, optional):    The list of aggregate functions you wish to apply to the data. Valid aggregate
                                        functions are: 'average/avg, max, min, count, sum, interpolation/int,
                                        stepinterpolation/step'.

        granularity (str):              The granularity of the aggregate values. Valid entries are : 'day/d, hour/h,
                                        minute/m, second/s', or a multiple of these indicated by a number as a prefix
                                        e.g. '12hour'.

        start (Union[str, int, datetime]):    Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
                                    E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
                                    epoch or a datetime object which will be converted to ms since epoch UTC.

        end (Union[str, int, datetime]):      Get datapoints up to this time. Same format as for start.

    Keyword Arguments:
        api_key (str):                  Your api-key.

        project (str):                  Project name.

    Returns:
        list(v05.dto.DatapointsResponse): A list of data objects containing the requested data with several getter methods
        with different output formats.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/timeseries/dataquery".format(project)
    start, end = _utils.interval_to_ms(start, end)

    num_of_dpqs_with_agg = 0
    num_of_dpqs_raw = 0
    for dpq in datapoints_queries:
        if (dpq.aggregates is None
                and aggregates is None) or dpq.aggregates == "":
            num_of_dpqs_raw += 1
        else:
            num_of_dpqs_with_agg += 1

    items = []
    for dpq in datapoints_queries:
        if dpq.aggregates is None and aggregates is None:
            dpq.limit = int(_constants.LIMIT / num_of_dpqs_raw)
        else:
            dpq.limit = int(_constants.LIMIT_AGG / num_of_dpqs_with_agg)
        items.append(dpq.__dict__)
    body = {
        "items": items,
        "aggregates": ",".join(aggregates) if aggregates is not None else None,
        "granularity": granularity,
        "start": start,
        "end": end,
    }
    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }
    datapoints_responses = []
    has_incomplete_requests = True
    while has_incomplete_requests:
        res = _utils.post_request(
            url=url, body=body, headers=headers,
            cookies=config.get_cookies()).json()["data"]["items"]
        datapoints_responses.append(res)
        has_incomplete_requests = False
        for i, dpr in enumerate(res):
            dpq = datapoints_queries[i]
            if len(dpr["datapoints"]) == dpq.limit:
                has_incomplete_requests = True
                latest_timestamp = dpr["datapoints"][-1]["timestamp"]
                ts_granularity = granularity if dpq.granularity is None else dpq.granularity
                next_start = latest_timestamp + (_utils.granularity_to_ms(
                    ts_granularity) if ts_granularity else 1)
            else:
                next_start = end - 1
                if datapoints_queries[i].end:
                    next_start = datapoints_queries[i].end - 1
            datapoints_queries[i].start = next_start

    results = [{
        "data": {
            "items": [{
                "name": dpq.name,
                "datapoints": []
            }]
        }
    } for dpq in datapoints_queries]
    for res in datapoints_responses:
        for i, ts in enumerate(res):
            results[i]["data"]["items"][0]["datapoints"].extend(
                ts["datapoints"])
    return DatapointsResponseIterator(
        [DatapointsResponse(result) for result in results])
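# Usage sketch; DatapointsQuery(name=..., aggregates=...) is an assumed constructor
# matching the attributes (name, aggregates, limit, start, end, granularity) accessed above.
queries = [DatapointsQuery(name="my-timeseries-1"),
           DatapointsQuery(name="my-timeseries-2", aggregates=["avg"])]
responses = get_multi_time_series_datapoints(queries, aggregates=["min"], granularity="1h",
                                             start="2d-ago", api_key="my-key",
                                             project="my-project")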
def do_post(chunk):
    body = {"items": [dp.__dict__ for dp in chunk]}
    return _utils.post_request(url,
                               body=body,
                               headers=headers,
                               session=session)
def post_depth_series(depth_series: List[TimeSeries], **kwargs):
    """Create a new depth series.

        Args:
            depth_series (list[v05.dto.TimeSeries]):   List of time series data transfer objects to create.
            Corresponding depth series used for indexing will be created automatically, with unit of m(meter)

        Keyword Args:
            api_key (str): Your api-key.

            project (str): Project name.
        Returns:
            An empty response.
        """

    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.5) + "/projects/{}/timeseries".format(project)
    depth_indexes = copy.deepcopy(depth_series)

    for ts in depth_indexes:
        ts.name = _generateIndexName(ts.name)
        ts.unit = "m"
        ts.isString = False

    body = {
        "items":
        [ts.__dict__ for ts in itertools.chain(depth_series, depth_indexes)]
    }

    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json"
    }
    retry_list: Set[str] = set()
    try:
        _utils.post_request(url, body=body, headers=headers)
    except _utils.APIError as e:
        # Are we getting this error because some metrics already exist? If so, then we still want to create the rest
        # First, try to post all the ones that do not exist in one go
        if "Some metrics already exist" in str(e):
            retry_list = _parse_exists_exception(str(e))
        else:
            raise e

    if len(retry_list) > 0:
        body = {
            "items": [
                ts.__dict__
                for ts in itertools.chain(depth_series, depth_indexes)
                if ts.name in retry_list
            ]
        }
        try:
            _utils.post_request(url, body=body, headers=headers)
        except _utils.APIError as e:
            # Are we getting this error because some metrics already exist? If so, then we still want to create the rest
            # OK, now try one by one...
            if "Some metrics already exist" in str(e):
                for ts in itertools.chain(depth_series, depth_indexes):
                    body = {"items": [ts.__dict__]}
                    try:
                        _utils.post_request(url, body=body, headers=headers)
                    except _utils.APIError as e:
                        if "Some metrics already exist" in str(e):
                            continue
                        else:
                            raise e
            else:
                raise e
    return {}
Example #25
def upload_file(file_name,
                file_path=None,
                directory=None,
                source=None,
                file_type=None,
                content_type=None,
                **kwargs):
    """Upload metadata about a file and get an upload link.

    The link will expire after 30 seconds if not resumable. A resumable upload link is default. Such a link is one-time
    use and expires after one week. For more information, check this link:
    https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload. Use a PUT request to upload the file
    with the returned link.

    If file_path is specified, the file will be uploaded directly by the SDK.

    Args:
        file_name (str):      File name. Max length is 256.

        file_path (str, optional):     Path of the file to upload; if omitted, an upload link will be returned.

        content_type (str, optional):   MIME type of your file. Required if file_path is specified.

        directory (str, optional):      Directory containing the file. Max length is 512.

        source (str, optional):         Source that this file comes from. Max length is 256.

        file_type (str, optional):      File type, e.g. pdf, css, spreadsheet. Max length is 64.

    Keyword Args:
        api_key (str, optional):        Your api-key.

        project (str, optional):        Project name.

        metadata (dict):      Customized data about the file.

        tagIds (list):        IDs of equipment related to this file.

        resumable (bool):     Whether to generate a resumable URL or not. Default is true.

        overwrite (bool):     Whether to overwrite existing data if duplicate or not. Default is false.

    Returns:
        dict: A dictionary containing the field fileId and optionally also uploadURL if file_path is omitted.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    url = config.get_base_url(
        api_version=0.4) + "/projects/{}/storage/metadata".format(project)

    headers = {
        "api-key": api_key,
        "content-type": "application/json",
        "accept": "application/json",
        "X-Upload-Content-Type": content_type,
    }

    params = {
        "resumable": kwargs.get("resumable", True),
        "overwrite": kwargs.get("overwrite", False)
    }

    body = {
        "fileName": file_name,
        "directory": directory,
        "source": source,
        "fileType": file_type,
        "metadata": kwargs.get("metadata", None),
        "tagIds": kwargs.get("tagIds", None),
    }
    res_storage = _utils.post_request(url=url,
                                      body=body,
                                      headers=headers,
                                      params=params,
                                      cookies=config.get_cookies())
    result = res_storage.json()["data"]
    if file_path:
        if not content_type:
            warning = "content_type should be specified when directly uploading the file."
            warnings.warn(warning)
        headers = {"content-length": str(os.path.getsize(file_path))}
        with open(file_path, "rb") as file:
            requests.put(result["uploadURL"], data=file, headers=headers)
        result.pop("uploadURL")
    return result
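# Usage sketch; file names and paths are made up. The "fileId"/"uploadURL" keys follow the
# docstring above.
import requests

# Let the SDK upload the file content directly:
res = upload_file("report.pdf", file_path="/tmp/report.pdf", content_type="application/pdf",
                  source="my-source", api_key="my-key", project="my-project")
file_id = res["fileId"]

# Or fetch an upload link only and PUT the content yourself:
res = upload_file("report.pdf", content_type="application/pdf",
                  api_key="my-key", project="my-project")
with open("/tmp/report.pdf", "rb") as f:
    requests.put(res["uploadURL"], data=f)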