Example #1
def get_datapoints(name,
                   aggregates=None,
                   granularity=None,
                   start=None,
                   end=None,
                   **kwargs):
    """Returns a DatapointsObject containing a list of datapoints for the given query.

    This method will automate paging for the user and return all data for the given time period.

    Args:
        name (str):             The name of the timeseries to retrieve data for.

        aggregates (list):      The list of aggregate functions you wish to apply to the data. Valid aggregate functions
                                are: 'average/avg, max, min, count, sum, interpolation/int, stepinterpolation/step'.

        granularity (str):      The granularity of the aggregate values. Valid entries are: 'day/d, hour/h, minute/m,
                                second/s', or a multiple of these prefixed with a number, e.g. '12hour'.

        start (Union[str, int, datetime]):    Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
                                    E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
                                    epoch or a datetime object which will be converted to ms since epoch UTC.

        end (Union[str, int, datetime]):      Get datapoints up to this time. Same format as for start.

    Keyword Arguments:
        protobuf (bool):        Download the data using the binary protobuf format. Only applicable when getting raw data.
                                Defaults to True.

        processes (int):        Number of download processes to run in parallel. Defaults to the number returned by os.cpu_count().

        api_key (str):          Your api-key.

        project (str):          Project name.

        limit (str):            Max number of datapoints to return. If limit is specified, this method will not automate
                                paging and will return a maximum of 100,000 dps.

    Returns:
        v05.dto.DatapointsResponse: A data object containing the requested data with several getter methods with different
        output formats.
    """
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    start, end = _utils.interval_to_ms(start, end)

    if kwargs.get("limit"):
        return _get_datapoints_user_defined_limit(
            name,
            aggregates,
            granularity,
            start,
            end,
            limit=kwargs.get("limit"),
            protobuf=kwargs.get("protobuf"),
            api_key=api_key,
            project=project,
        )

    diff = end - start
    num_of_processes = kwargs.get("processes", os.cpu_count())

    granularity_ms = 1
    if granularity:
        granularity_ms = _utils.granularity_to_ms(granularity)

    # Ensure that the number of steps is not greater than the number of data points that will be returned
    steps = min(num_of_processes, max(1, int(diff / granularity_ms)))
    # Make step size a multiple of the granularity requested in order to ensure evenly spaced results
    step_size = _utils.round_to_nearest(int(diff / steps), base=granularity_ms)
    # Create list of where each of the parallelized intervals will begin
    step_starts = [start + (i * step_size) for i in range(steps)]
    args = [{
        "start": start,
        "end": start + step_size
    } for start in step_starts]

    partial_get_dps = partial(
        _get_datapoints_helper_wrapper,
        name=name,
        aggregates=aggregates,
        granularity=granularity,
        protobuf=kwargs.get("protobuf", True),
        api_key=api_key,
        project=project,
    )

    if steps == 1:
        dps = _get_datapoints_helper(
            name,
            aggregates,
            granularity,
            start,
            end,
            protobuf=kwargs.get("protobuf", True),
            api_key=api_key,
            project=project,
        )
        return DatapointsResponse(
            {"data": {
                "items": [{
                    "name": name,
                    "datapoints": dps
                }]
            }})

    with Pool(steps) as p:
        datapoints = p.map(partial_get_dps, args)

    concat_dps = []
    for dps in datapoints:
        concat_dps.extend(dps)

    return DatapointsResponse(
        {"data": {
            "items": [{
                "name": name,
                "datapoints": concat_dps
            }]
        }})
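
A minimal usage sketch for the function above. The timeseries name is hypothetical, and the to_pandas() call is an assumption based on the docstring's mention of "several getter methods with different output formats":

res = get_datapoints(
    "my_timeseries",            # hypothetical timeseries name
    aggregates=["avg", "max"],
    granularity="1h",
    start="2d-ago",             # everything up to 2 days old (N[timeunit]-ago format)
    processes=4,                # run four download processes in parallel
)
df = res.to_pandas()            # assumed getter; the docstring only promises "several getter methods"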
Example #2
def get_datapoints_frame(time_series,
                         aggregates,
                         granularity,
                         start=None,
                         end=None,
                         **kwargs):
    """Returns a pandas dataframe of datapoints for the given timeseries all on the same timestamps.

    This method will automate paging for the user and return all data for the given time period.

    Args:
        time_series (list):  The list of timeseries names to retrieve data for. Each timeseries can be either a string
                            containing the timeseries name or a dictionary containing the name of the timeseries and a
                            list of specific aggregate functions.

        aggregates (list):  The list of aggregate functions you wish to apply to the data for which you have not
                            specified an aggregate function. Valid aggregate functions are: 'average/avg, max, min,
                            count, sum, interpolation/int, stepinterpolation/step'.

        granularity (str):  The granularity of the aggregate values. Valid entries are: 'day/d, hour/h, minute/m,
                            second/s', or a multiple of these prefixed with a number, e.g. '12hour'.

        start (Union[str, int, datetime]):    Get datapoints after this time. Format is N[timeunit]-ago where timeunit is w,d,h,m,s.
                                    E.g. '2d-ago' will get everything that is up to 2 days old. Can also send time in ms since
                                    epoch or a datetime object which will be converted to ms since epoch UTC.

        end (Union[str, int, datetime]):      Get datapoints up to this time. Same format as for start.

    Keyword Arguments:
        api_key (str): Your api-key.

        project (str): Project name.

        cookies (dict): Cookies.

        limit (str): Max number of rows to return. If limit is specified, this method will not automate
                        paging and will return a maximum of 100,000 rows.

        processes (int):    Number of download processes to run in parallel. Defaults to the number returned by os.cpu_count().

    Returns:
        pandas.DataFrame: A pandas dataframe containing the datapoints for the given timeseries. The datapoints for all
        the timeseries will be aligned on the same timestamps.

    Note:
        The ``time_series`` parameter can take a list of strings and/or dicts in the following formats::

            Using strings:
                ['<timeseries1>', '<timeseries2>']

            Using dicts:
                [{'name': '<timeseries1>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']},
                {'name': '<timeseries2>', 'aggregates': []}]

            Using both:
                ['<timeseries1>', {'name': '<timeseries2>', 'aggregates': ['<aggfunc1>', '<aggfunc2>']}]
    """
    if not isinstance(time_series, list):
        raise _utils.InputError("time_series should be a list")
    api_key, project = config.get_config_variables(kwargs.get("api_key"),
                                                   kwargs.get("project"))
    cookies = config.get_cookies(kwargs.get("cookies"))
    start, end = _utils.interval_to_ms(start, end)

    if kwargs.get("limit"):
        return _get_datapoints_frame_user_defined_limit(
            time_series,
            aggregates,
            granularity,
            start,
            end,
            limit=kwargs.get("limit"),
            api_key=api_key,
            project=project,
            cookies=cookies,
        )

    diff = end - start
    num_of_processes = kwargs.get("processes") or os.cpu_count()

    granularity_ms = 1
    if granularity:
        granularity_ms = _utils.granularity_to_ms(granularity)

    # Ensure that the number of steps is not greater than the number of data points that will be returned
    steps = min(num_of_processes, max(1, int(diff / granularity_ms)))
    # Make step size a multiple of the granularity requested in order to ensure evenly spaced results
    step_size = _utils.round_to_nearest(int(diff / steps), base=granularity_ms)
    # Create list of where each of the parallelized intervals will begin
    step_starts = [start + (i * step_size) for i in range(steps)]
    args = [{
        "start": start,
        "end": start + step_size
    } for start in step_starts]

    partial_get_dpsf = partial(
        _get_datapoints_frame_helper_wrapper,
        time_series=time_series,
        aggregates=aggregates,
        granularity=granularity,
        api_key=api_key,
        project=project,
        cookies=cookies,
    )

    if steps == 1:
        return _get_datapoints_frame_helper(time_series,
                                            aggregates,
                                            granularity,
                                            start,
                                            end,
                                            api_key=api_key,
                                            project=project,
                                            cookies=cookies)

    with Pool(steps) as p:
        dataframes = p.map(partial_get_dpsf, args)

    df = pd.concat(dataframes).drop_duplicates(subset="timestamp").reset_index(
        drop=True)

    return df
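
A minimal usage sketch for get_datapoints_frame, mixing the string and dict forms described in the Note above; the timeseries names are hypothetical:

df = get_datapoints_frame(
    ["sensor_a",                                    # string form: uses the default aggregates below
     {"name": "sensor_b", "aggregates": ["min"]}],  # dict form: its own aggregate list
    aggregates=["avg"],
    granularity="10m",
    start="1w-ago",
    processes=2,
)
print(df.head())  # the frame carries a 'timestamp' column, as the drop_duplicates(subset="timestamp") call above implies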
Example #3
def test_round_to_nearest(self):
    assert utils.round_to_nearest(12, 10) == 10
    assert utils.round_to_nearest(8, 10) == 10
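
Both assertions are consistent with rounding to the nearest multiple of base; a minimal sketch of such an implementation (not necessarily the actual utils code) is:

def round_to_nearest(x, base=1):
    # Round x to the nearest multiple of base, e.g. 12 -> 10 and 8 -> 10 for base=10.
    return int(base * round(float(x) / base))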