Example #1
    def iter_json_pages(self, path, page_size=1000, **params):
        """Return an iterator over JSON items from a paginated resource

        Legacy resources (prior to V1) implemented a common paging interface for
        several different resources.  This method handles the details of iterating
        over the paged result set, yielding only the JSON data for each item
        within the aggregate resource.

        :param str path: The base path to the resource being requested (e.g. /ws/Group)
        :param int page_size: The number of items that should be requested for each page.  A larger
            page_size may mean fewer HTTP requests but could also increase the time to get a first
            result back from Device Cloud.
        :param params: These are additional query parameters that should be sent with each
            request to Device Cloud.

        """
        path = validate_type(path, *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)

        offset = 0
        remaining_size = 1  # just needs to be non-zero
        while remaining_size > 0:
            reqparams = {"start": offset, "size": page_size}
            reqparams.update(params)
            response = self.get_json(path, params=reqparams)
            offset += page_size
            remaining_size = int(response.get("remainingSize", "0"))
            for item_json in response.get("items", []):
                yield item_json
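A minimal usage sketch: assuming `conn` is the authenticated connection object that the API classes in the later examples hold as `self._conn`, the pager can be consumed directly. The `/ws/Group` path comes from the docstring above; the `grpName` field is illustrative.

    # Each iteration yields one JSON dict for a single item; the start/size
    # bookkeeping and the remainingSize check happen inside iter_json_pages.
    for group_json in conn.iter_json_pages("/ws/Group", page_size=500):
        print(group_json.get("grpName"))  # field name illustrative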
Example #2
    def get_filedata(self, condition=None, page_size=1000):
        """Return a generator over all results matching the provided condition

        :param condition: An :class:`.Expression` which defines the condition
            which must be matched on the filedata that will be retrieved from
            file data store. If a condition is unspecified, the following condition
            will be used ``fd_path == '~/'``.  This condition will match all file
            data in this account's "home" directory (a sensible root).
        :type condition: :class:`.Expression` or None
        :param int page_size: The number of results to fetch in a single page.  Regardless
            of the size specified, :meth:`.get_filedata` will continue to fetch pages
            and yield results until all items have been fetched.
        :return: Generator yielding :class:`.FileDataObject` instances matching the
            provided conditions.

        """

        condition = validate_type(condition, type(None), Expression, *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)
        if condition is None:
            condition = (fd_path == "~/")  # home directory

        params = {"embed": "true", "condition": condition.compile()}
        for fd_json in self._conn.iter_json_pages("/ws/FileData", page_size=page_size, **params):
            yield FileDataObject.from_json(self, fd_json)
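A short usage sketch, assuming a `DeviceCloud` connection object `dc` (as in the docstring examples later in this listing) with the filedata API reachable as `dc.filedata` (accessor name assumed), and the `fd_path` condition helper used above:

    from devicecloud.filedata import fd_path  # import path assumed

    # Lazily fetch everything under an illustrative subdirectory; each
    # yielded result is a FileDataObject built by from_json as shown above.
    for fd in dc.filedata.get_filedata(fd_path == "~/reports/"):
        print(fd.get_full_path())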
Example #3
    def get_filedata(self, condition=None, page_size=1000):
        """Return a generator over all results matching the provided condition

        :param condition: An :class:`.Expression` which defines the condition
            which must be matched on the filedata that will be retrieved from
            file data store. If a condition is unspecified, the following condition
            will be used ``fd_path == '~/'``.  This condition will match all file
            data in this account's "home" directory (a sensible root).
        :type condition: :class:`.Expression` or None
        :param int page_size: The number of results to fetch in a single page.  Regardless
            of the size specified, :meth:`.get_filedata` will continue to fetch pages
            and yield results until all items have been fetched.
        :return: Generator yielding :class:`.FileDataObject` instances matching the
            provided conditions.

        """

        condition = validate_type(condition, type(None), Expression,
                                  *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)
        if condition is None:
            condition = (fd_path == "~/")  # home directory

        params = {"embed": "true", "condition": condition.compile()}
        for fd_json in self._conn.iter_json_pages("/ws/FileData",
                                                  page_size=page_size,
                                                  **params):
            yield FileDataObject.from_json(self, fd_json)
Example #4
    def delete_datapoints_in_time_range(self, start_dt=None, end_dt=None):
        """Delete datapoints from this stream between the provided start and end times

        If neither a start nor an end time is specified, all data points in the stream
        will be deleted.

        :param start_dt: The datetime after which data points should be deleted or None
            if all data points from the beginning of time should be deleted.
        :param end_dt: The datetime before which data points should be deleted or None
            if all data points until the current time should be deleted.
        :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error

        """
        start_dt = to_none_or_dt(validate_type(start_dt, datetime.datetime, type(None)))
        end_dt = to_none_or_dt(validate_type(end_dt, datetime.datetime, type(None)))

        params = {}
        if start_dt is not None:
            params['startTime'] = isoformat(start_dt)
        if end_dt is not None:
            params['endTime'] = isoformat(end_dt)

        self._conn.delete("/ws/DataPoint/{stream_id}{querystring}".format(
            stream_id=self.get_stream_id(),
            querystring="?" + urllib.parse.urlencode(params) if params else "",
        ))
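A hedged sketch of pruning old data with this method, assuming the stream is obtained via `dc.streams.get_stream()` (accessor and method name assumed) and an illustrative stream id:

    import datetime

    stream = dc.streams.get_stream("temperature/office")  # stream id illustrative
    one_day_ago = datetime.datetime.utcnow() - datetime.timedelta(days=1)

    # Delete every point recorded more than 24 hours ago: start_dt=None means
    # "from the beginning of time", end_dt bounds the newer edge of the window.
    stream.delete_datapoints_in_time_range(end_dt=one_day_ago)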
Example #5
    def __init__(self, data, stream_id=None, description=None, timestamp=None,
                 quality=None, location=None, data_type=None, units=None, dp_id=None,
                 customer_id=None, server_timestamp=None):
        self._stream_id = None  # invariant: always string or None
        self._data = None  # invariant: could be any type, with conversion applied lazily
        self._description = None  # invariant: always string or None
        self._timestamp = None  # invariant: always datetime object or None
        self._quality = None  # invariant: always integer (32-bit) or None
        self._location = None  # invariant: 3-tuple<float> or None
        self._data_type = None  # invariant: always string in set of types or None
        self._units = None  # invariant: always string or None
        self._dp_id = None  # invariant: always string or None
        self._customer_id = None  # invariant: always string or None
        self._server_timestamp = None  # invariant: always None or datetime

        # all of these could be set via public API
        self.set_stream_id(stream_id)
        self.set_data(data)
        self.set_description(description)
        self.set_timestamp(timestamp)
        self.set_quality(quality)
        self.set_location(location)
        self.set_data_type(data_type)
        self.set_units(units)

        # these should only ever be read by the public API
        self._dp_id = validate_type(dp_id, type(None), *six.string_types)
        self._customer_id = validate_type(customer_id, type(None), *six.string_types)
        self._server_timestamp = to_none_or_dt(server_timestamp)
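A small sketch constructing a point with the fields handled by this constructor (the import path is assumed; the keyword arguments mirror the signature above and the stream id is illustrative):

    from devicecloud.streams import DataPoint  # import path assumed

    # Only `data` is required; every optional field is validated by the
    # corresponding setter shown elsewhere in these examples.
    dp = DataPoint(
        data=22.5,
        stream_id="temperature/office",  # illustrative stream id
        data_type="FLOAT",
        units="C",
        quality=99,
    )
    dp.set_description("office sensor reading")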
Example #6
    def iter_json_pages(self, path, page_size=1000, **params):
        """Return an iterator over JSON items from a paginated resource

        Legacy resources (prior to V1) implemented a common paging interface for
        several different resources.  This method handles the details of iterating
        over the paged result set, yielding only the JSON data for each item
        within the aggregate resource.

        :param str path: The base path to the resource being requested (e.g. /ws/Group)
        :param int page_size: The number of items that should be requested for each page.  A larger
            page_size may mean fewer HTTP requests but could also increase the time to get a first
            result back from the device cloud.
        :param params: These are additional query parameters that should be sent with each
            request to the device cloud.

        """
        path = validate_type(path, *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)

        offset = 0
        remaining_size = 1  # just needs to be non-zero
        while remaining_size > 0:
            reqparams = {"start": offset, "size": page_size}
            reqparams.update(params)
            response = self.get_json(path, params=reqparams)
            offset += page_size
            remaining_size = int(response.get("remainingSize", "0"))
            for item_json in response.get("items", []):
                yield item_json
Example #7
    def write_file(self,
                   path,
                   name,
                   data,
                   content_type=None,
                   archive=False,
                   raw=False):
        """Write a file to the file data store at the given path

        :param str path: The path (directory) into which the file should be written.
        :param str name: The name of the file to be written.
        :param data: The binary data that should be written into the file.
        :type data: str (Python2) or bytes (Python3)
        :param content_type: The content type for the data being written to the file.  May
             be left unspecified.
        :type content_type: str or None
        :param bool archive: If true, history will be retained for various revisions of this
            file.  If this is not required, leave as false.
        :param bool raw: If true, skip the FileData XML headers (necessary for binary files)

        """
        path = validate_type(path, *six.string_types)
        name = validate_type(name, *six.string_types)
        data = validate_type(data, six.binary_type)
        content_type = validate_type(content_type, type(None),
                                     *six.string_types)
        archive_str = "true" if validate_type(archive, bool) else "false"

        if not path.startswith("/"):
            path = "/" + path
        if not path.endswith("/"):
            path += "/"
        name = name.lstrip("/")

        sio = six.moves.StringIO()
        if not raw:
            if six.PY3:
                base64_encoded_data = base64.encodebytes(data).decode('utf-8')
            else:
                base64_encoded_data = base64.encodestring(data)

            sio.write("<FileData>")
            if content_type is not None:
                sio.write(
                    "<fdContentType>{}</fdContentType>".format(content_type))
            sio.write("<fdType>file</fdType>")
            sio.write("<fdData>{}</fdData>".format(base64_encoded_data))
            sio.write("<fdArchive>{}</fdArchive>".format(archive_str))
            sio.write("</FileData>")
        else:
            sio.write(data)

        params = {"type": "file", "archive": archive_str}
        self._conn.put("/ws/FileData{path}{name}".format(path=path, name=name),
                       sio.getvalue(),
                       params=params)
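A usage sketch for write_file, assuming the filedata API is reachable as `dc.filedata` (accessor name assumed). Note that `data` must be bytes, and that a missing leading slash on `path` is added automatically by the method:

    # Store a small JSON file with revision history enabled; the payload is
    # bytes because write_file validates it against six.binary_type.
    dc.filedata.write_file(
        path="~/config",                 # leading "/" is prepended automatically
        name="settings.json",
        data=b'{"interval": 60}',
        content_type="application/json",
        archive=True,                    # keep history for revisions of this file
    )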
Example #8
    def write_file(self, path, name, data, content_type=None, archive=False,
                   raw=False):
        """Write a file to the file data store at the given path

        :param str path: The path (directory) into which the file should be written.
        :param str name: The name of the file to be written.
        :param data: The binary data that should be written into the file.
        :type data: str (Python2) or bytes (Python3)
        :param content_type: The content type for the data being written to the file.  May
             be left unspecified.
        :type content_type: str or None
        :param bool archive: If true, history will be retained for various revisions of this
            file.  If this is not required, leave as false.
        :param bool raw: If true, skip the FileData XML headers (necessary for binary files)

        """
        path = validate_type(path, *six.string_types)
        name = validate_type(name, *six.string_types)
        data = validate_type(data, six.binary_type)
        content_type = validate_type(content_type, type(None), *six.string_types)
        archive_str = "true" if validate_type(archive, bool) else "false"

        if not path.startswith("/"):
            path = "/" + path
        if not path.endswith("/"):
            path += "/"
        name = name.lstrip("/")

        sio = six.moves.StringIO()
        if not raw:
            if six.PY3:
                base64_encoded_data = base64.encodebytes(data).decode('utf-8')
            else:
                base64_encoded_data = base64.encodestring(data)

            sio.write("<FileData>")
            if content_type is not None:
                sio.write("<fdContentType>{}</fdContentType>".format(content_type))
            sio.write("<fdType>file</fdType>")
            sio.write("<fdData>{}</fdData>".format(base64_encoded_data))
            sio.write("<fdArchive>{}</fdArchive>".format(archive_str))
            sio.write("</FileData>")
        else:
            sio.write(data)

        params = {
            "type": "file",
            "archive": archive_str
        }
        self._conn.put(
            "/ws/FileData{path}{name}".format(path=path, name=name),
            sio.getvalue(),
            params=params)
Example #9
    def walk(self, root="~/"):
        """Emulation of os.walk behavior against the device cloud filedata store

        This method will yield tuples in the form ``(dirpath, FileDataDirectory objects, FileData objects)``
        recursively in pre-order (depth first, from the top down).

        :param str root: The root path from which the search should commence.  By default, this
            is the root directory for this device cloud account (~).
        :return: Generator yielding 3-tuples of dirpath, directories, and files
        :rtype: 3-tuple in form (dirpath, list of :class:`FileDataDirectory`, list of :class:`FileDataFile`)

        """
        root = validate_type(root, *six.string_types)

        directories = []
        files = []

        # fd_path is picky: it must end with a trailing slash
        query_fd_path = root
        if not query_fd_path.endswith("/"):
            query_fd_path += "/"

        for fd_object in self.get_filedata(fd_path == query_fd_path):
            if fd_object.get_type() == "directory":
                directories.append(fd_object)
            else:
                files.append(fd_object)

        # Yield the walk results for this level of the tree
        yield (root, directories, files)

        # recurse on each directory and yield results up the chain
        for directory in directories:
            for dirpath, directories, files in self.walk(directory.get_full_path()):
                yield (dirpath, directories, files)
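A sketch that mirrors os.walk usage, again assuming the `dc.filedata` accessor:

    # Recursively list every file under the account home directory; each
    # iteration gives one directory level, just like os.walk.
    for dirpath, dirs, files in dc.filedata.walk("~/"):
        for f in files:
            print(f.get_full_path())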
Example #10
    def set_data_type(self, data_type):
        """Set the data type for ths data point

        The data type is actually associated with the stream itself and should
        not (generally) vary on a point-per-point basis.  That being said, if
        creating a new stream by writing a datapoint, it may be beneficial to
        include this information.

        The data type provided should be in the set of available data types of
        { INTEGER, LONG, FLOAT, DOUBLE, STRING, BINARY, UNKNOWN }.

        """
        validate_type(data_type, type(None), *six.string_types)
        if isinstance(data_type, *six.string_types):
            data_type = str(data_type).upper()
        if not data_type in ({None} | set(DSTREAM_TYPE_MAP.keys())):
            raise ValueError("Provided data type not in available set of types")
        self._data_type = data_type
Example #11
    def __init__(self, conn, stream_id, cached_data=None):
        if not isinstance(cached_data, (type(None), dict)):
            raise TypeError("cached_data should be dict or None")

        stream_id = validate_type(stream_id, *six.string_types).lstrip('/')

        self._conn = conn
        self._stream_id = stream_id  # Invariant: string with any leading '/' stripped
        self._cached_data = cached_data
Example #12
    def set_units(self, unit):
        """Set the unit for this data point

        Units, like data_type, are actually associated with the stream and not
        the individual data point.  As such, changing this within a stream is
        not encouraged.  Setting the unit on the data point is useful when the
        stream might be created with the write of a data point.

        """
        self._units = validate_type(unit, type(None), *six.string_types)
Example #13
    def delete_datapoint(self, datapoint):
        """Delete the provided datapoint from this stream

        :raises devicecloud.DeviceCloudHttpException: in the case of an unexpected http error

        """
        datapoint = validate_type(datapoint, DataPoint)
        self._conn.delete("/ws/DataPoint/{stream_id}/{datapoint_id}".format(
            stream_id=self.get_stream_id(),
            datapoint_id=datapoint.get_id(),
        ))
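A small sketch combining this with the `read()` generator shown in a later example, assuming `dc.streams.get_stream()` (accessor and method name assumed):

    stream = dc.streams.get_stream("temperature/office")  # stream id illustrative

    # read() yields newest points first by default, so the first item from the
    # generator is the most recent point; delete just that one.
    newest = next(stream.read())
    stream.delete_datapoint(newest)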
Example #14
    def get_filedata(self, condition=None, page_size=1000):
        """Return a generator over all results matching the provided condition

        :param condition: An :class:`.Expression` which defines the condition
            which must be matched on the filedata that will be retrieved from
            file data store. If a condition is unspecified, the following condition
            will be used ``fd_path == '~/'``.  This condition will match all file
            data in this account's "home" directory (a sensible root).
        :type condition: :class:`.Expression` or None
        :param int page_size: The number of results to fetch in a single page.  Regardless
            of the size specified, :meth:`.get_filedata` will continue to fetch pages
            and yield results until all items have been fetched.
        :return: Generator yielding :class:`.FileDataObject` instances matching the
            provided conditions.

        """

        condition = validate_type(condition, type(None), Expression, *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)
        offset = 0
        remaining_size = 1  # just needs to be non-zero

        if condition is None:
            condition = (fd_path == "~/")  # home directory

        while remaining_size > 0:
            response = self._conn.get_json(
                "/ws/FileData?embed=true"
                "&start={offset}"
                "&size={page_size}"
                "&condition={condition}".format(
                    condition=condition.compile(),
                    page_size=page_size,
                    offset=offset))

            offset += page_size
            remaining_size = int(response.get("remainingSize", "0"))
            for fd_json in response.get("items", []):
                yield FileDataObject.from_json(self, fd_json)
Example #15
    def get_devices(self, condition=None, page_size=1000):
        """Iterates over each :class:`Device` for this device cloud account

        Examples::

            # get a list of all devices
            all_devices = list(dc.devicecore.get_devices())

            # build a mapping of devices by their vendor id using a
            # dict comprehension
            devices = dc.devicecore.get_devices()  # generator object
            devs_by_vendor_id = {d.get_vendor_id(): d for d in devices}

            # iterate over all devices in 'minnesota' group and
            # print the device mac and location
            for device in dc.get_devices(group_path == 'minnesota'):
                print "%s at %s" % (device.get_mac(), device.get_location())

        :param condition: An :class:`.Expression` which defines the condition
            which must be matched on the devicecore.  If unspecified,
            an iterator over all devices will be returned.
        :param int page_size: The number of results to fetch in a
            single page.  In general, the default will suffice.
        :returns: Iterator over each :class:`~Device` in this device cloud
            account in the form of a generator object.
        """

        condition = validate_type(condition, type(None), Expression,
                                  *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)

        params = {"embed": "true"}
        if condition is not None:
            params["condition"] = condition.compile()

        for device_json in self._conn.iter_json_pages("/ws/DeviceCore",
                                                      page_size=page_size,
                                                      **params):
            yield Device(self._conn, self._sci, device_json)
Example #16
    def get_group_tree_root(self, page_size=1000):
        r"""Return the root group for this accounts' group tree

        This will return the root group for this tree but with all links
        between nodes (i.e. children starting from root) populated.

        Examples::

            # print the group hierarchy to stdout
            dc.devicecore.get_group_tree_root().print_subtree()

            # gather statistics about devices in each group including
            # the count from its subgroups (recursively)
            #
            # This also shows how you can go from a group reference to devices
            # for that particular group.
            stats = {}  # group -> devices count including children
            def count_nodes(group):
                count_for_this_node = \
                    len(list(dc.devicecore.get_devices(group_path == group.get_path())))
                subnode_count = 0
                for child in group.get_children():
                    subnode_count += count_nodes(child)
                total = count_for_this_node + subnode_count
                stats[group] = total
                return total
            count_nodes(dc.devicecore.get_group_tree_root())

        :param int page_size: The number of results to fetch in a
            single page.  In general, the default will suffice.
        :returns: The root group for this device cloud account's group
            hierarchy.

        """

        # first pass, build mapping
        group_map = {}  # map id -> group
        page_size = validate_type(page_size, *six.integer_types)
        for group in self.get_groups(page_size=page_size):
            group_map[group.get_id()] = group

        # second pass, find root and populate list of children for each node
        root = None
        for group_id, group in group_map.items():
            if group.is_root():
                root = group
            else:
                parent = group_map[group.get_parent_id()]
                parent.add_child(group)
        return root
Example #18
    def set_quality(self, quality):
        """Set the quality for this sample

        Quality is stored on the device cloud as a 32-bit integer, so the input
        to this function should be either None, an integer, or a string that can
        be converted to an integer.

        """
        if isinstance(quality, *six.string_types):
            quality = int(quality)
        elif isinstance(quality, float):
            quality = int(quality)

        self._quality = validate_type(quality, type(None), *six.integer_types)
Example #19
    def delete_file(self, path):
        """Delete a file or directory from the filedata store

        This method removes a file or directory (recursively) from
        the filedata store.

        :param path: The path of the file or directory to remove
            from the file data store.

        """
        path = validate_type(path, *six.string_types)
        if not path.startswith("/"):
            path = "/" + path

        self._conn.delete("/ws/FileData{path}".format(path=path))
Example #21
    def get_devices(self, condition=None, page_size=1000):
        """Iterates over each :class:`Device` for this device cloud account

        Examples::

            # get a list of all devices
            all_devices = list(dc.devicecore.get_devices())

            # build a mapping of devices by their vendor id using a
            # dict comprehension
            devices = dc.devicecore.get_devices()  # generator object
            devs_by_vendor_id = {d.get_vendor_id(): d for d in devices}

            # iterate over all devices in 'minnesota' group and
            # print the device mac and location
            for device in dc.get_devices(group_path == 'minnesota'):
                print "%s at %s" % (device.get_mac(), device.get_location())

        :param condition: An :class:`.Expression` which defines the condition
            which must be matched on the devicecore.  If unspecified,
            an iterator over all devices will be returned.
        :param int page_size: The number of results to fetch in a
            single page.  In general, the default will suffice.
        :returns: Iterator over each :class:`~Device` in this device cloud
            account in the form of a generator object.
        """

        condition = validate_type(condition, type(None), Expression, *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)

        params = {"embed": "true"}
        if condition is not None:
            params["condition"] = condition.compile()

        for device_json in self._conn.iter_json_pages("/ws/DeviceCore", page_size=page_size, **params):
            yield Device(self._conn, self._sci, device_json)
Example #22
    def create_stream(self,
                      stream_id,
                      data_type,
                      description=None,
                      data_ttl=None,
                      rollup_ttl=None,
                      units=None):
        """Create a new data stream on the device cloud

        This method will attempt to create a new data stream on the device cloud.
        This method will only succeed if the stream does not already exist.

        :param str stream_id: The path/id of the stream being created on the device cloud.
        :param str data_type: The type of this stream.  This must be in the set
            `{ INTEGER, LONG, FLOAT, DOUBLE, STRING, BINARY, UNKNOWN }`.  These values are
            available in constants like :attr:`~STREAM_TYPE_INTEGER`.
        :param str description: An optional description of this stream. See :meth:`~DataStream.get_description`.
        :param int data_ttl: The TTL for data points in this stream. See :meth:`~DataStream.get_data_ttl`.
        :param int rollup_ttl: The TTL for performing rollups on data. See :meth:`~DataStream.get_rollup_ttl`.
        :param str units: Units for data in this stream.  See :meth:`~DataStream.get_units`.

        """

        stream_id = validate_type(stream_id, *six.string_types)
        data_type = validate_type(data_type, type(None), *six.string_types)
        if isinstance(data_type, *six.string_types):
            data_type = str(data_type).upper()
        if data_type not in ({None} | set(DSTREAM_TYPE_MAP.keys())):
            raise ValueError("data_type %r is not valid" % data_type)
        description = validate_type(description, type(None), *six.string_types)
        data_ttl = validate_type(data_ttl, type(None), *six.integer_types)
        rollup_ttl = validate_type(rollup_ttl, type(None), *six.integer_types)
        units = validate_type(units, type(None), *six.string_types)

        sio = StringIO()
        sio.write("<DataStream>")
        conditional_write(sio, "<streamId>{}</streamId>", stream_id)
        conditional_write(sio, "<dataType>{}</dataType>", data_type)
        conditional_write(sio, "<description>{}</description>", description)
        conditional_write(sio, "<dataTtl>{}</dataTtl>", data_ttl)
        conditional_write(sio, "<rollupTtl>{}</rollupTtl>", rollup_ttl)
        conditional_write(sio, "<units>{}</units>", units)
        sio.write("</DataStream>")

        self._conn.post("/ws/DataStream", sio.getvalue())
        logger.info("Data stream (%s) created successfully", stream_id)
        stream = DataStream(self._conn, stream_id)
        return stream
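A sketch of creating a stream with the constants referenced in the docstring (the `STREAM_TYPE_FLOAT` import path is assumed; the stream id and TTL values are illustrative):

    from devicecloud.streams import STREAM_TYPE_FLOAT  # import path assumed

    stream = dc.streams.create_stream(
        stream_id="temperature/office",   # illustrative path/id
        data_type=STREAM_TYPE_FLOAT,
        description="Office temperature in degrees C",
        units="C",
        data_ttl=86400 * 30,      # TTL values assumed to be seconds (~30 days)
        rollup_ttl=86400 * 365,   # ~1 year of rollups (assumed unit: seconds)
    )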
Example #23
    def create_stream(self, stream_id, data_type, description=None, data_ttl=None,
                      rollup_ttl=None, units=None):
        """Create a new data stream on the device cloud

        This method will attempt to create a new data stream on the device cloud.
        This method will only succeed if the stream does not already exist.

        :param str stream_id: The path/id of the stream being created on the device cloud.
        :param str data_type: The type of this stream.  This must be in the set
            `{ INTEGER, LONG, FLOAT, DOUBLE, STRING, BINARY, UNKNOWN }`.  These values are
            available in constants like :attr:`~STREAM_TYPE_INTEGER`.
        :param str description: An optional description of this stream. See :meth:`~DataStream.get_description`.
        :param int data_ttl: The TTL for data points in this stream. See :meth:`~DataStream.get_data_ttl`.
        :param int rollup_ttl: The TTL for performing rollups on data. See :meth:`~DataStream.get_rollup_ttl`.
        :param str units: Units for data in this stream.  See :meth:`~DataStream.get_units`.

        """

        stream_id = validate_type(stream_id, *six.string_types)
        data_type = validate_type(data_type, type(None), *six.string_types)
        if isinstance(data_type, *six.string_types):
            data_type = str(data_type).upper()
        if not data_type in (set([None, ]) | set(list(DSTREAM_TYPE_MAP.keys()))):
            raise ValueError("data_type %r is not valid" % data_type)
        description = validate_type(description, type(None), *six.string_types)
        data_ttl = validate_type(data_ttl, type(None), *six.integer_types)
        rollup_ttl = validate_type(rollup_ttl, type(None), *six.integer_types)
        units = validate_type(units, type(None), *six.string_types)

        sio = StringIO()
        sio.write("<DataStream>")
        conditional_write(sio, "<streamId>{}</streamId>", stream_id)
        conditional_write(sio, "<dataType>{}</dataType>", data_type)
        conditional_write(sio, "<description>{}</description>", description)
        conditional_write(sio, "<dataTtl>{}</dataTtl>", data_ttl)
        conditional_write(sio, "<rollupTtl>{}</rollupTtl>", rollup_ttl)
        conditional_write(sio, "<units>{}</units>", units)
        sio.write("</DataStream>")

        self._conn.post("/ws/DataStream", sio.getvalue())
        logger.info("Data stream (%s) created successfully", stream_id)
        stream = DataStream(self._conn, stream_id)
        return stream
Example #24
    def read(self, start_time=None, end_time=None, use_client_timeline=True, newest_first=True,
             rollup_interval=None, rollup_method=None, timezone=None, page_size=1000):
        """Read one or more DataPoints from a stream

        .. warning::
           The data points from the device cloud are a paged data set.  When iterating over the
           result set there could be delays when we hit the end of a page.  If this is undesirable,
           the caller should collect all results into a data structure first before iterating over
           the result set.

        :param start_time: The start time for the window of data points to read.  None means
            that we should start with the oldest data available.
        :type start_time: :class:`datetime.datetime` or None
        :param end_time: The end time for the window of data points to read.  None means
            that we should include all points received until this point in time.
        :type end_time: :class:`datetime.datetime` or None
        :param bool use_client_timeline: If True, the times used will be those provided by
              clients writing data points into the cloud (which also default to server time
              if a timestamp was not included by the client).  This is usually what you
              want.  If False, the server timestamp will be used which records when the data
              point was received.
        :param bool newest_first: If True, results will be ordered from newest to oldest (descending order).
            If False, results will be returned oldest to newest.
        :param rollup_interval: the roll-up interval that should be used if one is desired at all.  Rollups
            will not be performed if None is specified for the interval.  Valid roll-up interval values
            are None, "half", "hourly", "day", "week", and "month".  See `DataPoints documentation
            <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_
            for additional details on these values.
        :type rollup_interval: str or None
        :param rollup_method: The aggregation applied to values in the points within the specified
            rollup_interval.  Available methods are None, "sum", "average", "min", "max", "count", and
            "standarddev".  See `DataPoint documentation
            <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#DataPoints>`_
            for additional details on these values.
        :type rollup_method: str or None
        :param timezone: timezone for calculating roll-ups. This determines roll-up interval
            boundaries and only applies to roll-ups of a day or larger (for example, day,
            week, or month). Note that it does not apply to the startTime and endTime parameters.
            See the `Timestamps <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#timestamp>`_
            and `Supported Time Zones <http://ftp1.digi.com/support/documentation/html/90002008/90002008_P/Default.htm#ProgrammingTopics/DataStreams.htm#TimeZones>`_
            sections for more information.
        :type timezone: str or None
        :param int page_size: The number of results that we should attempt to retrieve from the
            device cloud in each page.  Generally, this can be left at its default value unless
            you have a good reason to change the parameter for performance reasons.
        :returns: A generator object which one can iterate over the DataPoints read.

        """

        is_rollup = False
        if (rollup_interval is not None) or (rollup_method is not None):
            is_rollup = True
            numeric_types = [
                STREAM_TYPE_INTEGER,
                STREAM_TYPE_LONG,
                STREAM_TYPE_FLOAT,
                STREAM_TYPE_DOUBLE,
                STREAM_TYPE_STRING,
                STREAM_TYPE_BINARY,
                STREAM_TYPE_UNKNOWN,
            ]

            if self.get_data_type(use_cached=True) not in numeric_types:
                raise InvalidRollupDatatype('Rollups only support numerical DataPoints')

        # Validate function inputs
        start_time = to_none_or_dt(validate_type(start_time, datetime.datetime, type(None)))
        end_time = to_none_or_dt(validate_type(end_time, datetime.datetime, type(None)))
        use_client_timeline = validate_type(use_client_timeline, bool)
        newest_first = validate_type(newest_first, bool)
        rollup_interval = validate_type(rollup_interval, type(None), *six.string_types)
        if not rollup_interval in {None,
                                   ROLLUP_INTERVAL_HALF,
                                   ROLLUP_INTERVAL_HOUR,
                                   ROLLUP_INTERVAL_DAY,
                                   ROLLUP_INTERVAL_WEEK,
                                   ROLLUP_INTERVAL_MONTH, }:
            raise ValueError("Invalid rollup_interval %r provided" % (rollup_interval, ))
        rollup_method = validate_type(rollup_method, type(None), *six.string_types)
        if not rollup_method in {None,
                                 ROLLUP_METHOD_SUM,
                                 ROLLUP_METHOD_AVERAGE,
                                 ROLLUP_METHOD_MIN,
                                 ROLLUP_METHOD_MAX,
                                 ROLLUP_METHOD_COUNT,
                                 ROLLUP_METHOD_STDDEV}:
            raise ValueError("Invalid rollup_method %r provided" % (rollup_method, ))
        timezone = validate_type(timezone, type(None), *six.string_types)
        page_size = validate_type(page_size, *six.integer_types)

        # Remember that there could be multiple pages of data and we want to provide
        # an iterator over the result set.  To start the process out, we need to make
        # an initial request without a page cursor.  We should get one in response to
        # our first request which we will use to page through the result set
        query_parameters = {
            'timeline': 'client' if use_client_timeline else 'server',
            'order': 'descending' if newest_first else 'ascending',
            'size': page_size
        }
        if start_time is not None:
            query_parameters["startTime"] = isoformat(start_time)
        if end_time is not None:
            query_parameters["endTime"] = isoformat(end_time)
        if rollup_interval is not None:
            query_parameters["rollupInterval"] = rollup_interval
        if rollup_method is not None:
            query_parameters["rollupMethod"] = rollup_method
        if timezone is not None:
            query_parameters["timezone"] = timezone

        result_size = page_size
        while result_size == page_size:
            # request the next page of data or first if pageCursor is not set as query param
            try:
                result = self._conn.get_json("/ws/DataPoint/{stream_id}?{query_params}".format(
                    stream_id=self.get_stream_id(),
                    query_params=urllib.parse.urlencode(query_parameters)
                ))
            except DeviceCloudHttpException as http_exception:
                if http_exception.response.status_code == 404:
                    raise NoSuchStreamException()
                raise http_exception

            result_size = int(result["resultSize"])  # how many are actually included here?
            query_parameters["pageCursor"] = result.get("pageCursor")  # will not be present if result set is empty
            for item_info in result.get("items", []):
                if is_rollup:
                    data_point = DataPoint.from_rollup_json(self, item_info)
                else:
                    data_point = DataPoint.from_json(self, item_info)
                yield data_point
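A usage sketch for read(), assuming `dc.streams.get_stream()` (accessor and method name assumed) and a numeric stream; `get_data()` is assumed as the getter counterpart of the `set_data()` used by the constructor. Note the warning above about paging delays when iterating lazily.

    import datetime

    stream = dc.streams.get_stream("temperature/office")  # stream id illustrative
    since = datetime.datetime.utcnow() - datetime.timedelta(hours=6)

    # Hourly averages over the last six hours, oldest first.
    for dp in stream.read(start_time=since,
                          newest_first=False,
                          rollup_interval="hourly",
                          rollup_method="average"):
        print(dp.get_data())  # getter name assumed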
Example #25
    def set_description(self, description):
        """Set the description for this data point"""
        self._description = validate_type(description, type(None), *six.string_types)
Example #26
    def set_stream_id(self, stream_id):
        """Set the stream id associated with this data point"""
        stream_id = validate_type(stream_id, type(None), *six.string_types)
        if stream_id is not None:
            stream_id = stream_id.lstrip('/')
        self._stream_id = stream_id