def list_packet_histogram(self, name=None, start=None, stop=None, merge_time=2):
    """
    Reads packet-related index records between the specified start and stop
    time.

    Each iteration returns a chunk of chronologically-sorted records.

    :param float merge_time: Maximum gap in seconds before two consecutive
                             index records are merged together.
    :rtype: ~collections.abc.Iterable[.IndexGroup]
    """
    params = {}
    if name is not None:
        params["name"] = name
    for key, timestamp in (("start", start), ("stop", stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    if merge_time is not None:
        # The server expects milliseconds
        params["mergeTime"] = int(merge_time * 1000)
    return pagination.Iterator(
        ctx=self.ctx,
        path=f"/archive/{self._instance}/packet-index",
        params=params,
        response_class=index_service_pb2.IndexResponse,
        items_key="group",
        item_mapper=IndexGroup,
    )
def export_packets(self, name=None, start=None, stop=None, chunk_size=1024):
    """
    Export raw packets.

    Packets are sorted by generation time and sequence number.

    :param str name: Archived name of the packet
    :param ~datetime.datetime start: Minimum generation time of the returned
                                     packets (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    packets (exclusive)
    :rtype: An iterator over received chunks
    """
    params = {}
    if name is not None:
        params["name"] = name
    for key, timestamp in (("start", start), ("stop", stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    response = self.ctx.get_proto(
        path=f"/archive/{self._instance}:exportPackets",
        params=params,
        stream=True,
    )
    return response.iter_content(chunk_size=chunk_size)
def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20):
    """
    Reads index records related to processed parameter groups between the
    specified start and stop time.

    Each iteration returns a chunk of chronologically-sorted records.

    :param float merge_time: Maximum gap in seconds before two consecutive
                             index records are merged together.
    :rtype: ~collections.Iterable[.IndexGroup]
    """
    params = {}
    if group is not None:
        params['group'] = group
    if merge_time is not None:
        # The server expects milliseconds
        params['mergeTime'] = int(merge_time * 1000)
    for key, timestamp in (('start', start), ('stop', stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    path = '/archive/{}/parameter-index'.format(self._instance)
    return pagination.Iterator(
        client=self._client,
        path=path,
        params=params,
        response_class=archive_pb2.IndexResponse,
        items_key='group',
        item_mapper=IndexGroup,
    )
def list_alarms(self, start=None, stop=None):
    """
    Lists the active alarms.

    Remark that this does not query the archive. Only active alarms on the
    current processor are returned.

    :param ~datetime.datetime start: Minimum trigger time of the returned
                                     alarms (inclusive)
    :param ~datetime.datetime stop: Maximum trigger time of the returned
                                    alarms (exclusive)
    :rtype: ~collections.Iterable[.Alarm]
    """
    # TODO implement continuation token on server
    params = {"order": "asc"}
    for key, timestamp in (("start", start), ("stop", stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    # Server does not do pagination on listings of this resource.
    # Return an iterator anyway for similarity with other API methods
    url = "/processors/{}/{}/alarms".format(self._instance, self._processor)
    response = self.ctx.get_proto(path=url, params=params)
    message = alarms_service_pb2.ListAlarmsResponse()
    message.ParseFromString(response.content)
    return iter([_parse_alarm(proto) for proto in message.alarms])
def list_parameter_values(self, parameter, start=None, stop=None,
                          page_size=500, descending=False,
                          parameter_cache='realtime',
                          source='ParameterArchive'):
    """
    Reads parameter values between the specified start and stop time.

    :param str parameter: Either a fully-qualified XTCE name or an alias in
                          the format ``NAMESPACE/NAME``.
    :param ~datetime.datetime start: Minimum generation time of the returned
                                     values (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    values (exclusive)
    :param int page_size: Page size of underlying requests. Higher values
                          imply less overhead, but risk hitting the maximum
                          message size limit.
    :param bool descending: If set to ``True`` values are fetched in reverse
                            order (most recent first).
    :param str parameter_cache: Specify the name of the processor whose
                                parameter cache is merged with already
                                archived values. To disable results from the
                                parameter cache, set this to ``None``.
    :param str source: Specify how to retrieve parameter values. By default
                       this uses the ``ParameterArchive`` which is optimized
                       for retrieval. For Yamcs instances that do not enable
                       the ``ParameterArchive``, you can still get results
                       by specifying ``replay`` as the source. Replay
                       requests take longer to return because the data needs
                       to be reprocessed.
    :rtype: ~collections.Iterable[.ParameterValue]
    """
    params = {
        'source': source,
        'order': 'desc' if descending else 'asc',
    }
    if page_size is not None:
        params['limit'] = page_size
    for key, timestamp in (('start', start), ('stop', stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    if parameter_cache:
        params['processor'] = parameter_cache
    else:
        # Explicitly exclude results from the realtime cache
        params['norealtime'] = True
    path = '/archive/{}/parameters{}'.format(self._instance, parameter)
    return pagination.Iterator(
        client=self._client,
        path=path,
        params=params,
        response_class=archive_pb2.ListParameterValuesResponse,
        items_key='parameter',
        item_mapper=ParameterValue,
    )
def list_events(
    self,
    source=None,
    severity=None,
    text_filter=None,
    start=None,
    stop=None,
    page_size=500,
    descending=False,
):
    """
    Reads events between the specified start and stop time.

    Events are sorted by generation time, source, then sequence number.

    :param str source: The source of the returned events.
    :param str severity: The minimum severity level of the returned events.
                         One of ``INFO``, ``WATCH``, ``WARNING``,
                         ``DISTRESS``, ``CRITICAL`` or ``SEVERE``.
    :param str text_filter: Filter the text message of the returned events
    :param ~datetime.datetime start: Minimum start date of the returned
                                     events (inclusive)
    :param ~datetime.datetime stop: Maximum start date of the returned
                                    events (exclusive)
    :param int page_size: Page size of underlying requests. Higher values
                          imply less overhead, but risk hitting the maximum
                          message size limit.
    :param bool descending: If set to ``True`` events are fetched in
                            reverse order (most recent first).
    :rtype: ~collections.abc.Iterable[.Event]
    """
    params = {"order": "desc" if descending else "asc"}
    optional = {
        "source": source,
        "limit": page_size,
        "severity": severity,
        "start": to_isostring(start) if start is not None else None,
        "stop": to_isostring(stop) if stop is not None else None,
        "q": text_filter,
    }
    params.update({k: v for k, v in optional.items() if v is not None})
    return pagination.Iterator(
        ctx=self.ctx,
        path=f"/archive/{self._instance}/events",
        params=params,
        response_class=events_service_pb2.ListEventsResponse,
        items_key="event",
        item_mapper=Event,
    )
def list_command_history(
    self,
    command=None,
    queue=None,
    start=None,
    stop=None,
    page_size=500,
    descending=False,
):
    """
    Reads command history entries between the specified start and stop time.

    :param str command: Either a fully-qualified XTCE name or an alias in
                        the format ``NAMESPACE/NAME``.
    :param str queue: Name of the queue that the command was assigned to.
    :param ~datetime.datetime start: Minimum generation time of the returned
                                     command history entries (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    command history entries (exclusive)
    :param int page_size: Page size of underlying requests. Higher values
                          imply less overhead, but risk hitting the maximum
                          message size limit.
    :param bool descending: If set to ``True`` results are fetched in
                            reverse order (most recent first).
    :rtype: ~collections.abc.Iterable[.CommandHistory]
    """
    params = {"order": "desc" if descending else "asc"}
    if queue:
        params["queue"] = queue
    if page_size is not None:
        params["limit"] = page_size
    for key, timestamp in (("start", start), ("stop", stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    # An aliased/qualified command name starts with its own separator
    path = f"/archive/{self._instance}/commands"
    if command:
        path += command
    return pagination.Iterator(
        ctx=self.ctx,
        path=path,
        params=params,
        response_class=commands_service_pb2.ListCommandsResponse,
        items_key="entry",
        item_mapper=CommandHistory,
    )
def list_command_history(self, command=None, start=None, stop=None,
                         page_size=500, descending=False):
    """
    Reads command history entries between the specified start and stop time.

    :param str command: Either a fully-qualified XTCE name or an alias in
                        the format ``NAMESPACE/NAME``.
    :param ~datetime.datetime start: Minimum generation time of the returned
                                     command history entries (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    command history entries (exclusive)
    :param int page_size: Page size of underlying requests. Higher values
                          imply less overhead, but risk hitting the maximum
                          message size limit.
    :param bool descending: If set to ``True`` results are fetched in
                            reverse order (most recent first).
    :rtype: ~collections.Iterable[.CommandHistory]
    """
    params = {'order': 'desc' if descending else 'asc'}
    if page_size is not None:
        params['limit'] = page_size
    for key, timestamp in (('start', start), ('stop', stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    # An aliased/qualified command name starts with its own separator
    path = '/archive/{}/commands'.format(self._instance)
    if command:
        path += command
    return pagination.Iterator(
        client=self._client,
        path=path,
        params=params,
        response_class=rest_pb2.ListCommandsResponse,
        items_key='entry',
        item_mapper=CommandHistory,
    )
def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False):
    """
    Reads packet information between the specified start and stop time.

    Packets are sorted by generation time and sequence number.

    :param str name: Archived name of the packet
    :param ~datetime.datetime start: Minimum generation time of the returned
                                     packets (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    packets (exclusive)
    :param int page_size: Page size of underlying requests. Higher values
                          imply less overhead, but risk hitting the maximum
                          message size limit.
    :param bool descending: If set to ``True`` packets are fetched in
                            reverse order (most recent first).
    :rtype: ~collections.abc.Iterable[.Packet]
    """
    params = {"order": "desc" if descending else "asc"}
    for key, value in (("name", name), ("limit", page_size)):
        if value is not None:
            params[key] = value
    for key, timestamp in (("start", start), ("stop", stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    return pagination.Iterator(
        ctx=self.ctx,
        path=f"/archive/{self._instance}/packets",
        params=params,
        response_class=packets_service_pb2.ListPacketsResponse,
        items_key="packet",
        item_mapper=Packet,
    )
def save_credentials(credentials):
    """
    Persist the given credentials as JSON under CREDENTIALS_FILE.

    The stored document contains the access token, refresh token and the
    expiry time (in ISO format).

    :param credentials: Object exposing ``access_token``, ``refresh_token``
                        and ``expiry`` attributes.
    """
    # exist_ok avoids the race between an existence check and the actual
    # directory creation (the old exists()+makedirs() pattern could raise
    # if another process created the directory in between).
    os.makedirs(CONFIG_DIR, exist_ok=True)
    with open(CREDENTIALS_FILE, 'wt') as f:
        json.dump({
            'access_token': credentials.access_token,
            'refresh_token': credentials.refresh_token,
            'expiry': to_isostring(credentials.expiry),
        }, f, indent=2)
def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False):
    """
    Reads packet information between the specified start and stop time.

    Packets are sorted by generation time and sequence number.

    :param ~datetime.datetime start: Minimum generation time of the returned
                                     packets (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    packets (exclusive)
    :param int page_size: Page size of underlying requests. Higher values
                          imply less overhead, but risk hitting the maximum
                          message size limit.
    :param bool descending: If set to ``True`` packets are fetched in
                            reverse order (most recent first).
    :rtype: ~collections.Iterable[.Packet]
    """
    params = {'order': 'desc' if descending else 'asc'}
    for key, value in (('name', name), ('limit', page_size)):
        if value is not None:
            params[key] = value
    for key, timestamp in (('start', start), ('stop', stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    return pagination.Iterator(
        client=self._client,
        path='/archive/{}/packets'.format(self._instance),
        params=params,
        response_class=archive_pb2.ListPacketsResponse,
        items_key='packet',
        item_mapper=Packet,
    )
def _to_argument_value(value): if isinstance(value, (bytes, bytearray)): return binascii.hexlify(value) elif isinstance(value, collections.Mapping): # Careful to do the JSON dump only at the end, # and not at every level of a nested hierarchy obj = _compose_aggregate_members(value) return json.dumps(obj) elif isinstance(value, datetime.datetime): return to_isostring(value) else: return str(value)
def list_completeness_index(self, start=None, stop=None):
    """
    Reads completeness index records between the specified start and stop
    time.

    Each iteration returns a chunk of chronologically-sorted records.

    :rtype: ~collections.abc.Iterable[.IndexGroup]
    """
    params = {
        key: to_isostring(value)
        for key, value in (("start", start), ("stop", stop))
        if value is not None
    }
    return pagination.Iterator(
        ctx=self.ctx,
        path=f"/archive/{self._instance}/completeness-index",
        params=params,
        response_class=index_service_pb2.IndexResponse,
        items_key="group",
        item_mapper=IndexGroup,
    )
def list_completeness_index(self, start=None, stop=None):
    """
    Reads completeness index records between the specified start and stop
    time.

    Each iteration returns a chunk of chronologically-sorted records.

    :rtype: ~collections.Iterable[.IndexGroup]
    """
    params = {
        key: to_isostring(value)
        for key, value in (('start', start), ('stop', stop))
        if value is not None
    }
    return pagination.Iterator(
        client=self._client,
        path='/archive/{}/completeness-index'.format(self._instance),
        params=params,
        response_class=archive_pb2.IndexResponse,
        items_key='group',
        item_mapper=IndexGroup,
    )
def send_event(
    self,
    instance,
    message,
    event_type=None,
    time=None,
    severity="info",
    source=None,
    sequence_number=None,
):
    """
    Post a new event.

    :param str instance: A Yamcs instance name.
    :param str message: Event message.
    :param Optional[str] event_type: Type of event.
    :param severity: The severity level of the event. One of ``info``,
                     ``watch``, ``warning``, ``critical`` or ``severe``.
                     Defaults to ``info``.
    :type severity: Optional[str]
    :param time: Time of the event. If unspecified, defaults to mission
                 time.
    :type time: Optional[~datetime.datetime]
    :param source: Source of the event. Useful for grouping events in the
                   archive. When unset this defaults to ``User``.
    :type source: Optional[str]
    :param sequence_number: Sequence number of this event. This is
                            primarily used to determine unicity of events
                            coming from the same source. If not set Yamcs
                            will automatically assign a sequential number
                            as if every submitted event is unique.
    :type sequence_number: Optional[int]
    """
    req = events_service_pb2.CreateEventRequest()
    req.message = message
    req.severity = severity
    # Optional fields are only set when the caller provided them, so the
    # server can apply its own defaults otherwise.
    if source:
        req.source = source
    if event_type:
        req.type = event_type
    if time:
        req.time = to_isostring(time)
    if sequence_number is not None:
        req.sequence_number = sequence_number
    url = f"/archive/{instance}/events"
    self.post_proto(url, data=req.SerializeToString())
def get_packet(self, generation_time, sequence_number):
    """
    Gets a single packet by its identifying key (gentime, seqNum).

    :param ~datetime.datetime generation_time: When the packet was generated
                                               (packet time)
    :param int sequence_number: Sequence number of the packet
    :rtype: .Packet
    """
    gentime = to_isostring(generation_time)
    url = '/archive/{}/packets/{}/{}'.format(
        self._instance, gentime, sequence_number)
    response = self._client.get_proto(url)
    proto = yamcs_pb2.TmPacketData()
    proto.ParseFromString(response.content)
    return Packet(proto)
def _compose_aggregate_members(value): """ Recursively creates an object that can eventually be serialized to a valid aggregate value in JSON. This is a bit different than non-aggregate values, because Yamcs is more strict in the values that it accepts (for example: unlike regular arguments you cannot assign a numeric string to an integer argument, the JSON type needs to be numeric too). """ if isinstance(value, (bytes, bytearray)): return binascii.hexlify(value) elif isinstance(value, collections.Mapping): return {k: _compose_aggregate_members(v) for k, v in value.items()} elif isinstance(value, datetime.datetime): return to_isostring(value) else: # No string conversion here, use whatever the user is giving return value
def ls(self, args):
    """List buckets, or the contents of a single bucket."""
    opts = utils.CommandOptions(args)
    client = storage.Client(**opts.client_kwargs)

    if not args.bucket:
        # No bucket given: list all buckets of the instance
        for bucket in client.list_buckets(opts.instance):
            print(bucket.name)
        return

    if '://' in args.bucket:
        bucket_name, prefix = args.bucket.split('://', 1)
    else:
        bucket_name, prefix = args.bucket, None

    # Without a delimiter the listing is fully recursive
    delimiter = None if args.recurse else '/'
    listing = client.list_objects(opts.instance,
                                  bucket_name=bucket_name,
                                  delimiter=delimiter,
                                  prefix=prefix)
    rows = []
    for sub_prefix in listing.prefixes:
        url = '{}://{}'.format(bucket_name, sub_prefix)
        rows.append(['0', '', url] if args.long else [url])
    for obj in listing.objects:
        url = '{}://{}'.format(bucket_name, obj.name)
        if args.long:
            rows.append([str(obj.size), to_isostring(obj.created), url])
        else:
            rows.append([url])
    utils.print_table(rows)
def _build_value_proto(value):
    """
    Build a ``yamcs_pb2.Value`` proto from a native Python value.

    :raises YamcsError: when the Python type is not recognized
    """
    proto = yamcs_pb2.Value()
    if isinstance(value, bool):
        # bool before int: bool is a subclass of int
        proto.type = proto.BOOLEAN
        proto.booleanValue = value
    elif isinstance(value, float):
        proto.type = proto.DOUBLE
        proto.doubleValue = value
    elif isinstance(value, int):
        # Promote to 64-bit whenever the value does not fit a signed 32-bit
        # integer. (Previously only positive overflow was promoted; values
        # below -2147483648 would fail the sint32 field assignment.)
        if -2147483648 <= value <= 2147483647:
            proto.type = proto.SINT32
            proto.sint32Value = value
        else:
            proto.type = proto.SINT64
            proto.sint64Value = value
    elif isinstance(value, str):
        proto.type = proto.STRING
        proto.stringValue = value
    elif isinstance(value, bytes):
        proto.type = proto.BINARY
        proto.binaryValue = value
    elif isinstance(value, bytearray):
        proto.type = proto.BINARY
        proto.binaryValue = bytes(value)
    elif isinstance(value, datetime.datetime):
        proto.type = proto.TIMESTAMP
        proto.stringValue = to_isostring(value)
    elif isinstance(value, collections.abc.Mapping):
        proto.type = proto.AGGREGATE
        proto.aggregateValue.name.extend(value.keys())
        proto.aggregateValue.value.extend(
            [_build_value_proto(v) for v in value.values()])
    elif isinstance(value, collections.abc.Sequence):
        proto.type = proto.ARRAY
        proto.arrayValue.extend([_build_value_proto(v) for v in value])
    else:
        raise YamcsError("Unrecognized type")
    return proto
def list_parameter_ranges(
    self,
    parameter,
    start=None,
    stop=None,
    min_gap=None,
    max_gap=None,
    min_range=None,
    max_values=100,
    parameter_cache="realtime",
):
    """
    Returns parameter ranges between the specified start and stop time.

    Each range indicates an interval during which this parameter's value
    was uninterrupted and unchanged.

    Ranges are a good fit for retrieving the value of a parameter that does
    not change frequently. For example an on/off indicator or some
    operational status. Querying ranges will then induce much less overhead
    than manually processing the output of :meth:`list_parameter_values`
    would.

    The maximum number of returned ranges is limited to 500.

    :param str parameter: Either a fully-qualified XTCE name or an alias in
                          the format ``NAMESPACE/NAME``.
    :param start: Minimum generation time of the considered values
                  (inclusive)
    :type start: Optional[~datetime.datetime]
    :param stop: Maximum generation time of the considered values
                 (exclusive)
    :type stop: Optional[~datetime.datetime]
    :param min_gap: Time in seconds. Any gap (detected based on parameter
                    expiration) smaller than this will be ignored. However
                    if the parameter changes value, the ranges will still be
                    split.
    :type min_gap: Optional[float]
    :param max_gap: Time in seconds. If the distance between two subsequent
                    parameter values is bigger than this value (but smaller
                    than the parameter expiration), then an artificial gap
                    is created. This also applies if there is no expiration
                    defined for the parameter.
    :type max_gap: Optional[float]
    :param min_range: Time in seconds. Minimum duration of returned ranges.
                      If multiple values occur within the range, the most
                      frequent can be accessed using the ``entries``
                      property.
    :type min_range: Optional[float]
    :param max_values: Maximum number of unique values, tallied across the
                       full requested range. Use this in combination with
                       ``min_range`` to further optimize for transfer size.
                       This value is limited to 100 at most.
    :param str parameter_cache: Specify the name of the processor whose
                                parameter cache is merged with already
                                archived values. To disable results from the
                                parameter cache, set this to ``None``.
    :type parameter_cache: Optional[str]
    :rtype: .ParameterRange[]
    """
    params = {}
    for key, timestamp in (("start", start), ("stop", stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    # Durations are given in seconds, but the server expects milliseconds
    for key, seconds in (("minGap", min_gap),
                         ("maxGap", max_gap),
                         ("minRange", min_range)):
        if seconds is not None:
            params[key] = int(seconds * 1000)
    if max_values is not None:
        params["maxValues"] = max_values
    if parameter_cache:
        params["processor"] = parameter_cache
    else:
        params["norealtime"] = True
    path = f"/archive/{self._instance}/parameters{parameter}/ranges"
    response = self.ctx.get_proto(path=path, params=params)
    message = pvalue_pb2.Ranges()
    message.ParseFromString(response.content)
    return [ParameterRange(r) for r in message.range]
def list_parameter_values(
    self,
    parameter,
    start=None,
    stop=None,
    page_size=500,
    descending=False,
    parameter_cache="realtime",
    source="ParameterArchive",
):
    """
    Reads parameter values between the specified start and stop time.

    .. note::

        This method will send out multiple requests when more than
        ``page_size`` values are queried. For large queries, consider using
        :meth:`stream_parameter_values` instead, it uses server-streaming
        based on a single request, and supports downloading the values of
        multiple parameter at the same time.

    :param str parameter: Either a fully-qualified XTCE name or an alias in
                          the format ``NAMESPACE/NAME``.
    :param ~datetime.datetime start: Minimum generation time of the returned
                                     values (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the returned
                                    values (exclusive)
    :param int page_size: Page size of underlying requests. Higher values
                          imply less overhead, but risk hitting the maximum
                          message size limit.
    :param bool descending: If set to ``True`` values are fetched in
                            reverse order (most recent first).
    :param str parameter_cache: Specify the name of the processor whose
                                parameter cache is merged with already
                                archived values. To disable results from the
                                parameter cache, set this to ``None``.
    :param str source: Specify how to retrieve parameter values. By default
                       this uses the ``ParameterArchive`` which is optimized
                       for retrieval. For Yamcs instances that do not enable
                       the ``ParameterArchive``, you can still get results
                       by specifying ``replay`` as the source. Replay
                       requests take longer to return because the data needs
                       to be reprocessed.
    :rtype: ~collections.abc.Iterable[.ParameterValue]
    """
    params = {
        "source": source,
        "order": "desc" if descending else "asc",
    }
    if page_size is not None:
        params["limit"] = page_size
    for key, timestamp in (("start", start), ("stop", stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    if parameter_cache:
        params["processor"] = parameter_cache
    else:
        # Explicitly exclude results from the realtime cache
        params["norealtime"] = True
    return pagination.Iterator(
        ctx=self.ctx,
        path=f"/archive/{self._instance}/parameters{parameter}",
        params=params,
        response_class=archive_pb2.ListParameterHistoryResponse,
        items_key="parameter",
        item_mapper=ParameterValue,
    )
def list_parameter_ranges(self, parameter, start=None, stop=None,
                          min_gap=None, max_gap=None,
                          parameter_cache='realtime'):
    """
    Returns parameter ranges between the specified start and stop time.

    Each range indicates an interval during which this parameter's value
    was uninterrupted and unchanged.

    Ranges are a good fit for retrieving the value of a parameter that does
    not change frequently. For example an on/off indicator or some
    operational status. Querying ranges will then induce much less overhead
    than manually processing the output of :meth:`list_parameter_values`
    would.

    The maximum number of returned ranges is limited to 500.

    :param str parameter: Either a fully-qualified XTCE name or an alias in
                          the format ``NAMESPACE/NAME``.
    :param ~datetime.datetime start: Minimum generation time of the
                                     considered values (inclusive)
    :param ~datetime.datetime stop: Maximum generation time of the
                                    considered values (exclusive)
    :param float min_gap: Time in seconds. Any gap (detected based on
                          parameter expiration) smaller than this will be
                          ignored. However if the parameter changes value,
                          the ranges will still be split.
    :param float max_gap: Time in seconds. If the distance between two
                          subsequent parameter values is bigger than this
                          value (but smaller than the parameter expiration),
                          then an artificial gap is created. This also
                          applies if there is no expiration defined for the
                          parameter.
    :param str parameter_cache: Specify the name of the processor whose
                                parameter cache is merged with already
                                archived values. To disable results from the
                                parameter cache, set this to ``None``.
    :rtype: .ParameterRange[]
    """
    params = {}
    for key, timestamp in (('start', start), ('stop', stop)):
        if timestamp is not None:
            params[key] = to_isostring(timestamp)
    # Durations are given in seconds, but the server expects milliseconds
    for key, seconds in (('minGap', min_gap), ('maxGap', max_gap)):
        if seconds is not None:
            params[key] = int(seconds * 1000)
    if parameter_cache:
        params['processor'] = parameter_cache
    else:
        params['norealtime'] = True
    path = '/archive/{}/parameters{}/ranges'.format(
        self._instance, parameter)
    response = self._client.get_proto(path=path, params=params)
    message = pvalue_pb2.Ranges()
    message.ParseFromString(response.content)
    return [ParameterRange(r) for r in message.range]
def sample_parameter_values(self, parameter, start=None, stop=None,
                            sample_count=500, parameter_cache='realtime',
                            source='ParameterArchive'):
    """
    Returns parameter samples.

    The query range is split in sample intervals of equal length. For each
    interval a :class:`.Sample` is returned which describes the min, max,
    count and avg during that interval.

    Note that sample times are determined without considering the actual
    parameter values. Two separate queries with equal start/stop arguments
    will always return the same number of samples with the same timestamps.
    This is done to ease merging of multiple sample series. You should
    always be explicit about the ``start`` and ``stop`` times when relying
    on this property.

    :param str parameter: Either a fully-qualified XTCE name or an alias in
                          the format ``NAMESPACE/NAME``.
    :param ~datetime.datetime start: Minimum generation time of the sampled
                                     parameter values (inclusive). If not
                                     set this defaults to one hour ago.
    :param ~datetime.datetime stop: Maximum generation time of the sampled
                                    parameter values (exclusive). If not
                                    set this defaults to the current time.
    :param int sample_count: The number of returned samples.
    :param str parameter_cache: Specify the name of the processor whose
                                parameter cache is merged with already
                                archived values. To disable results from the
                                parameter cache, set this to ``None``.
    :param str source: Specify how to retrieve parameter values. By default
                       this uses the ``ParameterArchive`` which is optimized
                       for retrieval. For Yamcs instances that do not enable
                       the ``ParameterArchive``, you can still get results
                       by specifying ``replay`` as the source. Replay
                       requests take longer to return because the data needs
                       to be reprocessed.
    :rtype: .Sample[]
    """
    # Default range: the past hour, ending now
    now = datetime.utcnow()
    range_start = start if start is not None else now - timedelta(hours=1)
    range_stop = stop if stop is not None else now
    params = {
        'count': sample_count,
        'source': source,
        'start': to_isostring(range_start),
        'stop': to_isostring(range_stop),
    }
    if parameter_cache:
        params['processor'] = parameter_cache
    else:
        params['norealtime'] = True
    path = '/archive/{}/parameters{}/samples'.format(
        self._instance, parameter)
    response = self._client.get_proto(path=path, params=params)
    message = pvalue_pb2.TimeSeries()
    message.ParseFromString(response.content)
    return [Sample(s) for s in message.sample]