def get_segments(self, ts, channel, start, stop, gap_factor):
    """
    Return the time ranges for ``channel`` (between ``start`` and ``stop``)
    in which contiguous data exists.

    Gap-detection sensitivity is tuned via ``gap_factor``, which is
    multiplied with the channel's sampling period when identifying gaps.

    Returns a list of ``(start, end)`` tuples.
    """
    query = dict(
        channel=self._get_id(channel),
        package=self._get_id(ts),
        session=self.session.token,
        start=infer_epoch(start),
        end=infer_epoch(stop),
        gapThreshold=gap_factor,
    )
    # Segment retrieval is served by the streaming host, not the main API.
    response = self._get(
        host=self.session._streaming_host,
        endpoint='/ts/retrieve/segments',
        base='',
        stream=True,
        params=query)
    return [tuple(segment) for segment in response]
def create_annotation(self, layer, annotation, **kwargs):
    """
    Create an annotation for a timeseries package on the platform.

    Parameters
    ----------
    layer : annotation layer object (provides ``id``, ``time_series_id``,
        and ``_api`` for channel lookup)
    annotation : TimeSeriesAnnotation or str
        Either a full annotation object, or a label string combined with
        ``start``/``end`` keyword arguments.
    **kwargs :
        start, end : required when ``annotation`` is a label string
        channel_ids : optional str or list of str; defaults to every
            channel of the package when omitted
        description : optional str

    Returns
    -------
    TimeSeriesAnnotation created on the platform.

    Raises
    ------
    Exception
        If neither a TimeSeriesAnnotation nor a label with start/end
        is supplied.
    """
    if isinstance(annotation, TimeSeriesAnnotation):
        data = annotation.as_dict()
    elif all(x in kwargs for x in ["start", "end"]):
        start_time = infer_epoch(kwargs["start"])
        end_time = infer_epoch(kwargs["end"])
        data = {
            "name": "",
            "label": annotation,
            "start": int(start_time),
            "end": int(end_time),
        }
        # BUG FIX: use .get() so a missing 'channel_ids' kwarg no longer
        # raises KeyError; absent/empty both fall back to all channels.
        channel_ids = kwargs.get("channel_ids")
        if channel_ids:
            if isinstance(channel_ids, string_types):
                channel_ids = [channel_ids]
            data["channelIds"] = channel_ids
        else:
            # Default to every channel of the timeseries package.
            ts = layer._api.core.get(layer.time_series_id)
            data["channelIds"] = [x.id for x in ts.channels]
        # BUG FIX: the description is supplied via kwargs — the original
        # tested `"description" in annotation`, a substring check against
        # the label string, while still reading kwargs["description"].
        data["description"] = kwargs.get("description")
    else:
        raise Exception(
            "Must provide TimeSeriesAnnotation object or 'annotation','start','end' at minimum"
        )
    data["time_series_id"] = layer.time_series_id
    data["layer_id"] = layer.id
    path = self._uri(
        "/{ts_id}/layers/{layer_id}/annotations",
        ts_id=layer.time_series_id,
        layer_id=layer.id,
    )
    resp = self._post(path, json=data)
    tmp = TimeSeriesAnnotation.from_dict(resp, api=self.session)
    # Keep a passed-in annotation object in sync with the server response.
    if isinstance(annotation, TimeSeriesAnnotation):
        annotation.__dict__.update(tmp.__dict__)
    return tmp
def create_annotation(self, layer, annotation, **kwargs):
    """
    Create an annotation for a timeseries package on the platform.

    ``annotation`` is either a TimeSeriesAnnotation object or a label
    string; in the latter case ``start`` and ``end`` keyword arguments
    are required. Optional kwargs: ``channel_ids`` (str or list of str,
    defaults to every channel of the package) and ``description``.

    Returns the TimeSeriesAnnotation created on the platform; raises
    Exception when neither form of input is supplied.
    """
    if isinstance(annotation, TimeSeriesAnnotation):
        data = annotation.as_dict()
    elif all(x in kwargs for x in ['start', 'end']):
        start_time = infer_epoch(kwargs['start'])
        end_time = infer_epoch(kwargs['end'])
        data = {
            'name': '',
            'label': annotation,
            'start': long(start_time),
            'end': long(end_time),
        }
        # BUG FIX: use .get() so a missing 'channel_ids' kwarg no longer
        # raises KeyError; absent/empty both fall back to all channels.
        channel_ids = kwargs.get('channel_ids')
        if channel_ids:
            if isinstance(channel_ids, basestring):
                channel_ids = [channel_ids]
            data['channelIds'] = channel_ids
        else:
            # Default to every channel of the timeseries package.
            ts = layer._api.core.get(layer.time_series_id)
            data['channelIds'] = [x.id for x in ts.channels]
        # BUG FIX: the description is supplied via kwargs — the original
        # tested `'description' in annotation`, a substring check against
        # the label string, while still reading kwargs['description'].
        data['description'] = kwargs.get('description')
    else:
        raise Exception(
            "Must provide TimeSeriesAnnotation object or 'annotation','start','end' at minimum"
        )
    data['time_series_id'] = layer.time_series_id
    data['layer_id'] = layer.id
    path = self._uri('/{ts_id}/layers/{layer_id}/annotations',
                     ts_id=layer.time_series_id,
                     layer_id=layer.id)
    resp = self._post(path, json=data)
    tmp = TimeSeriesAnnotation.from_dict(resp, api=self.session)
    # Keep a passed-in annotation object in sync with the server response.
    if isinstance(annotation, TimeSeriesAnnotation):
        annotation.__dict__.update(tmp.__dict__)
    return tmp
def get_segments(self, ts, channel, start, stop):
    """
    Return ``(start, end)`` tuples describing the ranges of time between
    ``start`` and ``stop`` where contiguous data exists for ``channel``.
    """
    params = {
        'channel': self._get_id(channel),
        'package': self._get_id(ts),
        'session': self.session.token,
        'start': infer_epoch(start),
        'end': infer_epoch(stop),
    }
    # Note: uses streaming server
    response = self._get(
        host=self.session._streaming_host,
        endpoint='/ts/retrieve/segments',
        base='',
        stream=True,
        params=params)
    return [tuple(segment) for segment in response]
def get_ts_data_iter(self, ts, start, end, channels, chunk_size,
                     use_cache, length=None):
    """
    Yield DataFrames of timeseries data, chunk by chunk, over the span
    (start, end) or (start, start + length).

    Both ``chunk_size`` and ``length`` may be strings, e.g.
    5 seconds = '5s', 3 minutes = '3m', 1 hour = '1h';
    a bare number is assumed to be microseconds.

    Each yielded ``pd.DataFrame`` maps channel name -> that channel's
    chunk values; channels whose iterator is exhausted are omitted.
    Raises Exception when the resolved end time precedes the start time.
    """
    if isinstance(ts, basestring):
        # assumed to be package ID
        ts = self.session.core.get(ts)

    # CHANNELS — normalize the ``channels`` argument to a list of
    # TimeSeriesChannel objects belonging to ``ts``.
    ts_channels = ts.channels
    # no channels specified -> use all channels of the package
    if channels is None:
        channels = ts.channels
    # 1 channel specified as TSC object
    elif isinstance(channels, TimeSeriesChannel):
        channels = [channels]
    # 1 channel specified as channel id (string)
    elif isinstance(channels, basestring):
        channels = [ch for ch in ts.channels if ch.id == channels]
    # list of channel ids OR ts channels (mixed list allowed)
    else:
        all_ch = []
        for chan in channels:
            if isinstance(chan, basestring):
                # match by id
                all_ch.extend([ch for ch in ts_channels if ch.id == chan])
            else:
                # match by object equality
                all_ch.extend([ch for ch in ts_channels if ch == chan])
        channels = all_ch

    # determine start (usecs); default to the package's own start
    the_start = ts.start if start is None else infer_epoch(start)

    # chunk size: parse '5s'-style strings into microseconds
    if chunk_size is not None and isinstance(chunk_size, basestring):
        chunk_size = parse_timedelta(chunk_size)

    # determine end: explicit length wins over ``end``, which wins over
    # the package's own end time
    if length is not None:
        if isinstance(length, basestring):
            length_usec = parse_timedelta(length)
        else:
            length_usec = length
        the_end = the_start + length_usec
    elif end is not None:
        the_end = infer_epoch(end)
    else:
        the_end = ts.end

    # logical check
    if the_end < the_start:
        raise Exception("End time cannot be before start time.")

    # loop through chunks: one ChannelIterator chunk stream per channel,
    # all advanced in lock-step so each yield covers the same time window
    the_start = long(the_start)
    the_end = long(the_end)
    channel_chunks = [
        ChannelIterator(ch, the_start, the_end, chunk_size,
                        api=self.session, use_cache=use_cache).get_chunks()
        for ch in channels
    ]
    while True:
        # get chunk for all channels
        values = [next(i, None) for i in channel_chunks]
        # no more results? stop once every channel iterator is exhausted
        if not [1 for v in values if v is not None]:
            break
        # make dataframe keyed by channel name, skipping exhausted channels
        data_map = {
            c.name: v
            for c, v in zip(channels, values) if v is not None
        }
        yield pd.DataFrame.from_dict(data_map)
def get_ts_data_iter(self, ts, start, end, channels, chunk_size, length=None): """ Iterator will be constructed based over timespan (start,end) or (start, start+seconds) Both :chunk_size and :length should be described using strings, e.g. 5 second = '5s' 3 minutes = '3m' 1 hour = '1h' otherwise microseconds assumed. """ MAX_POINTS_PER_CHUNK = settings.max_points_per_chunk if channels is None: channels = ts.channels #if only one channel, make a list if isinstance(channels, TimeSeriesChannel): channels = [channels] max_rate = max([x.rate for x in channels]) # determine start (usecs) the_start = ts.start if start is None else infer_epoch(start) # chunk if chunk_size is not None: if isinstance(chunk_size, basestring): chunk_delta = parse_timedelta(chunk_size) else: chunk_delta = chunk_size chunk_points = chunk_delta / 1e6 * float(max_rate) if chunk_points > MAX_POINTS_PER_CHUNK: raise Exception( "Chunk size must be less than {} points".format( MAX_POINTS_PER_CHUNK)) else: chunk_delta = MAX_POINTS_PER_CHUNK / float(max_rate) * 1e6 # determine end if length is not None: if isinstance(length, basestring): length_usec = parse_timedelta(length) else: length_usec = length the_end = the_start + length_usec elif end is not None: the_end = infer_epoch(end) else: the_end = ts.end # logical check if the_end < the_start: raise Exception( "End time cannot be before start time - unless you're magic.") # loop through chunks the_start = long(the_start) the_end = long(the_end) chunk_delta = long(chunk_delta) for chunk_start in xrange(the_start, the_end, chunk_delta): chunk_end = chunk_start + chunk_delta if chunk_end > the_end: chunk_end = the_end # async data requests (over all channels) reqs = { ch.name: \ self._channel_data_request( channel = ch, start = chunk_start, end = chunk_end, limit = "", # always include limit param (even if empty) async = True ) for ch in channels } # wait for all data data = {