def _call(self): utime = self.data['Sensors']['Lumbar']['Unix Time'] # get the first timepoint to know which day to start and end with time_sdt = udatetime.utcfromtimestamp(utime[0]) time_edt = udatetime.utcfromtimestamp(utime[-1]) n_days = (time_edt.date() - time_sdt.date()).days if time_edt.hour > self._hours[0]: n_days += 1 # set the start and end hours for the first day day_start = time_sdt.replace(hour=self._hours[0], minute=0, second=0, microsecond=0) day_end = time_sdt.replace(hour=self._hours[1], minute=0, second=0, microsecond=0) iend = 10 # set so can reference in the i=0 loop for i in range(n_days): istart = argmin( abs(utime[iend - 10:] - day_start.timestamp())) + iend - 10 iend = argmin( abs(utime[istart:] - day_end.timestamp())) + istart + 1 self.data = (PROC.format(day_n=i + 1, value='Indices'), array([istart, iend])) day_start += datetime.timedelta(days=1) day_end += datetime.timedelta(days=1)
def test_utcfromtimestamp(self):
    """udatetime.utcfromtimestamp matches the stdlib field-by-field for hourly
    offsets spanning one day forward and one day backward from the epoch."""
    DAY = 86400
    HOUR = 3600

    def check(t):
        reference = datetime.utcfromtimestamp(t)
        candidate = udatetime.utcfromtimestamp(t)
        self.assertIsInstance(candidate, datetime)
        for field in ('year', 'month', 'day', 'hour',
                      'minute', 'second', 'microsecond'):
            self.assertEqual(getattr(candidate, field),
                             getattr(reference, field))

    for t in range(0, DAY, HOUR):
        check(t)
    for t in range(0, DAY * -1, HOUR * -1):
        check(t)
def test_utcfromtimestamp(self):
    """udatetime.utcfromtimestamp matches the stdlib field-by-field — and is
    UTC-aware with no DST — for hourly offsets one day either side of epoch."""
    DAY = 86400
    HOUR = 3600

    def check(t):
        reference = datetime.utcfromtimestamp(t)
        candidate = udatetime.utcfromtimestamp(t)
        self.assertIsInstance(candidate, datetime)
        for field in ('year', 'month', 'day', 'hour',
                      'minute', 'second', 'microsecond'):
            self.assertEqual(getattr(candidate, field),
                             getattr(reference, field))
        # Unlike the naive stdlib result, the udatetime result carries tzinfo:
        # zero UTC offset and no daylight-saving shift.
        self.assertEqual(candidate.utcoffset(), timedelta(0))
        self.assertEqual(candidate.dst(), NO_DST)

    for t in range(0, DAY, HOUR):
        check(t)
    for t in range(0, DAY * -1, HOUR * -1):
        check(t)
def reconstruct_interval(experiment_id):
    """
    Reverse the construct_experiment_id operation

    :param experiment_id: The experiment id
    :return: time interval
    """
    # The id is "<start_ms>-<end_ms>"; convert each half back to a datetime.
    start_ms, end_ms = experiment_id.split("-")
    start = udatetime.utcfromtimestamp(float(start_ms) / 1000.0)
    end = udatetime.utcfromtimestamp(float(end_ms) / 1000.0)
    from ..time_interval import TimeInterval
    return TimeInterval(start, end)
def construct_experiment_id(time_interval):
    """
    Construct an experiment id from a time interval

    :return: The experiment id
    :type time_interval: TimeInterval
    :rtype: str
    """
    # Express both interval bounds as integer milliseconds since the unix
    # epoch (made tz-aware so subtraction against aware bounds works).
    epoch = udatetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    millis = [int((bound - epoch).total_seconds() * 1000.0)
              for bound in (time_interval.start, time_interval.end)]
    return "{}-{}".format(*millis)
def _udatetime():
    """Return udatetime's conversion of the module-level TIME timestamp."""
    converted = udatetime.utcfromtimestamp(TIME)
    return converted
def udatetime_utcfromtimestamp():
    """Invoke udatetime.utcfromtimestamp on the module-level TIME value,
    discarding the result."""
    udatetime.utcfromtimestamp(TIME)
def rfc3339strFromTimestamp(ts):
    """Render a unix timestamp as an RFC 3339 string via udatetime."""
    as_datetime = udatetime.utcfromtimestamp(ts)
    return udatetime.to_string(as_datetime)
def udatetimeFromTimestamp(ts):
    """Convert a unix timestamp to a udatetime"""
    converted = udatetime.utcfromtimestamp(ts)
    return converted
from datetime import datetime, timedelta import udatetime import pytz import six import apache_beam as beam from apache_beam.transforms.window import TimestampedValue from apache_beam import typehints from pipe_tools.coders import JSONDict EPOCH = udatetime.utcfromtimestamp(0) SECONDS_IN_DAY = 60 * 60 * 24 # This is the format the dataflow uses for TIMESTAMP fields returned from BigQuery BEAM_BQ_TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S.%f UTC" # this is the format bigquery uses for I/O # This is a much better string format for datetimes that is supported by udatetime # Note that %z is not supported in pythng 2.7 for udatetime.strptime() RFC3339_TIMESTAMP_FORMAT = "%Y-%m-%dT%H:%M:%S.%f%z" DATE_FORMAT = "%Y-%m-%d" """ Timestamp tools Convert between datetime, unix timestamp and various string represetnations Dataflow BigQuery Timestamp Issues ----------------------------------
def process_timestamps(times, accel, time_units=None, conv_kw=None, window=False,
                       hours=('08:00', '20:00')):
    """
    Convert timestamps into pandas datetime64 objects, and window as appropriate.

    Parameters
    ----------
    times : array_like
        N-length array of timestamps to convert.
    accel : {numpy.ndarray, pd.Series}
        (N, 3) array of acceleration values. They will be windowed the same
        way as the timestamps if `window` is set to True.
    time_units : {None, str}, optional
        Time units. Useful if conversion is from unix timestamps in seconds (s),
        milliseconds (ms), microseconds (us), or nanoseconds (ns). If not None,
        will override the value in conv_kw, though one or the other must be
        provided. Default is None.
    conv_kw : {None, dict}, optional
        Additional key-word arguments for the pandas.to_datetime function. If
        time_units is not None, that value will be used and overwrite the value
        in conv_kw. If the timestamps are in unix time, it is unlikely this
        argument will be necessary. Default is None.
    window : bool, optional
        Window the timestamps into the selected hours per day.
    hours : array_like, optional
        Length two array_like of hours (24-hour format) as strings, defining
        the start (inclusive) and end (exclusive) times to include in the
        processing. Default is ('08:00', '20:00').

    Returns
    -------
    timestamps : {pandas.DatetimeIndex, pandas.Series, dict}
        Array_like of timestamps. DatetimeIndex if times was a numpy.ndarray,
        or list. pandas.Series with a dtype of 'datetime64' if times was a
        pandas.Series. If `window` is set to True, then a dictionary of
        timestamps for each day is returned.
    dt : float
        Sampling time in seconds.
    accel : {numpy.ndarray, pd.Series, dict}, optional
        Acceleration windowed the same way as the timestamps (dictionary of
        acceleration for each day), if `window` is True. If `window` is False,
        then the acceleration is not returned.

    Raises
    ------
    ValueError
        If both `time_units` and `conv_kw` are None.
    """
    # time_units, when given, overrides any 'unit' already in conv_kw.
    # NOTE(review): this mutates the caller's conv_kw dict in place — confirm
    # that is acceptable to callers.
    if conv_kw is not None:
        if time_units is not None:
            conv_kw['unit'] = time_units
    else:
        if time_units is not None:
            conv_kw = {'unit': time_units}
        else:
            raise ValueError(
                'Either (time_units) must be defined, or "unit" must be a key of (conv_kw).'
            )

    # convert using pandas
    # For unix units, scale to seconds and convert each element individually,
    # stripping tzinfo to keep the result naive.
    # NOTE(review): an unrecognised 'unit' value (not ms/us/ns/s) falls through
    # every branch and leaves `timestamps` unbound (NameError below) — confirm
    # whether that is intended input validation.
    if 'unit' in conv_kw:
        if conv_kw['unit'] == 'ms':
            timestamps = to_datetime([
                utcfromtimestamp(t).replace(tzinfo=None) for t in times / 1e3
            ])
        elif conv_kw['unit'] == 'us':
            timestamps = to_datetime([
                utcfromtimestamp(t).replace(tzinfo=None) for t in times / 1e6
            ])
        elif conv_kw['unit'] == 'ns':
            timestamps = to_datetime([
                utcfromtimestamp(t).replace(tzinfo=None) for t in times / 1e9
            ])
        elif conv_kw['unit'] == 's':
            timestamps = to_datetime(
                [utcfromtimestamp(t).replace(tzinfo=None) for t in times])
    else:
        timestamps = to_datetime(times, **conv_kw)

    # find the sampling time: mean spacing over the first 100 samples
    dt = mean(diff(timestamps[:100])) / timedelta64(1, 's')  # convert to seconds

    # windowing
    if window:
        # Indices of samples inside the daily [hours[0], hours[1]) window;
        # gaps > 1 in those indices mark day boundaries.
        hour_inds = timestamps.indexer_between_time(hours[0], hours[1])
        day_splits = argwhere(diff(hour_inds) > 1) + 1
        # prepend 0 and append the total count so each slice pair is one day
        day_splits = append(insert(day_splits, 0, 0), hour_inds.size)
        timestamps_ = {}
        accel_ = {}
        for i in range(len(day_splits) - 1):
            timestamps_[f'Day {i + 1}'] = timestamps[hour_inds][
                day_splits[i]:day_splits[i + 1]]
            accel_[f'Day {i + 1}'] = accel[hour_inds][
                day_splits[i]:day_splits[i + 1]]
        return timestamps_, dt, accel_
    else:
        return timestamps, dt
def tabulate_results(results, csv_path, method='stillness'):
    """
    Tabulate the results as calculated by the sequential pipeline.

    Parameters
    ----------
    results : {dict, str}
        Either a dictionary of the results, or the path to the h5 file where
        the results were stored.
    csv_path : str
        Path to save the tabular data at
    method : {'stillness', 'displacement'}, optional
        Which method to tabulate results for. Default is 'stillness'.
    """
    # get the results
    days, times, duration, vdisp, mxa, mna, sparc = [], [], [], [], [], [], []
    mtd = f'{method.capitalize()} Method'
    if isinstance(results, dict):
        # In-memory results: walk Processed/Sit2Stand/<Day n>/<method> groups.
        # day[4:] strips the "Day " prefix to recover the day number.
        day_list = [i for i in results['Processed']['Sit2Stand'] if 'Day' in i]
        for day in day_list:
            days.extend([int(day[4:])] * results['Processed']['Sit2Stand'][day]
                        [mtd]['STS Times'].shape[0])
            times.extend(
                results['Processed']['Sit2Stand'][day][mtd]['STS Times'])
            duration.extend(
                results['Processed']['Sit2Stand'][day][mtd]['Duration'])
            vdisp.extend(results['Processed']['Sit2Stand'][day][mtd]
                         ['Vertical Displacement'])
            mxa.extend(
                results['Processed']['Sit2Stand'][day][mtd]['Max. Accel.'])
            mna.extend(
                results['Processed']['Sit2Stand'][day][mtd]['Min. Accel.'])
            sparc.extend(results['Processed']['Sit2Stand'][day][mtd]['SPARC'])
    else:
        # On-disk results: same layout, read through h5py.
        with h5py.File(results, 'r') as f:
            day_list = [i for i in f['Processed/Sit2Stand'] if 'Day' in i]
            for day in day_list:
                days.extend(
                    [int(day[4:])] *
                    f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times'].shape[0])
                times.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/STS Times'])
                duration.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Duration'])
                vdisp.extend(
                    f[f'Processed/Sit2Stand/{day}/{mtd}/Vertical Displacement']
                )
                mxa.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Max. Accel.'])
                mna.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/Min. Accel.'])
                sparc.extend(f[f'Processed/Sit2Stand/{day}/{mtd}/SPARC'])

    # Assemble the output table; object dtype since columns mix int/float/str.
    # Columns: 0=day, 1-2=start/end unix time, 3=start time string, 4=hour,
    # 5=minute, 6=weekend flag, 7=duration, 8=vertical displacement, 9=max
    # accel, 10=min accel, 11=SPARC.
    table = zeros((len(days), 12), dtype='object')
    table[:, 0] = days
    table[:, 1:3] = array(times)
    table[:, 7] = duration
    # table[:, 8] = vdisp
    # NOTE(review): the vdisp column assignment above is commented out, so the
    # "Vertical Displacement" column in the CSV is all zeros even though vdisp
    # is collected — confirm whether this is intentional.
    table[:, 9] = mxa
    table[:, 10] = mna
    table[:, 11] = sparc

    # Derive human-readable time columns from the start unix timestamp.
    for i, ts in enumerate(table[:, 1]):
        dt = udt.utcfromtimestamp(ts)
        table[i, 3] = dt.strftime('%Y-%m-%d %H:%M:%S.%f')
        table[i, 4] = dt.hour
        table[i, 5] = dt.minute
        table[
            i,
            6] = dt.weekday() >= 5  # is the day a weekend. 0=Monday, 6=Sunday

    hdr = 'Day,Start Unix Time,End Unix Time,Start Time,Hour,Minute,Weekend,Duration,Vertical Displacement,' \
          'Max. Accel.,Min. Accel., SPARC'
    fmt = '%d, %f, %f, %s, %i, %i, %s, %f, %f, %f, %f, %f'

    savetxt(csv_path, table, header=hdr, fmt=fmt)
def process_result_value(self, value: Optional[int],
                         dialect) -> Optional[datetime]:
    """Deserialize a stored column value (milliseconds since the unix epoch)
    into a datetime via udatetime; a NULL column passes through as None."""
    if value is None:
        return None
    seconds = value / 1000
    return udatetime.utcfromtimestamp(seconds)