def setUpClass(cls):
    super(TestAccelerometer, cls).setUpClass()
    tz = pytz.timezone('US/Eastern')
    cls.sampling_frequency = 64.0 / 6.0

    cls.accelx = []
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accelx.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            cls.accelx.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
    cls.accelx_ds = DataStream(None, None)
    cls.accelx_ds.data = cls.accelx

    cls.accely = []
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accely.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            cls.accely.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
    cls.accely_ds = DataStream(None, None)
    cls.accely_ds.data = cls.accely

    cls.accelz = []
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accelz.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            cls.accelz.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
    cls.accelz_ds = DataStream(None, None)
    cls.accelz_ds.data = cls.accelz
def getStats(data_window, starttime, endtime, list_qDev, list_mean, list_median, list_80):
    # NOTE: starttime and endtime were read from the enclosing scope in the original;
    # they are passed in explicitly here so the helper is self-contained.

    # samples in the window as a numpy array
    data = np.array([i.sample for i in data_window])

    # Quartile deviation: half the interquartile range
    value_qDev = 0.5 * (np.percentile(data, 75) - np.percentile(data, 25))
    list_qDev.append(
        DataPoint.from_tuple(start_time=starttime, end_time=endtime, sample=value_qDev))

    # Mean
    value_mean = np.mean(data)
    list_mean.append(
        DataPoint.from_tuple(start_time=starttime, end_time=endtime, sample=value_mean))

    # Median
    value_median = np.median(data)
    list_median.append(
        DataPoint.from_tuple(start_time=starttime, end_time=endtime, sample=value_median))

    # 80th percentile
    value_80 = np.percentile(data, 80)
    list_80.append(
        DataPoint.from_tuple(start_time=starttime, end_time=endtime, sample=value_80))

    return list_qDev, list_mean, list_median, list_80
def setUpClass(cls):
    super(TestAllFeatures, cls).setUpClass()
    tz = pytz.timezone('US/Eastern')

    # Load RIP
    rip = []
    rip_sampling_frequency = 64.0 / 3
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/rip.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            rip.append(
                DataPoint.from_tuple(
                    datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))

    # Load ECG
    ecg = []
    ecg_sampling_frequency = 64.0
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/ecg.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            ecg.append(
                DataPoint.from_tuple(
                    datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))

    # Build dataframes
    ecg_date = np.array([np.nan] * len(ecg)).astype(datetime.datetime)
    ecg_sample = np.array([np.nan] * len(ecg))
    rip_date = np.array([np.nan] * len(rip)).astype(datetime.datetime)
    rip_sample = np.array([np.nan] * len(rip))

    for i in range(len(ecg)):
        ecg_date[i] = ecg[i].start_time
        ecg_sample[i] = ecg[i].sample
    for i in range(len(rip)):
        rip_date[i] = rip[i].start_time
        rip_sample[i] = rip[i].sample

    df_ecg = pd.DataFrame(index=np.arange(len(ecg)), data=ecg_sample, columns=['sample'])
    df_ecg['Date'] = ecg_date
    df_rip = pd.DataFrame(index=np.arange(len(rip)), data=rip_sample, columns=['sample'])
    df_rip['Date'] = rip_date

    print(df_rip.head())
    print(df_rip.tail())
    print(df_ecg.head())
    print(df_ecg.tail())

    cls.df_ecg = df_ecg
    cls.df_rip = df_rip
    cls.sampling_frequency = {'ecg': ecg_sampling_frequency,
                              'rip': rip_sampling_frequency}
def rsa_feature_computation(valleys_datastream: DataStream,
                            rr_datastream: DataStream,
                            window_size: float,
                            window_offset: float):
    """
    :param valleys_datastream:
    :param rr_datastream:
    :param window_size:
    :param window_offset:
    :return: RSA features computed over the given window
    """
    if valleys_datastream is None or rr_datastream is None:
        return None
    if len(valleys_datastream.data) == 0 or len(rr_datastream.data) == 0:
        return None

    rsa_datastream = compute_datastream_rsa(valleys_datastream, rr_datastream)

    # perform windowing of rsa_datastream
    window_data = window_sliding(rsa_datastream.data, window_size, window_offset)

    # initialize each RSA feature
    RSA_Quartile_Deviation = []
    RSA_Mean = []
    RSA_Median = []
    RSA_80thPercentile = []

    # iterate over each window and calculate features
    for key, value in window_data.items():
        starttime, endtime = key
        rsa = np.array([i.sample for i in value])
        RSA_Quartile_Deviation.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=0.5 * (np.percentile(rsa, 75) - np.percentile(rsa, 25))))
        RSA_80thPercentile.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=np.percentile(rsa, 80)))
        RSA_Mean.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=np.mean(rsa)))
        RSA_Median.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=np.median(rsa)))

    rsa_mean = DataStream.from_datastream([valleys_datastream])
    rsa_mean.data = RSA_Mean
    rsa_median = DataStream.from_datastream([valleys_datastream])
    rsa_median.data = RSA_Median
    rsa_quartile = DataStream.from_datastream([valleys_datastream])
    rsa_quartile.data = RSA_Quartile_Deviation
    rsa_80 = DataStream.from_datastream([valleys_datastream])
    rsa_80.data = RSA_80thPercentile

    return rsa_mean, rsa_median, rsa_quartile, rsa_80
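# --- Illustrative sketch (not part of the original module) ---
# The quartile deviation used above is half the interquartile range. A quick
# numeric sanity check with plain numpy; no project classes are assumed.
import numpy as np

rsa_window = np.array([0.40, 0.55, 0.60, 0.72, 0.90])
quartile_deviation = 0.5 * (np.percentile(rsa_window, 75) - np.percentile(rsa_window, 25))
print(quartile_deviation)  # 0.5 * (0.72 - 0.55), approximately 0.085 for this window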
def sensor_failure_marker(attachment_marker_stream_id: uuid, mshrv_accel_id: uuid, mshrv_gyro_id: uuid,
                          wrist: str, owner_id: uuid, dd_stream_name, CC: CerebralCortex, config: dict):
    """
    Label a window as a sensor failure when, during a MOTIONSENSE-ON-BODY period, one of the
    accelerometer/gyroscope pair reports failure windows while the other does not. All labeled
    windows (start time, end time, label) and their metadata are then stored in the datastore.
    :param attachment_marker_stream_id:
    :param mshrv_accel_id:
    :param mshrv_gyro_id:
    :param wrist:
    :param owner_id:
    :param dd_stream_name:
    :param CC:
    :param config:
    """
    # use the stream id, data-diagnostic stream name, and owner id to generate a unique,
    # reproducible stream ID for the sensor-failure marker
    sensor_failure_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS,
        str(attachment_marker_stream_id) + dd_stream_name + str(owner_id) + "SENSOR FAILURE MARKER")

    stream_days = get_stream_days(attachment_marker_stream_id, sensor_failure_stream_id, CC)

    try:
        for day in stream_days:
            # load the stream data to be diagnosed
            attachment_marker_stream = CC.get_datastream(attachment_marker_stream_id, day,
                                                         data_type=DataSet.COMPLETE)
            results = OrderedDict()

            if attachment_marker_stream.data:
                for marker_window in attachment_marker_stream.data:
                    if "MOTIONSENSE-ON-BODY" in marker_window.sample:
                        mshrv_accel_stream = CC.get_datastream(mshrv_accel_id, day,
                                                               data_type=DataSet.ONLY_DATA,
                                                               start_time=marker_window.start_time,
                                                               end_time=marker_window.end_time)
                        mshrv_gyro_stream = CC.get_datastream(mshrv_gyro_id, day,
                                                              data_type=DataSet.ONLY_DATA,
                                                              start_time=marker_window.start_time,
                                                              end_time=marker_window.end_time)

                        results_accel = process_windows(mshrv_accel_stream, config)
                        results_gyro = process_windows(mshrv_gyro_stream, config)

                        key = marker_window.start_time, marker_window.end_time

                        # if one sensor reports failure windows while the other does not,
                        # label the window accordingly
                        if results_accel > 0 and results_gyro < 1:
                            sample = "MOTIONSENSE-HRV-" + str(wrist) + "-ACCELEROMETER-FAILURE"
                            results.setdefault(key, []).append(
                                DataPoint(marker_window.start_time, marker_window.end_time, sample))
                        elif results_accel < 1 and results_gyro > 0:
                            sample = "MOTIONSENSE-HRV-" + str(wrist) + "-GYRO-FAILURE"
                            results.setdefault(key, []).append(
                                DataPoint(marker_window.start_time, marker_window.end_time, sample))

                merged_windows = merge_consective_windows(results)
                if len(results) > 0:
                    input_streams = [{"owner_id": owner_id,
                                      "id": str(attachment_marker_stream_id),
                                      "name": attachment_marker_stream.name}]
                    output_stream = {"id": sensor_failure_stream_id,
                                     "name": dd_stream_name,
                                     "algo_type": config["algo_type"]["sensor_failure"]}
                    metadata = get_metadata(dd_stream_name, input_streams, config)
                    store(merged_windows, input_streams, output_stream, metadata, CC, config)
    except Exception as e:
        print(e)
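# --- Illustrative sketch (not part of the original module) ---
# The marker stream id above is derived with uuid.uuid3 rather than uuid.uuid4,
# so re-running the diagnostic for the same input stream, stream name, and owner
# always targets the same output stream. A standalone demonstration with made-up
# ids (the names here are hypothetical):
import uuid

owner_id = uuid.uuid4()
source_stream_id = uuid.uuid4()
dd_stream_name = "SENSOR_FAILURE_MARKER"  # hypothetical stream name

name = str(source_stream_id) + dd_stream_name + str(owner_id) + "SENSOR FAILURE MARKER"
derived_once = uuid.uuid3(uuid.NAMESPACE_DNS, name)
derived_again = uuid.uuid3(uuid.NAMESPACE_DNS, name)
assert derived_once == derived_again  # deterministic for identical inputs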
def getBasicStatistics(datastream: DataStream, window_size: float, window_offset: float):
    """
    Computes the mean, median, 80th percentile, and quartile deviation of a datastream
    :param datastream: DataStream
    :param window_size:
    :param window_offset:
    :return: mean, median, 80th percentile, and quartile deviation DataStreams of datastream
    """
    # perform windowing of datastream
    window_data = window_sliding(datastream.data, window_size, window_offset)

    datastream_mean_data = []
    datastream_median_data = []
    datastream_80percentile_data = []
    datastream_quartile_deviation_data = []

    for key, value in window_data.items():
        starttime, endtime = key
        reference_data = np.array([i.sample for i in value])

        datastream_mean_data.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=np.mean(reference_data)))
        datastream_median_data.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=np.median(reference_data)))
        datastream_quartile_deviation_data.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=(0.5 * (np.percentile(reference_data, 75) -
                                                np.percentile(reference_data, 25)))))
        datastream_80percentile_data.append(
            DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                 sample=np.percentile(reference_data, 80)))

    datastream_mean = DataStream.from_datastream([datastream])
    datastream_mean.data = datastream_mean_data
    datastream_median = DataStream.from_datastream([datastream])
    datastream_median.data = datastream_median_data
    datastream_quartile = DataStream.from_datastream([datastream])
    datastream_quartile.data = datastream_quartile_deviation_data
    datastream_80 = DataStream.from_datastream([datastream])
    datastream_80.data = datastream_80percentile_data

    return datastream_mean, datastream_median, datastream_quartile, datastream_80
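# --- Illustrative sketch (not part of the original module) ---
# The same per-window statistics as getBasicStatistics, but over plain
# (start, end) -> samples windows so it runs without DataStream/DataPoint.
# Only numpy is assumed; the function name is hypothetical.
import numpy as np

def basic_window_stats_sketch(window_data):
    """window_data: dict mapping (starttime, endtime) -> list of numeric samples."""
    out = {}
    for (starttime, endtime), samples in window_data.items():
        data = np.asarray(samples, dtype=float)
        out[(starttime, endtime)] = {
            'mean': np.mean(data),
            'median': np.median(data),
            'quartile_deviation': 0.5 * (np.percentile(data, 75) - np.percentile(data, 25)),
            '80th_percentile': np.percentile(data, 80),
        }
    return out

# Example:
# basic_window_stats_sketch({(0.0, 60.0): [1.0, 2.0, 3.0, 4.0]})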
def rsa_calculate(start_time, end_time, datastream: DataStream):
    """
    :param start_time:
    :param end_time:
    :param datastream:
    :return: RSA sample (peak-to-trough amplitude), or -1.0 if no valid pair is found
    """
    result = DataPoint.from_tuple(start_time, -1.0)
    _max = DataPoint.from_tuple(0, 0.0)
    _min = DataPoint.from_tuple(0, 0.0)
    maxFound = False
    minFound = False

    for dp in datastream.data:
        if start_time < dp.start_time < end_time:
            if _max.start_time == 0 and _min.start_time == 0:
                _max = DataPoint.from_tuple(start_time=dp.start_time, sample=dp.sample)
                _min = DataPoint.from_tuple(start_time=dp.start_time, sample=dp.sample)
            elif dp.sample >= _max.sample:
                _max = DataPoint.from_tuple(start_time=dp.start_time, sample=dp.sample)
                maxFound = True
            elif dp.sample <= _min.sample:
                _min = DataPoint.from_tuple(start_time=dp.start_time, sample=dp.sample)
                minFound = True

    if maxFound and minFound:
        result = DataPoint.from_tuple(start_time=result.start_time,
                                      sample=(_max.sample - _min.sample))
    return result.sample
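# --- Illustrative sketch (not part of the original module) ---
# rsa_calculate above returns the max-minus-min amplitude over datapoints whose
# start_time falls strictly inside (start_time, end_time), with -1.0 as a
# sentinel when no peak/trough pair is found. A simplified, self-contained
# version of that core amplitude step (the flag bookkeeping is omitted):
def rsa_amplitude_sketch(timestamps, samples, start_time, end_time):
    in_window = [s for t, s in zip(timestamps, samples) if start_time < t < end_time]
    if len(in_window) < 2:
        return -1.0  # mirror the sentinel used above
    return max(in_window) - min(in_window)

# Example: rsa_amplitude_sketch([1, 2, 3, 4], [0.5, 0.9, 0.3, 0.7], 0, 5)
# returns 0.9 - 0.3, i.e. about 0.6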
def autosense_sequence_align(datastreams: List[DataStream],
                             sampling_frequency: float) -> DataStream:
    result = DataStream.from_datastream(input_streams=datastreams)
    result.data = []
    if len(datastreams) == 0:
        return result

    # the aligned sequence starts just before the latest stream start time
    start_time = None
    for ds in datastreams:
        ts = ds.data[0].start_time
        if not start_time:
            start_time = ts
        elif start_time < ts:
            start_time = ts
    start_time -= datetime.timedelta(seconds=1.0 / sampling_frequency)

    # trim every stream to samples after start_time, tracking the shortest length
    data_block = []
    max_index = np.inf
    for ds in datastreams:
        d = [i for i in ds.data if i.start_time > start_time]
        if len(d) < max_index:
            max_index = len(d)
        data_block.append(d)

    # build one vector sample per index across all streams; a plain list of
    # lists avoids numpy's restrictions on ragged arrays
    for i in range(0, max_index):
        sample = [stream[i].sample for stream in data_block]
        result.data.append(DataPoint.from_tuple(data_block[0][i].start_time, sample))

    return result
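# --- Illustrative sketch (not part of the original module) ---
# The alignment above keeps only samples after the latest stream start and
# truncates every stream to the shortest remaining length. The same idea over
# plain (timestamp, value) lists, with no DataStream dependency:
def align_streams_sketch(streams):
    """streams: list of lists of (timestamp, value), each sorted by timestamp."""
    latest_start = max(s[0][0] for s in streams)
    trimmed = [[(t, v) for t, v in s if t >= latest_start] for s in streams]
    n = min(len(s) for s in trimmed)
    return [
        (trimmed[0][i][0], [s[i][1] for s in trimmed])  # (timestamp, sample vector)
        for i in range(n)
    ]

# Example:
# align_streams_sketch([[(1, 10), (2, 11)], [(0, 20), (1, 21), (2, 22)]])
# -> [(1, [10, 21]), (2, [11, 22])]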
def setUpClass(cls):
    configuration_file = os.path.join(os.path.dirname(__file__), '../../../cerebralcortex.yml')
    cls.CC = CerebralCortex(configuration_file, master="local[*]",
                            name="Data Diagnostic App", time_zone="US/Central")
    cls.config = Configuration(
        filepath="../data_diagnostic/data_diagnostic_config.yml").config

    cls.sample_battery_data = []
    for row in range(1, 481):
        # piecewise battery schedule; boundaries use <= so every row is assigned
        # (the original open ranges skipped rows 120, 240, 300, and 360, which
        # silently inherited the previous row's value)
        if row <= 60:
            battery = 87.0
        elif row <= 120:
            battery = 0.0
        elif row <= 240:
            battery = 87.0
        elif row <= 300:
            battery = 7.0
        elif row <= 360:
            battery = 0.0
        else:
            battery = 60.0

        tz = pytz.timezone("US/Central")
        start_time = tz.localize(
            datetime.fromtimestamp(int(round((time.time() + row) * 1000)) / 1e3))
        dp = DataPoint(start_time=start_time, sample=battery)
        cls.sample_battery_data.append(dp)

    cls.window_size = 60
def setUpClass(cls):
    super(TestPeakValleyComputation, cls).setUpClass()
    tz = pytz.timezone('US/Eastern')
    data = []

    cls._sample_frequency = 21.33
    cls._smoothing_factor = 5
    cls._time_window = 8
    cls._expiration_amplitude_threshold_perc = 0.10
    cls._threshold_expiration_duration = 0.312
    cls._max_amplitude_change_peak_correction = 30
    cls._inspiration_amplitude_threshold_perc = 0.10
    cls._min_neg_slope_count_peak_correction = 4
    cls._minimum_peak_to_valley_time_diff = 0.31
    cls._window_length = int(round(cls._time_window * cls._sample_frequency))

    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/rip.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            data.append(
                DataPoint.from_tuple(
                    datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))

    cls._data_start_time_to_index = get_data_start_time_to_index_dic(data=data)

    cls.rip_datastream = DataStream(None, None)
    cls.rip_datastream.data = data
def test_DataPoint(self):
    ts = datetime.datetime.now()
    dp = DataPoint(start_time=ts, end_time=ts, sample={'Foo': 123},
                   metadata={'label': 'good'})

    self.assertDictEqual(dp.sample, {'Foo': 123})
    self.assertEqual(dp.start_time, ts)
    self.assertEqual(dp.end_time, ts)
    self.assertEqual(dp.metadata, {'label': 'good'})
def test_classmethod_from_tuple(self):
    ts = datetime.datetime.now()
    dp = DataPoint.from_tuple(start_time=ts, end_time=ts, sample=[1, 2, 3])

    self.assertIsInstance(dp, DataPoint)
    self.assertEqual(dp.start_time, ts)
    self.assertEqual(dp.end_time, ts)
    self.assertEqual(dp.sample, [1, 2, 3])
def test_accelerometer_features(self):
    ds = DataStream(None, None)
    data = []
    # TODO: Fix the test case once timestamp correction and sequence alignment is written
    for i in range(min(len(self.accelx), len(self.accely), len(self.accelz))):
        data.append(
            DataPoint.from_tuple(self.accelx[i].start_time, [
                self.accelx[i].sample, self.accely[i].sample,
                self.accelz[i].sample
            ]))
    ds.datapoints = data

    accelerometer_magnitude, accelerometer_win_mag_deviations, accel_activity = accelerometer_features(ds)

    self.assertEqual(len(accelerometer_magnitude.datapoints), 62870)
    self.assertEqual(len(accelerometer_win_mag_deviations.datapoints), 687)
    self.assertEqual(len(accel_activity.datapoints), 687)
    self.assertEqual(
        len([dp for dp in accel_activity.datapoints if dp.sample]), 0)  # TODO: Is this correct
def getMinuteVentilation(peak_datastream: DataStream, valley_datastream: DataStream,
                         window_size: float, window_offset: float):
    """
    :param peak_datastream: DataStream
    :param valley_datastream: DataStream
    :param window_size:
    :param window_offset:
    :return: respiration minute-ventilation datastream
    """
    peak_window_data = window_sliding(peak_datastream.data, window_size, window_offset)
    valley_window_data = window_sliding(valley_datastream.data, window_size, window_offset)

    minuteVentilation = []
    for valley_key, valley_value in valley_window_data.items():
        starttime, endtime = valley_key
        # look up the peak window covering the same (start, end) interval directly,
        # instead of scanning every peak window for a matching key
        peak_value = peak_window_data.get(valley_key)
        if peak_value is not None:
            minuteVentilation.append(
                DataPoint.from_tuple(start_time=starttime, end_time=endtime,
                                     sample=calculateMinuteVentilation(valley_value, peak_value)))

    datastream_minuteVentilation = DataStream.from_datastream([valley_datastream])
    datastream_minuteVentilation.data = minuteVentilation
    return datastream_minuteVentilation
def data(self, value):
    result = []
    for dp in value:
        result.append(
            DataPoint(self._identifier, dp.start_time, dp.end_time, dp.sample))
    self._data = result
def form_data_point_from_sample_array(sample_list):
    datapoints = []
    for i in sample_list:
        datapoints.append(
            DataPoint.from_tuple(start_time=datetime.now(), sample=i))
    return datapoints
def setUp(self):
    self.size = 100000
    self.ds = DataStream(None, None)
    data = [
        DataPoint.from_tuple(datetime.datetime.now(), [random()])
        for i in range(0, self.size)
    ]
    self.ds.data = data
def ecg_data_quality(datastream: DataStream,
                     window_size: float = 2.0,
                     acceptable_outlier_percent: float = .34,
                     outlier_threshold_high: float = .9769,
                     outlier_threshold_low: float = .004884,
                     ecg_threshold_band_loose: float = .01148,
                     ecg_threshold_slope: float = .02443,
                     buffer_length: int = 3) -> DataStream:
    """
    :param datastream: Input ECG datastream
    :param window_size: Window size, in seconds, into which the datastream is divided to check data quality
    :param acceptable_outlier_percent: The acceptable percentage of outliers in a window (default is 34 percent)
    :param outlier_threshold_high: The percentage of the ADC range above which any value is considered an outlier
    :param outlier_threshold_low: The percentage of the ADC range below which any value is considered an outlier
    :param ecg_threshold_band_loose: The band-loose threshold for the ECG signal, expressed as a percentage of the ADC range
    :param ecg_threshold_slope: The slope threshold of the ECG signal; no two consecutive datapoints may differ by more than this value (expressed as a percentage of the ADC range)
    :param buffer_length: The memory of the data quality computation: this number of past windows also plays a role in deciding the quality of the current window
    :return: An annotated datastream of ECG data quality, specifying the time ranges in which data quality was acceptable or unacceptable
    """
    ecg_quality_stream = DataStream.from_datastream(input_streams=[datastream])
    window_data = window(datastream.data, window_size=window_size)

    ecg_quality = []
    ecg_range = []
    for key, data in window_data.items():
        if len(data) > 0:
            result = compute_data_quality(data, ecg_range, True, ecg_threshold_band_loose,
                                          ecg_threshold_slope, acceptable_outlier_percent,
                                          outlier_threshold_high, outlier_threshold_low,
                                          buffer_length)
            if not ecg_quality:
                ecg_quality.append(DataPoint.from_tuple(data[0].start_time, result, data[-1].start_time))
            else:
                if ecg_quality[-1].sample == result:
                    # extend the previous datapoint when consecutive windows share a label
                    new_point = DataPoint.from_tuple(ecg_quality[-1].start_time, result,
                                                     data[-1].start_time)
                    ecg_quality[-1] = new_point
                else:
                    ecg_quality.append(DataPoint.from_tuple(data[0].start_time, result,
                                                            data[-1].start_time))

    ecg_quality_stream.data = ecg_quality
    return ecg_quality_stream
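# --- Illustrative sketch (not part of the original module) ---
# The quality stream above collapses consecutive windows that share the same
# label into a single spanning datapoint. The same merging logic on plain
# (start, end, label) tuples, runnable without the project classes:
def merge_quality_runs_sketch(window_labels):
    """window_labels: list of (window_start, window_end, label), in time order."""
    merged = []
    for start, end, label in window_labels:
        if merged and merged[-1][2] == label:
            merged[-1] = (merged[-1][0], end, label)  # extend the current run
        else:
            merged.append((start, end, label))
    return merged

# Example:
# merge_quality_runs_sketch([(0, 2, 'GOOD'), (2, 4, 'GOOD'), (4, 6, 'BAD')])
# -> [(0, 4, 'GOOD'), (4, 6, 'BAD')]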
def ground_truth_data_processor(input_string):
    try:
        elements = [x.strip() for x in input_string.split(',')]
        start_timestamp = datetime.fromtimestamp(float(elements[2]) / 1000.0,
                                                 pytz.timezone('US/Central'))
        end_timestamp = datetime.fromtimestamp(float(elements[3]) / 1000.0,
                                               pytz.timezone('US/Central'))
        return DataPoint.from_tuple(start_time=start_timestamp,
                                    sample=(elements[0], elements[1], elements[4]),
                                    end_time=end_timestamp)
    except ValueError:
        return
def data_processor(input_string):
    try:
        [val, ts] = input_string.split(' ')
        timestamp = datetime.fromtimestamp(float(ts) / 1000.0,
                                           pytz.timezone('US/Central'))
        return DataPoint.from_tuple(start_time=timestamp, sample=float(val))
    except ValueError:
        # Skip bad values and filter them later
        # print("ValueError: " + str(input))
        return
def form_data_point_list_from_start_time_sample(start_time_list, sample_list):
    datapoints = []
    if len(start_time_list) == len(sample_list):
        for i, start_time in enumerate(start_time_list):
            datapoints.append(DataPoint.from_tuple(start_time, sample_list[i]))
    else:
        raise Exception('Length of start_time list and sample list mismatch.')
    return datapoints
def setUp(self):
    self.user = uuid4()
    self.dd = [DataDescriptor("float", "milliseconds", None)]
    self.ec = ExecutionContext(88, None, None, None)
    self.annotations = [
        StreamReference("TestAnnotation2", 56),
        StreamReference("TestAnnotation2", 59)
    ]
    self.data = [DataPoint.from_tuple(datetime.datetime.now(), 234)]
def compute_outlier_ecg(ecg_rr: DataStream) -> DataStream:
    """
    Reference - Berntson, Gary G., et al. "An approach to artifact identification: Application to heart period data."
    Psychophysiology 27.5 (1990): 586-598.

    :param ecg_rr: RR interval datastream
    :return: An annotated datastream specifying when the ECG RR-interval datastream is acceptable
    """
    ecg_rr_outlier_stream = DataStream.from_datastream(input_streams=[ecg_rr])
    if not ecg_rr.data:
        ecg_rr_outlier_stream.data = []
        return ecg_rr_outlier_stream

    valid_rr_interval_sample = [i.sample for i in ecg_rr.data if i.sample > .3 and i.sample < 2]
    valid_rr_interval_time = [i.start_time for i in ecg_rr.data if i.sample > .3 and i.sample < 2]
    valid_rr_interval_difference = abs(np.diff(valid_rr_interval_sample))

    # Maximum Expected Difference (MED); Berntson et al. suggest 3.32 * QD,
    # this implementation uses 4.5 * QD, where QD = 0.5 * IQR
    maximum_expected_difference = 4.5 * 0.5 * iqr(valid_rr_interval_difference)

    # Shortest Expected Beat (SEB) = Median Beat - 2.9 * Quartile Deviation
    # Minimal Artifact Difference (MAD) = SEB / 3
    minimal_artifact_difference = (np.median(valid_rr_interval_sample) -
                                   2.9 * .5 * iqr(valid_rr_interval_difference)) / 3

    # the criterion is midway between MED and MAD, floored at 0.2
    criterion_beat_difference = (maximum_expected_difference + minimal_artifact_difference) / 2
    if criterion_beat_difference < .2:
        criterion_beat_difference = .2

    ecg_rr_quality_array = [
        DataPoint.from_tuple(valid_rr_interval_time[0], Quality.ACCEPTABLE,
                             valid_rr_interval_time[0])]

    for data in outlier_computation(valid_rr_interval_time, valid_rr_interval_sample,
                                    criterion_beat_difference):
        if ecg_rr_quality_array[-1].sample == data.sample:
            new_point = DataPoint.from_tuple(ecg_rr_quality_array[-1].start_time,
                                             data.sample, data.start_time)
            ecg_rr_quality_array[-1] = new_point
        else:
            ecg_rr_quality_array.append(data)

    ecg_rr_outlier_stream.data = ecg_rr_quality_array
    return ecg_rr_outlier_stream
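# --- Illustrative sketch (not part of the original module) ---
# The Berntson-style criterion above is built from the spread of successive
# RR-interval differences. A standalone computation on synthetic RR intervals,
# mirroring the constants used in the function; only numpy and scipy's iqr are
# assumed:
import numpy as np
from scipy.stats import iqr

rr = np.array([0.80, 0.82, 0.79, 0.81, 1.40, 0.80, 0.78])  # seconds; one ectopic-like jump
rr_valid = rr[(rr > .3) & (rr < 2)]
rr_diff = np.abs(np.diff(rr_valid))

med = 4.5 * 0.5 * iqr(rr_diff)                              # maximum expected difference
mad = (np.median(rr_valid) - 2.9 * .5 * iqr(rr_diff)) / 3   # minimal artifact difference
criterion = max((med + mad) / 2, .2)                        # floored at 0.2 s, as above
print(criterion)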
def window_std_dev(datapoints: List[DataPoint], window_start: datetime) -> DataPoint:
    """
    :param datapoints: list of datapoints in the window
    :param window_start: start time of the window
    :return: a DataPoint carrying the standard deviation of the window's samples
    """
    data = np.array([dp.sample for dp in datapoints])
    return DataPoint.from_tuple(window_start, np.std(data))
def timestamp_correct_and_sequence_align(datastream_array: list,
                                         sampling_frequency: float = None) -> DataStream:
    result = DataStream.from_datastream(input_streams=datastream_array)

    data = []
    for dp in datastream_array[0].datapoints:
        data.append(DataPoint.from_tuple(dp.start_time, [dp.sample, dp.sample, dp.sample]))

    # TODO: Fix with a proper sequence alignment operation later
    result.datapoints = data
    return result
def test_Window_Valid(self):
    data = []
    for i in range(0, 100):
        data.append(
            DataPoint.from_tuple(datetime.now(tz=self.timezone), None, random()))
        sleep(0.01)
    self.assertEqual(100, len(data))

    result = window_sliding(data, window_size=0.25, window_offset=0.05)
    # TODO: check results of function output
    self.assertIsInstance(result, OrderedDict)
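# --- Illustrative sketch (not part of the original module) ---
# window_sliding (from the project) is exercised above but not shown. A minimal
# stand-in over (timestamp_seconds, sample) pairs that conveys the idea: fixed
# windows of window_size whose starts advance by window_offset. The real
# library function's exact semantics may differ.
from collections import OrderedDict

def window_sliding_sketch(points, window_size, window_offset):
    """points: list of (t, sample) with t in seconds, sorted by t."""
    windows = OrderedDict()
    if not points:
        return windows
    start, t_end = points[0][0], points[-1][0]
    while start <= t_end:
        end = start + window_size
        windows[(start, end)] = [p for p in points if start <= p[0] < end]
        start += window_offset
    return windows

# Example:
# window_sliding_sketch([(0.0, 'a'), (0.1, 'b'), (0.3, 'c')], 0.25, 0.05)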
def data_quality_led(windowed_data):
    """
    :param windowed_data: a datastream with a collection of windows
    :return: a list of window labels
    """
    dps = []
    for key, window in windowed_data.items():
        quality_results = compute_quality(window)
        dps.append(DataPoint(key[0], key[1], quality_results))
    return dps
def setUpClass(cls):
    super(TestRPeakDetect, cls).setUpClass()
    tz = pytz.timezone('US/Eastern')
    cls.ecg = []
    cls._fs = 64.0
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/ecg.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            cls.ecg.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))

    cls.ecg_datastream = DataStream(None, None)
    cls.ecg_datastream.data = cls.ecg
    print(len(cls.ecg_datastream.data))
def setUpClass(cls):
    super(TestVector, cls).setUpClass()
    tz = pytz.timezone('US/Eastern')

    cls.ecg = []
    cls.sample_rate = 64.0
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/ecg.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            cls.ecg.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
    cls.ds = DataStream(None, None, data=cls.ecg)

    accelx = []
    accel_sample_rate = 64.0 / 6
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accelx.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            accelx.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
    accelx = DataStream(None, None, data=accelx)

    accely = []
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accely.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            accely.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
    accely = DataStream(None, None, data=accely)

    accelz = []
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accelz.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            accelz.append(
                DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
    accelz = DataStream(None, None, data=accelz)

    cls.accel = autosense_sequence_align([accelx, accely, accelz], accel_sample_rate)
def setUpClass(cls):
    super(TestVector, cls).setUpClass()
    tz = pytz.timezone('US/Eastern')
    cls.ecg = []
    cls.sample_rate = 64.0
    with gzip.open(os.path.join(os.path.dirname(__file__), 'res/ecg.csv.gz'), 'rt') as f:
        for l in f:
            values = list(map(int, l.split(',')))
            cls.ecg.append(
                DataPoint.from_tuple(
                    datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))