Example #1
def merge_consective_windows(data: OrderedDict) -> List[DataPoint]:
    """
    Merge two or more windows if the time difference between them is 0
    :param data: windowed data (start-time, end-time, sample)
    :return: list of merged DataPoints
    """
    merged_windows = []
    element = None
    start = None
    end = None
    val = None
    if data:
        for key, val in data.items():
            if element is None:
                element = val
                start = key[0]
                end = key[1]
            elif element == val and (end == key[0]):
                element = val
                end = key[1]
            else:
                merged_windows.append(DataPoint(
                    start, end, element))  # [(export_data, end)] = element
                element = val
                start = key[0]
                end = key[1]
        if val is not None:
            merged_windows.append(DataPoint(
                start, end, val))  # merged_windows[(export_data, end)] = val

    return merged_windows
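
A minimal usage sketch of the function above; the DataPoint stand-in below is hypothetical (the real class lives in the cerebralcortex codebase and also carries an offset):

from collections import OrderedDict

# Hypothetical stand-in matching the positional call DataPoint(start, end, sample).
class DataPoint:
    def __init__(self, start_time, end_time=None, sample=None):
        self.start_time, self.end_time, self.sample = start_time, end_time, sample

    def __repr__(self):
        return 'DataPoint(%s, %s, %s)' % (self.start_time, self.end_time, self.sample)

# The first two windows are adjacent (end of one == start of the next) and
# share the same sample, so they merge; the third carries a different sample.
windows = OrderedDict([((0, 10), 'walk'), ((10, 20), 'walk'), ((20, 30), 'run')])
print(merge_consective_windows(windows))
# [DataPoint(0, 20, walk), DataPoint(20, 30, run)]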
Example #2
    def gps_interpolation(self, gps_data: object) -> object:
        """
        Interpolate raw gps data

        :param List(DataPoint) gps_data: list of gps data points
        :return: list of interpolated gps data
        """
        interpolated_data = []
        for i in range(len(gps_data) - 1):
            curr_time_point = gps_data[i].start_time
            curr_sample = gps_data[i].sample
            curr_offset = gps_data[i].offset
            next_time_point = gps_data[i + 1].start_time
            dp = DataPoint(curr_time_point, None, curr_offset, curr_sample)
            interpolated_data.append(dp)
            while ((next_time_point - curr_time_point).total_seconds()
                   / self.IN_SECONDS) > self.INTERPOLATION_TIME:
                new_start_time = curr_time_point + datetime.timedelta(
                    seconds=self.IN_SECONDS)
                new_sample = curr_sample
                new_offset = curr_offset
                dp = DataPoint(new_start_time, None, new_offset, new_sample)
                curr_time_point = new_start_time
                interpolated_data.append(dp)
        return interpolated_data
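
The forward-fill cadence is easier to see in isolation. A standalone sketch, assuming IN_SECONDS = 60 and INTERPOLATION_TIME = 1.0 (neither constant is shown in the snippet above):

import datetime

IN_SECONDS, INTERPOLATION_TIME = 60, 1.0

def forward_fill(times):
    # Insert one timestamp per IN_SECONDS into gaps longer than INTERPOLATION_TIME.
    out = []
    for cur, nxt in zip(times, times[1:]):
        out.append(cur)
        while (nxt - cur).total_seconds() / IN_SECONDS > INTERPOLATION_TIME:
            cur = cur + datetime.timedelta(seconds=IN_SECONDS)
            out.append(cur)
    return out

t0 = datetime.datetime(2018, 1, 1, 12, 0)
print(forward_fill([t0, t0 + datetime.timedelta(minutes=3)]))
# 12:00, 12:01, 12:02 - like gps_interpolation, the final raw point is dropped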
Example #3
def merge_consective_windows(data: OrderedDict) -> List[DataPoint]:
    """
    Merge two or more windows if the time difference between them is 0
    :param data: windowed data (start-time, end-time, sample)
    :return: list of merged DataPoints
    """
    merged_windows = []
    element = None
    start = None
    end = None
    val = None
    if data:
        for key, val in data.items():
            if element is None:
                element = val
                start = key[0]
                end = key[1]
            elif element == val and (end == key[0]):
                element = val
                end = key[1]
            else:
                merged_windows.append(
                    DataPoint(start_time=start, end_time=end, sample=element))
                element = val
                start = key[0]
                end = key[1]
        if val is not None:
            merged_windows.append(
                DataPoint(start_time=start, end_time=end, sample=val))

    return merged_windows
Example #4
def merge_left_right(left_data: List[DataPoint],
                     right_data: List[DataPoint],
                     window_size=10.0):
    data = left_data + right_data
    data.sort(key=lambda x: x.start_time)

    merged_data = []
    win_size = timedelta(seconds=window_size)

    index = 0
    while index < len(data) - 1:
        if data[index].start_time + win_size > data[index + 1].start_time:
            updated_label = get_max_label(data[index].sample,
                                          data[index + 1].sample)
            merged_data.append(
                DataPoint(start_time=data[index].start_time,
                          end_time=data[index].end_time,
                          offset=data[index].offset,
                          sample=updated_label))
            index = index + 2

        else:
            merged_data.append(
                DataPoint(start_time=data[index].start_time,
                          end_time=data[index].end_time,
                          offset=data[index].offset,
                          sample=data[index].sample))
            index = index + 1
    return merged_data
Example #5
def return_neighbour_cycle_correlation(sample: object,
                                       ts: object,
                                       inspiration: object,
                                       unacceptable: object = -9999) -> object:
    """
    Return Neighbour Cycle correlation array of respiration cycles.

    :rtype: object
    :param sample:
    :param ts:
    :param inspiration:

    :return: modified cycle quality,correlation with previous cycle,
    correlation with next cycle
    """
    cycle_quality = []
    corr_pre_cycle = [0] * len(inspiration)
    corr_post_cycle = [0] * len(inspiration)

    corr_pre_cycle[0] = DataPoint.from_tuple(
        start_time=inspiration[0].start_time,
        end_time=inspiration[0].end_time,
        sample=1)

    corr_post_cycle[-1] = DataPoint.from_tuple(
        start_time=inspiration[-1].start_time,
        end_time=inspiration[-1].end_time,
        sample=unacceptable)

    start_time = inspiration[0].start_time.timestamp()
    end_time = inspiration[0].end_time.timestamp()
    current_cycle = sample[np.where((ts >= start_time) & (ts < end_time))[0]]

    for i, dp in enumerate(inspiration):
        start_time = dp.start_time.timestamp()
        end_time = dp.end_time.timestamp()
        sample_temp = sample[np.where((ts >= start_time) & (ts < end_time))[0]]
        ts_temp = ts[np.where((ts >= start_time) & (ts < end_time))[0]]
        cycle_quality.append(
            DataPoint.from_tuple(start_time=dp.start_time,
                                 end_time=dp.end_time,
                                 sample=get_cycle_quality(
                                     sample_temp, ts_temp)))
        if i > 0:
            if cycle_quality[i].sample == Quality.ACCEPTABLE and cycle_quality[
                    i - 1].sample == Quality.ACCEPTABLE:
                corr_pre_cycle[i] = DataPoint.from_tuple(
                    start_time=dp.start_time,
                    end_time=dp.end_time,
                    sample=get_covariance(sample_temp, current_cycle))
            else:
                corr_pre_cycle[i] = DataPoint.from_tuple(
                    start_time=dp.start_time,
                    end_time=dp.end_time,
                    sample=unacceptable)
            corr_post_cycle[i - 1] = corr_pre_cycle[i]
        current_cycle = sample_temp
    return np.array(cycle_quality), np.array(corr_pre_cycle), np.array(
        corr_post_cycle)
Example #6
def compute_outlier_ecg(ecg_rr: DataStream) -> DataStream:
    """
    Reference - Berntson, Gary G., et al. "An approach to artifact identification: Application to heart period data."
    Psychophysiology 27.5 (1990): 586-598.

    :param ecg_rr: RR interval datastream

    :return: An annotated datastream specifying when the ECG RR interval datastream is acceptable
    """

    ecg_rr_outlier_stream = DataStream.from_datastream(input_streams=[ecg_rr])
    if not ecg_rr.data:
        ecg_rr_outlier_stream.data = []
        return ecg_rr_outlier_stream

    valid_rr_interval_sample = [
        i.sample for i in ecg_rr.data if 0.3 < i.sample < 2
    ]
    valid_rr_interval_time = [
        i.start_time for i in ecg_rr.data if 0.3 < i.sample < 2
    ]
    valid_rr_interval_difference = abs(np.diff(valid_rr_interval_sample))

    # Maximum Expected Difference (MED) = 4.5 * Quartile Deviation here,
    # where Quartile Deviation = 0.5 * IQR (the cited reference uses 3.32)
    maximum_expected_difference = 4.5 * 0.5 * iqr(valid_rr_interval_difference)

    # Shortest Expected Beat (SEB) = Median Beat - 2.9 * Quartile Deviation
    # Minimal Artifact Difference (MAD) = SEB / 3
    maximum_artifact_difference = (np.median(valid_rr_interval_sample) - 2.9 *
                                   .5 * iqr(valid_rr_interval_difference)) / 3

    # Midway between MED and MAD is considered
    criterion_beat_difference = (maximum_expected_difference +
                                 maximum_artifact_difference) / 2
    if criterion_beat_difference < .2:
        criterion_beat_difference = .2

    ecg_rr_quality_array = [
        DataPoint.from_tuple(valid_rr_interval_time[0], Quality.ACCEPTABLE,
                             valid_rr_interval_time[0])
    ]

    for data in outlier_computation(valid_rr_interval_time,
                                    valid_rr_interval_sample,
                                    criterion_beat_difference):
        if ecg_rr_quality_array[-1].sample == data.sample:
            new_point = DataPoint.from_tuple(
                ecg_rr_quality_array[-1].start_time, data.sample,
                data.start_time)
            ecg_rr_quality_array[-1] = new_point
        else:
            ecg_rr_quality_array.append(data)

    ecg_rr_outlier_stream.data = ecg_rr_quality_array
    return ecg_rr_outlier_stream
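
A worked sketch of the criterion arithmetic above, on synthetic RR intervals (the numbers are illustrative only, not from any dataset):

import numpy as np
from scipy.stats import iqr

rr = np.array([0.8, 0.82, 0.79, 0.81, 1.4, 0.8, 0.83])  # seconds, synthetic
valid = rr[(rr > .3) & (rr < 2)]
diffs = np.abs(np.diff(valid))

med = 4.5 * 0.5 * iqr(diffs)                           # Maximum Expected Difference
mad = (np.median(valid) - 2.9 * .5 * iqr(diffs)) / 3   # Minimal Artifact Difference
criterion_beat_difference = max((med + mad) / 2, 0.2)  # floored at 0.2 s
print(criterion_beat_difference)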
Example #7
def ecg_data_quality(datastream: DataStream,
                     window_size: float = 2.0,
                     acceptable_outlier_percent: float = .34,
                     outlier_threshold_high: float = .9769,
                     outlier_threshold_low: float = .004884,
                     ecg_threshold_band_loose: float = .01148,
                     ecg_threshold_slope: float = .02443,
                     buffer_length: int = 3) -> DataStream:
    """

    :param datastream: Input ECG datastream
    :param window_size: Window size specifying the number of seconds the datastream is divided to check for data quality
    :param acceptable_outlier_percent: The acceptable outlier percentage in a window default is 34 percent
    :param outlier_threshold_high: The percentage of ADC range above which any value is considered an outlier
    :param outlier_threshold_low: The percentage of ADC range below which any value is considered an outlier
    :param ecg_threshold_band_loose: The Band Loose Threshold for ECG signal expressed in the percentage of ADC range
    :param ecg_threshold_slope: The Slope threshold of ECG signal- No consecutive DataPoints can have this
    difference in values(expressed as percentage of ADC range)
    :param buffer_length: This specifies the memory of the data quality computation. Meaning this number of past windows
    will also have a role to decide the quality of the current window

    :return: An Annotated Datastream of ECG Data quality specifying the time ranges when data quality was acceptable/non-acceptable
    """

    ecg_quality_stream = DataStream.from_datastream(input_streams=[datastream])
    window_data = window(datastream.data, window_size=window_size)

    ecg_quality = []
    ecg_range = []
    for key, data in window_data.items():
        if len(data) > 0:
            result = compute_data_quality(data, ecg_range, True,
                                          ecg_threshold_band_loose,
                                          ecg_threshold_slope,
                                          acceptable_outlier_percent,
                                          outlier_threshold_high,
                                          outlier_threshold_low, buffer_length)
            if not ecg_quality:
                ecg_quality.append(
                    DataPoint.from_tuple(data[0].start_time, result,
                                         data[-1].start_time))
            else:
                if ecg_quality[-1].sample == result:
                    new_point = DataPoint.from_tuple(
                        ecg_quality[-1].start_time, result,
                        data[-1].start_time)
                    ecg_quality[-1] = new_point
                else:
                    ecg_quality.append(
                        DataPoint.from_tuple(data[0].start_time, result,
                                             data[-1].start_time))

    ecg_quality_stream.data = ecg_quality

    return ecg_quality_stream
Example #8
def autosense_sequence_align(datastreams: List[DataStream],
                             sampling_frequency: float) -> DataStream:
    result = DataStream.from_datastream(input_streams=datastreams)
    result.data = []

    if len(datastreams) == 0:
        return result

    start_time = None
    for ds in datastreams:
        ts = ds.data[0].start_time
        if not start_time:
            start_time = ts
        elif start_time < ts:
            start_time = ts

    start_time -= datetime.timedelta(seconds=1.0 / sampling_frequency)

    data_block = []
    max_index = np.inf
    for ds in datastreams:
        d = [i for i in ds.data if i.start_time > start_time]
        if len(d) < max_index:
            max_index = len(d)
        data_block.append(d)

    data_array = np.array(data_block)

    dimensions = data_array.shape[0]
    for i in range(0, max_index):
        sample = [data_array[d][i].sample for d in range(0, dimensions)]
        result.data.append(DataPoint.from_tuple(data_array[0][i].start_time, sample))

    return result
Example #9
    def process(self, user: str, all_days):
        """
        :param user: user id string
        :param all_days: list of days to compute

        """
        if not list(all_days):
            return

        if self.CC is None:
            return

        if user is None:
            return

        streams = self.CC.get_user_streams(user)

        if streams is None:
            return

        user_id = user

        if respiration_cycle_feature not in streams:
            return

        for day in all_days:
            respiration_cycle_stream = self.CC.get_stream(
                streams[respiration_cycle_feature]["identifier"],
                day=day, user_id=user_id, localtime=False)
            if len(respiration_cycle_stream.data) < window_size:
                continue
            offset = respiration_cycle_stream.data[0].offset
            windowed_data = window_sliding(respiration_cycle_stream.data,
                                           window_size=window_size,
                                           window_offset=window_offset)
            final_stress = []
            model, scaler = get_model()
            for key in windowed_data.keys():
                st = key[0]
                et = key[1]
                sample = np.array([i.sample for i in windowed_data[key]])
                if np.shape(sample)[0] > 1:
                    sample_final = np.zeros((1, 14))
                    for k in range(14):
                        sample_final[0, k] = np.median(sample[:, k])
                    sample_transformed = scaler.transform(sample_final)
                    stress = model.predict(sample_transformed)
                    final_stress.append(DataPoint.from_tuple(start_time=st,
                                                             end_time=et,
                                                             sample=stress[0],
                                                             offset=offset))
            json_path = 'stress_respiration.json'
            self.store_stream(json_path,
                              [streams[respiration_cycle_feature]],
                              user_id,
                              final_stress, localtime=False)
Example #10
def smooth(data: List[DataPoint], span: int = 5) -> List[DataPoint]:
    """

    :rtype: object
    :param data:
    :param span:
    :return:
    """
    if data is None or len(data) == 0:
        return []

    sample = [i.sample for i in data]
    sample_middle = np.convolve(sample, np.ones(span, dtype=int),
                                'valid') / span
    divisor = np.arange(1, span - 1, 2)
    sample_start = np.cumsum(sample[:span - 1])[::2] / divisor
    sample_end = (np.cumsum(sample[:-span:-1])[::2] / divisor)[::-1]
    sample_smooth = np.concatenate((sample_start, sample_middle, sample_end))

    data_smooth = []

    if len(sample_smooth) == len(data):
        for i, item in enumerate(data):
            dp = DataPoint.from_tuple(sample=sample_smooth[i],
                                      start_time=item.start_time,
                                      end_time=item.end_time)
            data_smooth.append(dp)
    else:
        raise Exception(
            "Smoothed data length does not match with original data length.")

    return data_smooth
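
The edge handling is worth a numeric check: on a linear ramp the start, middle, and end pieces reproduce the input exactly, which confirms the shrinking-window averages at the boundaries:

import numpy as np

sample = [1, 2, 3, 4, 5, 6, 7]
span = 5
middle = np.convolve(sample, np.ones(span, dtype=int), 'valid') / span
divisor = np.arange(1, span - 1, 2)                  # [1, 3]
start = np.cumsum(sample[:span - 1])[::2] / divisor  # [1.0, 2.0]
end = (np.cumsum(sample[:-span:-1])[::2] / divisor)[::-1]
print(np.concatenate((start, middle, end)))
# [1. 2. 3. 4. 5. 6. 7.]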
Example #11
def get_recovery(rip: List[DataPoint], baseline: List[DataPoint],
                 Fs) -> List[DataPoint]:
    """
    matches respiration raw with baseline signal and returns the recovered
    signal
    :param rip: respiration raw
    :param baseline:
    :param Fs:
    :return: respiration recovered signal
    """
    rip_index_dict = {
        rip[i].start_time.timestamp(): i
        for i in range(len(rip))
    }
    baseline_index_dict = {
        baseline[i].start_time.timestamp(): i
        for i in range(len(baseline))
    }
    common_ts = np.intersect1d([i.start_time.timestamp() for i in rip],
                               [i.start_time.timestamp() for i in baseline])
    baseline_ind = np.array([baseline_index_dict[i] for i in common_ts])
    rip_ind = np.array([rip_index_dict[i] for i in common_ts])
    rip = np.array(rip)[rip_ind]
    baseline = np.array(baseline)[baseline_ind]
    recovered = recover_rip_rawwithmeasuredref(
        [i.sample for i in rip], [i.sample for i in baseline], Fs)
    recovered_dp_list = [
        DataPoint.from_tuple(start_time=rip[i].start_time,
                             sample=recovered[i],
                             offset=rip[i].offset)
        for i in range(len(recovered))
    ]
    return recovered_dp_list
Example #12
    def save_data(self, decoded_data, offset, tzinfo, json_path,
                  all_streams, user_id, localtime=False):
        final_data = []
        for i in range(len(decoded_data[:, 0])):
            final_data.append(DataPoint.from_tuple(
                start_time=datetime.utcfromtimestamp(
                    decoded_data[i, 0] / 1000).replace(tzinfo=tzinfo),
                offset=offset, sample=decoded_data[i, 1:]))
        print(final_data[0], all_streams)
        self.store_stream(json_path, [all_streams],
                          user_id, final_data, localtime=localtime)
Example #13
def calculate_yaw(accel_data: List[DataPoint]):
    yaw_list = []
    for dp in accel_data:
        ax = dp.sample[0]
        ay = dp.sample[1]
        yw = 180 * math.atan2(ay, ax) / math.pi
        yaw_list.append(
            DataPoint(start_time=dp.start_time, end_time=dp.end_time,
                      offset=dp.offset, sample=yw))

    return yaw_list
Example #14
def calculate_pitch(accel_data: List[DataPoint]):
    pitch_list = []
    for dp in accel_data:
        ay = dp.sample[1]
        az = dp.sample[2]
        ptch = 180 * math.atan2(-ay, -az) / math.pi
        pitch_list.append(
            DataPoint(start_time=dp.start_time, end_time=dp.end_time,
                      offset=dp.offset, sample=ptch))

    return pitch_list
Example #15
def calculate_roll(accel_data: List[DataPoint]):
    roll_list = []
    for dp in accel_data:
        ax = dp.sample[0]
        ay = dp.sample[1]
        az = dp.sample[2]
        rll = 180 * math.atan2(ax, math.sqrt(ay * ay + az * az)) / math.pi
        roll_list.append(
            DataPoint(start_time=dp.start_time, end_time=dp.end_time,
                      offset=dp.offset, sample=rll))

    return roll_list
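
A quick sanity check of the three orientation formulas above, on a tilted accelerometer reading (the pose is made up; units cancel inside atan2):

import math

ax, ay, az = 0.5, 0.5, -0.707  # gravity mostly along -z, tilted in x and y
yaw = 180 * math.atan2(ay, ax) / math.pi
pitch = 180 * math.atan2(-ay, -az) / math.pi
roll = 180 * math.atan2(ax, math.sqrt(ay * ay + az * az)) / math.pi
print(yaw, pitch, roll)  # ~45.0, ~-35.3, ~30.0 degrees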
Example #16
def data_quality_led(windowed_data):
    """

    :param windowed_data: a datastream with a collection of windows
    :return: a list of window labels
    """
    window_list = windowed_data
    dps = []
    for key, window in window_list.items():
        quality_results = compute_quality(window)
        dps.append(DataPoint(key[0], key[1], quality_results))

    return dps
Example #17
def compute_candidate_features(gyr_intersections, gyr_mag_data, roll_list,
                               pitch_list, yaw_list):
    '''
    Computes the feature vector for a single hand-to-mouth gesture,
    mainly statistical features of hand orientation.
    :param gyr_intersections: candidate gesture segments (start/end indices)
    :param gyr_mag_data: gyroscope magnitude data
    :param roll_list: roll angle DataPoints
    :param pitch_list: pitch angle DataPoints
    :param yaw_list: yaw angle DataPoints
    :return: list of feature-vector DataPoints
    '''
    all_features = []
    offset = gyr_mag_data[0].offset

    for I in gyr_intersections:
        start_time = I.start_time
        end_time = I.end_time
        start_index = I.sample[0]
        end_index = I.sample[1]

        temp_roll = [roll_list[i].sample for i in range(start_index, end_index)]
        temp_pitch = [pitch_list[i].sample for i in
                      range(start_index, end_index)]
        temp_yaw = [yaw_list[i].sample for i in range(start_index, end_index)]
        Gmag_sub = [gyr_mag_data[i].sample for i in
                    range(start_index, end_index)]

        duration = 1000 * (
                end_time - start_time).total_seconds()  # convert to milliseconds

        roll_mean, roll_median, roll_sd, roll_quartile = compute_basic_statistical_features(
            temp_roll)
        pitch_mean, pitch_median, pitch_sd, pitch_quartile = compute_basic_statistical_features(
            temp_pitch)
        yaw_mean, yaw_median, yaw_sd, yaw_quartile = compute_basic_statistical_features(
            temp_yaw)

        gyro_mean, gyro_median, gyro_sd, gyro_quartile = compute_basic_statistical_features(
            Gmag_sub)

        feature_vector = [duration,
                          roll_mean, roll_median, roll_sd, roll_quartile,
                          pitch_mean, pitch_median, pitch_sd, pitch_quartile,
                          yaw_mean, yaw_median, yaw_sd, yaw_quartile,
                          gyro_mean, gyro_median, gyro_sd, gyro_quartile]

        all_features.append(
            DataPoint(start_time=start_time, end_time=end_time, offset=offset,
                      sample=feature_vector))

    return all_features
Example #18
def window_std_dev(data: List[DataPoint],
                   window_start: datetime) -> DataPoint:
    """

    :param data:
    :param window_start:
    :return:
    """

    if data is None or len(data) < 2:
        raise Exception('Standard deviation requires at least 2 values to compute')

    data_points = np.array([dp.sample for dp in data])
    return DataPoint.from_tuple(window_start, np.std(data_points))
Example #19
def moving_average_convergence_divergence(
        slow_moving_average_data: List[DataPoint],
        fast_moving_average_data: List[DataPoint], THRESHOLD: float,
        near: int):
    '''
    Generates intersection points of two moving-average signals.
    :param slow_moving_average_data: slow moving average signal
    :param fast_moving_average_data: fast moving average signal
    :param THRESHOLD: cut-off value
    :param near: number of nearest points to ignore, i.e. the gap between
    two segments should be greater than near
    :return: list of intersection DataPoints
    '''
    slow_moving_average = np.array(
        [data.sample for data in slow_moving_average_data])
    fast_moving_average = np.array(
        [data.sample for data in fast_moving_average_data])

    index_list = [0] * len(slow_moving_average)
    cur_index = 0

    for index in range(len(slow_moving_average)):
        diff = slow_moving_average[index] - fast_moving_average[index]
        if diff > THRESHOLD:
            if cur_index == 0:
                index_list[cur_index] = index
                cur_index = cur_index + 1
                index_list[cur_index] = index
            else:
                if index <= index_list[cur_index] + near:
                    index_list[cur_index] = index
                else:
                    cur_index = cur_index + 1
                    index_list[cur_index] = index
                    cur_index = cur_index + 1
                    index_list[cur_index] = index

    intersection_points = []
    if cur_index > 0:
        for index in range(0, cur_index, 2):
            start_index = index_list[index]
            end_index = index_list[index + 1]
            start_time = slow_moving_average_data[start_index].start_time
            end_time = slow_moving_average_data[end_index].start_time
            intersection_points.append(
                DataPoint(start_time=start_time,
                          end_time=end_time,
                          sample=[index_list[index], index_list[index + 1]]))

    return intersection_points
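
A tiny end-to-end check, using a hypothetical DataPoint stand-in (the real class is in cerebralcortex): the slow signal exceeds the fast one by more than THRESHOLD at indices 2-4, which come out as a single segment:

import datetime

class DataPoint:  # minimal stand-in for this demo only
    def __init__(self, start_time=None, end_time=None, sample=None):
        self.start_time, self.end_time, self.sample = start_time, end_time, sample

t0 = datetime.datetime(2018, 1, 1)
mk = lambda i, v: DataPoint(start_time=t0 + datetime.timedelta(seconds=i), sample=v)
slow = [mk(i, v) for i, v in enumerate([0, 0, 5, 5, 5, 0, 0])]
fast = [mk(i, 0) for i in range(7)]
points = moving_average_convergence_divergence(slow, fast, THRESHOLD=1, near=2)
print([p.sample for p in points])  # [[2, 4]]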
Example #20
def compute_data_quality(data: list,
                         range_memory: list,
                         signal_type: bool,
                         threshold_band_loose: float,
                         threshold_slope: float,
                         acceptable_outlier_percent: float = .34,
                         outlier_threshold_high: float = .9769,
                         outlier_threshold_low: float = .004884,
                         buffer_length: int = 3,
                         adc_range: int = 4095) -> Quality:
    """

    :param data: A window of ECG/Respiration signal. An array of datapoints.
    :param range_memory: The array containing the range of each window on a sequential basis
    :param signal_type: The check for if the signal passed is respiration or ECG
    :param threshold_band_loose: The Band Loose Threshold for ECG/Respiration signal expressed in the percentage of ADC range
    :param threshold_slope: The Slope threshold of ECG/Respiration signal- No consecutive DataPoints can have this
    difference in values(expressed as percentage of ADC range)
    :param acceptable_outlier_percent: The acceptable outlier percentage in a window default is 34 percent
    :param outlier_threshold_high: The percentage of ADC range above which any value is considered an outlier
    :param outlier_threshold_low: The percentage of ADC range below which any value is considered an outlier
    :param buffer_length: This specifies the memory of the data quality computation. Meaning this number of past windows
    will also have a role to decide the quality of the current window
    :param adc_range: The maximum ADC value possible in the system

    :return: The data quality of the window passed to the method
    """
    no_of_outliers, max_value, min_value = classify_data_points(
        data, signal_type, threshold_slope, outlier_threshold_high,
        outlier_threshold_low)

    segment_class = classify_segment(data, no_of_outliers,
                                     acceptable_outlier_percent)

    range_memory.append(
        DataPoint.from_tuple(data[0].start_time, max_value - min_value))

    range_values = range_memory[(-1) * buffer_length:]
    amplitude_small = classify_buffer(range_values, threshold_band_loose,
                                      buffer_length)

    if segment_class == Quality.UNACCEPTABLE:
        return Quality.UNACCEPTABLE
    elif 2 * amplitude_small > buffer_length:
        return Quality.UNACCEPTABLE
    elif (max_value - min_value) <= threshold_band_loose * adc_range:
        return Quality.UNACCEPTABLE
    else:
        return Quality.ACCEPTABLE
Example #21
def gravityFilter_function(accl_data, gyro_data, sampling_freq,
                           is_gyro_in_degree):
    accl = [value.sample for value in accl_data]
    gyro = [value.sample for value in gyro_data]

    # if gyro in degree
    if is_gyro_in_degree:
        gyro = [[
            degree_to_radian(v[0]),
            degree_to_radian(v[1]),
            degree_to_radian(v[2])
        ] for v in gyro]

    #     Fs = 16.0
    AHRS_motion = MadgwickAHRS(sampleperiod=(1.0 / sampling_freq),
                               beta=MADGWICKFILTER_BETA)

    quaternion_motion = []

    for t, value in enumerate(accl):
        AHRS_motion.update_imu(gyro[t], accl[t])
        quaternion_motion.append(AHRS_motion.quaternion.q)

    # Filter out gravity using the wrist orientation quaternions.
    wrist_orientation = [[v[1], v[2], v[3], v[0]] for v in quaternion_motion]

    Acc_sync_filtered = []
    gravity_reference = [0, 0, 1, 0]
    for t, value in enumerate(wrist_orientation):
        Q_temp = Quaternion_multiplication(Quatern_Conjugate(value),
                                           gravity_reference)
        gravity_temp = Quaternion_multiplication(Q_temp, wrist_orientation[t])

        x_filtered = accl[t][0] - gravity_temp[0]
        y_filtered = accl[t][1] - gravity_temp[1]
        z_filtered = accl[t][2] - gravity_temp[2]

        Acc_sync_filtered.append([x_filtered, y_filtered, z_filtered])

    accl_filtered_data = [
        DataPoint(start_time=value.start_time,
                  end_time=value.end_time,
                  offset=value.offset,
                  sample=Acc_sync_filtered[index])
        for index, value in enumerate(accl_data)
    ]
    return accl_filtered_data
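
The quaternion helpers called above are not shown. Given the [x, y, z, w] component order this snippet uses, they presumably match the standard conjugate and Hamilton product; a sketch under that assumption (not the project's actual implementations):

def Quatern_Conjugate(q):
    # Conjugate of a quaternion stored as [x, y, z, w].
    x, y, z, w = q
    return [-x, -y, -z, w]

def Quaternion_multiplication(a, b):
    # Hamilton product of quaternions stored as [x, y, z, w].
    ax, ay, az, aw = a
    bx, by, bz, bw = b
    return [aw * bx + bw * ax + ay * bz - az * by,
            aw * by + bw * ay + az * bx - ax * bz,
            aw * bz + bw * az + ax * by - ay * bx,
            aw * bw - ax * bx - ay * by - az * bz]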
Example #22
def magnitude(data: List[DataPoint]) -> List[DataPoint]:
    """

    :param list[DataPoint] data:
    :return: magnitude of the data
    """
    if data is None or len(data) == 0:
        result = []
        return result

    result_data = [
        DataPoint(start_time=value.start_time,
                  offset=value.offset,
                  sample=norm(value.sample)) for value in data
    ]

    return result_data
Example #23
def merge_two_datastream(accel: List[DataPoint], gyro: List[DataPoint]):
    A = np.array(
        [[dp.start_time.timestamp(), dp.sample[0], dp.sample[1], dp.sample[2]]
         for dp in accel])
    G = np.array(
        [[dp.start_time.timestamp(), dp.sample[0], dp.sample[1], dp.sample[2]]
         for dp in gyro])
    At = A[:, 0]

    Gt = G[:, 0]
    Gx = G[:, 1]
    Gy = G[:, 2]
    Gz = G[:, 3]
    i = 0
    j = 0
    _Gx = [0] * len(At)
    _Gy = [0] * len(At)
    _Gz = [0] * len(At)
    while (i < len(At)) and (j < len(Gt)):
        while Gt[j] < At[i]:
            j = j + 1
            if j >= len(Gt):
                break
        if j < len(Gt):
            if (At[i] == Gt[j]) or (j == 0):
                _Gx[i] = Gx[j]
                _Gy[i] = Gy[j]
                _Gz[i] = Gz[j]
            else:
                _Gx[i] = getInterpoletedValue(Gx[j - 1], Gx[j], Gt[j - 1],
                                              Gt[j], At[i])
                _Gy[i] = getInterpoletedValue(Gy[j - 1], Gy[j], Gt[j - 1],
                                              Gt[j], At[i])
                _Gz[i] = getInterpoletedValue(Gz[j - 1], Gz[j], Gt[j - 1],
                                              Gt[j], At[i])
        i = i + 1

    gyro = [
        DataPoint(start_time=dp.start_time,
                  end_time=dp.end_time,
                  offset=dp.offset,
                  sample=[_Gx[i], _Gy[i], _Gz[i]])
        for i, dp in enumerate(accel)
    ]
    return gyro
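
getInterpoletedValue is not shown here; from its call sites it is presumably plain linear interpolation of a gyro axis between the two bracketing timestamps. A sketch under that assumption:

def getInterpoletedValue(v_prev, v_next, t_prev, t_next, t):
    # Linearly interpolate the value at time t, with t_prev <= t <= t_next.
    return v_prev + (v_next - v_prev) * (t - t_prev) / (t_next - t_prev)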
Example #24
def accelerometer_features(
        accel: DataStream,
        window_length: float = 10.0,
        activity_threshold: float = 0.21,
        percentile_low: int = 1,
        percentile_high: int = 99
) -> Tuple[DataStream, DataStream, DataStream]:
    """

    References:
        Figure 3: http://www.cs.memphis.edu/~santosh/Papers/Timing-JIT-UbiComp-2014.pdf

    :param percentile_high:
    :param percentile_low:
    :param accel:
    :param window_length:
    :param activity_threshold:
    :return:
    """
    accelerometer_magnitude = magnitude(normalize(accel))

    accelerometer_win_mag_deviations_data = []
    for key, data in window(accelerometer_magnitude.data,
                            window_length).items():
        accelerometer_win_mag_deviations_data.append(
            window_std_dev(data, key[0]))

    accelerometer_win_mag_deviations = DataStream.from_datastream([accel])
    accelerometer_win_mag_deviations.data = accelerometer_win_mag_deviations_data

    am_values = np.array([dp.sample for dp in accelerometer_magnitude.data])
    low_limit = np.percentile(am_values, percentile_low)
    high_limit = np.percentile(am_values, percentile_high)
    value_range = high_limit - low_limit

    accel_activity_data = []
    for dp in accelerometer_win_mag_deviations_data:
        comparison = dp.sample > (low_limit + activity_threshold * value_range)
        accel_activity_data.append(
            DataPoint.from_tuple(dp.start_time, comparison))

    accel_activity = DataStream.from_datastream([accel])
    accel_activity.data = accel_activity_data

    return accelerometer_magnitude, accelerometer_win_mag_deviations, accel_activity
Example #25
    def get_context_interaction(self, before_survey_time: dict, user: uuid, phone_app_cat_usage: dict,
                                call_duration_cu: dict,
                                voice_feature: dict):
        """
        Compute a user interaction right before (10 minutes) (s)he started qualtrics context survey
        :param before_survey_time:
        :param user:
        :param phone_app_cat_usage:
        :param call_duration_cu:
        :param voice_feature:
        """

        # Metadata is not accurate, that's why I put sample output of all input streams here
        # category sample - [DataPoint(2018-01-15 22:45:24.203000+00:00, 2018-01-15 22:50:25.303000+00:00, 0, Communication)]
        # call duration - [DataPoint(2017-11-05 14:30:55.689000+00:00, None, -21600000, 53.0)]
        # voice feature - 1 for voice and 0 for no voice - per minute

        start_data_time = before_survey_time.get("start_time", None)
        end_data_time = before_survey_time.get("end_time", None)
        offset = before_survey_time.get("offset", None)

        talking = is_talking(voice_feature.get("data", []), start_data_time, end_data_time)
        on_phone = is_on_phone(call_duration_cu.get("data", []), start_data_time, end_data_time)
        on_social_app = is_on_social_app(phone_app_cat_usage.get("data", []), start_data_time, end_data_time)
        sample = [0, 0, 0]

        if on_social_app:
            sample[1] = 1
        elif talking or on_phone:
            sample[0] = 1
        else:
            sample[2] = 1

        dp = [DataPoint(start_time=start_data_time, end_time=end_data_time, offset=offset, sample=sample)]

        input_streams = []
        input_streams.extend(get_input_streams(phone_app_cat_usage))
        input_streams.extend(get_input_streams(call_duration_cu))
        input_streams.extend(get_input_streams(voice_feature))

        self.store_stream(filepath="context_interaction.json",
                          input_streams=input_streams,
                          user_id=user,
                          data=dp, localtime=False)
Example #26
def magnitude(datastream: DataStream) -> DataStream:
    """

    :param datastream:
    :return:
    """
    result = DataStream.from_datastream(input_streams=[datastream])
    if datastream.data is None or len(datastream.data) == 0:
        result.data = []
        return result

    input_data = np.array([i.sample for i in datastream.data])

    data = norm(input_data, axis=1).tolist()

    result.data = [DataPoint.from_tuple(start_time=v.start_time, sample=data[i])
                   for i, v in enumerate(datastream.data)]

    return result
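
norm here is presumably numpy.linalg.norm; with axis=1 it collapses each row (one vector sample) into a scalar magnitude:

import numpy as np
from numpy.linalg import norm

print(norm(np.array([[3, 4], [6, 8]]), axis=1).tolist())  # [5.0, 10.0]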
Example #27
def moving_average_curve(data: List[DataPoint],
                         window_length: int) -> List[DataPoint]:
    """
    Moving average curve from filtered (using moving average) samples.

    :return: mac
    :param data:
    :param window_length:
    """
    if data is None or len(data) == 0:
        return []

    sample = [i.sample for i in data]
    mac = []
    for i in range(window_length, len(sample) - (window_length + 1)):
        sample_avg = np.mean(sample[i - window_length:i + window_length + 1])
        mac.append(DataPoint.from_tuple(sample=sample_avg, start_time=data[i].start_time, end_time=data[i].end_time))

    return mac
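
Note the trimming: the loop bounds above produce no MAC point for the first window_length samples or the last window_length + 1, as a quick numeric check shows:

import numpy as np

sample = [1, 2, 3, 4, 5, 6, 7]
w = 2  # window_length
mac = [np.mean(sample[i - w:i + w + 1])
       for i in range(w, len(sample) - (w + 1))]
print(mac)  # [3.0, 4.0] - seven inputs yield only two points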
Example #28
def outlier_computation(valid_rr_interval_time: list,
                        valid_rr_interval_sample: list,
                        criterion_beat_difference: float):
    """
    This function implements the rr interval outlier calculation through comparison with the criterion
    beat difference and consecutive differences with the previous and next sample

    :param valid_rr_interval_time: A python array of rr interval time
    :param valid_rr_interval_sample: A python array of rr interval samples
    :param criterion_beat_difference: A threshold calculated from the RR interval data passed

    yields: The quality of each data point in the RR interval array
    """
    standard_rr_interval_sample = valid_rr_interval_sample[0]
    previous_rr_interval_quality = Quality.ACCEPTABLE

    for i in range(1, len(valid_rr_interval_sample) - 1):

        rr_interval_diff_with_last_good = abs(standard_rr_interval_sample - valid_rr_interval_sample[i])
        rr_interval_diff_with_prev_sample = abs(valid_rr_interval_sample[i - 1] - valid_rr_interval_sample[i])
        rr_interval_diff_with_next_sample = abs(valid_rr_interval_sample[i] - valid_rr_interval_sample[i + 1])

        if previous_rr_interval_quality == Quality.UNACCEPTABLE and rr_interval_diff_with_last_good < criterion_beat_difference:
            yield DataPoint.from_tuple(valid_rr_interval_time[i], Quality.ACCEPTABLE, valid_rr_interval_time[i])
            previous_rr_interval_quality = Quality.ACCEPTABLE
            standard_rr_interval_sample = valid_rr_interval_sample[i]

        elif previous_rr_interval_quality == Quality.UNACCEPTABLE and rr_interval_diff_with_last_good > criterion_beat_difference >= rr_interval_diff_with_prev_sample and rr_interval_diff_with_next_sample <= criterion_beat_difference:
            yield DataPoint.from_tuple(valid_rr_interval_time[i], Quality.ACCEPTABLE, valid_rr_interval_time[i])
            previous_rr_interval_quality = Quality.ACCEPTABLE
            standard_rr_interval_sample = valid_rr_interval_sample[i]

        elif previous_rr_interval_quality == Quality.UNACCEPTABLE and rr_interval_diff_with_last_good > criterion_beat_difference and (
                        rr_interval_diff_with_prev_sample > criterion_beat_difference or rr_interval_diff_with_next_sample > criterion_beat_difference):
            yield DataPoint.from_tuple(valid_rr_interval_time[i], Quality.UNACCEPTABLE, valid_rr_interval_time[i])
            previous_rr_interval_quality = Quality.UNACCEPTABLE

        elif previous_rr_interval_quality == Quality.ACCEPTABLE and rr_interval_diff_with_prev_sample <= criterion_beat_difference:
            yield DataPoint.from_tuple(valid_rr_interval_time[i], Quality.ACCEPTABLE, valid_rr_interval_time[i])
            previous_rr_interval_quality = Quality.ACCEPTABLE
            standard_rr_interval_sample = valid_rr_interval_sample[i]

        elif previous_rr_interval_quality == Quality.ACCEPTABLE and rr_interval_diff_with_prev_sample > criterion_beat_difference:
            yield DataPoint.from_tuple(valid_rr_interval_time[i], Quality.UNACCEPTABLE, valid_rr_interval_time[i])
            previous_rr_interval_quality = Quality.UNACCEPTABLE

        else:
            yield DataPoint.from_tuple(valid_rr_interval_time[i], Quality.UNACCEPTABLE, valid_rr_interval_time[i])
Example #29
def generate_smoking_episode(puff_labels) -> List[DataPoint]:
    '''
    Generates smoking episodes from classified puffs.
    :param puff_labels: list of DataPoints carrying puff classifications
    :return: list of smoking episodes
    '''
    only_puffs = [dp for dp in puff_labels if dp.sample > 0]

    smoking_episode_data = []

    cur_index = 0
    while cur_index < len(only_puffs):
        temp_index = cur_index
        dp = only_puffs[temp_index]
        prev = dp
        temp_index = temp_index + 1
        if temp_index >= len(only_puffs):
            break
        while ((only_puffs[temp_index].start_time - dp.start_time <=
                timedelta(seconds=MINIMUM_TIME_DIFFERENCE_FIRST_AND_LAST_PUFFS))
               or (only_puffs[temp_index].start_time - prev.start_time <
                   timedelta(seconds=MINIMUM_INTER_PUFF_DURATION))):
            prev = only_puffs[temp_index]
            temp_index = temp_index + 1
            if temp_index >= len(only_puffs):
                break
        temp_index = temp_index - 1
        if (temp_index - cur_index + 1) >= MINIMUM_PUFFS_IN_EPISODE:
            wrist = get_smoking_wrist(only_puffs, cur_index, temp_index)
            smoking_episode_data.append(
                DataPoint(start_time=only_puffs[cur_index].start_time,
                          end_time=only_puffs[temp_index].end_time,
                          offset=only_puffs[cur_index].offset,
                          sample=wrist))

            cur_index = temp_index + 1
        else:
            cur_index = cur_index + 1
    return smoking_episode_data
Example #30
def smooth(data: List[DataPoint],
           span: int = 5) -> List[DataPoint]:
    """
    Smooths data using moving average filter over a span.
    The first few elements of data_smooth are given by
    data_smooth(1) = data(1)
    data_smooth(2) = (data(1) + data(2) + data(3))/3
    data_smooth(3) = (data(1) + data(2) + data(3) + data(4) + data(5))/5
    data_smooth(4) = (data(2) + data(3) + data(4) + data(5) + data(6))/5

    for more details follow the below links:
    https://www.mathworks.com/help/curvefit/smooth.html
    http://stackoverflow.com/a/40443565

    :return: data_smooth
    :param data:
    :param span:
    """

    if data is None or len(data) == 0:
        return []

    sample = [i.sample for i in data]
    sample_middle = np.convolve(sample, np.ones(span, dtype=int), 'valid') / span
    divisor = np.arange(1, span - 1, 2)
    sample_start = np.cumsum(sample[:span - 1])[::2] / divisor
    sample_end = (np.cumsum(sample[:-span:-1])[::2] / divisor)[::-1]
    sample_smooth = np.concatenate((sample_start, sample_middle, sample_end))

    data_smooth = []

    if len(sample_smooth) == len(data):
        for i, item in enumerate(data):
            dp = DataPoint.from_tuple(sample=sample_smooth[i], start_time=item.start_time, end_time=item.end_time)
            data_smooth.append(dp)
    else:
        raise Exception("Smoothed data length does not match with original data length.")

    return data_smooth