Code example #1
    def test_accelerometer_features(self):
        ds = autosense_sequence_align([self.accelx_ds, self.accely_ds, self.accelz_ds], self.sampling_frequency)

        accelerometer_magnitude, accelerometer_win_mag_deviations, accel_activity = accelerometer_features(ds)

        self.assertEqual(len(accelerometer_magnitude.data), 62870)
        self.assertEqual(len(accelerometer_win_mag_deviations.data), 687)
        self.assertEqual(len(accel_activity.data), 687)

        self.assertEqual(len([dp for dp in accel_activity.data if dp.sample]), 0)  # TODO: Is this correct?

Code example #2
def cStress(rdd: RDD) -> RDD:

    # TODO: TWH Temporary
    ecg_sampling_frequency = 64.0
    rip_sampling_frequency = 64.0
    accel_sampling_frequency = 64.0 / 6.0

    # Timestamp correct datastreams
    ecg_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['ecg'],
                          sampling_frequency=ecg_sampling_frequency)))
    rip_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['rip'],
                          sampling_frequency=rip_sampling_frequency)))

    accelx_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accelx'],
                          sampling_frequency=accel_sampling_frequency)))
    accely_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accely'],
                          sampling_frequency=accel_sampling_frequency)))
    accelz_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accelz'],
                          sampling_frequency=accel_sampling_frequency)))

    accel_group = accelx_corrected.join(accely_corrected).join(
        accelz_corrected).map(fix_two_joins)
    accel = accel_group.map(lambda ds: (
        ds[0],
        autosense_sequence_align(datastreams=[ds[1][0], ds[1][1], ds[1][2]],
                                 sampling_frequency=accel_sampling_frequency)))

    # Accelerometer Feature Computation
    accel_features = accel.map(
        lambda ds: (ds[0], accelerometer_features(ds[1], window_length=10.0)))

    # rip features
    peak_valley = rip_corrected.map(
        lambda ds: (ds[0], rip.compute_peak_valley(rip=ds[1])))
    rip_features = peak_valley.map(
        lambda ds: (ds[0], rip_feature_computation(ds[1][0], ds[1][1])))

    # r-peak datastream computation
    ecg_rr_rdd = ecg_corrected.map(lambda ds: (
        ds[0], compute_rr_intervals(ds[1], ecg_sampling_frequency)))
    ecg_features = ecg_rr_rdd.map(lambda ds: (
        ds[0],
        ecg_feature_computation(ds[1], window_size=60, window_offset=60)))

    # return rip_features.join(ecg_features).join(accel_features).map(fix_two_joins)
    return ecg_features
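
fix_two_joins is used in the pipeline above (and in the later examples) but its implementation is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming its only job is to flatten the nested value produced by two chained RDD joins; the body is reconstructed from the call sites, not taken from the project:

def fix_two_joins(record):
    # Two chained joins, a.join(b).join(c), yield elements of the form
    # (key, ((a_value, b_value), c_value)); flatten the value so that
    # downstream code can read the three datastreams as ds[1][0],
    # ds[1][1] and ds[1][2].
    key, ((first, second), third) = record
    return key, (first, second, third)
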
Code example #3
    @classmethod
    def setUpClass(cls):
        super(TestVector, cls).setUpClass()
        tz = pytz.timezone('US/Eastern')
        cls.ecg = []
        cls.sample_rate = 64.0
        with gzip.open(os.path.join(os.path.dirname(__file__), 'res/ecg.csv.gz'), 'rt') as f:
            for l in f:
                values = list(map(int, l.split(',')))
                cls.ecg.append(
                    DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
        cls.ds = DataStream(None, None, data=cls.ecg)

        accelx = []
        accel_sample_rate = 64.0 / 6
        with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accelx.csv.gz'), 'rt') as f:
            for l in f:
                values = list(map(int, l.split(',')))
                accelx.append(
                    DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
        accelx = DataStream(None, None, data=accelx)

        accely = []
        with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accely.csv.gz'), 'rt') as f:
            for l in f:
                values = list(map(int, l.split(',')))
                accely.append(
                    DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
        accely = DataStream(None, None, data=accely)

        accelz = []
        with gzip.open(os.path.join(os.path.dirname(__file__), 'res/accelz.csv.gz'), 'rt') as f:
            for l in f:
                values = list(map(int, l.split(',')))
                accelz.append(
                    DataPoint.from_tuple(datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz), values[1]))
        accelz = DataStream(None, None, data=accelz)

        cls.accel = autosense_sequence_align([accelx, accely, accelz], accel_sample_rate)
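
The four loading loops in setUpClass are nearly identical. As an illustration only, a helper along the following lines could factor them out; load_gzipped_datastream is a hypothetical name, and it assumes the same imports (gzip, os, datetime, DataPoint, DataStream) and the same two-column CSV layout (microsecond timestamp, integer sample) used above:

def load_gzipped_datastream(filename, tz):
    # Hypothetical helper: read a gzipped CSV (timestamp in microseconds,
    # integer sample value) located relative to this test module and wrap
    # it in a DataStream, mirroring the loops above.
    data = []
    path = os.path.join(os.path.dirname(__file__), filename)
    with gzip.open(path, 'rt') as f:
        for line in f:
            values = list(map(int, line.split(',')))
            timestamp = datetime.datetime.fromtimestamp(values[0] / 1000000.0, tz=tz)
            data.append(DataPoint.from_tuple(timestamp, values[1]))
    return DataStream(None, None, data=data)

With that helper, for example, accelx = load_gzipped_datastream('res/accelx.csv.gz', tz).
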
Code example #4
def cStress(rdd: RDD) -> RDD:
    # TODO: TWH Temporary
    ecg_sampling_frequency = 64.0
    rip_sampling_frequency = 64.0
    accel_sampling_frequency = 64.0 / 6.0

    # Timestamp correct datastreams
    ecg_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['ecg'],
                          sampling_frequency=ecg_sampling_frequency)))
    rip_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['rip'],
                          sampling_frequency=rip_sampling_frequency)))

    accelx_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accelx'],
                          sampling_frequency=accel_sampling_frequency)))
    accely_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accely'],
                          sampling_frequency=accel_sampling_frequency)))
    accelz_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accelz'],
                          sampling_frequency=accel_sampling_frequency)))

    ecg_quality = ecg_corrected.map(lambda ds:
                                    (ds[0], ecg_data_quality(ds[1])))
    rip_quality = rip_corrected.map(lambda ds:
                                    (ds[0], rip_data_quality(ds[1])))

    accel_group = accelx_corrected.join(accely_corrected).join(
        accelz_corrected).map(fix_two_joins)
    accel = accel_group.map(lambda ds: (
        ds[0],
        autosense_sequence_align(datastreams=[ds[1][0], ds[1][1], ds[1][2]],
                                 sampling_frequency=accel_sampling_frequency)))

    # Accelerometer Feature Computation
    accel_features = accel.map(
        lambda ds: (ds[0], accelerometer_features(ds[1], window_length=10.0)))

    windowed_accel_features = accel_features.map(
        lambda ds: (ds[0], window_accel(ds[1], window_size=60)))

    rip_corrected_and_quality = rip_corrected.join(rip_quality)

    # rip features
    peak_valley = rip_corrected_and_quality.map(lambda ds: (
        ds[0],
        rip.compute_peak_valley(rip=ds[1][0], rip_quality=ds[1][1])))

    rip_cycle_features = peak_valley.map(
        lambda ds: (ds[0], rip_cycle_feature_computation(ds[1][0])))

    windowed_rip_features = rip_cycle_features.map(
        lambda ds: (ds[0],
                    window_rip(peak_datastream=ds[1][0],
                               valley_datastream=ds[1][1],
                               inspiration_duration=ds[1][2],
                               expiration_duration=ds[1][3],
                               respiration_duration=ds[1][4],
                               inspiration_expiration_ratio=ds[1][5],
                               stretch=ds[1][6],
                               window_size=60,
                               window_offset=60)))

    ecg_corrected_and_quality = ecg_corrected.join(ecg_quality)

    # r-peak datastream computation
    ecg_rr_rdd = ecg_corrected_and_quality.map(lambda ds: (
        ds[0],
        compute_rr_intervals(
            ecg=ds[1][0], ecg_quality=ds[1][1], fs=ecg_sampling_frequency)))

    ecg_rr_quality = ecg_rr_rdd.map(lambda ds:
                                    (ds[0], compute_outlier_ecg(ds[1])))
    ecg_rr_and_quality = ecg_rr_rdd.join(ecg_rr_quality)

    windowed_ecg_features = ecg_rr_and_quality.map(
        lambda ds: (ds[0],
                    ecg_feature_computation(datastream=ds[1][0],
                                            quality_datastream=ds[1][1],
                                            window_size=60,
                                            window_offset=60)))

    peak_valley_rr_int = peak_valley.join(
        ecg_rr_rdd)  # TODO: Add RR_Quality here?

    # rsa_cycle_features = peak_valley_rr_int.map(
    #     lambda ds: (ds[0], compute_rsa_cycle_feature(valleys=ds[1][1], rr_int=ds[1][2])))
    # windowed_rsa_features = rsa_cycle_features.map(lambda ds: (ds[0], window_rsa(ds[0][0], window_size=60)))
    #
    # combined_features = windowed_accel_features.join(windowed_ecg_features).join(windowed_rip_features).join(
    #     windowed_rsa_features)
    # Fix joins here

    feature_vector_ecg_rip = windowed_ecg_features.map(
        lambda ds: (ds[0], generate_cStress_feature_vector(ds[1])))

    stress_ground_truth = rdd.map(lambda ds:
                                  (ds['participant'], ds['stress_marks']))

    feature_vector_with_ground_truth = feature_vector_ecg_rip.join(
        stress_ground_truth)

    train_data_with_ground_truth_and_subjects = feature_vector_with_ground_truth.map(
        lambda ds: analyze_events_with_features(participant=ds[0],
                                                stress_mark_stream=ds[1][1],
                                                feature_stream=ds[1][0]))

    return train_data_with_ground_truth_and_subjects  # Data stream with data points (ST, ET, [...37 values...])
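
Every pipeline on this page reads per-participant dictionaries from the input RDD, keyed by 'participant', 'ecg', 'rip', 'accelx', 'accely', 'accelz' and, in this example, 'stress_marks'. The sketch below only illustrates how a driver might hand such records to cStress; load_participant is a hypothetical placeholder for the project's actual data loading, and the participant identifiers are made up:

from pyspark import SparkContext


def load_participant(participant_id):
    # Hypothetical loader: expected to return a dict of DataStreams with the
    # keys the pipeline reads above ('participant', 'ecg', 'rip', 'accelx',
    # 'accely', 'accelz', 'stress_marks'). Left unimplemented here.
    raise NotImplementedError


if __name__ == '__main__':
    sc = SparkContext(appName='cStress')
    participants = ['SI01', 'SI02']  # placeholder identifiers
    rdd = sc.parallelize([load_participant(pid) for pid in participants])
    training_data = cStress(rdd)  # cStress as defined in the example above
    print(training_data.collect())
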
Code example #5
File: cStress.py    Project: Soujanya27/CerebralCortex
def cStress(rdd: RDD) -> RDD:
    # TODO: TWH Temporary
    ecg_sampling_frequency = 64.0
    rip_sampling_frequency = 64.0
    accel_sampling_frequency = 64.0 / 6.0

    # Timestamp correct datastreams
    ecg_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['ecg'],
                          sampling_frequency=ecg_sampling_frequency)))
    rip_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['rip'],
                          sampling_frequency=rip_sampling_frequency)))

    accelx_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accelx'],
                          sampling_frequency=accel_sampling_frequency)))
    accely_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accely'],
                          sampling_frequency=accel_sampling_frequency)))
    accelz_corrected = rdd.map(lambda ds: (
        ds['participant'],
        timestamp_correct(datastream=ds['accelz'],
                          sampling_frequency=accel_sampling_frequency)))

    ecg_quality = ecg_corrected.map(lambda ds:
                                    (ds[0], ecg_data_quality(ds[1])))
    rip_quality = rip_corrected.map(lambda ds:
                                    (ds[0], rip_data_quality(ds[1])))

    accel_group = accelx_corrected.join(accely_corrected).join(
        accelz_corrected).map(fix_two_joins)
    accel = accel_group.map(lambda ds: (
        ds[0],
        autosense_sequence_align(datastreams=[ds[1][0], ds[1][1], ds[1][2]],
                                 sampling_frequency=accel_sampling_frequency)))

    # Accelerometer Feature Computation
    accel_features = accel.map(
        lambda ds: (ds[0], accelerometer_features(ds[1], window_length=10.0)))

    windowed_accel_features = accel_features.map(
        lambda ds: (ds[0], window_accel(ds[1], window_size=60)))

    rip_corrected_and_quality = rip_corrected.join(rip_quality)

    # rip features
    peak_valley = rip_corrected_and_quality.map(lambda ds: (
        ds[0],
        rip.compute_peak_valley(rip=ds[1][0], rip_quality=ds[1][1])))

    rip_cycle_features = peak_valley.map(
        lambda ds: (ds[0], rip_feature_computation(ds[1][0])))

    windowed_rip_features = rip_cycle_features.map(
        lambda ds: (ds[0],
                    window_rip(inspiration_duration=ds[1][0],
                               expiration_duration=ds[1][1], ...,
                               window_size=60)))

    ecg_corrected_and_quality = ecg_corrected.join(ecg_quality)

    # r-peak datastream computation
    ecg_rr_rdd = ecg_corrected_and_quality.map(lambda ds: (
        ds[0],
        compute_rr_intervals(
            ecg=ds[1][0], ecg_quality=ds[1][1], fs=ecg_sampling_frequency)))

    ecg_rr_quality = ecg_rr_rdd.map(lambda ds:
                                    (ds[0], compute_outlier_ecg(ds[1])))
    ecg_rr_and_quality = ecg_rr_rdd.join(ecg_rr_quality)

    windowed_ecg_features = ecg_rr_and_quality.map(lambda ds: (
        ds[0],
        ecg_feature_computation(
            rr_intervals=ds[1][0], data_quality=ds[1][1], window_size=60)))

    peak_valley_rr_int = peak_valley.join(
        ecg_rr_rdd)  # TODO: Add RR_Quality here?

    rsa_cycle_features = peak_valley_rr_int.map(lambda ds: (
        ds[0],
        compute_rsa_cycle_feature(valleys=ds[1][1], rr_int=ds[1][2])))
    windowed_rsa_features = rsa_cycle_features.map(
        lambda ds: (ds[0], window_rsa(ds[0][0], window_size=60)))

    combined_features = windowed_accel_features.join(
        windowed_ecg_features).join(windowed_rip_features).join(
            windowed_rsa_features)
    # Fix joins here

    feature_vector_ecg_rip = combined_features.map(lambda ds: (
        ds[0],
        generate_cStress_feature_vector(
            accel=ds[1][0], ecg=ds[1][1], rip=ds[1][2], rsa=ds[1][3])))

    return feature_vector_ecg_rip  # Data stream with data points (ST, ET, [...37 values...])
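
The "# Fix joins here" comment above flags that three chained joins leave the value nested as (((accel, ecg), rip), rsa), while generate_cStress_feature_vector is indexed as if the tuple were flat. In the spirit of fix_two_joins, a flattening helper might look like the sketch below; the name fix_three_joins and its body are assumptions, not part of the project:

def fix_three_joins(record):
    # a.join(b).join(c).join(d) yields elements of the form
    # (key, (((a_value, b_value), c_value), d_value)); flatten the value so
    # that ds[1][0]..ds[1][3] map directly to the accel, ecg, rip and rsa
    # feature streams.
    key, (((accel_f, ecg_f), rip_f), rsa_f) = record
    return key, (accel_f, ecg_f, rip_f, rsa_f)

Mapping combined_features through such a helper before the final map would make the accel=ds[1][0] through rsa=ds[1][3] indexing above consistent.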