Code example #1
def mobile_app_availability_marker(raw_stream_id: uuid.UUID, stream_name: str, owner_id, dd_stream_name,
                                   CC: CerebralCortex, config: dict):
    """
    This algorithm uses phone battery percentages to decide whether mobile app was available or unavailable.
    Theoretically, phone battery data shall be collected 24/7.
    :param raw_stream_id:
    :param CC:
    :param config:
    """

    try:
        # combine stream_id, data-diagnostic stream name, and owner id into a unique,
        # reproducible stream ID for the app-availability marker
        app_availability_marker_stream_id = uuid.uuid3(
            uuid.NAMESPACE_DNS,
            str(raw_stream_id) + dd_stream_name + str(owner_id) + "MOBILE APP AVAILABILITY MARKER")

        stream_days = get_stream_days(raw_stream_id, app_availability_marker_stream_id, CC)

        for day in stream_days:
            stream = CC.get_stream(raw_stream_id, day=day, data_type=DataSet.COMPLETE)
            if len(stream.data) > 0:
                windowed_data = window(stream.data, config['general']['window_size'], True)
                results = process_windows(windowed_data, config)

                merged_windows = merge_consective_windows(results)
                if len(merged_windows) > 0:
                    input_streams = [{"owner_id": owner_id, "id": str(raw_stream_id), "name": stream_name}]
                    output_stream = {"id": app_availability_marker_stream_id, "name": dd_stream_name,
                                     "algo_type": config["algo_type"]["app_availability_marker"]}
                    metadata = get_metadata(dd_stream_name, input_streams, config)
                    store(merged_windows, input_streams, output_stream, metadata, CC, config)

    except Exception as e:
        print(e)
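All of these marker functions derive their output stream IDs the same way. As a self-contained sketch of the pattern (derive_marker_stream_id is a hypothetical helper name; the originals inline the call):

import uuid

# uuid3 hashes the name deterministically, so re-running a diagnostic over the
# same raw stream, diagnostic stream name, and owner always resolves to the
# same output stream ID, on any machine.
def derive_marker_stream_id(raw_stream_id, dd_stream_name, owner_id, tag):
    name = str(raw_stream_id) + dd_stream_name + str(owner_id) + tag
    return uuid.uuid3(uuid.NAMESPACE_DNS, name)

# e.g. derive_marker_stream_id(sid, dd_name, oid, "MOBILE APP AVAILABILITY MARKER")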
Code example #2
def analyze_quality(streams, owner_id, led_right_wrist_quality_stream_name, wrist, CC):
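    """Computes per-day LED data quality for the given wrist and stores the quality windows."""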
    led_stream_quality_id = uuid.uuid3(
        uuid.NAMESPACE_DNS,
        led_right_wrist_quality_stream_name + str(owner_id) + "LED quality computed on CerebralCortex")
    window_size = 3 # in seconds
    # pick the LED stream for the requested wrist
    stream_key = ("LED--org.md2k.motionsense--MOTION_SENSE_HRV--RIGHT_WRIST"
                  if wrist == "right" else
                  "LED--org.md2k.motionsense--MOTION_SENSE_HRV--LEFT_WRIST")
    if stream_key in streams:
        led_wrist_stream_id = streams[stream_key]["identifier"]
        led_wrist_stream_name = streams[stream_key]["name"]
    else:
        led_wrist_stream_id = None
    # raw data rows live in Cassandra (cerebralcortex.data); stream metadata lives in MySQL (stream table)
    if led_wrist_stream_id:
        stream_end_days = CC.get_stream_duration(led_wrist_stream_id)
        if stream_end_days["start_time"] and stream_end_days["end_time"]:
            days = stream_end_days["end_time"] - stream_end_days["start_time"]
            for day_offset in range(days.days + 1):
                day = (stream_end_days["start_time"] + timedelta(days=day_offset)).strftime('%Y%m%d')
                stream = CC.get_stream(led_wrist_stream_id, day=day, data_type=DataSet.COMPLETE)
                if len(stream.data) > 0:
                    windowed_data = window(stream.data, window_size, False)
                    led_quality_windows = data_quality_led(windowed_data)

                    input_streams = [{"owner_id": str(owner_id), "id": str(led_wrist_stream_id),
                                      "name": led_wrist_stream_name}]
                    output_stream = {"id": str(led_stream_quality_id), "name": led_right_wrist_quality_stream_name, "algo_type": ""}

                    store(led_quality_windows, input_streams, output_stream, CC)
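The per-day loop above expands a stream's duration into the 'YYYYMMDD' keys that CC.get_stream expects. A minimal, self-contained sketch of just that expansion (stream_days_between is a hypothetical name):

from datetime import datetime, timedelta

def stream_days_between(start_time: datetime, end_time: datetime) -> list:
    # one 'YYYYMMDD' key per calendar day covered by the stream, inclusive
    num_days = (end_time - start_time).days
    return [(start_time + timedelta(days=d)).strftime('%Y%m%d')
            for d in range(num_days + 1)]

# stream_days_between(datetime(2017, 1, 30), datetime(2017, 2, 1))
# -> ['20170130', '20170131', '20170201']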
Code example #3
    def calc_voiced_segments_hourly(self, audio_data: list) -> List[DataPoint]:
        """
        calculates amout of voice_segments present every hour of a day.
        returns start_time, end_time and amount of voiced segments present
        every hour
        Algorithm:
            window(input audio_data) per hour
            Calculate voiced_segments_for_each_hour
            return voiced_segments_for_each hour
        :param audio_data:list of input data after thresholding
        :return:Datapoint with start_time,end_time and amount of voiced
        segments in minute
        :rtype:List(DataPoint)
        """
        windowed_per_hour = window(audio_data, 3600, False)

        voiced_per_hour = []
        for key in windowed_per_hour:
            # count the DataPoints marked voiced (sample == 1) in this hour window
            no_voiced_segments_hr = sum(
                1 for dp in windowed_per_hour[key] if dp.sample == 1)
            voiced_per_hour.append(DataPoint(start_time=key[0],
                                             end_time=key[1],
                                             offset=audio_data[0].offset,
                                             sample=no_voiced_segments_hr))
        return voiced_per_hour
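window() itself is not shown in this listing. A naive stand-in for window(audio_data, 3600, False), reproducing only the (window_start, window_end)-keyed grouping the method relies on (SimpleDataPoint is a simplified stand-in for cerebralcortex's DataPoint):

from collections import OrderedDict
from dataclasses import dataclass
from datetime import datetime, timedelta

@dataclass
class SimpleDataPoint:
    start_time: datetime
    end_time: datetime = None
    offset: int = 0
    sample: object = None

def window_by_hour(data):
    # bucket points by the clock hour their start_time falls in
    buckets = OrderedDict()
    for dp in sorted(data, key=lambda d: d.start_time):
        win_start = dp.start_time.replace(minute=0, second=0, microsecond=0)
        buckets.setdefault((win_start, win_start + timedelta(hours=1)), []).append(dp)
    return buckets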
Code example #4
    def work_beacon_context(self, beaconworkstream, input_streams, user_id):
        """
        produces datapoint sample as 1 or 2 if around work beacons else 0
        :param beaconworkstream:
        :param input_streams:
        :param user_id:
        :return: stream with (start_time,end_time,offset,sample= 0 or 1]
        based on context of work_beacon 1 or work_beacon 2
        """
        if (len(beaconworkstream) > 0):
            beaconstream = beaconworkstream

            windowed_data = window(beaconstream, self.window_size, True)

            new_data = []
            for i, j in windowed_data:
                if (len(windowed_data[i, j]) > 0):
                    values = []
                    for items in windowed_data[i, j]:
                        values.append(items.sample)

                    if ('1' in values) and ('2' in values):
                        windowed_data[i, j] = "1"
                    else:
                        windowed_data[i, j] = values[0]

                else:
                    windowed_data[i, j] = "0"

            data = merge_consective_windows(windowed_data)
            for items in data:
                if items.sample is not None and items.sample != "":
                    new_data.append(
                        DataPoint(start_time=items.start_time,
                                  end_time=items.end_time,
                                  offset=beaconworkstream[0].offset,
                                  sample=items.sample))

            try:

                self.store_stream(filepath="work_beacon_context.json",
                                  input_streams=input_streams,
                                  user_id=user_id,
                                  data=new_data,
                                  localtime=False)
                self.CC.logging.log(
                    '%s %s work_beacon_context stored %d '
                    'DataPoints for user %s ' %
                    (str(datetime.datetime.now()), self.__class__.__name__,
                     len(new_data), str(user_id)))

            except Exception as e:
                self.CC.logging.log("Exception:", str(e))
                self.CC.logging.log(str(traceback.format_exc()))
        else:
            self.CC.logging.log("No work beacon streams found for user %s" %
                                str(user_id))
Code example #5
def phone_screen_touch_marker(raw_stream_id: uuid.UUID,
                              raw_stream_name: str,
                              owner_id,
                              dd_stream_name,
                              CC: CerebralCortex,
                              config: dict,
                              start_time=None,
                              end_time=None):
    """
    This is not part of core data diagnostic suite.
    It only calculates how many screen touches are there.
    :param raw_stream_id:
    :param CC:
    :param config:
    """

    try:
        # combine stream_id, data-diagnostic stream name, and owner id into a unique,
        # reproducible stream ID for the screen-touch marker
        screen_touch_stream_id = uuid.uuid3(
            uuid.NAMESPACE_DNS,
            str(raw_stream_id) + dd_stream_name + str(owner_id) +
            "mobile phone screen touch marker")

        stream_days = get_stream_days(raw_stream_id, screen_touch_stream_id,
                                      CC)

        for day in stream_days:
            stream = CC.get_datastream(raw_stream_id,
                                       data_type=DataSet.COMPLETE,
                                       day=day,
                                       start_time=start_time,
                                       end_time=end_time)
            if len(stream.data) > 0:
                windowed_data = window(stream.data,
                                       config['general']['window_size'], True)
                results = process_windows(windowed_data)

                merged_windows = merge_consective_windows(results)
                if len(merged_windows) > 0:
                    input_streams = [{
                        "owner_id": owner_id,
                        "id": str(raw_stream_id),
                        "name": raw_stream_name
                    }]
                    output_stream = {
                        "id": screen_touch_stream_id,
                        "name": dd_stream_name,
                        "algo_type":
                        config["algo_type"]["app_availability_marker"]
                    }
                    metadata = get_metadata(dd_stream_name, input_streams,
                                            config)
                    store(merged_windows, input_streams, output_stream,
                          metadata, CC, config)

    except Exception as e:
        print(e)
Code example #6
    def home_beacon_context(self, beaconhomestream, beacon_stream_id,
                            beacon_stream_name, user_id):
        """
        produces datapoint sample as 1 if around home beacon else 0
        :param beaconhomestream:
        :param beacon_stream_id:
        :param beacon_stream_name:
        :param user_id:
        :return: new stream (start_time,end_time,offset,sample=[0 or 1]
        """
        input_streams = []
        input_streams.append({
            "identifier": beacon_stream_id,
            "name": beacon_stream_name
        })

        if (len(beaconhomestream) > 0):
            beaconstream = beaconhomestream
            windowed_data = window(beaconstream, self.window_size, True)
            new_data = []

            for i, j in windowed_data:
                if (len(windowed_data[i, j]) > 0):
                    windowed_data[i, j] = "1"

                else:
                    windowed_data[i, j] = "0"

            data = merge_consective_windows(windowed_data)
            for items in data:
                if items.sample is not None and items.sample != "":
                    new_data.append(
                        DataPoint(start_time=items.start_time,
                                  end_time=items.end_time,
                                  offset=beaconhomestream[0].offset,
                                  sample=items.sample))

            try:

                self.store_stream(filepath="home_beacon_context.json",
                                  input_streams=input_streams,
                                  user_id=user_id,
                                  data=new_data,
                                  localtime=False)
                self.CC.logging.log(
                    '%s %s home_beacon_context stored %d '
                    'DataPoints for user %s ' %
                    (str(datetime.datetime.now()), self.__class__.__name__,
                     len(new_data), str(user_id)))

            except Exception as e:
                self.CC.logging.log("Exception:", str(e))
                self.CC.logging.log(str(traceback.format_exc()))
        else:
            self.CC.logging.log("No home beacon streams found for user %s" %
                                str(user_id))
Code example #7
def ecg_data_quality(datastream: DataStream,
                     window_size: float = 2.0,
                     acceptable_outlier_percent: float = .34,
                     outlier_threshold_high: float = .9769,
                     outlier_threshold_low: float = .004884,
                     ecg_threshold_band_loose: float = .01148,
                     ecg_threshold_slope: float = .02443,
                     buffer_length: int = 3) -> DataStream:
    """

    :param datastream: Input ECG datastream
    :param window_size: Window size specifying the number of seconds the datastream is divided to check for data quality
    :param acceptable_outlier_percent: The acceptable outlier percentage in a window; the default is 34 percent
    :param outlier_threshold_high: The percentage of the ADC range above which any value is considered an outlier
    :param outlier_threshold_low: The percentage of the ADC range below which any value is considered an outlier
    :param ecg_threshold_band_loose: The loose band threshold for the ECG signal, expressed as a percentage of the ADC range
    :param ecg_threshold_slope: The slope threshold of the ECG signal; no two consecutive DataPoints may differ
    by more than this (expressed as a percentage of the ADC range)
    :param buffer_length: The memory of the data-quality computation: this many past windows also
    influence the quality decision for the current window

    :return: An annotated DataStream of ECG data quality specifying the time ranges when data quality was acceptable/unacceptable
    """

    ecg_quality_stream = DataStream.from_datastream(input_streams=[datastream])
    window_data = window(datastream.data, window_size=window_size)

    ecg_quality = []
    ecg_range = []
    for key, data in window_data.items():
        if len(data) > 0:
            result = compute_data_quality(data, ecg_range, True,
                                          ecg_threshold_band_loose,
                                          ecg_threshold_slope,
                                          acceptable_outlier_percent,
                                          outlier_threshold_high,
                                          outlier_threshold_low, buffer_length)
            if not ecg_quality:
                ecg_quality.append(
                    DataPoint.from_tuple(data[0].start_time, result,
                                         data[-1].start_time))
            else:
                if ecg_quality[-1].sample == result:
                    new_point = DataPoint.from_tuple(
                        ecg_quality[-1].start_time, result,
                        data[-1].start_time)
                    ecg_quality[-1] = new_point
                else:
                    ecg_quality.append(
                        DataPoint.from_tuple(data[0].start_time, result,
                                             data[-1].start_time))

    ecg_quality_stream.data = ecg_quality

    return ecg_quality_stream
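The else-branch above is a run-length merge: consecutive windows that share a quality label collapse into one annotated span. Stripped of the DataPoint plumbing, the idea reduces to (merge_quality_runs is illustrative, not the library's code):

def merge_quality_runs(labeled_windows):
    # labeled_windows: time-ordered (start, end, label) tuples, one per window
    merged = []
    for start, end, label in labeled_windows:
        if merged and merged[-1][2] == label:
            merged[-1] = (merged[-1][0], end, label)  # extend the current run
        else:
            merged.append((start, end, label))        # start a new run
    return merged

# merge_quality_runs([(0, 2, 'good'), (2, 4, 'good'), (4, 6, 'bad')])
# -> [(0, 4, 'good'), (4, 6, 'bad')]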
Code example #8
def battery_marker(raw_stream_id: uuid.UUID, stream_name: str, user_id,
                   dd_stream_name, CC: CerebralCortex, config: dict):
    """
    This algorithm uses battery percentages to decide whether device was powered-off or battery was low.
    All the labeled data (st, et, label) with its metadata are then stored in a datastore.
    :param raw_stream_id:
    :param CC:
    :param config:
    """

    try:
        # combine stream_id, data-diagnostic stream name, and user id into a unique,
        # reproducible stream ID for the battery marker
        battery_marker_stream_id = uuid.uuid3(
            uuid.NAMESPACE_DNS,
            str(raw_stream_id) + dd_stream_name + str(user_id) + "BATTERY MARKER")

        stream_days = get_stream_days(raw_stream_id, battery_marker_stream_id,
                                      CC)

        for day in stream_days:
            stream = CC.get_stream(raw_stream_id,
                                   day=day,
                                   data_type=DataSet.COMPLETE)

            if len(stream.data) > 0:
                windowed_data = window(stream.data,
                                       config['general']['window_size'], True)
                results = process_windows(windowed_data, stream_name, config)

                merged_windows = merge_consective_windows(results)
                if len(merged_windows) > 0:
                    input_streams = [{
                        "owner_id": user_id,
                        "id": str(raw_stream_id),
                        "name": stream_name
                    }]
                    output_stream = {
                        "id": battery_marker_stream_id,
                        "name": dd_stream_name,
                        "algo_type": config["algo_type"]["battery_marker"]
                    }
                    labelled_windows = mark_windows(battery_marker_stream_id,
                                                    merged_windows, CC, config)
                    metadata = get_metadata(dd_stream_name, input_streams,
                                            config)
                    store(labelled_windows, input_streams, output_stream,
                          metadata, CC, config)
    except Exception as e:
        print(e)
Code example #9
def sensor_availability(raw_stream_id: uuid.UUID, stream_name: str, owner_id: uuid.UUID,
                        dd_stream_name, phone_physical_activity,
                        CC: CerebralCortex, config: dict):
    """
    Marks missing data as a wireless disconnection when a participant walks away from the phone or sensor.
    :param raw_stream_id:
    :param stream_name:
    :param owner_id:
    :param dd_stream_name:
    :param phone_physical_activity:
    :param CC:
    :param config:
    """

    # combine stream_id, data-diagnostic stream name, and owner id into a unique,
    # reproducible stream ID for the wireless-disconnection marker
    wireless_marker_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS, str(raw_stream_id) + dd_stream_name + str(owner_id))

    stream_days = get_stream_days(raw_stream_id, wireless_marker_stream_id, CC)

    for day in stream_days:
        # load stream data to be diagnosed
        raw_stream = CC.get_stream(raw_stream_id,
                                   day=day,
                                   data_type=DataSet.COMPLETE)
        if len(raw_stream.data) > 0:

            windowed_data = window(raw_stream.data,
                                   config['general']['window_size'], True)
            results = process_windows(windowed_data, day, CC,
                                      phone_physical_activity, config)
            merged_windows = merge_consective_windows(results)

            if len(merged_windows) > 0:
                input_streams = [{
                    "owner_id": owner_id,
                    "id": str(raw_stream_id),
                    "name": stream_name
                }]
                output_stream = {
                    "id": wireless_marker_stream_id,
                    "name": dd_stream_name,
                    "algo_type":
                    config["algo_type"]["sensor_unavailable_marker"]
                }
                metadata = get_metadata(dd_stream_name, input_streams, config)
                store(merged_windows, input_streams, output_stream, metadata,
                      CC, config)
Code example #10
def accelerometer_features(
        accel: DataStream,
        window_length: float = 10.0,
        activity_threshold: float = 0.21,
        percentile_low: int = 1,
        percentile_high: int = 99
) -> Tuple[DataStream, DataStream, DataStream]:
    """

    References:
        Figure 3: http://www.cs.memphis.edu/~santosh/Papers/Timing-JIT-UbiComp-2014.pdf

    :param percentile_high:
    :param percentile_low:
    :param accel:
    :param window_length:
    :param activity_threshold:
    :return:
    """
    accelerometer_magnitude = magnitude(normalize(accel))

    accelerometer_win_mag_deviations_data = []
    for key, data in window(accelerometer_magnitude.data,
                            window_length).items():
        accelerometer_win_mag_deviations_data.append(
            window_std_dev(data, key[0]))

    accelerometer_win_mag_deviations = DataStream.from_datastream([accel])
    accelerometer_win_mag_deviations.data = accelerometer_win_mag_deviations_data

    am_values = np.array([dp.sample for dp in accelerometer_magnitude.data])
    low_limit = np.percentile(am_values, percentile_low)
    high_limit = np.percentile(am_values, percentile_high)
    value_range = high_limit - low_limit  # avoid shadowing the builtin 'range'

    accel_activity_data = []
    for dp in accelerometer_win_mag_deviations_data:
        comparison = dp.sample > (low_limit + activity_threshold * value_range)
        accel_activity_data.append(
            DataPoint.from_tuple(dp.start_time, comparison))

    accel_activity = DataStream.from_datastream([accel])
    accel_activity.data = accel_activity_data

    return accelerometer_magnitude, accelerometer_win_mag_deviations, accel_activity
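The activity decision reduces to one comparison against a percentile-derived cutoff. A standalone restatement with plain numpy, using the same defaults as above (active_windows is illustrative):

import numpy as np

def active_windows(window_std_devs, magnitudes,
                   activity_threshold=0.21, percentile_low=1, percentile_high=99):
    # a window is active when its magnitude std-dev clears
    # low + activity_threshold * (high - low)
    low = np.percentile(magnitudes, percentile_low)
    high = np.percentile(magnitudes, percentile_high)
    cutoff = low + activity_threshold * (high - low)
    return [sd > cutoff for sd in window_std_devs]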
Code example #11
def attachment_marker(raw_stream_id: uuid.UUID, stream_name: str, owner_id: uuid.UUID,
                      dd_stream_name, CC: CerebralCortex, config: dict):
    """
    Label sensor data as sensor-on-body, sensor-off-body, or improper-attachment.
    All the labeled data (st, et, label) with its metadata are then stored in a datastore

    """
    # TODO: quality streams could be multiple so find the one computed with CC
    # combine stream_id, data-diagnostic stream name, and owner id into a unique,
    # reproducible stream ID for the attachment marker
    attachment_marker_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS,
        str(raw_stream_id) + dd_stream_name + str(owner_id) + "ATTACHMENT MARKER")

    stream_days = get_stream_days(raw_stream_id, attachment_marker_stream_id,
                                  CC)

    for day in stream_days:
        # load stream data to be diagnosed
        raw_stream = CC.get_stream(raw_stream_id,
                                   day=day,
                                   data_type=DataSet.COMPLETE)

        if len(raw_stream.data) > 0:
            windowed_data = window(raw_stream.data,
                                   config['general']['window_size'], True)
            results = process_windows(windowed_data, config)
            merged_windows = merge_consective_windows(results)

            input_streams = [{
                "owner_id": owner_id,
                "id": str(raw_stream_id),
                "name": stream_name
            }]
            output_stream = {
                "id": attachment_marker_stream_id,
                "name": dd_stream_name,
                "algo_type": config["algo_type"]["attachment_marker"]
            }
            metadata = get_metadata(dd_stream_name, input_streams, config)
            store(merged_windows, input_streams, output_stream, metadata, CC,
                  config)
Code example #12
    def audio_context(self,
                      user_id: str,
                      input_streams_audio: dict,
                      stream1_data: list,
                      input_streams_audio_work: dict = None,
                      stream2_data: list = None):
        """
        redirects appropirate streams for appropriate calculations.
        takes raw input stream and labels every minute window as voiced or noise
        based on threshold of 20 secs.
        Algorithm:
        window(input_audio inference stream) on 1 minute
        if voiced segments >= 20 secs
            label minute window as 'voiced'
        else:
            label minute window as 'noise'
        :param str user_id:id of user
        :param dict input_streams_audio: input stream for audio for whole day
        :param list stream1_data: list of Datapoints
        :param dict input_streams_audio_work:input stream for audio for
        office only
        :param list stream2_data: list of DataPoints
        """

        if (len(stream1_data) > 0):
            audio_stream = stream1_data
            windowed_data = window(audio_stream, 60, False)

            no_voiced_segments = 0
            for key in windowed_data:
                temp_voice = 0
                temp_noise = 0
                for idx, val in enumerate(windowed_data[key]):
                    if (idx + 1) < len(windowed_data[key]):
                        if val.sample[0] == "noise":
                            temp_noise += (
                                windowed_data[key][idx + 1].start_time -
                                val.start_time).total_seconds()
                        else:
                            temp_voice += (
                                windowed_data[key][idx + 1].start_time -
                                val.start_time).total_seconds()

                if (temp_voice >= 20):
                    windowed_data[key] = "voice"
                    no_voiced_segments += 1
                else:
                    windowed_data[key] = "noise"

            audio_data = []
            for keys in windowed_data.keys():
                if windowed_data[keys] == "voice":
                    audio_data.append(
                        DataPoint(start_time=keys[0],
                                  end_time=keys[1],
                                  offset=audio_stream[0].offset,
                                  sample=1))
                else:
                    audio_data.append(
                        DataPoint(start_time=keys[0],
                                  end_time=keys[1],
                                  offset=audio_stream[0].offset,
                                  sample=0))

            file_path_voiced_minute = "voice_segments_context_per_minute.json"
            self.store_data(file_path_voiced_minute, input_streams_audio,
                            user_id, audio_data)
            voiced_hourly = self.calc_voiced_segments_hourly(audio_data)
            file_path_voiced_hourly = "average_voiced_segments_hourly.json"
            self.store_data(file_path_voiced_hourly, input_streams_audio,
                            user_id, voiced_hourly)

            voiced_daily = \
                self.calc_voiced_segments_daily_average(audio_data,
                                                            no_voiced_segments)

            file_path_voiced_daily = "average_voiced_segments_daily.json"
            self.store_data(file_path_voiced_daily, input_streams_audio,
                            user_id, voiced_daily)
            if stream2_data:
                data_at_office = []
                office_start_time = stream2_data[0].start_time
                office_end_time = stream2_data[0].end_time
                for items in audio_data:
                    if (items.start_time >= office_start_time) and \
                            (items.end_time <= office_end_time):
                        data_at_office.append(items)


                voiced_hourly_at_work = \
                    self.calc_voiced_segments_hourly(data_at_office)
                file_path_hourly_at_work =\
                    "average_voiced_segments_office_hourly.json"
                self.store_data(file_path_hourly_at_work,
                                input_streams_audio_work, user_id,
                                voiced_hourly_at_work)

                voiced_daily_at_work = \
                    self.calc_voiced_segments_daily_average(data_at_office,
                                                            no_voiced_segments)
                file_path_daily_at_work =\
                    "average_voiced_segments_office_daily.json"
                self.store_data(file_path_daily_at_work,
                                input_streams_audio_work, user_id,
                                voiced_daily_at_work)
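The per-minute decision sums the inter-sample gaps that follow voiced samples and compares the total against 20 seconds. In isolation, with (timestamp_seconds, label) pairs standing in for DataPoints (label_minute is illustrative):

def label_minute(samples):
    # samples: time-ordered (t_seconds, label) pairs within one minute window
    voiced_seconds = 0.0
    for (t, label), (t_next, _) in zip(samples, samples[1:]):
        if label != "noise":  # the gap after a voiced sample counts as voiced time
            voiced_seconds += t_next - t
    return "voice" if voiced_seconds >= 20 else "noise"

# label_minute([(0, 'voice'), (25, 'noise'), (60, 'noise')]) -> 'voice'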
Code example #13
def packet_loss_marker(raw_stream_id: uuid.UUID, stream_name: str, owner_id: uuid.UUID,
                       dd_stream_name, CC: CerebralCortex, config: dict):
    """
    Label a window as packet-loss if received packets are less than the expected packets.
    All the labeled data (st, et, label) with its metadata are then stored in a datastore.
    :param raw_stream_id:
    :param CC_obj:
    :param config:
    """

    # combine stream_id, data-diagnostic stream name, and owner id into a unique,
    # reproducible stream ID for the packet-loss marker
    packetloss_marker_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS,
        str(raw_stream_id) + dd_stream_name + str(owner_id) + "PACKET LOSS MARKER")

    stream_days = get_stream_days(raw_stream_id, packetloss_marker_stream_id,
                                  CC)

    if stream_name == config["stream_names"]["autosense_ecg"]:
        sampling_rate = config["sampling_rate"]["ecg"]
        threshold_val = config["packet_loss_marker"][
            "ecg_acceptable_packet_loss"]
        label = config["labels"]["ecg_packet_loss"]
    elif stream_name == config["stream_names"]["autosense_rip"]:
        sampling_rate = config["sampling_rate"]["rip"]
        threshold_val = config["packet_loss_marker"][
            "rip_acceptable_packet_loss"]
        label = config["labels"]["rip_packet_loss"]
    elif stream_name == config["stream_names"][
            "motionsense_hrv_accel_right"] or stream_name == config[
                "stream_names"]["motionsense_hrv_accel_left"]:
        sampling_rate = config["sampling_rate"]["motionsense_accel"]
        threshold_val = config["packet_loss_marker"][
            "motionsense_accel_acceptable_packet_loss"]
        label = config["labels"]["motionsense_gyro_packet_loss"]
    elif stream_name == config["stream_names"][
            "motionsense_hrv_gyro_right"] or stream_name == config[
                "stream_names"]["motionsense_hrv_gyro_left"]:
        sampling_rate = config["sampling_rate"]["motionsense_gyro"]
        threshold_val = config["packet_loss_marker"][
            "motionsense_gyro_acceptable_packet_loss"]
        label = config["labels"]["motionsense_gyro_packet_loss"]
    else:
        raise ValueError("Unsupported stream name for packet-loss marking: %s" % stream_name)

    for day in stream_days:
        # load stream data to be diagnosed
        stream = CC.get_stream(raw_stream_id,
                               day=day,
                               data_type=DataSet.COMPLETE)

        if len(stream.data) > 0:

            windowed_data = window(stream.data,
                                   config['general']['window_size'], True)

            results = process_windows(windowed_data, sampling_rate,
                                      threshold_val, label, config)
            merged_windows = merge_consective_windows(results)
            if len(merged_windows) > 0:
                input_streams = [{
                    "owner_id": owner_id,
                    "id": str(raw_stream_id),
                    "name": stream_name
                }]
                output_stream = {
                    "id": packetloss_marker_stream_id,
                    "name": dd_stream_name,
                    "algo_type": config["algo_type"]["packet_loss_marker"]
                }
                metadata = get_metadata(dd_stream_name, input_streams, config)
                store(merged_windows, input_streams, output_stream, metadata,
                      CC, config)
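process_windows is not shown in this listing; a plausible per-window check, assuming the received sample count is compared against what the sampling rate predicts (window_packet_loss and its threshold semantics are guesses, not the library's code):

def window_packet_loss(received_samples: int, window_size_sec: float,
                       sampling_rate_hz: float, acceptable_ratio: float) -> bool:
    # True when the fraction of samples actually received falls below the
    # acceptable ratio for the window
    expected = window_size_sec * sampling_rate_hz
    return (received_samples / expected) < acceptable_ratio

# At 64 Hz over a 60 s window, 3840 samples are expected; receiving 2500 of
# them (a ratio of ~0.65) would be flagged under a 0.8 threshold.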
Code example #14
    def work_beacon_context(self, beaconworkstream: list, input_streams: dict,
                            user_id: str):
        """
        produces datapoint sample as 1 if around work beacon 1, 2 if around workbeacon2
        and 0 if not around work beacon
        
         Algorithm::
            data = window beaconstream 
            if [values] in a minute window in data
                if 1 in values and 2 in values:
                    around work_beacon1 (1)
                else
                    around work_beacon(values[0]):could be either 1 or 2
            else
                not around beacon:0
        
        :param List(Datapoint) beaconworkstream : Input list
        :param dict input_streams: Dict to store stream id and name for storing
        :param string user_id: id of user
        
        """
        if (len(beaconworkstream) > 0):
            beaconstream = beaconworkstream

            windowed_data = window(beaconstream, self.window_size, True)

            new_data = []
            for i, j in windowed_data:
                if (len(windowed_data[i, j]) > 0):
                    values = []
                    for items in windowed_data[i, j]:
                        values.append(items.sample)

                    if ('1' in values) and ('2' in values):
                        windowed_data[i, j] = 1
                    else:
                        windowed_data[i, j] = int(values[0])

                else:
                    windowed_data[i, j] = 0

            data = merge_consective_windows(windowed_data)
            for items in data:
                if items.sample is not None and items.sample != "":
                    new_data.append(
                        DataPoint(start_time=items.start_time,
                                  end_time=items.end_time,
                                  offset=beaconworkstream[0].offset,
                                  sample=items.sample))

            try:

                self.store_stream(filepath="work_beacon_context.json",
                                  input_streams=input_streams,
                                  user_id=user_id,
                                  data=new_data,
                                  localtime=True)
                self.CC.logging.log(
                    '%s %s work_beacon_context stored %d '
                    'DataPoints for user %s ' %
                    (str(datetime.datetime.now()), self.__class__.__name__,
                     len(new_data), str(user_id)))

            except Exception as e:
                self.CC.logging.log("Exception:", str(e))
                self.CC.logging.log(str(traceback.format_exc()))
        else:
            self.CC.logging.log("No work beacon streams found for user %s" %
                                str(user_id))
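Stripped of the windowing and storage plumbing, the per-window rule above is small. A self-contained restatement (label_beacon_window is illustrative):

def label_beacon_window(samples):
    # samples: beacon ids ('1' or '2') sighted in one minute window; [] if none
    if not samples:
        return 0                      # no sightings -> not around a work beacon
    if '1' in samples and '2' in samples:
        return 1                      # both beacons seen -> treated as beacon 1
    return int(samples[0])            # otherwise the first sighted beacon wins

# label_beacon_window(['2', '2']) -> 2
# label_beacon_window(['1', '2']) -> 1
# label_beacon_window([])         -> 0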
Code example #15
    def home_beacon_context(self, beaconhomestream: list,
                            beacon_stream_id: str, beacon_stream_name: str,
                            user_id: str):
        """
        produces datapoint sample as 1 if around home beacon else 0
        
        Algorithm::
            data = window beaconstream 
            if values in a minute window in data
                around beacon:1
            else
                not around beacon:0

        :param List(Datapoint) beaconhomestream : Input list
        :param str beacon_stream_id: stream name representing workbeacon1
        :param str beacon_stream_name: stream name representing workbeacon2
        :param str user_id: id of user
       
        """

        input_streams = []

        input_streams.append({
            "identifier": beacon_stream_id,
            "name": beacon_stream_name
        })

        if (len(beaconhomestream) > 0):
            beaconstream = beaconhomestream
            windowed_data = window(beaconstream, self.window_size, True)
            new_data = []

            for i, j in windowed_data:
                if (len(windowed_data[i, j]) > 0):
                    windowed_data[i, j] = 1

                else:
                    windowed_data[i, j] = 0

            data = merge_consective_windows(windowed_data)
            for items in data:
                if items.sample is not None and items.sample != "":
                    new_data.append(
                        DataPoint(start_time=items.start_time,
                                  end_time=items.end_time,
                                  offset=beaconhomestream[0].offset,
                                  sample=items.sample))

            try:

                self.store_stream(filepath="home_beacon_context.json",
                                  input_streams=input_streams,
                                  user_id=user_id,
                                  data=new_data,
                                  localtime=True)
                self.CC.logging.log(
                    '%s %s home_beacon_context stored %d '
                    'DataPoints for user %s ' %
                    (str(datetime.datetime.now()), self.__class__.__name__,
                     len(new_data), str(user_id)))

            except Exception as e:
                self.CC.logging.log("Exception:", str(e))
                self.CC.logging.log(str(traceback.format_exc()))
        else:
            self.CC.logging.log("No home beacon streams found for user %s" %
                                str(user_id))