예제 #1
0
def mobile_app_availability_marker(raw_stream_id: uuid, stream_name: str, owner_id, dd_stream_name, CC: CerebralCortex,
                                   config: dict, start_time=None, end_time=None):
    """
    Mark periods of mobile-app availability/unavailability from phone
    battery percentages. Phone battery data is expected to be collected
    24/7, so its presence is used as the availability signal.

    :param raw_stream_id: id of the raw battery stream
    :param stream_name: name of the raw stream
    :param owner_id: stream owner's id
    :param dd_stream_name: name of the data-diagnostic output stream
    :param CC: CerebralCortex connection object
    :param config: data-diagnostic configuration
    :param start_time: unused here; kept for interface compatibility
    :param end_time: unused here; kept for interface compatibility
    """

    try:
        # Derive a deterministic output stream id from the raw stream id,
        # the diagnostic stream name, and the owner.
        seed = str(raw_stream_id + dd_stream_name + owner_id + "mobile app availability marker")
        app_availability_marker_stream_id = uuid.uuid3(uuid.NAMESPACE_DNS, seed)

        for day in get_stream_days(raw_stream_id, app_availability_marker_stream_id, CC):
            stream = CC.get_datastream(raw_stream_id, data_type=DataSet.COMPLETE, day=day)
            if not stream.data:
                continue

            windowed_data = window(stream.data, config['general']['window_size'], True)
            merged_windows = merge_consective_windows(process_windows(windowed_data, config))
            if not merged_windows:
                continue

            input_streams = [{"owner_id": owner_id, "id": str(raw_stream_id), "name": stream_name}]
            output_stream = {"id": app_availability_marker_stream_id, "name": dd_stream_name,
                             "algo_type": config["algo_type"]["app_availability_marker"]}
            metadata = get_metadata(dd_stream_name, input_streams, config)
            store(merged_windows, input_streams, output_stream, metadata, CC, config)

    except Exception as e:
        # NOTE(review): failures are only printed, never re-raised —
        # presumably deliberate best-effort processing; confirm.
        print(e)
예제 #2
0
def filter_bad_ecg(ecg: DataStream,
                   fs: float,
                   no_of_secs: int = 2) -> DataStream:
    """
    Split the ECG samples into non-overlapping windows of ``no_of_secs``
    seconds, classify each window, and return a datastream containing only
    the samples from windows judged acceptable.

    :param ecg: raw ECG datastream
    :param fs: sampling frequency
    :param no_of_secs: length in seconds of each window checked for quality
    :return: filtered ECG datastream
    """

    samples_per_window = int(no_of_secs * fs)
    windows = window(ecg.data, window_size=samples_per_window)

    kept_samples = []
    for _, window_samples in windows.items():
        # Windows failing the fixed range/slope/amplitude limits are
        # dropped wholesale; accepted windows contribute all their samples.
        if classify_ecg_window(window_samples, range_threshold=200,
                               slope_threshold=50, maximum_value=4000):
            kept_samples.extend(window_samples)

    filtered = DataStream.from_datastream([ecg])
    filtered.data = kept_samples
    return filtered
    def test_phone_battery(self):
        """
        Exercise the phone-battery marker pipeline end to end: window the
        sample battery data, label each window, merge consecutive equal
        windows, and mark the merged windows against expected labels.

        settings: powered-off = 0 and battery-low=10
        """
        merged_windows_samples_expect = [
            'charged', 'no-data', 'charged', 'low', 'no-data', 'charged'
        ]
        labelled_windows_samples_expect = [0, 10]
        # Removed unused locals `cc`/`dd`: the parallelized RDD built from
        # the sample data was never used by any assertion below.

        windowed_data = window(self.sample_battery_data, self.window_size,
                               True)
        results = OrderedDict()
        for key, data in windowed_data.items():
            # Each window is labelled from its raw battery percentages.
            dp = [float(k.sample) for k in data]
            results[key] = battery(
                dp, self.config["sensor_types"]["phone_battery"], self.config)

        merged_windows = merge_consective_windows(results)
        merged_windows_samples = [mw.sample for mw in merged_windows]

        labelled_windows = mark_windows(merged_windows, self.config)
        labelled_windows_samples = [mw.sample for mw in labelled_windows]

        self.assertListEqual(merged_windows_samples,
                             merged_windows_samples_expect)
        self.assertListEqual(labelled_windows_samples,
                             labelled_windows_samples_expect)
def battery_marker(raw_stream_id: uuid, stream_name: str, owner_id, dd_stream_name, CC: CerebralCortex, config: dict,
                   start_time=None, end_time=None):
    """
    Decide from battery percentages whether the device was powered off or
    the battery was low, and store the labeled windows (st, et, label)
    along with their metadata.

    :param raw_stream_id: id of the raw battery stream
    :param stream_name: name of the raw stream
    :param owner_id: stream owner's id
    :param dd_stream_name: data-diagnostic output stream name
    :param CC: CerebralCortex connection object
    :param config: data-diagnostic configuration
    :param start_time: unused here; kept for interface compatibility
    :param end_time: unused here; kept for interface compatibility
    """

    try:
        # Derive a deterministic output stream id from the raw stream id,
        # the diagnostic stream name, and the owner.
        battery_marker_stream_id = uuid.uuid3(
            uuid.NAMESPACE_DNS, str(raw_stream_id + dd_stream_name + owner_id))

        for day in get_stream_days(raw_stream_id, battery_marker_stream_id, CC):
            stream = CC.get_datastream(raw_stream_id, data_type=DataSet.COMPLETE, day=day)
            if not stream.data:
                continue

            windowed_data = window(stream.data, config['general']['window_size'], True)
            merged_windows = merge_consective_windows(
                process_windows(windowed_data, stream_name, config))
            if not merged_windows:
                continue

            input_streams = [{"owner_id": owner_id, "id": str(raw_stream_id), "name": stream_name}]
            output_stream = {"id": battery_marker_stream_id, "name": dd_stream_name,
                             "algo_type": config["algo_type"]["battery_marker"]}
            labelled_windows = mark_windows(battery_marker_stream_id, merged_windows, CC, config)
            metadata = get_metadata(dd_stream_name, input_streams, config)
            store(labelled_windows, input_streams, output_stream, metadata, CC, config)
    except Exception as e:
        # NOTE(review): best-effort — errors are printed and swallowed.
        print(e)
예제 #5
0
def phone_screen_touch_marker(raw_stream_id: uuid,
                              raw_stream_name: str,
                              owner_id,
                              dd_stream_name,
                              CC: CerebralCortex,
                              config: dict,
                              start_time=None,
                              end_time=None):
    """
    Count screen touches per window and store the merged results.

    Not part of the core data-diagnostic suite; it only tallies how many
    screen touches occurred.

    :param raw_stream_id: id of the raw screen-touch stream
    :param raw_stream_name: name of the raw stream
    :param owner_id: stream owner's id
    :param dd_stream_name: data-diagnostic output stream name
    :param CC: CerebralCortex connection object
    :param config: data-diagnostic configuration
    :param start_time: optional lower bound forwarded to get_datastream
    :param end_time: optional upper bound forwarded to get_datastream
    """

    try:
        # Derive a deterministic output stream id for this marker.
        seed = str(raw_stream_id + dd_stream_name + owner_id +
                   "mobile phone screen touch marker")
        screen_touch_stream_id = uuid.uuid3(uuid.NAMESPACE_DNS, seed)

        for day in get_stream_days(raw_stream_id, screen_touch_stream_id,
                                   CC):
            stream = CC.get_datastream(raw_stream_id,
                                       data_type=DataSet.COMPLETE,
                                       day=day,
                                       start_time=start_time,
                                       end_time=end_time)
            if not stream.data:
                continue

            windowed_data = window(stream.data,
                                   config['general']['window_size'], True)
            merged_windows = merge_consective_windows(
                process_windows(windowed_data))
            if not merged_windows:
                continue

            input_streams = [{
                "owner_id": owner_id,
                "id": str(raw_stream_id),
                "name": raw_stream_name
            }]
            output_stream = {
                "id": screen_touch_stream_id,
                "name": dd_stream_name,
                # NOTE(review): reuses the app-availability algo type —
                # presumably intentional; confirm against config.
                "algo_type":
                config["algo_type"]["app_availability_marker"]
            }
            metadata = get_metadata(dd_stream_name, input_streams, config)
            store(merged_windows, input_streams, output_stream, metadata,
                  CC, config)

    except Exception as e:
        # Best-effort: print the failure and continue.
        print(e)
def packet_loss_marker(stream_id: uuid, CC_obj: CerebralCortex, config: dict, start_time=None, end_time=None):
    """
    Label a window as packet-loss when the received packet count falls
    below the expected count; store the labeled windows (st, et, label)
    with their metadata.

    :param stream_id: id of the stream to diagnose
    :param CC_obj: CerebralCortex connection object
    :param config: data-diagnostic configuration
    :param start_time: optional lower bound forwarded to get_datastream
    :param end_time: optional upper bound forwarded to get_datastream
    :raises ValueError: if the stream's sensor type is not recognized
    """
    stream = CC_obj.get_datastream(stream_id, data_type=DataSet.COMPLETE, start_time=start_time, end_time=end_time)
    name = stream._name
    results = OrderedDict()

    # Select per-sensor sampling rate, acceptable-loss threshold, label,
    # and windowing of the raw samples.
    if name == config["sensor_types"]["autosense_ecg"]:
        sampling_rate = config["sampling_rate"]["ecg"]
        threshold_val = config["packet_loss_marker"]["ecg_acceptable_packet_loss"]
        label = config["labels"]["ecg_packet_loss"]
        windowed_data = window(stream.data, config['general']['window_size'], False)
    elif name == config["sensor_types"]["autosense_rip"]:
        sampling_rate = config["sampling_rate"]["rip"]
        threshold_val = config["packet_loss_marker"]["rip_acceptable_packet_loss"]
        label = config["labels"]["rip_packet_loss"]
        windowed_data = window(stream.data, config['general']['window_size'], False)
    elif name == config["sensor_types"]["motionsense_accel"]:
        sampling_rate = config["sampling_rate"]["motionsense"]
        threshold_val = config["packet_loss_marker"]["motionsense_acceptable_packet_loss"]
        label = config["labels"]["motionsense_packet_loss"]
        # Accelerometer axes are collapsed to a magnitude stream first.
        motionsense_accel_magni = magnitude(stream)
        windowed_data = window(motionsense_accel_magni.data, config['general']['window_size'], False)
    else:
        raise ValueError("Incorrect sensor type.")

    # Expected packets per window is constant for the whole stream.
    expected_packets = sampling_rate * config['general']['window_size']
    for key, data in windowed_data.items():
        # Flag the window when the received fraction is under the threshold.
        if (len(data) / expected_packets) < threshold_val:
            results[key] = label

    merged_windows = merge_consective_windows(results)
    input_streams = [{"id": str(stream_id), "name": name}]
    store(input_streams, merged_windows, CC_obj, config, config["algo_names"]["packet_loss_marker"])
def attachment_marker(stream_id: uuid,
                      CC_obj: CerebralCortex,
                      config: dict,
                      start_time=None,
                      end_time=None):
    """
    Label sensor windows as on-body or off-body and store the labeled
    windows (st, et, label) with their metadata.

    :param stream_id: UUID of the stream to diagnose
    :param CC_obj: CerebralCortex object
    :param config: Data diagnostics configurations
    :param start_time: optional lower bound forwarded to get_datastream
    :param end_time: optional upper bound forwarded to get_datastream
    :raises ValueError: if the stream's sensor type is not recognized
    """

    stream = CC_obj.get_datastream(stream_id,
                                   data_type=DataSet.COMPLETE,
                                   start_time=start_time,
                                   end_time=end_time)

    results = OrderedDict()
    stream_name = stream._name

    # Pick the variance threshold and on/off labels for this sensor type.
    if stream_name == config["stream_names"]["autosense_ecg"]:
        threshold_val = config['attachment_marker']['ecg_on_body']
        label_on = config['labels']['ecg_on_body']
        label_off = config['labels']['ecg_off_body']
    elif stream_name == config["stream_names"]["autosense_rip"]:
        threshold_val = config['attachment_marker']['rip_on_body']
        label_on = config['labels']['rip_on_body']
        label_off = config['labels']['rip_off_body']
    else:
        raise ValueError("Incorrect sensor type.")

    windowed_data = window(stream.data, config['general']['window_size'],
                           False)

    for key, data in windowed_data.items():
        # Outliers are discarded before the variance test; a low-variance
        # window is treated as the sensor being off the body.
        normal_values = outlier_detection(data)
        is_off_body = stat.variance(normal_values) < threshold_val
        results[key] = label_off if is_off_body else label_on

    merged_windows = merge_consective_windows(results)
    input_streams = [{"id": str(stream_id), "name": stream_name}]
    store(input_streams, merged_windows, CC_obj, config,
          config["algo_names"]["attachment_marker"])
def sensor_availability(raw_stream_id: uuid, stream_name: str, owner_id: uuid,
                        dd_stream_name, phone_physical_activity,
                        CC: CerebralCortex, config: dict):
    """
    Mark missing sensor data as a wireless disconnection when a
    participant walked away from the phone or sensor.

    :param raw_stream_id: id of the raw sensor stream
    :param stream_name: name of the raw stream
    :param owner_id: stream owner's id
    :param dd_stream_name: data-diagnostic output stream name
    :param phone_physical_activity: phone physical-activity stream passed
        to the per-window processing
    :param CC: CerebralCortex connection object
    :param config: data-diagnostic configuration
    """

    # Derive a deterministic output stream id from the raw stream id,
    # the diagnostic stream name, and the owner.
    wireless_marker_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS, str(raw_stream_id + dd_stream_name + owner_id))

    for day in get_stream_days(raw_stream_id, wireless_marker_stream_id, CC):
        # Load the day's stream data to be diagnosed.
        raw_stream = CC.get_datastream(raw_stream_id,
                                       day,
                                       data_type=DataSet.COMPLETE)
        if not raw_stream.data:
            continue

        windowed_data = window(raw_stream.data,
                               config['general']['window_size'], True)
        merged_windows = merge_consective_windows(
            process_windows(windowed_data, day, CC, phone_physical_activity,
                            config))
        if not merged_windows:
            continue

        input_streams = [{
            "owner_id": owner_id,
            "id": str(raw_stream_id),
            "name": stream_name
        }]
        output_stream = {
            "id": wireless_marker_stream_id,
            "name": dd_stream_name,
            "algo_type": config["algo_type"]["sensor_unavailable_marker"]
        }
        metadata = get_metadata(dd_stream_name, input_streams, config)
        store(merged_windows, input_streams, output_stream, metadata, CC,
              config)
def ecg_data_quality(datastream: DataStream,
                     window_size: float = 2.0,
                     acceptable_outlier_percent: float = .34,
                     outlier_threshold_high: float = .9769,
                     outlier_threshold_low: float = .004884,
                     ecg_threshold_band_loose: float = .01148,
                     ecg_threshold_slope: float = .02443,
                     buffer_length: int = 3) -> DataStream:
    """
    Annotate an ECG datastream with per-window data-quality labels,
    coalescing consecutive windows that received the same verdict into a
    single annotation span.

    :param datastream: Input ECG datastream
    :param window_size: Number of seconds per quality-check window
    :param acceptable_outlier_percent: Acceptable outlier fraction per window (default 34%)
    :param outlier_threshold_high: Fraction of ADC range above which a value is an outlier
    :param outlier_threshold_low: Fraction of ADC range below which a value is an outlier
    :param ecg_threshold_band_loose: Band-loose threshold as fraction of ADC range
    :param ecg_threshold_slope: Max allowed consecutive-sample difference as fraction of ADC range
    :param buffer_length: Number of past windows that influence the current verdict
    :return: Annotated datastream of acceptable/non-acceptable quality spans
    """

    quality_stream = DataStream.from_datastream(input_streams=[datastream])
    windows = window(datastream.data, window_size=window_size)

    annotations = []
    # Shared across windows; presumably accumulates range history used by
    # compute_data_quality — confirm in that helper.
    ecg_range = []
    for _, samples in windows.items():
        if not samples:
            continue
        verdict = compute_data_quality(samples, ecg_range, True,
                                       ecg_threshold_band_loose,
                                       ecg_threshold_slope,
                                       acceptable_outlier_percent,
                                       outlier_threshold_high,
                                       outlier_threshold_low,
                                       buffer_length)
        if annotations and annotations[-1].sample == verdict:
            # Same verdict as the previous span: extend it in place.
            annotations[-1] = DataPoint.from_tuple(
                annotations[-1].start_time, verdict, samples[-1].start_time)
        else:
            annotations.append(
                DataPoint.from_tuple(samples[0].start_time, verdict,
                                     samples[-1].start_time))

    quality_stream.data = annotations
    return quality_stream
예제 #10
0
def accelerometer_features(accel: DataStream,
                           window_length: float = 10.0,
                           activity_threshold: float = 0.21,
                           percentile_low: int = 1,
                           percentile_high: int = 99):
    """
    Derive magnitude, windowed magnitude-deviation, and activity streams
    from a raw accelerometer datastream.

    References:
        Figure 3: http://www.cs.memphis.edu/~santosh/Papers/Timing-JIT-UbiComp-2014.pdf

    :param accel: raw accelerometer datastream
    :param window_length: window length for the deviation stream
    :param activity_threshold: fraction of the percentile spread above the
        low percentile that counts as activity
    :param percentile_low: lower percentile bound of magnitude values
    :param percentile_high: upper percentile bound of magnitude values
    :return: (accelerometer_magnitude, accelerometer_win_mag_deviations,
        accel_activity)
    """
    accelerometer_magnitude = magnitude(normalize(accel))

    # Per-window standard deviation of the magnitude signal.
    accelerometer_win_mag_deviations_data = [
        window_std_dev(data, key[0])
        for key, data in window(accelerometer_magnitude.data,
                                window_length).items()
    ]

    accelerometer_win_mag_deviations = DataStream.from_datastream([accel])
    accelerometer_win_mag_deviations.data = accelerometer_win_mag_deviations_data

    # The activity cutoff is set relative to the spread between the low and
    # high percentiles of the magnitude values (robust to extreme samples).
    am_values = np.array([dp.sample for dp in accelerometer_magnitude.data])
    low_limit = np.percentile(am_values, percentile_low)
    high_limit = np.percentile(am_values, percentile_high)
    # Renamed from `range`, which shadowed the builtin; also removed
    # commented-out debug prints.
    value_range = high_limit - low_limit
    cutoff = low_limit + activity_threshold * value_range

    accel_activity_data = [
        DataPoint.from_tuple(dp.start_time, dp.sample > cutoff)
        for dp in accelerometer_win_mag_deviations_data
    ]

    accel_activity = DataStream.from_datastream([accel])
    accel_activity.data = accel_activity_data

    return accelerometer_magnitude, accelerometer_win_mag_deviations, accel_activity
def battery_marker(stream_id: uuid,
                   CC_obj: CerebralCortex,
                   config: dict,
                   start_time=None,
                   end_time=None):
    """
    Decide from battery percentages whether the phone was powered off or
    the battery was low; store the labeled windows (st, et, label) with
    their metadata.

    :param stream_id: id of the battery stream
    :param CC_obj: CerebralCortex connection object
    :param config: data-diagnostic configuration
    :param start_time: optional lower bound forwarded to get_datastream
    :param end_time: optional upper bound forwarded to get_datastream
    :raises ValueError: if the stream's sensor type is not recognized
    """
    results = OrderedDict()

    stream = CC_obj.get_datastream(stream_id,
                                   data_type=DataSet.COMPLETE,
                                   start_time=start_time,
                                   end_time=end_time)
    windowed_data = window(stream.data, config['general']['window_size'], True)

    name = stream._name

    for key, data in windowed_data.items():
        samples = [float(point.sample) for point in data]

        # Battery semantics differ per device, so dispatch on sensor type.
        if name == config["sensor_types"]["phone_battery"]:
            results[key] = phone_battery(samples, config)
        elif name == config["sensor_types"]["motionsense_battery"]:
            results[key] = motionsense_battery(samples, config)
        elif name == config["sensor_types"]["autosense_battery"]:
            results[key] = autosense_battery(samples, config)
        else:
            raise ValueError("Incorrect sensor type.")

    merged_windows = merge_consective_windows(results)
    input_streams = [{"id": str(stream_id), "name": name}]
    store(input_streams, merged_windows, CC_obj, config,
          config["algo_names"]["battery_marker"])
예제 #12
0
def attachment_marker(raw_stream_id: uuid, stream_name: str, owner_id: uuid,
                      dd_stream_name, CC: CerebralCortex, config: dict):
    """
    Label sensor data as sensor-on-body, sensor-off-body, or
    improper-attachment, and store the labeled windows (st, et, label)
    with their metadata.

    :param raw_stream_id: id of the raw sensor stream
    :param stream_name: name of the raw stream
    :param owner_id: stream owner's id
    :param dd_stream_name: data-diagnostic output stream name
    :param CC: CerebralCortex connection object
    :param config: data-diagnostic configuration
    """
    # TODO: quality streams could be multiple so find the one computed with CC
    # Derive a deterministic output stream id from the raw stream id,
    # the diagnostic stream name, and the owner.
    attachment_marker_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS, str(raw_stream_id + dd_stream_name + owner_id))

    for day in get_stream_days(raw_stream_id, attachment_marker_stream_id,
                               CC):
        # Load the day's stream data to be diagnosed.
        raw_stream = CC.get_datastream(raw_stream_id,
                                       day,
                                       data_type=DataSet.COMPLETE)
        if not raw_stream.data:
            continue

        windowed_data = window(raw_stream.data,
                               config['general']['window_size'], True)
        merged_windows = merge_consective_windows(
            process_windows(windowed_data, config))

        # NOTE(review): unlike the sibling markers, results are stored even
        # when merged_windows is empty — confirm whether that is intended.
        input_streams = [{
            "owner_id": owner_id,
            "id": str(raw_stream_id),
            "name": stream_name
        }]
        output_stream = {
            "id": attachment_marker_stream_id,
            "name": dd_stream_name,
            "algo_type": config["algo_type"]["attachment_marker"]
        }
        metadata = get_metadata(dd_stream_name, input_streams, config)
        store(merged_windows, input_streams, output_stream, metadata, CC,
              config)
def wireless_disconnection(stream_id: uuid, stream_name: str, owner_id: uuid,
                           CC_obj: CerebralCortex, config: dict):
    """
    Analyze whether a sensor was unavailable due to a wireless disconnection
    or due to sensor powered off. This method automatically loads related
    accelerometer streams of an owner. All the labeled data (st, et, label)
    with its metadata are then stored in a datastore.
    Note: If an owner owns more than one accelerometer (for example, more
    than one motionsense accelerometer) then this might not work.
    :param stream_id: stream_id should be of "battery-powered-off"
    :param stream_name: name of the raw stream; rebound below from the
        loaded stream's own name
    :param owner_id: stream owner's id; rebound below from the loaded stream
    :param CC_obj: CerebralCortex connection object
    :param config: data-diagnostic configuration
    """

    results = OrderedDict()

    # Only the day containing the stream's end time is diagnosed here.
    stream_end_time = CC_obj.get_stream_start_end_time(stream_id)["end_time"]
    day = stream_end_time

    # load stream data to be diagnosed
    stream = CC_obj.get_datastream(stream_id, day, data_type=DataSet.COMPLETE)
    windowed_data = window(stream.data, config['general']['window_size'], True)

    # The incoming owner_id/stream_name parameters are deliberately
    # overwritten with the loaded stream's own attributes.
    owner_id = stream._owner
    stream_name = stream._name

    # Windows that coincide with battery-powered-off periods are removed so
    # they are not mislabeled as wireless disconnections.
    windowed_data = filter_battery_off_windows(stream_id, stream_name,
                                               windowed_data, owner_id, config,
                                               CC_obj)

    threshold = config['sensor_unavailable_marker']['autosense']
    label = config['labels']['autosense_unavailable']

    if windowed_data:
        # prepare input streams metadata
        # NOTE(review): all_stream_ids_names is a module-level mapping not
        # visible in this chunk — presumably name -> stream id; confirm.
        x = all_stream_ids_names[config["stream_names"]["autosense_accel_x"]]
        y = all_stream_ids_names[config["stream_names"]["autosense_accel_y"]]
        z = all_stream_ids_names[config["stream_names"]["autosense_accel_z"]]

        input_streams = [{
            "id": str(stream_id),
            "name": stream_name
        }, {
            "id": str(x),
            "name": config["stream_names"]["autosense_accel_x"]
        }, {
            "id": str(y),
            "name": config["stream_names"]["autosense_accel_y"]
        }, {
            "id": str(z),
            "name": config["stream_names"]["autosense_accel_z"]
        }]

        for dp in windowed_data:
            # Only empty windows with valid time bounds are candidates for
            # the wireless-disconnection label.
            if not dp.data and dp.start_time != "" and dp.end_time != "":
                # Inspect accelerometer data from the window immediately
                # preceding the gap.
                start_time = dp.start_time - timedelta(
                    seconds=config['general']['window_size'])
                end_time = dp.start_time

                autosense_accel_x = CC_obj.get_datastream(
                    x,
                    start_time=start_time,
                    end_time=end_time,
                    data_type=DataSet.ONLY_DATA)
                autosense_accel_y = CC_obj.get_datastream(
                    y,
                    start_time=start_time,
                    end_time=end_time,
                    data_type=DataSet.ONLY_DATA)
                autosense_accel_z = CC_obj.get_datastream(
                    z,
                    start_time=start_time,
                    end_time=end_time,
                    data_type=DataSet.ONLY_DATA)

                magnitudeVals = magnitude_autosense_v1(autosense_accel_x,
                                                       autosense_accel_y,
                                                       autosense_accel_z)

                # High movement variance right before the gap suggests the
                # participant walked away (wireless disconnection) rather
                # than the sensor being powered off.
                if np.var(magnitudeVals) > threshold:
                    key = (dp.start_time, dp.end_time)
                    results[key] = label

        merged_windows = merge_consective_windows(results)
        store(input_streams, merged_windows, CC_obj, config,
              config["algo_names"]["sensor_unavailable_marker"])
def packet_loss_marker(raw_stream_id: uuid, stream_name: str, owner_id: uuid,
                       dd_stream_name, CC: CerebralCortex, config: dict):
    """
    Label a window as packet-loss if received packets are less than the expected packets.
    All the labeled data (st, et, label) with its metadata are then stored in a datastore.
    :param raw_stream_id: id of the raw sensor stream
    :param stream_name: name of the raw stream
    :param owner_id: stream owner's id
    :param dd_stream_name: data-diagnostic output stream name
    :param CC: CerebralCortex connection object
    :param config: data-diagnostic configuration
    :raises ValueError: if stream_name matches no known sensor type
    """

    # using stream_id, data-diagnostic-stream-id, and owner id to generate a
    # unique stream ID for the packet-loss marker
    packetloss_marker_stream_id = uuid.uuid3(
        uuid.NAMESPACE_DNS, str(raw_stream_id + dd_stream_name + owner_id))

    stream_days = get_stream_days(raw_stream_id, packetloss_marker_stream_id,
                                  CC)

    # Select per-sensor sampling rate, acceptable-loss threshold and label.
    if stream_name == config["stream_names"]["autosense_ecg"]:
        sampling_rate = config["sampling_rate"]["ecg"]
        threshold_val = config["packet_loss_marker"][
            "ecg_acceptable_packet_loss"]
        label = config["labels"]["ecg_packet_loss"]
    elif stream_name == config["stream_names"]["autosense_rip"]:
        sampling_rate = config["sampling_rate"]["rip"]
        threshold_val = config["packet_loss_marker"][
            "rip_acceptable_packet_loss"]
        label = config["labels"]["rip_packet_loss"]
    elif stream_name in (config["stream_names"]["motionsense_hrv_accel_right"],
                         config["stream_names"]["motionsense_hrv_accel_left"]):
        sampling_rate = config["sampling_rate"]["motionsense_accel"]
        threshold_val = config["packet_loss_marker"][
            "motionsense_accel_acceptable_packet_loss"]
        # NOTE(review): this reuses the *gyro* packet-loss label for the
        # accel streams — looks like a copy-paste slip, but kept as-is since
        # an accel-specific label key is not visible in config. Confirm.
        label = config["labels"]["motionsense_gyro_packet_loss"]
    elif stream_name in (config["stream_names"]["motionsense_hrv_gyro_right"],
                         config["stream_names"]["motionsense_hrv_gyro_left"]):
        sampling_rate = config["sampling_rate"]["motionsense_gyro"]
        threshold_val = config["packet_loss_marker"][
            "motionsense_gyro_acceptable_packet_loss"]
        label = config["labels"]["motionsense_gyro_packet_loss"]
    else:
        # Previously an unmatched stream_name fell through and left
        # sampling_rate/threshold_val/label unbound, raising NameError in
        # the loop below. Fail fast instead, consistent with the sibling
        # markers' "Incorrect sensor type." handling.
        raise ValueError("Incorrect sensor type.")

    for day in stream_days:
        # load stream data to be diagnosed
        stream = CC.get_datastream(raw_stream_id,
                                   day,
                                   data_type=DataSet.COMPLETE)

        if len(stream.data) > 0:
            windowed_data = window(stream.data,
                                   config['general']['window_size'], True)

            results = process_windows(windowed_data, sampling_rate,
                                      threshold_val, label, config)
            merged_windows = merge_consective_windows(results)
            if len(merged_windows) > 0:
                input_streams = [{
                    "owner_id": owner_id,
                    "id": str(raw_stream_id),
                    "name": stream_name
                }]
                output_stream = {
                    "id": packetloss_marker_stream_id,
                    "name": dd_stream_name,
                    "algo_type": config["algo_type"]["packet_loss_marker"]
                }
                metadata = get_metadata(dd_stream_name, input_streams, config)
                store(merged_windows, input_streams, output_stream, metadata,
                      CC, config)