Code example #1
    def test_tool_channel_new_api(self):
        with HyperStream(file_logger=False,
                         console_logger=False,
                         mqtt_logger=None) as hs:
            M = hs.channel_manager.memory

            # new way of loading tools
            clock_new = hs.tools.clock()

            # old way of loading tools
            clock_old = hs.channel_manager.tools["clock"].window(
                (MIN_DATE, utcnow())).last().value()

            # TODO: NOTE THAT IF WE DO IT THE OLD WAY FIRST, THEN THE NEW WAY FAILS WITH:
            # TypeError: super(type, obj): obj must be an instance or subtype of type
            # which possibly relates to:
            # https://stackoverflow.com/questions/9722343/python-super-behavior-not-dependable

            ticker_old = M.get_or_create_stream("ticker_old")
            ticker_new = M.get_or_create_stream("ticker_new")

            now = utcnow()
            before = (now - timedelta(seconds=30)).replace(tzinfo=UTC)
            ti = TimeInterval(before, now)

            clock_old.execute(sources=[], sink=ticker_old, interval=ti)
            clock_new.execute(sources=[], sink=ticker_new, interval=ti)

            self.assertListEqual(ticker_old.window().values(),
                                 ticker_new.window().values())
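
A note on the TODO above: the quoted error is a well-known Python pitfall. If a class object is re-created (for example, when a tool module is re-imported) while instances of the old class object are still alive, super(Type, obj) fails because obj belongs to the previous class object. A minimal, self-contained sketch of the mechanism (illustrative only, not HyperStream's tool-loading code):

    class Base(object):
        pass

    class Tool(Base):
        pass

    obj = Tool()

    # Simulate a module reload: rebind "Tool" to a fresh,
    # identically named class object.
    class Tool(Base):
        pass

    try:
        super(Tool, obj)  # obj is an instance of the *old* Tool
    except TypeError as e:
        # TypeError: super(type, obj): obj must be an instance or subtype of type
        print(e)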
Code example #2
    def update_channels(self):
        """
        Pulls all of the stream definitions out of the database and populates the channels with stream references
        """
        logging.info("Updating channels")
        with switch_db(StreamDefinitionModel, 'hyperstream'):
            for s in StreamDefinitionModel.objects():
                try:
                    stream_id = StreamId(name=s.stream_id.name,
                                         meta_data=s.stream_id.meta_data)
                except AttributeError as e:
                    raise e
                logging.debug("Processing {}".format(stream_id))

                try:
                    # This can fail if a plugin has been defined by a different instantiation of HyperStream on the same
                    # database.
                    channel = self.get_channel(s.channel_id)
                except ChannelNotFoundError as e:
                    logging.warning(e)
                    continue

                # calculated_intervals = TimeIntervals(map(lambda x: (x.start, x.end), s.calculated_intervals))
                last_accessed = utcnow()
                last_updated = s.last_updated if s.last_updated else utcnow()

                if stream_id in channel.streams:
                    if isinstance(channel, (AssetsChannel, AssetsFileChannel)):
                        continue
                    raise StreamAlreadyExistsError(stream_id)

                from . import MemoryChannel, DatabaseChannel
                if isinstance(channel, MemoryChannel):
                    channel.create_stream(stream_id)
                elif isinstance(channel, DatabaseChannel):
                    if channel == self.assets:
                        stream_type = AssetStream
                    else:
                        stream_type = DatabaseStream

                    channel.streams[stream_id] = stream_type(
                        channel=channel,
                        stream_id=stream_id,
                        calculated_intervals=None,  # Not required since it's initialised from mongo_model in __init__
                        last_accessed=last_accessed,
                        last_updated=last_updated,
                        sandbox=s.sandbox,
                        mongo_model=s)
                else:
                    logging.warn("Unable to parse stream {}".format(stream_id))
Code example #3
    def test_mqtt_logger(self):
        """
        Test the MQTT logger using the standard format
        Note that mosquitto should be running first:
        $ docker run -ti -p 1883:1883 -p 9001:9001 toke/mosquitto
        or
        $ brew services start mosquitto

        """
        # assert(mosquitto_is_running())
        logging.raiseExceptions = True

        mqtt_logger = dict(host=mqtt_ip, port=1883, topic="topics/test", loglevel=MON, qos=1)

        with HyperStream() as hs:
            hs.logger = HyperStreamLogger(
                default_loglevel=logging.INFO,
                file_logger=False,
                console_logger=False,
                mqtt_logger=mqtt_logger,
                close_existing=True)

            with MqttClient() as client:
                # Make sure the client is publishing
                client.client.publish("topics/test", "{} ABC".format(utcnow()))

                logging.monitor("1234567890")
                sleep(1)
                print(client.last_messages)
                print(client.last_messages["topics/test"][24:])
                assert(str(client.last_messages["topics/test"].decode("utf8")[24:]) == '[MON  ]  1234567890')
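
The commented-out mosquitto_is_running() guard above suggests checking that a broker is reachable before asserting on messages. A minimal sketch of such a helper, assuming a plain TCP connectivity test is sufficient (the name and signature are illustrative, not part of HyperStream):

    import socket

    def mosquitto_is_running(host="127.0.0.1", port=1883, timeout=1.0):
        # Best-effort check: can we open a TCP connection to the broker port?
        try:
            sock = socket.create_connection((host, port), timeout=timeout)
            sock.close()
            return True
        except (socket.error, OSError):
            return False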
Code example #4
    def test_index_of_by_stream(self):
        w = basic_workflow(sys._getframe().f_code.co_name)

        aggregate_loc = channels.get_tool(
            name="index_of_by_stream",
            parameters=dict(index="kitchen")
        )

        # Create a stream with the single value "location" in it
        w.create_node(stream_name="selector_meta_data", channel=A, plate_ids=None)

        A.write_to_stream(stream_id=StreamId(name="selector_meta_data"),
                          data=StreamInstance(timestamp=utcnow(), value="location"))

        N = w.nodes
        w.create_factor(
            tool=aggregate_loc,
            sources=[N["selector_meta_data"], N["rss"]],
            sink=N["rss_kitchen"]
        )

        time_interval = TimeInterval(scripted_experiments[0].start, scripted_experiments[0].start + 2 * minute)
        w.execute(time_interval)

        key = h1 + (('location', 'kitchen'),) + wA

        assert all(a == b for a, b in zip(N['rss_kitchen'].streams[h1 + wA].window(time_interval).head(10),
                                          N['rss'].streams[key].window(time_interval).head(10)))
Code example #5
    def test_database_channel(self):
        # Simple querying
        ti = TimeInterval(t1, t1 + minute)

        # Get or create the stream that lives in the database
        env = D.get_or_create_stream(
            stream_id=StreamId('environmental_db', (("house", "1"), )))

        D.purge_stream(env.stream_id)

        env_tool = channels.get_tool(
            "sphere",
            dict(modality="environmental", rename_keys=True, dedupe=True))

        env_tool.execute(source=None,
                         splitting_stream=None,
                         sinks=[env],
                         interval=ti,
                         input_plate_value=None,
                         output_plate=hyperstream.plate_manager.plates["H"])

        # Create stream whose source will be the above database stream
        elec = M.create_stream(StreamId('electricity'))

        env_tool = channels.get_tool(
            "sphere",
            dict(modality="environmental", rename_keys=True, dedupe=True))
        elec_tool = T[component].window(
            (MIN_DATE, utcnow())).last().value(key='electricity-04063')

        env_tool.execute(source=None,
                         splitting_stream=None,
                         sinks=[env],
                         interval=ti,
                         input_plate_value=None,
                         output_plate=hyperstream.plate_manager.plates["H"])

        elec_tool.execute(sources=[env],
                          sink=elec,
                          interval=ti,
                          alignment_stream=None)

        q1 = "\n".join("=".join(map(str, ee)) for ee in elec.window(ti))

        # print(q1)
        # print(edl)

        assert (q1 == '2016-04-28 20:00:00.159000+00:00=0.0\n'
                '2016-04-28 20:00:06.570000+00:00=0.0\n'
                '2016-04-28 20:00:12.732000+00:00=0.0\n'
                '2016-04-28 20:00:25.125000+00:00=0.0\n'
                '2016-04-28 20:00:31.405000+00:00=0.0\n'
                '2016-04-28 20:00:50.132000+00:00=0.0')

        assert (elec.window(ti).values() == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Code example #6
    def to_python(self, value):
        if value.upper() in ('0', 'MIN_DATE', 'MIN', '0-0-0'):
            dt = MIN_DATE
        elif value.upper() == 'NOW':
            dt = utcnow()
        elif value.upper() in ('MAX_DATE', 'MAX'):
            dt = MAX_DATE
        else:
            dt = ciso8601.parse_datetime(value)

        if dt is None:
            raise ValueError("{} not parsed".format(value))

        return dt.replace(tzinfo=UTC)
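
For reference, how this converter behaves on typical inputs (a sketch; DateTimeField is a hypothetical stand-in for the class that owns to_python, and older ciso8601 returns None rather than raising on unparseable strings, hence the explicit None check):

    field = DateTimeField()  # hypothetical owner of to_python

    field.to_python('MIN')                  # -> MIN_DATE, tagged with UTC
    field.to_python('now')                  # -> utcnow(), tagged with UTC
    field.to_python('2016-04-28T20:00:00')  # -> parsed by ciso8601, tagged with UTC

    try:
        field.to_python('not-a-date')       # ciso8601 returns None here
    except ValueError as e:
        print(e)  # "not-a-date not parsed"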
Code example #7
    def test_tool_channel(self):
        # Load in the objects and print them
        clock_stream = T[clock]
        assert (isinstance(clock_stream, Stream))
        # assert(clock_stream.modifier == Last() + IData())

        sphere_silhouette_stream = channels["sphere_tools"].streams[
            sphere_silhouette]
        assert (sphere_silhouette_stream.channel.can_create is False)

        agg = T[aggregate].window((MIN_DATE, utcnow())).items()
        assert (len(agg) > 0)
        assert (agg[0].timestamp == datetime(2016, 10, 26, 0, 0, tzinfo=UTC))
        assert (isinstance(agg[0].value, type))
Code example #8
    def test_mqtt_logger_json(self):
        """
        Test the MQTT logger using the JSON format
        Note that mosquitto should be running first:
        $ docker run -ti -p 1883:1883 -p 9001:9001 toke/mosquitto
        or
        $ brew services start mosquitto

        """
        # assert (mosquitto_is_running())
        logging.raiseExceptions = True

        # noinspection PyPep8Naming
        def handleError(self, record):
            raise

        mqtthandler.MQTTHandler.handleError = handleError

        # noinspection PyTypeChecker
        mqtt_logger = dict(host=mqtt_ip, port=1883, topic="topics/test", loglevel=MON, qos=1,
                           formatter=SenMLFormatter())

        with HyperStream() as hs:
            hs.logger = HyperStreamLogger(
                default_loglevel=logging.INFO,
                file_logger=False,
                console_logger=False,
                mqtt_logger=mqtt_logger,
                close_existing=True)

            with MqttClient() as client:
                # Make sure the client is publishing
                client.client.publish("topics/test", "{} ABC".format(utcnow()))

                logging.monitor("1234567890", extra=dict(n="blah"))
                sleep(1)
                # print(client.last_messages["topics/test"])
                msg = json.loads(client.last_messages["topics/test"])
                assert(msg['e'][0]['n'] == 'blah')
                assert(msg['e'][0]['v'] == '1234567890')
                assert(msg['uid'] == 'hyperstream')

                logging.monitor("1234567890")
                sleep(1)
                # print(client.last_messages["topics/test"])
                msg = json.loads(client.last_messages["topics/test"])
                assert(msg['e'][0]['n'] == 'default')
                assert(msg['e'][0]['v'] == '1234567890')
                assert(msg['uid'] == 'hyperstream')
Code example #9
    def test_tool_channel(self):
        with HyperStream(file_logger=False,
                         console_logger=False,
                         mqtt_logger=None) as hs:
            T = hs.channel_manager.tools

            # Load in the objects and print them
            clock_stream = T["clock"]
            assert (isinstance(clock_stream, Stream))
            # assert(clock_stream.modifier == Last() + IData())

            agg = T["aggregate"].window((MIN_DATE, utcnow())).items()
            assert (len(agg) > 0)
            # noinspection PyTypeChecker
            assert (agg[0].timestamp == datetime(2016, 10, 26, 0, 0, tzinfo=UTC))
            assert (isinstance(agg[0].value, type))
Code example #10
    def __init__(self, plugins, **kwargs):
        super(ChannelManager, self).__init__(**kwargs)

        # See this answer http://stackoverflow.com/a/14620633 for why we do the following:
        self.__dict__ = self

        tool_path = os.path.abspath(
            os.path.join(os.path.dirname(__file__), '..', 'tools'))

        self.tools = ToolChannel("tools", tool_path, up_to_timestamp=utcnow())
        self.memory = MemoryChannel("memory")
        self.mongo = DatabaseChannel("mongo")
        self.assets = AssetsChannel("assets")

        for plugin in plugins:
            for channel in plugin.load_channels():
                if channel.channel_id in self:
                    raise ChannelAlreadyExistsError(channel.channel_id)
                self[channel.channel_id] = channel

        self.update_channels()
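
The self.__dict__ = self assignment follows the linked Stack Overflow answer: the manager is a dict whose keys double as attributes, so channels registered under a key (self.tools, self.memory, and so on above) are reachable both ways. A minimal sketch of the pattern in isolation:

    class AttrDict(dict):
        """A dict whose items are also attributes (see http://stackoverflow.com/a/14620633)."""

        def __init__(self, *args, **kwargs):
            super(AttrDict, self).__init__(*args, **kwargs)
            # Attribute access and item access now share the same storage.
            self.__dict__ = self

    channels = AttrDict()
    channels["memory"] = object()
    assert channels.memory is channels["memory"]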
Code example #11
    def test_simple_query(self):
        # Simple querying
        ti = TimeInterval(t1, t1 + minute)

        elec = M[StreamId('electricity')]
        env = S.create_stream(stream_id=StreamId('environmental_data'))

        env_tool = channels.get_tool("sphere", dict(modality="environmental"))
        elec_tool = T[component].window(
            (MIN_DATE, utcnow())).last().value(key='electricity-04063')

        env_tool.execute(source=None,
                         splitting_stream=None,
                         sinks=[env],
                         interval=ti,
                         input_plate_value=None,
                         output_plate=hyperstream.plate_manager.plates["H"])

        elec_tool.execute(sources=[env],
                          sink=elec,
                          interval=ti,
                          alignment_stream=None)

        q1 = "\n".join("=".join(map(str, ee)) for ee in elec.window(ti))

        print(q1)
        # print(edl)

        assert (q1 == '2016-04-28 20:00:00.159000+00:00=0.0\n'
                '2016-04-28 20:00:06.570000+00:00=0.0\n'
                '2016-04-28 20:00:12.732000+00:00=0.0\n'
                '2016-04-28 20:00:25.125000+00:00=0.0\n'
                '2016-04-28 20:00:31.405000+00:00=0.0\n'
                '2016-04-28 20:00:50.132000+00:00=0.0')

        assert (elec.window(ti).values() == [0.0, 0.0, 0.0, 0.0, 0.0, 0.0])
Code example #12
def run(house,
        selection,
        delete_existing_workflows=True,
        loglevel=logging.INFO):
    from hyperstream import HyperStream, StreamId, TimeInterval
    from workflows.display_experiments import create_workflow_list_technicians_walkarounds
    from workflows.learn_localisation_model import create_workflow_lda_localisation_model_learner
    from hyperstream.utils import StreamNotFoundError, reconstruct_interval
    from pprint import pformat, pprint  # used for reporting model performance below

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)
    M = hyperstream.channel_manager.memory
    D = hyperstream.channel_manager.mongo
    A = hyperstream.channel_manager.assets

    workflow_id0 = "list_technicians_walkarounds"

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id0)

    try:
        w0 = hyperstream.workflow_manager.workflows[workflow_id0]
    except KeyError:
        w0 = create_workflow_list_technicians_walkarounds(hyperstream,
                                                          house=house,
                                                          safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id0)
    time_interval = TimeInterval.up_to_now()
    w0.execute(time_interval)

    # from datetime import timedelta
    # time_interval.end += timedelta(milliseconds=1)
    df = M[StreamId('experiments_dataframe',
                    (('house', house), ))].window().values()[0]
    experiment_ids = set([df['experiment_id'][i - 1] for i in selection])

    experiment_ids_str = '_'.join(experiment_ids)

    create_selected_localisation_plates(hyperstream)

    # Ensure the model is overwritten if it's already there
    for model_name in ('lda', 'svm', 'room_rssi_hmm'):
        model_id = StreamId(name="location_prediction",
                            meta_data=(('house', house), ('localisation_model',
                                                          model_name)))
        try:
            hyperstream.channel_manager.mongo.purge_stream(model_id)
        except StreamNotFoundError:
            pass

    workflow_id1 = "lda_localisation_model_learner_" + experiment_ids_str

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id1)

    try:
        w1 = hyperstream.workflow_manager.workflows[workflow_id1]
    except KeyError:
        w1 = create_workflow_lda_localisation_model_learner(
            hyperstream,
            house=house,
            experiment_ids=experiment_ids,
            safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id1)

    # Put the experiments selected into an asset stream
    from hyperstream import StreamInstance
    from hyperstream.utils import utcnow

    A.write_to_stream(stream_id=StreamId(name="experiments_selected",
                                         meta_data=(('house', house), )),
                      data=StreamInstance(timestamp=utcnow(),
                                          value=list(experiment_ids)))

    time_interval = TimeInterval.up_to_now()
    w1.execute(time_interval)

    print('number of non_empty_streams: {}'.format(
        len(hyperstream.channel_manager.memory.non_empty_streams)))

    for model_name in ('lda', 'svm', 'room_rssi_hmm'):
        print("Model: {}".format(model_name))
        model_id = StreamId(name="location_prediction",
                            meta_data=(('house', house), ('localisation_model',
                                                          model_name)))
        try:
            model = D[model_id].window().last().value
        except (AttributeError, KeyError):
            print("No {} model was learnt - not requested or no data recorded?".format(model_name))
            continue  # no model to report on for this model_name

        for experiment_id in list(experiment_ids):
            print("Experiment id: {}".format(experiment_id))
            print("Time interval: {}".format(
                reconstruct_interval(experiment_id)))
            print("Accuracy: {}".format(
                pformat(model['performance'][experiment_id]['accuracy'])))
            print("Macro F1: {}".format(
                pformat(
                    model['performance'][experiment_id]['f1_score_macro'])))
            print("Micro F1: {}".format(
                pformat(
                    model['performance'][experiment_id]['f1_score_micro'])))
            print("Confusion Matrix:")
            pprint(model['performance'][experiment_id]['confusion_matrix'])
            print("")
    return True
Code example #13
from hyperstream import HyperStream, StreamId, TimeInterval
from hyperstream.utils import utcnow, UTC
from datetime import timedelta
import os

os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))

if __name__ == '__main__':
    hs = HyperStream(loglevel=20)
    M = hs.channel_manager.memory
    T = hs.channel_manager.tools
    clock = StreamId(name="clock")
    clock_tool = T[clock].window().last().value()
    ticker = M.get_or_create_stream(stream_id=StreamId(name="ticker"))
    now = utcnow()
    before = (now - timedelta(seconds=30)).replace(tzinfo=UTC)
    ti = TimeInterval(before, now)
    clock_tool.execute(sources=[],
                       sink=ticker,
                       interval=ti,
                       alignment_stream=None)
    print(list(ticker.window().tail(5)))
Code example #14
def display_diagnostics(house):
    from datetime import timedelta
    import numpy as np
    import pandas as pd

    from hyperstream.utils import utcnow
    from sphere_connector_package.sphere_connector import SphereConnector, DataWindow

    if not globs['sphere_connector']:
        globs['sphere_connector'] = SphereConnector(
            config_filename='config.json',
            include_mongo=True,
            include_redcap=False,
            sphere_logger=None)

    t2 = utcnow()
    t1 = t2 - timedelta(seconds=60)

    sphere_connector = globs['sphere_connector']
    window = DataWindow(sphere_connector, t1, t2)
    rss = window.wearable.get_data(elements={'rss'}, rename_keys=False)
    # aids = set(d['aid'] for d in filter(lambda x: x['hid'] == house if 'hid' in x else True, docs))

    #if aids:
    #    print("Access points: ")
    #    for i, aid in enumerate(aids):
    #        print("{}: {}".format(i, aid))

    df = pd.DataFrame(rss)
    if not df.empty:
        print(df.groupby(['aid', 'uid']).agg({'wearable-rss': np.max}))
        print()
    else:
        print("No access points found")
        print()

    env = window.environmental.get_data(rename_keys=False)
    df = pd.DataFrame(env)
    if not df.empty:
        for sensor in ['electricity', 'humidity', 'light', 'pressure', 'temperature', 'water']:
            if sensor in df:
                print(df.groupby('uid').agg({sensor: [np.min, np.median, np.max]}).dropna())
            else:
                print("no {} data".format(sensor))
            print()
    else:
        print("No environmental data found")
        print()

    vid = window.video.get_data(rename_keys=False)
    df = pd.DataFrame(vid)
    if not df.empty:
        # Leave off 'video-FeaturesREID' for now
        for sensor in ['video-2DCen', 'video-2Dbb', 'video-3Dbb', 'video-3Dcen', 'video-Activity', 'video-Intensity']:
            if sensor in df:
                print(sensor[6:])
                print(df.groupby('uid')[sensor].describe())
            else:
                print("no {} data".format(sensor))
            print()

        for sensor in ['video-silhouette']:
            if sensor in df:
                print(sensor[6:])
                print(df.groupby('uid').size())
            else:
                print("no {}s".format(sensor[6:]))
            print()

        for sensor in ['video-userID']:
            if sensor in df:
                print("Unique {}s: {}".format(sensor[6:], list(set(df[sensor].dropna()))))
            else:
                print("no {}s".format(sensor[6:]))
            print()

    else:
        print("No video data found")
        print()
Code example #15
def create_workflow_rssi_distributions_per_room(hyperstream,
                                                house,
                                                experiment_indices,
                                                experiment_ids,
                                                safe=True):
    experiment_ids_str = '_'.join(experiment_ids)
    # Create a simple one step workflow for querying
    workflow_id = "rssi_distributions_per_room_" + experiment_ids_str

    houses = hyperstream.plate_manager.plates["H"]
    selected_experiments = hyperstream.plate_manager.plates[
        "H.SelectedLocalisationExperiment"]
    models = hyperstream.plate_manager.plates["LocalisationModels"]

    with hyperstream.create_workflow(
            workflow_id=workflow_id,
            name="RSSI distributions per room",
            owner="MK",
            description=
            "RSSI distributions per room, based on technician walkarounds",
            online=False,
            safe=safe) as w:

        # Various channels
        M = hyperstream.channel_manager.memory
        S = hyperstream.channel_manager.sphere
        T = hyperstream.channel_manager.tools
        D = hyperstream.channel_manager.mongo
        A = hyperstream.channel_manager.assets

        nodes = (
            ("experiments_list", M, [houses]),  # Current annotation data in 2s windows
            ("experiments_mapping", M, [houses]),  # Current annotation data in 2s windows
            ("rss_raw", S, [houses]),  # Raw RSS data
            ("rss_time", S, [selected_experiments]),  # RSS data split by experiment
            ("annotation_raw_locations", S, [houses]),  # Raw annotation data
            ("annotation_time", S, [selected_experiments]),  # Annotation data split by experiment
            ("every_2s", M, [selected_experiments]),  # Sliding windows, one every 2s
            ("annotation_state_location", M, [selected_experiments]),  # Annotation data in 2s windows
            ("annotation_state_2s_windows", M, [selected_experiments]),
            ("rss_2s", M, [selected_experiments]),  # max(RSS) per AP in past 2s of RSS
            ("merged_2s", M, [selected_experiments]),  # rss_2s merged with annotation_state_2s
            ("merged_2s_flat_" + experiment_ids_str, M, [houses]),  # Flattened version of merged_2s
            ("dataframe_" + experiment_ids_str, M, [houses]),
            ("csv_string_" + experiment_ids_str, M, [houses]),
            ("pdf_" + experiment_ids_str, M, [houses]),
            ("experiments_selected", A, [houses]))

        # Create all of the nodes
        N = dict((stream_name, w.create_node(stream_name, channel, plate_ids))
                 for stream_name, channel, plate_ids in nodes)

        # TODO: Perhaps we want to do this same
        A.write_to_stream(stream_id=StreamId(name="experiments_selected",
                                             meta_data=(('house', house), )),
                          data=StreamInstance(timestamp=utcnow(),
                                              value=list(experiment_ids)))

        w.create_factor(
            tool=hyperstream.channel_manager.get_tool(
                name="experiments_mapping_builder", parameters={}),
            sources=[N["experiments_list"], N["experiments_selected"]],
            sink=N["experiments_mapping"])

        w.create_multi_output_factor(tool=hyperstream.channel_manager.get_tool(
            name="sphere",
            parameters=dict(modality="wearable", elements={"rss"})),
                                     source=None,
                                     splitting_node=None,
                                     sink=N["rss_raw"])

        w.create_multi_output_factor(tool=hyperstream.channel_manager.get_tool(
            name="splitter_time_aware_from_stream",
            parameters=dict(meta_data_id="localisation-experiment")),
                                     source=N["rss_raw"],
                                     splitting_node=N["experiments_mapping"],
                                     sink=N["rss_time"])

        w.create_multi_output_factor(tool=hyperstream.channel_manager.get_tool(
            name="sphere",
            parameters=dict(modality="annotations",
                            annotators=[0],
                            elements={"Location"},
                            filters={})),
                                     source=None,
                                     splitting_node=None,
                                     sink=N["annotation_raw_locations"])

        w.create_multi_output_factor(tool=hyperstream.channel_manager.get_tool(
            name="splitter_time_aware_from_stream",
            parameters=dict(meta_data_id="localisation-experiment")),
                                     source=N["annotation_raw_locations"],
                                     splitting_node=N["experiments_mapping"],
                                     sink=N["annotation_time"])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="sliding_window",
            parameters=dict(lower=-2.0, upper=0.0, increment=2.0)),
                        sources=None,
                        sink=N["every_2s"])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="annotation_state_location", parameters=dict()),
                        sources=[N["every_2s"], N["annotation_time"]],
                        sink=N["annotation_state_location"])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="aligning_window", parameters=dict(lower=-2.0, upper=0.0)),
                        sources=[N["annotation_state_location"]],
                        sink=N["annotation_state_2s_windows"])

        def component_wise_max(init_value=None,
                               id_field='aid',
                               value_field='wearable-rss'):
            if init_value is None:
                init_value = {}

            def func(data):
                result = init_value.copy()
                for (time, value) in data:
                    if value[id_field] in result:
                        result[value[id_field]] = max(result[value[id_field]],
                                                      value[value_field])
                    else:
                        result[value[id_field]] = value[value_field]
                return result

            return func

        w.create_factor(
            tool=hyperstream.channel_manager.get_tool(
                name="sliding_apply",
                parameters=dict(func=component_wise_max())),
            sources=[N["annotation_state_2s_windows"], N["rss_time"]],
            sink=N["rss_2s"])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="aligned_merge",
            parameters=dict(names=["annotations", "rssi"])),
                        sources=[N["annotation_state_location"], N["rss_2s"]],
                        sink=N["merged_2s"])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="aggregate_plate",
            parameters=dict(aggregation_meta_data="localisation-experiment")),
                        sources=[N["merged_2s"]],
                        sink=N["merged_2s_flat_" + experiment_ids_str])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="dallan_data_frame_builder", parameters=dict()),
                        sources=[N["merged_2s_flat_" + experiment_ids_str]],
                        sink=N["dataframe_" + experiment_ids_str])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="data_frame_to_csv_string", parameters=dict()),
                        sources=[N["dataframe_" + experiment_ids_str]],
                        sink=N["csv_string_" + experiment_ids_str])

        w.create_factor(tool=hyperstream.channel_manager.get_tool(
            name="r_rssi_comparison_plot",
            parameters=dict(
                output_path=hyperstream.config.output_path,
                filename_suffix="_rssi_comparison_plot_{}.pdf".format('_'.join(
                    map(str, experiment_indices))))),
                        sources=[N["csv_string_" + experiment_ids_str]],
                        sink=N["pdf_" + experiment_ids_str])

        #   w.create_factor(
        #        tool=hyperstream.channel_manager.get_tool(
        #             name="localisation_model_learn",
        #             parameters=dict(nan_value=-110.0)
        #         ),
        #         sources=[N["merged_2s_flat_"+experiment_ids_str]],
        #         sink=N["location_prediction_lda_"+experiment_ids_str])
        #
        return w
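
The component_wise_max closure above folds a window of (time, value) pairs into a dict holding, per access point id, the maximum RSS seen in that window. A quick standalone check (sample data invented for illustration, with the closure lifted out of the workflow builder):

    func = component_wise_max()
    window = [
        (1, {'aid': 'ap1', 'wearable-rss': -60}),
        (2, {'aid': 'ap1', 'wearable-rss': -55}),
        (3, {'aid': 'ap2', 'wearable-rss': -80}),
    ]
    assert func(window) == {'ap1': -55, 'ap2': -80}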
Code example #16
File: assets.py  Project: vishalbelsare/HyperStream
from hyperstream import HyperStream, StreamId, StreamInstance, TimeInterval
from hyperstream.utils import utcnow, UTC
from datetime import timedelta
import os


os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))


if __name__ == '__main__':
    hs = HyperStream(loglevel=20)
    M = hs.channel_manager.memory
    T = hs.channel_manager.tools
    A = hs.channel_manager.assets
    test_assets = StreamId(name="test_assets")
    A.get_or_create_stream(test_assets)
    A.write_to_stream(test_assets, StreamInstance(utcnow(), {'a', 'b', 'c'}))
    print(list(A[test_assets].window().tail(5)))