Code example #1
class HyperStreamTests(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        super(HyperStreamTests, self).__init__(*args, **kwargs)
        if not hasattr(self, 'assertItemsEqual'):
            # patch for Python 3, where assertItemsEqual was renamed to assertCountEqual
            # noinspection PyUnresolvedReferences
            self.assertItemsEqual = self.assertCountEqual

    def setUp(self):
        self.hs = HyperStream(loglevel=logging.INFO,
                              file_logger=False,
                              console_logger=False)

    def test___init__(self):
        self.assertIs(type(self.hs), HyperStream)

    def test___repr__(self):
        self.assertIs(type(self.hs.__repr__()), str)

    def test___str__(self):
        self.assertIs(type(self.hs.__str__()), str)

    def test_create_workflow(self):
        workflow_id = 1
        name = 'test_workflow'
        owner = 'unittest'
        description = 'test of workflow'
        with self.hs.create_workflow(workflow_id, name, owner,
                                     description) as w:
            self.assertIs(type(w), Workflow)
            self.assertEqual(w.workflow_id, workflow_id)
            self.assertEqual(w.name, name)
            self.assertEqual(w.owner, owner)
            self.assertEqual(w.description, description)

    def test_usecase_1(self):
        M = self.hs.channel_manager.memory
        T = self.hs.channel_manager.tools

        clock = StreamId(name="clock")
        clock_tool = T[clock].window().last().value()
        s_ticker = M.get_or_create_stream(stream_id=StreamId(name="ticker"))

        now = datetime.utcnow().replace(tzinfo=UTC)
        before = (now - timedelta(seconds=30)).replace(tzinfo=UTC)
        ti = TimeInterval(before, now)
        clock_tool.execute(sources=[],
                           sink=s_ticker,
                           interval=ti,
                           alignment_stream=None)
        items = s_ticker.window().items()
        timestamps, values = zip(*[(it.timestamp, it.value) for it in items])

        self.assertItemsEqual(timestamps, values)

        before_s = before.replace(microsecond=0)
        expected = [before_s + timedelta(seconds=i) for i in range(1, 31)]
        self.assertItemsEqual(values, expected)
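The test class above is an excerpt, so its module-level imports are not shown. A minimal harness to run it might look like the following sketch; the import paths are assumptions inferred from the other examples on this page.

# Assumed imports; StreamId, TimeInterval, Workflow and UTC appear elsewhere on
# this page, so these paths are a best guess rather than the original header.
import logging
import unittest
from datetime import datetime, timedelta

from hyperstream import HyperStream, StreamId, TimeInterval, Workflow
from hyperstream.utils import UTC

if __name__ == '__main__':
    unittest.main()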
Code example #2
    def test_mqtt_logger(self):
        """
        Test the MQTT logger using the standard format
        Note that mosquitto should be running first:
        $ docker run -ti -p 1883:1883 -p 9001:9001 toke/mosquitto
        or
        $ brew services start mosquitto

        """
        # assert(mosquitto_is_running())
        logging.raiseExceptions = True

        # noinspection PyTypeChecker
        mqtt_logger = dict(host=mqtt_ip,
                           port=1883,
                           topic="topics/test",
                           loglevel=MON,
                           qos=1)

        with HyperStream(file_logger=False,
                         console_logger=False,
                         mqtt_logger=mqtt_logger):
            with MqttClient() as client:
                # client.client.publish("topics/test", "{} ABC".format(utcnow()))
                logging.monitor("1234567890")
                sleep(1)
                print(client.last_messages["topics/test"])
                assert (client.last_messages["topics/test"][24:] ==
                        '[MON  ]  1234567890')
Code example #3
def run(house, wearables, delete_existing_workflows=True, loglevel=logging.INFO):
    from hyperstream import HyperStream, TimeInterval, StreamNotFoundError
    from workflows.asset_splitter import split_sphere_assets
    from workflows.deploy_localisation_model import create_workflow_localisation_predict
    # from workflows.deploy_localisation_model_new_api import create_workflow_localisation_predict

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)
    D = hyperstream.channel_manager.mongo
    A = hyperstream.channel_manager.assets

    experiment_ids = A.find_stream(name="experiments_selected", house=house).window(
        TimeInterval.up_to_now()).last().value

    experiment_ids_str = '_'.join(experiment_ids)
    workflow_id0 = "asset_splitter"
    workflow_id1 = "lda_localisation_model_predict_"+experiment_ids_str

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id0)
        hyperstream.workflow_manager.delete_workflow(workflow_id1)

    split_sphere_assets(hyperstream, house)

    try:
        w = hyperstream.workflow_manager.workflows[workflow_id1]
    except KeyError:
        w = create_workflow_localisation_predict(hyperstream, house=house, experiment_ids=experiment_ids, safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id1)

    # def safe_purge(channel, stream_id):
    #     try:
    #         channel.purge_stream(stream_id)
    #     except StreamNotFoundError:
    #         pass

    # A.purge_node("wearables_by_house")
    # A.purge_node("access_points_by_house")
    # D.purge_node("predicted_locations_broadcasted")

    # for h in [1, 2, 1176, 1116]:
    #     safe_purge(A, StreamId(name="wearables_by_house", meta_data=(('house', h),)))
    #     safe_purge(A, StreamId(name="access_points_by_house", meta_data=(('house', h),)))
    #     for w in wearables:
    #         safe_purge(D, StreamId(name="predicted_locations_broadcasted", meta_data=(('house', h), ('wearable', w))))

    ti0 = TimeInterval.up_to_now()
    ti1 = TimeInterval.now_minus(minutes=1)

    # ti0 = TimeInterval(MIN_DATE, parse("2016-12-02 17:14:25.075Z"))
    # ti1 = TimeInterval(start=ti0.end - timedelta(minutes=1), end=ti0.end)

    w.execute(ti1)

    print('number of non_empty_streams: {}'.format(
        len(hyperstream.channel_manager.memory.non_empty_streams)))

    from display_localisation_predictions import display_predictions
    display_predictions(hyperstream, ti1, house, wearables=wearables)
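This example, like several others below, repeats the same delete / look-up / create / commit sequence around workflow_manager. A small helper along these lines could factor that pattern out (the helper name and signature are made up, not part of HyperStream; it only uses calls shown in the examples):

def get_or_create_workflow(hyperstream, workflow_id, factory, delete_existing=False):
    """Return the workflow registered under workflow_id, creating and
    committing it via factory() if it does not exist yet."""
    if delete_existing:
        hyperstream.workflow_manager.delete_workflow(workflow_id)
    try:
        return hyperstream.workflow_manager.workflows[workflow_id]
    except KeyError:
        w = factory()
        hyperstream.workflow_manager.commit_workflow(workflow_id)
        return w

With such a helper, run() above would reduce to one call per workflow plus the factory function that builds it.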
Code example #4
def run(delete_existing_workflows=True, loglevel=logging.INFO):
    from hyperstream import HyperStream, TimeInterval
    from workflows.summaries_to_csv import create_workflow_summaries_to_csv
    from sphere_connector_package.sphere_connector import SphereConnector

    if not globs['sphere_connector']:
        globs['sphere_connector'] = SphereConnector(
            config_filename='config.json',
            include_mongo=True,
            include_redcap=False,
            sphere_logger=None)

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)

    workflow_id = "summaries_to_csv"
    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id)
    try:
        w = hyperstream.workflow_manager.workflows[workflow_id]
    except KeyError:
        # percentile_results = []
        # w = create_workflow_summaries_to_csv(hyperstream,percentile_results=percentile_results,safe=False)
        w = create_workflow_summaries_to_csv(hyperstream, safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id)

    day_str = "2016_12_15_23_00"  # NOTE: not updated to match the interval chosen below
    # Earlier test intervals, kept for reference; only the final pair is used.
    # t1 = parse("2016-12-15T19:58:25Z")
    # t2 = parse("2016-12-15T20:01:05Z")
    # t1 = parse("2016-12-15T22:58:25Z")
    # t2 = parse("2016-12-15T23:01:05Z")
    t1 = parse("2017-02-24T08:01:00Z")
    t2 = parse("2017-02-24T08:04:00Z")

    t_1_2 = TimeInterval(start=t1, end=t2)
    # w.factors[0].execute(t_1_2)
    w.execute(t_1_2)

    env_results = w.factors[0].tool.global_result_list

    csv_string = pd.DataFrame(env_results).to_csv(sep="\t", header=False)

    with open("mk/visualise_summaries/env_summaries_{}.csv".format(day_str),
              "w") as text_file:
        text_file.write(csv_string)

    # print(env_results)
    # print(percentile_results)


#    time_interval = TimeInterval.now_minus(minutes=1)
#    w.execute(time_interval)

    print('number of non_empty_streams: {}'.format(
        len(hyperstream.channel_manager.memory.non_empty_streams)))
Code example #5
def main(dataset, classifier, epochs, seed, batchsize):
    hs = HyperStream(loglevel=30)
    print(hs)
    print([p.channel_id_prefix for p in hs.config.plugins])

    M = hs.channel_manager.memory

    data = getattr(datasets, 'load_{}'.format(dataset))()
    data_tool = hs.plugins.sklearn.tools.dataset(data,
                                                 shuffle=True,
                                                 epochs=epochs,
                                                 seed=seed)
    data_stream = M.get_or_create_stream('dataset')

    model = getattr(linear_model, classifier)()
    classifier_tool = hs.plugins.sklearn.tools.classifier(model)
    classifier_stream = M.get_or_create_stream('classifier')

    now = datetime.utcnow().replace(tzinfo=UTC)
    now = (now - timedelta(hours=1)).replace(tzinfo=UTC)
    before = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    ti = TimeInterval(before, now)

    data_tool.execute(sources=[], sink=data_stream, interval=ti)

    print("Example of a data stream")
    key, value = next(iter(data_stream.window()))  # Python-3 compatible (was .iteritems().next())
    print('[%s]: %s' % (key, value))

    mini_batch_tool = hs.plugins.sklearn.tools.minibatch(batchsize=batchsize)
    mini_batch_stream = M.get_or_create_stream('mini_batch')
    mini_batch_tool.execute(sources=[data_stream],
                            sink=mini_batch_stream,
                            interval=ti)

    classifier_tool.execute(sources=[mini_batch_stream],
                            sink=classifier_stream,
                            interval=ti)

    scores = []
    for key, value in classifier_stream.window():
        scores.append(value['score'])

    # The data is repeated for the given number of epochs, so mini-batches can
    # wrap around and contain samples from both the beginning and the end of
    # the dataset. As a result, the number of scores may not be divisible by
    # the number of epochs.
    if batchsize == 1:
        print("Test scores per epoch")
        scores = np.array(scores).reshape(epochs, -1)
        print(scores.mean(axis=1).round(decimals=2))
    else:
        scores = np.array(scores).reshape(1, -1)
        print("Test scores per minibatch (cyclic)")
        print(scores.round(decimals=2))
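A hypothetical invocation of main() above: 'iris' resolves to sklearn.datasets.load_iris via the getattr call, and the classifier name must be a class in sklearn.linear_model. SGDClassifier is an illustrative choice, picked because the tool trains on mini-batches; the other argument values are equally arbitrary.

# Hypothetical call; the dataset name, classifier and hyperparameters are illustrative only.
main(dataset='iris', classifier='SGDClassifier', epochs=5, seed=42, batchsize=1)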
Code example #6
def main(dataset, model, epochs, seed, batchsize):
    hs = HyperStream(loglevel=30)
    print(hs)
    print([p.channel_id_prefix for p in hs.config.plugins])

    M = hs.channel_manager.memory

    data = getattr(datasets, 'load_{}'.format(dataset))()
    data_tool = hs.plugins.sklearn.tools.dataset(data,
                                                 shuffle=True,
                                                 epochs=epochs,
                                                 seed=seed)
    data_stream = M.get_or_create_stream('dataset')

    anomaly_detector_tool = hs.plugins.sklearn.tools.anomaly_detector(model)
    anomaly_detector_stream = M.get_or_create_stream('anomaly_detector')

    # Use timezone-aware datetimes, as in the otherwise identical sklearn examples above
    now = datetime.utcnow().replace(tzinfo=UTC)
    now = (now - timedelta(hours=1))
    before = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    ti = TimeInterval(before, now)

    data_tool.execute(sources=[], sink=data_stream, interval=ti)

    print("Example of a data stream")
    key, value = next(iter(data_stream.window()))
    print('[%s]: %s' % (key, value))

    mini_batch_tool = hs.plugins.sklearn.tools.minibatch(batchsize=batchsize)
    mini_batch_stream = M.get_or_create_stream('mini_batch')
    mini_batch_tool.execute(sources=[data_stream],
                            sink=mini_batch_stream,
                            interval=ti)

    anomaly_detector_tool.execute(sources=[mini_batch_stream],
                                  sink=anomaly_detector_stream,
                                  interval=ti)

    probas = []
    for key, value in anomaly_detector_stream.window():
        probas.append(value['proba'])

    # The data is repeated for the given number of epochs, so mini-batches can
    # wrap around and contain samples from both the beginning and the end of
    # the dataset. As a result, the number of scores may not be divisible by
    # the number of epochs.
    probas = np.array(probas)
    print(probas.shape)
    means = np.array([np.nanmean(aux) for aux in probas])
    print(means.shape)
    print("Test probabilities per minibatch (cyclic)")
    print(means.round(decimals=2))
Code example #7
    def write_to_history(**kwargs):
        """
        Write to the history of executions of this tool

        :param kwargs: keyword arguments describing the executions
        :return: None
        """
        from hyperstream import HyperStream
        hs = HyperStream(loglevel=logging.CRITICAL,
                         file_logger=False,
                         console_logger=False,
                         mqtt_logger=None)
        if hs.current_session:
            hs.current_session.write_to_history(**kwargs)


def run(house, wearables, loglevel=logging.CRITICAL):
    from hyperstream import HyperStream, TimeInterval

    if not globs['hyperstream']:
        globs['hyperstream'] = HyperStream(loglevel=loglevel, file_logger=None)

    display_predictions(globs['hyperstream'],
                        TimeInterval.now_minus(minutes=1), house, wearables)
    print()

    from display_access_points import display_access_points

    display_access_points(house=house)
    print()
Code example #9
def run(delete_existing_workflows=True, loglevel=logging.INFO):
    from hyperstream import HyperStream, TimeInterval
    from workflows.deploy_summariser import create_workflow_coord_plate_creation, create_workflow_summariser
    from sphere_connector_package.sphere_connector import SphereConnector

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)

    if not globs['sphere_connector']:
        globs['sphere_connector'] = SphereConnector(
            config_filename='config.json',
            include_mongo=True,
            include_redcap=False,
            sphere_logger=None)

    workflow_id = "coord3d_plate_creation"
    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id)
    try:
        w = hyperstream.workflow_manager.workflows[workflow_id]
    except KeyError:
        w = create_workflow_coord_plate_creation(hyperstream, safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id)

    time_interval = TimeInterval.now_minus(minutes=1)
    w.execute(time_interval)

    workflow_id = "periodic_summaries"
    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id)
    try:
        w = hyperstream.workflow_manager.workflows[workflow_id]
    except KeyError:

        w = create_workflow_summariser(hyperstream,
                                       env_window_size=1 * 60 * 60.0,
                                       rss_window_size=4 * 60 * 60.0,
                                       acc_window_size=4 * 60 * 60.0,
                                       vid_window_size=4 * 60 * 60.0,
                                       pred_window_size=4 * 60 * 60.0,
                                       safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id)

    time_interval = TimeInterval.now_minus(minutes=1)
    w.execute(time_interval)

    print('number of non_empty_streams: {}'.format(
        len(hyperstream.channel_manager.memory.non_empty_streams)))
Code example #10
    def test_mqtt_logger_json(self):
        """
        Test the MQTT logger using the JSON format
        Note that mosquitto should be running first:
        $ docker run -ti -p 1883:1883 -p 9001:9001 toke/mosquitto
        or
        $ brew services start mosquitto

        """
        # assert (mosquitto_is_running())
        logging.raiseExceptions = True

        def handleError(self, record):
            raise

        mqtthandler.MQTTHandler.handleError = handleError

        # noinspection PyTypeChecker
        mqtt_logger = dict(host=mqtt_ip,
                           port=1883,
                           topic="topics/test",
                           loglevel=MON,
                           qos=1,
                           formatter=SenMLFormatter())

        hs = HyperStream(file_logger=False,
                         console_logger=False,
                         mqtt_logger=mqtt_logger)

        with MqttClient() as client:
            # client.client.publish("topics/test", "{} ABC".format(utcnow()))
            logging.monitor("1234567890", extra=dict(n="blah"))
            sleep(1)
            # print(client.last_messages["topics/test"])
            msg = json.loads(client.last_messages["topics/test"])
            assert (msg['e'][0]['n'] == 'blah')
            assert (msg['e'][0]['v'] == '1234567890')
            assert (msg['uid'] == 'hyperstream')

            logging.monitor("1234567890")
            sleep(1)
            # print(client.last_messages["topics/test"])
            msg = json.loads(client.last_messages["topics/test"])
            assert (msg['e'][0]['n'] == 'default')
            assert (msg['e'][0]['v'] == '1234567890')
            assert (msg['uid'] == 'hyperstream')
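The assertions above imply that the SenML-formatted payload published by the MQTT logger has roughly the following shape. This is reconstructed from the test, not from the formatter's documentation, and the values are illustrative; other fields may also be present.

# Approximate payload shape implied by the assertions above (illustrative values only).
example_payload = {
    "uid": "hyperstream",
    "e": [
        {
            "n": "blah",        # name passed via extra=dict(n=...); "default" if omitted
            "v": "1234567890",  # the logged message
        }
    ],
}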
Code example #11
def teardown():
    hs = HyperStream(file_logger=False, console_logger=False, mqtt_logger=None)
    for plate_id, tag in ((None, 'test_meta_data'), ('T1',
                                                     'test_plate_creation')):
        # delete_meta_data(hs, tag, items)
        delete_plate(hs, plate_id)

    tag = 'test_meta_data'
    values = [str(i) for i in range(4)]  # a concrete list, so it can be iterated more than once
    delete_meta_data(hs, tag, values)

    # Note that this deletes the nested items as well
    # delete_meta_data(hs, 'test', items)
    # for data in items:
    #     delete_meta_data(hs, 'nested_test', items[data], 'test_' + data)
    delete_plate(hs, 'T.U')
    delete_plate(hs, 'T')
    delete_plate(hs, 'V')
    delete_plate(hs, "T1")  # note this now deletes meta data as well
    return hs
Code example #12
from sphere_connector_package.sphere_connector import SphereConnector
from scripts.workflows.asset_splitter import split_sphere_assets

os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", ".."))

# Various constants
t1 = datetime(2016, 4, 28, 20, 0, 0, 0, UTC)
t2 = datetime(2016, 4, 29, 13, 0, 0, 0, UTC)
now = datetime(2016, 1, 1, 0, 0, 0)
hour = timedelta(hours=1)
minute = timedelta(minutes=1)
second = timedelta(seconds=1)
zero = timedelta(0)

# Hyperstream setup
hyperstream = HyperStream(file_logger=None)
sphere_connector = SphereConnector(include_mongo=True, include_redcap=False)

channels = hyperstream.channel_manager

# Various channels
M = hyperstream.channel_manager.memory
S = hyperstream.channel_manager.sphere
T = hyperstream.channel_manager.tools
D = hyperstream.channel_manager.mongo
A = hyperstream.channel_manager.assets

# Some predefined tools
tools = PredefinedTools(hyperstream)

# Some useful Stream IDs
Code example #13
def main(dataset, components, epochs, seed, batchsize):
    hs = HyperStream(loglevel=30)
    print(hs)
    print([p.channel_id_prefix for p in hs.config.plugins])

    M = hs.channel_manager.memory

    data = getattr(datasets, 'load_{}'.format(dataset))()
    data_tool = hs.plugins.sklearn.tools.dataset(data,
                                                 shuffle=True,
                                                 epochs=epochs,
                                                 seed=seed)
    data_stream = M.get_or_create_stream('dataset')

    print('components = {}'.format(components))
    model = MyIncrementalPCA(n_components=components)
    print(model.n_components)
    unsupervised_tool = hs.plugins.sklearn.tools.unsupervised(model)
    unsupervised_stream = M.get_or_create_stream('unsupervised')

    now = datetime.utcnow().replace(tzinfo=UTC)
    now = (now - timedelta(hours=1)).replace(tzinfo=UTC)
    before = datetime.utcfromtimestamp(0).replace(tzinfo=UTC)
    ti = TimeInterval(before, now)

    data_tool.execute(sources=[], sink=data_stream, interval=ti)

    print("Example of a data stream")
    key, value = next(iter(data_stream.window()))  # Python-3 compatible (was .iteritems().next())
    print('[%s]: %s' % (key, value))

    mini_batch_tool = hs.plugins.sklearn.tools.minibatch(batchsize=batchsize)
    mini_batch_stream = M.get_or_create_stream('mini_batch')
    mini_batch_tool.execute(sources=[data_stream],
                            sink=mini_batch_stream,
                            interval=ti)

    unsupervised_tool.execute(sources=[mini_batch_stream],
                              sink=unsupervised_stream,
                              interval=ti)

    scores = []
    for key, value in unsupervised_stream.window():
        scores.append(value['score'])

    # The data is repeated for the given number of epochs, so mini-batches can
    # wrap around and contain samples from both the beginning and the end of
    # the dataset. As a result, the number of scores may not be divisible by
    # the number of epochs.
    if batchsize == 1:
        print("Test scores per epoch")
        scores = np.array(scores).reshape(epochs, -1)
        print(scores.mean(axis=1).round(decimals=2))
    else:
        scores = np.array(scores).reshape(1, -1)
        print("Test scores per minibatch (cyclic)")
        print(scores.round(decimals=2))

    if dataset == 'digits' and components == 2:
        minmax = 15
        image = generate_hidden_images(model,
                                       digit_size=8,
                                       n=15,
                                       minmax=minmax)
        fig = plt.figure(figsize=(6, 6))
        ax = fig.add_subplot(111)
        ax.imshow(image,
                  extent=[-minmax, minmax, -minmax, minmax],
                  cmap='Greys')
        fig.savefig('pca_inverse_transform.svg')
Code example #14
def run(house, loglevel=logging.INFO):
    from hyperstream import HyperStream
    from workflows.asset_splitter import split_sphere_assets

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)
    split_sphere_assets(hyperstream, house=house)
Code example #15
def run(house,
        selection,
        delete_existing_workflows=True,
        loglevel=logging.INFO):
    from hyperstream import HyperStream, StreamId, TimeInterval
    from workflows.display_experiments import create_workflow_list_technicians_walkarounds
    from workflows.rssi_distributions_per_room import create_workflow_rssi_distributions_per_room

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)
    M = hyperstream.channel_manager.memory

    workflow_id0 = "list_technicians_walkarounds"

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id0)

    try:
        w0 = hyperstream.workflow_manager.workflows[workflow_id0]
    except KeyError:
        w0 = create_workflow_list_technicians_walkarounds(hyperstream,
                                                          house=house,
                                                          safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id0)
    time_interval = TimeInterval.up_to_now()
    w0.execute(time_interval)

    df = M[StreamId('experiments_dataframe', (('house', house), ))].window(
        TimeInterval.up_to_now()).values()[0]
    experiment_indices = selection
    experiment_ids = set([df['experiment_id'][i - 1] for i in selection])

    hyperstream.plate_manager.delete_plate("H.SelectedLocalisationExperiment")
    hyperstream.plate_manager.create_plate(
        plate_id="H.SelectedLocalisationExperiment",
        description=
        "Localisation experiments selected by the technician in SPHERE house",
        meta_data_id="localisation-experiment",
        values=[],
        complement=True,
        parent_plate="H")

    experiment_ids_str = '_'.join(experiment_ids)
    # Create a simple one step workflow for querying
    workflow_id1 = "rssi_distributions_per_room_" + experiment_ids_str

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id1)

    try:
        w1 = hyperstream.workflow_manager.workflows[workflow_id1]
    except KeyError:
        w1 = create_workflow_rssi_distributions_per_room(
            hyperstream,
            house=house,
            experiment_indices=experiment_indices,
            experiment_ids=experiment_ids,
            safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id1)

    time_interval = TimeInterval.up_to_now()
    w1.execute(time_interval)

    df = M[StreamId('dataframe_' + experiment_ids_str,
                    (('house', house), ))].window(
                        TimeInterval.up_to_now()).values()[0]
    df.to_csv(
        os.path.join(hyperstream.config.output_path,
                     'dataframe_{}.csv'.format(experiment_ids_str)))

    print('number of non_empty_streams: {}'.format(
        len(hyperstream.channel_manager.memory.non_empty_streams)))
Code example #16
def run(house, sync_approx_time, delete_existing_workflows=True, loglevel=logging.INFO):
    from hyperstream import HyperStream, TimeInterval
    from hyperstream.utils import duration2str
    from workflows.display_wearable_sync_events import create_workflow_list_wearable_sync_events
    from workflows.asset_splitter import split_sphere_assets
    from dateutil.parser import parse
    from datetime import timedelta

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)

    # Various channels
    S = hyperstream.channel_manager.sphere
    D = hyperstream.channel_manager.mongo
    X = hyperstream.channel_manager.summary
    M = hyperstream.channel_manager.memory
    A = hyperstream.channel_manager.assets

    # if delete_existing_workflows:
    #     hyperstream.workflow_manager.delete_workflow("asset_splitter")

    # split_sphere_assets(hyperstream, house)

    hyperstream.plate_manager.delete_plate("H")
    hyperstream.plate_manager.create_plate(
        plate_id="H",
        description="All houses",
        meta_data_id="house",
        values=[],
        complement=True,
        parent_plate=None
    )

    workflow_id = "list_wearable_sync_events"

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id)

    try:
        w = hyperstream.workflow_manager.workflows[workflow_id]
    except KeyError:
        w = create_workflow_list_wearable_sync_events(hyperstream, house, safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id)
    time_interval = TimeInterval.now_minus(minutes=1)
    time_interval = TimeInterval(
        parse("2017-04-29 19:04:55.000Z"),
        parse("2017-04-29 19:05:10.000Z"))
    time_interval = TimeInterval( # DS350055.DS2 recording started 16:26:11, sync in 2s
        parse("2017-04-29 15:26:00.000Z"),
        parse("2017-04-29 15:27:00.000Z"))
    # A 2.1 2017-04-29 15:26:41.091000 --- +28s
    # A 2.2 2017-04-29 15:26:41.251000
    # A 2.5 2017-04-29 15:26:41.601000
    # A 2.7 2017-04-29 15:26:41.761000
    # A 2.0 2017-04-29 15:26:42.041000
    # A 2.8 2017-04-29 15:26:42.631000
    # A 2.0 2017-04-29 15:26:43.049001
    # A 3.8 2017-04-29 15:26:43.209000
    # A 2.9 2017-04-29 15:26:43.289000
    time_interval = TimeInterval( # DS350055.DS2 recording ended 16:34:09, sync in -5s
        parse("2017-04-29 15:34:25.000Z"),
        parse("2017-04-29 15:34:45.000Z"))
    # too gentle taps
    time_interval = TimeInterval(  # DS350054.DS2 recording started 11:55:47, sync in 2s
        parse("2017-04-29 10:56:00.000Z"),
        parse("2017-04-29 10:56:30.000Z"))
    # A 2.1 2017-04-29 10:55:24.084000 --- -25s --- WRONG, should be ~ +25s
    # A 2.8 2017-04-29 10:55:24.244000
    # A 3.1 2017-04-29 10:55:24.514000
    # A 3.1 2017-04-29 10:55:24.654000
    # A 3.2 2017-04-29 10:55:25.044000
    # A 3.5 2017-04-29 10:55:25.174000
    # A 3.4 2017-04-29 10:55:25.524000
    # A 3.9 2017-04-29 10:55:25.604000
    # A 3.3 2017-04-29 10:55:25.684000
    # A 3.8 2017-04-29 10:55:25.964001
    # A 2.3 2017-04-29 10:55:26.124001
    # A 2.8 2017-04-29 10:55:26.294000
    # A 2.0 2017-04-29 10:55:26.374000
    # time_interval = TimeInterval(  # DS350054.DS2 recording ended 12:11:39, sync by wear D put hanging ~ -8s..-2s
    #     parse("2017-04-29 11:10:40.000Z"),
    #     parse("2017-04-29 11:12:00.000Z"))
    # C put hanging ~ 2017-04-29 11:11:41
    # D put hanging ~ 2017-04-29 11:11:52 --- +25s..+15s
    time_interval = TimeInterval(  # DS350054.DS2 recording started 11:55:47, sync in 2s
        parse("2017-04-30 09:38:00.000Z"),
        parse("2017-04-30 09:39:40.000Z"))

    time_centre = parse(sync_approx_time)
    time_interval = TimeInterval(  # final choice: a +/- 40 s window around sync_approx_time
        time_centre - timedelta(seconds=40),
        time_centre + timedelta(seconds=40))


    w.execute(time_interval)

    return True  # NOTE: early return; the diagnostic code below is currently unreachable
    
    print('number of sphere non_empty_streams: {}'.format(len(S.non_empty_streams)))
    print('number of memory non_empty_streams: {}'.format(len(M.non_empty_streams)))
    
    # df = M.find_stream(name='experiments_dataframe', house=house).window().values()[0]

    # if len(df) > 0:
    if False:
        # arrow.get(x).humanize()
        # df['start'] = df['start'].map('{:%Y-%m-%d %H:%M:%S}'.format)
        df['duration'] = df['end'] - df['start']
        df['start'] = map(lambda x: '{:%Y-%m-%d %H:%M:%S}'.format(x), df['start'])
        df['end'] = map(lambda x: '{:%Y-%m-%d %H:%M:%S}'.format(x), df['end'])
        # df['duration'] = map(lambda x:'{:%Mmin %Ssec}'.format(x),df['duration'])

        df['start_as_text'] = map(lambda x: arrow.get(x).humanize(), df['start'])
        df['duration_as_text'] = map(lambda x: duration2str(x), df['duration'])

        pd.set_option('display.width', 1000)
        print(df[['id', 'start_as_text', 'duration_as_text', 'start', 'end', 'annotator']].to_string(index=False))
        return True
    else:
        print("DataFrame is empty")
        return False
Code example #17
import logging
import sys
import os

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))


if __name__ == '__main__':
    from hyperstream import HyperStream
    from hyperstream.online_engine import OnlineEngine

    logging.info("Simple demonstration of the online engine")

    hyperstream = HyperStream(loglevel=logging.INFO)
    online_engine = OnlineEngine(hyperstream)
    online_engine.execute()
Code example #18
    def setUp(self):
        self.hs = HyperStream(loglevel=logging.INFO,
                              file_logger=False,
                              console_logger=False)
Code example #19
File: main.py  Project: vishalbelsare/HyperStream

import logging
import sys
import os

sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
path = os.path.abspath(
    os.path.join(os.path.dirname(__file__), 'hyperstream_core'))
sys.path.append(path)

if __name__ == '__main__':
    from hyperstream import HyperStream
    from hyperstream.online_engine import OnlineEngine
    from hyperstream.utils.hyperstream_logger import MON, SenMLFormatter

    # noinspection PyTypeChecker
    mqtt_logger = dict(host='127.0.0.1',
                       port=1883,
                       loglevel=MON,
                       topic='SPHERE/MON/SHG/ML',
                       qos=1,
                       formatter=SenMLFormatter())

    hyperstream = HyperStream(loglevel=logging.INFO,
                              file_logger=False,
                              console_logger=True,
                              mqtt_logger=mqtt_logger)
    online_engine = OnlineEngine(hyperstream)
    online_engine.execute()
Code example #20
def resource_manager():
    yield HyperStream(loglevel=logging.CRITICAL)
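resource_manager above reads like a pytest yield fixture; wiring it up would presumably look like the sketch below. The decorator and the consuming test are assumptions, not part of the original file.

import logging

import pytest

from hyperstream import HyperStream


@pytest.fixture
def resource_manager():
    # Yield a shared HyperStream instance to each test that requests it.
    yield HyperStream(loglevel=logging.CRITICAL)


def test_resource_manager_yields_hyperstream(resource_manager):
    # The fixture injects a ready-made HyperStream instance into the test.
    assert isinstance(resource_manager, HyperStream)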
Code example #21
def run(house, delete_existing_workflows=True, loglevel=logging.INFO):
    from hyperstream import HyperStream, TimeInterval
    from hyperstream.utils import duration2str
    from workflows.display_experiments import create_workflow_list_technicians_walkarounds
    from workflows.asset_splitter import split_sphere_assets

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)

    # Various channels
    M = hyperstream.channel_manager.memory
    S = hyperstream.channel_manager.sphere

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow("asset_splitter")

    split_sphere_assets(hyperstream, house)

    hyperstream.plate_manager.delete_plate("H")
    hyperstream.plate_manager.create_plate(plate_id="H",
                                           description="All houses",
                                           meta_data_id="house",
                                           values=[],
                                           complement=True,
                                           parent_plate=None)

    workflow_id = "list_technicians_walkarounds"

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id)

    try:
        w = hyperstream.workflow_manager.workflows[workflow_id]
    except KeyError:
        w = create_workflow_list_technicians_walkarounds(hyperstream,
                                                         house,
                                                         safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id)
    time_interval = TimeInterval.up_to_now()
    w.execute(time_interval)

    print('number of sphere non_empty_streams: {}'.format(
        len(S.non_empty_streams)))
    print('number of memory non_empty_streams: {}'.format(
        len(M.non_empty_streams)))

    df = M.find_stream(name='experiments_dataframe',
                       house=house).window().values()[0]

    if len(df) > 0:
        # TODO: the following line gave a timezone error in some circumstances (when only 1 experiment?)
        df['duration'] = df['end'] - df['start']
        # TODO: the following 3 lines worked in that case but give an error in some cases (when more than 1 experiment?)
        # df['start_utc'] = pd.Series(pd.DatetimeIndex(df['start']).tz_convert('UTC'))
        # df['end_utc'] = pd.Series(pd.DatetimeIndex(df['end']).tz_convert('UTC'))
        # df['duration'] = df['end_utc'] - df['start_utc']
        # Use list comprehensions so this also works under Python 3, where map() is lazy
        df['start'] = ['{:%Y-%m-%d %H:%M:%S}'.format(x) for x in df['start']]
        df['end'] = ['{:%Y-%m-%d %H:%M:%S}'.format(x) for x in df['end']]
        # df['duration'] = ['{:%Mmin %Ssec}'.format(x) for x in df['duration']]

        df['start_as_text'] = [arrow.get(x).humanize() for x in df['start']]
        df['duration_as_text'] = [duration2str(x) for x in df['duration']]

        pd.set_option('display.width', 1000)
        print(df[[
            'id', 'start_as_text', 'duration_as_text', 'start', 'end',
            'annotator'
        ]].to_string(index=False))
        return True
    else:
        print("DataFrame is empty")
        return False
Code example #22
from hyperstream import HyperStream, StreamId, TimeInterval
from hyperstream.utils import utcnow, UTC
from datetime import timedelta
import os

os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), ".."))

if __name__ == '__main__':
    hs = HyperStream(loglevel=20)
    M = hs.channel_manager.memory
    T = hs.channel_manager.tools
    clock = StreamId(name="clock")
    clock_tool = T[clock].window().last().value()
    ticker = M.get_or_create_stream(stream_id=StreamId(name="ticker"))
    now = utcnow()
    before = (now - timedelta(seconds=30)).replace(tzinfo=UTC)
    ti = TimeInterval(before, now)
    clock_tool.execute(sources=[],
                       sink=ticker,
                       interval=ti,
                       alignment_stream=None)
    print(list(ticker.window().tail(5)))
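Continuing the script above: as the unit test in code example #1 shows, each item the clock tool writes carries its timestamp as its value, so the ticker can also be inspected item by item.

# Each clock item has matching .timestamp and .value (cf. code example #1).
for item in ticker.window(ti).items():
    print(item.timestamp, item.value)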
Code example #23
def run(house,
        selection,
        delete_existing_workflows=True,
        loglevel=logging.INFO):
    from hyperstream import HyperStream, StreamId, TimeInterval
    from workflows.display_experiments import create_workflow_list_technicians_walkarounds
    from workflows.learn_localisation_model import create_workflow_lda_localisation_model_learner
    from hyperstream.utils import StreamNotFoundError, reconstruct_interval

    hyperstream = HyperStream(loglevel=loglevel, file_logger=None)
    M = hyperstream.channel_manager.memory
    D = hyperstream.channel_manager.mongo
    A = hyperstream.channel_manager.assets

    workflow_id0 = "list_technicians_walkarounds"

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id0)

    try:
        w0 = hyperstream.workflow_manager.workflows[workflow_id0]
    except KeyError:
        w0 = create_workflow_list_technicians_walkarounds(hyperstream,
                                                          house=house,
                                                          safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id0)
    time_interval = TimeInterval.up_to_now()
    w0.execute(time_interval)

    # from datetime import timedelta
    # time_interval.end += timedelta(milliseconds=1)
    df = M[StreamId('experiments_dataframe',
                    (('house', house), ))].window().values()[0]
    experiment_ids = set([df['experiment_id'][i - 1] for i in selection])

    experiment_ids_str = '_'.join(experiment_ids)

    create_selected_localisation_plates(hyperstream)

    # Ensure the model is overwritten if it's already there
    for model_name in ('lda', 'svm', 'room_rssi_hmm'):
        model_id = StreamId(name="location_prediction",
                            meta_data=(('house', house), ('localisation_model',
                                                          model_name)))
        try:
            hyperstream.channel_manager.mongo.purge_stream(model_id)
        except StreamNotFoundError:
            pass

    workflow_id1 = "lda_localisation_model_learner_" + experiment_ids_str

    if delete_existing_workflows:
        hyperstream.workflow_manager.delete_workflow(workflow_id1)

    try:
        w1 = hyperstream.workflow_manager.workflows[workflow_id1]
    except KeyError:
        w1 = create_workflow_lda_localisation_model_learner(
            hyperstream,
            house=house,
            experiment_ids=experiment_ids,
            safe=False)
        hyperstream.workflow_manager.commit_workflow(workflow_id1)

    # Put the experiments selected into an asset stream
    from hyperstream import StreamInstance
    from hyperstream.utils import utcnow

    A.write_to_stream(stream_id=StreamId(name="experiments_selected",
                                         meta_data=(('house', house), )),
                      data=StreamInstance(timestamp=utcnow(),
                                          value=list(experiment_ids)))

    time_interval = TimeInterval.up_to_now()
    w1.execute(time_interval)

    print('number of non_empty_streams: {}'.format(
        len(hyperstream.channel_manager.memory.non_empty_streams)))

    for model_name in ('lda', 'svm', 'room_rssi_hmm'):
        print("Model: {}".format(model_name))
        model_id = StreamId(name="location_prediction",
                            meta_data=(('house', house), ('localisation_model',
                                                          model_name)))
        try:
            model = D[model_id].window().last().value
        except (AttributeError, KeyError):
            print(
                "No {} model was learnt - not requested or no data recorded?".
                format(model_name))
            continue  # skip this model; `model` would otherwise be undefined below

        for experiment_id in list(experiment_ids):
            print("Experiment id: {}".format(experiment_id))
            print("Time interval: {}".format(
                reconstruct_interval(experiment_id)))
            print("Accuracy: {}".format(
                pformat(model['performance'][experiment_id]['accuracy'])))
            print("Macro F1: {}".format(
                pformat(
                    model['performance'][experiment_id]['f1_score_macro'])))
            print("Micro F1: {}".format(
                pformat(
                    model['performance'][experiment_id]['f1_score_micro'])))
            print("Confusion Matrix:")
            pprint(model['performance'][experiment_id]['confusion_matrix'])
            print("")
    return True
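The reporting loop above implies that each stored location_prediction value is a dict with at least a 'performance' entry keyed by experiment id. A partial reconstruction of that structure follows; it is inferred from the code, not a documented schema, and the values are illustrative.

# Partial structure implied by the loop above; keys beyond these are unknown.
model = {
    'performance': {
        '<experiment_id>': {
            'accuracy': 0.95,              # illustrative values only
            'f1_score_macro': 0.94,
            'f1_score_micro': 0.95,
            'confusion_matrix': [[10, 1], [2, 12]],
        },
    },
}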
Code example #24
File: test.py  Project: yuefengli2016/MSC_PROJECT
import sys
import numpy as np
stdout = sys.stdout
from hyperstream import HyperStream, TimeInterval, UTC, Tool, StreamInstance
from datetime import datetime, timedelta
sys.stdout = stdout

hs = HyperStream()
print(hs)

averageperceptron = hs.plugins.online_learning.tools.averageperceptron()
#ogdtypeone = hs.plugins.online_learning.tools.ogdtypeone()
#passiveaggressiveone = hs.plugins.online_learning.tools.passiveaggressiveone()
#passiveaggressivetwo = hs.plugins.online_learning.tools.passiveaggressivetwo()
#passiveaggressivethree = hs.plugins.online_learning.tools.passiveaggressivethree()
#perceptron = hs.plugins.online_learning.tools.perceptron()
reader_tool = hs.plugins.online_learning.tools.csv_reader(
    'plugins/online_learning/data/wdbc_data.csv')
#clock = hs.tools.clock()
#rng = hs.plugins.data_generators.tools.random(seed=1234)

wdbc_data_stream = hs.channel_manager.memory.get_or_create_stream("wdbc_data")
output = hs.channel_manager.memory.get_or_create_stream("output")

ti = TimeInterval(
    datetime(1960, 1, 1).replace(tzinfo=UTC),
    datetime(2007, 5, 1).replace(tzinfo=UTC))
reader_tool.execute(sources=[], sink=wdbc_data_stream, interval=ti)
wdbc_data_stream.calculated_intervals

ti = TimeInterval(
Code example #25
from flask import Flask, render_template, request, jsonify, url_for
from flask_bower import Bower
import simplejson as json
import os
import logging


from view_helpers import ListConverter, DictConverter, exception_json, \
    ParameterListConverter, DatetimeConverter, ENDPOINTS, KNOWN_TYPES, Helpers, Filters

from hyperstream import HyperStream, Tool, TimeInterval
from hyperstream.utils import MultipleStreamsFoundError, StreamNotFoundError, StreamNotAvailableError, \
    ToolInitialisationError, ChannelNotFoundError

hs = HyperStream(loglevel=logging.INFO,
                 file_logger={'path': '/tmp/HyperStreamViewer'})
app = Flask(__name__)
Bower(app)
app.url_map.converters['list'] = ListConverter
app.url_map.converters['params_list'] = ParameterListConverter
app.url_map.converters['dict'] = DictConverter
app.url_map.converters['datetime'] = DatetimeConverter
app.jinja_env.add_extension('jinja2.ext.do')
app.jinja_env.filters['treelib_to_treeview'] = Filters.treelib_to_treeview
app.jinja_env.filters['custom_sort'] = Filters.custom_sort
app.jinja_env.filters['custom_format'] = Filters.custom_format
app.jinja_env.filters['u'] = Filters.stream_id_to_url
app.jinja_env.auto_reload = True
app.config['TEMPLATES_AUTO_RELOAD'] = True
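The snippet stops after configuring the Flask app; starting the development server would follow the usual Flask pattern. The host, port and debug flag below are illustrative, not taken from HyperStreamViewer.

if __name__ == '__main__':
    # Illustrative settings only; the real project may configure these differently.
    app.run(host='0.0.0.0', port=5000, debug=False)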

Code example #26
import sys
sys.path.append("../")  # Add parent dir in the Path

from hyperstream import HyperStream, Workflow
from hyperstream import TimeInterval
from hyperstream.utils import UTC
import hyperstream

from datetime import datetime
from utils import plot_high_chart
from utils import plot_multiple_stock
from dateutil.parser import parse

hs = HyperStream(loglevel=30)
print(hs)
print([p.channel_id_prefix for p in hs.config.plugins])


def dateparser(dt):
    return parse(dt.replace('M', '-')).replace(tzinfo=UTC)


ti_all = TimeInterval(
    datetime(1999, 1, 1).replace(tzinfo=UTC),
    datetime(2013, 1, 1).replace(tzinfo=UTC))
ti_sample = TimeInterval(
    datetime(2007, 1, 1).replace(tzinfo=UTC),
    datetime(2007, 3, 1).replace(tzinfo=UTC))

# M will be the Memory Channel
M = hs.channel_manager.memory
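dateparser above swaps the 'M' separator for '-' before handing the string to dateutil and attaching UTC, so it presumably accepts period labels such as '1999M01'; the exact format of the real input data is an assumption.

# Hypothetical period label; the actual data format is not shown in this excerpt.
print(dateparser('1999M01'))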