Example #1
def test_scan_id_lookup():
    for i in range(5):
        insert_run_start(time=float(i), scan_id=i + 1,
                         owner='docbrown', beamline_id='example',
                         beamline_config=insert_beamline_config({}, time=0.))
    for i in range(5):
        insert_run_start(time=float(i), scan_id=i + 1,
                         owner='nedbrainard', beamline_id='example',
                         beamline_config=insert_beamline_config({}, time=0.))
    header = db[3]
    scan_id = header.scan_id
    owner = header.owner
    assert_equal(scan_id, 3)
    # Two runs share scan_id 3; db[3] should resolve to the most *recent* one.
    assert_equal(owner, 'nedbrainard')
Example #2
def test_event_queue():
    scan_id = np.random.randint(1e12)  # unique enough for government work
    rs = insert_run_start(time=0., scan_id=scan_id,
                          owner='queue-tester', beamline_id='example',
                          beamline_config=insert_beamline_config({}, time=0.))
    header = db.find_headers(scan_id=scan_id)
    queue = EventQueue(header)
    # Queue should be empty until we create Events.
    empty_bundle = queue.get()
    assert_equal(len(empty_bundle), 0)
    queue.update()
    empty_bundle = queue.get()
    assert_equal(len(empty_bundle), 0)
    events = temperature_ramp.run(rs)
    # This should add a bundle of Events to the queue.
    queue.update()
    first_bundle = queue.get()
    assert_equal(len(first_bundle), len(events))
    more_events = temperature_ramp.run(rs)
    # Queue should be empty until we update.
    empty_bundle = queue.get()
    assert_equal(len(empty_bundle), 0)
    queue.update()
    second_bundle = queue.get()
    assert_equal(len(second_bundle), len(more_events))
    # Add Events from a different example into the same Header.
    other_events = image_and_scalar.run(rs)
    queue.update()
    third_bundle = queue.get()
    assert_equal(len(third_bundle), len(other_events))
Example #3
def test_replay_plotx_ploty():
    # insert a run header with one plotx and one ploty
    rs = mdsapi.insert_run_start(
        time=ttime.time(), beamline_id='replay testing', scan_id=1,
        custom={'plotx': 'Tsam', 'ploty': ['point_det']},
        beamline_config=mdsapi.insert_beamline_config({}, ttime.time()))
    temperature_ramp.run(rs)
    # Running replay in live mode with plotx and ploty should reach the
    # following state after a few seconds of execution.
    app = QtApplication()
    ui = replay.create(replay.define_live_params())
    ui.title = 'testing replay with plotx and one value of ploty'
    ui.show()
    app.timed_call(4000, app.stop)
    app.start()
    try:
        # the x axis should be 'plotx'
        assert ui.scalar_collection.x == 'Tsam'
        # there should only be 1 scalar model currently plotting
        assert len([scalar_model for scalar_model
                    in ui.scalar_collection.scalar_models.values()
                    if scalar_model.is_plotting]) == 1
        # the x axis should not be the index
        assert not ui.scalar_collection.x_is_index
    except AssertionError:
        # gotta destroy the app or it will cause cascading errors
        ui.close()
        app.destroy()
        raise

    ui.close()
    app.destroy()
Example #4
def setup():
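    # Prepare the test databases, then insert an empty beamline config, a run
    # start that references it, and simulated temperature-ramp events.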
    fs_setup()
    mds_setup()

    blc_uid = insert_beamline_config({}, ttime.time())
    rs_uid = insert_run_start(time=0.0, scan_id=1, owner='test',
                              beamline_id='test', beamline_config=blc_uid)
    temperature_ramp.run(run_start_uid=rs_uid)
Example #5
def test_uid_lookup():
    uid = str(uuid.uuid4())
    uid2 = uid[0] + str(uuid.uuid4())[1:]  # same first character as uid
    rs1 = insert_run_start(time=100., scan_id=1, uid=uid,
                           owner='drstrangelove', beamline_id='example',
                           beamline_config=insert_beamline_config({}, time=0.))
    rs2 = insert_run_start(time=100., scan_id=1, uid=uid2,
                           owner='drstrangelove', beamline_id='example',
                           beamline_config=insert_beamline_config({}, time=0.))
    # using full uid
    actual_uid = db[uid].run_start_uid
    assert_equal(actual_uid, uid)

    # using first 6 chars
    actual_uid = db[uid[:6]].run_start_uid
    assert_equal(actual_uid, uid)

    # using first char (will error)
    f = lambda: db[uid[0]]
    assert_raises(ValueError, f)
Example #6
def test_data_key():
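    # Filtering headers by data_key should match both runs; adding start_time
    # narrows the result to the later run.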
    rs1_uid = insert_run_start(time=100., scan_id=1,
                               owner='nedbrainard', beamline_id='example',
                               beamline_config=insert_beamline_config(
                                   {}, time=0.))
    rs2_uid = insert_run_start(time=200., scan_id=2,
                               owner='nedbrainard', beamline_id='example',
                               beamline_config=insert_beamline_config(
                                   {}, time=0.))
    rs1, = find_run_starts(uid=rs1_uid)
    rs2, = find_run_starts(uid=rs2_uid)
    data_keys = {'fork': {'source': '_', 'dtype': 'number'},
                 'spoon': {'source': '_', 'dtype': 'number'}}
    evd1_uid = insert_event_descriptor(run_start=rs1_uid, data_keys=data_keys,
                                       time=100.)
    insert_event_descriptor(run_start=rs2_uid, data_keys=data_keys, time=200.)
    result1 = db.find_headers(data_key='fork')
    result2 = db.find_headers(data_key='fork', start_time=150)
    assert_equal(len(result1), 2)
    assert_equal(len(result2), 1)
    actual = result2[0].run_start_uid
    assert_equal(actual, str(rs2.uid))
Example #7
def test_bad_header():
    # Exercise the code path that results in a 'badly formatted header'
    # in this case it works by inserting three run stops
    # one comes from the temperature_ramp.run() command
    # then two more come from stop1 and stop2
    start = insert_run_start(time=ttime.time(), scan_id=8985, owner='docbrown',
                             beamline_id='example',
                             beamline_config=insert_beamline_config({}, time=0.))
    ev = temperature_ramp.run(start)
    stop1 = insert_run_stop(start, time=ttime.time())
    stop2 = insert_run_stop(start, time=ttime.time())

    hdr = db[-1]
Example #8
def setup():
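    # Reconnect to the test database, insert simulated channel-archiver data,
    # and create five run starts for each of two owners.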
    global conn
    db_disconnect()
    conn = db_connect(db_name, 'localhost', 27017)
    blc = insert_beamline_config({}, ttime.time())

    switch(channelarchiver=False)
    start, end = '2015-01-01 00:00:00', '2015-01-01 00:01:00'
    simulated_ca_data = generate_ca_data(['ch1', 'ch2'], start, end)
    ca.insert_data(simulated_ca_data)

    for i in range(5):
        insert_run_start(time=float(i),
                         scan_id=i + 1,
                         owner='docbrown',
                         beamline_id='example',
                         beamline_config=insert_beamline_config({}, time=0.))
    for i in range(5):
        insert_run_start(time=float(i),
                         scan_id=i + 1,
                         owner='nedbrainard',
                         beamline_id='example',
                         beamline_config=insert_beamline_config({}, time=0.))
Example #9
 def mock_run_start(run_start_uid=None, sleep=0, make_run_stop=True):
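     # Wrapper fragment: create a run start if none is supplied, generate
     # events with the wrapped `func`, and optionally record a run stop.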
     if run_start_uid is None:
         blc_uid = insert_beamline_config({}, time=get_time())
         run_start_uid = insert_run_start(time=get_time(), scan_id=1,
                                          beamline_id='example',
                                          uid=str(uuid.uuid4()),
                                          beamline_config=blc_uid)
     # these events are already the sanitized version, not raw mongo objects
     events = func(run_start_uid, sleep)
     # Infer the end run time from events, since all the times are
     # simulated and not necessarily based on the current time.
     time = max([event['time'] for event in events])
     if make_run_stop:
         run_stop_uid = insert_run_stop(run_start_uid, time=time,
                                        exit_status='success')
         run_stop, = find_run_stops(uid=run_stop_uid)
     return events
Example #10
def hdf_data_io():
    """
    Save data to the database and return the retrieved events for testing.
    """
    blc = insert_beamline_config({'cfg1': 1}, 0.0)
    run_start_uid = insert_run_start(time=0., scan_id=1, beamline_id='csx',
                                     uid=str(uuid.uuid4()),
                                     beamline_config=blc)

    # data keys entry
    data_keys = {'x_pos': dict(source='MCA:pos_x', dtype='number'),
                 'y_pos': dict(source='MCA:pos_y', dtype='number'),
                 'xrf_spectrum': dict(source='MCA:spectrum', dtype='array',
                                      #shape=(5,),
                                      external='FILESTORE:')}

    # save the event descriptor
    descriptor_uid = insert_event_descriptor(
        run_start=run_start_uid, data_keys=data_keys, time=0.,
        uid=str(uuid.uuid4()))

    # number of positions to record, basically along a horizontal line
    num = 5

    events = []
    for i in range(num):
        v_pos = 0
        h_pos = i

        spectrum_uid = get_data(v_pos, h_pos)

        # Put in actual ndarray data, as broker would do. The keys must
        # match the descriptor's data_keys (x_pos <-> h_pos, y_pos <-> v_pos).
        data1 = {'xrf_spectrum': spectrum_uid,
                 'x_pos': h_pos,
                 'y_pos': v_pos}
        timestamps1 = {k: noisy(i) for k in data1}

        event_uid = insert_event(descriptor=descriptor_uid, seq_num=i,
                                 time=noisy(i), data=data1,
                                 uid=str(uuid.uuid4()),
                                 timestamps=timestamps1)
        event, = find_events(uid=event_uid)
        # collect the retrieved event so the caller can check the round trip
        events.append(event)
    return events
Example #11
def test_basic_usage():
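    # Smoke-test Header reprs and the basic find_headers / fetch_events calls.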
    for i in range(5):
        insert_run_start(time=float(i), scan_id=i + 1,
                         owner='nedbrainard', beamline_id='example',
                         beamline_config=insert_beamline_config({}, time=0.))
    header_1 = db[-1]
    # Exercise reprs.
    header_1._repr_html_()
    repr(header_1)
    str(header_1)
    headers = db[-3:]
    headers._repr_html_()
    repr(headers)
    str(headers)

    header_ned = db.find_headers(owner='nedbrainard')
    header_null = db.find_headers(owner='this owner does not exist')
    events_1 = db.fetch_events(header_1)
    events_ned = db.fetch_events(header_ned)
    events_null = db.fetch_events(header_null)
Example #12
 def setup(self, n):
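     # Benchmark fixture: insert a beamline config, run start, and event
     # descriptor, then precompute EVENT_COUNT simulated event payloads.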
     self.obj = range(n)
     self.bcfg = mdsc.insert_beamline_config(time=1315315135.5135,
                                             config_params={'param1': 1})
     self.data_keys = {'linear_motor': {'source': 'PV:pv1',
                                        'shape': None,
                                        'dtype': 'number'},
                       'scalar_detector': {'source': 'PV:pv2',
                                           'shape': None,
                                           'dtype': 'number'},
                       'Tsam': {'source': 'PV:pv3',
                                'dtype': 'number',
                                'shape': None}}
     self.custom = {'custom_key': 'value'}
     self.scan_id = 1903
     self.run_start = mdsc.insert_run_start(scan_id=int(self.scan_id),
                                            owner='benchmark_script',
                                            beamline_id='benchmark_b',
                                            time=1315315135.5135,
                                            beamline_config=self.bcfg,
                                            custom=self.custom)
     self.e_desc = mdsc.insert_event_descriptor(data_keys=self.data_keys,
                                                time=1315315135.5135,
                                                run_start=self.run_start)
     # Compose event data list for 1mil events in setup.
     # See params in event insert test to see how many of these are used
     func = np.cos
     num = EVENT_COUNT
     start = 0
     stop = 10
     sleep_time = .1
     self.data = list()
     for idx, i in enumerate(np.linspace(start, stop, num)):
         self.data.append({'linear_motor': [i, 1315315135.5135],
                           'Tsam': [i + 5, 1315315135.5135],
                           'scalar_detector': [func(i) +
                                               np.random.randn() / 100,
                                               1315315135.5135]})
Example #13
def setup():
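    # Populate metadatastore and filestore with simulated channel-archiver
    # data and five runs per owner; every run gets temperature-ramp events
    # and the first also gets image data.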
    mds_setup()
    fs_setup()
    blc = insert_beamline_config({}, ttime.time())

    switch(channelarchiver=False)
    start, end = '2015-01-01 00:00:00', '2015-01-01 00:01:00'
    simulated_ca_data = generate_ca_data(['ch1', 'ch2'], start, end)
    ca.insert_data(simulated_ca_data)

    owners = ['docbrown', 'nedbrainard']
    num_entries = 5
    for owner in owners:
        for i in range(num_entries):
            logger.debug('{}: {} of {}'.format(owner, i+1, num_entries))
            rs = insert_run_start(time=ttime.time(), scan_id=i + 1,
                                  owner=owner, beamline_id='example',
                                  beamline_config=blc)
            # insert some events into mds
            temperature_ramp.run(run_start_uid=rs, make_run_stop=(i != 0))
            if i == 0:
                # only need to do images once, it takes a while...
                image_and_scalar.run(run_start_uid=rs, make_run_stop=True)
conf.mds_config["database"] = "datastore2"

import mongoengine

connect(db="datastore2", host="xf23id-broker", port=27017)
assert mongoengine.connection.get_db().name == "datastore2"

client = MongoClient(host="xf23id-broker", port=27017)
db = client.toBemigrated1
beamline_cfg_mapping = dict()


beamline_configs = db.beamline_config.find()
for bc in beamline_configs:
    bcfg_id = bc["_id"]
    the_bc = insert_beamline_config(config_params=bc["config_params"], time=bc["time"])
    beamline_cfg_mapping[bc["_id"]] = the_bc


begin_runs = db.begin_run_event.find()
for br in begin_runs:
    the_run_start = insert_run_start(
        time=br["time"],
        beamline_id=br["beamline_id"],
        beamline_config=the_bc,
        owner=br["owner"],
        scan_id=br["scan_id"],
        custom=br.get("custom", {}),
        uid=br["uid"],
    )
    event_descs = db.event_descriptor.find({"begin_run_id": br["_id"]})
Example #15
def _make_blc():
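    # Helper: insert an empty beamline config stamped with the current time.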
    return mds.insert_beamline_config({}, time=ttime.time())
Example #16
def _make_blc():
    return mds.insert_beamline_config({}, time=ttime.time())
Example #17
from __future__ import print_function

from metadatastore.api import (
    insert_run_start,
    insert_beamline_config,
    insert_event,
    insert_event_descriptor,
    insert_run_stop,
)
from metadatastore.api import find_last, find_events
import time
import numpy as np

b_config = insert_beamline_config(config_params={"my_beamline": "my_value"}, time=time.time())

data_keys = {
    "linear_motor": {"source": "PV:pv1", "shape": None, "dtype": "number"},
    "scalar_detector": {"source": "PV:pv2", "shape": None, "dtype": "number"},
    "Tsam": {"source": "PV:pv3", "dtype": "number", "shape": None},
}

try:
    last_hdr = next(find_last())
    scan_id = int(last_hdr.scan_id) + 1
except (IndexError, TypeError):
    scan_id = 1

custom = {"plotx": "linear_motor", "ploty": "scalar_detector"}
# Create a BeginRunEvent that serves as entry point for a run
rs = insert_run_start(scan_id=scan_id, beamline_id="csx", time=time.time(), beamline_config=b_config, custom=custom)
Example #18
    # Temperature Events
    for i, (time, temp) in enumerate(zip(*deadbanded_ramp)):
        time = float(time) + base_time
        data = {'Tsam': temp, 'Troom': temp + 10}
        timestamps = {'Tsam': time, 'Troom': time}
        event_uid = insert_event(descriptor=ev_desc2_uid,
                                 time=time,
                                 data=data,
                                 seq_num=i,
                                 uid=str(uuid.uuid4()),
                                 timestamps=timestamps)
        event, = find_events(uid=event_uid)
        events.append(event)
    return events


if __name__ == '__main__':
    import metadatastore.api as mdsc
    blc_uid = mdsc.insert_beamline_config({}, time=0.)
    run_start_uid = mdsc.insert_run_start(scan_id=2032013,
                                          beamline_id='testbed',
                                          beamline_config=blc_uid,
                                          owner='tester',
                                          group='awesome-devs',
                                          project='Nikea',
                                          time=0.)

    print('beamline_config_uid = %s' % blc_uid)
    print('run_start_uid = %s' % run_start_uid)
    run(run_start_uid)
Example #19
def test_indexing():
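    # Exercise header lookup by negative position, slices, and lists of
    # positions or scan ids.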
    for i in range(5):
        insert_run_start(time=float(i), scan_id=i + 1,
                         owner='nedbrainard', beamline_id='example',
                         beamline_config=insert_beamline_config({}, time=0.))
    header = db[-1]
    is_list = isinstance(header, list)
    assert_false(is_list)
    scan_id = header.scan_id
    assert_equal(scan_id, 5)

    header = db[-2]
    is_list = isinstance(header, list)
    assert_false(is_list)
    scan_id = header.scan_id
    assert_equal(scan_id, 4)

    f = lambda: db[-100000]
    assert_raises(IndexError, f)

    headers = db[-5:]
    is_list = isinstance(headers, list)
    assert_true(is_list)
    num = len(headers)
    assert_equal(num, 5)

    headers = db[-6:]
    assert_true(is_list)
    num = len(headers)
    assert_equal(num, 5)

    headers = db[-1:]
    assert_true(is_list)
    num = len(headers)
    assert_equal(num, 1)
    header, = headers
    scan_id = header.scan_id
    assert_equal(scan_id, 5)

    headers = db[-2:-1]
    assert_true(is_list)
    num = len(headers)
    print(headers)
    assert_equal(num, 1)
    header, = headers
    scan_id = header.scan_id
    assert_equal(scan_id, 4)

    headers = db[-3:-1]
    scan_ids = [h.scan_id for h in headers]
    assert_equal(scan_ids, [4, 3])

    # fancy indexing, by location
    headers = db[[-3, -1, -2]]
    scan_ids = [h.scan_id for h in headers]
    assert_equal(scan_ids, [3, 5, 4])

    # fancy indexing, by scan id
    headers = db[[3, 1, 2]]
    scan_ids = [h.scan_id for h in headers]
    assert_equal(scan_ids, [3, 1, 2])
Example #20
def define_parser():
    parser = argparse.ArgumentParser(description='Launch a data viewer')
    parser.add_argument('time', nargs='?', default=0,
                        help="Sleep duration between scan steps")
    return parser

if __name__ == '__main__':
    parser = define_parser()

    args = parser.parse_args()
    sleep_time = float(args.time)

    from metadatastore.api import (insert_run_start, insert_run_stop,
                                   insert_beamline_config, find_last)
    b_config = insert_beamline_config(config_params={'my_beamline': 'my_value'},
                                      time=ttime.time())
    try:
        last_start_event = next(find_last())
        scan_id = int(last_start_event.scan_id)+1
    except IndexError:
        scan_id = 1
    scan_id = str(scan_id)
    custom = {'plotx': 'linear_motor', 'ploty': ['total_img_sum'],
              'moon': 'full'}
    # insert the run start
    run_start_uid = insert_run_start(scan_id=scan_id, time=ttime.time(),
                                     beamline_id='csx',
                                     beamline_config=b_config, custom=custom)
    events = run(run_start_uid=run_start_uid, sleep=sleep_time,
                 make_run_stop=False)
    run_stop = insert_run_stop(run_start=run_start_uid, time=ttime.time(),
Example #21
    def start_run(self, runid, start_args=None, end_args=None, scan_args=None):
        """

        Parameters
        ----------
        runid : sortable
        start_args
        end_args
        scan_args

        Returns
        -------
        data : dict
            {data_name: []}
        """
        if start_args is None:
            start_args = {}
        if end_args is None:
            end_args = {}
        if scan_args is None:
            scan_args = {}

        # format the begin run event information
        beamline_id = scan_args.get('beamline_id', None)
        if beamline_id is None:
            beamline_id = os.uname()[1].split('-')[0]
        custom = scan_args.get('custom', None)
        beamline_config = scan_args.get('beamline_config', None)
        owner = scan_args.get('owner', None)
        if owner is None:
            owner = getpass.getuser()
        runid = str(runid)

        blc = mds.insert_beamline_config(beamline_config, time=time.time())
        # insert the run_start into metadatastore
        recorded_time = time.time()
        run_start = mds.insert_run_start(
            time=recorded_time, beamline_id=beamline_id, owner=owner,
            beamline_config=blc, scan_id=runid, custom=custom)
        pretty_time = datetime.datetime.fromtimestamp(
                                          recorded_time).isoformat()
        self.logger.info("Scan ID: %s", runid)
        self.logger.info("Time: %s", pretty_time)
        self.logger.info("uid: %s", str(run_start.uid))

        # stash bre for later use
        scan_args['run_start'] = run_start
        end_args['run_start'] = run_start

        keys = self._get_data_keys(**scan_args)
        data = defaultdict(list)

        scan_args['data'] = data

        self._run_start(start_args)
        self._scan_thread = Thread(target=self._start_scan,
                                   name='Scanner',
                                   kwargs=scan_args)
        self._scan_thread.daemon = True
        self._scan_state = True
        self._scan_thread.start()
        try:
            while self._scan_state is True:
                time.sleep(0.10)
        except KeyboardInterrupt:
            self._scan_state = False
            self._scan_thread.join()
            end_args['state'] = 'abort'
        finally:
            self._end_run(end_args)

        return data
Example #22
conf.mds_config['database'] = 'datastore2'

import mongoengine

connect(db='datastore2', host='xf23id-broker', port=27017)
assert mongoengine.connection.get_db().name == 'datastore2'

client = MongoClient(host='xf23id-broker', port=27017)
db = client.toBemigrated1
beamline_cfg_mapping = dict()

beamline_configs = db.beamline_config.find()
for bc in beamline_configs:
    bcfg_id = bc['_id']
    the_bc = insert_beamline_config(config_params=bc['config_params'],
                                    time=bc['time'])
    beamline_cfg_mapping[bc['_id']] = the_bc

begin_runs = db.begin_run_event.find()
for br in begin_runs:
    the_run_start = insert_run_start(time=br['time'],
                                     beamline_id=br['beamline_id'],
                                     beamline_config=the_bc,
                                     owner=br['owner'],
                                     scan_id=br['scan_id'],
                                     custom=br.get('custom', {}),
                                     uid=br['uid'])
    event_descs = db.event_descriptor.find({'begin_run_id': br['_id']})
    max_time = 0.0
    for e_desc in event_descs:
        the_e_desc = insert_event_descriptor(run_start=the_run_start,
Example #23
def test_replay_persistence():
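    # Verify that replay saves and restores per-header plotting state
    # (x, y, x_is_index) across header switches and RAM/disk state toggles.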
    rs1 = mdsapi.insert_run_start(
        time=ttime.time(), beamline_id='replay testing', scan_id=1,
        beamline_config=mdsapi.insert_beamline_config({}, ttime.time()))
    events1 = temperature_ramp.run(rs1)
    rs2 = mdsapi.insert_run_start(
        time=ttime.time(), beamline_id='replay testing', scan_id=2,
        beamline_config=mdsapi.insert_beamline_config({}, ttime.time()))
    events2 = temperature_ramp.run(rs2)
    dbfile = os.path.join(tempfile.gettempdir(), str(uuid.uuid1()) + '.db')

    h = History(dbfile)
    # making sure that replay and the test suite are pointing at the same
    # sql db is good mmmmkay?
    replay.history = h

    # set up some state for the first run start
    scan_id1 = random.randint(50000, 10000000)
    hdr_update_rate1 = random.randint(50000, 10000000)
    num_to_retrieve1 = random.randint(10, 20)
    # store some state
    state1 = {'x': 'Tsam', 'y': ['Tsam', 'point_det'], 'x_is_index': True}
    h.put(six.text_type(rs1.uid), state1)
    returned_state = h.get(six.text_type(rs1.uid))
    h.put('WatchForHeadersModel', {'update_rate': hdr_update_rate1})
    h.put('ScanIDSearchModel', {'scan_id': scan_id1})
    h.put('GetLastModel', {'num_to_retrieve': num_to_retrieve1})
    # store some more state
    h.put(six.text_type(rs2.uid), {'y': ['Tsam', 'point_det'],
                                  'x_is_index': True})

    # open up replay
    app = QtApplication()
    ui = replay.create(replay.define_live_params())
    ui.title = ('Testing replay by manually triggering various models. '
                'Sit back and enjoy the show!')
    ui.show()
    hdr1 = db.find_headers(uid=rs1.uid)[0]
    ui.muxer_model.header = hdr1
    # start replay so that it will stop in 4 seconds
    app.timed_call(4000, app.stop)
    app.start()

    try:
        # check that the observer between the muxer model and the scalar collection
        # is working properly
        assert (six.text_type(ui.scalar_collection.dataframe_uid) ==
                six.text_type(rs1.uid))
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x == state1['x']
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x_is_index == state1['x_is_index']
        y_matches = True
        for y in ui.scalar_collection.y:
            if y not in state1['y']:
                y_matches = False
        # make sure that the datasets that should be plotted are plotting
        assert y_matches
        # make sure that no extra datasets are plotting
        assert len(ui.scalar_collection.y) == len(state1['y'])
        # check state in the "live" mode
        assert ui.watch_headers_model.update_rate == hdr_update_rate1
        # check state loading in the search by scan_id model
        assert ui.scan_id_model.scan_id == scan_id1
        # check state loading in the get_last_model
        assert ui.get_last_model.num_to_retrieve == num_to_retrieve1
    except AssertionError:
        # make sure the app gets destroyed, even if there is an AssertionError
        # as this will cause problems later
        ui.close()
        app.destroy()
        raise

    # store some state for the 2nd header that differs from the first
    state2 = {'x': 'point_det', 'y': ['Tsam'], 'x_is_index': False}
    h.put(six.text_type(rs2.uid), state2)
    hdr2 = db.find_headers(uid=rs2.uid)[0]
    ui.muxer_model.header = hdr2


    # make sure that the things that are display are the things that we think
    # should be displayed. This requires starting/stopping replay
    app.timed_call(4000, app.stop)
    app.start()
    try:
        # check that the observer between the muxer model and the scalar collection
        # is working properly
        assert six.text_type(ui.scalar_collection.dataframe_uid) == six.text_type(rs2.uid)
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x == state2['x']
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x_is_index == state2['x_is_index']
        # check that the scalar collection is correctly loading plotting state
        y_matches = True
        for y in ui.scalar_collection.y:
            if y not in state2['y']:
                y_matches = False
        # make sure that the datasets that should be plotted are plotting
        assert y_matches
        # make sure that no extra datasets are plotting
        assert len(ui.scalar_collection.y) == len(state2['y'])
    except AssertionError:
        # make sure the app gets destroyed, even if there is an AssertionError
        # as this will cause problems later
        ui.close()
        app.destroy()
        raise

    def use_ram_state():
        # now set the plot state to be 'ram'
        ui.scalar_collection.use_ram_state = True
    app.timed_call(500, use_ram_state)
    app.timed_call(1000, app.stop)
    app.start()
    # make sure that it updated the disk state correctly and that no unexpected
    # updates happened
    assert ui.scalar_collection.use_disk_state == False
    assert ui.scalar_collection.use_ram_state == True

    # change the header back to the first header
    ui.muxer_model.header = hdr1
    # start/stop replay again
    app.timed_call(1000, app.stop)
    app.start()

    # make sure that the updates triggered correctly. It should now match the
    # state from the second run header
    try:
        # check that the observer between the muxer model and the scalar collection
        # is working properly
        assert six.text_type(ui.scalar_collection.dataframe_uid) == six.text_type(rs1.uid)
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x == state2['x']
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x_is_index == state2['x_is_index']
        # check that the scalar collection is correctly loading plotting state
        y_matches = True
        for y in ui.scalar_collection.y:
            if y not in state2['y']:
                y_matches = False
        # make sure that the datasets that should be plotted are plotting
        assert y_matches
        # make sure that no extra datasets are plotting
        assert len(ui.scalar_collection.y) == len(state2['y'])
    except AssertionError:
        # make sure the app gets destroyed, even if there is an AssertionError
        # as this will cause problems later
        ui.close()
        app.destroy()
        raise

    def use_disk_state():
        # now set the plot state to be 'disk'
        ui.scalar_collection.use_disk_state = True
    ui.scalar_collection.use_disk_state = True
    # make sure that the switch took place
    app.timed_call(500, use_disk_state)
    app.timed_call(1000, app.stop)
    app.start()
    # make sure that it updated the disk state correctly and that no unexpected
    # updates happened
    assert ui.scalar_collection.use_disk_state == True
    assert ui.scalar_collection.use_ram_state == False

    # Overwrite the on-disk state for run header 2 with state1, then switch
    # back to header 2 and check that the new state is loaded.
    h.put(six.text_type(rs2.uid), state1)
    ui.muxer_model.header = hdr2
    app.timed_call(1000, app.stop)
    app.start()
    # make sure that the things that are displayed are the things that we
    # think should be displayed. This requires starting/stopping replay.
    try:
        # check that the observer between the muxer model and the scalar collection
        # is working properly
        assert six.text_type(ui.scalar_collection.dataframe_uid) == six.text_type(rs2.uid)
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x == state1['x']
        # check that the scalar collection is correctly loading plotting state
        assert ui.scalar_collection.x_is_index == state1['x_is_index']
        # check that the scalar collection is correctly loading plotting state
        y_matches = True
        for y in ui.scalar_collection.y:
            if y not in state1['y']:
                y_matches = False
        # make sure that the datasets that should be plotted are plotting
        assert y_matches
        # make sure that no extra datasets are plotting
        assert len(ui.scalar_collection.y) == len(state1['y'])
    except AssertionError:
        # make sure the app gets destroyed, even if there is an AssertionError
        # as this will cause problems later
        ui.close()
        app.destroy()
        raise

    ui.close()
    app.destroy()
Example #24
    # Temperature Events
    for i, (time, temp) in enumerate(zip(*deadbanded_ramp)):
        time = float(time) + base_time
        data = {'Tsam': temp,
                'Troom': temp + 10}
        timestamps = {'Tsam': time,
                      'Troom': time}
        event_uid = insert_event(descriptor=ev_desc2_uid, time=time,
                                 data=data, seq_num=i, uid=str(uuid.uuid4()),
                                 timestamps=timestamps)
        event, = find_events(uid=event_uid)
        events.append(event)
    return events


if __name__ == '__main__':
    import metadatastore.api as mdsc
    blc_uid = mdsc.insert_beamline_config({}, time=0.)
    run_start_uid = mdsc.insert_run_start(scan_id=2032013,
                                          beamline_id='testbed',
                                          beamline_config=blc_uid,
                                          owner='tester',
                                          group='awesome-devs',
                                          project='Nikea',
                                          time=0.)

    print('beamline_config_uid = %s' % blc_uid)
    print('run_start_uid = %s' % run_start_uid)
    run(run_start_uid)
Example #25
    # Temperature Events
    for i, (time, temp) in enumerate(zip(*deadbanded_ramp)):
        time = float(time) + base_time 
        data = {'Tsam': temp}
        timestamps = {'Tsam': time}
        event_dict = dict(descriptor=ev_desc2_uid, time=time,
                          data=data, timestamps=timestamps, seq_num=i)
        event_uid = insert_event(**event_dict)
        event, = find_events(uid=event_uid)
        events.append(event)

    # TODO: insert a run stop if run_start_uid is not None

    return events


if __name__ == '__main__':
    import metadatastore.api as mdsc
    blc_uid = mdsc.insert_beamline_config({}, time=common.get_time())
    run_start_uid = mdsc.insert_run_start(scan_id=3022013,
                                          beamline_id='testbed',
                                          beamline_config=blc_uid,
                                          owner='tester',
                                          group='awesome-devs',
                                          project='Nikea',
                                          time=common.get_time())

    print('beamline_config_uid = %s' % blc_uid)
    print('run_start_uid = %s' % run_start_uid)
    run(run_start_uid)
Example #26
 def time_single_bcfg(self, n):
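     # Benchmark: insert the same beamline config repeatedly, once per
     # element of self.obj.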
     for _ in self.obj:
         mdsc.insert_beamline_config(time=1315315135.5135,
                                     config_params={'param1': 1})