Example #1
def build_pymongo_backed_broker():
    """Provide a function-level scoped MDS instance talking to a
    temporary database on localhost:27017 with the v1 schema.

    """
    import uuid

    from databroker.broker import Broker
    from metadatastore.mds import MDS
    from filestore.utils import create_test_database
    from filestore.fs import FileStore
    from filestore.handlers import NpyHandler

    db_name = "mds_testing_disposable_{}".format(str(uuid.uuid4()))
    mds_test_conf = dict(database=db_name, host='localhost',
                         port=27017, timezone='US/Eastern')
    try:
        # Some installs' MDS does not accept the ``auth`` keyword
        # (the original note says this saves macOS users); fall back.
        mds = MDS(mds_test_conf, 1, auth=False)
    except TypeError:
        mds = MDS(mds_test_conf, 1)

    db_name = "fs_testing_base_disposable_{}".format(str(uuid.uuid4()))
    fs_test_conf = create_test_database(host='localhost',
                                        port=27017,
                                        version=1,
                                        db_template=db_name)
    fs = FileStore(fs_test_conf, version=1)
    fs.register_handler('npy', NpyHandler)

    db = Broker(mds, fs)
    # insert_imgs is assumed to be defined elsewhere in the test suite;
    # it seeds the broker with one 2048x2048 image via the npy handler.
    insert_imgs(db.mds, db.fs, 1, (2048, 2048))

    return db
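A minimal sketch of how a test might consume this builder; the query calls reflect the databroker v0-era API and should be read as assumptions:

db = build_pymongo_backed_broker()
header = db[-1]                       # most recent run
events = list(db.get_events(header))  # events for the seeded npy images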
Example #2
def test_pickle():
    import pickle

    from metadatastore.mds import MDS
    md = MDS(config={'host': 'portland'}, version=1)
    md2 = pickle.loads(pickle.dumps(md))

    assert md.version == md2.version
    assert md.config == md2.config
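Picklability matters because MDS instances get shipped to worker processes, as in the ipyparallel migration of Example #11; only the configuration and version travel, and Example #11's invasive_checks suggests the pushed objects arrive without a live connection, reconnecting lazily. A sketch:

import pickle

blob = pickle.dumps(md)         # serializes config/version, not a live socket
md_remote = pickle.loads(blob)  # reconnects to Mongo lazily, on first use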
Example #3
def mds(request):
    # Assumes module-level imports elsewhere in the conftest:
    # uuid, MDS, and a create_test_database helper.
    db_template = "mds_testing_disposable_{}".format(str(uuid.uuid4()))
    test_conf = create_test_database('localhost', db_template=db_template)
    mds = MDS(test_conf)

    def delete_dm():
        print("DROPPING DB")
        mds._connection.drop_database(test_conf['database'])

    request.addfinalizer(delete_dm)

    return mds
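As written this is the body of a pytest fixture; the decorator does not appear in the snippet. Registered in full it would look roughly like this (the scope is an assumption):

import pytest

@pytest.fixture(scope='function')
def mds(request):
    ...  # body as above; the finalizer drops the disposable database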
Example #4
def mds_all(request):
    '''Provide a function-level scoped MDS instance talking to a
    temporary database on localhost:27017, covering both the v0
    and v1 schemas.
    '''
    db_name = "mds_testing_disposable_{}".format(str(uuid.uuid4()))
    test_conf = dict(database=db_name,
                     host='localhost',
                     port=27017,
                     timezone='US/Eastern',
                     mongo_user='******',
                     mongo_pwd='jerry')
    # ``request.param`` supplies the schema version (0 or 1); AUTH is
    # assumed to be a module-level flag defined alongside this fixture.
    ver = request.param
    mds = MDS(test_conf, ver, auth=AUTH)

    def delete_dm():
        print("DROPPING DB")
        mds._connection.drop_database(db_name)

    request.addfinalizer(delete_dm)

    return mds
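The docstring promises coverage of both the v0 and v1 schemas, so the fixture is presumably parametrized over the version; a sketch of the decorator not shown in the snippet:

import pytest

@pytest.fixture(params=[0, 1], scope='function')
def mds_all(request):
    ...  # body as above; each test using it runs once per schema version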
Example #5
# Set up the logbook. This configures bluesky's summaries of
# data acquisition (scan type, ID, etc.).
# Make ophyd listen to pyepics.
from ophyd import setup_ophyd
setup_ophyd()

from metadatastore.mds import MDS
# from metadataclient.mds import MDS
from databroker import Broker
from databroker.core import register_builtin_handlers
from filestore.fs import FileStore

# pull from /etc/metadatastore/connection.yaml or
# /home/BLUSER/.config/metadatastore/connection.yml
mds = MDS({'host': 'xf21id1-ca1',
           'database': 'metadatastore',
           'port': 27017,
           'timezone': 'US/Eastern'}, auth=False)
# mds = MDS({'host': CA, 'port': 7770})

# pull configuration from /etc/filestore/connection.yaml or
# /home/BLUSER/.config/filestore/connection.yml
db = Broker(mds, FileStore({'host': 'xf21id1-ca1',
                            'port': 27017,
                            'database': 'filestore'}))
register_builtin_handlers(db.fs)

# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.
from bluesky.global_state import gs
gs.RE.subscribe('all', mds.insert)
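Once gs.RE is subscribed, every run's documents land in metadatastore and can be pulled back through the broker. A usage sketch (db.get_table is the databroker v0-era API, assumed here):

header = db[-1]               # most recent run, as in Example #8
table = db.get_table(header)  # the run's events as a pandas DataFrame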
Example #6
# NOTE: this snippet begins inside a truncated compare(o, n) helper;
# on a mismatch it prints both documents and re-raises (see the
# reconstruction sketch after this example).
            pass
        else:
            print(o)
            print(n)
            raise

# Assumes module-level: OLD_DATABASE, NEW_DATABASE, MDSRO, MDS, and tqdm.
old_config = dict(database=OLD_DATABASE,
                  host='localhost',
                  port=27017,
                  timezone='US/Eastern')
new_config = old_config.copy()

new_config['database'] = NEW_DATABASE

old = MDSRO(version=0, config=old_config)
new = MDS(version=1, config=new_config)

total = old._runstart_col.find().count()
old_starts = tqdm(old.find_run_starts(), unit='start docs', total=total,
                  leave=True)
new_starts = new.find_run_starts()
for o, n in zip(old_starts, new_starts):
    compare(o, n)

total = old._runstop_col.find().count()
old_stops = tqdm(old.find_run_stops(), unit='stop docs', total=total)
new_stops = new.find_run_stops()
for o, n in zip(old_stops, new_stops):
    compare(o, n)
descs = deque()
counts = deque()
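A hypothetical reconstruction of the truncated compare helper, consistent with the visible tail; the tolerated-difference condition here is invented purely for illustration:

def compare(o, n):
    try:
        assert o == n
    except AssertionError:
        # hypothetical tolerance: only the Mongo-internal key may differ
        if set(o) ^ set(n) <= {'_id'}:
            pass
        else:
            print(o)
            print(n)
            raise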
Example #7
#from hxntools.handlers import register
#import filestore
from metadatastore.mds import MDS
from databroker import Broker
from filestore.fs import FileStore

# database #1
_mds_config = {'host': 'xf03id-ca1',
               'port': 27017,
               'database': 'datastore-new',
               'timezone': 'US/Eastern'}
mds = MDS(_mds_config)
_fs_config = {'host': 'xf03id-ca1',
              'port': 27017,
              'database': 'filestore-new'}
db1 = Broker(mds, FileStore(_fs_config))

# database #2
_mds_config = {'host': 'xf03id-ca1',
               'port': 27017,
               'database': 'datastore-1',
               'timezone': 'US/Eastern'}
mds = MDS(_mds_config)
_fs_config = {'host': 'xf03id-ca1',
              'port': 27017,
              'database': 'filestore-1'}
db2 = Broker(mds, FileStore(_fs_config))

# database old
_mds_config_old = {'host': 'xf03id-ca1',
                   'port': 27017,
Example #8
# Make ophyd listen to pyepics.
from ophyd import setup_ophyd
setup_ophyd()

# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.
from metadatastore.mds import MDS
from filestore.fs import FileStore
from databroker import Broker

mds = MDS({'host': 'xf17bm-ioc1',
           'port': 27017,
           'database': 'metadatastore-production-v1',
           'timezone': 'US/Eastern'})
fs = FileStore({'host': 'xf17bm-ioc1',
                'port': 27017,
                'database': 'filestore-production-v1'})
db = Broker(mds, fs)
# usage: db[-1]  # i.e., one scan ago
from bluesky.global_state import gs
gs.RE.subscribe_lossless('all', mds.insert)

# At the end of every run, verify that files were saved and
# print a confirmation message.
# from bluesky.callbacks.broker import verify_files_saved
# gs.RE.subscribe('stop', post_run(verify_files_saved))

# Import matplotlib and put it in interactive mode.
import matplotlib.pyplot as plt
plt.ion()
Example #9
import os

os.environ['FS_HOST'] = 'localhost'
os.environ['FS_PORT'] = '27017'
os.environ['FS_DATABASE'] = 'filestore-production-v1'

# Connect to metadatastore and filestore.
from metadatastore.mds import MDS, MDSRO
from filestore.fs import FileStoreRO
from databroker import Broker
mds_config = {'host': 'localhost',
              'port': 27017,
              'database': 'metadatastore-production-v1',
              'timezone': 'US/Eastern'}
fs_config = {'host': 'localhost',
             'port': 27017,
             'database': 'filestore-production-v1'}
mds = MDS(mds_config)
# mds_readonly = MDSRO(mds_config)
# Since we aren't writing any files at the moment, use
# the read-only FileStore client.
fs_readonly = FileStoreRO(fs_config)

db = Broker(mds, fs_readonly)


# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.
from bluesky.global_state import gs
gs.RE.subscribe('all', mds.insert)

# Import matplotlib and put it in interactive mode.
import matplotlib.pyplot as plt
plt.ion()
Example #10
from metadatastore.mds import MDS
from databroker import Broker
from databroker.core import register_builtin_handlers
from filestore.fs import FileStore

_mds_config = {'host': 'xf03id-ca1',
               'port': 27017,
               'database': 'datastore-new',
               'timezone': 'US/Eastern'}
mds = MDS(_mds_config, auth=False)

_fs_config = {'host': 'xf03id-ca1',
              'port': 27017,
              'database': 'filestore-new'}
db_new = Broker(mds, FileStore(_fs_config))

_mds_config_old = {'host': 'xf03id-ca1',
                   'port': 27017,
                   'database': 'datastore',
                   'timezone': 'US/Eastern'}
mds_old = MDS(_mds_config_old, auth=False)

_fs_config_old = {'host': 'xf03id-ca1',
                  'port': 27017,
                  'database': 'filestore'}
db_old = Broker(mds_old, FileStore(_fs_config_old))


from hxntools.handlers.xspress3 import Xspress3HDF5Handler
from hxntools.handlers.timepix import TimepixHDF5Handler
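The snippet ends right after importing the beamline-specific handlers; presumably they get registered with each broker's FileStore, following the register_handler pattern from Example #1. A sketch, with the spec names as assumptions:

db_new.fs.register_handler('XSP3', Xspress3HDF5Handler)     # spec name assumed
db_new.fs.register_handler('TPX_HDF5', TimepixHDF5Handler)  # spec name assumed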
Example #11
# Assumed module-level imports for this migration script:
import ipyparallel as ipp
from tqdm import tqdm


def main(target, source):
    print('Database to be migrated: {}'.format(source))
    print('Database to migrate into: {}'.format(target))
    rc = ipp.Client()

    dview = rc[:]

    with dview.sync_imports():
        from metadatastore.mds import MDS, MDSRO
        from collections import deque

    old_config = {
        'database': source,
        'host': 'localhost',
        'port': 27017,
        'timezone': 'US/Eastern'}
    new_config = {
        'database': target,
        'host': 'localhost',
        'port': 27017,
        'timezone': 'US/Eastern'}

    old_t = MDSRO(version=0, config=old_config)
    new_t = MDS(version=1, config=new_config)

    def condition_config():
        import time

        global new, old
        for md in [new, old]:
            md._runstart_col.find_one()
            md._runstop_col.find_one()
            md._event_col.find_one()
            md._descriptor_col.find_one()
        time.sleep(1)

    def invasive_checks():
        global old, new
        return (old._MDSRO__conn is None,
                new._MDSRO__conn is None)

    dview.push({'old': old_t, 'new': new_t})
    dview.apply(condition_config)
    print(list(dview.apply(invasive_checks)))
    new_t._connection.drop_database(target)

    # Drop all indexes on event collection to speed up insert.
    # They will be rebuilt the next time an MDS(RO) object connects.
    new_t._event_col.drop_indexes()
    new = new_t
    old = old_t
    # old._runstart_col.drop_indexes()
    total = old._runstart_col.find().count()
    for start in tqdm(old.find_run_starts(), desc='start docs', total=total):
        new.insert('start', start)

    total = old._runstop_col.find().count()
    for stop in tqdm(old.find_run_stops(), desc='stop docs', total=total):
        try:
            new.insert('stop', stop)
        except RuntimeError:
            print("error inserting run stop with uid {!r}".format(stop['uid']))

    descs = deque()
    counts = deque()
    old._descriptor_col.drop_indexes()
    total = old._descriptor_col.find().count()
    for desc in tqdm(old.find_descriptors(), unit='descriptors', total=total):
        d_raw = next(old._descriptor_col.find({'uid': desc['uid']}))
        num_events = old._event_col.find(
            {'descriptor_id': d_raw['_id']}).count()
        new.insert('descriptor', desc)
        out = dict(desc)
        out['run_start'] = out['run_start']['uid']
        descs.append(dict(desc))
        counts.append(num_events)

    new.clear_process_cache()
    old.clear_process_cache()

    def migrate_event_stream(desc_in, num_events):
        import pymongo.errors
        import time

        global new, old
        if num_events:
            flag = True
            # Empty event streams are skipped outright; otherwise retry
            # the whole stream if the bulk insert hits a dropped connection.
            while flag:
                flag = False
                try:
                    events = old.get_events_generator(descriptor=desc_in,
                                                      convert_arrays=False)
                    events = iter(events)
                    l_cache = deque()
                    while True:
                        try:
                            for j in range(5000):
                                l_cache.append(next(events))
                        except StopIteration:
                            break
                        finally:
                            if l_cache:
                                new.bulk_insert_events(descriptor=desc_in,
                                                       events=l_cache)
                            l_cache.clear()
                except KeyError:
                    print("KeyError while migrating this event stream")
                except pymongo.errors.AutoReconnect:
                    flag = True
                    time.sleep(10)

        new.clear_process_cache()
        old.clear_process_cache()
        return num_events

    v = rc.load_balanced_view()
    amr = v.map(migrate_event_stream, descs, list(counts), ordered=False)
    total = sum(counts)
    with tqdm(total=total, unit='events') as pbar:
        for res in amr:
            pbar.update(res)
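A hypothetical command-line entry point for this migration script; the argument names are invented for illustration:

if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(
        description='Migrate metadatastore documents from a v0 to a v1 database')
    parser.add_argument('source', help='name of the v0 database to read from')
    parser.add_argument('target', help='name of the v1 database to write into')
    args = parser.parse_args()
    main(args.target, args.source)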
Example #12
# NOTE: as in Example #6, this snippet begins inside a truncated
# compare(o, n) helper.
        else:
            print(o)
            print(n)
            raise


old_config = dict(database=OLD_DATABASE,
                  host='localhost',
                  port=27017,
                  timezone='US/Eastern')
new_config = old_config.copy()

new_config['database'] = NEW_DATABASE

old = MDSRO(version=0, config=old_config)
new = MDS(version=1, config=new_config)

total = old._runstart_col.find().count()
old_starts = tqdm(old.find_run_starts(),
                  unit='start docs',
                  total=total,
                  leave=True)
new_starts = new.find_run_starts()
for o, n in zip(old_starts, new_starts):
    compare(o, n)

total = old._runstop_col.find().count()
old_stops = tqdm(old.find_run_stops(), unit='stop docs', total=total)
new_stops = new.find_run_stops()
for o, n in zip(old_stops, new_stops):
    compare(o, n)
Example #13
        # (snippet begins inside CustomRunEngine.__call__; the method
        # signature, ``global username``, and the module-level imports of
        # MDS, Broker, FileStore, register_builtin_handlers, and gs are
        # truncated above)
        global proposal_id
        global run_id

        if username is None or proposal_id is None or run_id is None:
            login()

        return super().__call__(*args, **kwargs)


RE = CustomRunEngine()
gs.RE = RE

mds = MDS(
    {
        'host': 'xf16idc-ca',
        'database': 'metadatastore_production_v1',
        'port': 27017,
        'timezone': 'US/Eastern'
    },
    auth=False)

db = Broker(
    mds,
    FileStore({
        'host': 'xf16idc-ca',
        'database': 'filestore',
        'port': 27017
    }))

register_builtin_handlers(db.fs)
RE.subscribe('all', mds.insert)