Example #1
# Note: `metadatastore` and `filestore` are the legacy NSLS-II packages that
# predate databroker's bundled asset registry.
import metadatastore.commands
import filestore.api
from databroker import Broker


def test_plugins():
    class EchoPlugin:
        def get_events(self, header, a):
            yield a

    b = Broker(metadatastore.commands, filestore.api, {'a': EchoPlugin()})
    hdr = b[-1]
    assert 'echo-plugin-test' in list(b.get_events(hdr, a='echo-plugin-test'))
    assert 'echo-plugin-test' not in list(b.get_events(hdr))
Example #2
import os
from pprint import pprint

import yaml
from databroker import Broker

# create_parser, FACILITY_PARSER_MAP and NpyWriter come from the surrounding
# module and are not shown in this snippet.


def main(args=None):
    parser = create_parser()
    ns = parser.parse_args(args)
    nd = ns.__dict__
    if ns.cmd in FACILITY_PARSER_MAP:
        db = Broker.named(nd['db_name'])
        db_path = db.get_config()['metadatastore']['config']['directory']
        writer = NpyWriter(db.fs, db_path)
        for n, d in FACILITY_PARSER_MAP[ns.cmd]['cmd'](nd['input_data']):
            if n == 'descriptor':
                for k in ['tof', 'intensity', 'error']:
                    d['data_keys'][k]['external'] = True
            if n == 'event':
                for k in ['tof', 'intensity', 'error']:
                    d['data'][k] = writer.write(d['data'][k])
                    d['filled'][k] = False
            print(n)
            pprint(d)
            db.insert(n, d)
    else:
        db_config_path = os.path.expanduser('~/.config/databroker/'
                                            '{}.yaml'.format(nd['name']))
        path = os.path.expanduser(nd['path'])
        config = {'description': 'lightweight personal database',
                  'metadatastore': {'module': 'databroker.headersource.sqlite',
                                    'class': 'MDS',
                                    'config': {'directory': path,
                                               'timezone': 'US/Eastern'}},
                  'assets': {'module': 'databroker.assets.sqlite',
                             'class': 'Registry',
                             'config': {'dbpath': os.path.join(
                                 path, 'database.sql')}}}
        os.makedirs(path, exist_ok=True)
        os.makedirs(os.path.split(db_config_path)[0], exist_ok=True)
        with open(db_config_path, 'w', encoding='utf8') as f:
            yaml.dump(config, f)
Example #3
from databroker import Broker

db = Broker.named('hxn')
db_analysis = Broker.named('hxn_analysis')

from hxntools.handlers.xspress3 import Xspress3HDF5Handler
from hxntools.handlers.timepix import TimepixHDF5Handler

db.reg.register_handler(Xspress3HDF5Handler.HANDLER_NAME,
                       Xspress3HDF5Handler)
db.reg.register_handler(TimepixHDF5Handler._handler_name,
                       TimepixHDF5Handler, overwrite=True)

Example #4
from databroker import Broker

db = Broker.named('hxn')
#db_analysis = Broker.named('hxn_analysis')

from hxntools.handlers.xspress3 import Xspress3HDF5Handler
from hxntools.handlers.timepix import TimepixHDF5Handler

db.reg.register_handler(Xspress3HDF5Handler.HANDLER_NAME,
                       Xspress3HDF5Handler)
db.reg.register_handler(TimepixHDF5Handler._handler_name,
                       TimepixHDF5Handler, overwrite=True)

Example #5
# Set up a RunEngine and use metadata backed by a sqlite file.
from bluesky import RunEngine
from bluesky.utils import get_history
RE = RunEngine(get_history())

# Set up a Broker.
from databroker import Broker
db = Broker.named('csx')

# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.
RE.subscribe(db.insert)

# Set up SupplementalData.
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)

# Add a progress bar.
from bluesky.utils import ProgressBarManager
pbar_manager = ProgressBarManager()
RE.waiting_hook = pbar_manager

# Register bluesky IPython magics.
from bluesky.magics import BlueskyMagics
get_ipython().register_magics(BlueskyMagics)

# Set up the BestEffortCallback.
from bluesky.callbacks.best_effort import BestEffortCallback
bec = BestEffortCallback()
RE.subscribe(bec)
Example #6
import time
import sys
from bluesky.simulators import summarize_plan


# Set up a RunEngine and use metadata backed by a sqlite file.
from bluesky import RunEngine
from bluesky.utils import get_history
RE = RunEngine({})

# Set up a Broker.
from databroker import Broker
db = Broker.named('iss')
db_analysis = Broker.named('iss-analysis')

# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.
RE.subscribe(db.insert)

# Set up SupplementalData.
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)

# Add a progress bar.
from timeit import default_timer as timer


from bluesky.utils import ProgressBarManager
pbar_manager = ProgressBarManager()
#RE.waiting_hook = pbar_manager
Example #7
def configure_base(user_ns,
                   broker_name,
                   *,
                   bec=True,
                   epics_context=False,
                   magics=True,
                   mpl=True,
                   ophyd_logging=True,
                   pbar=True):
    """
    Perform base setup and instantiation of important objects.

    This factory function instantiates essential objects for data collection
    environments at NSLS-II and adds them to the current namespace. In some
    cases (documented below), it will check whether certain variables already
    exist in the user namespace, and will avoid creating them if so. The
    following are added:

    * ``RE`` -- a RunEngine
        This is created only if an ``RE`` instance does not currently exist in
        the namespace.
    * ``db`` -- a Broker (from "databroker"), subscribed to ``RE``
    * ``bec`` -- a BestEffortCallback, subscribed to ``RE``
    * ``peaks`` -- an alias for ``bec.peaks``
    * ``sd`` -- a SupplementalData preprocessor, added to ``RE.preprocessors``
    * ``pbar_manager`` -- a ProgressBarManager, set as the ``RE.waiting_hook``

    And it performs some low-level configuration:

    * creates a context in ophyd's control layer (``ophyd.setup_ophyd()``)
    * turns on interactive plotting (``matplotlib.pyplot.ion()``)
    * bridges the RunEngine and Qt event loops
      (``bluesky.utils.install_kicker()``)
    * logs ERROR-level messages from ophyd to standard out

    Parameters
    ----------
    user_ns: dict
        a namespace --- for example, ``get_ipython().user_ns``
    broker_name : Union[str, Broker]
        Name of databroker configuration or a Broker instance.
    bec : boolean, optional
        True by default. Set False to skip BestEffortCallback.
    epics_context : boolean, optional
        False by default. Set True to run ``setup_ophyd()``.
    magics : boolean, optional
        True by default. Set False to skip registration of custom IPython
        magics.
    mpl : boolean, optional
        True by default. Set False to skip matplotlib ``ion()`` and event-loop
        bridging.
    ophyd_logging : boolean, optional
        True by default. Set False to skip ERROR-level log configuration for
        ophyd.
    pbar : boolean, optional
        True by default. Set False to skip ProgressBarManager.

    Returns
    -------
    names : list
        list of names added to the namespace

    Examples
    --------
    Configure IPython for CHX.

    >>> configure_base(get_ipython().user_ns, 'chx');
    """
    ns = {}  # We will update user_ns with this at the end.

    # Set up a RunEngine and use metadata backed by a sqlite file.
    from bluesky import RunEngine
    from bluesky.utils import get_history
    # if RunEngine already defined grab it
    # useful when users make their own custom RunEngine
    if 'RE' in user_ns:
        RE = user_ns['RE']
    else:
        RE = RunEngine(get_history())
        ns['RE'] = RE

    # Set up SupplementalData.
    # (This is a no-op until devices are added to it,
    # so there is no need to provide a 'skip_sd' switch.)
    from bluesky import SupplementalData
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    ns['sd'] = sd

    if isinstance(broker_name, str):
        # Set up a Broker.
        from databroker import Broker
        db = Broker.named(broker_name)
        ns['db'] = db
    else:
        db = broker_name

    RE.subscribe(db.insert)

    if pbar:
        # Add a progress bar.
        from bluesky.utils import ProgressBarManager
        pbar_manager = ProgressBarManager()
        RE.waiting_hook = pbar_manager
        ns['pbar_manager'] = pbar_manager

    if magics:
        # Register bluesky IPython magics.
        from bluesky.magics import BlueskyMagics
        get_ipython().register_magics(BlueskyMagics)

    if bec:
        # Set up the BestEffortCallback.
        from bluesky.callbacks.best_effort import BestEffortCallback
        _bec = BestEffortCallback()
        RE.subscribe(_bec)
        ns['bec'] = _bec
        ns['peaks'] = _bec.peaks  # just as alias for less typing

    if mpl:
        # Import matplotlib and put it in interactive mode.
        import matplotlib.pyplot as plt
        ns['plt'] = plt
        plt.ion()

        # Make plots update live while scans run.
        from bluesky.utils import install_kicker
        install_kicker()

    if epics_context:
        # Create a context in the underlying EPICS client.
        from ophyd import setup_ophyd
        setup_ophyd()

    if ophyd_logging:
        # Turn on error-level logging, particularly useful for knowing when
        # pyepics callbacks fail.
        import logging
        import ophyd.ophydobj
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        ophyd.ophydobj.logger.addHandler(ch)

    # convenience imports
    # some of the * imports are for 'back-compatibility' of a sort -- we have
    # taught BL staff to expect LiveTable and LivePlot etc. to be in their
    # namespace
    import numpy as np
    ns['np'] = np

    import bluesky.callbacks
    ns['bc'] = bluesky.callbacks
    import_star(bluesky.callbacks, ns)

    import bluesky.plans
    ns['bp'] = bluesky.plans
    import_star(bluesky.plans, ns)

    import bluesky.plan_stubs
    ns['bps'] = bluesky.plan_stubs
    import_star(bluesky.plan_stubs, ns)
    # special-case the commonly-used mv / mvr and their aliases mov / movr
    ns['mv'] = bluesky.plan_stubs.mv
    ns['mvr'] = bluesky.plan_stubs.mvr
    ns['mov'] = bluesky.plan_stubs.mov
    ns['movr'] = bluesky.plan_stubs.movr

    import bluesky.preprocessors
    ns['bpp'] = bluesky.preprocessors

    import bluesky.callbacks.broker
    import_star(bluesky.callbacks.broker, ns)

    import bluesky.simulators
    import_star(bluesky.simulators, ns)

    user_ns.update(ns)
    return list(ns)
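
# Hedged usage sketch (not part of the original snippet): configure_base takes
# any dict-like namespace, so it can also be exercised outside IPython as long
# as the rest of the original module (e.g. import_star) is importable.  'temp'
# creates a throwaway databroker; magics/mpl/pbar are switched off so no
# IPython or Qt machinery is needed.
sandbox_ns = {}
added_names = configure_base(sandbox_ns, 'temp',
                             bec=True, magics=False, mpl=False, pbar=False)
print(added_names)            # e.g. ['RE', 'sd', 'db', 'bec', 'peaks', 'np', ...]
RE, db = sandbox_ns['RE'], sandbox_ns['db']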
Example #8
    from bluesky.utils import install_qt_kicker
    install_qt_kicker()
    print("Installing Qt Kicker...")

# Make ophyd listen to pyepics.
from ophyd import setup_ophyd
setup_ophyd()

# Set up a RunEngine and use metadata backed by a sqlite file.
from bluesky import RunEngine
from bluesky.utils import get_history
RE = RunEngine(get_history())

# Set up a Broker.
from databroker import Broker
db = Broker.named('fmx')

# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.
RE.subscribe(db.insert)

# Set up SupplementalData.
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)

# Add a progress bar.
# from bluesky.utils import ProgressBarManager
# pbar_manager = ProgressBarManager()
# RE.waiting_hook = pbar_manager
Example #9
import time
import uuid

import networkx as nx
from databroker import Broker
from shed_streaming.translation import FromEventStream
from shed_streaming.replay import replay

db_path = '/home/christopher/ldrd_demo'
config = {'description': 'lightweight personal database',
          'metadatastore': {'module': 'databroker.headersource.sqlite',
                            'class': 'MDS',
                            'config': {'directory': db_path,
                                       'timezone': 'US/Eastern'}},
          'assets': {'module': 'databroker.assets.sqlite',
                     'class': 'Registry',
                     'config': {'dbpath': db_path + '/database.sql'}}}
db = Broker.from_config(config)


def y():
    suid = str(uuid.uuid4())
    yield ('start', {'uid': suid,
                     'time': time.time()})
    duid = str(uuid.uuid4())
    yield ('descriptor', {'uid': duid,
                          'run_start': suid,
                          'name': 'primary',
                          'data_keys': {'det_image': {'dtype': 'int',
                                                      'units': 'arb'}},
                          'time': time.time()})
    for i in range(5):
        yield ('event', {'uid': str(uuid.uuid4()),
Example #10
import socket
import getpass

from seisidd.utility import print_dict

# keywords_vars and keywords_func are dicts defined earlier in the original
# startup module; stub them here so the snippet stands on its own.
keywords_vars, keywords_func = {}, {}

list_predefined_vars = lambda : print_dict(keywords_vars)
list_predefined_func = lambda : print_dict(keywords_func)


# --- get system info
HOSTNAME = socket.gethostname() or 'localhost'
USERNAME = getpass.getuser() or '6-BM-A user'
keywords_vars['HOSTNAME'] = 'host name'
keywords_vars['USERNAME'] = '******'


# --- setup metadata handler
from databroker import Broker
metadata_db = Broker.named("mongodb_config")
keywords_vars['metadata_db'] = 'Default metadata handler'


# --- setup RunEngine
from bluesky import RunEngine
from bluesky.callbacks.best_effort import BestEffortCallback
keywords_func['get_runengine'] = 'Get a bluesky RunEngine'
def get_runengine(db=None):
    """
    Return an instance of RunEngine.  It is recommended to have only
    one RunEngine per session.
    """
    RE = RunEngine({})
    db = metadata_db if db is None else db
    RE.subscribe(db.insert)
    return RE
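
# Hedged usage sketch: one RunEngine per session, wired to the default
# metadata_db defined above (assumes the truncated body above ends by
# returning RE, as its docstring states).
RE = get_runengine()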
Example #11
from databroker import Broker

db = Broker.named('csx2')

import nslsii
nslsii.configure_base(get_ipython().user_ns, db)
# make sure Best Effort Callback does not plot the baseline readings
bec.noplot_streams.append('pgm_energy_monitor')

### comment this out to:
### disable the zmq service and re-enable best effort callback plotting locally
bec.disable_plots()

from bluesky.callbacks.zmq import Publisher
pub = Publisher('xf23id-ca:5577')
RE.subscribe(pub)
#####

# TODO not need this
from epics import caget, caput
from amostra.client.commands import SampleReference, ContainerReference

# Optional: set any metadata that rarely changes.

# convenience imports


def ensure_proposal_id(md):
    if 'proposal_id' not in md:
        raise ValueError("Please run user_checkin() first")
Example #12
        print(f"creating {omnia_xy_index_fp}")
        t0 = time.time()
        grid_x = list()
        grid_y = list()
        uids = list()
        for h in self.dbr(sample_name="omnia"):
            grid_x.append(h.start["Grid_X"])
            grid_y.append(h.start["Grid_Y"])
            uids.append(h.start["uid"])

        df = pd.DataFrame(zip(grid_x, grid_y, uids),
                          columns=("Grid_X", "Grid_Y", "uid"))
        df.to_csv(omnia_xy_index_fp, sep="\t")
        print(f"finished {omnia_xy_index_fp} in {time.time()-t0:.3}s")


from databroker import Broker
from ophyd.sim import hw

dbr = Broker.named("pdf")
sim = hw()

omnia_det = OmniaDetector(
    dbr=dbr,
    name="omnia_det",
    motor1=sim.motor1,
    motor_field1="motor1",
    motor2=sim.motor2,
    motor_field2="motor2",
)
Example #13
uids = ('79b54d30-7fff-4a24-80c1-1d5cb3e71373',
        'b0b7a12b-ec80-47a1-ac24-c86eb9ebf464',
        'c1e766fa-dff6-4747-877a-5a26de278ca4',
        '5de9a73c-367e-43a9-8377-80e945ad165f')

from databroker import Broker
db = Broker.named('xpd')

peak1_c = db[uids[0]].table()
peak1_f = db[uids[1]].table()
peak2_c = db[uids[2]].table()
peak2_f = db[uids[3]].table()

peaks_x = list(peak1_f.th_cal)
peaks_y = list(peak1_f.sc_chan1)
peaks_x.extend(list(peak2_f.th_cal))
peaks_y.extend(list(peak2_f.sc_chan1))
xdata = peaks_x
ydata = peaks_y
import matplotlib.pyplot as plt

theta_offset = -35.26

wguess = .1878
max_step = .004
D = 'Si'
detector_name = 'sc_chan1'
theta_offset = -35.26
guessed_sigma = .0012
nsigmas = 15
Example #14
import h5py
from databroker import Broker
from databroker._core import register_builtin_handlers

db = Broker.named('xfm')
register_builtin_handlers(db.reg)

# srx detector, to be moved to filestore
from databroker.assets.handlers import Xspress3HDF5Handler, HandlerBase


class BulkXSPRESS(HandlerBase):
    HANDLER_NAME = 'XPS3_FLY'

    def __init__(self, resource_fn):
        self._handle = h5py.File(resource_fn, 'r')

    def __call__(self):
        return self._handle['entry/instrument/detector/data'][:]


db.reg.register_handler(BulkXSPRESS.HANDLER_NAME, BulkXSPRESS, overwrite=True)


class ZebraHDF5Handler(HandlerBase):
    HANDLER_NAME = 'ZEBRA_HDF51'

    def __init__(self, resource_fn):
        self._handle = h5py.File(resource_fn, 'r')

    def __call__(self, column):
Example #15
from databroker import Broker
db = Broker.named("iss")

import sys
sys.path.insert(0, '/home/xf08id/Repos/workflows')
import interpolation

data = dict()
data['requester'] = "xf08id-ws02"
#data['uid'] = "55f14401-8c60-4474-a24e-62b7722c933c"
data['uid'] = db[-1].start['uid']

store = data.copy()
signal = None
context = None

interpolation.create_req_func(data, store, signal, context)
interpolation.process_run_func(data, store, signal, context)
Example #16
def interpolate_and_save(db_name,
                         db_analysis_name,
                         uid,
                         mono_name='mono1_enc',
                         pulses_per_degree=None):
    ''' Interpolate measured data and save to an analysis store. 

        Parameters
        ----------
        # TODO : change to config (don't rely on Broker.named which explores
            local directory)
        db_name : str
            the name of the database (in /etc/databroker/name.yml)
        db_analysis_name : str
            the name of the analysis database (in /etc/databroker/name.yml)
        uid : str
            The uid of the data set
        mono_name : str
            the monochromator encoder name. Defaults to 'mono1_enc'
        pulses_per_degree : float
            pulses per degree of the encoder from the monochromator
            defaults to the current setup at QAS

        Returns
        -------
            interp_df : the interpolated data
            bin_df : the binned data if e0 is set
    '''
    # the pulses per degree, hard coded for now
    # TODO : Make a signal to pb1.enc1
    # and have it passed at configuration_attrs
    # (which results in data in descriptor)
    if pulses_per_degree is None:
        ppd = 23600 * 400 / 360
    else:
        ppd = pulses_per_degree

    db = Broker.named(db_name)
    hdr = db[uid]
    start = hdr.start
    if 'e0' not in start:
        e0 = 8979
        print("Warning, e0 not in start, setting to Cu: {}".format(e0))
    else:
        e0 = float(hdr.start['e0'])

    db_analysis = Broker.named(db_analysis_name)

    # the important part of Bruno's code that does the interpolation
    gen_parser = xasdata.XASdataGeneric(ppd, db=db, mono_name='mono1_enc')
    gen_parser.load(uid)
    # data saves in gen_parser.interp_df
    gen_parser.interpolate()

    # useful command for debugging, looking at energy
    # this is automatically run by gen_parser
    #energy = encoder2energy(res[:,3], ppd)

    PREFIX = "/nsls2/xf07bm/data/interpolated_data"
    write_path_template = PREFIX + '/%Y/%m/%d/'
    DIRECTORY = datetime.now().strftime(write_path_template)
    scan_id = hdr.start['scan_id']

    md = hdr.start
    filename = 'xas_' + md.get("name", str(uuid4())[:6]) + "_" + str(scan_id)
    os.makedirs(DIRECTORY, exist_ok=True)
    filepath = DIRECTORY

    # file is exported
    fileout = gen_parser.export_trace(filename, filepath)
    call(['chmod', '774', fileout])

    bin_df, bin_df_filename = bin_data(gen_parser,
                                       fileout,
                                       e0,
                                       scan_id=scan_id)

    result = dict(bin_df=bin_df,
                  bin_df_filename=bin_df_filename,
                  interp_df=gen_parser.interp_df,
                  interp_df_filename=fileout,
                  scan_id=scan_id)

    return result
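
# Hedged usage sketch: the database names, uid and encoder name below are
# placeholders; the call also relies on xasdata, bin_data and the QAS
# directory layout referenced above being available.
result = interpolate_and_save('qas', 'qas-analysis',
                              uid='<uid of the run to interpolate>',
                              mono_name='mono1_enc')
interp_df = result['interp_df']
bin_df = result['bin_df']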
Example #17
import matplotlib.pyplot as plt

from matplotlib import rcParams
from databroker import Broker
from scipy.interpolate import splev, splrep

from plot_configs import params, dpi, figure_dir
from metadata_parsing import print_meta

rcParams.update(params)

SAVE_FIGS = False
SHOW_FIT = False

try:
    db
except NameError:
    db = Broker.named('local_file')

uid = '3de221b4-b9d5-470f-a2b8-a9c3e09a6e94'
print('UID = {}'.format(uid[0:6]))
header = db[uid]  # db is a DataBroker instance
df = header.table()

plt.figure(dpi=dpi)
for fs in df['srs_lockin_filt_slope'].unique():
    idx = (df['srs_lockin_filt_slope'] == fs)
    plt.semilogy(df.loc[idx]['fg_freq'],
                 df.loc[idx]['amp'],
                 marker='*',
                 label='Filter = {}'.format(fs.replace('-', ' ')))

plt.grid(True)
Example #18
'''
    Mock file resource

    In this example, we're going to save a file resource to databroker and try
    to retrieve it with a file handler

    You can run this from your laptop at home as well, so long as you install
    databroker.

    You'll create a resource as in the previous example, and try to retrieve it
    using the datum info
'''
from databroker.tests.utils import temp_config
from databroker import Broker
# this will create a temporary databroker object with nothing in it
db = Broker.from_config(temp_config())

from databroker.assets.handlers_base import HandlerBase


class MyHandler(HandlerBase):
    def __init__(self, *args, **kwargs):
        print("In init, received args : {}, kwargs {}".format(args, kwargs))

    def __call__(self, *args, **kwargs):
        print("In call, received args : {}, kwargs {}".format(args, kwargs))


# registering a handler according to a certain spec
db.reg.register_handler("ISSFILE", MyHandler)
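
# Hedged continuation of the example above: insert a dummy resource and datum
# for the 'ISSFILE' spec, then retrieve it, which instantiates MyHandler with
# the resource arguments and calls it with the datum kwargs.  Exact Registry
# method signatures can vary slightly between databroker versions.
resource = db.reg.insert_resource('ISSFILE', 'mock_file.dat',
                                  {'frame': 0}, root='/tmp')
db.reg.insert_datum(resource, 'mock-datum-id', {'point_number': 1})
db.reg.retrieve('mock-datum-id')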
Example #19
import h5py
from databroker import Broker
from databroker._core import register_builtin_handlers

db = Broker.named('srx')
register_builtin_handlers(db.fs)

# srx detector, to be moved to filestore
from databroker.assets.handlers import Xspress3HDF5Handler, HandlerBase


class BulkXSPRESS(HandlerBase):
    HANDLER_NAME = 'XPS3_FLY'

    def __init__(self, resource_fn):
        self._handle = h5py.File(resource_fn, 'r')

    def __call__(self):
        return self._handle['entry/instrument/detector/data'][:]


db.reg.register_handler(BulkXSPRESS.HANDLER_NAME, BulkXSPRESS, overwrite=True)


class ZebraHDF5Handler(HandlerBase):
    HANDLER_NAME = 'ZEBRA_HDF51'

    def __init__(self, resource_fn):
        self._handle = h5py.File(resource_fn, 'r')

    def __call__(self, column):
Example #20
def run_server(
    folder,
    outbound_proxy_address=glbl_dict["outbound_proxy_address"],
    prefix=None,
    handlers=None,
):
    """Start up the portable databroker server

    Parameters
    ----------
    folder : str
        The directory in which to save the portable databrokers
    outbound_proxy_address : str, optional
        The address and port of the zmq proxy. Defaults to
        ``glbl_dict["outbound_proxy_address"]``
    prefix : bytes or list of bytes, optional
        The Publisher channels to listen to. Defaults to
        ``[b"an", b"raw"]``
    handlers : dict
        The map between handler specs and handler classes, defaults to
        the map used by the experimental databroker if possible
    """
    # TODO: convert to bytestrings if needed
    # TODO: maybe separate this into different processes?
    # TODO: support multiple locations for folders
    if prefix is None:
        prefix = [b"an", b"raw"]
    d = RemoteDispatcher(outbound_proxy_address, prefix=prefix)
    portable_folder = folder
    portable_configs = {}
    for folder_name in ["an", "raw"]:
        fn = os.path.join(portable_folder, folder_name)
        os.makedirs(fn, exist_ok=True)
        # if the path doesn't exist then make the databrokers
        with open(
            os.path.join(portable_folder, f"{folder_name}.yml"), "w"
        ) as f:
            f.write(portable_template.format(folder_name))
        print(portable_template.format(folder_name))

        print(fn)
        portable_configs[folder_name] = yaml.safe_load(
            io.StringIO(portable_template.format(fn))
        )
        os.makedirs(os.path.join(fn, "data"), exist_ok=True)

    # TODO: add more files here, eg. a databroker readme/tutorial
    with open(os.path.join(portable_folder, "db_load.py"), "w") as f:
        f.write(load_script)
    an_broker = Broker.from_config(portable_configs["an"])

    an_source = Stream()
    zed = an_source.Store(
        os.path.join(
            portable_configs["an"]["metadatastore"]["config"]["directory"],
            "data",
        ),
        NpyWriter,
    )
    zed.starsink(an_broker.insert)

    raw_broker = Broker.from_config(portable_configs["raw"])
    if handlers is None:
        handlers = raw_broker.reg.handler_reg

    raw_source = Stream()
    raw_source.starmap(
        ExportCallback(
            os.path.join(
                portable_configs["raw"]["metadatastore"]["config"][
                    "directory"
                ],
                "data",
            ),
            handler_reg=handlers,
        )
    ).starsink(raw_broker.insert)

    rr = RunRouter(
        [
            lambda x: (lambda *nd: raw_source.emit(nd))
            if x.get("analysis_stage", "") == "raw"
            else None
        ]
        + [
            lambda x: (lambda *nd: an_source.emit(nd))
            if x.get("analysis_stage", None) == "pdf"
            else None,
            lambda x: (lambda *nd: an_source.emit(nd))
            if x.get("analysis_stage", None) == "integration"
            else None,
        ]
    )

    d.subscribe(rr)

    print("Starting Portable DB Server")
    d.start()
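
# Hedged usage sketch: the folder and proxy address below are placeholders.
# The call blocks, writing the portable raw/analysis databrokers into the
# folder and dispatching documents until interrupted.
if __name__ == '__main__':
    run_server('/tmp/portable_db', outbound_proxy_address='localhost:5568')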
Example #21
import os

import matplotlib.pyplot as plt
import scipy.interpolate as inter
from matplotlib import rcParams
from databroker import Broker

from plot_configs import params, dpi, figure_dir
from metadata_parsing import print_meta

rcParams.update(params)
plt.figure(dpi=dpi)

SAVE_FIGS = False
PLOT_FIT = False

try:
    db
except NameError:
    db = Broker.named(
        'local_file')  # a broker poses queries for saved data sets
'''
|  0 | f31271f1-4231-4798-afe6-83e8647c0927 | 2018-07-06 15:29:07 | phase_dependence_offset | count       | ADA2200 |           10 |
|  1 | 63bfefd6-3cc6-45ef-9497-2e97a5cf8f6f | 2018-07-06 15:28:36 | phase_dependence_offset | count       | ADA2200 |           10 |
|  2 | b270dad3-96fd-475a-ad1e-b2789db96ed0 | 2018-07-06 15:27:07 | phase_dependence        | scan        | ADA2200 |           60 |
'''

# get data into a pandas data-frame
uid = 'b270dad3-96fd-475a-ad1e-b2789db96ed0'
print('UID = {}'.format(uid[0:6]))
header = db[uid]
df = header.table()
print_meta(header, os.path.basename(__file__))

# view the baseline data (i.e. configuration values)
print('open meta table')
Example #22
    from bluesky.utils import install_qt_kicker
    install_qt_kicker()
    print("Insalling Qt Kicker...")

# Make ophyd listen to pyepics.
from ophyd import setup_ophyd
setup_ophyd()

# Set up a RunEngine and use metadata backed by a sqlite file.
from bluesky import RunEngine
from bluesky.utils import get_history
RE = RunEngine(get_history())

# Set up a Broker.
from databroker import Broker
db = Broker.named('amx')

# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.

from bluesky import RunEngine
RE = RunEngine()

abort = RE.abort
resume = RE.resume
stop = RE.stop

RE.subscribe(db.insert)

# Set up SupplementalData.
from bluesky import SupplementalData
Example #23
from bluesky import RunEngine
from bluesky.run_engine import get_bluesky_event_loop

from bluesky.callbacks.best_effort import BestEffortCallback
from databroker import Broker

# The following plans/devices must be imported (otherwise plan parsing wouldn't work)
from ophyd.sim import det1, det2, motor  # noqa: F401
from bluesky.plans import count, scan  # noqa: F401

import logging

logger = logging.getLogger(__name__)

DB = [Broker.named('temp')]


class RunEngineWorker(Process):
    """
    The class implementing the Run Engine Worker process.

    Parameters
    ----------
    conn: multiprocessing.Connection
        One end of bidirectional (input/output) pipe. The other end is used by RE Manager.
    args, kwargs
        `args` and `kwargs` of the `multiprocessing.Process`
    """
    def __init__(self, *args, conn, **kwargs):
Example #24
import shutil

from ophyd.sim import det4, motor1, motor2, motor3
from bluesky import RunEngine
from bluesky.callbacks.best_effort import BestEffortCallback
# `mov` and `relative_outer_product_scan` are the older bluesky names used
# below; newer releases expose them as bps.mov and bp.rel_grid_scan.
from bluesky.plan_stubs import mov
from bluesky.plans import relative_outer_product_scan

from databroker.tests.utils import temp_config
from databroker import Broker


# db setup
config = temp_config()
tempdir = config['metadatastore']['config']['directory']

def cleanup():
    shutil.rmtree(tempdir)

db = Broker.from_config(config)


RE = RunEngine({})
# subscribe BEC
bec = BestEffortCallback()
RE.subscribe(bec)
RE.subscribe(db.insert)



# move motor to a reproducible location
RE(mov(motor1, 0))
RE(mov(motor2, 0))
RE(relative_outer_product_scan([det4], motor1, -1, 0, 10, motor2, -2,
                               0, 20, True))
Example #25
from databroker import Broker
from databroker.tests.utils import temp_config


def test_auto_register():
    db_auto = Broker.from_config(temp_config())
    db_manual = Broker.from_config(temp_config(), auto_register=False)
    assert db_auto.reg.handler_reg
    assert not db_manual.reg.handler_reg
Example #26
import os
import shutil
import tempfile

import yaml
from databroker import Broker
try:
    db = Broker.named('xpd')
except FileNotFoundError:
    # Broker.named raises FileNotFoundError when there is no 'xpd'
    # configuration; fall back to the simulated databroker.
    from xpdsim import db
import logging
from pkg_resources import resource_filename as rs_fn

logger = logging.getLogger(__name__)
pytest_dir = rs_fn('xpdan', 'tests')


def load_configuration(name):
    """
    Load configuration data from a cascading series of locations.

    The precedence order is (highest priority last):

    1. The conda environment
       - CONDA_ENV/etc/{name}.yaml (if CONDA_ETC_ is defined for the env)
    2. The shipped version
    3. At the system level
       - /etc/{name}.yml
    4. In the user's home directory
       - ~/.config/{name}.yml
    """
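
# The body of load_configuration is truncated above.  Below is an illustrative
# sketch only of the cascading lookup the docstring describes: later
# (higher-priority) files override earlier ones.  The shipped-version path is
# a placeholder assumption.
def _load_configuration_sketch(name):
    candidates = [
        os.path.join(os.environ.get('CONDA_ETC_', ''), name + '.yaml'),  # 1. conda env
        rs_fn('xpdan', os.path.join('config', name + '.yml')),           # 2. shipped (assumed path)
        os.path.join('/etc', name + '.yml'),                             # 3. system level
        os.path.expanduser('~/.config/' + name + '.yml'),                # 4. user home
    ]
    config = {}
    for path in candidates:
        if path and os.path.isfile(path):
            with open(path) as f:
                config.update(yaml.safe_load(f) or {})
    return config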
Example #27
logger.info(__file__)
"""set up the databroker (db)"""

import os
callback_db = {}

# load config from ~/.config/databroker/mongodb_config.yml
from databroker import Broker
db = Broker.named("mongodb_config")

# Subscribe metadatastore to documents.
# If this is removed, data is not saved to metadatastore.
callback_db['Broker'] = RE.subscribe(db.insert)

# Set up SupplementalData.
from bluesky import SupplementalData
sd = SupplementalData()
RE.preprocessors.append(sd)

# Add a progress bar.
from bluesky.utils import ProgressBarManager
pbar_manager = ProgressBarManager()
RE.waiting_hook = pbar_manager

# Register bluesky IPython magics.
from bluesky.magics import BlueskyMagics
get_ipython().register_magics(BlueskyMagics)

# Set up the BestEffortCallback.
from bluesky.callbacks.best_effort import BestEffortCallback
bec = BestEffortCallback()
Example #28
simmotor2.read()
simmotor2.set(1)

# RE(scan([detector],simmotor1,0,14,10))


#Flyer
from ophyd.sim import hw
from bluesky.run_engine import RunEngine
from databroker import temp_config, Broker
from bluesky.plans import fly
import bluesky.plans as bp

hw = hw()
flying_zebra = hw.flyer1
db = Broker.named('temp')
RE = RunEngine()
RE.subscribe(db.insert)
RE(fly([flying_zebra]))
RE(fly([flying_zebra]))
hdr = db[-1]
hdr.stream_names
hdr.table(stream_name=hdr.stream_names[0])  # pass one of the stream names listed above

hw.direct_img

hw.det.exposure_time = 1

RE(bp.count([hw.det], num=3))

db[-1].table()
Example #29
# (Truncated snippet: CustomRunEngine, gs, MDS, FileStore, Broker,
# register_builtin_handlers and is_ipython are imported or defined earlier in
# the original startup file.)
RE = CustomRunEngine()
gs.RE = RE

mds = MDS(
    {
        'host': 'xf16idc-ca',
        'database': 'metadatastore_production_v1',
        'port': 27017,
        'timezone': 'US/Eastern'
    },
    auth=False)

db = Broker(
    mds,
    FileStore({
        'host': 'xf16idc-ca',
        'database': 'filestore',
        'port': 27017
    }))

register_builtin_handlers(db.fs)
RE.subscribe('all', mds.insert)

if is_ipython():
    # FIXME: Remove this once we migrate to PYTHON 3.5
    from IPython import get_ipython
    from IPython.core.pylabtools import backend2gui
    from matplotlib import get_backend
    ip = get_ipython()
    ipython_gui_name = backend2gui.get(get_backend())
    if ipython_gui_name:
Example #30
import h5py
from databroker import Broker
from databroker._core import register_builtin_handlers

db = Broker.named('xfm')
register_builtin_handlers(db.reg)


# srx detector, to be moved to filestore
from databroker.assets.handlers import Xspress3HDF5Handler, HandlerBase

class BulkXSPRESS(HandlerBase):
    HANDLER_NAME = 'XPS3_FLY'
    def __init__(self, resource_fn):
        self._handle = h5py.File(resource_fn, 'r')

    def __call__(self):
        return self._handle['entry/instrument/detector/data'][:]

db.reg.register_handler(BulkXSPRESS.HANDLER_NAME, BulkXSPRESS,
                       overwrite=True)

class ZebraHDF5Handler(HandlerBase):
    HANDLER_NAME = 'ZEBRA_HDF51'
    def __init__(self, resource_fn):
        self._handle = h5py.File(resource_fn, 'r')

    def __call__(self, column):
        return self._handle[column][:]

class SISHDF5Handler(HandlerBase):
Example #31
"""
This module is for the necessary packages for the XPCS analysis
"""
from IPython.core.magics.display import Javascript
from skbeam.core.utils import multi_tau_lags
from skimage.draw import line_aa, line, polygon, ellipse, circle
from modest_image import imshow
# edit handlers here to switch to PIMS or dask
# this does the databroker import
#from chxtools.handlers import EigerHandler
from eiger_io.fs_handler import EigerHandler
#from databroker import DataBroker as db, get_images, get_table, get_events, get_fields
from databroker import get_images, get_table, get_events, get_fields
from databroker.assets.path_only_handlers import RawHandler
## Import all the required packages for  Data Analysis
from databroker import Broker
db = Broker.named('chx')
#* scikit-beam - data analysis tools for X-ray science
#    - https://github.com/scikit-beam/scikit-beam
#* xray-vision - plotting helper functions for X-ray science
#    - https://github.com/Nikea/xray-vision
import xray_vision
import matplotlib.cm as mcm
import copy
import xray_vision.mpl_plotting as mpl_plot
from xray_vision.mpl_plotting import speckle
from xray_vision.mask.manual_mask import ManualMask
import skbeam.core.roi as roi
import skbeam.core.correlation as corr
import skbeam.core.utils as utils
import numpy as np
from datetime import datetime
Example #32
# import nslsii

# Register bluesky IPython magics.
#from bluesky.magics import BlueskyMagics
#get_ipython().register_magics(BlueskyMagics)

#nslsii.configure_base(get_ipython().user_ns, 'amx')
import os  # used for os.environ["BEAMLINE_ID"] below

import bluesky.plans as bp

from bluesky.run_engine import RunEngine
from bluesky.utils import get_history
RE = RunEngine(get_history())
beamline = os.environ["BEAMLINE_ID"]
from databroker import Broker
db = Broker.named(beamline)

RE.subscribe(db.insert)

# from bluesky.callbacks.best_effort import BestEffortCallback
# bec = BestEffortCallback()
# RE.subscribe(bec)


# convenience imports
# from ophyd.commands import *
from bluesky.callbacks import *
# from bluesky.spec_api import *
# from bluesky.global_state import gs, abort, stop, resume
# from databroker import (DataBroker as db, get_events, get_images,
#                                                 get_table, get_fields, restream, process)
Example #33
    def run(self):
        """
        Overrides the `run()` function of the `multiprocessing.Process` class. Called
        by the `start` method.
        """
        logging.basicConfig(level=max(logging.WARNING, self._log_level))
        logging.getLogger(__name__).setLevel(self._log_level)

        success = True

        from .profile_tools import set_re_worker_active, clear_re_worker_active

        # Set the environment variable indicating that RE Worker is active. Status may be
        #   checked using 'is_re_worker_active()' in startup scripts or modules.
        set_re_worker_active()

        from .plan_monitoring import RunList, CallbackRegisterRun

        # Initialization should be done before communication is enabled.
        self._active_run_list = RunList()

        self._comm_to_manager.add_method(self._request_state_handler,
                                         "request_state")
        self._comm_to_manager.add_method(self._request_plan_report_handler,
                                         "request_plan_report")
        self._comm_to_manager.add_method(self._request_run_list_handler,
                                         "request_run_list")
        self._comm_to_manager.add_method(self._command_close_env_handler,
                                         "command_close_env")
        self._comm_to_manager.add_method(self._command_confirm_exit_handler,
                                         "command_confirm_exit")
        self._comm_to_manager.add_method(self._command_run_plan_handler,
                                         "command_run_plan")
        self._comm_to_manager.add_method(self._command_pause_plan_handler,
                                         "command_pause_plan")
        self._comm_to_manager.add_method(self._command_continue_plan_handler,
                                         "command_continue_plan")
        self._comm_to_manager.add_method(self._command_reset_worker_handler,
                                         "command_reset_worker")
        self._comm_to_manager.start()

        self._exit_event = threading.Event()
        self._exit_confirmed_event = threading.Event()
        self._re_report_lock = threading.Lock()

        from bluesky import RunEngine
        from bluesky.run_engine import get_bluesky_event_loop
        from bluesky.callbacks.best_effort import BestEffortCallback
        from bluesky_kafka import Publisher as kafkaPublisher
        from bluesky.utils import PersistentDict

        from .profile_tools import global_user_namespace

        # TODO: TC - Do you think that the following code may be included in RE.__init__()
        #   (for Python 3.8 and above)
        # Setting the default event loop is needed to make the code work with Python 3.8.
        loop = get_bluesky_event_loop()
        asyncio.set_event_loop(loop)

        try:
            keep_re = self._config_dict["keep_re"]
            startup_dir = self._config_dict.get("startup_dir", None)
            startup_module_name = self._config_dict.get(
                "startup_module_name", None)
            startup_script_path = self._config_dict.get(
                "startup_script_path", None)

            self._re_namespace = load_worker_startup_code(
                startup_dir=startup_dir,
                startup_module_name=startup_module_name,
                startup_script_path=startup_script_path,
                keep_re=keep_re,
            )

            if keep_re and ("RE" not in self._re_namespace):
                raise RuntimeError(
                    "Run Engine is not created in the startup code and 'keep_re' option is activated."
                )
            self._existing_plans = plans_from_nspace(self._re_namespace)
            self._existing_devices = devices_from_nspace(self._re_namespace)
            logger.info("Startup code loading was completed")

        except Exception as ex:
            logger.exception(
                "Failed to start RE Worker environment. Error while loading startup code: %s.",
                str(ex),
            )
            success = False

        # Load lists of allowed plans and devices
        logger.info("Loading the lists of allowed plans and devices ...")
        path_pd = self._config_dict["existing_plans_and_devices_path"]
        path_ug = self._config_dict["user_group_permissions_path"]
        try:
            self._allowed_plans, self._allowed_devices = load_allowed_plans_and_devices(
                path_existing_plans_and_devices=path_pd,
                path_user_group_permissions=path_ug)
        except Exception as ex:
            logger.exception(
                "Error occurred while loading lists of allowed plans and devices from '%s': %s",
                path_pd, str(ex))

        if success:
            logger.info("Instantiating and configuring Run Engine ...")

            try:
                # Make RE namespace available to the plan code.
                global_user_namespace.set_user_namespace(
                    user_ns=self._re_namespace, use_ipython=False)

                if self._config_dict["keep_re"]:
                    # Copy references from the namespace
                    self._RE = self._re_namespace["RE"]
                    self._db = self._re_namespace.get("db", None)
                else:
                    # Instantiate a new Run Engine and Data Broker (if needed)
                    md = {}
                    if self._config_dict["use_persistent_metadata"]:
                        # This code is temporarily copied from 'nslsii' before better solution for keeping
                        #   continuous sequence Run ID is found. TODO: continuous sequence of Run IDs.
                        directory = os.path.expanduser("~/.config/bluesky/md")
                        os.makedirs(directory, exist_ok=True)
                        md = PersistentDict(directory)

                    self._RE = RunEngine(md)
                    self._re_namespace["RE"] = self._RE

                    def factory(name, doc):
                        # Documents from each run are routed to an independent
                        #   instance of BestEffortCallback
                        bec = BestEffortCallback()
                        return [bec], []

                    # Subscribe to Best Effort Callback in the way that works with multi-run plans.
                    rr = RunRouter([factory])
                    self._RE.subscribe(rr)

                    # Subscribe RE to databroker if config file name is provided
                    self._db = None
                    if "databroker" in self._config_dict:
                        config_name = self._config_dict["databroker"].get(
                            "config", None)
                        if config_name:
                            logger.info(
                                "Subscribing RE to Data Broker using configuration '%s'.",
                                config_name)
                            from databroker import Broker

                            self._db = Broker.named(config_name)
                            self._re_namespace["db"] = self._db

                            self._RE.subscribe(self._db.insert)

                # Subscribe Run Engine to 'CallbackRegisterRun'. This callback is used internally
                #   by the worker process to keep track of the runs that are open and closed.
                run_reg_cb = CallbackRegisterRun(
                    run_list=self._active_run_list)
                self._RE.subscribe(run_reg_cb)

                if "kafka" in self._config_dict:
                    logger.info(
                        "Subscribing to Kafka: topic '%s', servers '%s'",
                        self._config_dict["kafka"]["topic"],
                        self._config_dict["kafka"]["bootstrap"],
                    )
                    kafka_publisher = kafkaPublisher(
                        topic=self._config_dict["kafka"]["topic"],
                        bootstrap_servers=self._config_dict["kafka"]
                        ["bootstrap"],
                        key="kafka-unit-test-key",
                        # work with a single broker
                        producer_config={
                            "acks": 1,
                            "enable.idempotence": False,
                            "request.timeout.ms": 5000
                        },
                        serializer=partial(msgpack.dumps, default=mpn.encode),
                    )
                    self._RE.subscribe(kafka_publisher)

                if "zmq_data_proxy_addr" in self._config_dict:
                    from bluesky.callbacks.zmq import Publisher

                    publisher = Publisher(
                        self._config_dict["zmq_data_proxy_addr"])
                    self._RE.subscribe(publisher)

                self._execution_queue = queue.Queue()

                self._state["environment_state"] = "ready"

            except BaseException as ex:
                success = False
                logger.exception(
                    "Error occurred while initializing the environment: %s.",
                    str(ex))

        if success:
            logger.info("RE Environment is ready")
            self._execute_in_main_thread()
        else:
            self._exit_event.set()

        logger.info("Environment is waiting to be closed ...")
        self._state["environment_state"] = "closing"

        # Wait until confirmation is received from RE Manager
        while not self._exit_confirmed_event.is_set():
            ttime.sleep(0.02)

        # Clear the environment variable indicating that RE Worker is active. It is an optional step
        #   since the process is about to close, but we still do it for consistency.
        clear_re_worker_active()

        self._RE = None

        self._comm_to_manager.stop()

        logger.info("Run Engine environment was closed successfully")