Example 1
from bluesky import RunEngine
from bluesky.callbacks.best_effort import BestEffortCallback
from bluesky.utils import install_kicker

bec = BestEffortCallback()

RE = RunEngine({})  # RunEngine takes a metadata dict; the callback is attached via subscribe()
RE.subscribe(bec)

install_kicker()
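
A minimal usage sketch of the setup above, assuming the simulated detector
from ophyd.sim (not part of the original snippet): once the kicker is
installed, the BestEffortCallback's table and plots stay responsive while a
plan runs.

from bluesky.plans import count
from ophyd.sim import det  # simulated detector, used here only for illustration

# bec prints a LiveTable and updates live plots as events are emitted
RE(count([det], num=3))
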
Example 2
def load_conf(conf, hutch_dir=None):
    """
    Step through the object loading procedure, given a configuration.

    The procedure is:

    - Check the configuration for errors
    - Display the banner by calling `hutch_banner`
    - Use ``hutch`` key to create ``hutch.db`` importable namespace to
      stash the objects. This will be literally ``hutch.db`` if hutch is
      not provided, or the hutch name e.g. ``mfx.db``.
    - Create a ``RunEngine``, ``RE``
    - Import ``plan_defaults`` and include as ``p``, ``plans``
    - Create a ``daq`` object with ``RE`` registered, using ``daq_platform``
      to define the ``platform`` argument if provided. The default value if
      ``daq_platform`` was not defined is 0.
    - Use ``db`` key to load devices from the ``happi`` beamline database
      and create a ``hutch_beampath`` object from ``lightpath``
    - Use ``load`` key to bring up the user's ``beamline`` modules
    - Use ``experiment`` key to select the current experiment

        - If ``experiment`` was missing, autoselect experiment using
          ``hutch`` key

    - Use current experiment to load experiment objects from questionnaire
    - Use current experiment to load experiment file

    If a conf key is missing, we'll note it in a ``logger.info`` message.
    If an extra conf entry is found, we'll note it in a ``logger.warning``
    message.
    If an automatically selected file is missing, we'll note it in a
    ``logger.info`` message.
    All other errors will be noted in a ``logger.error`` message.

    Parameters
    ----------
    conf: ``dict``
        ``dict`` interpretation of the original yaml file

    hutch_dir: ``Path`` or ``str``, optional
        ``Path`` object that points to the hutch's launch directory. This is
        the directory that includes the ``experiments`` directory and a
        hutchname directory, e.g. ``mfx``.
        If this is missing, we'll be unable to write the ``db.txt`` file,
        do relative filepath database selection for ``happi``,
        or establish a preset positions directory.

    Returns
    -------
    objs: ``dict{str: object}``
        See the return value of `load`
    """
    # Warn user about excess config entries
    for key in conf:
        if key not in VALID_KEYS:
            txt = ('Found %s in configuration, but this is not a valid key. '
                   'The valid keys are %s')
            logger.warning(txt, key, VALID_KEYS)

    # Grab configurations from dict, set defaults, show missing
    try:
        hutch = conf['hutch']
        if isinstance(hutch, str):
            hutch = hutch.lower()
        else:
            logger.error('Invalid hutch conf %s, must be string.', hutch)
            hutch = None
    except KeyError:
        hutch = None
        logger.info('Missing hutch from conf. Will skip DAQ.')

    # Display the banner
    if hutch is None:
        hutch_banner()
    else:
        hutch_banner(hutch)

    try:
        db = conf['db']
        if isinstance(db, str):
            if db[0] == '/':
                db = Path(db)
            else:
                db = Path(hutch_dir) / db
        else:
            logger.error('Invalid db conf %s, must be string.', db)
            db = None
    except KeyError:
        db = None
        logger.info(('Missing db from conf. Will skip loading from shared '
                     'database.'))
    try:
        load = conf['load']
        if not isinstance(load, (str, list)):
            logger.error('Invalid load conf %s, must be string or list', load)
            load = None
    except KeyError:
        load = None
        logger.info('Missing load from conf. Will skip loading hutch files.')

    try:
        experiment = conf['experiment']
        if (not isinstance(experiment, dict)
                or 'proposal' not in experiment
                or 'run' not in experiment):
            logger.error(('Invalid experiment selection %s, must be a dict '
                          'with keys "proposal" and "run"'), experiment)
            experiment = None
    except KeyError:
        experiment = None
        if hutch is None:
            logger.info(('Missing hutch and experiment from conf. Will not '
                         'load objects from questionnaire or experiment '
                         'file.'))

    try:
        # This is an internal bookkeeping variable. The ELog uses it to
        # determine whether we are in the secondary or primary DAQ mode.
        default_platform = True
        platform_info = conf['daq_platform']
        hostname = gethostname()
        try:
            daq_platform = platform_info[hostname]
            logger.info('Selected %s daq platform: %s',
                        hostname, daq_platform)
            default_platform = False
        except KeyError:
            daq_platform = platform_info['default']
            logger.info('Selected default %s daq platform: %s',
                        hutch, daq_platform)
    except KeyError:
        daq_platform = 0
        logger.info('Selected default hutch-python daq platform: 0')

    # Make cache namespace
    cache = LoadCache((hutch or 'hutch') + '.db', hutch_dir=hutch_dir)

    # Make RunEngine
    RE = RunEngine({})
    bec = BestEffortCallback()
    RE.subscribe(bec)
    cache(RE=RE)
    try:
        install_kicker()
    except RuntimeError:
        # Probably don't have a display if this failed, so nothing to kick
        pass

    # Collect Plans
    cache(plans=plan_defaults)
    cache(p=plan_defaults)

    # Daq
    with safe_load('daq'):
        daq_objs = get_daq_objs(daq_platform, RE)
        cache(**daq_objs)

    # Happi db and Lightpath
    if db is not None:
        with safe_load('database'):
            happi_objs = get_happi_objs(db, hutch)
            cache(**happi_objs)
            bp = get_lightpath(db, hutch)
            cache(**{"{}_beampath".format(hutch.lower()): bp})

    # Elog
    with safe_load('elog'):
        # Use whether or not we used the default_platform to decide
        # if we are in a specialty station
        if default_platform:
            logger.debug("Using primary experiment ELog")
            kwargs = dict()
        else:
            logger.info("Configuring ELog to post to secondary experiment")
            kwargs = {'station': '1'}
        cache(elog=HutchELog.from_conf(hutch.upper(), **kwargs))

    # Load user files
    if load is not None:
        load_objs = get_user_objs(load)
        cache(**load_objs)

    # Auto select experiment if we need to
    proposal = None
    if experiment is None:
        if hutch is not None:
            try:
                # xpplp1216
                expname = get_current_experiment(hutch)
                logger.info('Selected active experiment %s', expname)
                # lp12
                proposal = expname[3:-2]
                # 16
                run = expname[-2:]
            except Exception:
                err = 'Failed to select experiment automatically'
                logger.error(err)
                logger.debug(err, exc_info=True)

    # Experiment objects
    if experiment is not None:
        proposal = experiment['proposal']
        run = experiment['run']

    if proposal is not None:
        qs_objs = get_qs_objs(proposal, run)
        cache(**qs_objs)
        user = get_exp_objs(proposal, run)
        for name, obj in qs_objs.items():
            setattr(user, name, obj)
        cache(x=user, user=user)

    # Default namespaces
    with safe_load('default groups'):
        default_class_namespace('EpicsMotor', 'motors', cache)
        default_class_namespace('Slits', 'slits', cache)
        if hutch is not None:
            tree = tree_namespace(scope='hutch_python.db')
            # Prune meta, remove branches with only one object
            for name, space in tree.__dict__.items():
                if count_ns_leaves(space) > 1:
                    cache(**{name: space})

        all_objs = copy(cache.objs)
        cache(a=all_objs, all_objects=all_objs)

    # Install Presets
    if hutch_dir is not None:
        with safe_load('position presets'):
            presets_dir = Path(hutch_dir) / 'presets'
            beamline_presets = presets_dir / 'beamline'
            preset_paths = [presets_dir, beamline_presets]
            if proposal is not None:
                experiment_presets = presets_dir / (proposal + str(run))
                preset_paths.append(experiment_presets)
            for path in preset_paths:
                if not path.exists():
                    path.mkdir()
                    path.chmod(0o777)
            if proposal is None:
                setup_preset_paths(hutch=beamline_presets)
            else:
                setup_preset_paths(hutch=beamline_presets,
                                   exp=experiment_presets)

    # Write db.txt info file to the user's module
    try:
        cache.write_file()
    except OSError:
        logger.warning('No permissions to write db.txt file')

    return cache.objs.__dict__
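
For reference, a sketch of a conf dict that exercises the keys documented
above; the values are illustrative placeholders, not a real beamline
configuration.

conf = {
    'hutch': 'xpp',                                   # hutch name, lower case
    'db': 'happi_db.json',                            # resolved relative to hutch_dir
    'load': ['xpp.beamline'],                         # user module(s) to import
    'experiment': {'proposal': 'lp12', 'run': '16'},  # omit to auto-select
    'daq_platform': {'default': 0},
}
objs = load_conf(conf, hutch_dir='/path/to/xpp')      # hypothetical launch directory
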
Example 3
def configure_base(
    user_ns,
    broker_name,
    *,
    bec=True,
    epics_context=False,
    magics=True,
    mpl=True,
    configure_logging=True,
    pbar=True,
    ipython_exc_logging=True,
):
    """
    Perform base setup and instantiation of important objects.

    This factory function instantiates objects essential to data collection
    environments at NSLS-II and adds them to the current namespace. In some
    cases (documented below), it will check whether certain variables already
    exist in the user namespace and will avoid creating them if so. The
    following are added:

    * ``RE`` -- a RunEngine
        This is created only if an ``RE`` instance does not currently exist in
        the namespace.
    * ``db`` -- a Broker (from "databroker"), subscribed to ``RE``
    * ``bec`` -- a BestEffortCallback, subscribed to ``RE``
    * ``peaks`` -- an alias for ``bec.peaks``
    * ``sd`` -- a SupplementalData preprocessor, added to ``RE.preprocessors``
    * ``pbar_manager`` -- a ProgressBarManager, set as the ``RE.waiting_hook``

    And it performs some low-level configuration:

    * creates a context in ophyd's control layer (``ophyd.setup_ophyd()``)
    * turns on interactive plotting (``matplotlib.pyplot.ion()``)
    * bridges the RunEngine and Qt event loops
      (``bluesky.utils.install_kicker()``)
    * logs ERROR-level log messages from ophyd to standard out

    Parameters
    ----------
    user_ns: dict
        a namespace --- for example, ``get_ipython().user_ns``
    broker_name : Union[str, Broker]
        Name of databroker configuration or a Broker instance.
    bec : boolean, optional
        True by default. Set False to skip BestEffortCallback.
    epics_context : boolean, optional
        False by default. Set True to create an EPICS context with
        ``setup_ophyd()``.
    magics : boolean, optional
        True by default. Set False to skip registration of custom IPython
        magics.
    mpl : boolean, optional
        True by default. Set False to skip matplotlib ``ion()`` and event-loop
        bridging.
    configure_logging : boolean, optional
        True by default. Set False to skip INFO-level logging to
        /var/logs/bluesky/bluesky.log.
    pbar : boolean, optional
        True by default. Set False to skip ProgressBarManager.
    ipython_exc_logging : boolean, optional
        True by default. Exception stack traces will be written to IPython
        log file when IPython logging is enabled.

    Returns
    -------
    names : list
        list of names added to the namespace

    Examples
    --------
    Configure IPython for CHX.

    >>> configure_base(get_ipython().user_ns, 'chx');
    """
    ns = {}  # We will update user_ns with this at the end.
    # Protect against double-subscription.
    SENTINEL = "__nslsii_configure_base_has_been_run"
    if user_ns.get(SENTINEL):
        raise RuntimeError(
            "configure_base should only be called once per process.")
    ns[SENTINEL] = True
    # Set up a RunEngine and use metadata backed by files on disk.
    from bluesky import RunEngine, __version__ as bluesky_version

    if LooseVersion(bluesky_version) >= LooseVersion("1.6.0"):
        # current approach using PersistentDict
        from bluesky.utils import PersistentDict

        directory = os.path.expanduser("~/.config/bluesky/md")
        os.makedirs(directory, exist_ok=True)
        md = PersistentDict(directory)
    else:
        # legacy approach using HistoryDict
        from bluesky.utils import get_history

        md = get_history()
    # if RunEngine already defined grab it
    # useful when users make their own custom RunEngine
    if "RE" in user_ns:
        RE = user_ns["RE"]
    else:
        RE = RunEngine(md)
        ns["RE"] = RE

    # Set up SupplementalData.
    # (This is a no-op until devices are added to it,
    # so there is no need to provide a 'skip_sd' switch.)
    from bluesky import SupplementalData

    sd = SupplementalData()
    RE.preprocessors.append(sd)
    ns["sd"] = sd

    if isinstance(broker_name, str):
        # Set up a Broker.
        from databroker import Broker

        db = Broker.named(broker_name)
        ns["db"] = db
    else:
        db = broker_name

    RE.subscribe(db.insert)

    if pbar:
        # Add a progress bar.
        from bluesky.utils import ProgressBarManager

        pbar_manager = ProgressBarManager()
        RE.waiting_hook = pbar_manager
        ns["pbar_manager"] = pbar_manager

    if magics:
        # Register bluesky IPython magics.
        from bluesky.magics import BlueskyMagics

        get_ipython().register_magics(BlueskyMagics)

    if bec:
        # Set up the BestEffortCallback.
        from bluesky.callbacks.best_effort import BestEffortCallback

        _bec = BestEffortCallback()
        RE.subscribe(_bec)
        ns["bec"] = _bec
        ns["peaks"] = _bec.peaks  # just as alias for less typing

    if mpl:
        # Import matplotlib and put it in interactive mode.
        import matplotlib.pyplot as plt

        ns["plt"] = plt
        plt.ion()

        # Make plots update live while scans run.
        if LooseVersion(bluesky_version) < LooseVersion("1.6.0"):
            from bluesky.utils import install_kicker

            install_kicker()

    if epics_context:
        # Create a context in the underlying EPICS client.
        from ophyd import setup_ophyd

        setup_ophyd()

    if configure_logging:
        configure_bluesky_logging(ipython=get_ipython())

    if ipython_exc_logging:
        # IPython logging must be enabled separately
        from nslsii.common.ipynb.logutils import log_exception

        configure_ipython_exc_logging(exception_logger=log_exception,
                                      ipython=get_ipython())

    # always configure %xmode minimal
    get_ipython().magic("xmode minimal")

    # convenience imports
    # some of the * imports are for 'back-compatibility' of a sort -- we have
    # taught BL staff to expect LiveTable and LivePlot etc. to be in their
    # namespace
    import numpy as np

    ns["np"] = np

    import bluesky.callbacks

    ns["bc"] = bluesky.callbacks
    import_star(bluesky.callbacks, ns)

    import bluesky.plans

    ns["bp"] = bluesky.plans
    import_star(bluesky.plans, ns)

    import bluesky.plan_stubs

    ns["bps"] = bluesky.plan_stubs
    import_star(bluesky.plan_stubs, ns)
    # special-case the commonly-used mv / mvr and their aliases mov / movr
    ns["mv"] = bluesky.plan_stubs.mv
    ns["mvr"] = bluesky.plan_stubs.mvr
    ns["mov"] = bluesky.plan_stubs.mov
    ns["movr"] = bluesky.plan_stubs.movr

    import bluesky.preprocessors

    ns["bpp"] = bluesky.preprocessors

    import bluesky.callbacks.broker

    import_star(bluesky.callbacks.broker, ns)

    import bluesky.simulators

    import_star(bluesky.simulators, ns)

    user_ns.update(ns)
    return list(ns)
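
In practice this is typically called once from an IPython profile startup
file. The sketch below assumes the 'chx' broker configuration from the
docstring example and a placeholder startup path; the simulated detector is
only for illustration.

# e.g. ~/.ipython/profile_collection/startup/00-base.py (placeholder path)
from nslsii import configure_base
from ophyd.sim import det

configure_base(get_ipython().user_ns, 'chx')

# configure_base has now added RE, db, bec, sd, count, bp, bps, ... to the
# session, so a simple plan can be run immediately:
RE(count([det], num=3))
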
Example 4
# Imports inferred for this clipped fragment (likely sources, not verbatim
# from the original script):
import matplotlib.pyplot as plt

from bluesky import RunEngine
from bluesky.callbacks.zmq import Publisher
from bluesky.plan_stubs import abs_set, trigger_and_read, wait
from bluesky.utils import install_kicker, short_uid
from ophyd.sim import SynSignal, hw
from rapidz import Stream
from shed.simple import SimpleFromEventStream
from xpdconf.conf import glbl_dict


# The listing starts mid-function; the enclosing per-step plan definition
# below is inferred from the indentation:
def per_step(detectors, step, pos_cache):
    def move():
        grp = short_uid("set")
        for motor, pos in step.items():
            if pos == pos_cache[motor]:
                # This step does not move this motor.
                continue
            yield from abs_set(motor, pos, group=grp)
            pos_cache[motor] = pos
        yield from wait(group=grp)

    motors = step.keys()
    yield from move()
    plt.pause(.001)
    yield from trigger_and_read(list(detectors) + list(motors))


install_kicker()
p = Publisher(glbl_dict["inbound_proxy_address"])
hw = hw()
import numpy as np

rand_img = SynSignal(
    func=lambda: np.array(np.random.random((10, 10))),
    name="img",
    labels={"detectors"},
)
RE = RunEngine()
# build the pipeline
raw_source = Stream()
raw_output = SimpleFromEventStream(
    "event", ("data", "det_a"), raw_source, principle=True
)
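
A hedged sketch of how a pipeline like this is usually wired up (the original
listing is clipped here): documents from the RunEngine are emitted into
raw_source as (name, doc) pairs, and each value extracted by
SimpleFromEventStream is printed. Using hw.ab_det, whose events carry a
'det_a' data key, is an assumption about how the truncated example continues.

from bluesky.plans import count

raw_output.sink(print)                           # print each extracted 'det_a' value
RE.subscribe(lambda *nd: raw_source.emit(nd))    # feed (name, doc) pairs into the stream
RE(count([hw.ab_det], num=5))
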
Example 5
def configure_base(user_ns,
                   broker_name,
                   *,
                   bec=True,
                   epics_context=True,
                   magics=True,
                   mpl=True,
                   ophyd_logging=True,
                   pbar=True):
    """
    Perform base setup and instantiation of important objects.

    This factory function instantiates the following and adds them to the
    namespace:

    * ``RE`` -- a RunEngine
    * ``db`` -- a Broker (from "databroker"), subscribed to ``RE``
    * ``bec`` -- a BestEffortCallback, subscribed to ``RE``
    * ``peaks`` -- an alias for ``bec.peaks``
    * ``sd`` -- a SupplementalData preprocessor, added to ``RE.preprocessors``
    * ``pbar_manager`` -- a ProgressBarManager, set as the ``RE.waiting_hook``

    And it performs some low-level configuration:

    * creates a context in ophyd's control layer (``ophyd.setup_ophyd()``)
    * turns on interactive plotting (``matplotlib.pyplot.ion()``)
    * bridges the RunEngine and Qt event loops
      (``bluesky.utils.install_kicker()``)
    * logs ERROR-level log messages from ophyd to standard out

    Parameters
    ----------
    user_ns: dict
        a namespace --- for example, ``get_ipython().user_ns``
    broker_name : string
        Name of databroker configuration.
    bec : boolean, optional
        True by default. Set False to skip BestEffortCallback.
    epics_context : boolean, optional
        True by default. Set False to skip ``setup_ophyd()``.
    magics : boolean, optional
        True by default. Set False to skip registration of custom IPython
        magics.
    mpl : boolean, optional
        True by default. Set False to skip matplotlib ``ion()`` and event-loop
        bridging.
    ophyd_logging : boolean, optional
        True by default. Set False to skip ERROR-level log configuration for
        ophyd.
    pbar : boolean, optional
        True by default. Set False to skip ProgressBarManager.

    Returns
    -------
    names : list
        list of names added to the namespace
    
    Examples
    --------
    Configure IPython for CHX.

    >>> configure_base(get_ipython().user_ns, 'chx');
    """
    ns = {}  # We will update user_ns with this at the end.

    # Set up a RunEngine and use metadata backed by a sqlite file.
    from bluesky import RunEngine
    from bluesky.utils import get_history
    RE = RunEngine(get_history())
    ns['RE'] = RE

    # Set up SupplementalData.
    # (This is a no-op until devices are added to it,
    # so there is no need to provide a 'skip_sd' switch.)
    from bluesky import SupplementalData
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    ns['sd'] = sd

    if broker_name:
        # Set up a Broker.
        from databroker import Broker
        db = Broker.named(broker_name)
        ns['db'] = db
        RE.subscribe(db.insert)

    if pbar:
        # Add a progress bar.
        from bluesky.utils import ProgressBarManager
        pbar_manager = ProgressBarManager()
        RE.waiting_hook = pbar_manager
        ns['pbar_manager'] = pbar_manager

    if magics:
        # Register bluesky IPython magics.
        from bluesky.magics import BlueskyMagics
        get_ipython().register_magics(BlueskyMagics)

    if bec:
        # Set up the BestEffortCallback.
        from bluesky.callbacks.best_effort import BestEffortCallback
        _bec = BestEffortCallback()
        RE.subscribe(_bec)
        ns['bec'] = _bec
        ns['peaks'] = _bec.peaks  # just as alias for less typing

    if mpl:
        # Import matplotlib and put it in interactive mode.
        import matplotlib.pyplot as plt
        ns['plt'] = plt
        plt.ion()

        # Make plots update live while scans run.
        from bluesky.utils import install_kicker
        install_kicker()

    if epics_context:
        # Create a context in the underlying EPICS client.
        from ophyd import setup_ophyd
        setup_ophyd()

    if ophyd_logging:
        # Turn on error-level logging, particularly useful for knowing when
        # pyepics callbacks fail.
        import logging
        import ophyd.ophydobj
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        ophyd.ophydobj.logger.addHandler(ch)

    # convenience imports
    # some of the * imports are for 'back-compatibility' of a sort -- we have
    # taught BL staff to expect LiveTable and LivePlot etc. to be in their
    # namespace
    import numpy as np
    ns['np'] = np

    import bluesky.callbacks
    ns['bc'] = bluesky.callbacks
    import_star(bluesky.callbacks, ns)

    import bluesky.plans
    ns['bp'] = bluesky.plans
    import_star(bluesky.plans, ns)

    import bluesky.plan_stubs
    ns['bps'] = bluesky.plan_stubs
    import_star(bluesky.plan_stubs, ns)
    # special-case the commonly-used mv / mvr and their aliases mov / movr
    ns['mv'] = bluesky.plan_stubs.mv
    ns['mvr'] = bluesky.plan_stubs.mvr
    ns['mov'] = bluesky.plan_stubs.mov
    ns['movr'] = bluesky.plan_stubs.movr

    import bluesky.preprocessors
    ns['bpp'] = bluesky.preprocessors

    import bluesky.callbacks.broker
    import_star(bluesky.callbacks.broker, ns)

    import bluesky.simulators
    import_star(bluesky.simulators, ns)

    user_ns.update(ns)
    return list(ns)
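
Once configure_base has run, the SupplementalData preprocessor it registered
(sd) can record baseline readings around every run. A short sketch in the
configured IPython session, using simulated hardware from ophyd.sim purely
for illustration:

from ophyd.sim import det, motor

sd.baseline.append(motor)   # read 'motor' at the start and end of every run
RE(count([det], num=3))     # bec prints the baseline table plus the scan table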