Example 1
import sys

import pytest

# Imports and fixture decorator assumed for this snippet (pcdsdaq test-suite style)
import pcdsdaq.daq as daq_module
from pcdsdaq.daq import Daq
from pcdsdaq.sim import pydaq as sim_pydaq


@pytest.fixture(scope='function')
def daq(RE, sim):
    if sys.platform == 'win32':
        pytest.skip('Cannot make DAQ on windows')
    sim_pydaq.conn_err = None
    daq_module.BEGIN_THROTTLE = 0
    daq = Daq(RE=RE)
    yield daq
    try:
        # Sim daq can freeze pytest's exit if we don't end the run
        daq.end_run()
    except Exception:
        pass
Example 2
def test_serp_scan():
    """Note: run this standalone, not inside mfx hutch python."""
    import numpy as np
    from bluesky import RunEngine
    from bluesky.callbacks.best_effort import BestEffortCallback
    from ophyd.sim import motor1, motor2
    from ophyd.status import StatusBase
    from pcdsdaq.daq import Daq
    from pcdsdaq.sim import set_sim_mode

    class FakeSeq:
        def trigger(self):
            print('Triggered the sequencer!')
            status = StatusBase()
            status.set_finished()
            return status

    set_sim_mode(True)
    RE = RunEngine({})
    bec = BestEffortCallback()
    RE.subscribe(bec)
    seq = FakeSeq()
    daq = Daq(RE=RE)

    # serp_seq_scan is assumed to be defined in (or imported by) the enclosing module
    RE(serp_seq_scan(motor1, np.arange(100, 200, 10), motor2, [0, 100], seq))
Example 3
def get_daq_objs(platform, RE):
    """
    Create an instance of ``Daq``.

    This makes sure that the ``Daq`` object is set up to connect to a
    hutch's daq, and that it is ready to use in scans with ``RE``.

    Parameters
    ----------
    platform: ``int``
        The daq platform variable associated with the hutch's daq.

    RE: ``RunEngine``
        The session's ``RE`` object

    Returns
    -------
    objs: ``dict``
        A dictionary that contains a single key, ``daq``, and a ready instance
        of the ``Daq`` class.
    """
    daq = Daq(platform=platform, RE=RE)
    return dict(daq=daq)
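
A minimal usage sketch for ``get_daq_objs`` (the ``RunEngine`` setup and the
platform number below are illustrative assumptions, not taken from the source):

    from bluesky import RunEngine

    RE = RunEngine({})
    objs = get_daq_objs(platform=0, RE=RE)  # platform 0 is a placeholder
    daq = objs['daq']  # single-key dict, ready for use in scans with RE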
Example 4
# Assumed to be a pytest fixture; decorator and imports are not shown in the source snippet
@pytest.fixture(scope='function')
def daq(RE):
    set_sim_mode(True)
    yield Daq(RE=RE, hutch_name='tst')
    set_sim_mode(False)
Example 5
with safe_load('long pulse waveplates'):
    from pcdsdevices import epics_motor
    wp_ab = epics_motor.IMS('MEC:NS1:MMS:02', name='waveplate AB')
    wp_ef = epics_motor.IMS('MEC:NS1:MMS:01', name='waveplate EF')
    wp_gh = epics_motor.Newport('MEC:LAS:MMN:30', name='waveplate GH')
    wp_ij = epics_motor.Newport('MEC:LAS:MMN:29', name='waveplate IJ')

with safe_load('testmotors'):
    from pcdsdevices import epics_motor
    test_1 = epics_motor.Newport('MEC:PPL:MMN:23', name='test 1')
    test_2 = epics_motor.Newport('MEC:PPL:MMN:24', name='test 2')

with safe_load('daq'):
    from pcdsdaq.daq import Daq
    from mec.db import RE
    daq = Daq(RE=RE)
#with safe_load('Nanosecond laser'):
#    from .laser import NanoSecondLaser
#    nsl = NanoSecondLaser()

with safe_load('SPL Modes'):
    from .spl_modes import DG645
    from .spl_modes import UniblitzEVRCH

    las_dg = DG645('MEC:LAS:DDG:08', name='uniblitz dg')
    las_evr = UniblitzEVRCH('LAS:MEC:EVR:03:TRIG2', name='uniblitz las evr')
    uni_evr = UniblitzEVRCH('EVR:MEC:USR01:TRIG5', name='uniblitz usr evr')

    wt = 0.5  # sleep time between commands

    def spl_align():
        ...  # remainder of this example is truncated in the source
Example 6
def load_conf(conf, hutch_dir=None):
    """
    Step through the object loading procedure, given a configuration.

    The procedure is:

    - Check the configuration for errors
    - Display the banner by calling `hutch_banner`
    - Use ``hutch`` key to create ``hutch.db`` importable namespace to
      stash the objects. This will be literally ``hutch.db`` if hutch is
      not provided, or the hutch name e.g. ``mfx.db``.
    - Create a ``RunEngine``, ``RE``
    - Import ``plan_defaults`` and include as ``p``, ``plans``
    - Create a ``daq`` object with ``RE`` registered.
    - Create a ``scan_pvs`` object and ``enable`` it.
    - Use ``hutch`` and ``daq_platform`` keys to create the ``elog`` object
      and configure it to match the correct experiment.
    - Use ``db`` key to load devices from the ``happi`` beamline database
      and create a ``hutch_beampath`` object from ``lightpath``
    - Use ``hutch`` key to load detector objects from the ``camviewer``
      configuration file.
    - Use ``experiment`` key to select the current experiment

        - If ``experiment`` was missing, autoselect experiment using
          ``hutch`` key

    - Use current experiment to load experiment objects from questionnaire
    - Use ``load`` key to bring up the user's ``beamline`` modules
    - Use current experiment to load experiment file

    If a conf key is missing, we'll note it in a ``logger.info`` message.
    If an extra conf entry is found, we'll note it in a ``logger.warning``
    message.
    If an automatically selected file is missing, we'll note it in a
    ``logger.info`` message.
    All other errors will be noted in a ``logger.error`` message.

    Parameters
    ----------
    conf: ``dict``
        ``dict`` interpretation of the original yaml file

    hutch_dir: ``Path`` or ``str``, optional
        ``Path`` object that points to the hutch's launch directory. This is
        the directory that includes the ``experiments`` directory and a
        hutchname directory e.g. ``mfx``
        If this is missing, we'll be unable to write the ``db.txt`` file,
        do relative filepath database selection for ``happi``,
        or establish a preset positions directory.

    Returns
    -------
    objs: ``dict{str: object}``
        See the return value of `load`
    """
    # Warn user about excess config entries
    for key in conf:
        if key not in VALID_KEYS:
            txt = ('Found %s in configuration, but this is not a valid key. '
                   'The valid keys are %s')
            logger.warning(txt, key, VALID_KEYS)

    # Grab configurations from dict, set defaults, show missing
    try:
        hutch = conf['hutch']
        if isinstance(hutch, str):
            hutch = hutch.lower()
        else:
            logger.error('Invalid hutch conf %s, must be string.', hutch)
            hutch = None
    except KeyError:
        hutch = None
        logger.info(('Missing hutch from conf. Will skip elog '
                     'and cameras.'))

    # Display the banner
    if hutch is None:
        hutch_banner()
    else:
        hutch_banner(hutch)

    try:
        db = conf['db']
        if isinstance(db, str):
            if db[0] == '/':
                db = Path(db)
            else:
                db = Path(hutch_dir) / db
        else:
            logger.error('Invalid db conf %s, must be string.', db)
            db = None
    except KeyError:
        db = None
        logger.info(('Missing db from conf. Will skip loading from shared '
                     'database.'))
    try:
        load = conf['load']
        if not isinstance(load, (str, list)):
            logger.error('Invalid load conf %s, must be string or list', load)
            load = None
    except KeyError:
        load = None
        logger.info('Missing load from conf. Will skip loading hutch files.')

    try:
        experiment = conf['experiment']
        if not isinstance(experiment, str):
            logger.error(
                'Invalid experiment selection %s, must be a string '
                'matching the elog experiment name.', experiment)
            experiment = None
    except KeyError:
        experiment = None
        if hutch is None:
            logger.info(('Missing hutch and experiment from conf. Will not '
                         'load objects from questionnaire or experiment '
                         'file.'))

    try:
        # This is an internal variable here for note-keeping. The ELog uses
        # this to determine if we are in the secondary or primary DAQ mode
        default_platform = True
        platform_info = conf['daq_platform']
        hostname = gethostname()
        try:
            daq_platform = platform_info[hostname]
            logger.info('Selected %s daq platform: %s', hostname, daq_platform)
            default_platform = False
        except KeyError:
            daq_platform = platform_info['default']
            logger.info('Selected default %s daq platform: %s', hutch,
                        daq_platform)
    except KeyError:
        daq_platform = 0
        logger.info('Selected default hutch-python daq platform: 0')

    # Make cache namespace
    cache = LoadCache((hutch or 'hutch') + '.db', hutch_dir=hutch_dir)

    # Make RunEngine
    RE = RunEngine({})
    initialize_qt_teleporter()
    bec = BestEffortCallback()
    RE.subscribe(bec)
    cache(RE=RE)

    # Collect Plans
    cache(bp=plan_defaults.plans)
    cache(bps=plan_defaults.plan_stubs)
    cache(bpp=plan_defaults.preprocessors)

    # Daq
    with safe_load('daq'):
        cache(daq=Daq(RE=RE, hutch_name=hutch))

    # Scan PVs
    if hutch is not None:
        with safe_load('scan_pvs'):
            scan_pvs = ScanVars('{}:SCAN'.format(hutch.upper()),
                                name='scan_pvs',
                                RE=RE)
            scan_pvs.enable()
            cache(scan_pvs=scan_pvs)

    # Elog
    if hutch is not None:
        with safe_load('elog'):
            # Use whether or not we fell back to the default_platform to
            # decide if we are at a specialty station
            if default_platform:
                logger.debug("Using primary experiment ELog")
                kwargs = dict()
            else:
                logger.info("Configuring ELog to post to secondary experiment")
                kwargs = {'station': '1'}
            cache(elog=HutchELog.from_conf(hutch.upper(), **kwargs))

    # Shared global devices for LCLS
    with safe_load('lcls PVs'):
        cache(**global_devices())

    # Happi db and Lightpath
    if db is not None:
        with safe_load('database'):
            happi_objs = get_happi_objs(db, hutch)
            cache(**happi_objs)
            bp = get_lightpath(db, hutch)
            if bp.devices:
                cache(**{"{}_beampath".format(hutch.lower()): bp})

    # ArchApp
    with safe_load('archapp'):
        cache(archive=EpicsArchive())

    # Camviewer
    if hutch is not None:
        with safe_load('camviewer config'):
            objs = read_camviewer_cfg(CAMVIEWER_CFG.format(hutch))
            cache(camviewer=SimpleNamespace(**objs))

    # Simulated hardware
    with safe_load('simulated hardware'):
        cache(sim=sim.get_hw())

    # Auto select experiment if we need to
    if experiment is None:
        if hutch is not None:
            try:
                # Experiment names look like e.g. 'xpplp1216'
                experiment = get_current_experiment(hutch)
                logger.info('Selected active experiment %s', experiment)
            except Exception:
                err = 'Failed to select experiment automatically'
                logger.error(err)
                logger.debug(err, exc_info=True)

    # Process experiment name a bit
    if experiment is not None:
        if hutch in experiment:
            full_expname = experiment
            raw_expname = experiment.replace(hutch, '', 1)
        else:
            full_expname = hutch + experiment
            raw_expname = experiment

    # Load questionnaire
    if experiment is not None:
        qs_objs = get_qs_objs(full_expname)
        cache(**qs_objs)

    # Load user/beamline files
    if load is not None:
        load_objs = get_user_objs(load)
        cache(**load_objs)

    # Load experiment file
    if experiment is not None:
        user = get_exp_objs(raw_expname)
        for name, obj in qs_objs.items():
            setattr(user, name, obj)
        cache(x=user, user=user)

    # Default namespaces
    with safe_load('default groups'):
        default_class_namespace('ophyd.PositionerBase', 'motors', cache)
        default_class_namespace('Slits', 'slits', cache)
        default_class_namespace('pcdsdaq.ami.AmiDet', 'detectors', cache)

        # Hotfix/disabled until we fix issues here
        # Tree namespace can cause havoc and break top-level devices
        #
        # if hutch is not None:
        #     tree = tree_namespace(scope='hutch_python.db')
        #     # Prune meta, remove branches with only one object
        #     for name, space in tree.__dict__.items():
        #         if count_ns_leaves(space) > 1:
        #             cache(**{name: space})

        all_objs = copy(cache.objs)
        cache(a=all_objs, all_objects=all_objs)

    # Install Presets
    if hutch_dir is not None:
        with safe_load('position presets'):
            presets_dir = Path(hutch_dir) / 'presets'
            beamline_presets = presets_dir / 'beamline'
            preset_paths = [presets_dir, beamline_presets]
            if experiment is not None:
                experiment_presets = presets_dir / raw_expname
                preset_paths.append(experiment_presets)
            for path in preset_paths:
                if not path.exists():
                    path.mkdir()
                    path.chmod(0o777)
            if experiment is None:
                setup_preset_paths(hutch=beamline_presets)
            else:
                setup_preset_paths(hutch=beamline_presets,
                                   exp=experiment_presets)

    # Write db.txt info file to the user's module
    try:
        cache.write_file()
    except OSError:
        logger.warning('No permissions to write db.txt file')

    return cache.objs.__dict__
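
A minimal sketch of a configuration ``dict`` accepted by ``load_conf`` (the key
names follow the docstring above; every value is an illustrative assumption):

    conf = {
        'hutch': 'tst',                  # lowercase hutch name
        'db': 'happi_db.json',           # resolved relative to hutch_dir
        'load': 'tst.beamline',          # user module(s) to import
        'experiment': 'tstlr1234',       # placeholder experiment name
        'daq_platform': {'default': 0},  # hostname -> platform mapping
    }
    objs = load_conf(conf, hutch_dir='/path/to/tst')  # path is a placeholder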
Example 7
class User(object):
    """User class for the Wall LR15 experiment. 
    
    To anyone thinking of using this, none of the functionality has been tested 
    with sxrpython. The plans were ported for organizational reasons, not
    functionality, therefore any attempts to use this should be done with 
    caution.
    """
    # Devices
    vitara = Vitara("LAS:FS2:VIT", name="Vitara")
    delay = Newport("SXR:LAS:H1:DLS:01", name="Delay Stage")
    syn_motor_1 = SynAxis(name="Syn Motor 1")
    daq = Daq(platform=0)

    def __init__(self, *args, **kwargs):
        # If this is ever tested, remove the following line and update the
        # docstring
        logger.warning("Functionality not tested with sxrpython, use with "
                       "caution!")

    def delay_scan(self,
                   start,
                   stop,
                   num=None,
                   step_size=None,
                   events_per_point=1000,
                   record=True,
                   controls=None,
                   wait=None,
                   return_to_start=True,
                   delay_const=1):
        """
        Perform a scan using the AMO delay stage and SXR vitara timing system.

        For this function to interface with the DAQ properly, it must be run from
        the same machine the DAQ session is running from (usually sxr-daq). Also,
        the DAQ must be allocated before the function is run.

        Parameters
        ----------
        start : float
            Starting delay for the scan in ns.

        stop : float
            Stopping delay for the scan in ns.

        num : int
            Number of steps to take, including the endpoints.

        step_size : float
            Step size to use for the scan.

        events_per_point : int, optional
            Number of daq events to take at each step of the scan.

        record : bool, optional
            Record the data as a DAQ run.

        controls : dict, optional
            Dictionary containing the EPICS pvs to record in the DAQ. Has the form:
            {"motor_name" : motor.position}

        wait : int, optional
            The amount of time to wait at each step.

        return_to_start : bool, optional
            Return the vitara and the delay stage to their starting positions.

        delay_const : float, optional
            Scale the delay move delta by this amount.

        Raises
        ------
        InputError
            If neither the number of steps nor the step_size is provided.

        ValueError
            If the step_size provided does not yield a whole number of steps.
        """
        # Check to make sure a number of steps or step size is provided
        if num is None and step_size is None:
            raise InputError("Must specify either the number of steps to take "
                             "or the step size to use for the scan.")
        # Check that the step size is valid
        elif num is None and step_size is not None:
            num = (stop - start + 1) / step_size
            if num % 1:
                raise ValueError(
                    "Step size '{0}' does not produce an integer "
                    "number of steps for starting delay '{1}' and "
                    "stopping delay '{2}'".format(step_size, start, stop))
        yield from _delay_scan(self.daq,
                               self.vitara,
                               self.delay,
                               start,
                               stop,
                               num,
                               controls=controls,
                               return_to_start=return_to_start,
                               record=record,
                               wait=wait,
                               events_per_point=events_per_point,
                               delay_const=delay_const)

    def delay_scan_rel(self, start_rel, stop_rel, *args, **kwargs):
        """
        Perform a scan relative to the current delay of the system. See
        `delay_scan` for full documentation on other parameters.

        Parameters
        ----------
        start_rel : float
            Relative starting delay for the scan in ns.

        stop_rel : float
            Relative stopping delay for the scan in ns.
        """
        pos = self.vitara.position
        yield from self.delay_scan(pos + start_rel, pos + stop_rel, *args,
                                   **kwargs)
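
A hedged usage sketch for the scans above (delay values, step counts, and the
standalone ``RunEngine`` are placeholders; the docstring's requirements about
allocating the DAQ and running on the DAQ host still apply):

    from bluesky import RunEngine

    RE = RunEngine({})
    user = User()
    # Scan the delay from 0 ns to 10 ns in 11 steps, 1000 events per point
    RE(user.delay_scan(0, 10, num=11, events_per_point=1000, record=False))
    # Or scan +/- 1 ns around the current delay
    RE(user.delay_scan_rel(-1, 1, num=21))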
Example 8
# Assumed to be a pytest fixture; decorator and imports are not shown in the source snippet
@pytest.fixture(scope='function')
def daq(RE):
    set_sim_mode(True)
    sim_pydaq.conn_err = None
    return Daq(RE=RE, platform=0)
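
A sketch of how a test might request the fixture above (the test body is an
assumption; it relies on the simulated DAQ's ``begin``/``end_run`` calls, with
``end_run`` also appearing in Example 1):

    def test_begin_end(daq):
        # pytest injects the simulated Daq returned by the fixture above
        daq.begin(events=1, wait=True)
        daq.end_run()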