Example #1
def run_publisher(in_port, data_path, quiet=False):
    """
    Acquire data in an infinite loop and publish it.
    """
    publisher = Publisher(f"localhost:{in_port}")
    RE = RunEngine(loop=asyncio.new_event_loop())
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    sd.baseline.extend([motor1, motor2])
    RE.subscribe(publisher)

    def factory(name, doc):
        serializer = Serializer(data_path / "abc", flush=True)
        return [serializer], []

    rr = RunRouter([factory])
    RE.subscribe(rr)
    if not quiet:
        RE.subscribe(LiveTable(["motor", "det"]))

    motor.delay = 0.2
    det.kind = "hinted"

    def infinite_plan():
        while True:
            for i in range(1, 5):
                yield from sleep(2)
                yield from scan([det], motor, -1, 1, 5 * i)

    # As a convenience, avoid collision with scan_ids of runs in the Catalog.
    RE.md["scan_id"] = 100
    try:
        RE(infinite_plan())
    finally:
        RE.halt()
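
The publisher above only emits documents; nothing in this example consumes them. A minimal consumer sketch, assuming a 0MQ proxy forwards documents from the Publisher's "in" port to an "out" port such as 5578 (the address and callback here are placeholders, not part of the original example):

from bluesky.callbacks.zmq import RemoteDispatcher

def print_name(name, doc):
    # Placeholder callback: report each document type as it arrives.
    print(f"received a {name} document")

dispatcher = RemoteDispatcher("localhost:5578")  # assumed proxy "out" address
dispatcher.subscribe(print_name)
dispatcher.start()  # blocks, dispatching documents until stopped
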
Example #2
def run_publisher(in_port, data_path):
    """
    Acquire data in an infinite loop and publish it.
    """
    import asyncio
    from bluesky.callbacks.zmq import Publisher
    from suitcase.jsonl import Serializer
    from ophyd.sim import noisy_det, motor1, motor2
    from bluesky.plans import count
    from bluesky.preprocessors import SupplementalData
    from bluesky.plan_stubs import sleep
    # Imports assumed by this excerpt:
    from bluesky import RunEngine
    from event_model import RunRouter
    publisher = Publisher(f'localhost:{in_port}')
    RE = RunEngine(loop=asyncio.new_event_loop())
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    sd.baseline.extend([motor1, motor2])
    RE.subscribe(publisher)

    def factory(name, doc):
        serializer = Serializer(data_path / 'abc')
        serializer('start', doc)
        return [serializer], []

    rr = RunRouter([factory])
    RE.subscribe(rr)

    def infinite_plan():
        while True:
            yield from sleep(3)
            yield from count([noisy_det], 20, delay=0.5)

    try:
        RE(infinite_plan())
    finally:
        RE.halt()
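
Both run_publisher examples assume a 0MQ proxy is already running between the Publisher and any RemoteDispatcher. A minimal sketch of such a proxy, assuming the 5577/5578 in/out port convention used elsewhere in these examples:

from bluesky.callbacks.zmq import Proxy

# Publishers connect to the "in" port (5577); RemoteDispatchers connect to the "out" port (5578).
proxy = Proxy(5577, 5578)
proxy.start()  # runs forever
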
Example #3
 def run_plans(self):
     RE = RunEngine({})
     RE.log.setLevel(logging.INFO)
 
     publisher = Publisher('localhost:5567')
     RE.subscribe(publisher)
     
     for plan in self.PLANS:
         print('Starting Scan...')
         RE(plan)
         print('Scan Done...')
Example #4
 def __init__(self, config: XPDConfig):
     self.config = config
     self.analysis = AnalysisStream(config)
     self.func = self.config.functionality
     if self.func["do_calibration"]:
         self.calibration = Calibration(config)
     if self.func["dump_to_db"]:
         self.analysis.subscribe(self.config.an_db.v1.insert)
     if self.func["export_files"]:
         self.analysis.subscribe(Exporter(config))
     if self.func["visualize_data"]:
         self.analysis.subscribe(Visualizer(config))
     if self.func["send_messages"]:
         self.analysis.subscribe(Publisher(**self.config.publisher_config))
Example #5
def main():
    from bluesky.callbacks.zmq import Publisher, RemoteDispatcher

    parser = argparse.ArgumentParser(
        description=
        "Listen for unfilled documents over 0MQ and emit filled ones.")
    parser.add_argument(
        "receive_from",
        type=str,
        help="bluesky-0MQ-proxy out address, given as in localhost:5578",
    )
    parser.add_argument(
        "send_to",
        type=str,
        help="bluesky-0MQ-proxy in address, given as in localhost:5578",
    )
    args = parser.parse_args()

    # Data flows through:
    # * RemoteDispatcher (0MQ)
    # * Accumulator (caches until stop doc is received)
    # * EmittingFiller (fills external data)
    # * Publisher (0MQ)

    publisher = Publisher(args.send_to)

    handler_registry = discover_handlers()

    def factory(name, doc):
        filler = EmittingFiller(handler_registry,
                                inplace=False,
                                callback=publisher,
                                coerce="force_numpy")
        accumulator = Accumulator(callback=filler)
        return [accumulator], []

    rr = RunRouter([factory])
    rd = RemoteDispatcher(args.receive_from)
    rd.subscribe(rr)

    print(f"Listening to {args.receive_from}")

    try:
        rd.start()  # runs forever
    except KeyboardInterrupt:
        print("Terminated by user; exiting")
Example #6
 def __init__(self, config: XPDConfig):
     self.config = config
     self.functionality = self.config.functionality
     self.analysis = [AnalysisStream(config)]
     self.calibration = [Calibration(config)] if self.functionality["do_calibration"] else []
     if self.functionality["dump_to_db"]:
         self.analysis[0].subscribe(self.config.an_db.v1.insert)
     if self.functionality["export_files"]:
         self.analysis[0].subscribe(Exporter(config))
     if self.functionality["visualize_data"]:
         self.analysis[0].subscribe(Visualizer(config))
     if self.functionality["send_messages"]:
         self.analysis[0].subscribe(
             Publisher(**self.config.publisher_config))
     if self.functionality["export_files_in_xpdan_style"]:
         self.analysis[0].subscribe(ExporterXpdan(config))
Example #7
import logging

from nslsii import configure_base
from IPython import get_ipython
from bluesky.callbacks.zmq import Publisher
import functools
from ophyd.signal import EpicsSignal, EpicsSignalRO

EpicsSignal.set_defaults(connection_timeout=10)
EpicsSignalRO.set_defaults(connection_timeout=10)

configure_base(get_ipython().user_ns, "jpls")

publisher = Publisher("xf12id1-ws2:5577")
RE.subscribe(publisher)

# Optional: set any metadata that rarely changes.
RE.md["beamline_id"] = "OPLS"

# For debug mode
from bluesky.utils import ts_msg_hook
# RE.msg_hook = ts_msg_hook

# THIS NEEDS TO MOVE UPSTREAM
async def reset_user_position(msg):
    obj = msg.obj
    (val,) = msg.args

    old_value = obj.position
    obj.set_current_position(val)
    print(f"{obj.name} reset from {old_value:.4f} to {val:.4f}")
Example #8
from datetime import datetime  # needed for EPICS_EPOCH below

from databroker.assets.handlers import AreaDetectorHDF5TimestampHandler
import pandas as pd

EPICS_EPOCH = datetime(1990, 1, 1, 0, 0)


def convert_AD_timestamps(ts):
    return pd.to_datetime(ts, unit="s", origin=EPICS_EPOCH,
                          utc=True).dt.tz_convert("US/Eastern")


# subscribe the zmq plotter

from bluesky.callbacks.zmq import Publisher

publisher = Publisher("xf18id-srv1:5577")
RE.subscribe(publisher)

# nslsii.configure_base(get_ipython().user_ns, 'fxi', bec=False)
"""
def ts_msg_hook(msg):
    t = '{:%H:%M:%S.%f}'.format(datetime.now())
    msg_fmt = '{: <17s} -> {!s: <15s} args: {}, kwargs: {}'.format(
        msg.command,
        msg.obj.name if hasattr(msg.obj, 'name') else msg.obj,
        msg.args,
        msg.kwargs)
    print('{} {}'.format(t, msg_fmt))

RE.msg_hook = ts_msg_hook
"""
Example #9
import logging

import pandas as pd  # used by the handler below

logger = logging.getLogger('iss_processor')

from databroker import Broker
from ophyd.sim import NumpySeqHandler

processed = Broker.named('temp')  # makes a second, unique temporary Broker

# PROCESSING

import sys
import xas.interpolate
import xas.xray
from event_model import Filler, compose_run, DocumentRouter, pack_event_page, verify_filled
from bluesky.callbacks.zmq import RemoteDispatcher, Publisher

publisher = Publisher('localhost:5577', prefix=b'interpolated')

# HANDLERS COPIED FROM PROFILE -- MOVE THESE TO CENTRAL PLACE!!


# New handlers to support reading files into a Pandas dataframe
class PizzaBoxAnHandlerTxtPD:
    "Read PizzaBox text files using info from filestore."

    def __init__(self, fpath):
        self.df = pd.read_table(fpath,
                                names=['ts_s', 'ts_ns', 'index', 'adc'],
                                sep=' ')

    def __call__(self):
        return self.df
Example #10
def test_zmq(fresh_RE):
    # COMPONENT 1
    # Run a 0MQ proxy on a separate process.
    def start_proxy():
        Proxy(5567, 5568).start()

    proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
    proxy_proc.start()
    time.sleep(5)  # Give this plenty of time to start up.

    # COMPONENT 2
    # Run a Publisher and a RunEngine in this main process.

    RE = fresh_RE
    p = Publisher(RE, '127.0.0.1:5567')  # noqa
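    # Note: this excerpt uses an older Publisher signature (RunEngine first);
    # later examples pass the address first and the RunEngine as RE= (see Example #17).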

    # COMPONENT 3
    # Run a RemoteDispatcher on another separate process. Pass the documents
    # it receives over a Queue to this process, so we can count them for our
    # test.

    def make_and_start_dispatcher(queue):
        def put_in_queue(name, doc):
            print('putting ', name, 'in queue')
            queue.put((name, doc))

        d = RemoteDispatcher('127.0.0.1:5568')
        d.subscribe('all', put_in_queue)
        print("REMOTE IS READY TO START")
        d._loop.call_later(9, d.stop)
        d.start()

    queue = multiprocessing.Queue()
    dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
                                              daemon=True,
                                              args=(queue, ))
    dispatcher_proc.start()
    time.sleep(5)  # As above, give this plenty of time to start.

    # Generate two documents. The Publisher will send them to the proxy
    # device over 5567, and the proxy will send them to the
    # RemoteDispatcher over 5568. The RemoteDispatcher will push them into
    # the queue, where we can verify that they round-tripped.

    local_accumulator = []

    def local_cb(name, doc):
        local_accumulator.append((name, doc))

    RE([Msg('open_run'), Msg('close_run')], local_cb)
    time.sleep(1)

    # Get the two documents from the queue (or timeout --- test will fail)
    remote_accumulator = []
    for i in range(2):
        remote_accumulator.append(queue.get(timeout=2))
    p.close()
    proxy_proc.terminate()
    dispatcher_proc.terminate()
    proxy_proc.join()
    dispatcher_proc.join()
    assert remote_accumulator == local_accumulator
Example #11
def test_zmq_no_RE_newserializer(RE):
    # COMPONENT 1
    # Run a 0MQ proxy on a separate process.
    def start_proxy():
        Proxy(5567, 5568).start()

    proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
    proxy_proc.start()
    time.sleep(5)  # Give this plenty of time to start up.

    # COMPONENT 2
    # Run a Publisher and a RunEngine in this main process.

    p = Publisher('127.0.0.1:5567', serializer=cloudpickle.dumps)  # noqa

    # COMPONENT 3
    # Run a RemoteDispatcher on another separate process. Pass the documents
    # it receives over a Queue to this process, so we can count them for our
    # test.

    def make_and_start_dispatcher(queue):
        def put_in_queue(name, doc):
            print('putting ', name, 'in queue')
            queue.put((name, doc))

        d = RemoteDispatcher('127.0.0.1:5568', deserializer=cloudpickle.loads)
        d.subscribe(put_in_queue)
        print("REMOTE IS READY TO START")
        d.loop.call_later(9, d.stop)
        d.start()

    queue = multiprocessing.Queue()
    dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
                                              daemon=True, args=(queue,))
    dispatcher_proc.start()
    time.sleep(5)  # As above, give this plenty of time to start.

    # Generate two documents. The Publisher will send them to the proxy
    # device over 5567, and the proxy will send them to the
    # RemoteDispatcher over 5568. The RemoteDispatcher will push them into
    # the queue, where we can verify that they round-tripped.

    local_accumulator = []

    def local_cb(name, doc):
        local_accumulator.append((name, doc))

    RE([Msg('open_run'), Msg('close_run')], local_cb)

    # This time the Publisher isn't attached to an RE. Send the documents
    # manually. (The idea is, these might have come from a Broker instead...)
    for name, doc in local_accumulator:
        p(name, doc)
    time.sleep(1)

    # Get the two documents from the queue (or timeout --- test will fail)
    remote_accumulator = []
    for i in range(2):
        remote_accumulator.append(queue.get(timeout=2))
    p.close()
    proxy_proc.terminate()
    dispatcher_proc.terminate()
    proxy_proc.join()
    dispatcher_proc.join()
    assert remote_accumulator == local_accumulator
Example #12
import logging
import uuid

from bluesky import RunEngine
from bluesky.callbacks.zmq import Publisher
from bluesky.callbacks.best_effort import BestEffortCallback

from databroker._drivers.mongo_normalized import BlueskyMongoCatalog

# The Serializer used below is presumably suitcase.mongo_normalized.Serializer.
from suitcase.mongo_normalized import Serializer

from bluesky_adaptive.per_start import adaptive_plan

from ophyd.sim import *

RE = RunEngine()

mds = f"mongodb://localhost:27017/databroker-test-{uuid.uuid4()}"
fs = f"mongodb://localhost:27017/databroker-test-{uuid.uuid4()}"
serializer = Serializer(mds, fs)
catalog = BlueskyMongoCatalog(mds, fs)
p = Publisher("127.0.0.1:4567")
bec = BestEffortCallback()

logger = logging.getLogger("databroker")
logger.setLevel("DEBUG")
handler = logging.StreamHandler()
handler.setLevel("DEBUG")
logger.addHandler(handler)

RE.subscribe(serializer)
RE.subscribe(p)
RE.subscribe(bec)

to_brains = Publisher("127.0.0.1:4567", prefix=b"adaptive")

Example #13
from bluesky.callbacks.zmq import Publisher

Publisher(glbl['inbound_proxy_address'], RE=xrun)
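
Passing RE=xrun to the constructor subscribes the Publisher to that RunEngine, so the one-liner above is presumably equivalent to the explicit form used in the other examples:

publisher = Publisher(glbl['inbound_proxy_address'])
xrun.subscribe(publisher)
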
Example #14
import databroker
import bluesky.plans as bp
from bluesky import RunEngine
from bluesky.callbacks.zmq import Publisher
from ophyd.sim import motor, det

db = databroker.Broker.named('local')
RE = RunEngine()
RE.subscribe(db.insert)

pub = Publisher('localhost:2000', RE=RE)

for j in range(10):
    RE(bp.scan([det], motor, -10, 10, 100), sample=f'sample_{j}')
Example #15
import time
import copy
import numpy
import logging

# DEFINE DATABASE

from databroker import Broker
from ophyd.sim import NumpySeqHandler

raw = Broker.named('temp')
raw.reg.register_handler("NPY_SEQ", NumpySeqHandler)
# processed.reg.register_handler("NPY_SEQ", NumpySeqHandler)
raw.prepare_hook = lambda name, doc: copy.deepcopy(doc)

# ACQUISITION WITH LIVE PROCESSING

from bluesky import RunEngine
from ophyd.sim import img, motor
from bluesky.plans import scan
from bluesky.callbacks.zmq import Publisher

RE = RunEngine({})
RE.subscribe(raw.insert)
publisher = Publisher('localhost:5577', prefix=b'raw')
RE.subscribe(publisher)

uid, = RE(scan([img], motor, -1, 1, 3))
Example #16
def test_zmq_no_RE_newserializer(RE):
    cloudpickle = pytest.importorskip('cloudpickle')

    # COMPONENT 1
    # Run a 0MQ proxy on a separate process.
    def start_proxy():
        Proxy(5567, 5568).start()

    proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
    proxy_proc.start()
    time.sleep(5)  # Give this plenty of time to start up.

    # COMPONENT 2
    # Run a Publisher and a RunEngine in this main process.
    p = Publisher('127.0.0.1:5567', serializer=cloudpickle.dumps)  # noqa

    # COMPONENT 3
    # Run a RemoteDispatcher on another separate process. Pass the documents
    # it receives over a Queue to this process, so we can count them for our
    # test.
    def make_and_start_dispatcher(queue):
        def put_in_queue(name, doc):
            print('putting ', name, 'in queue')
            queue.put((name, doc))

        d = RemoteDispatcher('127.0.0.1:5568', deserializer=cloudpickle.loads)
        d.subscribe(put_in_queue)
        print("REMOTE IS READY TO START")
        d.loop.call_later(9, d.stop)
        d.start()

    queue = multiprocessing.Queue()
    dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
                                              daemon=True,
                                              args=(queue, ))
    dispatcher_proc.start()
    time.sleep(5)  # As above, give this plenty of time to start.

    # Generate two documents. The Publisher will send them to the proxy
    # device over 5567, and the proxy will send them to the
    # RemoteDispatcher over 5568. The RemoteDispatcher will push them into
    # the queue, where we can verify that they round-tripped.

    local_accumulator = []

    def local_cb(name, doc):
        local_accumulator.append((name, doc))

    RE([Msg('open_run'), Msg('close_run')], local_cb)

    # This time the Publisher isn't attached to an RE. Send the documents
    # manually. (The idea is, these might have come from a Broker instead...)
    for name, doc in local_accumulator:
        p(name, doc)
    time.sleep(1)

    # Get the two documents from the queue (or timeout --- test will fail)
    remote_accumulator = []
    for i in range(2):
        remote_accumulator.append(queue.get(timeout=2))
    p.close()
    proxy_proc.terminate()
    dispatcher_proc.terminate()
    proxy_proc.join()
    dispatcher_proc.join()
    ra = sanitize_doc(remote_accumulator)
    la = sanitize_doc(local_accumulator)
    assert ra == la
Example #17
def test_zmq(RE, hw):
    # COMPONENT 1
    # Run a 0MQ proxy on a separate process.
    def start_proxy():
        Proxy(5567, 5568).start()

    proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
    proxy_proc.start()
    time.sleep(5)  # Give this plenty of time to start up.

    # COMPONENT 2
    # Run a Publisher and a RunEngine in this main process.

    p = Publisher('127.0.0.1:5567', RE=RE)  # noqa

    # COMPONENT 3
    # Run a RemoteDispatcher on another separate process. Pass the documents
    # it receives over a Queue to this process, so we can count them for our
    # test.

    def make_and_start_dispatcher(queue):
        def put_in_queue(name, doc):
            print('putting ', name, 'in queue')
            queue.put((name, doc))

        d = RemoteDispatcher('127.0.0.1:5568')
        d.subscribe(put_in_queue)
        print("REMOTE IS READY TO START")
        d.loop.call_later(9, d.stop)
        d.start()

    queue = multiprocessing.Queue()
    dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
                                              daemon=True, args=(queue,))
    dispatcher_proc.start()
    time.sleep(5)  # As above, give this plenty of time to start.

    # Generate two documents. The Publisher will send them to the proxy
    # device over 5567, and the proxy will send them to the
    # RemoteDispatcher over 5568. The RemoteDispatcher will push them into
    # the queue, where we can verify that they round-tripped.

    local_accumulator = []

    def local_cb(name, doc):
        local_accumulator.append((name, doc))

    # Check that numpy stuff is sanitized by putting some in the start doc.
    md = {'stuff': {'nested': np.array([1, 2, 3])},
          'scalar_stuff': np.float64(3),
          'array_stuff': np.ones((3, 3))}

    # RE([Msg('open_run', **md), Msg('close_run')], local_cb)
    RE(count([hw.det]), local_cb)
    time.sleep(1)

    # Get the two documents from the queue (or timeout --- test will fail)
    remote_accumulator = []
    for i in range(len(local_accumulator)):
        remote_accumulator.append(queue.get(timeout=2))
    p.close()
    proxy_proc.terminate()
    dispatcher_proc.terminate()
    proxy_proc.join()
    dispatcher_proc.join()
    assert remote_accumulator == local_accumulator
Example #18
    def run(self):
        """
        Overrides the `run()` function of the `multiprocessing.Process` class. Called
        by the `start` method.
        """
        success = True

        logging.basicConfig(level=logging.WARNING)
        logging.getLogger(__name__).setLevel(self._log_level)

        from .plan_monitoring import RunList, CallbackRegisterRun

        self._active_run_list = RunList()  # Initialization should be done before communication is enabled.

        self._comm_to_manager.add_method(self._request_state_handler,
                                         "request_state")
        self._comm_to_manager.add_method(self._request_plan_report_handler,
                                         "request_plan_report")
        self._comm_to_manager.add_method(self._request_run_list_handler,
                                         "request_run_list")
        self._comm_to_manager.add_method(self._command_close_env_handler,
                                         "command_close_env")
        self._comm_to_manager.add_method(self._command_confirm_exit_handler,
                                         "command_confirm_exit")
        self._comm_to_manager.add_method(self._command_run_plan_handler,
                                         "command_run_plan")
        self._comm_to_manager.add_method(self._command_pause_plan_handler,
                                         "command_pause_plan")
        self._comm_to_manager.add_method(self._command_continue_plan_handler,
                                         "command_continue_plan")
        self._comm_to_manager.add_method(self._command_reset_worker_handler,
                                         "command_reset_worker")
        self._comm_to_manager.start()

        self._exit_event = threading.Event()
        self._exit_confirmed_event = threading.Event()
        self._re_report_lock = threading.Lock()

        from bluesky import RunEngine
        from bluesky.run_engine import get_bluesky_event_loop
        from bluesky.callbacks.best_effort import BestEffortCallback
        from bluesky_kafka import Publisher as kafkaPublisher
        from bluesky.utils import PersistentDict

        from .profile_tools import global_user_namespace

        # TODO: TC - Do you think that the following code may be included in RE.__init__()
        #   (for Python 3.8 and above)
        # Setting the default event loop is needed to make the code work with Python 3.8.
        loop = get_bluesky_event_loop()
        asyncio.set_event_loop(loop)

        try:
            keep_re = self._config["keep_re"]
            startup_dir = self._config.get("startup_dir", None)
            startup_module_name = self._config.get("startup_module_name", None)
            startup_script_path = self._config.get("startup_script_path", None)

            self._re_namespace = load_worker_startup_code(
                startup_dir=startup_dir,
                startup_module_name=startup_module_name,
                startup_script_path=startup_script_path,
                keep_re=keep_re,
            )

            if keep_re and ("RE" not in self._re_namespace):
                raise RuntimeError(
                    "Run Engine is not created in the startup code and 'keep_re' option is activated."
                )
            self._existing_plans = plans_from_nspace(self._re_namespace)
            self._existing_devices = devices_from_nspace(self._re_namespace)
            logger.info("Startup code was loaded completed.")

        except Exception as ex:
            logger.exception(
                "Failed to start RE Worker environment. Error while loading startup code: %s.",
                str(ex),
            )
            success = False

        # Load lists of allowed plans and devices
        logger.info("Loading the lists of allowed plans and devices ...")
        path_pd = self._config["existing_plans_and_devices_path"]
        path_ug = self._config["user_group_permissions_path"]
        try:
            self._allowed_plans, self._allowed_devices = load_allowed_plans_and_devices(
                path_existing_plans_and_devices=path_pd,
                path_user_group_permissions=path_ug)
        except Exception as ex:
            logger.exception(
                "Error occurred while loading lists of allowed plans and devices from '%s': %s",
                path_pd, str(ex))

        if success:
            logger.info("Instantiating and configuring Run Engine ...")

            try:
                # Make RE namespace available to the plan code.
                global_user_namespace.set_user_namespace(
                    user_ns=self._re_namespace, use_ipython=False)

                if self._config["keep_re"]:
                    # Copy references from the namespace
                    self._RE = self._re_namespace["RE"]
                    self._db = self._re_namespace.get("RE", None)
                else:
                    # Instantiate a new Run Engine and Data Broker (if needed)
                    md = {}
                    if self._config["use_persistent_metadata"]:
                        # This code is temporarily copied from 'nslsii' before better solution for keeping
                        #   continuous sequence Run ID is found. TODO: continuous sequence of Run IDs.
                        directory = os.path.expanduser("~/.config/bluesky/md")
                        os.makedirs(directory, exist_ok=True)
                        md = PersistentDict(directory)

                    self._RE = RunEngine(md)
                    self._re_namespace["RE"] = self._RE

                    def factory(name, doc):
                        # Documents from each run are routed to an independent
                        #   instance of BestEffortCallback
                        bec = BestEffortCallback()
                        return [bec], []

                    # Subscribe to Best Effort Callback in the way that works with multi-run plans.
                    rr = RunRouter([factory])
                    self._RE.subscribe(rr)

                    # Subscribe RE to databroker if config file name is provided
                    self._db = None
                    if "databroker" in self._config:
                        config_name = self._config["databroker"].get(
                            "config", None)
                        if config_name:
                            logger.info(
                                "Subscribing RE to Data Broker using configuration '%s'.",
                                config_name)
                            from databroker import Broker

                            self._db = Broker.named(config_name)
                            self._re_namespace["db"] = self._db

                            self._RE.subscribe(self._db.insert)

                # Subscribe Run Engine to 'CallbackRegisterRun'. This callback is used internally
                #   by the worker process to keep track of the runs that are open and closed.
                run_reg_cb = CallbackRegisterRun(
                    run_list=self._active_run_list)
                self._RE.subscribe(run_reg_cb)

                if "kafka" in self._config:
                    logger.info(
                        "Subscribing to Kafka: topic '%s', servers '%s'",
                        self._config["kafka"]["topic"],
                        self._config["kafka"]["bootstrap"],
                    )
                    kafka_publisher = kafkaPublisher(
                        topic=self._config["kafka"]["topic"],
                        bootstrap_servers=self._config["kafka"]["bootstrap"],
                        key="kafka-unit-test-key",
                        # work with a single broker
                        producer_config={
                            "acks": 1,
                            "enable.idempotence": False,
                            "request.timeout.ms": 5000
                        },
                        serializer=partial(msgpack.dumps, default=mpn.encode),
                    )
                    self._RE.subscribe(kafka_publisher)

                if "zmq_data_proxy_addr" in self._config:
                    from bluesky.callbacks.zmq import Publisher

                    publisher = Publisher(self._config["zmq_data_proxy_addr"])
                    self._RE.subscribe(publisher)

                self._execution_queue = queue.Queue()

                self._state["environment_state"] = "ready"

            except BaseException as ex:
                success = False
                logger.exception(
                    "Error occurred while initializing the environment: %s.",
                    str(ex))

        if success:
            logger.info("RE Environment is ready.")
            self._execute_in_main_thread()
        else:
            self._exit_event.set()

        logger.info("Environment is waiting to be closed ...")
        self._state["environment_state"] = "closing"

        # Wait until confirmation is received from RE Manager
        while not self._exit_confirmed_event.is_set():
            ttime.sleep(0.02)

        self._RE = None

        self._comm_to_manager.stop()

        logger.info("Run Engine environment was closed successfully.")
Example #19
def test_zmq_prefix(RE, hw):
    # COMPONENT 1
    # Run a 0MQ proxy on a separate process.
    def start_proxy():
        Proxy(5567, 5568).start()

    proxy_proc = multiprocessing.Process(target=start_proxy, daemon=True)
    proxy_proc.start()
    time.sleep(5)  # Give this plenty of time to start up.

    # COMPONENT 2
    # Run a Publisher and a RunEngine in this main process.

    p = Publisher('127.0.0.1:5567', prefix=b'sb')  # noqa
    p2 = Publisher('127.0.0.1:5567', prefix=b'not_sb')  # noqa
    RE.subscribe(p)
    RE.subscribe(p2)


    # COMPONENT 3
    # Run a RemoteDispatcher on another separate process. Pass the documents
    # it receives over a Queue to this process, so we can count them for our
    # test.

    def make_and_start_dispatcher(queue):
        def put_in_queue(name, doc):
            print('putting ', name, 'in queue')
            queue.put((name, doc))

        d = RemoteDispatcher('127.0.0.1:5568', prefix=b'sb')
        d.subscribe(put_in_queue)
        print("REMOTE IS READY TO START")
        d.loop.call_later(9, d.stop)
        d.start()

    queue = multiprocessing.Queue()
    dispatcher_proc = multiprocessing.Process(target=make_and_start_dispatcher,
                                              daemon=True, args=(queue,))
    dispatcher_proc.start()
    time.sleep(5)  # As above, give this plenty of time to start.

    # Generate two documents. The Publisher will send them to the proxy
    # device over 5567, and the proxy will send them to the
    # RemoteDispatcher over 5568. The RemoteDispatcher will push them into
    # the queue, where we can verify that they round-tripped.

    local_accumulator = []

    def local_cb(name, doc):
        local_accumulator.append((name, doc))

    # Check that numpy stuff is sanitized by putting some in the start doc.
    md = {'stuff': {'nested': np.array([1, 2, 3])},
          'scalar_stuff': np.float64(3),
          'array_stuff': np.ones((3, 3))}

    # RE([Msg('open_run', **md), Msg('close_run')], local_cb)
    RE(count([hw.det]), local_cb)
    time.sleep(1)

    # Get the two documents from the queue (or timeout --- test will fail)
    remote_accumulator = []
    for i in range(len(local_accumulator)):
        remote_accumulator.append(queue.get(timeout=2))
    p.close()
    proxy_proc.terminate()
    dispatcher_proc.terminate()
    proxy_proc.join()
    dispatcher_proc.join()
    assert remote_accumulator == local_accumulator
Example #20
import nslsii
ip = get_ipython()

nslsii.configure_base(ip.user_ns, 'bmm', configure_logging=False)

bec.disable_plots()
bec.disable_baseline()

import ophyd
ophyd.EpicsSignal.set_default_timeout(timeout=20, connection_timeout=20)

#from databroker.core import SingleRunCache

from bluesky.utils import ts_msg_hook
RE.msg_hook = ts_msg_hook

from bluesky.callbacks.zmq import Publisher
publisher = Publisher('localhost:5577')
RE.subscribe(publisher)
Example #21
import_userScriptsEtc = import_userScriptsEtc
import_sample_info = import_sample_info
ScanPlan = ScanPlan
Sample = Sample
ct = ct
Tramp = Tramp
Tlist = Tlist
tseries = tseries
run_calibration = run_calibration
xpd_configuration = xpd_configuration
print("INFO: Initializing the XPD data acquisition environment ...")
glbl, bt, xrun = ipysetup(area_det=pe1c,
                          shutter=shctl1,
                          temp_controller=cs700,
                          filter_bank=fb,
                          ring_current=ring_current,
                          db=db)
print("INFO: Initialized glbl, bt, xrun.")
xrun.subscribe(Publisher("localhost:5567", prefix=b'raw'))
print("INFO: Publish data to localhost port 5567 with prefix 'raw'.")
if Path(glbl["home"]).is_dir():
    os.chdir(glbl["home"])
    print("INFO: Changed home to {}".format(glbl["home"]))
elif Path(glbl["base"]).is_dir():
    os.chdir(glbl["base"])
    print("INFO: Changed home to {}".format(glbl["base"]))
print("OK, ready to go.  To continue, follow the steps in the xpdAcq"
      "documentation at http://xpdacq.github.io/xpdacq")
# delete useless names
del os, Path, ipysetup, Publisher, temp, xpd_pe1c
Example #22
arg_parser.add_argument("--db-name", type=str, default="xpd")
arg_parser.add_argument("--run-id", type=str, required=True)
# publish 0MQ messages at XPD from xf28id2-ca1:5577
# subscribe to 0MQ messages at XPD from xf28id2-ca1:5578
arg_parser.add_argument("--zmq-host", type=str, default="xf28id2-ca1")
arg_parser.add_argument("--zmq-publish-port", type=int, default=5577)
arg_parser.add_argument("--zmq-publish-prefix", type=str, default="raw")

args = arg_parser.parse_args()
pprint.pprint(vars(args))

db = Broker.named("xpd")

zmq_publisher = Publisher(
    f"{args.zmq_host}:{args.zmq_publish_port}", prefix=args.zmq_publish_prefix.encode()
)

extra = count()
for name, doc in db[args.run_id].documents():
    print(f"trying to emit {name}")
    doc = dict(doc)
    if name == "descriptor":
        doc["data_keys"]["extra"] = {
            "dtype": "number",
            "source": "computed",
            "units": "arb",
            "shape": [],
        }
        zmq_publisher("descriptor", doc)
    if name == "event_page":
Example #23
from bluesky.callbacks.zmq import Publisher
from xpdan.vend.callbacks import CallbackBase

raw_publisher = Publisher(glbl['inbound_proxy_address'], prefix=b'raw')
#RE.subscribe(lambda *x: raw_publisher(*x))
xrun.subscribe(lambda *x: raw_publisher(*x))
Example #24
from databroker import Broker

db = Broker.named('csx2')

import nslsii
nslsii.configure_base(get_ipython().user_ns, db)
# make sure Best Effort Callback does not plot the baseline readings
bec.noplot_streams.append('pgm_energy_monitor')

### comment this out to:
### disable the zmq service and re-enable best effort callback plotting locally
bec.disable_plots()

from bluesky.callbacks.zmq import Publisher
pub = Publisher('xf23id-ca:5577')
RE.subscribe(pub)
#####

# TODO not need this
from epics import caget, caput
from amostra.client.commands import SampleReference, ContainerReference

# Optional: set any metadata that rarely changes.

# convenience imports


def ensure_proposal_id(md):
    if 'proposal_id' not in md:
        raise ValueError("Please run user_checkin() first")