Example 1
def generate_example_catalog(data_path):
    data_path = Path(data_path)

    def factory(name, doc):
        serializer = Serializer(data_path / 'abc')
        serializer('start', doc)
        return [serializer], []

    RE = RunEngine()
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    sd.baseline.extend([motor1, motor2])
    rr = RunRouter([factory])
    RE.subscribe(rr)
    RE(count([det]))
    RE(count([noisy_det], 5))
    RE(scan([det], motor, -1, 1, 7))
    RE(grid_scan([det4], motor1, -1, 1, 4, motor2, -1, 1, 7, False))
    RE(scan([det], motor, -1, 1, motor2, -1, 1, 5))
    RE(count([noisy_det, det], 5))
    RE(count([random_img], 5))
    RE(count([img], 5))

    def factory(name, doc):
        serializer = Serializer(data_path / 'xyz')
        serializer('start', doc)
        return [serializer], []

    RE = RunEngine()
    rr = RunRouter([factory])
    RE.subscribe(rr)
    RE(count([det], 3))

    catalog_filepath = data_path / 'catalog.yml'
    with open(catalog_filepath, 'w') as file:
        file.write(f'''
sources:
  abc:
    description: Some imaginary beamline
    driver: bluesky-jsonl-catalog
    container: catalog
    args:
      paths: {Path(data_path) / 'abc' / '*.jsonl'}
      handler_registry:
        NPY_SEQ: ophyd.sim.NumpySeqHandler
    metadata:
      beamline: "00-ID"
  xyz:
    description: Some imaginary beamline
    driver: bluesky-jsonl-catalog
    container: catalog
    args:
      paths: {Path(data_path) / 'xyz' / '*.jsonl'}
      handler_registry:
        NPY_SEQ: ophyd.sim.NumpySeqHandler
    metadata:
      beamline: "99-ID"
''')
    return str(catalog_filepath)
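A usage sketch, not part of the original example: assuming intake and the bluesky-jsonl-catalog driver referenced above are installed, the generated catalog file might be opened like this (the path and variable names are hypothetical).

import intake

# Hypothetical usage of the function above.
catalog_filepath = generate_example_catalog('/tmp/example_data')
catalog = intake.open_catalog(catalog_filepath)
print(list(catalog))       # ['abc', 'xyz']
abc = catalog['abc']()     # instantiate the 'abc' source (a catalog of runs)
print(list(abc))           # uids of the runs serialized above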
Example 2
def test_no_dark_frames(RE, tmp_path):
    """
    Test that a readable error is raised if no 'dark' frame is received.
    """
    def factory(name, doc):
        # The problem this factory solves is storing documents from this run
        # long enough to cross-reference them (e.g. light frames and dark
        # frames), and then tearing everything down when the run is over.
        subtractor = bluesky_darkframes.DarkSubtraction('det_image')
        serializer = Serializer(tmp_path)
        filler = Filler({'NPY_SEQ': NumpySeqHandler}, inplace=False)

        # Here we push the run 'start' doc through.
        subtractor(name, doc)
        serializer(name, doc)
        filler(name, doc)

        # By returning this function below, we route all other documents
        # *for this run* through it.
        def fill_subtract_and_serialize(name, doc):
            name, doc = filler(name, doc)
            name, doc = subtractor(name, doc)
            serializer(name, doc)

        return [fill_subtract_and_serialize], []

    rr = RunRouter([factory])
    RE.subscribe(rr)

    # We intentionally 'forget' to set up a dark_frame_preprocessor for this
    # test.

    with pytest.raises(bluesky_darkframes.NoDarkFrame):
        RE(count([det]))
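For contrast, a sketch of the setup this test deliberately omits; it mirrors the DarkFramePreprocessor configuration used in the streaming-export examples further down, so dark_plan and det are assumed to be defined as they are there.

# With a DarkFramePreprocessor installed, a dark frame is acquired
# automatically and NoDarkFrame is not raised (based on the later examples).
dark_frame_preprocessor = bluesky_darkframes.DarkFramePreprocessor(
    dark_plan=dark_plan, detector=det, max_age=100)
RE.preprocessors.append(dark_frame_preprocessor)
RE(count([det]))  # now succeeds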
Example 3
def run_publisher(in_port, data_path, quiet=False):
    """
    Acquire data in an infinite loop and publish it.
    """
    publisher = Publisher(f"localhost:{in_port}")
    RE = RunEngine(loop=asyncio.new_event_loop())
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    sd.baseline.extend([motor1, motor2])
    RE.subscribe(publisher)

    def factory(name, doc):
        serializer = Serializer(data_path / "abc", flush=True)
        return [serializer], []

    rr = RunRouter([factory])
    RE.subscribe(rr)
    if not quiet:
        RE.subscribe(LiveTable(["motor", "det"]))

    motor.delay = 0.2
    det.kind = "hinted"

    def infinite_plan():
        while True:
            for i in range(1, 5):
                yield from sleep(2)
                yield from scan([det], motor, -1, 1, 5 * i)

    # Just as a convenience, avoid colliding with scan_ids of runs already in the Catalog.
    RE.md["scan_id"] = 100
    try:
        RE(infinite_plan())
    finally:
        RE.halt()
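A possible companion consumer for this publisher, offered only as a sketch: it assumes a 0MQ proxy forwards documents from in_port to an out port (5578 here is hypothetical) and prints the name of each document, per run.

from bluesky.callbacks.zmq import RemoteDispatcher
from event_model import RunRouter


def printing_factory(name, start_doc):
    def printer(name, doc):
        print(name, doc.get("uid", ""))
    return [printer], []


dispatcher = RemoteDispatcher("localhost:5578")  # hypothetical proxy out-port
dispatcher.subscribe(RunRouter([printing_factory]))
dispatcher.start()  # blocks, dispatching documents as they arrive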
Example 4
def test_publisher_with_no_broker(RE, hw):
    # specify a bootstrap server that does not exist
    nslsii.subscribe_kafka_publisher(
        RE=RE,
        beamline_name="test",
        bootstrap_servers="100.100.100.100:9092",
        producer_config={
            "acks": "all",
            "enable.idempotence": False,
            "request.timeout.ms": 1000,
        },
    )

    # use a RunRouter to get event_pages locally because
    # the KafkaPublisher will produce event_pages
    local_published_documents = list()

    def document_accumulator_factory(start_doc_name, start_doc):
        def document_accumulator(name, doc):
            local_published_documents.append((name, doc))

        return [document_accumulator], []

    local_run_router = RunRouter(factories=[document_accumulator_factory])
    RE.subscribe(local_run_router)

    t0 = time.time()
    RE(count([hw.det1]))
    t1 = time.time()

    # the Kafka request timeout is set to 1s, but running count() takes longer than 5s here
    print(f"time for count: {t1-t0:.3f}")
    assert (t1 - t0) < 10.0

    assert len(local_published_documents) == 4
Example 5
def setup_data_saving(RE):
    """
    Subscribe a suitcase Serializer to RE and return a corresponding Catalog.

    The format happens to be msgpack, but that should be treated as an
    implementation detail subject to change.
    """
    directory = appdirs.user_data_dir("bluesky", "tutorial_utils")
    driver = intake.registry["bluesky-msgpack-catalog"]
    catalog = driver(str(Path(directory, "*.msgpack")))

    class PatchedSerializer(Serializer):
        """
        Work around https://github.com/bluesky/databroker/pull/559
        """
        def stop(self, doc):
            super().stop(doc)
            catalog.force_reload()

    def factory(name, start):
        return [PatchedSerializer(directory)], []

    rr = RunRouter([factory])
    RE.subscribe(rr)
    return catalog
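A usage sketch, not from the original source, assuming det and count come from ophyd.sim and bluesky.plans:

from bluesky import RunEngine
from bluesky.plans import count
from ophyd.sim import det

RE = RunEngine()
catalog = setup_data_saving(RE)
RE(count([det], 3))
run = catalog[-1]             # most recent run in the returned Catalog
print(run.primary.read())     # 'primary' stream as an xarray Dataset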
Example 6
def run_publisher(in_port, data_path):
    """
    Acquire data in an infinite loop and publish it.
    """
    import asyncio
    from bluesky.callbacks.zmq import Publisher
    from suitcase.jsonl import Serializer
    from ophyd.sim import noisy_det, motor1, motor2
    from bluesky.plans import count
    from bluesky.preprocessors import SupplementalData
    from bluesky.plan_stubs import sleep
    publisher = Publisher(f'localhost:{in_port}')
    RE = RunEngine(loop=asyncio.new_event_loop())
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    sd.baseline.extend([motor1, motor2])
    RE.subscribe(publisher)

    def factory(name, doc):
        serializer = Serializer(data_path / 'abc')
        serializer('start', doc)
        return [serializer], []

    rr = RunRouter([factory])
    RE.subscribe(rr)

    def infinite_plan():
        while True:
            yield from sleep(3)
            yield from count([noisy_det], 20, delay=0.5)

    try:
        RE(infinite_plan())
    finally:
        RE.halt()
Example 7
def start(export_dir, kafka_bootstrap_servers, kafka_topics):
    def factory(name, start_doc, export_dir):
        my_sample_name = start_doc["md"]
        subtractor = DarkSubtraction("pe1c_image")
        return [
            partial(
                export_subtracted_tiff_series,
                export_dir=export_dir,
                my_sample_name=my_sample_name,
                subtractor=subtractor
            )
        ], []

    dispatcher = bluesky_kafka.RemoteDispatcher(
        topics=kafka_topics,
        group_id="pdf-dark-subtractor-tiff-worker",
        bootstrap_servers=kafka_bootstrap_servers,
        deserializer=msgpack.loads,
    )

    rr = RunRouter(
        [partial(factory, export_dir=export_dir)],
        handler_registry={
            "AD_TIFF": databroker.assets.handlers.AreaDetectorTiffHandler,
            "NPY_SEQ": ophyd.sim.NumpySeqHandler,
        },
    )
    dispatcher.subscribe(rr)
    dispatcher.start()
Example 8
def test_streaming_export(RE, tmp_path, pedestal):
    """
    Test that DarkSubtractor generates files when subscribed to RE.
    """
    def factory(name, doc):
        # The problem this factory solves is storing documents from this run
        # long enough to cross-reference them (e.g. light frames and dark
        # frames), and then tearing everything down when the run is over.
        kwargs = {}
        if pedestal is not None:
            kwargs['pedestal'] = pedestal
        subtractor = bluesky_darkframes.DarkSubtraction('det_image', **kwargs)
        serializer = Serializer(tmp_path)

        # By returning this function below, we route all other documents
        # *for this run* through it.
        def subtract_and_serialize(name, doc):
            name, doc = subtractor(name, doc)
            serializer(name, doc)

        return [subtract_and_serialize], []

    rr = RunRouter([factory], {'NPY_SEQ': NumpySeqHandler})
    RE.subscribe(rr)

    dark_frame_preprocessor = bluesky_darkframes.DarkFramePreprocessor(
        dark_plan=dark_plan, detector=det, max_age=100)
    RE.preprocessors.append(dark_frame_preprocessor)

    RE(count([det]))
    exported_files = os.listdir(tmp_path)

    assert len(exported_files) == 2
Example 9
 def __call__(self, name, start_doc):
     if not self.enabled:
         return [], []
     dimensions = start_doc.get('hints', {}).get('dimensions', guess_dimensions(start_doc))
     rr = RunRouter(
         [factory(self, dimensions) for factory in self.factories])
     rr('start', start_doc)
     return [rr], []
Example 10
def test_multirun_nested_plan(capsys, caplog, RE, hw):
    # This test only checks if the plan runs without crashing. If BEC crashes,
    #   the plan will still run, but data will not be displayed.
    @bpp.set_run_key_decorator(run="inner_run")
    def plan_inner():
        yield from grid_scan([hw.det4], hw.motor1, 0, 1, 1, hw.motor2, 0, 1, 2,
                             True)

    def sequence():
        for n in range(5):
            yield from bps.mov(hw.motor, n * 0.1 + 1)
            yield from bps.trigger_and_read([hw.det1])

    @bpp.set_run_key_decorator(run="outer_run")
    @bpp.stage_decorator([hw.det1, hw.motor])
    @bpp.run_decorator(md={})
    def plan_outer():
        yield from sequence()
        # Call inner plan from within the plan
        yield from plan_inner()
        # Run another set of commands
        yield from sequence()

    # The first test should fail. We check that the expected error message is
    #   printed in case of failure.
    bec = BestEffortCallback()
    bec_token = RE.subscribe(bec)
    RE(plan_outer())

    captured = capsys.readouterr()

    # Check for the number of runs (the number of times UID is printed in the output)
    scan_uid_substr = "Persistent Unique Scan ID"
    n_runs = captured.out.count(scan_uid_substr)
    assert n_runs == 2, "scan output contains incorrect number of runs"
    # Check that the expected error message is printed when the callback fails. The
    #   same substring is used in the second part of the test to verify that BEC
    #   did not fail.
    err_msg_substr = "is being suppressed to not interrupt plan execution"
    assert err_msg_substr in str(caplog.text), \
        "Best Effort Callback failed, but expected error message was not printed"

    RE.unsubscribe(bec_token)
    caplog.clear()

    # The second test should succeed, i.e. the error message should not be printed
    def factory(name, doc):
        bec = BestEffortCallback()
        return [bec], []

    rr = RunRouter([factory])
    RE.subscribe(rr)
    RE(plan_outer())

    captured = capsys.readouterr()
    n_runs = captured.out.count(scan_uid_substr)
    assert n_runs == 2, "scan output contains incorrect number of runs"
    assert err_msg_substr not in caplog.text, \
        "Best Effort Callback failed while executing nested plans"
Example 11
    def __init__(self, *args, menuBar, **kwargs):
        super().__init__(*args, **kwargs)
        self._run_to_tabs = collections.defaultdict(list)
        self._title_to_tab = {}
        self._tabs_from_streaming = []
        self._overplot = OverPlotState.individual_tab
        self._overplot_target = None
        self._live_enabled = False

        self._live_run_router = RunRouter([self.route_live_stream])

        self._containers = [TabbedViewingArea(self, menuBar=menuBar) for _ in range(2)]
        layout = QVBoxLayout()
        for container in self._containers:
            layout.addWidget(container)
        self.setLayout(layout)

        overplot_group = QActionGroup(self)
        self.off = QAction('&Off', self)
        self.off.setStatusTip('Drop streaming data.')
        self.individual_tab = QAction('&New Tab', self)
        self.individual_tab.setStatusTip('Open a new viewer tab for each Run.')
        self.latest_live = QAction('&Latest Live Tab', self)
        self.latest_live.setStatusTip('Attempt to overplot on the most recent live Run.')
        self.fixed = QAction('&Fixed Tab...', self)
        self.fixed.setStatusTip('Attempt to overplot on a specific tab.')
        self.fixed.setEnabled(False)
        overplot_group.addAction(self.off)
        overplot_group.addAction(self.individual_tab)
        overplot_group.addAction(self.latest_live)
        overplot_group.addAction(self.fixed)
        for action in overplot_group.actions():
            action.setCheckable(True)
        overplot_group.setExclusive(True)
        self.off.setChecked(True)

        overplot_menu = menuBar().addMenu('&Streaming')
        overplot_menu.addActions(overplot_group.actions())

        self.off.triggered.connect(self.disable_live)
        self.individual_tab.triggered.connect(partial(self.set_overplot_state, OverPlotState.individual_tab))
        self.latest_live.triggered.connect(partial(self.set_overplot_state, OverPlotState.latest_live))

        def set_overplot_target():
            item, ok = QInputDialog.getItem(
                self, "Select Tab", "Tab", tuple(self._title_to_tab), 0, False)
            if not ok:
                # Abort and fall back to Off. It would be better to fall back
                # to the previous state (which could be latest_live), but it's
                # not clear how to know what that state was.
                self.off.setChecked(True)
                return
            self.set_overplot_state(OverPlotState.fixed)
            self._overplot_target = item

        self.fixed.triggered.connect(set_overplot_target)
Example 12
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     self._entries = []
     self._uids = []
     self._active_loaders = set()
     self.run_router = RunRouter([
         HeaderTreeFactory(self.addTab),
         BaselineFactory(self.addTab),
         FigureManager(self.addTab),
         ])
Example 13
 def __call__(self, name, start_doc):
     if not self.enabled:
         return [], []
     dimensions = start_doc.get('hints',
                                {}).get('dimensions',
                                        guess_dimensions(start_doc))
     log.debug('dimensions: %s', dimensions)
     line_plot_manager = LinePlotManager(self, dimensions)
     rr = RunRouter([line_plot_manager])
     rr('start', start_doc)
     return [rr], []
Example 14
    def _get_serializer(self):
        "This is used internally by v1.Broker. It may be removed in future."
        from suitcase.jsonl import Serializer
        from event_model import RunRouter
        path, *_ = self.paths
        directory = os.path.dirname(path)

        def factory(name, doc):
            serializer = Serializer(directory)
            return [serializer], []

        return RunRouter([factory])
Example 15
def test_basic_functionality():
    "A simple test demonstrating validation failure and success"
    handler_registry.clear()
    RE = RunEngine()
    rr = RunRouter([validator_factory_raising])
    RE.subscribe(rr)
    # This should fail because there is no handler registered.
    with pytest.raises(UndefinedAssetSpecification):
        RE(count([img]))
    # Register the handler...
    handler_registry.update({'NPY_SEQ': NumpySeqHandler})
    # ...and now the validator should be satisfied.
    RE(count([img]))
Example 16
def main():
    parser = argparse.ArgumentParser(
        description='Listen for documents over 0MQ and validate Resources.')
    parser.add_argument(
        'proxy_address', type=str,
        help="bluesky-0MQ-proxy out address, given as in localhost:5578")
    parser.add_argument(
        '--emails', required=False, nargs='*',
        help="space-separated list of email addresses")
    args = parser.parse_args()

    log_handler = logging.StreamHandler()  # stderr
    log_handler.setFormatter(LogFormatter())
    logger.setLevel('INFO')
    logger.addHandler(log_handler)

    if args.emails:
        server_name = socket.gethostname()
        smtp_handler = SMTPHandler(
            mailhost='localhost',
            fromaddr=f'Resource Health Check <noreply@{server_name}>',
            toaddrs=args.emails,
            subject=(f'Error report from resource health check on '
                     f'{server_name}')
        )
        smtp_handler.setFormatter(LogFormatter(color=False))
        smtp_handler.setLevel('WARNING')
        # Use QueueHandler in case sending email is slow. LogRecords flow
        # from QueueHandler -> Queue -> QueueListener -> SMTPHandler.
        cleanup_listener = True
        que = queue.Queue()
        queue_handler = QueueHandler(que)
        queue_listener = QueueListener(que, smtp_handler,
                                       respect_handler_level=True)
        logger.addHandler(queue_handler)
        queue_listener.start()
    else:
        cleanup_listener = False

    rr = RunRouter([validator_factory])
    rd = RemoteDispatcher(args.proxy_address)
    rd.subscribe(rr)

    logger.info(f'Listening to {args.proxy_address}')

    try:
        rd.start()  # runs forever
    finally:
        if cleanup_listener:
            queue_listener.stop()
Example 17
    def __init__(self, *args, **kwargs):
        self.update_config(load_config())
        super().__init__(*args, **kwargs)
        self._entries = []
        self._uids = []
        self._active_loaders = set()

        def filler_factory(name, doc):
            filler = Filler(parse_handler_registry(self.handler_registry))
            filler('start', doc)
            return [filler], []

        self.run_router = RunRouter(
            [filler_factory] +
            [factory(self.addTab) for factory in self.factories])
Example 18
def main():
    from bluesky.callbacks.zmq import Publisher, RemoteDispatcher

    parser = argparse.ArgumentParser(
        description=
        "Listen for unfilled documents over 0MQ and emit filled ones.")
    parser.add_argument(
        "receive_from",
        type=str,
        help="bluesky-0MQ-proxy out address, given as in localhost:5578",
    )
    parser.add_argument(
        "send_to",
        type=str,
        help="bluesky-0MQ-proxy in address, given as in localhost:5578",
    )
    args = parser.parse_args()

    # Data flows through:
    # * RemoteDispatcher (0MQ)
    # * Accumulator (caches until stop doc is received)
    # * EmittingFiller (fills external data)
    # * Publisher (0MQ)

    publisher = Publisher(args.send_to)

    handler_registry = discover_handlers()

    def factory(name, doc):
        filler = EmittingFiller(handler_registry,
                                inplace=False,
                                callback=publisher,
                                coerce="force_numpy")
        accumulator = Accumulator(callback=filler)
        return [accumulator], []

    rr = RunRouter([factory])
    rd = RemoteDispatcher(args.receive_from)
    rd.subscribe(rr)

    print(f"Listening to {args.receive_from}")

    try:
        rd.start()  # runs forever
    except KeyboardInterrupt:
        print("Terminated by user; exiting")
Example 19
def test_two_streams(RE):
    def pretty_print(name, doc):
        pprint.pprint(name)
        pprint.pprint(doc)

    def serializer_factory(name, start_doc):
        serializer = Serializer("xdi")
        serializer("start", start_doc)
        return [serializer], []

    sd = SupplementalData()
    RE.preprocessors.append(sd)
    sd.baseline = [det1, motor1, motor2]

    RE.subscribe(pretty_print)
    RE.subscribe(RunRouter([serializer_factory]))

    suitcase_meta_data = {"config": xdi_file_template}

    xdi_meta_data = {
        "Element_symbol": "A",
        "Element_edge": "K",
        "Mono_d_spacing": 10.0
    }

    nx_meta_data = {
        "Source": {
            "name": "NSLS-II"
        },
        "Instrument": {
            "name": "BMM"
        },
        "Beam": {
            "incident_energy": 1000.0
        },
    }

    dets = [det1, det2]
    RE(
        count(dets, num=5),
        md={
            "suitcase-xdi": suitcase_meta_data,
            "NX": nx_meta_data,
            "XDI": xdi_meta_data,
        },
    )
Example 20
def test_streaming_export(RE, tmp_path):
    """
    Test that DarkSubtractor generates files when subscribed to RE.
    """
    def factory(name, doc):
        # The problem this factory solves is storing documents from this run
        # long enough to cross-reference them (e.g. light frames and dark
        # frames), and then tearing everything down when the run is over.
        subtractor = bluesky_darkframes.DarkSubtraction('det_image')
        serializer = Serializer(tmp_path)
        filler = Filler({'NPY_SEQ': NumpySeqHandler}, inplace=False)

        # Here we push the run 'start' doc through.
        subtractor(name, doc)
        serializer(name, doc)
        filler(name, doc)

        # By returning this function below, we route all other documents
        # *for this run* through it.
        def fill_subtract_and_serialize(name, doc):
            name, doc = filler(name, doc)
            name, doc = subtractor(name, doc)
            serializer(name, doc)

        return [fill_subtract_and_serialize], []

    rr = RunRouter([factory])
    RE.subscribe(rr)

    dark_frame_preprocessor = bluesky_darkframes.DarkFramePreprocessor(
        dark_plan=dark_plan, max_age=100)
    RE.preprocessors.append(dark_frame_preprocessor)

    RE(count([det]))
    exported_files = os.listdir(tmp_path)

    assert len(exported_files) == 2
Example 21
max_count = 15

recommender_factory, _ = per_start.recommender_factory(
    adaptive_obj=step_recommender,
    independent_keys=["ctrl_temp"],
    dependent_keys=["full_Q"],
    max_count=max_count,
    queue=redis_queue,
)
zmq_dispatcher.subscribe(recommender_factory)


def echo_factory(start_name, start_doc):
    print(
        f"echo_factory called with {start_name}\n{pprint.pformat(start_doc)}\n"
    )

    def echo(name, doc):
        print(
            f"adaptive gpcam consumer received {name}:\n{pprint.pformat(doc)}\n"
        )

    return [echo], []


echo_run_router = RunRouter(factories=[echo_factory])
zmq_dispatcher.subscribe(echo_run_router)

print(f"ADAPTIVE GPCAM CONSUMER LISTENING ON {zmq_listening_prefix}")
zmq_dispatcher.start()
Example 22
                    '{start[institution]}_'
                    '{start[user_name]}/'
                    '{start[project_name]}/'
                    f'{formatted_date}/'
                    '{start[scan_id]}/'
                    '{start[scan_id]}-'
                    '{start[sample_name]}-'
                    #'{event[data][Beamline Energy_energy]:.2f}eV-'
                ),
                directory=USERDIR,
                flush=True,
                line_terminator='\n')
            serializer('start', start_doc)
            serializer('descriptor', descriptor_doc)
            return [serializer]
        else:
            return []

    return [], [subfactory]


import event_model
import suitcase.jsonl

handler_registry = {
    'AD_TIFF': databroker.assets.handlers.AreaDetectorTiffHandler
}
rr = RunRouter([factory], handler_registry=handler_registry)
rr_token = dispatcher.subscribe(rr)
dispatcher.start()
Example 23
def test_kafka_publisher(RE, hw, bootstrap_servers):
    kafka_topic, runrouter_token = nslsii.subscribe_kafka_publisher(
        RE=RE,
        beamline_name="test",
        bootstrap_servers=bootstrap_servers,
        producer_config={
            "acks": "all",
            "enable.idempotence": False,
            "request.timeout.ms": 1000,
        },
    )

    assert kafka_topic == "test.bluesky.documents"
    assert isinstance(runrouter_token, int)

    # Run a RemoteDispatcher on a separate process. Pass the documents
    # it receives over a multiprocessing.Queue back to this process so
    # we can compare with locally stored documents.
    # The default "auto.commit.interval.ms" is 5000, but using the default
    # means some of the Kafka messages consumed here are not committed
    # and so are DELIVERED AGAIN the next time this test runs. The solution
    # is setting a very short "auto.commit.interval.ms" for this test.
    def make_and_start_dispatcher(document_queue):
        def put_in_queue(name, doc):
            document_queue.put((name, doc))

        kafka_dispatcher = RemoteDispatcher(
            topics=[kafka_topic],
            bootstrap_servers=bootstrap_servers,
            group_id="test_kafka_publisher",
            consumer_config={
                "auto.offset.reset": "latest",
                "auto.commit.interval.ms": 100,
            },
            polling_duration=1.0,
            deserializer=partial(msgpack.loads, object_hook=mpn.decode),
        )
        kafka_dispatcher.subscribe(put_in_queue)
        kafka_dispatcher.start()

    queue_ = multiprocessing.Queue()
    dispatcher_proc = multiprocessing.Process(
        target=make_and_start_dispatcher, daemon=True, args=(queue_,)
    )
    dispatcher_proc.start()
    # give the dispatcher process time to start
    time.sleep(10)

    local_documents = []

    # use a RunRouter to get event_pages locally because
    # the KafkaPublisher will produce event_pages
    def document_accumulator_factory(start_doc_name, start_doc):
        def document_accumulator(name, doc):
            local_documents.append((name, doc))

        return [document_accumulator], []

    local_run_router = RunRouter(factories=[document_accumulator_factory])
    RE.subscribe(local_run_router)

    # test that numpy data is transmitted correctly
    md1 = {
        "numpy_data": {"nested": np.array([1, 2, 3])},
        "numpy_scalar": np.float64(4),
        "numpy_array": np.ones((3, 3)),
    }

    RE(count([hw.det1]), md=md1)

    # test that numpy data is transmitted correctly
    md2 = {
        "numpy_data": {"nested": np.array([4, 5, 6])},
        "numpy_scalar": np.float64(7),
        "numpy_array": np.ones((4, 4)),
    }

    RE(count([hw.det2]), md=md2)

    # Get the documents from the inter-process queue (or timeout)
    remote_documents = []
    while True:
        try:
            name_, doc_ = queue_.get(timeout=1)
            remote_documents.append((name_, doc_))
        except queue.Empty:
            print("the queue is empty!")
            break

    dispatcher_proc.terminate()
    dispatcher_proc.join()

    # sanitize_doc normalizes some document data, such as numpy arrays,
    # that are problematic for direct document comparison by "assert"
    sanitized_local_published_documents = [
        (name, sanitize_doc(doc)) for name, doc in local_documents
    ]
    sanitized_remote_published_documents = [
        (name, sanitize_doc(doc)) for name, doc in remote_documents
    ]

    assert len(remote_documents) == len(local_documents)
    assert len(sanitized_remote_published_documents) == len(
        sanitized_local_published_documents
    )
    assert sanitized_remote_published_documents == sanitized_local_published_documents

    # test that we got the correct subscription token for the Kafka Publisher
    # KeyError is raised if the token is not known
    RE.unsubscribe(token=runrouter_token)
Example 24
def recommender_factory(adaptive_obj,
                        independent_keys,
                        dependent_keys,
                        *,
                        max_count=10,
                        queue=None):
    """
    Generate the callback and queue for an Adaptive-API-backed recommender.

    This recommends a fixed step size independent of the measurement.

    For each Run (aka Start) that the callback sees, it will place
    either a recommendation or `None` into the queue.  A recommendation
    is a dict mapping the independent_keys to the recommended values and
    should be interpreted by the plan as a request for more data.  A
    `None` placed in the queue should be interpreted by the plan as an
    instruction to terminate the run.

    The StartDocuments in the stream must contain the key
    ``'batch_count'``.


    Parameters
    ----------
    adaptive_obj : adaptive.BaseLearner
        The recommendation engine

    independent_keys : List[str]
        The names of the independent keys in the events

    dependent_keys : List[str]
        The names of the dependent keys in the events

    max_count : int, optional
        The maximum number of measurements to take before poisoning the queue.

    queue : Queue, optional
        The communication channel for the callback to feedback to the plan.
        If not given, a new queue will be created.

    Returns
    -------
    callback : Callable[str, dict]
        This function must be subscribed to RunEngine to receive the
        document stream.

    queue : Queue
        The communication channel between the callback and the plan.  This
        is always returned (even if the user passed it in).

    """

    if queue is None:
        queue = Queue()

    poisoned = None

    def callback(name, doc):
        nonlocal poisoned
        # TODO handle multi-stream runs with more than 1 event!
        if name == "start":
            if doc["batch_count"] > max_count:
                queue.put(None)
                poisoned = True
                return
            else:
                poisoned = False

        if name == "event_page":
            if poisoned:
                return
            independent, measurement = extract_event_page(independent_keys,
                                                          dependent_keys,
                                                          payload=doc["data"])
            adaptive_obj.tell_many(independent, measurement)
            # pull the next point out of the adaptive API
            try:
                next_point = adaptive_obj.ask(1)
            except NoRecommendation:
                queue.put(None)
            else:
                queue.put({k: v for k, v in zip(independent_keys, next_point)})

    rr = RunRouter([lambda name, doc: ([callback], [])])
    return rr, queue
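A sketch of the plan-side loop such a queue is meant to feed; the names here (adaptive_loop, measure_at) are placeholders, not part of the original API.

def adaptive_loop(queue, measure_at):
    """Consume recommendations until the callback poisons the queue."""
    while True:
        recommendation = queue.get()   # {independent_key: value} or None
        if recommendation is None:     # None is the signal to stop taking data
            break
        measure_at(recommendation)     # e.g. launch a plan at the recommended point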
Example 25
accumulator = Accumulator()

if query:
    cat = databroker.catalog[args.catalog_name]
    search_results = cat.search(query)
    accumulator.update_plot = False
    for uid in search_results:
        h = cat[uid]
        for name, doc in h.canonical(fill="no"):
            accumulator(name, doc)
    accumulator.update_plot = True
    accumulator.redraw_plot()


def integration_accumulator(name, start_doc):
    print(f"analysis stage: {start_doc.get('analysis_stage')}")
    if start_doc.get("analysis_stage", "") == "integration":
        print("got integration start document")
        return [accumulator], []
    return [], []


rr = RunRouter([integration_accumulator])
d.subscribe(rr)
# force qt import
import matplotlib.backends.backend_qt5

install_qt_kicker()
print(f"NMF CONSUMER IS LISTENING ON {args.zmq_subscribe_prefix.encode()}")
d.start()
Example 26
def main():
    rr = RunRouter([factory])
    dispatcher = RemoteDispatcher('localhost:5578')
    dispatcher.subscribe(rr)
    dispatcher.start()
Example 27
# publish 0MQ messages at XPD from xf28id2-ca1:5577
# subscribe to 0MQ messages at XPD from xf28id2-ca1:5578
arg_parser.add_argument("--zmq-host", type=str, default="xf28id2-ca1")
arg_parser.add_argument("--zmq-publish-port", type=int, default=5577)
arg_parser.add_argument("--zmq-publish-prefix", type=str, default="rr")
arg_parser.add_argument("--zmq-subscribe-port", type=int, default=5578)
arg_parser.add_argument("--zmq-subscribe-prefix", type=str, default="an")

args = arg_parser.parse_args()

pprint.pprint(vars(args))

# this process listens for 0MQ messages with prefix "an" (from xpdan)
d = RemoteDispatcher(
    f"{args.zmq_host}:{args.zmq_subscribe_port}",
    prefix=args.zmq_subscribe_prefix.encode(),
    #deserializer=womp_womp,
)

zmq_publisher = zmqPublisher(f"{args.zmq_host}:{args.zmq_publish_port}",
                             prefix=args.zmq_publish_prefix.encode())
peak_location = (2.63, 2.7)
rr = RunRouter([xpdan_result_picker_factory(zmq_publisher, peak_location)])
d.subscribe(rr)

print(
    f"ROI REDUCTION CONSUMER IS LISTENING ON {args.zmq_subscribe_prefix.encode()}"
)
print(f"AND PUBLISHING ON {args.zmq_publish_prefix.encode()}")
d.start()
Example 28
hw = hw()

RE = RunEngine({})

db = Broker.named("temp")
RE.subscribe(db.insert)


def factory(name, doc):
    # Documents from each run are routed to an independent
    #   instance of BestEffortCallback.
    bec = BestEffortCallback()
    return [bec], []


rr = RunRouter([factory])
RE.subscribe(rr)


@bpp.set_run_key_decorator("run_2")
@bpp.run_decorator(md={})
def sim_plan_inner(npts):
    for j in range(npts):
        yield from bps.mov(hw.motor1, j * 0.1 + 1, hw.motor2, j * 0.2 - 2)
        yield from bps.trigger_and_read([hw.motor1, hw.motor2, hw.det2])


@bpp.set_run_key_decorator("run_1")
@bpp.run_decorator(md={})
def sim_plan_outer(npts):
    for j in range(int(npts / 2)):
Example 29

def spec_factory(name, doc):
    spec_cb = Serializer(spec_factory.directory,
                         file_prefix=spec_factory.file_prefix,
                         flush=True)
    return [spec_cb], []


spec_factory.directory = '/home/xf11id/specfiles/'
# Initialize the filename to today's date.
import time
#spec_factory.file_prefix = f'chx_spec_{time.strftime('%Y_%m_%d')}'
spec_factory.file_prefix = 'chx_spec_' + time.strftime('%Y_%m_%d')

run_router = RunRouter([spec_factory])

RE.subscribe(run_router)


def new_spec_file(name):
    """
    Set a new specfile name:
    - the path is fixed at /home/xf11id/specfiles/
    - name='xyz'; the '.spec' extension is added automatically
    Calling sequence: new_spec_file(name='xyz')
    """
    spec_factory.file_prefix = name


def reload_macro(filename):
Example 30
)

# publish 0MQ messages with xpdan's prefix
zmq_publishing_prefix = b"an"
zmq_analysis_publisher = ZmqPublisher(
    address=("127.0.0.1", 4567), prefix=zmq_publishing_prefix
)


def zmq_publish_from_analysis_factory(start_name, start_doc):
    print(
        f"zmq_publish_from_analysis_factory called with {start_name}:\n{pprint.pformat(start_doc)}\n"
    )

    def zmq_publish_from_analysis(name, doc):
        if name == "start":
            # add batch_count
            print("adding batch_count=1")
            doc["batch_count"] = 1
        print(f"analysis consumer publishing {name}:\n{pprint.pformat(doc)}\n")
        zmq_analysis_publisher(name, doc)

    return [zmq_publish_from_analysis], []


zmq_dispatcher.subscribe(RunRouter(factories=[zmq_publish_from_analysis_factory]))

print(f"NOT XPDAN CONSUMER IS LISTENING ON {zmq_listening_prefix}")
print(f"AND PUBLISHING ON {zmq_publishing_prefix}")
zmq_dispatcher.start()