Example 1
 def test_counter_subscribe(self):
     q = Queue()
     sub = Subscribe(id=20,
                     path=["counting", "counter"],
                     delta=False,
                     callback=q.put)
     self.controller.handle_request(sub)
     response = q.get(timeout=1.0)
     self.assertIsInstance(response, Update)
     assert response.id == 20
     assert response.value["typeid"] == "epics:nt/NTScalar:1.0"
     assert response.value["value"] == 0
     post = Post(id=21, path=["counting", "increment"], callback=q.put)
     self.controller.handle_request(post)
     response = q.get(timeout=1)
     self.assertIsInstance(response, Update)
     assert response.id == 20
     assert response.value["value"] == 1
     response = q.get(timeout=1)
     self.assertIsInstance(response, Return)
     assert response.id == 21
     assert response.value is None
     with self.assertRaises(TimeoutError):
         q.get(timeout=0.05)
Example 2
 def test_concurrency(self):
     q = Queue()
     # Subscribe to the whole block
     sub = Subscribe(id=0, path=["mri"], delta=True)
     sub.set_callback(q.put)
     self.c.handle_request(sub)
     # We should get first Delta through with initial value
     r = q.get().to_dict()
     assert r["id"] == 0
     assert len(r["changes"]) == 1
     assert len(r["changes"][0]) == 2
     assert r["changes"][0][0] == []
     assert r["changes"][0][1]["meta"]["label"] == "My label"
     assert r["changes"][0][1]["label"]["value"] == "My label"
     # Do a Put on the label
     put = Put(id=2, path=["mri", "label", "value"], value="New", get=True)
     put.set_callback(q.put)
     self.c.handle_request(put)
     # Check we got two updates before the return
     r = q.get().to_dict()
     assert r["id"] == 0
     assert len(r["changes"]) == 2
     assert len(r["changes"][0]) == 2
     assert r["changes"][0][0] == ["label", "value"]
     assert r["changes"][0][1] == "New"
     assert len(r["changes"][1]) == 2
     assert r["changes"][1][0] == ["label", "timeStamp"]
     r = q.get().to_dict()
     assert r["id"] == 0
     assert len(r["changes"]) == 1
     assert len(r["changes"][0]) == 2
     assert r["changes"][0][0] == ["meta", "label"]
     assert r["changes"][0][1] == "New"
     # Then the return
     r3 = q.get().to_dict()
     assert r3["id"] == 2
     assert r3["value"] == "New"
Example 3
def sleep(t):
    try:
        Queue().get(timeout=t)
    except TimeoutError:
        # that's how long we wanted to sleep for
        pass
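
The sleep() above relies on malcolm's Queue.get() raising TimeoutError when nothing arrives before the timeout expires. The same trick gives an interruptible sleep: block on a private queue and let another thread cut the wait short by putting a sentinel on it, which is how the poll loop in Example 5 listens for stop events. A minimal standalone sketch using the standard library queue.Queue (which raises queue.Empty rather than TimeoutError):

import queue
import threading


def interruptible_sleep(stop_queue, t):
    """Sleep for up to t seconds, returning early if something arrives."""
    try:
        # Returns the sentinel if another thread woke us early
        return stop_queue.get(timeout=t)
    except queue.Empty:
        # Timed out: we slept for the full t seconds
        return None


stop = queue.Queue()
# Wake the sleeper from another thread after 0.1s
threading.Timer(0.1, stop.put, args=(None,)).start()
interruptible_sleep(stop, 5.0)  # returns after ~0.1s instead of 5s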
Example 4
class WebsocketClientComms(ClientComms):
    """A class for a client to communicate with the server"""
    use_cothread = False
    # Attribute
    remote_blocks = None

    loop = None
    _conn = None
    _spawned = None
    _connected_queue = None
    # {new_id: (request, old_id)}
    _request_lookup = None
    # {Subscribe.generate_key(): Subscribe}
    _subscription_keys = {}
    _next_id = 1

    def create_attribute_models(self):
        for y in super(WebsocketClientComms, self).create_attribute_models():
            yield y
        # Create read-only attribute for the remotely reachable blocks
        meta = StringArrayMeta("Remotely reachable blocks",
                               tags=[widget("table")])
        self.remote_blocks = meta.create_attribute_model()
        yield "remoteBlocks", self.remote_blocks, None

    def do_init(self):
        super(WebsocketClientComms, self).do_init()
        self.loop = IOLoop()
        self._request_lookup = {}
        self._subscription_keys = {}
        self._connected_queue = Queue()
        root_subscribe = Subscribe(id=0,
                                   path=[".", "blocks"],
                                   callback=self._update_remote_blocks)
        self._subscription_keys[root_subscribe.generate_key()] = root_subscribe
        self._request_lookup[0] = (root_subscribe, 0)
        self.start_io_loop()

    def _update_remote_blocks(self, response):
        response = deserialize_object(response, Update)
        # TODO: should we spawn here?
        self.remote_blocks.set_value(response.value)

    def start_io_loop(self):
        if self._spawned is None:
            self._conn = None
            self.loop.add_callback(self.recv_loop)
            self._spawned = self.spawn(self.loop.start)
            try:
                self._connected_queue.get(self.params.connectTimeout)
            except TimeoutError:
                self.stop_io_loop()
                raise

    def stop_io_loop(self):
        if self.loop:
            self.loop.stop()
            self._spawned.wait(timeout=10)
            self._spawned = None

    @gen.coroutine
    def recv_loop(self):
        url = "ws://%(hostname)s:%(port)d/ws" % self.params
        self._conn = yield websocket_connect(
            url, self.loop, connect_timeout=self.params.connectTimeout - 0.5)
        self._connected_queue.put(True)
        for request in self._subscription_keys.values():
            self._send_request(request)
        while True:
            message = yield self._conn.read_message()
            if message is None:
                for request, old_id in self._request_lookup.values():
                    if not isinstance(request, Subscribe):
                        # Respond with an error
                        response = Error(old_id, message="Server disconnected")
                        request.callback(response)
                self.spawn(self._report_fault)
                return
            self.on_message(message)

    def _report_fault(self):
        with self._lock:
            self.transition(self.stateSet.FAULT, "Server disconnected")
            self.stop_io_loop()

    def do_disable(self):
        super(WebsocketClientComms, self).do_disable()
        self.stop_io_loop()

    def do_reset(self):
        super(WebsocketClientComms, self).do_reset()
        self.start_io_loop()

    def on_message(self, message):
        """Pass response from server to process receive queue

        Args:
            message(str): Received message
        """
        try:
            self.log.debug("Got message %s", message)
            d = json_decode(message)
            response = deserialize_object(d, Response)
            if isinstance(response, (Return, Error)):
                request, old_id = self._request_lookup.pop(response.id)
                if request.generate_key() in self._subscription_keys:
                    self._subscription_keys.pop(request.generate_key())
            else:
                request, old_id = self._request_lookup[response.id]
            response.set_id(old_id)
            # TODO: should we spawn here?
            request.callback(response)
        except Exception:
            # If we don't catch the exception here, tornado will spew odd
            # error messages about 'HTTPRequest' object has no attribute 'path'
            self.log.exception("on_message(%r) failed", message)

    def send_to_server(self, request):
        """Dispatch a request to the server

        Args:
            request (Request): The message to pass to the server
        """
        self.loop.add_callback(self._send_to_server, request)

    def _send_to_server(self, request):
        if isinstance(request, Unsubscribe):
            # If we have an unsubscribe, send it with the same id as the
            # subscribe
            subscribe = self._subscription_keys.pop(request.generate_key())
            new_id = subscribe.id
        else:
            if isinstance(request, Subscribe):
                # If we have an subscribe, store it so we can look it up
                self._subscription_keys[request.generate_key()] = request
            new_id = self._next_id
            self._next_id += 1
            self._request_lookup[new_id] = (request, request.id)
        request.set_id(new_id)
        self._send_request(request)

    def _send_request(self, request):
        message = json_encode(request)
        self.log.debug("Sending message %s", message)
        self._conn.write_message(message)
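
The id bookkeeping above is the subtle part: the client assigns its own ids to outgoing requests so they are unique on the wire, remembers the caller's original id in _request_lookup, and swaps it back in on_message() before invoking the callback. A minimal sketch of that round trip, with plain dicts standing in for the Request/Response objects (hypothetical names, not the malcolm API):

import itertools

request_lookup = {}  # {new_id: (request, old_id)}
ids = itertools.count(1)


def send(request):
    new_id = next(ids)
    request_lookup[new_id] = (request, request["id"])
    request["id"] = new_id  # the server only ever sees our ids
    return request


def receive(response):
    request, old_id = request_lookup.pop(response["id"])
    response["id"] = old_id  # the caller gets its original id back
    return response


sent = send({"id": 42, "path": ["hello", "greet"]})
print(sent["id"])  # 1
print(receive({"id": 1, "value": "ok"}))  # {'id': 42, 'value': 'ok'}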
Example 5
class PandAManagerController(builtin.controllers.ManagerController):
    def __init__(
        self,
        mri: AMri,
        config_dir: AConfigDir,
        hostname: AHostname = "localhost",
        port: APort = 8888,
        doc_url_base: ADocUrlBase = DOC_URL_BASE,
        poll_period: APollPeriod = 0.1,
        template_designs: ATemplateDesigns = "",
        initial_design: AInitialDesign = "",
        use_git: AUseGit = True,
        description: ADescription = "",
    ) -> None:
        super().__init__(
            mri=mri,
            config_dir=config_dir,
            template_designs=template_designs,
            initial_design=initial_design,
            use_git=use_git,
            description=description,
        )
        self._poll_period = poll_period
        self._doc_url_base = doc_url_base
        # All the bit_out fields and their values
        # {block_name.field_name: value}
        self._bit_outs: Dict[str, bool] = {}
        # The bit_out field values that need toggling since the last handle
        # {block_name.field_name: value}
        self._bit_out_changes: Dict[str, bool] = {}
        # The fields that the busses part needs to know about
        # {block_name.field_name[.subfield_name]}
        self._bus_fields: Set[str] = set()
        # The child controllers we have created
        self._child_controllers: Dict[str, PandABlockController] = {}
        # The PandABlock client that does the comms
        self._client = PandABlocksClient(hostname, port, Queue)
        # Filled in on reset
        self._stop_queue = None
        self._poll_spawned = None
        # Poll period reporting
        self.last_poll_period = NumberMeta(
            "float64",
            "The time between the last 2 polls of the hardware",
            tags=[Widget.TEXTUPDATE.tag()],
            display=Display(units="s", precision=3),
        ).create_attribute_model(poll_period)
        self.field_registry.add_attribute_model("lastPollPeriod",
                                                self.last_poll_period)
        # Bus tables
        self.busses: PandABussesPart = self._make_busses()
        self.add_part(self.busses)

    def do_init(self):
        # start the poll loop and make block parts first to fill in our parts
        # before calling _set_block_children()
        self.start_poll_loop()
        super().do_init()

    def start_poll_loop(self):
        # queue to listen for stop events
        if not self._client.started:
            self._stop_queue = Queue()
            if self._client.started:
                self._client.stop()
            self._client.start(self.process.spawn, socket)
        if not self._child_controllers:
            self._make_child_controllers()
        if self._poll_spawned is None:
            self._poll_spawned = self.process.spawn(self._poll_loop)

    def do_disable(self):
        super().do_disable()
        self.stop_poll_loop()

    def do_reset(self):
        self.start_poll_loop()
        super().do_reset()

    def _poll_loop(self):
        """At self.poll_period poll for changes"""
        last_poll_update = time.time()
        next_poll = time.time() + self._poll_period
        try:
            while True:
                # Make sure we don't consume all the CPU: allow ourselves to be
                # active for at most 50% of the poll period, so sleep for at
                # least 50% of the poll period
                min_sleep = self._poll_period * 0.5
                sleep_for = next_poll - time.time()
                if sleep_for < min_sleep:
                    # Going too fast, slow down a bit
                    last_poll_period = self._poll_period + min_sleep - sleep_for
                    sleep_for = min_sleep
                else:
                    last_poll_period = self._poll_period
                try:
                    # If told to stop, we will get something here and return
                    return self._stop_queue.get(timeout=sleep_for)
                except TimeoutError:
                    # No stop, no problem
                    pass
                # Poll for changes
                self.handle_changes(self._client.get_changes())
                if (last_poll_period != self.last_poll_period.value
                        and next_poll - last_poll_update > POLL_PERIOD_REPORT):
                    self.last_poll_period.set_value(last_poll_period)
                    last_poll_update = next_poll
                next_poll += last_poll_period
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def stop_poll_loop(self):
        if self._poll_spawned:
            self._stop_queue.put(None)
            self._poll_spawned.wait()
            self._poll_spawned = None
        if self._client.started:
            self._client.stop()

    def _make_child_controllers(self):
        self._child_controllers = {}
        controllers = []
        child_parts = []
        pos_names = []
        blocks_data = self._client.get_blocks_data()
        for block_rootname, block_data in blocks_data.items():
            block_names = []
            if block_data.number == 1:
                block_names.append(block_rootname)
            else:
                for i in range(block_data.number):
                    block_names.append("%s%d" % (block_rootname, i + 1))
            for block_name in block_names:
                # Look through the BlockData for things we are interested in
                for field_name, field_data in block_data.fields.items():
                    if field_data.field_type == "pos_out":
                        pos_names.append("%s.%s" % (block_name, field_name))

                # Make the child controller and add it to the process
                controller, child_part = self._make_child_block(
                    block_name, block_data)
                controllers += [controller]
                child_parts += [child_part]
                self._child_controllers[block_name] = controller
                # If there is only one, make an alias with "1" appended for
                # *METADATA.LABEL lookup
                if block_data.number == 1:
                    self._child_controllers[block_name + "1"] = controller

        self.process.add_controllers(controllers)
        for part in child_parts:
            self.add_part(part)

        # Create the busses from their initial sets of values
        pcap_bit_fields = self._client.get_pcap_bits_fields()
        self.busses.create_busses(pcap_bit_fields, pos_names)
        # Handle the pos_names that the busses part needs
        self._bus_fields = set(pos_names)
        for pos_name in pos_names:
            for suffix in ("CAPTURE", "UNITS", "SCALE", "OFFSET"):
                self._bus_fields.add("%s.%s" % (pos_name, suffix))
        # Handle the bit_outs, keeping a list for toggling and adding them
        # to the set of things that the busses need
        self._bit_outs = {k: 0 for k in self.busses.bits.value.name}
        self._bit_out_changes = {}
        self._bus_fields |= set(self._bit_outs)
        for capture_field in pcap_bit_fields:
            self._bus_fields.add(capture_field)
        # Handle the initial set of changes to get an initial value
        self.handle_changes(self._client.get_changes())
        # Then once more to let bit_outs toggle back
        self.handle_changes(())
        assert not self._bit_out_changes, (
            "There are still bit_out changes %s" % self._bit_out_changes)

    def _make_busses(self) -> PandABussesPart:
        return PandABussesPart("busses", self._client)

    def _make_child_block(self, block_name, block_data):
        controller = PandABlockController(self._client, self.mri, block_name,
                                          block_data, self._doc_url_base)
        if block_name == "PCAP":
            controller.add_part(
                PandAActionPart(self._client, "*PCAP", "ARM",
                                "Arm position capture", []))
            controller.add_part(
                PandAActionPart(self._client, "*PCAP", "DISARM",
                                "Disarm position capture", []))
        child_part = builtin.parts.ChildPart(name=block_name,
                                             mri=controller.mri,
                                             stateful=False)
        return controller, child_part

    def _handle_change(self, k, v, bus_changes, block_changes,
                       bit_out_changes):
        # Handle bit changes
        try:
            current_v = self._bit_outs[k]
        except KeyError:
            # Not a bit
            pass
        else:
            # Convert to a boolean
            v = bool(int(v))
            try:
                changed_to = bit_out_changes[k]
            except KeyError:
                # We didn't already make a change
                if v == current_v:
                    # Value is the same, store the negation, and set it
                    # back next time
                    self._bit_out_changes[k] = v
                    v = not v
            else:
                # Already made a change, defer this value til next time
                # if it is different
                if changed_to != v:
                    self._bit_out_changes[k] = v
                return
            self._bit_outs[k] = v

        # Notify the bus tables if they need to know
        if k in self._bus_fields:
            bus_changes[k] = v

        # Add to the relevant Block changes dict
        block_name, field_name = k.split(".", 1)
        if block_name == "*METADATA":
            if field_name.startswith("LABEL_"):
                field_name, block_name = field_name.split("_", 1)
            else:
                # Don't support any non-label metadata fields at the moment
                return
        block_changes.setdefault(block_name, {})[field_name] = v

    def handle_changes(self, changes: Sequence[Tuple[str, str]]) -> None:
        ts = TimeStamp()
        # {block_name: {field_name: field_value}}
        block_changes: Dict[str, Any] = {}
        # {full_field: field_value}
        bus_changes = {}

        # Process bit outs that need changing
        bit_out_changes = self._bit_out_changes
        self._bit_out_changes = {}
        for k, v in bit_out_changes.items():
            self._bit_outs[k] = v
            bus_changes[k] = v
            block_name, field_name = k.split(".")
            block_changes.setdefault(block_name, {})[field_name] = v

        # Work out which change is needed for which block
        for key, value in changes:
            self._handle_change(key, value, bus_changes, block_changes,
                                bit_out_changes)

        # Notify the Blocks that they need to handle these changes
        if bus_changes:
            self.busses.handle_changes(bus_changes, ts)
        for block_name, block_changes_values in block_changes.items():
            self._child_controllers[block_name].handle_changes(
                block_changes_values, ts)
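
The throttling in _poll_loop() above is worth spelling out: each iteration sleeps until the next scheduled poll, but if handling the previous changes took more than half the poll period, the sleep is clamped to half a period and the reported period stretches accordingly, so the loop stays active for at most roughly half of each period. A small sketch of just that arithmetic, using plain floats and hypothetical timings:

poll_period = 0.1
min_sleep = poll_period * 0.5


def next_sleep(next_poll, now):
    """Return (seconds to sleep, effective poll period) for one iteration."""
    sleep_for = next_poll - now
    if sleep_for < min_sleep:
        # Going too fast: slow down and report the stretched period
        return min_sleep, poll_period + (min_sleep - sleep_for)
    return sleep_for, poll_period


print(next_sleep(1.10, 1.00))  # ~(0.10, 0.10): on schedule, sleep a full period
print(next_sleep(1.10, 1.08))  # ~(0.05, 0.13): handling overran, so stretch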
Example 6
 def make_queue(self):
     return Queue(user_facing=True)
Example 7
    def test_block_fields_adder(self):
        fields = OrderedDict()
        block_data = BlockData(2, "Adder description", fields)
        fields["INPA"] = FieldData("pos_mux", "", "Input A", ["A.OUT", "B.OUT"])
        fields["INPB"] = FieldData("pos_mux", "", "Input B", ["A.OUT", "B.OUT"])
        fields["DIVIDE"] = FieldData(
            "param", "enum", "Divide output", ["/1", "/2", "/4"]
        )
        fields["OUT"] = FieldData("pos_out", "", "Output", ["No", "Capture"])
        fields["HEALTH"] = FieldData("read", "enum", "What's wrong", ["OK", "Very Bad"])

        o = PandABlockController(self.client, "MRI", "ADDER1", block_data, "/docs")
        self.process.add_controller(o)
        b = self.process.block_view("MRI:ADDER1")

        assert list(b) == [
            "meta",
            "health",
            "icon",
            "label",
            "help",
            "inputs",
            "inpa",
            "inpb",
            "parameters",
            "divide",
            "outputs",
            "out",
        ]

        group = b.inputs
        assert group.meta.tags == ["widget:group", "config:1"]

        inpa = b.inpa
        assert inpa.meta.writeable is True
        assert inpa.meta.typeid == ChoiceMeta.typeid
        assert inpa.meta.tags == [
            "group:inputs",
            "sinkPort:int32:ZERO",
            "widget:combo",
            "config:1",
        ]
        assert inpa.meta.choices == ["A.OUT", "B.OUT"]
        inpa.put_value("A.OUT")
        self.client.set_field.assert_called_once_with("ADDER1", "INPA", "A.OUT")
        self.client.reset_mock()

        divide = b.divide
        assert divide.meta.writeable is True
        assert divide.meta.typeid == ChoiceMeta.typeid
        assert divide.meta.tags == ["group:parameters", "widget:combo", "config:1"]
        assert divide.meta.choices == ["/1", "/2", "/4"]

        out = b.out
        assert out.meta.writeable is False
        assert out.meta.typeid == NumberMeta.typeid
        assert out.meta.dtype == "int32"
        assert out.meta.tags == [
            "group:outputs",
            "sourcePort:int32:ADDER1.OUT",
            "widget:textupdate",
        ]

        queue = Queue()
        subscribe = Subscribe(path=["MRI:ADDER1", "out"], delta=True)
        subscribe.set_callback(queue.put)
        o.handle_request(subscribe)
        delta = queue.get(timeout=1)
        assert delta.changes[0][1]["value"] == 0

        ts = TimeStamp()
        o.handle_changes({"OUT": "145"}, ts)
        delta = queue.get(timeout=1)
        assert delta.changes == [
            [["value"], 145],
            [["timeStamp"], ts],
        ]

        subscribe = Subscribe(path=["MRI:ADDER1", "health"], delta=True)
        subscribe.set_callback(queue.put)
        o.handle_request(subscribe)
        delta = queue.get(timeout=1)
        assert delta.changes[0][1]["value"] == "OK"

        ts = TimeStamp()
        o.handle_changes({"HEALTH": "Very Bad"}, ts)
        delta = queue.get(timeout=1)
        assert delta.changes == [
            [["value"], "Very Bad"],
            [["alarm"], Alarm.major("Very Bad")],
            [["timeStamp"], ts],
        ]
        o.handle_changes({"HEALTH": "OK"}, ts)
        delta = queue.get(timeout=1)
        assert delta.changes == [
            [["value"], "OK"],
            [["alarm"], Alarm.ok],
            [["timeStamp"], ts],
        ]
Example 8
class RunnableController(ManagerController):
    """RunnableDevice implementer that also exposes GUI for child parts"""
    # The stateSet that this controller implements
    stateSet = ss()

    Validate = Hook()
    """Called at validate() to check parameters are valid

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part validate()
            method_takes() decorator

    Returns:
        [`ParameterTweakInfo`] - any parameter tweaks that have occurred
            to make them compatible with this part. If any are returned,
            Validate will be re-run with the modified parameters.
    """

    ReportStatus = Hook()
    """Called before Validate, Configure, PostRunArmed and Seek hooks to report
    the current configuration of all parts

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks

    Returns:
        [`Info`] - any configuration Info objects relevant to other parts
    """

    Configure = Hook()
    """Called at configure() to configure child block for a run

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        completed_steps (int): Number of steps already completed
        steps_to_do (int): Number of steps we should configure for
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part configure()
            method_takes() decorator

    Returns:
        [`Info`] - any Info objects that need to be passed to other parts for
            storing in attributes
    """

    PostConfigure = Hook()
    """Called at the end of configure() to store configuration info calculated
     in the Configure hook

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        part_info (dict): {part_name: [Info]} returned from Configure hook
    """

    Run = Hook()
    """Called at run() to start the configured steps running

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        update_completed_steps (callable): If part can report progress, this
            part should call update_completed_steps(completed_steps, self) with
            the integer step value each time progress is updated
    """

    PostRunArmed = Hook()
    """Called at the end of run() when there are more steps to be run

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        completed_steps (int): Number of steps already completed
        steps_to_do (int): Number of steps we should configure for
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part configure()
            method_takes() decorator
    """

    PostRunReady = Hook()
    """Called at the end of run() when there are no more steps to be run

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
    """

    Pause = Hook()
    """Called at pause() to pause the current scan before Seek is called

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
    """

    Seek = Hook()
    """Called at seek() or at the end of pause() to reconfigure for a different
    number of completed_steps

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        completed_steps (int): Number of steps already completed
        steps_to_do (int): Number of steps we should configure for
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part configure()
            method_takes() decorator
    """

    Resume = Hook()
    """Called at resume() to continue a paused scan

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        update_completed_steps (callable): If part can report progress, this
            part should call update_completed_steps(completed_steps, self) with
            the integer step value each time progress is updated
    """

    Abort = Hook()
    """Called at abort() to stop the current scan

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
    """

    # Attributes
    completed_steps = None
    configured_steps = None
    total_steps = None
    axes_to_move = None

    # Params passed to configure()
    configure_params = None
    
    # Shared contexts between Configure, Run, Pause, Seek, Resume
    part_contexts = None

    # Configure method_models
    # {part: configure_method_model}
    configure_method_models = None

    # Stored for pause
    steps_per_run = 0

    # Progress reporting dict
    # {part: completed_steps for that part}
    progress_updates = None

    # Queue so that do_run can wait to see why it was aborted and resume if
    # needed
    resume_queue = None

    # Queue so we can wait for aborts to complete
    abort_queue = None

    @method_writeable_in(ss.FAULT, ss.DISABLED, ss.ABORTED, ss.ARMED)
    def reset(self):
        # Override reset to work from aborted too
        super(RunnableController, self).reset()

    def create_attribute_models(self):
        for data in super(RunnableController, self).create_attribute_models():
            yield data
        # Create sometimes writeable attribute for the current completed scan
        # step
        completed_steps_meta = NumberMeta(
            "int32", "Readback of number of scan steps",
            tags=[widget("textinput")])
        completed_steps_meta.set_writeable_in(ss.PAUSED, ss.ARMED)
        self.completed_steps = completed_steps_meta.create_attribute_model(0)
        yield "completedSteps", self.completed_steps, self.set_completed_steps
        # Create read-only attribute for the number of configured scan steps
        configured_steps_meta = NumberMeta(
            "int32", "Number of steps currently configured",
            tags=[widget("textupdate")])
        self.configured_steps = configured_steps_meta.create_attribute_model(0)
        yield "configuredSteps", self.configured_steps, None
        # Create read-only attribute for the total number of scan steps
        total_steps_meta = NumberMeta(
            "int32", "Readback of number of scan steps",
            tags=[widget("textupdate")])
        self.total_steps = total_steps_meta.create_attribute_model(0)
        yield "totalSteps", self.total_steps, None
        # Create sometimes writeable attribute for the default axis names
        axes_to_move_meta = StringArrayMeta(
            "Default axis names to scan for configure()",
            tags=[widget("table"), config()])
        axes_to_move_meta.set_writeable_in(ss.READY)
        self.axes_to_move = axes_to_move_meta.create_attribute_model(
            self.params.axesToMove)
        yield "axesToMove", self.axes_to_move, self.set_axes_to_move

    def do_init(self):
        self.part_contexts = {}
        # Populate configure args from any child method hooked to Configure.
        # If we have RunnableChildParts, they will call update_configure_args
        # during do_init
        self.configure_method_models = {}
        # Look for all parts that hook into Configure
        for part, func_name in self._hooked_func_names[self.Configure].items():
            if func_name in part.method_models:
                self.update_configure_args(part, part.method_models[func_name])
        super(RunnableController, self).do_init()

    def do_reset(self):
        super(RunnableController, self).do_reset()
        self.configured_steps.set_value(0)
        self.completed_steps.set_value(0)
        self.total_steps.set_value(0)

    def update_configure_args(self, part, configure_model):
        """Tell controller part needs different things passed to Configure"""
        with self.changes_squashed:
            # Update the dict
            self.configure_method_models[part] = configure_model
            method_models = list(self.configure_method_models.values())

            # Update takes with the things we need
            default_configure = MethodModel.from_dict(
                RunnableController.configure.MethodModel.to_dict())
            default_configure.defaults["axesToMove"] = self.axes_to_move.value
            method_models.append(default_configure)

            # Decorate validate and configure with the sum of its parts
            self._block.validate.recreate_from_others(method_models)
            self._block.validate.set_returns(self._block.validate.takes)
            self._block.configure.recreate_from_others(method_models)

    def set_axes_to_move(self, value):
        self.axes_to_move.set_value(value)

    @method_takes(*configure_args)
    @method_returns(*validate_args)
    def validate(self, params, returns):
        """Validate configuration parameters and return validated parameters.

        Doesn't take device state into account so can be run in any state
        """
        iterations = 10
        # Make some tasks just for validate
        part_contexts = self.create_part_contexts()
        # Get any status from all parts
        status_part_info = self.run_hook(self.ReportStatus, part_contexts)
        while iterations > 0:
            # Try up to 10 times to get a valid set of parameters
            iterations -= 1
            # Validate the params with all the parts
            validate_part_info = self.run_hook(
                self.Validate, part_contexts, status_part_info, **params)
            tweaks = ParameterTweakInfo.filter_values(validate_part_info)
            if tweaks:
                for tweak in tweaks:
                    params[tweak.parameter] = tweak.value
                    self.log.debug(
                        "Tweaking %s to %s", tweak.parameter, tweak.value)
            else:
                # Consistent set, just return the params
                return params
        raise ValueError("Could not get a consistent set of parameters")

    def abortable_transition(self, state):
        with self._lock:
            # We might have been aborted just now, so this will fail
            # with an AbortedError if we were
            self.part_contexts[self].sleep(0)
            self.transition(state)

    @method_takes(*configure_args)
    @method_writeable_in(ss.READY)
    def configure(self, params):
        """Validate the params then configure the device ready for run().

        Try to prepare the device as much as possible so that run() is quick to
        start, this may involve potentially long running activities like moving
        motors.

        Normally it will return in Armed state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        self.validate(params, params)
        try:
            self.transition(ss.CONFIGURING)
            self.do_configure(params)
            self.abortable_transition(ss.ARMED)
        except AbortedError:
            self.abort_queue.put(None)
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_configure(self, params):
        # These are the part tasks that abort() and pause() will operate on
        self.part_contexts = self.create_part_contexts()
        # Tell these contexts to notify their parts about things they modify
        # so it doesn't screw up the modified LED
        for part, context in self.part_contexts.items():
            context.set_notify_dispatch_request(part.notify_dispatch_request)
        # Add one for ourselves too so we can be aborted
        self.part_contexts[self] = Context(self.process)
        # Store the params for use in seek()
        self.configure_params = params
        # This will calculate what we need from the generator, possibly a long
        # call
        params.generator.prepare()
        # Set the steps attributes that we will do across many run() calls
        self.total_steps.set_value(params.generator.size)
        self.completed_steps.set_value(0)
        self.configured_steps.set_value(0)
        # TODO: We can be cleverer about this and support a different number
        # of steps per run for each run by examining the generator structure
        self.steps_per_run = self._get_steps_per_run(
            params.generator, params.axesToMove)
        # Get any status from all parts
        part_info = self.run_hook(self.ReportStatus, self.part_contexts)
        # Run the configure command on all parts, passing them info from
        # ReportStatus. Parts should return any reporting info for PostConfigure
        completed_steps = 0
        steps_to_do = self.steps_per_run
        part_info = self.run_hook(
            self.Configure, self.part_contexts, completed_steps, steps_to_do,
            part_info, **self.configure_params)
        # Take configuration info and reflect it as attribute updates
        self.run_hook(self.PostConfigure, self.part_contexts, part_info)
        # Update the completed and configured steps
        self.configured_steps.set_value(steps_to_do)
        # Reset the progress of all child parts
        self.progress_updates = {}
        self.resume_queue = Queue()

    def _get_steps_per_run(self, generator, axes_to_move):
        steps = 1
        axes_set = set(axes_to_move)
        for dim in reversed(generator.dimensions):
            # If the axes_set is empty then we are done
            if not axes_set:
                break
            # Consume the axes that this generator scans
            for axis in dim.axes:
                assert axis in axes_set, \
                    "Axis %s is not in %s" % (axis, axes_to_move)
                axes_set.remove(axis)
            # Now multiply by the dimensions to get the number of steps
            steps *= dim.size
        return steps

    @method_writeable_in(ss.ARMED)
    def run(self):
        """Run a device where configure() has already be called

        Normally it will return in Ready state. If setup for multiple-runs with
        a single configure() then it will return in Armed state. If the user
        aborts then it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        if self.configured_steps.value < self.total_steps.value:
            next_state = ss.ARMED
        else:
            next_state = ss.READY
        try:
            self.transition(ss.RUNNING)
            hook = self.Run
            going = True
            while going:
                try:
                    self.do_run(hook)
                except AbortedError:
                    self.abort_queue.put(None)
                    # Wait for a response on the resume_queue
                    should_resume = self.resume_queue.get()
                    if should_resume:
                        # we need to resume
                        hook = self.Resume
                        self.log.debug("Resuming run")
                    else:
                        # we don't need to resume, just drop out
                        raise
                else:
                    going = False
            self.abortable_transition(next_state)
        except AbortedError:
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_run(self, hook):
        self.run_hook(hook, self.part_contexts, self.update_completed_steps)
        self.abortable_transition(ss.POSTRUN)
        completed_steps = self.configured_steps.value
        if completed_steps < self.total_steps.value:
            steps_to_do = self.steps_per_run
            part_info = self.run_hook(self.ReportStatus, self.part_contexts)
            self.completed_steps.set_value(completed_steps)
            self.run_hook(
                self.PostRunArmed, self.part_contexts, completed_steps,
                steps_to_do, part_info, **self.configure_params)
            self.configured_steps.set_value(completed_steps + steps_to_do)
        else:
            self.run_hook(self.PostRunReady, self.part_contexts)

    def update_completed_steps(self, completed_steps, part):
        with self._lock:
            # Update
            self.progress_updates[part] = completed_steps
            min_completed_steps = min(self.progress_updates.values())
            if min_completed_steps > self.completed_steps.value:
                self.completed_steps.set_value(min_completed_steps)

    @method_writeable_in(
        ss.READY, ss.CONFIGURING, ss.ARMED, ss.RUNNING, ss.POSTRUN, ss.PAUSED,
        ss.SEEKING)
    def abort(self):
        """Abort the current operation and block until aborted

        Normally it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        # Tell _call_do_run not to resume
        if self.resume_queue:
            self.resume_queue.put(False)
        self.try_aborting_function(ss.ABORTING, ss.ABORTED, self.do_abort)

    def do_abort(self):
        self.run_hook(self.Abort, self.create_part_contexts())

    def try_aborting_function(self, start_state, end_state, func, *args):
        try:
            # To make the running function fail we need to stop any running
            # contexts (if running a hook) or make transition() fail with
            # AbortedError. Both of these are accomplished here
            with self._lock:
                original_state = self.state.value
                self.abort_queue = Queue()
                self.transition(start_state)
                for context in self.part_contexts.values():
                    context.stop()
            if original_state not in (ss.READY, ss.ARMED, ss.PAUSED):
                # Something was running, let it finish aborting
                try:
                    self.abort_queue.get(timeout=ABORT_TIMEOUT)
                except TimeoutError:
                    self.log.warning("Timeout waiting while %s" % start_state)
            with self._lock:
                # Now we've waited for a while we can remove the error state
                # for transition in case a hook triggered it rather than a
                # transition
                self.part_contexts[self].ignore_stops_before_now()
            func(*args)
            self.abortable_transition(end_state)
        except AbortedError:
            self.abort_queue.put(None)
            raise
        except Exception as e:  # pylint:disable=broad-except
            self.go_to_error_state(e)
            raise

    def set_completed_steps(self, completed_steps):
        """Seek a Armed or Paused scan back to another value

        Normally it will return in the state it started in. If the user aborts
        then it will return in Aborted state. If something goes wrong it will
        return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        call_with_params(self.pause, completedSteps=completed_steps)

    @method_writeable_in(ss.ARMED, ss.PAUSED, ss.RUNNING)
    @method_takes("completedSteps", NumberMeta(
        "int32", "Step to mark as the last completed step, -1 for current"), -1)
    def pause(self, params):
        """Pause a run() so that resume() can be called later.

        The original call to run() will not be interrupted by pause(), it will
        wait until the scan completes or is aborted.

        Normally it will return in Paused state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        current_state = self.state.value
        if params.completedSteps < 0:
            completed_steps = self.completed_steps.value
        else:
            completed_steps = params.completedSteps
        if current_state == ss.RUNNING:
            next_state = ss.PAUSED
        else:
            next_state = current_state
        assert completed_steps < self.total_steps.value, \
            "Cannot seek to after the end of the scan"
        self.try_aborting_function(
            ss.SEEKING, next_state, self.do_pause, completed_steps)

    def do_pause(self, completed_steps):
        self.run_hook(self.Pause, self.create_part_contexts())
        in_run_steps = completed_steps % self.steps_per_run
        steps_to_do = self.steps_per_run - in_run_steps
        part_info = self.run_hook(self.ReportStatus, self.part_contexts)
        self.completed_steps.set_value(completed_steps)
        self.run_hook(
            self.Seek, self.part_contexts, completed_steps,
            steps_to_do, part_info, **self.configure_params)
        self.configured_steps.set_value(completed_steps + steps_to_do)

    @method_writeable_in(ss.PAUSED)
    def resume(self):
        """Resume a paused scan.

        Normally it will return in Running state. If something goes wrong it
        will return in Fault state.
        """
        self.transition(ss.RUNNING)
        self.resume_queue.put(True)
        # self.run will now take over

    def do_disable(self):
        # Abort anything that is currently running, but don't wait
        for context in self.part_contexts.values():
            context.stop()
        if self.resume_queue:
            self.resume_queue.put(False)
        super(RunnableController, self).do_disable()
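
The step bookkeeping above hinges on _get_steps_per_run(): walking the generator dimensions from the innermost outwards, it multiplies up the dimension sizes until every axis in axesToMove has been consumed (the assert guards against a dimension mixing moveable and non-moveable axes); the result is how many steps each run() call covers. A worked sketch with a stand-in Dimension type (hypothetical, not the scanpointgenerator API):

from collections import namedtuple

Dimension = namedtuple("Dimension", "axes size")


def steps_per_run(dimensions, axes_to_move):
    steps = 1
    axes_set = set(axes_to_move)
    for dim in reversed(dimensions):
        # Stop once all the axes we are allowed to move have been consumed
        if not axes_set:
            break
        for axis in dim.axes:
            assert axis in axes_set, "Axis %s is not in %s" % (axis, axes_to_move)
            axes_set.remove(axis)
        steps *= dim.size
    return steps


# A 5x10 grid scan where only the fast "x" axis moves within a run():
dims = [Dimension(axes=["y"], size=5), Dimension(axes=["x"], size=10)]
print(steps_per_run(dims, ["x"]))  # 10: one inner row per run()
print(steps_per_run(dims, ["x", "y"]))  # 50: the whole scan in one run()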
Example 9
 def do_init(self):
     super(PvaClientComms, self).do_init()
     self._monitors = {}
     self._send_queue = Queue()
Example 10
    def test_handle_request(self):
        q = Queue()

        request = Get(id=41, path=["mri", "myAttribute"])
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=0.1)
        self.assertIsInstance(response, Return)
        assert response.id == 41
        assert response.value["value"] == "hello_block"
        self.part.my_attribute.meta.writeable = False
        request = Put(
            id=42, path=["mri", "myAttribute"], value="hello_block2", get=True
        )
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=0.1)
        self.assertIsInstance(response, Error)  # not writeable
        assert response.id == 42

        self.part.my_attribute.meta.writeable = True
        self.o.handle_request(request)
        response = q.get(timeout=0.1)
        self.assertIsInstance(response, Return)
        assert response.id == 42
        assert response.value == "hello_block2"

        request = Post(id=43, path=["mri", "method"])
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=0.1)
        self.assertIsInstance(response, Return)
        assert response.id == 43
        assert response.value == "world"

        # cover the controller._handle_post path for parameters
        request = Post(id=43, path=["mri", "method"], parameters={"dummy": 1})
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=0.1)
        self.assertIsInstance(response, Error)
        assert response.id == 43
        assert (
            str(response.message)
            == "Given keys ['dummy'], some of which aren't in allowed keys []"
        )

        request = Subscribe(id=44, path=["mri", "myAttribute"], delta=False)
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=0.1)
        self.assertIsInstance(response, Update)
        assert response.id == 44
        assert response.value["typeid"] == "epics:nt/NTScalar:1.0"
        assert response.value["value"] == "hello_block2"

        request = Unsubscribe(id=44)
        request.set_callback(q.put)
        self.o.handle_request(request)
        response = q.get(timeout=0.1)
        self.assertIsInstance(response, Return)
        assert response.id == 44
Example 11
class WebsocketClientComms(builtin.controllers.ClientComms):
    """A class for a client to communicate with the server"""
    def __init__(
        self,
        mri,  # type: builtin.controllers.AMri
        hostname="localhost",  # type: AHostname
        port=8008,  # type: APort
        connect_timeout=DEFAULT_TIMEOUT  # type: AConnectTimeout
    ):
        # type: (...) -> None
        super(WebsocketClientComms, self).__init__(mri)
        self.hostname = hostname
        self.port = port
        self.connect_timeout = connect_timeout
        self._connected_queue = Queue()
        # {new_id: request}
        self._request_lookup = {}  # type: Dict[int, Request]
        self._next_id = 1
        self._conn = None  # type: WebSocketClientConnection
        # Create read-only attribute for the remotely reachable blocks
        self.remote_blocks = StringArrayMeta("Remotely reachable blocks",
                                             tags=[Widget.TEXTINPUT.tag()
                                                   ]).create_attribute_model()
        self.field_registry.add_attribute_model("remoteBlocks",
                                                self.remote_blocks)

    def do_init(self):
        super(WebsocketClientComms, self).do_init()
        self._start_client()

    def _start_client(self):
        # Called from cothread
        if self._conn is None:
            IOLoopHelper.call(self.recv_loop)
            self._connected_queue.get(timeout=self.connect_timeout)
            root_subscribe = Subscribe(path=[".", "blocks", "value"])
            root_subscribe.set_callback(self._update_remote_blocks)
            IOLoopHelper.call(self._send_request, root_subscribe)

    @gen.coroutine
    def recv_loop(self):
        # Called from tornado
        url = "ws://%s:%d/ws" % (self.hostname, self.port)
        self._conn = yield websocket_connect(
            url, connect_timeout=self.connect_timeout - 0.5)
        cothread.Callback(self._connected_queue.put, None)
        while True:
            message = yield self._conn.read_message()
            if message is None:
                self._conn = None
                cothread.Callback(self._report_fault)
                return
            self.on_message(message)

    def on_message(self, message):
        """Pass response from server to process receive queue

        Args:
            message(str): Received message
        """
        # Called in tornado loop
        try:
            self.log.debug("Got message %s", message)
            d = json_decode(message)
            response = deserialize_object(d, Response)
            if isinstance(response, (Return, Error)):
                request = self._request_lookup.pop(response.id)
                if isinstance(response, Error):
                    # Make the message an exception so it can be raised
                    response.message = ResponseError(response.message)
            else:
                request = self._request_lookup[response.id]
            # Transfer the work of the callback to cothread
            cothread.Callback(request.callback, response)
        except Exception:
            # If we don't catch the exception here, tornado will spew odd
            # error messages about 'HTTPRequest' object has no attribute 'path'
            self.log.exception("on_message(%r) failed", message)

    def _report_fault(self):
        # Called in cothread thread
        with self._lock:
            if self.state.value != self.state_set.DISABLING:
                self.transition(self.state_set.FAULT, "Server disconnected")
        self._connected_queue.put(None)
        for id in list(self._request_lookup):
            request = self._request_lookup.pop(id)
            response = Error(id=request.id,
                             message=ResponseError("Server disconnected"))
            try:
                request.callback(response)
            except Exception:
                # Most things will error here, not really a problem
                self.log.debug("Callback %s raised", request.callback)

    def _stop_client(self):
        # Called from cothread
        if self._conn:
            IOLoopHelper.call(self._conn.close)
            self._connected_queue.get(timeout=self.connect_timeout)
            self._conn = None

    def _update_remote_blocks(self, response):
        response = deserialize_object(response, Update)
        cothread.Callback(self.remote_blocks.set_value, response.value)

    def do_disable(self):
        super(WebsocketClientComms, self).do_disable()
        self._stop_client()

    def do_reset(self):
        super(WebsocketClientComms, self).do_reset()
        self._start_client()

    def sync_proxy(self, mri, block):
        """Abstract method telling the ClientComms to sync this proxy Block
        with its remote counterpart. Should wait until it is connected

        Args:
            mri (str): The mri for the remote block
            block (BlockModel): The local proxy Block to keep in sync
        """
        # Send a root Subscribe to the server
        subscribe = Subscribe(path=[mri], delta=True)
        done_queue = Queue()

        def handle_response(response):
            # Called from tornado
            if not isinstance(response, Delta):
                # Return or Error is the end of our subscription, log and ignore
                self.log.debug("Proxy got response %r", response)
                done_queue.put(None)
            else:
                cothread.Callback(self._handle_response, response, block,
                                  done_queue)

        subscribe.set_callback(handle_response)
        IOLoopHelper.call(self._send_request, subscribe)
        done_queue.get(timeout=DEFAULT_TIMEOUT)

    def _handle_response(self, response, block, done_queue):
        # type: (Response, BlockModel, Queue) -> None
        try:
            with self.changes_squashed:
                for change in response.changes:
                    self._handle_change(block, change)
        except Exception:
            self.log.exception("Error handling %s", response)
            raise
        finally:
            done_queue.put(None)

    def _handle_change(self, block, change):
        path = change[0]
        if len(path) == 0:
            assert len(change) == 2, \
                "Can't delete root block with change %r" % (change,)
            self._regenerate_block(block, change[1])
        elif len(path) == 1 and path[0] not in ("health", "meta"):
            if len(change) == 1:
                # Delete a field
                block.remove_endpoint(path[0])
            else:
                # Change a single field of the block
                block.set_endpoint_data(path[0], change[1])
        else:
            block.apply_change(path, *change[1:])

    def _regenerate_block(self, block, d):
        for field in list(block):
            if field not in ("health", "meta"):
                block.remove_endpoint(field)
        for field, value in d.items():
            if field == "health":
                # Update health attribute
                value = deserialize_object(value)  # type: NTScalar
                block.health.set_value(value=value.value,
                                       alarm=value.alarm,
                                       ts=value.timeStamp)
            elif field == "meta":
                value = deserialize_object(value)  # type: BlockMeta
                meta = block.meta  # type: BlockMeta
                for k in meta.call_types:
                    meta.apply_change([k], value[k])
            elif field != "typeid":
                # No need to set writeable_functions as the server will do it
                block.set_endpoint_data(field, value)

    def send_put(self, mri, attribute_name, value):
        """Abstract method to dispatch a Put to the server

        Args:
            mri (str): The mri of the Block
            attribute_name (str): The name of the Attribute within the Block
            value: The value to put
        """
        q = Queue()
        request = Put(path=[mri, attribute_name, "value"], value=value)
        request.set_callback(q.put)
        IOLoopHelper.call(self._send_request, request)
        response = q.get()
        if isinstance(response, Error):
            raise response.message
        else:
            return response.value

    def send_post(self, mri, method_name, **params):
        """Abstract method to dispatch a Post to the server

        Args:
            mri (str): The mri of the Block
            method_name (str): The name of the Method within the Block
            params: The parameters to send

        Returns:
            The return results from the server
        """
        q = Queue()
        request = Post(path=[mri, method_name], parameters=params)
        request.set_callback(q.put)
        IOLoopHelper.call(self._send_request, request)
        response = q.get()
        if isinstance(response, Error):
            raise response.message
        else:
            return response.value

    def _send_request(self, request):
        # Called in tornado thread
        request.id = self._next_id
        self._next_id += 1
        self._request_lookup[request.id] = request
        message = json_encode(request)
        self.log.debug("Sending message %s", message)
        self._conn.write_message(message)
Example 12
class TestSystemWSCommsServerOnly(unittest.TestCase):
    socket = 8881

    def setUp(self):
        self.process = Process("proc")
        self.hello = hello_block(mri="hello")[-1]
        self.process.add_controller(self.hello)
        self.server = web_server_block(mri="server", port=self.socket)[-1]
        self.process.add_controller(self.server)
        self.result = Queue()
        self.process.start()

    def tearDown(self):
        self.process.stop(timeout=1)

    @gen.coroutine
    def send_messages(self, messages, convert_json=True):
        conn = yield websocket_connect("ws://localhost:%s/ws" % self.socket)
        num = len(messages)
        for msg in messages:
            if convert_json:
                msg = json_encode(msg)
            conn.write_message(msg)
        for _ in range(num):
            resp = yield conn.read_message()
            resp = json.loads(resp)
            cothread.Callback(self.result.put, resp)
        conn.close()

    @gen.coroutine
    def send_message(self, req, convert_json=True):
        yield self.send_messages([req], convert_json)

    def test_server_and_simple_client(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Post:1.0"
        msg['id'] = 0
        msg['path'] = ("hello", "greet")
        msg['parameters'] = dict(name="me")
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(typeid="malcolm:core/Return:1.0",
                            id=0,
                            value="Hello me")

    def test_concurrency(self):
        msg1 = Post(id=0,
                    path=["hello", "greet"],
                    parameters=dict(name="me", sleep=2)).to_dict()
        msg2 = Post(id=1, path=["hello", "error"]).to_dict()
        IOLoopHelper.call(self.send_messages, [msg1, msg2])
        resp = self.result.get(timeout=1)
        assert resp == dict(typeid="malcolm:core/Error:1.0",
                            id=1,
                            message="RuntimeError: You called method error()")
        resp = self.result.get(timeout=3)
        assert resp == dict(typeid="malcolm:core/Return:1.0",
                            id=0,
                            value="Hello me")

    def test_blocks_delta(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Subscribe:1.0"
        msg['id'] = 0
        msg['path'] = (".", "blocks", "value")
        msg['delta'] = True
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(typeid="malcolm:core/Delta:1.0",
                            id=0,
                            changes=[[[], ["hello", "server"]]])

    def test_blocks_update(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Subscribe:1.0"
        msg['id'] = 0
        msg['path'] = (".", "blocks", "value")
        msg['delta'] = False
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(typeid="malcolm:core/Update:1.0",
                            id=0,
                            value=["hello", "server"])

    def test_error_server_and_simple_client_badJSON(self):
        IOLoopHelper.call(self.send_message, "I am JSON (but not a dict)")
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Error:1.0",
            id=-1,
            message=
            "ValueError: Error decoding JSON object (didn't return OrderedDict)"
        )

        IOLoopHelper.call(self.send_message,
                          "I am not JSON",
                          convert_json=False)
        resp = self.result.get(timeout=2)
        if version_info[0] == 2:
            assert resp == dict(
                typeid="malcolm:core/Error:1.0",
                id=-1,
                message=
                "ValueError: Error decoding JSON object (No JSON object could be decoded)"
            )
        elif version_info[0] == 3:
            assert resp == dict(
                typeid="malcolm:core/Error:1.0",
                id=-1,
                message=
                "ValueError: Error decoding JSON object (Expecting value: line 1 column 1 (char 0))"
            )
        else:
            raise Exception("Got bad python version info")

    def test_error_server_and_simple_client_no_id(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Post:1.0"
        msg['path'] = ("hello", "greet")
        msg['parameters'] = dict(name="me")
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Error:1.0",
            id=-1,
            message="FieldError: id field not present in JSON message")

    def test_error_server_and_simple_client_bad_type(self):
        msg = OrderedDict()
        msg['typeid'] = "NotATypeID"
        msg['id'] = 0
        msg['path'] = ("hello", "greet")
        msg['parameters'] = dict(name="me")
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Error:1.0",
            id=0,
            message="FieldError: 'NotATypeID' not a valid typeid")

    def test_error_server_and_simple_client_no_type(self):
        msg = OrderedDict()
        msg['id'] = 0
        msg['path'] = ("hello", "greet")
        msg['parameters'] = dict(name="me")
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)

        if version_info[0] == 2:
            message = "FieldError: typeid not present in keys " + \
                "[u'id', u'path', u'parameters']"
        elif version_info[0] == 3:
            message = "FieldError: typeid not present in keys " + \
                "['id', 'path', 'parameters']"
        else:
            raise Exception("Got bad python version info")
        assert resp == dict(typeid="malcolm:core/Error:1.0",
                            id=0,
                            message=message)

    def test_error_server_and_simple_client_bad_path_controller(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Post:1.0"
        msg['id'] = 0
        msg['path'] = ("goodbye", "insult")
        msg['parameters'] = dict(name="me")
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Error:1.0",
            id=0,
            message="ValueError: No controller registered for mri 'goodbye'")

    def test_error_server_and_simple_client_superfluous_params(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Get:1.0"
        msg['id'] = 0
        msg['path'] = ("hello", "meta")
        msg['parameters'] = dict(name="me")
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Error:1.0",
            id=0,
            message="TypeError: malcolm:core/Get:1.0 raised error: " +
            "__init__() got an unexpected keyword argument 'parameters'")

    def test_error_server_and_simple_client_bad_path_attribute(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Get:1.0"
        msg['id'] = 0
        msg['path'] = ("hello", "meat")
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        if version_info[0] == 2:
            assert resp == dict(
                typeid="malcolm:core/Error:1.0",
                id=0,
                message=
                "UnexpectedError: Object [u'hello'] of type 'malcolm:core/Block:1.0' has no attribute u'meat'"
            )
        elif version_info[0] == 3:
            assert resp == dict(
                typeid="malcolm:core/Error:1.0",
                id=0,
                message=
                "UnexpectedError: Object ['hello'] of type 'malcolm:core/Block:1.0' has no attribute 'meat'"
            )

    def test_error_server_and_simple_client_no_path(self):
        msg = OrderedDict()
        msg['typeid'] = "malcolm:core/Post:1.0"
        msg['id'] = 0
        IOLoopHelper.call(self.send_message, msg)
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Error:1.0",
            id=0,
            message=
            'ValueError: Expected a path with at least 1 element, got []')
Example #13
0
    def setUp(self):
        self.q = Queue()
Example #14
0
class ProxyController(Controller):
    """Sync a local block with a given remote block"""
    def __init__(self, process, parts, params):
        self.params = params
        super(ProxyController, self).__init__(process, params.mri, parts)
        self.client_comms = process.get_controller(params.comms)
        self.update_health(self, Alarm.invalid("Uninitialized"))
        self._response_queue = Queue()
        self._notify_response = True
        self._first_response_queue = Queue()

    @Process.Init
    def init(self):
        subscribe = Subscribe(path=[self.params.mri],
                              delta=True,
                              callback=self.handle_response)
        self.client_comms.send_to_server(subscribe)
        # Wait until connected
        self._first_response_queue.get(timeout=5)

    @Process.Halt
    def halt(self):
        unsubscribe = Unsubscribe(callback=self.handle_response)
        self.client_comms.send_to_server(unsubscribe)

    def handle_request(self, request):
        # Forward Puts and Posts to the client_comms
        if isinstance(request, (Put, Post)):
            return self.client_comms.send_to_server(request)
        else:
            return super(ProxyController, self).handle_request(request)

    def handle_response(self, response):
        self._response_queue.put(response)
        return self.spawn(self._handle_response)

    def _handle_response(self):
        with self.changes_squashed:
            if self._notify_response:
                self._first_response_queue.put(True)
                self._notify_response = False
            response = self._response_queue.get(timeout=0)
            if not isinstance(response, Delta):
                # Return or Error is the end of our subscription, log and ignore
                self.log.debug("Proxy got response %r", response)
                return
            for change in response.changes:
                path = change[0]
                if len(path) == 0:
                    assert len(change) == 2, \
                        "Can't delete root block with change %r" % (change,)
                    self._regenerate_block(change[1])
                elif len(path) == 1 and path[0] not in ("health", "meta"):
                    if len(change) == 1:
                        # Delete a field
                        self._block.remove_endpoint(path[0])
                    else:
                        # Change a single field of the block
                        self._block.set_endpoint_data(path[0], change[1])
                elif path == ["health", "alarm"]:
                    # If we got an alarm update for health
                    assert len(change) == 2, "Can't delete health alarm"
                    self.update_health(self, change[1])
                elif path[0] not in ("health", "meta"):
                    # Update a child of the block
                    assert len(change) == 2, \
                        "Can't delete entries in Attributes or Methods"
                    ob = self._block
                    for p in path[:-1]:
                        ob = ob[p]
                    getattr(ob, "set_%s" % path[-1])(change[1])
                else:
                    raise ValueError("Bad response %s" % response)

    def _regenerate_block(self, d):
        for field in list(self._block):
            if field not in ("health", "meta"):
                self._block.remove_endpoint(field)
        for field, value in d.items():
            if field == "health":
                if value["alarm"]["severity"]:
                    self.update_health(self, value["alarm"])
                else:
                    self.update_health(self, None)
            elif field == "meta":
                # TODO: set meta
                pass
            elif field != "typeid":
                # No need to set writeable_functions as the server will do it
                self._block.set_endpoint_data(field, value)
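
In ProxyController above, handle_response runs on the comms thread: it queues the response and spawns _handle_response to apply the Delta on a worker, and the first response also releases the init() wait via _first_response_queue. A rough standard-library sketch of that hand-off (ThreadPoolExecutor stands in for self.spawn; all names are illustrative):

from concurrent.futures import ThreadPoolExecutor
from queue import Queue

pool = ThreadPoolExecutor(max_workers=1)
response_queue = Queue()
first_response_queue = Queue()
notified = False


def handle_response(response):
    # Called on the comms thread: queue the response, process it on a worker
    response_queue.put(response)
    return pool.submit(_handle_response)


def _handle_response():
    global notified
    if not notified:
        first_response_queue.put(True)  # releases the init() wait
        notified = True
    response = response_queue.get(timeout=0)  # already queued, never blocks
    # ... apply the Delta to the local block here ...
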
Example #15
0
class PandABlocksManagerController(ManagerController):
    def __init__(self, process, parts, params):
        super(PandABlocksManagerController,
              self).__init__(process, parts, params)
        # {block_name: BlockData}
        self._blocks_data = {}
        # {block_name: {field_name: Part}}
        self._blocks_parts = OrderedDict()
        # src_attr -> [dest_attr]
        self._listening_attrs = {}
        # (block_name, src_field_name) -> [dest_field_name]
        self._scale_offset_fields = {}
        # full_src_field -> [full_dest_field]
        self._mirrored_fields = {}
        # fields that need to inherit UNITS, SCALE and OFFSET from upstream
        self._inherit_scale = {}
        self._inherit_offset = {}
        # lut elements to be displayed or not
        # {fnum: {id: visible}}
        self._lut_elements = {}
        # changes left over from last time
        self.changes = OrderedDict()
        # The PandABlock client that does the comms
        self.client = PandABlocksClient(params.hostname, params.port, Queue)
        # Filled in on reset
        self._stop_queue = None
        self._poll_spawned = None

    def do_init(self):
        # start the poll loop and make block parts first to fill in our parts
        # before calling _set_block_children()
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_init()

    def start_poll_loop(self):
        # queue to listen for stop events
        if not self.client.started:
            self._stop_queue = Queue()
            if self.client.started:
                self.client.stop()
            from socket import socket
            if self.use_cothread:
                cothread = maybe_import_cothread()
                if cothread:
                    from cothread.cosocket import socket
            self.client.start(self.spawn, socket)
        if not self._blocks_parts:
            self._make_blocks_parts()
        if self._poll_spawned is None:
            self._poll_spawned = self.spawn(self._poll_loop)

    def do_disable(self):
        super(PandABlocksManagerController, self).do_disable()
        self.stop_poll_loop()

    def do_reset(self):
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_reset()

    def _poll_loop(self):
        """At 10Hz poll for changes"""
        next_poll = time.time()
        while True:
            next_poll += 0.1
            timeout = next_poll - time.time()
            if timeout < 0:
                timeout = 0
            try:
                return self._stop_queue.get(timeout=timeout)
            except TimeoutError:
                # No stop, no problem
                pass
            try:
                self.handle_changes(self.client.get_changes())
            except Exception:
                # TODO: should fault here?
                self.log.exception("Error while getting changes")

    def stop_poll_loop(self):
        if self._poll_spawned:
            self._stop_queue.put(None)
            self._poll_spawned.wait()
            self._poll_spawned = None
        if self.client.started:
            self.client.stop()

    def _make_blocks_parts(self):
        # {block_name_without_number: BlockData}
        self._blocks_data = OrderedDict()
        self._blocks_parts = OrderedDict()
        for block_rootname, block_data in self.client.get_blocks_data().items():
            block_names = []
            if block_data.number == 1:
                block_names.append(block_rootname)
            else:
                for i in range(block_data.number):
                    block_names.append("%s%d" % (block_rootname, i + 1))
            for block_name in block_names:
                self._blocks_data[block_name] = block_data
                self._make_parts(block_name, block_data)
        # Handle the initial set of changes to get an initial value
        self.handle_changes(self.client.get_changes())
        # Then once more to let bit_outs toggle back
        self.handle_changes({})
        assert not self.changes, "There are still changes %s" % self.changes

    def _make_child_controller(self, parts, mri):
        controller = call_with_params(BasicController,
                                      self.process,
                                      parts,
                                      mri=mri)
        return controller

    def _make_corresponding_part(self, block_name, mri):
        part = call_with_params(ChildPart, name=block_name, mri=mri)
        return part

    def _make_parts(self, block_name, block_data):
        mri = "%s:%s" % (self.params.mri, block_name)

        # Defer creation of parts to a block maker
        maker = PandABlocksMaker(self.client, block_name, block_data)

        # Make the child controller and add it to the process
        controller = self._make_child_controller(maker.parts.values(), mri)
        self.process.add_controller(mri, controller)

        # Store the parts so we can update them with the poller
        self._blocks_parts[block_name] = maker.parts

        # setup param pos on a block with pos_out to inherit SCALE OFFSET UNITS
        pos_fields = []
        pos_out_fields = []
        pos_mux_inp_fields = []
        for field_name, field_data in block_data.fields.items():
            if field_name == "INP" and field_data.field_type == "pos_mux":
                pos_mux_inp_fields.append(field_name)
            elif field_data.field_type == "pos_out":
                pos_out_fields.append(field_name)
            elif field_data.field_subtype in ("pos", "relative_pos"):
                pos_fields.append(field_name)

        # Make sure pos_fields can get SCALE from somewhere
        if pos_fields:
            sources = pos_mux_inp_fields + pos_out_fields
            assert len(sources) == 1, \
                "Expected one source of SCALE and OFFSET for %s, got %s" % (
                    pos_fields, sources)
            for field_name in pos_fields:
                self._map_scale_offset(block_name, sources[0], field_name)

        # Make the corresponding part for us
        child_part = self._make_corresponding_part(block_name, mri)
        self.add_part(child_part)

    def _map_scale_offset(self, block_name, src_field, dest_field):
        self._scale_offset_fields.setdefault((block_name, src_field),
                                             []).append(dest_field)
        if src_field == "INP":
            # mapping based on what it is connected to, defer
            return
        for suff in ("SCALE", "OFFSET", "UNITS"):
            full_src_field = "%s.%s.%s" % (block_name, src_field, suff)
            full_dest_field = "%s.%s.%s" % (block_name, dest_field, suff)
            self._mirrored_fields.setdefault(full_src_field,
                                             []).append(full_dest_field)

    def _set_lut_icon(self, block_name):
        icon_attr = self._blocks_parts[block_name]["icon"].attr
        with open(os.path.join(SVG_DIR, "LUT.svg")) as f:
            svg_text = f.read()
        fnum = int(self.client.get_field(block_name, "FUNC.RAW"))
        invis = self._get_lut_icon_elements(fnum)
        root = ET.fromstring(svg_text)
        for i in invis:
            # Find the first parent which has a child with id i
            parent = root.find('.//*[@id=%r]/..' % i)
            # Find the child and remove it
            child = parent.find('./*[@id=%r]' % i)
            parent.remove(child)
        svg_text = et_to_string(root)
        icon_attr.set_value(svg_text)

    def _get_lut_icon_elements(self, fnum):
        if not self._lut_elements:
            # Generate the lut element table
            # Do the general case funcs
            funcs = [("AND", operator.and_), ("OR", operator.or_)]
            for func, op in funcs:
                for nargs in (2, 3, 4, 5):
                    # 2**nargs permutations
                    for permutation in range(2**nargs):
                        self._calc_visibility(func, op, nargs, permutation)
            # Add in special cases for NOT
            for ninp in "ABCDE":
                invis = {"AND", "OR", "LUT"}
                for inp in "ABCDE":
                    if inp != ninp:
                        invis.add(inp)
                    invis.add("not%s" % inp)
                self._lut_elements[~LUT_CONSTANTS[ninp] & (2**32 - 1)] = invis
            # And catchall for LUT in 0
            invis = {"AND", "OR", "NOT"}
            for inp in "ABCDE":
                invis.add("not%s" % inp)
            self._lut_elements[0] = invis
        return self._lut_elements.get(fnum, self._lut_elements[0])

    def _calc_visibility(self, func, op, nargs, permutations):
        # Visibility dictionary defaults
        invis = {"AND", "OR", "LUT", "NOT"}
        invis.remove(func)
        args = []
        for i, inp in enumerate("EDCBA"):
            # xxxxx where x is 0 or 1
            # EDCBA
            negations = format(permutations, '05b')
            if (5 - i) > nargs:
                # invisible
                invis.add(inp)
                invis.add("not%s" % inp)
            else:
                # visible
                if negations[i] == "1":
                    args.append(~LUT_CONSTANTS[inp] & (2**32 - 1))
                else:
                    invis.add("not%s" % inp)
                    args.append(LUT_CONSTANTS[inp])

        # Insert into table
        fnum = op(args[0], args[1])
        for a in args[2:]:
            fnum = op(fnum, a)
        self._lut_elements[fnum] = invis

    def handle_changes(self, changes):
        for k, v in changes.items():
            self.changes[k] = v
        for full_field, val in list(self.changes.items()):
            # If we have a mirrored field then fire off a request
            for dest_field in self._mirrored_fields.get(full_field, []):
                self.client.send("%s=%s\n" % (dest_field, val))
            block_name, field_name = full_field.split(".", 1)
            ret = self.update_attribute(block_name, field_name, val)
            if ret is not None:
                self.changes[full_field] = ret
            else:
                self.changes.pop(full_field)
            # If it was LUT.FUNC then recalculate icon
            if block_name.startswith("LUT") and field_name == "FUNC":
                self._set_lut_icon(block_name)

    def update_attribute(self, block_name, field_name, val):
        ret = None
        if block_name not in self._blocks_parts:
            self.log.debug("Block %s not known", block_name)
            return
        parts = self._blocks_parts[block_name]
        if field_name not in parts:
            self.log.debug("Block %s has no field %s", block_name, field_name)
            return
        part = parts[field_name]
        attr = part.attr
        field_data = self._blocks_data[block_name].fields.get(field_name, None)
        if val == Exception:
            # TODO: set error
            val = None
        elif isinstance(attr.meta, BooleanMeta):
            val = bool(int(val))
            is_bit_out = field_data and field_data.field_type == "bit_out"
            if is_bit_out and val == attr.value:
                # make bit_out things toggle while changing
                ret = val
                val = not val
        elif isinstance(attr.meta, TableMeta):
            val = part.table_from_list(val)

        # Update the value of our attribute and anyone listening
        attr.set_value(val)
        for dest_attr in self._listening_attrs.get(attr, []):
            dest_attr.set_value(val)

        # if we changed the value of a mux, update the slaved values
        if field_data and field_data.field_type in ("bit_mux", "pos_mux"):
            current_part = parts[field_name + ".CURRENT"]
            current_attr = current_part.attr
            self._update_current_attr(current_attr, val)
            if field_data.field_type == "pos_mux" and field_name == "INP":
                # all param pos fields should inherit scale and offset
                for dest_field_name in self._scale_offset_fields.get(
                    (block_name, field_name), []):
                    self._update_scale_offset_mapping(block_name,
                                                      dest_field_name, val)
        return ret

    def _update_scale_offset_mapping(self, block_name, field_name, mux_val):
        # Find the fields that depend on this input
        field_data = self._blocks_data[block_name].fields.get(field_name, None)
        if field_data.field_subtype == "relative_pos":
            suffs = ("SCALE", "UNITS")
        else:
            suffs = ("SCALE", "OFFSET", "UNITS")

        for suff in suffs:
            full_src_field = "%s.%s" % (mux_val, suff)
            full_dest_field = "%s.%s.%s" % (block_name, field_name, suff)

            # Remove mirrored fields that are already in lists
            for field_list in self._mirrored_fields.values():
                try:
                    field_list.remove(full_dest_field)
                except ValueError:
                    pass

            self._mirrored_fields.setdefault(full_src_field,
                                             []).append(full_dest_field)
            # update it to the right value
            if mux_val == "ZERO":
                value = dict(SCALE=1, OFFSET=0, UNITS="")[suff]
            else:
                mon_block_name, mon_field_name = mux_val.split(".", 1)
                mon_parts = self._blocks_parts[mon_block_name]
                src_attr = mon_parts["%s.%s" % (mon_field_name, suff)].attr
                value = src_attr.value
            self.client.send("%s=%s\n" % (full_dest_field, value))

    def _update_current_attr(self, current_attr, mux_val):
        # Remove the old current_attr from all lists
        for mux_list in self._listening_attrs.values():
            try:
                mux_list.remove(current_attr)
            except ValueError:
                pass
        # add it to the list of things that need to update
        if mux_val == "ZERO":
            current_attr.set_value(0)
        elif mux_val == "ONE":
            current_attr.set_value(1)
        else:
            mon_block_name, mon_field_name = mux_val.split(".", 1)
            mon_parts = self._blocks_parts[mon_block_name]
            out_attr = mon_parts[mon_field_name].attr
            self._listening_attrs.setdefault(out_attr, []).append(current_attr)
            # update it to the right value
            current_attr.set_value(out_attr.value)
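
The _poll_loop above uses Queue.get(timeout=...) as an interruptible sleep: it wakes roughly every 0.1s to fetch changes, and anything put on the stop queue ends the loop immediately. A rough standard-library equivalent is below; note the stdlib queue signals a timeout with Empty where malcolm's Queue raises TimeoutError, and the poll body is a placeholder:

import time
from queue import Empty, Queue

stop_queue = Queue()


def poll_loop(poll_changes, period=0.1):
    next_poll = time.time()
    while True:
        next_poll += period
        timeout = max(next_poll - time.time(), 0)
        try:
            # Doubles as the sleep: returns as soon as a stop is requested
            return stop_queue.get(timeout=timeout)
        except Empty:
            pass  # no stop requested, keep polling
        poll_changes()  # stands in for handle_changes(client.get_changes())
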
Example #16
0
class MalcWebSocketHandler(WebSocketHandler):
    _registrar: PartRegistrar
    _id_to_mri: Dict[int, str]
    _validators = None
    _writeable = None
    _queue: Optional[Queue] = None
    _counter = None

    def initialize(self, registrar=None, validators=()):
        self._registrar = registrar
        # {id: mri}
        self._id_to_mri = {}
        self._validators = validators
        self._queue = Queue()
        self._counter = 0

    def on_message(self, message):
        # called in tornado's thread
        if self._writeable is None:
            ipv4_ip = self.request.remote_ip
            if ipv4_ip == "::1":
                # Special case IPV6 loopback
                ipv4_ip = "127.0.0.1"
            remoteaddr = struct.unpack("!I", socket.inet_aton(ipv4_ip))[0]
            if self._validators:
                # Work out if the remote ip is within the netmask of any of our
                # interfaces. If not, Put and Post are forbidden
                self._writeable = max(v(remoteaddr) for v in self._validators)
            else:
                self._writeable = True
            log.info(
                "Puts and Posts are %s from %s",
                "allowed" if self._writeable else "forbidden",
                self.request.remote_ip,
            )

        msg_id = -1
        try:
            d = json_decode(message)
            try:
                msg_id = d["id"]
            except KeyError:
                raise FieldError("id field not present in JSON message")
            request = deserialize_object(d, Request)
            request.set_callback(self.on_response)
            if isinstance(request, Subscribe):
                assert msg_id not in self._id_to_mri, (
                    "Duplicate subscription ID %d" % msg_id
                )
                self._id_to_mri[msg_id] = request.path[0]
            if isinstance(request, Unsubscribe):
                mri = self._id_to_mri[msg_id]
            else:
                mri = request.path[0]
            if isinstance(request, (Put, Post)) and not self._writeable:
                raise ValueError(f"Put/Post is forbidden from {self.request.remote_ip}")
            self._registrar.report(builtin.infos.RequestInfo(request, mri))
        except Exception as e:
            log.exception("Error handling message:\n%s", message)
            error = Error(msg_id, e)
            error_message = error.to_dict()
            self.write_message(json_encode(error_message))

    def on_response(self, response):
        # called from cothread
        IOLoopHelper.call(self._on_response, response)
        # Wait for completion once every 10 messages
        self._counter += 1
        if self._counter % 10 == 0:
            for _ in range(10):
                self._queue.get()

    def _on_response(self, response: Response) -> None:
        # called from tornado thread
        message = json_encode(response)
        try:
            self.write_message(message)
        except WebSocketError:
            # The websocket is dead. If the response was a Delta or Update, then
            # unsubscribe so the local controller doesn't keep on trying to
            # respond
            if isinstance(response, (Delta, Update)):
                # Websocket is dead so we can clear the subscription key.
                # Subsequent updates may come in before the unsubscribe, but
                # ignore them as we can't do anything about it
                mri = self._id_to_mri.pop(response.id, None)
                if mri:
                    log.info("WebSocket Error: unsubscribing from stale handle")
                    unsubscribe = Unsubscribe(response.id)
                    unsubscribe.set_callback(self.on_response)
                    if self._registrar:
                        self._registrar.report(
                            builtin.infos.RequestInfo(unsubscribe, mri)
                        )
        finally:
            assert self._queue, "No queue"
            cothread.Callback(self._queue.put, None)

    # http://stackoverflow.com/q/24851207
    # TODO: remove this when the web gui is hosted from the box
    def check_origin(self, origin):
        return True
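
on_response above applies simple backpressure: each response is handed to the tornado thread for writing, and after every tenth one the producing thread blocks until ten completion tokens have come back on the queue, so it never runs more than ten writes ahead. A stripped-down standard-library sketch (a plain thread stands in for the IOLoop; names are illustrative):

import threading
from queue import Queue

done_queue = Queue()
counter = 0


def write_on_io_thread(message):
    def _write():
        # ... write the websocket message here ...
        done_queue.put(None)  # signal completion back to the producer
    threading.Thread(target=_write).start()


def on_response(message):
    global counter
    write_on_io_thread(message)
    counter += 1
    if counter % 10 == 0:
        for _ in range(10):  # wait for the last ten writes to complete
            done_queue.get()
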
Example #17
0
class RunnableController(builtin.controllers.ManagerController):
    """RunnableDevice implementer that also exposes GUI for child parts"""

    # The state_set that this controller implements
    state_set = ss()

    def __init__(
        self,
        mri: AMri,
        config_dir: AConfigDir,
        template_designs: ATemplateDesigns = "",
        initial_design: AInitialDesign = "",
        description: ADescription = "",
    ) -> None:
        super().__init__(
            mri=mri,
            config_dir=config_dir,
            template_designs=template_designs,
            initial_design=initial_design,
            description=description,
        )
        # Shared contexts between Configure, Run, Pause, Seek, Resume
        self.part_contexts: Dict[Part, Context] = {}
        # Any custom ConfigureParams subclasses requested by Parts
        self.part_configure_params: PartConfigureParams = {}
        # Params passed to configure()
        self.configure_params: Optional[ConfigureParams] = None
        # Progress reporting dict of completed_steps for each part
        self.progress_updates: Optional[Dict[Part, int]] = None
        # Queue so that do_run can wait to see why it was aborted and resume if
        # needed
        self.resume_queue: Optional[Queue] = None
        # Stored for pause. If using breakpoints, it is a list of steps
        self.steps_per_run: List[int] = []
        # If the list of breakpoints is not empty, this will be true
        self.use_breakpoints: bool = False
        # Absolute steps where the run() returns
        self.breakpoint_steps: List[int] = []
        # Breakpoint index, modified in run() and pause()
        self.breakpoint_index: int = 0
        # Queue so we can wait for aborts to complete
        self.abort_queue: Optional[Queue] = None
        # Create sometimes writeable attribute for the current completed scan
        # step
        self.completed_steps = NumberMeta(
            "int32",
            "Readback of number of scan steps",
            tags=[Widget.METER.tag()],  # Widget.TEXTINPUT.tag()]
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model(
            "completedSteps", self.completed_steps, self.pause
        )
        self.set_writeable_in(self.completed_steps, ss.PAUSED, ss.ARMED)
        # Create read-only attribute for the number of configured scan steps
        self.configured_steps = NumberMeta(
            "int32",
            "Number of steps currently configured",
            tags=[Widget.TEXTUPDATE.tag()],
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model(
            "configuredSteps", self.configured_steps
        )
        # Create read-only attribute for the total number scan steps
        self.total_steps = NumberMeta(
            "int32", "Readback of number of scan steps", tags=[Widget.TEXTUPDATE.tag()]
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model("totalSteps", self.total_steps)
        # Create the method models
        self.field_registry.add_method_model(self.validate)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.configure), ss.READY, ss.FINISHED
        )
        self.set_writeable_in(self.field_registry.add_method_model(self.run), ss.ARMED)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.abort),
            ss.READY,
            ss.CONFIGURING,
            ss.ARMED,
            ss.RUNNING,
            ss.POSTRUN,
            ss.PAUSED,
            ss.SEEKING,
            ss.FINISHED,
        )
        self.set_writeable_in(
            self.field_registry.add_method_model(self.pause),
            ss.ARMED,
            ss.PAUSED,
            ss.RUNNING,
            ss.FINISHED,
        )
        self.set_writeable_in(
            self.field_registry.add_method_model(self.resume), ss.PAUSED
        )
        # Override reset to work from aborted too
        self.set_writeable_in(
            self.field_registry.get_field("reset"),
            ss.FAULT,
            ss.DISABLED,
            ss.ABORTED,
            ss.ARMED,
            ss.FINISHED,
        )
        # Allow Parts to report their status
        self.info_registry.add_reportable(RunProgressInfo, self.update_completed_steps)
        # Allow Parts to request extra items from configure
        self.info_registry.add_reportable(
            ConfigureParamsInfo, self.update_configure_params
        )

    def get_steps_per_run(
        self,
        generator: CompoundGenerator,
        axes_to_move: AAxesToMove,
        breakpoints: List[int],
    ) -> List[int]:
        self.use_breakpoints = False
        steps = [1]
        axes_set = set(axes_to_move)
        for dim in reversed(generator.dimensions):
            # If the axes_set is empty and the dimension has axes then we have
            # done as many dimensions as we can, so return
            if dim.axes and not axes_set:
                break
            # Consume the axes that this generator scans
            for axis in dim.axes:
                assert axis in axes_set, f"Axis {axis} is not in {axes_to_move}"
                axes_set.remove(axis)
            # Now multiply by the dimensions to get the number of steps
            steps[0] *= dim.size

        # If we have breakpoints we make a list of steps
        if len(breakpoints) > 0:
            total_breakpoint_steps = sum(breakpoints)
            assert (
                total_breakpoint_steps <= steps[0]
            ), "Sum of breakpoints greater than steps in scan"
            self.use_breakpoints = True

            # Cast to list so we can append
            breakpoints_list = list(breakpoints)

            # Check if we need to add the final breakpoint to the inner scan
            if total_breakpoint_steps < steps[0]:
                last_breakpoint = steps[0] - total_breakpoint_steps
                breakpoints_list += [last_breakpoint]

            # Repeat the set of breakpoints for each outer step
            breakpoints_list *= self._get_outer_steps(generator, axes_to_move)

            steps = breakpoints_list

            # List of steps completed at end of each run
            self.breakpoint_steps = [sum(steps[:i]) for i in range(1, len(steps) + 1)]

        return steps

    def _get_outer_steps(self, generator, axes_to_move):
        outer_steps = 1
        for dim in reversed(generator.dimensions):
            outer_axis = True
            for axis in dim.axes:
                if axis in axes_to_move:
                    outer_axis = False
            if outer_axis:
                outer_steps *= dim.size
        return outer_steps

    def do_reset(self):
        super().do_reset()
        self.configured_steps.set_value(0)
        self.completed_steps.set_value(0)
        self.total_steps.set_value(0)
        self.breakpoint_index = 0

    def update_configure_params(
        self, part: Part = None, info: ConfigureParamsInfo = None
    ) -> None:
        """Tell controller part needs different things passed to Configure"""
        with self.changes_squashed:
            # Update the dict
            if part:
                assert info, "No info for part"
                self.part_configure_params[part] = info

            # No process yet, so don't do this yet
            if self.process is None:
                return

            # Make a list of all the infos that the parts have contributed
            part_configure_infos = []
            for part in self.parts.values():
                info = self.part_configure_params.get(part, None)
                if info:
                    part_configure_infos.append(info)

            # Update methods from the updated configure model
            for method_name in ("configure", "validate"):
                # Get the model of our configure method as the starting point
                method_meta = MethodMeta.from_callable(self.configure)
                # Update the configure model from the infos
                update_configure_model(method_meta, part_configure_infos)
                # Put the created metas onto our block meta
                method = self._block[method_name]
                method.meta.takes.set_elements(method_meta.takes.elements)
                method.meta.takes.set_required(method_meta.takes.required)
                method.meta.returns.set_elements(method_meta.returns.elements)
                method.meta.returns.set_required(method_meta.returns.required)
                method.meta.set_defaults(method_meta.defaults)
                method.set_took()
                method.set_returned()

    def update_block_endpoints(self):
        super().update_block_endpoints()
        self.update_configure_params()

    def _part_params(
        self, part_contexts: Dict[Part, Context] = None, params: ConfigureParams = None
    ) -> PartContextParams:
        if part_contexts is None:
            part_contexts = self.part_contexts
        if params is None:
            params = self.configure_params
        for part, context in part_contexts.items():
            args = {}
            assert params, "No params"
            for k in params.call_types:
                args[k] = getattr(params, k)
            yield part, context, args

    # This will be serialized, so maintain camelCase for axesToMove
    # noinspection PyPep8Naming
    @add_call_types
    def validate(
        self,
        generator: AGenerator,
        axesToMove: AAxesToMove = None,
        breakpoints: ABreakpoints = None,
        **kwargs: Any,
    ) -> AConfigureParams:
        """Validate configuration parameters and return validated parameters.

        Doesn't take device state into account so can be run in any state
        """
        iterations = 10
        # We will return this, so make sure we fill in defaults
        for k, default in self._block.configure.meta.defaults.items():
            kwargs.setdefault(k, default)
        # The validated parameters we will eventually return
        params = ConfigureParams(generator, axesToMove, breakpoints, **kwargs)
        # Make some tasks just for validate
        part_contexts = self.create_part_contexts()
        # Get any status from all parts
        status_part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in part_contexts.items()
        )
        while iterations > 0:
            # Try up to 10 times to get a valid set of parameters
            iterations -= 1
            # Validate the params with all the parts
            validate_part_info = self.run_hooks(
                ValidateHook(p, c, status_part_info, **kwargs)
                for p, c, kwargs in self._part_params(part_contexts, params)
            )
            tweaks: List[ParameterTweakInfo] = ParameterTweakInfo.filter_values(
                validate_part_info
            )
            if tweaks:
                # Check if we need to resolve generator tweaks first
                generator_tweaks: List[ParameterTweakInfo] = []
                for tweak in tweaks:
                    # Collect all generator tweaks
                    if tweak.parameter == "generator":
                        generator_tweaks.append(tweak)
                if len(generator_tweaks) > 0:
                    # Resolve multiple tweaks to the generator
                    generator_tweak = resolve_generator_tweaks(generator_tweaks)
                    deserialized = self._block.configure.meta.takes.elements[
                        generator_tweak.parameter
                    ].validate(generator_tweak.value)
                    setattr(params, generator_tweak.parameter, deserialized)
                    self.log.debug(f"{self.mri}: tweaking generator to {deserialized}")
                else:
                    # Other tweaks can be applied at the same time
                    for tweak in tweaks:
                        deserialized = self._block.configure.meta.takes.elements[
                            tweak.parameter
                        ].validate(tweak.value)
                        setattr(params, tweak.parameter, deserialized)
                        self.log.debug(
                            f"{self.mri}: tweaking {tweak.parameter} to {deserialized}"
                        )
            else:
                # Consistent set, just return the params
                return params
        raise ValueError("Could not get a consistent set of parameters")

    def abortable_transition(self, state):
        with self._lock:
            # We might have been aborted just now, so this will fail
            # with an AbortedError if we were
            self_ctx = self.part_contexts.get(self, None)
            if self_ctx:
                self_ctx.sleep(0)
            self.transition(state)

    # This will be serialized, so maintain camelCase for axesToMove
    # noinspection PyPep8Naming
    @add_call_types
    def configure(
        self,
        generator: AGenerator,
        axesToMove: AAxesToMove = None,
        breakpoints: ABreakpoints = None,
        **kwargs: Any,
    ) -> AConfigureParams:
        """Validate the params then configure the device ready for run().

        Try to prepare the device as much as possible so that run() is quick to
        start; this may involve potentially long-running activities like moving
        motors.

        Normally it will return in Armed state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        params = self.validate(generator, axesToMove, breakpoints, **kwargs)
        state = self.state.value
        try:
            self.transition(ss.CONFIGURING)
            self.do_configure(state, params)
            self.abortable_transition(ss.ARMED)
        except AbortedError:
            assert self.abort_queue, "No abort queue"
            self.abort_queue.put(None)
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise
        else:
            return params

    def do_configure(self, state: str, params: ConfigureParams) -> None:
        if state == ss.FINISHED:
            # If we were finished then do a reset before configuring
            self.run_hooks(
                builtin.hooks.ResetHook(p, c)
                for p, c in self.create_part_contexts().items()
            )
        # Clear out any old part contexts now rather than letting gc do it
        for context in self.part_contexts.values():
            context.unsubscribe_all()
        # These are the part tasks that abort() and pause() will operate on
        self.part_contexts = self.create_part_contexts()
        # So add one for ourself too so we can be aborted
        assert self.process, "No attached process"
        self.part_contexts[self] = Context(self.process)
        # Store the params for use in seek()
        self.configure_params = params
        # Tell everything to get into the right state to Configure
        self.run_hooks(PreConfigureHook(p, c) for p, c in self.part_contexts.items())
        # This will calculate what we need from the generator, possibly a long
        # call
        params.generator.prepare()
        # Set the steps attributes that we will do across many run() calls
        self.total_steps.set_value(params.generator.size)
        self.completed_steps.set_value(0)
        self.configured_steps.set_value(0)
        # TODO: We can be cleverer about this and support a different number
        # of steps per run for each run by examining the generator structure
        self.steps_per_run = self.get_steps_per_run(
            params.generator, params.axesToMove, params.breakpoints
        )
        # Get any status from all parts
        part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in self.part_contexts.items()
        )
        # Run the configure command on all parts, passing them info from
        # ReportStatus. Parts should return any reporting info for PostConfigure
        completed_steps = 0
        self.breakpoint_index = 0
        steps_to_do = self.steps_per_run[self.breakpoint_index]
        part_info = self.run_hooks(
            ConfigureHook(p, c, completed_steps, steps_to_do, part_info, **kw)
            for p, c, kw in self._part_params()
        )
        # Take configuration info and reflect it as attribute updates
        self.run_hooks(
            PostConfigureHook(p, c, part_info) for p, c in self.part_contexts.items()
        )
        # Update the completed and configured steps
        self.configured_steps.set_value(steps_to_do)
        self.completed_steps.meta.display.set_limitHigh(params.generator.size)
        # Reset the progress of all child parts
        self.progress_updates = {}
        self.resume_queue = Queue()

    @add_call_types
    def run(self) -> None:
        """Run a device where configure() has already be called

        Normally it will return in Ready state. If setup for multiple-runs with
        a single configure() then it will return in Armed state. If the user
        aborts then it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """

        if self.configured_steps.value < self.total_steps.value:
            next_state = ss.ARMED
        else:
            next_state = ss.FINISHED
        try:
            self.transition(ss.RUNNING)
            hook = RunHook
            going = True
            while going:
                try:
                    self.do_run(hook)
                    self.abortable_transition(next_state)
                except AbortedError:
                    assert self.abort_queue, "No abort queue"
                    self.abort_queue.put(None)
                    # Wait for a response on the resume_queue
                    assert self.resume_queue, "No resume queue"
                    should_resume = self.resume_queue.get()
                    if should_resume:
                        # we need to resume
                        self.log.debug("Resuming run")
                    else:
                        # we don't need to resume, just drop out
                        raise
                else:
                    going = False
        except AbortedError:
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_run(self, hook):
        # type: (Type[ControllerHook]) -> None

        # Run all PreRunHooks
        self.run_hooks(PreRunHook(p, c) for p, c in self.part_contexts.items())

        self.run_hooks(hook(p, c) for p, c in self.part_contexts.items())
        self.abortable_transition(ss.POSTRUN)
        completed_steps = self.configured_steps.value
        if completed_steps < self.total_steps.value:
            if self.use_breakpoints:
                self.breakpoint_index += 1
            steps_to_do = self.steps_per_run[self.breakpoint_index]
            part_info = self.run_hooks(
                ReportStatusHook(p, c) for p, c in self.part_contexts.items()
            )
            self.completed_steps.set_value(completed_steps)
            self.run_hooks(
                PostRunArmedHook(
                    p, c, completed_steps, steps_to_do, part_info, **kwargs
                )
                for p, c, kwargs in self._part_params()
            )
            self.configured_steps.set_value(completed_steps + steps_to_do)
        else:
            self.completed_steps.set_value(completed_steps)
            self.run_hooks(
                PostRunReadyHook(p, c) for p, c in self.part_contexts.items()
            )

    def update_completed_steps(
        self, part: Part, completed_steps: RunProgressInfo
    ) -> None:
        with self._lock:
            # Update
            assert self.progress_updates is not None, "No progress updates"
            self.progress_updates[part] = completed_steps.steps
            min_completed_steps = min(self.progress_updates.values())
            if min_completed_steps > self.completed_steps.value:
                self.completed_steps.set_value(min_completed_steps)

    @add_call_types
    def abort(self) -> None:
        """Abort the current operation and block until aborted

        Normally it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        self.try_aborting_function(ss.ABORTING, ss.ABORTED, self.do_abort)
        # Tell _call_do_run not to resume
        if self.resume_queue:
            self.resume_queue.put(False)

    def do_abort(self) -> None:
        self.run_hooks(AbortHook(p, c) for p, c in self.create_part_contexts().items())

    def try_aborting_function(
        self, start_state: str, end_state: str, func: Callable[..., None], *args: Any
    ) -> None:
        try:
            # To make the running function fail we need to stop any running
            # contexts (if running a hook) or make transition() fail with
            # AbortedError. Both of these are accomplished here
            with self._lock:
                original_state = self.state.value
                self.abort_queue = Queue()
                self.transition(start_state)
                for context in self.part_contexts.values():
                    context.stop()
            if original_state not in (ss.READY, ss.ARMED, ss.PAUSED, ss.FINISHED):
                # Something was running, let it finish aborting
                try:
                    self.abort_queue.get(timeout=DEFAULT_TIMEOUT)
                except TimeoutError:
                    self.log.warning("Timeout waiting while {start_state}")
            with self._lock:
                # Now we've waited for a while we can remove the error state
                # for transition in case a hook triggered it rather than a
                # transition
                self_ctx = self.part_contexts.get(self, None)
                if self_ctx:
                    self_ctx.ignore_stops_before_now()
            func(*args)
            self.abortable_transition(end_state)
        except AbortedError:
            assert self.abort_queue, "No abort queue"
            self.abort_queue.put(None)
            raise
        except Exception as e:  # pylint:disable=broad-except
            self.go_to_error_state(e)
            raise

    # Allow camelCase as this will be serialized
    # noinspection PyPep8Naming
    @add_call_types
    def pause(self, lastGoodStep: ALastGoodStep = -1) -> None:
        """Pause a run() so that resume() can be called later, or seek within
        an Armed or Paused state.

        The original call to run() will not be interrupted by pause(); it will
        wait until the scan completes or is aborted.

        Normally it will return in Paused state. If the scan is finished it
        will return in Finished state. If the scan is armed it will return in
        Armed state. If the user aborts then it will return in Aborted state.
        If something goes wrong it will return in Fault state. If the user
        disables then it will return in Disabled state.
        """

        total_steps = self.total_steps.value

        # We need to decide where to go
        if lastGoodStep < 0:
            # If we are finished we do not need to do anything
            if self.state.value is ss.FINISHED:
                return
            # Otherwise set to number of completed steps
            else:
                lastGoodStep = self.completed_steps.value
        # Otherwise make sure we are bound to the total steps of the scan
        elif lastGoodStep >= total_steps:
            lastGoodStep = total_steps - 1

        if self.state.value in [ss.ARMED, ss.FINISHED]:
            # We don't have a run process, free to go anywhere we want
            next_state = ss.ARMED
        else:
            # Need to pause within the bounds of the current run
            if lastGoodStep == self.configured_steps.value:
                lastGoodStep -= 1
            next_state = ss.PAUSED

        self.try_aborting_function(ss.SEEKING, next_state, self.do_pause, lastGoodStep)

    def do_pause(self, completed_steps: int) -> None:
        """Recalculates the number of configured steps
        Arguments:
        completed_steps -- Last good step performed
        """
        self.run_hooks(PauseHook(p, c) for p, c in self.create_part_contexts().items())

        if self.use_breakpoints:
            self.breakpoint_index = self.get_breakpoint_index(completed_steps)
            in_run_steps = (
                completed_steps % self.breakpoint_steps[self.breakpoint_index]
            )
            steps_to_do = self.breakpoint_steps[self.breakpoint_index] - in_run_steps
        else:
            in_run_steps = completed_steps % self.steps_per_run[self.breakpoint_index]
            steps_to_do = self.steps_per_run[self.breakpoint_index] - in_run_steps

        part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in self.part_contexts.items()
        )
        self.completed_steps.set_value(completed_steps)
        self.run_hooks(
            SeekHook(p, c, completed_steps, steps_to_do, part_info, **kwargs)
            for p, c, kwargs in self._part_params()
        )
        self.configured_steps.set_value(completed_steps + steps_to_do)

    def get_breakpoint_index(self, completed_steps: int) -> int:
        # If the last point, then return the last index
        if completed_steps == self.breakpoint_steps[-1]:
            return len(self.breakpoint_steps) - 1
        # Otherwise check which index we fall within
        index = 0
        while completed_steps >= self.breakpoint_steps[index]:
            index += 1
        return index
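
    # For example, with self.breakpoint_steps == [10, 30, 60]:
    #   completed_steps=5  -> index 0 (still inside the first run)
    #   completed_steps=15 -> index 1
    #   completed_steps=60 -> index 2 (the last point maps to the last index)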

    @add_call_types
    def resume(self) -> None:
        """Resume a paused scan.

        Normally it will return in Running state. If something goes wrong it
        will return in Fault state.
        """
        self.transition(ss.RUNNING)
        assert self.resume_queue, "No resume queue"
        self.resume_queue.put(True)
        # self.run will now take over

    def do_disable(self) -> None:
        # Abort anything that is currently running, but don't wait
        for context in self.part_contexts.values():
            context.stop()
        if self.resume_queue:
            self.resume_queue.put(False)
        super().do_disable()

    def go_to_error_state(self, exception):
        if self.resume_queue:
            self.resume_queue.put(False)
        super().go_to_error_state(exception)


class PandABlocksManagerController(ManagerController):
    def __init__(self,
                 mri,  # type: AMri
                 config_dir,  # type: AConfigDir
                 hostname="localhost",  # type: AHostname
                 port=8888,  # type: APort
                 initial_design="",  # type: AInitialDesign
                 description="",  # type: ADescription
                 use_git=True,  # type: AUseGit
                 doc_url_base=DOC_URL_BASE,  # type: ADocUrlBase
                 poll_period=0.1  # type: APollPeriod
                 ):
        # type: (...) -> None
        super(PandABlocksManagerController, self).__init__(
            mri, config_dir, initial_design, description, use_git)
        self._poll_period = poll_period
        self._doc_url_base = doc_url_base
        # {block_name: BlockData}
        self._blocks_data = {}
        # {block_name: {field_name: Part}}
        self._blocks_parts = OrderedDict()
        # {src_attr: set(dest_attrs)} attributes to update when a source changes
        self._listening_attrs = {}
        # which lut icon elements to hide for a given function constant
        # {fnum: set(invisible element ids)}
        self._lut_elements = {}
        # changes left over from last time
        self.changes = OrderedDict()
        # The PandABlock client that does the comms
        self.client = PandABlocksClient(hostname, port, Queue)
        # Filled in on reset
        self._stop_queue = None
        self._poll_spawned = None

    def do_init(self):
        # start the poll loop and make block parts first to fill in our parts
        # before calling _set_block_children()
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_init()

    def start_poll_loop(self):
        # queue to listen for stop events
        if not self.client.started:
            self._stop_queue = Queue()
            self.client.start(self.process.spawn, socket)
        if not self._blocks_parts:
            self._make_blocks_parts()
        if self._poll_spawned is None:
            self._poll_spawned = self.process.spawn(self._poll_loop)

    def do_disable(self):
        super(PandABlocksManagerController, self).do_disable()
        self.stop_poll_loop()

    def do_reset(self):
        self.start_poll_loop()
        super(PandABlocksManagerController, self).do_reset()

    def _poll_loop(self):
        """At self.poll_period poll for changes"""
        next_poll = time.time()
        while True:
            next_poll += self._poll_period
            timeout = next_poll - time.time()
            if timeout < 0:
                timeout = 0
            try:
                return self._stop_queue.get(timeout=timeout)
            except TimeoutError:
                # No stop, no problem
                pass
            try:
                self.handle_changes(self.client.get_changes())
            except Exception:
                # TODO: should fault here?
                self.log.exception("Error while getting changes")
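
    # The stop queue doubles as an interruptible sleep: get() blocks for the
    # remainder of the poll period and only returns (ending the loop) when
    # stop_poll_loop() puts None on it.  A standalone sketch of the same
    # pattern (names are illustrative only):
    #
    #     stop_queue = Queue()
    #
    #     def poll_forever(period=0.1):
    #         next_poll = time.time()
    #         while True:
    #             next_poll += period
    #             try:
    #                 return stop_queue.get(timeout=max(0, next_poll - time.time()))
    #             except TimeoutError:
    #                 do_poll()  # nothing on the queue yet, poll again
    #
    #     stop_queue.put(None)  # from another thread, to stop the loop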

    def stop_poll_loop(self):
        if self._poll_spawned:
            self._stop_queue.put(None)
            self._poll_spawned.wait()
            self._poll_spawned = None
        if self.client.started:
            self.client.stop()

    def _make_blocks_parts(self):
        # {block_name_without_number: BlockData}
        self._blocks_data = OrderedDict()
        self._blocks_parts = OrderedDict()
        for block_rootname, block_data in self.client.get_blocks_data().items():
            block_names = []
            if block_data.number == 1:
                block_names.append(block_rootname)
            else:
                for i in range(block_data.number):
                    block_names.append("%s%d" % (block_rootname, i + 1))
            for block_name in block_names:
                self._blocks_data[block_name] = block_data
                self._make_parts(block_name, block_data)
        # Handle the initial set of changes to get an initial value
        self.handle_changes(self.client.get_changes())
        # Then once more to let bit_outs toggle back
        self.handle_changes(())
        assert not self.changes, "There are still changes %s" % self.changes

    def _make_child_controller(self, parts, mri):
        controller = BasicController(mri=mri)
        if mri.endswith("PCAP"):
            parts.append(PandABlocksActionPart(
                self.client, "*PCAP", "ARM", "Arm position capture", []))
            parts.append(PandABlocksActionPart(
                self.client, "*PCAP", "DISARM", "Disarm position capture", []))
        for part in parts:
            controller.add_part(part)
        return controller

    def _make_corresponding_part(self, block_name, mri):
        part = ChildPart(name=block_name, mri=mri, stateful=False)
        return part

    def _make_parts(self, block_name, block_data):
        mri = "%s:%s" % (self.mri, block_name)

        # Defer creation of parts to a block maker
        maker = PandABlocksMaker(
            self.client, block_name, block_data, self._doc_url_base)

        # Make the child controller and add it to the process
        controller = self._make_child_controller(maker.parts.values(), mri)
        self.process.add_controller(controller, timeout=5)

        # Store the parts so we can update them with the poller
        self._blocks_parts[block_name] = maker.parts

        # Make the corresponding part for us
        child_part = self._make_corresponding_part(block_name, mri)
        self.add_part(child_part)

    def _set_lut_icon(self, block_name):
        icon_attr = self._blocks_parts[block_name]["icon"].attr
        with open(os.path.join(SVG_DIR, "LUT.svg")) as f:
            svg_text = f.read()
        fnum = int(self.client.get_field(block_name, "FUNC.RAW"), 0)
        invis = self._get_lut_icon_elements(fnum)
        # https://stackoverflow.com/a/8998773
        ET.register_namespace('', "http://www.w3.org/2000/svg")
        root = ET.fromstring(svg_text)
        for i in invis:
            # Find the first parent which has a child with id i
            parent = root.find('.//*[@id=%r]/..' % i)
            # Find the child and remove it
            child = parent.find('./*[@id=%r]' % i)
            parent.remove(child)
        svg_text = et_to_string(root)
        icon_attr.set_value(svg_text)

    def _get_lut_icon_elements(self, fnum):
        if not self._lut_elements:
            # Generate the lut element table
            # Do the general case funcs
            funcs = [("AND", operator.and_), ("OR", operator.or_)]
            for func, op in funcs:
                for nargs in (2, 3, 4, 5):
                    # 2**nargs permutations
                    for permutation in range(2 ** nargs):
                        self._calc_visibility(func, op, nargs, permutation)
            # Add in special cases for NOT
            for ninp in "ABCDE":
                invis = {"AND", "OR", "LUT"}
                for inp in "ABCDE":
                    if inp != ninp:
                        invis.add(inp)
                    invis.add("not%s" % inp)
                self._lut_elements[~LUT_CONSTANTS[ninp] & (2 ** 32 - 1)] = invis
            # And catchall for LUT in 0
            invis = {"AND", "OR", "NOT"}
            for inp in "ABCDE":
                invis.add("not%s" % inp)
            self._lut_elements[0] = invis
        return self._lut_elements.get(fnum, self._lut_elements[0])

    def _calc_visibility(self, func, op, nargs, permutations):
        # Visibility dictionary defaults
        invis = {"AND", "OR", "LUT", "NOT"}
        invis.remove(func)
        args = []
        for i, inp in enumerate("EDCBA"):
            # xxxxx where x is 0 or 1
            # EDCBA
            negations = format(permutations, '05b')
            if (5 - i) > nargs:
                # invisible
                invis.add(inp)
                invis.add("not%s" % inp)
            else:
                # visible
                if negations[i] == "1":
                    args.append(~LUT_CONSTANTS[inp] & (2 ** 32 - 1))
                else:
                    invis.add("not%s" % inp)
                    args.append(LUT_CONSTANTS[inp])

        # Insert into table
        fnum = op(args[0], args[1])
        for a in args[2:]:
            fnum = op(fnum, a)
        self._lut_elements[fnum] = invis
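
    # Worked example: for func="AND", op=operator.and_, nargs=2 and
    # permutations=0b00000 the loop keeps inputs A and B visible, hides
    # C/D/E and every "notX" element, and stores
    #     fnum = LUT_CONSTANTS["A"] & LUT_CONSTANTS["B"]
    # so a FUNC.RAW readback equal to that value maps back to the A-AND-B icon.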

    def handle_changes(self, changes):
        for k, v in changes:
            self.changes[k] = v
        block_changes = OrderedDict()
        for full_field, val in list(self.changes.items()):
            block_name, field_name = full_field.split(".", 1)
            block_changes.setdefault(block_name, []).append((
                field_name, full_field, val))
        for block_name, field_changes in block_changes.items():
            # Squash changes
            block_mri = "%s:%s" % (self.mri, block_name)
            try:
                block_controller = self.process.get_controller(block_mri)
            except ValueError:
                self.log.debug("Block %s not known", block_name)
                for _, full_field, _ in field_changes:
                    self.changes.pop(full_field)
            else:
                with block_controller.changes_squashed:
                    self.do_field_changes(block_name, field_changes)
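
    # Changes arrive as (full_field, value) pairs, e.g. (values illustrative):
    #
    #     controller.handle_changes([
    #         ("PULSE1.DELAY", "100"),
    #         ("TTLIN1.VAL", "1"),
    #     ])
    #
    # Fields of unknown blocks are dropped; the rest are applied inside the
    # child controller's changes_squashed so subscribers see a single delta.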

    def do_field_changes(self, block_name, field_changes):
        for field_name, full_field, val in field_changes:
            ret = self.update_attribute(block_name, field_name, val)
            if ret is not None:
                self.changes[full_field] = ret
            else:
                self.changes.pop(full_field)
            # If it was LUT.FUNC then recalculate icon
            if block_name.startswith("LUT") and field_name == "FUNC":
                self._set_lut_icon(block_name)

    def update_attribute(self, block_name, field_name, value):
        ret = None
        parts = self._blocks_parts[block_name]
        if field_name not in parts:
            self.log.debug("Block %s has no field %s", block_name, field_name)
            return ret
        part = parts[field_name]
        attr = part.attr
        field_data = self._blocks_data[block_name].fields.get(field_name, None)
        if value == Exception:
            # TODO: set error
            self.log.warning("Field %s.%s in error", block_name, field_name)
            value = None

        # Cheaper than isinstance
        if attr.meta.typeid == TableMeta.typeid:
            value = part.table_from_list(value)
        elif attr.meta.typeid == BooleanMeta.typeid:
            value = bool(int(value))
            is_bit_out = field_data and field_data.field_type == "bit_out"
            if is_bit_out and attr.value is value:
                # make bit_out things toggle while changing
                ret = value
                value = not value
        else:
            value = attr.meta.validate(value)

        # Update the value of our attribute and anyone listening
        ts = TimeStamp()
        attr.set_value_alarm_ts(value, Alarm.ok, ts)
        for dest_attr in self._listening_attrs.get(attr, []):
            dest_attr.set_value_alarm_ts(value, Alarm.ok, ts)

        # if we changed the value of a mux, update the slaved values
        if field_data and field_data.field_type in ("bit_mux", "pos_mux"):
            current_part = parts[field_name + ".CURRENT"]
            current_attr = current_part.attr
            self._update_current_attr(
                current_attr, value, ts, field_data.field_type)

        # if we changed a pos_out, its SCALE or OFFSET, update its scaled value
        root_field_name = field_name.split(".")[0]
        field_data = self._blocks_data[block_name].fields[root_field_name]
        if field_data.field_type == "pos_out":
            scale = parts[root_field_name + ".SCALE"].attr.value
            offset = parts[root_field_name + ".OFFSET"].attr.value
            scaled = parts[root_field_name].attr.value * scale + offset
            parts[root_field_name + ".SCALED"].attr.set_value_alarm_ts(
                scaled, Alarm.ok, ts)
        return ret

    def _update_current_attr(self, current_attr, mux_val, ts, field_type):
        # Remove the old current_attr from all lists
        for mux_set in self._listening_attrs.values():
            try:
                mux_set.remove(current_attr)
            except KeyError:
                pass
        # add it to the list of things that need to update
        try:
            val = MUX_CONSTANT_VALUES[field_type][mux_val]
        except KeyError:
            mon_block_name, mon_field_name = mux_val.split(".", 1)
            mon_parts = self._blocks_parts[mon_block_name]
            out_attr = mon_parts[mon_field_name].attr
            self._listening_attrs.setdefault(out_attr, set()).add(current_attr)
            # update it to the right value
            val = out_attr.value
        current_attr.set_value_alarm_ts(val, Alarm.ok, ts)
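
A rough construction sketch for the controller above (hostname, mri and
config_dir are illustrative; assumes a PandA is reachable on port 8888):

    from malcolm.core import Process

    process = Process("Process")
    panda = PandABlocksManagerController(
        mri="PANDA", config_dir="/tmp/panda_config", hostname="panda-host")
    process.add_controller(panda)
    process.start()  # do_init() starts the poll loop and builds block parts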
Ejemplo n.º 19
0
class PvaClientComms(ClientComms):
    """A class for a client to communicate with the server"""

    use_cothread = False
    _monitors = None
    _send_queue = None

    def do_init(self):
        super(PvaClientComms, self).do_init()
        self._monitors = {}
        self._send_queue = Queue()

    def send_to_server(self, request):
        """Dispatch a request to the server

        Args:
            request(Request): The message to pass to the server
        """
        self._send_queue.put(request)
        self.spawn(self._send_to_server)

    def _send_to_server(self):
        request = self._send_queue.get(timeout=0)
        try:
            request = deserialize_object(request, Request)
            response = None
            if isinstance(request, Get):
                response = self._execute_get(request)
            elif isinstance(request, Put):
                response = self._execute_put(request)
            elif isinstance(request, Post):
                response = self._execute_rpc(request)
            elif isinstance(request, Subscribe):
                self._execute_monitor(request)
            elif isinstance(request, Unsubscribe):
                response = self._execute_unsubscribe(request)
            else:
                raise UnexpectedError("Unexpected request %s" % request)
        except Exception as e:
            _, response = request.error_response(e)
        if response:
            request.callback(response)

    def _response_from_dict(self, request, d):
        if d.get("typeid", "") == Error.typeid:
            response = Error(request.id, d["message"])
        else:
            response = Return(request.id, d)
        return response

    def _execute_get(self, request):
        path = ".".join(request.path[1:])
        channel = pvaccess.Channel(request.path[0])
        d = channel.get(path).toDict()
        response = self._response_from_dict(request, d)
        return response

    def _execute_put(self, request):
        path = ".".join(request.path[1:])
        channel = pvaccess.Channel(request.path[0])
        channel.put(request.value, path)
        response = Return(request.id)
        return response

    def _execute_monitor(self, request):
        # Connect to the channel
        path = ".".join(request.path[1:])
        channel = pvaccess.Channel(request.path[0])
        self._monitors[request.generate_key()] = channel

        # Store the connection within the monitor set
        def callback(value=None):
            # TODO: ordering is not maintained here...
            # TODO: should we strip_tuples here?
            d = value.toDict(True)
            if d.get("typeid", "") == Error.typeid:
                response = Error(request.id, d["message"])
                self._monitors.pop(request.generate_key())
                channel.unsubscribe("")
            else:
                # TODO: support Deltas properly
                if request.delta:
                    response = Delta(request.id, [[[], d]])
                else:
                    response = Update(request.id, d)
            request.callback(response)

        # Perform a subscribe, but it returns nothing
        channel.subscribe("sub", callback)
        channel.startMonitor(path)

    def _execute_unsubscribe(self, request):
        channel = self._monitors.pop(request.generate_key())
        channel.unsubscribe("sub")
        response = Return(request.id)
        return response

    def _execute_rpc(self, request):
        method = pvaccess.PvObject({'method': pvaccess.STRING})
        method.set({'method': request.path[1]})
        # Connect to the channel and create the RPC client
        rpc = pvaccess.RpcClient(request.path[0], method)
        # Construct the pv object from the parameters
        params = dict_to_pv_object(request.parameters)
        # Call the method on the RPC object
        value = rpc.invoke(params)
        # Now create the Return object and populate it with the response
        d = strip_tuples(value.toDict(True))
        response = self._response_from_dict(request, d)
        return response
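
A hedged usage sketch of the request path above (the comms instance and the
block/attribute names are illustrative): every request is queued by
send_to_server() and serviced one at a time by the spawned _send_to_server().

    from malcolm.core import Queue, Get

    q = Queue()
    get = Get(id=1, path=["PANDA", "health", "value"])
    get.set_callback(q.put)
    comms.send_to_server(get)     # queue the request and spawn the worker
    response = q.get(timeout=5)   # Return (or Error) built from the PV data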
Ejemplo n.º 20
0
        block2 = self.process2.block_view("hello")
        ret = block2.greet(name="me2")
        assert ret == dict(greeting="Hello me2")
        with self.assertRaises(ResponseError):
            block2.error()

    #def test_server_counter_with_malcolm_client(self):
    #    from malcolm.core import call_with_params, Context
    #    from malcolm.blocks.builtin import proxy_block
    #    call_with_params(
    #        proxy_block, self.process2, mri="counter2", comms="client")
    #    context = Context("context", self.process2)
    #    context.when_matches(["counter2", "health", "value"], "OK", timeout=2)
    #    context.sleep(3)
    #    block2 = self.process2.block_view("counter2")
    #    block2.zero()
    #    self.assertEqual(block2.counter.value, 0)
    #    block2.increment()
    #    self.assertEqual(block2.counter.value, 1)
    #    block2.zero()
    #    self.assertEqual(block2.counter.value, 0)
    #    assert self.client.remote_blocks.value == (
    #        "hello", "counter", "server")


if __name__ == "__main__":
    from malcolm.core import Queue, Process

    q = Queue()
    p1(q)
Ejemplo n.º 21
0
class ChildPart(Part):
    def __init__(self, params):
        # Layout options
        self.x = 0
        self.y = 0
        self.visible = None
        # {part_name: visible} saying whether part_name is visible
        self.part_visible = {}
        # {attr_name: attr_value} of last saved/loaded structure
        self.saved_structure = {}
        # {attr_name: modified_message} of current values
        self.modified_messages = {}
        # The controller hosting our child
        self.child_controller = None
        # {id: Subscribe} for subscriptions to config tagged fields
        self.config_subscriptions = {}
        # set(attribute_name) where the attribute is a config tagged field
        # we are modifying
        self.we_modified = set()
        # Update queue of modified alarms
        self.modified_update_queue = Queue()
        # Update queue of exportable fields
        self.exportable_update_queue = Queue()
        # {attr_name: PortInfo}
        self.port_infos = {}
        # Store params
        self.params = params
        super(ChildPart, self).__init__(params.name)

    def notify_dispatch_request(self, request):
        """Will be called when a context passed to a hooked function is about
        to dispatch a request"""
        if isinstance(request, Put):
            self.we_modified.add(request.path[-2])

    @ManagerController.Init
    def init(self, context):
        # Save what we have
        self.save(context)
        # Monitor the child configure for changes
        self.child_controller = context.get_controller(self.params.mri)
        subscribe = Subscribe(path=[self.params.mri, "meta", "fields"],
                              callback=self.update_part_exportable)
        # Wait for the first update to come in
        self.child_controller.handle_request(subscribe).wait()

    @ManagerController.Halt
    def halt(self, context):
        unsubscribe = Unsubscribe(callback=self.update_part_exportable)
        self.child_controller.handle_request(unsubscribe)

    def update_part_exportable(self, response):
        # Get a child context to check if we have a config field
        child = self.child_controller.block_view()
        spawned = []
        if response.value:
            new_fields = response.value
        else:
            new_fields = []

        # Remove any existing subscription that is not in the new fields
        for subscribe in self.config_subscriptions.values():
            attr_name = subscribe.path[-2]
            if attr_name not in new_fields:
                unsubscribe = Unsubscribe(subscribe.id, subscribe.callback)
                spawned.append(
                    self.child_controller.handle_request(unsubscribe))
                self.port_infos.pop(attr_name, None)

        # Add a subscription to any new field
        existing_fields = set(s.path[-2]
                              for s in self.config_subscriptions.values())
        for field in set(new_fields) - existing_fields:
            attr = getattr(child, field)
            if isinstance(attr, Attribute):
                for tag in attr.meta.tags:
                    match = port_tag_re.match(tag)
                    if match:
                        d, type, extra = match.groups()
                        self.port_infos[field] = PortInfo(name=field,
                                                          value=attr.value,
                                                          direction=d,
                                                          type=type,
                                                          extra=extra)
            if isinstance(attr, Attribute) and config() in attr.meta.tags:
                if self.config_subscriptions:
                    new_id = max(self.config_subscriptions) + 1
                else:
                    new_id = 1
                subscribe = Subscribe(id=new_id,
                                      path=[self.params.mri, field, "value"],
                                      callback=self.update_part_modified)
                self.config_subscriptions[new_id] = subscribe
                # Signal that any change we get is a difference
                if field not in self.saved_structure:
                    self.saved_structure[field] = None
                spawned.append(self.child_controller.handle_request(subscribe))

        # Wait for the first update to come in
        for s in spawned:
            s.wait()

        # Put data on the queue, so if spawns are handled out of order we
        # still get the most up to date data
        port_infos = [
            self.port_infos[f] for f in new_fields if f in self.port_infos
        ]
        self.exportable_update_queue.put((new_fields, port_infos))
        self.spawn(self._update_part_exportable).wait()

    def _update_part_exportable(self):
        # We spawned just above, so there is definitely something on the
        # queue
        fields, port_infos = self.exportable_update_queue.get(timeout=0)
        self.controller.update_exportable(self, fields, port_infos)

    def update_part_modified(self, response):
        subscribe = self.config_subscriptions[response.id]
        name = subscribe.path[-2]
        original_value = self.saved_structure[name]
        try:
            np.testing.assert_equal(original_value, response.value)
        except AssertionError:
            message = "%s.%s.value = %r not %r" % (
                self.name, name, response.value, original_value)
            if name in self.we_modified:
                message = "(We modified) " + message
            self.modified_messages[name] = message
        else:
            self.modified_messages.pop(name, None)
        message_list = []
        only_modified_by_us = True
        # Tell the controller what has changed
        for name, message in self.modified_messages.items():
            if name not in self.we_modified:
                only_modified_by_us = False
            message_list.append(message)
        if message_list:
            if only_modified_by_us:
                severity = AlarmSeverity.NO_ALARM
            else:
                severity = AlarmSeverity.MINOR_ALARM
            alarm = Alarm(severity, AlarmStatus.CONF_STATUS,
                          "\n".join(message_list))
        else:
            alarm = None
        # Put data on the queue, so if spawns are handled out of order we
        # still get the most up to date data
        self.modified_update_queue.put(alarm)
        self.spawn(self._update_part_modified).wait()

    def _update_part_modified(self):
        # We spawned just above, so there is definitely something on the
        # queue
        alarm = self.modified_update_queue.get(timeout=0)
        self.controller.update_modified(self, alarm)

    @ManagerController.Layout
    def layout(self, context, part_info, layout_table):
        # if this is the first call, we need to calculate if we are visible
        # or not
        if self.visible is None:
            self.visible = self.child_connected(part_info)
        for i, name in enumerate(layout_table.name):
            x = layout_table.x[i]
            y = layout_table.y[i]
            visible = layout_table.visible[i]
            if name == self.name:
                if self.visible and not visible:
                    self.sever_inports(context, part_info)
                self.x = x
                self.y = y
                self.visible = visible
            else:
                was_visible = self.part_visible.get(name, True)
                if was_visible and not visible:
                    self.sever_inports(context, part_info, name)
                self.part_visible[name] = visible
        ret = LayoutInfo(mri=self.params.mri,
                         x=self.x,
                         y=self.y,
                         visible=self.visible)
        return [ret]

    @ManagerController.Load
    def load(self, context, structure):
        child = context.block_view(self.params.mri)
        part_structure = structure.get(self.name, {})
        params = {}
        for k, v in part_structure.items():
            try:
                attr = getattr(child, k)
            except AttributeError:
                self.log.warning("Cannot restore non-existant attr %s" % k)
            else:
                try:
                    np.testing.assert_equal(serialize_object(attr.value), v)
                except AssertionError:
                    params[k] = v
        # Do this first so that any callbacks that happen in the put know
        # not to notify controller
        self.saved_structure = part_structure
        if params:
            child.put_attribute_values(params)

    @ManagerController.Save
    def save(self, context):
        child = context.block_view(self.params.mri)
        part_structure = OrderedDict()
        for k in child:
            attr = getattr(child, k)
            if isinstance(attr, Attribute) and "config" in attr.meta.tags:
                part_structure[k] = serialize_object(attr.value)
        self.saved_structure = part_structure
        return part_structure

    def _get_flowgraph_ports(self, part_info, direction):
        # {attr_name: port_info}
        ports = {}
        for port_info in part_info.get(self.name, []):
            if port_info.direction == direction:
                ports[port_info.name] = port_info
        return ports

    def _outport_lookup(self, port_infos):
        outport_lookup = {}
        for outport_info in port_infos:
            if outport_info.direction == "out":
                outport_lookup[outport_info.extra] = outport_info.type
        return outport_lookup

    def sever_inports(self, context, part_info, connected_to=None):
        """Conditionally sever inports of the child. If connected_to is then
        None then sever all, otherwise restrict to connected_to's outports

        Args:
            context (Context): The context to use
            part_info (dict): {part_name: [PortInfo]}
            connected_to (str): Restrict severing to this part
        """
        # Find the outports to connect to
        if connected_to:
            # Calculate a lookup of the outport "name" to type
            outport_lookup = self._outport_lookup(
                part_info.get(connected_to, []))
        else:
            outport_lookup = True

        # Find our inports
        inports = self._get_flowgraph_ports(part_info, "in")

        # If we have inports that need to be disconnected then do so
        if inports and outport_lookup:
            child = context.block_view(self.params.mri)
            attribute_values = {}
            for name, port_info in inports.items():
                if outport_lookup is True or outport_lookup.get(
                        child[name].value, None) == port_info.type:
                    attribute_values[name] = port_info.extra
            child.put_attribute_values(attribute_values)

    def child_connected(self, part_info):
        """Calculate if anything is connected to us or we are connected to
        anything else

        Args:
            part_info (dict): {part_name: [PortInfo]} from other ports

        Returns:
            bool: True if we are connected or have nothing to connect
        """
        has_ports = False
        # See if our inports are connected to anything
        inports = self._get_flowgraph_ports(part_info, "in")
        for name, inport_info in inports.items():
            disconnected_value = inport_info.extra
            has_ports = True
            if inport_info.value != disconnected_value:
                return True
        # Calculate a lookup of outport "name" to their types
        outport_lookup = self._outport_lookup(part_info.get(self.name, []))
        if outport_lookup:
            has_ports = True
        # See if anything is connected to one of our outports
        for inport_info in PortInfo.filter_values(part_info):
            if inport_info.direction == "in":
                if outport_lookup.get(inport_info.value,
                                      None) == inport_info.type:
                    return True
        # If we have ports and they haven't been connected to anything then
        # we are disconnected
        if has_ports:
            return False
        # otherwise, treat a block with no ports as connected
        else:
            return True
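
The modified/saved bookkeeping above leans on np.testing.assert_equal, which
compares nested structures and numpy arrays alike; a standalone sketch of the
same check (values are illustrative):

    import numpy as np

    def differs(saved, current):
        # mirrors update_part_modified() / load(): equal values pass silently,
        # any mismatch raises AssertionError
        try:
            np.testing.assert_equal(saved, current)
        except AssertionError:
            return True
        return False

    assert differs({"exposure": 0.1}, {"exposure": 0.2})
    assert not differs(np.array([1, 2, 3]), np.array([1, 2, 3]))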
Ejemplo n.º 22
0
    def test_block_fields_pulse(self):
        fields = OrderedDict()
        block_data = BlockData(4, "Pulse description", fields)
        fields["DELAY"] = FieldData("time", "", "Time", [])
        fields["INP"] = FieldData("bit_mux", "", "Input", ["ZERO", "X.OUT", "Y.OUT"])
        fields["OUT"] = FieldData("bit_out", "", "Output", [])
        fields["ERR_PERIOD"] = FieldData("read", "bit", "Error", [])

        o = PandABlockController(self.client, "MRI", "PULSE2", block_data, "/docs")
        self.process.add_controller(o)
        b = self.process.block_view("MRI:PULSE2")

        assert list(b) == [
            "meta",
            "health",
            "icon",
            "label",
            "help",
            "parameters",
            "delay",
            "delayUnits",
            "inputs",
            "inp",
            "inpDelay",
            "outputs",
            "out",
            "readbacks",
            "errPeriod",
        ]

        assert b.meta.label == "Pulse description 2"
        assert b.label.value == "Pulse description 2"

        # check setting label
        b.label.put_value("A new label")
        assert b.meta.label == "A new label"
        assert b.label.value == "A new label"
        self.client.set_field.assert_called_once_with(
            "*METADATA", "LABEL_PULSE2", "A new label"
        )
        self.client.set_field.reset_mock()

        # check updated with nothing
        o.handle_changes(dict(LABEL=""), ts=TimeStamp())
        assert b.meta.label == "Pulse description 2"
        assert b.label.value == "Pulse description 2"
        self.client.set_field.assert_not_called()

        # check updated with something from the server
        o.handle_changes(dict(LABEL="A server label"), ts=TimeStamp())
        assert b.meta.label == "A server label"
        assert b.label.value == "A server label"
        self.client.set_field.assert_not_called()

        help = b.help
        assert help.value == "/docs/build/pulse_doc.html"

        delay = b.delay
        assert delay.meta.writeable is True
        assert delay.meta.typeid == NumberMeta.typeid
        assert delay.meta.dtype == "float64"
        assert delay.meta.tags == ["group:parameters", "widget:textinput", "config:2"]

        units = b.delayUnits
        assert units.meta.writeable is True
        assert units.meta.typeid == ChoiceMeta.typeid
        assert units.meta.tags == ["group:parameters", "widget:combo", "config:1"]
        assert units.meta.choices == ["s", "ms", "us"]

        inp = b.inp
        assert inp.meta.writeable is True
        assert inp.meta.typeid == ChoiceMeta.typeid
        assert inp.meta.tags == [
            "group:inputs",
            "sinkPort:bool:ZERO",
            "widget:combo",
            "badgevalue:plus:inpDelay:MRI:PULSE2",
            "config:1",
        ]
        assert inp.meta.choices == ["ZERO", "X.OUT", "Y.OUT"]

        delay = b.inpDelay
        assert delay.meta.writeable is True
        assert delay.meta.typeid == NumberMeta.typeid
        assert delay.meta.dtype == "uint8"
        assert delay.meta.tags == ["group:inputs", "widget:textinput", "config:1"]

        out = b.out
        assert out.meta.writeable is False
        assert out.meta.typeid == BooleanMeta.typeid
        assert out.meta.tags == [
            "group:outputs",
            "sourcePort:bool:PULSE2.OUT",
            "widget:led",
        ]

        err = b.errPeriod
        assert err.meta.writeable is False
        assert err.meta.typeid == BooleanMeta.typeid
        assert err.meta.tags == ["group:readbacks", "widget:led"]

        queue = Queue()
        subscribe = Subscribe(path=["MRI:PULSE2", "inp"], delta=True)
        subscribe.set_callback(queue.put)
        o.handle_request(subscribe)
        delta = queue.get()
        assert delta.changes[0][1]["value"] == "ZERO"

        ts = TimeStamp()
        o.handle_changes({"INP": "X.OUT"}, ts)
        delta = queue.get()
        assert delta.changes == [
            [["value"], "X.OUT"],
            [["timeStamp"], ts],
            [
                ["meta", "tags"],
                [
                    "group:inputs",
                    "sinkPort:bool:ZERO",
                    "widget:combo",
                    "badgevalue:plus:inpDelay:MRI:PULSE2",
                    "config:1",
                    "linkedvalue:out:MRI:X",
                ],
            ],
        ]