def test_pos_table_deltas(self):
        queue = Queue()
        subscribe = Subscribe(path=["P"], delta=True)
        subscribe.set_callback(queue.put)
        self.o.handle_request(subscribe)
        delta = queue.get()
        capture_enums = delta.changes[0][1]["positions"]["meta"]["elements"][
            "capture"]["choices"]
        assert capture_enums[0] == PositionCapture.NO
        table = delta.changes[0][1]["positions"]["value"]
        assert table.name == ["COUNTER.OUT"]
        assert table.value == [0.0]
        assert table.scale == [1.0]
        assert table.offset == [0.0]
        assert table.capture == [PositionCapture.NO]

        self.o.handle_changes([("COUNTER.OUT", "20")])
        delta = queue.get()
        assert delta.changes == [
            [["positions", "value", "value"], [20.0]],
            [["positions", "timeStamp"], ANY],
        ]

        self.o.handle_changes([("COUNTER.OUT", "5"),
                               ("COUNTER.OUT.SCALE", 0.5)])
        delta = queue.get()
        assert delta.changes == [
            [["positions", "value", "value"], [2.5]],
            [["positions", "value", "scale"], [0.5]],
            [["positions", "timeStamp"], ANY],
        ]
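
The recurring pattern in these examples is malcolm's Queue used as a thread-safe sink for responses: the request callback is set to queue.put, the request is handed to a controller, and the test then drains responses with queue.get. A minimal sketch of the pattern, assuming the usual malcolm.core names (Queue, Subscribe and Delta are real malcolm classes; first_delta and its controller argument are illustrative):

from malcolm.core import Delta, Queue, Subscribe

def first_delta(controller, mri, timeout=1):
    # Subscribe to the whole block and return the first Delta response
    queue = Queue()
    subscribe = Subscribe(path=[mri], delta=True)
    subscribe.set_callback(queue.put)   # every response lands on the queue
    controller.handle_request(subscribe)
    delta = queue.get(timeout=timeout)  # first Delta carries the initial structure
    assert isinstance(delta, Delta)
    return delta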
Example #2
    def sync_proxy(self, mri, block):
        """Abstract method telling the ClientComms to sync this proxy Block
        with its remote counterpart. Should wait until it is connected

        Args:
            mri (str): The mri for the remote block
            block (BlockModel): The local proxy Block to keep in sync
        """
        done_queue = Queue()
        self._queues[mri] = done_queue
        update_fields = set()

        def callback(value=None):
            if isinstance(value, Exception):
                # Disconnect or Cancelled or RemoteError
                if isinstance(value, Disconnected):
                    # We will get a reconnect with a whole new structure
                    update_fields.clear()
                    block.health.set_value(
                        value="pvAccess disconnected",
                        alarm=Alarm.disconnected("pvAccess disconnected"),
                    )
            else:
                with block.notifier.changes_squashed:
                    if not update_fields:
                        self.log.debug("Regenerating from %s", list(value))
                        self._regenerate_block(block, value, update_fields)
                        done_queue.put(None)
                    else:
                        self._update_block(block, value, update_fields)

        m = self._ctxt.monitor(mri, callback, notify_disconnect=True)
        self._monitors.add(m)
        done_queue.get(timeout=DEFAULT_TIMEOUT)
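
sync_proxy above blocks on done_queue until the first monitor update has arrived and _regenerate_block has mirrored the remote structure; later updates are applied incrementally, and a Disconnected error clears update_fields so the next update triggers a full regeneration. A hypothetical caller-side sketch (the construction of the comms object and of the empty proxy BlockModel is not shown in this snippet, so both helpers are placeholders):

comms = make_client_comms()    # placeholder: however the ClientComms is built
block = make_proxy_block()     # placeholder: an empty local BlockModel
comms.sync_proxy("SOME:MRI", block)
# Returns once the first structure has been copied into block, or raises
# TimeoutError if nothing arrives within DEFAULT_TIMEOUT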
Example #3
 def testMonitorEverythingInitial(self):
     q = Queue()
     m = self.ctxt.monitor("TESTCOUNTER", q.put)
     self.addCleanup(m.close)
     counter = q.get(timeout=1)
     self.assertStructureWithoutTsEqual(str(counter), str(counter_expected))
     self.assertTrue(counter.changedSet().issuperset(
         {"meta.fields", "counter.value", "zero.meta.description"}))
     self.assertEqual(counter["counter.value"], 0)
     self.assertEqual(counter["zero.meta.description"],
                      "Zero the counter attribute")
     self.ctxt.put("TESTCOUNTER.counter", 5, "value")
     counter = q.get(timeout=1)
     self.assertEqual(counter.counter.value, 5)
     self.assertEqual(
         counter.changedSet(),
         {
             "counter.value",
             "counter.timeStamp.userTag",
             "counter.timeStamp.secondsPastEpoch",
             "counter.timeStamp.nanoseconds",
         },
     )
     self.ctxt.put("TESTCOUNTER.counter", 0, "value")
     counter = q.get(timeout=1)
     self.assertStructureWithoutTsEqual(str(counter), str(counter_expected))
Example #4
    def sync_proxy(self, mri, block):
        """Abstract method telling the ClientComms to sync this proxy Block
        with its remote counterpart. Should wait until it is connected

        Args:
            mri (str): The mri for the remote block
            block (BlockModel): The local proxy Block to keep in sync
        """
        # Send a root Subscribe to the server
        subscribe = Subscribe(path=[mri], delta=True)
        done_queue = Queue()

        def handle_response(response):
            # Called from tornado
            if not isinstance(response, Delta):
                # Return or Error is the end of our subscription, log and ignore
                self.log.debug("Proxy got response %r", response)
                done_queue.put(None)
            else:
                cothread.Callback(self._handle_response, response, block,
                                  done_queue)

        subscribe.set_callback(handle_response)
        IOLoopHelper.call(self._send_request, subscribe)
        done_queue.get(timeout=DEFAULT_TIMEOUT)
Example #5
class TestSpawned(unittest.TestCase):

    def setUp(self):
        self.q = Queue()

    def do_spawn(self, throw_me=None):
        s = Spawned(
            do_div, (40, 2, self.q, throw_me), {})
        return s

    def test_spawn_div(self):
        s = self.do_spawn()
        assert s.ready() is False
        s.wait(1)
        assert s.ready() is True
        assert self.q.get(1) == 20
        assert s.get() == 20

    def test_spawn_err(self):
        s = self.do_spawn(UnexpectedError)
        assert s.ready() is False
        s.wait(1)
        assert s.ready() is True
        assert self.q.get(1) == UnexpectedError
        with self.assertRaises(UnexpectedError):
            s.get()
Example #6
 def testTwoMonitors(self):
     if PVAPY:
         # No need to do this test on the old server
         return
     assert "TESTCOUNTER" not in self.server._pvs
     # Make first monitor
     q1 = Queue()
     m1 = self.ctxt.monitor("TESTCOUNTER", q1.put)
     self.addCleanup(m1.close)
     counter = q1.get(timeout=1)
     self.assertStructureWithoutTsEqual(str(counter), str(counter_expected))
     assert len(self.server._pvs["TESTCOUNTER"]) == 1
     # Make a second monitor and check that it also fires without making
     # another PV
     ctxt2 = self.make_pva_context()
     q2 = Queue()
     m2 = ctxt2.monitor("TESTCOUNTER", q2.put)
     self.addCleanup(m2.close)
     counter = q2.get(timeout=1)
     self.assertStructureWithoutTsEqual(str(counter), str(counter_expected))
     assert len(self.server._pvs["TESTCOUNTER"]) == 1
     # Check that a Put fires on both
     self.ctxt.put("TESTCOUNTER.counter", 5, "value")
     counter = q1.get(timeout=1)
     self.assertEqual(counter.counter.value, 5)
     counter = q2.get(timeout=1)
     self.assertEqual(counter.counter.value, 5)
Example #7
    def test_block_fields_lut(self):
        fields = OrderedDict()
        block_data = BlockData(8, "Lut description", fields)
        fields["FUNC"] = FieldData("param", "lut", "Function", [])

        o = PandABlockController(self.client, "MRI", "LUT3", block_data, "/docs")
        self.process.add_controller(o)
        b = self.process.block_view("MRI:LUT3")

        func = b.func
        assert func.meta.writeable is True
        assert func.meta.typeid == StringMeta.typeid
        assert func.meta.tags == ["group:parameters", "widget:textinput", "config:1"]

        queue = Queue()
        subscribe = Subscribe(path=["MRI:LUT3"], delta=True)
        subscribe.set_callback(queue.put)
        o.handle_request(subscribe)
        delta = queue.get()
        assert delta.changes[0][1]["func"]["value"] == ""
        assert '<path id="OR"' in delta.changes[0][1]["icon"]["value"]

        # This is the correct FUNC.RAW value for !A&!B&!C&!D&!E
        self.client.get_field.return_value = "1"
        ts = TimeStamp()
        o.handle_changes({"FUNC": "!A&!B&!C&!D&!E"}, ts)
        self.client.get_field.assert_called_once_with("LUT3", "FUNC.RAW")
        delta = queue.get()
        assert delta.changes == [
            [["func", "value"], "!A&!B&!C&!D&!E"],
            [["func", "timeStamp"], ts],
            [["icon", "value"], ANY],
            [["icon", "timeStamp"], ts],
        ]
        assert '<path id="OR"' not in delta.changes[2][1]
Example #8
    def test_table_deltas(self):
        queue = Queue()
        subscribe = Subscribe(path=["P"], delta=True)
        subscribe.set_callback(queue.put)
        self.o.handle_request(subscribe)
        delta = queue.get()
        table = delta.changes[0][1]["bits"]["value"]
        assert table.name == ["TTLIN1.VAL", "TTLIN2.VAL", "PCOMP.OUT"]
        assert table.value == [False, False, False]
        assert table.capture == [False, False, False]

        self.o.handle_changes([("TTLIN1.VAL", "1")])
        delta = queue.get()
        assert delta.changes == [
            [["bits", "value", "value"], [True, False, False]],
            [["bits", "timeStamp"], ANY],
        ]
Example #9
 def test_concurrent(self):
     q = Queue()
     request = Post(id=44,
                    path=["hello_block", "greet"],
                    parameters=dict(name="me", sleep=1))
     request.set_callback(q.put)
     self.controller.handle_request(request)
     request = Post(id=45, path=["hello_block", "error"])
     request.set_callback(q.put)
     self.controller.handle_request(request)
     response = q.get(timeout=1.0)
     self.assertIsInstance(response, Error)
     assert response.id == 45
     response = q.get(timeout=3.0)
     self.assertIsInstance(response, Return)
     assert response.id == 44
     assert response.value == "Hello me"
Example #10
 def test_concurrent(self):
     q = Queue()
     request = Subscribe(id=40, path=["hello_block", "greet"], delta=True)
     request.set_callback(q.put)
     self.controller.handle_request(request)
     # Get the initial subscribe value
     initial = q.get(timeout=0.1)
     self.assertIsInstance(initial, Delta)
     assert initial.changes[0][1]["took"]["value"] == dict(sleep=0, name="")
     assert initial.changes[0][1]["returned"]["value"] == {"return": ""}
     # Do a greet
     request = Post(id=44,
                    path=["hello_block", "greet"],
                    parameters=dict(name="me", sleep=1))
     request.set_callback(q.put)
     self.controller.handle_request(request)
     # Then an error
     request = Post(id=45, path=["hello_block", "error"])
     request.set_callback(q.put)
     self.controller.handle_request(request)
     # We should quickly get the error response first
     response = q.get(timeout=1.0)
     self.assertIsInstance(response, Error)
     assert response.id == 45
     # Then the long running greet delta
     response = q.get(timeout=3.0)
     self.assertIsInstance(response, Delta)
     assert len(response.changes) == 2
     assert response.changes[0][0] == ["took"]
     took = response.changes[0][1]
     assert took.value == dict(sleep=1, name="me")
     assert took.present == ["name", "sleep"]
     assert took.alarm == Alarm.ok
     assert response.changes[1][0] == ["returned"]
     returned = response.changes[1][1]
     assert returned.value == {"return": "Hello me"}
     assert returned.present == ["return"]
     assert returned.alarm == Alarm.ok
     # Check it took about 1s to run
     assert abs(1 - (returned.timeStamp.to_time() -
                     took.timeStamp.to_time())) < 0.4
      # And its response
     response = q.get(timeout=1.0)
     self.assertIsInstance(response, Return)
     assert response.id == 44
     assert response.value == "Hello me"
Example #11
class TestSystemRest(unittest.TestCase):
    socket = 8886

    def setUp(self):
        self.process = Process("proc")
        self.hello = hello_block(mri="hello")[-1]
        self.process.add_controller(self.hello)
        self.server = web_server_block(mri="server", port=self.socket)[-1]
        self.process.add_controller(self.server)
        self.result = Queue()
        self.http_client = AsyncHTTPClient()
        self.process.start()

    def tearDown(self):
        self.process.stop(timeout=1)

    @gen.coroutine
    def get(self, mri):
        result = yield self.http_client.fetch("http://localhost:%s/rest/%s" %
                                              (self.socket, mri))
        cothread.Callback(self.result.put, result)

    @gen.coroutine
    def post(self, mri, method, args):
        req = HTTPRequest(
            "http://localhost:%s/rest/%s/%s" % (self.socket, mri, method),
            method="POST",
            body=args,
        )
        result = yield self.http_client.fetch(req)
        cothread.Callback(self.result.put, result)

    def test_get_hello(self):
        IOLoopHelper.call(self.get, "hello")
        result = self.result.get(timeout=2)
        assert result.body.decode().strip() == json_encode(self.hello._block)

    def test_post_hello(self):
        IOLoopHelper.call(self.post, "hello", "greet",
                          json_encode(dict(name="me")))
        result = self.result.get(timeout=2)
        assert result.body.decode().strip() == json_encode("Hello me")
Example #12
 def test_hello_good_input(self):
     q = Queue()
     request = Post(id=44,
                    path=["hello_block", "greet"],
                    parameters=dict(name="thing"))
     request.set_callback(q.put)
     self.controller.handle_request(request)
     response = q.get(timeout=1.0)
     self.assertIsInstance(response, Return)
     assert response.id == 44
     assert response.value == "Hello thing"
Example #13
 def _request_response(self, request_cls, path, **kwargs):
     queue = Queue()
     request = request_cls(path=[self._mri] + path,
                           callback=queue.put,
                           **kwargs)
     self._controller.handle_request(request)
     response = queue.get()
     if isinstance(response, Error):
         raise ResponseError(response.message)
     else:
         return response
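
_request_response above folds the same callback-queue pattern into a synchronous helper: build a request addressed under self._mri, block on the queue, and raise ResponseError if the reply is an Error. A hedged usage sketch, reusing the Put class seen elsewhere in these examples and malcolm's Get request (the attribute path is illustrative):

# Read then write an attribute of the wrapped block
value = self._request_response(Get, ["counter", "value"]).value
self._request_response(Put, ["counter", "value"], value=0)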
Example #14
 def testMonitorSubfieldInitial(self):
     q = Queue()
     m = self.ctxt.monitor("TESTCOUNTER", q.put, "meta.fields")
     self.addCleanup(m.close)
     counter = q.get(timeout=1)
     self.assertEqual(counter.getID(), "structure")
     # P4P only says leaves have changed
     self.assertEqual(counter.changedSet(), {"meta.fields"})
     self.assertEqual(counter.meta.fields,
                      ["health", "counter", "delta", "zero", "increment"])
     fields_code = dict(counter.meta.type().aspy()[2])["fields"]
     self.assertEqual(fields_code, "as")
Example #15
 def testMonitorDotted(self):
     q = Queue()
     m = self.ctxt.monitor("TESTCOUNTER.counter", q.put)
     self.addCleanup(m.close)
     counter: Value = q.get(timeout=1)
     self.assertEqual(counter.getID(), "epics:nt/NTScalar:1.0")
     self.assertTrue(counter.changedSet().issuperset(
         {"value", "alarm.severity", "timeStamp.userTag"}))
     self.ctxt.put("TESTCOUNTER.counter", 5, "value")
     counter = q.get(timeout=1)
     self.assertEqual(counter.value, 5)
     self.assertEqual(
         counter.changedSet(),
         {
             "value",
             "timeStamp.userTag",
             "timeStamp.secondsPastEpoch",
             "timeStamp.nanoseconds",
         },
     )
     self.ctxt.put("TESTCOUNTER.counter", 0, "value")
     counter = q.get(timeout=1)
     self.assertEqual(counter.value, 0)
Example #16
 def test_concurrency(self):
     q = Queue()
     # Subscribe to the whole block
     sub = Subscribe(id=0, path=["mri"], delta=True)
     sub.set_callback(q.put)
     self.c.handle_request(sub)
     # We should get first Delta through with initial value
     r = q.get().to_dict()
     assert r["id"] == 0
     assert len(r["changes"]) == 1
     assert len(r["changes"][0]) == 2
     assert r["changes"][0][0] == []
     assert r["changes"][0][1]["meta"]["label"] == "My label"
     assert r["changes"][0][1]["label"]["value"] == "My label"
     # Do a Put on the label
     put = Put(id=2, path=["mri", "label", "value"], value="New", get=True)
     put.set_callback(q.put)
     self.c.handle_request(put)
     # Check we got two updates before the return
     r = q.get().to_dict()
     assert r["id"] == 0
     assert len(r["changes"]) == 2
     assert len(r["changes"][0]) == 2
     assert r["changes"][0][0] == ["label", "value"]
     assert r["changes"][0][1] == "New"
      assert len(r["changes"][1]) == 2
     assert r["changes"][1][0] == ["label", "timeStamp"]
     r = q.get().to_dict()
     assert r["id"] == 0
     assert len(r["changes"]) == 1
     assert len(r["changes"][0]) == 2
     assert r["changes"][0][0] == ["meta", "label"]
     assert r["changes"][0][1] == "New"
     # Then the return
     r3 = q.get().to_dict()
     assert r3["id"] == 2
     assert r3["value"] == "New"
Example #17
 def testMonitorDotted(self):
     q = Queue()
     m = self.ctxt.monitor("TESTCOUNTER.counter", q.put)
     self.addCleanup(m.close)
     counter = q.get(timeout=1)  # type: Value
     self.assertEqual(counter.getID(), "epics:nt/NTScalar:1.0")
     self.assertTrue(counter.changedSet().issuperset({
         "value", "alarm.severity", "timeStamp.userTag"}))
     self.ctxt.put("TESTCOUNTER.counter", 5, "value")
     counter = q.get(timeout=1)
     self.assertEqual(counter.value, 5)
     if PVAPY:
          # bitsets in pvaPy don't work, so the changedSet is everything at the moment
         self.assertTrue(counter.changedSet().issuperset({
             "value", "alarm", "timeStamp"}))
     else:
         self.assertEqual(counter.changedSet(),
                          {"value",
                           "timeStamp.userTag",
                           "timeStamp.secondsPastEpoch",
                           "timeStamp.nanoseconds"})
     self.ctxt.put("TESTCOUNTER.counter", 0, "value")
     counter = q.get(timeout=1)
     self.assertEqual(counter.value, 0)
Example #18
 def test_counter_subscribe(self):
     q = Queue()
     sub = Subscribe(id=20,
                     path=["counting", "counter"],
                     delta=False,
                     callback=q.put)
     self.controller.handle_request(sub)
     response = q.get(timeout=1.0)
     self.assertIsInstance(response, Update)
     assert response.id == 20
     assert response.value["typeid"] == "epics:nt/NTScalar:1.0"
     assert response.value["value"] == 0
     post = Post(id=21, path=["counting", "increment"], callback=q.put)
     self.controller.handle_request(post)
     response = q.get(timeout=1)
     self.assertIsInstance(response, Update)
     assert response.id == 20
     assert response.value["value"] == 1
     response = q.get(timeout=1)
     self.assertIsInstance(response, Return)
     assert response.id == 21
      assert response.value is None
     with self.assertRaises(TimeoutError):
         q.get(timeout=0.05)
Example #19
 def wait_for_good_status(self, deadline):
     q = Queue()
     m = util.catools.camonitor(self.status_pv,
                                q.put,
                                datatype=util.catools.DBR_STRING)
     status = None
     try:
         while True:
             try:
                 status = q.get(deadline - time.time())
             except TimeoutError:
                 return status
             else:
                 if status == self.good_status:
                     return status
     finally:
         m.close()
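
wait_for_good_status above polls a camonitor queue until either the PV reports self.good_status or the deadline passes, returning the last status seen (or None if nothing arrived). A brief usage sketch, with an illustrative ten-second deadline:

status = self.wait_for_good_status(time.time() + 10)
if status != self.good_status:
    raise RuntimeError("PV did not reach %r, last status was %r" % (
        self.good_status, status))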
Example #20
    def send_put(self, mri, attribute_name, value):
        """Abstract method to dispatch a Put to the server

        Args:
            mri (str): The mri of the Block
            attribute_name (str): The name of the Attribute within the Block
            value: The value to put
        """
        q = Queue()
        request = Put(path=[mri, attribute_name, "value"], value=value)
        request.set_callback(q.put)
        IOLoopHelper.call(self._send_request, request)
        response = q.get()
        if isinstance(response, Error):
            raise response.message
        else:
            return response.value
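
send_put above makes the asynchronous Put look synchronous to the caller: the request is dispatched on the IOLoop thread while the calling thread blocks on q.get() until the matching Return arrives (an Error causes a raise instead). A hedged usage sketch, with placeholder comms object, mri and attribute name:

comms.send_put("SOME:MRI", "exposure", 0.1)   # blocks until the server replies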
Example #21
class TestSystemWSCommsServerOnly(unittest.TestCase):
    socket = 8881

    def setUp(self):
        self.process = Process("proc")
        self.hello = call_with_params(hello_block, self.process, mri="hello")
        self.server = call_with_params(
            web_server_block, self.process, mri="server", port=self.socket)
        self.result = Queue()
        self.process.start()

    def tearDown(self):
        self.process.stop(timeout=1)

    @gen.coroutine
    def send_message(self):
        conn = yield websocket_connect("ws://localhost:%s/ws" % self.socket)
        req = dict(
            typeid="malcolm:core/Post:1.0",
            id=0,
            path=["hello", "greet"],
            parameters=dict(
                name="me"
            )
        )
        conn.write_message(json.dumps(req))
        resp = yield conn.read_message()
        resp = json.loads(resp)
        self.result.put(resp)
        conn.close()

    def test_server_and_simple_client(self):
        self.server._loop.add_callback(self.send_message)
        resp = self.result.get(timeout=2)
        assert resp == dict(
            typeid="malcolm:core/Return:1.0",
            id=0,
            value=dict(
                typeid='malcolm:core/Map:1.0',
                greeting="Hello me",
            )
        )
Example #22
    def send_post(self, mri, method_name, **params):
        """Abstract method to dispatch a Post to the server

        Args:
            mri (str): The mri of the Block
            method_name (str): The name of the Method within the Block
            params: The parameters to send

        Returns:
            The return results from the server
        """
        q = Queue()
        request = Post(path=[mri, method_name], parameters=params)
        request.set_callback(q.put)
        IOLoopHelper.call(self._send_request, request)
        response = q.get()
        if isinstance(response, Error):
            raise response.message
        else:
            return response.value
Example #23
 def test_counter_subscribe(self):
     q = Queue()
     # Subscribe to the value
     sub = Subscribe(id=20, path=["counting", "counter"], delta=False)
     sub.set_callback(q.put)
     self.controller.handle_request(sub)
     # Check initial return
     response = q.get(timeout=1.0)
     self.assertIsInstance(response, Update)
     assert response.id == 20
     assert response.value["typeid"] == "epics:nt/NTScalar:1.0"
     assert response.value["value"] == 0
     # Post increment()
     post = Post(id=21, path=["counting", "increment"])
     post.set_callback(q.put)
     self.controller.handle_request(post)
     # Check the value updates...
     response = q.get(timeout=1)
     self.assertIsInstance(response, Update)
     assert response.id == 20
     assert response.value["value"] == 1
     # ... then we get the return
     response = q.get(timeout=1)
     self.assertIsInstance(response, Return)
     assert response.id == 21
     assert response.value is None
     # Check we can put too
     put = Put(id=22, path=["counting", "counter", "value"], value=31)
     put.set_callback(q.put)
     self.controller.handle_request(put)
     # Check the value updates...
     response = q.get(timeout=1)
     self.assertIsInstance(response, Update)
     assert response.id == 20
     assert response.value["value"] == 31
     # ... then we get the return
     response = q.get(timeout=1)
     self.assertIsInstance(response, Return)
     assert response.id == 22
     assert response.value is None
     # And that there isn't anything else
     with self.assertRaises(TimeoutError):
         q.get(timeout=0.05)
Example #24
 def __init__(self):
     assert not self._instance, \
         "Can't create more than one instance of Singleton. Use instance()"
     self.cothread = maybe_import_cothread()
     if self.cothread:
         # We can use it in this thread
         from cothread import catools
         self.in_cothread_thread = True
     else:
         # We need our own thread to run it in
         q = Queue()
         threading.Thread(target=_import_cothread, args=(q,)).start()
         self.cothread, catools = q.get()
         self.in_cothread_thread = False
     self.catools = catools
     self.DBR_STRING = catools.DBR_STRING
     self.DBR_LONG = catools.DBR_LONG
     self.DBR_DOUBLE = catools.DBR_DOUBLE
     self.FORMAT_CTRL = catools.FORMAT_CTRL
     self.FORMAT_TIME = catools.FORMAT_TIME
     self.DBR_ENUM = catools.DBR_ENUM
     self.DBR_CHAR_STR = catools.DBR_CHAR_STR
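
The constructor above imports cothread lazily: when it cannot be used from the current thread, a dedicated thread performs the import and hands (cothread, catools) back over a Queue. The _import_cothread helper is not shown in this snippet; a plausible sketch of what it needs to do (an assumption, not the actual implementation):

def _import_cothread(q):
    import cothread
    from cothread import catools
    # Hand the imported modules back to the waiting constructor...
    q.put((cothread, catools))
    # ...then keep this thread alive so cothread keeps running in it
    cothread.Event().Wait()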
Example #25
class ChildPart(Part):
    def __init__(self, params):
        # Layout options
        self.x = 0
        self.y = 0
        self.visible = None
        # {part_name: visible} saying whether part_name is visible
        self.part_visible = {}
        # {attr_name: attr_value} of last saved/loaded structure
        self.saved_structure = {}
        # {attr_name: modified_message} of current values
        self.modified_messages = {}
        # The controller hosting our child
        self.child_controller = None
        # {id: Subscribe} for subscriptions to config tagged fields
        self.config_subscriptions = {}
        # set(attribute_name) where the attribute is a config tagged field
        # we are modifying
        self.we_modified = set()
        # Update queue of modified alarms
        self.modified_update_queue = Queue()
        # Update queue of exportable fields
        self.exportable_update_queue = Queue()
        # {attr_name: PortInfo}
        self.port_infos = {}
        # Store params
        self.params = params
        super(ChildPart, self).__init__(params.name)

    def notify_dispatch_request(self, request):
        """Will be called when a context passed to a hooked function is about
        to dispatch a request"""
        if isinstance(request, Put):
            self.we_modified.add(request.path[-2])

    @ManagerController.Init
    def init(self, context):
        # Save what we have
        self.save(context)
        # Monitor the child configure for changes
        self.child_controller = context.get_controller(self.params.mri)
        subscribe = Subscribe(path=[self.params.mri, "meta", "fields"],
                              callback=self.update_part_exportable)
        # Wait for the first update to come in
        self.child_controller.handle_request(subscribe).wait()

    @ManagerController.Halt
    def halt(self, context):
        unsubscribe = Unsubscribe(callback=self.update_part_exportable)
        self.child_controller.handle_request(unsubscribe)

    def update_part_exportable(self, response):
        # Get a child context to check if we have a config field
        child = self.child_controller.block_view()
        spawned = []
        if response.value:
            new_fields = response.value
        else:
            new_fields = []

        # Remove any existing subscription that is not in the new fields
        for subscribe in self.config_subscriptions.values():
            attr_name = subscribe.path[-2]
            if attr_name not in new_fields:
                unsubscribe = Unsubscribe(subscribe.id, subscribe.callback)
                spawned.append(
                    self.child_controller.handle_request(unsubscribe))
                self.port_infos.pop(attr_name, None)

        # Add a subscription to any new field
        existing_fields = set(s.path[-2]
                              for s in self.config_subscriptions.values())
        for field in set(new_fields) - existing_fields:
            attr = getattr(child, field)
            if isinstance(attr, Attribute):
                for tag in attr.meta.tags:
                    match = port_tag_re.match(tag)
                    if match:
                        d, type, extra = match.groups()
                        self.port_infos[field] = PortInfo(name=field,
                                                          value=attr.value,
                                                          direction=d,
                                                          type=type,
                                                          extra=extra)
            if isinstance(attr, Attribute) and config() in attr.meta.tags:
                if self.config_subscriptions:
                    new_id = max(self.config_subscriptions) + 1
                else:
                    new_id = 1
                subscribe = Subscribe(id=new_id,
                                      path=[self.params.mri, field, "value"],
                                      callback=self.update_part_modified)
                self.config_subscriptions[new_id] = subscribe
                # Signal that any change we get is a difference
                if field not in self.saved_structure:
                    self.saved_structure[field] = None
                spawned.append(self.child_controller.handle_request(subscribe))

        # Wait for the first update to come in
        for s in spawned:
            s.wait()

        # Put data on the queue, so if spawns are handled out of order we
        # still get the most up to date data
        port_infos = [
            self.port_infos[f] for f in new_fields if f in self.port_infos
        ]
        self.exportable_update_queue.put((new_fields, port_infos))
        self.spawn(self._update_part_exportable).wait()

    def _update_part_exportable(self):
        # We spawned just above, so there is definitely something on the
        # queue
        fields, port_infos = self.exportable_update_queue.get(timeout=0)
        self.controller.update_exportable(self, fields, port_infos)

    def update_part_modified(self, response):
        subscribe = self.config_subscriptions[response.id]
        name = subscribe.path[-2]
        original_value = self.saved_structure[name]
        try:
            np.testing.assert_equal(original_value, response.value)
        except AssertionError:
            message = "%s.%s.value = %r not %r" % (
                self.name, name, response.value, original_value)
            if name in self.we_modified:
                message = "(We modified) " + message
            self.modified_messages[name] = message
        else:
            self.modified_messages.pop(name, None)
        message_list = []
        only_modified_by_us = True
        # Tell the controller what has changed
        for name, message in self.modified_messages.items():
            if name not in self.we_modified:
                only_modified_by_us = False
            message_list.append(message)
        if message_list:
            if only_modified_by_us:
                severity = AlarmSeverity.NO_ALARM
            else:
                severity = AlarmSeverity.MINOR_ALARM
            alarm = Alarm(severity, AlarmStatus.CONF_STATUS,
                          "\n".join(message_list))
        else:
            alarm = None
        # Put data on the queue, so if spawns are handled out of order we
        # still get the most up to date data
        self.modified_update_queue.put(alarm)
        self.spawn(self._update_part_modified).wait()

    def _update_part_modified(self):
        # We spawned just above, so there is definitely something on the
        # queue
        alarm = self.modified_update_queue.get(timeout=0)
        self.controller.update_modified(self, alarm)

    @ManagerController.Layout
    def layout(self, context, part_info, layout_table):
        # if this is the first call, we need to calculate if we are visible
        # or not
        if self.visible is None:
            self.visible = self.child_connected(part_info)
        for i, name in enumerate(layout_table.name):
            x = layout_table.x[i]
            y = layout_table.y[i]
            visible = layout_table.visible[i]
            if name == self.name:
                if self.visible and not visible:
                    self.sever_inports(context, part_info)
                self.x = x
                self.y = y
                self.visible = visible
            else:
                was_visible = self.part_visible.get(name, True)
                if was_visible and not visible:
                    self.sever_inports(context, part_info, name)
                self.part_visible[name] = visible
        ret = LayoutInfo(mri=self.params.mri,
                         x=self.x,
                         y=self.y,
                         visible=self.visible)
        return [ret]

    @ManagerController.Load
    def load(self, context, structure):
        child = context.block_view(self.params.mri)
        part_structure = structure.get(self.name, {})
        params = {}
        for k, v in part_structure.items():
            try:
                attr = getattr(child, k)
            except AttributeError:
                self.log.warning("Cannot restore non-existent attr %s" % k)
            else:
                try:
                    np.testing.assert_equal(serialize_object(attr.value), v)
                except AssertionError:
                    params[k] = v
        # Do this first so that any callbacks that happen in the put know
        # not to notify the controller
        self.saved_structure = part_structure
        if params:
            child.put_attribute_values(params)

    @ManagerController.Save
    def save(self, context):
        child = context.block_view(self.params.mri)
        part_structure = OrderedDict()
        for k in child:
            attr = getattr(child, k)
            if isinstance(attr, Attribute) and "config" in attr.meta.tags:
                part_structure[k] = serialize_object(attr.value)
        self.saved_structure = part_structure
        return part_structure

    def _get_flowgraph_ports(self, part_info, direction):
        # {attr_name: port_info}
        ports = {}
        for port_info in part_info.get(self.name, []):
            if port_info.direction == direction:
                ports[port_info.name] = port_info
        return ports

    def _outport_lookup(self, port_infos):
        outport_lookup = {}
        for outport_info in port_infos:
            if outport_info.direction == "out":
                outport_lookup[outport_info.extra] = outport_info.type
        return outport_lookup

    def sever_inports(self, context, part_info, connected_to=None):
        """Conditionally sever the inports of the child. If connected_to is
        None then sever all of them, otherwise only those connected to
        connected_to's outports

        Args:
            context (Context): The context to use
            part_info (dict): {part_name: [PortInfo]}
            connected_to (str): Restrict severing to this part
        """
        # Find the outports to connect to
        if connected_to:
            # Calculate a lookup of the outport "name" to type
            outport_lookup = self._outport_lookup(
                part_info.get(connected_to, []))
        else:
            outport_lookup = True

        # Find our inports
        inports = self._get_flowgraph_ports(part_info, "in")

        # If we have inports that need to be disconnected then do so
        if inports and outport_lookup:
            child = context.block_view(self.params.mri)
            attribute_values = {}
            for name, port_info in inports.items():
                if outport_lookup is True or outport_lookup.get(
                        child[name].value, None) == port_info.type:
                    attribute_values[name] = port_info.extra
            child.put_attribute_values(attribute_values)

    def child_connected(self, part_info):
        """Calculate if anything is connected to us or we are connected to
        anything else

        Args:
            part_info (dict): {part_name: [PortInfo]} from other ports

        Returns:
            bool: True if we are connected or have nothing to connect
        """
        has_ports = False
        # See if our inports are connected to anything
        inports = self._get_flowgraph_ports(part_info, "in")
        for name, inport_info in inports.items():
            disconnected_value = inport_info.extra
            has_ports = True
            if inport_info.value != disconnected_value:
                return True
        # Calculate a lookup of outport "name" to their types
        outport_lookup = self._outport_lookup(part_info.get(self.name, []))
        if outport_lookup:
            has_ports = True
        # See if anything is connected to one of our outports
        for inport_info in PortInfo.filter_values(part_info):
            if inport_info.direction == "in":
                if outport_lookup.get(inport_info.value,
                                      None) == inport_info.type:
                    return True
        # If we have ports and they haven't been connected to anything then
        # we are disconnected
        if has_ports:
            return False
        # otherwise, treat a block with no ports as connected
        else:
            return True
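
A detail worth noting in ChildPart above is the queue-then-spawn hand-off used by update_part_exportable and update_part_modified: each computed update is put on a Queue before a worker is spawned to consume exactly one item, so that even if spawns are handled out of order, every worker pops real data and the most up-to-date update still gets applied. A stripped-down sketch of the pattern (spawn and apply_update are placeholders for Part.spawn and the controller call):

updates = Queue()

def producer(new_value):
    updates.put(new_value)    # always enqueue before spawning the worker
    spawn(consumer).wait()

def consumer():
    value = updates.get(timeout=0)   # guaranteed non-empty: producer put first
    apply_update(value)              # placeholder for the real work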
Example #26
class RunnableController(builtin.controllers.ManagerController):
    """RunnableDevice implementer that also exposes GUI for child parts"""

    # The state_set that this controller implements
    state_set = ss()

    def __init__(
        self,
        mri: AMri,
        config_dir: AConfigDir,
        template_designs: ATemplateDesigns = "",
        initial_design: AInitialDesign = "",
        description: ADescription = "",
    ) -> None:
        super().__init__(
            mri=mri,
            config_dir=config_dir,
            template_designs=template_designs,
            initial_design=initial_design,
            description=description,
        )
        # Shared contexts between Configure, Run, Pause, Seek, Resume
        self.part_contexts: Dict[Part, Context] = {}
        # Any custom ConfigureParams subclasses requested by Parts
        self.part_configure_params: PartConfigureParams = {}
        # Params passed to configure()
        self.configure_params: Optional[ConfigureParams] = None
        # Progress reporting dict of completed_steps for each part
        self.progress_updates: Optional[Dict[Part, int]] = None
        # Queue so that do_run can wait to see why it was aborted and resume if
        # needed
        self.resume_queue: Optional[Queue] = None
        # Stored for pause. If using breakpoints, it is a list of steps
        self.steps_per_run: List[int] = []
        # If the list of breakpoints is not empty, this will be true
        self.use_breakpoints: bool = False
        # Absolute steps where the run() returns
        self.breakpoint_steps: List[int] = []
        # Breakpoint index, modified in run() and pause()
        self.breakpoint_index: int = 0
        # Queue so we can wait for aborts to complete
        self.abort_queue: Optional[Queue] = None
        # Create sometimes writeable attribute for the current completed scan
        # step
        self.completed_steps = NumberMeta(
            "int32",
            "Readback of number of scan steps",
            tags=[Widget.METER.tag()],  # Widget.TEXTINPUT.tag()]
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model(
            "completedSteps", self.completed_steps, self.pause
        )
        self.set_writeable_in(self.completed_steps, ss.PAUSED, ss.ARMED)
        # Create read-only attribute for the number of configured scan steps
        self.configured_steps = NumberMeta(
            "int32",
            "Number of steps currently configured",
            tags=[Widget.TEXTUPDATE.tag()],
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model(
            "configuredSteps", self.configured_steps
        )
        # Create read-only attribute for the total number of scan steps
        self.total_steps = NumberMeta(
            "int32", "Readback of number of scan steps", tags=[Widget.TEXTUPDATE.tag()]
        ).create_attribute_model(0)
        self.field_registry.add_attribute_model("totalSteps", self.total_steps)
        # Create the method models
        self.field_registry.add_method_model(self.validate)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.configure), ss.READY, ss.FINISHED
        )
        self.set_writeable_in(self.field_registry.add_method_model(self.run), ss.ARMED)
        self.set_writeable_in(
            self.field_registry.add_method_model(self.abort),
            ss.READY,
            ss.CONFIGURING,
            ss.ARMED,
            ss.RUNNING,
            ss.POSTRUN,
            ss.PAUSED,
            ss.SEEKING,
            ss.FINISHED,
        )
        self.set_writeable_in(
            self.field_registry.add_method_model(self.pause),
            ss.ARMED,
            ss.PAUSED,
            ss.RUNNING,
            ss.FINISHED,
        )
        self.set_writeable_in(
            self.field_registry.add_method_model(self.resume), ss.PAUSED
        )
        # Override reset to work from aborted too
        self.set_writeable_in(
            self.field_registry.get_field("reset"),
            ss.FAULT,
            ss.DISABLED,
            ss.ABORTED,
            ss.ARMED,
            ss.FINISHED,
        )
        # Allow Parts to report their status
        self.info_registry.add_reportable(RunProgressInfo, self.update_completed_steps)
        # Allow Parts to request extra items from configure
        self.info_registry.add_reportable(
            ConfigureParamsInfo, self.update_configure_params
        )

    def get_steps_per_run(
        self,
        generator: CompoundGenerator,
        axes_to_move: AAxesToMove,
        breakpoints: List[int],
    ) -> List[int]:
        self.use_breakpoints = False
        steps = [1]
        axes_set = set(axes_to_move)
        for dim in reversed(generator.dimensions):
            # If the axes_set is empty and the dimension has axes then we have
            # done as many dimensions as we can, so stop
            if dim.axes and not axes_set:
                break
            # Consume the axes that this generator scans
            for axis in dim.axes:
                assert axis in axes_set, f"Axis {axis} is not in {axes_to_move}"
                axes_set.remove(axis)
            # Now multiply by the dimensions to get the number of steps
            steps[0] *= dim.size

        # If we have breakpoints we make a list of steps
        if len(breakpoints) > 0:
            total_breakpoint_steps = sum(breakpoints)
            assert (
                total_breakpoint_steps <= steps[0]
            ), "Sum of breakpoints greater than steps in scan"
            self.use_breakpoints = True

            # Cast to list so we can append
            breakpoints_list = list(breakpoints)

            # Check if we need to add the final breakpoint to the inner scan
            if total_breakpoint_steps < steps[0]:
                last_breakpoint = steps[0] - total_breakpoint_steps
                breakpoints_list += [last_breakpoint]

            # Repeat the set of breakpoints for each outer step
            breakpoints_list *= self._get_outer_steps(generator, axes_to_move)

            steps = breakpoints_list

            # List of steps completed at end of each run
            self.breakpoint_steps = [sum(steps[:i]) for i in range(1, len(steps) + 1)]

        return steps
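    # Worked example for get_steps_per_run above (hypothetical numbers, not
    # from the source): if the inner dimensions covered by axes_to_move give
    # steps == [10], the outer dimensions repeat twice and breakpoints is
    # [2, 3], then the remainder 10 - (2 + 3) = 5 is appended, the list is
    # repeated per outer step to give steps == [2, 3, 5, 2, 3, 5], and
    # breakpoint_steps becomes the running total [2, 5, 10, 12, 15, 20].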

    def _get_outer_steps(self, generator, axes_to_move):
        outer_steps = 1
        for dim in reversed(generator.dimensions):
            outer_axis = True
            for axis in dim.axes:
                if axis in axes_to_move:
                    outer_axis = False
            if outer_axis:
                outer_steps *= dim.size
        return outer_steps

    def do_reset(self):
        super().do_reset()
        self.configured_steps.set_value(0)
        self.completed_steps.set_value(0)
        self.total_steps.set_value(0)
        self.breakpoint_index = 0

    def update_configure_params(
        self, part: Part = None, info: ConfigureParamsInfo = None
    ) -> None:
        """Tell the controller that a part needs different things passed to
        Configure"""
        with self.changes_squashed:
            # Update the dict
            if part:
                assert info, "No info for part"
                self.part_configure_params[part] = info

            # No process yet, so don't do this yet
            if self.process is None:
                return

            # Make a list of all the infos that the parts have contributed
            part_configure_infos = []
            for part in self.parts.values():
                info = self.part_configure_params.get(part, None)
                if info:
                    part_configure_infos.append(info)

            # Update methods from the updated configure model
            for method_name in ("configure", "validate"):
                # Get the model of our configure method as the starting point
                method_meta = MethodMeta.from_callable(self.configure)
                # Update the configure model from the infos
                update_configure_model(method_meta, part_configure_infos)
                # Put the created metas onto our block meta
                method = self._block[method_name]
                method.meta.takes.set_elements(method_meta.takes.elements)
                method.meta.takes.set_required(method_meta.takes.required)
                method.meta.returns.set_elements(method_meta.returns.elements)
                method.meta.returns.set_required(method_meta.returns.required)
                method.meta.set_defaults(method_meta.defaults)
                method.set_took()
                method.set_returned()

    def update_block_endpoints(self):
        super().update_block_endpoints()
        self.update_configure_params()

    def _part_params(
        self, part_contexts: Dict[Part, Context] = None, params: ConfigureParams = None
    ) -> PartContextParams:
        if part_contexts is None:
            part_contexts = self.part_contexts
        if params is None:
            params = self.configure_params
        for part, context in part_contexts.items():
            args = {}
            assert params, "No params"
            for k in params.call_types:
                args[k] = getattr(params, k)
            yield part, context, args

    # This will be serialized, so maintain camelCase for axesToMove
    # noinspection PyPep8Naming
    @add_call_types
    def validate(
        self,
        generator: AGenerator,
        axesToMove: AAxesToMove = None,
        breakpoints: ABreakpoints = None,
        **kwargs: Any,
    ) -> AConfigureParams:
        """Validate configuration parameters and return validated parameters.

        Doesn't take device state into account so can be run in any state
        """
        iterations = 10
        # We will return this, so make sure we fill in defaults
        for k, default in self._block.configure.meta.defaults.items():
            kwargs.setdefault(k, default)
        # The validated parameters we will eventually return
        params = ConfigureParams(generator, axesToMove, breakpoints, **kwargs)
        # Make some tasks just for validate
        part_contexts = self.create_part_contexts()
        # Get any status from all parts
        status_part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in part_contexts.items()
        )
        while iterations > 0:
            # Try up to 10 times to get a valid set of parameters
            iterations -= 1
            # Validate the params with all the parts
            validate_part_info = self.run_hooks(
                ValidateHook(p, c, status_part_info, **kwargs)
                for p, c, kwargs in self._part_params(part_contexts, params)
            )
            tweaks: List[ParameterTweakInfo] = ParameterTweakInfo.filter_values(
                validate_part_info
            )
            if tweaks:
                # Check if we need to resolve generator tweaks first
                generator_tweaks: List[ParameterTweakInfo] = []
                for tweak in tweaks:
                    # Collect all generator tweaks
                    if tweak.parameter == "generator":
                        generator_tweaks.append(tweak)
                if len(generator_tweaks) > 0:
                    # Resolve multiple tweaks to the generator
                    generator_tweak = resolve_generator_tweaks(generator_tweaks)
                    deserialized = self._block.configure.meta.takes.elements[
                        generator_tweak.parameter
                    ].validate(generator_tweak.value)
                    setattr(params, generator_tweak.parameter, deserialized)
                    self.log.debug(f"{self.mri}: tweaking generator to {deserialized}")
                else:
                    # Other tweaks can be applied at the same time
                    for tweak in tweaks:
                        deserialized = self._block.configure.meta.takes.elements[
                            tweak.parameter
                        ].validate(tweak.value)
                        setattr(params, tweak.parameter, deserialized)
                        self.log.debug(
                            f"{self.mri}: tweaking {tweak.parameter} to {deserialized}"
                        )
            else:
                # Consistent set, just return the params
                return params
        raise ValueError("Could not get a consistent set of parameters")
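    # Illustration of the loop above (an assumed scenario, not from the
    # source): a part might report ParameterTweakInfo("generator", tweaked) to
    # lengthen too-short exposure times; validate() applies the tweak, re-runs
    # ValidateHook so every part sees the adjusted generator, and returns only
    # once a pass produces no further tweaks (giving up after 10 iterations).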

    def abortable_transition(self, state):
        with self._lock:
            # We might have been aborted just now, so this will fail
            # with an AbortedError if we were
            self_ctx = self.part_contexts.get(self, None)
            if self_ctx:
                self_ctx.sleep(0)
            self.transition(state)

    # This will be serialized, so maintain camelCase for axesToMove
    # noinspection PyPep8Naming
    @add_call_types
    def configure(
        self,
        generator: AGenerator,
        axesToMove: AAxesToMove = None,
        breakpoints: ABreakpoints = None,
        **kwargs: Any,
    ) -> AConfigureParams:
        """Validate the params then configure the device ready for run().

        Try to prepare the device as much as possible so that run() is quick to
        start; this may involve potentially long-running activities like moving
        motors.

        Normally it will return in Armed state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        params = self.validate(generator, axesToMove, breakpoints, **kwargs)
        state = self.state.value
        try:
            self.transition(ss.CONFIGURING)
            self.do_configure(state, params)
            self.abortable_transition(ss.ARMED)
        except AbortedError:
            assert self.abort_queue, "No abort queue"
            self.abort_queue.put(None)
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise
        else:
            return params

    def do_configure(self, state: str, params: ConfigureParams) -> None:
        if state == ss.FINISHED:
            # If we were finished then do a reset before configuring
            self.run_hooks(
                builtin.hooks.ResetHook(p, c)
                for p, c in self.create_part_contexts().items()
            )
        # Clear out any old part contexts now rather than letting gc do it
        for context in self.part_contexts.values():
            context.unsubscribe_all()
        # These are the part tasks that abort() and pause() will operate on
        self.part_contexts = self.create_part_contexts()
        # So add one for ourself too so we can be aborted
        assert self.process, "No attached process"
        self.part_contexts[self] = Context(self.process)
        # Store the params for use in seek()
        self.configure_params = params
        # Tell everything to get into the right state to Configure
        self.run_hooks(PreConfigureHook(p, c) for p, c in self.part_contexts.items())
        # This will calculate what we need from the generator, possibly a long
        # call
        params.generator.prepare()
        # Set the steps attributes that we will do across many run() calls
        self.total_steps.set_value(params.generator.size)
        self.completed_steps.set_value(0)
        self.configured_steps.set_value(0)
        # TODO: We can be cleverer about this and support a different number
        # of steps per run for each run by examining the generator structure
        self.steps_per_run = self.get_steps_per_run(
            params.generator, params.axesToMove, params.breakpoints
        )
        # Get any status from all parts
        part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in self.part_contexts.items()
        )
        # Run the configure command on all parts, passing them info from
        # ReportStatus. Parts should return any reporting info for PostConfigure
        completed_steps = 0
        self.breakpoint_index = 0
        steps_to_do = self.steps_per_run[self.breakpoint_index]
        part_info = self.run_hooks(
            ConfigureHook(p, c, completed_steps, steps_to_do, part_info, **kw)
            for p, c, kw in self._part_params()
        )
        # Take configuration info and reflect it as attribute updates
        self.run_hooks(
            PostConfigureHook(p, c, part_info) for p, c in self.part_contexts.items()
        )
        # Update the completed and configured steps
        self.configured_steps.set_value(steps_to_do)
        self.completed_steps.meta.display.set_limitHigh(params.generator.size)
        # Reset the progress of all child parts
        self.progress_updates = {}
        self.resume_queue = Queue()

    @add_call_types
    def run(self) -> None:
        """Run a device where configure() has already been called

        Normally it will return in Ready state. If set up for multiple runs
        with a single configure() then it will return in Armed state. If the user
        aborts then it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """

        if self.configured_steps.value < self.total_steps.value:
            next_state = ss.ARMED
        else:
            next_state = ss.FINISHED
        try:
            self.transition(ss.RUNNING)
            hook = RunHook
            going = True
            while going:
                try:
                    self.do_run(hook)
                    self.abortable_transition(next_state)
                except AbortedError:
                    assert self.abort_queue, "No abort queue"
                    self.abort_queue.put(None)
                    # Wait for a response on the resume_queue
                    assert self.resume_queue, "No resume queue"
                    should_resume = self.resume_queue.get()
                    if should_resume:
                        # we need to resume
                        self.log.debug("Resuming run")
                    else:
                        # we don't need to resume, just drop out
                        raise
                else:
                    going = False
        except AbortedError:
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_run(self, hook):
        # type: (Type[ControllerHook]) -> None

        # Run all PreRunHooks
        self.run_hooks(PreRunHook(p, c) for p, c in self.part_contexts.items())

        self.run_hooks(hook(p, c) for p, c in self.part_contexts.items())
        self.abortable_transition(ss.POSTRUN)
        completed_steps = self.configured_steps.value
        if completed_steps < self.total_steps.value:
            if self.use_breakpoints:
                self.breakpoint_index += 1
            steps_to_do = self.steps_per_run[self.breakpoint_index]
            part_info = self.run_hooks(
                ReportStatusHook(p, c) for p, c in self.part_contexts.items()
            )
            self.completed_steps.set_value(completed_steps)
            self.run_hooks(
                PostRunArmedHook(
                    p, c, completed_steps, steps_to_do, part_info, **kwargs
                )
                for p, c, kwargs in self._part_params()
            )
            self.configured_steps.set_value(completed_steps + steps_to_do)
        else:
            self.completed_steps.set_value(completed_steps)
            self.run_hooks(
                PostRunReadyHook(p, c) for p, c in self.part_contexts.items()
            )

    def update_completed_steps(
        self, part: Part, completed_steps: RunProgressInfo
    ) -> None:
        with self._lock:
            # Record this part's progress and report the minimum progress
            # across all parts
            assert self.progress_updates is not None, "No progress updates"
            self.progress_updates[part] = completed_steps.steps
            min_completed_steps = min(self.progress_updates.values())
            if min_completed_steps > self.completed_steps.value:
                self.completed_steps.set_value(min_completed_steps)

    @add_call_types
    def abort(self) -> None:
        """Abort the current operation and block until aborted

        Normally it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        self.try_aborting_function(ss.ABORTING, ss.ABORTED, self.do_abort)
        # Tell _call_do_run not to resume
        if self.resume_queue:
            self.resume_queue.put(False)

    def do_abort(self) -> None:
        self.run_hooks(AbortHook(p, c) for p, c in self.create_part_contexts().items())

    def try_aborting_function(
        self, start_state: str, end_state: str, func: Callable[..., None], *args: Any
    ) -> None:
        try:
            # To make the running function fail we need to stop any running
            # contexts (if running a hook) or make transition() fail with
            # AbortedError. Both of these are accomplished here
            with self._lock:
                original_state = self.state.value
                self.abort_queue = Queue()
                self.transition(start_state)
                for context in self.part_contexts.values():
                    context.stop()
            if original_state not in (ss.READY, ss.ARMED, ss.PAUSED, ss.FINISHED):
                # Something was running, let it finish aborting
                try:
                    self.abort_queue.get(timeout=DEFAULT_TIMEOUT)
                except TimeoutError:
                    self.log.warning("Timeout waiting while {start_state}")
            with self._lock:
                # Now we've waited for a while we can remove the error state
                # for transition in case a hook triggered it rather than a
                # transition
                self_ctx = self.part_contexts.get(self, None)
                if self_ctx:
                    self_ctx.ignore_stops_before_now()
            func(*args)
            self.abortable_transition(end_state)
        except AbortedError:
            assert self.abort_queue, "No abort queue"
            self.abort_queue.put(None)
            raise
        except Exception as e:  # pylint:disable=broad-except
            self.go_to_error_state(e)
            raise

    # Allow camelCase as this will be serialized
    # noinspection PyPep8Naming
    @add_call_types
    def pause(self, lastGoodStep: ALastGoodStep = -1) -> None:
        """Pause a run() so that resume() can be called later, or seek within
        an Armed or Paused state.

        The original call to run() will not be interrupted by pause(), it will
        wait until the scan completes or is aborted.

        Normally it will return in Paused state. If the scan is finished it
        will return in Finished state. If the scan is armed it will return in
        Armed state. If the user aborts then it will return in Aborted state.
        If something goes wrong it will return in Fault state. If the user
        disables then it will return in Disabled state.
        """

        total_steps = self.total_steps.value

        # We need to decide where to go
        if lastGoodStep < 0:
            # If we are finished we do not need to do anything
            if self.state.value == ss.FINISHED:
                return
            # Otherwise set to number of completed steps
            else:
                lastGoodStep = self.completed_steps.value
        # Otherwise make sure we are bound to the total steps of the scan
        elif lastGoodStep >= total_steps:
            lastGoodStep = total_steps - 1

        if self.state.value in [ss.ARMED, ss.FINISHED]:
            # We don't have a run process, free to go anywhere we want
            next_state = ss.ARMED
        else:
            # Need to pause within the bounds of the current run
            if lastGoodStep == self.configured_steps.value:
                lastGoodStep -= 1
            next_state = ss.PAUSED

        self.try_aborting_function(ss.SEEKING, next_state, self.do_pause, lastGoodStep)

    def do_pause(self, completed_steps: int) -> None:
        """Recalculate the number of configured steps

        Args:
            completed_steps: Last good step performed
        """
        self.run_hooks(PauseHook(p, c) for p, c in self.create_part_contexts().items())

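        # Work out how far into the current run we are so that the
        # reconfigured run finishes at the same boundary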
        if self.use_breakpoints:
            self.breakpoint_index = self.get_breakpoint_index(completed_steps)
            in_run_steps = (
                completed_steps % self.breakpoint_steps[self.breakpoint_index]
            )
            steps_to_do = self.breakpoint_steps[self.breakpoint_index] - in_run_steps
        else:
            in_run_steps = completed_steps % self.steps_per_run[self.breakpoint_index]
            steps_to_do = self.steps_per_run[self.breakpoint_index] - in_run_steps

        part_info = self.run_hooks(
            ReportStatusHook(p, c) for p, c in self.part_contexts.items()
        )
        self.completed_steps.set_value(completed_steps)
        self.run_hooks(
            SeekHook(p, c, completed_steps, steps_to_do, part_info, **kwargs)
            for p, c, kwargs in self._part_params()
        )
        self.configured_steps.set_value(completed_steps + steps_to_do)

    def get_breakpoint_index(self, completed_steps: int) -> int:
        # If the last point, then return the last index
        if completed_steps == self.breakpoint_steps[-1]:
            return len(self.breakpoint_steps) - 1
        # Otherwise check which index we fall within
        index = 0
        while completed_steps >= self.breakpoint_steps[index]:
            index += 1
        return index

    @add_call_types
    def resume(self) -> None:
        """Resume a paused scan.

        Normally it will return in Running state. If something goes wrong it
        will return in Fault state.
        """
        self.transition(ss.RUNNING)
        assert self.resume_queue, "No resume queue"
        self.resume_queue.put(True)
        # self.run will now take over

    def do_disable(self) -> None:
        # Abort anything that is currently running, but don't wait
        for context in self.part_contexts.values():
            context.stop()
        if self.resume_queue:
            self.resume_queue.put(False)
        super().do_disable()

    def go_to_error_state(self, exception):
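        # Tell a waiting run() not to resume before transitioning to Fault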
        if self.resume_queue:
            self.resume_queue.put(False)
        super().go_to_error_state(exception)
Beispiel #27
0
class PandAManagerController(builtin.controllers.ManagerController):
    def __init__(
        self,
        mri: AMri,
        config_dir: AConfigDir,
        hostname: AHostname = "localhost",
        port: APort = 8888,
        doc_url_base: ADocUrlBase = DOC_URL_BASE,
        poll_period: APollPeriod = 0.1,
        template_designs: ATemplateDesigns = "",
        initial_design: AInitialDesign = "",
        use_git: AUseGit = True,
        description: ADescription = "",
    ) -> None:
        super().__init__(
            mri=mri,
            config_dir=config_dir,
            template_designs=template_designs,
            initial_design=initial_design,
            use_git=use_git,
            description=description,
        )
        self._poll_period = poll_period
        self._doc_url_base = doc_url_base
        # All the bit_out fields and their values
        # {block_name.field_name: value}
        self._bit_outs: Dict[str, bool] = {}
        # The bit_out field values that need toggling since the last handle
        # {block_name.field_name: value}
        self._bit_out_changes: Dict[str, bool] = {}
        # The fields that the busses part needs to know about
        # {block_name.field_name[.subfield_name]}
        self._bus_fields: Set[str] = set()
        # The child controllers we have created
        self._child_controllers: Dict[str, PandABlockController] = {}
        # The PandABlock client that does the comms
        self._client = PandABlocksClient(hostname, port, Queue)
        # Filled in on reset
        self._stop_queue = None
        self._poll_spawned = None
        # Poll period reporting
        self.last_poll_period = NumberMeta(
            "float64",
            "The time between the last 2 polls of the hardware",
            tags=[Widget.TEXTUPDATE.tag()],
            display=Display(units="s", precision=3),
        ).create_attribute_model(poll_period)
        self.field_registry.add_attribute_model("lastPollPeriod",
                                                self.last_poll_period)
        # Bus tables
        self.busses: PandABussesPart = self._make_busses()
        self.add_part(self.busses)

    def do_init(self):
        # start the poll loop and make block parts first to fill in our parts
        # before calling _set_block_children()
        self.start_poll_loop()
        super().do_init()

    def start_poll_loop(self):
        # queue to listen for stop events
        if not self._client.started:
            self._stop_queue = Queue()
            self._client.start(self.process.spawn, socket)
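        # Create the child block controllers the first time we start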
        if not self._child_controllers:
            self._make_child_controllers()
        if self._poll_spawned is None:
            self._poll_spawned = self.process.spawn(self._poll_loop)

    def do_disable(self):
        super().do_disable()
        self.stop_poll_loop()

    def do_reset(self):
        self.start_poll_loop()
        super().do_reset()

    def _poll_loop(self):
        """At self.poll_period poll for changes"""
        last_poll_update = time.time()
        next_poll = time.time() + self._poll_period
        try:
            while True:
                # Make sure we don't consume all the CPU: be active for at
                # most 50% of the poll period and sleep for at least the
                # other 50%
                min_sleep = self._poll_period * 0.5
                sleep_for = next_poll - time.time()
                if sleep_for < min_sleep:
                    # Going too fast, slow down a bit
                    last_poll_period = self._poll_period + min_sleep - sleep_for
                    sleep_for = min_sleep
                else:
                    last_poll_period = self._poll_period
                try:
                    # If told to stop, we will get something here and return
                    return self._stop_queue.get(timeout=sleep_for)
                except TimeoutError:
                    # No stop, no problem
                    pass
                # Poll for changes
                self.handle_changes(self._client.get_changes())
                if (last_poll_period != self.last_poll_period.value
                        and next_poll - last_poll_update > POLL_PERIOD_REPORT):
                    self.last_poll_period.set_value(last_poll_period)
                    last_poll_update = next_poll
                next_poll += last_poll_period
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def stop_poll_loop(self):
        if self._poll_spawned:
            self._stop_queue.put(None)
            self._poll_spawned.wait()
            self._poll_spawned = None
        if self._client.started:
            self._client.stop()

    def _make_child_controllers(self):
        self._child_controllers = {}
        controllers = []
        child_parts = []
        pos_names = []
        blocks_data = self._client.get_blocks_data()
        for block_rootname, block_data in blocks_data.items():
            block_names = []
            if block_data.number == 1:
                block_names.append(block_rootname)
            else:
                for i in range(block_data.number):
                    block_names.append("%s%d" % (block_rootname, i + 1))
            for block_name in block_names:
                # Look through the BlockData for things we are interested in
                for field_name, field_data in block_data.fields.items():
                    if field_data.field_type == "pos_out":
                        pos_names.append("%s.%s" % (block_name, field_name))

                # Make the child controller and add it to the process
                controller, child_part = self._make_child_block(
                    block_name, block_data)
                controllers += [controller]
                child_parts += [child_part]
                self._child_controllers[block_name] = controller
                # If there is only one, make an alias with "1" appended for
                # *METADATA.LABEL lookup
                if block_data.number == 1:
                    self._child_controllers[block_name + "1"] = controller

        self.process.add_controllers(controllers)
        for part in child_parts:
            self.add_part(part)

        # Create the busses from their initial sets of values
        pcap_bit_fields = self._client.get_pcap_bits_fields()
        self.busses.create_busses(pcap_bit_fields, pos_names)
        # Handle the pos_names that the busses part needs
        self._bus_fields = set(pos_names)
        for pos_name in pos_names:
            for suffix in ("CAPTURE", "UNITS", "SCALE", "OFFSET"):
                self._bus_fields.add("%s.%s" % (pos_name, suffix))
        # Handle the bit_outs, keeping a list for toggling and adding them
        # to the set of things that the busses need
        self._bit_outs = {k: False for k in self.busses.bits.value.name}
        self._bit_out_changes = {}
        self._bus_fields |= set(self._bit_outs)
        for capture_field in pcap_bit_fields:
            self._bus_fields.add(capture_field)
        # Handle the initial set of changes to get an initial value
        self.handle_changes(self._client.get_changes())
        # Then once more to let bit_outs toggle back
        self.handle_changes(())
        assert not self._bit_out_changes, (
            "There are still bit_out changes %s" % self._bit_out_changes)

    def _make_busses(self) -> PandABussesPart:
        return PandABussesPart("busses", self._client)

    def _make_child_block(self, block_name, block_data):
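        # Each PandA block gets its own child controller; PCAP also gets
        # ARM/DISARM action parts so position capture can be controlled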
        controller = PandABlockController(self._client, self.mri, block_name,
                                          block_data, self._doc_url_base)
        if block_name == "PCAP":
            controller.add_part(
                PandAActionPart(self._client, "*PCAP", "ARM",
                                "Arm position capture", []))
            controller.add_part(
                PandAActionPart(self._client, "*PCAP", "DISARM",
                                "Disarm position capture", []))
        child_part = builtin.parts.ChildPart(name=block_name,
                                             mri=controller.mri,
                                             stateful=False)
        return controller, child_part

    def _handle_change(self, k, v, bus_changes, block_changes,
                       bit_out_changes):
        # Handle bit changes
        try:
            current_v = self._bit_outs[k]
        except KeyError:
            # Not a bit
            pass
        else:
            # Convert to a boolean
            v = bool(int(v))
            try:
                changed_to = bit_out_changes[k]
            except KeyError:
                # We didn't already make a change
                if v == current_v:
                    # Value is the same, store the negation, and set it
                    # back next time
                    self._bit_out_changes[k] = v
                    v = not v
            else:
                # Already made a change, defer this value til next time
                # if it is different
                if changed_to != v:
                    self._bit_out_changes[k] = v
                return
            self._bit_outs[k] = v

        # Notify the bus tables if they need to know
        if k in self._bus_fields:
            bus_changes[k] = v

        # Add to the relevant Block changes dict
        block_name, field_name = k.split(".", 1)
        if block_name == "*METADATA":
            if field_name.startswith("LABEL_"):
                field_name, block_name = field_name.split("_", 1)
            else:
                # Don't support any non-label metadata fields at the moment
                return
        block_changes.setdefault(block_name, {})[field_name] = v

    def handle_changes(self, changes: Sequence[Tuple[str, str]]) -> None:
        ts = TimeStamp()
        # {block_name: {field_name: field_value}}
        block_changes: Dict[str, Any] = {}
        # {full_field: field_value}
        bus_changes = {}

        # Process bit outs that need changing
        bit_out_changes = self._bit_out_changes
        self._bit_out_changes = {}
        for k, v in bit_out_changes.items():
            self._bit_outs[k] = v
            bus_changes[k] = v
            block_name, field_name = k.split(".")
            block_changes.setdefault(block_name, {})[field_name] = v

        # Work out which change is needed for which block
        for key, value in changes:
            self._handle_change(key, value, bus_changes, block_changes,
                                bit_out_changes)

        # Notify the Blocks that they need to handle these changes
        if bus_changes:
            self.busses.handle_changes(bus_changes, ts)
        for block_name, block_changes_values in block_changes.items():
            self._child_controllers[block_name].handle_changes(
                block_changes_values, ts)
class RunnableController(ManagerController):
    """RunnableDevice implementer that also exposes GUI for child parts"""
    # The stateSet that this controller implements
    stateSet = ss()

    Validate = Hook()
    """Called at validate() to check parameters are valid

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part validate()
            method_takes() decorator

    Returns:
        [`ParameterTweakInfo`] - any parameters tweaks that have occurred
            to make them compatible with this part. If any are returned,
            Validate will be re-run with the modified parameters.
    """

    ReportStatus = Hook()
    """Called before Validate, Configure, PostRunArmed and Seek hooks to report
    the current configuration of all parts

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks

    Returns:
        [`Info`] - any configuration Info objects relevant to other parts
    """

    Configure = Hook()
    """Called at configure() to configure child block for a run

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        completed_steps (int): Number of steps already completed
        steps_to_do (int): Number of steps we should configure for
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part configure()
            method_takes() decorator

    Returns:
        [`Info`] - any Info objects that need to be passed to other parts for
            storing in attributes
    """

    PostConfigure = Hook()
    """Called at the end of configure() to store configuration info calculated
     in the Configure hook

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        part_info (dict): {part_name: [Info]} returned from Configure hook
    """

    Run = Hook()
    """Called at run() to start the configured steps running

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        update_completed_steps (callable): If part can report progress, this
            part should call update_completed_steps(completed_steps, self) with
            the integer step value each time progress is updated
    """

    PostRunArmed = Hook()
    """Called at the end of run() when there are more steps to be run

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        completed_steps (int): Number of steps already completed
        steps_to_do (int): Number of steps we should configure for
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part configure()
            method_takes() decorator
    """

    PostRunReady = Hook()
    """Called at the end of run() when there are no more steps to be run

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
    """

    Pause = Hook()
    """Called at pause() to pause the current scan before Seek is called

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
    """

    Seek = Hook()
    """Called at seek() or at the end of pause() to reconfigure for a different
    number of completed_steps

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        completed_steps (int): Number of steps already completed
        steps_to_do (int): Number of steps we should configure for
        part_info (dict): {part_name: [Info]} returned from ReportStatus
        params (Map): Any configuration parameters asked for by part configure()
            method_takes() decorator
    """

    Resume = Hook()
    """Called at resume() to continue a paused scan

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
        update_completed_steps (callable): If part can report progress, this
            part should call update_completed_steps(completed_steps, self) with
            the integer step value each time progress is updated
    """

    Abort = Hook()
    """Called at abort() to stop the current scan

    Args:
        context (Context): The context that should be used to perform operations
            on child blocks
    """

    # Attributes
    completed_steps = None
    configured_steps = None
    total_steps = None
    axes_to_move = None

    # Params passed to configure()
    configure_params = None
    
    # Shared contexts between Configure, Run, Pause, Seek, Resume
    part_contexts = None

    # Configure method_models
    # {part: configure_method_model}
    configure_method_models = None

    # Stored for pause
    steps_per_run = 0

    # Progress reporting dict
    # {part: completed_steps for that part}
    progress_updates = None

    # Queue so that do_run can wait to see why it was aborted and resume if
    # needed
    resume_queue = None

    # Queue so we can wait for aborts to complete
    abort_queue = None

    @method_writeable_in(ss.FAULT, ss.DISABLED, ss.ABORTED, ss.ARMED)
    def reset(self):
        # Override reset to work from aborted too
        super(RunnableController, self).reset()

    def create_attribute_models(self):
        for data in super(RunnableController, self).create_attribute_models():
            yield data
        # Create sometimes writeable attribute for the current completed scan
        # step
        completed_steps_meta = NumberMeta(
            "int32", "Readback of number of scan steps",
            tags=[widget("textinput")])
        completed_steps_meta.set_writeable_in(ss.PAUSED, ss.ARMED)
        self.completed_steps = completed_steps_meta.create_attribute_model(0)
        yield "completedSteps", self.completed_steps, self.set_completed_steps
        # Create read-only attribute for the number of configured scan steps
        configured_steps_meta = NumberMeta(
            "int32", "Number of steps currently configured",
            tags=[widget("textupdate")])
        self.configured_steps = configured_steps_meta.create_attribute_model(0)
        yield "configuredSteps", self.configured_steps, None
        # Create read-only attribute for the total number of scan steps
        total_steps_meta = NumberMeta(
            "int32", "Readback of number of scan steps",
            tags=[widget("textupdate")])
        self.total_steps = total_steps_meta.create_attribute_model(0)
        yield "totalSteps", self.total_steps, None
        # Create sometimes writeable attribute for the default axis names
        axes_to_move_meta = StringArrayMeta(
            "Default axis names to scan for configure()",
            tags=[widget("table"), config()])
        axes_to_move_meta.set_writeable_in(ss.READY)
        self.axes_to_move = axes_to_move_meta.create_attribute_model(
            self.params.axesToMove)
        yield "axesToMove", self.axes_to_move, self.set_axes_to_move

    def do_init(self):
        self.part_contexts = {}
        # Populate configure args from any child method hooked to Configure.
        # If we have RunnableChildParts, they will call update_configure_args
        # during do_init
        self.configure_method_models = {}
        # Look for all parts that hook into Configure
        for part, func_name in self._hooked_func_names[self.Configure].items():
            if func_name in part.method_models:
                self.update_configure_args(part, part.method_models[func_name])
        super(RunnableController, self).do_init()

    def do_reset(self):
        super(RunnableController, self).do_reset()
        self.configured_steps.set_value(0)
        self.completed_steps.set_value(0)
        self.total_steps.set_value(0)

    def update_configure_args(self, part, configure_model):
        """Tell controller part needs different things passed to Configure"""
        with self.changes_squashed:
            # Update the dict
            self.configure_method_models[part] = configure_model
            method_models = list(self.configure_method_models.values())

            # Update takes with the things we need
            default_configure = MethodModel.from_dict(
                RunnableController.configure.MethodModel.to_dict())
            default_configure.defaults["axesToMove"] = self.axes_to_move.value
            method_models.append(default_configure)

            # Decorate validate and configure with the sum of its parts
            self._block.validate.recreate_from_others(method_models)
            self._block.validate.set_returns(self._block.validate.takes)
            self._block.configure.recreate_from_others(method_models)

    def set_axes_to_move(self, value):
        self.axes_to_move.set_value(value)

    @method_takes(*configure_args)
    @method_returns(*validate_args)
    def validate(self, params, returns):
        """Validate configuration parameters and return validated parameters.

        Doesn't take device state into account so can be run in any state
        """
        iterations = 10
        # Make some tasks just for validate
        part_contexts = self.create_part_contexts()
        # Get any status from all parts
        status_part_info = self.run_hook(self.ReportStatus, part_contexts)
        while iterations > 0:
            # Try up to 10 times to get a valid set of parameters
            iterations -= 1
            # Validate the params with all the parts
            validate_part_info = self.run_hook(
                self.Validate, part_contexts, status_part_info, **params)
            tweaks = ParameterTweakInfo.filter_values(validate_part_info)
            if tweaks:
                for tweak in tweaks:
                    params[tweak.parameter] = tweak.value
                    self.log.debug(
                        "Tweaking %s to %s", tweak.parameter, tweak.value)
            else:
                # Consistent set, just return the params
                return params
        raise ValueError("Could not get a consistent set of parameters")

    def abortable_transition(self, state):
        with self._lock:
            # We might have been aborted just now, so this will fail
            # with an AbortedError if we were
            self.part_contexts[self].sleep(0)
            self.transition(state)

    @method_takes(*configure_args)
    @method_writeable_in(ss.READY)
    def configure(self, params):
        """Validate the params then configure the device ready for run().

        Try to prepare the device as much as possible so that run() is quick to
        start, this may involve potentially long running activities like moving
        motors.

        Normally it will return in Armed state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        self.validate(params, params)
        try:
            self.transition(ss.CONFIGURING)
            self.do_configure(params)
            self.abortable_transition(ss.ARMED)
        except AbortedError:
            self.abort_queue.put(None)
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_configure(self, params):
        # These are the part tasks that abort() and pause() will operate on
        self.part_contexts = self.create_part_contexts()
        # Tell these contexts to notify their parts about the things they
        # modify so it doesn't screw up the modified LED
        for part, context in self.part_contexts.items():
            context.set_notify_dispatch_request(part.notify_dispatch_request)
        # So add one for ourself too so we can be aborted
        self.part_contexts[self] = Context(self.process)
        # Store the params for use in seek()
        self.configure_params = params
        # This will calculate what we need from the generator, possibly a long
        # call
        params.generator.prepare()
        # Set the steps attributes that we will do across many run() calls
        self.total_steps.set_value(params.generator.size)
        self.completed_steps.set_value(0)
        self.configured_steps.set_value(0)
        # TODO: We can be cleverer about this and support a different number
        # of steps per run for each run by examining the generator structure
        self.steps_per_run = self._get_steps_per_run(
            params.generator, params.axesToMove)
        # Get any status from all parts
        part_info = self.run_hook(self.ReportStatus, self.part_contexts)
        # Run the configure command on all parts, passing them info from
        # ReportStatus. Parts should return any reporting info for PostConfigure
        completed_steps = 0
        steps_to_do = self.steps_per_run
        part_info = self.run_hook(
            self.Configure, self.part_contexts, completed_steps, steps_to_do,
            part_info, **self.configure_params)
        # Take configuration info and reflect it as attribute updates
        self.run_hook(self.PostConfigure, self.part_contexts, part_info)
        # Update the completed and configured steps
        self.configured_steps.set_value(steps_to_do)
        # Reset the progress of all child parts
        self.progress_updates = {}
        self.resume_queue = Queue()

    def _get_steps_per_run(self, generator, axes_to_move):
        steps = 1
        axes_set = set(axes_to_move)
        for dim in reversed(generator.dimensions):
            # If the axes_set is empty then we are done
            if not axes_set:
                break
            # Consume the axes that this generator scans
            for axis in dim.axes:
                assert axis in axes_set, \
                    "Axis %s is not in %s" % (axis, axes_to_move)
                axes_set.remove(axis)
            # Now multiply by the dimensions to get the number of steps
            steps *= dim.size
        return steps

    @method_writeable_in(ss.ARMED)
    def run(self):
        """Run a device where configure() has already be called

        Normally it will return in Ready state. If setup for multiple-runs with
        a single configure() then it will return in Armed state. If the user
        aborts then it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        if self.configured_steps.value < self.total_steps.value:
            next_state = ss.ARMED
        else:
            next_state = ss.READY
        try:
            self.transition(ss.RUNNING)
            hook = self.Run
            going = True
            while going:
                try:
                    self.do_run(hook)
                except AbortedError:
                    self.abort_queue.put(None)
                    # Wait for a response on the resume_queue
                    should_resume = self.resume_queue.get()
                    if should_resume:
                        # we need to resume
                        hook = self.Resume
                        self.log.debug("Resuming run")
                    else:
                        # we don't need to resume, just drop out
                        raise
                else:
                    going = False
            self.abortable_transition(next_state)
        except AbortedError:
            raise
        except Exception as e:
            self.go_to_error_state(e)
            raise

    def do_run(self, hook):
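        # Run the given hook (Run or Resume) on all parts, then work out
        # whether another run needs to be configured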
        self.run_hook(hook, self.part_contexts, self.update_completed_steps)
        self.abortable_transition(ss.POSTRUN)
        completed_steps = self.configured_steps.value
        if completed_steps < self.total_steps.value:
            steps_to_do = self.steps_per_run
            part_info = self.run_hook(self.ReportStatus, self.part_contexts)
            self.completed_steps.set_value(completed_steps)
            self.run_hook(
                self.PostRunArmed, self.part_contexts, completed_steps,
                steps_to_do, part_info, **self.configure_params)
            self.configured_steps.set_value(completed_steps + steps_to_do)
        else:
            self.run_hook(self.PostRunReady, self.part_contexts)

    def update_completed_steps(self, completed_steps, part):
        with self._lock:
            # Record this part's progress and report the minimum progress
            # across all parts
            self.progress_updates[part] = completed_steps
            min_completed_steps = min(self.progress_updates.values())
            if min_completed_steps > self.completed_steps.value:
                self.completed_steps.set_value(min_completed_steps)

    @method_writeable_in(
        ss.READY, ss.CONFIGURING, ss.ARMED, ss.RUNNING, ss.POSTRUN, ss.PAUSED,
        ss.SEEKING)
    def abort(self):
        """Abort the current operation and block until aborted

        Normally it will return in Aborted state. If something goes wrong it
        will return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        # Tell _call_do_run not to resume
        if self.resume_queue:
            self.resume_queue.put(False)
        self.try_aborting_function(ss.ABORTING, ss.ABORTED, self.do_abort)

    def do_abort(self):
        self.run_hook(self.Abort, self.create_part_contexts())

    def try_aborting_function(self, start_state, end_state, func, *args):
        try:
            # To make the running function fail we need to stop any running
            # contexts (if running a hook) or make transition() fail with
            # AbortedError. Both of these are accomplished here
            with self._lock:
                original_state = self.state.value
                self.abort_queue = Queue()
                self.transition(start_state)
                for context in self.part_contexts.values():
                    context.stop()
            if original_state not in (ss.READY, ss.ARMED, ss.PAUSED):
                # Something was running, let it finish aborting
                try:
                    self.abort_queue.get(timeout=ABORT_TIMEOUT)
                except TimeoutError:
                    self.log.warning("Timeout waiting while %s" % start_state)
            with self._lock:
                # Now we've waited for a while we can remove the error state
                # for transition in case a hook triggered it rather than a
                # transition
                self.part_contexts[self].ignore_stops_before_now()
            func(*args)
            self.abortable_transition(end_state)
        except AbortedError:
            self.abort_queue.put(None)
            raise
        except Exception as e:  # pylint:disable=broad-except
            self.go_to_error_state(e)
            raise

    def set_completed_steps(self, completed_steps):
        """Seek a Armed or Paused scan back to another value

        Normally it will return in the state it started in. If the user aborts
        then it will return in Aborted state. If something goes wrong it will
        return in Fault state. If the user disables then it will return in
        Disabled state.
        """
        call_with_params(self.pause, completedSteps=completed_steps)

    @method_writeable_in(ss.ARMED, ss.PAUSED, ss.RUNNING)
    @method_takes("completedSteps", NumberMeta(
        "int32", "Step to mark as the last completed step, -1 for current"), -1)
    def pause(self, params):
        """Pause a run() so that resume() can be called later.

        The original call to run() will not be interrupted by pause(), it will
        wait until the scan completes or is aborted.

        Normally it will return in Paused state. If the user aborts then it will
        return in Aborted state. If something goes wrong it will return in Fault
        state. If the user disables then it will return in Disabled state.
        """
        current_state = self.state.value
        if params.completedSteps < 0:
            completed_steps = self.completed_steps.value
        else:
            completed_steps = params.completedSteps
        if current_state == ss.RUNNING:
            next_state = ss.PAUSED
        else:
            next_state = current_state
        assert completed_steps < self.total_steps.value, \
            "Cannot seek to after the end of the scan"
        self.try_aborting_function(
            ss.SEEKING, next_state, self.do_pause, completed_steps)

    def do_pause(self, completed_steps):
        self.run_hook(self.Pause, self.create_part_contexts())
        in_run_steps = completed_steps % self.steps_per_run
        steps_to_do = self.steps_per_run - in_run_steps
        part_info = self.run_hook(self.ReportStatus, self.part_contexts)
        self.completed_steps.set_value(completed_steps)
        self.run_hook(
            self.Seek, self.part_contexts, completed_steps,
            steps_to_do, part_info, **self.configure_params)
        self.configured_steps.set_value(completed_steps + steps_to_do)

    @method_writeable_in(ss.PAUSED)
    def resume(self):
        """Resume a paused scan.

        Normally it will return in Running state. If something goes wrong it
        will return in Fault state.
        """
        self.transition(ss.RUNNING)
        self.resume_queue.put(True)
        # self.run will now take over

    def do_disable(self):
        # Abort anything that is currently running, but don't wait
        for context in self.part_contexts.values():
            context.stop()
        if self.resume_queue:
            self.resume_queue.put(False)
        super(RunnableController, self).do_disable()
Beispiel #29
0
class PvaClientComms(ClientComms):
    """A class for a client to communicate with the server"""

    use_cothread = False
    _monitors = None
    _send_queue = None

    def do_init(self):
        super(PvaClientComms, self).do_init()
        self._monitors = {}
        self._send_queue = Queue()

    def send_to_server(self, request):
        """Dispatch a request to the server

        Args:
            request(Request): The message to pass to the server
        """
        self._send_queue.put(request)
        self.spawn(self._send_to_server)

    def _send_to_server(self):
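        # A request was queued by send_to_server() just before this spawn,
        # so a zero-timeout get is safe here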
        request = self._send_queue.get(timeout=0)
        try:
            request = deserialize_object(request, Request)
            response = None
            if isinstance(request, Get):
                response = self._execute_get(request)
            elif isinstance(request, Put):
                response = self._execute_put(request)
            elif isinstance(request, Post):
                response = self._execute_rpc(request)
            elif isinstance(request, Subscribe):
                self._execute_monitor(request)
            elif isinstance(request, Unsubscribe):
                response = self._execute_unsubscribe(request)
            else:
                raise UnexpectedError("Unexpected request %s", request)
        except Exception as e:
            _, response = request.error_response(e)
        if response:
            request.callback(response)

    def _response_from_dict(self, request, d):
        if d.get("typeid", "") == Error.typeid:
            response = Error(request.id, d["message"])
        else:
            response = Return(request.id, d)
        return response

    def _execute_get(self, request):
        path = ".".join(request.path[1:])
        channel = pvaccess.Channel(request.path[0])
        d = channel.get(path).toDict()
        response = self._response_from_dict(request, d)
        return response

    def _execute_put(self, request):
        path = ".".join(request.path[1:])
        channel = pvaccess.Channel(request.path[0])
        channel.put(request.value, path)
        response = Return(request.id)
        return response

    def _execute_monitor(self, request):
        # Connect to the channel
        path = ".".join(request.path[1:])
        channel = pvaccess.Channel(request.path[0])
        self._monitors[request.generate_key()] = channel

        # Store the connection within the monitor set
        def callback(value=None):
            # TODO: ordering is not maintained here...
            # TODO: should we strip_tuples here?
            d = value.toDict(True)
            if d.get("typeid", "") == Error.typeid:
                response = Error(request.id, d["message"])
                self._monitors.pop(request.generate_key())
                channel.unsubscribe("")
            else:
                # TODO: support Deltas properly
                if request.delta:
                    response = Delta(request.id, [[[], d]])
                else:
                    response = Update(request.id, d)
            request.callback(response)

        # Perform a subscribe, but it returns nothing
        channel.subscribe("sub", callback)
        channel.startMonitor(path)
        return None

    def _execute_unsubscribe(self, request):
        channel = self._monitors.pop(request.generate_key())
        channel.unsubscribe("sub")
        response = Return(request.id)
        return response

    def _execute_rpc(self, request):
        method = pvaccess.PvObject({'method': pvaccess.STRING})
        method.set({'method': request.path[1]})
        # Connect to the channel and create the RPC client
        rpc = pvaccess.RpcClient(request.path[0], method)
        # Construct the pv object from the parameters
        params = dict_to_pv_object(request.parameters)
        # Call the method on the RPC object
        value = rpc.invoke(params)
        # Now create the Return object and populate it with the response
        d = strip_tuples(value.toDict(True))
        response = self._response_from_dict(request, d)
        return response
Beispiel #30
0
class WebsocketClientComms(ClientComms):
    """A class for a client to communicate with the server"""
    use_cothread = False
    # Attribute
    remote_blocks = None

    loop = None
    _conn = None
    _spawned = None
    _connected_queue = None
    # {new_id: (request, old_id)}
    _request_lookup = None
    # {Subscribe.generator_key(): Subscribe}
    _subscription_keys = {}
    _next_id = 1

    def create_attribute_models(self):
        for y in super(WebsocketClientComms, self).create_attribute_models():
            yield y
        # Create read-only attribute for the remotely reachable blocks
        meta = StringArrayMeta("Remotely reachable blocks",
                               tags=[widget("table")])
        self.remote_blocks = meta.create_attribute_model()
        yield "remoteBlocks", self.remote_blocks, None

    def do_init(self):
        super(WebsocketClientComms, self).do_init()
        self.loop = IOLoop()
        self._request_lookup = {}
        self._subscription_keys = {}
        self._connected_queue = Queue()
        root_subscribe = Subscribe(id=0,
                                   path=[".", "blocks"],
                                   callback=self._update_remote_blocks)
        self._subscription_keys[root_subscribe.generate_key()] = root_subscribe
        self._request_lookup[0] = (root_subscribe, 0)
        self.start_io_loop()

    def _update_remote_blocks(self, response):
        response = deserialize_object(response, Update)
        # TODO: should we spawn here?
        self.remote_blocks.set_value(response.value)

    def start_io_loop(self):
        if self._spawned is None:
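            # Start the tornado IOLoop in a spawned task and wait until the
            # websocket connection has been made (or time out)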
            self._conn = None
            self.loop.add_callback(self.recv_loop)
            self._spawned = self.spawn(self.loop.start)
            try:
                self._connected_queue.get(self.params.connectTimeout)
            except TimeoutError:
                self.stop_io_loop()
                raise

    def stop_io_loop(self):
        if self.loop:
            self.loop.stop()
            self._spawned.wait(timeout=10)
            self._spawned = None

    @gen.coroutine
    def recv_loop(self):
        url = "ws://%(hostname)s:%(port)d/ws" % self.params
        self._conn = yield websocket_connect(
            url, self.loop, connect_timeout=self.params.connectTimeout - 0.5)
        self._connected_queue.put(True)
        for request in self._subscription_keys.values():
            self._send_request(request)
        while True:
            message = yield self._conn.read_message()
            if message is None:
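                # read_message() returns None when the websocket has closed,
                # so fail any outstanding non-Subscribe requests and report
                # the fault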
                for request, old_id in self._request_lookup.values():
                    if not isinstance(request, Subscribe):
                        # Respond with an error
                        response = Error(old_id, message="Server disconnected")
                        request.callback(response)
                self.spawn(self._report_fault)
                return
            self.on_message(message)

    def _report_fault(self):
        with self._lock:
            self.transition(self.stateSet.FAULT, "Server disconnected")
            self.stop_io_loop()

    def do_disable(self):
        super(WebsocketClientComms, self).do_disable()
        self.stop_io_loop()

    def do_reset(self):
        super(WebsocketClientComms, self).do_reset()
        self.start_io_loop()

    def on_message(self, message):
        """Pass response from server to process receive queue

        Args:
            message(str): Received message
        """
        try:
            self.log.debug("Got message %s", message)
            d = json_decode(message)
            response = deserialize_object(d, Response)
            if isinstance(response, (Return, Error)):
                request, old_id = self._request_lookup.pop(response.id)
                if request.generate_key() in self._subscription_keys:
                    self._subscription_keys.pop(request.generate_key())
            else:
                request, old_id = self._request_lookup[response.id]
            response.set_id(old_id)
            # TODO: should we spawn here?
            request.callback(response)
        except Exception:
            # If we don't catch the exception here, tornado will spew odd
            # error messages about 'HTTPRequest' object has no attribute 'path'
            self.log.exception("on_message(%r) failed", message)

    def send_to_server(self, request):
        """Dispatch a request to the server

        Args:
            request (Request): The message to pass to the server
        """
        self.loop.add_callback(self._send_to_server, request)

    def _send_to_server(self, request):
        if isinstance(request, Unsubscribe):
            # If we have an unsubscribe, send it with the same id as the
            # subscribe
            subscribe = self._subscription_keys.pop(request.generate_key())
            new_id = subscribe.id
        else:
            if isinstance(request, Subscribe):
                # If we have an subscribe, store it so we can look it up
                self._subscription_keys[request.generate_key()] = request
            new_id = self._next_id
            self._next_id += 1
            self._request_lookup[new_id] = (request, request.id)
        request.set_id(new_id)
        self._send_request(request)

    def _send_request(self, request):
        message = json_encode(request)
        self.log.debug("Sending message %s", message)
        self._conn.write_message(message)