class SwitchoverTest(MessagingHandler):
    def __init__(self, sender_host, primary_host, fallback_host, addr):
        super(SwitchoverTest, self).__init__()
        self.sender_host    = sender_host[0]
        self.primary_host   = primary_host[0]
        self.fallback_host  = fallback_host[0]
        self.sender_name    = sender_host[1]
        self.primary_name   = primary_host[1]
        self.fallback_name  = fallback_host[1]
        self.addr           = addr
        self.count          = 300

        # DISPATCH-2213 back off on logging.
        self.log_sends      = 100  # every 100th send
        self.log_recvs      = 100  # every 100th receive
        self.log_released   = 100  # every 100th sender released

        self.sender_conn    = None
        self.primary_conn   = None
        self.fallback_conn  = None
        self.primary_open   = False
        self.fallback_open  = False
        self.error          = None
        self.n_tx           = 0
        self.n_rx           = 0
        self.n_rel          = 0
        self.phase          = 0
        self.tx_seq         = 0
        self.local_rel      = 0

        self.log_prefix     = "FALLBACK_TEST %s" % self.addr
        self.logger = Logger("SwitchoverTest_%s" % addr, print_to_console=False)
        # Prepend a convenience SERVER line for the scraper tool so that the
        # logs from this test can be merged with the router logs in scraper.
        self.logger.log("SERVER (info) Container Name: %s" % self.addr)
        self.logger.log("%s SwitchoverTest sender:%s primary:%s fallback:%s" %
                        (self.log_prefix, self.sender_name, self.primary_name, self.fallback_name))

    def timeout(self):
        self.error = "Timeout Expired - n_tx=%d, n_rx=%d, n_rel=%d, phase=%d, local_rel=%d" % \
                     (self.n_tx, self.n_rx, self.n_rel, self.phase, self.local_rel)
        self.sender_conn.close()
        self.primary_conn.close()
        self.fallback_conn.close()

    def fail(self, error):
        self.error = error
        self.sender_conn.close()
        self.primary_conn.close()
        self.fallback_conn.close()
        self.timer.cancel()

    def on_start(self, event):
        self.timer              = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.logger.log("%s Opening sender connection to %s" % (self.log_prefix, self.sender_name))
        self.sender_conn        = event.container.connect(self.sender_host)
        self.logger.log("%s Opening primary receiver connection to %s" % (self.log_prefix, self.primary_name))
        self.primary_conn       = event.container.connect(self.primary_host)
        self.logger.log("%s Opening fallback receiver connection to %s" % (self.log_prefix, self.fallback_name))
        self.fallback_conn      = event.container.connect(self.fallback_host)
        self.logger.log("%s Opening primary receiver to %s" % (self.log_prefix, self.primary_name))
        self.primary_receiver   = event.container.create_receiver(self.primary_conn, self.addr, name=(self.addr + "_primary_receiver"))
        self.logger.log("%s Opening fallback receiver to %s" % (self.log_prefix, self.fallback_name))
        self.fallback_receiver  = event.container.create_receiver(self.fallback_conn, self.addr, name=(self.addr + "_fallback_receiver"))
        self.fallback_receiver.source.capabilities.put_object(symbol("qd.fallback"))

    def on_link_opened(self, event):
        receiver_event = False
        if event.receiver == self.primary_receiver:
            self.logger.log("%s Primary receiver opened" % self.log_prefix)
            self.primary_open = True
            receiver_event = True
        if event.receiver == self.fallback_receiver:
            self.logger.log("%s Fallback receiver opened" % self.log_prefix)
            self.fallback_open = True
            receiver_event = True
        if receiver_event and self.primary_open and self.fallback_open:
            self.logger.log("%s Opening sender to %s" % (self.log_prefix, self.sender_name))
            self.sender = event.container.create_sender(self.sender_conn, self.addr, name=(self.addr + "_sender"))

    def on_link_closed(self, event):
        if event.receiver == self.primary_receiver:
            self.logger.log("%s Primary receiver closed. Start phase 1 send" % self.log_prefix)
            self.n_rx = 0
            self.n_tx = 0
            self.send()

    def send(self):
        e_credit = self.sender.credit
        e_n_tx = self.n_tx
        e_tx_seq = self.tx_seq
        last_message = Message("None")
        while self.sender.credit > 0 and self.n_tx < self.count and not self.sender.drain_mode:
            last_message = Message("Msg %s %d %d" % (self.addr, self.tx_seq, self.n_tx))
            self.sender.send(last_message)
            self.n_tx += 1
            self.tx_seq += 1
        if self.sender.drain_mode:
            n_drained = self.sender.drained()
            self.logger.log("%s sender.drained() drained %d credits" % (self.log_prefix, n_drained))
        if self.n_tx > e_n_tx and self.n_tx % self.log_sends == 0:  # if sent then log every Nth message
            self.logger.log("%s send() exit: last sent '%s' phase=%d, credit=%3d->%3d, n_tx=%4d->%4d, tx_seq=%4d->%4d, n_rel=%4d" %
                            (self.log_prefix, last_message.body, self.phase, e_credit, self.sender.credit,
                             e_n_tx, self.n_tx, e_tx_seq, self.tx_seq, self.n_rel))

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.send()
        else:
            self.fail("%s on_sendable event not from the only sender")

    def on_message(self, event):
        if event.receiver == self.primary_receiver:
            if self.phase == 0:
                self.n_rx += 1
                if self.n_rx % self.log_recvs == 0:
                    self.logger.log("%s Received phase 0 message '%s', n_rx=%d" %
                                    (self.log_prefix, event.message.body, self.n_rx))
                if self.n_rx == self.count:
                    self.logger.log("%s Triggering fallback by closing primary receiver on %s. Test phase 0->1." %
                                    (self.log_prefix, self.primary_name))
                    self.phase = 1
                    self.primary_receiver.close()
            else:
                # Phase 1 messages are unexpected on primary receiver
                self.logger.log("%s Phase %d message received on primary: '%s'" % (self.log_prefix, self.phase, event.message.body))
                self.fail("Receive phase1 message on primary receiver")
        elif event.receiver == self.fallback_receiver:
            if self.phase == 0:
                # Phase 0 message over the fallback receiver. This may happen because
                # the primary receiver is on a distant router while the fallback receiver is local.
                # Release the message and keep retrying until the primary receiver kicks in.
                self.release(event.delivery)
                self.n_rel += 1
                self.n_tx -= 1
                self.local_rel += 1
                if self.local_rel % self.log_recvs == 0:
                    self.logger.log("%s Released phase 0 over fallback: msg:'%s', n_rx=%d, n_tx=%d, n_rel=%d, local_rel=%d" %
                                    (self.log_prefix, event.message.body, self.n_rx, self.n_tx, self.n_rel, self.local_rel))
                    time.sleep(0.02)
            else:
                self.n_rx += 1
                if self.n_rx % self.log_recvs == 0:
                    self.logger.log("%s Received phase 1 over fallback: msg:'%s', n_rx=%d" %
                                    (self.log_prefix, event.message.body, self.n_rx))
                if self.n_rx == self.count:
                    self.logger.log("%s Success" % self.log_prefix)
                    self.fail(None)
        else:
            self.fail("%s message received on unidentified receiver" % self.addr)

    def on_released(self, event):
        # event type pn_delivery for sender
        self.n_rel += 1
        self.n_tx  -= 1
        if self.n_rel % self.log_released == 0:
            self.logger.log("%s on_released: sender delivery was released. Adjusted counts: n_rel=%d, n_tx=%d" %
                            (self.log_prefix, self.n_rel, self.n_tx))
        if event.sender is None:
            self.fail("on_released event not related to sender")

    def run(self):
        Container(self).run()
        if self.error is not None:
            self.logger.dump()
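
# A minimal usage sketch for SwitchoverTest (illustrative only; the router fixtures
# EA1, INT_A and INT_B are assumptions, not part of this suite). Each of the first
# three arguments is a (connection-url, display-name) pair, as unpacked in __init__.
#
#     test = SwitchoverTest((EA1.addresses[0], 'EA1'),
#                           (INT_A.addresses[0], 'INT.A'),
#                           (INT_B.addresses[0], 'INT.B'),
#                           'fallback/address/01')
#     test.run()
#     self.assertIsNone(test.error)
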
class WaypointTest(MessagingHandler):
    def __init__(self, first_host, second_host, first_address, second_address, container_id="ALC"):
        super(WaypointTest, self).__init__()
        self.first_host     = first_host
        self.second_host    = second_host
        self.first_address  = first_address
        self.second_address = second_address
        self.container_id   = container_id
        self.logger = Logger(title="WaypointTest")

        self.first_conn        = None
        self.second_conn       = None
        self.error             = None
        self.first_sender      = None
        self.first_sender_created = False
        self.first_sender_link_opened = False
        self.first_receiver    = None
        self.first_receiver_created    = False
        self.waypoint_sender   = None
        self.waypoint_receiver = None
        self.waypoint_queue    = []
        self.waypoint_sender_opened = False
        self.waypoint_receiver_opened = False
        self.firsts_created = False

        self.count  = 10
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_waypoint_rcvd = 0
        self.n_thru = 0
        self.outs = None

    def timeout(self):
        self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_thru=%d n_waypoint_rcvd=%d" % (self.n_sent, self.n_rcvd, self.n_thru, self.n_waypoint_rcvd)
        self.first_conn.close()
        self.second_conn.close()
        self.logger.dump()

    def fail(self, text):
        self.error = text
        self.second_conn.close()
        self.first_conn.close()
        self.timer.cancel()
        self.outs = "n_sent=%d n_rcvd=%d n_thru=%d n_waypoint_rcvd=%d" % (self.n_sent, self.n_rcvd, self.n_thru, self.n_waypoint_rcvd)
        print(self.outs)

    def send_client(self):
        while self.first_sender.credit > 0 and self.n_sent < self.count:
            self.n_sent += 1
            m = Message(body="Message %d of %d" % (self.n_sent, self.count))
            self.first_sender.send(m)

    def send_waypoint(self):
        self.logger.log("send_waypoint called")
        while self.waypoint_sender.credit > 0 and len(self.waypoint_queue) > 0:
            self.n_thru += 1
            m = self.waypoint_queue.pop()
            self.waypoint_sender.send(m)
            self.logger.log("waypoint_sender message sent")
        else:
            self.logger.log("waypoint_sender did not sent - credit = %s, len(self.waypoint_queue) = %s" % (str(self.waypoint_sender.credit), str(len(self.waypoint_queue))))

    def on_start(self, event):
        self.timer       = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.first_conn  = event.container.connect(self.first_host)
        self.second_conn = event.container.connect(self.second_host)

    def on_link_flow(self, event):
        if event.sender == self.waypoint_sender and self.first_sender_link_opened and not self.first_sender_created:
            self.first_sender_created = True
            self.first_sender = event.container.create_sender(self.first_conn, self.first_address)

    def on_link_opened(self, event):
        if event.receiver == self.waypoint_receiver and not self.first_sender_link_opened:
            self.first_sender_link_opened = True

    def on_link_opening(self, event):
        if event.sender and not self.waypoint_sender:
            self.waypoint_sender = event.sender
            if event.sender.remote_source.address == self.second_address:
                event.sender.source.address = self.second_address
                event.sender.open()
                self.waypoint_sender_opened = True
            else:
                self.fail("Incorrect address on incoming sender: got %s, expected %s" %
                          (event.sender.remote_source.address, self.second_address))

        elif event.receiver and not self.waypoint_receiver:
            self.waypoint_receiver = event.receiver
            if event.receiver.remote_target.address == self.second_address:
                event.receiver.target.address = self.second_address
                event.receiver.open()
                self.waypoint_receiver_opened = True
            else:
                self.fail("Incorrect address on incoming receiver: got %s, expected %s" %
                          (event.receiver.remote_target.address, self.second_address))

        if self.waypoint_sender_opened and self.waypoint_receiver_opened and not self.first_receiver_created:
            self.first_receiver_created = True
            self.first_receiver = event.container.create_receiver(self.first_conn, self.first_address)

    def on_sendable(self, event):
        if event.sender == self.first_sender:
            self.send_client()

    def on_message(self, event):
        if event.receiver == self.first_receiver:
            self.n_rcvd += 1
            if self.n_rcvd == self.count and self.n_thru == self.count:
                self.fail(None)
        elif event.receiver == self.waypoint_receiver:
            self.n_waypoint_rcvd += 1
            m = Message(body=event.message.body)
            self.waypoint_queue.append(m)
            self.send_waypoint()

    def run(self):
        container = Container(self)
        container.container_id = self.container_id
        container.run()
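
# A minimal usage sketch for WaypointTest (illustrative only; router_a and router_b are
# assumed fixtures exposing listener URLs). first_address is the client-facing address
# and second_address is the waypoint address accepted in on_link_opening above.
#
#     test = WaypointTest(router_a.addresses[0], router_b.addresses[0],
#                         'waypoint/client', 'waypoint/internal')
#     test.run()
#     self.assertIsNone(test.error)
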
class ThreadedTestClient(object):
    """
    An HTTP client running in a separate thread
    """

    def __init__(self, tests, port, repeat=1):
        self._id = uuid.uuid4().hex
        self._conn_addr = ("127.0.0.1:%s" % port)
        self._tests = tests
        self._repeat = repeat
        self._logger = Logger(title="TestClient: %s" % self._id,
                              print_to_console=False)
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self.error = None
        self.count = 0
        self._thread.start()

    def _run(self):
        self._logger.log("TestClient connecting on %s" % self._conn_addr)
        client = HTTPConnection(self._conn_addr, timeout=TIMEOUT)
        self._logger.log("TestClient connected")
        for loop in range(self._repeat):
            self._logger.log("TestClient start request %d" % loop)
            for op, tests in self._tests.items():
                for req, _, val in tests:
                    self._logger.log("TestClient sending %s %s request" % (op, req.target))
                    req.send_request(client,
                                     {"test-echo": "%s-%s-%s-%s" % (self._id,
                                                                    loop,
                                                                    op,
                                                                    req.target)})
                    self._logger.log("TestClient getting %s response" % op)
                    try:
                        rsp = client.getresponse()
                    except HTTPException as exc:
                        self._logger.log("TestClient response failed: %s" % exc)
                        self.error = str(exc)
                        return
                    self._logger.log("TestClient response %s received" % op)
                    if val:
                        try:
                            body = val.check_response(rsp)
                        except Exception as exc:
                            self._logger.log("TestClient response invalid: %s"
                                             % str(exc))
                            self.error = "client failed: %s" % str(exc)
                            return

                        if req.method == "BODY" and body != b'':
                            self._logger.log("TestClient response invalid: %s"
                                             % "body present!")
                            self.error = "error: body present!"
                            return
                    self.count += 1
                    self._logger.log("TestClient request %s %s completed!" %
                                     (op, req.target))
        client.close()
        self._logger.log("TestClient to %s closed" % self._conn_addr)

    def wait(self, timeout=TIMEOUT):
        self._thread.join(timeout=timeout)
        self._logger.log("TestClient %s shut down" % self._conn_addr)
        sleep(0.5)  # fudge factor allow socket close to complete

    def dump_log(self):
        self._logger.dump()
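
# A minimal usage sketch for ThreadedTestClient (illustrative only). `tests` is assumed to
# be a dict mapping an operation name to a list of 3-tuples whose first element is a
# request object and whose third is an optional response validator (the middle element is
# ignored in _run above); `server_port` is an assumed HTTP listener port.
#
#     client = ThreadedTestClient(tests, server_port, repeat=2)
#     client.wait()
#     if client.error:
#         client.dump_log()
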
class QdmanageTest(TestCase):
    """Test qdmanage tool output"""
    @staticmethod
    def ssl_file(name):
        return os.path.join(DIR, 'ssl_certs', name)

    @classmethod
    def setUpClass(cls):
        super(QdmanageTest, cls).setUpClass()
        cls.inter_router_port = cls.tester.get_port()
        config_1 = Qdrouterd.Config([
            ('router', {
                'mode': 'interior',
                'id': 'R1'
            }),
            ('sslProfile', {
                'name': 'server-ssl',
                'caCertFile': cls.ssl_file('ca-certificate.pem'),
                'certFile': cls.ssl_file('server-certificate.pem'),
                'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                'password': '******'
            }), ('listener', {
                'port': cls.tester.get_port()
            }),
            ('connector', {
                'role': 'inter-router',
                'port': cls.inter_router_port
            }),
            ('address', {
                'name': 'test-address',
                'prefix': 'abcd',
                'distribution': 'multicast'
            }),
            ('linkRoute', {
                'name': 'test-link-route',
                'prefix': 'xyz',
                'direction': 'in'
            }),
            ('autoLink', {
                'name': 'test-auto-link',
                'address': 'mnop',
                'direction': 'out'
            }),
            ('listener', {
                'port': cls.tester.get_port(),
                'sslProfile': 'server-ssl'
            }),
            ('address', {
                'name': 'pattern-address',
                'pattern': 'a/*/b/#/c',
                'distribution': 'closest'
            })
        ])

        config_2 = Qdrouterd.Config([
            ('router', {
                'mode': 'interior',
                'id': 'R2'
            }),
            ('listener', {
                'role': 'inter-router',
                'port': cls.inter_router_port
            }),
        ])
        cls.router_2 = cls.tester.qdrouterd('test_router_2',
                                            config_2,
                                            wait=True)
        cls.router_1 = cls.tester.qdrouterd('test_router_1',
                                            config_1,
                                            wait=True)
        cls.router_1.wait_router_connected('R2')

    def address(self):
        return self.router_1.addresses[0]

    def run_qdmanage(self,
                     cmd,
                     input=None,
                     expect=Process.EXIT_OK,
                     address=None):
        p = self.popen(['qdmanage'] + cmd.split(' ') + [
            '--bus', address or self.address(), '--indent=-1', '--timeout',
            str(TIMEOUT)
        ],
                       stdin=PIPE,
                       stdout=PIPE,
                       stderr=STDOUT,
                       expect=expect,
                       universal_newlines=True)
        out = p.communicate(input)[0]
        try:
            p.teardown()
        except Exception as e:
            raise Exception(out if out else str(e))
        return out

    def assert_entity_equal(self, expect, actual, copy=None):
        """Copy keys in copy from actual to idenity, then assert maps equal."""
        if copy:
            for k in copy:
                expect[k] = actual[k]
        self.assertEqual(expect, actual)

    def assert_entities_equal(self, expect, actual, copy=None):
        """Do assert_entities_equal on a list of maps."""
        for e, a in zip(expect, actual):
            self.assert_entity_equal(e, a, copy)

    def test_crud(self):
        def check(cmd, expect, copy=None, **kwargs):
            actual = json.loads(self.run_qdmanage(cmd))
            self.assert_entity_equal(expect, actual, copy=copy)

        expect = {'arg1': 'foo', 'type': DUMMY, 'name': 'mydummy2'}
        # create with type, name in attributes
        check('create arg1=foo type=dummy name=mydummy2',
              expect,
              copy=['identity'],
              attributes=json.dumps(expect))
        # create with type, name as arguments
        expect['name'] = 'mydummy'
        check('create name=mydummy type=dummy arg1=foo',
              expect,
              copy=['identity'])
        check('read --name mydummy', expect)
        check('read --identity %s' % expect['identity'], expect)
        expect.update(arg1='bar', num1=555)
        check('update name=mydummy arg1=bar num1=555', expect)
        check('read --name=mydummy', expect)
        expect.update(arg1='xxx', num1=888)
        # name outside attributes
        check('update name=mydummy arg1=xxx num1=888', expect)
        check('read --name=mydummy', expect)
        self.run_qdmanage('delete --name mydummy')
        self.run_qdmanage('read --name=mydummy', expect=Process.EXIT_FAIL)

    def test_stdin(self):
        """Test piping from stdin"""
        def check(cmd, expect, input, copy=None):
            actual = json.loads(
                self.run_qdmanage(cmd + " --stdin", input=input))
            self.assert_entity_equal(expect, actual, copy=copy)

        def check_list(cmd, expect_list, input, copy=None):
            actual = json.loads(
                self.run_qdmanage(cmd + " --stdin", input=input))
            self.assert_entities_equal(expect_list, actual, copy=copy)

        expect = {'type': DUMMY, 'name': 'mydummyx', 'arg1': 'foo'}
        check('create', expect, json.dumps(expect), copy=['identity'])

        expect_list = [{
            'type': DUMMY,
            'name': 'mydummyx%s' % i
        } for i in range(3)]
        check_list('create',
                   expect_list,
                   json.dumps(expect_list),
                   copy=['identity'])

        expect['arg1'] = 'bar'
        expect['num1'] = 42
        check('update', expect, json.dumps(expect))

        for i in range(3):
            expect_list[i]['arg1'] = 'bar'
            expect_list[i]['num1'] = i
        check_list('update', expect_list, json.dumps(expect_list))

    def test_query(self):
        def long_type(name):
            return u'org.apache.qpid.dispatch.' + name

        types = ['listener', 'log', 'router']
        long_types = [long_type(name) for name in types]

        qall = json.loads(self.run_qdmanage('query'))
        qall_types = set([e['type'] for e in qall])
        for t in long_types:
            self.assertIn(t, qall_types)

        qlistener = json.loads(self.run_qdmanage('query --type=listener'))
        self.assertEqual([long_type('listener')] * 2,
                         [e['type'] for e in qlistener])
        self.assertEqual(self.router_1.ports[0], int(qlistener[0]['port']))

        qattr = json.loads(self.run_qdmanage('query type name'))
        for e in qattr:
            self.assertEqual(2, len(e))

        def name_type(entities):
            ignore_types = [
                long_type(t)
                for t in ['router.link', 'connection', 'router.address']
            ]
            return set((e['name'], e['type']) for e in entities
                       if e['type'] not in ignore_types)

        self.assertEqual(name_type(qall), name_type(qattr))

    def test_get_schema(self):
        schema = dictify(QdSchema().dump())
        actual = self.run_qdmanage("get-json-schema")
        self.assertEqual(schema, dictify(json.loads(actual)))
        actual = self.run_qdmanage("get-schema")
        self.assertEqual(schema, dictify(json.loads(actual)))

    def test_get_annotations(self):
        """
        The qdmanage GET-ANNOTATIONS call must return an empty dict since we don't support annotations at the moment.
        """
        out = json.loads(self.run_qdmanage("get-annotations"))
        self.assertTrue(len(out) == 0)

    def test_get_types(self):
        out = json.loads(self.run_qdmanage("get-types"))
        self.assertEqual(len(out), TOTAL_ENTITIES)

    def test_get_attributes(self):
        out = json.loads(self.run_qdmanage("get-attributes"))
        self.assertEqual(len(out), TOTAL_ENTITIES)

    def test_get_operations(self):
        out = json.loads(self.run_qdmanage("get-operations"))
        self.assertEqual(len(out), TOTAL_ENTITIES)
        self.assertEqual(out['org.apache.qpid.dispatch.sslProfile'],
                         [u'CREATE', u'DELETE', u'READ'])

    def test_get_types_with_ssl_profile_type(self):
        out = json.loads(
            self.run_qdmanage(
                "get-types --type=org.apache.qpid.dispatch.sslProfile"))
        self.assertEqual(out['org.apache.qpid.dispatch.sslProfile'], [
            u'org.apache.qpid.dispatch.configurationEntity',
            u'org.apache.qpid.dispatch.entity'
        ])

    def test_get_ssl_profile_type_attributes(self):
        out = json.loads(
            self.run_qdmanage(
                'get-attributes --type=org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 12)

    def test_get_ssl_profile_attributes(self):
        out = json.loads(
            self.run_qdmanage(
                'get-attributes org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 12)

    def test_get_ssl_profile_type_operations(self):
        out = json.loads(
            self.run_qdmanage(
                'get-operations --type=org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 3)

    def test_get_ssl_profile_operations(self):
        out = json.loads(
            self.run_qdmanage(
                'get-operations org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 3)

    def test_get_log(self):
        logs = json.loads(self.run_qdmanage("get-log limit=20"))
        found = False
        for log in logs:
            if u'get-log' in log[2] and ['AGENT', 'debug'] == log[0:2]:
                found = True
        self.assertTrue(found)

    def test_get_logstats(self):
        query_command = 'QUERY --type=logStats'
        logs = json.loads(self.run_qdmanage(query_command))
        # Each value returned by the above query should be
        # a log, and each log should contain an entry for each
        # log level.
        log_levels = [
            'criticalCount', 'debugCount', 'errorCount', 'infoCount',
            'noticeCount', 'traceCount', 'warningCount'
        ]
        n_log_levels = len(log_levels)

        good_logs = 0

        for log_dict in logs:
            log_levels_present = 0
            log_levels_missing = 0
            for log_level in log_levels:
                if log_level in log_dict:
                    log_levels_present += 1
                else:
                    log_levels_missing += 1
            if log_levels_present == n_log_levels:
                good_logs += 1

        self.assertEqual(good_logs, len(logs))

    def test_update(self):
        exception = False
        try:
            # Try to clear 'outputFile' by setting it to an empty value
            json.loads(
                self.run_qdmanage(
                    "UPDATE --type org.apache.qpid.dispatch.log --name log/DEFAULT outputFile="
                ))
        except Exception as e:
            exception = True
            self.assertTrue(
                "InternalServerErrorStatus: CError: Configuration: Failed to open log file ''"
                in str(e))
        self.assertTrue(exception)

        # Set a valid 'outputFile'
        output = json.loads(
            self.run_qdmanage(
                "UPDATE --type org.apache.qpid.dispatch.log --name log/DEFAULT "
                "enable=trace+ outputFile=A.log"))
        self.assertEqual("A.log", output['outputFile'])
        self.assertEqual("trace+", output['enable'])

    def create(self, type, name, port):
        create_command = 'CREATE --type=' + type + ' --name=' + name + ' host=0.0.0.0 port=' + port
        connector = json.loads(self.run_qdmanage(create_command))
        return connector

    def test_check_address_name(self):
        long_type = 'org.apache.qpid.dispatch.router.config.address'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(len(output), 2)
        self.assertEqual(output[0]['name'], "test-address")
        self.assertEqual(output[0]['distribution'], "multicast")
        self.assertEqual(output[0]['prefix'], "abcd")
        self.assertNotIn('pattern', output[0])
        self.assertEqual(output[1]['name'], "pattern-address")
        self.assertEqual(output[1]['distribution'], "closest")
        self.assertEqual(output[1]['pattern'], "a/*/b/#/c")
        self.assertNotIn('prefix', output[1])

    def test_create_address(self):
        long_type = 'org.apache.qpid.dispatch.router.config.address'
        create_command = 'CREATE --type=' + long_type + ' pattern="a.b.#" ingressPhase=5 egressPhase=6'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['egressPhase'], 6)
        self.assertEqual(output['ingressPhase'], 5)

    def test_check_link_route_name(self):
        long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(output[0]['name'], "test-link-route")
        self.assertEqual(output[0]['direction'], "in")
        self.assertEqual(output[0]['dir'], "in")
        self.assertEqual(output[0]['prefix'], "xyz")

    def test_specify_container_id_connection_link_route(self):
        long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
        create_command = 'CREATE --type=' + long_type + ' prefix=abc containerId=id1 connection=conn1 direction=out'
        output = self.run_qdmanage(create_command, expect=Process.EXIT_FAIL)
        self.assertIn("Both connection and containerId cannot be specified",
                      output)

    def test_check_auto_link_name(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(output[0]['name'], "test-auto-link")
        self.assertEqual(output[0]['direction'], "out")
        self.assertEqual(output[0]['addr'], "mnop")

    def test_create_auto_link_with_phase(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        create_command = 'CREATE --type=' + long_type + ' addr=xyz containerId=id1 direction=out phase=2'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['phase'], 2)

    def test_create_auto_link_with_dir(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        create_command = 'CREATE --type=' + long_type + ' addr=defgh containerId=id2 dir=out phase=2'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['dir'], 'out')
        self.assertEqual(output['direction'], 'out')

    def test_create_link_route_with_dir(self):
        long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
        create_command = 'CREATE --type=' + long_type + ' pattern=mnb dir=out'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['dir'], 'out')
        self.assertEqual(output['direction'], 'out')

    def test_specify_container_id_connection_auto_link(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        create_command = 'CREATE --type=' + long_type + ' addr=abc containerId=id1 connection=conn1 direction=out'
        output = self.run_qdmanage(create_command, expect=Process.EXIT_FAIL)
        self.assertIn("Both connection and containerId cannot be specified",
                      output)

    def test_create_delete_connector(self):
        long_type = 'org.apache.qpid.dispatch.connector'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        name = output[0]['name']

        # Delete an existing connector
        delete_command = 'DELETE --type=' + long_type + ' --name=' + name
        self.run_qdmanage(delete_command)
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(output, [])

        # Re-create the connector and then try wait_connectors
        self.create(long_type, name, str(QdmanageTest.inter_router_port))

        outputs = json.loads(self.run_qdmanage(query_command))
        created = False
        for output in outputs:
            conn_name = 'connector/127.0.0.1:%s' % QdmanageTest.inter_router_port
            conn_name_1 = 'connector/0.0.0.0:%s' % QdmanageTest.inter_router_port
            if conn_name == output['name'] or conn_name_1 == output['name']:
                created = True
                break

        self.assertTrue(created)

    def test_zzz_add_connector(self):
        port = self.get_port()
        # don't provide a role and make sure that the role defaults to 'normal'
        command = "CREATE --type=connector --name=eaconn1 port=" + str(
            port) + " host=0.0.0.0"
        output = json.loads(self.run_qdmanage(command))
        self.assertEqual("normal", output['role'])
        # provide the same connector name (eaconn1), expect duplicate value failure
        self.assertRaises(
            Exception, self.run_qdmanage,
            "CREATE --type=connector --name=eaconn1 port=12345 host=0.0.0.0")
        port = self.get_port()
        # provide role as 'normal' and make sure that it is preserved
        command = "CREATE --type=connector --name=eaconn2 port=" + str(
            port) + " host=0.0.0.0 role=normal"
        output = json.loads(self.run_qdmanage(command))
        self.assertEqual("normal", output['role'])

    def test_zzz_create_delete_listener(self):
        long_type = 'org.apache.qpid.dispatch.listener'
        name = 'ealistener'

        listener_port = self.get_port()

        listener = self.create(long_type, name, str(listener_port))
        self.assertEqual(listener['type'], long_type)
        self.assertEqual(listener['name'], name)

        delete_command = 'DELETE --type=' + long_type + ' --name=' + name
        self.run_qdmanage(delete_command)

        exception_occurred = False
        try:
            # Try deleting an already-deleted listener; this should raise an exception
            self.run_qdmanage(delete_command)
        except Exception as e:
            exception_occurred = True
            self.assertTrue(("NotFoundStatus: No entity with name=%s" %
                             name) in str(e))

        self.assertTrue(exception_occurred)

    def test_create_delete_ssl_profile(self):
        ssl_profile_name = 'ssl-profile-test'
        ssl_create_command = 'CREATE --type=sslProfile certFile=' + self.ssl_file('server-certificate.pem') + \
                         ' privateKeyFile=' + self.ssl_file('server-private-key.pem') + ' password=server-password' + \
                         ' name=' + ssl_profile_name + ' caCertFile=' + self.ssl_file('ca-certificate.pem')
        output = json.loads(self.run_qdmanage(ssl_create_command))
        self.assertEqual(output['name'], ssl_profile_name)
        self.run_qdmanage('DELETE --type=sslProfile --name=' +
                          ssl_profile_name)

    def test_delete_connection(self):
        """
        This test creates a blocking connection and tries to delete that connection using qdmanage DELETE operation.
        Make sure we are Forbidden from deleting a connection because qdmanage DELETEs are not allowed on a connection
        Only qdmanage UPDATEs are allowed..
        :return:
        """
        connection = BlockingConnection(
            self.address(), properties=CONNECTION_PROPERTIES_UNICODE_STRING)
        query_command = 'QUERY --type=connection'
        outputs = json.loads(self.run_qdmanage(query_command))
        identity = None
        passed = False
        for output in outputs:
            if output.get('properties'):
                conn_properties = output['properties']
                if conn_properties.get('int_property'):
                    identity = output.get("identity")
                    if identity:
                        delete_command = 'DELETE --type=connection --id=' + identity
                        try:
                            outs = json.loads(
                                self.run_qdmanage(delete_command))
                        except Exception as e:
                            if "Forbidden" in str(e):
                                passed = True

        # The test has passed since we were forbidden from deleting a connection
        # due to lack of policy permissions.
        self.assertTrue(passed)

    def test_create_delete_address_pattern(self):
        config = [('mercury.*.earth.#', 'closest'),
                  ('*/mars/*/#', 'multicast'), ('*.mercury', 'closest'),
                  ('*/#/pluto', 'multicast')]
        long_type = 'org.apache.qpid.dispatch.router.config.address'

        # add patterns:
        pcount = 0
        for p in config:
            query_command = 'CREATE --type=' + long_type + \
                                             ' pattern=' + p[0] + \
                                             ' distribution=' + p[1] + \
                                             ' name=Pattern' + str(pcount)
            self.run_qdmanage(query_command)
            pcount += 1

        # verify correctly added:
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        total = len(output)

        pcount = 0
        for o in output:
            pattern = o.get('pattern')
            if pattern is not None:
                for p in config:
                    if p[0] == pattern:
                        pcount += 1
                        self.assertEqual(p[1], o.get('distribution'))
        self.assertEqual(pcount, len(config))

        # delete
        pcount = 0
        for p in config:
            query_command = 'DELETE --type=' + long_type + \
                                             ' --name=Pattern' + str(pcount)
            self.run_qdmanage(query_command)
            pcount += 1

        # verify deleted:
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(len(output), total - len(config))
        for o in output:
            pattern = o.get('pattern')
            if pattern is not None:
                for p in config:
                    self.assertNotEqual(p[0], pattern)

    def test_yy_query_many_links(self):
        # This test will fail without the fix for DISPATCH-974
        c = BlockingConnection(self.address())
        self.logger = Logger(title="test_yy_query_many_links")
        count = 0
        COUNT = 5000

        ADDRESS_SENDER = "examples-sender"
        ADDRESS_RECEIVER = "examples-receiver"

        # This loop creates 5000 consumer and 5000 producer links with
        # different addresses
        while count < COUNT:
            r = c.create_receiver(ADDRESS_RECEIVER + str(count))
            s = c.create_sender(ADDRESS_SENDER + str(count))
            count += 1

        # Try fetching all 10,000 addresses
        # This qdmanage query command would fail without the fix
        # for DISPATCH-974
        query_command = 'QUERY --type=org.apache.qpid.dispatch.router.address'
        outs = json.loads(self.run_qdmanage(query_command))

        sender_addresses = 0
        receiver_addresses = 0

        for out in outs:
            if ADDRESS_SENDER in out['name']:
                sender_addresses += 1
            if ADDRESS_RECEIVER in out['name']:
                receiver_addresses += 1

        self.assertEqual(sender_addresses, COUNT)
        self.assertEqual(receiver_addresses, COUNT)

        query_command = 'QUERY --type=link'
        outs = json.loads(self.run_qdmanage(query_command))

        out_links = 0
        in_links = 0
        success = False

        i = 0
        while i < 3:
            i += 1
            for out in outs:
                if out.get('owningAddr'):
                    if ADDRESS_SENDER in out['owningAddr']:
                        in_links += 1
                    if ADDRESS_RECEIVER in out['owningAddr']:
                        out_links += 1

            # If the link count is less than COUNT, try again in 2 seconds
            # Try after 2 more seconds for a total of 6 seconds.
            # If the link count is still less than expected count, there
            # is something wrong, the test has failed.
            if out_links < COUNT or in_links < COUNT:
                self.logger.log("out_links=%s, in_links=%s" %
                                (str(out_links), str(in_links)))
                sleep(2)
                outs = json.loads(self.run_qdmanage(query_command))
            else:
                self.logger.log("Test success!")
                success = True
                break

        if not success:
            self.logger.dump()

        self.assertEqual(out_links, COUNT)
        self.assertEqual(in_links, COUNT)

    def test_worker_threads(self):
        long_type = 'org.apache.qpid.dispatch.router'
        qd_manager = QdManager(self, address=self.address())
        output = qd_manager.query(long_type)
        self.assertEqual(output[0]['workerThreads'], 4)

    def test_check_memory_usage(self):
        """
        Verify that the process memory usage is present. Non-Linux platforms
        may return zero, so accept that as a valid value.
        """
        long_type = 'org.apache.qpid.dispatch.router'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(len(output), 1)
        mem = output[0].get('memoryUsage')

        if sys.platform.lower().startswith('linux'):
            # @TODO(kgiusti) - linux only for now
            self.assertTrue(mem is not None)
            self.assertTrue(mem >= 0)
        else:
            # @TODO(kgiusti) - update test to handle other platforms as support
            # is added
            self.assertTrue(mem is None)
class LinkRouteTest(MessagingHandler):
    def __init__(self, first_host, second_host, first_address, second_address,
                 dynamic, lookup_host, routers):
        super(LinkRouteTest, self).__init__(prefetch=0)
        self.logger = Logger(title="LinkRouteTest")
        self.first_host = first_host
        self.second_host = second_host
        self.first_address = first_address
        self.second_address = second_address
        self.dynamic = dynamic
        self.lookup_host = lookup_host
        self.routers = routers
        self.reactor = None

        self.first_conn = None
        self.second_conn = None
        self.error = None
        self.first_sender = None
        self.first_receiver = None
        self.second_sender = None
        self.second_receiver = None
        self.poll_timer = None

        self.count = 10
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_settled = 0

    def timeout(self):
        self.done("Timeout Expired: n_sent=%d n_rcvd=%d n_settled=%d" %
                  (self.n_sent, self.n_rcvd, self.n_settled))

    def poll_timeout(self):
        self.poll()

    def cleanup(self):
        for router in self.routers:
            router.wait_address_unsubscribed("D0.0.0.0/link")

    def done(self, error=None):
        self.error = error
        self.second_conn.close()
        self.first_conn.close()
        self.timer.cancel()
        self.lookup_conn.close()
        if self.poll_timer:
            self.poll_timer.cancel()
        if error:
            self.logger.dump()

        # give proton a chance to close all of the above connections,
        # then wait for the route tables to remove the link route

        class _CleanupTimer:
            def __init__(self, parent):
                self.parent = parent

            def on_timer_task(self, event):
                self.parent.cleanup()

        self.reactor.schedule(1.0, _CleanupTimer(self))

    def send(self):
        self.logger.log("Send")
        while self.first_sender.credit > 0 and self.n_sent < self.count:
            self.n_sent += 1
            m = Message(body="Message %d of %d" % (self.n_sent, self.count))
            self.first_sender.send(m)

    def poll(self):
        self.logger.log("Poll")
        request = self.proxy.read_address("D0.0.0.0/link")
        self.agent_sender.send(request)

    def setup_first_links(self, event):
        self.logger.log("First links")
        self.first_sender = event.container.create_sender(
            self.first_conn, self.first_address)
        if self.dynamic:
            self.first_receiver = event.container.create_receiver(
                self.first_conn,
                dynamic=True,
                options=DynamicNodeProperties(
                    {"x-opt-qd.address": UNICODE(self.first_address)}))
        else:
            self.first_receiver = event.container.create_receiver(
                self.first_conn, self.first_address)

    def on_start(self, event):
        self.logger.log("On Start")
        self.reactor = event.reactor
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.first_conn = event.container.connect(self.first_host)
        self.second_conn = event.container.connect(self.second_host)
        self.lookup_conn = event.container.connect(self.lookup_host)
        self.reply_receiver = event.container.create_receiver(self.lookup_conn,
                                                              dynamic=True)
        self.agent_sender = event.container.create_sender(
            self.lookup_conn, "$management")

    def on_link_opening(self, event):
        if event.sender:
            self.logger.log("On sender link opening")
            self.second_sender = event.sender
            if self.dynamic:
                if event.sender.remote_source.dynamic:
                    event.sender.source.address = self.second_address
                    event.sender.open()
                else:
                    self.done("Expected dynamic source on sender")
            else:
                if event.sender.remote_source.address == self.second_address:
                    event.sender.source.address = self.second_address
                    event.sender.open()
                else:
                    self.done(
                        "Incorrect address on incoming sender: got %s, expected %s"
                        % (event.sender.remote_source.address,
                           self.second_address))

        elif event.receiver:
            self.logger.log("On receiver link opening")
            self.second_receiver = event.receiver
            if event.receiver.remote_target.address == self.second_address:
                event.receiver.target.address = self.second_address
                event.receiver.open()
            else:
                self.done(
                    "Incorrect address on incoming receiver: got %s, expected %s"
                    % (event.receiver.remote_target.address,
                       self.second_address))

    def on_link_opened(self, event):
        self.logger.log("On link opened")
        if event.receiver:
            event.receiver.flow(self.count)

        if event.receiver == self.reply_receiver:
            self.proxy = RouterProxy(self.reply_receiver.remote_source.address)
            self.poll()

    def on_sendable(self, event):
        self.logger.log("On sendable")
        if event.sender == self.first_sender:
            self.send()

    def on_message(self, event):
        if event.receiver == self.first_receiver:
            self.logger.log("On message 1st")
            self.n_rcvd += 1

        if event.receiver == self.reply_receiver:
            self.logger.log("On message reply")
            response = self.proxy.response(event.message)
            if response.status_code == 200 and (response.remoteCount +
                                                response.containerCount) > 0:
                if self.poll_timer:
                    self.poll_timer.cancel()
                    self.poll_timer = None
                self.setup_first_links(event)
            else:
                self.poll_timer = event.reactor.schedule(
                    0.25, PollTimeout(self))

    def on_settled(self, event):
        if event.sender == self.first_sender:
            self.logger.log("On settled")
            self.n_settled += 1
            if self.n_settled == self.count:
                self.done(None)

    def run(self):
        container = Container(self)
        container.container_id = 'LRC'
        container.run()
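
# A minimal usage sketch for LinkRouteTest (illustrative only; the router fixtures and
# addresses are assumptions). lookup_host is the router whose $management agent is polled
# until the link-route address is known before the first links are attached.
#
#     test = LinkRouteTest(router_a.addresses[0], router_b.addresses[0],
#                          'org.apache.first', 'org.apache.second',
#                          dynamic=False,
#                          lookup_host=router_c.addresses[0],
#                          routers=[router_a, router_b, router_c])
#     test.run()
#     self.assertIsNone(test.error)
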
class OversizeMessageTransferTest(MessagingHandler):
    """
    This test connects a sender and a receiver. Then it tries to send _count_
    number of messages of the given size through the router or router network.

    With expect_block=True the ingress router should detect the sender's oversize
    message and close the sender connection. The receiver may receive
    aborted message indications but that is not guaranteed. If any aborted
    messages are received then the count must be at most one.
    The test is a success when the sender receives a connection error with
    oversize indication and the receiver has not received too many aborts.

    With expect_block=False sender messages should be received normally.
    The test is a success when n_accepted == count.
    """
    def __init__(self, sender_host, receiver_host, test_address,
                 message_size=100000, count=10, expect_block=True, print_to_console=False):
        super(OversizeMessageTransferTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_block = expect_block

        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None

        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMessageTransferTest - %s" % (self.test_address)), print_to_console=print_to_console)
        self.log_unhandled = False

    def timeout(self):
        self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_rejected=%d n_aborted=%d" % \
                     (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted)
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        self.logger.log("on_start")
        self.timer = event.reactor.schedule(10, Timeout(self))
        self.logger.log("on_start: opening receiver connection to %s" % (self.receiver_host.addresses[0]))
        self.receiver_conn = event.container.connect(self.receiver_host.addresses[0])
        self.logger.log("on_start: opening   sender connection to %s" % (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(self.sender_host.addresses[0])
        self.logger.log("on_start: Creating receiver")
        self.receiver = event.container.create_receiver(self.receiver_conn, self.test_address)
        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn, self.test_address)
        self.logger.log("on_start: done")

    def send(self):
        while self.sender.credit > 0 and self.n_sent < self.count:
            # construct the message body in identifiable chunks
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[self.n_sent % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, self.n_sent, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            self.logger.log("send. address:%s message:%d of %s length=%d" %
                            (self.test_address, self.n_sent, self.count, self.msg_size))
            m = Message(body=body_msg)
            self.sender.send(m)
            self.n_sent += 1

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        if self.expect_block:
            # All messages should violate maxMessageSize.
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvd += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log("on_connection_remote_close: sender closed with correct condition")
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                                 OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                                 event.connection.remote_condition.name, event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        if self.receiver:
            self.receiver.close()
            self.receiver = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        if self.receiver_conn:
            self.receiver_conn.close()
            self.receiver_conn = None

    def _check_done(self):
        current = ("check_done: sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d" %
                   (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted, self.n_connection_error))
        self.logger.log(current)
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
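            # When blocking is expected the test completes on the single forced
            # connection close; otherwise it completes once every message has
            # been both sent and received.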
            done = (self.n_connection_error == 1) \
                    if self.expect_block else \
                    (self.n_sent == self.count and self.n_rcvd == self.count)

            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        if self.expect_block:
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        self.n_aborted += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self._shut_down_test()

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" % (method, args))

    def run(self):
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()

class FakeBroker(MessagingHandler):
    """
    A fake broker-like service that listens for client connections
    """
    class _Queue:
        def __init__(self, name, logger, dynamic=False):
            self.dynamic = dynamic
            self.queue = collections.deque()
            self.consumers = []
            self.logger = logger
            self.name = name
            self.sent = 0
            self.recv = 0

        def subscribe(self, consumer):
            self.consumers.append(consumer)

        def unsubscribe(self, consumer):
            if consumer in self.consumers:
                self.consumers.remove(consumer)
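            # Report True when the queue can be retired: no consumers remain
            # and the queue is either dynamic or holds no undelivered messages.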
            return len(self.consumers) == 0 and (self.dynamic
                                                 or len(self.queue) == 0)

        def publish(self, message):
            self.recv += 1
            self.logger.log("Received message %d" % self.recv)
            self.queue.append(message)
            return self.dispatch()

        def dispatch(self, consumer=None):
            if consumer:
                c = [consumer]
            else:
                c = self.consumers
            count = 0
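            # Keep sweeping the consumer list until a full pass delivers
            # nothing, i.e. the queue is empty or no consumer has credit.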
            while True:
                rc = self._deliver_to(c)
                count += rc
                if rc == 0:
                    break
            return count

        def _deliver_to(self, consumers):
            try:
                result = 0
                for c in consumers:
                    if c.credit:
                        c.send(self.queue.popleft())
                        result += 1
                        self.sent += 1
                        self.logger.log("Sent message %d" % self.sent)

                return result
            except IndexError:  # no more messages
                return 0

    def __init__(self, url, container_id=None, **handler_kwargs):
        super(FakeBroker, self).__init__(**handler_kwargs)
        self.url = url
        self.queues = {}
        self.acceptor = None
        self.in_count = 0
        self.out_count = 0
        self.link_errors = 0
        self._connections = []
        self._error = None
        self._container = Container(self)
        self._container.container_id = container_id or 'FakeBroker'
        self._logger = Logger(title=self._container.container_id)
        self._thread = Thread(target=self._main)
        self._thread.daemon = True
        self._stop_thread = False
        self._thread.start()

    def _main(self):
        self._container.timeout = 1.0
        self._container.start()
        self._logger.log("Starting reactor thread")

        while self._container.process():
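            # A stop request closes the acceptor and every open connection so
            # the reactor can drain and process() eventually returns False.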
            if self._stop_thread:
                if self.acceptor:
                    self.acceptor.close()
                    self.acceptor = None
                for c in self._connections:
                    c.close()
                self._connections = []
        self._logger.log("reactor thread done")

    def join(self):
        self._stop_thread = True
        self._container.wakeup()
        self._thread.join(timeout=TIMEOUT)
        self._logger.log("thread done")
        if self._thread.is_alive():
            raise Exception("FakeBroker did not exit")
        if self._error:
            raise Exception(self._error)

    def on_start(self, event):
        self.acceptor = event.container.listen(self.url)

    def _queue(self, address):
        if address not in self.queues:
            self.queues[address] = self._Queue(address, self._logger)
        return self.queues[address]

    def on_link_opening(self, event):
        if event.link.is_sender:
            if event.link.remote_source.dynamic:
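                # A dynamic source gets a temporary queue named by a fresh UUID;
                # _unsubscribe() retires it once its consumer detaches.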
                address = str(uuid.uuid4())
                event.link.source.address = address
                q = self._Queue(address, self._logger, True)
                self.queues[address] = q
                q.subscribe(event.link)
                self._logger.log("dynamic sending link opened %s" % address)
            elif event.link.remote_source.address:
                event.link.source.address = event.link.remote_source.address
                self._queue(event.link.source.address).subscribe(event.link)
                self._logger.log("sending link opened %s" %
                                 event.link.source.address)
        elif event.link.remote_target.address:
            event.link.target.address = event.link.remote_target.address
            self._logger.log("receiving link opened %s" %
                             event.link.target.address)

    def _unsubscribe(self, link):
        addr = link.source.address
        if addr in self.queues and self.queues[addr].unsubscribe(link):
            del self.queues[addr]

    def on_link_error(self, event):
        self._logger.log("link error")
        self.link_errors += 1
        self.on_link_closing(event)

    def on_link_closing(self, event):
        self._logger.log("link closing")
        if event.link.is_sender:
            self._unsubscribe(event.link)

    def on_connection_opening(self, event):
        pn_conn = event.connection
        pn_conn.container = self._container.container_id

    def on_connection_opened(self, event):
        self._logger.log("connection opened")
        self._connections.append(event.connection)

    def on_connection_closing(self, event):
        self.remove_stale_consumers(event.connection)

    def on_connection_closed(self, event):
        self._logger.log("connection closed")
        try:
            self._connections.remove(event.connection)
        except ValueError:
            pass

    def on_disconnected(self, event):
        self.remove_stale_consumers(event.connection)

    def remove_stale_consumers(self, connection):
        link = connection.link_head(Endpoint.REMOTE_ACTIVE)
        while link:
            if link.is_sender:
                self._unsubscribe(link)
            link = link.next(Endpoint.REMOTE_ACTIVE)

    def on_sendable(self, event):
        self.out_count += self._queue(event.link.source.address).dispatch(
            event.link)

    def on_message(self, event):
        self.in_count += 1
        self.out_count += self._queue(event.link.target.address).publish(
            event.message)

    def dump_log(self):
        self._logger.dump()
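
# Illustrative usage sketch (not taken from the original tests; the URL below is
# an assumption): a FakeBroker starts its own reactor thread as soon as it is
# constructed, so a test only has to point clients or router connectors at its
# address and call join() during teardown.
#
#     broker = FakeBroker('amqp://127.0.0.1:25672', container_id='ExampleBroker')
#     ...                      # drive traffic through the broker
#     broker.join()            # stops the reactor thread; raises on failure
#     broker.dump_log()
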
class OversizeMessageTransferTest(MessagingHandler):
    """
    This test connects a sender and a receiver. Then it tries to send _count_
    number of messages of the given size through the router or router network.

    Messages either pass through an edge router and are blocked by an interior
    router, or they are blocked by both the edge and the interior router.

    When 'blocked_by_both' is false:

    * The ingress router should allow the sender's oversize message.
    * The uplink interior router blocks the message by rejecting it and
      closing the connection between the interior and edge routers.
    * The receiver may receive aborted-message indications, but that is
      not guaranteed.
    * If any aborted messages are received then the count must be at most one.

    When 'blocked_by_both' is true:

    * The ingress edge router rejects the first message and closes the
      connection.
    * The second message may be aborted because the connection between the
      edge router and the interior router was closed.
    * The remaining messages are sent into a closed connection and receive
      no settlement.
    """
    def __init__(self,
                 test_class,
                 sender_host,
                 receiver_host,
                 test_address,
                 message_size=100000,
                 count=10,
                 blocked_by_both=False,
                 print_to_console=False):
        """
        Construct an instance of the unicast test
        :param test_class:    test class - has wait-connection function
        :param sender_host:   router for sender connection
        :param receiver_host: router for receiver connection
        :param test_address:  sender/receiver AMQP address
        :param message_size:  in bytes
        :param count:         how many messages to send
        :param blocked_by_both:  true if edge router messages are also blocked by interior
        :param print_to_console: print logs as they happen
        """
        super(OversizeMessageTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.blocked_by_both = blocked_by_both
        self.expect_block = True
        self.messages = []

        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None

        self.network_stable = False
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMessageTransferTest - %s" %
                                    (self.test_address)),
                             print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks

    def timeout(self):
        current = (
            "check_done: sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d send_settled:%d"
            % (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted,
               self.n_connection_error, self.n_send_settled))
        self.error = "Timeout Expired " + current
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        self.logger.log("on_start")

        self.logger.log("on_start: secheduling reactor timeout")
        self.timer = event.reactor.schedule(10, Timeout(self))

        self.logger.log("Waiting for router network to stabilize")
        self.test_class.wait_router_network_connected()
        self.network_stable = True

        self.logger.log("on_start: generating messages")
        for idx in range(self.count):
            # construct message in identifiable chunks
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, idx, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            m = Message(body=body_msg)
            self.messages.append(m)

        self.logger.log("on_start: opening receiver connection to %s" %
                        (self.receiver_host.addresses[0]))
        self.receiver_conn = event.container.connect(
            self.receiver_host.addresses[0])

        self.logger.log("on_start: Creating receiver")
        self.receiver = event.container.create_receiver(
            self.receiver_conn, self.test_address)

        self.logger.log("on_start: opening   sender connection to %s" %
                        (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(
            self.sender_host.addresses[0])

        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn,
                                                    self.test_address)

        self.logger.log("on_start: done")

    def send(self):
        while self.sender.credit > 0 and self.n_sent < self.count:
            m = self.messages[self.n_sent]
            self.logger.log(
                "send. address:%s message:%d of %s length=%d" %
                (self.test_address, self.n_sent, self.count, self.msg_size))
            self.sender.send(m)
            self.n_sent += 1
        #if self.n_sent == self.count:
        #    self.log_unhandled = True

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        self.logger.log("on_message: entry")
        if self.expect_block:
            # All messages should violate maxMessageSize.
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvd += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log(
                        "on_connection_remote_close: sender closed with correct condition"
                    )
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                        OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                        event.connection.remote_condition.name,
                        event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        if self.receiver:
            self.receiver.close()
            self.receiver = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        if self.receiver_conn:
            self.receiver_conn.close()
            self.receiver_conn = None

    def _current(self):
        return (
            "net_stable=%s sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d send_settled:%d"
            % (self.network_stable, self.n_sent, self.n_rcvd, self.n_rejected,
               self.n_aborted, self.n_connection_error, self.n_send_settled))

    def _check_done(self):
        self.logger.log("check_done: " + self._current())
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            if not self.blocked_by_both:
                # Blocked by interior only. Connection to edge stays up
                # and all messages must be accounted for.
                done = self.n_rejected == 1 and \
                       self.n_send_settled == self.count
            else:
                # Blocked by interior and edge. Expect edge connection to go down
                # and some of our messages arrive at edge after it has sent
                # AMQP close. Those messages are never settled. TODO: Is that OK?
                done = self.n_rejected == 1 and \
                       self.n_connection_error == 1
            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        if self.expect_block:
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        self.n_aborted += 1
        self._check_done()

    def on_settled(self, event):
        self.logger.log("on_settled")
        if event.connection == self.sender_conn:
            self.logger.log("on_settled: sender connection")
            self.n_send_settled += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self.sender_conn.close()
        self.receiver_conn.close()
        self.timer.cancel()

    def on_link_error(self, event):
        self.error = event.link.remote_condition.name
        self.logger.log("on_link_error: %s" % (self.error))
        # Link errors may prevent normal test shutdown so don't even try.
        raise Exception(self.error)

    def on_reactor_final(self, event):
        self.logger.log("on_reactor_final:")

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" %
                            (method, args))

    def run(self):
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
        time.sleep(0.2)
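
# Illustrative usage sketch (the fixture names EA1 and INTA and the address are
# assumptions, not values defined in this excerpt): called from inside a
# TestCase method, the handler is constructed with its router fixtures, run,
# and then checked for a recorded error.
#
#     test = OversizeMessageTransferTest(self, EA1, INTA, "oversize/blocked",
#                                        message_size=200000, count=10,
#                                        blocked_by_both=False)
#     test.run()
#     self.assertIsNone(test.error, test.error)
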
class OversizeMulticastTransferTest(MessagingHandler):
    """
    This test connects a sender and four receivers. Then it tries to send _count_
    number of messages of the given size through the router or router network.
    """
    def __init__(self,
                 test_class,
                 sender_host,
                 routers,
                 test_address,
                 expect_receives,
                 blocked_by_ingress,
                 blocked_by_interior,
                 message_size=100000,
                 count=10,
                 print_to_console=False):
        """
        Construct an instance of the multicast test
        :param test_class:    test class - has wait-connection function
        :param sender_host:         router for the sender connection
        :param routers:             a list of all the routers for receiver connections
        :param test_address:        sender/receiver AMQP address
        :param expect_receives:     array of expected receive counts
        :param blocked_by_ingress:  true if ingress router blocks
        :param blocked_by_interior: true if edge router messages also blocked by interior
        :param message_size:        in bytes
        :param count:               how many messages to send
        :param print_to_console:    print logs as they happen
        """
        super(OversizeMulticastTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.routers = routers
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_receives = expect_receives  # router array
        self.blocked_by_ingress = blocked_by_ingress
        self.blocked_by_interior = blocked_by_interior
        self.messages = []

        self.sender_conn = None
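        # The per-router lists below are indexed by the module-level constants
        # IDX_INTA, IDX_INTB, IDX_EA1 and IDX_EB1 used throughout this class.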
        self.receiver_conns = [None, None, None, None]  # router array
        self.error = None
        self.sender = None
        self.receivers = [None, None, None, None]  # router array
        self.proxy = None

        self.network_stable = False
        self.n_sent = 0
        self.n_rcvds = [0, 0, 0, 0]  # router array
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborteds = [0, 0, 0, 0]  # router array
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMulticastTransferTest - %s" %
                                    (self.test_address)),
                             print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks

    def timeout(self):
        current = self._current()
        self.error = "Timeout Expired " + current
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        self.logger.log("on_start")

        self.logger.log("on_start: secheduling reactor timeout")
        self.timer = event.reactor.schedule(10, Timeout(self))

        self.logger.log("Waiting for router network to stabilize")
        self.test_class.wait_router_network_connected()
        self.network_stable = True

        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            self.logger.log("on_start: opening receiver connection to %s" %
                            (self.routers[idx].addresses[0]))
            self.receiver_conns[idx] = event.container.connect(
                self.routers[idx].addresses[0])
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            self.logger.log("on_start: Creating receiver %d" % idx)
            self.receivers[idx] = event.container.create_receiver(
                self.receiver_conns[idx], self.test_address)

        self.logger.log("on_start: generating messages")
        for idx in range(self.count):
            # construct message in identifiable chunks
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, idx, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            m = Message(body=body_msg)
            self.messages.append(m)

        self.logger.log("on_start: opening   sender connection to %s" %
                        (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(
            self.sender_host.addresses[0])

        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn,
                                                    self.test_address)

        self.logger.log("on_start: done")

    def rcvr_idx_of(self, rcvr):
        """
        Given a receiver, as in event.receiver, return
        the router array index of that receiver's router
        :param rcvr:
        :return: integer index of receiver
        """
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if rcvr == self.receivers[idx]:
                return idx
        self.error = "Receiver not found in receivers array."
        self.logger.log(self.error)
        self.logger.dump()
        self._shut_down_test()
        raise Exception(self.error)

    def send(self):
        while self.sender.credit > 0 and self.n_sent < self.count:
            m = self.messages[self.n_sent]
            self.logger.log(
                "send. address:%s message:%d of %s length=%d" %
                (self.test_address, self.n_sent, self.count, self.msg_size))
            self.sender.send(m)
            self.n_sent += 1
        #if self.n_sent == self.count:
        #    self.log_unhandled = True

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        self.logger.log("on_message")
        if self.shut_down:
            return
        idx = self.rcvr_idx_of(event.receiver)
        if self.expect_receives[idx] == 0:
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvds[idx] += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log(
                        "on_connection_remote_close: sender closed with correct condition"
                    )
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                        OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                        event.connection.remote_condition.name,
                        event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if self.receivers[idx]:
                self.receivers[idx].close()
                self.receivers[idx] = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if self.receiver_conns[idx]:
                self.receiver_conns[idx].close()
                self.receiver_conns[idx] = None

    def _current(self):
        return ("net_stable:%s sent=%d rcvd=%s rejected=%d aborted=%s connection_error:%d send_settled:%d"
                % (self.network_stable, self.n_sent, str(self.n_rcvds),
                   self.n_rejected, str(self.n_aborteds),
                   self.n_connection_error, self.n_send_settled))

    def _check_done(self):
        self.logger.log("check_done: " + self._current())
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            if self.blocked_by_interior:
                if self.blocked_by_ingress:
                    # Blocked by interior and edge. Expect edge connection to go down
                    # and some of our messages arrive at edge after it has sent
                    # AMQP close. Those messages are never settled. TODO: Is that OK?
                    done = self.n_rejected == 1 and \
                           self.n_connection_error == 1
                else:
                    # Blocked by interior only. Connection to edge stays up
                    # and all messages must be accounted for.
                    all_received = True
                    for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
                        if self.expect_receives[idx] > 0:
                            if self.n_rcvds[idx] != self.expect_receives[idx]:
                                all_received = False
                    done = self.n_rejected <= 1 and \
                           self.n_send_settled == self.count and \
                           all_received
            else:
                # Blocked by edge should never deliver to interior
                done = self.n_rejected == 1 and \
                       self.n_connection_error == 1

            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        # A rejection is expected whenever either router is configured to block.
        if self.blocked_by_ingress or self.blocked_by_interior:
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        if self.shut_down:
            return
        idx = self.rcvr_idx_of(event.receiver)
        self.n_aborteds[idx] += 1
        self._check_done()

    def on_settled(self, event):
        self.logger.log("on_settled")
        if event.connection == self.sender_conn:
            self.logger.log("on_settled: sender connection")
            self.n_send_settled += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self._shut_down_test()

    def on_link_error(self, event):
        self.error = event.link.remote_condition.name
        self.logger.log("on_link_error: %s" % (self.error))
        # Link errors may prevent normal test shutdown so don't even try.
        raise Exception(self.error)

    def on_reactor_final(self, event):
        self.logger.log("on_reactor_final:")

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" %
                            (method, args))

    def run(self):
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
        time.sleep(0.2)
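
# Illustrative usage sketch (the router fixtures and the IDX_* ordering of the
# `routers` list are assumptions about the surrounding module): the multicast
# variant takes every router plus per-router expected receive counts.
#
#     test = OversizeMulticastTransferTest(self, EA1, routers,
#                                          "multicast/oversize",
#                                          expect_receives=[0, 0, 0, 0],
#                                          blocked_by_ingress=True,
#                                          blocked_by_interior=False,
#                                          message_size=200000, count=10)
#     test.run()
#     self.assertIsNone(test.error, test.error)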