    def __init__(self, first_host, second_host, first_address, second_address,
                 dynamic, lookup_host, routers):
        super(LinkRouteTest, self).__init__(prefetch=0)
        self.logger = Logger(title="LinkRouteTest")
        self.first_host = first_host
        self.second_host = second_host
        self.first_address = first_address
        self.second_address = second_address
        self.dynamic = dynamic
        self.lookup_host = lookup_host
        self.routers = routers
        self.reactor = None

        self.first_conn = None
        self.second_conn = None
        self.error = None
        self.first_sender = None
        self.first_receiver = None
        self.second_sender = None
        self.second_receiver = None
        self.poll_timer = None

        self.count = 10
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_settled = 0
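
    # A minimal run() sketch, not part of the original snippet: handlers like
    # this are driven by a proton Container, mirroring the run() method of
    # OversizeMulticastTransferTest shown later in this collection.
    def run(self):
        Container(self).run()
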
Example #2
    def __init__(self, first_host, second_host, first_address, second_address, container_id="ALC"):
        super(WaypointTest, self).__init__()
        self.first_host     = first_host
        self.second_host    = second_host
        self.first_address  = first_address
        self.second_address = second_address
        self.container_id   = container_id
        self.logger = Logger(title="WaypointTest")

        self.first_conn        = None
        self.second_conn       = None
        self.error             = None
        self.first_sender      = None
        self.first_sender_created = False
        self.first_sender_link_opened = False
        self.first_receiver    = None
        self.first_receiver_created    = False
        self.waypoint_sender   = None
        self.waypoint_receiver = None
        self.waypoint_queue    = []
        self.waypoint_sender_opened = False
        self.waypoint_receiver_opened = False
        self.firsts_created = False

        self.count  = 10
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_waypoint_rcvd = 0
        self.n_thru = 0
        self.outs = None
Example #3
    def __init__(self, sender_host, receiver_host, test_address,
                 message_size=100000, count=10, expect_block=True, print_to_console=False):
        super(OversizeMessageTransferTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_block = expect_block

        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None

        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMessageTransferTest - %s" % (self.test_address)), print_to_console=print_to_console)
        self.log_unhandled = False
    def __init__(self,
                 test_class,
                 sender_host,
                 routers,
                 test_address,
                 expect_receives,
                 blocked_by_ingress,
                 blocked_by_interior,
                 message_size=100000,
                 count=10,
                 print_to_console=False):
        """
        Construct an instance of the multicast test
        :param test_class:    test class - has wait-connection function
        :param sender_host:         router for the sender connection
        :param routers:             a list of all the routers for receiver connections
        :param test_address:        sender/receiver AMQP address
        :param expect_receives:     array of expected receive counts
        :param blocked_by_ingress:  true if ingress router blocks
        :param blocked_by_interior: true if edge router messages also blocked by interior
        :param message_size:        in bytes
        :param count:               how many messages to send
        :param print_to_console:    print logs as they happen
        """
        super(OversizeMulticastTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.routers = routers
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_receives = expect_receives  # router array
        self.blocked_by_ingress = blocked_by_ingress
        self.blocked_by_interior = blocked_by_interior
        self.messages = []

        self.sender_conn = None
        self.receiver_conns = [None, None, None, None]  # router array
        self.error = None
        self.sender = None
        self.receivers = [None, None, None, None]  # router array
        self.proxy = None

        self.network_stable = False
        self.n_sent = 0
        self.n_rcvds = [0, 0, 0, 0]  # router array
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborteds = [0, 0, 0, 0]  # router array
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMulticastTransferTest - %s" %
                                    (self.test_address)),
                             print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks
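
# Hedged usage sketch for the multicast constructor above; every argument is a
# placeholder supplied by the enclosing test case, not a value from the
# original suite.
def example_run_multicast(test_class, routers, address, expected,
                          ingress_blocks, interior_blocks):
    test = OversizeMulticastTransferTest(
        test_class,       # provides wait_router_network_connected()
        routers[0],       # router for the sender connection
        routers,          # receivers connect to every router
        address,          # sender/receiver AMQP address
        expected,         # per-router expected receive counts
        ingress_blocks,
        interior_blocks)
    test.run()
    assert test.error is None, test.error
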
    def __init__(self,
                 test_class,
                 sender_host,
                 receiver_host,
                 test_address,
                 message_size=100000,
                 count=10,
                 blocked_by_both=False,
                 print_to_console=False):
        """
        Construct an instance of the unicast test
        :param test_class:    test class - has wait-connection function
        :param sender_host:   router for sender connection
        :param receiver_host: router for receiver connection
        :param test_address:  sender/receiver AMQP address
        :param message_size:  in bytes
        :param count:         how many messages to send
        :param blocked_by_both:  true if edge router messages are also blocked by interior
        :param print_to_console: print logs as they happen
        """
        super(OversizeMessageTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.blocked_by_both = blocked_by_both
        self.expect_block = True
        self.messages = []

        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None

        self.network_stable = False
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMessageTransferTest - %s" %
                                    (self.test_address)),
                             print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks
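
# A similar hedged sketch for the unicast constructor above; the router
# fixtures and the address are placeholders from the enclosing test case.
def example_run_unicast(test_class, sender_router, receiver_router, address):
    test = OversizeMessageTransferTest(
        test_class, sender_router, receiver_router, address,
        blocked_by_both=False)
    test.run()
    assert test.error is None, test.error
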
Example #6
    def __init__(self, server_port, client_port, tests, handler_cls=None):
        self._logger = Logger(title="TestServer", print_to_console=False)
        self._client_port = client_port
        self._server_addr = ("", server_port)
        self._server = MyHTTPServer(self._server_addr,
                                    handler_cls or RequestHandler,
                                    tests)
        self._server.allow_reuse_address = True
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self._thread.start()
Example #7
    def __init__(self, tests, port, repeat=1):
        self._id = uuid.uuid4().hex
        self._conn_addr = ("127.0.0.1:%s" % port)
        self._tests = tests
        self._repeat = repeat
        self._logger = Logger(title="TestClient: %s" % self._id,
                              print_to_console=False)
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self.error = None
        self.count = 0
        self._thread.start()
Example #8
    def setUpClass(cls):
        super(TcpAdaptorManagementTest, cls).setUpClass()

        if DISABLE_SELECTOR_TESTS:
            return

        cls.tcp_server_port = cls.tester.get_port()
        cls.tcp_listener_port = cls.tester.get_port()
        cls.test_name = 'TCPMgmtTest'

        # Here we have a simple barebones standalone router config.
        config = [
            ('router', {'mode': 'standalone',
                        'id': cls.test_name}),
            ('listener', {'role': 'normal',
                          'port': cls.tester.get_port()}),
        ]
        config = Qdrouterd.Config(config)
        cls.router = cls.tester.qdrouterd(cls.test_name, config, wait=True)

        # Start the echo server. This is the server that the tcpConnector
        # will be connecting to.
        server_prefix = "ECHO_SERVER ES_%s" % cls.test_name
        cls.logger = Logger(title="TcpAdaptor",
                            print_to_console=True,
                            save_for_dump=False,
                            ofilename="../setUpClass/TcpAdaptor_echo_server.log")
        cls.echo_server = TcpEchoServer(prefix=server_prefix,
                                        port=cls.tcp_server_port,
                                        logger=cls.logger)
        # The router and the echo server are running at this point.
        assert cls.echo_server.is_running
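
# Hedged sketch of the management step a test method would typically take next:
# wiring the echo server to the router through a tcpConnector. The entity type
# and attribute names below are assumptions about the TCP adaptor schema, and
# run_qdmanage() stands for a helper like the one in the QdmanageTest class
# further down.
#
#     cmd = ('CREATE --type=tcpConnector --name=mgmtConnector'
#            ' host=127.0.0.1 port=%s address=%s' %
#            (self.tcp_server_port, self.test_name))
#     connector = json.loads(self.run_qdmanage(cmd))
#     assert int(connector['port']) == self.tcp_server_port
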
Example #9
    def __init__(self, url, container_id=None, **handler_kwargs):
        super(FakeBroker, self).__init__(**handler_kwargs)
        self.url = url
        self.queues = {}
        self.acceptor = None
        self.in_count = 0
        self.out_count = 0
        self.link_errors = 0
        self._connections = []
        self._error = None
        self._container = Container(self)
        self._container.container_id = container_id or 'FakeBroker'
        self._logger = Logger(title=self._container.container_id)
        self._thread = Thread(target=self._main)
        self._thread.daemon = True
        self._stop_thread = False
        self._thread.start()
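
# Hedged usage sketch: once constructed, FakeBroker serves from its own thread
# and a client can attach to `url`. The port is illustrative, and
# BlockingConnection / Message are the proton helpers already used elsewhere in
# these tests; a real test would give the broker's acceptor a moment to come up
# before connecting.
def example_fake_broker():
    broker = FakeBroker("amqp://127.0.0.1:25672")
    conn = BlockingConnection(broker.url)
    conn.create_sender("examples").send(Message(body="hello"))
    conn.close()
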
Example #10
    def __init__(self, test_name, client_n, logger, client, server, size,
                 count,
                 print_client_logs=True,
                 timeout=TIMEOUT,
                 port_override=None):
        """
        Launch an echo client upon construction.

        :param test_name: Unique name for log file prefix
        :param client_n: Client number for differentiating otherwise identical clients
        :param logger: parent logger for logging test activity vs. client activity
        :param client: router name to which the client connects
        :param server: name whose address the client is targeting
        :param size: length of messages in bytes
        :param count: number of messages to be sent/verified
        :param print_client_logs: verbosity switch
        :raises Exception: if the echo client cannot be started
        """
        self.test_name = test_name
        self.client_n = str(client_n)
        self.logger = logger
        self.client = client
        self.server = server
        self.size = size
        self.count = count
        self.timeout = timeout
        self.print_client_logs = print_client_logs
        self.client_final = False

        # Each router has a client listener for the echo server attached to every
        # router; use the port for this client/server pair unless overridden.
        self.listener_port = TcpAdaptor.tcp_client_listener_ports[self.client][self.server] \
            if port_override is None else port_override

        self.name = "%s_%s_%s_%s" % \
                    (self.test_name, self.client_n, self.size, self.count)
        self.client_prefix = "ECHO_CLIENT %s" % self.name
        self.client_logger = Logger(title=self.client_prefix,
                                    print_to_console=self.print_client_logs,
                                    save_for_dump=False,
                                    ofilename="../setUpClass/TcpAdaptor_echo_client_%s.log" % self.name)

        try:
            self.e_client = TcpEchoClient(prefix=self.client_prefix,
                                          host='localhost',
                                          port=self.listener_port,
                                          size=self.size,
                                          count=self.count,
                                          timeout=self.timeout,
                                          logger=self.client_logger)

        except Exception:
            # self.e_client may not exist if the constructor itself failed, so
            # build and log the error locally before re-raising.
            error = "TCP_TEST TcpAdaptor_runner_%s failed. Exception: %s" % \
                    (self.name, traceback.format_exc())
            self.logger.log(error)
            raise Exception(error)
    def __init__(self, sender_host, receiver_host, address):
        super(MessageRouteAbortTest, self).__init__()
        self.sender_host      = sender_host
        self.receiver_host    = receiver_host
        self.address          = address

        self.sender_conn   = None
        self.receiver_conn = None
        self.error         = None
        self.sender1       = None
        self.receiver      = None
        self.delivery      = None
        self.logger        = Logger(title="MessageRouteAbortTest")

        self.program       = [('D', 10), ('D', 20), ('A', 30), ('A', 40), ('D', 50), ('D', 60),
                              ('A', 100), ('D', 110),
                              ('A', 1000), ('A', 1010), ('A', 1020), ('A', 1030), ('A', 1040), ('D', 1050),
                              ('A', 10000), ('A', 10010), ('A', 10020), ('A', 10030), ('A', 10040), ('D', 10050),
                              ('A', 100000), ('A', 100010), ('A', 100020), ('A', 100030), ('A', 100040), ('D', 100050), ('F', 10)]
        self.result        = []
        self.expected_result = [10, 20, 50, 60, 110, 1050, 10050, 100050]
    def __init__(self, sender_host, primary_host, fallback_host, addr):
        super(SwitchoverTest, self).__init__()
        self.sender_host = sender_host[0]
        self.primary_host = primary_host[0]
        self.fallback_host = fallback_host[0]
        self.sender_name = sender_host[1]
        self.primary_name = primary_host[1]
        self.fallback_name = fallback_host[1]
        self.addr = addr
        self.count = 300

        # DISPATCH-2213 back off on logging.
        self.log_sends = 100  # every 100th send
        self.log_recvs = 100  # every 100th receive
        self.log_released = 100  # every 100th sender released

        self.sender_conn = None
        self.primary_conn = None
        self.fallback_conn = None
        self.primary_open = False
        self.fallback_open = False
        self.error = None
        self.n_tx = 0
        self.n_rx = 0
        self.n_rel = 0
        self.phase = 0
        self.tx_seq = 0
        self.local_rel = 0

        self.log_prefix = "FALLBACK_TEST %s" % self.addr
        self.logger = Logger("SwitchoverTest_%s" % addr,
                             print_to_console=False)
        # Prepend a convenience SERVER line for the scraper tool so the logs
        # from this test can be merged with the router logs in scraper.
        self.logger.log("SERVER (info) Container Name: %s" % self.addr)
        self.logger.log("%s SwitchoverTest sender:%s primary:%s fallback:%s" %
                        (self.log_prefix, self.sender_name, self.primary_name,
                         self.fallback_name))
Example #13
class TestServer(object):
    """
    A HTTPServer running in a separate thread
    """

    def __init__(self, server_port, client_port, tests, handler_cls=None):
        self._logger = Logger(title="TestServer", print_to_console=False)
        self._client_port = client_port
        self._server_addr = ("", server_port)
        self._server = MyHTTPServer(self._server_addr,
                                    handler_cls or RequestHandler,
                                    tests)
        self._server.allow_reuse_address = True
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self._thread.start()

    def _run(self):
        self._logger.log("TestServer listening on %s:%s" % self._server_addr)
        try:
            self._server.server_killed = False
            while not self._server.server_killed:
                self._server.handle_request()
        except Exception as exc:
            self._logger.log("TestServer %s crash: %s" %
                             (self._server_addr, exc))
            raise
        self._logger.log("TestServer %s:%s closed" % self._server_addr)

    def wait(self, timeout=TIMEOUT):
        self._logger.log("TestServer %s:%s shutting down" % self._server_addr)
        self.request_count = 0
        if self._thread.is_alive():
            client = HTTPConnection("127.0.0.1:%s" % self._client_port,
                                    timeout=TIMEOUT)
            client.putrequest("POST", "/SHUTDOWN")
            client.putheader("Content-Length", "0")
            client.endheaders()
            # 13 == len('Server Closed')
            client.getresponse().read(13)
            client.close()
            self._thread.join(timeout=TIMEOUT)
        if self._server:
            self._server.server_close()
            self.request_count = self._server.request_count
            del self._server
        sleep(0.5)  # fudge factor allow socket close to complete
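
# Hedged lifecycle sketch for the TestServer class above: construct it, drive
# HTTP requests at server_port, then wait() POSTs /SHUTDOWN via client_port and
# joins the server thread. The port and the empty tests table are placeholders;
# in the real suite a router adaptor normally sits between the two ports.
def example_test_server():
    server = TestServer(server_port=8800, client_port=8800, tests={})
    try:
        pass  # issue HTTP requests against 127.0.0.1:8800 here
    finally:
        server.wait()
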
class QdmanageTest(TestCase):
    """Test qdmanage tool output"""
    @staticmethod
    def ssl_file(name):
        return os.path.join(DIR, 'ssl_certs', name)

    @classmethod
    def setUpClass(cls):
        super(QdmanageTest, cls).setUpClass()
        cls.inter_router_port = cls.tester.get_port()
        config_1 = Qdrouterd.Config([
            ('router', {
                'mode': 'interior',
                'id': 'R1'
            }),
            ('sslProfile', {
                'name': 'server-ssl',
                'caCertFile': cls.ssl_file('ca-certificate.pem'),
                'certFile': cls.ssl_file('server-certificate.pem'),
                'privateKeyFile': cls.ssl_file('server-private-key.pem'),
                'password': '******'
            }), ('listener', {
                'port': cls.tester.get_port()
            }),
            ('connector', {
                'role': 'inter-router',
                'port': cls.inter_router_port
            }),
            ('address', {
                'name': 'test-address',
                'prefix': 'abcd',
                'distribution': 'multicast'
            }),
            ('linkRoute', {
                'name': 'test-link-route',
                'prefix': 'xyz',
                'direction': 'in'
            }),
            ('autoLink', {
                'name': 'test-auto-link',
                'address': 'mnop',
                'direction': 'out'
            }),
            ('listener', {
                'port': cls.tester.get_port(),
                'sslProfile': 'server-ssl'
            }),
            ('address', {
                'name': 'pattern-address',
                'pattern': 'a/*/b/#/c',
                'distribution': 'closest'
            })
        ])

        config_2 = Qdrouterd.Config([
            ('router', {
                'mode': 'interior',
                'id': 'R2'
            }),
            ('listener', {
                'role': 'inter-router',
                'port': cls.inter_router_port
            }),
        ])
        cls.router_2 = cls.tester.qdrouterd('test_router_2',
                                            config_2,
                                            wait=True)
        cls.router_1 = cls.tester.qdrouterd('test_router_1',
                                            config_1,
                                            wait=True)
        cls.router_1.wait_router_connected('R2')

    def address(self):
        return self.router_1.addresses[0]

    def run_qdmanage(self,
                     cmd,
                     input=None,
                     expect=Process.EXIT_OK,
                     address=None):
        p = self.popen(['qdmanage'] + cmd.split(' ') + [
            '--bus', address or self.address(), '--indent=-1', '--timeout',
            str(TIMEOUT)
        ],
                       stdin=PIPE,
                       stdout=PIPE,
                       stderr=STDOUT,
                       expect=expect,
                       universal_newlines=True)
        out = p.communicate(input)[0]
        try:
            p.teardown()
        except Exception as e:
            raise Exception(out if out else str(e))
        return out

    def assert_entity_equal(self, expect, actual, copy=None):
        """Copy keys in copy from actual to idenity, then assert maps equal."""
        if copy:
            for k in copy:
                expect[k] = actual[k]
        self.assertEqual(expect, actual)

    def assert_entities_equal(self, expect, actual, copy=None):
        """Do assert_entities_equal on a list of maps."""
        for e, a in zip(expect, actual):
            self.assert_entity_equal(e, a, copy)

    def test_crud(self):
        def check(cmd, expect, copy=None, **kwargs):
            actual = json.loads(self.run_qdmanage(cmd))
            self.assert_entity_equal(expect, actual, copy=copy)

        expect = {'arg1': 'foo', 'type': DUMMY, 'name': 'mydummy2'}
        # create with type, name in attributes
        check('create arg1=foo type=dummy name=mydummy2',
              expect,
              copy=['identity'],
              attributes=json.dumps(expect))
        # create with type, name as arguments
        expect['name'] = 'mydummy'
        check('create name=mydummy type=dummy arg1=foo',
              expect,
              copy=['identity'])
        check('read --name mydummy', expect)
        check('read --identity %s' % expect['identity'], expect)
        expect.update([], arg1='bar', num1=555)
        check('update name=mydummy arg1=bar num1=555', expect)
        check('read --name=mydummy', expect)
        expect.update([], arg1='xxx', num1=888)
        # name outside attributes
        check('update name=mydummy arg1=xxx num1=888', expect)
        check('read --name=mydummy', expect)
        self.run_qdmanage('delete --name mydummy')
        self.run_qdmanage('read --name=mydummy', expect=Process.EXIT_FAIL)

    def test_stdin(self):
        """Test piping from stdin"""
        def check(cmd, expect, input, copy=None):
            actual = json.loads(
                self.run_qdmanage(cmd + " --stdin", input=input))
            self.assert_entity_equal(expect, actual, copy=copy)

        def check_list(cmd, expect_list, input, copy=None):
            actual = json.loads(
                self.run_qdmanage(cmd + " --stdin", input=input))
            self.assert_entities_equal(expect_list, actual, copy=copy)

        expect = {'type': DUMMY, 'name': 'mydummyx', 'arg1': 'foo'}
        check('create', expect, json.dumps(expect), copy=['identity'])

        expect_list = [{
            'type': DUMMY,
            'name': 'mydummyx%s' % i
        } for i in range(3)]
        check_list('create',
                   expect_list,
                   json.dumps(expect_list),
                   copy=['identity'])

        expect['arg1'] = 'bar'
        expect['num1'] = 42
        check('update', expect, json.dumps(expect))

        for i in range(3):
            expect_list[i]['arg1'] = 'bar'
            expect_list[i]['num1'] = i
        check_list('update', expect_list, json.dumps(expect_list))

    def test_query(self):
        def long_type(name):
            return u'org.apache.qpid.dispatch.' + name

        types = ['listener', 'log', 'router']
        long_types = [long_type(name) for name in types]

        qall = json.loads(self.run_qdmanage('query'))
        qall_types = set([e['type'] for e in qall])
        for t in long_types:
            self.assertIn(t, qall_types)

        qlistener = json.loads(self.run_qdmanage('query --type=listener'))
        self.assertEqual([long_type('listener')] * 2,
                         [e['type'] for e in qlistener])
        self.assertEqual(self.router_1.ports[0], int(qlistener[0]['port']))

        qattr = json.loads(self.run_qdmanage('query type name'))
        for e in qattr:
            self.assertEqual(2, len(e))

        def name_type(entities):
            ignore_types = [
                long_type(t)
                for t in ['router.link', 'connection', 'router.address']
            ]
            return set((e['name'], e['type']) for e in entities
                       if e['type'] not in ignore_types)

        self.assertEqual(name_type(qall), name_type(qattr))

    def test_get_schema(self):
        schema = dictify(QdSchema().dump())
        actual = self.run_qdmanage("get-json-schema")
        self.assertEqual(schema, dictify(json.loads(actual)))
        actual = self.run_qdmanage("get-schema")
        self.assertEqual(schema, dictify(json.loads(actual)))

    def test_get_annotations(self):
        """
        The qdmanage GET-ANNOTATIONS call must return an empty dict since we don't support annotations at the moment.
        """
        out = json.loads(self.run_qdmanage("get-annotations"))
        self.assertTrue(len(out) == 0)

    def test_get_types(self):
        out = json.loads(self.run_qdmanage("get-types"))
        self.assertEqual(len(out), TOTAL_ENTITIES)

    def test_get_attributes(self):
        out = json.loads(self.run_qdmanage("get-attributes"))
        self.assertEqual(len(out), 28)

    def test_get_attributes(self):
        out = json.loads(self.run_qdmanage("get-attributes"))
        self.assertEqual(len(out), TOTAL_ENTITIES)

    def test_get_operations(self):
        out = json.loads(self.run_qdmanage("get-operations"))
        self.assertEqual(len(out), TOTAL_ENTITIES)
        self.assertEqual(out['org.apache.qpid.dispatch.sslProfile'],
                         [u'CREATE', u'DELETE', u'READ'])

    def test_get_types_with_ssl_profile_type(self):
        out = json.loads(
            self.run_qdmanage(
                "get-types --type=org.apache.qpid.dispatch.sslProfile"))
        self.assertEqual(out['org.apache.qpid.dispatch.sslProfile'], [
            u'org.apache.qpid.dispatch.configurationEntity',
            u'org.apache.qpid.dispatch.entity'
        ])

    def test_get_ssl_profile_type_attributes(self):
        out = json.loads(
            self.run_qdmanage(
                'get-attributes --type=org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 12)

    def test_get_ssl_profile_attributes(self):
        out = json.loads(
            self.run_qdmanage(
                'get-attributes org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 12)

    def test_get_ssl_profile_type_operations(self):
        out = json.loads(
            self.run_qdmanage(
                'get-operations --type=org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 3)

    def test_get_ssl_profile_operations(self):
        out = json.loads(
            self.run_qdmanage(
                'get-operations org.apache.qpid.dispatch.sslProfile'))
        self.assertEqual(len(out), 1)
        self.assertEqual(len(out['org.apache.qpid.dispatch.sslProfile']), 3)

    def test_get_log(self):
        logs = json.loads(self.run_qdmanage("get-log limit=20"))
        found = False
        for log in logs:
            if u'get-log' in log[2] and ['AGENT', 'debug'] == log[0:2]:
                found = True
        self.assertTrue(found)

    def test_get_logstats(self):
        query_command = 'QUERY --type=logStats'
        logs = json.loads(self.run_qdmanage(query_command))
        # Each value returned by the above query should be
        # a log, and each log should contain an entry for each
        # log level.
        log_levels = [
            'criticalCount', 'debugCount', 'errorCount', 'infoCount',
            'noticeCount', 'traceCount', 'warningCount'
        ]
        n_log_levels = len(log_levels)

        good_logs = 0

        for log_dict in logs:
            log_levels_present = 0
            log_levels_missing = 0
            for log_level in log_levels:
                if log_level in log_dict:
                    log_levels_present += 1
                else:
                    log_levels_missing += 1
            if log_levels_present == n_log_levels:
                good_logs += 1

        self.assertEqual(good_logs, len(logs))

    def test_update(self):
        exception = False
        try:
            # Try to not set 'output'
            json.loads(
                self.run_qdmanage(
                    "UPDATE --type org.apache.qpid.dispatch.log --name log/DEFAULT outputFile="
                ))
        except Exception as e:
            exception = True
            self.assertTrue(
                "InternalServerErrorStatus: CError: Configuration: Failed to open log file ''"
                in str(e))
        self.assertTrue(exception)

        # Set a valid 'output'
        output = json.loads(
            self.run_qdmanage(
                "UPDATE --type org.apache.qpid.dispatch.log --name log/DEFAULT "
                "enable=trace+ outputFile=A.log"))
        self.assertEqual("A.log", output['outputFile'])
        self.assertEqual("trace+", output['enable'])

    def create(self, type, name, port):
        create_command = 'CREATE --type=' + type + ' --name=' + name + ' host=0.0.0.0 port=' + port
        connector = json.loads(self.run_qdmanage(create_command))
        return connector

    def test_check_address_name(self):
        long_type = 'org.apache.qpid.dispatch.router.config.address'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(len(output), 2)
        self.assertEqual(output[0]['name'], "test-address")
        self.assertEqual(output[0]['distribution'], "multicast")
        self.assertEqual(output[0]['prefix'], "abcd")
        self.assertNotIn('pattern', output[0])
        self.assertEqual(output[1]['name'], "pattern-address")
        self.assertEqual(output[1]['distribution'], "closest")
        self.assertEqual(output[1]['pattern'], "a/*/b/#/c")
        self.assertNotIn('prefix', output[1])

    def test_create_address(self):
        long_type = 'org.apache.qpid.dispatch.router.config.address'
        create_command = 'CREATE --type=' + long_type + ' pattern="a.b.#" ingressPhase=5 egressPhase=6'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['egressPhase'], 6)
        self.assertEqual(output['ingressPhase'], 5)

    def test_check_link_route_name(self):
        long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(output[0]['name'], "test-link-route")
        self.assertEqual(output[0]['direction'], "in")
        self.assertEqual(output[0]['dir'], "in")
        self.assertEqual(output[0]['prefix'], "xyz")

    def test_specify_container_id_connection_link_route(self):
        long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
        create_command = 'CREATE --type=' + long_type + ' prefix=abc containerId=id1 connection=conn1 direction=out'
        output = self.run_qdmanage(create_command, expect=Process.EXIT_FAIL)
        self.assertIn("Both connection and containerId cannot be specified",
                      output)

    def test_check_auto_link_name(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(output[0]['name'], "test-auto-link")
        self.assertEqual(output[0]['direction'], "out")
        self.assertEqual(output[0]['addr'], "mnop")

    def test_create_auto_link_with_phase(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        create_command = 'CREATE --type=' + long_type + ' addr=xyz containerId=id1 direction=out phase=2'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['phase'], 2)

    def test_create_auto_link_with_dir(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        create_command = 'CREATE --type=' + long_type + ' addr=defgh containerId=id2 dir=out phase=2'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['dir'], 'out')
        self.assertEqual(output['direction'], 'out')

    def test_create_link_route_with_dir(self):
        long_type = 'org.apache.qpid.dispatch.router.config.linkRoute'
        create_command = 'CREATE --type=' + long_type + ' pattern=mnb dir=out'
        output = json.loads(self.run_qdmanage(create_command))
        self.assertEqual(output['dir'], 'out')
        self.assertEqual(output['direction'], 'out')

    def test_specify_container_id_connection_auto_link(self):
        long_type = 'org.apache.qpid.dispatch.router.config.autoLink'
        create_command = 'CREATE --type=' + long_type + ' addr=abc containerId=id1 connection=conn1 direction=out'
        output = self.run_qdmanage(create_command, expect=Process.EXIT_FAIL)
        self.assertIn("Both connection and containerId cannot be specified",
                      output)

    def test_create_delete_connector(self):
        long_type = 'org.apache.qpid.dispatch.connector'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        name = output[0]['name']

        # Delete an existing connector
        delete_command = 'DELETE --type=' + long_type + ' --name=' + name
        self.run_qdmanage(delete_command)
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(output, [])

        # Re-create the connector and then try wait_connectors
        self.create(long_type, name, str(QdmanageTest.inter_router_port))

        outputs = json.loads(self.run_qdmanage(query_command))
        created = False
        for output in outputs:
            conn_name = 'connector/127.0.0.1:%s' % QdmanageTest.inter_router_port
            conn_name_1 = 'connector/0.0.0.0:%s' % QdmanageTest.inter_router_port
            if conn_name == output['name'] or conn_name_1 == output['name']:
                created = True
                break

        self.assertTrue(created)

    def test_zzz_add_connector(self):
        port = self.get_port()
        # don't provide a role and make sure that role defaults to 'normal'
        command = "CREATE --type=connector --name=eaconn1 port=" + str(
            port) + " host=0.0.0.0"
        output = json.loads(self.run_qdmanage(command))
        self.assertEqual("normal", output['role'])
        # provide the same connector name (eaconn1), expect duplicate value failure
        self.assertRaises(
            Exception, self.run_qdmanage,
            "CREATE --type=connector --name=eaconn1 port=12345 host=0.0.0.0")
        port = self.get_port()
        # provide role as 'normal' and make sure that it is preserved
        command = "CREATE --type=connector --name=eaconn2 port=" + str(
            port) + " host=0.0.0.0 role=normal"
        output = json.loads(self.run_qdmanage(command))
        self.assertEqual("normal", output['role'])

    def test_zzz_create_delete_listener(self):
        long_type = 'org.apache.qpid.dispatch.listener'
        name = 'ealistener'

        listener_port = self.get_port()

        listener = self.create(long_type, name, str(listener_port))
        self.assertEqual(listener['type'], long_type)
        self.assertEqual(listener['name'], name)

        delete_command = 'DELETE --type=' + long_type + ' --name=' + name
        self.run_qdmanage(delete_command)

        exception_occurred = False
        try:
            # Try deleting an already deleted connector, this should raise an exception
            self.run_qdmanage(delete_command)
        except Exception as e:
            exception_occurred = True
            self.assertTrue(("NotFoundStatus: No entity with name=%s" %
                             name) in str(e))

        self.assertTrue(exception_occurred)

    def test_create_delete_ssl_profile(self):
        ssl_profile_name = 'ssl-profile-test'
        ssl_create_command = 'CREATE --type=sslProfile certFile=' + self.ssl_file('server-certificate.pem') + \
                         ' privateKeyFile=' + self.ssl_file('server-private-key.pem') + ' password=server-password' + \
                         ' name=' + ssl_profile_name + ' caCertFile=' + self.ssl_file('ca-certificate.pem')
        output = json.loads(self.run_qdmanage(ssl_create_command))
        self.assertEqual(output['name'], ssl_profile_name)
        self.run_qdmanage('DELETE --type=sslProfile --name=' +
                          ssl_profile_name)

    def test_delete_connection(self):
        """
        This test creates a blocking connection and tries to delete that
        connection using the qdmanage DELETE operation. The DELETE must be
        Forbidden: qdmanage DELETE is not allowed on a connection, only
        qdmanage UPDATE is.
        :return:
        """
        connection = BlockingConnection(
            self.address(), properties=CONNECTION_PROPERTIES_UNICODE_STRING)
        query_command = 'QUERY --type=connection'
        outputs = json.loads(self.run_qdmanage(query_command))
        identity = None
        passed = False
        for output in outputs:
            if output.get('properties'):
                conn_properties = output['properties']
                if conn_properties.get('int_property'):
                    identity = output.get("identity")
                    if identity:
                        delete_command = 'DELETE --type=connection --id=' + identity
                        try:
                            outs = json.loads(
                                self.run_qdmanage(delete_command))
                        except Exception as e:
                            if "Forbidden" in str(e):
                                passed = True

        # The test has passed since we were forbidden from deleting a connection
        # due to lack of policy permissions.
        self.assertTrue(passed)

    def test_create_delete_address_pattern(self):
        config = [('mercury.*.earth.#', 'closest'),
                  ('*/mars/*/#', 'multicast'), ('*.mercury', 'closest'),
                  ('*/#/pluto', 'multicast')]
        long_type = 'org.apache.qpid.dispatch.router.config.address'

        # add patterns:
        pcount = 0
        for p in config:
            query_command = 'CREATE --type=' + long_type + \
                                             ' pattern=' + p[0] + \
                                             ' distribution=' + p[1] + \
                                             ' name=Pattern' + str(pcount)
            self.run_qdmanage(query_command)
            pcount += 1

        # verify correctly added:
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        total = len(output)

        pcount = 0
        for o in output:
            pattern = o.get('pattern')
            if pattern is not None:
                for p in config:
                    if p[0] == pattern:
                        pcount += 1
                        self.assertEqual(p[1], o.get('distribution'))
        self.assertEqual(pcount, len(config))

        # delete
        pcount = 0
        for p in config:
            query_command = 'DELETE --type=' + long_type + \
                                             ' --name=Pattern' + str(pcount)
            self.run_qdmanage(query_command)
            pcount += 1

        # verify deleted:
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(len(output), total - len(config))
        for o in output:
            pattern = o.get('pattern')
            if pattern is not None:
                for p in config:
                    self.assertNotEqual(p[0], pattern)

    def test_yy_query_many_links(self):
        # This test will fail without the fix for DISPATCH-974
        c = BlockingConnection(self.address())
        self.logger = Logger(title="test_yy_query_many_links")
        count = 0
        COUNT = 5000

        ADDRESS_SENDER = "examples-sender"
        ADDRESS_RECEIVER = "examples-receiver"

        # This loop creates 5000 consumer and 5000 producer links with
        # different addresses
        while count < COUNT:
            r = c.create_receiver(ADDRESS_RECEIVER + str(count))
            s = c.create_sender(ADDRESS_SENDER + str(count))
            count += 1

        # Try fetching all 10,000 addresses
        # This qdmanage query command would fail without the fix
        # for DISPATCH-974
        query_command = 'QUERY --type=org.apache.qpid.dispatch.router.address'
        outs = json.loads(self.run_qdmanage(query_command))

        sender_addresses = 0
        receiver_addresses = 0

        for out in outs:
            if ADDRESS_SENDER in out['name']:
                sender_addresses += 1
            if ADDRESS_RECEIVER in out['name']:
                receiver_addresses += 1

        self.assertEqual(sender_addresses, COUNT)
        self.assertEqual(receiver_addresses, COUNT)

        query_command = 'QUERY --type=link'
        outs = json.loads(self.run_qdmanage(query_command))

        out_links = 0
        in_links = 0
        success = False

        i = 0
        while i < 3:
            i += 1
            for out in outs:
                if out.get('owningAddr'):
                    if ADDRESS_SENDER in out['owningAddr']:
                        in_links += 1
                    if ADDRESS_RECEIVER in out['owningAddr']:
                        out_links += 1

            # If the link count is less than COUNT, retry after 2 seconds, up
            # to three attempts (6 seconds total). If the link count is still
            # below the expected count, something is wrong and the test has
            # failed.
            if out_links < COUNT or in_links < COUNT:
                self.logger.log("out_links=%s, in_links=%s" %
                                (str(out_links), str(in_links)))
                sleep(2)
                outs = json.loads(self.run_qdmanage(query_command))
            else:
                self.logger.log("Test success!")
                success = True
                break

        if not success:
            self.logger.dump()

        self.assertEqual(out_links, COUNT)
        self.assertEqual(in_links, COUNT)

    def test_worker_threads(self):
        long_type = 'org.apache.qpid.dispatch.router'
        qd_manager = QdManager(self, address=self.address())
        output = qd_manager.query(long_type)
        self.assertEqual(output[0]['workerThreads'], 4)

    def test_check_memory_usage(self):
        """
        Verify that the process memory usage is present. Non-Linux platforms
        may return zero, so accept that as a valid value.
        """
        long_type = 'org.apache.qpid.dispatch.router'
        query_command = 'QUERY --type=' + long_type
        output = json.loads(self.run_qdmanage(query_command))
        self.assertEqual(len(output), 1)
        mem = output[0].get('memoryUsage')

        if sys.platform.lower().startswith('linux'):
            # @TODO(kgiusti) - linux only for now
            self.assertTrue(mem is not None)
            self.assertTrue(mem >= 0)
        else:
            # @TODO(kgiusti) - update test to handle other platforms as support
            # is added
            self.assertTrue(mem is None)
class OversizeMulticastTransferTest(MessagingHandler):
    """
    This test connects a sender and four receivers. Then it tries to send _count_
    number of messages of the given size through the router or router network.
    """
    def __init__(self,
                 test_class,
                 sender_host,
                 routers,
                 test_address,
                 expect_receives,
                 blocked_by_ingress,
                 blocked_by_interior,
                 message_size=100000,
                 count=10,
                 print_to_console=False):
        """
        Construct an instance of the multicast test
        :param test_class:    test class - has wait-connection function
        :param sender_host:         router for the sender connection
        :param routers:             a list of all the routers for receiver connections
        :param test_address:        sender/receiver AMQP address
        :param expect_receives:     array of expected receive counts
        :param blocked_by_ingress:  true if ingress router blocks
        :param blocked_by_interior: true if edge router messages also blocked by interior
        :param message_size:        in bytes
        :param count:               how many messages to send
        :param print_to_console:    print logs as they happen
        """
        super(OversizeMulticastTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.routers = routers
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_receives = expect_receives  # router array
        self.blocked_by_ingress = blocked_by_ingress
        self.blocked_by_interior = blocked_by_interior
        self.messages = []

        self.sender_conn = None
        self.receiver_conns = [None, None, None, None]  # router array
        self.error = None
        self.sender = None
        self.receivers = [None, None, None, None]  # router array
        self.proxy = None

        self.network_stable = False
        self.n_sent = 0
        self.n_rcvds = [0, 0, 0, 0]  # router array
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborteds = [0, 0, 0, 0]  # router array
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMulticastTransferTest - %s" %
                                    (self.test_address)),
                             print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks

    def timeout(self):
        current = self._current()
        self.error = "Timeout Expired " + current
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        self.logger.log("on_start")

        self.logger.log("on_start: secheduling reactor timeout")
        self.timer = event.reactor.schedule(10, Timeout(self))

        self.logger.log("Waiting for router network to stabilize")
        self.test_class.wait_router_network_connected()
        self.network_stable = True

        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            self.logger.log("on_start: opening receiver connection to %s" %
                            (self.routers[idx].addresses[0]))
            self.receiver_conns[idx] = event.container.connect(
                self.routers[idx].addresses[0])
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            self.logger.log("on_start: Creating receiver %d" % idx)
            self.receivers[idx] = event.container.create_receiver(
                self.receiver_conns[idx], self.test_address)

        self.logger.log("on_start: generating messages")
        for idx in range(self.count):
            # construct the message in identifiable chunks
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, idx, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            m = Message(body=body_msg)
            self.messages.append(m)

        self.logger.log("on_start: opening   sender connection to %s" %
                        (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(
            self.sender_host.addresses[0])

        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn,
                                                    self.test_address)

        self.logger.log("on_start: done")

    def rcvr_idx_of(self, rcvr):
        """
        Given a receiver, as in event.receiver, return
        the router array index of that receiver's router
        :param rcvr:
        :return: integer index of receiver
        """
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if rcvr == self.receivers[idx]:
                return idx
        self.error = "Receiver not found in receivers array."
        self.logger.log(self.error)
        self.logger.dump()
        self._shut_down_test()
        raise Exception(self.error)

    def send(self):
        while self.sender.credit > 0 and self.n_sent < self.count:
            m = self.messages[self.n_sent]
            self.logger.log(
                "send. address:%s message:%d of %s length=%d" %
                (self.test_address, self.n_sent, self.count, self.msg_size))
            self.sender.send(m)
            self.n_sent += 1
        #if self.n_sent == self.count:
        #    self.log_unhandled = True

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        self.logger.log("on_message")
        if self.shut_down:
            return
        idx = self.rcvr_idx_of(event.receiver)
        if self.expect_receives[idx] == 0:
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvds[idx] += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log(
                        "on_connection_remote_close: sender closed with correct condition"
                    )
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                        OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                        event.connection.remote_condition.name,
                        event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if self.receivers[idx]:
                self.receivers[idx].close()
                self.receivers[idx] = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
            if self.receiver_conns[idx]:
                self.receiver_conns[idx].close()
                self.receiver_conns[idx] = None

    def _current(self):
        return (
            "net_stable:%s sent=%d rcvd=%s rejected=%d aborted=%s connection_error:%d send_settled:%d"
            % (self.network_stable, self.n_sent, str(
                self.n_rcvds), self.n_rejected, str(self.n_aborteds),
               self.n_connection_error, self.n_send_settled))

    def _check_done(self):
        self.logger.log("check_done: " + self._current())
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            if self.blocked_by_interior:
                if self.blocked_by_ingress:
                    # Blocked by interior and edge. Expect edge connection to go down
                    # and some of our messages arrive at the edge after it has sent
                    # AMQP close. Those messages are never settled. TODO: Is that OK?
                    done = self.n_rejected == 1 and \
                           self.n_connection_error == 1
                else:
                    # Blocked by interior only. Connection to edge stays up
                    # and all messages must be accounted for.
                    all_received = True
                    for idx in [IDX_INTA, IDX_INTB, IDX_EA1, IDX_EB1]:
                        if self.expect_receives[idx] > 0:
                            if self.n_rcvds[idx] != self.expect_receives[idx]:
                                all_received = False
                    done = self.n_rejected <= 1 and \
                           self.n_send_settled == self.count and \
                           all_received
            else:
                # Blocked by edge should never deliver to interior
                done = self.n_rejected == 1 and \
                       self.n_connection_error == 1

            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        if self.reject:
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        if self.shut_down:
            return
        idx = self.rcvr_idx_of(event.receiver)
        self.n_aborteds[idx] += 1
        self._check_done()

    def on_settled(self, event):
        self.logger.log("on_settled")
        if event.connection == self.sender_conn:
            self.logger.log("on_settled: sender connection")
            self.n_send_settled += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self._shut_down_test()

    def on_link_error(self, event):
        self.error = event.link.remote_condition.name
        self.logger.log("on_link_error: %s" % (self.error))
        # Link errors may prevent normal test shutdown so don't even try.
        raise Exception(self.error)

    def on_reactor_final(self, event):
        self.logger.log("on_reactor_final:")

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" %
                            (method, args))

    def run(self):
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
        time.sleep(0.2)

class OversizeMessageTransferTest(MessagingHandler):
    """
    This test connects a sender and a receiver, then tries to send _count_
    messages of the given size through the router or router network.

    Messages either pass through an edge router and are blocked by an
    interior router, or they are blocked by both the edge and the interior
    routers.

    When 'blocked_by_both' is False:

    * The ingress edge router allows the sender's oversize message.
    * The interior (uplink) router blocks the message by rejecting it and
      closing the connection between the interior and edge routers.
    * The receiver may receive aborted message indications, but that is
      not guaranteed.
    * If any aborted messages are received then the count must be at most one.

    When 'blocked_by_both' is True:

    * The ingress edge router rejects the first message and closes the
      connection.
    * The second message may be aborted because the connection between the
      edge router and the interior router was closed.
    * The remaining messages go into a closed connection and receive no
      settlement.
    """
    def __init__(self,
                 test_class,
                 sender_host,
                 receiver_host,
                 test_address,
                 message_size=100000,
                 count=10,
                 blocked_by_both=False,
                 print_to_console=False):
        """
        Construct an instance of the unicast test
        :param test_class:    test class - has wait-connection function
        :param sender_host:   router for sender connection
        :param receiver_host: router for receiver connection
        :param test_address:  sender/receiver AMQP address
        :param message_size:  in bytes
        :param count:         how many messages to send
        :param blocked_by_both:  true if edge router messages are also blocked by interior
        :param print_to_console: print logs as they happen
        """
        super(OversizeMessageTransferTest, self).__init__()
        self.test_class = test_class
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.blocked_by_both = blocked_by_both
        self.expect_block = True
        self.messages = []

        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None

        self.network_stable = False
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_modified = 0
        self.n_released = 0
        self.n_send_settled = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMessageTransferTest - %s" %
                                    (self.test_address)),
                             print_to_console=print_to_console)
        self.log_unhandled = False  # verbose diagnostics of proton callbacks

    def timeout(self):
        current = (
            "sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d send_settled:%d"
            % (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted,
               self.n_connection_error, self.n_send_settled))
        self.error = "Timeout Expired " + current
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        self.logger.log("on_start")

        self.logger.log("on_start: secheduling reactor timeout")
        self.timer = event.reactor.schedule(10, Timeout(self))

        self.logger.log("Waiting for router network to stabilize")
        self.test_class.wait_router_network_connected()
        self.network_stable = True

        self.logger.log("on_start: generating messages")
        for idx in range(self.count):
            # construct the message body in identifiable chunks
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[idx % 30]
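            # Each chunk starts with "[address:msg-index:offset" and is padded
            # to 50 bytes with a per-message pad character, so any fragment of
            # an oversize or aborted transfer can be traced back to its message
            # and byte offset when debugging.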
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, idx, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            m = Message(body=body_msg)
            self.messages.append(m)

        self.logger.log("on_start: opening receiver connection to %s" %
                        (self.receiver_host.addresses[0]))
        self.receiver_conn = event.container.connect(
            self.receiver_host.addresses[0])

        self.logger.log("on_start: Creating receiver")
        self.receiver = event.container.create_receiver(
            self.receiver_conn, self.test_address)

        self.logger.log("on_start: opening   sender connection to %s" %
                        (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(
            self.sender_host.addresses[0])

        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn,
                                                    self.test_address)

        self.logger.log("on_start: done")

    def send(self):
        while self.sender.credit > 0 and self.n_sent < self.count:
            m = self.messages[self.n_sent]
            self.logger.log(
                "send. address:%s message:%d of %s length=%d" %
                (self.test_address, self.n_sent, self.count, self.msg_size))
            self.sender.send(m)
            self.n_sent += 1
        #if self.n_sent == self.count:
        #    self.log_unhandled = True

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        self.logger.log("on_message: entry")
        if self.expect_block:
            # All messages should violate maxMessageSize.
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvd += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log(
                        "on_connection_remote_close: sender closed with correct condition"
                    )
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                        OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                        event.connection.remote_condition.name,
                        event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        if self.receiver:
            self.receiver.close()
            self.receiver = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        if self.receiver_conn:
            self.receiver_conn.close()
            self.receiver_conn = None

    def _current(self):
        return (
            "net_stable=%s sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d send_settled:%d"
            % (self.network_stable, self.n_sent, self.n_rcvd, self.n_rejected,
               self.n_aborted, self.n_connection_error, self.n_send_settled))

    def _check_done(self):
        self.logger.log("check_done: " + self._current())
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            if not self.blocked_by_both:
                # Blocked by interior only. Connection to edge stays up
                # and all messages must be accounted for.
                done = self.n_rejected == 1 and \
                       self.n_send_settled == self.count
            else:
                # Blocked by interior and edge. Expect edge connection to go down
                # and some of our messages to arrive at the edge after it has sent
                # AMQP close. Those messages are never settled. TODO: Is that OK?
                done = self.n_rejected == 1 and \
                       self.n_connection_error == 1
            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        if self.expect_block:
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        self.n_aborted += 1
        self._check_done()

    def on_settled(self, event):
        self.logger.log("on_settled")
        if event.connection == self.sender_conn:
            self.logger.log("on_settled: sender connection")
            self.n_send_settled += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self.sender_conn.close()
        self.receiver_conn.close()
        self.timer.cancel()

    def on_link_error(self, event):
        self.error = event.link.remote_condition.name
        self.logger.log("on_link_error: %s" % (self.error))
        # Link errors may prevent normal test shutdown so don't even try.
        raise Exception(self.error)

    def on_reactor_final(self, event):
        self.logger.log("on_reactor_final:")

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" %
                            (method, args))

    def run(self):
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
        time.sleep(0.2)
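
# A minimal usage sketch, not part of the original suite: `test_class` must
# provide wait_router_network_connected() and the router objects must expose
# .addresses[0], as required by on_start() above. The address and message
# size are illustrative assumptions.
def _example_run_oversize_unicast(test_class, edge_router):
    test = OversizeMessageTransferTest(test_class,
                                       edge_router,            # sender_host
                                       edge_router,            # receiver_host
                                       "examples/oversize",    # hypothetical address
                                       message_size=200000,
                                       count=10,
                                       blocked_by_both=False)
    test.run()
    return test.error        # None when all messages were blocked as expected
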
Example #18
class ThreadedTestClient(object):
    """
    An HTTP client running in a separate thread
    """

    def __init__(self, tests, port, repeat=1):
        self._id = uuid.uuid4().hex
        self._conn_addr = ("127.0.0.1:%s" % port)
        self._tests = tests
        self._repeat = repeat
        self._logger = Logger(title="TestClient: %s" % self._id,
                              print_to_console=False)
        self._thread = Thread(target=self._run)
        self._thread.daemon = True
        self.error = None
        self.count = 0
        self._thread.start()

    def _run(self):
        self._logger.log("TestClient connecting on %s" % self._conn_addr)
        client = HTTPConnection(self._conn_addr, timeout=TIMEOUT)
        self._logger.log("TestClient connected")
        for loop in range(self._repeat):
            self._logger.log("TestClient start request %d" % loop)
            for op, tests in self._tests.items():
                for req, _, val in tests:
                    self._logger.log("TestClient sending %s %s request" % (op, req.target))
                    req.send_request(client,
                                     {"test-echo": "%s-%s-%s-%s" % (self._id,
                                                                    loop,
                                                                    op,
                                                                    req.target)})
                    self._logger.log("TestClient getting %s response" % op)
                    try:
                        rsp = client.getresponse()
                    except HTTPException as exc:
                        self._logger.log("TestClient response failed: %s" % exc)
                        self.error = str(exc)
                        return
                    self._logger.log("TestClient response %s received" % op)
                    if val:
                        try:
                            body = val.check_response(rsp)
                        except Exception as exc:
                            self._logger.log("TestClient response invalid: %s"
                                             % str(exc))
                            self.error = "client failed: %s" % str(exc)
                            return

                        if req.method == "BODY" and body != b'':
                            self._logger.log("TestClient response invalid: %s"
                                             % "body present!")
                            self.error = "error: body present!"
                            return
                    self.count += 1
                    self._logger.log("TestClient request %s %s completed!" %
                                     (op, req.target))
        client.close()
        self._logger.log("TestClient to %s closed" % self._conn_addr)

    def wait(self, timeout=TIMEOUT):
        self._thread.join(timeout=timeout)
        self._logger.log("TestClient %s shut down" % self._conn_addr)
        sleep(0.5)  # fudge factor to allow socket close to complete

    def dump_log(self):
        self._logger.dump()
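
# A minimal usage sketch, not part of the original suite: the request object
# and response validator are hypothetical stand-ins for whatever helpers the
# real tests define. All that ThreadedTestClient requires is that each request
# has .target, .method and send_request(conn, headers), and that each
# validator (when given) has check_response(rsp) returning the body.
def _example_run_http_client(listener_port, request, validator):
    tests = {"GET": [(request, None, validator)]}   # op -> list of (request, _, validator)
    client = ThreadedTestClient(tests, listener_port, repeat=2)
    client.wait()                 # join the worker thread
    if client.error is not None:
        client.dump_log()         # replay the per-client log on failure
    return client.error, client.count
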
Example #19
def main(argv):
    retval = 0
    logger = None
    # parse args
    p = argparse.ArgumentParser()
    p.add_argument('--port', '-p', help='Required listening port number')
    p.add_argument('--name', help='Optional logger prefix')
    p.add_argument(
        '--echo',
        '-e',
        type=int,
        default=0,
        const=1,
        nargs="?",
        help=
        'Exit after echoing this many bytes. Default value "0" disables exiting on byte count.'
    )
    p.add_argument(
        '--timeout',
        '-t',
        type=float,
        default=0.0,
        const=1,
        nargs="?",
        help='Timeout in seconds. Default value "0" disables timeouts')
    p.add_argument('--log',
                   '-l',
                   action='store_true',
                   help='Write activity log to console')
    del argv[0]
    args = p.parse_args(argv)

    # port
    if args.port is None:
        raise Exception("User must specify a port number")
    port = args.port

    # name / prefix
    prefix = args.name if args.name is not None else "ECHO_SERVER (%s)" % (
        str(port))

    # echo
    if args.echo < 0:
        raise Exception("Echo count must be greater than zero")

    # timeout
    if args.timeout < 0.0:
        raise Exception("Timeout must be greater than or equal to zero")

    signaller = GracefulExitSignaler()
    server = None

    try:
        # logging
        logger = Logger(title="%s port %s" % (prefix, port),
                        print_to_console=args.log,
                        save_for_dump=False)

        server = TcpEchoServer(prefix, port, args.echo, args.timeout, logger)

        keep_running = True
        while keep_running:
            time.sleep(0.1)
            if server.error is not None:
                logger.log("%s Server stopped with error: %s" %
                           (prefix, server.error))
                keep_running = False
                retval = 1
            if server.exit_status is not None:
                logger.log("%s Server stopped with status: %s" %
                           (prefix, server.exit_status))
                keep_running = False
            if signaller.kill_now:
                logger.log("%s Process killed with signal" % prefix)
                keep_running = False
            if keep_running and not server.is_running:
                logger.log("%s Server stopped with no error or status" %
                           prefix)
                keep_running = False

    except Exception:
        if logger is not None:
            logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
        retval = 1

    if server is not None and server.sock is not None:
        server.sock.close()

    return retval
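
# A typical entry point for this echo-server script (an assumption; the real
# __main__ guard is not shown here). main() strips argv[0] itself, so the
# whole sys.argv list is passed in unchanged.
if __name__ == "__main__":
    import sys
    sys.exit(main(sys.argv))
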
class LinkRouteTest(MessagingHandler):
    def __init__(self, first_host, second_host, first_address, second_address,
                 dynamic, lookup_host, routers):
        super(LinkRouteTest, self).__init__(prefetch=0)
        self.logger = Logger(title="LinkRouteTest")
        self.first_host = first_host
        self.second_host = second_host
        self.first_address = first_address
        self.second_address = second_address
        self.dynamic = dynamic
        self.lookup_host = lookup_host
        self.routers = routers
        self.reactor = None

        self.first_conn = None
        self.second_conn = None
        self.error = None
        self.first_sender = None
        self.first_receiver = None
        self.second_sender = None
        self.second_receiver = None
        self.poll_timer = None

        self.count = 10
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_settled = 0

    def timeout(self):
        self.done("Timeout Expired: n_sent=%d n_rcvd=%d n_settled=%d" %
                  (self.n_sent, self.n_rcvd, self.n_settled))

    def poll_timeout(self):
        self.poll()

    def cleanup(self):
        for router in self.routers:
            router.wait_address_unsubscribed("D0.0.0.0/link")

    def done(self, error=None):
        self.error = error
        self.second_conn.close()
        self.first_conn.close()
        self.timer.cancel()
        self.lookup_conn.close()
        if self.poll_timer:
            self.poll_timer.cancel()
        if error:
            self.logger.dump()

        # give proton a chance to close all of the above connections,
        # then wait for the route tables to remove the link route

        class _CleanupTimer:
            def __init__(self, parent):
                self.parent = parent

            def on_timer_task(self, event):
                self.parent.cleanup()

        self.reactor.schedule(1.0, _CleanupTimer(self))

    def send(self):
        self.logger.log("Send")
        while self.first_sender.credit > 0 and self.n_sent < self.count:
            self.n_sent += 1
            m = Message(body="Message %d of %d" % (self.n_sent, self.count))
            self.first_sender.send(m)

    def poll(self):
        self.logger.log("Poll")
        request = self.proxy.read_address("D0.0.0.0/link")
        self.agent_sender.send(request)

    def setup_first_links(self, event):
        self.logger.log("First links")
        self.first_sender = event.container.create_sender(
            self.first_conn, self.first_address)
        if self.dynamic:
            self.first_receiver = event.container.create_receiver(
                self.first_conn,
                dynamic=True,
                options=DynamicNodeProperties(
                    {"x-opt-qd.address": UNICODE(self.first_address)}))
        else:
            self.first_receiver = event.container.create_receiver(
                self.first_conn, self.first_address)

    def on_start(self, event):
        self.logger.log("On Start")
        self.reactor = event.reactor
        self.timer = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.first_conn = event.container.connect(self.first_host)
        self.second_conn = event.container.connect(self.second_host)
        self.lookup_conn = event.container.connect(self.lookup_host)
        self.reply_receiver = event.container.create_receiver(self.lookup_conn,
                                                              dynamic=True)
        self.agent_sender = event.container.create_sender(
            self.lookup_conn, "$management")

    def on_link_opening(self, event):
        if event.sender:
            self.logger.log("On sender link opening")
            self.second_sender = event.sender
            if self.dynamic:
                if event.sender.remote_source.dynamic:
                    event.sender.source.address = self.second_address
                    event.sender.open()
                else:
                    self.done("Expected dynamic source on sender")
            else:
                if event.sender.remote_source.address == self.second_address:
                    event.sender.source.address = self.second_address
                    event.sender.open()
                else:
                    self.done(
                        "Incorrect address on incoming sender: got %s, expected %s"
                        % (event.sender.remote_source.address,
                           self.second_address))

        elif event.receiver:
            self.logger.log("On receiver link opening")
            self.second_receiver = event.receiver
            if event.receiver.remote_target.address == self.second_address:
                event.receiver.target.address = self.second_address
                event.receiver.open()
            else:
                self.done(
                    "Incorrect address on incoming receiver: got %s, expected %s"
                    % (event.receiver.remote_target.address,
                       self.second_address))

    def on_link_opened(self, event):
        self.logger.log("On link opened")
        if event.receiver:
            event.receiver.flow(self.count)

        if event.receiver == self.reply_receiver:
            self.proxy = RouterProxy(self.reply_receiver.remote_source.address)
            self.poll()

    def on_sendable(self, event):
        self.logger.log("On sendable")
        if event.sender == self.first_sender:
            self.send()

    def on_message(self, event):
        if event.receiver == self.first_receiver:
            self.logger.log("On message 1st")
            self.n_rcvd += 1

        if event.receiver == self.reply_receiver:
            self.logger.log("On message reply")
            response = self.proxy.response(event.message)
            if response.status_code == 200 and (response.remoteCount +
                                                response.containerCount) > 0:
                if self.poll_timer:
                    self.poll_timer.cancel()
                    self.poll_timer = None
                self.setup_first_links(event)
            else:
                self.poll_timer = event.reactor.schedule(
                    0.25, PollTimeout(self))

    def on_settled(self, event):
        if event.sender == self.first_sender:
            self.logger.log("On settled")
            self.n_settled += 1
            if self.n_settled == self.count:
                self.done(None)

    def run(self):
        container = Container(self)
        container.container_id = 'LRC'
        container.run()
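
# A minimal usage sketch (hypothetical, not from the original suite): the
# host arguments are router connection URLs, `routers` is a list of router
# objects providing wait_address_unsubscribed() (used by cleanup() above),
# and the two addresses are illustrative link-route prefixes.
def _example_run_link_route(router_a, router_b, routers):
    test = LinkRouteTest(router_a.addresses[0],   # first_host: sender/receiver attach here
                         router_b.addresses[0],   # second_host: link-route destination attaches here
                         "org.apache.first",      # hypothetical first_address
                         "org.apache.second",     # hypothetical second_address
                         dynamic=False,
                         lookup_host=router_a.addresses[0],
                         routers=routers)
    test.run()
    return test.error
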
class MessageRouteAbortTest(MessagingHandler):
    def __init__(self, sender_host, receiver_host, address):
        super(MessageRouteAbortTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.address = address

        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender1 = None
        self.receiver = None
        self.delivery = None
        self.logger = Logger(title="MessageRouteAbortTest")

        self.program = [('D', 10), ('D', 20), ('A', 30), ('A', 40), ('D', 50),
                        ('D', 60), ('A', 100), ('D', 110), ('A', 1000),
                        ('A', 1010), ('A', 1020), ('A', 1030), ('A', 1040),
                        ('D', 1050), ('A', 10000), ('A', 10010), ('A', 10020),
                        ('A', 10030), ('A', 10040), ('D', 10050),
                        ('A', 100000), ('A', 100010), ('A', 100020),
                        ('A', 100030), ('A', 100040), ('D', 100050), ('F', 10)]
        self.result = []
        self.expected_result = [10, 20, 50, 60, 110, 1050, 10050, 100050]

    def timeout(self):
        self.error = "Timeout Expired - Unprocessed Ops: %r, Result: %r" % (
            self.program, self.result)
        self.logger.log(self.error)
        self.sender_conn.close()
        self.receiver_conn.close()

    def on_start(self, event):
        self.logger.log("on_start")
        self.timer = event.reactor.schedule(10.0, Timeout(self))
        self.sender_conn = event.container.connect(self.sender_host)
        self.receiver_conn = event.container.connect(self.receiver_host)
        self.sender1 = event.container.create_sender(self.sender_conn,
                                                     self.address,
                                                     name="S1")
        self.receiver = event.container.create_receiver(
            self.receiver_conn, self.address)

    def send(self):
        if self.delivery:
            self.logger.log(
                "send(): Do not send - delivery to be aborted is in flight")
            return

        op, size = self.program.pop(0) if len(self.program) > 0 else (None,
                                                                      None)
        self.logger.log("send - op=%s, size=%s" % (str(op), str(size)))

        if op is None:
            return

        body = ""
        if op == 'F':
            body = "FINISH"
        else:
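            # Build a body of exactly `size` bytes (every size in self.program
            # is a multiple of 10) from repeated 10-byte chunks of the form
            # ".NNNNNNNNN", a dot followed by the message size zero-padded to
            # nine digits, so received or aborted payloads are easy to identify.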
            bod = str(size)
            bod2 = "0000000000" + bod
            bod3 = "." + bod2[-9:]
            body = bod3 * (size // 10)
        msg = Message(body=body)

        if op in 'DF':
            self.logger.log("send(): Send message size: %d" % (size))
            delivery = self.sender1.send(msg)

        if op == 'A':
            self.logger.log("send(): Start aborted message size: %d" % (size))
            self.delivery = self.sender1.delivery(self.sender1.delivery_tag())
            encoded = msg.encode()
            self.sender1.stream(encoded)

    def finish(self):
        if self.result != self.expected_result:
            self.error = "Expected: %r, Actual: %r" % (self.expected_result,
                                                       self.result)
            self.logger.log(self.error)
        self.sender_conn.close()
        self.receiver_conn.close()
        self.timer.cancel()

    def on_sendable(self, event):
        self.logger.log("on_sendable")
        if event.sender == self.sender1:
            if self.delivery:
                self.delivery.abort()
                self.delivery = None
                self.logger.log("on_sendable aborts delivery")
            else:
                self.send()

    def on_message(self, event):
        m = event.message
        if m.body == "FINISH":
            self.finish()
        else:
            self.logger.log("on_message receives len: %d" % (len(m.body)))
            self.result.append(len(m.body))
            self.send()

    def run(self):
        Container(self).run()
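
# A minimal usage sketch (hypothetical): the hosts are router connection URLs
# and the address is any routed address. expected_result above defines which
# message sizes must survive the interleaved aborted deliveries.
def _example_run_abort_test(sender_url, receiver_url):
    test = MessageRouteAbortTest(sender_url, receiver_url, "abort/test")  # hypothetical address
    test.run()
    return test.error     # None when only the non-aborted sizes arrived
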
Example #22
class FakeBroker(MessagingHandler):
    """
    A fake broker-like service that listens for client connections
    """
    class _Queue:
        def __init__(self, name, logger, dynamic=False):
            self.dynamic = dynamic
            self.queue = collections.deque()
            self.consumers = []
            self.logger = logger
            self.name = name
            self.sent = 0
            self.recv = 0

        def subscribe(self, consumer):
            self.consumers.append(consumer)

        def unsubscribe(self, consumer):
            if consumer in self.consumers:
                self.consumers.remove(consumer)
            return len(self.consumers) == 0 and (self.dynamic
                                                 or len(self.queue) == 0)

        def publish(self, message):
            self.recv += 1
            self.logger.log("Received message %d" % self.recv)
            self.queue.append(message)
            return self.dispatch()

        def dispatch(self, consumer=None):
            if consumer:
                c = [consumer]
            else:
                c = self.consumers
            count = 0
            while True:
                rc = self._deliver_to(c)
                count += rc
                if rc == 0:
                    break
            return count

        def _deliver_to(self, consumers):
            try:
                result = 0
                for c in consumers:
                    if c.credit:
                        c.send(self.queue.popleft())
                        result += 1
                        self.sent += 1
                        self.logger.log("Sent message %d" % self.sent)

                return result
            except IndexError:  # no more messages
                return 0

    def __init__(self, url, container_id=None, **handler_kwargs):
        super(FakeBroker, self).__init__(**handler_kwargs)
        self.url = url
        self.queues = {}
        self.acceptor = None
        self.in_count = 0
        self.out_count = 0
        self.link_errors = 0
        self._connections = []
        self._error = None
        self._container = Container(self)
        self._container.container_id = container_id or 'FakeBroker'
        self._logger = Logger(title=self._container.container_id)
        self._thread = Thread(target=self._main)
        self._thread.daemon = True
        self._stop_thread = False
        self._thread.start()

    def _main(self):
        self._container.timeout = 1.0
        self._container.start()
        self._logger.log("Starting reactor thread")

        while self._container.process():
            if self._stop_thread:
                if self.acceptor:
                    self.acceptor.close()
                    self.acceptor = None
                for c in self._connections:
                    c.close()
                self._connections = []
        self._logger.log("reactor thread done")

    def join(self):
        self._stop_thread = True
        self._container.wakeup()
        self._thread.join(timeout=TIMEOUT)
        self._logger.log("thread done")
        if self._thread.is_alive():
            raise Exception("FakeBroker did not exit")
        if self._error:
            raise Exception(self._error)

    def on_start(self, event):
        self.acceptor = event.container.listen(self.url)

    def _queue(self, address):
        if address not in self.queues:
            self.queues[address] = self._Queue(address, self._logger)
        return self.queues[address]

    def on_link_opening(self, event):
        if event.link.is_sender:
            if event.link.remote_source.dynamic:
                address = str(uuid.uuid4())
                event.link.source.address = address
                q = self._Queue(address, self._logger, True)
                self.queues[address] = q
                q.subscribe(event.link)
                self._logger.log("dynamic sending link opened %s" % address)
            elif event.link.remote_source.address:
                event.link.source.address = event.link.remote_source.address
                self._queue(event.link.source.address).subscribe(event.link)
                self._logger.log("sending link opened %s" %
                                 event.link.source.address)
        elif event.link.remote_target.address:
            event.link.target.address = event.link.remote_target.address
            self._logger.log("receiving link opened %s" %
                             event.link.target.address)

    def _unsubscribe(self, link):
        if link.source.address in self.queues and self.queues[
                link.source.address].unsubscribe(link):
            del self.queues[link.source.address]

    def on_link_error(self, event):
        self._logger.log("link error")
        self.link_errors += 1
        self.on_link_closing(event)

    def on_link_closing(self, event):
        self._logger.log("link closing")
        if event.link.is_sender:
            self._unsubscribe(event.link)

    def on_connection_opening(self, event):
        pn_conn = event.connection
        pn_conn.container = self._container.container_id

    def on_connection_opened(self, event):
        self._logger.log("connection opened")
        self._connections.append(event.connection)

    def on_connection_closing(self, event):
        self.remove_stale_consumers(event.connection)

    def on_connection_closed(self, event):
        self._logger.log("connection closed")
        try:
            self._connections.remove(event.connection)
        except ValueError:
            pass

    def on_disconnected(self, event):
        self.remove_stale_consumers(event.connection)

    def remove_stale_consumers(self, connection):
        link = connection.link_head(Endpoint.REMOTE_ACTIVE)
        while link:
            if link.is_sender:
                self._unsubscribe(link)
            link = link.next(Endpoint.REMOTE_ACTIVE)

    def on_sendable(self, event):
        self.out_count += self._queue(event.link.source.address).dispatch(
            event.link)

    def on_message(self, event):
        self.in_count += 1
        self.out_count += self._queue(event.link.target.address).publish(
            event.message)

    def dump_log(self):
        self._logger.dump()
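
# A minimal usage sketch (an assumption): FakeBroker runs its own reactor
# thread, so a test only needs to point clients, or a router connector, at
# its URL and call join() when finished.
def _example_run_fake_broker(port):
    broker = FakeBroker("amqp://127.0.0.1:%d" % port, container_id="ExampleBroker")
    try:
        # ... attach senders/receivers (or a router) to the broker URL here ...
        pass
    finally:
        broker.join()      # stops the reactor thread; raises if it saw an error
    return broker.in_count, broker.out_count
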
class SwitchoverTest(MessagingHandler):
    def __init__(self, sender_host, primary_host, fallback_host, addr):
        super(SwitchoverTest, self).__init__()
        self.sender_host    = sender_host[0]
        self.primary_host   = primary_host[0]
        self.fallback_host  = fallback_host[0]
        self.sender_name    = sender_host[1]
        self.primary_name   = primary_host[1]
        self.fallback_name  = fallback_host[1]
        self.addr           = addr
        self.count          = 300

        # DISPATCH-2213 back off on logging.
        self.log_sends      = 100  # every 100th send
        self.log_recvs      = 100  # every 100th receive
        self.log_released   = 100  # every 100th sender released

        self.sender_conn    = None
        self.primary_conn   = None
        self.fallback_conn  = None
        self.primary_open   = False
        self.fallback_open  = False
        self.error          = None
        self.n_tx           = 0
        self.n_rx           = 0
        self.n_rel          = 0
        self.phase          = 0
        self.tx_seq         = 0
        self.local_rel      = 0

        self.log_prefix     = "FALLBACK_TEST %s" % self.addr
        self.logger = Logger("SwitchoverTest_%s" % addr, print_to_console=False)
        # Prepend a convenience SERVER line for scraper tool.
        # Then the logs from this test can be merged with the router logs in scraper.
        self.logger.log("SERVER (info) Container Name: %s" % self.addr)
        self.logger.log("%s SwitchoverTest sender:%s primary:%s fallback:%s" %
                        (self.log_prefix, self.sender_name, self.primary_name, self.fallback_name))

    def timeout(self):
        self.error = "Timeout Expired - n_tx=%d, n_rx=%d, n_rel=%d, phase=%d, local_rel=%d" % \
                     (self.n_tx, self.n_rx, self.n_rel, self.phase, self.local_rel)
        self.sender_conn.close()
        self.primary_conn.close()
        self.fallback_conn.close()

    def fail(self, error):
        self.error = error
        self.sender_conn.close()
        self.primary_conn.close()
        self.fallback_conn.close()
        self.timer.cancel()

    def on_start(self, event):
        self.timer              = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.logger.log("%s Opening sender connection to %s" % (self.log_prefix, self.sender_name))
        self.sender_conn        = event.container.connect(self.sender_host)
        self.logger.log("%s Opening primary receiver connection to %s" % (self.log_prefix, self.primary_name))
        self.primary_conn       = event.container.connect(self.primary_host)
        self.logger.log("%s Opening fallback receiver connection to %s" % (self.log_prefix, self.fallback_name))
        self.fallback_conn      = event.container.connect(self.fallback_host)
        self.logger.log("%s Opening primary receiver to %s" % (self.log_prefix, self.primary_name))
        self.primary_receiver   = event.container.create_receiver(self.primary_conn, self.addr, name=(self.addr + "_primary_receiver"))
        self.logger.log("%s Opening fallback receiver to %s" % (self.log_prefix, self.fallback_name))
        self.fallback_receiver  = event.container.create_receiver(self.fallback_conn, self.addr, name=(self.addr + "_fallback_receiver"))
        self.fallback_receiver.source.capabilities.put_object(symbol("qd.fallback"))

    def on_link_opened(self, event):
        receiver_event = False
        if event.receiver == self.primary_receiver:
            self.logger.log("%s Primary receiver opened" % self.log_prefix)
            self.primary_open = True
            receiver_event = True
        if event.receiver == self.fallback_receiver:
            self.logger.log("%s Fallback receiver opened" % self.log_prefix)
            self.fallback_open = True
            receiver_event = True
        if receiver_event and self.primary_open and self.fallback_open:
            self.logger.log("%s Opening sender to %s" % (self.log_prefix, self.sender_name))
            self.sender = event.container.create_sender(self.sender_conn, self.addr, name=(self.addr + "_sender"))

    def on_link_closed(self, event):
        if event.receiver == self.primary_receiver:
            self.logger.log("%s Primary receiver closed. Start phase 1 send" % self.log_prefix)
            self.n_rx = 0
            self.n_tx = 0
            self.send()

    def send(self):
        e_credit = self.sender.credit
        e_n_tx = self.n_tx
        e_tx_seq = self.tx_seq
        last_message = Message("None")
        while self.sender.credit > 0 and self.n_tx < self.count and not self.sender.drain_mode:
            last_message = Message("Msg %s %d %d" % (self.addr, self.tx_seq, self.n_tx))
            self.sender.send(last_message)
            self.n_tx += 1
            self.tx_seq += 1
        if self.sender.drain_mode:
            n_drained = self.sender.drained()
            self.logger.log("%s sender.drained() drained %d credits" % (self.log_prefix, n_drained))
        if self.n_tx > e_n_tx and self.n_tx % self.log_sends == 0:  # if sent then log every Nth message
            self.logger.log("%s send() exit: last sent '%s' phase=%d, credit=%3d->%3d, n_tx=%4d->%4d, tx_seq=%4d->%4d, n_rel=%4d" %
                            (self.log_prefix, last_message.body, self.phase, e_credit, self.sender.credit,
                             e_n_tx, self.n_tx, e_tx_seq, self.tx_seq, self.n_rel))

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.send()
        else:
            self.fail("%s on_sendable event not from the only sender")

    def on_message(self, event):
        if event.receiver == self.primary_receiver:
            if self.phase == 0:
                self.n_rx += 1
                if self.n_rx % self.log_recvs == 0:
                    self.logger.log("%s Received phase 0 message '%s', n_rx=%d" %
                                    (self.log_prefix, event.message.body, self.n_rx))
                if self.n_rx == self.count:
                    self.logger.log("%s Triggering fallback by closing primary receiver on %s. Test phase 0->1." %
                                    (self.log_prefix, self.primary_name))
                    self.phase = 1
                    self.primary_receiver.close()
            else:
                # Phase 1 messages are unexpected on primary receiver
                self.logger.log("%s Phase %d message received on primary: '%s'" % (self.log_prefix, self.phase, event.message.body))
                self.fail("Receive phase1 message on primary receiver")
        elif event.receiver == self.fallback_receiver:
            if self.phase == 0:
                # Phase 0 message over fallback receiver. This may happen because
                # primary receiver is on a distant router and the fallback receiver is local.
                # Release the message to keep trying until the primary receiver kicks in.
                self.release(event.delivery)
                self.n_rel += 1
                self.n_tx -= 1
                self.local_rel += 1
                if self.local_rel % self.log_recvs == 0:
                    self.logger.log("%s Released phase 0 over fallback: msg:'%s', n_rx=%d, n_tx=%d, n_rel=%d, local_rel=%d" %
                                    (self.log_prefix, event.message.body, self.n_rx, self.n_tx, self.n_rel, self.local_rel))
                    time.sleep(0.02)
            else:
                self.n_rx += 1
                if self.n_rx % self.log_recvs == 0:
                    self.logger.log("%s Received phase 1 over fallback: msg:'%s', n_rx=%d" %
                                    (self.log_prefix, event.message.body, self.n_rx))
                if self.n_rx == self.count:
                    self.logger.log("%s Success" % self.log_prefix)
                    self.fail(None)
        else:
            self.fail("%s message received on unidentified receiver" % self.addr)

    def on_released(self, event):
        # event type pn_delivery for sender
        self.n_rel += 1
        self.n_tx  -= 1
        if self.n_rel % self.log_released == 0:
            self.logger.log("%s on_released: sender delivery was released. Adjusted counts: n_rel=%d, n_tx=%d" %
                            (self.log_prefix, self.n_rel, self.n_tx))
        if event.sender is None:
            self.fail("on_released event not related to sender")

    def run(self):
        Container(self).run()
        if self.error is not None:
            self.logger.dump()
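
# A minimal usage sketch (hypothetical): each *_host argument is a
# (connection-url, router-name) pair, matching how __init__ above unpacks
# them. The router names and address are illustrative; the fallback receiver
# must attach to a router that honors the qd.fallback source capability.
def _example_run_switchover(sender_router, primary_router, fallback_router):
    test = SwitchoverTest((sender_router.addresses[0], 'INTA'),
                          (primary_router.addresses[0], 'INTB'),
                          (fallback_router.addresses[0], 'EA1'),
                          addr='fallback/address')                 # hypothetical address
    test.run()
    return test.error
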
    def setUpClass(cls):
        """Start a router"""
        super(TcpAdaptor, cls).setUpClass()

        if DISABLE_SELECTOR_TESTS:
            return

        def router(name, mode, connection, extra=None):
            """
            Launch a router through the system_test framework.
            For each router:
             * normal listener first
             #* http listener for console connections
             * tcp listener for 'nodest', which will never exist
             * tcp connector to echo server whose address is the same as this router's name
             * six tcp listeners, one for each server on each router on the network
            :param name: router name
            :param mode: router mode: interior or edge
            :param connection: list of router-level connection/listener tuples
            :param extra: yet more configuration tuples; unused for now
            :return:
            """
            config = [
                ('router', {
                    'mode': mode,
                    'id': name
                }),
                ('listener', {
                    'port': cls.amqp_listener_ports[name]
                }),
                # ('listener', {'port': cls.http_listener_ports[name], 'http': 'yes'}),
                ('tcpListener', {
                    'host': "0.0.0.0",
                    'port': cls.nodest_listener_ports[name],
                    'address': 'nodest',
                    'siteId': cls.site
                }),
                ('tcpConnector', {
                    'host': "127.0.0.1",
                    'port': cls.tcp_server_listener_ports[name],
                    'address': 'ES_' + name,
                    'siteId': cls.site
                })
            ]
            if connection:
                config.extend(connection)
            listeners = []
            for rtr in cls.router_order:
                listener = {
                    'host': "0.0.0.0",
                    'port': cls.tcp_client_listener_ports[name][rtr],
                    'address': 'ES_' + rtr,
                    'siteId': cls.site
                }
                listeners.append(('tcpListener', listener))
            config.extend(listeners)

            if extra:
                config.extend(extra)

            config = Qdrouterd.Config(config)
            cls.routers.append(cls.tester.qdrouterd(name, config, wait=True))

        cls.routers = []

        # define logging levels
        cls.print_logs_server = False
        cls.print_logs_client = True
        cls.logger = Logger(title="TcpAdaptor-testClass",
                            print_to_console=True,
                            save_for_dump=False,
                            ofilename='../setUpClass/TcpAdaptor.log')
        # Write a dummy log line for scraper.
        cls.logger.log("SERVER (info) Container Name: TCP_TEST")

        # Allocate echo server ports first
        for rtr in cls.router_order:
            cls.tcp_server_listener_ports[rtr] = cls.tester.get_port()

        # start echo servers immediately after the echo server
        # ports are assigned.
        for rtr in cls.router_order:
            test_name = "TcpAdaptor"
            server_prefix = "ECHO_SERVER %s ES_%s" % (test_name, rtr)
            server_logger = Logger(
                title=test_name,
                print_to_console=cls.print_logs_server,
                save_for_dump=False,
                ofilename="../setUpClass/TcpAdaptor_echo_server_%s.log" % rtr)
            cls.logger.log("TCP_TEST Launching echo server '%s'" %
                           server_prefix)
            server = TcpEchoServer(prefix=server_prefix,
                                   port=cls.tcp_server_listener_ports[rtr],
                                   logger=server_logger)
            assert server.is_running
            cls.echo_servers[rtr] = server

        cls.EC2_conn_stall_connector_port = cls.tester.get_port()
        # start special naughty servers that misbehave on purpose
        server_prefix = "ECHO_SERVER TcpAdaptor NS_EC2_CONN_STALL"
        server_logger = Logger(
            title="TcpAdaptor",
            print_to_console=cls.print_logs_server,
            save_for_dump=False,
            ofilename="../setUpClass/TcpAdaptor_echo_server_NS_CONN_STALL.log")
        cls.logger.log("TCP_TEST Launching echo server '%s'" % server_prefix)
        server = TcpEchoServer(prefix=server_prefix,
                               port=cls.EC2_conn_stall_connector_port,
                               logger=server_logger,
                               conn_stall=Q2_DELAY_SECONDS)
        assert server.is_running
        cls.echo_server_NS_CONN_STALL = server

        # Allocate a sea of router ports
        for rtr in cls.router_order:
            cls.amqp_listener_ports[rtr] = cls.tester.get_port()
            tl_ports = {}
            for tcp_listener in cls.router_order:
                tl_ports[tcp_listener] = cls.tester.get_port()
            cls.tcp_client_listener_ports[rtr] = tl_ports
            cls.nodest_listener_ports[rtr] = cls.tester.get_port()

        inter_router_port_AB = cls.tester.get_port()
        cls.INTA_edge_port = cls.tester.get_port()
        cls.INTA_conn_stall_listener_port = cls.tester.get_port()

        # Launch the routers using the sea of router ports
        router('INTA', 'interior',
               [('listener', {
                   'role': 'inter-router',
                   'port': inter_router_port_AB
               }),
                ('listener', {
                    'name': 'uplink',
                    'role': 'edge',
                    'port': cls.INTA_edge_port
                }),
                ('tcpListener', {
                    'host': "0.0.0.0",
                    'port': cls.INTA_conn_stall_listener_port,
                    'address': 'NS_EC2_CONN_STALL',
                    'siteId': cls.site
                })])
        inter_router_port_BC = cls.tester.get_port()
        cls.INTB_edge_port = cls.tester.get_port()
        router('INTB', 'interior', [('connector', {
            'role': 'inter-router',
            'port': inter_router_port_AB
        }), ('listener', {
            'role': 'inter-router',
            'port': inter_router_port_BC
        }),
                                    ('listener', {
                                        'name': 'uplink',
                                        'role': 'edge',
                                        'port': cls.INTB_edge_port
                                    })])

        cls.INTC_edge_port = cls.tester.get_port()
        router('INTC', 'interior', [('connector', {
            'role': 'inter-router',
            'port': inter_router_port_BC
        }),
                                    ('listener', {
                                        'name': 'uplink',
                                        'role': 'edge',
                                        'port': cls.INTC_edge_port
                                    })])

        router('EA1', 'edge', [('connector', {
            'name': 'uplink',
            'role': 'edge',
            'port': cls.INTA_edge_port
        })])
        router('EA2', 'edge', [('connector', {
            'name': 'uplink',
            'role': 'edge',
            'port': cls.INTA_edge_port
        })])
        router('EB1', 'edge', [('connector', {
            'name': 'uplink',
            'role': 'edge',
            'port': cls.INTB_edge_port
        })])
        router('EB2', 'edge', [('connector', {
            'name': 'uplink',
            'role': 'edge',
            'port': cls.INTB_edge_port
        })])
        router('EC1', 'edge', [('connector', {
            'name': 'uplink',
            'role': 'edge',
            'port': cls.INTC_edge_port
        })])
        cls.EC2_conn_stall_listener_port = cls.tester.get_port()
        router('EC2', 'edge', [('connector', {
            'name': 'uplink',
            'role': 'edge',
            'port': cls.INTC_edge_port
        }),
                               ('tcpConnector', {
                                   'host': "127.0.0.1",
                                   'port': cls.EC2_conn_stall_connector_port,
                                   'address': 'NS_EC2_CONN_STALL',
                                   'siteId': cls.site
                               }),
                               ('tcpListener', {
                                   'host': "0.0.0.0",
                                   'port': cls.EC2_conn_stall_listener_port,
                                   'address': 'NS_EC2_CONN_STALL',
                                   'siteId': cls.site
                               })])

        cls.INTA = cls.routers[0]
        cls.INTB = cls.routers[1]
        cls.INTC = cls.routers[2]
        cls.EA1 = cls.routers[3]
        cls.EA2 = cls.routers[4]
        cls.EB1 = cls.routers[5]
        cls.EB2 = cls.routers[6]
        cls.EC1 = cls.routers[7]
        cls.EC2 = cls.routers[8]

        cls.router_dict = {}
        cls.router_dict['INTA'] = cls.INTA
        cls.router_dict['INTB'] = cls.INTB
        cls.router_dict['INTC'] = cls.INTC
        cls.router_dict['EA1'] = cls.EA1
        cls.router_dict['EA2'] = cls.EA2
        cls.router_dict['EB1'] = cls.EB1
        cls.router_dict['EB2'] = cls.EB2
        cls.router_dict['EC1'] = cls.EC1
        cls.router_dict['EC2'] = cls.EC2

        cls.logger.log("TCP_TEST INTA waiting for connection to INTB")
        cls.INTA.wait_router_connected('INTB')
        cls.logger.log("TCP_TEST INTB waiting for connection to INTA")
        cls.INTB.wait_router_connected('INTA')
        cls.logger.log("TCP_TEST INTB waiting for connection to INTC")
        cls.INTB.wait_router_connected('INTC')
        cls.logger.log("TCP_TEST INTC waiting for connection to INTB")
        cls.INTC.wait_router_connected('INTB')

        # Create a scoreboard for the ports
        p_out = []
        for rtr in cls.router_order:
            p_out.append("%s_amqp=%d" % (rtr, cls.amqp_listener_ports[rtr]))
            p_out.append("%s_echo_server=%d" %
                         (rtr, cls.tcp_server_listener_ports[rtr]))
            for tcp_listener in cls.router_order:
                p_out.append(
                    "%s_echo_listener_for_%s=%d" %
                    (rtr, tcp_listener,
                     cls.tcp_client_listener_ports[rtr][tcp_listener]))
            p_out.append("%s_nodest_listener=%d" %
                         (rtr, cls.nodest_listener_ports[rtr]))
            # p_out.append("%s_http_listener=%d" %
            #             (rtr, cls.http_listener_ports[rtr]))
        p_out.append("inter_router_port_AB=%d" % inter_router_port_AB)
        p_out.append("inter_router_port_BC=%d" % inter_router_port_BC)
        p_out.append("INTA_edge_port=%d" % cls.INTA_edge_port)
        p_out.append("INTB_edge_port=%d" % cls.INTB_edge_port)
        p_out.append("INTC_edge_port=%d" % cls.INTC_edge_port)
        p_out.append("EC2_conn_stall_connector_port%d" %
                     cls.EC2_conn_stall_connector_port)
        p_out.append("INTA_conn_stall_listener_port%d" %
                     cls.INTA_conn_stall_listener_port)
        p_out.append("EC2_conn_stall_listener_port%d" %
                     cls.EC2_conn_stall_listener_port)

        # write to log
        for line in p_out:
            cls.logger.log("TCP_TEST %s" % line)

        # write to shell script
        with open("../setUpClass/TcpAdaptor-ports.sh", 'w') as o_file:
            for line in p_out:
                o_file.write("set %s\n" % line)

        # Write a script to run scraper on this test's log files
        scraper_abspath = os.path.join(os.environ.get('BUILD_DIR'), 'tests',
                                       'scraper', 'scraper.py')
        logs_dir = os.path.abspath("../setUpClass")
        main_log = "TcpAdaptor.log"
        echo_logs = "TcpAdaptor_echo*"
        big_test_log = "TcpAdaptor_all.log"
        int_logs = "I*.log"
        edge_logs = "E*.log"
        log_modules_spec = "--log-modules TCP_ADAPTOR,TCP_TEST,ECHO_SERVER,ECHO_CLIENT"
        html_output = "TcpAdaptor.html"

        with open("../setUpClass/TcpAdaptor-run-scraper.sh", 'w') as o_file:
            o_file.write("#!/bin/bash\n\n")
            o_file.write(
                "# Script to run scraper on test class TcpAdaptor test result\n"
            )
            o_file.write("# cd into logs directory\n")
            o_file.write("cd %s\n\n" % logs_dir)
            o_file.write("# Concatenate test class logs into single file\n")
            o_file.write("cat %s %s > %s\n\n" %
                         (main_log, echo_logs, big_test_log))
            o_file.write("# run scraper\n")
            o_file.write("python %s %s -f %s %s %s > %s\n\n" %
                         (scraper_abspath, log_modules_spec, int_logs,
                          edge_logs, big_test_log, html_output))
            o_file.write("echo View the results by opening the html file\n")
            o_file.write("echo     firefox %s" %
                         (os.path.join(logs_dir, html_output)))

        # wait for server addresses (mobile ES_<rtr>) to propagate to all interior routers
        interior_rtrs = [
            rtr for rtr in cls.router_order if rtr.startswith('I')
        ]
        found_all = False
        while not found_all:
            found_all = True
            cls.logger.log(
                "TCP_TEST Poll wait for echo server addresses to propagate")
            for rtr in interior_rtrs:
                # query each interior for addresses
                p = Process([
                    'qdstat', '-b',
                    str(cls.router_dict[rtr].addresses[0]), '-a'
                ],
                            name='qdstat-snap1',
                            stdout=PIPE,
                            expect=None,
                            universal_newlines=True)
                out = p.communicate()[0]
                # examine what this router can see; signal poll loop to continue or not
                lines = out.split("\n")
                server_lines = [
                    line for line in lines
                    if "mobile" in line and "ES_" in line
                ]
                if len(server_lines) != len(cls.router_order):
                    found_all = False
                    seen = []
                    for line in server_lines:
                        flds = line.split()
                        seen.extend(
                            [fld for fld in flds if fld.startswith("ES_")])
                    unseen = [
                        srv for srv in cls.router_order
                        if "ES_" + srv not in seen
                    ]
                    cls.logger.log(
                        "TCP_TEST Router %s sees only %d of %d addresses. Waiting for %s"
                        % (rtr, len(server_lines), len(
                            cls.router_order), unseen))
        cls.logger.log("TCP_TEST Done poll wait")
Example #25
class WaypointTest(MessagingHandler):
    def __init__(self, first_host, second_host, first_address, second_address, container_id="ALC"):
        super(WaypointTest, self).__init__()
        self.first_host     = first_host
        self.second_host    = second_host
        self.first_address  = first_address
        self.second_address = second_address
        self.container_id   = container_id
        self.logger = Logger(title="WaypointTest")

        self.first_conn        = None
        self.second_conn       = None
        self.error             = None
        self.first_sender      = None
        self.first_sender_created = False
        self.first_sender_link_opened = False
        self.first_receiver    = None
        self.first_receiver_created    = False
        self.waypoint_sender   = None
        self.waypoint_receiver = None
        self.waypoint_queue    = []
        self.waypoint_sender_opened = False
        self.waypoint_receiver_opened = False
        self.firsts_created = False

        self.count  = 10
        self.n_sent = 0
        self.n_rcvd = 0
        self.n_waypoint_rcvd = 0
        self.n_thru = 0
        self.outs = None

    def timeout(self):
        self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_thru=%d n_waypoint_rcvd=%d" % (self.n_sent, self.n_rcvd, self.n_thru, self.n_waypoint_rcvd)
        self.first_conn.close()
        self.second_conn.close()
        self.logger.dump()

    def fail(self, text):
        self.error = text
        self.second_conn.close()
        self.first_conn.close()
        self.timer.cancel()
        self.outs = "n_sent=%d n_rcvd=%d n_thru=%d n_waypoint_rcvd=%d" % (self.n_sent, self.n_rcvd, self.n_thru, self.n_waypoint_rcvd)
        print(self.outs)

    def send_client(self):
        while self.first_sender.credit > 0 and self.n_sent < self.count:
            self.n_sent += 1
            m = Message(body="Message %d of %d" % (self.n_sent, self.count))
            self.first_sender.send(m)

    def send_waypoint(self):
        self.logger.log("send_waypoint called")
        sent_any = False
        while self.waypoint_sender.credit > 0 and len(self.waypoint_queue) > 0:
            self.n_thru += 1
            m = self.waypoint_queue.pop()
            self.waypoint_sender.send(m)
            self.logger.log("waypoint_sender message sent")
            sent_any = True
        if not sent_any:
            self.logger.log("waypoint_sender did not send - credit = %s, len(self.waypoint_queue) = %s" %
                            (str(self.waypoint_sender.credit), str(len(self.waypoint_queue))))

    def on_start(self, event):
        self.timer       = event.reactor.schedule(TIMEOUT, TestTimeout(self))
        self.first_conn  = event.container.connect(self.first_host)
        self.second_conn = event.container.connect(self.second_host)

    def on_link_flow(self, event):
        if event.sender == self.waypoint_sender and self.first_sender_link_opened and not self.first_sender_created:
            self.first_sender_created = True
            self.first_sender = event.container.create_sender(self.first_conn, self.first_address)

    def on_link_opened(self, event):
        if event.receiver == self.waypoint_receiver and not self.first_sender_link_opened:
            self.first_sender_link_opened = True

    def on_link_opening(self, event):
        if event.sender and not self.waypoint_sender:
            self.waypoint_sender = event.sender
            if event.sender.remote_source.address == self.second_address:
                event.sender.source.address = self.second_address
                event.sender.open()
                self.waypoint_sender_opened = True
            else:
                self.fail("Incorrect address on incoming sender: got %s, expected %s" %
                          (event.sender.remote_source.address, self.second_address))

        elif event.receiver and not self.waypoint_receiver:
            self.waypoint_receiver = event.receiver
            if event.receiver.remote_target.address == self.second_address:
                event.receiver.target.address = self.second_address
                event.receiver.open()
                self.waypoint_receiver_opened = True
            else:
                self.fail("Incorrect address on incoming receiver: got %s, expected %s" %
                          (event.receiver.remote_target.address, self.second_address))

        if self.waypoint_sender_opened and self.waypoint_receiver_opened and not self.first_receiver_created:
            self.first_receiver_created = True
            self.first_receiver = event.container.create_receiver(self.first_conn, self.first_address)

    def on_sendable(self, event):
        if event.sender == self.first_sender:
            self.send_client()

    def on_message(self, event):
        if event.receiver == self.first_receiver:
            self.n_rcvd += 1
            if self.n_rcvd == self.count and self.n_thru == self.count:
                self.fail(None)
        elif event.receiver == self.waypoint_receiver:
            self.n_waypoint_rcvd += 1
            m = Message(body=event.message.body)
            self.waypoint_queue.append(m)
            self.send_waypoint()

    def run(self):
        container = Container(self)
        container.container_id = self.container_id
        container.run()
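A hedged usage sketch for the handler above: the router fixtures (router_a, router_b) and the two addresses are placeholders for whatever the enclosing test case configures as the client-facing address and the waypoint address, not values taken from this file.

# Hypothetical driver for WaypointTest; fixtures and addresses are placeholders.
test = WaypointTest(router_a.addresses[0],   # host for the client sender/receiver
                    router_b.addresses[0],   # host offering the waypoint links
                    "waypoint.address",      # first_address used by the client links
                    "waypoint.address.wp",   # second_address validated on the waypoint links
                    container_id="ALC")
test.run()
if test.error:
    raise Exception(test.error)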
Example #26
class OversizeMessageTransferTest(MessagingHandler):
    """
    This test connects a sender and a receiver. Then it tries to send _count_
    number of messages of the given size through the router or router network.

    With expect_block=True the ingress router should detect the sender's oversize
    message and close the sender connection. The receiver may see aborted-message
    indications, but that is not guaranteed; if any aborts are seen, at most one
    is allowed. The test is a success when the sender receives a connection error
    with the oversize indication and the receiver has not seen too many aborts.

    With expect_block=False sender messages should be received normally.
    The test is a success when n_accepted == count.
    """
    def __init__(self, sender_host, receiver_host, test_address,
                 message_size=100000, count=10, expect_block=True, print_to_console=False):
        super(OversizeMessageTransferTest, self).__init__()
        self.sender_host = sender_host
        self.receiver_host = receiver_host
        self.test_address = test_address
        self.msg_size = message_size
        self.count = count
        self.expect_block = expect_block

        self.sender_conn = None
        self.receiver_conn = None
        self.error = None
        self.sender = None
        self.receiver = None
        self.proxy = None

        self.n_sent = 0
        self.n_rcvd = 0
        self.n_accepted = 0
        self.n_rejected = 0
        self.n_aborted = 0
        self.n_connection_error = 0
        self.shut_down = False

        self.logger = Logger(title=("OversizeMessageTransferTest - %s" % (self.test_address)), print_to_console=print_to_console)
        self.log_unhandled = False

    def timeout(self):
        self.error = "Timeout Expired: n_sent=%d n_rcvd=%d n_rejected=%d n_aborted=%d" % \
                     (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted)
        self.logger.log("self.timeout " + self.error)
        self._shut_down_test()

    def on_start(self, event):
        self.logger.log("on_start")
        self.timer = event.reactor.schedule(10, Timeout(self))
        self.logger.log("on_start: opening receiver connection to %s" % (self.receiver_host.addresses[0]))
        self.receiver_conn = event.container.connect(self.receiver_host.addresses[0])
        self.logger.log("on_start: opening   sender connection to %s" % (self.sender_host.addresses[0]))
        self.sender_conn = event.container.connect(self.sender_host.addresses[0])
        self.logger.log("on_start: Creating receiver")
        self.receiver = event.container.create_receiver(self.receiver_conn, self.test_address)
        self.logger.log("on_start: Creating sender")
        self.sender = event.container.create_sender(self.sender_conn, self.test_address)
        self.logger.log("on_start: done")

    def send(self):
        while self.sender.credit > 0 and self.n_sent < self.count:
            # construct message in identifiable chunks
            body_msg = ""
            padchar = "abcdefghijklmnopqrstuvwxyz@#$%"[self.n_sent % 30]
            while len(body_msg) < self.msg_size:
                chunk = "[%s:%d:%d" % (self.test_address, self.n_sent, len(body_msg))
                padlen = 50 - len(chunk)
                chunk += padchar * padlen
                body_msg += chunk
            if len(body_msg) > self.msg_size:
                body_msg = body_msg[:self.msg_size]
            self.logger.log("send. address:%s message:%d of %s length=%d" %
                            (self.test_address, self.n_sent, self.count, self.msg_size))
            m = Message(body=body_msg)
            self.sender.send(m)
            self.n_sent += 1

    def on_sendable(self, event):
        if event.sender == self.sender:
            self.logger.log("on_sendable")
            self.send()

    def on_message(self, event):
        if self.expect_block:
            # All messages should violate maxMessageSize.
            # Receiving any is an error.
            self.error = "Received a message. Expected to receive no messages."
            self.logger.log(self.error)
            self._shut_down_test()
        else:
            self.n_rcvd += 1
            self.accept(event.delivery)
            self._check_done()

    def on_connection_remote_close(self, event):
        if self.shut_down:
            return
        if event.connection == self.sender_conn:
            if event.connection.remote_condition is not None:
                if event.connection.remote_condition.name == OVERSIZE_CONDITION_NAME and \
                   event.connection.remote_condition.description == OVERSIZE_CONDITION_DESC:
                    self.logger.log("on_connection_remote_close: sender closed with correct condition")
                    self.n_connection_error += 1
                    self.sender_conn.close()
                    self.sender_conn = None
                else:
                    # sender closed but for wrong reason
                    self.error = "sender close error: Expected name: %s, description: %s, but received name: %s, description: %s" % (
                                 OVERSIZE_CONDITION_NAME, OVERSIZE_CONDITION_DESC,
                                 event.connection.remote_condition.name, event.connection.remote_condition.description)
                    self.logger.log(self.error)
            else:
                self.error = "sender close error: Expected a remote_condition but there was none."
                self.logger.log(self.error)
        else:
            # connection error but not for sender
            self.error = "unexpected connection close error: wrong connection closed."
            self.logger.log(self.error)
        self._check_done()

    def _shut_down_test(self):
        self.shut_down = True
        if self.timer:
            self.timer.cancel()
            self.timer = None
        if self.sender:
            self.sender.close()
            self.sender = None
        if self.receiver:
            self.receiver.close()
            self.receiver = None
        if self.sender_conn:
            self.sender_conn.close()
            self.sender_conn = None
        if self.receiver_conn:
            self.receiver_conn.close()
            self.receiver_conn = None

    def _check_done(self):
        current = ("check_done: sent=%d rcvd=%d rejected=%d aborted=%d connection_error:%d" %
                   (self.n_sent, self.n_rcvd, self.n_rejected, self.n_aborted, self.n_connection_error))
        self.logger.log(current)
        if self.error is not None:
            self.logger.log("TEST FAIL")
            self._shut_down_test()
        else:
            done = (self.n_connection_error == 1) \
                    if self.expect_block else \
                    (self.n_sent == self.count and self.n_rcvd == self.count)

            if done:
                self.logger.log("TEST DONE!!!")
                # self.log_unhandled = True # verbose debugging
                self._shut_down_test()

    def on_rejected(self, event):
        self.n_rejected += 1
        if self.expect_block:
            self.logger.log("on_rejected: entry")
            self._check_done()
        else:
            self.error = "Unexpected on_reject"
            self.logger.log(self.error)
            self._check_done()

    def on_aborted(self, event):
        self.logger.log("on_aborted")
        self.n_aborted += 1
        self._check_done()

    def on_error(self, event):
        self.error = "Container error"
        self.logger.log(self.error)
        self._shut_down_test()

    def on_unhandled(self, method, *args):
        if self.log_unhandled:
            self.logger.log("on_unhandled: method: %s, args: %s" % (method, args))

    def run(self):
        try:
            Container(self).run()
        except Exception as e:
            self.error = "Container run exception: %s" % (e)
            self.logger.log(self.error)
            self.logger.dump()
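A hedged usage sketch mirroring the constructor and docstring above; ingress_router, egress_router, the test address, and the assumption that the configured maxMessageSize is below the 200000-byte payload are all placeholders supplied by the enclosing test class.

# Hypothetical blocked-transfer check: the ingress router is assumed to enforce
# a maxMessageSize smaller than the 200000-byte payload, so its connection to
# the sender should be closed with the oversize condition.
test = OversizeMessageTransferTest(ingress_router, egress_router,
                                   "oversize.address",
                                   message_size=200000, count=10,
                                   expect_block=True)
test.run()
if test.error is not None:
    raise Exception(test.error)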
Example #27
def main(argv):
    retval = 0
    # parse args
    p = argparse.ArgumentParser()
    p.add_argument('--host', '-b', help='Required target host')
    p.add_argument('--port',
                   '-p',
                   type=int,
                   help='Required target port number')
    p.add_argument(
        '--size',
        '-s',
        type=int,
        default=100,
        const=1,
        nargs='?',
        help=
        'Size of payload in bytes must be >= 0. Size of zero connects and disconnects with no data traffic.'
    )
    p.add_argument(
        '--count',
        '-c',
        type=int,
        default=1,
        const=1,
        nargs='?',
        help=
        'Number of payloads to process must be >= 0. Count of zero connects and disconnects with no data traffic.'
    )
    p.add_argument('--name', help='Optional logger prefix')
    p.add_argument(
        '--timeout',
        '-t',
        type=float,
        default=0.0,
        const=1,
        nargs="?",
        help='Timeout in seconds. Default value "0" disables timeouts')
    p.add_argument('--log',
                   '-l',
                   action='store_true',
                   help='Write activity log to console')
    del argv[0]
    args = p.parse_args(argv)

    # host
    if args.host is None:
        raise Exception("User must specify a host")
    host = args.host

    # port
    if args.port is None:
        raise Exception("User must specify a port number")
    port = args.port

    # size
    if args.size < 0:
        raise Exception("Size must be greater than or equal to zero")
    size = args.size

    # count
    if args.count < 0:
        raise Exception("Count must be greater than or equal to zero")
    count = args.count

    # name / prefix
    prefix = args.name if args.name is not None else "ECHO_CLIENT (%d_%d_%d)" % \
                                                     (port, size, count)

    # timeout
    if args.timeout < 0.0:
        raise Exception("Timeout must be greater than or equal to zero")

    signaller = GracefulExitSignaler()
    logger = None
    client = None

    try:
        # logging
        logger = Logger(title="%s host:%s port %d size:%d count:%d" %
                        (prefix, host, port, size, count),
                        print_to_console=args.log,
                        save_for_dump=False)

        client = TcpEchoClient(prefix, host, port, size, count, args.timeout,
                               logger)

        keep_running = True
        while keep_running:
            time.sleep(0.1)
            if client.error is not None:
                logger.log("%s Client stopped with error: %s" %
                           (prefix, client.error))
                keep_running = False
                retval = 1
            if client.exit_status is not None:
                logger.log("%s Client stopped with status: %s" %
                           (prefix, client.exit_status))
                keep_running = False
            if signaller.kill_now:
                logger.log("%s Process killed with signal" % prefix)
                keep_running = False
            if keep_running and not client.is_running:
                logger.log("%s Client stopped with no error or status" %
                           prefix)
                keep_running = False

    except Exception:
        if client is not None:
            client.error = "ERROR: exception : '%s'" % traceback.format_exc()
        if logger is not None:
            logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
        retval = 1

    if client is not None and client.error is not None:
        # write client errors to stderr
        def eprint(*args, **kwargs):
            print(*args, file=sys.stderr, **kwargs)

        elines = client.error.split("\n")
        for line in elines:
            eprint("ERROR:", prefix, line)

    return retval
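For reference, a hedged sketch of driving this entry point directly: main() deletes argv[0] before parsing, so a program-name placeholder is included, and the host, port, and sizing values below are arbitrary examples rather than values taken from this file.

if __name__ == "__main__":
    # Hypothetical direct invocation of the echo client entry point.
    sys.exit(main(["tcp_echo_client",
                   "--host", "127.0.0.1",
                   "--port", "5800",
                   "--size", "100",
                   "--count", "10",
                   "--timeout", "5.0",
                   "--log"]))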
Example #28
def main(argv):
    retval = 0
    logger = None
    # parse args
    p = argparse.ArgumentParser()
    p.add_argument('--port', '-p', help='Required listening port number')
    p.add_argument('--name', help='Optional logger prefix')
    p.add_argument(
        '--echo',
        '-e',
        type=int,
        default=0,
        const=1,
        nargs="?",
        help=
        'Exit after echoing this many bytes. Default value "0" disables exiting on byte count.'
    )
    p.add_argument(
        '--timeout',
        '-t',
        type=float,
        default=0.0,
        const=1,
        nargs="?",
        help='Timeout in seconds. Default value "0.0" disables timeouts')
    p.add_argument('--log',
                   '-l',
                   action='store_true',
                   help='Write activity log to console')
    # Add controlled server misbehavior for testing conditions seen in the field
    # Stall required to trigger Q2 testing for DISPATCH-1947 and improving test DISPATCH-1981
    p.add_argument(
        '--connect-stall',
        type=float,
        default=0.0,
        const=1,
        nargs="?",
        help=
        'Accept connections but wait this many seconds before reading from socket. Default value "0.0" disables stall'
    )
    # Close on connect - exercises control paths scrutinized under DISPATCH-1968
    p.add_argument(
        '--close-on-connect',
        action='store_true',
        help=
        'Close client connection without reading from socket when listener connects. If stall is specified then stall before closing.'
    )
    # Close on data - exercises control paths scrutinized under DISPATCH-1968
    p.add_argument('--close-on-data',
                   action='store_true',
                   help='Close client connection as soon as data arrives.')
    del argv[0]
    args = p.parse_args(argv)

    # port
    if args.port is None:
        raise Exception("User must specify a port number")
    port = args.port

    # name / prefix
    prefix = args.name if args.name is not None else "ECHO_SERVER (%s)" % (
        str(port))

    # echo
    if args.echo < 0:
        raise Exception("Echo count must be greater than zero")

    # timeout
    if args.timeout < 0.0:
        raise Exception("Timeout must be greater than or equal to zero")

    # connect-stall
    if args.connect_stall < 0.0:
        raise Exception("Connect-stall must be greater than or equal to zero")

    signaller = GracefulExitSignaler()
    server = None

    try:
        # logging
        logger = Logger(title="%s port %s" % (prefix, port),
                        print_to_console=args.log,
                        save_for_dump=False)

        server = TcpEchoServer(prefix, port, args.echo, args.timeout, logger,
                               args.connect_stall, args.close_on_connect,
                               args.close_on_data)

        keep_running = True
        while keep_running:
            time.sleep(0.1)
            if server.error is not None:
                logger.log("%s Server stopped with error: %s" %
                           (prefix, server.error))
                keep_running = False
                retval = 1
            if server.exit_status is not None:
                logger.log("%s Server stopped with status: %s" %
                           (prefix, server.exit_status))
                keep_running = False
            if signaller.kill_now:
                logger.log("%s Process killed with signal" % prefix)
                keep_running = False
            if keep_running and not server.is_running:
                logger.log("%s Server stopped with no error or status" %
                           prefix)
                keep_running = False

    except Exception:
        if logger is not None:
            logger.log("%s Exception: %s" % (prefix, traceback.format_exc()))
        retval = 1

    if server is not None and server.sock is not None:
        server.sock.close()

    return retval
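Similarly, a hedged sketch of driving the echo-server entry point directly; the port and option values are arbitrary examples, and raise SystemExit is used so the sketch does not assume any imports beyond those already visible in this file.

if __name__ == "__main__":
    # Hypothetical direct invocation of the echo server entry point.
    raise SystemExit(main(["tcp_echo_server",
                           "--port", "5801",
                           "--timeout", "0.0",
                           "--connect-stall", "0.0",
                           "--log"]))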