def test_relay(self):
    """Relay messages addressed to q@BrokerB through broker A to broker B."""
    other_broker = self.amqp_broker()
    other_agent = BrokerAgent(other_broker.connect())
    other_agent.create("queue", "q")
    self.agent.create("domain", "BrokerB",
                      {"url": other_broker.host_port(), "sasl_mechanisms": "NONE"})
    # Send to q on broker B through broker A.
    self.send_and_receive(
        send_config=Config(self.broker, address="q@BrokerB"),
        recv_config=Config(other_broker))
def test_outgoing_link(self):
    """An outgoing link should forward messages from broker A's queue to broker B's."""
    other_broker = self.amqp_broker()
    other_agent = BrokerAgent(other_broker.connect())
    self.agent.create("queue", "q")
    other_agent.create("queue", "q")
    self.agent.create("domain", "BrokerB",
                      {"url": other_broker.host_port(), "sasl_mechanisms": "NONE"})
    self.agent.create("outgoing", "Link1",
                      {"domain": "BrokerB", "source": "q", "target": "q"})
    # Send to broker A, receive from broker B.
    self.send_and_receive(recv_config=Config(other_broker))
def incoming_link(self, mechanism):
    """Pull messages from broker B into broker A over an incoming link.

    mechanism -- SASL mechanism name used for the domain (e.g. "NONE").
    """
    remote = self.amqp_broker()
    remote_agent = BrokerAgent(remote.connect())
    self.agent.create("queue", "q")
    remote_agent.create("queue", "q")
    self.agent.create("domain", "BrokerB",
                      {"url": remote.host_port(), "sasl_mechanisms": mechanism})
    self.agent.create("incoming", "Link1",
                      {"domain": "BrokerB", "source": "q", "target": "q"})
    # Send to broker B, receive from broker A.
    self.send_and_receive(send_config=Config(remote))
def checkOverStockedQueues(host):
    """Return [[name, depth], ...] for every queue on *host* whose message
    depth is non-zero, printing each one as it is found.

    host -- broker address accepted by Connection.establish().
    """
    connection = Connection.establish(host)
    try:
        broker = BrokerAgent(connection)
        result = []
        for q in broker.getAllQueues():
            # A queue still holding messages counts as over-stocked.
            if q.msgDepth != 0:
                print(q.name + " " + str(q.msgDepth))
                result.append([q.name, q.msgDepth])
        return result
    finally:
        # The original leaked the connection; always release it.
        connection.close()
def test_relay(self):
    """Verify messages sent to q@BrokerB on broker A arrive on broker B."""
    brokerB = self.amqp_broker()
    BrokerAgent(brokerB.connect()).create("queue", "q")
    domain_opts = {"url": brokerB.host_port(), "sasl_mechanisms": "NONE"}
    self.agent.create("domain", "BrokerB", domain_opts)
    # Send to q on broker B through broker A.
    relay_sender = Config(self.broker, address="q@BrokerB")
    self.send_and_receive(send_config=relay_sender,
                          recv_config=Config(brokerB))
def _node_disambiguation_precreated(self, ambiguous_send):
    """Run the disambiguation test against a pre-created exchange and queue
    that both carry the name 'ambiguous'."""
    mgmt = BrokerAgent(self.conn)
    mgmt.addExchange("fanout", "ambiguous")
    mgmt.addQueue("ambiguous")
    try:
        topic_rx = self.ssn.receiver("ambiguous; {node:{type:topic}}")
        queue_rx = self.ssn.receiver("ambiguous; {node:{type:queue}}")
        self._node_disambiguation_test(topic_rx, queue_rx,
                                       ambiguous_send=ambiguous_send)
    finally:
        # Remove both nodes regardless of the test outcome.
        mgmt.delExchange("ambiguous")
        mgmt.delQueue("ambiguous", False, False)
def test_outgoing_link(self):
    """Messages sent to broker A should traverse the outgoing link to broker B."""
    brokerB = self.amqp_broker()
    agentB = BrokerAgent(brokerB.connect())
    for mgmt in (self.agent, agentB):
        mgmt.create("queue", "q")
    domain_props = {"url": brokerB.host_port(), "sasl_mechanisms": "NONE"}
    self.agent.create("domain", "BrokerB", domain_props)
    link_props = {"domain": "BrokerB", "source": "q", "target": "q"}
    self.agent.create("outgoing", "Link1", link_props)
    # send to brokerA, receive from brokerB
    self.send_and_receive(recv_config=Config(brokerB))
def startBrokerAccess(self):
    """ New-style management access to the broker. Can be used in lieu of startQmf. """
    # Lazily create the connection/agent pair only once per instance.
    # (Membership in __dict__ is checked deliberately, not hasattr(), so a
    # class-level attribute cannot mask the instance state.)
    if 'broker_conn' not in self.__dict__:
        connection = qpid.messaging.Connection(str(self.broker))
        connection.open()
        self.broker_conn = connection
        self.broker_access = BrokerAgent(connection)
class QmfAgent(object):
    """Access to a QMF broker agent."""

    def __init__(self, address, **kwargs):
        admin_props = {"qpid.ha-admin": 1}
        self._connection = Connection.establish(
            address, client_properties=admin_props, **kwargs)
        self._agent = BrokerAgent(self._connection)
        # Fail fast if the broker does not have the HA plug-in enabled.
        assert self._agent.getHaBroker(), \
            "HA module not loaded in broker at: %s" % (address)

    def __getattr__(self, name):
        # Delegate everything else straight to the underlying BrokerAgent.
        return getattr(self._agent, name)
class QmfAgent(object):
    """Access to a QMF broker agent."""

    def __init__(self, address, **kwargs):
        self._connection = Connection.establish(address,
                                                client_properties={"qpid.ha-admin": 1},
                                                **kwargs)
        agent = BrokerAgent(self._connection)
        # The HA plug-in must be loaded for any delegated call to succeed.
        assert agent.getHaBroker(), \
            "HA module not loaded in broker at: %s" % (address)
        self._agent = agent

    def __getattr__(self, name):
        # Unknown attributes resolve against the wrapped BrokerAgent.
        delegate = getattr(self._agent, name)
        return delegate
def test_ambiguous_create_2(self):
    """Create the exchange first, then the queue, both named 'ambiguous',
    and check receivers can still disambiguate by node type."""
    # create exchange first, then queue
    topic_rx = self.ssn.receiver(
        "ambiguous; {create:receiver, node:{type:topic}}")
    queue_rx = self.ssn.receiver(
        "ambiguous; {create:receiver, node:{type:queue}}")
    agent = BrokerAgent(self.conn)
    exchange = agent.getExchange("ambiguous")
    queue = agent.getQueue("ambiguous")
    try:
        assert exchange
        assert queue
        self._node_disambiguation_test(topic_rx, queue_rx)
    finally:
        # Remove only the nodes that were actually created.
        if exchange:
            agent.delExchange("ambiguous")
        if queue:
            agent.delQueue("ambiguous", False, False)
def test_ambiguous_create_2(self):
    """Exchange created before queue: both 'ambiguous' nodes must exist and
    receivers must resolve to the right one by node type."""
    #create exchange first, then queue
    r1 = self.ssn.receiver("ambiguous; {create:receiver, node:{type:topic}}")
    r2 = self.ssn.receiver("ambiguous; {create:receiver, node:{type:queue}}")
    mgmt = BrokerAgent(self.conn)
    exchange = mgmt.getExchange("ambiguous")
    queue = mgmt.getQueue("ambiguous")
    try:
        assert exchange
        assert queue
        self._node_disambiguation_test(r1, r2)
    finally:
        if exchange:
            mgmt.delExchange("ambiguous")
        if queue:
            mgmt.delQueue("ambiguous", False, False)
def migrate(*args, **kwargs):
    """
    Migrate qpid queues:
    - Ensure pulp.task is no longer *exclusive*.
    - Rename agent queues: consumer_id> => pulp.agent.<consumer_id>

    Raises Exception when the qpid client libraries needed for the
    migration are not installed. No-op for non-qpid transports.
    """
    transport = pulp_conf.get('messaging', 'transport')
    if transport != 'qpid':
        # not using qpid
        return
    if not QPID_MESSAGING_AVAILABLE:
        msg = _(
            'Migration 0009 did not run because the python package qpid.messaging is not '
            'installed. Pulp\'s Qpid client dependencies can be installed with the '
            '\"pulp-server-qpid\" package group. See the installation docs for more '
            'information. Alternatively, you may reconfigure Pulp to use RabbitMQ.'
        )
        _logger.error(msg)
        raise Exception(msg)
    if not QPIDTOOLLIBS_AVAILABLE:
        msg = _(
            'Migration 0009 did not run because the python package qpidtoollibs is not '
            'installed. Pulp\'s Qpid client dependencies can be installed with the '
            '\"pulp-server-qpid\" package group. See the installation docs for more '
            'information. Alternatively, you may reconfigure Pulp to use RabbitMQ.'
        )
        _logger.error(msg)
        raise Exception(msg)
    url = urlparse(pulp_conf.get('messaging', 'url'))
    connection = Connection(host=url.hostname,
                            port=url.port,
                            transport=url.scheme,
                            reconnect=False,
                            ssl_certfile=pulp_conf.get('messaging', 'clientcert'),
                            ssl_skip_hostname_check=True)
    connection.attach()
    try:
        broker = BrokerAgent(connection)
        _migrate_reply_queue(broker)
        _migrate_agent_queues(broker)
    finally:
        # Originally detach() was skipped when a migration step raised,
        # leaking the broker connection; always release it.
        connection.detach()
def migrate(*args, **kwargs):
    """
    Migrate qpid queues:
    - Ensure pulp.task is no longer *exclusive*.
    - Rename agent queues: consumer_id> => pulp.agent.<consumer_id>

    Raises Exception when the qpid client libraries needed for the
    migration are not installed. No-op for non-qpid transports.
    """
    transport = pulp_conf.get('messaging', 'transport')
    if transport != 'qpid':
        # not using qpid
        return
    if not QPID_MESSAGING_AVAILABLE:
        msg = _(
            'Migration 0009 did not run because the python package qpid.messaging is not '
            'installed. Please install qpid.messaging and rerun the migrations. See %s'
            'for more information.')
        msg = msg % QPID_MESSAGING_URL
        _logger.error(msg)
        raise Exception(msg)
    if not QPIDTOOLLIBS_AVAILABLE:
        msg = _(
            'Migration 0009 did not run because the python package qpidtoollibs is not '
            'installed. Please install qpidtoollibs and rerun the migrations. See %s for more '
            'information.')
        msg = msg % QPIDTOOLLIBS_URL
        _logger.error(msg)
        raise Exception(msg)
    url = urlparse(pulp_conf.get('messaging', 'url'))
    connection = Connection(host=url.hostname,
                            port=url.port,
                            transport=url.scheme,
                            reconnect=False,
                            ssl_certfile=pulp_conf.get('messaging', 'clientcert'),
                            ssl_skip_hostname_check=True)
    connection.attach()
    try:
        broker = BrokerAgent(connection)
        _migrate_reply_queue(broker)
        _migrate_agent_queues(broker)
    finally:
        # Originally detach() was skipped when a migration step raised,
        # leaking the broker connection; always release it.
        connection.detach()
def __init__(self, address, **kwargs):
    """Connect to *address* as an HA admin and verify the HA module is loaded."""
    props = {"qpid.ha-admin": 1}
    self._connection = Connection.establish(address,
                                            client_properties=props,
                                            **kwargs)
    self._agent = BrokerAgent(self._connection)
    # Fail fast if the broker does not have the HA plug-in enabled.
    assert self._agent.getHaBroker(), \
        "HA module not loaded in broker at: %s" % (address)
def __init__(self, address, **kwargs):
    """Open a management connection to *address* with HA-admin privileges."""
    admin_props = {"qpid.ha-admin": 1}
    connection = Connection.establish(address,
                                      client_properties=admin_props,
                                      **kwargs)
    self._connection = connection
    self._agent = BrokerAgent(connection)
def setup_access(self):
    """Lazily create and cache a management connection/agent pair, returning the agent."""
    if 'broker_agent' not in self.__dict__:
        connection = qpid.messaging.Connection(self.broker)
        connection.open()
        self.conn2 = connection
        self.broker_agent = BrokerAgent(connection)
    return self.broker_agent
except ImportError: print 'Cannot run test without python MagicMock' print 'Please install MagicMock: pip install mock' exit(3) connection = None broker = None try: logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) # setup broker connection connection = Connection.establish('127.0.0.1') broker = BrokerAgent(connection) # add test service busname busname = 'test-lofarbus-%s' % (uuid.uuid1()) broker.addExchange('topic', busname) # the system under test is the service and the rpc, not the RADatabase # so, patch (mock) the RADatabase class during these tests. # when the service instantiates an RADatabase it will get the mocked class. with patch('lofar.sas.resourceassignment.database.radb.RADatabase', autospec=True) as MockRADatabase: mock = MockRADatabase.return_value # modify the return values of the various RADatabase methods with pre-cooked answers mock.getTaskStatuses.return_value = [{ 'id': 1, 'name': 'opened'
class AmqpBrokerTest(BrokerTest): """ Tests using AMQP 1.0 support """ def setUp(self): BrokerTest.setUp(self) self.port_holder = HaPort(self) self.broker = self.amqp_broker(port_holder=self.port_holder) self.default_config = Config(self.broker) self.agent = BrokerAgent(self.broker.connect()) def sender(self, config, reply_to=None): cmd = ["qpid-send", "--broker", config.url, "--address", config.address, "--connection-options", "{protocol:%s}" % config.version, "--content-stdin", "--send-eos=1" ] if reply_to: cmd.append( "--reply-to=%s" % reply_to) return self.popen(cmd, stdin=PIPE) def receiver(self, config): cmd = ["qpid-receive", "--broker", config.url, "--address", config.address, "--connection-options", "{protocol:%r}" % config.version, "--timeout=10" ] return self.popen(cmd, stdout=PIPE) def ready_receiver(self, config): s = self.broker.connect().session() r = s.receiver("readyq; {create:always}") cmd = ["qpid-receive", "--broker", config.url, "--address", config.address, "--connection-options", "{protocol:%r}" % config.version, "--timeout=10", "--ready-address=readyq;{create:always}" ] result = self.popen(cmd, stdout=PIPE) r.fetch(timeout=1) # wait until receiver is actually ready s.acknowledge() s.close() return result def send_and_receive(self, send_config=None, recv_config=None, count=1000, reply_to=None, wait_for_receiver=False, debug=False): if debug: print "sender config is %s" % (send_config or self.default_config) print "receiver config is %s" % (recv_config or self.default_config) sender = self.sender(send_config or self.default_config, reply_to) sender._set_cloexec_flag(sender.stdin) #required for older python, see http://bugs.python.org/issue4112 if wait_for_receiver: receiver = self.ready_receiver(recv_config or self.default_config) else: receiver = self.receiver(recv_config or self.default_config) messages = ["message-%s" % (i+1) for i in range(count)] for m in messages: sender.stdin.write(m + "\n") sender.stdin.flush() sender.stdin.close() if 
debug: c = send_config or self.default_config print "sent %s messages to %s sn %s" % (len(messages), c.address, c.url) if debug: c = recv_config or self.default_config print "reading messages from %s sn %s" % (c.address, c.url) for m in messages: l = receiver.stdout.readline().rstrip() if debug: print l assert m == l, (m, l) sender.wait() receiver.wait() def test_simple(self): self.send_and_receive() def test_translate1(self): self.send_and_receive(recv_config=Config(self.broker, version="amqp0-10")) def test_translate2(self): self.send_and_receive(send_config=Config(self.broker, version="amqp0-10")) def test_translate_with_large_routingkey(self): self.send_and_receive(send_config=Config(self.broker, address="amq.topic/a.%s" % ("x" * 256), version="amqp1.0"), recv_config=Config(self.broker, address="amq.topic/a.*", version="amqp0-10"), wait_for_receiver=True) def send_and_receive_empty(self, send_config=None, recv_config=None): sconfig = send_config or self.default_config rconfig = recv_config or self.default_config send_cmd = ["qpid-send", "--broker", sconfig.url, "--address=%s" % sconfig.address, "--connection-options={protocol:%s}" % sconfig.version, "--content-size=0", "--messages=1", "-P", "my-header=abc" ] sender = self.popen(send_cmd) sender.wait() receive_cmd = ["qpid-receive", "--broker", rconfig.url, "--address=%s" % rconfig.address, "--connection-options={protocol:%s}" % rconfig.version, "--messages=1", "--print-content=false", "--print-headers=true" ] receiver = self.popen(receive_cmd, stdout=PIPE) l = receiver.stdout.read() assert "my-header:abc" in l receiver.wait() def test_translate_empty_1(self): self.send_and_receive_empty(recv_config=Config(self.broker, version="amqp0-10")) def test_translate_empty_2(self): self.send_and_receive_empty(send_config=Config(self.broker, version="amqp0-10")) def request_response(self, reply_to, send_config=None, request_config=None, response_config=None, count=1000, wait_for_receiver=False): rconfig = request_config 
or self.default_config echo_cmd = ["qpid-receive", "--broker", rconfig.url, "--address=%s" % rconfig.address, "--connection-options={protocol:%s}" % rconfig.version, "--timeout=10", "--print-content=false", "--print-headers=false" ] requests = self.popen(echo_cmd) self.send_and_receive(send_config, response_config, count, reply_to=reply_to, wait_for_receiver=wait_for_receiver) requests.wait() def request_response_local(self, request_address, response_address, wait_for_receiver=False, request_version="amqp1.0", echo_version="amqp1.0"): self.request_response(response_address, send_config=Config(self.broker, address=request_address, version=request_version), request_config=Config(self.broker, address=request_address, version=echo_version), response_config=Config(self.broker, address=response_address, version=request_version), wait_for_receiver=wait_for_receiver) def test_request_reponse_queue(self): self.agent.create("queue", "q1") self.agent.create("queue", "q2") self.request_response_local("q1", "q2") def test_request_reponse_queue_translated1(self): self.agent.create("queue", "q1") self.agent.create("queue", "q2") self.request_response_local("q1", "q2", request_version="amqp0-10", echo_version="amqp1.0") def test_request_reponse_queue_translated2(self): self.agent.create("queue", "q1") self.agent.create("queue", "q2") self.request_response_local("q1", "q2", request_version="amqp1.0", echo_version="amqp0-10") def test_request_reponse_exchange(self): self.agent.create("queue", "q1") self.request_response_local("q1", "amq.fanout", wait_for_receiver=True) def test_request_reponse_exchange_translated1(self): self.agent.create("queue", "q1") self.request_response_local("q1", "amq.fanout", wait_for_receiver=True, request_version="amqp0-10", echo_version="amqp1.0") def test_request_reponse_exchange_translated2(self): self.agent.create("queue", "q1") self.request_response_local("q1", "amq.fanout", wait_for_receiver=True, request_version="amqp1.0", echo_version="amqp0-10") 
def test_request_reponse_exchange_with_subject(self): self.agent.create("queue", "q1") self.request_response_local("q1", "amq.topic/abc; {node:{type:topic}}", wait_for_receiver=True) def test_request_reponse_exchange_with_subject_translated1(self): self.agent.create("queue", "q1") self.request_response_local("q1", "amq.topic/abc; {node:{type:topic}}", wait_for_receiver=True, request_version="amqp0-10", echo_version="amqp1.0") def test_request_reponse_exchange_with_subject_translated2(self): self.agent.create("queue", "q1") self.request_response_local("q1", "amq.topic/abc; {node:{type:topic}}", wait_for_receiver=True, request_version="amqp1.0", echo_version="amqp0-10") def test_domain(self): brokerB = self.amqp_broker() self.agent.create("domain", "BrokerB", {"url":brokerB.host_port()}) domains = self.agent._getAllBrokerObjects(Domain) assert len(domains) == 1 assert domains[0].name == "BrokerB" def incoming_link(self, mechanism): brokerB = self.amqp_broker() agentB = BrokerAgent(brokerB.connect()) self.agent.create("queue", "q") agentB.create("queue", "q") self.agent.create("domain", "BrokerB", {"url":brokerB.host_port(), "sasl_mechanisms":mechanism}) self.agent.create("incoming", "Link1", {"domain":"BrokerB","source":"q","target":"q"}) #send to brokerB, receive from brokerA self.send_and_receive(send_config=Config(brokerB)) def test_incoming_link_anonymous(self): self.incoming_link("ANONYMOUS") def test_incoming_link_nosasl(self): self.incoming_link("NONE") def test_outgoing_link(self): brokerB = self.amqp_broker() agentB = BrokerAgent(brokerB.connect()) self.agent.create("queue", "q") agentB.create("queue", "q") self.agent.create("domain", "BrokerB", {"url":brokerB.host_port(), "sasl_mechanisms":"NONE"}) self.agent.create("outgoing", "Link1", {"domain":"BrokerB","source":"q","target":"q"}) #send to brokerA, receive from brokerB self.send_and_receive(recv_config=Config(brokerB)) def test_relay(self): brokerB = self.amqp_broker() agentB = 
BrokerAgent(brokerB.connect()) agentB.create("queue", "q") self.agent.create("domain", "BrokerB", {"url":brokerB.host_port(), "sasl_mechanisms":"NONE"}) #send to q on broker B through brokerA self.send_and_receive(send_config=Config(self.broker, address="q@BrokerB"), recv_config=Config(brokerB)) def test_reconnect(self): receiver_cmd = ["qpid-receive", "--broker", self.broker.host_port(), "--address=amq.fanout", "--connection-options={protocol:amqp1.0, reconnect:True,container_id:receiver}", "--timeout=10", "--print-content=true", "--print-headers=false" ] receiver = self.popen(receiver_cmd, stdout=PIPE) sender_cmd = ["qpid-send", "--broker", self.broker.host_port(), "--address=amq.fanout", "--connection-options={protocol:amqp1.0,reconnect:True,container_id:sender}", "--content-stdin", "--send-eos=1" ] sender = self.popen(sender_cmd, stdin=PIPE) sender._set_cloexec_flag(sender.stdin) #required for older python, see http://bugs.python.org/issue4112 batch1 = ["message-%s" % (i+1) for i in range(10000)] for m in batch1: sender.stdin.write(m + "\n") sender.stdin.flush() self.broker.kill() self.broker = self.amqp_broker(port_holder=self.port_holder) batch2 = ["message-%s" % (i+1) for i in range(10000, 20000)] for m in batch2: sender.stdin.write(m + "\n") sender.stdin.flush() sender.stdin.close() last = None m = receiver.stdout.readline().rstrip() while len(m): last = m m = receiver.stdout.readline().rstrip() assert last == "message-20000", (last) """ Create and return a broker with AMQP 1.0 support """ def amqp_broker(self): assert BrokerTest.amqp_lib, "Cannot locate AMQP 1.0 plug-in" self.port_holder = HaPort(self) #reserve port args = ["--load-module", BrokerTest.amqp_lib, "--socket-fd=%s" % self.port_holder.fileno, "--listen-disable=tcp", "--log-enable=trace+:Protocol", "--log-enable=info+"] return BrokerTest.broker(self, args, port=self.port_holder.port) def amqp_broker(self, port_holder=None): assert BrokerTest.amqp_lib, "Cannot locate AMQP 1.0 plug-in" if 
port_holder: args = ["--load-module", BrokerTest.amqp_lib, "--socket-fd=%s" % port_holder.fileno, "--listen-disable=tcp", "--log-enable=trace+:Protocol", "--log-enable=info+"] return BrokerTest.broker(self, args, port=port_holder.port) else: args = ["--load-module", BrokerTest.amqp_lib, "--log-enable=trace+:Protocol", "--log-enable=info+"] return BrokerTest.broker(self, args)
def test_08_integration_test_with_messagebus(self):
    """ Full blown integration test listening for notifications on the bus,
    and checking which dir is up for a visit next.
    Needs a working local qpid broker. Test is skipped if qpid not available.
    """
    try:
        broker = None
        connection = None

        import uuid
        from threading import Event
        from qpid.messaging import Connection, ConnectError
        from qpidtoollibs import BrokerAgent
        from lofar.messaging.messagebus import ToBus
        from lofar.messaging.messages import EventMessage
        from lofar.lta.ingest.common.config import DEFAULT_INGEST_NOTIFICATION_PREFIX

        # setup broker connection
        connection = Connection.establish('127.0.0.1')
        broker = BrokerAgent(connection)

        # add test service bus
        busname = 'test-LTASOIngestEventHandler-%s' % (uuid.uuid1())
        broker.addExchange('topic', busname)

        sync_event = Event()

        class SyncedLTASOIngestEventHandler(LTASOIngestEventHandler):
            """This derived LTASOIngestEventHandler behaves exactly like the normal
            object under test LTASOIngestEventHandler, but it also sets a
            sync_event to sync between the listener thread and this main test thread"""
            def _handleMessage(self, msg):
                super(SyncedLTASOIngestEventHandler, self)._handleMessage(msg)
                sync_event.set()

        with SyncedLTASOIngestEventHandler(self.dbcreds, busname=busname):
            for site in self.db.sites():
                for root_dir in self.db.rootDirectoriesForSite(site['id']):
                    self._markAllDirectoriesRecentlyVisited()

                    # create the subdir surl
                    sub_dir_name = '/foo'
                    sub_dir_path = root_dir['dir_name'] + sub_dir_name
                    surl = site['url'] + sub_dir_path

                    with ToBus(busname) as sender:
                        msg = EventMessage(
                            subject=DEFAULT_INGEST_NOTIFICATION_PREFIX + "TaskFinished",
                            content={'srm_url': surl})
                        sender.send(msg)

                    # wait for the handler to have processed the message
                    self.assertTrue(sync_event.wait(2))
                    sync_event.clear()

                    # surl should have been scheduled for a visit, all other dir's were marked as visited already...
                    # so there should be a new dir for this surl, and it should be the least_recent_visited_dir
                    site_visit_stats = self.db.visitStats(datetime.utcnow())[site['name']]
                    least_recent_visited_dir_id = site_visit_stats.get('least_recent_visited_dir_id')
                    self.assertIsNotNone(least_recent_visited_dir_id)
                    least_recent_visited_dir = self.db.directory(least_recent_visited_dir_id)
                    self.assertEqual(sub_dir_path, least_recent_visited_dir['dir_name'])
    except ImportError as e:
        logger.warning("skipping test due to: %s", e)
    except ConnectError as e:
        logger.warning("skipping test due to: %s", e)
    finally:
        # cleanup test bus and exit
        if broker:
            broker.delExchange(busname)
        if connection:
            connection.close()
def test_ambiguous_delete_2(self):
    """Deleting via node type 'queue' must remove only the queue, leaving the
    identically-named exchange in place."""
    mgmt = BrokerAgent(self.conn)
    mgmt.addExchange("fanout", "ambiguous")
    mgmt.addQueue("ambiguous")
    deleting_rx = self.ssn.receiver("ambiguous; {delete:receiver, node:{type:queue}}")
    deleting_rx.close()
    exchange = mgmt.getExchange("ambiguous")
    queue = mgmt.getQueue("ambiguous")
    try:
        assert exchange
        assert not queue
    finally:
        # Remove whatever survived the test.
        if exchange:
            mgmt.delExchange("ambiguous")
        if queue:
            mgmt.delQueue("ambiguous", False, False)
class AmqpBrokerTest(BrokerTest): """ Tests using AMQP 1.0 support """ def setUp(self): BrokerTest.setUp(self) os.putenv("QPID_LOAD_MODULE", BrokerTest.amqpc_lib) self.broker = self.amqp_broker() self.default_config = Config(self.broker) self.agent = BrokerAgent(self.broker.connect()) def sender(self, config): cmd = [ "qpid-send", "--broker", config.url, "--address", config.address, "--connection-options", "{protocol:%s}" % config.version, "--content-stdin", "--send-eos=1" ] return self.popen(cmd, stdin=PIPE) def receiver(self, config): cmd = [ "qpid-receive", "--broker", config.url, "--address", config.address, "--connection-options", "{protocol:%r}" % config.version, "--timeout=10" ] return self.popen(cmd, stdout=PIPE) def send_and_receive(self, send_config=None, recv_config=None, count=1000, debug=False): if debug: print "sender config is %s" % (send_config or self.default_config) print "receiver config is %s" % (recv_config or self.default_config) sender = self.sender(send_config or self.default_config) receiver = self.receiver(recv_config or self.default_config) messages = ["message-%s" % (i + 1) for i in range(count)] for m in messages: sender.stdin.write(m + "\n") sender.stdin.flush() sender.stdin.close() if debug: c = send_config or self.default_config print "sent %s messages to %s sn %s" % (len(messages), c.address, c.url) if debug: c = recv_config or self.default_config print "reading messages from %s sn %s" % (c.address, c.url) for m in messages: l = receiver.stdout.readline().rstrip() if debug: print l assert m == l, (m, l) sender.wait() receiver.wait() def test_simple(self): self.send_and_receive() def test_translate1(self): self.send_and_receive( recv_config=Config(self.broker, version="amqp0-10")) def test_translate2(self): self.send_and_receive( send_config=Config(self.broker, version="amqp0-10")) def test_domain(self): brokerB = self.amqp_broker() self.agent.create("domain", "BrokerB", {"url": brokerB.host_port()}) domains = 
self.agent._getAllBrokerObjects(Domain) assert len(domains) == 1 assert domains[0].name == "BrokerB" def test_incoming_link(self): brokerB = self.amqp_broker() agentB = BrokerAgent(brokerB.connect()) self.agent.create("queue", "q") agentB.create("queue", "q") self.agent.create("domain", "BrokerB", { "url": brokerB.host_port(), "sasl_mechanisms": "NONE" }) self.agent.create("incoming", "Link1", { "domain": "BrokerB", "source": "q", "target": "q" }) #send to brokerB, receive from brokerA self.send_and_receive(send_config=Config(brokerB)) def test_outgoing_link(self): brokerB = self.amqp_broker() agentB = BrokerAgent(brokerB.connect()) self.agent.create("queue", "q") agentB.create("queue", "q") self.agent.create("domain", "BrokerB", { "url": brokerB.host_port(), "sasl_mechanisms": "NONE" }) self.agent.create("outgoing", "Link1", { "domain": "BrokerB", "source": "q", "target": "q" }) #send to brokerA, receive from brokerB self.send_and_receive(recv_config=Config(brokerB)) def test_relay(self): brokerB = self.amqp_broker() agentB = BrokerAgent(brokerB.connect()) agentB.create("queue", "q") self.agent.create("domain", "BrokerB", { "url": brokerB.host_port(), "sasl_mechanisms": "NONE" }) #send to q on broker B through brokerA self.send_and_receive(send_config=Config(self.broker, address="q@BrokerB"), recv_config=Config(brokerB)) """ Create and return a broker with AMQP 1.0 support """ def amqp_broker(self): assert BrokerTest.amqp_lib, "Cannot locate AMQP 1.0 plug-in" args = [ "--load-module", BrokerTest.amqp_lib, "--max-negotiate-time=600000", "--log-enable=trace+:Protocol", "--log-enable=info+" ] return BrokerTest.broker(self, args)
def test_ambiguous_delete_2(self):
    """A delete-on-close receiver targeting node type 'queue' must delete the
    queue but not the exchange of the same name."""
    agent = BrokerAgent(self.conn)
    agent.addExchange("fanout", "ambiguous")
    agent.addQueue("ambiguous")
    rx = self.ssn.receiver("ambiguous; {delete:receiver, node:{type:queue}}")
    rx.close()
    exchange = agent.getExchange("ambiguous")
    queue = agent.getQueue("ambiguous")
    try:
        assert exchange
        assert not queue
    finally:
        if exchange:
            agent.delExchange("ambiguous")
        if queue:
            agent.delQueue("ambiguous", False, False)
def setUp(self):
    """Start an AMQP 1.0 enabled broker and a management agent for it."""
    BrokerTest.setUp(self)
    # Exported for child processes; presumably makes spawned clients load
    # the AMQP 1.0 client module — confirm against BrokerTest docs.
    os.putenv("QPID_LOAD_MODULE", BrokerTest.amqpc_lib)
    broker = self.amqp_broker()
    self.broker = broker
    self.default_config = Config(broker)
    self.agent = BrokerAgent(broker.connect())
from mock import patch except ImportError: print 'Cannot run test without python MagicMock' print 'Please install MagicMock: pip install mock' exit(3) connection = None broker = None try: logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) # setup broker connection connection = Connection.establish('127.0.0.1') broker = BrokerAgent(connection) # add test service busname busname = 'test-lofarbus-%s' % (uuid.uuid1()) broker.addExchange('topic', busname) # the system under test is the service and the rpc, not the RADatabase # so, patch (mock) the RADatabase class during these tests. # when the service instantiates an RADatabase it will get the mocked class. with patch('lofar.sas.resourceassignment.database.radb.RADatabase', autospec=True) as MockRADatabase: mock = MockRADatabase.return_value # modify the return values of the various RADatabase methods with pre-cooked answers mock.getTaskStatuses.return_value = [{'id': 1, 'name': 'opened'}, {'id': 2, 'name': 'scheduled'}] mock.getTaskTypes.return_value = [{'id': 0, 'name': 'OBSERVATION'}, {'id': 1, 'name': 'PIPELINE'}] mock.getResourceClaimStatuses.return_value = [{'id': 0, 'name': 'CLAIMED'},{'id': 1, 'name': 'ALLOCATED'},{'id': 2, 'name': 'CONFLICT'}] mock.getUnits.return_value = [{'units': 'rsp_channel_bit', 'id': 0},{'units': 'bytes', 'id': 1},{'units': 'rcu_board', 'id': 2},{'units': 'bytes/second', 'id': 3},{'units': 'cores', 'id': 4}]
class BrokerManager:
    """Console-side wrapper around a QMF BrokerAgent.

    Connects to a single qpid broker and renders summary views
    (broker, connections, sessions, exchanges, queues, subscriptions,
    memory, ACL) either as formatted tables or as JSON, driven by the
    global `config` options object.
    """

    def __init__(self):
        # All connection state is established later by SetBroker().
        self.brokerName = None
        self.connection = None
        self.broker = None
        self.cluster = None

    def SetBroker(self, brokerUrl):
        """Open a connection to brokerUrl and attach a QMF agent to it."""
        self.url = brokerUrl
        self.connection = Connection.establish(self.url, **conn_options)
        self.broker = BrokerAgent(self.connection)

    def Disconnect(self):
        """ Release any allocated brokers.  Ignore any failures as the tool is shutting down. """
        try:
            # NOTE(review): this closes the module-global `connection`, not
            # self.connection opened in SetBroker() -- looks like a bug;
            # confirm which was intended before relying on cleanup here.
            connection.close()
        except:
            pass

    def _getCluster(self):
        """Cache the cluster QMF object on self.cluster, or return None when
        clustering is absent or disabled.

        NOTE(review): reads self.qmf and self.brokerAgent, neither of which
        is assigned anywhere in this class -- verify against the original
        module; as written this would raise AttributeError.
        """
        packages = self.qmf.getPackages()
        if "org.apache.qpid.cluster" not in packages:
            return None
        clusters = self.qmf.getObjects(_class="cluster", _agent=self.brokerAgent)
        if len(clusters) == 0:
            print "Clustering is installed but not enabled on the broker."
            return None
        self.cluster = clusters[0]

    def _getHostList(self, urlList):
        """Reduce each cluster-member AMQP URL to one "host:port" string.

        Each URL must look like "amqp:proto:host:port[,proto:host:port...]";
        raises Exception on malformed input.
        """
        hosts = []
        hostAddr = IpAddr(config._host)
        for url in urlList:
            if url.find("amqp:") != 0:
                raise Exception("Invalid URL 1")
            url = url[5:]
            addrs = str(url).split(",")
            addrList = []
            for addr in addrs:
                tokens = addr.split(":")
                if len(tokens) != 3:
                    raise Exception("Invalid URL 2")
                addrList.append((tokens[1], tokens[2]))

            # Find the address in the list that is most likely to be in the
            # same subnet as the address with which we made the original QMF
            # connection.  This increases the probability that we will be
            # able to reach the cluster member.
            best = hostAddr.bestAddr(addrList)
            bestUrl = best[0] + ":" + best[1]
            hosts.append(bestUrl)
        return hosts

    def displayBroker(self):
        """Print a one-row broker summary, plus aggregate message/byte
        statistics when the broker exposes them."""
        disp = Display(prefix=" ")
        heads = []
        jsonContainer = []
        heads.append(Header('uptime', Header.DURATION))
        heads.append(Header('cluster', Header.NONE))
        heads.append(Header('connections', Header.COMMAS))
        heads.append(Header('sessions', Header.COMMAS))
        heads.append(Header('exchanges', Header.COMMAS))
        heads.append(Header('queues', Header.COMMAS))
        rows = []
        broker = self.broker.getBroker()
        cluster = self.broker.getCluster()
        # Old-style and/or ternary: "<standalone>" when no cluster object.
        clusterInfo = cluster and cluster.clusterName + "<" + cluster.status + ">" or "<standalone>"
        connections = self.getConnectionMap()
        sessions = self.getSessionMap()
        exchanges = self.getExchangeMap()
        queues = self.getQueueMap()
        row = (broker.getUpdateTime() - broker.getCreateTime(),
               clusterInfo,
               len(connections),
               len(sessions),
               len(exchanges),
               len(queues))
        rows.append(row)
        if config._jsonoutput:
            jsonContainer.append(rows);
        else:
            disp.formattedTable('Broker Summary:', heads, rows)

        # Older brokers do not publish the aggregate statistics; stop here.
        if 'queueCount' not in broker.values:
            return
        print
        heads = []
        heads.append(Header('Statistic'))
        heads.append(Header('Messages', Header.COMMAS))
        heads.append(Header('Bytes', Header.COMMAS))
        rows = []
        rows.append(['queue-depth', broker.msgDepth, broker.byteDepth])
        rows.append(['total-enqueues', broker.msgTotalEnqueues, broker.byteTotalEnqueues])
        rows.append(['total-dequeues', broker.msgTotalDequeues, broker.byteTotalDequeues])
        rows.append(['persistent-enqueues', broker.msgPersistEnqueues, broker.bytePersistEnqueues])
        rows.append(['persistent-dequeues', broker.msgPersistDequeues, broker.bytePersistDequeues])
        rows.append(['transactional-enqueues', broker.msgTxnEnqueues, broker.byteTxnEnqueues])
        rows.append(['transactional-dequeues', broker.msgTxnDequeues, broker.byteTxnDequeues])
        rows.append(['flow-to-disk-depth', broker.msgFtdDepth, broker.byteFtdDepth])
        rows.append(['flow-to-disk-enqueues', broker.msgFtdEnqueues, broker.byteFtdEnqueues])
        rows.append(['flow-to-disk-dequeues', broker.msgFtdDequeues, broker.byteFtdDequeues])
        # The counters below have no byte-level counterpart.
        rows.append(['acquires', broker.acquires, None])
        rows.append(['releases', broker.releases, None])
        rows.append(['discards-no-route', broker.discardsNoRoute, None])
        rows.append(['discards-ttl-expired', broker.discardsTtl, None])
        rows.append(['discards-limit-overflow', broker.discardsOverflow, None])
        rows.append(['discards-ring-overflow', broker.discardsRing, None])
        rows.append(['discards-lvq-replace', broker.discardsLvq, None])
        rows.append(['discards-subscriber-reject', broker.discardsSubscriber, None])
        rows.append(['discards-purged', broker.discardsPurge, None])
        rows.append(['reroutes', broker.reroutes, None])
        rows.append(['abandoned', broker.abandoned, None])
        rows.append(['abandoned-via-alt', broker.abandonedViaAlt, None])
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable('Aggregate Broker Statistics:', heads, rows)

    def displayConn(self):
        """Print one row per client connection (process, auth, traffic)."""
        disp = Display(prefix=" ")
        heads = []
        jsonContainer = []
        heads.append(Header('connection'))
        heads.append(Header('cproc'))
        heads.append(Header('cpid'))
        heads.append(Header('mech'))
        heads.append(Header('auth'))
        heads.append(Header('connected', Header.DURATION))
        heads.append(Header('idle', Header.DURATION))
        heads.append(Header('msgIn', Header.KMG))
        heads.append(Header('msgOut', Header.KMG))
        rows = []
        connections = self.broker.getAllConnections()
        broker = self.broker.getBroker()
        for conn in connections:
            row = []
            row.append(conn.address)
            row.append(conn.remoteProcessName)
            row.append(conn.remotePid)
            row.append(conn.saslMechanism)
            row.append(conn.authIdentity)
            # Durations relative to the broker's last-update timestamp.
            row.append(broker.getUpdateTime() - conn.getCreateTime())
            row.append(broker.getUpdateTime() - conn.getUpdateTime())
            row.append(conn.msgsFromClient)
            row.append(conn.msgsToClient)
            rows.append(row)
        title = "Connections"
        if config._sortcol:
            sorter = Sorter(heads, rows, config._sortcol,
                            config._limit, config._increasing)
            dispRows = sorter.getSorted()
        else:
            dispRows = rows
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable(title, heads, dispRows)

    def displaySession(self):
        # NOTE(review): stub -- builds a Display but renders nothing;
        # confirm whether the session view was intentionally left empty.
        disp = Display(prefix=" ")

    def displayExchange(self):
        """Print one row per exchange with routing/drop counters."""
        disp = Display(prefix=" ")
        heads = []
        jsonContainer = []
        heads.append(Header("exchange"))
        heads.append(Header("type"))
        heads.append(Header("dur", Header.Y))
        heads.append(Header("bind", Header.KMG))
        heads.append(Header("msgIn", Header.KMG))
        heads.append(Header("msgOut", Header.KMG))
        heads.append(Header("msgDrop", Header.KMG))
        heads.append(Header("byteIn", Header.KMG))
        heads.append(Header("byteOut", Header.KMG))
        heads.append(Header("byteDrop", Header.KMG))
        rows = []
        exchanges = self.broker.getAllExchanges()
        for ex in exchanges:
            row = []
            row.append(ex.name)
            row.append(ex.type)
            row.append(ex.durable)
            row.append(ex.bindingCount)
            row.append(ex.msgReceives)
            row.append(ex.msgRoutes)
            row.append(ex.msgDrops)
            row.append(ex.byteReceives)
            row.append(ex.byteRoutes)
            row.append(ex.byteDrops)
            rows.append(row)
        title = "Exchanges"
        if config._sortcol:
            sorter = Sorter(heads, rows, config._sortcol,
                            config._limit, config._increasing)
            dispRows = sorter.getSorted()
        else:
            dispRows = rows
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable(title, heads, dispRows)

    def displayQueues(self):
        """Print one row per queue with depth/throughput counters."""
        disp = Display(prefix=" ")
        heads = []
        jsonContainer = []
        heads.append(Header("queue"))
        heads.append(Header("dur", Header.Y))
        heads.append(Header("autoDel", Header.Y))
        heads.append(Header("excl", Header.Y))
        heads.append(Header("msg", Header.KMG))
        heads.append(Header("msgIn", Header.KMG))
        heads.append(Header("msgOut", Header.KMG))
        heads.append(Header("bytes", Header.KMG))
        heads.append(Header("bytesIn", Header.KMG))
        heads.append(Header("bytesOut", Header.KMG))
        heads.append(Header("cons", Header.KMG))
        heads.append(Header("bind", Header.KMG))
        rows = []
        queues = self.broker.getAllQueues()
        for q in queues:
            row = []
            row.append(q.name)
            row.append(q.durable)
            row.append(q.autoDelete)
            row.append(q.exclusive)
            row.append(q.msgDepth)
            row.append(q.msgTotalEnqueues)
            row.append(q.msgTotalDequeues)
            row.append(q.byteDepth)
            row.append(q.byteTotalEnqueues)
            row.append(q.byteTotalDequeues)
            row.append(q.consumerCount)
            row.append(q.bindingCount)
            rows.append(row)
        title = "Queues"
        if config._sortcol:
            sorter = Sorter(heads, rows, config._sortcol,
                            config._limit, config._increasing)
            dispRows = sorter.getSorted()
        else:
            dispRows = rows
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable(title, heads, dispRows)

    def displayQueue(self, name):
        """Print the detailed view of a single queue: properties,
        optional properties, then statistics."""
        queue = self.broker.getQueue(name)
        if not queue:
            print "Queue '%s' not found" % name
            return
        disp = Display(prefix=" ")
        heads = []
        jsonContainer = []
        heads.append(Header('Name'))
        heads.append(Header('Durable', Header.YN))
        heads.append(Header('AutoDelete', Header.YN))
        heads.append(Header('Exclusive', Header.YN))
        heads.append(Header('FlowStopped', Header.YN))
        heads.append(Header('FlowStoppedCount', Header.COMMAS))
        heads.append(Header('Consumers', Header.COMMAS))
        heads.append(Header('Bindings', Header.COMMAS))
        rows = []
        rows.append([queue.name, queue.durable, queue.autoDelete, queue.exclusive,
                     queue.flowStopped, queue.flowStoppedCount,
                     queue.consumerCount, queue.bindingCount])
        if config._jsonoutput:
            jsonContainer.append(rows);
        else:
            disp.formattedTable("Properties:", heads, rows)
        print
        heads = []
        heads.append(Header('Property'))
        heads.append(Header('Value'))
        rows = []
        rows.append(['arguments', queue.arguments])
        rows.append(['alt-exchange', queue.altExchange])
        if config._jsonoutput:
            jsonContainer.append(rows);
        else:
            disp.formattedTable("Optional Properties:", heads, rows)
        print
        heads = []
        heads.append(Header('Statistic'))
        heads.append(Header('Messages', Header.COMMAS))
        heads.append(Header('Bytes', Header.COMMAS))
        rows = []
        rows.append(['queue-depth', queue.msgDepth, queue.byteDepth])
        rows.append(['total-enqueues', queue.msgTotalEnqueues, queue.byteTotalEnqueues])
        rows.append(['total-dequeues', queue.msgTotalDequeues, queue.byteTotalDequeues])
        rows.append(['persistent-enqueues', queue.msgPersistEnqueues, queue.bytePersistEnqueues])
        rows.append(['persistent-dequeues', queue.msgPersistDequeues, queue.bytePersistDequeues])
        rows.append(['transactional-enqueues', queue.msgTxnEnqueues, queue.byteTxnEnqueues])
        rows.append(['transactional-dequeues', queue.msgTxnDequeues, queue.byteTxnDequeues])
        rows.append(['flow-to-disk-depth', queue.msgFtdDepth, queue.byteFtdDepth])
        rows.append(['flow-to-disk-enqueues', queue.msgFtdEnqueues, queue.byteFtdEnqueues])
        rows.append(['flow-to-disk-dequeues', queue.msgFtdDequeues, queue.byteFtdDequeues])
        rows.append(['acquires', queue.acquires, None])
        rows.append(['releases', queue.releases, None])
        rows.append(['discards-ttl-expired', queue.discardsTtl, None])
        rows.append(['discards-limit-overflow', queue.discardsOverflow, None])
        rows.append(['discards-ring-overflow', queue.discardsRing, None])
        rows.append(['discards-lvq-replace', queue.discardsLvq, None])
        rows.append(['discards-subscriber-reject', queue.discardsSubscriber, None])
        rows.append(['discards-purged', queue.discardsPurge, None])
        rows.append(['reroutes', queue.reroutes, None])
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable("Statistics:", heads, rows)

    def displaySubscriptions(self):
        """Print one row per subscription, joined with its session and
        connection objects."""
        disp = Display(prefix=" ")
        heads = []
        jsonContainer = []
        heads.append(Header("subscr"))
        heads.append(Header("queue"))
        heads.append(Header("conn"))
        heads.append(Header("procName"))
        heads.append(Header("procId"))
        heads.append(Header("browse", Header.Y))
        heads.append(Header("acked", Header.Y))
        heads.append(Header("excl", Header.Y))
        heads.append(Header("creditMode"))
        heads.append(Header("delivered", Header.COMMAS))
        heads.append(Header("sessUnacked", Header.COMMAS))
        rows = []
        subscriptions = self.broker.getAllSubscriptions()
        sessions = self.getSessionMap()
        connections = self.getConnectionMap()
        for s in subscriptions:
            row = []
            try:
                row.append(s.name)
                row.append(s.queueRef)
                session = sessions[s.sessionRef]
                connection = connections[session.connectionRef]
                row.append(connection.address)
                row.append(connection.remoteProcessName)
                row.append(connection.remotePid)
                row.append(s.browsing)
                row.append(s.acknowledged)
                row.append(s.exclusive)
                row.append(s.creditMode)
                row.append(s.delivered)
                row.append(session.unackedMessages)
                rows.append(row)
            except:
                # A subscription whose session/connection has gone away is
                # silently skipped (broad except kept deliberately).
                pass
        title = "Subscriptions"
        if config._sortcol:
            sorter = Sorter(heads, rows, config._sortcol,
                            config._limit, config._increasing)
            dispRows = sorter.getSorted()
        else:
            dispRows = rows
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable(title, heads, dispRows)

    def displayMemory(self):
        """Print every numeric field of the broker's memory QMF object."""
        disp = Display(prefix=" ")
        heads = [Header('Statistic'), Header('Value', Header.COMMAS)]
        rows = []
        jsonContainer = []
        memory = self.broker.getMemory()
        for k,v in memory.values.items():
            if k != 'name':
                rows.append([k, v])
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable('Broker Memory Statistics:', heads, rows)

    def displayAcl(self):
        """Print ACL module configuration and denial statistics, or a
        notice when the ACL module is not loaded."""
        acl = self.broker.getAcl()
        if not acl:
            print "ACL Policy Module is not installed"
            return
        disp = Display(prefix=" ")
        heads = [Header('Statistic'), Header('Value')]
        rows = []
        jsonContainer = []
        rows.append(['policy-file', acl.policyFile])
        rows.append(['enforcing', YN(acl.enforcingAcl)])
        rows.append(['has-transfer-acls', YN(acl.transferAcl)])
        rows.append(['last-acl-load', TimeLong(acl.lastAclLoad)])
        rows.append(['acl-denials', Commas(acl.aclDenyCount)])
        if config._jsonoutput:
            jsonContainer.append(rows);
            print json.dumps(jsonContainer,sort_keys=True,indent=4, separators=(',', ': '))
        else:
            disp.formattedTable('ACL Policy Statistics:', heads, rows)

    def getExchangeMap(self):
        """Return {exchange-name: exchange-object} for all exchanges."""
        exchanges = self.broker.getAllExchanges()
        emap = {}
        for e in exchanges:
            emap[e.name] = e
        return emap

    def getQueueMap(self):
        """Return {queue-name: queue-object} for all queues."""
        queues = self.broker.getAllQueues()
        qmap = {}
        for q in queues:
            qmap[q.name] = q
        return qmap

    def getSessionMap(self):
        """Return {session-name: session-object} for all sessions."""
        sessions = self.broker.getAllSessions()
        smap = {}
        for s in sessions:
            smap[s.name] = s
        return smap

    def getConnectionMap(self):
        """Return {connection-address: connection-object} for all connections."""
        connections = self.broker.getAllConnections()
        cmap = {}
        for c in connections:
            cmap[c.address] = c
        return cmap

    def displayMain(self, names, main):
        """Dispatch on the single-letter view selector `main`; `names`
        supplies an optional queue name for the 'q' view."""
        if   main == 'g': self.displayBroker()
        elif main == 'c': self.displayConn()
        elif main == 's': self.displaySession()
        elif main == 'e': self.displayExchange()
        elif main == 'q':
            if len(names) >= 1:
                self.displayQueue(names[0])
            else:
                self.displayQueues()
        elif main == 'u': self.displaySubscriptions()
        elif main == 'm': self.displayMemory()
        elif main == 'acl': self.displayAcl()

    def display(self, names):
        """Entry point: render the view selected by config._types."""
        self.displayMain(names, config._types)
def SetBroker(self, brokerUrl):
    """Establish a connection to the broker at brokerUrl and attach a
    QMF BrokerAgent over it."""
    self.url = brokerUrl
    conn = Connection.establish(self.url, **conn_options)
    self.connection = conn
    self.broker = BrokerAgent(conn)
def __init__(self, address, **kwargs):
    """Connect to the broker at `address` as an HA admin client and
    attach a QMF agent; extra kwargs are passed to Connection.establish.

    NOTE(review): the trailing assert is input validation and would be
    stripped under `python -O`; kept as-is to preserve the exception
    type callers may expect.
    """
    # The qpid.ha-admin property marks this connection as an HA admin
    # so a backup broker will accept it.
    props = {"qpid.ha-admin": 1}
    self._connection = Connection.establish(address, client_properties=props, **kwargs)
    self._agent = BrokerAgent(self._connection)
    assert self._agent.getHaBroker(), "HA module not loaded in broker at: %s"%(address)
def setUp(self):
    """Start a broker on a held port (HaPort) and attach a default
    Config and a connected QMF agent for the tests."""
    BrokerTest.setUp(self)
    self.port_holder = HaPort(self)
    broker = self.amqp_broker(port_holder=self.port_holder)
    self.broker = broker
    self.default_config = Config(broker)
    self.agent = BrokerAgent(broker.connect())