class RabbitMQCnxFixture(AbstractTestFixture):
    """
    Mock a message over RabbitMQ
    """
    def _get_producer(self):
        # Acquire a producer from the shared pool and remember its connection
        # so the test harness can track/clean up every connection it opened.
        producer = producers[self._mock_rabbit_connection].acquire(block=True, timeout=2)
        self._connections.add(producer.connection)
        return producer

    def setup(self):
        # Note: not a setup_class method, not to conflict with
        # AbstractTestFixture's setup
        self._mock_rabbit_connection = BrokerConnection("pyamqp://*****:*****@localhost:5672")
        self._connections = {self._mock_rabbit_connection}
        # BUG FIX: the keyword was misspelled 'delivry_mode', which kombu
        # silently ignores; 'delivery_mode=2' actually marks messages
        # as persistent, as intended.
        self._exchange = Exchange('navitia', durable=True, delivery_mode=2, type='topic')
        self._mock_rabbit_connection.connect()
        # wait for the cnx to run the test
        self._wait_for_rabbitmq_cnx()

    def teardown(self):
        # we need to release the amqp connection
        self._mock_rabbit_connection.release()

    def _publish(self, item):
        # Publish one item on the 'navitia' topic exchange, declaring the
        # exchange on the way so the test does not depend on broker state.
        with self._get_producer() as producer:
            producer.publish(item, exchange=self._exchange, routing_key=rt_topic,
                             declare=[self._exchange])

    def _make_mock_item(self, *args, **kwargs):
        """
        method to overload to create a mock object to send over rabbitmq
        """
        raise NotImplementedError()

    def send_mock(self, *args, **kwargs):
        # Snapshot the last reload marker, publish the mock item, then wait
        # until kraken reports having reloaded its real-time data.
        status = self.query_region('status')
        last_loaded_data = get_not_null(status['status'], 'last_rt_data_loaded')
        item = self._make_mock_item(*args, **kwargs)
        self._publish(item)
        # we sleep a bit to let kraken reload the data
        self._poll_until_reload(last_loaded_data)

    def _poll_until_reload(self, previous_val):
        """
        poll until the kraken has reloaded its data

        check that the last_rt_data_loaded field is different
        from the first call
        """
        Retrying(stop_max_delay=10 * 1000, wait_fixed=100,
                 retry_on_result=lambda status: get_not_null(status['status'], 'last_rt_data_loaded') == previous_val) \
            .call(lambda: self.query_region('status'))

    def _wait_for_rabbitmq_cnx(self):
        """
        poll until the kraken is connected to rabbitmq

        small timeout because it must not be long
        (otherwise it may be a server configuration problem)
        """
        Retrying(stop_max_delay=1 * 1000, wait_fixed=50,
                 retry_on_result=lambda status: get_not_null(status['status'], 'is_connected_to_rabbitmq') is False) \
            .call(lambda: self.query_region('status'))
def send_end(num):
    """Publish a single "end" marker on the direct 'end' exchange.

    :param num: job/worker identifier appended to the routing key
                ("end<num>") so the matching consumer can pick it up.
    """
    connection = BrokerConnection(hostname='myhost', userid='webfis',
                                  password='******', virtual_host='webfishost',
                                  port=5672)
    try:
        publisher = Publisher(connection=connection, exchange="end",
                              routing_key="end" + str(num),
                              exchange_type="direct")
        # try/finally so the publisher and connection are released even when
        # the broker rejects the publish (the original leaked them on error).
        try:
            publisher.send("end")
        finally:
            publisher.close()
    finally:
        connection.release()
def check_end(num):
    """Non-blocking check for an "end" marker on queue "end<num>".

    :param num: job/worker identifier used to build the queue/routing key.
    :return: True if a message with payload "end" was fetched, else False.

    NOTE(review): the fetched message is not ack'ed here (unlike get_error)
    — presumably intentional, but worth confirming against broker config.
    """
    connection = BrokerConnection(hostname='myhost', userid='webfis',
                                  password='******', virtual_host='webfishost',
                                  port=5672)
    try:
        consumer = Consumer(connection=connection, queue="end" + str(num),
                            exchange="end", routing_key="end" + str(num),
                            exchange_type="direct")
        # try/finally so the consumer and connection are released even when
        # fetch() raises (the original leaked them on error).
        try:
            message = consumer.fetch()
            end = bool(message and message.payload == "end")
        finally:
            consumer.close()
    finally:
        connection.release()
    return end
def main():
    """
    Query the master server and add the results to the database
    """
    usage = 'usage: masterserver.py [options]'
    parser = OptionParser(usage)
    parser.add_option("-d", "--debug",
                      action="store_true", dest="debug", default=False,
                      help="enable debug messages")
    (options, args) = parser.parse_args()
    if len(args) != 0:
        parser.error('incorrect number of arguments')

    logging.basicConfig(level=logging.DEBUG)
    if not options.debug:
        # debug logging is opt-in; silence DEBUG records otherwise
        logging.disable(logging.DEBUG)

    # declare exchange
    server_exchange = Exchange('servers', type='fanout')

    # set up our amqp connection
    connection = BrokerConnection(
        hostname='localhost',
        userid='gamelion',
        password='******',
        virtual_host='/'
    )
    # try/finally so the channel and connection are released even when a
    # master-server query raises (the original skipped cleanup on error).
    try:
        channel = connection.channel()
        try:
            producer = Producer(channel, server_exchange, serializer="pickle")
            # run through all the master servers we know of and ask them for ips
            for server_address in master_servers:
                logging.debug('*' * 60)
                logging.debug('NEW SERVER: %s', str(server_address))
                logging.debug('*' * 60)
                run_full_query(server_address, producer)
        finally:
            channel.close()
    finally:
        connection.release()
def get_error(num):
    """Fetch (and ack) one pending error message for worker *num*.

    :param num: job/worker identifier used as both queue name and routing key.
    :return: the message payload if one was waiting, otherwise the
             sentinel string "wait".
    """
    connection = BrokerConnection(hostname='myhost', userid='webfis',
                                  password='******', virtual_host='webfishost',
                                  port=5672)
    try:
        consumer = Consumer(connection=connection, queue=str(num),
                            exchange="error", routing_key=str(num),
                            exchange_type="direct")
        # try/finally so the consumer and connection are released even when
        # fetch()/ack() raises (the original leaked them on error).
        try:
            message = consumer.fetch()
            if message:
                error = message.payload
                message.ack()
                print("geterror: " + str(error))
            else:
                error = "wait"
        finally:
            consumer.close()
    finally:
        connection.release()
    return error
class ChaosDisruptionsFixture(AbstractTestFixture):
    """
    Mock a chaos disruption message, in order to check the api
    """
    def _get_producer(self):
        # Acquire a producer from the shared pool and remember its connection
        # so the test harness can track/clean up every connection it opened.
        producer = producers[self.mock_chaos_connection].acquire(block=True, timeout=2)
        self._connections.add(producer.connection)
        return producer

    def setup(self):
        self.mock_chaos_connection = BrokerConnection(
            "pyamqp://*****:*****@localhost:5672")
        self._connections = {self.mock_chaos_connection}
        # BUG FIX: the keyword was misspelled 'delivry_mode', which kombu
        # silently ignores; 'delivery_mode=2' actually marks messages
        # as persistent, as intended.
        self._exchange = Exchange('navitia', durable=True, delivery_mode=2, type='topic')
        self.mock_chaos_connection.connect()

    def teardown(self):
        # we need to release the amqp connection
        self.mock_chaos_connection.release()

    def send_chaos_disruption(self, disruption_name, impacted_obj, impacted_obj_type,
                              start=None, end=None, message='default_message',
                              is_deleted=False, blocking=False,
                              start_period="20100412T165200",
                              end_period="20200412T165200"):
        # Build a mock chaos item and publish it on the 'navitia' topic
        # exchange under the chaos real-time routing key.
        item = make_mock_chaos_item(disruption_name, impacted_obj, impacted_obj_type,
                                    start, end, message, is_deleted, blocking,
                                    start_period, end_period)
        with self._get_producer() as producer:
            producer.publish(item, exchange=self._exchange,
                             routing_key=chaos_rt_topic, declare=[self._exchange])

    def send_chaos_disruption_and_sleep(self, disruption_name, impacted_obj,
                                        impacted_obj_type, start=None, end=None,
                                        message='default_message', is_deleted=False,
                                        blocking=False,
                                        start_period="20100412T165200",
                                        end_period="20200412T165200"):
        # Snapshot the last reload marker, publish the disruption, then wait
        # until kraken reports having reloaded its real-time data.
        status = self.query_region('status')
        last_loaded_data = get_not_null(status['status'], 'last_rt_data_loaded')
        self.send_chaos_disruption(disruption_name, impacted_obj, impacted_obj_type,
                                   start, end, message, is_deleted, blocking,
                                   start_period, end_period)
        # we sleep a bit to let kraken reload the data
        self.poll_until_reload(last_loaded_data)

    def poll_until_reload(self, previous_val):
        """
        poll until the kraken has reloaded its data

        check that the last_rt_data_loaded field is different
        from the first call
        """
        Retrying(stop_max_delay=10 * 1000, wait_fixed=100,
                 retry_on_result=lambda status:
                 get_not_null(status['status'], 'last_rt_data_loaded') == previous_val) \
            .call(lambda: self.query_region('status'))

    def wait_for_rabbitmq_cnx(self):
        """
        poll until the kraken is connected to rabbitmq

        small timeout because it must not be long
        (otherwise it may be a server configuration problem)
        """
        Retrying(stop_max_delay=1 * 1000, wait_fixed=50,
                 retry_on_result=lambda status:
                 get_not_null(status['status'], 'is_connected_to_rabbitmq') is False) \
            .call(lambda: self.query_region('status'))