async def _rw_handler(self, reader, writer):
    """ This gets called when a connection is established. """
    connection_info = writer.get_extra_info('peername')
    p_host = connection_info[0]
    p_port = connection_info[1]
    bl.info('Connection established from {}:{}'.format(p_host, p_port))

    self.new_file_task = self.loop.create_task(
        self.push_new_file(reader, writer))
    self.data_index_task = self.loop.create_task(
        self.get_data_and_index(reader, writer))

    try:
        await self.new_file_task
        await self.data_index_task
    except Exception as e:
        bl.error("Exception: {}".format(e))
    finally:
        bl.info('Connection terminated')
        writer.close()
def setup_logging(logging_level):
    """ Setup the loggers. """
    gl(logging_level)           # setup gateway logging
    gl.info("Started Gateway logging with level '{}'".format(logging_level))

    bl(logging_level)           # setup backend logging
    bl.info("Started Backend logging with level '{}'".format(logging_level))
def setup_logging(logging_level):
    """ Setup the loggers. """
    cl(logging_level)           # setup core logging
    cl.info("Started Core logging with level '{}'".format(logging_level))

    sl(logging_level)           # setup simulation logging
    sl.info("Started Simulation logging with level '{}'".format(logging_level))

    bl(logging_level)           # setup backend logging
    bl.info("Started Backend logging with level '{}'".format(logging_level))
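The loggers used throughout (cl, sl, bl and, on the gateway side, gl) are called once to set the level and are then used like ordinary logging.Logger instances. They are defined elsewhere in the project; the sketch below shows one way such a callable wrapper could look, with the class name, handler setup and module-level instances chosen here purely for illustration.

import logging

class _LevelSettableLogger:
    """Thin wrapper around logging.Logger that can be called to set the level."""

    def __init__(self, name):
        self._logger = logging.getLogger(name)
        handler = logging.StreamHandler()
        handler.setFormatter(
            logging.Formatter("%(asctime)s %(name)s %(levelname)s %(message)s"))
        self._logger.addHandler(handler)

    def __call__(self, logging_level):
        # accept either a string ("debug") or a numeric logging level
        if isinstance(logging_level, str):
            logging_level = getattr(logging, logging_level.upper())
        self._logger.setLevel(logging_level)

    def __getattr__(self, attr):
        # delegate .info(), .debug(), .error() etc. to the wrapped logger
        return getattr(self._logger, attr)

# hypothetical module-level instances matching the names used above
cl = _LevelSettableLogger("core")
sl = _LevelSettableLogger("simulation")
bl = _LevelSettableLogger("backend")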
def start(self):
    try:
        bl.info('Starting BackendManager on port {}'.format(self.port))
        bl.info("\tConnect the platt backend to {}:{}".format(self.host, self.port))
        self.loop.run_forever()
    except KeyboardInterrupt:
        self.stop()
    finally:
        bl.debug('BackendManager closed')
        self.loop.close()
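For reference, the platt backend reaches the manager over a plain TCP connection; nothing beyond the asyncio streams API is needed on the client side. The helper below is only a minimal connectivity probe, with the function name, host and port chosen as placeholders (the handshake the manager actually expects is shown in _rw_handler further down).

import asyncio

async def probe_backend_manager(host="localhost", port=8009):
    """Open and immediately close a TCP connection to the manager."""
    reader, writer = await asyncio.open_connection(host, port)
    print("connected to BackendManager at {}:{}".format(host, port))
    writer.close()

# asyncio.run(probe_backend_manager())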
def stop(self):
    bl.info("Stopping BackendManager")
    # await self._cancel()
    try:
        self._cancel_new_file_executor_event.set()
    except AttributeError:
        pass
    try:
        self._cancel_file_request_answer_executor_event.set()
    except AttributeError:
        pass
    # pass the callable so the shutdown is scheduled on the event loop's own
    # thread; run_forever() then returns and the loop is closed by its owner
    self._loop.call_soon_threadsafe(self._loop.stop)
def __init__(self, host, port,
             new_file_send_queue,
             get_index_server_event,
             index_data_queue,
             file_name_request_server_queue,
             file_content_name_hash_server_queue,
             shutdown_backend_manager_event):

    bl.info("BackendManager init: {}:{}".format(host, port))

    self._host = host
    self._port = port

    # expose events, pipes and queues to the class
    self._new_file_send_queue = new_file_send_queue
    self._get_index_server_event = get_index_server_event
    self._index_data_queue = index_data_queue
    self._file_name_request_server_queue = file_name_request_server_queue
    self._file_content_name_hash_server_queue = file_content_name_hash_server_queue
    self._shutdown_backend_manager_event = shutdown_backend_manager_event

    # create a server
    self._loop = asyncio.get_event_loop()
    self._coro = asyncio.start_server(
        self._rw_handler, self._host, self._port,
        loop=self._loop, backlog=100)
    self._server = self._loop.run_until_complete(self._coro)

    self._new_file_connection_active = False
    self._index_connection_active = False
    self._file_requests_connection_active = False
    self._file_answers_connection_active = False

    # download data from the ceph manager and store it in a dictionary
    # self._ceph_data_dict = dict()

    # we need a threading lock and not an asyncio lock because we use them
    # in an executor (extra thread)
    self._ceph_data_lock = threading.Lock()

    ceph_data_task = self._loop.create_task(self._ceph_data_coro())
    periodically_delete_ceph_data_task = self._loop.create_task(
        self._periodic_ceph_file_deletion_coro())

    # manage the queue cleanup when there are no active connections
    queue_cleanup_task = self._loop.create_task(self._queue_cleanup_coro())

    shutdown_watch_task = self._loop.create_task(
        self._watch_shutdown_event_coro())

    bl.info("Starting BackendManager")

    try:
        self._loop.run_forever()
    except KeyboardInterrupt:
        pass                    # quiet KeyboardInterrupt
    finally:
        bl.info("BackendManager stopped")
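Constructing a BackendManager blocks in run_forever(), so a caller typically wires up the queues and events first and runs the constructor in its own process. The sketch below mirrors the constructor signature above; the multiprocessing setup, helper name and port value are assumptions for illustration, not taken from this module.

import multiprocessing

def spawn_backend_manager(host="localhost", port=8009):
    # queues and events shared between the manager process and its parent
    new_file_send_queue = multiprocessing.Queue()
    index_data_queue = multiprocessing.Queue()
    file_name_request_server_queue = multiprocessing.Queue()
    file_content_name_hash_server_queue = multiprocessing.Queue()
    get_index_server_event = multiprocessing.Event()
    shutdown_backend_manager_event = multiprocessing.Event()

    # the constructor starts the event loop, so run it in a separate process
    proc = multiprocessing.Process(
        target=BackendManager,
        args=(host, port,
              new_file_send_queue,
              get_index_server_event,
              index_data_queue,
              file_name_request_server_queue,
              file_content_name_hash_server_queue,
              shutdown_backend_manager_event),
        daemon=True)
    proc.start()
    return proc, shutdown_backend_manager_event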
async def _rw_handler(self, reader, writer):
    """ This gets called when a connection is established. """
    connection_info = writer.get_extra_info('peername')
    p_host = connection_info[0]
    p_port = connection_info[1]
    bl.info("Connection open from {}/{}".format(p_host, p_port))

    # perform a handshake with the new connection
    task_dict = await self.read_data(reader, writer)
    if not task_dict:
        await self.send_nack(writer)
        return
    await self.send_ack(writer)

    task = task_dict["task"]
    bl.info("Connection from port {} is tasked with {}".format(p_port, task))

    try:
        # watch the connection
        self.connection_active_task = self._loop.create_task(
            self._connection_active_coro(reader, writer))

        # depending on the task that is to be performed this creates one of
        # several tasks

        # push information about new files to the client
        if task == "new_file_message":
            self._new_file_connection_active = True
            self.send_new_files_task = self._loop.create_task(
                self._new_file_information_coro(reader, writer))
            await self.send_new_files_task

            # watch the connection
            conn_active = await self.connection_active_task
            if not conn_active:
                bl.info("Connection to {}/{} lost".format(p_host, p_port))
                bl.debug("Cancelling send_new_files_task")
                self.send_new_files_task.cancel()
                self._new_file_connection_active = False

        # manage requests for the complete index from the client
        if task == "index":
            self._index_connection_active = True
            self.get_index_task = self._loop.create_task(
                self._index_request_coro(reader, writer))
            await self.get_index_task

            # watch the connection
            conn_active = await self.connection_active_task
            if not conn_active:
                bl.info("Connection to {}/{} lost".format(p_host, p_port))
                bl.debug("Cancelling get_index_task")
                self.get_index_task.cancel()
                self._index_connection_active = False

        # manage requests for file data from the ceph cluster
        if task == "file_download":
            self.file_download_task = self._loop.create_task(
                self._file_download_coro(reader, writer))
            # optional?
            await self.file_download_task

    except Exception as e:
        bl.error("Exception: {}".format(e))
        raise

    finally:
        writer.close()
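read_data, send_ack and send_nack are defined elsewhere in the class and are not shown here. A plausible minimal version, assuming a simple length-prefixed JSON framing (the framing, the 8 byte header and the "ACK"/"NACK" payloads are assumptions, not taken from the source), might look like this.

import asyncio
import json
import struct

async def read_data(self, reader, writer):
    """Read one length-prefixed JSON message and return it as a dict (or None)."""
    try:
        raw_length = await reader.readexactly(8)     # 8 byte length header
        length = struct.unpack("!Q", raw_length)[0]
        payload = await reader.readexactly(length)
        return json.loads(payload.decode("utf-8"))
    except (asyncio.IncompleteReadError, ValueError):
        return None

async def send_ack(self, writer):
    writer.write("ACK".encode())
    await writer.drain()

async def send_nack(self, writer):
    writer.write("NACK".encode())
    await writer.drain()

With this framing a connecting client would announce its task by sending struct.pack("!Q", len(msg)) followed by a JSON payload such as {"task": "index"} and then waiting for the acknowledgement.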
def stop(self):
    self.server.close()
    self.loop.run_until_complete(self.server.wait_closed())
    self.loop.close()
    bl.info('BackendManager stopped')