def _cbk_connecting_nodes(self, ch: BlockingChannel, method: Basic.Deliver, _: BasicProperties,
                          body: bytes) -> None:
    """Handle a node announcement arriving on the tracking_node queue.

    Acknowledges the delivery, parses the comma-separated payload,
    refreshes the tracked node's bookkeeping fields, and subscribes to
    that node's dedicated monitoring queue.

    :param ch: channel the message arrived on
    :param method: delivery metadata (used for the ack)
    :param _: message properties (unused)
    :param body: CSV payload:
        ip_addr, aggr_contention, num_workloads, num_of_fg_wls,
        num_of_bg_wls, node_type
    """
    ch.basic_ack(method.delivery_tag)

    arr = body.decode().strip().split(',')
    logger = logging.getLogger('monitoring.tracking_nodes')
    logger.debug(f'{arr} is received from tracking_node queue')

    # Malformed payloads are dropped (the delivery was already acked).
    if len(arr) != 6:
        return

    ip_addr, raw_contention, raw_total, raw_fg, raw_bg, node_type = arr

    tracked_node = self._cluster_nodes[ip_addr]
    tracked_node.aggr_contention = float(raw_contention)
    tracked_node.num_workloads = int(raw_total)
    tracked_node.num_of_fg_wls = int(raw_fg)
    tracked_node.num_of_bg_wls = int(raw_bg)
    tracked_node.node_type = node_type  # either 'gpu' or 'cpu'

    # Begin consuming monitoring messages addressed to this node.
    node_queue_name = f'{tracked_node.node_type}_node_({tracked_node.ip_addr})'
    ch.queue_declare(node_queue_name)
    ch.basic_consume(
        functools.partial(self._cbk_node_monitor, tracked_node),
        node_queue_name)
def _cbk_wl_creation(self, ch: BlockingChannel, method: Basic.Deliver, _: BasicProperties,
                     body: bytes) -> None:
    """Handle a workload-creation announcement.

    Acknowledges the delivery, validates the comma-separated payload,
    registers the new workload as pending, and subscribes to its
    per-workload monitoring queue.

    :param ch: channel the message arrived on
    :param method: delivery metadata (used for the ack)
    :param _: message properties (unused)
    :param body: CSV payload:
        wl_identifier, wl_type, pid, perf_pid, perf_interval
    """
    ch.basic_ack(method.delivery_tag)

    arr = body.decode().strip().split(',')
    logger = logging.getLogger('monitoring.workload_creation')
    logger.debug(f'{arr} is received from workload_creation queue')

    # Malformed payloads are dropped (the delivery was already acked).
    if len(arr) != 5:
        return

    wl_identifier, wl_type, raw_pid, raw_perf_pid, raw_interval = arr
    pid = int(raw_pid)
    perf_pid = int(raw_perf_pid)
    perf_interval = int(raw_interval)
    wl_name = wl_identifier.split('_')[0]

    # The announced process may already be gone; skip registration then.
    if not psutil.pid_exists(pid):
        return

    workload = Workload(wl_name, wl_type, pid, perf_pid, perf_interval)
    if wl_type == 'bg':
        logger.info(f'{workload} is background process')
    else:
        logger.info(f'{workload} is foreground process')

    self._pending_wl.add(workload)

    # Begin consuming monitoring messages for this workload.
    wl_queue_name = f'{wl_name}({pid})'
    ch.queue_declare(wl_queue_name)
    ch.basic_consume(functools.partial(self._cbk_wl_monitor, workload), wl_queue_name)
def target_consuming_executor(task: TargetTask, channel: BlockingChannel) -> NoReturn:
    """
    Consume messages from the task's RabbitMQ queue indefinitely.

    ``start_consuming`` blocks, so this function never returns under
    normal operation; the trailing warning fires only if consumption
    stops (e.g. on an exception inside the pika loop).

    :param task: Parameters for producer (routing key and queue label)
    :param channel: Prepared blocking channel for consuming
    :return: does not return normally
    """
    channel.basic_consume(
        queue=task.queue_label,
        on_message_callback=nxlog_callback,
    )
    channel.start_consuming()
    logger.warning("Should appear only on exception. !!!")
def _cbk_wl_creation(self, ch: BlockingChannel, method: Basic.Deliver, _: BasicProperties,
                     body: bytes) -> None:
    """Handle a workload-creation announcement (extended 8-field format).

    Acknowledges the delivery, validates the comma-separated payload,
    registers the new workload as pending (with the max-workload cap),
    and binds/consumes a dedicated per-workload queue on this host's
    benchmark exchange.

    :param ch: channel the message arrived on
    :param method: delivery metadata (used for the ack)
    :param _: message properties (unused)
    :param body: CSV payload:
        wl_identifier, wl_type, pid, perf_pid, perf_interval,
        tegra_pid, tegra_interval, max_workloads
        (tegra_pid and tegra_interval are parsed but currently unused)
    """
    ch.basic_ack(method.delivery_tag)

    arr = body.decode().strip().split(',')
    logger = logging.getLogger('monitoring.workload_creation')
    logger.debug(f'{arr} is received from workload_creation queue')

    # Malformed payloads are dropped (the delivery was already acked).
    if len(arr) != 8:
        return

    wl_identifier, wl_type, pid, perf_pid, perf_interval, tegra_pid, tegra_interval, max_workloads = arr
    pid = int(pid)
    perf_pid = int(perf_pid)
    perf_interval = int(perf_interval)
    item = wl_identifier.split('_')
    wl_name = item[0]
    max_wls = int(max_workloads)

    # The announced process may already be gone; skip registration then.
    if not psutil.pid_exists(pid):
        return

    workload = Workload(wl_name, wl_type, pid, perf_pid, perf_interval)

    if wl_type == 'bg':
        logger.info(f'{workload} is background process')
    else:
        logger.info(f'{workload} is foreground process')

    self._pending_wl.add(workload, max_wls)

    wl_queue_name = 'rmq-{}-{}({})'.format(self._rmq_host, wl_name, pid)
    # Idempotently (re-)declare the creation fanout exchange.
    ch.exchange_declare(exchange=self._rmq_creation_exchange, exchange_type='fanout')
    ch.queue_declare(wl_queue_name)
    # NOTE(review): _rmq_bench_exchange is overwritten on every message,
    # and the queue is bound to it without a matching exchange_declare in
    # this handler — presumably the benchmark producer declares that
    # exchange; confirm, otherwise queue_bind can fail with 404 NOT_FOUND.
    self._rmq_bench_exchange = f'ex-{self._rmq_host}-{wl_name}({pid})'
    ch.queue_bind(exchange=self._rmq_bench_exchange, queue=wl_queue_name)
    ch.basic_consume(functools.partial(self._cbk_wl_monitor, workload), wl_queue_name)