Пример #1
0
    def _cbk_wl_creation(self, ch: BlockingChannel, method: Basic.Deliver,
                         _: BasicProperties, body: bytes) -> None:
        """Handle one workload-creation message.

        Parses the CSV payload ``identifier,type,pid,perf_pid,perf_interval``,
        registers the workload as pending and starts consuming the
        per-workload metric queue.
        """
        # Ack up front; malformed or stale messages below are simply dropped.
        ch.basic_ack(method.delivery_tag)

        arr = body.decode().strip().split(',')

        logger = logging.getLogger('monitoring.workload_creation')
        logger.debug(f'{arr} is received from workload_creation queue')

        # A well-formed message has exactly five comma-separated fields.
        if len(arr) != 5:
            return

        wl_identifier, wl_type, pid, perf_pid, perf_interval = arr
        pid = int(pid)
        perf_pid = int(perf_pid)
        perf_interval = int(perf_interval)
        # Identifier is '<name>_<suffix>'; only the name part is kept.
        item = wl_identifier.split('_')
        wl_name = item[0]

        # The workload process may already have exited; skip it in that case.
        if not psutil.pid_exists(pid):
            return

        workload = Workload(wl_name, wl_type, pid, perf_pid, perf_interval)
        if wl_type == 'bg':
            logger.info(f'{workload} is background process')
        else:
            logger.info(f'{workload} is foreground process')

        self._pending_wl.add(workload)

        # Per-workload metric queue, e.g. 'name(1234)', fed by the monitor.
        wl_queue_name = '{}({})'.format(wl_name, pid)
        ch.queue_declare(wl_queue_name)
        ch.basic_consume(functools.partial(self._cbk_wl_monitor, workload),
                         wl_queue_name)
Пример #2
0
def ngram_create_consumer(ch: BlockingChannel, method: Basic.Deliver, properties, body: bytes):
    """Consume an n-gram creation message: load the task, run it, ack/nack.

    Returns True when the task succeeded; False otherwise (failed
    deliveries are nacked with ``requeue=False``).
    """
    logger.debug("Recebido: " + body.decode(), extra={"received_args": body})

    try:
        task_info = json.loads(body.decode())
        task_id = task_info["task"]
        logger.debug("Recuperando tarefa: " + task_id)
        ngram_create_task: NGramsCreateTaskModel = NGramsCreateTaskModel.objects(id=task_id).first()
        if ngram_create_task is None:
            raise Exception("Não foi encontrada nenhuma tarefa com o id " + task_id)
    except Exception:
        # Could not decode the message or locate the task: drop it.
        logger.error(
            "Erro ao recuperar a tarefa",
            exc_info=True,
            extra={"received_args": body}
        )
        ch.basic_nack(
            delivery_tag=method.delivery_tag,
            requeue=False
        )
        return False

    succeeded = process_task(ngram_create_task)
    logger.debug("Finalizando tarefa: " + task_info["task"])

    if not succeeded:
        ch.basic_nack(
            delivery_tag=method.delivery_tag,
            requeue=False
        )
        return False

    ch.basic_ack(delivery_tag=method.delivery_tag)
    return True
Пример #3
0
    def _cbk_wl_monitor(self, workload: Workload, ch: BlockingChannel,
                        method: Basic.Deliver, _: BasicProperties,
                        body: bytes) -> None:
        """Append one JSON metric sample to ``workload``'s bounded metric deque.

        GPU counters are only present on integrated-GPU nodes; on CPU-only
        nodes the GPU fields are zeroed.  Metrics arriving on a node of an
        unrecognized type are dropped.
        """
        metric = json.loads(body.decode())
        ch.basic_ack(method.delivery_tag)

        logger = logging.getLogger(f'monitoring.metric.{workload}')

        if self._node_type == NodeType.IntegratedGPU:
            item = BasicMetric(metric['llc_references'], metric['llc_misses'],
                               metric['instructions'], metric['cycles'],
                               metric['gpu_core_util'],
                               metric['gpu_core_freq'], metric['gpu_emc_util'],
                               metric['gpu_emc_freq'], workload.perf_interval)
        elif self._node_type == NodeType.CPU:
            item = BasicMetric(metric['llc_references'], metric['llc_misses'],
                               metric['instructions'], metric['cycles'], 0, 0,
                               0, 0, workload.perf_interval)
        else:
            # BUG FIX: the original left `item` unbound for any other node
            # type and crashed with NameError on appendleft; drop the sample.
            logger.warning(f'unknown node type {self._node_type}; '
                           f'metric dropped')
            return

        logger.debug(f'{metric} is given from {workload}')

        metric_que = workload.metrics

        # Bounded buffer: evict the oldest sample before inserting the newest.
        if len(metric_que) == self._metric_buf_size:
            metric_que.pop()

        metric_que.appendleft(item)
Пример #4
0
    def _cbk_connecting_nodes(self, ch: BlockingChannel, method: Basic.Deliver, _: BasicProperties, body: bytes) \
            -> None:
        """Refresh a tracked cluster node from a tracking_node message.

        Parses the CSV payload, updates the node's contention and workload
        counters, and starts consuming that node's own metric queue.
        """
        ch.basic_ack(method.delivery_tag)

        arr = body.decode().strip().split(',')

        logger = logging.getLogger('monitoring.tracking_nodes')
        logger.debug(f'{arr} is received from tracking_node queue')

        # A well-formed message carries exactly six comma-separated fields.
        if len(arr) != 6:
            return

        ip_addr, aggr_contention, num_workloads, num_of_fg_wls, num_of_bg_wls, node_type = arr
        aggr_contention = float(aggr_contention)
        num_workloads = int(num_workloads)
        num_of_fg_wls = int(num_of_fg_wls)
        num_of_bg_wls = int(num_of_bg_wls)
        # node_type is either 'gpu' or 'cpu'

        # NOTE(review): assumes ip_addr is already registered; an unknown
        # address raises KeyError here — confirm with the caller.
        tracked_node = self._cluster_nodes[ip_addr]
        tracked_node.aggr_contention = aggr_contention
        tracked_node.num_workloads = num_workloads
        tracked_node.num_of_fg_wls = num_of_fg_wls
        tracked_node.num_of_bg_wls = num_of_bg_wls
        tracked_node.node_type = node_type

        # Per-node metric queue, e.g. 'gpu_node_(10.0.0.1)'.
        node_queue_name = '{}_node_({})'.format(tracked_node.node_type,
                                                tracked_node.ip_addr)
        ch.queue_declare(node_queue_name)
        ch.basic_consume(
            functools.partial(self._cbk_node_monitor, tracked_node),
            node_queue_name)
Пример #5
0
        def do_consume(channel: BlockingChannel) -> None:
            """Consume ``queue_name`` in batches of up to ``batch_size`` events.

            Runs until ``self.is_consuming`` is cleared.  A batch is acked
            (multiple=True, up to the last seen delivery tag) only after the
            callback returns; if the callback raises, the whole batch is
            nacked and the exception propagates.
            """
            events: List[Dict[str, Any]] = []
            last_process = time.time()
            max_processed: Optional[int] = None
            self.is_consuming = True

            # This iterator technique will iteratively collect up to
            # batch_size events from the RabbitMQ queue (if present)
            # before calling the callback with the batch.  If not
            # enough events are present, it will sleep for at most
            # timeout seconds before calling the callback with the
            # batch of events it has.
            for method, properties, body in channel.consume(
                    queue_name, inactivity_timeout=timeout):
                # body is None when the inactivity timeout fired with no event.
                if body is not None:
                    assert method is not None
                    events.append(orjson.loads(body))
                    max_processed = method.delivery_tag
                now = time.time()
                if len(events) >= batch_size or (
                        timeout and now >= last_process + timeout):
                    if events:
                        assert max_processed is not None
                        try:
                            callback(events)
                            channel.basic_ack(max_processed, multiple=True)
                        except BaseException:
                            # Return the whole unacked batch to the queue
                            # before propagating (BaseException also covers
                            # KeyboardInterrupt/SystemExit).
                            channel.basic_nack(max_processed, multiple=True)
                            raise
                        events = []
                    last_process = now
                if not self.is_consuming:
                    break
Пример #6
0
    def task_process(self, channel: BlockingChannel, method: Basic.Deliver,
                     properties: BasicProperties, body: bytes) -> None:
        """
        Process the received task: run all of its cases, persist each
        result, publish the task outcome to the reply queue and ack.
        :param channel: channel the task arrived on
        :param method: delivery metadata (used for the ack)
        :param properties: task properties; ``reply_to`` and
            ``correlation_id`` route the response back to the requester
        :param body: UTF-8 JSON body with "cases" and "task" objects
        :return: None
        """
        raw_body = loads(body.decode(encoding="utf-8"))
        cases = raw_body.get("cases", {})
        task = TaskItem(**raw_body.get("task", {}))

        try:
            results = list(self.manager.multi_case_runner(cases=cases))
            for result in results:
                TaskCrud.create_task_result(task, result or {})
            task.set_success(
                msg=f"Task done: {len(results)} out of {len(cases)} cases")
        except Exception as cases_err:
            # Any case failure marks the whole task as errored; the reply
            # below is still published so the requester is unblocked.
            task.set_error(msg=f"Task error: {str(cases_err)}")

        TaskCrud.update_task(task)
        logger.info(msg=f"Done task {task.task_id}")

        # RPC-style response: mirror the correlation id back on reply_to.
        channel.basic_publish(
            exchange="",
            routing_key=properties.reply_to,
            properties=BasicProperties(
                correlation_id=properties.correlation_id),
            body=dumps(task.as_json()).encode(encoding="utf-8"),
        )
        channel.basic_ack(delivery_tag=method.delivery_tag)
Пример #7
0
    async def execute(
        self,
        channel: BlockingChannel,
        method,
        properties,
        body: CallInfo,
    ):
        """Look up and await the coroutine task named by ``body.func``.

        Logs success or failure; unless ``self.auto_ack`` is set, the
        delivery is acked afterwards even when the task raised (see the
        TODO below about dead-lettering).

        :raises Exception: if the registered task is not a coroutine.
        """
        task = self.tasks[body.func]

        if not is_coroutine_callable(task):
            raise Exception(f"{task.__name__} is not coroutine function.")

        func_name = body.func
        file = task.__code__.co_filename
        line = task.__code__.co_firstlineno

        try:
            result = await task.do(**body.kwargs)
            self.logger.info(
                f"[SUCCESS]{file} {line} {func_name}(**{body.kwargs!r})")
        except Exception as e:
            self.logger.warning(
                f"[FAILED]{file} {line} {func_name}(**{body.kwargs!r}) {e}")

            # TODO: after several failures, move the message to a dead-letter
            # queue (for now the failed message is simply dropped via ack).
            # channel.basic_reject(delivery_tag=method.delivery_tag, requeue=False)  # discard the message
            # channel.basic_reject(delivery_tag=method.delivery_tag, requeue=True)  # requeue the message; it would likely run last
            # channel.close()  # closing the channel requeues unacked messages in order, so a persistent failure could block other work
        # Manual-ack mode: confirm the delivery regardless of task outcome.
        if not self.auto_ack:
            channel.basic_ack(delivery_tag=method.delivery_tag)
Пример #8
0
def callback(ch: BlockingChannel, method, properties, body):
    """Consume one 'name:duration' message, simulate work, then ack."""
    parts = body.decode("utf8").split(":")
    print('# [%s] %s 메세지를 받았습니다. \n %r' %
          (datetime.datetime.now(), parts[0], body))

    # Second field encodes the simulated work duration in tenths of a second.
    delay = int(parts[1]) / 10
    time.sleep(delay)
    print(" # [%s] 완료했습니다." % datetime.datetime.now())
    ch.basic_ack(delivery_tag=method.delivery_tag)
Пример #9
0
 def wrapped_consumer(
     ch: BlockingChannel,
     method: Basic.Deliver,
     properties: pika.BasicProperties,
     body: bytes,
 ) -> None:
     """Decode one JSON message, hand it to the callback as a batch of one,
     then ack the delivery.
     """
     event = orjson.loads(body)
     callback([event])
     ch.basic_ack(delivery_tag=method.delivery_tag)
Пример #10
0
 def on_rpc_response(ch: BlockingChannel, method, props, body):
     """Store the RPC reply matching our correlation id, then ack.

     Replies carrying a different correlation id are acked but ignored.
     """
     if props.correlation_id == self.corr_id:
         text = body.decode()
         log_info('GET RPC REPLY',
                  queue=reply_queue,
                  correlation_id=self.corr_id,
                  body=text)
         self.result = jsons.loads(text)
     ch.basic_ack(delivery_tag=method.delivery_tag)
Пример #11
0
 def _on_message(
     channel: BlockingChannel,
     method: Basic.Deliver,
     properties: BasicProperties,
     body: bytes,
 ) -> None:
     """Unpickle one stock quote and forward it to the handler, then ack.

     NOTE(review): pickle.loads must only ever see trusted producer data.
     """
     deserialized: StockQuote = pickle.loads(body)
     handler([deserialized])
     channel.basic_ack(method.delivery_tag)
Пример #12
0
        def opened(channel: BlockingChannel) -> None:
            """Drain every message currently in the queue into ``messages``."""
            while True:
                frame, _props, payload = channel.basic_get(queue_name)

                # basic_get yields (None, None, None) once the queue is empty.
                if payload is None:
                    return

                channel.basic_ack(frame.delivery_tag)
                messages.append(payload)
Пример #13
0
 def _message_callback(self, ch: BlockingChannel, method: Basic.Deliver, _,
                       body: bytes):
     """Decode one message and run the user callback; always ack."""
     try:
         decoded = RabbitMQBrokerConsumer._decode_body(body)
         self._callback(decoded)
     except Exception as e:
         # Failures are logged and swallowed so a single bad message
         # cannot wedge the consumer.
         print(f'Error during handling message {body}: {e}')
     finally:
         ch.basic_ack(delivery_tag=method.delivery_tag)
Пример #14
0
 def on_message(self, ch: BlockingChannel, method, properties, body: str):
     """Process one request and publish the merged result downstream."""
     request = json.loads(body)
     result = self.callback(self.service, request)
     # Response fields override request fields on key collisions.
     merged = {**request, **result}
     self.channel.basic_publish("",
                                self.queue_out,
                                body=json.dumps(merged),
                                mandatory=True)
     ch.basic_ack(delivery_tag=method.delivery_tag)
Пример #15
0
 def wrapped_consumer(ch: BlockingChannel, method: Basic.Deliver,
                      properties: pika.BasicProperties,
                      body: str) -> None:
     """Run ``consumer`` on the delivery: ack on success, nack and
     re-raise on failure.
     """
     try:
         consumer(ch, method, properties, body)
         ch.basic_ack(delivery_tag=method.delivery_tag)
     except Exception:
         ch.basic_nack(delivery_tag=method.delivery_tag)
         # Bare raise preserves the original traceback; the original
         # `raise e` re-anchored it at this frame.
         raise
Пример #16
0
    def on_request(self, channel: BlockingChannel, method, props, body):
        """Execute one RPC request and publish the serialized reply.

        NOTE(review): dill deserialization is only safe on trusted input.
        """
        response = self.execute_request(dill.loads(body))

        reply_props = pika.BasicProperties(correlation_id=props.correlation_id)
        channel.basic_publish(exchange='',
                              routing_key=props.reply_to,
                              properties=reply_props,
                              body=dill.dumps(response))
        channel.basic_ack(delivery_tag=method.delivery_tag)
Пример #17
0
 def req_callback(ch: BlockingChannel, method: spec.Basic.Deliver,
                  _: spec.BasicProperties, body: bytes):
     """Deliver a decoded request to the callback; ack/nack on its verdict."""
     request = load_object(body)
     verdict = callback(request)
     # A callback returning None is treated as success.
     if verdict is None or verdict:
         ch.basic_ack(delivery_tag=method.delivery_tag)
     else:
         ch.basic_nack(delivery_tag=method.delivery_tag)
Пример #18
0
 def wrapped_consumer(ch: BlockingChannel,
                      method: Basic.Deliver,
                      properties: pika.BasicProperties,
                      body: str) -> None:
     """Run ``consumer`` on the delivery: ack on success, nack and
     re-raise on failure.
     """
     try:
         consumer(ch, method, properties, body)
         ch.basic_ack(delivery_tag=method.delivery_tag)
     except Exception:
         ch.basic_nack(delivery_tag=method.delivery_tag)
         # Bare raise keeps the original traceback (`raise e` rewrote it).
         raise
 def raw_answer_callback(ch: BlockingChannel,
                         method: spec.Basic.Deliver,
                         _: spec.BasicProperties, body: bytes):
     """Decode an answer message; ack/nack based on the callback's verdict."""
     answer: AnsBody = load_object(body)
     verdict = answer_callback(answer['id'], answer['ans'])
     # None from the callback counts as success.
     if verdict is None or verdict:
         ch.basic_ack(delivery_tag=method.delivery_tag)
     else:
         ch.basic_nack(delivery_tag=method.delivery_tag)
Пример #20
0
 def ack_message(self, channel: BlockingChannel, delivery_tag: int) -> None:
     """Ack ``delivery_tag`` if the channel is still open; log either way."""
     if not channel.is_open:
         # Acking on a closed channel would raise, so just record it.
         logging.info(
             "%s",
             (f"Message (delivery_tag={delivery_tag}) "
              "NOT acknowledged (channel closed)"),
         )
         return
     logging.info(
         "%s", f"Message (delivery_tag={delivery_tag}) acknowledged")
     channel.basic_ack(delivery_tag)
Пример #21
0
 def cb(ch: BlockingChannel, method: pika.spec.Basic.Deliver,
        properties: pika.spec.BasicProperties, body: bytes):
     """Run the user callback and ack on success; failures are only logged.

     NOTE(review): a failed message is neither acked nor nacked, so it
     stays unacknowledged until the channel closes.
     """
     msg_id = properties.correlation_id
     try:
         callback(ch, method, properties, body)
         ch.basic_ack(delivery_tag=method.delivery_tag)
         self.logger.info(f" [x] Message {msg_id} processed!")
     except Exception:
         self.logger.info(
             f" [-] failed to process message {msg_id}: {traceback.format_exc()}"
         )
Пример #22
0
 def on_rpc_response(ch: BlockingChannel, method, props, body):
     """Record the reply matching our correlation id; ack every delivery."""
     if props.correlation_id == self.corr_id:
         text = body.decode()
         self.result = jsons.loads(text)
         log_entry = {
             'action': 'get_rmq_rpc_reply',
             'queue': reply_queue,
             'correlation_id': self.corr_id,
             'result': self.result
         }
         print(log_entry)
         self.replied = True
     ch.basic_ack(delivery_tag=method.delivery_tag)
Пример #23
0
 def req_callback(ch: BlockingChannel, method: spec.Basic.Deliver,
                  _: spec.BasicProperties, body: bytes):
     """Decode a request, run the request callback, ack/nack on its verdict."""
     request: ReqBody = load_object(body)
     verdict = request_callback(
         request['req'],
         lambda answer: answer_callback(request['id'], answer))
     # A None verdict counts as success.
     if verdict is None or verdict:
         ch.basic_ack(delivery_tag=method.delivery_tag)
     else:
         ch.basic_nack(delivery_tag=method.delivery_tag)
Пример #24
0
    def callback(self, ch: BlockingChannel, method: spec.Basic.Deliver,
                 properties: spec.BasicProperties, body: bytes):
        """Validate and dispatch one JSON action message.

        Malformed JSON or unknown actions are nacked without requeue
        (permanently dropped); a handler returning falsy is nacked with
        the default requeue so the broker redelivers it later.
        """
        log.debug(' -> Received ch: %s meth: %s props: %s body: %s', ch,
                  method, properties, body)
        b = body.decode()
        try:
            # General format: {req_id, action, host}
            act = json.loads(b)  # type: dict
            log.debug(' -> Decoded to %s', act)

            if 'action' not in act:
                raise ValueError('"action" was not in decoded json object...')
            if act['action'] not in self.ACTIONS:
                raise ValueError('Action "{}" is not a valid action...'.format(
                    act['action']))
        except (JSONDecodeError, TypeError, ValueError, AttributeError):
            log.exception('Error decoding json for %s', b)
            # Permanently malformed: drop (no requeue).
            return ch.basic_nack(delivery_tag=method.delivery_tag,
                                 requeue=False)

        try:
            # The handler receives the whole decoded message as kwargs.
            run_act = self.ACTIONS[act['action']](**act)
            if run_act:
                log.debug('Acknowledging success to MQ')
                return ch.basic_ack(delivery_tag=method.delivery_tag)
            else:
                log.debug('Acknowledging failure (try later) to MQ')
                # Default requeue=True: the broker redelivers later.
                return ch.basic_nack(delivery_tag=method.delivery_tag)
        except (InvalidHostException, AttributeError, ValueError) as e:
            log.warning('Invalid host... Type: %s Msg: %s', type(e), str(e))
            return ch.basic_nack(delivery_tag=method.delivery_tag,
                                 requeue=False)
        except Exception:
            log.exception('Unknown exception while handling action call...')
            return ch.basic_nack(delivery_tag=method.delivery_tag)
Пример #25
0
def ner_resume_create_consumer(ch: BlockingChannel, method: Basic.Deliver,
                               properties, body: bytes):
    """Consume a NER-resume-creation task message.

    Loads the task referenced by the message, runs it (unless already
    complete) and acks/nacks the delivery accordingly.

    Returns True on success, False on failure; failed deliveries are
    nacked with ``requeue=False``.
    """
    logger.debug("Recebido: " + body.decode(), extra={"received_args": body})

    try:
        task_info = json.loads(body.decode())
        logger.debug("Recuperando tarefa: " + task_info["task"])
        ner_resume_create_task: NerResumeCreateTaskModel = NerResumeCreateTaskModel.objects(
            id=task_info["task"]).first()
        if ner_resume_create_task is None:
            raise Exception("Não foi encontrada nenhuma tarefa com o id " +
                            task_info["task"])
    except Exception:
        logger.error("Erro ao recuperar a tarefa",
                     exc_info=True,
                     extra={"received_args": body})
        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
        return False

    try:
        # Already-finished tasks are just marked successful again.
        if ner_resume_create_task.status == "success" or \
                ner_resume_create_task.progress == ner_resume_create_task.total:
            ner_resume_create_task.status = "success"
            ner_resume_create_task.save()
            success_task = True
        else:
            success_task = process_task(ner_resume_create_task)
            logger.debug("Finalizando tarefa: " + task_info["task"])

        if success_task:
            ch.basic_ack(delivery_tag=method.delivery_tag)
            return True

        # BUG FIX: previously a falsy success_task fell through without
        # acking or nacking, leaving the delivery unacknowledged forever
        # and implicitly returning None instead of False.
        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
        return False

    except ChannelWrongStateError:
        # The channel died underneath us; nothing can be (n)acked.
        return False

    except Exception:
        ner_resume_create_task.status = "error"
        ner_resume_create_task.save()
        logger.error(
            ner_resume_create_task.error,
            exc_info=True,
            extra={"received_args": ner_resume_create_task.to_mongo()})
        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
        return False
    def handle_consumption_error(
        self,
        ch: BlockingChannel,
        method: Basic.Deliver,
        properties: BasicProperties,
        body: bytes,
        is_store: bool,
    ):
        """Route a failed message to retry or to the dead-letter flow.

        Messages redelivered too many times go to the dead-letter path;
        otherwise they are re-published for retry.  The original delivery
        is always acked afterwards so it leaves the source queue.

        :param is_store: forwarded to the retry path — presumably selects a
            store-specific retry route; confirm against send_to_retry.
        :return: whatever action object the chosen send_* helper returns
        """
        if self.has_been_redelivered_too_much(properties):
            derived_action = self.send_to_dead_letter(ch, method, properties,
                                                      body)
        else:
            derived_action = self.send_to_retry(ch, method, properties, body,
                                                is_store)

        ch.basic_ack(delivery_tag=method.delivery_tag)

        return derived_action
Пример #27
0
    def __on_request(
            self,
            channel: BlockingChannel,
            method: pika.spec.Basic.Deliver,
            properties: pika.BasicProperties,
            body: bytes
    ):
        """Dispatch a JSON request to a registered handler method.

        Expects a JSON body of the form ``{method, payload[, user]}``.
        When ``properties.reply_to`` is set, a ``{statusCode, body}``
        reply is published back RPC-style with the same correlation id;
        otherwise the message is treated as one-way.  The delivery is
        always acked.
        """
        try:
            print('Got message')
            message = json.loads(body.decode())
            print(message)

            # Optional request context forwarded to the handler.
            metadata: dict = {
                'user': message['user'] if 'user' in message else None
            }
            payload: dict = message['payload']

            status_code, response = self.methods[message['method']](payload, metadata)
            print('Response', response)

        except ResponseException as e:
            # Domain error: its status code is part of the reply contract.
            print_exc()
            status_code, response = e.status_code, str(e)
        except Exception as e:
            # Anything else is reported as a 500-style failure.
            print_exc()
            status_code, response = 500, 'Internal Server Error: ' + str(e)

        if properties.reply_to:
            channel.basic_publish(
                exchange='',
                routing_key=properties.reply_to,
                properties=pika.BasicProperties(correlation_id=properties.correlation_id),
                body=json.dumps({
                    'statusCode': status_code,
                    'body': response
                })
            )
        else:
            print('Async, one way, message handled')
        channel.basic_ack(delivery_tag=method.delivery_tag)
        print('Acked')
Пример #28
0
    def _consuming_callback(self, ch: BlockingChannel, method, properties,
                            body):
        """Hand a decoded message to the main thread; ack/reject on its signal.

        Undecodable bodies are logged and acked (dropped).  Otherwise the
        message is queued for the main thread and this consumer thread
        blocks in ``_wait_signal()`` until told to ack or reject.
        """
        try:
            rabbit_message = json.loads(body.decode())
        except json.JSONDecodeError:
            self._logger.error(
                '[RabbitConsumer] Task skipped. JSONDecodeError on "%s"',
                body.decode())
            ch.basic_ack(delivery_tag=method.delivery_tag)
        else:
            self._thread_output.put(rabbit_message)
            self._logger.debug('Got msg: %s', rabbit_message)

            # waiting for main thread
            signal = self._wait_signal()
            if signal is SendAcknowledgeSignal:
                ch.basic_ack(delivery_tag=method.delivery_tag)
            elif signal is RejectSignal:
                ch.basic_reject(delivery_tag=method.delivery_tag)
            else:
                # Any other signal type is a programming error.
                raise Exception('Excuse me what the type?')
Пример #29
0
    def _cbk_job_submission(self, ch: BlockingChannel, method: Basic.Deliver,
                            _: BasicProperties, body: bytes) -> None:
        """Register a newly submitted job parsed from a CSV message body."""
        ch.basic_ack(method.delivery_tag)

        fields = body.decode().strip().split(',')

        logger = logging.getLogger('monitoring.job_submission')
        logger.debug(f'{fields} is received from job_submission queue')

        # Malformed messages (anything but 4 fields) are silently dropped.
        if len(fields) != 4:
            return

        name, kind, preferences, objective = fields
        job = Job(name, kind, preferences, objective)

        role = 'background' if kind == 'bg' else 'foreground'
        logger.info(f'{name} is {role} job')

        self._pending_jobs.add(job)
Пример #30
0
    def _cbk_wl_creation(self, ch: BlockingChannel, method: Basic.Deliver,
                         _: BasicProperties, body: bytes) -> None:
        """Handle one workload-creation message (8-field CSV variant).

        Registers the workload as pending, declares the creation exchange,
        binds a per-workload queue to the workload's benchmark exchange and
        starts consuming its metrics.
        """
        ch.basic_ack(method.delivery_tag)

        arr = body.decode().strip().split(',')

        logger = logging.getLogger('monitoring.workload_creation')
        logger.debug(f'{arr} is received from workload_creation queue')

        # A well-formed message has exactly eight comma-separated fields.
        if len(arr) != 8:
            return

        wl_identifier, wl_type, pid, perf_pid, perf_interval, tegra_pid, tegra_interval, max_workloads = arr
        pid = int(pid)
        perf_pid = int(perf_pid)
        perf_interval = int(perf_interval)
        # NOTE(review): tegra_pid / tegra_interval are parsed but never used.
        # Identifier is '<name>_<suffix>'; only the name part is kept.
        item = wl_identifier.split('_')
        wl_name = item[0]
        max_wls = int(max_workloads)

        # The workload process may already have exited; skip it in that case.
        if not psutil.pid_exists(pid):
            return

        workload = Workload(wl_name, wl_type, pid, perf_pid, perf_interval)
        #workload.check_gpu_task()
        if wl_type == 'bg':
            logger.info(f'{workload} is background process')
        else:
            logger.info(f'{workload} is foreground process')

        # NOTE(review): add() takes (workload, max_wls) — assumes _pending_wl
        # is a custom container, not a plain set; confirm its definition.
        self._pending_wl.add(workload, max_wls)

        # Bind the per-workload queue to this workload's benchmark exchange.
        wl_queue_name = 'rmq-{}-{}({})'.format(self._rmq_host, wl_name, pid)
        ch.exchange_declare(exchange=self._rmq_creation_exchange,
                            exchange_type='fanout')
        ch.queue_declare(wl_queue_name)
        self._rmq_bench_exchange = f'ex-{self._rmq_host}-{wl_name}({pid})'
        ch.queue_bind(exchange=self._rmq_bench_exchange, queue=wl_queue_name)
        ch.basic_consume(functools.partial(self._cbk_wl_monitor, workload),
                         wl_queue_name)
Пример #31
0
    def _cbk_node_monitor(self, node: Node, ch: BlockingChannel,
                          method: Basic.Deliver, _: BasicProperties,
                          body: bytes) -> None:
        """Append one JSON metric sample to ``node``'s bounded metric deque."""
        metric = json.loads(body.decode(
        ))  # Through json format, node monitor can get aggr contention info
        ch.basic_ack(method.delivery_tag)

        # FIXME: Hard coded (200ms as interval)
        item = BasicMetric(metric['llc_references'], metric['llc_misses'],
                           metric['instructions'], metric['cycles'],
                           metric['gpu_core_util'], metric['gpu_core_freq'],
                           metric['gpu_emc_util'], metric['gpu_emc_freq'], 200)

        logger = logging.getLogger(f'monitoring.metric.{node}')
        # NOTE(review): message looks truncated ("given from ") — likely
        # intended to name the source queue/node.
        logger.debug(f'{metric} is given from ')

        metric_que = node.metrics

        # Bounded buffer: drop the oldest sample, newest goes in front.
        if len(metric_que) == self._metric_buf_size:
            metric_que.pop()

        metric_que.appendleft(item)