def __message_handler(
         self, channel: pika.adapters.blocking_connection.BlockingChannel,
         method: pika.spec.Basic.Deliver,
         properties: pika.spec.BasicProperties, body: bytes) -> None:
     # Ack immediately (at-most-once semantics): the message leaves the queue
     # even if the processing below fails.
     channel.basic_ack(delivery_tag=method.delivery_tag)
     if self.__local_command_event.is_set():
         # Wait until the print-order semaphore is free so console output
         # keeps its intended order, then release it straight away.
         self.__print_order_semaphore.acquire()
         self.__print_order_semaphore.release()
     try:
         message_object = json.loads(body.decode())
     except json.JSONDecodeError:
         print('Payload is not JSON-formatted, parsing stopped')
     else:
         if properties.type == 'private':
             print('<<<{}->{}: {}>>>'.format(message_object['sender'],
                                             message_object['receiver'],
                                             message_object['message']))
             if self.__local_command_event.is_set():
                 self.__print_queue_message_done.set()
         elif properties.type == 'public':
             print('<<<{}->GROUP<{}>: {}>>>'.format(
                 message_object['sender'], message_object['topic'],
                 message_object['message']))
             if self.__local_command_event.is_set():
                 number_finished_thread = self.__global_counter.get()
                 number_finished_thread -= 1
                 if number_finished_thread > 0:
                     self.__global_counter.put(number_finished_thread)
                 elif number_finished_thread == 0:
                     self.__print_topic_message_done.set()
                     self.__global_counter.put(0)
                 else:
                     self.__global_counter.put(0)
         else:
             print('Unrecognized destination type, cannot parse')
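
A handler with this signature is registered through basic_consume and driven by start_consuming. A minimal wiring sketch, assuming a local broker and a queue named 'chat' (handler and queue names are illustrative):

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='chat')
# auto_ack=False because the handler above acknowledges manually.
channel.basic_consume(queue='chat',
                      on_message_callback=message_handler,
                      auto_ack=False)
channel.start_consuming()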
Example 2
def process(channel: pika.adapters.blocking_connection.BlockingChannel,
            method: T.Any, _, body: bytes) -> None:
    data = json.loads(body)
    user_id = data['user_id']
    log.info(f"Received Data from {user_id}")

    status = False
    msg = "OK"
    try:
        status = process_bake_data(user_id)
    except Exception as exc:
        msg = f"ERROR {type(exc)}: {str(exc)}"

    if status:
        channel.basic_ack(method.delivery_tag, False)
    else:
        channel.basic_nack(method.delivery_tag, False, True)

    # An ObjectId can be built directly from a 24-character hex string.
    if mongo.write_log(mongo_client, bson.ObjectId(user_id),
                       status, msg).acknowledged:
        log.info(f"Login Bake on Mongo For User: {user_id}")
    else:
        log.warning(f"WARN: Cant Log Bake on Mongo For User: {user_id}")

    log.info(f"Processed Data from {user_id}")
Example 3
def send_response(channel: pika.adapters.blocking_connection.BlockingChannel,
                  properties: pika.spec.BasicProperties, response):
    if channel.is_open:
        channel.basic_publish(exchange='',
                              routing_key=properties.reply_to,
                              body=response,
                              properties=pika.BasicProperties(
                                  correlation_id=properties.correlation_id))
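
send_response is the server half of an RPC-style exchange: it echoes the caller's correlation_id back on the reply_to queue. A sketch of the matching client publish, assuming an open channel (queue name and payload are illustrative):

import uuid

import pika

correlation_id = str(uuid.uuid4())
channel.basic_publish(
    exchange='',
    routing_key='rpc_queue',  # the server's request queue (hypothetical name)
    body=b'request payload',
    properties=pika.BasicProperties(
        reply_to='amq.rabbitmq.reply-to',  # RabbitMQ's direct reply-to pseudo-queue
        correlation_id=correlation_id))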
Example 4
 def respond(channel: pika.adapters.blocking_connection.BlockingChannel,
             mq_item: MQItem, response: Response):
     channel.basic_publish(exchange='',
                           routing_key=mq_item.reply_to,
                           properties=pika.BasicProperties(
                               correlation_id=mq_item.correlation_id),
                           body=response.encode())
     channel.basic_ack(delivery_tag=mq_item.delivery_tag)
Example 5
 def _publish_message(
         self, channel: pika.adapters.blocking_connection.BlockingChannel,
         message: PublishableMessageVo):
     channel.basic_publish(exchange=self.exchange_name,
                           routing_key=self._get_routing_key(
                               message.channel),
                           body=message.get_json())
     LOGGER.info(f'Message Queued: {message.get_json()}')
Example 6
 def on_message(
         channel: pika.adapters.blocking_connection.BlockingChannel,
         _method: pika.spec.Basic.Deliver,
         _properties: pika.spec.BasicProperties,
         body: bytes) -> None:
     '''Mock on_message callback'''
     message = body.decode()
     assert message == 'value'
     channel.basic_cancel('test-consumer')
Example 7
def receive_callback(
    channel: pika.adapters.blocking_connection.BlockingChannel,
    method: pika.spec.Basic.Deliver,
    properties: pika.spec.BasicProperties,
    body: bytes,
) -> None:
    """Handle received message."""
    LOGGER.debug(channel)
    LOGGER.debug(method)
    LOGGER.debug(properties)
    LOGGER.debug(properties.headers)
    LOGGER.debug(f" [x] Received '{str(body)}'")
    channel.stop_consuming()
Example 8
def ReceiveMessage(ch: pika.adapters.blocking_connection.BlockingChannel,
                   queue: str):
    method_frame, header_frame, body = BasicGetBlockingMessage(ch, queue)
    headers: dict = header_frame.headers
    if 'STANDARD.MessageId' not in headers:
        raise Exception("Standard headers not in message frame!")
    message = json.loads(body)
    print(f"Received message: {message['MessagePayload']}")
    tellMeToExit = message['TellMeToExit']
    ch.basic_ack(method_frame.delivery_tag)
    if tellMeToExit:
        print("I'm told to exit - Bye!")
        exit(0)
Example 9
def new_equation_callback(
        channel: pika.adapters.blocking_connection.BlockingChannel,
        method: pika.spec.Basic.Deliver, properties: pika.spec.BasicProperties,
        body: bytes):
    """
    Called when a new equation is consumed from the line.
    """
    time_before_solving_equation = timer()
    equation = body.decode()
    final_equation = pythonize_equation(equation)
    solution = solve_equation(final_equation)
    time_to_solve_equation = timer() - time_before_solving_equation
    insert_equation_to_db(equation, solution, time_to_solve_equation)
    print(f'New Equation Solved: {equation} - {solution}')
    channel.basic_ack(delivery_tag=method.delivery_tag)
Example 10
    def on_batch_request(
            self, channel: pika.adapters.blocking_connection.BlockingChannel,
            method: pika.spec.Basic.Deliver, properties: pika.BasicProperties,
            body: bytes) -> None:
        t1 = time()
        batch = [
            MQItem(method.delivery_tag, properties.reply_to,
                   properties.correlation_id, json.loads(body))
        ]

        while len(batch) < self.nazgul.batch_size:
            method, properties, body = channel.basic_get(queue=self.queue_name)
            if method:
                batch.append(
                    MQItem(method.delivery_tag, properties.reply_to,
                           properties.correlation_id, json.loads(body)))
            else:
                break

        requests = [mq_item.body for mq_item in batch]
        responses = self.nazgul.process_batch(requests)

        for mq_item, response in zip(batch, responses):
            self.respond(channel, mq_item, response)

        t4 = time()
        LOGGER.debug(
            f"On_batch_request took: {round(t4 - t1, 3)} s. Batch size: {len(batch)}."
        )
Example 11
def setup_queues(channel: pika.adapters.blocking_connection.BlockingChannel, callback: T.Callable) -> None:
    channel.queue_declare(
        queue="bake",
        passive=False,
        durable=True,
        exclusive=False,
        auto_delete=False
    )

    channel.basic_consume(
        queue="bake",
        on_message_callback=callback,
        auto_ack=False,
        exclusive=False
    )

    return None
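
setup_queues only declares the queue and registers the consumer; the caller still owns the connection and the consume loop. A minimal driver sketch, assuming a local broker:

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
setup_queues(channel, process)  # 'process' as in Example 2
channel.start_consuming()       # blocks, dispatching messages to the callback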
Example 12
 def _GetQueueMessagesCount(
         self, channel: pika.adapters.blocking_connection.BlockingChannel,
         queue: str):
     if channel.is_closed:
         return -1
     declare_ok = channel.queue_declare(queue=queue, passive=True)
     return declare_ok.method.message_count
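
Because the declare is passive, the call only inspects an existing queue (it raises through pika if the queue does not exist) and never creates one. A usage sketch from inside the owning class (queue name is illustrative):

pending = self._GetQueueMessagesCount(channel, 'work-queue')
if pending >= 0:
    print(f'{pending} messages waiting')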
Example 13
def producer(channel: pika.adapters.blocking_connection.BlockingChannel):
    i = 1
    while True:
        sleep_time_secs = randrange(1, 11)
        log(f"Producer sleeps for {sleep_time_secs} secs")
        time.sleep(sleep_time_secs)
        with mutex:
            tmp_vector_clock = vector_clock.copy()
            tmp_vector_clock[client_id] = tmp_vector_clock[client_id] + 1
            (packet, msg) = encode_packet(f"{i}-th msg from {client_id}",
                                          tmp_vector_clock)
            log(f"Sending packet: {packet}")
            channel.basic_publish(exchange=EXCHANGE_NAME,
                                  routing_key="",
                                  body=packet)
            deliver_msg(msg)
        i = i + 1
Example 14
 def message_process(
         self, channel: pika.adapters.blocking_connection.BlockingChannel,
         method_frame, header_frame, body):
     """
     Process message. Can be overridden in subclasses.
     :param channel:
     :param method_frame:
     :param header_frame:
     :param body:
     :return:
     """
     LOGGER.info(
         'routing key: {routing_key} | headers: {headers} | body: {body}'.
         format(
             routing_key=method_frame.routing_key,
             headers=header_frame.headers,
             body=body.decode(),
         ))
     channel.basic_ack(method_frame.delivery_tag)
Example 15
    def _OnMessageCallBack(
            self,
            channel: pika.adapters.blocking_connection.BlockingChannel,
            methodFrame: frame.Method,
            headerFrame: frame.Header,
            body: bytes,
            connection: pika.BlockingConnection,
            channelId: str,
            listenerQueue: str,
            topicExchange: str = None,
            directExchange: str = None):
        try:
            self._logger.debug(f"Received new message on channel {channelId}")
            data = self._CreateDefaultDataHolder(connection,
                                                 channel,
                                                 listenerQueue,
                                                 topicExchange=topicExchange,
                                                 directExchange=directExchange)
            data[PikaConstants.DATA_KEY_MESSAGE_HANDLERS] = list(
                self.messageHandlers)
            incomingMessage = {
                PikaConstants.DATA_KEY_METHOD_FRAME: methodFrame,
                PikaConstants.DATA_KEY_HEADER_FRAME: headerFrame,
                PikaConstants.DATA_KEY_BODY: body,
            }
            data[PikaConstants.DATA_KEY_INCOMING_MESSAGE] = incomingMessage

            pikaBus: AbstractPikaBus = self._pikaBusCreateMethod(
                data=data,
                closeChannelOnDelete=False,
                closeConnectionOnDelete=False)
            data[PikaConstants.DATA_KEY_BUS] = pikaBus

            pipelineIterator = iter(self._pipeline)
            PikaSteps.HandleNextStep(pipelineIterator, data)
            self._logger.debug(
                f"Successfully handled message on channel {channelId}")
        except Exception as exception:
            channel.basic_nack(methodFrame.delivery_tag)
            self._logger.exception(
                f"Failed handling message on channel {channelId} - {str(exception)}"
            )
Example 16
def go_publish(
    another_span: wtt.Span,
    friend: str,
    myself: str,
    channel: pika.adapters.blocking_connection.BlockingChannel,
) -> None:
    """Publish a message."""
    msg = f"Hey {friend}, I'm {myself}"

    another_link = wtt.span_to_link(
        another_span,
        {
            "name": "another_span",
            "NOTE":
            "explicitly linking `another_span` isn't necessary, it's `producer-span`'s parent",
            "REASONING":
            "`another_span` is already automatically accessible via the `producer-span`'s `parent_id` pointer",
            "FURTHERMORE":
            "this is just an example of linking multiple spans :D",
        },
    )

    headers = wtt.inject_links_carrier(
        attrs={
            "name": "producer-span",
            "from": myself,
            "to": friend
        },
        addl_links=[another_link],
    )

    channel.basic_publish(
        exchange="",
        routing_key=friend,
        body=msg,
        properties=pika.BasicProperties(headers=headers),
    )

    LOGGER.debug(f" [x] Sent '{msg}'")
Example 17
def receive_messages(
        queue: str, exchange: str, routing_key: str,
        channel: pika.adapters.blocking_connection.BlockingChannel,
        callback: ClientCallback) -> None:
    '''Receive messages from a queue'''

    channel.exchange_declare(exchange=exchange,
                             durable=True,
                             exchange_type='direct')
    channel.queue_bind(exchange=exchange, queue=queue, routing_key=routing_key)

    channel.basic_consume(queue=queue,
                          on_message_callback=callback,
                          auto_ack=True)
    channel.start_consuming()
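
The consumer above binds its queue to a durable direct exchange; a matching producer publishes to the same exchange and routing key. A minimal sketch (exchange and key names are illustrative):

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.exchange_declare(exchange='events', durable=True, exchange_type='direct')
channel.basic_publish(exchange='events', routing_key='user.created', body=b'hello')
connection.close()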
Example 18
    def on_message(
        self,
        channel: pika.adapters.blocking_connection.BlockingChannel,
        method_frame: pika.spec.Basic.Deliver,
        header_frame: pika.spec.BasicProperties,
        body: bytes,
    ) -> None:
        """
        Callback receiving message consumed from AMQ server

        :param channel: Pika object representing channel connected to AMQ server
        :type channel: pika.adapters.blocking_connection.BlockingChannel
        :param method_frame: Pika object representing low level protocol properties
        :type method_frame: pika.spec.Basic.Deliver
        :param header_frame: Pika object representing headers metadata
        :type header_frame: pika.spec.BasicProperties
        :param body: Raw message content
        :type body: bytes
        """

        self.consume_log_count += 1
        source_ip = header_frame.headers["SOURCEIP"]
        match_exclude = any(x in body for x in self.exclude_patterns_bytes)
        if not match_exclude:
            self.udp_publish(body, source_ip)

        # Just for logging every 60 seconds
        # Of course if there's nothing to consume there will be no log at all
        # I thought about writing this using asyncio but too much work for no
        # real improvements
        if self.utc_now - datetime.timedelta(seconds=60) > self.consume_log_dt:
            self.logger.info("Consumed %d messages for the last 60s",
                             self.consume_log_count)
            self.logger.info("Last log entry was: %s from %s", body, source_ip)
            self.consume_log_dt = self.utc_now
            self.consume_log_count = 0

        channel.basic_ack(delivery_tag=method_frame.delivery_tag)
Example 19
 def _CreateDefaultRabbitMqSetup(
         self,
         channel: pika.adapters.blocking_connection.BlockingChannel,
         listenerQueue: str,
         listenerQueueSettings: dict,
         topicExchange: str = None,
         topicExchangeSettings: dict = None,
         directExchange: str = None,
         directExchangeSettings: dict = None,
         subscriptions: Union[List[str], str] = None,
         confirmDelivery: bool = None):
     if confirmDelivery is None:
         confirmDelivery = self._defaultConfirmDelivery
     if confirmDelivery:
         channel.confirm_delivery()
     if topicExchange is None:
         topicExchange = self._defaultTopicExchange
     if topicExchangeSettings is None:
         topicExchangeSettings = self._defaultTopicExchangeSettings
     if directExchange is None:
         directExchange = self._defaultDirectExchange
     if directExchangeSettings is None:
         directExchangeSettings = self._defaultDirectExchangeSettings
     if subscriptions is None:
         subscriptions = self._defaultSubscriptions
     PikaTools.CreateExchange(channel,
                              directExchange,
                              settings=directExchangeSettings)
     PikaTools.CreateExchange(channel,
                              topicExchange,
                              settings=topicExchangeSettings)
     if listenerQueue is not None:
         PikaTools.CreateDurableQueue(channel,
                                      listenerQueue,
                                      settings=listenerQueueSettings)
         PikaTools.BasicSubscribe(channel, topicExchange, subscriptions,
                                  listenerQueue)
Example 20
def BasicGetBlockingMessage(
        ch: pika.adapters.blocking_connection.BlockingChannel,
        queue: str,
        timeoutSec=10):
    method_frame = None
    sleepPause = 0.5
    retry = 0
    maxRetries = int(timeoutSec / sleepPause)
    while method_frame is None and retry < maxRetries:
        result = ch.basic_get(queue)
        method_frame = result[0]
        if method_frame is not None:
            return result
        time.sleep(sleepPause)
        retry += 1
    raise Exception("Could not receive message!")
Example 21
def queue_selection(
    channel: pika.adapters.blocking_connection.BlockingChannel,
    context,
    pg_username: str,
):
    """
    Callback for setting up the handler for messages published on the queue.
    @param channel: RabbitMQ channel in which to consume messages.
    @param context: context required for RabbitMQ to work properly in a thread other than the main thread.
    @param pg_username: name of the queue messages are read from and of the WebSocket room they are written to.
    """
    with context:

        def message_handler(ch, method, properties, body: bytes):
            """
            Callback for handling message consuming and publishing on the WebSocket.
            @param ch:
            @param method:
            @param properties:
            @param body: message body
            """
            with context:
                payload = loads(body)
                msg = Message.from_dict(payload)
                print(
                    f"---\nSender: {msg.sender} -- {msg.send_time}\n{msg.body}\n---\n"
                )
                socket_io.send(dumps(payload), json=True, room=pg_username)

        channel.queue_declare(queue=pg_username, durable=True)

        channel.basic_consume(queue=pg_username,
                              on_message_callback=message_handler,
                              auto_ack=True)

        channel.start_consuming()
Example 22
 def __message_handler(
         self, channel: pika.adapters.blocking_connection.BlockingChannel,
         method: pika.spec.Basic.Deliver,
         properties: pika.spec.BasicProperties, body: bytes) -> None:
     channel.basic_ack(method.delivery_tag)
     self.__connection_established_event.set()
Example 23
def callback(ch: pika.adapters.blocking_connection.BlockingChannel,
             method: pika.spec.Basic.Deliver,
             properties: pika.spec.BasicProperties, body: bytes):
    customizer_name = None
    metadata = None
    output_object_key = None
    callback_url = None

    try:
        body_dict = json.loads(body)
        customizer_name = body_dict['customizer_name']
        metadata = body_dict['metadata']
        output_object_key = body_dict['output_object_key']
        callback_url = body_dict['callback_url']
    except Exception as e:
        logger.error('Invalid message body; %s' % e)
        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=False)
        return

    logger.info(" [x] Received task for: %s" % customizer_name)

    generated_successfully = False
    uploaded_successfully = False
    patched_successfully = False

    # save files locally before doing the union
    fileMappings = {}

    tmpFilesFolder = pathlib.Path('/tmp/customized-mesh').absolute()
    inputFilesFolderPath = tmpFilesFolder.joinpath('files')
    metadataFilePath = tmpFilesFolder.joinpath("metadata_%s.json" %
                                               uuid.uuid4())
    outputFilePath = tmpFilesFolder.joinpath('output_%s.stl' % uuid.uuid4())

    try:
        inputFilesFolderPath.mkdir(parents=True, exist_ok=True)

        for fileId, fileKey in metadata['file_mappings'].items():
            tmpFilePath = inputFilesFolderPath.joinpath(str(uuid.uuid4()))
            res = get_customizer_object(fileKey)
            with tmpFilePath.open('wb') as f:
                f.write(res.read())

            fileMappings[fileId] = str(tmpFilePath)

        metadataWithLocalFiles = {
            'tree': metadata['tree'],
            'file_mappings': fileMappings,
        }

        with metadataFilePath.open('w') as f:
            json.dump(metadataWithLocalFiles, f)

        p = subprocess.run(
            ['python', 'unify.py',
             str(metadataFilePath),
             str(outputFilePath)])

        if p.returncode == 0:
            generated_successfully = True
            logger.info('Mesh generated successfully')
        else:
            logger.error('Mesh generation failed with status code %d' %
                         p.returncode)
    except Exception as e:
        logger.error(e)

    if generated_successfully:
        try:
            with outputFilePath.open('rb') as f:
                put_customizer_object(output_object_key, f.read())
                logger.info('Mesh uploaded successfully. output path: %s' %
                            output_object_key)
                uploaded_successfully = True
        except Exception as e:
            logger.error('There was a problem uploading the file')
            logger.error(e)

    if uploaded_successfully:
        try:
            r = patch_customized_mesh(callback_url,
                                      success=generated_successfully)
            if r.status_code == 200:
                patched_successfully = True
                logger.info(
                    'Mesh patched successfully as %s' %
                    ('successful' if generated_successfully else 'failed'))
        except Exception as e:
            logger.error(e)

    if generated_successfully and uploaded_successfully and patched_successfully:
        ch.basic_ack(delivery_tag=method.delivery_tag)
    else:
        requeue = generated_successfully and (not uploaded_successfully
                                              or not patched_successfully)
        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=requeue)

    try:
        shutil.rmtree(tmpFilesFolder)
    except Exception as e:
        logger.error('Failed to clean up: %s' % e)
Example 24
def nack_message(ch: pika.adapters.blocking_connection.BlockingChannel,
                 delivery_tag):
    if ch.is_open:
        ch.basic_nack(delivery_tag)
Example 25
def declare_exchange_with_queue(ch: pika.adapters.blocking_connection.BlockingChannel, name: str):
    ch.exchange_declare(name, exchange_type="fanout", durable=True)
    ch.queue_declare(name, durable=True)
    ch.queue_bind(name, name)
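
Since the exchange is a fanout, the routing key is ignored on publish; producing to this setup is then just (assuming an open channel; names are illustrative):

declare_exchange_with_queue(ch, 'broadcast')
ch.basic_publish(exchange='broadcast', routing_key='', body=b'fan-out message')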
Example 26
 def _declare_exchange(
         self, channel: pika.adapters.blocking_connection.BlockingChannel):
     channel.exchange_declare(exchange=self.exchange_name,
                              exchange_type=self.exchange_type)