def test_messages_received(subscriber: SubscriberImpl, async_subscriber,
                           message_callback, close_callback):
    message1 = Message(PubsubMessage(message_id="1")._pb, "", 0, None)
    message2 = Message(PubsubMessage(message_id="2")._pb, "", 0, None)

    counter = Box[int]()
    counter.val = 0

    async def on_read() -> Message:
        counter.val += 1
        if counter.val == 1:
            return message1
        if counter.val == 2:
            return message2
        await sleep_forever()

    async_subscriber.read.side_effect = on_read

    results = Queue()
    message_callback.side_effect = lambda m: results.put(m.message_id)

    subscriber.add_close_callback(close_callback)
    subscriber.__enter__()
    assert results.get() == "1"
    assert results.get() == "2"
    subscriber.close()
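The test above relies on two small helpers, Box and sleep_forever, from the library's test utilities. A minimal sketch of plausible implementations, inferred from how they are used here (assumed, not the actual source):

import asyncio
from typing import Generic, TypeVar

T = TypeVar("T")


class Box(Generic[T]):
    """A mutable holder for a single value."""
    val: T


async def sleep_forever() -> None:
    """Suspend the coroutine indefinitely (the event below is never set)."""
    await asyncio.Event().wait()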
Example #2
 def __init__(self,
              *args: Any,
              google_cloud_message: Optional[Message] = None,
              **kwargs: Any) -> None:
     if google_cloud_message:
         self._message = google_cloud_message
         return
     self._message = Message(*args, **kwargs)
Example #3
 def _on_receive(self, message: Message):
     recv_time = int(time.time() * 1000)
     latency_ms = recv_time - int(message.attributes["sendTime"])
     pub_id = int(message.attributes["clientId"])
     sequence_number = int(message.attributes["sequenceNumber"])
     out = MessageAndDuration(pub_id, sequence_number, latency_ms)
     self.metrics_tracker.put(out)
     message.ack()
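For context, the attributes this callback reads (sendTime, clientId, sequenceNumber) would be attached on the publishing side. A hedged sketch using the standard google-cloud-pubsub publisher; the project and topic names are illustrative:

import time

from google.cloud import pubsub_v1

publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path("my-project", "my-topic")  # illustrative

# Pub/Sub attributes must be strings; the subscriber above converts them back.
publisher.publish(
    topic_path,
    b"payload",
    sendTime=str(int(time.time() * 1000)),
    clientId="0",
    sequenceNumber="42",
)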
Example #4
def callback(environment_manager: BackupEnvironmentManager,
             backup_manager: BackupManager, storage_manager: StorageManager,
             message: Message) -> None:
    schema_name = environment_manager.database_schema
    backup_filename = f'{schema_name}.sql'
    backup_manager.create_backup(backup_filename)

    storage_manager.upload_file(backup_filename)
    message.ack()
Example #5
def receive_refresh_request(message: Message):
    """Refresh the token when a refresh request message is received."""
    # Pub/Sub attributes are a mapping, so use subscript access.
    secret_name = message.attributes['secret_name']
    old_version = message.attributes['version']  # string value
    message.ack()

    secret_name, version = update_secret(secret_name, old_version)

    publish_complete_message(secret_name, version)
Example #6
async def test_iterator(
    default_subscriber,
    subscriber_factory,
    multiplexed_client: AsyncSubscriberClientInterface,
):
    read_queues = wire_queues(default_subscriber.read)
    subscription = SubscriptionPath(1, CloudZone.parse("us-central1-a"), "abc")
    message = Message(PubsubMessage(message_id="1")._pb, "", 0, None)
    async with multiplexed_client:
        iterator = await multiplexed_client.subscribe(
            subscription, DISABLED_FLOW_CONTROL
        )
        subscriber_factory.assert_has_calls(
            [call(subscription, None, DISABLED_FLOW_CONTROL)]
        )
        read_fut_1 = asyncio.ensure_future(iterator.__anext__())
        assert not read_fut_1.done()
        await read_queues.called.get()
        default_subscriber.read.assert_has_calls([call()])
        await read_queues.results.put(message)
        assert await read_fut_1 is message
        read_fut_2 = asyncio.ensure_future(iterator.__anext__())
        assert not read_fut_2.done()
        await read_queues.called.get()
        default_subscriber.read.assert_has_calls([call(), call()])
        await read_queues.results.put(FailedPrecondition(""))
        with pytest.raises(FailedPrecondition):
            await read_fut_2
        default_subscriber.__aexit__.assert_called_once()
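wire_queues is a test utility used here and again in Example #16. Judging by its call sites, each invocation of the wired mock signals a "called" queue and then blocks until the test feeds a result (or an exception to raise) through a "results" queue. A plausible sketch, assuming the wired attribute is an AsyncMock:

import asyncio
from dataclasses import dataclass, field


@dataclass
class WiredQueues:
    called: asyncio.Queue = field(default_factory=asyncio.Queue)
    results: asyncio.Queue = field(default_factory=asyncio.Queue)


def wire_queues(mock) -> WiredQueues:
    queues = WiredQueues()

    async def side_effect(*args, **kwargs):
        await queues.called.put(None)        # signal that the mock was called
        result = await queues.results.get()  # wait for the test to respond
        if isinstance(result, Exception):
            raise result
        return result

    mock.side_effect = side_effect
    return queues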
Example #7
def test_parse_json_message():
    attributes = {
        'eventType': 'OBJECT_FINALIZE',
        'bucketId': 'mybucket',
        'objectId': 'myobject',
        'objectGeneration': 1234567,
        'resource': 'projects/_/buckets/mybucket/objects/myobject#1234567',
        'notificationConfig': ('projects/_/buckets/mybucket/'
                               'notificationConfigs/5'),
        'payloadFormat': 'JSON_API_V1'}
    data = (b'{'
            b'  "size": 12345,'
            b'  "contentType": "text/html",'
            b'  "metageneration": 1'
            b'}')
    message = Message(
        mock.Mock(data=data, attributes=attributes),
        MESSAGE_ID,
        mock.Mock())
    assert summarize(message) == (
        '\tEvent type: OBJECT_FINALIZE\n'
        '\tBucket ID: mybucket\n'
        '\tObject ID: myobject\n'
        '\tGeneration: 1234567\n'
        '\tContent type: text/html\n'
        '\tSize: 12345\n'
        '\tMetageneration: 1\n')
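A summarize consistent with this assertion might look as follows (a sketch reconstructed from the expected output, not the sample's actual code):

import json


def summarize(message):
    data = json.loads(message.data)
    return (
        '\tEvent type: {}\n'
        '\tBucket ID: {}\n'
        '\tObject ID: {}\n'
        '\tGeneration: {}\n'
        '\tContent type: {}\n'
        '\tSize: {}\n'
        '\tMetageneration: {}\n').format(
            message.attributes['eventType'],
            message.attributes['bucketId'],
            message.attributes['objectId'],
            message.attributes['objectGeneration'],
            data['contentType'],
            data['size'],
            data['metageneration'])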
Example #8
def test_parse_json_message():
    attributes = {
        "eventType": "OBJECT_FINALIZE",
        "bucketId": "mybucket",
        "objectId": "myobject",
        "objectGeneration": 1234567,
        "resource": "projects/_/buckets/mybucket/objects/myobject#1234567",
        "notificationConfig": ("projects/_/buckets/mybucket/"
                               "notificationConfigs/5"),
        "payloadFormat": "JSON_API_V1",
    }
    data = (b"{"
            b'  "size": 12345,'
            b'  "contentType": "text/html",'
            b'  "metageneration": 1'
            b"}")
    message = Message(mock.Mock(data=data, attributes=attributes), MESSAGE_ID,
                      mock.Mock())
    assert summarize(message) == ("\tEvent type: OBJECT_FINALIZE\n"
                                  "\tBucket ID: mybucket\n"
                                  "\tObject ID: myobject\n"
                                  "\tGeneration: 1234567\n"
                                  "\tContent type: text/html\n"
                                  "\tSize: 12345\n"
                                  "\tMetageneration: 1\n")
Example #9
def ppo_undelivered_mail_to_case(message: Message):
    log = logger.bind(
        message_id=message.message_id,
        subscription_name=PPO_UNDELIVERED_SUBSCRIPTION_NAME,
        subscription_project=PPO_UNDELIVERED_SUBSCRIPTION_PROJECT_ID)

    log.debug('Pub/Sub Message received for processing')

    payload = validate_offline_receipt(
        message.data, log, ['transactionId', 'caseRef', 'productCode'])
    if not payload:
        return  # Failed validation

    tx_id, case_ref, product_code = (payload['transactionId'],
                                     payload['caseRef'],
                                     payload['productCode'])
    date_time = datetime.strptime(
        payload['dateTime'],
        '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc).isoformat()

    log = log.bind(case_ref=case_ref,
                   created=date_time,
                   product_code=product_code,
                   tx_id=tx_id)

    receipt_message = {
        'event': {
            'type': 'UNDELIVERED_MAIL_REPORTED',
            'source': 'RECEIPT_SERVICE',
            'channel': 'PPO',
            'dateTime': date_time,
            'transactionId': tx_id
        },
        'payload': {
            'fulfilmentInformation': {
                'caseRef': case_ref,
                'fulfilmentCode': product_code
            }
        }
    }

    send_message_to_rabbitmq(json.dumps(receipt_message),
                             routing_key=UNDELIVERED_MAIL_ROUTING_KEY)
    message.ack()

    log.debug('Message processing complete')
Example #10
def offline_receipt_to_case(message: Message):
    log = logger.bind(message_id=message.message_id,
                      subscription_name=OFFLINE_SUBSCRIPTION_NAME,
                      subscription_project=OFFLINE_SUBSCRIPTION_PROJECT_ID)

    log.info('Pub/Sub Message received for processing')

    payload = validate_offline_receipt(
        message.data, log, ['transactionId', 'questionnaireId', 'channel'])
    if not payload:
        return  # Failed validation

    tx_id, questionnaire_id, channel = (payload['transactionId'],
                                        payload['questionnaireId'],
                                        payload['channel'])
    time_obj_created = datetime.strptime(
        payload['dateTime'],
        '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc).isoformat()

    log = log.bind(questionnaire_id=questionnaire_id,
                   created=time_obj_created,
                   tx_id=tx_id,
                   channel=channel)

    receipt_message = {
        'event': {
            'type': 'RESPONSE_RECEIVED',
            'source': 'RECEIPT_SERVICE',
            'channel': channel,
            'dateTime': time_obj_created,
            'transactionId': tx_id
        },
        'payload': {
            'response': {
                'questionnaireId': questionnaire_id,
                'unreceipt': payload.get('unreceipt', False)
            }
        }
    }

    send_message_to_rabbitmq(json.dumps(receipt_message))
    message.ack()

    log.info('Message processing complete')
Example #11
def process_message(message: Message) -> None:
    message_data = json.loads(message.data.decode())

    # Keep the message alive while we are processing it
    with MessageKeepAlive(message=message, interval=570):
        input_file = Path(f"./{message_data['name']}")
        output_file = Path(f"./{input_file.name}").with_suffix(".out.mp4")

        # Download the user's input video
        download_input(input_file.name)

        # Process the video with DAIN
        try:
            subprocess.check_call([
                "/usr/bin/python3",
                "run.py",
                "-i",
                input_file.name,
                "-o",
                output_file.name,
                "-ot",
                "video",
                "-a",
                "DAIN",
                "-pt",
                "60fps",
                "-net",
                "DAIN",
            ])
        # If the video can't be processed put the message back in the queue
        except CalledProcessError:
            message.nack()
            return

        # Upload the result
        upload_result(output_file.name, input_file.name)

        # Delete the temporary input/outputs
        input_file.unlink()
        output_file.unlink()

    # Acknowledge the message - we're done processing
    message.ack()
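MessageKeepAlive is not part of google-cloud-pubsub. A minimal sketch of such a context manager, assuming it periodically extends the ack deadline through the real Message.modify_ack_deadline API:

import threading


class MessageKeepAlive:
    """Extend a Pub/Sub message's ack deadline every `interval` seconds."""

    def __init__(self, message, interval: int):
        self._message = message
        self._interval = interval
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._run, daemon=True)

    def _run(self):
        while not self._stop.wait(self._interval):
            self._message.modify_ack_deadline(600)  # 600s is the Pub/Sub maximum

    def __enter__(self):
        self._thread.start()
        return self

    def __exit__(self, *exc_info):
        self._stop.set()
        self._thread.join()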
Example #12
def process_deletion(message: Message):
    firebase_id = message.attributes.get('firebaseID')
    action = message.attributes.get('action', 'delete')  # String

    if action == 'delete':
        try:
            member_instance = CustomUser.objects.get(firebase_id=firebase_id)
            # objects.get() raises DoesNotExist rather than returning None
            if member_instance.is_active:
                delete_update(member_instance)

            print(f"Member with firebase id {firebase_id} is deleted")
            message.ack()
            return firebase_id
        except CustomUser.DoesNotExist:
            print(f"Member with firebase id {firebase_id} does not exist")
    else:
        message.nack()
        return "Error"
Example #13
    def on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, schedule a callback with the executor.
        """
        for msg in response.received_messages:
            _LOGGER.debug('Using %s to process message with ack_id %s.',
                          self._callback, msg.ack_id)
            message = Message(msg.message, msg.ack_id, self._request_queue)
            self._executor.submit(self._callback, message)
Example #14
    def on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, schedule a callback with the executor.
        """
        for msg in response.received_messages:
            logger.debug('New message received from Pub/Sub: %r', msg)
            logger.debug(self._callback)
            message = Message(msg.message, msg.ack_id, self._request_queue)
            future = self._executor.submit(self._callback, message)
            # Note: future.result() blocks until the callback completes.
            logger.debug('Result: %s', future.result())
Example #15
    def on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, schedule a callback with the executor.
        """
        for msg in response.received_messages:
            _LOGGER.debug('New message received from Pub/Sub:\n%r', msg)
            _LOGGER.debug(self._callback)
            message = Message(msg.message, msg.ack_id, self._request_queue)
            future = self._executor.submit(self._callback, message)
            future.add_done_callback(_callback_completed)
Example #16
async def test_delivery_from_multiple(subscriber, assigner,
                                      subscriber_factory):
    assign_queues = wire_queues(assigner.get_assignment)
    async with subscriber:
        await assign_queues.called.get()
        sub1 = mock_async_context_manager(
            MagicMock(spec=AsyncSingleSubscriber))
        sub2 = mock_async_context_manager(
            MagicMock(spec=AsyncSingleSubscriber))
        sub1_queues = wire_queues(sub1.read)
        sub2_queues = wire_queues(sub2.read)
        subscriber_factory.side_effect = (
            lambda partition: sub1 if partition == Partition(1) else sub2)
        await assign_queues.results.put({Partition(1), Partition(2)})
        await sub1_queues.results.put(
            Message(PubsubMessage(message_id="1")._pb, "", 0, None))
        await sub2_queues.results.put(
            Message(PubsubMessage(message_id="2")._pb, "", 0, None))
        message_ids: Set[str] = set()
        message_ids.add((await subscriber.read()).message_id)
        message_ids.add((await subscriber.read()).message_id)
        assert message_ids == {"1", "2"}
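mock_async_context_manager is a small test helper. A plausible sketch: it makes a MagicMock usable with `async with`, entering to the mock itself (MagicMock provides __aenter__/__aexit__ as async mocks on Python 3.8+):

from unittest.mock import MagicMock


def mock_async_context_manager(mock: MagicMock) -> MagicMock:
    mock.__aenter__.return_value = mock
    return mock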
Example #17
def unpack_and_insert(output: BigQueryOutput, message: Message) -> None:
    """Unpack a PubSub message regarding a GCS object change, and insert it into
    a BigQueryOutput.

    Args:
        output (BigQueryOutput): The output to use. In most cases, you will want to use a single
        output object per program.
        message (Message): The PubSub message.
    """
    try:
        LOG.debug("Message data: \n---DATA---\n{}\n---DATA---".format(
            message.data))

        # Decode and deserialize
        message_string = bytes.decode(message.data, "UTF-8")
        object_info = json.loads(message_string)

        # Get important attributes
        event_type = message.attributes['eventType']
        publish_time = message.publish_time.isoformat()
        LOG.info("Got a message: {} {} {}".format(
            publish_time, event_type,
            object_info['bucket'] + "/" + object_info['name']))

        # For deletes, use the publish time to approximate deleted time
        if event_type == "OBJECT_DELETE":
            object_info["timeDeleted"] = publish_time

        # Enqueue for writing
        output.put(object_info)
        message.ack()

    except Exception:
        LOG.exception(
            "Error processing message! ---DATA---\n{}\n---DATA---".format(
                message.data))
        # TODO: A retry / DLQ policy would be useful, if not already present by default.
        message.nack()
Example #18
def test_construct_subscriber_message_from_google_message():
    ack_id = 'some_ack_id'
    delivery_attempt = 0
    request_queue = Queue()

    pubsub_message = PubsubMessage()
    pubsub_message.attributes['style'] = 'cool'
    google_message = Message(pubsub_message, ack_id, delivery_attempt,
                             request_queue)

    subscriber_message = SubscriberMessage.from_google_cloud(google_message)
    assert subscriber_message.ack_id == ack_id
    assert subscriber_message.delivery_attempt is None  # only an int if >0
    assert subscriber_message.attributes['style'] == 'cool'
Example #19
 def _wrap_message(self, message: SequencedMessage.meta.pb) -> Message:
     # Rewrap in the proto-plus-python wrapper for passing to the transform
     rewrapped = SequencedMessage()
     rewrapped._pb = message
     cps_message = self._transformer.transform(rewrapped)
     offset = message.cursor.offset
     ack_id_str = _AckId(self._ack_generation_id, offset).encode()
     self._ack_set_tracker.track(offset)
     self._messages_by_ack_id[ack_id_str] = _SizedMessage(
         cps_message, message.size_bytes)
     wrapped_message = Message(
         cps_message._pb,
         ack_id=ack_id_str,
         delivery_attempt=0,
         request_queue=self._queue,
     )
     return wrapped_message
Example #20
    def _process_message(self, message: pubsub_message.Message) -> None:
        """Processes a single message from Pub/Sub.

    Args:
      message: Message from Pub/Sub.
    """
        # Extract the task proto from the message.
        try:
            task = task_pb2.Task.FromString(message.data)
        except proto_message.DecodeError as e:
            logging.error('Unable to deserialize Task proto: %s', e)
            message.nack()
            return

        # Find the registration, based on the type of proto stored in task.args.
        _, _, full_name = task.args.type_url.partition('/')
        try:
            registration = self._message_type_registry[full_name]
        except KeyError:
            logging.warning('Unknown type of task: %s', task.args.type_url)
            message.nack()
            return

        # Get the args proto.
        args = registration.task_args_class()
        task.args.Unpack(args)

        # Call the registered callback.
        logging.info('Processing task (message_id=%s): %s', message.message_id,
                     text_format.MessageToString(task))
        try:
            registration.callback(args)
        except Exception:  # pylint: disable=broad-except
            logging.exception('Task failed (message_id=%s).',
                              message.message_id)
            message.nack()
        else:
            logging.info('Finished task (message_id=%s).', message.message_id)
            message.ack()
Example #21
 async def read(self) -> Message:
     try:
         message: SequencedMessage = await self.await_unless_failed(
             self._underlying.read())
         cps_message = self._transformer.transform(message)
         offset = message.cursor.offset
         ack_id = _AckId(self._ack_generation_id, offset)
         self._ack_set_tracker.track(offset)
         self._messages_by_ack_id[ack_id.str()] = _SizedMessage(
             cps_message, message.size_bytes)
         wrapped_message = Message(
             cps_message._pb,
             ack_id=ack_id.str(),
             delivery_attempt=0,
             request_queue=self._queue,
         )
         return wrapped_message
     except GoogleAPICallError as e:
         self.fail(e)
         raise
Example #22
    def on_response(self, response):
        """Process all received Pub/Sub messages.

        For each message, send a modified acknowledgement request to the
        server. This prevents expiration of the message due to buffering by
        gRPC or proxy/firewall. This makes the server and client expiration
        timer closer to each other thus preventing the message being
        redelivered multiple times.

        After the messages have all had their ack deadline updated, execute
        the callback for each message using the executor.
        """
        items = [
            base.ModAckRequest(message.ack_id, self.histogram.percentile(99))
            for message in response.received_messages
        ]
        self.modify_ack_deadline(items)
        for msg in response.received_messages:
            _LOGGER.debug('Using %s to process message with ack_id %s.',
                          self._callback, msg.ack_id)
            message = Message(msg.message, msg.ack_id, self._request_queue)
            self._executor.submit(self._callback, message)
Example #23
    def pubsub_callback(message: Message) -> None:
        """Execute a scenario based on the incoming message from the test runner"""
        if TEST_ID not in message.attributes:
            # don't even know how to write back to the publisher that the
            # message is invalid, so nack()
            message.nack()
            return
        test_id: str = message.attributes[TEST_ID]

        if SCENARIO not in message.attributes:
            respond(
                test_id,
                scenarios.Response(
                    status_code=code_pb2.INVALID_ARGUMENT,
                    data=f'Expected attribute "{SCENARIO}" is missing'.encode(),
                ),
            )
            message.ack()
            return

        scenario = message.attributes[SCENARIO]
        handler = scenarios.SCENARIO_TO_HANDLER.get(
            scenario, scenarios.not_implemented_handler
        )

        try:
            res = handler(
                scenarios.Request(
                    test_id=test_id,
                    headers=dict(message.attributes),
                    data=message.data,
                )
            )
        except Exception as e:
            logger.exception("exception trying to handle request")
            res = scenarios.Response(
                status_code=code_pb2.INTERNAL, data=str(e).encode()
            )
        finally:
            respond(test_id, res)
            message.ack()
Example #24
class SubscriberMessage:
    def __init__(self,
                 *args: Any,
                 google_cloud_message: Optional[Message] = None,
                 **kwargs: Any) -> None:
        if google_cloud_message:
            self._message = google_cloud_message
            return
        self._message = Message(*args, **kwargs)

    @staticmethod
    def from_google_cloud(message: Message) -> 'SubscriberMessage':
        return SubscriberMessage(google_cloud_message=message)

    @property
    def google_cloud_message(self) -> Message:
        return self._message

    @property
    def message_id(self) -> str:  # indirects to a Google protobuf field
        return str(self._message.message_id)

    def __repr__(self) -> str:
        return repr(self._message)

    @property
    def attributes(self) -> Any:  # Google .ScalarMapContainer
        return self._message.attributes

    @property
    def data(self) -> bytes:
        return bytes(self._message.data)

    @property
    def publish_time(self) -> datetime.datetime:
        published: datetime.datetime = self._message.publish_time
        return published

    @property
    def ordering_key(self) -> str:
        return str(self._message.ordering_key)

    @property
    def size(self) -> int:
        return int(self._message.size)

    @property
    def ack_id(self) -> str:
        return str(self._message.ack_id)

    @property
    def delivery_attempt(self) -> Optional[int]:
        if self._message.delivery_attempt:
            return int(self._message.delivery_attempt)
        return None

    def ack(self) -> None:
        self._message.ack()

    def drop(self) -> None:
        self._message.drop()

    def modify_ack_deadline(self, seconds: int) -> None:
        self._message.modify_ack_deadline(seconds)

    def nack(self) -> None:
        self._message.nack()
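Typical usage of this wrapper (illustrative): construct it from an existing google-cloud Message inside a subscriber callback, then work with the plain-Python accessors.

def callback(google_message: Message) -> None:
    msg = SubscriberMessage.from_google_cloud(google_message)
    print(msg.message_id, dict(msg.attributes), msg.data)
    msg.ack()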
Example #25
def unpack_and_insert(output: BigQueryOutput, message: Message) -> None:
    """Unpack a PubSub message regarding a GCS object change, and insert it into
    a BigQueryOutput.

    Args:
        output (BigQueryOutput): The output to use. In most cases, you will want to use a single
        output object per program.
        message (Message): The PubSub message.
    """
    bq_client = get_bq_client()
    config = get_config()
    table = get_table(TableDefinitions.INVENTORY,
                      config.get("BIGQUERY", "INVENTORY_TABLE"))
    table_name = table.get_fully_qualified_name()

    try:
        LOG.debug("Message data: \n---DATA---\n{}\n---DATA---".format(
            message.data))

        # Decode and deserialize
        message_string = bytes.decode(message.data, "UTF-8")
        object_info = json.loads(message_string)

        LOG.debug(message)
        LOG.debug(object_info)

        # Get important attributes
        event_type = message.attributes['eventType']
        publish_time = message.publish_time.isoformat()
        LOG.info("Got a message: {} {} {}".format(
            publish_time, event_type,
            object_info['bucket'] + "/" + object_info['name']))

        # For deletes, use the publish time to approximate deleted time
        if event_type == "OBJECT_DELETE":
            object_info["timeDeleted"] = publish_time
            if object_info.get("metadata"):
                object_info["metadata"] = [{
                    "key": k,
                    "value": v
                } for k, v in object_info["metadata"].items()]

        if event_type == "OBJECT_METADATA_UPDATE":

            def generate_structs(arr):
                res = '['
                for s in arr:
                    res += "STRUCT(\"{key}\" as key, \"{value}\" as value),".format(
                        key=s['key'], value=s['value'])
                res = res[:-1]
                res += ']'
                return res

            querytext = "UPDATE `{table_name}`\
                SET metadata = {new_metadata}\
                WHERE id = '{id}'".format(
                table_name=table_name,
                new_metadata=generate_structs([{
                    "key": k,
                    "value": v
                } for k, v in object_info["metadata"].items()]),
                id=object_info["id"])
            LOG.info("Running query: \n%s", querytext)
            query_job = bq_client.query(querytext)
            LOG.info(query_job.result())
        else:
            # Enqueue for writing
            output.put(object_info)

        message.ack()

    except Exception:
        LOG.exception(
            "Error processing message! ---DATA---\n{}\n---DATA---".format(
                message.data))
        # TODO: A retry / DLQ policy would be useful, if not already present by default.
        message.nack()
Example #26
def eq_receipt_to_case(message: Message):
    """
    Callback for handling new pubsub messages which attempts to publish a receipt to the events exchange

    NB: any exception raised by this callback should cause the future manager to nack the message
    :param message: a GCP pubsub subscriber Message
    """
    log = logger.bind(message_id=message.message_id,
                      subscription_name=SUBSCRIPTION_NAME,
                      subscription_project=SUBSCRIPTION_PROJECT_ID)
    try:
        # Only forward on object creation
        if message.attributes['eventType'] != 'OBJECT_FINALIZE':
            log.error('Unknown Pub/Sub Message eventType',
                      eventType=message.attributes['eventType'])
            return
        bucket_name = message.attributes['bucketId']
        object_name = message.attributes['objectId']
    except KeyError as e:
        log.error('Pub/Sub Message missing required attribute',
                  missing_attribute=e.args[0])
        return

    log = log.bind(bucket_name=bucket_name, object_name=object_name)
    log.info('Pub/Sub Message received for processing')

    payload = validate_eq_receipt(message.data, log, ['timeCreated'],
                                  ['tx_id', 'questionnaire_id'])
    if not payload:
        return  # Failed validation

    metadata = payload['metadata']
    tx_id, questionnaire_id, case_id = (metadata['tx_id'],
                                        metadata['questionnaire_id'],
                                        metadata.get('case_id'))
    time_obj_created = parse_datetime(payload['timeCreated']).isoformat()

    log = log.bind(questionnaire_id=questionnaire_id,
                   created=time_obj_created,
                   tx_id=tx_id,
                   case_id=case_id)

    receipt_message = {
        'event': {
            'type': 'RESPONSE_RECEIVED',
            'source': 'RECEIPT_SERVICE',
            'channel': 'EQ',
            'dateTime': time_obj_created,
            'transactionId': tx_id
        },
        'payload': {
            'response': {
                'caseId': case_id,
                'questionnaireId': questionnaire_id,
                'unreceipt': False
            }
        }
    }

    send_message_to_rabbitmq(json.dumps(receipt_message))
    message.ack()

    log.info('Message processing complete')
Example #27
 def callback(message: Message):
     print(f"Received {message}.")
     data = json.loads(message.data.decode("utf-8"))
     sensors.append(data)
     message.ack()
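For completeness, a callback like this is typically wired up with the standard streaming-pull API; the project and subscription names are illustrative:

from google.cloud import pubsub_v1

subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path("my-project", "my-subscription")

streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
try:
    streaming_pull_future.result()  # block until cancelled or failed
except KeyboardInterrupt:
    streaming_pull_future.cancel()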
Example #28
 def callback_wrapper(message: Message) -> None:
     callback(
         EmsMessage(message.ack_id, message.data, message.attributes))
     message.ack()
Example #29
def callback(message: Message):
    print('Received text message: ' + message.data.decode('utf8'))
    message.ack()
Example #30
    def _process_message(self, message: pubsub_message.Message) -> None:
        """Processes a single message from Pub/Sub.

    Args:
      message: Message from Pub/Sub.
    """
        # Extract the task proto from the message.
        try:
            task = task_pb2.Task.FromString(message.data)
        except proto_message.DecodeError as e:
            logging.error('Unable to deserialize Task proto: %s', e)
            # If the message is gibberish, nacking keeps putting it back, wasting
            # resources for no reason. If the message is fine but there's a parsing
            # bug, nacking makes it possible to process the message normally after
            # fixing the bug. If the expected format of the message ever changes in an
            # incompatible way and a message with the new format is sent before the
            # worker is updated, nacking makes it possible to process the message
            # normally after updating the worker.
            message.nack()
            return

        # Find the registration, based on the type of proto stored in task.args.
        _, _, full_name = task.args.type_url.partition('/')
        try:
            registration = self._message_type_registry[full_name]
        except KeyError:
            logging.warning('Unknown type of task: %s', task.args.type_url)
            # If the task has a bogus type, nacking keeps putting it back, wasting
            # resources for no reason. If a new task type is added and those tasks are
            # requested before the worker code is updated, nacking makes it possible
            # to process the tasks after the worker code is updated. If an existing
            # task type is removed from the running worker code before all tasks of
            # that type have been processed, nacking keeps putting it back, wasting
            # resources.
            message.nack()
            return

        # Get the args proto.
        args = registration.task_args_class()
        task.args.Unpack(args)

        # Convert the task to a loggable string.
        try:
            task_string = self._task_to_string(task)
        except Exception:  # pylint: disable=broad-except
            logging.exception(
                'Unable to convert task of type %s to a string for logging.',
                full_name)
            # If self._task_to_string() fails for a reason unrelated to the task
            # itself, nacking makes it possible to process the task once
            # self._task_to_string() is working again. If something about the task
            # makes self._task_to_string() fail consistently, nacking makes it
            # possible to process the task once the bug in self._task_to_string() is
            # fixed. Additionally, users can catch and ignore exceptions in
            # self._task_to_string() itself if they want to always process tasks
            # regardless of whether it's possible to log the contents of the task.
            message.nack()
            return

        # Call the registered callback.
        logging.info('Processing task (message_id=%s):\n%s',
                     message.message_id, task_string)
        try:
            registration.callback(args)
        except Exception:  # pylint: disable=broad-except
            logging.exception('Task failed (message_id=%s).',
                              message.message_id)
            # See the comment above about nacking on self._task_to_string() failures
            # for the considerations here.
            message.nack()
        else:
            logging.info('Finished task (message_id=%s).', message.message_id)
            message.ack()