Example No. 1
    def _on_receive(self, message: Message):
        # Latency is measured against the publisher-supplied sendTime attribute.
        recv_time = int(time.time() * 1000)
        latency_ms = recv_time - int(message.attributes["sendTime"])
        pub_id = int(message.attributes["clientId"])
        sequence_number = int(message.attributes["sequenceNumber"])
        out = MessageAndDuration(pub_id, sequence_number, latency_ms)
        self.metrics_tracker.put(out)
        message.ack()
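Handlers like this one are attached to a subscription with the google-cloud-pubsub streaming-pull API. A minimal wiring sketch, assuming a plain callback function and placeholder project/subscription IDs:

from google.cloud import pubsub_v1

subscriber = pubsub_v1.SubscriberClient()
# Placeholder names; substitute your own project and subscription.
subscription_path = subscriber.subscription_path("my-project", "my-subscription")
streaming_pull_future = subscriber.subscribe(subscription_path, callback=callback)
try:
    streaming_pull_future.result()  # block until the stream errors or is cancelled
except KeyboardInterrupt:
    streaming_pull_future.cancel()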
Example No. 2
def callback(environment_manager: BackupEnvironmentManager,
             backup_manager: BackupManager, storage_manager: StorageManager,
             message: Message) -> None:
    schema_name = environment_manager.database_schema
    backup_filename = f'{schema_name}.sql'
    backup_manager.create_backup(backup_filename)

    storage_manager.upload_file(backup_filename)
    message.ack()
Example No. 3
def receive_refresh_request(message: Message):
    """Refresh the secret when a refresh request is received."""
    # attributes is a dict-like container; use item access, not dot access
    secret_name = message.attributes["secret_name"]
    old_version = message.attributes["version"]  # string
    message.ack()

    secret_name, version = update_secret(secret_name, old_version)

    publish_complete_message(secret_name, version)
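Note that this handler acks before doing the work, so a failure in update_secret silently drops the request. A variant that acks only after the work succeeds, assuming the same helper functions:

def receive_refresh_request_reliable(message: Message):
    secret_name = message.attributes["secret_name"]
    old_version = message.attributes["version"]
    try:
        secret_name, version = update_secret(secret_name, old_version)
        publish_complete_message(secret_name, version)
    except Exception:
        message.nack()  # let Pub/Sub redeliver and retry
        return
    message.ack()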
Example No. 4
def ppo_undelivered_mail_to_case(message: Message):
    log = logger.bind(
        message_id=message.message_id,
        subscription_name=PPO_UNDELIVERED_SUBSCRIPTION_NAME,
        subscription_project=PPO_UNDELIVERED_SUBSCRIPTION_PROJECT_ID)

    log.debug('Pub/Sub Message received for processing')

    payload = validate_offline_receipt(
        message.data, log, ['transactionId', 'caseRef', 'productCode'])
    if not payload:
        return  # Failed validation

    tx_id, case_ref, product_code = payload['transactionId'], payload[
        'caseRef'], payload['productCode']
    date_time = datetime.strptime(
        payload['dateTime'],
        '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc).isoformat()

    log = log.bind(case_ref=case_ref,
                   created=date_time,
                   product_code=product_code,
                   tx_id=tx_id)

    receipt_message = {
        'event': {
            'type': 'UNDELIVERED_MAIL_REPORTED',
            'source': 'RECEIPT_SERVICE',
            'channel': 'PPO',
            'dateTime': date_time,
            'transactionId': tx_id
        },
        'payload': {
            'fulfilmentInformation': {
                'caseRef': case_ref,
                'fulfilmentCode': product_code
            }
        }
    }

    send_message_to_rabbitmq(json.dumps(receipt_message),
                             routing_key=UNDELIVERED_MAIL_ROUTING_KEY)
    message.ack()

    log.debug('Message processing complete')
Example No. 5
def process_deletion(message: Message):
    firebase_id = message.attributes.get('firebaseID')
    action = message.attributes.get('action', 'delete')  # string

    if action == 'delete':
        try:
            member_instance = CustomUser.objects.get(firebase_id=firebase_id)
            if member_instance.is_active:
                delete_update(member_instance)

            print(f"Member with firebase id {firebase_id} is deleted")
            message.ack()
            return firebase_id
        except CustomUser.DoesNotExist:
            print(f"Member with firebase id {firebase_id} does not exist")
            # Ack so the message is not redelivered: a missing user cannot
            # be deleted on retry either.
            message.ack()
    else:
        message.nack()
        return "Error"
Example No. 6
def process_message(message: Message) -> None:
    message_data = json.loads(message.data.decode())

    # Keep the message alive while we are processing it
    with MessageKeepAlive(message=message, interval=570):
        input_file = Path(f"./{message_data['name']}")
        output_file = Path(f"./{input_file.name}").with_suffix(".out.mp4")

        # Download the user's input video
        download_input(input_file.name)

        # Process the video with DAIN
        try:
            subprocess.check_call([
                "/usr/bin/python3",
                "run.py",
                "-i",
                input_file.name,
                "-o",
                output_file.name,
                "-ot",
                "video",
                "-a",
                "DAIN",
                "-pt",
                "60fps",
                "-net",
                "DAIN",
            ])
        # If the video can't be processed put the message back in the queue
        except subprocess.CalledProcessError:
            message.nack()
            return

        # Upload the result
        upload_result(output_file.name, input_file.name)

        # Delete the temporary input/outputs
        input_file.unlink()
        output_file.unlink()

    # Acknowledge the message - we're done processing
    message.ack()
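MessageKeepAlive is not shown in this listing. One plausible implementation, assuming it works by periodically extending the ack deadline with Message.modify_ack_deadline on a background thread:

import threading

class MessageKeepAlive:
    def __init__(self, message: Message, interval: int):
        self._message = message
        self._interval = interval
        self._stop = threading.Event()
        self._thread = threading.Thread(target=self._run, daemon=True)

    def _run(self):
        while not self._stop.wait(self._interval):
            # 600 seconds is the Pub/Sub maximum ack deadline.
            self._message.modify_ack_deadline(600)

    def __enter__(self):
        self._thread.start()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._stop.set()
        self._thread.join()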
Example No. 7
def offline_receipt_to_case(message: Message):
    log = logger.bind(message_id=message.message_id,
                      subscription_name=OFFLINE_SUBSCRIPTION_NAME,
                      subscription_project=OFFLINE_SUBSCRIPTION_PROJECT_ID)

    log.info('Pub/Sub Message received for processing')

    payload = validate_offline_receipt(
        message.data, log, ['transactionId', 'questionnaireId', 'channel'])
    if not payload:
        return  # Failed validation

    tx_id, questionnaire_id, channel = payload['transactionId'], payload[
        'questionnaireId'], payload['channel']
    time_obj_created = datetime.strptime(
        payload['dateTime'],
        '%Y-%m-%dT%H:%M:%S').replace(tzinfo=timezone.utc).isoformat()

    log = log.bind(questionnaire_id=questionnaire_id,
                   created=time_obj_created,
                   tx_id=tx_id,
                   channel=channel)

    receipt_message = {
        'event': {
            'type': 'RESPONSE_RECEIVED',
            'source': 'RECEIPT_SERVICE',
            'channel': channel,
            'dateTime': time_obj_created,
            'transactionId': tx_id
        },
        'payload': {
            'response': {
                'questionnaireId': questionnaire_id,
                'unreceipt': payload.get('unreceipt', False)
            }
        }
    }

    send_message_to_rabbitmq(json.dumps(receipt_message))
    message.ack()

    log.info('Message processing complete')
Example No. 8
    def pubsub_callback(message: Message) -> None:
        """Execute a scenario based on the incoming message from the test runner"""
        if TEST_ID not in message.attributes:
            # don't even know how to write back to the publisher that the
            # message is invalid, so nack()
            message.nack()
            return
        test_id: str = message.attributes[TEST_ID]

        if SCENARIO not in message.attributes:
            respond(
                test_id,
                scenarios.Response(
                    status_code=code_pb2.INVALID_ARGUMENT,
                    data=f'Expected attribute "{SCENARIO}" is missing'.encode(),
                ),
            )
            message.ack()
            return

        scenario = message.attributes[SCENARIO]
        handler = scenarios.SCENARIO_TO_HANDLER.get(
            scenario, scenarios.not_implemented_handler
        )

        try:
            res = handler(
                scenarios.Request(
                    test_id=test_id,
                    headers=dict(message.attributes),
                    data=message.data,
                )
            )
        except Exception as e:
            logger.exception("exception trying to handle request")
            res = scenarios.Response(
                status_code=code_pb2.INTERNAL, data=str(e).encode()
            )
        finally:
            respond(test_id, res)
            message.ack()
Example No. 9
    def _process_message(self, message: pubsub_message.Message) -> None:
        """Processes a single message from Pub/Sub.

        Args:
          message: Message from Pub/Sub.
        """
        # Extract the task proto from the message.
        try:
            task = task_pb2.Task.FromString(message.data)
        except proto_message.DecodeError as e:
            logging.error('Unable to deserialize Task proto: %s', e)
            message.nack()
            return

        # Find the registration, based on the type of proto stored in task.args.
        _, _, full_name = task.args.type_url.partition('/')
        try:
            registration = self._message_type_registry[full_name]
        except KeyError:
            logging.warning('Unknown type of task: %s', task.args.type_url)
            message.nack()
            return

        # Get the args proto.
        args = registration.task_args_class()
        task.args.Unpack(args)

        # Call the registered callback.
        logging.info('Processing task (message_id=%s): %s', message.message_id,
                     text_format.MessageToString(task))
        try:
            registration.callback(args)
        except Exception:  # pylint: disable=broad-except
            logging.exception('Task failed (message_id=%s).',
                              message.message_id)
            message.nack()
        else:
            logging.info('Finished task (message_id=%s).', message.message_id)
            message.ack()
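The registry entries consulted above are not shown. A plausible shape, assuming each registration pairs the generated args class with a callback (names are illustrative):

import dataclasses
from typing import Any, Callable

@dataclasses.dataclass(frozen=True)
class Registration:
    task_args_class: Any              # generated protobuf message class
    callback: Callable[[Any], None]   # handler invoked with the unpacked args

# Registered under the args proto's full name, e.g.:
#   registry[MyArgs.DESCRIPTOR.full_name] = Registration(MyArgs, handle_my_args)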
Example No. 10
def unpack_and_insert(output: BigQueryOutput, message: Message) -> None:
    """Unpack a PubSub message regarding a GCS object change, and insert it into
    a BigQueryOutput.

    Args:
        output (BigQueryOutput): The output to use. In most cases, you will
            want to use a single output object per program.
        message (Message): The PubSub message.
    """
    try:
        LOG.debug("Message data: \n---DATA---\n{}\n---DATA---".format(
            message.data))

        # Decode and deserialize
        message_string = bytes.decode(message.data, "UTF-8")
        object_info = json.loads(message_string)

        # Get important attributes
        event_type = message.attributes['eventType']
        publish_time = message.publish_time.isoformat()
        LOG.info("Got a message: {} {} {}".format(
            publish_time, event_type,
            object_info['bucket'] + "/" + object_info['name']))

        # For deletes, use the publish time to approximate deleted time
        if event_type == "OBJECT_DELETE":
            object_info["timeDeleted"] = publish_time

        # Enqueue for writing
        output.put(object_info)
        message.ack()

    except Exception:
        LOG.exception(
            "Error processing message! ---DATA---\n{}\n---DATA---".format(
                message.data))
        # TODO: A retry / DLQ policy would be useful, if not already present by default.
        message.nack()
Example No. 11
    def _process_message(self, message: pubsub_message.Message) -> None:
        """Processes a single message from Pub/Sub.

        Args:
          message: Message from Pub/Sub.
        """
        # Extract the task proto from the message.
        try:
            task = task_pb2.Task.FromString(message.data)
        except proto_message.DecodeError as e:
            logging.error('Unable to deserialize Task proto: %s', e)
            # If the message is gibberish, nacking keeps putting it back, wasting
            # resources for no reason. If the message is fine but there's a parsing
            # bug, nacking makes it possible to process the message normally after
            # fixing the bug. If the expected format of the message ever changes in an
            # incompatible way and a message with the new format is sent before the
            # worker is updated, nacking makes it possible to process the message
            # normally after updating the worker.
            message.nack()
            return

        # Find the registration, based on the type of proto stored in task.args.
        _, _, full_name = task.args.type_url.partition('/')
        try:
            registration = self._message_type_registry[full_name]
        except KeyError:
            logging.warning('Unknown type of task: %s', task.args.type_url)
            # If the task has a bogus type, nacking keeps putting it back, wasting
            # resources for no reason. If a new task type is added and those tasks are
            # requested before the worker code is updated, nacking makes it possible
            # to process the tasks after the worker code is updated. If an existing
            # task type is removed from the running worker code before all tasks of
            # that type have been processed, nacking keeps putting it back, wasting
            # resources.
            message.nack()
            return

        # Get the args proto.
        args = registration.task_args_class()
        task.args.Unpack(args)

        # Convert the task to a loggable string.
        try:
            task_string = self._task_to_string(task)
        except Exception:  # pylint: disable=broad-except
            logging.exception(
                'Unable to convert task of type %s to a string for logging.',
                full_name)
            # If self._task_to_string() fails for a reason unrelated to the task
            # itself, nacking makes it possible to process the task once
            # self._task_to_string() is working again. If something about the task
            # makes self._task_to_string() fail consistently, nacking makes it
            # possible to process the task once the bug in self._task_to_string() is
            # fixed. Additionally, users can catch and ignore exceptions in
            # self._task_to_string() itself if they want to always process tasks
            # regardless of whether it's possible to log the contents of the task.
            message.nack()
            return

        # Call the registered callback.
        logging.info('Processing task (message_id=%s):\n%s',
                     message.message_id, task_string)
        try:
            registration.callback(args)
        except Exception:  # pylint: disable=broad-except
            logging.exception('Task failed (message_id=%s).',
                              message.message_id)
            # See the comment above about nacking on self._task_to_string() failures
            # for the considerations here.
            message.nack()
        else:
            logging.info('Finished task (message_id=%s).', message.message_id)
            message.ack()
Example No. 12
def eq_receipt_to_case(message: Message):
    """
    Callback for handling new pubsub messages which attempts to publish a receipt to the events exchange

    NB: any exceptions raised by this callback should nack the message by the future manager
    :param message: a GCP pubsub subscriber Message
    """
    log = logger.bind(message_id=message.message_id,
                      subscription_name=SUBSCRIPTION_NAME,
                      subscription_project=SUBSCRIPTION_PROJECT_ID)
    try:
        if message.attributes[
                'eventType'] != 'OBJECT_FINALIZE':  # only forward on object creation
            log.error('Unknown Pub/Sub Message eventType',
                      eventType=message.attributes['eventType'])
            return
        bucket_name, object_name = message.attributes[
            'bucketId'], message.attributes['objectId']
    except KeyError as e:
        log.error('Pub/Sub Message missing required attribute',
                  missing_attribute=e.args[0])
        return

    log = log.bind(bucket_name=bucket_name, object_name=object_name)
    log.info('Pub/Sub Message received for processing')

    payload = validate_eq_receipt(message.data, log, ['timeCreated'],
                                  ['tx_id', 'questionnaire_id'])
    if not payload:
        return  # Failed validation

    metadata = payload['metadata']
    tx_id, questionnaire_id, case_id = metadata['tx_id'], metadata[
        'questionnaire_id'], metadata.get('case_id')
    time_obj_created = parse_datetime(payload['timeCreated']).isoformat()

    log = log.bind(questionnaire_id=questionnaire_id,
                   created=time_obj_created,
                   tx_id=tx_id,
                   case_id=case_id)

    receipt_message = {
        'event': {
            'type': 'RESPONSE_RECEIVED',
            'source': 'RECEIPT_SERVICE',
            'channel': 'EQ',
            'dateTime': time_obj_created,
            'transactionId': tx_id
        },
        'payload': {
            'response': {
                'caseId': case_id,
                'questionnaireId': questionnaire_id,
                'unreceipt': False
            }
        }
    }

    send_message_to_rabbitmq(json.dumps(receipt_message))
    message.ack()

    log.info('Message processing complete')
Example No. 13
def unpack_and_insert(output: BigQueryOutput, message: Message) -> None:
    """Unpack a PubSub message regarding a GCS object change, and insert it into
    a BigQueryOutput.

    Args:
        output (BigQueryOutput): The output to use. In most cases, you will
            want to use a single output object per program.
        message (Message): The PubSub message.
    """
    bq_client = get_bq_client()
    config = get_config()
    table = get_table(TableDefinitions.INVENTORY,
                      config.get("BIGQUERY", "INVENTORY_TABLE"))
    table_name = table.get_fully_qualified_name()

    try:
        LOG.debug("Message data: \n---DATA---\n{}\n---DATA---".format(
            message.data))

        # Decode and deserialize
        message_string = bytes.decode(message.data, "UTF-8")
        object_info = json.loads(message_string)

        LOG.debug(message)
        LOG.debug(object_info)

        # Get important attributes
        event_type = message.attributes['eventType']
        publish_time = message.publish_time.isoformat()
        LOG.info("Got a message: {} {} {}".format(
            publish_time, event_type,
            object_info['bucket'] + "/" + object_info['name']))

        # For deletes, use the publish time to approximate deleted time
        if event_type == "OBJECT_DELETE":
            object_info["timeDeleted"] = publish_time
            if object_info.get("metadata"):
                object_info["metadata"] = [{
                    "key": k,
                    "value": v
                } for k, v in object_info["metadata"].items()]

        if event_type == "OBJECT_METADATA_UPDATE":

            def generate_structs(arr):
                # Build an array literal of STRUCT(key, value) pairs;
                # joining also handles the empty-array case cleanly.
                structs = [
                    'STRUCT("{key}" as key, "{value}" as value)'.format(
                        key=s['key'], value=s['value']) for s in arr
                ]
                return '[' + ', '.join(structs) + ']'

            querytext = "UPDATE `{table_name}`\
                SET metadata = {new_metadata}\
                WHERE id = '{id}'".format(
                table_name=table_name,
                new_metadata=generate_structs([{
                    "key": k,
                    "value": v
                } for k, v in object_info["metadata"].items()]),
                id=object_info["id"])
            LOG.info("Running query: \n%s", querytext)
            query_job = bq_client.query(querytext)
            LOG.info(query_job.result())
        else:
            # Enqueue for writing
            output.put(object_info)

        message.ack()

    except Exception:
        LOG.exception(
            "Error processing message! ---DATA---\n{}\n---DATA---".format(
                message.data))
        # TODO: A retry / DLQ policy would be useful, if not already present by default.
        message.nack()
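Building the UPDATE statement by string interpolation is brittle (quoting, injection). The same update can be written with query parameters; a sketch assuming the google-cloud-bigquery client (the table name itself cannot be parameterized):

from google.cloud import bigquery

def update_metadata(bq_client, table_name, object_id, metadata):
    query = "UPDATE `{}` SET metadata = @new_metadata WHERE id = @id".format(table_name)
    job_config = bigquery.QueryJobConfig(query_parameters=[
        bigquery.ArrayQueryParameter(
            "new_metadata", "STRUCT",
            [bigquery.StructQueryParameter(
                None,
                bigquery.ScalarQueryParameter("key", "STRING", k),
                bigquery.ScalarQueryParameter("value", "STRING", v))
             for k, v in metadata.items()]),
        bigquery.ScalarQueryParameter("id", "STRING", object_id),
    ])
    bq_client.query(query, job_config=job_config).result()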
Example No. 14
def callback(message: Message):
    print('Receive text message: ' + message.data.decode('utf8'))
    message.ack()
Example No. 15
class SubscriberMessage:
    def __init__(self,
                 *args: Any,
                 google_cloud_message: Optional[Message] = None,
                 **kwargs: Any) -> None:
        if google_cloud_message is not None:
            self._message = google_cloud_message
            return
        self._message = Message(*args, **kwargs)

    @staticmethod
    def from_google_cloud(message: Message) -> 'SubscriberMessage':
        return SubscriberMessage(google_cloud_message=message)

    @property
    def google_cloud_message(self) -> Message:
        return self._message

    @property
    def message_id(self) -> str:  # indirects to a Google protobuf field
        return str(self._message.message_id)

    def __repr__(self) -> str:
        return repr(self._message)

    @property
    def attributes(self) -> Any:  # Google .ScalarMapContainer
        return self._message.attributes

    @property
    def data(self) -> bytes:
        return bytes(self._message.data)

    @property
    def publish_time(self) -> datetime.datetime:
        published: datetime.datetime = self._message.publish_time
        return published

    @property
    def ordering_key(self) -> str:
        return str(self._message.ordering_key)

    @property
    def size(self) -> int:
        return int(self._message.size)

    @property
    def ack_id(self) -> str:
        return str(self._message.ack_id)

    @property
    def delivery_attempt(self) -> Optional[int]:
        if self._message.delivery_attempt:
            return int(self._message.delivery_attempt)
        return None

    def ack(self) -> None:
        self._message.ack()

    def drop(self) -> None:
        self._message.drop()

    def modify_ack_deadline(self, seconds: int) -> None:
        self._message.modify_ack_deadline(seconds)

    def nack(self) -> None:
        self._message.nack()
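A typical use of this wrapper is to adapt the raw client message at the subscription boundary; a brief sketch with an illustrative handler name:

def callback_adapter(raw_message: Message) -> None:
    message = SubscriberMessage.from_google_cloud(raw_message)
    handle(message)  # application-level handler; illustrative
    message.ack()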
Example No. 16
def callback_wrapper(message: Message) -> None:
    callback(
        EmsMessage(message.ack_id, message.data, message.attributes))
    message.ack()
Example No. 17
def callback(message: Message):
    print(f"Received {message}.")
    data = json.loads(message.data.decode("utf-8"))
    sensors.append(data)
    message.ack()
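When handlers do substantial work per message, it is common to bound how many messages are outstanding at once with flow control; a minimal sketch with google-cloud-pubsub, reusing the wiring from the first example:

from google.cloud import pubsub_v1

flow_control = pubsub_v1.types.FlowControl(max_messages=10)
subscriber = pubsub_v1.SubscriberClient()
subscriber.subscribe(subscription_path, callback=callback, flow_control=flow_control)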