def index(argv: typing.List[str], args: argparse.Namespace):
    """
    Queue an SQS message to the indexer lambda for each key in object storage
    beginning with `bundles/{prefix}`. If `prefix` is omitted, send a message
    for each key in object storage beginning with `bundles/`
    """
    replica = Replica[args.replica]
    handle = Config.get_blobstore_handle(replica)
    queue_url = get_queue_url("dss-index-operation-" + os.environ['DSS_DEPLOYMENT_STAGE'])

    # "on-change" leaves the decision to the indexer (omit the field);
    # any other value forces an explicit true/false on every message.
    if args.send_notifications == "on-change":
        send_notifications = None
    else:
        send_notifications = (args.send_notifications == "true")

    def _enqueue_prefix(pfx):
        # One messenger per worker thread; flushed on context exit.
        with SQSMessenger(queue_url) as messenger:
            for key in handle.list(replica.bucket, pfx):
                message = dict(replica=replica.name, key=key)
                if send_notifications is not None:
                    message['send_notifications'] = send_notifications
                messenger.send(json.dumps(message))

    # Fan out one listing task per hex character so large buckets are
    # walked concurrently (set() dedupes upper/lower-case hex digits).
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(_enqueue_prefix, f"bundles/{args.prefix}{c}")
                   for c in set(hexdigits.lower())]
        for future in as_completed(futures):
            future.result()
def notify_or_queue(replica: Replica, subscription: dict, metadata_document: dict, key: str):
    """
    Notify or queue for later processing:
        1) For normal bundle: attempt notification, queue on failure
        2) For delete: attempt notification, queue on failure
        3) For versioned tombstone: attempt notification, queue on failure
        4) For unversioned tombstone: Queue one notification per affected bundle version.
           Notifications are not attempted for previously tombstoned versions. Since the
           number of versions is unbounded, inline delivery is not attempted.
    """
    event_type = metadata_document['event_type']
    with SQSMessenger(get_queue_url(notification_queue_name)) as sqsm:
        if _unversioned_tombstone_key_regex.match(key):
            tombstones = set()
            bundles = set()
            key_prefix = key.rsplit(".", 1)[0]  # chop off the tombstone suffix
            # Use a distinct loop variable: the original code rebound the `key`
            # parameter here, shadowing the tombstone key for the rest of the branch.
            for listed_key in _list_prefix(replica, key_prefix):
                if _versioned_tombstone_key_regex.match(listed_key):
                    bundle_key = listed_key.rsplit(".", 1)[0]
                    tombstones.add(bundle_key)
                elif _bundle_key_regex.match(listed_key):
                    bundles.add(listed_key)
            # Queue one notification per live (not previously tombstoned) version.
            for bundle_key in bundles - tombstones:
                sqsm.send(_format_sqs_message(replica, subscription, event_type, bundle_key),
                          delay_seconds=0)
        else:
            # Attempt inline delivery; on failure, queue a retry 15 minutes out.
            if not notify(subscription, metadata_document, key):
                sqsm.send(_format_sqs_message(replica, subscription, event_type, key),
                          delay_seconds=15 * 60)
def test_queue_notification(self):
    """
    End-to-end check of queued notification delivery: create a presigned S3
    POST target, subscribe with it as the callback, push a CREATE message
    onto the notification queue, and verify the delivered payload carries
    the subscription's uuid.
    """
    replica = Replica.aws
    bucket = get_env('DSS_S3_BUCKET_TEST')
    key = f"notification-v2/{uuid4()}"
    # The notification daemon will POST the payload into this presigned slot.
    post = self.s3.generate_presigned_post(
        Bucket=bucket,
        Key=key,
        ExpiresIn=60,
        Fields={'Content-Type': "application/json"},
        Conditions=[{
            'Content-Type': "application/json"
        }])
    subscription = self._put_subscription(
        {
            'payload_form_field': "file",
            'form_fields': post['fields'],
            'callback_url': post['url'],
            'encoding': "multipart/form-data",
        }, replica)
    with SQSMessenger(get_queue_url(
            notify_v2.notification_queue_name)) as mq:
        msg = notify_v2._format_sqs_message(
            replica,
            subscription,
            "CREATE",
            "bundles/a47b90b2-0967-4fbf-87bc-c6c12db3fedf.2017-07-12T055120.037644Z",
        )
        mq.send(msg, delay_seconds=0)
    notification = self._get_notification_from_s3_object(bucket, key)
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
    self.assertEqual(notification['subscription_id'], subscription['uuid'])
def index_keys(argv: typing.List[str], args: argparse.Namespace):
    """
    Queue an SQS message to the indexer lambda for each key in `keys`.
    """
    queue_name = "dss-index-operation-" + os.environ['DSS_DEPLOYMENT_STAGE']
    with SQSMessenger(get_queue_url(queue_name)) as messenger:
        for key in args.keys:
            payload = dict(replica=args.replica, key=key)
            messenger.send(json.dumps(payload))
def trigger_sync(argv: typing.List[str], args: argparse.Namespace):
    """
    Invoke the sync daemon on a set of keys via sqs.
    """
    queue_name = "dss-sync-operation-" + os.environ['DSS_DEPLOYMENT_STAGE']
    with SQSMessenger(get_queue_url(queue_name)) as messenger:
        for key in args.keys:
            payload = dict(source_replica=args.source_replica,
                           dest_replica=args.destination_replica,
                           key=key)
            messenger.send(json.dumps(payload))