Example #1
def launch_from_forwarded_event(event, context):
    """Handle forwarded Google Cloud Storage object notifications (each record
    body is a JSON SNS envelope whose Message is itself a JSON storage
    notification) and start a replication state-machine execution for every
    configured destination replica. Returns a dict mapping execution names to
    execution ARNs."""
    executions = {}
    for event_record in event["Records"]:
        message = json.loads(json.loads(event_record["body"])["Message"])
        if message['resourceState'] == "not_exists":
            logger.info("Ignoring object deletion event")
        elif message["selfLink"].startswith(
                "https://www.googleapis.com/storage"):
            source_replica = Replica.gcp
            source_key = message["name"]
            bucket = source_replica.bucket
            if source_key.startswith(
                    BLOB_PREFIX) and not BLOB_KEY_REGEX.match(source_key):
                logger.info(
                    "Key %s does not match blob key format, skipping sync",
                    source_key)
                continue
            for dest_replica in Config.get_replication_destinations(
                    source_replica):
                if exists(dest_replica, source_key):
                    logger.info("Key %s already exists in %s, skipping sync",
                                source_key, dest_replica)
                    continue
                exec_name = bucket + "/" + message[
                    "name"] + ":" + source_replica.name + ":" + dest_replica.name
                exec_input = dict(source_replica=source_replica.name,
                                  dest_replica=dest_replica.name,
                                  source_key=message["name"],
                                  source_obj_metadata=message)
                executions[exec_name] = app.state_machine.start_execution(
                    **exec_input)["executionArn"]
        else:
            raise NotImplementedError()
    return executions
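
For reference, the forwarded event arrives double-encoded: each record body is a JSON SNS envelope whose Message field is itself a JSON Google Cloud Storage notification. A minimal sketch of such a payload, using hypothetical bucket and object names, shows the unwrapping the handler performs before it inspects resourceState, selfLink and name:

import json

# Hypothetical forwarded notification, shaped as launch_from_forwarded_event expects.
gcs_notification = {
    "resourceState": "exists",
    "selfLink": "https://www.googleapis.com/storage/v1/b/example-bucket/o/blobs%2Fexample",
    "name": "blobs/example",
}
event = {"Records": [{"body": json.dumps({"Message": json.dumps(gcs_notification)})}]}

# The handler peels off both JSON layers before looking at the notification fields.
message = json.loads(json.loads(event["Records"][0]["body"])["Message"])
assert message["selfLink"].startswith("https://www.googleapis.com/storage")
assert message["name"] == "blobs/example"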
Example #2
def launch_from_s3_event(event, context):
    """Handle S3 bucket event notifications and start a replication
    state-machine execution for every configured destination replica.
    Returns a dict mapping execution names to execution ARNs."""
    source_replica = Replica.aws
    executions = {}
    if event.get("Event") == "s3:TestEvent":
        logger.info("S3 test event received and processed successfully")
    else:
        for event_record in event["Records"]:
            bucket = resources.s3.Bucket(event_record["s3"]["bucket"]["name"])
            obj = bucket.Object(unquote(event_record["s3"]["object"]["key"]))
            if obj.key.startswith(BLOB_PREFIX) and not BLOB_KEY_REGEX.match(
                    obj.key):
                logger.info(
                    "Key %s does not match blob key format, skipping sync",
                    obj.key)
                continue
            if obj.key.startswith("cache"):
                logger.info("Ignoring cache object")
                continue
            if bucket.name != source_replica.bucket:
                logger.error(
                    "Received S3 event for bucket %s with no configured replica",
                    bucket.name)
                continue
            for dest_replica in Config.get_replication_destinations(
                    source_replica):
                if exists(dest_replica, obj.key):
                    # Logging error here causes daemons/invoke_lambda.sh to report failure, for some reason
                    # - Brian Hannafious, 2019-01-31
                    logger.info("Key %s already exists in %s, skipping sync",
                                obj.key, dest_replica)
                    continue
                exec_name = bucket.name + "/" + obj.key + ":" + source_replica.name + ":" + dest_replica.name
                exec_input = dict(
                    source_replica=source_replica.name,
                    dest_replica=dest_replica.name,
                    source_key=obj.key,
                    source_obj_metadata=event_record["s3"]["object"])
                executions[exec_name] = app.state_machine.start_execution(
                    **exec_input)["executionArn"]
    return executions
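
The S3 path is simpler: each record carries plain JSON with the bucket name and a URL-encoded object key, and S3 also sends a one-off {"Event": "s3:TestEvent"} payload when a bucket notification is first configured, which the handler acknowledges and ignores. A minimal sketch of a record this handler would act on, with hypothetical names, including the key unquoting it relies on:

from urllib.parse import unquote

# Hypothetical S3 event record with the fields launch_from_s3_event reads.
event = {
    "Records": [
        {
            "s3": {
                "bucket": {"name": "example-source-bucket"},
                "object": {"key": "blobs/example%21key", "size": 1024},
            }
        }
    ]
}

record = event["Records"][0]
# Keys are URL-encoded in S3 notifications, hence the unquote before any prefix checks.
key = unquote(record["s3"]["object"]["key"])
assert key == "blobs/example!key"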