Example #1
0
def lambda_handler(event, context):
    """Handle an s3 "clean" trigger.

    Depending on the dataset id parsed from the event, either clean one
    dataset, broadcast a clean over every known id, or broadcast a clean
    over only the ingested datasets.
    """
    bucket_name, ipst = s3.parse_s3_event(event)
    comm = io.get_io_bundle(bucket_name)

    if ipst == "all":
        print("Cleaning all datasets;  removing all job resources (S3 files).")
        # Remove the trigger message literally so "all" is never treated as
        # a set of existing ipppssoots.
        comm.messages.delete_literal("clean-all")
        cleanup_ids = comm.ids("all")
        comm.messages.broadcast("clean", cleanup_ids)
        return

    if ipst == "ingested":
        # Variant of "all" restricted to datasets which have an ingest message.
        print(
            "Cleaning all ingested datasets;  removing all job resources (S3 files)."
        )
        comm.messages.delete_literal("clean-ingested")  # don't interpret "ingested"
        cleanup_ids = comm.messages.ids("ingested")
        comm.messages.broadcast("clean", cleanup_ids)
        return

    print("Cleaning", ipst)
    comm.clean(ipst)
Example #2
0
def lambda_handler(event, context):
    """Handle an s3 "rescue" trigger for one dataset: drop its control
    metadata, then resubmit it through the normal submission path.
    """
    bucket_name, ipst = s3.parse_s3_event(event)
    comm = io.get_io_bundle(bucket_name)

    # Deleting the control metadata is the biggest difference between
    # "placed" and "rescue" handling.
    comm.xdata.delete(ipst)

    lambda_submit.main(comm, ipst, bucket_name)
Example #3
0
def lambda_handler(event, context):
    """Handle an s3 "cancel" trigger.

    The id parsed from the event selects the scope of the cancel:
    - "all": broadcast a cancel over every killable batch job id.
    - a batch job id (matches batch.JOB_ID_RE): cancel that single job,
      recording termination in messages/metadata where possible.
    - an ipppssoot (matches hst.IPPPSSOOT_RE): cancel that dataset's job.
    Any other value raises ValueError.
    """

    bucket_name, ipst = s3.parse_s3_event(event)

    comm = io.get_io_bundle(bucket_name)

    if ipst == "all":
        # Delete exactly the cancel-all message,  not every ipppssoot
        comm.messages.delete_literal("cancel-all")
        # Cancel all jobs in a killable state broadcasting cancel over job_ids
        job_ids = batch.get_job_ids()
        comm.messages.broadcast("cancel", job_ids)
    elif batch.JOB_ID_RE.match(ipst):
        job_id, ipst = ipst, "unknown"  # kill one job, ipst = job_id
        print("Cancelling job_id", job_id)
        comm.messages.delete_literal(f"cancel-{job_id}")
        # Best-effort bookkeeping: failures here must not block the
        # actual job termination below, hence the trap_exception guard.
        with log.trap_exception("Handling messages + control for", job_id):
            ipst = batch.get_job_name(job_id)
            print("Handling messages and control for", ipst)
            comm.messages.delete(f"all-{ipst}")
            comm.messages.put(f"terminated-{ipst}",
                              "cancel lambda " + bucket_name)
            try:
                metadata = comm.xdata.get(ipst)
            except comm.xdata.client.exceptions.NoSuchKeyError:
                # No existing control metadata: synthesize a minimal record.
                metadata = dict(job_id=job_id, cancel_type="job_id")
            metadata["terminated"] = True
            comm.xdata.put(ipst, metadata)
        # Do last so terminate flag is set if possible.
        print("Terminating", job_id)
        batch.terminate_job(job_id, "Operator cancelled")
    elif hst.IPPPSSOOT_RE.match(ipst):  # kill one ipppssoot
        print("Cancelling ipppssoot", ipst)
        comm.messages.delete(f"all-{ipst}")
        comm.messages.put(f"terminated-{ipst}", "cancel lambda " + bucket_name)
        metadata = comm.xdata.get(ipst)
        metadata["terminated"] = True
        metadata["cancel_type"] = "ipppssoot"
        comm.xdata.put(ipst, metadata)
        job_id = metadata["job_id"]
        # Termination itself is guarded so a batch failure doesn't undo
        # the metadata/message updates already made above.
        with log.trap_exception("Terminating", job_id):
            print("Terminating", job_id)
            batch.terminate_job(job_id, "Operator cancelled")
    else:
        raise ValueError("Bad cancel ID", ipst)
Example #4
0
def lambda_handler(event, context):
    """Handle an s3 "rescue" trigger: resubmit one dataset, or broadcast
    a rescue over every rescuable dataset when the id is "all".
    """
    bucket_name, ipst = s3.parse_s3_event(event)
    comm = io.get_io_bundle(bucket_name)

    if ipst != "all":
        print("Rescuing", ipst)
        lambda_submit.main(comm, ipst, bucket_name)
        return

    print("Rescuing all")
    # Remove the trigger message literally so "all" is never treated as
    # a set of existing ipppssoots.
    comm.messages.delete_literal("rescue-all")
    rescues = comm.messages.ids(RESCUE_TYPES)
    comm.messages.broadcast("rescue", rescues)
Example #5
0
def lambda_handler(event, context):
    """Fan out one broadcast message.

    Pops the broadcast identified by the event's serial.  A broadcast of
    more than 100 messages is split into two new broadcast messages;
    otherwise each message is put individually, polling for a kill signal
    every 10 sends.
    """
    bucket_name, serial = s3.parse_s3_event(event)
    comm = io.get_io_bundle(bucket_name)

    if check_for_kill(comm, "Detected broadcast-kill on entry."):
        return

    broadcasted = comm.messages.pop(f"broadcast-{serial}")

    if len(broadcasted) > 100:
        # Too large to deliver here: bifurcate into two fresh broadcasts.
        half = len(broadcasted) // 2
        serial1, serial2 = comm.messages.get_id(), comm.messages.get_id()
        comm.messages.put(f"broadcast-{serial1}", broadcasted[:half])
        comm.messages.put(f"broadcast-{serial2}", broadcasted[half:])
        return

    for i, msg in enumerate(broadcasted):
        # Re-check for a broadcast kill every 10th message.
        if not i % 10 and check_for_kill(comm, "Detected broadcast-kill in put loop"):
            return
        comm.messages.put(msg)
Example #6
0
def lambda_handler(event, context):
    """Deliver a broadcast's payload to each of its target messages.

    Pops the broadcast identified by the event's serial.  Broadcasts with
    more than 100 targets are bifurcated into two new broadcasts instead
    of being delivered here; otherwise the payload is put to each target,
    polling for a kill signal every 10 sends.
    """
    bucket_name, serial = s3.parse_s3_event(event)
    comm = io.get_io_bundle(bucket_name)

    if check_for_kill(comm, "Detected broadcast-kill on entry."):
        return

    bmsg = comm.messages.pop(f"broadcast-{serial}")
    broadcasted = bmsg["messages"]
    payload = bmsg["payload"]

    if len(broadcasted) > 100:
        # Too large to deliver here: split into two new broadcasts.
        comm.messages.bifurcate_broadcast(broadcasted, payload)
        return

    for i, msg in enumerate(broadcasted):
        # Re-check for a broadcast kill every 10th message.
        if not i % 10 and check_for_kill(comm, "Detected broadcast-kill in put loop"):
            return
        comm.messages.put(msg, payload)