Example 1
def main(context: DurableOrchestrationContext):
    logging.info(f"Cache buster orchestrator has been triggered")

    trigger_payload_raw: str = context.get_input()
    logging.info(f"\tTrigger received: {trigger_payload_raw}")

    trigger_payload = loads(trigger_payload_raw)

    retry_options = RetryOptions(first_retry_interval_in_milliseconds=5_000,
                                 max_number_of_attempts=5)

    tasks = list()

    if trigger_payload['to'] == FLUSH_DESPATCH:
        for payload in get_operations():
            task = context.call_activity_with_retry(
                "cache_buster_activity",
                retry_options=retry_options,
                input_=payload)

            tasks.append(task)

        context.set_custom_status(
            f"Submitting {len(tasks)} tasks for {FLUSH_DESPATCH}.")

    elif trigger_payload['to'] == PURGE_STORAGE_CACHE:
        task = context.call_activity_with_retry(
            "cache_buster_storage_activity",
            retry_options=retry_options,
            input_=trigger_payload)

        tasks.append(task)

        context.set_custom_status(
            f"Submitting tasks for {PURGE_STORAGE_CACHE}.")

    _ = yield context.task_all(tasks)

    context.set_custom_status(f"ALL DONE: {trigger_payload}")

    return f"ALL DONE: {trigger_payload}"
Example 2
def main(context: DurableOrchestrationContext):
    logging.info(f"MSOA ETL orchestrator has been triggered")

    trigger_payload: str = context.get_input()
    logging.info(f"\tTrigger received: {trigger_payload}")

    retry_options = RetryOptions(
        first_retry_interval_in_milliseconds=3_000,
        max_number_of_attempts=3
    )

    trigger_data = loads(trigger_payload)

    data_container = trigger_data.get("container", "rawdbdata")
    data_path = trigger_data["data_path"]
    process_name = trigger_data["process_name"]

    area_type = trigger_data["area_type"]
    # Orchestrators must be deterministic on replay, so fall back to the
    # orchestration clock rather than datetime.now() when no timestamp is given.
    now = trigger_data.get("timestamp", context.current_utc_datetime)

    if not isinstance(now, datetime):
        now = datetime.fromisoformat(now)

    metric_name = "newCasesBySpecimenDate"
    area_codes = get_area_codes(area_type.lower())
    release_id, timestamp = get_release_id(now, process_name)
    set_file_releaseid(filepath=trigger_data["main_data_path"], release_id=release_id)

    metric_id = get_metric_id(metric_name)
    population = get_msoa_poplation()
    partition_id = create_partition(area_type, timestamp)

    payload = {
        "data_path": {
            "container": data_container,
            "path": data_path
        },
        "area_type": area_type.lower(),
        "metric": metric_name,
        "partition_id": partition_id,
        "metric_id": metric_id,
        "release_id": release_id,
        "timestamp": timestamp.isoformat()
    }

    logging.info(f"Base payload: {payload}")

    tasks = list()

    context.set_custom_status("Submitting for processing and deployment to DB")
    for _, row in area_codes.iterrows():
        payload.update({
            "area_code": row.area_code,
            "population": population[row.area_code],
            "area_id": row.area_id
        })

        task = context.call_activity_with_retry(
            "msoa_etl_db",
            retry_options=retry_options,
            input_=payload
        )
        tasks.append(task)

    _ = yield context.task_all(tasks)
    context.set_custom_status("DB deployment complete")

    _ = yield context.call_activity_with_retry(
        'db_etl_update_db',
        input_=dict(
            date=f"{now:%Y-%m-%d}",
            category=area_type.lower()
        ),
        retry_options=retry_options
    )
    context.set_custom_status("Metadata updated")

    return f"DONE: {data_path}"
Example 3
def main(context: DurableOrchestrationContext):
    logging.info(f"DB ETL orchestrator has been triggered")

    retry_options = RetryOptions(first_retry_interval_in_milliseconds=5_000,
                                 max_number_of_attempts=5)

    trigger_payload: str = context.get_input()
    logging.info(f"\tTrigger received: {trigger_payload}")

    trigger_data = loads(trigger_payload)

    timestamp = trigger_data["datestamp"]
    datestamp = trigger_data["datestamp"].split("T")[0]

    if "path" in trigger_data:
        paths = [trigger_data["path"]]
    else:
        paths = [
            # f"daily_chunks/specimen_date_cases/by_age/{datestamp}/",
            # f"daily_chunks/deaths_28days_death_date/by_age/{datestamp}/",
            f"daily_chunks/main/{datestamp}/"
        ]

    category = trigger_data.get("category", "main")

    main_path = trigger_data['main_data_path']
    if main_path.endswith("json"):
        process_name = "MAIN"
    else:
        parsed_path = parse_filepath(main_path)
        process_name = category_label(parsed_path)

    tasks = list()

    if len(timestamp) > 10:
        now = datetime.fromisoformat(timestamp)
    else:
        file_date = datetime.strptime(datestamp, "%Y-%m-%d")
        now = context.current_utc_datetime
        now = datetime(year=file_date.year,
                       month=file_date.month,
                       day=file_date.day,
                       hour=now.hour,
                       minute=now.minute,
                       second=now.second,
                       microsecond=now.microsecond)

    release_id, timestamp = get_release_id(now, process_name)
    set_file_releaseid(filepath=trigger_data["main_data_path"],
                       release_id=release_id)

    payload = {"timestamp": timestamp.isoformat()}

    for path in paths:
        with StorageClient(container="pipeline", path=path) as client:
            for file in client:
                payload.update({'file_path': file['name']})

                task = context.call_activity_with_retry(
                    "db_etl_upload",
                    retry_options=retry_options,
                    input_=payload)
                tasks.append(task)

    _ = yield context.task_all(tasks)
    context.set_custom_status("Upload to database is complete.")

    if category != "main":
        # Categories other than main may have DB level processes. These
        # need to be performed before stats and graphs are generated.
        # Processes for stats and graphs are therefore moved to chunk
        # processor.
        context.set_custom_status(
            "Chunk deployment is done. Remaining processes are skipped.")
        return f"DONE: {trigger_payload}"

    settings_task = context.call_activity_with_retry(
        'db_etl_update_db',
        input_=dict(date=f"{now:%Y-%m-%d}",
                    process_name=process_name,
                    environment=trigger_data.get('environment', "PRODUCTION")),
        retry_options=retry_options)

    graphs_task = context.call_activity_with_retry('db_etl_homepage_graphs',
                                                   input_=dict(
                                                       date=f"{now:%Y-%m-%d}",
                                                       category=category),
                                                   retry_options=retry_options)

    _ = yield context.task_all([settings_task, graphs_task])

    context.set_custom_status("Metadata updated / graphs created.")

    return f"DONE: {trigger_payload}"
Example 4
def main(context: DurableOrchestrationContext):
    retry_twice_opts = RetryOptions(first_retry_interval_in_milliseconds=5_000,
                                    max_number_of_attempts=2)

    timestamp = context.current_utc_datetime
    trigger_payload = loads(context.get_input())

    logging.info(f"triggered with payload: {trigger_payload}")

    # ------------------------------------------------------------------------------------
    # Retrieve blob paths
    # ------------------------------------------------------------------------------------
    context.set_custom_status("Retrieving artefacts")
    logging.info("retrieving artefacts")

    task_artefacts = list()

    for task_manifest in housekeeping_tasks:
        logging.info(f"submitting '{task_manifest['label']}' to retriever")

        artefacts = context.call_activity_with_retry(
            "housekeeping_retriever",
            input_=RetrieverPayload(timestamp=timestamp.isoformat(),
                                    environment=trigger_payload['environment'],
                                    manifest=task_manifest),
            retry_options=retry_twice_opts)

        task_artefacts.append(artefacts)

    logging.info("awaiting retriever tasks")
    retrieved_artefacts = yield context.task_all(task_artefacts)

    # ------------------------------------------------------------------------------------
    # Submit for archiving
    # ------------------------------------------------------------------------------------
    context.set_custom_status("Submitting candidates to the archiver")
    logging.info("submitting candidates to the archiver")

    archive_modes = [ProcessMode.ARCHIVE_AND_DISPOSE, ProcessMode.ARCHIVE_ONLY]
    activities = list()

    for task in chain(*retrieved_artefacts):
        logging.info(f"submitting '{task['manifest']['label']}' to archiver")

        if task["manifest"]["mode"] not in archive_modes:
            logging.info("-- not archived")
            continue

        activity = context.call_activity_with_retry(
            "housekeeping_archiver",
            input_=task,
            retry_options=retry_twice_opts)
        activities.append(activity)

    logging.info("awaiting archiver tasks")
    archived_artefacts = yield context.task_all(activities)

    # ------------------------------------------------------------------------------------
    # Dispose of archived blobs
    # ------------------------------------------------------------------------------------
    context.set_custom_status("Removing archived data")
    logging.info("removing archived data")

    disposable_only = filter(
        lambda t: t['manifest']['mode'] == ProcessMode.DISPOSE_ONLY,
        chain(*retrieved_artefacts))

    disposal_modes = [
        ProcessMode.ARCHIVE_AND_DISPOSE, ProcessMode.DISPOSE_ONLY
    ]
    activities = list()

    for task in chain(archived_artefacts, disposable_only):
        logging.info(f"submitting '{task['manifest']['label']}' to disposer")

        if task["manifest"]["mode"] not in disposal_modes:
            logging.info("-- not disposed")
            continue

        activity = context.call_activity_with_retry(
            "housekeeping_disposer",
            input_=task,
            retry_options=retry_twice_opts)
        activities.append(activity)

    logging.info("awaiting disposer tasks")
    report = yield context.task_all(activities)

    # ------------------------------------------------------------------------------------

    # `task_all` yields a list of per-activity results; assuming each disposer
    # reports a `total_processed` count, aggregate them for the final status.
    total_processed = sum(item["total_processed"] for item in report)

    context.set_custom_status(
        f"ALL DONE - processed {total_processed} artefacts")

    return f"DONE - {timestamp.isoformat()}"
Example 5
def main(context: DurableOrchestrationContext):
    logging.info(f"Main ETL orchestrator has been triggered")

    default_retry_opts = RetryOptions(
        first_retry_interval_in_milliseconds=5_000,
        max_number_of_attempts=6
    )

    retry_twice_opts = RetryOptions(
        first_retry_interval_in_milliseconds=5_000,
        max_number_of_attempts=2
    )

    trigger_payload: str = context.get_input()
    logging.info(f"\tTrigger received: {trigger_payload}")

    trigger_data = loads(trigger_payload)
    environment = trigger_data.get("ENVIRONMENT", ENVIRONMENT)

    file_name: str = trigger_data['fileName']

    raw_timestamp = trigger_data.get('timestamp', context.current_utc_datetime.isoformat())[:26]
    logging.info(f"Process timestamp: {raw_timestamp}")

    file_date_raw, _ = raw_timestamp.split("T")
    file_date = datetime.strptime(file_date_raw, "%Y-%m-%d")
    now = context.current_utc_datetime
    now = datetime(
        year=file_date.year,
        month=file_date.month,
        day=file_date.day,
        hour=now.hour,
        minute=now.minute,
        second=now.second,
        microsecond=now.microsecond
    )

    # Prod registration is done through `register_file`.
    if not context.is_replaying and environment != "PRODUCTION":
        logging.info(f"Not replaying - registering '{file_name}'")
        register_file(filepath=file_name, timestamp=now, instance_id=context.instance_id)

    if not file_name.endswith("json"):
        context.set_custom_status(f"Identified as non-JSON: {file_name}.")
        _ = yield context.call_sub_orchestrator_with_retry(
            "chunk_etl_orchestrator",
            input_=dumps({
                "fileName": file_name,
                "environment": environment,
            }),
            retry_options=retry_twice_opts
        )

        return f"DONE: {trigger_data}"

    logging.info("Following the main data pathway.")

    # Determine whether or not the payload is for
    # processing legacy data.
    # NOTE: Legacy data do not get:
    #       - deployed to the database,
    #       - archived,
    #       - a new despatch token.
    is_legacy = trigger_data.get("legacy", False)
    logging.info(f"> Legacy mode: {is_legacy}")

    # Generate retrieve payload
    retrieve_payload = {
        'data_path': file_name,
        'timestamp':  f"{raw_timestamp:0<26}",
        'legacy': is_legacy
    }
    logging.info(
        f'\tTrigger payload parsed - '
        f'processing "{retrieve_payload["data_path"]}" @ '
        f'"{retrieve_payload["timestamp"]}"'
    )

    # Read file and split into chunks by area type
    logging.info(f'\tStarting the process to retrieve new data')
    area_data_paths = yield context.call_activity_with_retry(
        "main_etl_retrieve_data",
        input_=retrieve_payload,
        retry_options=retry_twice_opts
    )
    logging.info(f'\tDOWNLOAD COMPLETE')
    context.set_custom_status("Data file has been parsed.")

    # Process chunks
    logging.info(f'Starting the main process')

    tasks = list()
    for data_path in area_data_paths:
        task = context.call_activity_with_retry(
            "main_etl_processor",
            input_=dict(
                data_path=data_path,
                timestamp=retrieve_payload['timestamp'] + "5Z",
                environment=environment
            ),
            retry_options=default_retry_opts
        )
        tasks.append(task)

    # Await processes
    etl_response = yield context.task_all(tasks)
    logging.info(f'>>> ALL MAIN ETL PROCESSES COMPLETE - length: {len(etl_response)}')
    context.set_custom_status("Main ETL processes are done. Creating box plot.")

    if is_legacy is True:
        context.set_custom_status("Legacy file detected.")
        return f"DONE: {context.current_utc_datetime}"

    _ = yield context.call_activity_with_retry(
        "chunk_etl_postprocessing",
        input_={
            "timestamp": now.isoformat(),
            "environment": environment,
            "category": "main"
        },
        retry_options=retry_twice_opts
    )

    context.set_custom_status("Deploying to the DB.")

    _ = yield context.call_sub_orchestrator_with_retry(
        "db_etl_orchestrator",
        input_=dumps({
            "datestamp": now.isoformat(),
            "environment": ENVIRONMENT,
            "main_data_path": file_name
        }),
        retry_options=retry_twice_opts
    )

    context.set_custom_status("Submitting main postprocessing tasks")
    _ = yield context.call_activity_with_retry(
        "main_etl_postprocessors",
        input_=dict(
            original_path=retrieve_payload['data_path'],
            timestamp=raw_timestamp,
            environment=environment
        ),
        retry_options=retry_twice_opts
    )
    logging.info("Done with latest main_etl_postprocessors.")

    # ====================================================================================

    tasks = list()

    # Retrieve scales
    context.set_custom_status("Requesting latest scale records.")

    area_types = ["nation", "region", "utla", "ltla", "msoa"]

    for area_type in area_types:
        task = context.call_activity_with_retry(
            "rate_scales_worker",
            retry_options=retry_twice_opts,
            input_={
                "type": "RETRIEVE",
                "timestamp": raw_timestamp,
                "area_type": area_type
            }
        )
        tasks.append(task)

    raw_scale_records = yield context.task_all(tasks)
    logging.info("Received latest scale records.")

    # ------------------------------------------------------------------------------------

    context.set_custom_status("Creating post deployment tasks")

    # Concatenate and archive processed data
    archive_response = context.call_activity_with_retry(
        "main_etl_archiver",
        input_=dict(
            results=etl_response,
            original_path=retrieve_payload['data_path'],
            timestamp=retrieve_payload['timestamp'] + "5Z",
            environment=environment
        ),
        retry_options=retry_twice_opts
    )
    logging.info("Created jobs for `main_etl_archiver`")

    # ....................................................................................
    # Pre-populate cache

    populate_cache = context.call_activity_with_retry(
        "cache_prepopulate",
        input_=dict(
            timestamp=raw_timestamp,
            environment=environment
        ),
        retry_options=retry_twice_opts
    )
    logging.info("Created jobs for `cache_prepopulate`")

    # ....................................................................................

    # Send daily report email
    daily_report = context.call_activity(
        "main_etl_daily_report",
        input_=dict(
            legacy=is_legacy,
            timestamp=raw_timestamp,
            environment=environment
        )
    )
    logging.info("Created jobs for `main_etl_daily_report`")

    # ....................................................................................

    tasks = [
        daily_report,
        archive_response,
        populate_cache
    ]

    # ....................................................................................
    # Generate rate scales

    for item in raw_scale_records:
        for record in item['records']:
            task = context.call_activity_with_retry(
                "rate_scales_worker",
                retry_options=retry_twice_opts,
                input_={
                    "type": "GENERATE",
                    "date": file_date_raw,
                    "timestamp": item["timestamp"],
                    "area_type": record['area_type'],
                    "area_code": record['area_code'],
                    "rate": record['rate'],
                    "percentiles": item['percentiles'],
                }
            )
            tasks.append(task)

    logging.info("Created jobs for `rate_scales_worker`")
    # ....................................................................................

    context.set_custom_status("Submitting post deployment tasks")
    _ = yield context.task_all(tasks)
    context.set_custom_status("ALL done.")

    return f"DONE: {trigger_data}"
Example 6
def main(context: DurableOrchestrationContext):
    logging.info(f"Chunk ETL orchestrator has been triggered")

    default_retry_opts = RetryOptions(
        first_retry_interval_in_milliseconds=5_000, max_number_of_attempts=6)

    retry_twice_opts = RetryOptions(first_retry_interval_in_milliseconds=5_000,
                                    max_number_of_attempts=2)

    trigger_payload: str = context.get_input()
    logging.info(f"\tTrigger received: {trigger_payload}")

    trigger_data = loads(trigger_payload)

    file_name = trigger_data["fileName"]

    metadata = parse_filepath(file_name)

    if metadata is None:
        # Path pattern does not conform
        # to the defined pattern.
        context.set_custom_status(
            "File name cannot be parsed. Process terminated.")
        return f"DONE: {trigger_data}"

    now = context.current_utc_datetime
    timestamp_raw = datetime.strptime(
        f'{metadata["timestamp"]}{now:%S}.{now:%f}',
        "%Y%m%d%H%M%S.%f"
    )
    timestamp = timestamp_raw.isoformat()

    main_path = trigger_data['fileName']
    if main_path.endswith("json"):
        process_name = "MAIN"
    else:
        process_name = category_label(metadata)

    msg = (
        f'Starting to upload pre-processed data: '
        f'{metadata["area_type"]}::{metadata["category"]}::{metadata["subcategory"]}'
    )

    area_category = (metadata["area_type"], metadata["category"])

    if area_category == ("MSOA", "vaccinations-by-vaccination-date"):
        logging.info(msg)
        context.set_custom_status(msg)

        process_name = "MSOA: VACCINATION - EVENT DATE"

        _ = yield context.call_activity_with_retry(
            "chunk_db_direct",
            input_={
                'data_path': file_name,
                'area_type': metadata["area_type"],
                'timestamp': timestamp,
                'process_name': process_name
            },
            retry_options=retry_twice_opts)
        logging.info(f"DONE: {msg}")
        context.set_custom_status(f"DONE: {msg}")

    elif area_category == ("MSOA", "cases-by-specimen-date"):
        logging.info(msg)
        context.set_custom_status(msg)

        process_name = "MSOA"

        _ = yield context.call_sub_orchestrator_with_retry(
            "msoa_etl_orchestrator",
            input_=dumps({
                'data_path': file_name,
                'area_type': metadata["area_type"],
                'timestamp': timestamp,
                'process_name': process_name,
                'main_data_path': file_name
            }),
            retry_options=retry_twice_opts)
        logging.info(f"DONE: {msg}")
        context.set_custom_status(f"DONE: {msg}")

    else:
        # Read file and split into chunks
        # by area type / area code combination.
        logging.info(f'\tStarting the process to retrieve new data')
        context.set_custom_status("Parsing the payload")
        area_data_paths = yield context.call_activity_with_retry(
            "chunk_etl_retriever",
            input_={
                'path': file_name,
                'date': metadata["date"],
                'area_type': metadata["area_type"],
                'category': metadata["category"],
                'subcategory': metadata["subcategory"],
                'timestamp': timestamp
            },
            retry_options=retry_twice_opts)
        logging.info(f'\tDOWNLOAD COMPLETE')
        context.set_custom_status("Payload has been parsed")

        # Process chunks
        logging.info(f'Starting the main process')

        tasks = list()
        context.set_custom_status("Submitting main ETL processes")

        # Create ETL tasks based on the paths
        # returned by `chunk_etl_retriever`.
        for item in area_data_paths:
            data_path = item.pop("path")
            task = context.call_activity_with_retry(
                "chunk_etl_processor",
                input_=dict(base=dict(data_path=data_path,
                                      timestamp=timestamp,
                                      environment="PRODUCTION"),
                            **item),
                retry_options=default_retry_opts)
            tasks.append(task)

        context.set_custom_status("Awaiting ETL processes")
        # Await processes
        etl_response = yield context.task_all(tasks)
        logging.info(
            f'>>> ALL MAIN ETL PROCESSES COMPLETE - length: {len(etl_response)}'
        )

        chunks_path = f"daily_chunks/{metadata['category']}/{metadata['date']}/"
        if metadata['subcategory']:
            chunks_path = f"daily_chunks/{metadata['category']}/{metadata['subcategory']}/{metadata['date']}/"

        # Deploy processed data to the DB.
        context.set_custom_status(f"Deploying to the database: {chunks_path}")

        _ = yield context.call_sub_orchestrator_with_retry(
            "db_etl_orchestrator",
            input_=dumps({
                "datestamp": metadata['date'],
                "path": chunks_path,
                "environment": "PRODUCTION",
                "area_type": metadata['area_type'],
                "category": metadata['category'],
                "subcategory": metadata['subcategory'],
                "main_data_path": file_name
            }),
            retry_options=retry_twice_opts)

    context.set_custom_status("Postprocessing")
    _ = yield context.call_activity_with_retry(
        "chunk_etl_postprocessing",
        input_={
            "timestamp": timestamp,
            "environment": "PRODUCTION",
            "category": metadata['category'],
            "subcategory": metadata['subcategory'] if metadata['subcategory'] != "" else None,
            "area_type": metadata['area_type'] if metadata['area_type'] != "" else None
        },
        retry_options=retry_twice_opts)
    context.set_custom_status(
        "Deployment to the DB is complete, submitting postprocessing tasks.")

    settings_task = context.call_activity_with_retry(
        'db_etl_update_db',
        input_=dict(date=f"{timestamp_raw:%Y-%m-%d}",
                    process_name=process_name,
                    environment=trigger_data['environment']),
        retry_options=retry_twice_opts)

    context.set_custom_status("Submitting main postprocessing tasks")
    post_processes = context.call_activity_with_retry(
        "main_etl_postprocessors",
        input_=dict(original_path=file_name,
                    timestamp=timestamp,
                    environment=trigger_data['environment']),
        retry_options=retry_twice_opts)

    graphs_task = context.call_activity_with_retry(
        'db_etl_homepage_graphs',
        input_=dict(date=f"{timestamp_raw:%Y-%m-%d}",
                    category=metadata['category'],
                    subcategory=metadata['subcategory']),
        retry_options=retry_twice_opts)

    _ = yield context.task_all([graphs_task, settings_task, post_processes])

    context.set_custom_status(
        "Metadata updated / graphs created / main postprocessing tasks complete. ALL DONE."
    )

    return f"DONE: {trigger_data}"
Example 7
def main(context: DurableOrchestrationContext):
    logging.info(f"Despatch ops orchestrator has been triggered")

    trigger_payload: str = context.get_input()
    logging.info(f"\tTrigger received: {trigger_payload}")

    retry_options = RetryOptions(
        first_retry_interval_in_milliseconds=5_000,
        max_number_of_attempts=5
    )

    trigger_data = loads(trigger_payload)

    devices = [Device.desktop, Device.mobile]
    area_types = ["utla", "ltla", "msoa"]

    tasks = list()
    for area_type, device in product(area_types, devices):
        task = context.call_activity_with_retry(
            "despatch_ops_workers",
            retry_options=retry_options,
            input_={
                "handler": "map_geojson",
                "payload": {
                    "area_type": area_type,
                    "device": device,
                    "timestamp": trigger_data["timestamp"]
                }
            }
        )

        tasks.append(task)

    task = context.call_activity_with_retry(
        "despatch_ops_workers",
        retry_options=retry_options,
        input_={
            "handler": "vax_map_geojson",
            "payload": {"timestamp": trigger_data["timestamp"]}
        }
    )

    tasks.append(task)

    area_types = ["nation", "region", "utla", "ltla", "msoa"]
    for area_type in area_types:
        task = context.call_activity_with_retry(
            "despatch_ops_workers",
            retry_options=retry_options,
            input_={
                "handler": "map_percentiles",
                "payload": {
                    "area_type": area_type,
                    "timestamp": trigger_data["timestamp"]
                }
            }
        )

        tasks.append(task)

    task = context.call_activity_with_retry(
        "despatch_ops_workers",
        retry_options=retry_options,
        input_={
            "handler": "archive_dates",
            "payload": {
                "data_type": "MAIN",
                "timestamp": trigger_data["timestamp"]
            }}
    )
    tasks.append(task)

    # The remaining handlers share an identical payload; submit them in a loop.
    for handler in ("og_images", "sitemap", "landing_page_map"):
        task = context.call_activity_with_retry(
            "despatch_ops_workers",
            retry_options=retry_options,
            input_={
                "handler": handler,
                "payload": {"timestamp": trigger_data["timestamp"]}
            }
        )
        tasks.append(task)

    context.set_custom_status("All jobs created - submitting for execution.")
    _ = yield context.task_all(tasks)

    context.set_custom_status("All jobs complete - updating timestamps.")

    tasks = list()
    for item in ReleaseTimestamps:
        processor_fn = item["process"]
        value = processor_fn(trigger_data["releaseTimestamp"])

        task = context.call_activity_with_retry(
            "despatch_ops_release",
            retry_options=retry_options,
            input_={
                "path": item["path"],
                "container": item["container"],
                "value": value
            }
        )

        tasks.append(task)

    _ = yield context.task_all(tasks)
    context.set_custom_status("Timestamps updated - clearing Redis cache.")

    _ = yield context.call_sub_orchestrator_with_retry(
        'cache_buster_orchestrator',
        input_=dumps({
            "to": FLUSH_DESPATCH,
            "timestamp": trigger_data.get('timestamp')
        }),
        retry_options=retry_options
    )

    context.set_custom_status(f"ALL DONE: {trigger_payload}")

    return f"ALL DONE: {trigger_payload}"