Example #1
def lambda_handler(event, context):
    payload = ProcessPayload.from_event(event)
    logger = get_task_logger("task.publish", payload=payload)

    config = payload.get_task('publish', {})
    public = config.get('public', False)
    # additional SNS topics to publish to
    topics = config.get('sns', [])

    # these are the URLs to the canonical records on s3
    s3urls = []

    try:
        logger.debug("Publishing items to s3 and SNS")

        if API_URL is not None:
            link = {
                'title': payload['id'],
                'rel': 'via-cirrus',
                'href': f"{API_URL}/catid/{payload['id']}"
            }
            logger.debug(json.dumps(link))
            # add cirrus-source relation
            for item in payload['features']:
                item['links'].append(link)

        # publish to s3
        s3urls = payload.publish_items_to_s3(DATA_BUCKET, public=public)

        # publish to Cirrus SNS publish topic
        payload.publish_items_to_sns()

        # Deprecated additional topics
        if PUBLISH_TOPICS:
            for topic in PUBLISH_TOPICS.split(','):
                payload.publish_items_to_sns(topic)

        for topic in topics:
            payload.publish_items_to_sns(topic)
    except Exception as err:
        msg = f"publish: failed publishing output items ({err})"
        logger.error(msg, exc_info=True)
        raise Exception(msg) from err

    try:
        # update job outputs in table
        statedb.set_outputs(payload['id'], outputs=s3urls)
    except Exception as err:
        msg = f"publish: failed setting statedb outputs ({err})"
        logger.error(msg, exc_info=True)
        raise Exception(msg) from err

    return payload
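
For reference, the 'publish' task configuration read at the top of this handler might look like the sketch below; the topic ARN is a placeholder, not taken from the source.

# Hypothetical 'publish' entry from the payload's task configuration; only the
# keys the handler reads ('public' and 'sns') are shown.
publish_config = {
    'public': False,  # presumably controls public read access on the published S3 objects
    'sns': ['arn:aws:sns:us-east-1:123456789012:extra-topic'],  # additional SNS topics to publish to
}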
Example #2
    def __init__(self, *args, update=False, state_item=None, **kwargs):
        """Initialize a ProcessPayload, verify required fields, and assign an ID

        Args:
            state_item (Dict, optional): Dictionary of entry in StateDB. Defaults to None.
        """
        super().__init__(*args, **kwargs)

        self.logger = get_task_logger(__name__, payload=self)

        # validate process block
        # TODO: assert isn't safe for this use if debug is off
        assert (self['type'] == 'FeatureCollection')
        assert ('process' in self)

        self.process = (self['process'][0] if isinstance(
            self['process'], list) else self['process'])

        if update:
            self.update()

        assert ('output_options' in self.process)
        assert ('workflow' in self.process)

        # convert old functions field to tasks
        if 'functions' in self.process:
            self.logger.warning(
                "Deprecated: process 'functions' has been renamed to 'tasks'")
            self.process['tasks'] = self.process.pop('functions')

        assert ('tasks' in self.process)
        self.tasks = self.process['tasks']

        assert ('workflow-' in self['id'])

        # TODO - validate with a JSON schema
        #if schema:
        #    pass
        # For now, just check that there is at least one item
        assert (len(self['features']) > 0)
        for item in self['features']:
            if 'links' not in item:
                item['links'] = []

        # update collection IDs of member Items
        self.assign_collections()

        self.state_item = state_item
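
The assertions above imply a minimal payload shape; a sketch is shown below, with placeholder IDs, workflow name, and feature content (none of these values come from the source).

# Minimal payload that would satisfy the assertions in __init__ (values are placeholders)
minimal_payload = {
    'type': 'FeatureCollection',
    'id': 'my-collection/workflow-my-workflow/item-1',   # must contain 'workflow-'
    'features': [
        {'id': 'item-1', 'links': []},                   # at least one item is required
    ],
    'process': {
        'workflow': 'my-workflow',
        'output_options': {},
        'tasks': {},
    },
}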
Example #3
def lambda_handler(event, context):
    payload = ProcessPayload.from_event(event)
    logger = get_task_logger("task.pre-batch", payload=payload)

    url = f"s3://{PAYLOAD_BUCKET}/batch/{payload['id']}/{uuid.uuid1()}.json"

    try:
        # copy payload to s3
        s3().upload_json(payload, url)

        logger.debug(f"Uploaded payload to {url}")
        return {'url': url}
    except Exception as err:
        msg = f"pre-batch: failed pre processing batch job for ({err})"
        logger.error(msg, exc_info=True)
        raise Exception(msg) from err
Example #4
def lambda_handler(event, context={}):
    payload = ProcessPayload.from_event(event)
    logger = get_task_logger("task.copy-assets", payload=payload)

    # TODO - make this more general for more items/collections
    item = payload['features'][0]  # collection=payload['collections'][0])

    # configuration options
    config = payload.get_task('copy-assets', {})
    outopts = payload.process.get('output_options', {})

    # asset config
    assets = config.get('assets', item['assets'].keys())
    drop_assets = config.get('drop_assets', [])
    # drop specified assets
    for asset in [a for a in drop_assets if a in item['assets'].keys()]:
        logger.debug(f'Dropping asset {asset}')
        item['assets'].pop(asset)
    # the string 'ALL' means copy every asset on the item
    if assets == 'ALL':
        assets = item['assets'].keys()

    # create temporary work directory
    tmpdir = mkdtemp()

    try:
        # copy specified assets
        _assets = [a for a in assets if a in item['assets'].keys()]

        for asset in _assets:
            item = download_item_assets(item, path=tmpdir, assets=[asset])

            item = upload_item_assets(item, assets=[asset], **outopts)

        # replace item in payload
        payload['features'][0] = item
    except Exception as err:
        msg = f"copy-assets: failed processing {payload['id']} ({err})"
        logger.error(msg, exc_info=True)
        raise Exception(msg) from err
    finally:
        # remove work directory....very important for Lambdas!
        logger.debug('Removing work directory %s', tmpdir)
        rmtree(tmpdir)

    return payload
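
For reference, a 'copy-assets' task configuration consistent with the options read above could look like this (asset keys are placeholders):

# Hypothetical 'copy-assets' configuration; 'assets' may also be the string 'ALL'
copy_assets_config = {
    'assets': ['thumbnail', 'metadata'],   # asset keys to copy
    'drop_assets': ['tileinfo'],           # asset keys to remove from the item first
}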
Example #5
import re
import json

import boto3

from cirrus.lib.process_payload import ProcessPayload
from cirrus.lib.logging import get_task_logger

logger = get_task_logger('lambda_function.update-state', payload=tuple())

BATCH_LOG_GROUP = '/aws/batch/job'
LOG_CLIENT = boto3.client('logs')
DEFAULT_ERROR = 'UnknownError'
ERROR_REGEX = re.compile(r'^(?:cirrus\.?lib\.errors\.)?(?:([\.\w]+):)?\s*(.*)')


def lambda_handler(event, context):
    if 'error' not in event:
        return ProcessPayload.from_event(event)

    error = event.get('error', {})
    cause = json.loads(error['Cause'])
    logname = cause['Attempts'][-1]['Container']['LogStreamName']

    try:
        error_type, error_msg = get_error_from_batch(logname)
    except Exception:
        # Lambda does not currently support exception chaining,
        # so we have to log the original exception separately
        logger.exception("Original exception:")
        raise Exception("Unable to get error log")
Example #6
import json
import os

from cirrus.lib.process_payload import ProcessPayload, ProcessPayloads
from cirrus.lib.utils import dict_merge
from cirrus.lib.logging import get_task_logger

logger = get_task_logger('lambda_function.process', payload=tuple())

# Default PROCESSES
# TODO: put this configuration into the cirrus.yml
with open(os.path.join(os.path.dirname(__file__), 'processes.json')) as f:
    PROCESSES = json.load(f)


def lambda_handler(event, context):
    logger.debug(json.dumps(event))

    # Read SQS event
    if 'Records' not in event:
        raise ValueError("Input not from SQS")

    # TODO: a large number of input collections will cause a timeout
    # find a way to process each input message, deleting it from the queue
    # any not processed before timeout will be retried on the next execution
    payloads = []
    for record in [json.loads(r['body']) for r in event['Records']]:
        payload = json.loads(record['Message'])
        logger.debug('payload: %s', json.dumps(payload))
        # expand payload_ids to full payloads
        if 'payload_ids' in payload:
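
For context, the nested decoding above expects an SQS event whose record bodies are SNS notifications; a sketch with placeholder IDs follows.

# Sketch of the SQS-wrapped SNS event consumed above (IDs are placeholders)
inner_payload = {'payload_ids': ['example-payload-id']}
event = {
    'Records': [
        {'body': json.dumps({'Message': json.dumps(inner_payload)})},
    ],
}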
Example #7
def lambda_handler(event, context={}):
    payload = ProcessPayload.from_event(event)
    logger = get_task_logger("task.add-preview", payload=payload)

    # get step configuration
    config = payload.get_task('add-preview', {})
    outopts = payload.process.get('output_options', {})
    assets = config.pop('assets', None)
    thumb = config.pop('thumbnail', False)
    config.pop('batch', None)  # 'batch' may be absent from the task config

    if assets is None:
        msg = "add-preview: no asset specified for preview"
        logger.error(msg)
        raise Exception(msg)

    # create temporary work directory
    tmpdir = tempfile.mkdtemp()
    items = []
    for item in payload['features']:
        # find asset to use for preview
        asset = None
        for a in assets:
            if a in item['assets']:
                asset = a
                break
        if asset is None:
            msg = "add-preview: no available asset for preview"
            logger.warning(msg)
            items.append(item)
            continue

        try:
            # keep original href
            href = item['assets'][asset]['href']

            # download asset
            item = download_item_assets(item, path=tmpdir, assets=[asset])
            filename = item['assets'][asset]['href']

            # add preview to item
            item['assets']['preview'] = create_preview(filename, logger,
                                                       **config)
            if thumb:
                # add thumbnail to item
                item['assets']['thumbnail'] = create_thumbnail(
                    item['assets']['preview']['href'], logger)

            # put back original href
            item['assets'][asset]['href'] = href

            # upload these new assets
            item = upload_item_assets(item,
                                      assets=['preview', 'thumbnail'],
                                      **outopts)
            items.append(item)
        except Exception as err:
            msg = f"add-preview: failed creating preview/thumbnail ({err})"
            logger.error(msg, exc_info=True)
            # remove work directory....very important for Lambdas!
            shutil.rmtree(tmpdir)
            raise Exception(msg) from err

    # remove work directory....very important for Lambdas!
    shutil.rmtree(tmpdir)

    # return new items
    payload['features'] = items
    return payload
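
For reference, an 'add-preview' task configuration consistent with the keys popped above might look like the following; any remaining keys are forwarded to create_preview(), and all values here are placeholders.

# Hypothetical 'add-preview' configuration
add_preview_config = {
    'assets': ['visual', 'B04'],   # checked in order; the first asset present on the item is used
    'thumbnail': True,             # also create a thumbnail from the preview
    'batch': False,                # consumed by the handler, not passed to create_preview()
}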
Example #8
def lambda_handler(event, context={}):
    payload = ProcessPayload.from_event(event)
    logger = get_task_logger("task.convert-to-cog", payload=payload)

    # TODO - make this more general for more items/collections
    item = payload['features'][0]  # collection=payload['collections'][0])

    # configuration options
    config = payload.get_task('convert-to-cog', {})
    outopts = payload.process.get('output_options', {})
    assets = config.get('assets', {})  # maps asset key -> kwargs for cogify()

    # create temporary work directory
    tmpdir = mkdtemp()

    try:
        asset_keys = [a for a in assets if a in item['assets'].keys()]

        for asset in asset_keys:
            logger.info(f"Converting {asset} to COG")
            # download asset
            item = download_item_assets(item, path=tmpdir, assets=[asset])

            # cogify
            fn = item['assets'][asset]['href']
            fnout = cogify(fn, os.path.splitext(fn)[0] + '.tif', **assets[asset])
            item['assets'][asset]['href'] = fnout
            item['assets'][asset]['type'] = "image/tiff; application=geotiff; profile=cloud-optimized"
            with rasterio.open(fnout) as src:
                # store as plain lists so the payload stays JSON-serializable
                item['assets'][asset]['proj:shape'] = list(src.shape)
                item['assets'][asset]['proj:transform'] = list(src.transform)

            # upload assets
            item = upload_item_assets(item, assets=[asset], **outopts)
            # cleanup files
            if os.path.exists(fn):
                os.remove(fn)
            if os.path.exists(fnout):
                os.remove(fnout)

        # add derived_from link
        links = [link['href'] for link in item['links'] if link['rel'] == 'self']
        if len(links) == 1:
            # add derived from link
            item['links'].append({
                'title': 'Source STAC Item',
                'rel': 'derived_from',
                'href': links[0],
                'type': 'application/json'
            })

        # drop any specified assets
        for asset in [a for a in config.get('drop_assets', []) if a in item['assets'].keys()]:
            item['assets'].pop(asset)

        payload['features'][0] = item
    except CRSError as err:
        msg = f"convert-to-cog: invalid CRS ({err})"
        logger.error(msg, exc_info=True)
        raise InvalidInput(msg) from err
    except s3_sessions[list(s3_sessions)[0]].s3.exceptions.NoSuchKey as err:
        msg = f"convert-to-cog: failed fetching {asset} asset ({err})"
        logger.error(msg, exc_info=True)
        raise InvalidInput(msg) from err
    except Exception as err:
        msg = f"convert-to-cog: failed creating COGs ({err})"
        logger.error(msg, exc_info=True)
        raise Exception(msg) from err
    finally:
        # remove work directory....very important for Lambdas!
        logger.debug('Removing work directory %s', tmpdir)
        rmtree(tmpdir)

    return payload
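
For reference, a 'convert-to-cog' task configuration matching the reads above could look like this sketch; each per-asset dict is passed as keyword arguments to cogify(), and since those options are not visible in the snippet the dicts are left empty. Asset keys are placeholders.

# Hypothetical 'convert-to-cog' configuration
convert_to_cog_config = {
    'assets': {
        'B04': {},    # kwargs forwarded to cogify(); defaults used here
        'B08': {},
    },
    'drop_assets': ['overview'],   # asset keys to remove after conversion
}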