def test_other_error(self):
    """A non-workflow exception raised by the task propagates out of run_cumulus_task.

    Uses assertRaises so the test FAILS if no exception is raised — the original
    try/except form silently passed when run_cumulus_task raised nothing.
    """
    def other_error_fn(event, context):
        raise Exception('SomeError')

    test_event = create_event()
    context = LambdaContextMock()
    with self.assertRaises(Exception) as raised:
        run_cumulus_task(other_error_fn, test_event, context)
    # The original error name is carried in the exception's first argument.
    self.assertEqual(raised.exception.args[0], 'SomeError')
def handler(event: Dict[str, Union[List[str], Dict]], context: object) -> Any:
    """Lambda handler entry point.

    Runs a cumulus task that copies the files in {event}['input'] from the
    collection specified in {config} to the {config}'s 'glacier' bucket.

    Args:
        event: Event passed into the step from the aws workflow. A dict with
            the following keys:
            input (dict): Dictionary with the following keys:
                granules (List): List of granule objects (dictionaries)
            config (dict): A dict with the following keys:
                collection (dict): The collection from AWS.
                    See https://nasa.github.io/cumulus/docs/data-cookbooks/sips-workflow
                    A dict with the following keys:
                    name (str): The name of the collection. Used when
                        generating the default value for
                        {event}[config][fileStagingDir].
                    version (str): The version of the collection. Used when
                        generating the default value for
                        {event}[config][fileStagingDir].
                    files (list[Dict]): A list of dicts representing file
                        types within the collection. The first file where the
                        file's ['regex'] matches the filename from the input
                        is used to identify the bucket referenced in
                        return's['granules'][filename]['files']['bucket'].
                        Each dict contains the following keys:
                        regex (str): The regex that all files in the bucket
                            must match with their name.
                        bucket (str): The name of the bucket containing the
                            files.
                        url_path (str): Used when calling
                            {copy_granule_between_buckets} as a part of the
                            destination_key.
                buckets (dict): A dict with the following keys:
                    glacier (dict): A dict with the following keys:
                        name (str): The name of the bucket to copy to.
        context: An object required by AWS Lambda. Unused.

    Returns:
        The result of the cumulus task.
    """
    task_result = run_cumulus_task(task, event, context)
    return task_result
def test_simple_handler_without_context(self):
    """run_cumulus_task succeeds when called without a Lambda context argument."""
    def handler_fn(event, context):
        return event

    result = run_cumulus_task(handler_fn, create_event())
    self.assertIsNotNone(result)
    self.assertEqual(result['cumulus_meta']['task'], 'Example')
    self.assertEqual(result['payload']['input']['anykey'], 'anyvalue')
def test_workflow_error(self):
    """A workflow error clears the payload and records the error name under 'exception'."""
    def workflow_error_fn(event, context):
        raise Exception('SomeWorkflowError')

    result = run_cumulus_task(workflow_error_fn, create_event(), LambdaContextMock())
    self.assertIsNone(result['payload'])
    self.assertEqual(result['exception'], 'SomeWorkflowError')
def handler(event, context):  # pylint: disable-msg=unused-argument
    """Lambda handler. Initiates a restore_object request from glacier for each
    file of a granule.

    Note that this function is set up to accept a list of granules (because
    Cumulus sends a list), but at this time only 1 granule will be accepted.
    This is due to the error handling: if the restore request for any file of a
    granule fails to submit, the entire granule (workflow) fails. If more than
    one granule were accepted and a failure occurred, at present it would fail
    all of them.

    Environment variables can be set to override how many days to keep the
    restored files, how many times to retry a restore_request, and how long to
    wait between retries.

    Environment Vars:
        RESTORE_EXPIRE_DAYS (number, optional, default = 5): The number of days
            the restored file will be accessible in the S3 bucket before it
            expires.
        RESTORE_REQUEST_RETRIES (number, optional, default = 3): The number of
            attempts to retry a restore_request that failed to submit.
        RESTORE_RETRY_SLEEP_SECS (number, optional, default = 0): The number of
            seconds to sleep between retry attempts.
        RESTORE_RETRIEVAL_TYPE (string, optional, default = 'Standard'): the
            Tier for the restore request. Valid values are
            'Standard'|'Bulk'|'Expedited'.
        DATABASE_PORT (string): the database port. The standard is 5432.
        DATABASE_NAME (string): the name of the database.
        DATABASE_USER (string): the name of the application user.

    Parameter Store:
        drdb-user-pass (string): the password for the application user
            (DATABASE_USER).
        drdb-host (string): the database host.

    Args:
        event (dict): A dict with the following keys:
            glacierBucket (string): The name of the glacier bucket from which
                the files will be restored.
            granules (list(dict)): A list of dicts with the following keys:
                granuleId (string): The id of the granule being restored.
                keys (list(string)): list of keys (glacier keys) for the
                    granule.
            Example: event: {'glacierBucket': 'some_bucket',
                             'granules': [{'granuleId': 'granxyz',
                                           'keys': ['path1', 'path2']}]}
        context (Object): None

    Returns:
        dict: The dict returned from the task. All 'success' values will be
        True. If they were not all True, the RestoreRequestError exception
        would be raised.

    Raises:
        RestoreRequestError: An error occurred calling restore_object for one
            or more files. The same dict that is returned for a successful
            granule restore will be included in the message, with
            'success' = False for the files for which the restore request
            failed to submit.
    """
    LOGGER.setMetadata(event, context)
    return run_cumulus_task(task, event, context)
def test_simple_handler(self):
    """The task's return value is wrapped into a full cumulus message."""
    def handler_fn(event, context):
        return event

    result = run_cumulus_task(handler_fn, create_event(), LambdaContextMock())
    self.assertIsNotNone(result)
    self.assertEqual(result['cumulus_meta']['task'], 'Example')
    # The payload carries the entire event returned by the task function.
    self.assertEqual(result['payload']['input']['anykey'], 'anyvalue')
def test_message_adapter_disabled(self):
    """When CUMULUS_MESSAGE_ADAPTER_DISABLED is 'true', the raw task result is returned.

    The environment variable is restored in a finally block so the setting
    cannot leak into other tests — the original version left it set.
    """
    def disabled_adapter_handler_fn(event, context):
        return {"message": "hello"}

    previous = os.environ.get('CUMULUS_MESSAGE_ADAPTER_DISABLED')
    os.environ['CUMULUS_MESSAGE_ADAPTER_DISABLED'] = 'true'
    try:
        test_event = create_event()
        context = LambdaContextMock()
        response = run_cumulus_task(disabled_adapter_handler_fn, test_event, context)
        self.assertEqual(response["message"], "hello")
    finally:
        # Restore the pre-test environment so later tests see the original value.
        if previous is None:
            del os.environ['CUMULUS_MESSAGE_ADAPTER_DISABLED']
        else:
            os.environ['CUMULUS_MESSAGE_ADAPTER_DISABLED'] = previous
def test_task_function_with_additional_arguments(self):
    """Extra keyword arguments passed to run_cumulus_task are forwarded to the task."""
    taskargs = {"taskArgOne": "one", "taskArgTwo": "two"}

    def handler_fn(event, context, taskArgOne, taskArgTwo):
        # The forwarded kwargs must match exactly what was supplied.
        self.assertEqual(taskArgOne, taskargs['taskArgOne'])
        self.assertEqual(taskArgTwo, taskargs['taskArgTwo'])
        return event

    result = run_cumulus_task(handler_fn, create_event(), **taskargs)
    self.assertIsNotNone(result)
    self.assertEqual(result['cumulus_meta']['task'], 'Example')
    self.assertEqual(result['payload']['input']['anykey'], 'anyvalue')
def lambda_handler(event, context):
    """AWS Lambda Function entrypoint.

    Parameters
    ----------
    event: dict, required
        Lambda trigger event.
    context: object, required
        Lambda Context runtime methods and attributes.
        Context doc: https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html

    Returns
    -------
    The cumulus message produced by running ``process_event`` through
    ``run_cumulus_task``.
    """
    result = run_cumulus_task(process_event, event, context)
    return result
def handler(event, context):  # pylint: disable-msg=unused-argument
    """Lambda handler. Extracts the keys for a granule from an input dict.

    Args:
        event (dict): A dict with the following keys:
            granules (list(dict)): A list of dicts with the following keys:
                granuleId (string): The id of a granule.
                files (list(dict)): list of dicts with the following keys:
                    key (string): The key of the file to be returned.
                    other dictionary keys may be included, but are not used.
                other dictionary keys may be included, but are not used.
            Example:
                event: {'granules': [
                            {'granuleId': 'granxyz',
                             'version": '006',
                             'files': [
                                 {'name': 'file1',
                                  'key': 'key1',
                                  'filename': 's3://dr-test-sandbox-protected/file1',
                                  'type': 'metadata'}]}]}
        context (Object): None

    Returns:
        dict: A dict with the following keys:
            'granules' (list(dict)): list of dicts with the following keys:
                'granuleId' (string): The id of a granule.
                'keys' (list(string)): list of keys for the granule.
            Example:
                {"granules": [{"granuleId": "granxyz",
                               "keys": ["key1", "key2"]}]}

    Raises:
        ExtractFilePathsError: An error occurred parsing the input.
    """
    LOGGER.setMetadata(event, context)
    # Run the extraction task through the cumulus message adapter.
    return run_cumulus_task(task, event, context)
def handler(event, context):
    """Handler that is provided to AWS Lambda."""
    # Attach event and context metadata to the logger before running the task.
    logger.setMetadata(event, context)
    result = run_cumulus_task(task, event, context, schemas)
    return result
def handler(function, event, context):
    """Handler that is provided to AWS Lambda: run *function* as a cumulus task
    with an empty schema mapping."""
    empty_schemas = {}
    return run_cumulus_task(function, event, context, empty_schemas)
def handler(event, context):
    """Run ``lambda_handler`` through the cumulus message adapter.

    Returns an empty list when ``run_cumulus_task`` is falsy — presumably the
    adapter failed to import in this environment (TODO: confirm against the
    module's import block).
    """
    if not run_cumulus_task:
        return []
    return run_cumulus_task(lambda_handler, event, context)
def handler(event, context):
    """AWS Lambda entry point: execute ``task`` through the cumulus message adapter."""
    result = run_cumulus_task(task, event, context)
    return result
def cumulus_handler(cls, event, context=None):
    """General event handler using Cumulus messaging (cumulus-message-adapter).

    Args:
        event: The incoming cumulus event.
        context: Optional Lambda context (defaults to None).

    Returns:
        The result of running ``cls.handler`` through ``run_cumulus_task``.
    """
    handler_fn = cls.handler
    return run_cumulus_task(handler_fn, event, context)
def handler(event, _context):
    """Handler wrapper: run ``task`` through the CMA's ``run_cumulus_task`` method."""
    return run_cumulus_task(task, event, _context)