def run_scheduler_with_rule_event():
    print_verbose(
        "Running Scheduler by sending simulated CloudWatch rule event")

    event = {
        "source": "aws.events",
        "resources": [
            "arn:aws:events:region:000000000000:rule/{0}".format(
                "OpsAutomatorRule-" + os.getenv(handlers.ENV_STACK_NAME))
        ]
    }

    print_verbose("Event is {}", safe_json(event, indent=3))

    lambda_handler(event, used_context)
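
For orientation, here is a minimal sketch of the kind of handler these simulated rule events are meant to exercise. It is not the actual handler; it only illustrates dispatching on the rule name carried in the event's resources ARN, and all names are illustrative.

def sketch_lambda_handler(event, context):
    # Illustrative only: route a simulated CloudWatch rule event by the rule name
    # at the end of the resources ARN (e.g. ".../rule/OpsAutomatorRule-<stack>").
    if event.get("source") != "aws.events":
        return None
    rule_name = event["resources"][0].split("/")[-1]
    if rule_name.startswith("OpsAutomatorRule-"):
        return {"handled-by": "scheduler", "rule": rule_name}
    return {"handled-by": "completion-handler", "rule": rule_name}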
    def test_empty_sources(self, mock_is_user, mock_ldap, mock_source, mock_msg_publisher):
        main.sources = []
        isUser = MagicMock()
        ldap = MagicMock()
        source = MagicMock()
        msg_publisher = MagicMock()
        mock_is_user.return_value = isUser
        mock_ldap.return_value = ldap
        mock_source.return_value = source
        mock_msg_publisher.return_value = msg_publisher
        event = {'dryrun': False}
        lambda_handler(event, None)
        mock_is_user.assert_not_called()
        mock_ldap.assert_called_once()
        mock_source.assert_not_called()
        mock_msg_publisher.assert_not_called()
Example #3
    def test_return_questions_no_category(self):
        """When we do not supply a category we expect to receive a warning"""
        event = {}
        event["queryStringParameters"] = {}
        context = {}
        response = lambda_handler(event, context)
        print(response)
        self.assertTrue("No Category Supplied" in response["body"])
def run_completion_handler():
    print_verbose(
        "Running Scheduler completion handling by sending simulated CloudWatch rule event"
    )

    event = {
        "source": "aws.events",
        "resources": [
            "arn:aws:events:region:123456789012:rule/{0}".format(
                os.getenv(handlers.ENV_COMPLETION_RULE))
        ]
    }

    print_verbose("Event is {}", safe_json(event, indent=3))

    lambda_handler(event, used_context)
Example #5
    def test_return_questions_category(self):
        """When we supply a category will we get questions returned in said category"""
        event = {}
        event["queryStringParameters"] = {"category": "dynamodb"}
        context = {}
        response = lambda_handler(event, context)

        self.assertTrue("dynamodb" in response["body"])
Example #6
    def test_return_questions(self):
        """Do we recieve json response. We want one please"""
        event = {}
        event["queryStringParameters"] = {}
        context = {}
        response = lambda_handler(event, context)

        self.assertTrue(
            "application/json" in response["headers"]["Content-Type"])
def test_simple_event():
    record = {
        'Records': [{
            'eventVersion': '2.1',
            'eventSource': 'aws:s3',
            'awsRegion': 'us-east-1',
            'eventTime': '2019-07-26T04:42:12.870Z',
            'eventName': 'ObjectCreated:Put',
            'userIdentity': {
                'principalId': 'AWS:AROATTCRBLUWT5FNY52QN:AWSFirehoseToS3'
            },
            'requestParameters': {
                'sourceIPAddress': '3.84.239.21'
            },
            'responseElements': {
                'x-amz-request-id': '9B9322B197D30E18',
                'x-amz-id-2': 'Cth5SSzqWPMzHibJ25AgXZcMs+ojD3C4l4MUAbOfspKRD6vf0MJ+02taoZPpsBcRLaN+OVw+lJo='
            },
            's3': {
                's3SchemaVersion': '1.0',
                'configurationId': 'Call Log Parser',
                'bucket': {
                    'name': 'waf-autobot-waflogbucket-1uoczbjdq1nxf',
                    'ownerIdentity': {
                        'principalId': 'A34ROINPPVRDOV'
                    },
                    'arn': 'arn:aws:s3:::waf-autobot-waflogbucket-1uoczbjdq1nxf'
                },
                'object': {
                    'key': 'AWSLogs/2019/07/26/04/aws-waf-logs-wafautobot_BmsvvM-1-2019-07-26-04-37-11-11e9ddf7-c674-4963-973c-6bbdd35c67aa.gz',
                    'size': 710,
                    'eTag': '7a989b9af65e43ab6216fedfa95c5978',
                    'sequencer': '005D3A84A4C6F703F4'
                }
            }
        }]
    }
    lambda_handler(record, 1)
    def test_lambda_handler_makes_prediction(self):
        main.initialize(param_path)
        main.client.publish = MagicMock()

        event = {}
        event['filepath'] = './resources/img/blue_box_000001.jpg'
        response = main.lambda_handler(event, {})

        # Assert message published correctly
        self.assertEqual(response['prediction'][0][0], 0.0)
        main.client.publish.assert_called_with(topic='blog/infer/output', payload=json.dumps(response))
    def test_user_valid(self, mock_is_user, mock_ldap, mock_source, mock_msg_publisher):
        isUser = MagicMock()
        ldap = MagicMock()
        source = MagicMock()
        msg_publisher = MagicMock()
        main.sources = [source]
        mock_is_user.return_value = isUser
        mock_ldap.return_value = ldap
        mock_source.return_value = source
        mock_msg_publisher.return_value = msg_publisher
        source.get_users.return_value = [{'email': '*****@*****.**', 'id': "abj148djh"}]
        isUser.return_value = True
        event = {'dryrun': False}
        lambda_handler(event, None)
        mock_is_user.assert_not_called()
        mock_ldap.assert_called_once()
        mock_source.assert_not_called()
        mock_msg_publisher.assert_not_called()
        source.get_users.assert_called_once()
        msg_publisher.publish_message.assert_not_called()
        source.remove_user.assert_not_called()
Example #10
    def _simulate_stream_processing(table_action, new_item, old_item=None):

        # If not running in a Lambda environment, create the event that normally results from DynamoDB inserts
        # and pass it directly to the main lambda handler, simulating an event triggered by the DynamoDB stream

        if old_item is None:
            old_item = {}
        account = AwsService.get_aws_account()
        region = boto3.Session().region_name
        table = os.environ.get(handlers.ENV_ACTION_TRACKING_TABLE)
        event = {
            "Records": [
                {
                    "eventName": table_action,
                    "eventSourceARN": "arn:aws:dynamodb:{}:{}:table/{}/stream/{}".format(region, account, table,
                                                                                      datetime.utcnow().isoformat()),
                    "eventSource": "aws:dynamodb",
                    "dynamodb": {
                        "NewImage": {n: TaskTrackingTable.typed_item(new_item[n]) for n in new_item},
                        "OldImage": {o: TaskTrackingTable.typed_item(old_item[o]) for o in old_item}
                    }
                }]
        }
        main.lambda_handler(event, None)
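
A usage sketch for the helper above, assuming it is callable from the surrounding test code; the tracking item shown is illustrative and real items carry more attributes.

    # Simulate the stream record produced when a new tracking item is inserted.
    _simulate_stream_processing("INSERT", new_item={"Id": "task-0001", "Status": "pending"})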
Example #11
    def _execute_task(self, task, dt=None):
        """
        Execute a task by starting a lambda function that selects the resources for that action
        :param task: Task started
        :param dt: Task start datetime
        :return:
        """
        event = {
            handlers.HANDLER_EVENT_ACTION: handlers.HANDLER_ACTION_SELECT_RESOURCES,
            handlers.HANDLER_EVENT_TASK: task,
            handlers.HANDLER_EVENT_SOURCE: "aws:events",
            handlers.HANDLER_EVENT_TASK_DT: dt.isoformat() if dt is not None else datetime.utcnow().isoformat()
        }
        if self._context is not None:
            # start lambda function to scan for task resources
            payload = str.encode(safe_json(event))
            client = get_client_with_retries("lambda", ["invoke"], context=self._context)
            resp = client.invoke_with_retries(FunctionName=self._context.function_name,
                                              Qualifier=self._context.function_version,
                                              InvocationType="Event", LogType="None", Payload=payload)
            self._logger.info(INFO_LAMBDA, resp["StatusCode"], payload)
        else:
            # or if not running in lambda environment pass event to main task handler
            lambda_handler(event, None)
    def _execute_task(self, task, dt=None, task_group=None):
        """
        Execute a task by starting a lambda function that selects the resources for that action
        :param task: Task started
        :param dt: Task start datetime
        :return:
        """

        debug_state = self._logger.debug_enabled
        self._logger.debug_enabled = task.get(handlers.TASK_DEBUG, False)
        if task_group is None:
            task_group = str(uuid.uuid4())
        try:

            event = {
                handlers.HANDLER_EVENT_ACTION: handlers.HANDLER_ACTION_SELECT_RESOURCES,
                handlers.HANDLER_EVENT_TASK: task,
                handlers.HANDLER_EVENT_SOURCE: "scheduler-handler",
                handlers.HANDLER_EVENT_TASK_DT: dt.isoformat() if dt is not None else datetime.utcnow().isoformat(),
                handlers.HANDLER_EVENT_TASK_GROUP: task_group
            }

            sub_tasks = list(
                ScheduleHandler.task_account_region_sub_tasks(task))
            for sub_task in sub_tasks:

                event[handlers.HANDLER_EVENT_SUB_TASK] = sub_task

                if not handlers.running_local(self._context):

                    if task[handlers.TASK_SELECT_SIZE] != actions.ACTION_USE_ECS:
                        # start lambda function to scan for task resources
                        payload = str.encode(safe_json(event))
                        client = get_client_with_retries("lambda", ["invoke"],
                                                         context=self._context)

                        function_name = "{}-{}-{}".format(
                            os.getenv(handlers.ENV_STACK_NAME),
                            os.getenv(handlers.ENV_LAMBDA_NAME),
                            task[handlers.TASK_SELECT_SIZE])

                        self._logger.info(INFO_RUNNING_LAMBDA, function_name)

                        try:
                            resp = client.invoke_with_retries(
                                FunctionName=function_name,
                                InvocationType="Event",
                                LogType="None",
                                Payload=payload)

                            self._logger.debug(DEBUG_LAMBDA,
                                               resp["StatusCode"], payload)
                        except Exception as ex:
                            self._logger.error(ERR_FAILED_START_LAMBDA_TASK,
                                               str(ex))

                    else:
                        ecs_args = {
                            handlers.HANDLER_EVENT_ACTION:
                            handlers.HANDLER_ACTION_SELECT_RESOURCES,
                            handlers.TASK_NAME: task[handlers.TASK_NAME],
                            handlers.HANDLER_EVENT_SUB_TASK: sub_task
                        }

                        ecs_memory = task.get(handlers.TASK_SELECT_ECS_MEMORY,
                                              None)

                        self._logger.info(INFO_RUNNING_AS_ECS_JOB,
                                          task[handlers.TASK_NAME])
                        handlers.run_as_ecs_job(ecs_args,
                                                ecs_memory_size=ecs_memory,
                                                context=self._context,
                                                logger=self._logger)

                else:
                    if task[handlers.TASK_SELECT_SIZE] == actions.ACTION_USE_ECS:
                        ecs_args = {
                            handlers.HANDLER_EVENT_ACTION:
                            handlers.HANDLER_ACTION_SELECT_RESOURCES,
                            handlers.TASK_NAME: task[handlers.TASK_NAME],
                            handlers.HANDLER_EVENT_SUB_TASK: sub_task
                        }

                        ecs_memory = task.get(handlers.TASK_SELECT_ECS_MEMORY,
                                              None)

                        handlers.run_as_ecs_job(ecs_args,
                                                ecs_memory_size=ecs_memory,
                                                logger=self._logger)
                    else:
                        # or if not running in lambda environment pass event to main task handler
                        lambda_handler(event, self._context)

            return task_group, sub_tasks

        finally:
            self._logger.debug_enabled = debug_state
Example #13
    def test_lambda_handler(self):
        self.assertEqual(lambda_handler("status", "test")["statusCode"], 200)
Example #14
os.environ['WATSON_KEY'] = 'ce0618f33d6b5dcb305c884dd1ab6ce720dc0562'
os.environ['PROJECTOXFORD_KEY'] = 'fe1ddf67faa347b1847318f516e6065f'
os.environ['YANDEX_KEY'] = 'trnsl.1.1.20170108T012202Z.fa1a8d03eb8d33be.60cd4068fa2f37d75d11fad1906531974ce3ccdf'

from main import lambda_handler

event = {
    "response_id": "test",
    "timestamp": "test",
    "session_id": "test",
    "raw_response": "I farm apples and chickens",
    "question": {
        "question_id": "1",
        "question_text": "What do you farm?",
        "metrics": [{
            "metric_id": 1,
            "metric_type": "sentiment"
        }, {
            "metric_id": 4,
            "metric_type": "entity"
        }]
    }
}
response = lambda_handler(event, None)
print('response: {}'.format(response))
assert response is not None
Example #15
from main import lambda_handler
import uuid
import json

body = {"ip": "127.0.0.1", "stage": "test", "id": str(uuid.uuid4()), "resonse_size": "small", "claim":"the death of Sherlock Holmes almost destroyed the magazine thries. When Arthur Conan Doyle killed him off in 1893, 20,000 people cancelled their subscriptions. The magazine barely survived. Its staff referred to Holmes’ death as “the dreadful event”.", "link":"http://www.bbc.com/culture/story/20160106-how-sherlock-holmes-changed-the-world"}
print(lambda_handler(body, None))#{"isBase64Encoded": False, "body": body, "requestContext": {"http": {"method": "POST", "sourceIp": "dfsds"}, "stage": "dev", }},0))
# {"ip": "127.0.0.1", "stage": "test", "id": "", "resonse_size": "small", "claim":"the death of Sherlock Holmes almost destroyed the magazine thries. When Arthur Conan Doyle killed him off in 1893, 20,000 people cancelled their subscriptions. The magazine barely survived. Its staff referred to Holmes’ death as “the dreadful event”.", "link":"http://www.bbc.com/culture/story/20160106-how-sherlock-holmes-changed-the-world"}
Example #16
import main

# Replace the placeholder strings below with your own values.
event = {
    "queryStringParameters": {
        "slack_url": "<your Slack incoming-webhook URL>",
        "app_id": "<your App_id>",
        "date_scope_range": "<date scope>",
        "channel_name": "<Slack channel name>"
    }
}

result = main.lambda_handler(event=event, context='')

print(result)
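
Filled in with dummy values (none of these are working credentials, and the expected parameter formats are assumptions), the same call could look like this:

# Dummy illustration values; substitute your own webhook URL, app id, date scope and channel.
event = {
    "queryStringParameters": {
        "slack_url": "https://hooks.slack.com/services/T000/B000/XXXXXXXX",
        "app_id": "1234567890",
        "date_scope_range": "7",
        "channel_name": "#app-reviews"
    }
}

result = main.lambda_handler(event=event, context='')
print(result)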
    def test_mappings(self):
        result = lambda_handler(
            {
                'requestId': 'one',
                'fragment': {
                    'AWSTemplateFormatVersion': '2010-09-09',
                    'Mappings': {
                        'stacks': {
                            'one': {
                                'url': 'production.yml',
                                'version': 2
                            },
                            'two': {
                                'url': 'development.yml'
                            }
                        }
                    },
                    'Resources': {
                        'BaseStack': {
                            'Type': 'AWS::CloudFormation::Stack',
                            'Replicates': {
                                'Elements': 'stacks'
                            },
                            'Properties': {
                                'TemplateURL': {
                                    'Fn::Sub': [
                                        '${repl_url}-${repl_version}',
                                        {
                                            'repl_version': 1
                                        }
                                    ]
                                }
                            }
                        },
                        'OtherStack': {
                            'Type': 'AWS::CloudFormation::Stack',
                            'Properties': {
                                'TemplateURL': 'template.yml',
                            }
                        }
                    }
                }
            }, None
        )

        assert result == {
            'requestId': 'one',
            'status': 'success',
            'fragment': {
                'AWSTemplateFormatVersion': '2010-09-09',
                'Mappings': {
                    'stacks': {
                        'one': {
                            'url': 'production.yml',
                            'version': 2
                        },
                        'two': {
                            'url': 'development.yml'
                        }
                    }
                },
                'Resources': {
                    'BaseStackOne': {
                        'Type': 'AWS::CloudFormation::Stack',
                        'Properties': {
                            'TemplateURL': {
                                'Fn::Sub': [
                                    '${repl_url}-${repl_version}',
                                    {
                                        'repl_url': 'production.yml',
                                        'repl_version': 2
                                    }
                                ]
                            }
                        }
                    },
                    'BaseStackTwo': {
                        'Type': 'AWS::CloudFormation::Stack',
                        'Properties': {
                            'TemplateURL': {
                                'Fn::Sub': [
                                    '${repl_url}-${repl_version}',
                                    {
                                        'repl_url': 'development.yml',
                                        'repl_version': 1
                                    }
                                ]
                            }
                        }
                    },
                    'OtherStack': {
                        'Type': 'AWS::CloudFormation::Stack',
                        'Properties': {
                            'TemplateURL': 'template.yml',
                        }
                    }
                }
            }
        }
    else:
        stack = sys.argv[1]

    try:
        cloudformation_client = _service_client("cloudformation")
        lambda_resource = cloudformation_client.describe_stack_resource(
            StackName=stack,
            LogicalResourceId="Main").get("StackResourceDetail", None)

        lambda_client = boto3.client("lambda")
        lambda_function = lambda_client.get_function(
            FunctionName=lambda_resource["PhysicalResourceId"])

        environment = lambda_function["Configuration"]["Environment"]["Variables"]

        for env_var in environment:
            os.environ[env_var] = environment[env_var]

    except Exception as ex:
        print("error setting up environment, {}".format(ex))

    event = {
        "source": "aws.events",
        "detail-type": "Scheduled Event",
        "resources":
        ["arn/{}".format(os.getenv(configuration.ENV_SCHEDULER_RULE))]
    }

    lambda_handler(event, None)
from main import lambda_handler

lambda_handler(0,0)
Example #20
import json
from pprint import pprint

import main


def testEvent(eventjson_file):
    with open(eventjson_file) as data_file:
        event = json.load(data_file)
        res = main.lambda_handler(event, {})
        pprint(res)
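
The helper can then be pointed at any saved Lambda test event; the file name below is illustrative.

if __name__ == "__main__":
    testEvent("event.json")  # hypothetical sample file containing a saved Lambda event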
def test_main():

    while not (pg_ready(PG_HOST) and pg_ready(PG_HOST_EMPTY)):
        print("Waiting for Postgres containers...")
        time.sleep(1)

    s3 = boto3.resource('s3', endpoint_url=S3_URL)
    while not (s3_ready(s3)):
        print("Waiting for S3 container...")
        time.sleep(1)

    ssm = boto3.client('ssm', endpoint_url=SSM_URL)
    while not (ssm_ready(ssm)):
        print("Waiting for SSM container...")
        time.sleep(1)

    bucket = s3.create_bucket(Bucket=S3_BUCKET)

    ssm.put_parameter(Name=POSTGRES_PASSWORD_SSM_KEY,
                      Value='password',
                      Type='SecureString')

    os.environ.update({
        'POSTGRES_HOST': PG_HOST,
        'POSTGRES_PORT': PG_PORT,
        'POSTGRES_USER': PG_USER,
        'POSTGRES_DATABASE': PG_DATABASE,
        'S3_BUCKET': S3_BUCKET,
    })

    lambda_handler(
        {
            'organization_schema': ORGANIZATION_SCHEMA,
            's3_key': S3_KEY,
        }, {})

    bucket.download_file(S3_KEY, DUMP)

    # Import the dump file into a clean database
    # There is not an easy way to import a whole file using SqlAlchemy, so use psql
    subprocess.check_output([
        PSQL, '--host', PG_HOST_EMPTY, '--port', PG_PORT, '--user', PG_USER,
        '--dbname', PG_DATABASE, '--file', DUMP, '-v', 'ON_ERROR_STOP=1'
    ],
                            env={
                                'PGPASSWORD': PG_PASSWORD,
                                'LD_LIBRARY_PATH': PG_PATH
                            })

    # Connect to the new database
    engine = create_engine(
        URL(drivername='postgresql',
            database=PG_DATABASE,
            username=PG_USER,
            password=PG_PASSWORD,
            host=PG_HOST_EMPTY,
            port=PG_PORT))

    # Does the pennsievedb seed data from pennsieve-api/local-seed.sql look good?
    with engine.connect() as conn:
        rows = conn.execute(
            f'SELECT * FROM "{ORGANIZATION_SCHEMA}".datasets;').fetchall()
        assert len(rows) == 1
        assert rows[0]['name'] == 'Pennsieve Dataset'
        assert rows[0]['node_id'] == 'N:dataset:c0f0db41-c7cb-4fb5-98b4-e90791f8a975'

        with pytest.raises(ProgrammingError):
            conn.execute('SELECT * FROM "2".datasets;').fetchall()

        rows = conn.execute(
            f'SELECT * FROM "{ORGANIZATION_SCHEMA}".files;').fetchall()
        assert len(rows) == 0
Example #22
    def handle_request(self):
        """
        Handles the CloudWatch rule event
        :return: Information on any started tasks
        """
        def is_matching_event_state(event_state, ec2event):
            # match if the task listens to all states ("*") or lists this state explicitly
            return ec2event == "*" or event_state in [s.strip() for s in ec2event.split(",")]

        try:

            result = []
            start = datetime.now()
            self._logger.info("Handler {}", self.__class__.__name__)

            state = self._event.get("detail", {}).get("state")
            if state is not None:
                state = state.lower()

            account = self._event["account"]
            region = self._event["region"]
            instance_id = self._event["detail"]["instance-id"]
            dt = self._event["time"]
            task = None

            try:

                # for all tasks in the configuration that subscribe to EC2 state events
                for task in [
                        t for t in
                        TaskConfiguration(context=self._context,
                                          logger=self._logger).get_tasks()
                        if t.get("events") is not None and EC2_STATE_EVENT in
                        t["events"] and t.get("enabled", True)
                ]:

                    task_name = task["name"]

                    ec2_event = task["events"][EC2_STATE_EVENT]

                    if not is_matching_event_state(state, ec2_event):
                        continue

                    result.append(task_name)

                    self._logger.info(INFO_EVENT, task_name, state,
                                      instance_id, account, region,
                                      safe_json(task, indent=2))
                    # create an event for lambda function that scans for resources for this task
                    event = {
                        handlers.HANDLER_EVENT_ACTION: handlers.HANDLER_ACTION_SELECT_RESOURCES,
                        handlers.HANDLER_SELECT_ARGUMENTS: {
                            handlers.HANDLER_EVENT_REGIONS: [region],
                            handlers.HANDLER_EVENT_ACCOUNT: account,
                            "InstanceIds": [instance_id]
                        },
                        handlers.HANDLER_EVENT_SOURCE: EC2_STATE_EVENT,
                        handlers.HANDLER_EVENT_TASK: task,
                        handlers.HANDLER_EVENT_TASK_DT: dt
                    }

                    if self._context is not None:
                        # start lambda function to scan for task resources
                        payload = str.encode(safe_json(event))
                        client = get_client_with_retries("lambda", ["invoke"],
                                                         context=self._context)
                        client.invoke_with_retries(
                            FunctionName=self._context.function_name,
                            Qualifier=self._context.function_version,
                            InvocationType="Event",
                            LogType="None",
                            Payload=payload)
                    else:
                        # or if not running in lambda environment pass event to main task handler
                        lambda_handler(event, None)

                return safe_dict({
                    "datetime": datetime.now().isoformat(),
                    "running-time": (datetime.now() - start).total_seconds(),
                    "event-datetime": dt,
                    "started-tasks": result
                })

            except ValueError as ex:
                self._logger.error("{}\n{}".format(ex, safe_json(task,
                                                                 indent=2)))

        finally:
            self._logger.flush()
Example #23
import main
import json

event = json.load(open('event.json'))

main.lambda_handler(event, {})
Example #24
    def _start_task_execution(self,
                              task_item,
                              action=handlers.HANDLER_ACTION_EXECUTE):
        """
        Creates an instance of the lambda function that executes the task's action. It first checks whether the action has
        specific memory requirements and, based on that, runs either a copy of this function or one configured with the
        required memory. All information for executing the action is passed in the event.
        :param task_item: Task item for which action is executed
        :return:
        """

        try:

            self._logger.debug(
                "Entering start_task_execution ({}) with task {}", action,
                safe_json(task_item, indent=3))

            # Create event for execution of the action and set its action so that it is picked up by the execution handler
            event = {i: task_item.get(i) for i in task_item}
            event[handlers.HANDLER_EVENT_ACTION] = action

            self._logger.debug(DEBUG_ACTION,
                               task_item[handlers.TASK_TR_ACTION],
                               task_item[handlers.TASK_TR_NAME],
                               task_item[handlers.TASK_TR_ID])

            self._logger.debug(
                DEBUG_ACTION_PARAMETERS,
                safe_json(task_item.get(handlers.TASK_TR_PARAMETERS, {}),
                          indent=3))

            # get memory allocation for executing the task
            lambda_size = handlers.TASK_TR_COMPLETION_SIZE \
                if action == handlers.HANDLER_ACTION_TEST_COMPLETION \
                else handlers.TASK_TR_EXECUTE_SIZE

            execute_lambda_size = task_item.get(lambda_size,
                                                actions.ACTION_SIZE_STANDARD)

            if execute_lambda_size == actions.ACTION_USE_ECS:
                ecs_memory = task_item.get(
                    handlers.TASK_EXECUTE_ECS_MEMORY
                    if action == handlers.HANDLER_ACTION_EXECUTE else
                    handlers.TASK_COMPLETION_ECS_MEMORY, None)
            else:
                ecs_memory = None

            if not handlers.running_local(self._context):

                self._logger.debug(DEBUG_MEMORY_SIZE, execute_lambda_size)

                if execute_lambda_size != actions.ACTION_USE_ECS:

                    # create event payload
                    payload = str.encode(safe_json(event))

                    # determine which lambda to execute on
                    function_name = "{}-{}-{}".format(
                        os.getenv(handlers.ENV_STACK_NAME),
                        os.getenv(handlers.ENV_LAMBDA_NAME),
                        execute_lambda_size)

                    self._logger.debug(
                        "Running execution of task on lambda function {}",
                        function_name)

                    self._logger.debug(DEBUG_LAMBDA_FUNCTION_, function_name,
                                       payload)
                    # start lambda function
                    lambda_client = boto_retry.get_client_with_retries(
                        "lambda", ["invoke"],
                        context=self._context,
                        logger=self._logger)
                    resp = lambda_client.invoke_with_retries(
                        FunctionName=function_name,
                        InvocationType="Event",
                        LogType="None",
                        Payload=payload)

                    task_info = {
                        "id": task_item[handlers.TASK_TR_ID],
                        "task": task_item[handlers.TASK_TR_NAME],
                        "action": task_item[handlers.TASK_TR_ACTION],
                        "payload": payload,
                        "status-code": resp["StatusCode"]
                    }

                    self._logger.debug(DEBUG_LAMBDA,
                                       safe_json(task_info, indent=2))
                    self.invoked_lambda_functions.append(task_info)
                else:
                    # run as ECS job
                    ecs_args = {
                        "subnets": os.getenv('AWSVPC_SUBNETS'),
                        "securitygroups": os.getenv('AWSVPC_SECURITYGROUPS'),
                        "assignpublicip": os.getenv('AWSVPC_ASSIGNPUBLICIP'),
                        handlers.HANDLER_EVENT_ACTION: action,
                        handlers.TASK_NAME: task_item[handlers.TASK_TR_NAME],
                        handlers.TASK_TR_ID: task_item[handlers.TASK_TR_ID]
                    }

                    self._logger.debug(DEBUG_RUNNING_ECS_TASK, action,
                                       task_item[handlers.TASK_TR_NAME])
                    handlers.run_as_ecs_job(ecs_args,
                                            ecs_memory_size=ecs_memory,
                                            context=self._context,
                                            logger=self._logger)

            else:
                lambda_handler(event, self._context)

            ResultNotifications(context=self._context,
                                logger=self._logger).publish_started(task_item)

        except Exception as ex:
            self._logger.error(ERR_RUNNING_TASK, task_item, str(ex),
                               full_stack())
Example #25
def test_main():
    main.lambda_handler(test_body, None)
Example #26
import sys
import json
import main

with open(sys.argv[1], 'r') as f:
    j = json.loads(f.read())

    r = main.lambda_handler(j, None)

    print(r)
    def test_lambda_handler_noops_empty_filepath(self):
        event = {}
        response = main.lambda_handler(event, {})
        self.assertIsNone(response, 'Should return none if no filepath found')
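
The no-op test above expects the handler to return early when the event carries no filepath. A guard like the sketch below would satisfy it; this is illustrative and not the project's actual code.

def lambda_handler(event, context):
    # Bail out with None when no filepath is supplied, otherwise continue with inference.
    filepath = event.get('filepath')
    if not filepath:
        return None
    # ... load the image at filepath, run the model, publish and return the prediction ...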
    def _start_task_execution(self,
                              task_item,
                              action=handlers.HANDLER_ACTION_EXECUTE):
        """
        Creates an instance of the lambda function that executes the task's action. It first checks whether the action has
        specific memory requirements and, based on that, runs either a copy of this function or one configured with the
        required memory. All information for executing the action is passed in the event.
        :param task_item: Task item for which action is executed
        :return:
        """

        try:

            # get the action for the task
            action_properties = actions.get_action_properties(
                task_item[tracking.TASK_TR_ACTION])

            # check if there are specific memory requirements
            action_memory_size = action_properties.get(actions.ACTION_MEMORY,
                                                       None)
            if action_memory_size is not None:
                self._logger.info(INFO_MEMORY_SIZE, action_memory_size)

            # Create event for execution of the action and set its action so that it is picked up by the execution handler
            event = {i: task_item.get(i) for i in task_item}
            event[handlers.HANDLER_EVENT_ACTION] = action

            self._logger.debug(DEBUG_ACTION,
                               task_item[tracking.TASK_TR_ACTION],
                               task_item[tracking.TASK_TR_NAME],
                               task_item[tracking.TASK_TR_ID])
            self._logger.debug(
                DEBUG_ACTION_PARAMETERS,
                safe_json(task_item.get(tracking.TASK_TR_PARAMETERS, {})))

            if self._context is not None:
                # if running in a Lambda environment the action will be executed asynchronously in a new instance of a
                # lambda function. This gives each individual action 5 minutes to execute and allows parallel execution of
                # multiple actions

                # create event payload
                payload = str.encode(safe_json(event))
                lambda_name = self._context.function_name

                # based on the memory requirements determine the lambda function to use
                if action_memory_size is not None and action_memory_size != actions.LAMBDA_DEFAULT_MEMORY:
                    lambda_name = lambda_name.replace(
                        SCHEDULER_LAMBDA_FUNTION_DEFAULT,
                        SIZED_SCHEDULER_NAME_TEMPLATE.format(
                            action_memory_size))

                self._logger.info(INFO_LAMBDA_FUNCTION_, lambda_name, payload)
                # start lambda function
                lambda_client = boto_retry.get_client_with_retries(
                    "lambda", ["invoke"], context=self._context)
                resp = lambda_client.invoke_with_retries(
                    FunctionName=lambda_name,
                    InvocationType="Event",
                    LogType="None",
                    Payload=payload)

                task_info = {
                    "id": task_item[tracking.TASK_TR_ID],
                    "task": task_item[tracking.TASK_TR_NAME],
                    "action": task_item[tracking.TASK_TR_ACTION],
                    "payload": payload,
                    "status-code": resp["StatusCode"]
                }

                self._logger.info(DEBUG_LAMBDA, safe_json(task_info, indent=2))
                self.invoked_lambda_functions.append(task_info)

            else:
                # if not running in Lambda, for debugging purposes, the event is passed to the main handler,
                # which creates an instance of the execution handler to execute the action. Note that execution
                # of actions in this scenario is serialized.
                lambda_handler(event, None)

        except Exception as ex:
            self._logger.error("Error running task {}, {}, {}", task_item,
                               str(ex), traceback.format_exc())
def main():
    print(lambda_handler(launch_request, {}))
Example #30
    def handle_request(self, use_custom_select=True):
        """
        Handles the CloudWatch rule event
        :return: Information on any started tasks
        """
        try:

            self._logger.info("Handling CloudWatch event {}",
                              safe_json(self._event, indent=3))

            result = []
            start = datetime.now()

            dt = self._event_time()
            config_task = None

            source_resource_tags = None

            try:

                # for all event-triggered tasks in the configuration
                for config_task in TaskConfiguration(
                        context=self._context,
                        logger=self._logger).get_tasks():

                    self._logger.debug_enabled = config_task.get(
                        handlers.TASK_DEBUG, False)

                    if not self._event_triggers_task(task=config_task):
                        continue

                    # tasks that react to events with a wider resource scope than the resource causing the event may
                    # have a filter that is used to filter on the tags of the source resource
                    event_source_tag_filter = config_task.get(
                        handlers.TASK_EVENT_SOURCE_TAG_FILTER, None)
                    if event_source_tag_filter is not None:
                        if source_resource_tags is None:
                            # get the tags for the source resource of the event
                            session = services.get_session(
                                self._role_executing_triggered_task,
                                logger=self._logger)
                            if session is None:
                                self._logger.error(
                                    ERR_NO_SESSION_FOR_GETTING_TAGS)
                                continue
                            try:
                                source_resource_tags = self._source_resource_tags(
                                    session, config_task)
                            except Exception as ex:
                                self._logger.error(
                                    ERR_GETTING_EVENT_SOURCE_RESOURCE_TAGS, ex)
                                continue

                            self._logger.debug(
                                "Tags for event source resource are  {}",
                                source_resource_tags)

                        # apply filter to source resource tags
                        if not TagFilterExpression(
                                event_source_tag_filter).is_match(
                                    source_resource_tags):
                            self._logger.debug(
                                "Tags of source resource do not match tag filter {}",
                                event_source_tag_filter)
                            continue

                    task_name = config_task[handlers.TASK_NAME]
                    result.append(task_name)

                    select_parameters = self._select_parameters(
                        self._event_name(), config_task)
                    if select_parameters is None:
                        continue

                    self._logger.debug(DEBUG_EVENT, task_name,
                                       self._event_name(), select_parameters,
                                       self._event_account(),
                                       self._event_region(),
                                       safe_json(config_task, indent=3))

                    # create an event for lambda function that scans for resources for this task
                    lambda_event = {
                        handlers.HANDLER_EVENT_ACTION: handlers.HANDLER_ACTION_SELECT_RESOURCES,
                        handlers.HANDLER_EVENT_CUSTOM_SELECT: use_custom_select,
                        handlers.HANDLER_SELECT_ARGUMENTS: {
                            handlers.HANDLER_EVENT_REGIONS: [self._event_region()],
                            handlers.HANDLER_EVENT_ACCOUNT: self._event_account(),
                            handlers.HANDLER_EVENT_RESOURCE_NAME: config_task[handlers.TASK_RESOURCE_TYPE],
                        },
                        handlers.HANDLER_EVENT_SOURCE: "{}:{}:{}".format(
                            self._handled_event_source, self._handled_detail_type, self._event_name()),
                        handlers.HANDLER_EVENT_TASK: config_task,
                        handlers.HANDLER_EVENT_TASK_DT: dt
                    }

                    for i in select_parameters:
                        lambda_event[handlers.HANDLER_SELECT_ARGUMENTS][i] = select_parameters[i]

                    if self._event_resources() is not None:
                        self._logger.debug(
                            DEBUG_EVENT_RESOURCES,
                            safe_json(self._event_resources(), indent=3))
                        lambda_event[handlers.HANDLER_SELECT_RESOURCES] = self._event_resources()

                    if not handlers.running_local(self._context):
                        # start lambda function to scan for task resources
                        payload = str.encode(safe_json(lambda_event))
                        client = get_client_with_retries("lambda", ["invoke"],
                                                         context=self._context,
                                                         logger=self._logger)
                        client.invoke_with_retries(
                            FunctionName=self._context.function_name,
                            InvocationType="Event",
                            LogType="None",
                            Payload=payload)
                    else:
                        # or if not running in lambda environment pass event to main task handler
                        lambda_handler(lambda_event, None)

                return safe_dict({
                    "datetime": datetime.now().isoformat(),
                    "running-time": (datetime.now() - start).total_seconds(),
                    "event-datetime": dt,
                    "started-tasks": result
                })

            except ValueError as ex:
                self._logger.error(ERR_HANDLING_EVENT_IN_BASE_HANDLER, ex,
                                   safe_json(config_task, indent=2))

        finally:
            self._logger.flush()
Example #31
def test_lambda():
    print('running')
    response = lambda_handler(request)
    print(response)

    return response