def handler(event, context):
    request_type = event['RequestType']
    stack_arn = event['StackId']
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(stack_arn) + '-' + event['LogicalResourceId']
    data = {}
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    if request_type == 'Delete':
        _clear_interface_refs(stack)
        return custom_resource_response.success_response(data, physical_resource_id)

    if not stack.is_deployment_stack:
        raise RuntimeError("InterfaceDependecyResolver can only be stood up on a deployment stack")

    resource_groups = stack.resource_groups

    configuration_bucket_name = stack.project.configuration_bucket
    if not configuration_bucket_name:
        raise RuntimeError('Not adding service settings because there is no project configuration bucket.')
    service_directory = ServiceDirectory(configuration_bucket_name)


    interface_deps = event["ResourceProperties"].get("InterfaceDependencies", {})
    _clear_interface_refs(stack)
    for gem, interface_list in interface_deps.iteritems():
        for interface in interface_list:
            print "getting url for interface {} from gem {} to use in {}:{}".format(interface["id"], interface["gem"], gem, interface["function"])
            interfaces = service_directory.get_interface_services(stack.deployment_name, interface["id"])
            if len(interfaces) > 0:
                _add_url_to_lambda(interfaces[0], gem, interface["function"], stack)
            else:
                print "Failed to lookup interface {}".format(len(interfaces))

    return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    """Resolve the ARN of a shared Cognito identity-pool role, creating one if no pool exists."""
    if event['RequestType'] == 'Delete':
        # Nothing to tear down here; report success with an empty ARN.
        return custom_resource_response.success_response({'Arn': ''}, '')

    props = properties.load(event, {
            'ConfigurationBucket': properties.String(),
            'ConfigurationKey': properties.String(),
            'LogicalPoolName': properties.String(),
            'RoleType': properties.String(default=""),
            'Path': properties.String(),
            'AssumeRolePolicyDocument': properties.Dictionary()
        })

    stack = stack_info.StackInfoManager().get_stack_info(event['StackId'])
    identity_client = identity_pool.get_identity_client()

    pool_info = aws_utils.get_cognito_pool_from_file(
        props.ConfigurationBucket,
        props.ConfigurationKey,
        props.LogicalPoolName,
        stack)

    if pool_info:
        # The pool exists: look up the role of the requested type on it.
        roles_response = identity_client.get_identity_pool_roles(
            IdentityPoolId=pool_info['PhysicalResourceId'])
        arn = roles_response.get("Roles", {}).get(props.RoleType, "")
    else:
        # No pool: create a dedicated role named after the stack and resource.
        role_name = "{}{}Role".format(stack.stack_name, event['LogicalResourceId'])
        arn = create_role(role_name, props)

    return custom_resource_response.success_response({'Arn': arn}, arn)
def handler(event, context):
    ''' Invoked when AWS Lambda service executes.

    On 'Create', grants the bucket permission to invoke the Lambda and then
    installs the bucket's notification configuration. All other request types
    are no-ops that report success.
    '''

    if not __is_valid_event(event):
        # Fixed typo in user-visible message: "recieved" -> "received".
        return custom_resource_response.failure_response(
            'Malformed event received.')

    request_type = event['RequestType']

    if request_type != 'Create':
        print(
            'Saw RequestType: \"{}\". No action needed (Only \"Create\" supported)'
            .format(request_type))
        return custom_resource_response.success_response({}, '*')

    s3_client = s3.get_client()
    lambda_client = lambda_.get_client()

    bucket_name, lambda_arn = __get_resources(event)

    has_permission = __add_permission_to_trigger(lambda_client, lambda_arn,
                                                 bucket_name)

    if not has_permission:
        return custom_resource_response.failure_response(
            'Could not add permissions to Lambda')

    is_configured = __add_notification_configuration(bucket_name, lambda_arn,
                                                     s3_client)

    if is_configured:
        return custom_resource_response.success_response({}, '*')
    else:
        # Fixed typo in user-visible message: "succesfully" -> "successfully".
        return custom_resource_response.failure_response(
            'Could not successfully configure AttachmentBucket')
def handler(event, context):
    """Entry point for the Custom::InterfaceDependencyResolver resource handler.

    For each gem's declared interface dependencies, resolves the interface's
    service URL and the ARNs permitted to call it, then records that info per
    consuming Lambda function via _put_gem_function_info. On Delete, only
    clears previously recorded refs.
    """
    request_type = event['RequestType']
    stack_arn = event['StackId']
    # Physical ID is always <stack-name>-<logical-id>.
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(stack_arn) + '-' + event['LogicalResourceId']
    data = {}
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    if request_type == 'Delete':
        _clear_interface_refs(stack)
        return custom_resource_response.success_response(data, physical_resource_id)

    if not stack.is_deployment_stack:
        raise RuntimeError("InterfaceDependencyResolver can only be stood up on a deployment stack")

    configuration_bucket_name = stack.project.configuration_bucket
    if not configuration_bucket_name:
        raise RuntimeError('Not adding service settings because there is no project configuration bucket.')
    service_directory = ServiceDirectory(configuration_bucket_name)

    interface_deps = event["ResourceProperties"].get("InterfaceDependencies", {})
    # start by clearing the refs this function and role have to make sure no old permissions/interfaces linger
    _clear_interface_refs(stack)

    for gem, interface_list in iteritems(interface_deps):
        # Group resolved interface info per consuming Lambda function name.
        gem_function_info = {}
        for interface in interface_list:
            if not interface['function'] in gem_function_info:
                gem_function_info[interface['function']] = {"interfaces": []}
            # An interface without an explicit gem defaults to the framework itself.
            interface_function_info = {"id": interface['id'], "gem": interface.get("gem", "CloudGemFramework")}
            if interface_function_info["gem"] == "CloudGemFramework":
                # Framework interfaces are resolved at project level (no deployment).
                interface_function_info["url"] = _get_project_url(
                    service_directory, interface['id'])
                interface_function_info["permittedArns"] = _get_permitted_arns(
                    _get_resource_group(gem, stack),
                    _get_project_interface_description(
                        service_directory, interface["id"])
                )
            else:
                # Gem interfaces are resolved within this deployment.
                interface_function_info["url"] = _get_url(
                    service_directory, stack, interface['id'])
                interface_function_info["permittedArns"] = _get_permitted_arns(
                    _get_resource_group(gem, stack),
                    _get_interface_description(service_directory, stack, interface["id"])
                )
            gem_function_info[interface['function']]["interfaces"].append(
                interface_function_info)

        _put_gem_function_info(gem, gem_function_info, stack)

    return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    print "Start FIFO Auto Scale"
    prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False)
    request_type = event['RequestType']
    assigned_suffix = event['ResourceProperties'].get('Suffix', None)
    type = event['ResourceProperties'].get('QueueType', "fifo")
    initial_number_of_queues = int(event['ResourceProperties'].get(
        'IntialNumberOfQueues', 5))

    if assigned_suffix:
        prefix = "{0}{1}".format(prefix, assigned_suffix)

    sqs = Sqs({}, queue_prefix=prefix, type=type)
    if request_type == 'Delete':
        sqs.delete_all_queues(prefix)
    else:
        queues = sqs.get_queues()
        number_of_queues = len(queues)

        #5 queues to start, each queue can support 300 send message calls per second.  total: 1500 messages per second
        if number_of_queues < initial_number_of_queues:
            for i in range(number_of_queues, initial_number_of_queues):
                sqs.add_fifo_queue(prefix)

    return custom_resource_response.success_response({}, "*")
# Example 6 (stray non-Python text "Exemplo n.º 6" / "0" converted to a comment)
def handler(event, context):
    """Entry point for the Custom::ResourceGroupConfiguration resource handler."""

    props = properties.load(event, {
        'ConfigurationBucket': properties.String(),
        'ConfigurationKey': properties.String(),
        'ResourceGroupName': properties.String()
    })

    # Configuration for a group lives under <key>/resource-group/<group-name>.
    group_key = '{}/resource-group/{}'.format(props.ConfigurationKey,
                                              props.ResourceGroupName)
    template_url = 'https://s3.amazonaws.com/{}/{}/resource-group/{}/{}'.format(
        props.ConfigurationBucket, props.ConfigurationKey,
        props.ResourceGroupName, constant.RESOURCE_GROUP_TEMPLATE_FILENAME)

    data = {
        'ConfigurationBucket': props.ConfigurationBucket,
        'ConfigurationKey': group_key,
        'TemplateURL': template_url
    }

    stack_name = aws_utils.get_stack_name_from_stack_arn(event['StackId'])
    physical_resource_id = 'CloudCanvas:LambdaConfiguration:{stack_name}:{resource_group_name}'.format(
        stack_name=stack_name,
        resource_group_name=props.ResourceGroupName)

    return custom_resource_response.success_response(data, physical_resource_id)
# Example 7 (stray non-Python text "Exemplo n.º 7" / "0" converted to a comment)
def handler(event, context):
    properties = event["ResourceProperties"]
    stack_arn = event['StackId']
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(
            stack_arn) + '-' + event['LogicalResourceId']

    buckets = properties.get("Buckets", [])
    if not buckets:
        print "There were no buckets in the resource properties, returning early"
        return custom_resource_response.success_response({}, physical_resource_id)

    if event["RequestType"] != "Delete":
        return custom_resource_response.success_response({}, physical_resource_id)

    for bucket_name in buckets:
        clear_bucket(bucket_name)
    return custom_resource_response.success_response({}, physical_resource_id)
def handler(event, context):
    """Entry point for the Custom::CognitoIdPoolSharedRole resource handler."""
    stack_id = event['StackId']

    if event['RequestType'] == 'Delete':
        # Nothing to tear down; report success with an empty ARN.
        return custom_resource_response.success_response({'Arn': ''}, '')

    props = properties.load(event, {
        'ConfigurationBucket': properties.String(),
        'ConfigurationKey': properties.String(),
        'LogicalPoolName': properties.String(),
        'RoleType': properties.String(default=""),
        'Path': properties.String(),
        'AssumeRolePolicyDocument': properties.Dictionary()
    })

    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)

    identity_client = identity_pool.get_identity_client()

    pool_info = aws_utils.get_cognito_pool_from_file(
        props.ConfigurationBucket, props.ConfigurationKey,
        props.LogicalPoolName, stack)

    if pool_info:
        # The pool already exists; read the role of the requested type off it.
        roles_response = identity_client.get_identity_pool_roles(
            IdentityPoolId=pool_info['PhysicalResourceId'])
        arn = roles_response.get("Roles", {}).get(props.RoleType, "")
    else:
        # No pool yet: create a dedicated role, tagged with project and stack.
        tags = [
            {"Key": constant.PROJECT_NAME_TAG,
             "Value": stack.project_stack.project_name},
            {"Key": constant.STACK_ID_TAG,
             "Value": stack_id},
        ]
        role_name = "{}{}Role".format(stack.stack_name, event['LogicalResourceId'])
        arn = _create_role(role_name, props, tags)

    return custom_resource_response.success_response({'Arn': arn}, arn)
# Example 9 (stray non-Python text "Exemplo n.º 9" / "0" converted to a comment)
def handler(event, context):
    print "Start Glue"  
    stack_id = event[c.ENV_STACK_ID]
    resources = util.get_stack_resources(stack_id)  
    request_type = event['RequestType']
    db_name = athena.get_database_name(stack_id, False) 
    glue = Glue()  

    for resource in resources:        
        if resource.logical_id == c.RES_SERVICE_ROLE:
           role_name = resource.physical_id
        if resource.logical_id == c.RES_S3_STORAGE:
           storage_physical_id = resource.physical_id
    
    if role_name is None:
        raise errors.ClientError("The logical resource '{}' was not found.  Is the resource in the cloud formation stack?".format(c.RES_SERVICE_ROLE))   

    if storage_physical_id is None:
        raise errors.ClientError("The logical resource '{}' was not found.  Is the resource in the cloud formation stack?".format(c.RES_S3_STORAGE))           
    crawler_id_1 =  glue.get_crawler_name(stack_id)    
    srcs = [
                {
                    'Path': "{}/{}{}".format(storage_physical_id, "table=", DEFAULT_EVENTS.CLIENTINITCOMPLETE),
                    'Exclusions': []
                },
                {
                    'Path': "{}/{}{}".format(storage_physical_id, "table=", DEFAULT_EVENTS.SESSIONSTART),
                    'Exclusions': []
                }
            ]
      

    print request_type, db_name, crawler_id_1, "role: ", role_name, "s3: ", storage_physical_id
    if request_type.lower() == 'delete':
        if glue.get_crawler(crawler_id_1) is not None:       
            glue.stop_crawler(crawler_id_1) 
            glue.delete_crawler(crawler_id_1)

        if glue.database_exists(db_name):
            glue.delete_database(db_name)
    elif request_type.lower() == 'create':   
        if not glue.database_exists(db_name):
            glue.create_database(db_name)

        if glue.get_crawler(crawler_id_1) is None:
            glue.create_crawler(crawler_id_1, role_name, db_name, athena.get_table_prefix(stack_id), srcs=srcs )

    else:                   
        if glue.get_crawler(crawler_id_1) is None:
            glue.create_crawler(crawler_id_1, role_name, db_name, athena.get_table_prefix(stack_id), srcs=srcs )
        else:
            glue.stop_crawler(crawler_id_1) 
            glue.update_crawler(crawler_id_1, role_name, db_name, athena.get_table_prefix(stack_id) )
        
    return custom_resource_response.success_response({}, "*")
# Example 10 (stray non-Python text "Exemplo n.º 10" / "0" converted to a comment)
def handler(event, context):
    '''Entry point for the Custom::AccessControl resource handler.'''

    # Validate RequestType
    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    # Get stack_info for the AccessControl resource's stack.
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    # Physical ID is always the same.
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(
        stack_arn) + '-' + event['LogicalResourceId']

    # The AccessControl resource has no output values.
    data = {}

    # Accumlate problems encountered so we can give a full report.
    problems = ProblemList()

    # Apply access control as determined by the Cloud Canvas stack type.
    if stack.stack_type == stack.STACK_TYPE_RESOURCE_GROUP:
        were_changes = _apply_resource_group_access_control(
            request_type, stack, problems)
    elif stack.stack_type == stack.STACK_TYPE_DEPLOYMENT_ACCESS:
        were_changes = _apply_deployment_access_control(
            request_type, stack, event['ResourceProperties']['Gem'], problems)
    elif stack.stack_type == stack.STACK_TYPE_PROJECT:
        were_changes = _apply_project_access_control(request_type, stack,
                                                     problems)
    else:
        raise RuntimeError(
            'The Custom::AccessControl resource can only be used in resource group, deployment access, or project stack templates.'
        )

    # If there were any problems, provide an error message with all the details.
    if problems:
        raise RuntimeError(
            'Found invalid AccessControl metadata:\n    {}'.format(problems))

    # If there were changes, wait a few seconds for them to propagate
    if were_changes:
        print 'Delaying {} seconds for change propagation'.format(
            PROPAGATION_DELAY_SECONDS)
        time.sleep(PROPAGATION_DELAY_SECONDS)

    # Successful execution.
    return custom_resource_response.success_response(data,
                                                     physical_resource_id)
# Example 11 (stray non-Python text "Exemplo n.º 11" / "0" converted to a comment)
def handler(event, context):
    """Run the configured Input dictionary through _process_dict and return the result."""

    props = properties.load(event, {
        'Input': properties.Dictionary(),
    })

    physical_resource_id = '{}-{}'.format(
        aws_utils.get_stack_name_from_stack_arn(event['StackId']),
        event['LogicalResourceId'])

    return custom_resource_response.success_response(
        _process_dict(props.Input), physical_resource_id)
# Example 12 (stray non-Python text "Exemplo n.º 12" / "0" converted to a comment)
def handler(event, context):
    """No-op placeholder resource for otherwise-empty deployment stacks.

    Deployment stacks must be creatable before any resource groups exist, but
    CloudFormation rejects an empty Resources list, so the lmbr_aws client
    inserts this resource into the deployment template when no other resources
    are defined by the template.
    """
    # Loading with an empty schema validates that no unexpected properties were supplied.
    properties.load(event, {})

    physical_resource_id = 'CloudCanvas:EmptyDeployment:{}'.format(
        aws_utils.get_stack_name_from_stack_arn(event['StackId']))

    return custom_resource_response.success_response({}, physical_resource_id)
# Example 13 (stray non-Python text "Exemplo n.º 13" / "0" converted to a comment)
def handler(event, context):
    print "Start FIFO Auto Scale"
    prefix = util.get_stack_name_from_arn(event[c.ENV_STACK_ID], False)
    sqs = Sqs({}, queue_prefix=prefix)
    request_type = event['RequestType']
    print request_type, prefix
    if request_type == 'Delete':
        sqs.delete_all_queues(prefix)
    else:
        queues = sqs.get_queues()
        print queues
        number_of_queues = len(queues)

        #5 queues to start, each queue can support 300 send message calls per second.  total: 1500 messages per second
        for i in range(number_of_queues, 5):
            sqs.add_fifo_queue(prefix)

    return custom_resource_response.success_response({}, "*")
# Example 14 (stray non-Python text "Exemplo n.º 14" / "0" converted to a comment)
def launch(event, lambdacontext):
    """Fan out amoeba crawler work over the last 36 hours of metric partitions.

    Builds an S3 key prefix for every event type and hour in the window and
    queues a crawl of each prefix on a shared thread pool, distributing the
    work across the five amoeba Lambdas. Blocks until all crawls finish.
    """
    print "Start"
    hours_delta = 36
    context = dict({})
    # Pull function name / request id off the Lambda context when present;
    # both are None when invoked outside the Lambda runtime (e.g. locally).
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(
        lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(
        lambdacontext, 'aws_request_id') else None
    # Module-level state shared with invoke_lambda and the crawl callbacks.
    global threadpool
    global is_lambda
    threadpool = ThreadPool(context, 8)
    is_lambda = context[c.KEY_REQUEST_ID] is not None
    available_amoeba_lambdas = []
    available_amoeba_lambdas.append(c.ENV_AMOEBA_1)
    available_amoeba_lambdas.append(c.ENV_AMOEBA_2)
    available_amoeba_lambdas.append(c.ENV_AMOEBA_3)
    available_amoeba_lambdas.append(c.ENV_AMOEBA_4)
    available_amoeba_lambdas.append(c.ENV_AMOEBA_5)
    db = DynamoDb(context)
    crawler = Crawler(context, os.environ[c.ENV_S3_STORAGE])
    glue = Glue()

    events = glue.get_events()
    #TODO: adjust the amoeba tree depth so that we have fully utilized all available amoebas; len(available_amoeba_lambdas) * 1000
    #since the number of leaf nodes for the metric partitions can quickly get very large we use a 5 lambda pool to ensure we don't hit the 1000 invocation limit.

    start = datetime.datetime.utcnow() - datetime.timedelta(hours=hours_delta)
    now = datetime.datetime.utcnow()

    # One crawl job per (event type, hour) partition in the window.
    for type in events:
        dt = start
        while dt <= now:
            prefix = metric_schema.s3_key_format().format(
                context[c.KEY_SEPERATOR_PARTITION], dt.year, dt.month, dt.day,
                dt.hour, type, dt.strftime(util.partition_date_format()))
            threadpool.add(crawler.crawl, prefix, available_amoeba_lambdas,
                           invoke_lambda)
            dt += timedelta(hours=1)

    threadpool.wait()
    return custom_resource_response.success_response({"StatusCode": 200}, "*")
# Example 15 (stray non-Python text "Exemplo n.º 15" / "0" converted to a comment)
def main(event, request):
    """Kick off a Glue crawl + Athena query when new events have arrived.

    Scans the last 2 hours of metric partition prefixes per event type; as soon
    as one prefix with data is found, starts the crawl on a thread pool and
    waits for it to finish.
    """
    context = dict({})
    # Pull function name / request id off the Lambda context when present;
    # both are None when invoked outside the Lambda runtime (e.g. locally).
    context[c.KEY_LAMBDA_FUNCTION] = request.function_name if hasattr(
        request, 'function_name') else None
    context[c.KEY_REQUEST_ID] = request.aws_request_id if hasattr(
        request, 'aws_request_id') else None
    stackid = os.environ[c.ENV_DEPLOYMENT_STACK_ARN]

    context[c.KEY_DB] = DynamoDb(context)
    context[c.KEY_ATHENA_QUERY] = Query(stackid)
    context[c.KEY_GLUE_CRAWLER] = Glue()
    thread_pool = ThreadPool(size=3)
    crawler_name = context[c.KEY_GLUE_CRAWLER].get_crawler_name(stackid)
    crawler = Crawler(context, os.environ[c.ENV_S3_STORAGE])
    glue = Glue()
    events = glue.get_events()

    # Look back over the last 2 hours of partitions.
    start = datetime.datetime.utcnow() - datetime.timedelta(hours=2)
    now = datetime.datetime.utcnow()

    found = False
    for type in events:
        dt = start
        while dt <= now:
            prefix = metric_schema.s3_key_format().format(
                context[c.KEY_SEPERATOR_PARTITION], dt.year, dt.month, dt.day,
                dt.hour, type, dt.strftime(util.partition_date_format()))
            found = crawler.exists(prefix)
            if found:
                print "FOUND new events=>", prefix
                break
            dt += timedelta(hours=1)
        if found:
            break

    # Only crawl when at least one new-event prefix exists.
    if found:
        thread_pool.add(crawl, context, crawler_name,
                        context[c.KEY_ATHENA_QUERY].execute_with_format)
        thread_pool.wait()

    return custom_resource_response.success_response({}, "*")
# Example 16 (stray non-Python text "Exemplo n.º 16" / "0" converted to a comment)
def handler(event, context):
    """Create, update, or delete the IoT policy named after this resource's physical id."""
    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    stack_arn = event['StackId']
    stack = stack_info.StackInfoManager().get_stack_info(stack_arn)

    # Physical ID is always the same: <stack-name>-<logical-id>.
    physical_resource_id = '{}-{}'.format(
        aws_utils.get_stack_name_from_stack_arn(stack_arn),
        event['LogicalResourceId'])

    if request_type == 'Delete':
        _delete_iot_policy(physical_resource_id)
    elif request_type == 'Update':
        _update_iot_policy(physical_resource_id, stack)
    else:  # 'Create' is the only remaining validated request type.
        _create_iot_policy(physical_resource_id, stack)

    return custom_resource_response.success_response({}, physical_resource_id)
# Example 17 (stray non-Python text "Exemplo n.º 17" / "0" converted to a comment)
def handler(event, context):
    """Resolve the physical id recorded for a named external-resource reference."""
    stack_arn = event['StackId']
    stack = stack_info.StackInfoManager().get_stack_info(stack_arn)
    props = properties.load(event, {'ReferenceName': properties.String()})

    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    # Delete produces no outputs; Create/Update resolve the referenced physical id.
    if request_type == 'Delete':
        data = {}
    else:
        data = {
            'PhysicalId': _get_reference_physical_id(stack, props.ReferenceName)
        }

    # Encode the reference name into the physical resource id.
    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        stack_arn, event['LogicalResourceId'],
        {'ReferenceName': props.ReferenceName})

    return custom_resource_response.success_response(data, physical_resource_id)
# Example 18 (stray non-Python text "Exemplo n.º 18" / "0" converted to a comment)
def handler(event, context):
    """Maintain reference metadata for a Custom::ExternalResourceInstance (project stack only)."""
    stack_arn = event['StackId']
    stack = stack_info.StackInfoManager().get_stack_info(stack_arn)

    if not stack.is_project_stack:
        raise RuntimeError(
            "Custom::ExternalResourceInstance can only be defined in the project stack."
        )

    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    if request_type == 'Delete':
        _delete_reference_metadata(event['LogicalResourceId'], stack)
    else:
        # Create and Update both (re)write the reference metadata.
        _create_reference_metadata(event, stack)

    physical_resource_id = '{}-{}'.format(
        aws_utils.get_stack_name_from_stack_arn(stack_arn),
        event['LogicalResourceId'])

    return custom_resource_response.success_response({}, physical_resource_id)
# Example 19 (stray non-Python text "Exemplo n.º 19" / "0" converted to a comment)
def handler(event, context):
    ''' Invoked when AWS Lambda service executes. '''

    access_stack_arn = event['StackId']
    access_stack = stack_info.StackInfoManager().get_stack_info(access_stack_arn)

    if not access_stack.resources:
        print(
            'Skipping setting CloudCanvasIdentityPool: access stack not found.'
        )
        return custom_resource_response.success_response({}, '*')

    pool = access_stack.resources.get_by_logical_id(
        'PlayerAccessIdentityPool',
        'Custom::CognitoIdentityPool',
        optional=True)
    custom_auth_flow_lambda = __get_resource(
        access_stack, event['ResourceProperties'].get('GemName', ''),
        'CustomAuthFlowLambda', 'AWS::Lambda::Function')

    if not pool:
        print(
            'Skipping setting CloudCanvasIdentityPool: PlayerAccessIdentityPool not found.'
        )
    elif not custom_auth_flow_lambda:
        print(
            'Skipping setting CloudCanvasIdentityPool: CustomAuthFlowLambda not found.'
        )
    else:
        print('Adding setting CloudCanvasIdentityPool = {}'.format(
            pool.physical_id))
        # Expose the identity pool id to the custom auth flow Lambda via its
        # environment variables.
        __add_environment_variables(
            custom_auth_flow_lambda.physical_id,
            {'CloudCanvasIdentityPool': pool.physical_id})

    return custom_resource_response.success_response({}, '*')
# Example 20 (stray non-Python text "Exemplo n.º 20" / "0" converted to a comment)
def main(event, lambdacontext):
    """Launch one FIFO message consumer per stack queue, scaling up if needed.

    When every queue is under load, adds a new FIFO queue first. Then, for each
    queue, either invokes the consumer Lambda (when running in the Lambda
    runtime) or calls the consumer module in-process (local execution).
    """
    context = dict({})
    stack_id = os.environ[c.ENV_DEPLOYMENT_STACK_ARN]
    # Pull function name / request id off the Lambda context when present;
    # both are None when invoked outside the Lambda runtime (e.g. locally).
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(
        lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(
        lambdacontext, 'aws_request_id') else None
    is_lambda = context[c.KEY_REQUEST_ID] is not None
    db = DynamoDb(context)
    if not is_lambda:
        # Local execution path: run the consumer in-process instead of invoking Lambdas.
        import lambda_fifo_message_consumer as consumer

    prefix = util.get_stack_name_from_arn(stack_id)
    sqs = Sqs(context, "{0}_".format(prefix))
    awslambda = Lambda(context)

    # Scale out the queue pool when every existing queue is under load.
    if sqs.is_all_under_load:
        sqs.add_fifo_queue(prefix)

    queues = sqs.get_queues()
    for queue_url in queues:
        payload = {c.KEY_SQS_QUEUE_URL: queue_url, "context": context}
        print "Starting {} with queue url '{}'".format(
            "lambda" if is_lambda else "thread", queue_url)
        if is_lambda:
            invoke(context, awslambda, payload)
        else:
            payload[c.ENV_STACK_ID] = event['StackId']
            # Fake a Lambda context object carrying only the function name.
            consumer.main(
                payload,
                type('obj', (object, ),
                     {'function_name': context[c.KEY_LAMBDA_FUNCTION]}))

    print "{} {} lambdas have started".format(len(queues),
                                              context[c.KEY_LAMBDA_FUNCTION])
    return custom_resource_response.success_response({}, "*")
# Example 21 (stray non-Python text "Exemplo n.º 21" / "0" converted to a comment)
def handler(event, context):
    """Entry point for the Custom::CognitoUserPool resource handler.

    Creates, updates, or deletes a Cognito user pool for the stack, keeping its
    client apps and groups in sync with the resource template, and registers
    the pool with the identity pool's Cognito identity providers.
    """
    stack_id = event['StackId']

    props = properties.load(
        event,
        {
            'ClientApps':
            properties.StringOrListOfString(),
            'ExplicitAuthFlows':
            properties.StringOrListOfString(default=[]),
            'RefreshTokenValidity':
            properties.String('30'),
            'ConfigurationKey':
            properties.String(
            ),  # this is only here to force the resource handler to execute on each update to the deployment
            'LambdaConfig':
            properties.Dictionary({}),
            'PoolName':
            properties.String(),
            'Groups':
            properties.ObjectOrListOfObject(
                default=[],
                schema={
                    'Name': properties.String(),
                    'Description': properties.String(''),
                    'Role': properties.String(),
                    'Precedence': properties.String('99')
                }),
            'AllowAdminCreateUserOnly':
            properties.String('')
        })

    # give the identity pool a unique name per stack
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)

    stack_name = stack.stack_name
    pool_name = props.PoolName.replace('-', ' ')
    pool_name = stack_name + pool_name
    cognito_idp_client = user_pool.get_idp_client()
    # The pool id is embedded in the physical resource id; absent on first Create.
    pool_id = custom_resource_utils.get_embedded_physical_id(
        event.get('PhysicalResourceId'))
    found_pool = user_pool.get_user_pool(pool_id)

    # Set up tags for all resources created
    tags = {
        constant.PROJECT_NAME_TAG: stack.project_stack.project_name,
        constant.STACK_ID_TAG: stack_id
    }

    request_type = event['RequestType']

    if request_type == 'Delete':
        # Only delete the pool when it still exists.
        if found_pool is not None:
            cognito_idp_client.delete_user_pool(UserPoolId=pool_id)
        data = {}

    else:
        # if the pool exists just update it, otherwise create a new one

        mfa_config = 'OFF'  # MFA is currently unsupported by Lumberyard
        # Users are automatically prompted to verify these things.
        # At least one auto-verified thing (email or phone) is required to allow password recovery.
        auto_verified_attributes = ['email']

        client_app_data = {}
        lambda_config = props.LambdaConfig

        user_pool.validate_identity_metadata(stack_manager, stack_id,
                                             event['LogicalResourceId'],
                                             props.ClientApps)
        admin_create_user_config = __get_admin_create_user_config(
            props.AllowAdminCreateUserOnly)
        print(json.dumps(admin_create_user_config))

        if found_pool is not None:  # Update
            response = cognito_idp_client.update_user_pool(
                UserPoolId=pool_id,
                MfaConfiguration=mfa_config,
                AutoVerifiedAttributes=auto_verified_attributes,
                LambdaConfig=lambda_config,
                AdminCreateUserConfig=admin_create_user_config,
                UserPoolTags=tags)

            # Reconcile client apps declared in the template with existing ones.
            existing_client_apps = user_pool.get_client_apps(pool_id)
            client_app_data = update_client_apps(pool_id, props.ClientApps,
                                                 existing_client_apps, False,
                                                 props.ExplicitAuthFlows,
                                                 props.RefreshTokenValidity)

            response = cognito_idp_client.list_groups(UserPoolId=pool_id)

            # Track which existing groups are still declared in the template.
            found_groups = {}
            for actual_group in response['Groups']:
                group_name = actual_group['GroupName']
                for requested_group in props.Groups:
                    # does the group exist in the resource template
                    if group_name == requested_group.Name:
                        found_groups.update({group_name: True})
                        break

                # delete the group as it is no longer in the resource template
                if group_name not in found_groups:
                    cognito_idp_client.delete_group(
                        GroupName=actual_group['GroupName'],
                        UserPoolId=pool_id)

            print("Found groups=>{}".format(json.dumps(found_groups)))
            # iterate the groups defined in the user pool resource template
            for group in props.Groups:
                # update the group as it is currently a group in the user pool
                group_definition = __generate_group_definition(pool_id, group)
                print("Group '{}' is defined by {}".format(
                    group.Name, json.dumps(group_definition)))
                if group.Name in found_groups:
                    cognito_idp_client.update_group(**group_definition)
                else:
                    # group is a new group on the user pool
                    cognito_idp_client.create_group(**group_definition)

        else:  # Create
            response = cognito_idp_client.create_user_pool(
                PoolName=pool_name,
                MfaConfiguration=mfa_config,
                AutoVerifiedAttributes=auto_verified_attributes,
                LambdaConfig=lambda_config,
                AdminCreateUserConfig=admin_create_user_config,
                UserPoolTags=tags)
            pool_id = response['UserPool']['Id']
            print('User pool creation response: {}'.format(response))
            # Create every group declared in the template on the new pool.
            for group in props.Groups:
                group_definition = __generate_group_definition(pool_id, group)
                print("Group '{}' is defined by {}".format(
                    group.Name, json.dumps(group_definition)))
                cognito_idp_client.create_group(**group_definition)

            client_app_data = update_client_apps(pool_id, props.ClientApps, [],
                                                 False,
                                                 props.ExplicitAuthFlows,
                                                 props.RefreshTokenValidity)

        # Map created/updated client apps so the identity pool can reference them.
        updated_resources = {
            stack_id: {
                event['LogicalResourceId']: {
                    'physical_id': pool_id,
                    'client_apps': {
                        client_app['ClientName']: {
                            'client_id': client_app['ClientId']
                        }
                        for client_app in client_app_data['Created'] +
                        client_app_data['Updated']
                    }
                }
            }
        }

        identity_pool.update_cognito_identity_providers(
            stack_manager, stack_id, pool_id, updated_resources)

        data = {
            'UserPoolName': pool_name,
            'UserPoolId': pool_id,
            'ClientApps': client_app_data,
        }

    physical_resource_id = pool_id

    return custom_resource_response.success_response(data,
                                                     physical_resource_id)
# Example 22 (stray non-Python text "Exemplo n.º 22" / "0" converted to a comment)
def handler(event, context):
    dynamodb = aws_utils.ClientWrapper(boto3.client('dynamodb'))
    wait_for_account_tables()

    request_type = event['RequestType']
    table_name = get_table_name(event)

    if request_type == 'Create':
        try:
            if table_name in gather_tables(dynamodb):
                raise RuntimeError(
                    "Trying to create a Custom::DynamoDB::Table custom resource, but DynamoDB table already exists!"
                )
            try:
                response = create_table(table_name, event)
            except Exception as e:
                if isinstance(e,
                              ClientError) and e.response['Error']['Code'] in [
                                  'LimitExceededException'
                              ]:
                    wait_for_account_tables()
                    response = create_table(table_name, event)
                else:
                    raise e
            table_response = _TableResponse(response)
        except RuntimeError as e:
            return custom_resource_response.failure_response(e.message)
        tag_table(dynamodb, response, event)
    elif request_type == 'Update':
        try:
            if not table_name in gather_tables(dynamodb):
                try:
                    response = create_table(table_name, event)
                except Exception as e:
                    if isinstance(
                            e,
                            ClientError) and e.response['Error']['Code'] in [
                                'LimitExceededException'
                            ]:
                        wait_for_account_tables()
                        response = create_table(table_name, event)
                    else:
                        raise e
                table_response = _TableResponse(response)
                tag_table(dynamodb, response, event)
            else:
                try:
                    response = update_table(table_name, event)
                except Exception as e:
                    if isinstance(
                            e,
                            ClientError) and e.response['Error']['Code'] in [
                                'LimitExceededException'
                            ]:
                        wait_for_account_tables()
                        response = update_table(table_name, event)
                    else:
                        raise e
                table_response = _TableResponse(response)
        except RuntimeError as e:
            return custom_resource_response.failure_response(e.message)

    elif request_type == 'Delete':
        try:
            if table_name in gather_tables(dynamodb):
                try:
                    response = dynamodb.delete_table(TableName=table_name)
                except Exception as e:
                    if isinstance(
                            e,
                            ClientError) and e.response['Error']['Code'] in [
                                'LimitExceededException'
                            ]:
                        wait_for_account_tables()
                        response = dynamodb.delete_table(TableName=table_name)
                    else:
                        raise e
                table_response = _TableResponse(response)
            else:
                print "Custom::DynamoDB::Table is trying to delete a DynamoDB table that does not exist"
                table_response = _TableResponse(
                    {'TableDescription': {
                        'TableName': table_name
                    }})
        except RuntimeError as e:
            return custom_resource_response.failure_response(e.message)
    else:
        raise RuntimeError('Invalid RequestType: {}'.format(request_type))

    return custom_resource_response.success_response(table_response.output,
                                                     table_response.table_name)
Exemplo n.º 23
0
def handler(event, context):
    """Entry point for the Custom::ServiceApi resource handler.

    Creates, updates, or deletes the API Gateway deployment backing a
    service API, its access-control role, and the service-directory
    registrations for the interfaces the API exposes.
    """
    stack_id = event['StackId']
    request_type = event['RequestType']
    logical_resource_id = event['LogicalResourceId']
    # The access-control role is named after the logical resource.
    logical_role_name = logical_resource_id

    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)

    rest_api_resource_name = '-'.join([stack.stack_name, logical_resource_id])
    id_data = aws_utils.get_data_from_custom_physical_resource_id(
        event.get('PhysicalResourceId', None))

    outputs = {}
    tags = {
        constant.PROJECT_NAME_TAG: stack.project_stack.project_name,
        constant.STACK_ID_TAG: stack_id
    }

    if request_type == 'Create':
        props = properties.load(event, PROPERTY_SCHEMA)
        access_role_arn = role_utils.create_access_control_role(
            stack_manager, id_data, stack.stack_arn, logical_role_name,
            API_GATEWAY_SERVICE_NAME)
        swagger = get_configured_swagger_content(
            stack, props, access_role_arn, rest_api_resource_name)
        api_id = create_api_gateway(rest_api_resource_name, props, swagger)
        url = get_service_url(api_id, stack.region)
        register_service_interfaces(stack, url, swagger)
        update_api_gateway_tags(api_id, event, tags)
        outputs['Url'] = url
        id_data['RestApiId'] = api_id

    elif request_type == 'Update':
        api_id = id_data.get('RestApiId', None)
        if not api_id:
            raise RuntimeError(
                'No RestApiId found in id_data: {}'.format(id_data))
        props = properties.load(event, PROPERTY_SCHEMA)
        access_role_arn = role_utils.get_access_control_role_arn(
            id_data, logical_role_name)
        swagger = get_configured_swagger_content(
            stack, props, access_role_arn, rest_api_resource_name)
        update_api_gateway(api_id, props, swagger)
        url = get_service_url(api_id, stack.region)
        register_service_interfaces(stack, url, swagger)
        update_api_gateway_tags(api_id, event, tags)
        outputs['Url'] = url

    elif request_type == 'Delete':
        if not id_data:
            # CloudFormation may cancel a resource creation (because another
            # resource failed) before it records our create response. It then
            # issues the delete with an internal temporary id, so there is no
            # id data to work from. We can't clean up what we can't identify,
            # so let stack cleanup continue, leaving the REST API and role
            # behind.
            print('WARNING: No id_data provided on delete.')
        else:
            api_id = id_data.get('RestApiId', None)
            if not api_id:
                raise RuntimeError(
                    'No RestApiId found in id_data: {}'.format(id_data))
            delete_api_gateway(api_id)
            url = get_service_url(api_id, stack.region)
            unregister_service_interfaces(stack, url)
            del id_data['RestApiId']
            role_utils.delete_access_control_role(id_data, logical_role_name)

    else:
        raise RuntimeError('Invalid RequestType: {}'.format(request_type))

    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        event['StackId'], logical_resource_id, id_data)

    return custom_resource_response.success_response(outputs,
                                                     physical_resource_id)
Exemplo n.º 24
0
def handler(event, context):
    """No-op custom resource handler: always succeeds with empty output."""
    empty_output = {}
    return custom_resource_response.success_response(empty_output, "*")
Exemplo n.º 25
0
def empty_handler(event, context):
    """Placeholder handler: report success with no data and a wildcard id."""
    no_data = {}
    return custom_resource_response.success_response(no_data, '*')
Exemplo n.º 26
0
def launch(event, lambdacontext):
    """Drain the Amoeba SQS queues and fan out one amoeba-generator lambda
    per S3 leaf directory referenced by the queued messages.

    Messages are read until this lambda's time budget is spent or every queue
    has come up empty; processed messages are deleted in batches at the end.
    Returns a custom resource success response with StatusCode 200.
    """
    util.debug_print("Start Amoeba Launcher")
    context = dict({})
    context[c.KEY_START_TIME] = time.time()
    context[c.KEY_LAMBDA_FUNCTION] = lambdacontext.function_name if hasattr(
        lambdacontext, 'function_name') else None
    context[c.KEY_REQUEST_ID] = lambdacontext.aws_request_id if hasattr(
        lambdacontext, 'aws_request_id') else None
    prefix = util.get_stack_name_from_arn(
        os.environ[c.ENV_DEPLOYMENT_STACK_ARN])
    prefix = "{0}{1}".format(prefix, c.KEY_SQS_AMOEBA_SUFFIX)
    # DynamoDb(context) is kept for its side effect of populating context
    # with settings — TODO confirm against DynamoDb's constructor.
    db = DynamoDb(context)
    sqs = Sqs(context, prefix, "sqs")
    sqs.set_queue_url(lowest_load_queue=False)

    if sqs.is_all_under_load:
        sqs.add_fifo_queue(prefix)

    elapsed = util.elapsed(context)
    timeout = context[c.KEY_MAX_LAMBDA_TIME] * c.RATIO_OF_MAX_LAMBDA_TIME
    # Maps an S3 directory to its pending paths and accumulated payload size.
    # Renamed from 'map' to avoid shadowing the builtin.
    directory_map = {}
    queues_checked = 0
    number_of_queues = sqs.number_of_queues
    sqs_delete_tokens = {}
    while elapsed < timeout and queues_checked < number_of_queues:
        messages = sqs.read_queue()
        if sqs.queue_url not in sqs_delete_tokens:
            sqs_delete_tokens[sqs.queue_url] = []

        if len(messages) > 0:
            for message in messages:
                body = json.loads(message["Body"])
                paths = body["paths"]
                # Token pairs the message id with its receipt handle so the
                # message can be deleted after processing.
                msg_token = "{}{}{}".format(message['MessageId'],
                                            context[c.KEY_SEPERATOR_CSV],
                                            message['ReceiptHandle'])
                sqs_delete_tokens[sqs.queue_url].append(msg_token)
                for path in paths:
                    parts = path.split(context[c.KEY_SEPERATOR_PARTITION])
                    # Drop the filename; group by the containing directory.
                    filename = parts.pop()
                    directory = context[c.KEY_SEPERATOR_PARTITION].join(parts)
                    if directory not in directory_map:
                        directory_map[directory] = {"paths": [], "size": 0}
                    # Lambda payload limit for Event invocation type: 131072.
                    sizeof = len(path) + directory_map[directory]["size"]
                    is_invoked = directory_map[directory].get("invoked", False)
                    if sizeof >= c.MAXIMUM_ASYNC_PAYLOAD_SIZE and not is_invoked:
                        # Payload is full: dispatch the batch and start a new
                        # one. BUGFIX: seed the fresh batch with the path that
                        # overflowed the payload — it was previously dropped.
                        invoke_lambda(context, directory,
                                      directory_map[directory]["paths"])
                        directory_map[directory] = {
                            "paths": [path],
                            "size": len(path),
                            "invoked": True
                        }
                    else:
                        directory_map[directory]["paths"].append(path)
                        directory_map[directory]["size"] = sizeof

        else:
            # Empty read counts this queue as checked; move to the next one.
            queues_checked += 1
            sqs.set_queue_url(lowest_load_queue=False)

        elapsed = util.elapsed(context)

    # Invoke an amoeba generator for each S3 leaf node.
    for directory, settings in iteritems(directory_map):
        is_invoked = settings.get("invoked", False)
        # Amoebas are not designed to have multiple amoebas working against
        # one directory. If one was already invoked for this directory (due
        # to payload size) then we requeue the remaining paths instead.
        if is_invoked:
            sqs.send_generic_message(json.dumps({"paths": settings["paths"]}))
        else:
            invoke_lambda(context, directory, settings["paths"])

    context[c.KEY_THREAD_POOL] = ThreadPool(context, 8)
    # Delete SQS messages that have been processed.
    for key, value in iteritems(sqs_delete_tokens):
        sqs.delete_message_batch(value, key)

    return custom_resource_response.success_response({"StatusCode": 200}, "*")
def handler(event, context):

    props = properties.load(
        event,
        {
            'ConfigurationBucket':
            properties.String(),
            'ConfigurationKey':
            properties.String(
            ),  ##this is only here to force the resource handler to execute on each update to the deployment
            'IdentityPoolName':
            properties.String(),
            'UseAuthSettingsObject':
            properties.String(),
            'AllowUnauthenticatedIdentities':
            properties.String(),
            'DeveloperProviderName':
            properties.String(default=''),
            'Roles':
            properties.Object(default={}, schema={'*': properties.String()}),
            'RoleMappings':
            properties.Object(
                default={},
                schema={
                    'Cognito':
                    properties.Object(
                        default={},
                        schema={
                            'Type': properties.String(''),
                            'AmbiguousRoleResolution': properties.String('')
                        })
                })
        })

    #give the identity pool a unique name per stack
    stack_manager = stack_info.StackInfoManager()
    stack_name = aws_utils.get_stack_name_from_stack_arn(event['StackId'])
    identity_pool_name = stack_name + props.IdentityPoolName
    identity_pool_name = identity_pool_name.replace('-', ' ')
    identity_client = identity_pool.get_identity_client()
    identity_pool_id = custom_resource_utils.get_embedded_physical_id(
        event.get('PhysicalResourceId'))
    found_pool = identity_pool.get_identity_pool(identity_pool_id)

    request_type = event['RequestType']
    if request_type == 'Delete':
        if found_pool != None:
            identity_client.delete_identity_pool(
                IdentityPoolId=identity_pool_id)
        data = {}

    else:
        use_auth_settings_object = props.UseAuthSettingsObject.lower(
        ) == 'true'
        supported_login_providers = {}

        if use_auth_settings_object == True:
            #download the auth settings from s3
            player_access_key = 'player-access/' + constant.AUTH_SETTINGS_FILENAME
            auth_doc = json.loads(
                _load_doc_from_s3(props.ConfigurationBucket,
                                  player_access_key))

            #if the doc has entries add them to the supported_login_providers dictionary
            if len(auth_doc) > 0:
                for key, value in auth_doc.iteritems():
                    supported_login_providers[
                        value['provider_uri']] = value['app_id']

        cognito_identity_providers = identity_pool.get_cognito_identity_providers(
            stack_manager, event['StackId'], event['LogicalResourceId'])

        print 'Identity Providers: ', cognito_identity_providers
        allow_anonymous = props.AllowUnauthenticatedIdentities.lower(
        ) == 'true'
        #if the pool exists just update it, otherwise create a new one

        args = {
            'IdentityPoolName': identity_pool_name,
            'AllowUnauthenticatedIdentities': allow_anonymous,
            'SupportedLoginProviders': supported_login_providers,
            'CognitoIdentityProviders': cognito_identity_providers
        }

        if props.DeveloperProviderName:
            args['DeveloperProviderName'] = props.DeveloperProviderName

        if found_pool != None:
            identity_client.update_identity_pool(
                IdentityPoolId=identity_pool_id, **args)
        else:
            response = identity_client.create_identity_pool(**args)
            identity_pool_id = response['IdentityPoolId']

        #update the roles for the pool
        role_mappings = {}
        if props.RoleMappings.Cognito.Type and len(
                cognito_identity_providers) > 0:
            print 'Adding role mappings for cognito', props.RoleMappings.Cognito.__dict__
            role_mappings['{}:{}'.format(
                cognito_identity_providers[0]['ProviderName'],
                cognito_identity_providers[0]
                ['ClientId'])] = props.RoleMappings.Cognito.__dict__

        print "Role Mappings: ", role_mappings
        identity_client.set_identity_pool_roles(
            IdentityPoolId=identity_pool_id,
            Roles=props.Roles.__dict__,
            RoleMappings=role_mappings)

        data = {
            'IdentityPoolName': identity_pool_name,
            'IdentityPoolId': identity_pool_id
        }

    physical_resource_id = identity_pool_id

    return custom_resource_response.success_response(data,
                                                     physical_resource_id)
Exemplo n.º 28
0
def handler(event, context):
    """Manage the SWF domain, workflow type, and activity types for a
    Simple Workflow custom resource.

    Create registers the domain, workflow type, and activity types; Update
    reconciles the registered activity types with the requested list; Delete
    deprecates the domain. Returns a custom resource success response with
    the derived names.
    """
    request_type = event['RequestType']
    logical_resource_id = event['LogicalResourceId']
    # Named resource_props so the 'properties' module used elsewhere in this
    # file is not shadowed.
    resource_props = event['ResourceProperties']
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(
        event['StackId']) + "-" + logical_resource_id
    domain_name = physical_resource_id + "-domain"
    workflow_type_name = physical_resource_id + "-workflow-type"

    if request_type == "Create":
        swf_client.register_domain(name=domain_name,
                                   workflowExecutionRetentionPeriodInDays="15")

        swf_client.register_workflow_type(
            domain=domain_name,
            name=workflow_type_name,
            version="1.0",
            defaultTaskStartToCloseTimeout=resource_props[
                'TaskStartToCloseTimeout'],
            defaultExecutionStartToCloseTimeout=resource_props[
                'ExecutionStartToCloseTimeout'],
            defaultTaskList=resource_props['TaskList'],
            defaultChildPolicy=resource_props['ChildPolicy'])

        for activity_type in resource_props['ActivityTypes']:
            swf_client.register_activity_type(domain=domain_name,
                                              name=activity_type,
                                              version="1.0")

    elif request_type == "Update":
        # Collect every currently registered activity type, following
        # pagination via nextPageToken.
        existing_types = set()
        params = {'domain': domain_name, 'registrationStatus': "REGISTERED"}

        while True:
            response = swf_client.list_activity_types(**params)
            existing_types.update(
                item['activityType']['name']
                for item in response['typeInfos'])

            if 'nextPageToken' in response:
                params['nextPageToken'] = response['nextPageToken']
            else:
                break

        # Register the requested types that are missing; whatever remains in
        # existing_types afterwards is no longer wanted and is deprecated.
        for activity_type in resource_props['ActivityTypes']:
            if activity_type in existing_types:
                existing_types.discard(activity_type)
            else:
                swf_client.register_activity_type(domain=domain_name,
                                                  name=activity_type,
                                                  version="1.0")

        for activity_type in existing_types:
            swf_client.deprecate_activity_type(domain=domain_name,
                                               activityType={
                                                   'name': activity_type,
                                                   'version': "1.0"
                                               })

    elif request_type == "Delete":
        swf_client.deprecate_domain(name=domain_name)

    outputs = {
        'DomainName': domain_name,
        'WorkflowTypeName': workflow_type_name,
        'ActivityTypes': resource_props['ActivityTypes'],
        'TaskList': resource_props['TaskList']['name']
    }

    return custom_resource_response.success_response(outputs,
                                                     physical_resource_id)
Exemplo n.º 29
0
def get_crawler_status(request, name):
    """Look up the current state of the named Glue crawler.

    Dashes in the supplied name are converted to underscores before lookup.
    """
    crawler_name = name.replace('-', '_')
    glue = Glue()
    crawler_info = glue.get_crawler(crawler_name)
    state = crawler_info['Crawler']['State']
    return custom_resource_response.success_response({"State": state}, "*")
Exemplo n.º 30
0
def run_crawler(request):
    """Start the Glue crawler by invoking its launcher lambda.

    The launcher is fire-and-forget: its response payload is not used, so the
    previously captured (and unused) return value has been dropped.
    """
    context = {}
    lb = Lambda(context)
    lb.invoke(os.environ[c.ENV_GLUE_CRAWLER_LAUNCHER])
    return custom_resource_response.success_response({}, "*")