Example #1
0
    def test_detects_property_not_in_schema(self):
        """A property present in the event but missing from the schema raises ValidationError."""

        empty_schema = {}
        event = {'ResourceProperties': {'Test': 'Value'}}

        with self.assertRaises(properties.ValidationError) as context:
            properties.load(event, empty_schema)

        # The offending property name should appear in the error message.
        self.assertIn('Test', context.exception.message)
    def test_add_service_setting(self, mock_add_permitted_arns):
        """_add_service_settings should ignore interfaces not from CloudGemFramework.

        The service directory is primed with a 'gem_test'-prefixed interface;
        no directory lookup should occur and service_settings must stay empty.
        """
        interface = {
            'InterfaceId': 'gem_test-interface_1_0_0',
            'InterfaceUrl': 'test-interface-url',
            'InterfaceSwagger': 'test-interface-swagger'
        }

        stack = mock.MagicMock(name='stack')
        service_directory = mock.MagicMock(name='ServiceDirectory')
        service_directory.get_interface_services.return_value = [interface]

        props = properties.load(
            self.event, LambdaConfigurationResourceHandler.PROPERTIES_SCHEMA)
        services = props.Services
        service = services[0]

        service_settings = {}

        permitted_arns = []

        LambdaConfigurationResourceHandler._add_service_settings(
            stack, service_directory, service, service_settings,
            permitted_arns)

        service_directory.get_interface_services.assert_not_called()

        # If the interface is not from CloudGemFramework it should not be added here
        # Non-project interface references live in the lambda environment variables
        # (assertEquals is a deprecated alias removed in Python 3.12; use assertEqual)
        self.assertEqual(service_settings, {})
def handler(event, context):
    """Resolve or create the Cognito identity pool role and report its ARN."""

    # Nothing to tear down on delete; answer with an empty ARN.
    if event['RequestType'] == 'Delete':
        return custom_resource_response.success_response({'Arn': ''}, '')

    schema = {
        'ConfigurationBucket': properties.String(),
        'ConfigurationKey': properties.String(),
        'LogicalPoolName': properties.String(),
        'RoleType': properties.String(default=""),
        'Path': properties.String(),
        'AssumeRolePolicyDocument': properties.Dictionary()
    }
    props = properties.load(event, schema)

    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(event['StackId'])

    identity_client = identity_pool.get_identity_client()

    cognito_pool_info = aws_utils.get_cognito_pool_from_file(
        props.ConfigurationBucket,
        props.ConfigurationKey,
        props.LogicalPoolName,
        stack)

    if cognito_pool_info:
        # Pool already recorded on file: look up the ARN for the requested role type.
        pool_roles = identity_client.get_identity_pool_roles(
            IdentityPoolId=cognito_pool_info['PhysicalResourceId'])
        arn = pool_roles.get("Roles", {}).get(props.RoleType, "")
    else:
        # No pool on file: create a dedicated role for this resource.
        role_name = "{}{}Role".format(stack.stack_name, event['LogicalResourceId'])
        arn = create_role(role_name, props)

    return custom_resource_response.success_response({'Arn': arn}, arn)
Example #4
0
def handler(event, context):
    """Entry point for the Custom::ResourceGroupConfiguration resource handler."""

    props = properties.load(event, {
        'ConfigurationBucket': properties.String(),
        'ConfigurationKey': properties.String(),
        'ResourceGroupName': properties.String()
    })

    # Key under which this resource group's uploaded content lives.
    resource_group_key = '{}/resource-group/{}'.format(
        props.ConfigurationKey, props.ResourceGroupName)

    # Public S3 URL of the resource group's CloudFormation template.
    template_url = 'https://s3.amazonaws.com/{}/{}/resource-group/{}/{}'.format(
        props.ConfigurationBucket, props.ConfigurationKey,
        props.ResourceGroupName, constant.RESOURCE_GROUP_TEMPLATE_FILENAME)

    data = {
        'ConfigurationBucket': props.ConfigurationBucket,
        'ConfigurationKey': resource_group_key,
        'TemplateURL': template_url
    }

    physical_resource_id = 'CloudCanvas:LambdaConfiguration:{stack_name}:{resource_group_name}'.format(
        stack_name=aws_utils.get_stack_name_from_stack_arn(event['StackId']),
        resource_group_name=props.ResourceGroupName)

    return custom_resource_response.success_response(data, physical_resource_id)
    def test_add_service_setting(self, mock_add_permitted_arns):
        """_add_service_settings should query the directory and record the interface.

        The interface found for the service is stored under its InterfaceId and
        its ARNs are added via _add_permitted_arns.
        """
        interface = {
            'InterfaceId': 'gem_test-interface_1_0_0',
            'InterfaceUrl': 'test-interface-url',
            'InterfaceSwagger': 'test-interface-swagger'
        }

        stack = mock.MagicMock(name='stack')
        service_directory = mock.MagicMock(name='ServiceDirectory')
        service_directory.get_interface_services.return_value = [interface]

        props = properties.load(
            self.event, LambdaConfigurationResourceHandler.PROPERTIES_SCHEMA)
        services = props.Services
        service = services[0]

        service_settings = {}

        permitted_arns = []

        LambdaConfigurationResourceHandler._add_service_settings(
            stack, service_directory, service, service_settings,
            permitted_arns)

        service_directory.get_interface_services.assert_called_once_with(
            stack.deployment.deployment_name, service.InterfaceId)

        # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
        self.assertEqual(service_settings[service.InterfaceId], interface)

        mock_add_permitted_arns.assert_called_once_with(
            stack, interface, permitted_arns)
    def test_add_cgf_service_setting(self, mock_add_permitted_arns):
        """CloudGemFramework_* interfaces should be looked up and stored in service_settings."""
        interface = {
            'InterfaceId': 'CloudGemFramework_test_1_0_0',
            'InterfaceUrl': 'test-interface-url',
            'InterfaceSwagger': 'test-interface-swagger'
        }

        # Local event whose first service references a CloudGemFramework interface.
        event = {
            'ResourceProperties': {
                'ConfigurationBucket':
                'TestBucket',
                'ConfigurationKey':
                'TestInputKey',
                'FunctionName':
                'TestFunction',
                'Runtime':
                'TestRuntime',
                'Settings': {
                    'TestSettingKey1': 'TestSettingValue1',
                    'TestSettingKey2': 'TestSettingValue2'
                },
                'Services': [{
                    "InterfaceId": "CloudGemFramework_test_1_0_0",
                    "Optional": True
                }, {
                    "InterfaceId": "Gem_TestInterface2_1_0_0",
                    "Optional": True
                }]
            },
            'StackId':
            'arn:aws:cloudformation:TestRegion:TestAccount:stack/TestStack/TestUUID',
            'LogicalResourceId': 'TestLogicalResourceId'
        }

        stack = mock.MagicMock(name='stack')
        service_directory = mock.MagicMock(name='ServiceDirectory')
        service_directory.get_interface_services.return_value = [interface]

        props = properties.load(
            event, LambdaConfigurationResourceHandler.PROPERTIES_SCHEMA)
        services = props.Services
        service = services[0]

        service_settings = {}

        permitted_arns = []

        LambdaConfigurationResourceHandler._add_service_settings(
            stack, service_directory, service, service_settings,
            permitted_arns)

        # For CloudGemFramework_* interfaces this should be called and service_settings should be populated
        service_directory.get_interface_services.assert_called_once()
        # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
        self.assertEqual(service_settings[service.InterfaceId], interface)
Example #7
0
def handler(event, context):
    """Validate the Input dictionary property, transform it, and report success."""

    props = properties.load(event, {'Input': properties.Dictionary()})

    # Physical id is derived from the owning stack name and this resource's logical id.
    stack_name = aws_utils.get_stack_name_from_stack_arn(event['StackId'])
    physical_resource_id = '{}-{}'.format(stack_name, event['LogicalResourceId'])

    output = _process_dict(props.Input)

    return custom_resource_response.success_response(output, physical_resource_id)
    def test_add_services_setting_with_no_services(self):
        """With no 'Services' property, _add_services_settings should permit no ARNs."""

        del self.event['ResourceProperties']['Services']

        props = properties.load(
            self.event, LambdaConfigurationResourceHandler.PROPERTIES_SCHEMA)
        settings = props.Settings.__dict__
        services = props.Services
        # name= keyword (not positional spec) matches the sibling tests' usage.
        stack = mock.MagicMock(name='stack')

        # Argument order (stack, settings, services) matches the sibling
        # test_add_services_settings test for the same method.
        actual_permitted_arns = LambdaConfigurationResourceHandler._add_services_settings(
            stack, settings, services)

        self.assertEqual(actual_permitted_arns, [])
Example #9
0
    def test_calls_validator_when_no_provided_value(self):
        """A schema validator runs with value None when the property is absent from the event."""

        event = {'ResourceProperties': {}}

        def validator(name, value):
            # The loader should pass the schema key with no provided value.
            self.assertEqual(name, 'Test')
            self.assertEqual(value, None)
            return 'Result'

        props = properties.load(event, {'Test': validator})

        # The validator's return value becomes the loaded property value.
        self.assertEqual('Result', props.Test)
Example #10
0
def handler(event, context):
    """No-op custom resource used as a placeholder in empty deployment stacks."""

    # This resource does nothing. It exists so that deployment stacks can be created
    # before any resource groups have been defined. In such cases the Resources list
    # would be empty, which CloudFormation doesn't allow, so the lmbr_aws client inserts
    # this resource into the deployment template when no other resources are defined
    # by the template.

    # Run the loader with an empty schema so unexpected properties are still rejected.
    props = properties.load(event, {})

    stack_name = aws_utils.get_stack_name_from_stack_arn(event['StackId'])
    physical_resource_id = 'CloudCanvas:EmptyDeployment:{}'.format(stack_name)

    return custom_resource_response.success_response({}, physical_resource_id)
Example #11
0
def handler(event, context):
    '''Entry point for the Custom::AccessControl resource handler.'''

    props = properties.load(event, {
        'ConfigurationBucket': properties.String(), # Currently not used
        'ConfigurationKey': properties.String()})   # Depend on unique upload id in key to force Cloud Formation to call handler

    # Validate RequestType
    request_type = event['RequestType']
    if request_type not in ['Create', 'Update', 'Delete']:
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    # Get stack_info for the AccessControl resource's stack.
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)

    # Physical ID is always the same.
    physical_resource_id = aws_utils.get_stack_name_from_stack_arn(stack_arn) + '-' + event['LogicalResourceId']

    # The AccessControl resource has no output values.
    data = {}

    # Accumulate problems encountered so we can give a full report.
    problems = ProblemList()

    # Apply access control as determined by the Cloud Canvas stack type.
    if stack.stack_type == stack.STACK_TYPE_RESOURCE_GROUP:
        were_changes = _apply_resource_group_access_control(request_type, stack, problems)
    elif stack.stack_type == stack.STACK_TYPE_DEPLOYMENT_ACCESS:
        were_changes = _apply_deployment_access_control(request_type, stack, problems)
    elif stack.stack_type == stack.STACK_TYPE_PROJECT:
        were_changes = _apply_project_access_control(request_type, stack, problems)
    else:
        raise RuntimeError('The Custom::AccessControl resource can only be used in resource group, deployment access, or project stack templates.')

    # If there were any problems, provide an error message with all the details.
    if problems:
        raise RuntimeError('Found invalid AccessControl metadata:\n    {}'.format(problems))

    # If there were changes, wait a few seconds for them to propagate.
    # Fixed: this was a Python 2 print statement, a SyntaxError under Python 3.
    if were_changes:
        print('Delaying {} seconds for change propagation'.format(PROPAGATION_DELAY_SECONDS))
        time.sleep(PROPAGATION_DELAY_SECONDS)

    # Successful execution.
    return custom_resource_response.success_response(data, physical_resource_id)
def handler(event, context):
    """Entry point for the Custom::CognitoIdPoolSharedRole resource handler."""
    stack_id = event['StackId']

    # Nothing to tear down on delete; answer with an empty ARN.
    if event['RequestType'] == 'Delete':
        return custom_resource_response.success_response({'Arn': ''}, '')

    schema = {
        'ConfigurationBucket': properties.String(),
        'ConfigurationKey': properties.String(),
        'LogicalPoolName': properties.String(),
        'RoleType': properties.String(default=""),
        'Path': properties.String(),
        'AssumeRolePolicyDocument': properties.Dictionary()
    }
    props = properties.load(event, schema)

    stack = stack_info.StackInfoManager().get_stack_info(stack_id)

    identity_client = identity_pool.get_identity_client()

    cognito_pool_info = aws_utils.get_cognito_pool_from_file(
        props.ConfigurationBucket, props.ConfigurationKey,
        props.LogicalPoolName, stack)

    if cognito_pool_info:
        # Pool already recorded on file: return the ARN for the requested role type.
        pool_roles = identity_client.get_identity_pool_roles(
            IdentityPoolId=cognito_pool_info['PhysicalResourceId'])
        arn = pool_roles.get("Roles", {}).get(props.RoleType, "")
    else:
        # Set up resource tags for all resources created
        tags = [
            {"Key": constant.PROJECT_NAME_TAG, "Value": stack.project_stack.project_name},
            {"Key": constant.STACK_ID_TAG, "Value": stack_id},
        ]

        role_name = "{}{}Role".format(stack.stack_name, event['LogicalResourceId'])
        arn = _create_role(role_name, props, tags)

    return custom_resource_response.success_response({'Arn': arn}, arn)
Example #13
0
def handler(event, context):
    """Report the resource group's configuration bucket/key and template URL."""

    props = properties.load(event, {
        'ConfigurationBucket': properties.String(),
        'ConfigurationKey': properties.String(),
        'ResourceGroupName': properties.String()})

    # Key under which this resource group's uploaded content lives.
    configuration_key = '{}/resource-group/{}'.format(
        props.ConfigurationKey, props.ResourceGroupName)

    # Public S3 URL of the resource group's template.
    template_url = 'https://s3.amazonaws.com/{}/{}/resource-group/{}/resource-template.json'.format(
        props.ConfigurationBucket, props.ConfigurationKey, props.ResourceGroupName)

    data = {
        'ConfigurationBucket': props.ConfigurationBucket,
        'ConfigurationKey': configuration_key,
        'TemplateURL': template_url
    }

    physical_resource_id = 'CloudCanvas:LambdaConfiguration:{stack_name}:{resource_group_name}'.format(
        stack_name=aws_utils.get_stack_name_from_stack_arn(event['StackId']),
        resource_group_name=props.ResourceGroupName)

    response.succeed(event, context, data, physical_resource_id)
Example #14
0
    def test_calls_validator_with_provided_value_when_wildcard(self):
        """A '*' wildcard validator receives every provided property name and value."""

        event = {'ResourceProperties': {'Test1': 'Value1', 'Test2': 'Value2'}}

        def handler(name, value):
            if name == 'Test1':
                self.assertEqual(value, 'Value1')
                return 'Result1'
            if name == 'Test2':
                self.assertEqual(value, 'Value2')
                return 'Result2'
            # fail() reports an explicit message; assertTrue(False) gives no context.
            self.fail('unexpected property name: {}'.format(name))

        schema = {'*': handler}

        props = properties.load(event, schema)

        self.assertEqual('Result1', props.Test1)
        self.assertEqual('Result2', props.Test2)
Example #15
0
    def test_calls_correct_validator_with_provided_value_when_wildcard(self):
        """An exact-name validator takes precedence over the '*' wildcard for its property."""

        event = {'ResourceProperties': {'Test1': 'Value1', 'Test2': 'Value2'}}

        def wildcard_validator(name, value):
            # Should only ever see Test1; Test2 has its own validator.
            self.assertEqual(name, 'Test1')
            self.assertEqual(value, 'Value1')
            return 'Result1'

        def test2_validator(name, value):
            self.assertEqual(name, 'Test2')
            self.assertEqual(value, 'Value2')
            return 'Result2'

        props = properties.load(
            event, {'*': wildcard_validator, 'Test2': test2_validator})

        self.assertEqual('Result1', props.Test1)
        self.assertEqual('Result2', props.Test2)
Example #16
0
def handler(event, context):
    """Resolve the physical id of a named reference for the owning stack."""
    stack_arn = event['StackId']
    stack = stack_info.StackInfoManager().get_stack_info(stack_arn)
    props = properties.load(event, {'ReferenceName': properties.String()})

    request_type = event['RequestType']
    if request_type not in ('Create', 'Update', 'Delete'):
        raise RuntimeError('Unexpected request type: {}'.format(request_type))

    # Deletes have nothing to resolve; otherwise look up the reference's physical id.
    if request_type == 'Delete':
        data = {}
    else:
        data = {
            'PhysicalId': _get_reference_physical_id(stack, props.ReferenceName)
        }

    # Embed the reference name in the physical resource id so later events can recover it.
    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        stack_arn, event['LogicalResourceId'],
        {'ReferenceName': props.ReferenceName})

    return custom_resource_response.success_response(data, physical_resource_id)
Example #17
0
def load_reference_metadata_properties(event):
    """Load only the 'ReferenceMetadata' property from the event, discarding all others."""

    # Strip the fields other than 'ReferenceMetadata' from event['ResourceProperties'].
    reference_metadata = event.get('ResourceProperties', {}).get('ReferenceMetadata', {})
    event['ResourceProperties'] = {'ReferenceMetadata': reference_metadata}

    permissions_schema = properties.Object(
        schema={
            'Action': properties.StringOrListOfString(),
            'ResourceSuffix': properties.StringOrListOfString()
        })

    reference_metadata_schema = properties.Object(
        schema={
            'Arn': properties.String(),
            'PhysicalId': properties.String(),
            'Permissions': permissions_schema
        })

    return properties.load(event, {'ReferenceMetadata': reference_metadata_schema})
    def test_add_services_settings(self, mock_ServiceDirectory,
                                   mock_add_service_settings):
        """_add_services_settings should consult the directory for every declared service."""

        props = properties.load(
            self.event, LambdaConfigurationResourceHandler.PROPERTIES_SCHEMA)

        settings = props.Settings.__dict__
        services = props.Services

        stack = mock.MagicMock(name='stack')

        actual_permitted_arns = LambdaConfigurationResourceHandler._add_services_settings(
            stack, settings, services)

        # assertEquals is a deprecated alias removed in Python 3.12; use assertEqual.
        self.assertEqual(actual_permitted_arns, [])

        mock_ServiceDirectory.assert_called_once_with(
            stack.project.configuration_bucket)

        # One _add_service_settings call per declared service.
        mock_add_service_settings.assert_has_calls([
            mock.call(stack, mock_ServiceDirectory.return_value, service,
                      settings['Services'], actual_permitted_arns)
            for service in services
        ])
def handler(event, context):
    """Entry point for the lambda-configuration custom resource handler.

    Manages the access-control role for the function, injects settings into
    the code package, and returns the composed lambda configuration.
    """
    props = properties.load(event, PROPERTIES_SCHEMA)

    request_type = event['RequestType']
    stack_arn = event['StackId']
    # The function's logical name doubles as the access-control role name.
    logical_role_name = props.FunctionName
    stack_manager = stack_info.StackInfoManager()

    # Recover the id data embedded in a previously-issued physical resource id
    # (None-safe: may be absent on the first Create).
    id_data = aws_utils.get_data_from_custom_physical_resource_id(
        event.get('PhysicalResourceId', None))

    if request_type == 'Delete':
        # Tear down the role; deletes produce no response data.
        role_utils.delete_access_control_role(id_data, logical_role_name)

        response_data = {}

    else:
        stack = stack_manager.get_stack_info(stack_arn)

        if request_type == 'Create':

            # New role assumable by the Lambda service, seeded with a default policy.
            project_service_lambda_arn = _get_project_service_lambda_arn(stack)
            assume_role_service = 'lambda.amazonaws.com'
            role_arn = role_utils.create_access_control_role(
                stack_manager,
                id_data,
                stack_arn,
                logical_role_name,
                assume_role_service,
                default_policy=get_default_policy(project_service_lambda_arn))

        elif request_type == 'Update':

            # Role already exists; just look up its ARN.
            role_arn = role_utils.get_access_control_role_arn(
                id_data, logical_role_name)

        else:
            raise RuntimeError(
                'Unexpected request type: {}'.format(request_type))

        # Mutates props.Settings in place with framework-provided entries.
        _add_built_in_settings(props.Settings.__dict__, stack)
        # Check if we have a folder just for this function, if not use the default
        output_key = input_key = _get_input_key(props)
        if not props.IgnoreAppendingSettingsToZip:
            # Rewrites the code package with settings baked in; yields a new S3 key.
            output_key = _inject_settings(props.Settings.__dict__,
                                          props.Runtime,
                                          props.ConfigurationBucket, input_key,
                                          props.FunctionName)

        # Deep copy so the removal below does not disturb props.Settings itself.
        cc_settings = copy.deepcopy(props.Settings.__dict__)
        # Remove "Services" from settings because they get injected into the python code package during _inject_settings
        # TODO: move handling of project-level service interfaces to the same code as cross-gem interfaces
        if "Services" in cc_settings:
            del cc_settings["Services"]

        response_data = {
            'ConfigurationBucket':
            props.ConfigurationBucket,
            'ConfigurationKey':
            output_key,
            'Runtime':
            props.Runtime,
            'Role':
            role_arn,
            'RoleName':
            role_utils.get_access_control_role_name(stack_arn,
                                                    logical_role_name),
            # Ready-to-use fragment for an AWS::Lambda::Function definition.
            'ComposedLambdaConfiguration': {
                'Code': {
                    'S3Bucket': props.ConfigurationBucket,
                    'S3Key': output_key
                },
                "Environment": {
                    "Variables": cc_settings
                },
                'Role': role_arn,
                'Runtime': props.Runtime
            },
            "CCSettings":
            cc_settings
        }

    # Embed id_data in the physical id so later Update/Delete events can recover it.
    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        stack_arn, event['LogicalResourceId'], id_data)
    custom_resource_response.succeed(event, context, response_data,
                                     physical_resource_id)
Example #20
0
def handler(event, context):
    """Entry point for the Custom::ServiceApi resource handler."""
    stack_id = event['StackId']
    request_type = event['RequestType']
    logical_resource_id = event['LogicalResourceId']
    # The resource's logical id doubles as its access-control role name.
    logical_role_name = logical_resource_id
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)

    rest_api_resource_name = stack.stack_name + '-' + logical_resource_id
    # Recover state (e.g. RestApiId) embedded in a previously-issued physical id.
    id_data = aws_utils.get_data_from_custom_physical_resource_id(
        event.get('PhysicalResourceId', None))

    response_data = {}
    # Tags applied to the API Gateway resources, identifying the owning project/stack.
    project_tags = {
        constant.PROJECT_NAME_TAG: stack.project_stack.project_name,
        constant.STACK_ID_TAG: stack_id
    }

    if request_type == 'Create':

        # Create: provision the role, configure swagger, and create the API gateway.
        props = properties.load(event, PROPERTY_SCHEMA)
        role_arn = role_utils.create_access_control_role(
            stack_manager, id_data, stack.stack_arn, logical_role_name,
            API_GATEWAY_SERVICE_NAME)
        swagger_content = get_configured_swagger_content(
            stack, props, role_arn, rest_api_resource_name)
        rest_api_id = create_api_gateway(rest_api_resource_name, props,
                                         swagger_content)
        service_url = get_service_url(rest_api_id, stack.region)

        register_service_interfaces(stack, service_url, swagger_content)
        update_api_gateway_tags(rest_api_id, event, project_tags)

        response_data['Url'] = service_url
        # Remember the API id so Update/Delete events can find it again.
        id_data['RestApiId'] = rest_api_id

    elif request_type == 'Update':

        rest_api_id = id_data.get('RestApiId', None)
        if not rest_api_id:
            raise RuntimeError(
                'No RestApiId found in id_data: {}'.format(id_data))

        # Update: reuse the existing role and API, push new swagger content.
        props = properties.load(event, PROPERTY_SCHEMA)
        role_arn = role_utils.get_access_control_role_arn(
            id_data, logical_role_name)
        swagger_content = get_configured_swagger_content(
            stack, props, role_arn, rest_api_resource_name)
        update_api_gateway(rest_api_id, props, swagger_content)
        service_url = get_service_url(rest_api_id, stack.region)
        register_service_interfaces(stack, service_url, swagger_content)

        update_api_gateway_tags(rest_api_id, event, project_tags)

        response_data['Url'] = service_url

    elif request_type == 'Delete':

        if not id_data:

            # The will be no data in the id if Cloud Formation cancels a resource creation
            # (due to a failure in another resource) before it processes the resource create
            # response. Apparently Cloud Formation has an internal temporary id for the
            # resource and uses it for the delete request.
            #
            # Unfortunately there isn't a good way to deal with this case. We don't have the
            # id data, so we can't clean up the things it identifies. At best we can allow the
            # stack cleanup to continue, leaving the rest API behind and role behind.

            print('WARNING: No id_data provided on delete.')

        else:

            rest_api_id = id_data.get('RestApiId', None)
            if not rest_api_id:
                raise RuntimeError(
                    'No RestApiId found in id_data: {}'.format(id_data))

            # Delete: tear down the API, its interface registrations, and the role.
            delete_api_gateway(rest_api_id)
            service_url = get_service_url(rest_api_id, stack.region)
            unregister_service_interfaces(stack, service_url)

            del id_data['RestApiId']

            role_utils.delete_access_control_role(id_data, logical_role_name)

    else:

        raise RuntimeError('Invalid RequestType: {}'.format(request_type))

    # Re-embed the (possibly updated) id_data in the physical resource id.
    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        event['StackId'], logical_resource_id, id_data)

    return custom_resource_response.success_response(response_data,
                                                     physical_resource_id)
Example #21
0
def handler(event, context):
    """Entry point for the Custom::CognitoIdentityPool resource handler."""
    stack_id = event['StackId']

    props = properties.load(
        event,
        {
            'ConfigurationBucket':
            properties.String(),
            'ConfigurationKey':
            properties.String(
            ),  # this is only here to force the resource handler to execute on each update to the deployment
            'IdentityPoolName':
            properties.String(),
            'UseAuthSettingsObject':
            properties.String(),
            'AllowUnauthenticatedIdentities':
            properties.String(),
            'DeveloperProviderName':
            properties.String(default=''),
            'ShareMode':
            properties.String(
                default=''
            ),  # SHARED when the pool from the file should be used
            'Roles':
            properties.Object(default={}, schema={'*': properties.String()}),
            'RoleMappings':
            properties.Object(
                default={},
                schema={
                    'Cognito':
                    properties.Object(
                        default={},
                        schema={
                            'Type': properties.String(''),
                            'AmbiguousRoleResolution': properties.String('')
                        })
                })
        })

    # give the identity pool a unique name per stack
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)

    # Set up resource tags for all resources created
    tags = {
        constant.PROJECT_NAME_TAG: stack.project_stack.project_name,
        constant.STACK_ID_TAG: stack_id
    }

    # Pool definition recorded on file, if any (used when ShareMode is SHARED).
    shared_pool = aws_utils.get_cognito_pool_from_file(
        props.ConfigurationBucket, props.ConfigurationKey,
        event['LogicalResourceId'], stack)

    identity_pool_name = stack.stack_name + props.IdentityPoolName
    # Cognito pool names use spaces where stack names use dashes.
    identity_pool_name = identity_pool_name.replace('-', ' ')
    identity_client = identity_pool.get_identity_client()
    # Pool id is embedded in the physical resource id from a previous event, if any.
    identity_pool_id = custom_resource_utils.get_embedded_physical_id(
        event.get('PhysicalResourceId'))
    found_pool = identity_pool.get_identity_pool(identity_pool_id)

    request_type = event['RequestType']
    # SHARED mode: reuse the pool recorded on file instead of managing one here.
    if shared_pool and props.ShareMode == 'SHARED':
        data = {
            'IdentityPoolName': identity_pool_name,
            'IdentityPoolId': shared_pool['PhysicalResourceId']
        }
        return custom_resource_response.success_response(
            data, shared_pool['PhysicalResourceId'])

    if request_type == 'Delete':
        # Only delete the pool if it still exists.
        if found_pool is not None:
            identity_client.delete_identity_pool(
                IdentityPoolId=identity_pool_id)
        data = {}

    else:
        # Create/Update path. Property values arrive as strings; compare lowercase.
        use_auth_settings_object = props.UseAuthSettingsObject.lower(
        ) == 'true'
        supported_login_providers = {}

        if use_auth_settings_object:
            # download the auth settings from s3
            player_access_key = 'player-access/' + constant.AUTH_SETTINGS_FILENAME
            auth_doc = json.loads(
                _load_doc_from_s3(props.ConfigurationBucket,
                                  player_access_key))

            # if the doc has entries add them to the supported_login_providers dictionary
            if len(auth_doc) > 0:
                for key, value in six.iteritems(auth_doc):
                    supported_login_providers[
                        value['provider_uri']] = value['app_id']

        cognito_identity_providers = identity_pool.get_cognito_identity_providers(
            stack_manager, stack_id, event['LogicalResourceId'])

        print('Identity Providers: {}'.format(cognito_identity_providers))
        allow_anonymous = props.AllowUnauthenticatedIdentities.lower(
        ) == 'true'
        # if the pool exists just update it, otherwise create a new one

        args = {
            'IdentityPoolName': identity_pool_name,
            'AllowUnauthenticatedIdentities': allow_anonymous,
            'SupportedLoginProviders': supported_login_providers,
            'CognitoIdentityProviders': cognito_identity_providers,
            'IdentityPoolTags': tags
        }

        # DeveloperProviderName is optional; only pass it when configured.
        if props.DeveloperProviderName:
            args['DeveloperProviderName'] = props.DeveloperProviderName

        if found_pool is not None:
            identity_client.update_identity_pool(
                IdentityPoolId=identity_pool_id, **args)
        else:
            response = identity_client.create_identity_pool(**args)
            identity_pool_id = response['IdentityPoolId']

        # update the roles for the pool
        role_mappings = {}
        # Role mapping is keyed by '<ProviderName>:<ClientId>' of the first provider.
        if props.RoleMappings.Cognito.Type and len(
                cognito_identity_providers) > 0:
            print('Adding role mappings for Cognito {}'.format(
                props.RoleMappings.Cognito.__dict__))
            role_mappings['{}:{}'.format(
                cognito_identity_providers[0]['ProviderName'],
                cognito_identity_providers[0]
                ['ClientId'])] = props.RoleMappings.Cognito.__dict__

        print("Role Mappings: {}".format(role_mappings))
        identity_client.set_identity_pool_roles(
            IdentityPoolId=identity_pool_id,
            Roles=props.Roles.__dict__,
            RoleMappings=role_mappings)

        data = {
            'IdentityPoolName': identity_pool_name,
            'IdentityPoolId': identity_pool_id
        }

    # The pool id itself is the physical resource id.
    physical_resource_id = identity_pool_id

    return custom_resource_response.success_response(data,
                                                     physical_resource_id)
def handler(event, context):
    """Register custom CloudFormation resource type definitions for a project stack.

    For each resource type in the 'Definitions' resource property, creates (or
    updates) an IAM role and up to two Lambda functions per type (an ARN lookup
    function and a handler function), then records the resulting lambda and
    definition bookkeeping as a JSON object in the project's configuration bucket.

    Args:
        event: CloudFormation custom resource request (Create/Update/Delete).
        context: Lambda context object, forwarded to the response helper.

    Raises:
        RuntimeError: if the resource is used outside the project stack, or a
            definition lacks the required 'Function' field.
    """
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)
    if not stack.is_project_stack:
        raise RuntimeError("Resource Types can only be defined in the project stack.")
    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = _create_lambda_client(stack_arn)  # presumably a region-scoped Lambda client; helper defined elsewhere
    created_or_updated_lambdas = {}
    lambda_roles = []  # NOTE(review): appended to below but never read again in this handler

    # Set up tags for all resources created, must be project stack
    # Note: IAM takes an array of [ {'Key':, 'Value':}] format, Lambda take a dict of {string: string} pairs
    iam_tags = [
        {'Key': constant.PROJECT_NAME_TAG, 'Value': stack.stack_name},
        {'Key': constant.STACK_ID_TAG, 'Value': stack_arn}
    ]
    lambda_tags = {constant.PROJECT_NAME_TAG: stack.stack_name, constant.STACK_ID_TAG: stack_arn}

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource_stack>/<resource_name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)  # NOTE(review): appears unused below

    # Load information from the JSON file if it exists.
    # (It will exist on a Create event if the resource was previously deleted and recreated.)
    try:
        contents = s3_client.get_object(Bucket=configuration_bucket, Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
        definitions_dictionary = existing_info['Definitions']
        existing_lambdas = existing_info['Lambdas']
        if isinstance(existing_lambdas, dict):
            lambda_dictionary = existing_lambdas
        else:
            # Backwards compatibility
            lambda_dictionary = {}
            existing_lambdas = set([x.split(":")[6] for x in existing_lambdas])  # Convert arn to function name
    except ClientError as e:
        error_code = e.response['Error']['Code']
        if error_code == 'NoSuchKey':
            # First creation of this resource: start with empty bookkeeping.
            definitions_dictionary = {}
            existing_lambdas = {}
            lambda_dictionary = {}
        else:
            raise e

    # Process the actual event
    if event_type == 'Delete':
        # Every previously recorded definition is reported as deleted.
        deleted_entries = set(definitions_dictionary.keys())

    else:
        definitions = props.Definitions  # NOTE(review): assigned but not used; loop below reads definitions_src instead
        lambda_config_src = event['ResourceProperties'].get('LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name, lambda_dictionary, False,
                definitions_src[resource_type_name])
            function_infos = [type_info.arn_function, type_info.handler_function]

            for function_info, field, tag, description in zip(function_infos, _lambda_fields, _lambda_tags,
                                                              _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError("Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                                       "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name, type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service("lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path,
                        Tags=iam_tags)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    # Role already exists (e.g. re-create after delete): reuse it.
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_create_base_lambda_policy())
                role_policy['Statement'].extend(function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(RoleName=role_name, PolicyName=_inline_policy_name,
                                           PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)

                lambda_info = {
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description,
                    'tags': lambda_tags
                }

                # Merge in any lambda specific configs overrides
                if 'HandlerFunctionConfiguration' in function_info:
                    lambda_override = function_info['HandlerFunctionConfiguration']
                    if lambda_override:
                        print("Found LambdaConfiguration override {}".format(lambda_override))
                        lambda_info['lambda_config_overrides'] = lambda_override

                lambdas_to_create.append(lambda_info)

        # We create the lambdas in a separate pass because role-propagation to lambda takes a while, and we don't want
        # to have to delay multiple times for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn, version = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas
            )
            created_or_updated_lambdas[info['lambda_function_name']] = {'arn': arn, 'v': version}

            # Finally add/update a role policy to give least privileges to the Lambdas to log events
            policy_document = _generate_lambda_log_event_policy(arn)
            iam_client.put_role_policy(RoleName=aws_utils.get_role_name_from_role_arn(info['role_arn']),
                                       PolicyDocument=json.dumps(policy_document),
                                       PolicyName='LambdaLoggingEventsPolicy')

        # Definitions present in the stored file but absent from this event are considered deleted.
        deleted_entries = set(definitions_dictionary.keys()) - set(definitions_src.keys())

    # Physical ID derived from the stack ancestry path (root directory component excluded).
    physical_resource_id = "-".join(path_components[1:])
    lambda_dictionary.update(created_or_updated_lambdas)
    definitions_dictionary.update(definitions_src)
    config_info = {
        'StackId': stack_arn,
        'Id': physical_resource_id,
        'Lambdas': lambda_dictionary,
        'Definitions': definitions_dictionary,
        'Deleted': list(deleted_entries)
    }
    data = {
        'ConfigBucket': configuration_bucket,
        'ConfigKey': resource_file_key
    }

    # Copy the resource definitions to the configuration bucket.
    s3_client.put_object(Bucket=configuration_bucket, Key=resource_file_key, Body=json.dumps(config_info, indent=2))
    custom_resource_response.succeed(event, context, data, physical_resource_id)
def handler(event, context):
    """Entry point for the Custom::CognitoUserPool resource handler.

    Creates, updates, or deletes a Cognito user pool for the stack, reconciles
    the pool's client apps and groups against the resource template, links the
    pool into the Cognito identity pool's providers, and returns the pool's
    name, id, and client app data.

    Args:
        event: CloudFormation custom resource request (Create/Update/Delete).
        context: Lambda context (unused here beyond the framework contract).

    Returns:
        A custom resource success response containing UserPoolName,
        UserPoolId, and ClientApps (empty data on Delete).
    """
    stack_id = event['StackId']

    props = properties.load(
        event,
        {
            'ClientApps':
            properties.StringOrListOfString(),
            'ExplicitAuthFlows':
            properties.StringOrListOfString(default=[]),
            'RefreshTokenValidity':
            properties.String('30'),
            'ConfigurationKey':
            properties.String(
            ),  # this is only here to force the resource handler to execute on each update to the deployment
            'LambdaConfig':
            properties.Dictionary({}),
            'PoolName':
            properties.String(),
            'Groups':
            properties.ObjectOrListOfObject(
                default=[],
                schema={
                    'Name': properties.String(),
                    'Description': properties.String(''),
                    'Role': properties.String(),
                    'Precedence': properties.String('99')
                }),
            'AllowAdminCreateUserOnly':
            properties.String('')
        })

    # give the identity pool a unique name per stack
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_id)

    stack_name = stack.stack_name
    pool_name = props.PoolName.replace('-', ' ')
    pool_name = stack_name + pool_name
    cognito_idp_client = user_pool.get_idp_client()
    # The physical resource id may embed extra data; extract the bare pool id.
    pool_id = custom_resource_utils.get_embedded_physical_id(
        event.get('PhysicalResourceId'))
    found_pool = user_pool.get_user_pool(pool_id)

    # Set up tags for all resources created
    tags = {
        constant.PROJECT_NAME_TAG: stack.project_stack.project_name,
        constant.STACK_ID_TAG: stack_id
    }

    request_type = event['RequestType']

    if request_type == 'Delete':
        if found_pool is not None:
            cognito_idp_client.delete_user_pool(UserPoolId=pool_id)
        data = {}

    else:
        # if the pool exists just update it, otherwise create a new one

        mfa_config = 'OFF'  # MFA is currently unsupported by Lumberyard
        # Users are automatically prompted to verify these things.
        # At least one auto-verified thing (email or phone) is required to allow password recovery.
        auto_verified_attributes = ['email']

        client_app_data = {}
        lambda_config = props.LambdaConfig

        user_pool.validate_identity_metadata(stack_manager, stack_id,
                                             event['LogicalResourceId'],
                                             props.ClientApps)
        admin_create_user_config = __get_admin_create_user_config(
            props.AllowAdminCreateUserOnly)
        print(json.dumps(admin_create_user_config))

        if found_pool is not None:  # Update
            response = cognito_idp_client.update_user_pool(
                UserPoolId=pool_id,
                MfaConfiguration=mfa_config,
                AutoVerifiedAttributes=auto_verified_attributes,
                LambdaConfig=lambda_config,
                AdminCreateUserConfig=admin_create_user_config,
                UserPoolTags=tags)

            existing_client_apps = user_pool.get_client_apps(pool_id)
            client_app_data = update_client_apps(pool_id, props.ClientApps,
                                                 existing_client_apps, False,
                                                 props.ExplicitAuthFlows,
                                                 props.RefreshTokenValidity)

            response = cognito_idp_client.list_groups(UserPoolId=pool_id)

            # Reconcile the pool's actual groups against the ones requested in
            # the resource template: keep matches, delete the rest.
            found_groups = {}
            for actual_group in response['Groups']:
                group_name = actual_group['GroupName']
                for requested_group in props.Groups:
                    # does the group exist in the resource template
                    if group_name == requested_group.Name:
                        found_groups.update({group_name: True})
                        break

                # delete the group as it is no longer in the resource template
                if group_name not in found_groups:
                    cognito_idp_client.delete_group(
                        GroupName=actual_group['GroupName'],
                        UserPoolId=pool_id)

            print("Found groups=>{}".format(json.dumps(found_groups)))
            # iterate the groups defined in the user pool resource template
            for group in props.Groups:
                # update the group as it is currently a group in the user pool
                group_definition = __generate_group_definition(pool_id, group)
                print("Group '{}' is defined by {}".format(
                    group.Name, json.dumps(group_definition)))
                if group.Name in found_groups:
                    cognito_idp_client.update_group(**group_definition)
                else:
                    # group is a new group on the user pool
                    cognito_idp_client.create_group(**group_definition)

        else:  # Create
            response = cognito_idp_client.create_user_pool(
                PoolName=pool_name,
                MfaConfiguration=mfa_config,
                AutoVerifiedAttributes=auto_verified_attributes,
                LambdaConfig=lambda_config,
                AdminCreateUserConfig=admin_create_user_config,
                UserPoolTags=tags)
            pool_id = response['UserPool']['Id']
            print('User pool creation response: {}'.format(response))
            for group in props.Groups:
                group_definition = __generate_group_definition(pool_id, group)
                print("Group '{}' is defined by {}".format(
                    group.Name, json.dumps(group_definition)))
                cognito_idp_client.create_group(**group_definition)

            client_app_data = update_client_apps(pool_id, props.ClientApps, [],
                                                 False,
                                                 props.ExplicitAuthFlows,
                                                 props.RefreshTokenValidity)

        # Map stack -> logical resource -> pool/client-app ids so the identity
        # pool can refresh its Cognito identity providers.
        updated_resources = {
            stack_id: {
                event['LogicalResourceId']: {
                    'physical_id': pool_id,
                    'client_apps': {
                        client_app['ClientName']: {
                            'client_id': client_app['ClientId']
                        }
                        for client_app in client_app_data['Created'] +
                        client_app_data['Updated']
                    }
                }
            }
        }

        identity_pool.update_cognito_identity_providers(
            stack_manager, stack_id, pool_id, updated_resources)

        data = {
            'UserPoolName': pool_name,
            'UserPoolId': pool_id,
            'ClientApps': client_app_data,
        }

    physical_resource_id = pool_id

    return custom_resource_response.success_response(data,
                                                     physical_resource_id)
def handler(event, context):
    """Handle Create/Update/Delete for the Lambda configuration custom resource.

    Creates (or looks up) the function's access-control role, attaches the
    service access policy for permitted interface ARNs, optionally injects
    settings into the code zip, and replies with the composed Lambda
    configuration (bucket/key, runtime, and role).
    """
    props = properties.load(event, PROPERTIES_SCHEMA)
    req_type = event['RequestType']
    stack_arn = event['StackId']
    function_name = props.FunctionName
    stack_manager = stack_info.StackInfoManager()

    # Data embedded in the physical resource id travels between events.
    id_data = aws_utils.get_data_from_custom_physical_resource_id(event.get('PhysicalResourceId', None))

    if req_type == 'Delete':
        role_utils.delete_access_control_role(id_data, function_name)
        response_data = {}
    else:
        stack = stack_manager.get_stack_info(stack_arn)

        if req_type == 'Create':
            assume_role_service = 'lambda.amazonaws.com'
            role_arn = role_utils.create_access_control_role(
                stack_manager,
                id_data,
                stack_arn,
                function_name,
                assume_role_service,
                default_policy=get_default_policy(_get_project_service_lambda_arn(stack)))
        elif req_type == 'Update':
            role_arn = role_utils.get_access_control_role_arn(id_data, function_name)
        else:
            raise RuntimeError('Unexpected request type: {}'.format(req_type))

        settings = props.Settings.__dict__
        _add_built_in_settings(settings, stack)
        # give access to project level ServiceDirectory APIs
        # Other deployment-level APIs are handled in InterfaceDependeny resolver custom resource type
        permitted_arns = _add_services_settings(stack, settings, props.Services)
        _add_service_access_policy_to_role(role_arn, permitted_arns)

        # Check if we have a folder just for this function, if not use the default
        input_key = _get_input_key(props)
        output_key = input_key
        if not props.IgnoreAppendingSettingsToZip:
            output_key = _inject_settings(settings, props.Runtime, props.ConfigurationBucket, input_key, function_name)

        composed_code = {
            'S3Bucket': props.ConfigurationBucket,
            'S3Key': output_key
        }
        response_data = {
            'ConfigurationBucket': props.ConfigurationBucket,
            'ConfigurationKey': output_key,
            'Runtime': props.Runtime,
            'Role': role_arn,
            'RoleName': role_utils.get_access_control_role_name(stack_arn, function_name),
            'ComposedLambdaConfiguration': {
                'Code': composed_code,
                'Role': role_arn,
                'Runtime': props.Runtime
            }
        }

    physical_resource_id = aws_utils.construct_custom_physical_resource_id_with_data(
        stack_arn, event['LogicalResourceId'], id_data)
    custom_resource_response.succeed(event, context, response_data, physical_resource_id)
# Example #25
# 0
def handler(event, context):
    """Register custom resource type definitions (earlier ARN-list variant).

    For each type in the 'Definitions' resource property, creates (or reuses)
    an IAM role and up to two Lambda functions per type, deletes roles/lambdas
    that are no longer defined on Update, and writes a JSON record of the
    created ARNs and definitions to the project's configuration bucket.

    Args:
        event: CloudFormation custom resource request (Create/Update/Delete).
        context: Lambda context object, forwarded to the response helper.

    Raises:
        RuntimeError: if the resource is used outside the project stack, or a
            definition lacks the required 'Function' field.
    """
    event_type = event['RequestType']
    stack_arn = event['StackId']
    stack_manager = stack_info.StackInfoManager()
    stack = stack_manager.get_stack_info(stack_arn)
    if not stack.is_project_stack:
        raise RuntimeError(
            "Resource Types can only be defined in the project stack.")
    configuration_bucket = stack.project_stack.configuration_bucket
    source_resource_name = event['LogicalResourceId']
    props = properties.load(event, _schema)
    definitions_src = event['ResourceProperties']['Definitions']
    lambda_client = aws_utils.ClientWrapper(
        boto3.client("lambda", aws_utils.get_region_from_stack_arn(stack_arn)))
    lambda_arns = []
    lambda_roles = []

    # Build the file key as "<root directory>/<project stack>/<deployment stack>/<resource_stack>/<resource_name>.json"
    path_components = [x.stack_name for x in stack.ancestry]
    path_components.insert(0, constant.RESOURCE_DEFINITIONS_PATH)
    path_components.append(source_resource_name + ".json")
    resource_file_key = aws_utils.s3_key_join(*path_components)
    path_info = resource_type_info.ResourceTypesPathInfo(resource_file_key)  # NOTE(review): appears unused below

    # Load information from the JSON file if it exists
    if event_type != 'Create':
        contents = s3_client.get_object(Bucket=configuration_bucket,
                                        Key=resource_file_key)['Body'].read()
        existing_info = json.loads(contents)
    else:
        existing_info = None

    # Process the actual event
    if event_type == 'Delete':
        _delete_resources(existing_info['Lambdas'], existing_info['Roles'],
                          lambda_client)
        custom_resource_response.succeed(event, context, {},
                                         existing_info['Id'])
        # NOTE(review): no return here -- on Delete, execution falls through to
        # the code below, re-uploading the config file and signaling success a
        # second time; a return was likely intended after succeed().

    else:
        existing_roles = set()
        existing_lambdas = set()

        if event_type == 'Update':
            # Track what existed before so stale roles/lambdas can be removed.
            existing_roles = set(
                [arn.split(":")[-1] for arn in existing_info['Roles']])
            existing_lambdas = set(
                [arn.split(":")[-1] for arn in existing_info['Lambdas']])

        definitions = props.Definitions  # NOTE(review): assigned but not used; loop below reads definitions_src instead
        lambda_config_src = event['ResourceProperties'].get(
            'LambdaConfiguration', None)

        # Create lambdas for fetching the ARN and handling the resource creation/update/deletion
        lambdas_to_create = []

        for resource_type_name in definitions_src.keys():
            type_info = resource_type_info.ResourceTypeInfo(
                stack_arn, source_resource_name, resource_type_name,
                definitions_src[resource_type_name])
            function_infos = [
                type_info.arn_function, type_info.handler_function
            ]

            for function_info, field, tag, description in zip(
                    function_infos, _lambda_fields, _lambda_tags,
                    _lambda_descriptions):
                if function_info is None:
                    continue

                function_handler = function_info.get('Function', None)
                if function_handler is None:
                    raise RuntimeError(
                        "Definition for '%s' in type '%s' requires a 'Function' field with the handler "
                        "to execute." % (field, resource_type_name))

                # Create the role for the lambda(s) that will be servicing this resource type
                lambda_function_name = type_info.get_lambda_function_name(tag)
                role_name = role_utils.sanitize_role_name(lambda_function_name)
                role_path = "/%s/%s/" % (type_info.stack_name,
                                         type_info.source_resource_name)
                assume_role_policy_document = role_utils.get_assume_role_policy_document_for_service(
                    "lambda.amazonaws.com")

                try:
                    res = iam_client.create_role(
                        RoleName=role_name,
                        AssumeRolePolicyDocument=assume_role_policy_document,
                        Path=role_path)
                    role_arn = res['Role']['Arn']
                except ClientError as e:
                    # Role already exists: keep it (and don't delete it later).
                    if e.response["Error"]["Code"] != 'EntityAlreadyExists':
                        raise e
                    existing_roles.discard(role_name)
                    res = iam_client.get_role(RoleName=role_name)
                    role_arn = res['Role']['Arn']

                # Copy the base policy for the role and add any permissions that are specified by the type
                role_policy = copy.deepcopy(_lambda_base_policy)
                role_policy['Statement'].extend(
                    function_info.get('PolicyStatement', []))
                iam_client.put_role_policy(
                    RoleName=role_name,
                    PolicyName=_inline_policy_name,
                    PolicyDocument=json.dumps(role_policy))

                # Record this role and the type_info so we can create a lambda for it
                lambda_roles.append(role_name)
                lambdas_to_create.append({
                    'role_arn': role_arn,
                    'type_info': type_info,
                    'lambda_function_name': lambda_function_name,
                    'handler': "resource_types." + function_handler,
                    'description': description
                })

        # We create the lambdas in a separate pass because role-propagation to lambda takes a while, and we don't want
        # to have to delay multiple times for each role/lambda pair
        #
        # TODO: Replace delay (and all other instances of role/lambda creation) with exponential backoff
        time.sleep(role_utils.PROPAGATION_DELAY_SECONDS)

        for info in lambdas_to_create:
            # Create the lambda function
            arn = _create_or_update_lambda_function(
                lambda_client=lambda_client,
                timeout=props.LambdaTimeout,
                lambda_config_src=lambda_config_src,
                info=info,
                existing_lambdas=existing_lambdas)
            lambda_arns.append(arn)

        # For Update operations, delete any lambdas and roles that previously existed and now no longer do.
        _delete_resources(existing_lambdas, existing_roles, lambda_client)

    # Physical ID derived from the stack ancestry path (root directory component excluded).
    physical_resource_id = "-".join(path_components[1:])
    config_info = {
        'StackId': stack_arn,
        'Id': physical_resource_id,
        'Lambdas': lambda_arns,
        'Roles': lambda_roles,
        'Definitions': definitions_src
    }
    data = {
        'ConfigBucket': configuration_bucket,
        'ConfigKey': resource_file_key
    }

    # Copy the resource definitions to the configuration bucket.
    s3_client.put_object(Bucket=configuration_bucket,
                         Key=resource_file_key,
                         Body=json.dumps(config_info))
    custom_resource_response.succeed(event, context, data,
                                     physical_resource_id)
def handler(event, context):
    """Entry point for the Custom::CognitoIdentityPool resource handler.

    Creates, updates, or deletes the Cognito identity pool for the stack,
    wires up supported login providers and Cognito identity providers, and
    sets the pool's roles and role mappings.

    Modernized from Python 2 (`print` statements and `dict.iteritems()`, both
    invalid in Python 3) to Python 3 syntax; output and behavior are unchanged.

    Args:
        event: CloudFormation custom resource request (Create/Update/Delete).
        context: Lambda context (unused here beyond the framework contract).

    Returns:
        A custom resource success response containing IdentityPoolName and
        IdentityPoolId (empty data on Delete).
    """
    props = properties.load(
        event,
        {
            'ConfigurationBucket': properties.String(),
            # ConfigurationKey is only here to force the resource handler to
            # execute on each update to the deployment.
            'ConfigurationKey': properties.String(),
            'IdentityPoolName': properties.String(),
            'UseAuthSettingsObject': properties.String(),
            'AllowUnauthenticatedIdentities': properties.String(),
            'DeveloperProviderName': properties.String(default=''),
            'Roles': properties.Object(default={}, schema={'*': properties.String()}),
            'RoleMappings': properties.Object(
                default={},
                schema={
                    'Cognito': properties.Object(
                        default={},
                        schema={
                            'Type': properties.String(''),
                            'AmbiguousRoleResolution': properties.String('')
                        })
                })
        })

    # Give the identity pool a unique name per stack.
    stack_manager = stack_info.StackInfoManager()
    stack_name = aws_utils.get_stack_name_from_stack_arn(event['StackId'])
    identity_pool_name = stack_name + props.IdentityPoolName
    identity_pool_name = identity_pool_name.replace('-', ' ')
    identity_client = identity_pool.get_identity_client()
    # The physical resource id may embed extra data; extract the bare pool id.
    identity_pool_id = custom_resource_utils.get_embedded_physical_id(
        event.get('PhysicalResourceId'))
    found_pool = identity_pool.get_identity_pool(identity_pool_id)

    request_type = event['RequestType']
    if request_type == 'Delete':
        if found_pool is not None:
            identity_client.delete_identity_pool(
                IdentityPoolId=identity_pool_id)
        data = {}

    else:
        use_auth_settings_object = props.UseAuthSettingsObject.lower() == 'true'
        supported_login_providers = {}

        if use_auth_settings_object:
            # Download the auth settings from S3.
            player_access_key = 'player-access/' + constant.AUTH_SETTINGS_FILENAME
            auth_doc = json.loads(
                _load_doc_from_s3(props.ConfigurationBucket,
                                  player_access_key))

            # If the doc has entries, add them to the supported_login_providers
            # dictionary. (values() replaces Python 2's removed iteritems().)
            for value in auth_doc.values():
                supported_login_providers[value['provider_uri']] = value['app_id']

        cognito_identity_providers = identity_pool.get_cognito_identity_providers(
            stack_manager, event['StackId'], event['LogicalResourceId'])

        print('Identity Providers: ', cognito_identity_providers)
        allow_anonymous = props.AllowUnauthenticatedIdentities.lower() == 'true'

        # If the pool exists just update it, otherwise create a new one.
        args = {
            'IdentityPoolName': identity_pool_name,
            'AllowUnauthenticatedIdentities': allow_anonymous,
            'SupportedLoginProviders': supported_login_providers,
            'CognitoIdentityProviders': cognito_identity_providers
        }

        if props.DeveloperProviderName:
            args['DeveloperProviderName'] = props.DeveloperProviderName

        if found_pool is not None:
            identity_client.update_identity_pool(
                IdentityPoolId=identity_pool_id, **args)
        else:
            response = identity_client.create_identity_pool(**args)
            identity_pool_id = response['IdentityPoolId']

        # Update the roles for the pool. Role mapping keys are
        # "<provider name>:<client id>" of the first Cognito provider.
        role_mappings = {}
        if props.RoleMappings.Cognito.Type and len(
                cognito_identity_providers) > 0:
            print('Adding role mappings for cognito', props.RoleMappings.Cognito.__dict__)
            role_mappings['{}:{}'.format(
                cognito_identity_providers[0]['ProviderName'],
                cognito_identity_providers[0]
                ['ClientId'])] = props.RoleMappings.Cognito.__dict__

        print("Role Mappings: ", role_mappings)
        identity_client.set_identity_pool_roles(
            IdentityPoolId=identity_pool_id,
            Roles=props.Roles.__dict__,
            RoleMappings=role_mappings)

        data = {
            'IdentityPoolName': identity_pool_name,
            'IdentityPoolId': identity_pool_id
        }

    physical_resource_id = identity_pool_id

    return custom_resource_response.success_response(data,
                                                     physical_resource_id)