Example #1
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter(
            'auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for p in deployment_map.map_contents.get('pipelines'):
        thread = PropagatingThread(target=worker_thread,
                                   args=(p, organizations,
                                         auto_create_repositories, s3,
                                         deployment_map, parameter_store))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
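
Note: the snippet above relies on PropagatingThread, which is not shown on this page. A minimal sketch of how such a helper is commonly written (an illustrative reconstruction, not the ADF source): a Thread subclass that records any exception raised by its target and re-raises it on join(), so a failure inside worker_thread surfaces in main().

import threading


class PropagatingThread(threading.Thread):
    """Thread that re-raises an exception from its target on join() (sketch)."""

    def run(self):
        self.exc = None
        self.result = None
        try:
            # _target, _args and _kwargs are set by threading.Thread.__init__
            self.result = self._target(*self._args, **self._kwargs)
        except BaseException as error:
            self.exc = error

    def join(self, timeout=None):
        super().join(timeout)
        if self.exc:
            raise self.exc
        return self.result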
Example #2
def lambda_handler(event, _):
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event["account_id"],
            event["cross_account_access_role"]
        ), 'master_lambda'
    )

    if event['is_deployment_account']:
        configure_master_account_parameters(event)
        configure_deployment_account_parameters(event, role)

    s3 = S3(
        region=REGION_DEFAULT,
        bucket=S3_BUCKET
    )

    for region in list(set([event["deployment_account_region"]] + event["regions"])):
        if not event["is_deployment_account"]:
            configure_generic_account(sts, event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event["deployment_account_region"],
            role=role,
            wait=False,
            stack_name=None, # Stack name will be automatically defined based on event
            s3=s3,
            s3_key_path=event["full_path"],
            account_id=event["account_id"]
        )
        cloudformation.create_stack()

    return event
Example #3
def main():
    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store,
                                   os.environ["ADF_PIPELINE_PREFIX"])
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, boto3, S3_BUCKET_NAME)
    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-org-access-adf'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p['targets']:
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    try:
                        regions = step.get(
                            'regions',
                            p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                        pipeline.stage_regions.append(regions)
                        pipeline_target = Target(path, regions,
                                                 target_structure,
                                                 organizations)
                        pipeline_target.fetch_accounts_for_target()
                    except BaseException as error:
                        raise Exception(
                            "Failed to return accounts for {0}".format(path)
                        ) from error

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_if_required(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(os.environ["ADF_PIPELINE_PREFIX"],
                                        pipeline.name),
            s3=None,
            s3_key_path=None)

        cloudformation.create_stack()
Example #4
def lambda_handler(event, _):
    # Return if we need to update the pipelines only
    if event.get('update_only'):
        LOGGER.info('Will only update pipelines for this execution.')
        return event
    try:
        sts = STS(boto3)
        parameter_store = ParameterStore(
            event.get('deployment_account_region'), boto3)
        for region in list(
                set([event.get('deployment_account_region')] +
                    event.get("regions"))):
            kms_key_arn = parameter_store.fetch_parameter(
                "/cross_region/kms_arn/{0}".format(region))
            s3_bucket = parameter_store.fetch_parameter(
                "/cross_region/s3_regional_bucket/{0}".format(region))
            for account_id in event.get('account_ids'):
                try:
                    role = sts.assume_cross_account_role(
                        'arn:aws:iam::{0}:role/{1}'.format(
                            account_id, 'adf-cloudformation-deployment-role'),
                        'base_cfn_role')
                    IAMUpdater(kms_key_arn, s3_bucket, role)
                    kms = KMS(region, boto3, kms_key_arn, account_id)
                    kms.enable_cross_account_access()
                except ClientError:
                    continue
        return event
    except BaseException as error:
        LOGGER.exception(error)
Example #5
def main():  #pylint: disable=R0915
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p.get('targets', []):
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    regions = step.get(
                        'regions', p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                    step_name = step.get('name')
                    params = step.get('params', {})
                    pipeline.stage_regions.append(regions)
                    pipeline_target = Target(path, regions, target_structure,
                                             organizations, step_name, params)
                    pipeline_target.fetch_accounts_for_target()

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_pipeline(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(ADF_PIPELINE_PREFIX, pipeline.name),
            s3=None,
            s3_key_path=None,
            account_id=DEPLOYMENT_ACCOUNT_ID)
        cloudformation.create_stack()
Example #6
def create_or_update_account(org_session, support_session, account, adf_role_name, account_id=None):
    """Creates or updates a single AWS account.
    :param org_session: Instance of Organization class
    :param support_session: Instance of Support class
    :param account: Instance of Account class
    :param adf_role_name: Name of the cross-account role to assume in the account
    :param account_id: ID of an existing account; a new account is created when omitted
    """
    if not account_id:
        LOGGER.info(f'Creating new account {account.full_name}')
        account_id = org_session.create_account(account, adf_role_name)
        # This only runs on account creation at the moment.
        support_session.set_support_level_for_account(account, account_id)

    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            account_id,
            adf_role_name
        ), 'adf_account_provisioning'
    )

    LOGGER.info(f'Ensuring account {account_id} (alias {account.alias}) is in OU {account.ou_path}')
    org_session.move_account(account_id, account.ou_path)
    if account.delete_default_vpc:
        ec2_client = role.client('ec2')
        all_regions = [
            region['RegionName']
            for region in ec2_client.describe_regions(
                AllRegions=False,
                Filters=[
                    {
                        'Name': 'opt-in-status',
                        'Values': [
                            'opt-in-not-required',
                        ]
                    }
                ]
            )['Regions']
        ]
        args = (
            (account_id, region, role)
            for region in all_regions
        )
        with ThreadPoolExecutor(max_workers=10) as executor:
            for _ in executor.map(lambda f: schedule_delete_default_vpc(*f), args):
                pass

    if account.alias:
        LOGGER.info(f'Ensuring account alias for {account_id} of {account.alias}')
        org_session.create_account_alias(account.alias, role)

    if account.tags:
        LOGGER.info(f'Ensuring tags exist for account {account_id}: {account.tags}')
        org_session.create_account_tags(account_id, account.tags)
Example #7
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    deployment_map = DeploymentMap(
        parameter_store,
        ADF_PIPELINE_PREFIX
    )
    s3 = S3(
        DEPLOYMENT_ACCOUNT_REGION,
        S3_BUCKET_NAME
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for counter, p in enumerate(deployment_map.map_contents.get('pipelines')):
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            s3,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)
        _batcher = counter % 10
        if _batcher == 9:  # counter % 10 == 9 means a full batch of 10 threads has been started
            _interval = random.randint(5, 11)
            LOGGER.debug('Waiting for %s seconds before starting next batch of 10 threads.', _interval)
            time.sleep(_interval)

    for thread in threads:
        thread.join()
Example #8
def lambda_handler(event, _):
    parameters = ParameterStore(REGION_DEFAULT, boto3)
    account_id = event.get(
        'detail').get(
            'requestParameters').get('accountId')
    organizations = Organizations(boto3, account_id)
    parsed_event = Event(event, parameters, organizations, account_id)
    cache = Cache()

    if parsed_event.moved_to_root or parsed_event.moved_to_protected:
        return parsed_event.create_output_object(cache)

    parsed_event.set_destination_ou_name()

    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            parsed_event.account_id,
            parsed_event.cross_account_access_role
        ), 'master_lambda'
    )

    if parsed_event.is_deployment_account:
        update_master_account_parameters(parsed_event, parameters)
        configure_deployment_account(parsed_event, role)

    s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

    account_path = parsed_event.organizations.build_account_path(
        parsed_event.destination_ou_id,
        [],  # initial empty list to hold the OU path
        cache,
    )

    for region in list(set([parsed_event.deployment_account_region] + parsed_event.regions)):
        if not parsed_event.is_deployment_account:
            configure_generic_account(sts, parsed_event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=parsed_event.deployment_account_region,
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=account_path,
            file_path=None,
        )
        cloudformation.create_stack()

    return parsed_event.create_output_object(cache)
Example #9
def lambda_handler(event, _):
    if event.get("alias"):
        sts = STS()
        account_id = event.get("account_id")
        role = sts.assume_cross_account_role(
            f"arn:aws:iam::{account_id}:role/{ADF_ROLE_NAME}",
            "adf_account_alias_config",
        )
        create_account_alias(event, role.client("iam"))
    else:
        LOGGER.info(
            f"Account: {event.get('account_full_name')} does not need an alias"
        )
    return event
Example #10
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    _create_inputs_folder()
    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET)
    deployment_map = DeploymentMap(
        parameter_store,
        s3,
        ADF_PIPELINE_PREFIX
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )
    organizations = Organizations(role)
    clean(parameter_store, deployment_map)
    ensure_event_bus_status(ORGANIZATION_ID)
    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'
    threads = []
    _cache = Cache()
    for p in deployment_map.map_contents.get('pipelines', []):
        _source_account_id = p.get('default_providers', {}).get(
            'source', {}).get('properties', {}).get('account_id', {})
        if (_source_account_id
                and int(_source_account_id) != int(DEPLOYMENT_ACCOUNT_ID)
                and not _cache.check(_source_account_id)):
            rule = Rule(p['default_providers']['source']['properties']['account_id'])
            rule.create_update()
            _cache.add(p['default_providers']['source']['properties']['account_id'], True)
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Example #11
def lambda_handler(event, _):
    """Main Lambda Entry point
    """
    sts = STS()
    account_id = event.get('account_id')
    partition = get_partition(REGION_DEFAULT)
    cross_account_access_role = event.get('cross_account_access_role')

    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{account_id}:role/{cross_account_access_role}',
        'master')

    s3 = S3(REGION_DEFAULT, S3_BUCKET)

    for region in list(
            set([event['deployment_account_region']] + event['regions'])):

        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event['deployment_account_region'],
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=event['ou_name'],
            account_id=account_id)

        status = cloudformation.get_stack_status()

        if status in ('CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'):
            raise RetryError(f"CloudFormation Stack status: {status}")

        if status in ('CREATE_FAILED', 'ROLLBACK_FAILED', 'DELETE_FAILED',
                      'UPDATE_ROLLBACK_FAILED', 'ROLLBACK_IN_PROGRESS',
                      'ROLLBACK_COMPLETE'):
            raise Exception(
                f"Account Bootstrap Failed - Account: {account_id} "
                f"Region: {region} Status: {status}")

        if event.get('is_deployment_account'):
            update_deployment_account_output_parameters(
                deployment_account_region=event['deployment_account_region'],
                region=region,
                deployment_account_role=role,
                cloudformation=cloudformation)

    return event
Example #12
def lambda_handler(event, _):
    target_role_policies = {
        'adf-cloudformation-deployment-role': 'adf-cloudformation-deployment-role-policy-kms',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    role_policies = {
        'adf-codepipeline-role': 'adf-codepipeline-role-policy',
        'adf-cloudformation-deployment-role': 'adf-cloudformation-deployment-role-policy',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    sts = STS()
    partition = get_partition(REGION_DEFAULT)

    parameter_store = ParameterStore(
        region=event.get('deployment_account_region'),
        role=boto3
    )
    account_id = event.get("account_id")
    kms_key_arns = []
    s3_buckets = []
    for region in list(set([event.get('deployment_account_region')] + event.get("regions", []))):
        kms_key_arn = parameter_store.fetch_parameter(
            f"/cross_region/kms_arn/{region}"
        )
        kms_key_arns.append(kms_key_arn)
        s3_bucket = parameter_store.fetch_parameter(
            f"/cross_region/s3_regional_bucket/{region}"
        )
        s3_buckets.append(s3_bucket)
        try:
            role = sts.assume_cross_account_role(
                f'arn:{partition}:iam::{account_id}:role/adf-cloudformation-deployment-role',
                'base_cfn_role'
            )
            LOGGER.debug("Role has been assumed for %s", account_id)
            update_iam(role, s3_bucket, kms_key_arn, target_role_policies)
        except ClientError as err:
            LOGGER.debug("%s could not be assumed (%s), continuing", account_id, err, exc_info=True)
            continue

    update_iam(boto3, s3_buckets, kms_key_arns, role_policies)

    return event
Example #13
def lambda_handler(event, _):
    sts = STS(boto3)

    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event.get('deployment_account_id'),
            event.get('cross_account_iam_role')), 'step_function')

    step_functions = StepFunctions(
        role=role,
        deployment_account_id=event.get('deployment_account_id'),
        deployment_account_region=event.get('deployment_account_region'),
        regions=event.get('regions'),
        account_ids=[event.get('account_id')],
        update_pipelines_only=bool(
            event.get('moved_to_protected') or event.get('moved_to_root')))
    step_functions.execute_statemachine()

    return event
Example #14
def lambda_handler(event, _):
    """Main Lambda Entry point
    """
    sts = STS(boto3)

    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event.get('account_id'),
            event.get('cross_account_iam_role'),
        ), 'master')

    s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

    for region in list(
            set([event.get('deployment_account_region')] +
                event.get("regions"))):

        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event.get('deployment_account_region'),
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=event.get('ou_name'),
            file_path=None,
        )

        status = cloudformation.get_stack_status()

        if status in ("CREATE_IN_PROGRESS", "UPDATE_IN_PROGRESS"):
            raise RetryError("Cloudformation Stack not yet complete")

        # TODO Better waiting validation to ensure stack is not failed
        if event.get('is_deployment_account'):
            update_deployment_account_output_parameters(
                deployment_account_region=event.get(
                    'deployment_account_region'),
                region=region,
                deployment_account_role=role,
                cloudformation=cloudformation)

    return event
Example #15
def lambda_handler(event, _):
    target_role_policies = {
        'adf-cloudformation-deployment-role':
        'adf-cloudformation-deployment-role-policy-kms',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    role_policies = {
        'adf-codepipeline-role': 'adf-codepipeline-role-policy',
        'adf-cloudformation-deployment-role':
        'adf-cloudformation-deployment-role-policy',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    sts = STS()
    parameter_store = ParameterStore(
        region=event.get('deployment_account_region'), role=boto3)
    for region in list(
            set([event.get('deployment_account_region')] +
                event.get("regions", []))):
        kms_key_arn = parameter_store.fetch_parameter(
            "/cross_region/kms_arn/{0}".format(region))
        s3_bucket = parameter_store.fetch_parameter(
            "/cross_region/s3_regional_bucket/{0}".format(region))
        update_iam(boto3, s3_bucket, kms_key_arn, role_policies)
        for account_id in event.get('account_ids'):
            try:
                role = sts.assume_cross_account_role(
                    'arn:aws:iam::{0}:role/{1}'.format(
                        account_id, 'adf-cloudformation-deployment-role'),
                    'base_cfn_role')
                LOGGER.debug("Role has been assumed for %s", account_id)
                update_iam(role, s3_bucket, kms_key_arn, target_role_policies)
            except ClientError as err:
                LOGGER.debug("%s could not be assumed (%s), continuing",
                             account_id,
                             err,
                             exc_info=True)
                continue

    return event
Example #16
def lambda_handler(event, _):
    sts = STS()

    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(event['deployment_account_id'],
                                           event['cross_account_access_role']),
        'step_function')

    step_functions = StepFunctions(
        role=role,
        deployment_account_id=event['deployment_account_id'],
        deployment_account_region=event['deployment_account_region'],
        full_path=event['full_path'],
        regions=event['regions'],
        account_ids=[event['account_id']],
        update_pipelines_only=1 if event.get('moved_to_protected')
        or event.get('moved_to_root') else 0,
        error=event.get('error', 0))
    step_functions.execute_statemachine()

    return event
Example #17
def lambda_handler(event, _):
    sts = STS()

    account_id = event["account_id"]
    cross_account_access_role = event["cross_account_access_role"]
    role_arn = f'arn:{PARTITION}:iam::{account_id}:role/{cross_account_access_role}'

    role = sts.assume_cross_account_role(role_arn=role_arn,
                                         role_session_name='management_lambda')

    if event['is_deployment_account']:
        configure_master_account_parameters(event)
        configure_deployment_account_parameters(event, role)

    s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET)

    for region in list(
            set([event["deployment_account_region"]] + event["regions"])):
        if not event["is_deployment_account"]:
            configure_generic_account(sts, event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event["deployment_account_region"],
            role=role,
            wait=True,
            # Stack name will be automatically defined based on event
            stack_name=None,
            s3=s3,
            s3_key_path=event["full_path"],
            account_id=account_id)
        if is_inter_ou_account_move(event):
            cloudformation.delete_all_base_stacks(True)  # override Wait
        cloudformation.create_stack()
        if region == event["deployment_account_region"]:
            cloudformation.create_iam_stack()

    return event
Example #18
def lambda_handler(event, _):
    LOGGER.info(f"Fetching default regions for {event.get('account_full_name')}")
    sts = STS()
    account_id = event.get("account_id")
    role = sts.assume_cross_account_role(
        f"arn:aws:iam::{account_id}:role/{ADF_ROLE_NAME}",
        "adf_account_get_regions",
    )

    ec2_client = role.client("ec2")
    default_regions = [
        region["RegionName"] for region in ec2_client.describe_regions(
            AllRegions=False,
            Filters=[{
                "Name": "opt-in-status",
                "Values": ["opt-in-not-required", "opted-in"],
            }],
        )["Regions"]
    ]
    LOGGER.debug(f"Default regions for {account_id}: {default_regions}")
    return {
        **event,
        "default_regions": default_regions,
    }
Example #19
def lambda_handler(event, _):
    sts = STS(boto3)
    parameter_store = ParameterStore(event.get('deployment_account_region'),
                                     boto3)
    for region in list(
            set([event.get('deployment_account_region')] +
                event.get("regions"))):
        kms_key_arn = parameter_store.fetch_parameter(
            "/cross_region/kms_arn/{0}".format(region))
        s3_bucket = parameter_store.fetch_parameter(
            "/cross_region/s3_regional_bucket/{0}".format(region))
        for account_id in event.get('account_ids'):
            try:
                role = sts.assume_cross_account_role(
                    'arn:aws:iam::{0}:role/{1}'.format(
                        account_id, 'adf-cloudformation-deployment-role'),
                    'base_cfn_role')
                IAMUpdater(kms_key_arn, s3_bucket, role)
                kms = KMS(region, boto3, kms_key_arn, account_id)
                kms.enable_cross_account_access()
            except ClientError:
                continue

    return event
Example #20
def create_or_update_account(org_session,
                             account,
                             adf_role_name,
                             account_id=None):
    """Creates or updates a single AWS account.
    :param org_session: Instance of Organization class
    :param account: Instance of Account class
    :param adf_role_name: Name of the cross-account role to assume in the account
    :param account_id: ID of an existing account; a new account is created when omitted
    """
    if not account_id:
        LOGGER.info(f'Creating new account {account.full_name}')
        account_id = org_session.create_account(account, adf_role_name)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(account_id, adf_role_name),
        'delete_default_vpc')

    LOGGER.info(
        f'Ensuring account {account_id} (alias {account.alias}) is in OU {account.ou_path}'
    )
    org_session.move_account(account_id, account.ou_path)
    if account.delete_default_vpc:
        ec2_client = role.client('ec2')
        all_regions = get_all_regions(ec2_client)
        args = ((account_id, region, role) for region in all_regions)
        with ThreadPoolExecutor(max_workers=10) as executor:
            for _ in executor.map(lambda f: schedule_delete_default_vpc(*f),
                                  args):
                pass

    LOGGER.info(f'Ensuring account alias for {account_id} of {account.alias}')
    org_session.create_account_alias(account.alias, role)

    if account.tags:
        LOGGER.info(
            f'Ensuring tags exist for account {account_id}: {account.tags}')
        org_session.create_account_tags(account_id, account.tags)
Example #21
def lambda_handler(event, _):
    sts = STS()

    deployment_account_id = event.get('deployment_account_id')
    partition = get_partition(REGION_DEFAULT)
    cross_account_access_role = event.get('cross_account_access_role')

    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{deployment_account_id}:role/{cross_account_access_role}',
        'step_function')

    step_functions = StepFunctions(
        role=role,
        deployment_account_id=deployment_account_id,
        deployment_account_region=event['deployment_account_region'],
        full_path=event['full_path'],
        regions=event['regions'],
        account_ids=[event['account_id']],
        update_pipelines_only=1 if event.get('moved_to_protected')
        or event.get('moved_to_root') else 0,
        error=event.get('error', 0))
    step_functions.execute_statemachine()

    return event
Example #22
class Resolver:
    def __init__(self, parameter_store, stage_parameters,
                 comparison_parameters):
        self.parameter_store = parameter_store
        self.stage_parameters = stage_parameters
        self.comparison_parameters = comparison_parameters
        self.s3 = S3(DEFAULT_REGION, S3_BUCKET_NAME)
        self.sts = STS()

    def fetch_stack_output(self, value, key, param=None, optional=False):  #pylint: disable=R0912, R0915
        try:
            [_, account_id, region, stack_name, export] = str(value).split(':')
            if export.endswith('?'):
                export = export[:-1]
                LOGGER.info("Import %s is considered optional", export)
                optional = True
        except ValueError as error:
            raise ValueError(
                "{0} is not a valid import string. Syntax should be "
                "import:account_id:region:stack_name:export_key"
                .format(str(value))) from error
        LOGGER.info(
            "Assuming the role %s", 'arn:aws:iam::{0}:role/{1}'.format(
                account_id, 'adf-cloudformation-deployment-role'))
        try:
            role = self.sts.assume_cross_account_role(
                'arn:aws:iam::{0}:role/{1}'.format(
                    account_id, 'adf-cloudformation-deployment-role'),
                'importer')
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=os.environ["AWS_REGION"],
                role=role,
                stack_name=stack_name,
                account_id=account_id)
            LOGGER.info("Retrieving value of key %s from %s on %s in %s",
                        export, stack_name, account_id, region)
            stack_output = cloudformation.get_stack_output(export)
            LOGGER.info("Stack output value is %s", stack_output)
        except ClientError:
            if not optional:
                raise
            stack_output = ""
        if optional:
            if param:
                self.stage_parameters[param][key] = stack_output
            else:
                self.stage_parameters[key] = stack_output
            return
        else:
            if not stack_output:
                raise Exception(
                    "No Stack Output found on {0} in {1} with stack name {2} "
                    "and output key {3}".format(
                        account_id, region, stack_name, export))
            if param:
                self.stage_parameters[param][key] = stack_output
            else:
                self.stage_parameters[key] = stack_output
            return

    def upload(self, value, key, file_name, param=None):
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
            bucket_name = self.parameter_store.fetch_parameter(
                '/cross_region/s3_regional_bucket/{0}'.format(region))
            regional_client = S3(region, bucket_name)
            LOGGER.info("Uploading %s as %s to S3 Bucket %s in %s", value,
                        file_name, bucket_name, region)
            if param:
                self.stage_parameters[param][key] = regional_client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value))
            else:
                self.stage_parameters[key] = regional_client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value))
            return True
        [_, value] = value.split(':')
        LOGGER.info("Uploading %s to S3", value)
        if param:
            self.stage_parameters[param][key] = self.s3.put_object(
                "adf-upload/{0}/{1}".format(value, file_name),
                "{0}".format(value))
        else:
            self.stage_parameters[key] = self.s3.put_object(
                "adf-upload/{0}/{1}".format(value, file_name),
                "{0}".format(value))
        return False

    def fetch_parameter_store_value(self,
                                    value,
                                    key,
                                    param=None,
                                    optional=False):  #pylint: disable=R0912, R0915
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
            if value.endswith('?'):
                value = value[:-1]
                LOGGER.info("Parameter %s is considered optional", value)
                optional = True
            regional_client = ParameterStore(region, boto3)
            LOGGER.info("Fetching Parameter from %s", value)
            if param:
                try:
                    self.stage_parameters[param][
                        key] = regional_client.fetch_parameter(value)
                except ParameterNotFoundError:
                    if optional:
                        LOGGER.info(
                            "Parameter %s not found, returning empty string",
                            value)
                        self.stage_parameters[param][key] = ""
                    else:
                        raise
            else:
                try:
                    self.stage_parameters[
                        key] = regional_client.fetch_parameter(value)
                except ParameterNotFoundError:
                    if optional:
                        LOGGER.info(
                            "Parameter %s not found, returning empty string",
                            value)
                        self.stage_parameters[key] = ""
                    else:
                        raise
            return True
        [_, value] = value.split(':')
        if value.endswith('?'):
            value = value[:-1]
            LOGGER.info("Parameter %s is considered optional", value)
            optional = True
        LOGGER.info("Fetching Parameter from %s", value)
        regional_client = ParameterStore(DEFAULT_REGION, boto3)
        if param:
            try:
                self.stage_parameters[param][
                    key] = regional_client.fetch_parameter(value)
            except ParameterNotFoundError:
                if optional:
                    LOGGER.info(
                        "Parameter %s not found, returning empty string",
                        value)
                    self.stage_parameters[param][key] = ""
                else:
                    raise
        else:
            try:
                self.stage_parameters[key] = regional_client.fetch_parameter(
                    value)
            except ParameterNotFoundError:
                if optional:
                    LOGGER.info(
                        "Parameter %s not found, returning empty string",
                        value)
                    self.stage_parameters[key] = ""
                else:
                    raise
        return False

    def update_cfn(self, key, param):
        if key not in self.stage_parameters[param]:
            self.stage_parameters[param][key] = self.comparison_parameters[
                param][key]

    def update_sc(self, key):
        if key not in self.stage_parameters:
            self.stage_parameters[key] = self.comparison_parameters[key]
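
In Example #22 (and the later Resolver variants), fetch_stack_output expects an intrinsic string of the form import:account_id:region:stack_name:export_key, where a trailing ? marks the import as optional. A small standalone sketch of how such a string breaks apart, using invented placeholder values:

# Placeholder values for illustration only.
value = "import:111111111111:eu-west-1:sample-network-stack:VpcId?"
_, account_id, region, stack_name, export = value.split(':')
optional = export.endswith('?')
if optional:
    export = export[:-1]
print(account_id, region, stack_name, export, optional)
# 111111111111 eu-west-1 sample-network-stack VpcId True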
Example #23
class Resolver:
    def __init__(self, parameter_store, stage_parameters,
                 comparison_parameters):
        self.parameter_store = parameter_store
        self.stage_parameters = stage_parameters
        self.comparison_parameters = comparison_parameters
        self.sts = STS()
        self.cache = Cache()

    @staticmethod
    def _is_optional(value):
        return value.endswith('?')

    def fetch_stack_output(self, value, key, optional=False):  # pylint: disable=too-many-statements
        partition = get_partition(DEFAULT_REGION)
        try:
            [_, account_id, region, stack_name,
             output_key] = str(value).split(':')
        except ValueError as error:
            raise ValueError(
                f"{value} is not a valid import string. Syntax should be "
                "import:account_id:region:stack_name:output_key") from error
        if Resolver._is_optional(output_key):
            LOGGER.info("Parameter %s is considered optional", output_key)
            optional = True
        output_key = output_key[:-1] if optional else output_key
        try:
            role = self.sts.assume_cross_account_role(
                f'arn:{partition}:iam::{account_id}:role/adf-readonly-automation-role',
                'importer')
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=os.environ["AWS_REGION"],
                role=role,
                stack_name=stack_name,
                account_id=account_id)
            stack_output = self.cache.check(
                value) or cloudformation.get_stack_output(output_key)
            if stack_output:
                LOGGER.info("Stack output value is %s", stack_output)
                self.cache.add(value, stack_output)
        except ClientError:
            if not optional:
                raise
            stack_output = ""
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
            if optional:
                self.stage_parameters[parent_key][key] = stack_output
            else:
                if not stack_output:
                    raise Exception(
                        f"No Stack Output found on {account_id} in {region} "
                        f"with stack name {stack_name} and "
                        f"output key {output_key}")
                self.stage_parameters[parent_key][key] = stack_output
        except IndexError as error:
            if stack_output:
                if self.stage_parameters.get(key):
                    self.stage_parameters[key] = stack_output
            else:
                raise Exception(
                    "Could not determine the structure of the file in order "
                    "to import from CloudFormation", ) from error
        return True

    def upload(self, value, key, file_name):
        if not any(item in value for item in S3.supported_path_styles()):
            raise Exception(
                'When uploading to S3 you need to specify a path style '
                'to use for the returned value to be used. '
                f'Supported path styles include: {S3.supported_path_styles()}'
            ) from None
        if str(value).count(':') > 2:
            [_, region, style, value] = value.split(':')
        else:
            [_, style, value] = value.split(':')
            region = DEFAULT_REGION
        bucket_name = self.parameter_store.fetch_parameter(
            f'/cross_region/s3_regional_bucket/{region}')
        client = S3(region, bucket_name)
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
        except IndexError:
            if self.stage_parameters.get(key):
                self.stage_parameters[key] = client.put_object(
                    f"adf-upload/{value}/{file_name}",
                    str(value),
                    style,
                    True  # pre-check
                )
            return True
        self.stage_parameters[parent_key][key] = client.put_object(
            f"adf-upload/{value}/{file_name}",
            str(value),
            style,
            True  # pre-check
        )
        return True

    @staticmethod
    def determine_parent_key(d, target_key, parent_key=None):
        for key, value in d.items():
            if key == target_key:
                yield parent_key
            if isinstance(value, dict):
                for result in Resolver.determine_parent_key(
                        value, target_key, key):
                    yield result

    def fetch_parameter_store_value(self, value, key, optional=False):  # pylint: disable=too-many-statements
        if self._is_optional(value):
            LOGGER.info("Parameter %s is considered optional", value)
            optional = True
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
        else:
            [_, value] = value.split(':')
            region = DEFAULT_REGION
        value = value[:-1] if optional else value
        client = ParameterStore(region, boto3)
        try:
            parameter = self.cache.check(
                f'{region}/{value}') or client.fetch_parameter(value)
        except ParameterNotFoundError:
            if optional:
                LOGGER.info("Parameter %s not found, returning empty string",
                            value)
                parameter = ""
            else:
                raise
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
            if parameter:
                self.cache.add(f'{region}/{value}', parameter)
                self.stage_parameters[parent_key][key] = parameter
        except IndexError as error:
            if parameter:
                if self.stage_parameters.get(key):
                    self.stage_parameters[key] = parameter
            else:
                LOGGER.error(
                    "Parameter was not found, unable to fetch it from parameter store"
                )
                raise Exception(
                    "Parameter was not found, unable to fetch it from parameter store"
                ) from error
        return True

    def update(self, key):
        for k, _ in self.comparison_parameters.items():
            if not self.stage_parameters.get(
                    k) and not self.stage_parameters.get(k, {}).get(key):
                self.stage_parameters[k] = self.comparison_parameters[k]
            if key not in self.stage_parameters[
                    k] and self.comparison_parameters.get(k, {}).get(key):
                self.stage_parameters[k][key] = self.comparison_parameters[k][
                    key]
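
The newer Resolver shown in Examples #23 and #25 accepts parameter-store intrinsics both with and without an explicit region, again with an optional trailing ?. A brief standalone sketch of that parsing, with an invented parameter path and DEFAULT_REGION standing in for the module constant:

DEFAULT_REGION = "eu-west-1"  # placeholder for the module-level constant


def parse_resolve_string(value):
    optional = value.endswith('?')
    if value.count(':') > 1:
        _, region, name = value.split(':')
    else:
        _, name = value.split(':')
        region = DEFAULT_REGION
    name = name[:-1] if optional else name
    return region, name, optional


print(parse_resolve_string("resolve:us-east-1:/adf/sample/parameter?"))
# ('us-east-1', '/adf/sample/parameter', True)
print(parse_resolve_string("resolve:/adf/sample/parameter"))
# ('eu-west-1', '/adf/sample/parameter', False)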
Example #24
class Resolver:
    def __init__(self, parameter_store, stage_parameters,
                 comparison_parameters):
        self.parameter_store = parameter_store
        self.stage_parameters = stage_parameters
        self.comparison_parameters = comparison_parameters
        self.s3 = S3(DEFAULT_REGION, S3_BUCKET_NAME)
        self.sts = STS()

    def fetch_stack_output(self, value, param, key=None):
        try:
            [_, account_id, region, stack_name, export] = str(value).split(':')
        except ValueError as error:
            raise ValueError(
                "{0} is not a valid import string. Syntax should be "
                "import:account_id:region:stack_name:export_key"
                .format(str(value))) from error

        LOGGER.info(
            "Assuming the role %s", 'arn:aws:iam::{0}:role/{1}'.format(
                account_id, 'adf-cloudformation-deployment-role'))
        role = self.sts.assume_cross_account_role(
            'arn:aws:iam::{0}:role/{1}'.format(
                account_id, 'adf-cloudformation-deployment-role'), 'importer')
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=os.environ["AWS_REGION"],
            role=role,
            stack_name=stack_name,
            account_id=account_id)
        LOGGER.info("Retrieving value of key %s from %s on %s in %s", export,
                    stack_name, account_id, region)
        stack_output = cloudformation.get_stack_output(export)
        if not stack_output:
            raise Exception("No Key was found on {0} with the name {1}".format(
                stack_name, export))

        LOGGER.info("Stack output value is %s", stack_output)
        if key:
            self.stage_parameters[key][param] = stack_output
            return
        self.stage_parameters[param] = stack_output

    def upload(self, value, key, file_name, param=None):
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
            bucket_name = self.parameter_store.fetch_parameter(
                '/cross_region/s3_regional_bucket/{0}'.format(region))
            regional_client = S3(region, bucket_name)
            LOGGER.info("Uploading %s as %s to S3 Bucket %s in %s", value,
                        file_name, bucket_name, region)
            if param:
                self.stage_parameters[param][key] = regional_client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value))
            else:
                self.stage_parameters[key] = regional_client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value))
            return True
        [_, value] = value.split(':')
        LOGGER.info("Uploading %s to S3", value)
        if param:
            self.stage_parameters[param][key] = self.s3.put_object(
                "adf-upload/{0}/{1}".format(value, file_name),
                "{0}".format(value))
        else:
            self.stage_parameters[key] = self.s3.put_object(
                "adf-upload/{0}/{1}".format(value, file_name),
                "{0}".format(value))
        return False

    def fetch_parameter_store_value(self, value, key, param=None):
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
            regional_client = ParameterStore(region, boto3)
            LOGGER.info("Fetching Parameter from %s", value)
            if param:
                self.stage_parameters[param][
                    key] = regional_client.fetch_parameter(value)
            else:
                self.stage_parameters[key] = regional_client.fetch_parameter(
                    value)
            return True
        [_, value] = value.split(':')
        LOGGER.info("Fetching Parameter from %s", value)
        if param:
            self.stage_parameters[param][
                key] = self.parameter_store.fetch_parameter(value)
        else:
            self.stage_parameters[key] = self.parameter_store.fetch_parameter(
                value)
        return False

    def update_cfn(self, key, param):
        if key not in self.stage_parameters[param]:
            self.stage_parameters[param][key] = self.comparison_parameters[
                param][key]

    def update_sc(self, key):
        if key not in self.stage_parameters:
            self.stage_parameters[key] = self.comparison_parameters[key]
Example #25
class Resolver:
    def __init__(self, parameter_store, stage_parameters,
                 comparison_parameters):
        self.parameter_store = parameter_store
        self.stage_parameters = stage_parameters
        self.comparison_parameters = comparison_parameters
        self.sts = STS()
        self.cache = Cache()

    @staticmethod
    def _is_optional(value):
        return value.endswith('?')

    def fetch_stack_output(self, value, key, optional=False):  # pylint: disable=too-many-statements
        try:
            [_, account_id, region, stack_name,
             output_key] = str(value).split(':')
        except ValueError as error:
            raise ValueError(
                "{0} is not a valid import string. Syntax should be "
                "import:account_id:region:stack_name:output_key"
                .format(str(value))) from error
        if Resolver._is_optional(output_key):
            LOGGER.info("Parameter %s is considered optional", output_key)
            optional = True
        output_key = output_key[:-1] if optional else output_key
        try:
            role = self.sts.assume_cross_account_role(
                'arn:aws:iam::{0}:role/{1}'.format(
                    account_id, 'adf-readonly-automation-role'), 'importer')
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=os.environ["AWS_REGION"],
                role=role,
                stack_name=stack_name,
                account_id=account_id)
            stack_output = self.cache.check(
                value) or cloudformation.get_stack_output(output_key)
            if stack_output:
                LOGGER.info("Stack output value is %s", stack_output)
                self.cache.add(value, stack_output)
        except ClientError:
            if not optional:
                raise
            stack_output = ""
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
            if optional:
                self.stage_parameters[parent_key][key] = stack_output
            else:
                if not stack_output:
                    raise Exception(
                        "No Stack Output found on {account_id} in {region} "
                        "with stack name {stack} and output key "
                        "{output_key}".format(
                            account_id=account_id,
                            region=region,
                            stack=stack_name,
                            output_key=output_key,
                        ))
                self.stage_parameters[parent_key][key] = stack_output
        except IndexError as error:
            if stack_output:
                if self.stage_parameters.get(key):
                    self.stage_parameters[key] = stack_output
            else:
                raise Exception(
                    "Could not determine the structure of the file in order "
                    "to import from CloudFormation"
                ) from error
        return True

    def upload(self, value, key, file_name):
        if not any(item in value
                   for item in ['path', 'virtual-hosted', 's3-key-only']):
            raise Exception(
                'When uploading to S3 you need to specify a '
                'pathing style for the response either path or virtual-hosted, '
                'read more: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html'
            ) from None
        if str(value).count(':') > 2:
            [_, region, style, value] = value.split(':')
        else:
            [_, style, value] = value.split(':')
            region = DEFAULT_REGION
        bucket_name = self.parameter_store.fetch_parameter(
            '/cross_region/s3_regional_bucket/{0}'.format(region))
        client = S3(region, bucket_name)
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
        except IndexError:
            if self.stage_parameters.get(key):
                self.stage_parameters[key] = client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value),
                    style,
                    True  #pre-check
                )
            return True
        self.stage_parameters[parent_key][key] = client.put_object(
            "adf-upload/{0}/{1}".format(value, file_name),
            "{0}".format(value),
            style,
            True  #pre-check
        )
        return True

    @staticmethod
    def determine_parent_key(d, target_key, parent_key=None):
        for key, value in d.items():
            if key == target_key:
                yield parent_key
            if isinstance(value, dict):
                for result in Resolver.determine_parent_key(
                        value, target_key, key):
                    yield result

    def fetch_parameter_store_value(self, value, key, optional=False):  # pylint: disable=too-many-statements
        if self._is_optional(value):
            LOGGER.info("Parameter %s is considered optional", value)
            optional = True
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
        else:
            [_, value] = value.split(':')
            region = DEFAULT_REGION
        value = value[:-1] if optional else value
        client = ParameterStore(region, boto3)
        try:
            parameter = self.cache.check('{0}/{1}'.format(
                region, value)) or client.fetch_parameter(value)
        except ParameterNotFoundError:
            if optional:
                LOGGER.info("Parameter %s not found, returning empty string",
                            value)
                parameter = ""
            else:
                raise
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
            if parameter:
                self.cache.add('{0}/{1}'.format(region, value), parameter)
                self.stage_parameters[parent_key][key] = parameter
        except IndexError as error:
            if parameter:
                if self.stage_parameters.get(key):
                    self.stage_parameters[key] = parameter
            else:
                LOGGER.error(
                    "Parameter was not found, unable to fetch it from parameter store"
                )
                raise Exception(
                    "Parameter was not found, unable to fetch it from parameter store"
                ) from error
        return True

    def update(self, key):
        for k, _ in self.comparison_parameters.items():
            if not self.stage_parameters.get(
                    k) and not self.stage_parameters.get(k, {}).get(key):
                self.stage_parameters[k] = self.comparison_parameters[k]
            if key not in self.stage_parameters[
                    k] and self.comparison_parameters.get(k, {}).get(key):
                self.stage_parameters[k][key] = self.comparison_parameters[k][
                    key]
Example #26
def assume_role(account_id):
    sts = STS()
    return sts.assume_cross_account_role(
        f"arn:aws:iam::{account_id}:role/{ADF_ROLE_NAME}",
        "adf_delete_default_vpc",
    )
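
The object returned by assume_cross_account_role behaves like a boto3 session in these examples (see role.client('ec2') in Examples #6, #18 and #20). A hedged usage sketch for the helper above, with a placeholder account id; it assumes ADF_ROLE_NAME is set and that the caller is allowed to assume that role in the target account:

# Placeholder account id; real callers receive it from the Step Functions event.
role = assume_role("111111111111")
ec2_client = role.client("ec2")
# List the regions enabled for the account, mirroring Example #18.
enabled_regions = [
    region["RegionName"]
    for region in ec2_client.describe_regions(AllRegions=False)["Regions"]
]
print(enabled_regions)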