Code Example #1
def __init__(self, parameter_store, stage_parameters,
             comparison_parameters):
    self.parameter_store = parameter_store
    self.stage_parameters = stage_parameters
    self.comparison_parameters = comparison_parameters
    self.s3 = S3(DEFAULT_REGION, S3_BUCKET_NAME)
    self.sts = STS()
Code Example #2
def lambda_handler(event, _):
    # Return if we need to update the pipelines only
    if event.get('update_only'):
        LOGGER.info('Will only update pipelines for this execution.')
        return event
    try:
        sts = STS(boto3)
        parameter_store = ParameterStore(
            event.get('deployment_account_region'), boto3)
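        # Look up the regional KMS key ARN and S3 bucket for every unique region (deployment region plus targets)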
        for region in list(
                set([event.get('deployment_account_region')] +
                    event.get("regions"))):
            kms_key_arn = parameter_store.fetch_parameter(
                "/cross_region/kms_arn/{0}".format(region))
            s3_bucket = parameter_store.fetch_parameter(
                "/cross_region/s3_regional_bucket/{0}".format(region))
            for account_id in event.get('account_ids'):
                try:
                    role = sts.assume_cross_account_role(
                        'arn:aws:iam::{0}:role/{1}'.format(
                            account_id, 'adf-cloudformation-deployment-role'),
                        'base_cfn_role')
                    IAMUpdater(kms_key_arn, s3_bucket, role)
                    kms = KMS(region, boto3, kms_key_arn, account_id)
                    kms.enable_cross_account_access()
                except ClientError:
                    continue
        return event
    except BaseException as error:
        LOGGER.exception(error)
Code Example #3
def main():
    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store,
                                   os.environ["ADF_PIPELINE_PREFIX"])
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, boto3, S3_BUCKET_NAME)
    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-org-access-adf'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p['targets']:
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    try:
                        regions = step.get(
                            'regions',
                            p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                        pipeline.stage_regions.append(regions)
                        pipeline_target = Target(path, regions,
                                                 target_structure,
                                                 organizations)
                        pipeline_target.fetch_accounts_for_target()
                    except BaseException:
                        raise Exception(
                            "Failed to return accounts for {0}".format(path))

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_if_required(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(os.environ["ADF_PIPELINE_PREFIX"],
                                        pipeline.name),
            s3=None,
            s3_key_path=None)

        cloudformation.create_stack()
Code Example #4
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter(
            'auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for p in deployment_map.map_contents.get('pipelines'):
        thread = PropagatingThread(target=worker_thread,
                                   args=(p, organizations,
                                         auto_create_repositories, s3,
                                         deployment_map, parameter_store))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Code Example #5
def __init__(self, parameter_store, stage_parameters,
             comparison_parameters):
    self.parameter_store = parameter_store
    self.stage_parameters = stage_parameters
    self.comparison_parameters = comparison_parameters
    self.sts = STS()
    self.cache = Cache()
Code Example #6
def lambda_handler(event, _):
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event["account_id"],
            event["cross_account_access_role"]
        ), 'master_lambda'
    )

    if event['is_deployment_account']:
        configure_master_account_parameters(event)
        configure_deployment_account_parameters(event, role)

    s3 = S3(
        region=REGION_DEFAULT,
        bucket=S3_BUCKET
    )

    for region in list(set([event["deployment_account_region"]] + event["regions"])):
        if not event["is_deployment_account"]:
            configure_generic_account(sts, event, region, role)
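        # Create or update the account's stack in this region; the template is read from S3 under the account's full path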
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event["deployment_account_region"],
            role=role,
            wait=False,
            stack_name=None, # Stack name will be automatically defined based on event
            s3=s3,
            s3_key_path=event["full_path"],
            account_id=event["account_id"]
        )
        cloudformation.create_stack()

    return event
Code Example #7
def main():  #pylint: disable=R0915
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p.get('targets', []):
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    regions = step.get(
                        'regions', p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                    step_name = step.get('name')
                    params = step.get('params', {})
                    pipeline.stage_regions.append(regions)
                    pipeline_target = Target(path, regions, target_structure,
                                             organizations, step_name, params)
                    pipeline_target.fetch_accounts_for_target()

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_pipeline(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(ADF_PIPELINE_PREFIX, pipeline.name),
            s3=None,
            s3_key_path=None,
            account_id=DEPLOYMENT_ACCOUNT_ID)
        cloudformation.create_stack()
Code Example #8
def create_or_update_account(org_session, support_session, account, adf_role_name, account_id=None):
    """Creates or updates a single AWS account.
    :param org_session: Instance of Organization class
    :param account: Instance of Account class
    """
    if not account_id:
        LOGGER.info(f'Creating new account {account.full_name}')
        account_id = org_session.create_account(account, adf_role_name)
        # This only runs on account creation at the moment.
        support_session.set_support_level_for_account(account, account_id)

    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            account_id,
            adf_role_name
        ), 'adf_account_provisioning'
    )

    LOGGER.info(f'Ensuring account {account_id} (alias {account.alias}) is in OU {account.ou_path}')
    org_session.move_account(account_id, account.ou_path)
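    # Optionally delete the default VPC in every enabled (non-opt-in) region of the account, in parallel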
    if account.delete_default_vpc:
        ec2_client = role.client('ec2')
        all_regions = [
            region['RegionName']
            for region in ec2_client.describe_regions(
                AllRegions=False,
                Filters=[
                    {
                        'Name': 'opt-in-status',
                        'Values': [
                            'opt-in-not-required',
                        ]
                    }
                ]
            )['Regions']
        ]
        args = (
            (account_id, region, role)
            for region in all_regions
        )
        with ThreadPoolExecutor(max_workers=10) as executor:
            for _ in executor.map(lambda f: schedule_delete_default_vpc(*f), args):
                pass

    if account.alias:
        LOGGER.info(f'Ensuring account alias for {account_id} of {account.alias}')
        org_session.create_account_alias(account.alias, role)

    if account.tags:
        LOGGER.info(f'Ensuring tags exist for account {account_id}: {account.tags}')
        org_session.create_account_tags(account_id, account.tags)
Code Example #9
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    deployment_map = DeploymentMap(
        parameter_store,
        ADF_PIPELINE_PREFIX
    )
    s3 = S3(
        DEPLOYMENT_ACCOUNT_REGION,
        S3_BUCKET_NAME
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for counter, p in enumerate(deployment_map.map_contents.get('pipelines')):
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            s3,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)
        _batcher = counter % 10
        if _batcher == 9:  # counter % 10 == 9 means a full batch of 10 threads has just been started
            _interval = random.randint(5, 11)
            LOGGER.debug('Waiting for %s seconds before starting next batch of 10 threads.', _interval)
            time.sleep(_interval)

    for thread in threads:
        thread.join()
Code Example #10
def lambda_handler(event, _):
    if event.get("alias"):
        sts = STS()
        account_id = event.get("account_id")
        role = sts.assume_cross_account_role(
            f"arn:aws:iam::{account_id}:role/{ADF_ROLE_NAME}",
            "adf_account_alias_config",
        )
        create_account_alias(event, role.client("iam"))
    else:
        LOGGER.info(
            f"Account: {event.get('account_full_name')} does not need an alias"
        )
    return event
Code Example #11
def lambda_handler(event, _):
    parameters = ParameterStore(REGION_DEFAULT, boto3)
    account_id = event.get(
        'detail').get(
            'requestParameters').get('accountId')
    organizations = Organizations(boto3, account_id)
    parsed_event = Event(event, parameters, organizations, account_id)
    cache = Cache()

    if parsed_event.moved_to_root or parsed_event.moved_to_protected:
        return parsed_event.create_output_object(cache)

    parsed_event.set_destination_ou_name()

    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            parsed_event.account_id,
            parsed_event.cross_account_access_role
        ), 'master_lambda'
    )

    if parsed_event.is_deployment_account:
        update_master_account_parameters(parsed_event, parameters)
        configure_deployment_account(parsed_event, role)

    s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

    account_path = parsed_event.organizations.build_account_path(
        parsed_event.destination_ou_id,
        [],  # Initial empty array to hold OU Path
        cache,
    )

    for region in list(set([parsed_event.deployment_account_region] + parsed_event.regions)):
        if not parsed_event.is_deployment_account:
            configure_generic_account(sts, parsed_event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=parsed_event.deployment_account_region,
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=account_path,
            file_path=None,
        )
        cloudformation.create_stack()

    return parsed_event.create_output_object(cache)
Code Example #12
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    _create_inputs_folder()
    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET)
    deployment_map = DeploymentMap(
        parameter_store,
        s3,
        ADF_PIPELINE_PREFIX
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )
    organizations = Organizations(role)
    clean(parameter_store, deployment_map)
    ensure_event_bus_status(ORGANIZATION_ID)
    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'
    threads = []
    _cache = Cache()
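    # For pipelines sourced from another account, ensure the cross-account event rule exists (created once per source account)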
    for p in deployment_map.map_contents.get('pipelines', []):
        _source_account_id = p.get('default_providers', {}).get('source', {}).get('properties', {}).get('account_id', {})
        if _source_account_id and int(_source_account_id) != int(DEPLOYMENT_ACCOUNT_ID) and not _cache.check(_source_account_id):
            rule = Rule(p['default_providers']['source']['properties']['account_id'])
            rule.create_update()
            _cache.add(p['default_providers']['source']['properties']['account_id'], True)
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Code Example #13
def lambda_handler(event, _):
    """Main Lambda Entry point
    """
    sts = STS()
    account_id = event.get('account_id')
    partition = get_partition(REGION_DEFAULT)
    cross_account_access_role = event.get('cross_account_access_role')

    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{account_id}:role/{cross_account_access_role}',
        'master')

    s3 = S3(REGION_DEFAULT, S3_BUCKET)

    for region in list(
            set([event['deployment_account_region']] + event['regions'])):

        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event['deployment_account_region'],
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=event['ou_name'],
            account_id=account_id)

        status = cloudformation.get_stack_status()

        if status in ('CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'):
            raise RetryError(f"CloudFormation Stack status: {status}")

        if status in ('CREATE_FAILED', 'ROLLBACK_FAILED', 'DELETE_FAILED',
                      'UPDATE_ROLLBACK_FAILED', 'ROLLBACK_IN_PROGRESS',
                      'ROLLBACK_COMPLETE'):
            raise Exception(
                f"Account Bootstrap Failed - Account: {account_id} "
                f"Region: {region} Status: {status}")

        if event.get('is_deployment_account'):
            update_deployment_account_output_parameters(
                deployment_account_region=event['deployment_account_region'],
                region=region,
                deployment_account_role=role,
                cloudformation=cloudformation)

    return event
Code Example #14
def lambda_handler(event, _):
    target_role_policies = {
        'adf-cloudformation-deployment-role': 'adf-cloudformation-deployment-role-policy-kms',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    role_policies = {
        'adf-codepipeline-role': 'adf-codepipeline-role-policy',
        'adf-cloudformation-deployment-role': 'adf-cloudformation-deployment-role-policy',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    sts = STS()
    partition = get_partition(REGION_DEFAULT)

    parameter_store = ParameterStore(
        region=event.get('deployment_account_region'),
        role=boto3
    )
    account_id = event.get("account_id")
    kms_key_arns = []
    s3_buckets = []
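    # Per region: fetch the regional KMS key ARN and bucket, then update IAM in the target account if its deployment role can be assumed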
    for region in list(set([event.get('deployment_account_region')] + event.get("regions", []))):
        kms_key_arn = parameter_store.fetch_parameter(
            f"/cross_region/kms_arn/{region}"
        )
        kms_key_arns.append(kms_key_arn)
        s3_bucket = parameter_store.fetch_parameter(
            f"/cross_region/s3_regional_bucket/{region}"
        )
        s3_buckets.append(s3_bucket)
        try:
            role = sts.assume_cross_account_role(
                f'arn:{partition}:iam::{account_id}:role/adf-cloudformation-deployment-role',
                'base_cfn_role'
            )
            LOGGER.debug("Role has been assumed for %s", account_id)
            update_iam(role, s3_bucket, kms_key_arn, target_role_policies)
        except ClientError as err:
            LOGGER.debug("%s could not be assumed (%s), continuing", account_id, err, exc_info=True)
            continue

    update_iam(boto3, s3_buckets, kms_key_arns, role_policies)

    return event
Code Example #15
def lambda_handler(event, _):
    sts = STS(boto3)

    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event.get('deployment_account_id'),
            event.get('cross_account_iam_role')), 'step_function')

    step_functions = StepFunctions(
        role=role,
        deployment_account_id=event.get('deployment_account_id'),
        deployment_account_region=event.get('deployment_account_region'),
        regions=event.get('regions'),
        account_ids=[event.get('account_id')],
        update_pipelines_only=bool(
            event.get('moved_to_protected') or event.get('moved_to_root')))
    step_functions.execute_statemachine()

    return event
Code Example #16
def lambda_handler(event, _):
    """Main Lambda Entry point
    """
    sts = STS(boto3)

    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event.get('account_id'),
            event.get('cross_account_iam_role'),
        ), 'master')

    s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

    for region in list(
            set([event.get('deployment_account_region')] +
                event.get("regions"))):

        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event.get('deployment_account_region'),
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=event.get('ou_name'),
            file_path=None,
        )

        status = cloudformation.get_stack_status()

        if status in ("CREATE_IN_PROGRESS", "UPDATE_IN_PROGRESS"):
            raise RetryError("Cloudformation Stack not yet complete")

        # TODO Better waiting validation to ensure stack is not failed
        if event.get('is_deployment_account'):
            update_deployment_account_output_parameters(
                deployment_account_region=event.get(
                    'deployment_account_region'),
                region=region,
                deployment_account_role=role,
                cloudformation=cloudformation)

    return event
Code Example #17
def lambda_handler(event, _):
    target_role_policies = {
        'adf-cloudformation-deployment-role':
        'adf-cloudformation-deployment-role-policy-kms',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    role_policies = {
        'adf-codepipeline-role': 'adf-codepipeline-role-policy',
        'adf-cloudformation-deployment-role':
        'adf-cloudformation-deployment-role-policy',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    sts = STS()
    parameter_store = ParameterStore(
        region=event.get('deployment_account_region'), role=boto3)
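    # Per region: fetch the regional KMS key and bucket, update the deployment account's own role policies, then update each target account that can be assumed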
    for region in list(
            set([event.get('deployment_account_region')] +
                event.get("regions", []))):
        kms_key_arn = parameter_store.fetch_parameter(
            "/cross_region/kms_arn/{0}".format(region))
        s3_bucket = parameter_store.fetch_parameter(
            "/cross_region/s3_regional_bucket/{0}".format(region))
        update_iam(boto3, s3_bucket, kms_key_arn, role_policies)
        for account_id in event.get('account_ids'):
            try:
                role = sts.assume_cross_account_role(
                    'arn:aws:iam::{0}:role/{1}'.format(
                        account_id, 'adf-cloudformation-deployment-role'),
                    'base_cfn_role')
                LOGGER.debug("Role has been assumed for %s", account_id)
                update_iam(role, s3_bucket, kms_key_arn, target_role_policies)
            except ClientError as err:
                LOGGER.debug("%s could not be assumed (%s), continuing",
                             account_id,
                             err,
                             exc_info=True)
                continue

    return event
Code Example #18
def lambda_handler(event, _):
    sts = STS()

    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(event['deployment_account_id'],
                                           event['cross_account_access_role']),
        'step_function')

    step_functions = StepFunctions(
        role=role,
        deployment_account_id=event['deployment_account_id'],
        deployment_account_region=event['deployment_account_region'],
        full_path=event['full_path'],
        regions=event['regions'],
        account_ids=[event['account_id']],
        update_pipelines_only=1 if event.get('moved_to_protected')
        or event.get('moved_to_root') else 0,
        error=event.get('error', 0))
    step_functions.execute_statemachine()

    return event
Code Example #19
def remove_base(account_id, regions, role, event):
    sts = STS()
    threads = []

    for region in list(set([event.get('deployment_account_region')] +
                           regions)):
        t = PropagatingThread(target=worker_thread,
                              args=(sts, region, account_id, role, event))
        t.start()
        threads.append(t)

    for thread in threads:
        thread.join()
Code Example #20
def lambda_handler(event, _):
    sts = STS()

    account_id = event["account_id"]
    cross_account_access_role = event["cross_account_access_role"]
    role_arn = f'arn:{PARTITION}:iam::{account_id}:role/{cross_account_access_role}'

    role = sts.assume_cross_account_role(role_arn=role_arn,
                                         role_session_name='management_lambda')

    if event['is_deployment_account']:
        configure_master_account_parameters(event)
        configure_deployment_account_parameters(event, role)

    s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET)

    for region in list(
            set([event["deployment_account_region"]] + event["regions"])):
        if not event["is_deployment_account"]:
            configure_generic_account(sts, event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event["deployment_account_region"],
            role=role,
            wait=True,
            # Stack name will be automatically defined based on event
            stack_name=None,
            s3=s3,
            s3_key_path=event["full_path"],
            account_id=account_id)
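        # An account moved between OUs gets its existing base stacks removed before the new ones are created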
        if is_inter_ou_account_move(event):
            cloudformation.delete_all_base_stacks(True)  # override Wait
        cloudformation.create_stack()
        if region == event["deployment_account_region"]:
            cloudformation.create_iam_stack()

    return event
Code Example #21
def lambda_handler(event, _):
    sts = STS(boto3)
    parameter_store = ParameterStore(event.get('deployment_account_region'),
                                     boto3)
    for region in list(
            set([event.get('deployment_account_region')] +
                event.get("regions"))):
        kms_key_arn = parameter_store.fetch_parameter(
            "/cross_region/kms_arn/{0}".format(region))
        s3_bucket = parameter_store.fetch_parameter(
            "/cross_region/s3_regional_bucket/{0}".format(region))
        for account_id in event.get('account_ids'):
            try:
                role = sts.assume_cross_account_role(
                    'arn:aws:iam::{0}:role/{1}'.format(
                        account_id, 'adf-cloudformation-deployment-role'),
                    'base_cfn_role')
                IAMUpdater(kms_key_arn, s3_bucket, role)
                kms = KMS(region, boto3, kms_key_arn, account_id)
                kms.enable_cross_account_access()
            except ClientError:
                continue

    return event
Code Example #22
def lambda_handler(event, _):
    LOGGER.info(f"Fetching Default regions {event.get('account_full_name')}")
    sts = STS()
    account_id = event.get("account_id")
    role = sts.assume_cross_account_role(
        f"arn:aws:iam::{account_id}:role/{ADF_ROLE_NAME}",
        "adf_account_get_regions",
    )

    ec2_client = role.client("ec2")
    default_regions = [
        region["RegionName"] for region in ec2_client.describe_regions(
            AllRegions=False,
            Filters=[{
                "Name": "opt-in-status",
                "Values": ["opt-in-not-required", "opted-in"],
            }],
        )["Regions"]
    ]
    LOGGER.debug(f"Default regions for {account_id}: {default_regions}")
    return {
        **event,
        "default_regions": default_regions,
    }
Code Example #23
def lambda_handler(event, _):
    sts = STS()

    deployment_account_id = event.get('deployment_account_id')
    partition = get_partition(REGION_DEFAULT)
    cross_account_access_role = event.get('cross_account_access_role')

    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{deployment_account_id}:role/{cross_account_access_role}',
        'step_function')

    step_functions = StepFunctions(
        role=role,
        deployment_account_id=deployment_account_id,
        deployment_account_region=event['deployment_account_region'],
        full_path=event['full_path'],
        regions=event['regions'],
        account_ids=[event['account_id']],
        update_pipelines_only=1 if event.get('moved_to_protected')
        or event.get('moved_to_root') else 0,
        error=event.get('error', 0))
    step_functions.execute_statemachine()

    return event
Code Example #24
def create_or_update_account(org_session,
                             account,
                             adf_role_name,
                             account_id=None):
    """Creates or updates a single AWS account.
    :param org_session: Instance of Organization class
    :param account: Instance of Account class
    """
    if not account_id:
        LOGGER.info(f'Creating new account {account.full_name}')
        account_id = org_session.create_account(account, adf_role_name)
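    # Assume the ADF role in the account so it can be configured directly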
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(account_id, adf_role_name),
        'delete_default_vpc')

    LOGGER.info(
        f'Ensuring account {account_id} (alias {account.alias}) is in OU {account.ou_path}'
    )
    org_session.move_account(account_id, account.ou_path)
    if account.delete_default_vpc:
        ec2_client = role.client('ec2')
        all_regions = get_all_regions(ec2_client)
        args = ((account_id, region, role) for region in all_regions)
        with ThreadPoolExecutor(max_workers=10) as executor:
            for _ in executor.map(lambda f: schedule_delete_default_vpc(*f),
                                  args):
                pass

    LOGGER.info(f'Ensuring account alias for {account_id} of {account.alias}')
    org_session.create_account_alias(account.alias, role)

    if account.tags:
        LOGGER.info(
            f'Ensuring tags exist for account {account_id}: {account.tags}')
        org_session.create_account_tags(account_id, account.tags)
Code Example #25
def main():
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')

        organizations = Organizations(boto3, deployment_account_id)

        sts = STS(boto3)
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(ou_id=ou_id,
                                                        account_path=[],
                                                        cache=cache)
        s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET_NAME)

        # First Setup the Deployment Account in all regions (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path)

            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in account_ids:
            t = PropagatingThread(target=worker_thread,
                                  args=(account_id, sts, config, s3, cache))
            t.start()
            threads.append(t)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=[
                i for i in account_ids if i != deployment_account_id
            ],
            update_pipelines_only=1)

        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info("Deployment Account has not yet been Bootstrapped.")
        return
Code Example #26
def main():  # pylint: disable=R0915
    LOGGER.info("ADF Version %s", ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    policies = OrganizationPolicy()
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')
        organizations = Organizations(role=boto3,
                                      account_id=deployment_account_id)
        policies.apply(organizations, parameter_store, config.config)
        sts = STS()
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(ou_id=ou_id,
                                                        account_path=[],
                                                        cache=cache)
        s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET_NAME)

        kms_and_bucket_dict = {}
        # First Setup/Update the Deployment Account in all regions (KMS Key and
        # S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path="adf-bootstrap/" + account_path,
                account_id=deployment_account_id)
            cloudformation.create_stack()
            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                kms_and_bucket_dict=kms_and_bucket_dict,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)
            if region == config.deployment_account_region:
                cloudformation.create_iam_stack()

        # Updating the stack on the master account in deployment region
        cloudformation = CloudFormation(
            region=config.deployment_account_region,
            deployment_account_region=config.deployment_account_region,
            role=boto3,
            wait=True,
            stack_name=None,
            s3=s3,
            s3_key_path='adf-build',
            account_id=ACCOUNT_ID)
        cloudformation.create_stack()
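        # Process every other account in the organization in parallel, one worker thread per account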
        threads = []
        account_ids = [
            account_id["Id"] for account_id in organizations.get_accounts()
        ]
        for account_id in [
                account for account in account_ids
                if account != deployment_account_id
        ]:
            thread = PropagatingThread(target=worker_thread,
                                       args=(account_id, sts, config, s3,
                                             cache, kms_and_bucket_dict))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        LOGGER.info("Executing Step Function on Deployment Account")
        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=account_ids,
            update_pipelines_only=0)

        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info(
            'A Deployment Account is ready to be bootstrapped! '
            'The Account provisioner will now kick into action, '
            'be sure to check out its progress in AWS Step Functions in this account.'
        )
        return
Code Example #27
def main():
    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')

        config = Config()
        config.store_config()

        if deployment_account_id is None:
            raise NotConfiguredError(
                "Deployment Account has not yet been configured")

        organizations = Organizations(boto3, deployment_account_id)

        sts = STS(boto3)
        ou_id = organizations.get_parent_info().get("ou_parent_id")

        deployment_account_role = ensure_deployment_account_configured(
            sts, config, ou_id, deployment_account_id)

        cache = Cache()
        account_path = organizations.build_account_path(
            ou_id,
            [],  # Initial empty array to hold OU Path
            cache)

        s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

        # (First) Setup the Deployment Account in all regions (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                file_path=None,
            )

            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in account_ids:
            t = PropagatingThread(target=worker_thread,
                                  args=(account_id, sts, config, s3, cache))
            t.start()
            threads.append(t)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=[
                i for i in account_ids if i != deployment_account_id
            ],
            update_pipelines_only=False)

        step_functions.execute_statemachine()

    except NotConfiguredError as not_configured_error:
        LOGGER.info(not_configured_error)
Code Example #28
properties associated with a pipeline.
"""

import os
from cloudformation import CloudFormation
from s3 import S3
from sts import STS
from logger import configure_logger

LOGGER = configure_logger(__name__)
TARGET_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DEPLOYMENT_ACCOUNT_ID = os.environ["ACCOUNT_ID"]
DEPLOYMENT_ACCOUNT_REGION = os.environ.get("AWS_REGION", 'us-east-1')
CODE_ACCOUNT_REGION = os.environ.get("AWS_REGION", 'us-east-1')
S3_BUCKET_NAME = os.environ["S3_BUCKET_NAME"]
sts = STS()
s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)


class Repo:
    def __init__(self, account_id, name, description=''):
        self.name = name
        if not description:
            description = 'Created by ADF'

        self.description = description
        self.stack_name = "{0}-{1}".format('adf-codecommit', self.name)
        self.account_id = account_id
        self.session = sts.assume_cross_account_role(
            'arn:aws:iam::{0}:role/adf-cloudformation-deployment-role'.format(
                account_id), 'create_repo_{0}'.format(account_id))
Code Example #29
class Resolver:
    def __init__(self, parameter_store, stage_parameters,
                 comparison_parameters):
        self.parameter_store = parameter_store
        self.stage_parameters = stage_parameters
        self.comparison_parameters = comparison_parameters
        self.sts = STS()
        self.cache = Cache()

    @staticmethod
    def _is_optional(value):
        return value.endswith('?')

    def fetch_stack_output(self, value, key, optional=False):  # pylint: disable=too-many-statements
        try:
            [_, account_id, region, stack_name,
             output_key] = str(value).split(':')
        except ValueError:
            raise ValueError(
                "{0} is not a valid import string."
                "syntax should be import:account_id:region:stack_name:output_key"
                .format(str(value)))
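        # A trailing '?' on the output key marks the import as optional; strip it before the lookup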
        if Resolver._is_optional(output_key):
            LOGGER.info("Parameter %s is considered optional", output_key)
            optional = True
        output_key = output_key[:-1] if optional else output_key
        try:
            role = self.sts.assume_cross_account_role(
                'arn:aws:iam::{0}:role/{1}'.format(
                    account_id, 'adf-readonly-automation-role'), 'importer')
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=os.environ["AWS_REGION"],
                role=role,
                stack_name=stack_name,
                account_id=account_id)
            stack_output = self.cache.check(
                value) or cloudformation.get_stack_output(output_key)
            if stack_output:
                LOGGER.info("Stack output value is %s", stack_output)
                self.cache.add(value, stack_output)
        except ClientError:
            if not optional:
                raise
            stack_output = ""
            pass
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
            if optional:
                self.stage_parameters[parent_key][key] = stack_output
            else:
                if not stack_output:
                    raise Exception(
                        "No Stack Output found on {account_id} in {region} "
                        "with stack name {stack} and output key "
                        "{output_key}".format(
                            account_id=account_id,
                            region=region,
                            stack=stack_name,
                            output_key=output_key,
                        ))
                self.stage_parameters[parent_key][key] = stack_output
        except IndexError:
            if stack_output:
                if self.stage_parameters.get(key):
                    self.stage_parameters[key] = stack_output
            else:
                raise Exception(
                    "Could not determine the structure of the file in order to import from CloudFormation"
                )
        return True

    def upload(self, value, key, file_name):
        if not any(item in value
                   for item in ['path', 'virtual-hosted', 's3-key-only']):
            raise Exception(
                'When uploading to S3 you need to specify a '
                'pathing style for the response either path or virtual-hosted, '
                'read more: https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html'
            ) from None
        if str(value).count(':') > 2:
            [_, region, style, value] = value.split(':')
        else:
            [_, style, value] = value.split(':')
            region = DEFAULT_REGION
        bucket_name = self.parameter_store.fetch_parameter(
            '/cross_region/s3_regional_bucket/{0}'.format(region))
        client = S3(region, bucket_name)
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
        except IndexError:
            if self.stage_parameters.get(key):
                self.stage_parameters[key] = client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value),
                    style,
                    True  #pre-check
                )
            return True
        self.stage_parameters[parent_key][key] = client.put_object(
            "adf-upload/{0}/{1}".format(value, file_name),
            "{0}".format(value),
            style,
            True  #pre-check
        )
        return True

    @staticmethod
    def determine_parent_key(d, target_key, parent_key=None):
        for key, value in d.items():
            if key == target_key:
                yield parent_key
            if isinstance(value, dict):
                for result in Resolver.determine_parent_key(
                        value, target_key, key):
                    yield result

    def fetch_parameter_store_value(self, value, key, optional=False):  # pylint: disable=too-many-statements
        if self._is_optional(value):
            LOGGER.info("Parameter %s is considered optional", value)
            optional = True
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
        else:
            [_, value] = value.split(':')
            region = DEFAULT_REGION
        value = value[:-1] if optional else value
        client = ParameterStore(region, boto3)
        try:
            parameter = self.cache.check('{0}/{1}'.format(
                region, value)) or client.fetch_parameter(value)
        except ParameterNotFoundError:
            if optional:
                LOGGER.info("Parameter %s not found, returning empty string",
                            value)
                parameter = ""
            else:
                raise
        try:
            parent_key = list(
                Resolver.determine_parent_key(self.comparison_parameters,
                                              key))[0]
            if parameter:
                self.cache.add('{0}/{1}'.format(region, value), parameter)
                self.stage_parameters[parent_key][key] = parameter
        except IndexError as error:
            if parameter:
                if self.stage_parameters.get(key):
                    self.stage_parameters[key] = parameter
            else:
                LOGGER.error(
                    "Parameter was not found, unable to fetch it from parameter store"
                )
                raise Exception(
                    "Parameter was not found, unable to fetch it from parameter store"
                ) from error
        return True

    def update(self, key):
        for k, _ in self.comparison_parameters.items():
            if not self.stage_parameters.get(
                    k) and not self.stage_parameters.get(k, {}).get(key):
                self.stage_parameters[k] = self.comparison_parameters[k]
            if key not in self.stage_parameters[
                    k] and self.comparison_parameters.get(k, {}).get(key):
                self.stage_parameters[k][key] = self.comparison_parameters[k][
                    key]
Code Example #30
class Resolver:
    def __init__(self, parameter_store, stage_parameters,
                 comparison_parameters):
        self.parameter_store = parameter_store
        self.stage_parameters = stage_parameters
        self.comparison_parameters = comparison_parameters
        self.s3 = S3(DEFAULT_REGION, S3_BUCKET_NAME)
        self.sts = STS()

    def fetch_stack_output(self, value, param, key=None):
        try:
            [_, account_id, region, stack_name, export] = str(value).split(':')
        except ValueError:
            raise ValueError(
                "{0} is not a valid import string."
                "syntax should be import:account_id:region:stack_name:export_key"
                .format(str(value)))

        LOGGER.info(
            "Assuming the role %s", 'arn:aws:iam::{0}:role/{1}'.format(
                account_id, 'adf-cloudformation-deployment-role'))
        role = self.sts.assume_cross_account_role(
            'arn:aws:iam::{0}:role/{1}'.format(
                account_id, 'adf-cloudformation-deployment-role'), 'importer')
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=os.environ["AWS_REGION"],
            role=role,
            stack_name=stack_name,
            account_id=account_id)
        LOGGER.info("Retrieving value of key %s from %s on %s in %s", export,
                    stack_name, account_id, region)
        stack_output = cloudformation.get_stack_output(export)
        if not stack_output:
            raise Exception("No Key was found on {0} with the name {1}".format(
                stack_name, export))

        LOGGER.info("Stack output value is %s", stack_output)
        if key:
            self.stage_parameters[key][param] = stack_output
            return
        self.stage_parameters[param] = stack_output

    def upload(self, value, key, file_name, param=None):
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
            bucket_name = self.parameter_store.fetch_parameter(
                '/cross_region/s3_regional_bucket/{0}'.format(region))
            regional_client = S3(region, bucket_name)
            LOGGER.info("Uploading %s as %s to S3 Bucket %s in %s", value,
                        file_name, bucket_name, region)
            if param:
                self.stage_parameters[param][key] = regional_client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value))
            else:
                self.stage_parameters[key] = regional_client.put_object(
                    "adf-upload/{0}/{1}".format(value, file_name),
                    "{0}".format(value))
            return True
        [_, value] = value.split(':')
        LOGGER.info("Uploading %s to S3", value)
        if param:
            self.stage_parameters[param][key] = self.s3.put_object(
                "adf-upload/{0}/{1}".format(value, file_name),
                "{0}".format(value))
        else:
            self.stage_parameters[key] = self.s3.put_object(
                "adf-upload/{0}/{1}".format(value, file_name),
                "{0}".format(value))
        return False

    def fetch_parameter_store_value(self, value, key, param=None):
        if str(value).count(':') > 1:
            [_, region, value] = value.split(':')
            regional_client = ParameterStore(region, boto3)
            LOGGER.info("Fetching Parameter from %s", value)
            if param:
                self.stage_parameters[param][
                    key] = regional_client.fetch_parameter(value)
            else:
                self.stage_parameters[key] = regional_client.fetch_parameter(
                    value)
            return True
        [_, value] = value.split(':')
        LOGGER.info("Fetching Parameter from %s", value)
        if param:
            self.stage_parameters[param][
                key] = self.parameter_store.fetch_parameter(value)
        else:
            self.stage_parameters[key] = self.parameter_store.fetch_parameter(
                value)
        return False

    def update_cfn(self, key, param):
        if key not in self.stage_parameters[param]:
            self.stage_parameters[param][key] = self.comparison_parameters[
                param][key]

    def update_sc(self, key):
        if key not in self.stage_parameters:
            self.stage_parameters[key] = self.comparison_parameters[key]