def lambda_handler(event, _):
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            event["account_id"],
            event["cross_account_access_role"]
        ), 'master_lambda'
    )

    if event['is_deployment_account']:
        configure_master_account_parameters(event)
        configure_deployment_account_parameters(event, role)

    s3 = S3(
        region=REGION_DEFAULT,
        bucket=S3_BUCKET
    )

    for region in list(set([event["deployment_account_region"]] + event["regions"])):
        if not event["is_deployment_account"]:
            configure_generic_account(sts, event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event["deployment_account_region"],
            role=role,
            wait=False,
            stack_name=None,  # Stack name will be automatically defined based on event
            s3=s3,
            s3_key_path=event["full_path"],
            account_id=event["account_id"]
        )
        cloudformation.create_stack()

    return event

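# For reference, a minimal sketch of the `event` payload the handler above
# consumes. The keys are taken directly from the dictionary lookups in the
# handler; the values are illustrative placeholders, not real account data.
example_event = {
    "account_id": "111111111111",                  # placeholder account ID
    "cross_account_access_role": "OrganizationAccountAccessRole",  # assumed role name
    "is_deployment_account": False,
    "deployment_account_region": "eu-west-1",      # placeholder region
    "regions": ["eu-west-1", "us-east-1"],         # placeholder target regions
    "full_path": "deployment",                     # OU path, used as the S3 key path
}
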
def main():
    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(
        parameter_store,
        os.environ["ADF_PIPELINE_PREFIX"]
    )
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, boto3, S3_BUCKET_NAME)
    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-org-access-adf'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p['targets']:
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    try:
                        regions = step.get(
                            'regions', p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                        pipeline.stage_regions.append(regions)
                        pipeline_target = Target(
                            path, regions, target_structure, organizations)
                        pipeline_target.fetch_accounts_for_target()
                    except BaseException as error:
                        # Chain the original error so the traceback is kept
                        raise Exception(
                            "Failed to return accounts for {0}".format(path)
                        ) from error

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_if_required(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(
                os.environ["ADF_PIPELINE_PREFIX"], pipeline.name),
            s3=None,
            s3_key_path=None)
        cloudformation.create_stack()

def create_update(self): s3_object_path = s3.put_object( "adf-build/repo_templates/codecommit.yml", "{0}/adf-build/repo_templates/codecommit.yml".format(TARGET_DIR)) cloudformation = CloudFormation( region=CODE_ACCOUNT_REGION, deployment_account_region=CODE_ACCOUNT_REGION, role=self.session, template_url=s3_object_path, parameters=self.define_repo_parameters(), wait=True, stack_name=self.stack_name, s3=None, s3_key_path=None, account_id=DEPLOYMENT_ACCOUNT_ID, ) # Update the stack if the repo and the adf contolled stack exist update_stack = (self.repo_exists() and cloudformation.get_stack_status()) if not self.repo_exists() or update_stack: LOGGER.info( 'Creating Stack for Codecommit Repository %s on Account %s', self.name, self.account_id) cloudformation.create_stack()
def create_update(self):
    s3_object_path = s3.put_object(
        "adf-build/templates/codecommit.yml",
        f"{TARGET_DIR}/adf-build/templates/codecommit.yml",
    )
    cloudformation = CloudFormation(
        region=DEPLOYMENT_ACCOUNT_REGION,
        deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
        role=self.session,
        template_url=s3_object_path,
        parameters=self.define_repo_parameters(),
        wait=True,
        stack_name=self.stack_name,
        s3=None,
        s3_key_path=None,
        account_id=DEPLOYMENT_ACCOUNT_ID,
    )
    # Update the stack if the repo and the ADF controlled stack exist;
    # return early if the repo exists but has no stack (previously made)
    _repo_exists = self.repo_exists()
    _stack_exists = cloudformation.get_stack_status()
    if _repo_exists and not _stack_exists:
        # The repository pre-dates ADF management; leave it untouched
        return
    LOGGER.info(
        'Ensuring State for CodeCommit Repository Stack %s on Account %s',
        self.name,
        self.account_id,
    )
    cloudformation.create_stack()

def worker_thread(p, organizations, auto_create_repositories,
                  s3, deployment_map, parameter_store):
    pipeline = Pipeline(p)

    if auto_create_repositories == 'enabled':
        try:
            code_account_id = next(
                param['SourceAccountId']
                for param in p['params'] if 'SourceAccountId' in param)
            has_custom_repo = bool(
                [item for item in p['params'] if 'RepositoryName' in item])
            if auto_create_repositories and code_account_id and str(
                    code_account_id).isdigit() and not has_custom_repo:
                repo = Repo(
                    code_account_id, p.get('name'), p.get('description'))
                repo.create_update()
        except StopIteration:
            LOGGER.debug(
                "No need to create repository as SourceAccountId is not found in params")

    for target in p.get('targets', []):
        target_structure = TargetStructure(target)
        for step in target_structure.target:
            for path in step.get('path'):
                regions = step.get(
                    'regions', p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                step_name = step.get('name')
                params = step.get('params', {})
                pipeline.stage_regions.append(regions)
                pipeline_target = Target(
                    path, regions, target_structure, organizations,
                    step_name, params)
                pipeline_target.fetch_accounts_for_target()

        pipeline.template_dictionary["targets"].append(
            target_structure.account_list)

    if DEPLOYMENT_ACCOUNT_REGION not in regions:
        pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

    parameters = pipeline.generate_parameters()
    pipeline.generate()
    deployment_map.update_deployment_parameters(pipeline)
    s3_object_path = upload_pipeline(s3, pipeline)

    store_regional_parameter_config(pipeline, parameter_store)
    cloudformation = CloudFormation(
        region=DEPLOYMENT_ACCOUNT_REGION,
        deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
        role=boto3,
        template_url=s3_object_path,
        parameters=parameters,
        wait=True,
        stack_name="{0}-{1}".format(ADF_PIPELINE_PREFIX, pipeline.name),
        s3=None,
        s3_key_path=None,
        account_id=DEPLOYMENT_ACCOUNT_ID)
    cloudformation.create_stack()

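# A sketch of how this worker_thread might be dispatched, one thread per
# pipeline definition, mirroring the PropagatingThread pattern the main()
# functions in this file use for account-level workers. It assumes
# deployment_map, organizations, s3 and parameter_store were set up as in
# those functions, and that auto_create_repositories was fetched from the
# parameter store beforehand.
threads = []
for p in deployment_map.map_contents.get('pipelines'):
    thread = PropagatingThread(target=worker_thread, args=(
        p, organizations, auto_create_repositories,
        s3, deployment_map, parameter_store))
    thread.start()
    threads.append(thread)

# Wait for all pipeline workers; PropagatingThread re-raises their errors
for thread in threads:
    thread.join()
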
def main():  # pylint: disable=R0915
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)
    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p.get('targets', []):
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    regions = step.get(
                        'regions', p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                    step_name = step.get('name')
                    params = step.get('params', {})
                    pipeline.stage_regions.append(regions)
                    pipeline_target = Target(
                        path, regions, target_structure, organizations,
                        step_name, params)
                    pipeline_target.fetch_accounts_for_target()

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_pipeline(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(ADF_PIPELINE_PREFIX, pipeline.name),
            s3=None,
            s3_key_path=None,
            account_id=DEPLOYMENT_ACCOUNT_ID)
        cloudformation.create_stack()

def worker_thread(account_id, sts, config, s3, cache, kms_dict):
    """
    The worker thread function that is created for each account
    in which CloudFormation create_stack is called.
    """
    LOGGER.debug("%s - Starting new worker thread", account_id)

    organizations = Organizations(role=boto3, account_id=account_id)
    ou_id = organizations.get_parent_info().get("ou_parent_id")

    account_state = is_account_in_invalid_state(ou_id, config.config)
    if account_state:
        LOGGER.info("%s %s", account_id, account_state)
        return

    account_path = organizations.build_account_path(
        ou_id,
        [],  # Initial empty array to hold OU Path
        cache)
    try:
        role = ensure_generic_account_can_be_setup(sts, config, account_id)

        # Regional base stacks can be updated after global
        for region in list(
                set([config.deployment_account_region] + config.target_regions)):
            # Ensure the kms_arn on the target account is up to date
            parameter_store = ParameterStore(region, role)
            parameter_store.put_parameter('kms_arn', kms_dict[region])

            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                account_id=account_id)
            try:
                cloudformation.create_stack()
            except GenericAccountConfigureError as error:
                if 'Unable to fetch parameters' in str(error):
                    LOGGER.error(
                        '%s - Failed to update its base stack due to missing '
                        'parameters (deployment_account_id or kms_arn). '
                        'Ensure this account has been bootstrapped correctly '
                        'by being moved from the root into an Organizational '
                        'Unit within AWS Organizations.',
                        account_id)
                raise Exception from error
    except GenericAccountConfigureError as generic_account_error:
        LOGGER.info(generic_account_error)
        return

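# For illustration, the shape of the kms_dict argument consumed above: a
# mapping from each bootstrap region to the KMS key ARN for that region,
# presumably collected from the deployment account's output parameters
# (see kms_and_bucket_dict in the main() further down). The ARNs here are
# placeholders, not real keys.
example_kms_dict = {
    "eu-west-1": "arn:aws:kms:eu-west-1:222222222222:key/"
                 "aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee",
    "us-east-1": "arn:aws:kms:us-east-1:222222222222:key/"
                 "ffffffff-0000-1111-2222-333333333333",
}
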
def worker_thread(template_path, name, s3):
    s3_object_path = upload_pipeline(template_path, name, s3)
    cloudformation = CloudFormation(
        region=DEPLOYMENT_ACCOUNT_REGION,
        deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
        role=boto3,
        template_url=s3_object_path,
        parameters=[],
        wait=True,
        stack_name=f"{ADF_PIPELINE_PREFIX}{name}",
        s3=None,
        s3_key_path=None,
        account_id=DEPLOYMENT_ACCOUNT_ID)
    cloudformation.create_stack()

def lambda_handler(event, _):
    parameters = ParameterStore(REGION_DEFAULT, boto3)
    account_id = event.get(
        'detail').get(
            'requestParameters').get('accountId')
    organizations = Organizations(boto3, account_id)
    parsed_event = Event(event, parameters, organizations, account_id)
    cache = Cache()

    if parsed_event.moved_to_root or parsed_event.moved_to_protected:
        return parsed_event.create_output_object(cache)

    parsed_event.set_destination_ou_name()

    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            parsed_event.account_id,
            parsed_event.cross_account_access_role
        ), 'master_lambda'
    )

    if parsed_event.is_deployment_account:
        update_master_account_parameters(parsed_event, parameters)
        configure_deployment_account(parsed_event, role)

    s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

    account_path = parsed_event.organizations.build_account_path(
        parsed_event.destination_ou_id,
        [],  # Initial empty array to hold OU Path
        cache,
    )

    for region in list(
            set([parsed_event.deployment_account_region] + parsed_event.regions)):
        if not parsed_event.is_deployment_account:
            configure_generic_account(sts, parsed_event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=parsed_event.deployment_account_region,
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=account_path,
            file_path=None,
        )
        cloudformation.create_stack()

    return parsed_event.create_output_object(cache)

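# For reference, a minimal sketch of the kind of event this handler expects:
# a CloudWatch Events / EventBridge notification for an AWS Organizations
# MoveAccount call. Only the field the handler reads directly is shown; the
# real event carries many more fields (source, detail-type, and the rest of
# requestParameters), and the account ID below is a placeholder.
example_move_account_event = {
    "detail": {
        "requestParameters": {
            "accountId": "111111111111"  # placeholder moved-account ID
        }
    }
}
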
def lambda_handler(event, _):
    s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET)
    cloudformation = CloudFormation(
        region=event['deployment_account_region'],
        deployment_account_region=event['deployment_account_region'],
        role=boto3,
        wait=True,
        stack_name=None,
        s3=s3,
        s3_key_path='adf-build')
    cloudformation.create_stack()

    return event

def create_update(self): s3_object_path = s3.put_object( "adf-build/templates/events.yml", "{0}/templates/events.yml".format(TARGET_DIR) ) cloudformation = CloudFormation( region=SOURCE_ACCOUNT_REGION, deployment_account_region=SOURCE_ACCOUNT_REGION, role=self.role, template_url=s3_object_path, parameters=[], wait=True, stack_name=self.stack_name, s3=None, s3_key_path=None, account_id=DEPLOYMENT_ACCOUNT_ID, ) LOGGER.info('Ensuring Stack State for Event Rule forwarding from %s to %s', self.source_account_id, DEPLOYMENT_ACCOUNT_ID) cloudformation.create_stack()
def worker_thread(account_id, sts, config, s3, cache):
    """
    The worker thread function that is created for each account
    in which CloudFormation create_stack is called.
    """
    LOGGER.info("Starting new worker thread for %s", account_id)

    organizations = Organizations(boto3, account_id)
    ou_id = organizations.get_parent_info().get("ou_parent_id")

    if is_account_invalid_state(ou_id, config.config):
        LOGGER.info("%s is in an invalid state", account_id)
        return

    account_path = organizations.build_account_path(
        ou_id,
        [],  # Initial empty array to hold OU Path
        cache)
    LOGGER.info("The account path for %s is %s", account_id, account_path)

    try:
        role = ensure_generic_account_can_be_setup(sts, config, account_id)

        # Regional base stacks can be updated after global
        for region in list(
                set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path)
            cloudformation.create_stack()
    except GenericAccountConfigureError as generic_account_error:
        LOGGER.info(generic_account_error)
        return

def lambda_handler(event, _):
    sts = STS()
    account_id = event["account_id"]
    cross_account_access_role = event["cross_account_access_role"]
    role_arn = f'arn:{PARTITION}:iam::{account_id}:role/{cross_account_access_role}'
    role = sts.assume_cross_account_role(
        role_arn=role_arn,
        role_session_name='management_lambda')

    if event['is_deployment_account']:
        configure_master_account_parameters(event)
        configure_deployment_account_parameters(event, role)

    s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET)

    for region in list(
            set([event["deployment_account_region"]] + event["regions"])):
        if not event["is_deployment_account"]:
            configure_generic_account(sts, event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event["deployment_account_region"],
            role=role,
            wait=True,
            # Stack name will be automatically defined based on event
            stack_name=None,
            s3=s3,
            s3_key_path=event["full_path"],
            account_id=account_id)
        if is_inter_ou_account_move(event):
            cloudformation.delete_all_base_stacks(True)  # override Wait
        cloudformation.create_stack()
        if region == event["deployment_account_region"]:
            cloudformation.create_iam_stack()

    return event

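# The role ARN built above is partition-aware. A small illustration of what
# it resolves to; the helper and all values below are hypothetical and only
# demonstrate the f-string, they are not part of ADF itself.
def build_cross_account_role_arn(partition, account_id, role_name):
    # Hypothetical helper mirroring the f-string in the handler above
    return f'arn:{partition}:iam::{account_id}:role/{role_name}'

# Standard partition vs. AWS GovCloud (US); "aws-cn" would be China
assert build_cross_account_role_arn(
    'aws', '111111111111', 'OrganizationAccountAccessRole'
) == 'arn:aws:iam::111111111111:role/OrganizationAccountAccessRole'
assert build_cross_account_role_arn(
    'aws-us-gov', '111111111111', 'OrganizationAccountAccessRole'
) == 'arn:aws-us-gov:iam::111111111111:role/OrganizationAccountAccessRole'
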
def main():
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')
        organizations = Organizations(boto3, deployment_account_id)

        sts = STS(boto3)
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(
            ou_id=ou_id,
            account_path=[],
            cache=cache)
        s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET_NAME)

        # First set up the Deployment Account in all regions
        # (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path)
            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in account_ids:
            t = PropagatingThread(target=worker_thread, args=(
                account_id, sts, config, s3, cache))
            t.start()
            threads.append(t)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            # Exclude the deployment account itself from the target list
            account_ids=[
                i for i in account_ids if i != deployment_account_id
            ],
            update_pipelines_only=1)

        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info("Deployment Account has not yet been Bootstrapped.")
        return

def main():  # pylint: disable=R0915
    LOGGER.info("ADF Version %s", ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    policies = OrganizationPolicy()
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')
        organizations = Organizations(
            role=boto3,
            account_id=deployment_account_id)
        policies.apply(organizations, parameter_store, config.config)

        sts = STS()
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(
            ou_id=ou_id,
            account_path=[],
            cache=cache)
        s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET_NAME)

        kms_and_bucket_dict = {}
        # First set up/update the Deployment Account in all regions
        # (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path="adf-bootstrap/" + account_path,
                account_id=deployment_account_id)
            cloudformation.create_stack()
            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                kms_and_bucket_dict=kms_and_bucket_dict,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)
            if region == config.deployment_account_region:
                cloudformation.create_iam_stack()

        # Update the stack on the master account in the deployment region
        cloudformation = CloudFormation(
            region=config.deployment_account_region,
            deployment_account_region=config.deployment_account_region,
            role=boto3,
            wait=True,
            stack_name=None,
            s3=s3,
            s3_key_path='adf-build',
            account_id=ACCOUNT_ID)
        cloudformation.create_stack()

        threads = []
        account_ids = [
            account_id["Id"]
            for account_id in organizations.get_accounts()
        ]
        for account_id in [
                account for account in account_ids
                if account != deployment_account_id]:
            thread = PropagatingThread(target=worker_thread, args=(
                account_id, sts, config, s3, cache, kms_and_bucket_dict))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        LOGGER.info("Executing Step Function on Deployment Account")
        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=account_ids,
            update_pipelines_only=0)
        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info(
            'A Deployment Account is ready to be bootstrapped! '
            'The Account provisioner will now kick into action; '
            'be sure to check out its progress in AWS Step Functions '
            'in this account.')
    return

def main():
    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')
        config = Config()
        config.store_config()

        if deployment_account_id is None:
            raise NotConfiguredError(
                "Deployment Account has not yet been configured")

        organizations = Organizations(boto3, deployment_account_id)
        sts = STS(boto3)
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        deployment_account_role = ensure_deployment_account_configured(
            sts, config, ou_id, deployment_account_id)

        cache = Cache()
        account_path = organizations.build_account_path(
            ou_id,
            [],  # Initial empty array to hold OU Path
            cache)
        s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

        # First set up the Deployment Account in all regions
        # (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                file_path=None,
            )
            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in account_ids:
            t = PropagatingThread(target=worker_thread, args=(
                account_id, sts, config, s3, cache))
            t.start()
            threads.append(t)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            # Exclude the deployment account itself from the target list
            account_ids=[
                i for i in account_ids if i != deployment_account_id
            ],
            update_pipelines_only=False)

        step_functions.execute_statemachine()
    except NotConfiguredError as not_configured_error:
        LOGGER.info(not_configured_error)

def main():  # pylint: disable=R0915
    LOGGER.info("ADF Version %s", ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    scp = SCP()
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id'
        )
        organizations = Organizations(
            role=boto3,
            account_id=deployment_account_id
        )
        scp.apply(organizations, parameter_store, config.config)

        sts = STS()
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config
        )

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(
            ou_id=ou_id,
            account_path=[],
            cache=cache
        )
        s3 = S3(
            region=REGION_DEFAULT,
            bucket=S3_BUCKET_NAME
        )

        # Update the stack on the master account in the deployment region
        cloudformation = CloudFormation(
            region=config.deployment_account_region,
            deployment_account_region=config.deployment_account_region,  # pylint: disable=R0801
            role=boto3,
            wait=True,
            stack_name=None,
            s3=s3,
            s3_key_path='adf-build',
            account_id=ACCOUNT_ID
        )
        cloudformation.create_stack()

        # First set up/update the Deployment Account in all regions
        # (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                account_id=deployment_account_id
            )
            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation
            )

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in [
                account for account in account_ids
                if account != deployment_account_id]:
            thread = PropagatingThread(target=worker_thread, args=(
                account_id,
                sts,
                config,
                s3,
                cache
            ))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=account_ids,
            update_pipelines_only=0
        )
        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info(
            'You are now ready to bootstrap a deployment account '
            'by moving it into your deployment OU. '
            'Once you have moved it into the deployment OU, '
            'be sure to check out its progress in AWS Step Functions.'
        )
    return