def _run_terraform_init(self, base_dir, infra_dir):
    """Run `terraform init` against *infra_dir*, executing from *base_dir*.

    This fetches any Terraform modules referenced by the configuration.
    """
    logger.debug(f'Getting Terraform modules defined in {infra_dir}')
    command = [TERRAFORM_BINARY, 'init', infra_dir]
    check_call(command, cwd=base_dir)
def _setup_base_dir(self, temp_dir):
    """Create and return the per-release directory under *temp_dir*.

    The directory is named `<component_name>-<version>`.
    """
    base_dir = f'{temp_dir}/{self.component_name}-{self.version}'
    logger.debug(f'Creating directory for release: {base_dir}')
    mkdir(base_dir)
    return base_dir
def _boto_ecr_client(self):
    """Return a lazily-created, cached boto ECR client for the release session."""
    try:
        # Cached from a previous call.
        return self._ecr_client
    except AttributeError:
        session = self._release.boto_session
        logger.debug(f'AWS region on client: {session.region_name}')
        self._ecr_client = session.client('ecr')
        return self._ecr_client
def get_bucket_name(self, bucket_name_prefix=TFSTATE_NAME_PREFIX):
    """Return the name of the tagged tfstate bucket, creating one if needed.

    Looks for an existing S3 bucket in the current region carrying the
    tfstate tag; if none exists a new bucket is created and tagged.
    Fails (AssertionError) if more than one candidate bucket is found.
    """
    bucket_tag = TFSTATE_TAG_NAME
    all_names = [
        bucket['Name']
        for bucket in self._boto_s3_client.list_buckets()['Buckets']
    ]
    tagged_buckets = {
        name for name in all_names
        if self._bucket_has_tag(name, bucket_tag)
        and self._bucket_in_current_region(name)
    }
    assert len(tagged_buckets) <= 1, ''' multiple buckets with {}={} tag found '''.format(bucket_tag, TAG_VALUE).strip()
    if tagged_buckets:
        existing = list(tagged_buckets)[0]
        logger.debug(
            'Single bucket ({}) with tag: {}={} found'.format(
                existing, bucket_tag, TAG_VALUE
            )
        )
        return existing
    bucket_name = self._create_bucket(bucket_name_prefix)
    self._tag_bucket(bucket_name, bucket_tag)
    return bucket_name
def _copy_platform_configs(self, base_dir):
    """Copy each platform config path into the release directory."""
    destination = '{}/{}'.format(base_dir, PLATFORM_CONFIG_BASE_PATH)
    for config_path in self._platform_config_paths:
        logger.debug('Copying {} to {}'.format(config_path, destination))
        _copy_platform_config(config_path, destination)
def _plan(self):
    """Run a `terraform plan -destroy` and return the handled exit code.

    Secrets are written to a temporary JSON file so they can be passed
    to terraform without appearing on the command line.
    """
    with NamedTemporaryFile(mode='w+', encoding='utf-8', suffix='.json') \
            as secrets_file:
        logger.debug(f'Writing secrets to file {secrets_file.name}')
        json.dump(self._secrets, secrets_file)
        # Flush so the terraform subprocess sees the full file contents.
        secrets_file.flush()
        # -detailed-exitcode makes terraform distinguish "no changes"
        # from "changes present" via the exit code.
        flags = ['-destroy', '-detailed-exitcode']
        command = self._build_parameters('plan', secrets_file.name, flags=flags)
        logger.debug(f'Running {command}')
        process = Popen(command, cwd=self._release_path,
                        env=env_with_aws_credetials(
                            os.environ, self._boto_session),
                        stdout=PIPE, stderr=PIPE)
        while True:
            # communicate() blocks until the process exits and drains
            # stdout/stderr, so this loop normally runs exactly once.
            (out, err) = process.communicate()
            self._print_obfuscated_output(out)
            self._print_err(err)
            exit_code = process.poll()
            if exit_code is not None:
                return self._handle_plan_exit_code(out, exit_code)
def terraform_init(self, get=False):
    """Initialise the terraform S3/DynamoDB backend for this working copy.

    When *get* is true, terraform is also allowed to download modules
    and plugins.
    """
    credentials = self.boto_session.get_credentials()
    logger.debug(
        f'Initialising in {self.boto_session.region_name} '
        f'with bucket: {self.bucket}, '
        f'key prefix: {self.workspace_key_prefix}, '
        f'tfstate file: {self.tfstate_filename}, '
        f'dynamodb table: {self.dynamodb_table}'
    )
    get_flag = 'true' if get else 'false'
    command = [
        TERRAFORM_BINARY,
        'init',
        f'-get={get_flag}',
        f'-get-plugins={get_flag}',
        f'-backend-config=bucket={self.bucket}',
        f'-backend-config=region={self.boto_session.region_name}',
        f'-backend-config=key={self.tfstate_filename}',
        (
            '-backend-config=workspace_key_prefix='
            f'{self.workspace_key_prefix}'
        ),
        f'-backend-config=dynamodb_table={self.dynamodb_table}',
        f'-backend-config=access_key={credentials.access_key}',
        f'-backend-config=secret_key={credentials.secret_key}',
        f'-backend-config=token={credentials.token}',
        self.working_directory,
    ]
    check_call(command, cwd=self.base_directory)
def _copy_infra_files(self, base_dir):
    """Copy the infrastructure definitions into the release directory."""
    destination = '{}/{}'.format(base_dir, INFRASTRUCTURE_DEFINITIONS_PATH)
    logger.debug('Copying {} to {}'.format(
        INFRASTRUCTURE_DEFINITIONS_PATH, destination
    ))
    copytree(INFRASTRUCTURE_DEFINITIONS_PATH, destination)
def run(self, plan_only=False):
    """Run the terraform plan and, unless *plan_only*, apply any changes.

    Raises TerraformApplyError when the plan itself fails.
    """
    exit_code = self._plan()
    if exit_code == TERRAFORM_PLAN_EXIT_CODE_ERROR:
        raise TerraformApplyError(
            f'terraform plan exited with {exit_code}')
    logger.debug(f'terraform plan exit code: {exit_code}')
    if plan_only:
        return
    if exit_code == TERRAFORM_PLAN_EXIT_CODE_SUCCESS_CHANGES_PRESENT:
        self._apply()
def _bucket_has_tag(self, bucket_name, bucket_tag):
    """Return True when *bucket_name* carries *bucket_tag* set to TAG_VALUE.

    A failure to read the bucket's tags is treated as "not tagged".
    """
    logger.debug(f'Checking for tag {bucket_tag} on bucket {bucket_name}')
    try:
        tags = self._get_bucket_tags(bucket_name)
    except ClientError:
        logger.debug(
            f"Checking for tag {bucket_tag} "
            f"failed on bucket {bucket_name}")
        tags = {}
    return tags.get(bucket_tag) == TAG_VALUE
def write_backend_config(self, backend_file):
    """Write a minimal s3 backend stanza and schedule the file's removal."""
    logger.debug(f'Writing backend config to {backend_file.name}')
    config = dedent('''
        terraform {
            backend "s3" {
            }
        }
    ''').strip()
    backend_file.write(config)
    logger.debug(
        f'Registering {backend_file.name} to be removed at exit'
    )
    atexit.register(remove_file, backend_file.name)
def _bucket_in_current_region(self, bucket_name):
    """Return True when *bucket_name* lives in this client's region.

    S3 reports buckets in us-east-1 with a null LocationConstraint,
    so that case is handled explicitly.
    """
    response = self._boto_s3_client.get_bucket_location(
        Bucket=bucket_name
    )
    region = response['LocationConstraint']
    logger.debug(f'Checking bucket {bucket_name} region {region}')
    if region is None:
        return self._aws_region == 'us-east-1'
    return region == self._aws_region
def env_with_aws_credetials(env, boto_session):
    """Return a copy of *env* with AWS credential variables merged in.

    The session token is only included when the session actually has one.
    (NOTE(review): the misspelt name is the established public interface —
    callers elsewhere use it, so it is preserved.)
    """
    credentials = boto_session.get_credentials()
    logger.debug(f'Setting region to {boto_session.region_name}')
    aws_env = {
        'AWS_ACCESS_KEY_ID': credentials.access_key,
        'AWS_SECRET_ACCESS_KEY': credentials.secret_key,
        'AWS_DEFAULT_REGION': boto_session.region_name,
    }
    if credentials.token is not None:
        aws_env['AWS_SESSION_TOKEN'] = credentials.token
    result = env.copy()
    result.update(aws_env)
    return result
def _tag_bucket(self, bucket_name, bucket_tag):
    """Apply the identifying tfstate tag to *bucket_name* in S3."""
    logger.debug('Tagging bucket: {} with tag: {}={}'.format(
        bucket_name, bucket_tag, TAG_VALUE
    ))
    tag_set = [{'Key': bucket_tag, 'Value': TAG_VALUE}]
    self._boto_s3_client.put_bucket_tagging(
        Bucket=bucket_name,
        Tagging={'TagSet': tag_set},
    )
def _create_bucket(self, bucket_name_prefix):
    """Create an S3 bucket, retrying up to MAX_CREATION_ATTEMPTS times.

    When the default tfstate prefix is used a fresh name is generated on
    each attempt; otherwise the caller-supplied name is retried as-is.
    Raises Exception when every attempt fails.
    """
    logger.debug('Creating bucket with name {}'.format(bucket_name_prefix))
    generate_names = bucket_name_prefix == TFSTATE_NAME_PREFIX
    for attempt in range(MAX_CREATION_ATTEMPTS):
        if generate_names:
            bucket_name = self._generate_bucket_name(
                attempt, bucket_name_prefix
            )
        else:
            bucket_name = bucket_name_prefix
        if self._attempt_to_create_bucket(bucket_name):
            logger.debug(
                's3 bucket with name: {} created'.format(bucket_name)
            )
            return bucket_name
    raise Exception('could not create bucket after {} attempts'.format(
        MAX_CREATION_ATTEMPTS
    ))
def init(self, get_terraform_modules=False):
    """Write the backend config, run terraform init and pick the workspace.

    An existing workspace for this environment is selected; otherwise a
    new one is created.
    """
    with NamedTemporaryFile(
        prefix='cdflow_backend_', suffix='.tf',
        dir=self.working_directory, delete=False, mode='w+'
    ) as backend_file:
        self.write_backend_config(backend_file)

    self.terraform_init(get_terraform_modules)

    if self.workspace_exists():
        logger.debug(
            f'Workspace exists, selecting {self.environment_name}'
        )
        self.terraform_select_workspace()
    else:
        logger.debug(
            f'Creating new workspace {self.environment_name}'
        )
        self.terraform_new_workspace()
def assume_role(root_session, account):
    """Assume *account*'s role via STS and return a Session for it."""
    sts = root_session.client('sts')
    session_name = get_role_session_name(sts)
    logger.debug(
        "Assuming role arn:aws:iam::{}:role/{} with session {}".format(
            account.id, account.role, session_name))
    session_duration = get_session_duration(sts)
    logger.debug(f"Session duration is set to {session_duration}")
    response = sts.assume_role(
        RoleArn=f'arn:aws:iam::{account.id}:role/{account.role}',
        RoleSessionName=session_name,
        DurationSeconds=session_duration,
    )
    assumed = response['Credentials']
    return Session(
        assumed['AccessKeyId'],
        assumed['SecretAccessKey'],
        assumed['SessionToken'],
        account.region,
    )
def _component_secrets_for_environment(
    table, region_name, prefix, aws_credentials
):
    """List secret names in a credstash table that start with *prefix*.

    Returns an empty list when the table does not exist; any other
    client error propagates.
    """
    logger.debug(f'Fetching secrets from {table} in {region_name}')
    try:
        secrets = credstash.listSecrets(
            table=table, region=region_name, **aws_credentials
        )
    except ClientError as e:
        if e.response['Error']['Code'] != 'ResourceNotFoundException':
            raise
        logger.debug('Table not found - returning empty list')
        return []
    return [
        secret['name']
        for secret in secrets
        if secret['name'].startswith(prefix)
    ]
def migrate_state(
    root_session, account_scheme, old_scheme, team, component_name,
):
    """Copy per-environment tfstate from legacy account buckets into the
    new account scheme's backend bucket.

    Each successful copy is recorded with a MIGRATED flag object so the
    migration is only performed once per environment.
    """
    release_account_session = assume_role(
        root_session, account_scheme.release_account,
    )
    release_s3 = release_account_session.resource('s3')
    for account in old_scheme.accounts:
        logger.debug(f'Looking for state in account {account.alias}')
        session = assume_role(root_session, account)
        state_bucket = S3BucketFactory(session).get_bucket_name()
        # Each top-level prefix in the old bucket names an environment.
        prefixes = get_bucket_prefixes(session, state_bucket)
        logger.debug(f'State bucket {state_bucket} has prefixes: {prefixes}')
        s3 = session.resource('s3')
        for env in [p.strip('/') for p in prefixes]:
            # Flag object recording that this env/component was migrated.
            migrated_flag = release_s3.Object(
                account_scheme.backend_s3_bucket,
                f'{team}/{component_name}/{env}/MIGRATED',
            )
            old_state = s3.Object(
                state_bucket,
                f'{env}/{component_name}/terraform.tfstate',
            )
            if key_exists(old_state) and not is_migrated(migrated_flag):
                logger.debug(
                    'Not migrated, checking for state at: '
                    f'{env}/{component_name}/terraform.tfstate',
                )
                old_state_content = old_state.get()['Body'].read()
                logger.debug(
                    f'Putting state into {account_scheme.backend_s3_bucket} '
                    f'under {team}/{component_name}/{env}/terraform.tfstate',
                )
                new_state = release_s3.Object(
                    account_scheme.backend_s3_bucket,
                    f'{team}/{component_name}/{env}/terraform.tfstate',
                )
                new_state.put(Body=old_state_content)
                # Write the flag last so a failed copy is retried next run.
                migrated_flag.put(Body=b'1')
def init(self, get_terraform_modules=False):
    """Write an s3 backend stub and run terraform init against it.

    The temporary backend file is kept on disk for terraform to read and
    registered for removal at interpreter exit.
    """
    with NamedTemporaryFile(
        prefix='cdflow_backend_', suffix='.tf',
        dir=self.working_directory, delete=False, mode='w+'
    ) as backend_file:
        logger.debug(f'Writing backend config to {backend_file.name}')
        backend_config = dedent('''
            terraform {
                backend "s3" {
                }
            }
        ''').strip()
        backend_file.write(backend_config)
        logger.debug(
            f'Registering {backend_file.name} to be removed at exit'
        )
        atexit.register(remove_file, backend_file.name)

    logger.debug(
        f'Initialising backend in {self.working_directory} '
        f'with {self.bucket}, {self.boto_session.region_name}, '
        f'{self.state_file_key}, {self.dynamodb_table}'
    )
    credentials = self.boto_session.get_credentials()
    get_flag = 'true' if get_terraform_modules else 'false'
    check_call(
        [
            TERRAFORM_BINARY,
            'init',
            f'-get={get_flag}',
            f'-get-plugins={get_flag}',
            f'-backend-config=bucket={self.bucket}',
            f'-backend-config=region={self.boto_session.region_name}',
            f'-backend-config=key={self.state_file_key}',
            f'-backend-config=dynamodb_table={self.dynamodb_table}',
            f'-backend-config=access_key={credentials.access_key}',
            f'-backend-config=secret_key={credentials.secret_key}',
            f'-backend-config=token={credentials.token}',
            self.working_directory,
        ],
        cwd=self.base_directory,
    )
def remove_file(filepath):
    """Delete *filepath*, logging (rather than raising) on OSError."""
    logger.debug(f'Removing {filepath}')
    try:
        unlink(filepath)
    except OSError as e:
        logger.debug(f'Error removing {filepath}: {e}')
def _copy_app_config_files(self, base_dir):
    """Copy the application config directory into the release directory."""
    destination = '{}/{}'.format(base_dir, CONFIG_BASE_PATH)
    logger.debug('Copying {} to {}'.format(CONFIG_BASE_PATH, destination))
    copytree(CONFIG_BASE_PATH, destination)