Example #1
def worker_thread(sts, region, account_id, role, event):
    partition = get_partition(REGION_DEFAULT)

    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{account_id}:role/{role}', 'remove_base')

    parameter_store = ParameterStore(region, role)
    paginator = parameter_store.client.get_paginator('describe_parameters')
    page_iterator = paginator.paginate()
    for page in page_iterator:
        for parameter in page['Parameters']:
            if 'Used by The AWS Deployment Framework' in parameter.get(
                    'Description', ''):
                parameter_store.delete_parameter(parameter.get('Name'))

    cloudformation = CloudFormation(
        region=region,
        deployment_account_region=event.get('deployment_account_region'),
        role=role,
        wait=True,
        stack_name=None,
        s3=None,
        s3_key_path=None,
        account_id=account_id)
    return cloudformation.delete_all_base_stacks()
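Two cleanup steps happen here: the SSM `describe_parameters` paginator walks every parameter in the target account and deletes only those whose description marks them as managed by ADF, and the CloudFormation helper then tears down the ADF base stacks under the freshly assumed cross-account role.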
Example #2
 def __init__(self, source_account_id):
     self.source_account_id = source_account_id
     self.stack_name = f'adf-event-rule-{source_account_id}-{DEPLOYMENT_ACCOUNT_ID}'
     self.partition = get_partition(DEPLOYMENT_ACCOUNT_REGION)
     # Requires the adf-automation-role to exist on the target account
     self.role = sts.assume_cross_account_role(
         f'arn:{self.partition}:iam::{source_account_id}:role/adf-automation-role',
         f'create_rule_{source_account_id}'
     )
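Nearly every example relies on an `STS` helper whose `assume_cross_account_role` returns something the other wrappers accept as a `role`. A minimal sketch of such a helper, assuming it wraps boto3's `assume_role` and hands back a session built from the temporary credentials (the implementation is a guess; only the call signature comes from the examples):

import boto3

class STS:
    """Sketch of a helper that assumes a role and returns a boto3 session."""

    def __init__(self):
        self.client = boto3.client('sts')

    def assume_cross_account_role(self, role_arn, role_session_name):
        # boto3's assume_role returns temporary credentials for the target role.
        response = self.client.assume_role(
            RoleArn=role_arn,
            RoleSessionName=role_session_name,
        )
        credentials = response['Credentials']
        return boto3.session.Session(
            aws_access_key_id=credentials['AccessKeyId'],
            aws_secret_access_key=credentials['SecretAccessKey'],
            aws_session_token=credentials['SessionToken'],
        )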
Example #3
def ensure_bucket_policy(bucket_name: str, region: str,
                         policy: MutableMapping) -> None:
    partition = get_partition(region)

    s3_client = get_s3_client(region)
    for action in policy["Statement"]:
        action["Resource"] = [
            f"arn:{partition}:s3:::{bucket_name}",
            f"arn:{partition}:s3:::{bucket_name}/*",
        ]
    s3_client.put_bucket_policy(Bucket=bucket_name, Policy=json.dumps(policy))
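For context, `ensure_bucket_policy` overwrites the `Resource` list of every statement before applying the policy, so callers only supply the effect, principal, and actions. A hypothetical invocation (bucket name, account id, and actions are made up for illustration):

policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Principal": {"AWS": "arn:aws:iam::111122223333:root"},
            "Action": ["s3:GetObject", "s3:ListBucket"],
            "Resource": [],  # overwritten with the partition-aware bucket ARNs
        },
    ],
}
ensure_bucket_policy("example-bucket", "us-east-1", policy)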
Example #4
 def fetch_stack_output(self, value, key, optional=False):  # pylint: disable=too-many-statements
     partition = get_partition(DEFAULT_REGION)
     try:
         [_, account_id, region, stack_name,
          output_key] = str(value).split(':')
     except ValueError as error:
         raise ValueError(
             f"{value} is not a valid import string. Syntax should be "
             "import:account_id:region:stack_name:output_key") from error
     if Resolver._is_optional(output_key):
         LOGGER.info("Parameter %s is considered optional", output_key)
         optional = True
     output_key = output_key[:-1] if optional else output_key
     try:
         role = self.sts.assume_cross_account_role(
             f'arn:{partition}:iam::{account_id}:role/adf-readonly-automation-role',
             'importer')
         cloudformation = CloudFormation(
             region=region,
             deployment_account_region=os.environ["AWS_REGION"],
             role=role,
             stack_name=stack_name,
             account_id=account_id)
         stack_output = self.cache.check(
             value) or cloudformation.get_stack_output(output_key)
         if stack_output:
             LOGGER.info("Stack output value is %s", stack_output)
             self.cache.add(value, stack_output)
     except ClientError:
         if not optional:
             raise
         stack_output = ""
     try:
         parent_key = list(
             Resolver.determine_parent_key(self.comparison_parameters,
                                           key))[0]
         if optional:
             self.stage_parameters[parent_key][key] = stack_output
         else:
             if not stack_output:
                 raise Exception(
                     f"No Stack Output found on {account_id} in {region} "
                     f"with stack name {stack_name} and "
                     f"output key {output_key}")
             self.stage_parameters[parent_key][key] = stack_output
     except IndexError as error:
         if stack_output:
             if self.stage_parameters.get(key):
                 self.stage_parameters[key] = stack_output
         else:
             raise Exception(
                 "Could not determine the structure of the file in order "
                 "to import from CloudFormation") from error
     return True
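As the error message above spells out, the import string follows the syntax `import:account_id:region:stack_name:output_key`. The trailing-character strip after the `_is_optional` check suggests an output key can be marked optional with a one-character suffix; the exact marker is not visible in this snippet.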
Example #5
 def __init__(self, account_id, name, description=''):
     self.name = name
     if not description:
         description = 'Created by ADF'
     self.description = description
     self.stack_name = f"adf-codecommit-{self.name}"
     self.account_id = account_id
     self.partition = get_partition(DEPLOYMENT_ACCOUNT_REGION)
     self.session = sts.assume_cross_account_role(
         f'arn:{self.partition}:iam::{account_id}:role/adf-automation-role',
         f'create_repo_{account_id}')
Example #6
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    _create_inputs_folder()
    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET)
    deployment_map = DeploymentMap(parameter_store, s3, ADF_PIPELINE_PREFIX)
    sts = STS()
    partition = get_partition(DEPLOYMENT_ACCOUNT_REGION)
    cross_account_access_role = parameter_store.fetch_parameter(
        'cross_account_access_role')
    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{MASTER_ACCOUNT_ID}:role/'
        f'{cross_account_access_role}-readonly', 'pipeline')
    organizations = Organizations(role)
    ensure_event_bus_status(ORGANIZATION_ID)
    try:
        auto_create_repositories = parameter_store.fetch_parameter(
            'auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'
    threads = []
    _cache = Cache()
    for p in deployment_map.map_contents.get('pipelines', []):
        _source_account_id = (
            p.get('default_providers', {})
            .get('source', {})
            .get('properties', {})
            .get('account_id', {})
        )
        if (_source_account_id
                and int(_source_account_id) != int(DEPLOYMENT_ACCOUNT_ID)
                and not _cache.check(_source_account_id)):
            rule = Rule(
                p['default_providers']['source']['properties']['account_id'])
            rule.create_update()
            _cache.add(
                p['default_providers']['source']['properties']['account_id'],
                True)
        thread = PropagatingThread(target=worker_thread,
                                   args=(p, organizations,
                                         auto_create_repositories,
                                         deployment_map, parameter_store))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
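Example #6 fans work out over a `PropagatingThread`, which by its name is a `threading.Thread` subclass that re-raises a worker's exception when it is joined. The actual class is not shown here; a common recipe for the pattern looks like this:

from threading import Thread

class PropagatingThread(Thread):
    """Thread that captures the target's exception and re-raises it on join()."""

    def run(self):
        self.exc = None
        self.ret = None
        try:
            # _target/_args/_kwargs are set by Thread.__init__.
            self.ret = self._target(*self._args, **self._kwargs)
        except BaseException as exc:
            self.exc = exc

    def join(self, timeout=None):
        super().join(timeout)
        if self.exc:
            raise self.exc
        return self.ret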
Example #7
def lambda_handler(event, _):
    """Main Lambda Entry point
    """
    sts = STS()
    account_id = event.get('account_id')
    partition = get_partition(REGION_DEFAULT)
    cross_account_access_role = event.get('cross_account_access_role')

    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{account_id}:role/{cross_account_access_role}',
        'master')

    s3 = S3(REGION_DEFAULT, S3_BUCKET)

    for region in list(
            set([event['deployment_account_region']] + event['regions'])):

        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=event['deployment_account_region'],
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=event['ou_name'],
            account_id=account_id)

        status = cloudformation.get_stack_status()

        if status in ('CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'):
            raise RetryError(f"CloudFormation Stack status: {status}")

        if status in ('CREATE_FAILED', 'ROLLBACK_FAILED', 'DELETE_FAILED',
                      'UPDATE_ROLLBACK_FAILED', 'ROLLBACK_IN_PROGRESS',
                      'ROLLBACK_COMPLETE'):
            raise Exception(
                f"Account Bootstrap Failed - Account: {account_id} "
                f"Region: {region} Status: {status}")

        if event.get('is_deployment_account'):
            update_deployment_account_output_parameters(
                deployment_account_region=event['deployment_account_region'],
                region=region,
                deployment_account_role=role,
                cloudformation=cloudformation)

    return event
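Raising `RetryError` while a stack is still in `CREATE_IN_PROGRESS` or `UPDATE_IN_PROGRESS` presumably hands control back to a retry policy on the caller (a Step Functions state or similar), so this handler is re-invoked until the stack settles; the terminal failure states raise a plain `Exception` and abort the bootstrap instead.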
Example #8
def lambda_handler(event, _):
    target_role_policies = {
        'adf-cloudformation-deployment-role': 'adf-cloudformation-deployment-role-policy-kms',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    role_policies = {
        'adf-codepipeline-role': 'adf-codepipeline-role-policy',
        'adf-cloudformation-deployment-role': 'adf-cloudformation-deployment-role-policy',
        'adf-cloudformation-role': 'adf-cloudformation-role-policy'
    }

    sts = STS()
    partition = get_partition(REGION_DEFAULT)

    parameter_store = ParameterStore(
        region=event.get('deployment_account_region'),
        role=boto3
    )
    account_id = event.get("account_id")
    kms_key_arns = []
    s3_buckets = []
    for region in list(set([event.get('deployment_account_region')] + event.get("regions", []))):
        kms_key_arn = parameter_store.fetch_parameter(
            f"/cross_region/kms_arn/{region}"
        )
        kms_key_arns.append(kms_key_arn)
        s3_bucket = parameter_store.fetch_parameter(
            f"/cross_region/s3_regional_bucket/{region}"
        )
        s3_buckets.append(s3_bucket)
        try:
            role = sts.assume_cross_account_role(
                f'arn:{partition}:iam::{account_id}:role/adf-cloudformation-deployment-role',
                'base_cfn_role'
            )
            LOGGER.debug("Role has been assumed for %s", account_id)
            update_iam(role, s3_bucket, kms_key_arn, target_role_policies)
        except ClientError as err:
            LOGGER.debug("%s could not be assumed (%s), continuing", account_id, err, exc_info=True)
            continue

    update_iam(boto3, s3_buckets, kms_key_arns, role_policies)

    return event
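Note the two-phase update in Example #8: inside the loop, `update_iam` patches the roles listed in `target_role_policies` in each target account (skipping accounts whose deployment role cannot be assumed yet), while the final call applies `role_policies` to the deployment account itself via the plain `boto3` session, passing the accumulated bucket and KMS key lists.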
Example #9
    def _start_statemachine(self):
        """
        Executes the Update Cross Account IAM Step Function in the Deployment Account
        """
        partition = get_partition(self.deployment_account_region)

        self.execution_arn = self.client.start_execution(
            stateMachineArn=(
                f"arn:{partition}:states:{self.deployment_account_region}:"
                f"{self.deployment_account_id}:stateMachine:EnableCrossAccountAccess"
            ),
            input=json.dumps({
                "deployment_account_region": self.deployment_account_region,
                "deployment_account_id": self.deployment_account_id,
                "account_ids": self.account_ids,
                "regions": self.regions,
                "full_path": self.full_path,
                "update_only": self.update_pipelines_only,
                "error": self.error
            })).get('executionArn')

        self._fetch_statemachine_status()
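`start_execution` is the standard boto3 Step Functions call; its response carries the `executionArn`, which is stored so that `_fetch_statemachine_status` can poll the execution (presumably via `describe_execution`) until it completes.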
Example #10
def lambda_handler(event, _):
    sts = STS()

    deployment_account_id = event.get('deployment_account_id')
    partition = get_partition(REGION_DEFAULT)
    cross_account_access_role = event.get('cross_account_access_role')

    role = sts.assume_cross_account_role(
        f'arn:{partition}:iam::{deployment_account_id}:role/{cross_account_access_role}',
        'step_function')

    step_functions = StepFunctions(
        role=role,
        deployment_account_id=deployment_account_id,
        deployment_account_region=event['deployment_account_region'],
        full_path=event['full_path'],
        regions=event['regions'],
        account_ids=[event['account_id']],
        update_pipelines_only=(
            1 if event.get('moved_to_protected') or event.get('moved_to_root')
            else 0
        ),
        error=event.get('error', 0))
    step_functions.execute_statemachine()

    return event
Example #11
import os

from logger import configure_logger
from cache import Cache
from cloudformation import CloudFormation
from parameter_store import ParameterStore
from organizations import Organizations
from stepfunctions import StepFunctions
from errors import GenericAccountConfigureError, ParameterNotFoundError
from sts import STS
from s3 import S3
from partition import get_partition
from config import Config
from organization_policy import OrganizationPolicy

S3_BUCKET_NAME = os.environ["S3_BUCKET"]
REGION_DEFAULT = os.environ["AWS_REGION"]
PARTITION = get_partition(REGION_DEFAULT)
ACCOUNT_ID = os.environ["MASTER_ACCOUNT_ID"]
ADF_VERSION = os.environ["ADF_VERSION"]
ADF_LOG_LEVEL = os.environ["ADF_LOG_LEVEL"]
DEPLOYMENT_ACCOUNT_S3_BUCKET_NAME = os.environ["DEPLOYMENT_ACCOUNT_BUCKET"]
ADF_DEFAULT_SCM_FALLBACK_BRANCH = 'master'
LOGGER = configure_logger(__name__)


def is_account_in_invalid_state(ou_id, config):
    """
    Check whether the account sits in the root
    of the Organization or in a protected OU
    """
    if ou_id.startswith('r-'):
        return "Is in the Root of the Organization, it will be skipped."
    # Assumption: the config carries the protected OU ids under 'protected'.
    if ou_id in config.get('protected', []):
        return f"Is in the protected OU {ou_id}, it will be skipped."
    return False
Example #12
def test_partition_us_commercial_regions(region):
    # `region` is presumably supplied via pytest parametrization in the
    # original test module.
    assert get_partition(region) == 'aws'


def test_partition_govcloud_regions(region):
    assert get_partition(region) == 'aws-us-gov'
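The two tests pin down the contract of `get_partition` itself: commercial regions resolve to the `aws` partition and GovCloud regions to `aws-us-gov`. A minimal sketch consistent with those tests (the `aws-cn` branch is an extra assumption; no China-region test appears here):

def get_partition(region_name: str) -> str:
    """Return the AWS partition for the given region name (sketch)."""
    if region_name.startswith('us-gov'):
        return 'aws-us-gov'
    if region_name.startswith('cn-'):
        # Assumption: China regions live in the aws-cn partition.
        return 'aws-cn'
    return 'aws'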