Code example #1
def lambda_handler(event, _):
    LOGGER.info(f"Ensuring Account: {event.get('account_full_name')} is "
                f"in OU {event.get('organizational_unit_path')}")
    organizations = Organizations(boto3)
    organizations.move_account(
        event.get("account_id"),
        event.get("organizational_unit_path"),
    )
    return event
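
Most of these examples hand `Organizations` either the `boto3` module itself or an assumed-role session. As a hedged sketch of what a `move_account` helper like the one above might do with boto3's `organizations` API (pagination and error handling omitted; the class name and structure are assumptions for illustration, not the ADF implementation):

import boto3


class OrganizationsSketch:
    """Hypothetical minimal wrapper around the AWS Organizations API."""

    def __init__(self, role, account_id=None):
        # ADF-style code passes either the boto3 module or an assumed-role
        # session; both expose a client() factory.
        self.client = role.client("organizations")
        self.account_id = account_id

    def move_account(self, account_id, ou_path):
        # Resolve the destination OU by walking the path down from the root.
        destination_id = self.client.list_roots()["Roots"][0]["Id"]
        for ou_name in filter(None, ou_path.split("/")):
            ous = self.client.list_organizational_units_for_parent(
                ParentId=destination_id)["OrganizationalUnits"]
            destination_id = next(
                ou["Id"] for ou in ous if ou["Name"] == ou_name)
        source_id = self.client.list_parents(
            ChildId=account_id)["Parents"][0]["Id"]
        if source_id != destination_id:
            self.client.move_account(
                AccountId=account_id,
                SourceParentId=source_id,
                DestinationParentId=destination_id,
            )


# Usage sketch: OrganizationsSketch(boto3).move_account("111111111111", "prod/workloads")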
Code example #2
def worker_thread(account_id, sts, config, s3, cache, kms_dict):
    """
    The Worker thread function that is created for each account
    in which CloudFormation create_stack is called
    """
    LOGGER.debug("%s - Starting new worker thread", account_id)

    organizations = Organizations(role=boto3, account_id=account_id)
    ou_id = organizations.get_parent_info().get("ou_parent_id")

    account_state = is_account_in_invalid_state(ou_id, config.config)
    if account_state:
        LOGGER.info("%s %s", account_id, account_state)
        return

    account_path = organizations.build_account_path(
        ou_id,
        [],  # Initial empty array to hold OU Path,
        cache)
    try:
        role = ensure_generic_account_can_be_setup(sts, config, account_id)

        # Regional base stacks can be updated after global
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            # Ensuring the kms_arn on the target account is up-to-date
            parameter_store = ParameterStore(region, role)
            parameter_store.put_parameter('kms_arn', kms_dict[region])

            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                account_id=account_id)
            try:
                cloudformation.create_stack()
            except GenericAccountConfigureError as error:
                if 'Unable to fetch parameters' in str(error):
                    LOGGER.error(
                        '%s - Failed to update its base stack due to missing parameters (deployment_account_id or kms_arn); '
                        'ensure this account has been bootstrapped correctly by being moved from the root '
                        'into an Organizational Unit within AWS Organizations.',
                        account_id)
                raise Exception from error

    except GenericAccountConfigureError as generic_account_error:
        LOGGER.info(generic_account_error)
        return
Code example #3
    def setUp(self):
        mock_config = {'ADAPTER_NAME': 'pipedrive'}
        mock_logger = mock.Mock()
        self.mock_adapter = mock.Mock()
        self.mock_adapter.list_organizations.return_value = [{
            'name': 'Foo'
        }]
        mock_app = {
            'config': mock_config,
            'logger': mock_logger,
            'organizations_adapter': self.mock_adapter
        }

        self.service = Organizations(mock_app)
Code example #4
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter(
            'auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for p in deployment_map.map_contents.get('pipelines'):
        thread = PropagatingThread(target=worker_thread,
                                   args=(p, organizations,
                                         auto_create_repositories, s3,
                                         deployment_map, parameter_store))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Code example #5
def main():
    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store,
                                   os.environ["ADF_PIPELINE_PREFIX"])
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, boto3, S3_BUCKET_NAME)
    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-org-access-adf'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p['targets']:
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    try:
                        regions = step.get(
                            'regions',
                            p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                        pipeline.stage_regions.append(regions)
                        pipeline_target = Target(path, regions,
                                                 target_structure,
                                                 organizations)
                        pipeline_target.fetch_accounts_for_target()
                    except BaseException as error:
                        raise Exception(
                            "Failed to return accounts for {0}".format(path)
                        ) from error

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_if_required(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(os.environ["ADF_PIPELINE_PREFIX"],
                                        pipeline.name),
            s3=None,
            s3_key_path=None)

        cloudformation.create_stack()
Code example #6
def main():
    accounts = read_config_files(ACCOUNTS_FOLDER)
    if not accounts:
        LOGGER.info("No accounts found in the configuration file(s). "
                    "Account provisioning will not continue.")
        return
    LOGGER.info(f"Found {len(accounts)} account(s) in configuration file(s).")
    organizations = Organizations(boto3)
    all_accounts = organizations.get_accounts()
    parameter_store = ParameterStore(os.environ.get('AWS_REGION', 'us-east-1'), boto3)
    adf_role_name = parameter_store.fetch_parameter('cross_account_access_role')
    for account in accounts:
        try:
            account_id = next(acc["Id"] for acc in all_accounts if acc["Name"] == account.full_name)
        except StopIteration:  # The account does not exist yet.
            account_id = None
        create_or_update_account(organizations, account, adf_role_name, account_id)
Code example #7
def main():  #pylint: disable=R0915
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(DEPLOYMENT_ACCOUNT_REGION, boto3)
    deployment_map = DeploymentMap(parameter_store, ADF_PIPELINE_PREFIX)
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, S3_BUCKET_NAME)
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')),
        'pipeline')

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    for p in deployment_map.map_contents.get('pipelines'):
        pipeline = Pipeline(p)

        for target in p.get('targets', []):
            target_structure = TargetStructure(target)
            for step in target_structure.target:
                for path in step.get('path'):
                    regions = step.get(
                        'regions', p.get('regions', DEPLOYMENT_ACCOUNT_REGION))
                    step_name = step.get('name')
                    params = step.get('params', {})
                    pipeline.stage_regions.append(regions)
                    pipeline_target = Target(path, regions, target_structure,
                                             organizations, step_name, params)
                    pipeline_target.fetch_accounts_for_target()

            pipeline.template_dictionary["targets"].append(
                target_structure.account_list)

        if DEPLOYMENT_ACCOUNT_REGION not in regions:
            pipeline.stage_regions.append(DEPLOYMENT_ACCOUNT_REGION)

        parameters = pipeline.generate_parameters()
        pipeline.generate()
        deployment_map.update_deployment_parameters(pipeline)
        s3_object_path = upload_pipeline(s3, pipeline)

        store_regional_parameter_config(pipeline, parameter_store)
        cloudformation = CloudFormation(
            region=DEPLOYMENT_ACCOUNT_REGION,
            deployment_account_region=DEPLOYMENT_ACCOUNT_REGION,
            role=boto3,
            template_url=s3_object_path,
            parameters=parameters,
            wait=True,
            stack_name="{0}-{1}".format(ADF_PIPELINE_PREFIX, pipeline.name),
            s3=None,
            s3_key_path=None,
            account_id=DEPLOYMENT_ACCOUNT_ID)
        cloudformation.create_stack()
Code example #8
class TestOrganizations(TestCase):
    def setUp(self):
        mock_config = {'ADAPTER_NAME': 'pipedrive'}
        mock_logger = mock.Mock()
        self.mock_adapter = mock.Mock()
        self.mock_adapter.list_organizations.return_value = [{
            'name': 'Foo'
        }]
        mock_app = {
            'config': mock_config,
            'logger': mock_logger,
            'organizations_adapter': self.mock_adapter
        }

        self.service = Organizations(mock_app)

    def test_list_calls_adapter_fn(self):
        self.service.list()
        self.assertTrue(self.mock_adapter.list_organizations.called)
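
For context, a minimal service implementation that would satisfy this test, assuming `Organizations` simply delegates to the injected adapter (an illustrative sketch, not the project's actual code):

class Organizations:
    def __init__(self, app):
        # The test wires config, logger and adapter through a plain dict.
        self.config = app['config']
        self.logger = app['logger']
        self.adapter = app['organizations_adapter']

    def list(self):
        # Delegating here is exactly what test_list_calls_adapter_fn asserts.
        return self.adapter.list_organizations()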
Code example #9
def worker_thread(account_id, sts, config, s3, cache):
    """
    The Worker thread function that is created for each account
    in which CloudFormation create_stack is called
    """
    LOGGER.info("Starting new worker thread for %s", account_id)

    organizations = Organizations(boto3, account_id)
    ou_id = organizations.get_parent_info().get("ou_parent_id")

    if is_account_invalid_state(ou_id, config.config):
        LOGGER.info("%s is in an invalid state", account_id)
        return

    account_path = organizations.build_account_path(
        ou_id,
        [],  # Initial empty array to hold OU Path,
        cache)
    LOGGER.info("The Account path for %s is %s", account_id, account_path)

    try:
        role = ensure_generic_account_can_be_setup(sts, config, account_id)

        # Regional base stacks can be updated after global
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path)

            cloudformation.create_stack()

    except GenericAccountConfigureError as generic_account_error:
        LOGGER.info(generic_account_error)
        return
Code example #10
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    deployment_map = DeploymentMap(
        parameter_store,
        ADF_PIPELINE_PREFIX
    )
    s3 = S3(
        DEPLOYMENT_ACCOUNT_REGION,
        S3_BUCKET_NAME
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )

    organizations = Organizations(role)
    clean(parameter_store, deployment_map)

    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'

    threads = []
    for counter, p in enumerate(deployment_map.map_contents.get('pipelines')):
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            s3,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)
        _batcher = counter % 10
        if _batcher == 9:  # counter % 10 == 9: a full batch of 10 threads was started
            _interval = random.randint(5, 11)
            LOGGER.debug('Waiting for %s seconds before starting next batch of 10 threads.', _interval)
            time.sleep(_interval)

    for thread in threads:
        thread.join()
Code example #11
def lambda_handler(event, _):
    if event.get("tags"):
        organizations = Organizations(boto3)
        create_account_tags(
            event.get("account_id"),
            event.get("tags"),
            organizations,
        )
    else:
        LOGGER.info(
            f"Account: {event.get('account_full_name')} does not need tags configured"
        )
    return event
Code example #12
def lambda_handler(event, _):
    parameters = ParameterStore(REGION_DEFAULT, boto3)
    account_id = event.get('detail').get('requestParameters').get('accountId')
    organizations = Organizations(boto3, account_id)
    parsed_event = Event(event, parameters, organizations, account_id)
    cache = Cache()

    if parsed_event.moved_to_root or parsed_event.moved_to_protected:
        return parsed_event.create_output_object(cache)

    parsed_event.set_destination_ou_name()

    sts = STS(boto3)
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}'.format(
            parsed_event.account_id,
            parsed_event.cross_account_access_role
        ), 'master_lambda'
    )

    if parsed_event.is_deployment_account:
        update_master_account_parameters(parsed_event, parameters)
        configure_deployment_account(parsed_event, role)

    s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

    account_path = parsed_event.organizations.build_account_path(
        parsed_event.destination_ou_id,
        [],  # Initial empty array to hold OU Path,
        cache,
    )

    for region in list(set([parsed_event.deployment_account_region] + parsed_event.regions)):
        if not parsed_event.is_deployment_account:
            configure_generic_account(sts, parsed_event, region, role)
        cloudformation = CloudFormation(
            region=region,
            deployment_account_region=parsed_event.deployment_account_region,
            role=role,
            wait=False,
            stack_name=None,
            s3=s3,
            s3_key_path=account_path,
            file_path=None,
        )
        cloudformation.create_stack()

    return parsed_event.create_output_object(cache)
Code example #13
def main():
    LOGGER.info('ADF Version %s', ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    _create_inputs_folder()
    parameter_store = ParameterStore(
        DEPLOYMENT_ACCOUNT_REGION,
        boto3
    )
    s3 = S3(DEPLOYMENT_ACCOUNT_REGION, SHARED_MODULES_BUCKET)
    deployment_map = DeploymentMap(
        parameter_store,
        s3,
        ADF_PIPELINE_PREFIX
    )
    sts = STS()
    role = sts.assume_cross_account_role(
        'arn:aws:iam::{0}:role/{1}-readonly'.format(
            MASTER_ACCOUNT_ID,
            parameter_store.fetch_parameter('cross_account_access_role')
        ), 'pipeline'
    )
    organizations = Organizations(role)
    clean(parameter_store, deployment_map)
    ensure_event_bus_status(ORGANIZATION_ID)
    try:
        auto_create_repositories = parameter_store.fetch_parameter('auto_create_repositories')
    except ParameterNotFoundError:
        auto_create_repositories = 'enabled'
    threads = []
    _cache = Cache()
    for p in deployment_map.map_contents.get('pipelines', []):
        _source_account_id = (
            p.get('default_providers', {})
            .get('source', {})
            .get('properties', {})
            .get('account_id', {})
        )
        if (_source_account_id
                and int(_source_account_id) != int(DEPLOYMENT_ACCOUNT_ID)
                and not _cache.check(_source_account_id)):
            rule = Rule(_source_account_id)
            rule.create_update()
            _cache.add(_source_account_id, True)
        thread = PropagatingThread(target=worker_thread, args=(
            p,
            organizations,
            auto_create_repositories,
            deployment_map,
            parameter_store
        ))
        thread.start()
        threads.append(thread)

    for thread in threads:
        thread.join()
Code example #14
def lambda_handler(event, _):
    parameters = ParameterStore(region=REGION_DEFAULT, role=boto3)
    account_id = event.get('detail').get('requestParameters').get('accountId')
    organizations = Organizations(role=boto3, account_id=account_id)
    parsed_event = Event(event=event,
                         parameter_store=parameters,
                         organizations=organizations,
                         account_id=account_id)
    cache = Cache()

    account_path = "ROOT" if parsed_event.moved_to_root else parsed_event.organizations.build_account_path(
        parsed_event.destination_ou_id,
        [],  # Initial empty array to hold OU Path,
        cache)

    if parsed_event.moved_to_root or parsed_event.moved_to_protected:
        return parsed_event.create_output_object(account_path)

    parsed_event.set_destination_ou_name()

    return parsed_event.create_output_object(account_path)
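
Several of these examples call `build_account_path(ou_id, [], cache)` to turn an OU id into a slash-separated path. A hedged sketch of such a walk up the OU tree, written as a standalone function against boto3's `organizations` API (the `cache` memoisation is omitted; this is an assumption, not the ADF implementation):

import boto3


def build_account_path(org_client, ou_id):
    """Walk from an OU up to the organization root, collecting OU names."""
    path = []
    current = ou_id
    while True:
        name = org_client.describe_organizational_unit(
            OrganizationalUnitId=current)["OrganizationalUnit"]["Name"]
        path.insert(0, name)
        parent = org_client.list_parents(ChildId=current)["Parents"][0]
        if parent["Type"] == "ROOT":
            break
        current = parent["Id"]
    return "/".join(path)


# Usage sketch:
# build_account_path(boto3.client("organizations"), "ou-abcd-11111111")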
Code example #15
def main():  # pylint: disable=R0915
    LOGGER.info("ADF Version %s", ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    policies = OrganizationPolicy()
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')
        organizations = Organizations(role=boto3,
                                      account_id=deployment_account_id)
        policies.apply(organizations, parameter_store, config.config)
        sts = STS()
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(ou_id=ou_id,
                                                        account_path=[],
                                                        cache=cache)
        s3 = S3(region=REGION_DEFAULT, bucket=S3_BUCKET_NAME)

        kms_and_bucket_dict = {}
        # First Setup/Update the Deployment Account in all regions (KMS Key and
        # S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path="adf-bootstrap/" + account_path,
                account_id=deployment_account_id)
            cloudformation.create_stack()
            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                kms_and_bucket_dict=kms_and_bucket_dict,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)
            if region == config.deployment_account_region:
                cloudformation.create_iam_stack()

        # Updating the stack on the master account in deployment region
        cloudformation = CloudFormation(
            region=config.deployment_account_region,
            deployment_account_region=config.deployment_account_region,
            role=boto3,
            wait=True,
            stack_name=None,
            s3=s3,
            s3_key_path='adf-build',
            account_id=ACCOUNT_ID)
        cloudformation.create_stack()
        threads = []
        account_ids = [
            account_id["Id"] for account_id in organizations.get_accounts()
        ]
        for account_id in [
                account for account in account_ids
                if account != deployment_account_id
        ]:
            thread = PropagatingThread(target=worker_thread,
                                       args=(account_id, sts, config, s3,
                                             cache, kms_and_bucket_dict))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        LOGGER.info("Executing Step Function on Deployment Account")
        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=account_ids,
            update_pipelines_only=0)

        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info(
            'A Deployment Account is ready to be bootstrapped! '
            'The Account provisioner will now kick into action, '
            'be sure to check out its progress in AWS Step Functions in this account.'
        )
        return
Code example #16
File: app.py Project: yadwinderpaul/org-management
from .exceptions import HttpException

flask_app = Flask(__name__)

app = {}

config = InitConfig(app)
app['config'] = config

logger = InitLogger(app)
app['logger'] = logger

adapter = InitAdapter(app)
app['organizations_adapter'] = adapter

organizations = Organizations(app)
app['organizations'] = organizations

ctrl = Controller(app)


def success_response(payload={}, meta={}, http_code=200):
    return jsonify({
        'success': True,
        'payload': payload,
        'meta': meta
    }), http_code


@flask_app.route('/', methods=['GET', 'POST'])
def index():
Code example #17
def main():
    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')

        config = Config()
        config.store_config()

        if deployment_account_id is None:
            raise NotConfiguredError(
                "Deployment Account has not yet been configured")

        organizations = Organizations(boto3, deployment_account_id)

        sts = STS(boto3)
        ou_id = organizations.get_parent_info().get("ou_parent_id")

        deployment_account_role = ensure_deployment_account_configured(
            sts, config, ou_id, deployment_account_id)

        cache = Cache()
        account_path = organizations.build_account_path(
            ou_id,
            [],  # Initial empty array to hold OU Path
            cache)

        s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET)

        # (First) Setup the Deployment Account in all regions (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                file_path=None,
            )

            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in account_ids:
            t = PropagatingThread(target=worker_thread,
                                  args=(account_id, sts, config, s3, cache))
            t.start()
            threads.append(t)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=[
                i for i in account_ids if i != deployment_account_id
            ],
            update_pipelines_only=False)

        step_functions.execute_statemachine()

    except NotConfiguredError as not_configured_error:
        LOGGER.info(not_configured_error)
Code example #18
    def apply(self, organizations, parameter_store, config):  #pylint: disable=R0912, R0915
        status = organizations.get_organization_info()

        if status.get('feature_set') != 'ALL':
            LOGGER.info(
            'All Features are currently NOT enabled for this Organization; this is required to apply SCPs'
            )
            return

        organizations.enable_scp()
        scps = SCP._find_all()
        organization_mapping = organizations.get_organization_map(
            {'/': organizations.get_ou_root_id()})
        scp_keep_full_access = config.get('scp')

        try:
            current_stored_scps = ast.literal_eval(
                parameter_store.fetch_parameter('scp'))
            for stored_scp in current_stored_scps:
                path = SCP._trim_scp_file_name(stored_scp)
                if scp_keep_full_access:
                    if scp_keep_full_access.get(
                            'keep-default-scp') != 'enabled':
                        try:
                            organizations.detach_scp(
                                'p-FullAWSAccess', organization_mapping[path])
                        except organizations.client.exceptions.PolicyNotAttachedException:
                            LOGGER.info(
                                'FullAWSAccess will stay detached since keep-default-scp is not enabled. Path is: %s',
                                path)
                    else:
                        try:
                            organizations.attach_scp(
                                'p-FullAWSAccess', organization_mapping[path])
                        except organizations.client.exceptions.DuplicatePolicyAttachmentException:
                            LOGGER.info(
                                'FullAWSAccess will stay attached since keep-default-scp is enabled. Path is: %s',
                                path)
                if stored_scp not in scps:
                    scp_id = organizations.describe_scp_id_for_target(
                        organization_mapping[path])
                    try:
                        organizations.attach_scp('p-FullAWSAccess',
                                                 organization_mapping[path])
                    except organizations.client.exceptions.DuplicatePolicyAttachmentException:
                        pass
                    organizations.detach_scp(scp_id,
                                             organization_mapping[path])
                    organizations.delete_scp(scp_id)
                    LOGGER.info('SCP %s will be deleted. Path is: %s',
                                organization_mapping[path], path)
        except ParameterNotFoundError:
            pass

        for scp in scps:
            path = SCP._trim_scp_file_name(scp)
            scp_id = organizations.describe_scp_id_for_target(
                organization_mapping[path])
            proposed_scp = Organizations.get_scp_body(scp)
            if scp_id:
                current_scp = organizations.describe_scp(scp_id)
                if self._compare_ordered_policy(
                        current_scp.get('Content')
                ) == self._compare_ordered_policy(proposed_scp):
                    LOGGER.info(
                        'SCP %s does not require updating. Path is: %s',
                        organization_mapping[path], path)
                    continue
                LOGGER.info('SCP will be updated for %s. Path is: %s',
                            organization_mapping[path], path)
                organizations.update_scp(proposed_scp, scp_id)
                continue
            try:
                policy_id = organizations.create_scp(proposed_scp, path)
                LOGGER.info('SCP has been created for %s. Path is: %s',
                            organization_mapping[path], path)
                organizations.attach_scp(policy_id, organization_mapping[path])
            except organizations.client.exceptions.DuplicatePolicyException:
                LOGGER.info(
                    'SCP for %s already exists but was not attached, attaching.',
                    organization_mapping[path])
                policy_id = organizations.list_scps('adf-scp-{0}'.format(path))
                organizations.attach_scp(policy_id, organization_mapping[path])

        parameter_store.put_parameter('scp', str(scps))
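
The idempotency check above hinges on `_compare_ordered_policy`. A plausible standalone sketch: recursively sort a policy document so that two semantically identical JSON bodies compare equal regardless of key or list ordering (an assumption about the helper, shown for illustration):

def compare_ordered_policy(obj):
    # Normalise dicts and lists recursively so ordering differences
    # do not register as policy changes.
    if isinstance(obj, dict):
        return sorted(
            (key, compare_ordered_policy(value)) for key, value in obj.items()
        )
    if isinstance(obj, list):
        return sorted(compare_ordered_policy(item) for item in obj)
    return obj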
Code example #19
def create_account_tags(account_id, tags, org_session: Organizations):
    LOGGER.info(f"Ensuring Account: {account_id} has tags: {tags}")
    org_session.create_account_tags(account_id, tags)
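
On the wrapper side, `create_account_tags` plausibly maps the configured tags onto the Organizations `TagResource` API. A sketch assuming the tags arrive as a list of single-entry dicts (both the input shape and the standalone function are illustrative assumptions):

import boto3


def create_account_tags(org_client, account_id, tags):
    # Flatten [{'cost-center': '123'}, ...] into the Key/Value pairs
    # expected by organizations.tag_resource.
    formatted_tags = [
        {"Key": key, "Value": value}
        for tag in tags
        for key, value in tag.items()
    ]
    org_client.tag_resource(ResourceId=account_id, Tags=formatted_tags)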
Code example #20
def main():
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id')

        organizations = Organizations(boto3, deployment_account_id)

        sts = STS(boto3)
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config)

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(ou_id=ou_id,
                                                        account_path=[],
                                                        cache=cache)
        s3 = S3(REGION_DEFAULT, boto3, S3_BUCKET_NAME)

        # First Setup the Deployment Account in all regions (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(
                set([config.deployment_account_region] +
                    config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path)

            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation)

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in account_ids:
            t = PropagatingThread(target=worker_thread,
                                  args=(account_id, sts, config, s3, cache))
            t.start()
            threads.append(t)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=[
                i for i in account_ids if i != deployment_account_id
            ],
            update_pipelines_only=1)

        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info("Deployment Account has not yet been Bootstrapped.")
        return
Code example #21
def cls():
    return Organizations(
        boto3,
        '12345678910'
    )
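
This helper reads like a pytest fixture: decorated with `@pytest.fixture`, it hands each test a ready-made `Organizations` instance bound to a dummy account id (illustrative usage, assuming the `Organizations` class from the other examples is importable):

import pytest


@pytest.fixture
def cls():
    return Organizations(
        boto3,
        '12345678910'
    )


def test_uses_fixture(cls):
    # pytest injects the Organizations instance built by the fixture.
    assert isinstance(cls, Organizations)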
Code example #22
File: test.py Project: Belyashi/LogisticTask
def test_get_driver_id():
    print(d.get_driver_id(4))


m = Map()


def test_map_get_all_cities():
    print(m.get_all_cities())


def test_map_get_all_ways():
    print(m.get_all_ways())


o = Organizations()


def test_get_organization_id():
    print(o.get_organization_id(2))


def test_organization_orders():
    print(o.get_orders(1))
    print(o.get_orders(2))
    print(o.get_orders(3))

    o.add_order(3, 2, 3)
    print(o.get_orders(3))

Code example #23
File: main.py Project: atre/aws-deployment-framework
def main(): #pylint: disable=R0915
    LOGGER.info("ADF Version %s", ADF_VERSION)
    LOGGER.info("ADF Log Level is %s", ADF_LOG_LEVEL)

    scp = SCP()
    config = Config()
    config.store_config()

    try:
        parameter_store = ParameterStore(REGION_DEFAULT, boto3)
        deployment_account_id = parameter_store.fetch_parameter(
            'deployment_account_id'
        )
        organizations = Organizations(
            role=boto3,
            account_id=deployment_account_id
        )
        scp.apply(organizations, parameter_store, config.config)

        sts = STS()
        deployment_account_role = prepare_deployment_account(
            sts=sts,
            deployment_account_id=deployment_account_id,
            config=config
        )

        cache = Cache()
        ou_id = organizations.get_parent_info().get("ou_parent_id")
        account_path = organizations.build_account_path(
            ou_id=ou_id,
            account_path=[],
            cache=cache
        )
        s3 = S3(
            region=REGION_DEFAULT,
            bucket=S3_BUCKET_NAME
        )

        # Updating the stack on the master account in deployment region
        cloudformation = CloudFormation(
            region=config.deployment_account_region,
            deployment_account_region=config.deployment_account_region, # pylint: disable=R0801
            role=boto3,
            wait=True,
            stack_name=None,
            s3=s3,
            s3_key_path='adf-build',
            account_id=ACCOUNT_ID
        )
        cloudformation.create_stack()

        # First Setup/Update the Deployment Account in all regions (KMS Key and S3 Bucket + Parameter Store values)
        for region in list(set([config.deployment_account_region] + config.target_regions)):
            cloudformation = CloudFormation(
                region=region,
                deployment_account_region=config.deployment_account_region,
                role=deployment_account_role,
                wait=True,
                stack_name=None,
                s3=s3,
                s3_key_path=account_path,
                account_id=deployment_account_id
            )
            cloudformation.create_stack()

            update_deployment_account_output_parameters(
                deployment_account_region=config.deployment_account_region,
                region=region,
                deployment_account_role=deployment_account_role,
                cloudformation=cloudformation
            )

        threads = []
        account_ids = organizations.get_account_ids()
        for account_id in [account for account in account_ids if account != deployment_account_id]:
            thread = PropagatingThread(target=worker_thread, args=(
                account_id,
                sts,
                config,
                s3,
                cache
            ))
            thread.start()
            threads.append(thread)

        for thread in threads:
            thread.join()

        step_functions = StepFunctions(
            role=deployment_account_role,
            deployment_account_id=deployment_account_id,
            deployment_account_region=config.deployment_account_region,
            regions=config.target_regions,
            account_ids=account_ids,
            update_pipelines_only=0
        )

        step_functions.execute_statemachine()
    except ParameterNotFoundError:
        LOGGER.info(
            'You are now ready to bootstrap a deployment account '
            'by moving it into your deployment OU. '
            'Once you have moved it into the deployment OU, '
            'be sure to check out its progress in AWS Step Functions'
        )
        return
Code example #24
    def apply(self, organizations, parameter_store, config):  # pylint: disable=R0912, R0915
        status = organizations.get_organization_info()
        if status.get('feature_set') != 'ALL':
            LOGGER.info(
                'All Features are currently NOT enabled for this Organization; this is required to apply SCPs or Tagging Policies'
            )
            return

        LOGGER.info(
            'Determining if Organization Policy changes are required. (Tagging or Service Controls)'
        )
        organization_mapping = organizations.get_organization_map(
            {'/': organizations.get_ou_root_id()})
        for policy in ['scp', 'tagging-policy']:
            _type = 'SERVICE_CONTROL_POLICY' if policy == 'scp' else 'TAG_POLICY'
            organizations.enable_organization_policies(_type)
            _policies = OrganizationPolicy._find_all(policy)
            try:
                current_stored_policy = ast.literal_eval(
                    parameter_store.fetch_parameter(policy))
                for stored_policy in current_stored_policy:
                    path = OrganizationPolicy._trim_scp_file_name(
                        stored_policy
                    ) if policy == 'scp' else OrganizationPolicy._trim_tagging_policy_file_name(
                        stored_policy)
                    OrganizationPolicy.set_scp_attachment(
                        config.get('scp'), organization_mapping, path,
                        organizations)
                    if stored_policy not in _policies:
                        OrganizationPolicy.clean_and_remove_policy_attachment(
                            organization_mapping, path, organizations, _type)
            except ParameterNotFoundError:
                LOGGER.debug(
                    'Parameter %s was not found in Parameter Store, continuing.',
                    policy)

            for _policy in _policies:
                path = OrganizationPolicy._trim_scp_file_name(
                    _policy
                ) if policy == 'scp' else OrganizationPolicy._trim_tagging_policy_file_name(
                    _policy)
                policy_id = organizations.describe_policy_id_for_target(
                    organization_mapping[path], _type)
                proposed_policy = Organizations.get_policy_body(_policy)
                if policy_id:
                    current_policy = organizations.describe_policy(policy_id)
                    if self._compare_ordered_policy(
                            current_policy.get('Content')
                    ) == self._compare_ordered_policy(proposed_policy):
                        LOGGER.info(
                            'Policy (%s) %s does not require updating. Path is: %s',
                            policy, organization_mapping[path], path)
                        continue
                    LOGGER.info(
                        'Policy (%s) will be updated for %s. Path is: %s',
                        policy, organization_mapping[path], path)
                    organizations.update_policy(proposed_policy, policy_id)
                    continue
                try:
                    policy_id = organizations.create_policy(
                        proposed_policy, path, _type)
                    LOGGER.info(
                        'Policy (%s) has been created for %s. Path is: %s',
                        policy, organization_mapping[path], path)
                    organizations.attach_policy(policy_id,
                                                organization_mapping[path])
                except organizations.client.exceptions.DuplicatePolicyException:
                    LOGGER.info(
                        'Policy (%s) for %s already exists; ensuring it is attached.',
                        policy, organization_mapping[path])
                    policy_id = organizations.list_policies(
                        'adf-{0}-{1}'.format(policy, path), _type)
                    organizations.attach_policy(policy_id,
                                                organization_mapping[path])
            parameter_store.put_parameter(policy, str(_policies))
Code example #25
def get_all_accounts():
    org_client = Organizations(boto3)
    return org_client.get_accounts()
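
Finally, `get_accounts` plausibly wraps the paginated `ListAccounts` call. A minimal standalone sketch using boto3's paginator (an illustration, not the ADF source):

import boto3


def get_accounts(org_client):
    # Collect every account in the organization across all result pages.
    accounts = []
    for page in org_client.get_paginator("list_accounts").paginate():
        accounts.extend(page["Accounts"])
    return accounts


# Usage sketch: get_accounts(boto3.client("organizations"))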