Example #1
def cleanup_repos(bucket_cleanup):
    yield None
    ecr = ck.aws.clients["ecr"]
    config_file = ck.config.get_config_file()
    section_suffix = ck.get_profile() + " " + ck.get_region()
    repos_section_name = "docker-repos " + section_suffix

    # Clean up repos from AWS
    # -----------------------
    # Get all repos with unit test prefix in the name
    response = ecr.describe_repositories()
    repos = [
        r for r in response.get("repositories")
        if ("unit_testing_func" in r["repositoryName"] or "test_func_input" in
            r["repositoryName"] or "simple_unit_testing_func" in
            r["repositoryName"] or UNIT_TEST_PREFIX in r["repositoryName"])
    ]

    # Delete the AWS ECR repo
    for r in repos:
        ecr.delete_repository(registryId=r["registryId"],
                              repositoryName=r["repositoryName"],
                              force=True)

    # Clean up repos from config file
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
        for repo_name in config.options(repos_section_name):
            if UNIT_TEST_PREFIX in repo_name:
                config.remove_option(repos_section_name, repo_name)
        with open(config_file, "w") as f:
            config.write(f)
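
A note on the cleanup above: ecr.describe_repositories returns paginated results, so a single call can miss repositories when more than one page exists. A hedged sketch of collecting every page with nextToken, mirroring the pattern the Batch cleanup later in this document already uses (assumes cloudknot is imported as ck, as in these examples):

import cloudknot as ck  # assumed import; the examples reference the module as ck

ecr = ck.aws.clients["ecr"]

# Collect every page of repositories before filtering on UNIT_TEST_PREFIX
response = ecr.describe_repositories()
repositories = response.get("repositories")
while response.get("nextToken"):
    response = ecr.describe_repositories(nextToken=response.get("nextToken"))
    repositories = repositories + response.get("repositories")
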
Example #2
def cleanup_repos(bucket_cleanup):
    yield None
    ecr = ck.aws.clients['ecr']
    config_file = ck.config.get_config_file()
    section_suffix = ck.get_profile() + ' ' + ck.get_region()
    repos_section_name = 'docker-repos ' + section_suffix

    # Clean up repos from AWS
    # -----------------------
    # Get all repos with unit test prefix in the name
    response = ecr.describe_repositories()
    repos = [r for r in response.get('repositories')
             if ('unit-testing-func' in r['repositoryName']
                 or 'test-func-input' in r['repositoryName']
                 or 'simple-unit-testing-func' in r['repositoryName']
                 or UNIT_TEST_PREFIX in r['repositoryName'])]

    # Delete the AWS ECR repo
    for r in repos:
        ecr.delete_repository(
            registryId=r['registryId'],
            repositoryName=r['repositoryName'],
            force=True
        )

    # Clean up repos from config file
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
        for repo_name in config.options(repos_section_name):
            if UNIT_TEST_PREFIX in repo_name:
                config.remove_option(repos_section_name, repo_name)
        with open(config_file, 'w') as f:
            config.write(f)
Example #3
def test_set_profile(bucket_cleanup):
    old_credentials_file = os.environ.get("AWS_SHARED_CREDENTIALS_FILE")
    old_aws_config_file = os.environ.get("AWS_CONFIG_FILE")
    old_ck_config_file = os.environ.get("CLOUDKNOT_CONFIG_FILE")

    ref_dir = op.join(data_path, "profiles_ref_data")
    ck_config_file = op.join(ref_dir, "cloudknot_without_profile")
    shutil.copy(ck_config_file, ck_config_file + ".bak")
    try:
        os.environ["CLOUDKNOT_CONFIG_FILE"] = ck_config_file

        config_file = op.join(ref_dir, "config")
        os.environ["AWS_CONFIG_FILE"] = config_file

        cred_file = op.join(ref_dir, "credentials_without_default")
        os.environ["AWS_SHARED_CREDENTIALS_FILE"] = cred_file

        with pytest.raises(ck.aws.CloudknotInputError):
            ck.set_profile(profile_name="not_in_list_of_profiles")

        profile = "name-5"
        ck.set_profile(profile_name=profile)
        assert ck.get_profile() == profile
    finally:
        shutil.move(ck_config_file + ".bak", ck_config_file)

        if old_credentials_file:
            os.environ["AWS_SHARED_CREDENTIALS_FILE"] = old_credentials_file
        else:
            os.environ.pop("AWS_SHARED_CREDENTIALS_FILE")

        if old_aws_config_file:
            os.environ["AWS_CONFIG_FILE"] = old_aws_config_file
        else:
            os.environ.pop("AWS_CONFIG_FILE")

        if old_ck_config_file:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = old_ck_config_file
        else:
            os.environ.pop("CLOUDKNOT_CONFIG_FILE")

        ck.refresh_clients()
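
The manual save/restore of AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE, and CLOUDKNOT_CONFIG_FILE above could also be expressed with pytest's built-in monkeypatch fixture, which reverts environment changes automatically at teardown. A minimal sketch, not what the original test uses; the test name and paths are illustrative:

import os.path as op


def test_set_profile_sketch(monkeypatch):
    # Hypothetical paths; the real test builds them from data_path
    ref_dir = op.join("tests", "data", "profiles_ref_data")
    monkeypatch.setenv("CLOUDKNOT_CONFIG_FILE",
                       op.join(ref_dir, "cloudknot_without_profile"))
    monkeypatch.setenv("AWS_CONFIG_FILE", op.join(ref_dir, "config"))
    monkeypatch.setenv("AWS_SHARED_CREDENTIALS_FILE",
                       op.join(ref_dir, "credentials_without_default"))
    # ... exercise ck.set_profile() / ck.get_profile() as in the test above ...
    # monkeypatch undoes every setenv call once the test finishes
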
Example #4
def test_DockerRepo(bucket_cleanup):
    ck.refresh_clients()
    ecr = ck.aws.clients["ecr"]
    config = configparser.ConfigParser()
    config_file = ck.config.get_config_file()
    repo_section_name = "docker-repos " + ck.get_profile(
    ) + " " + ck.get_region()

    try:
        name = get_testing_name()

        # Use boto3 to create an ECR repo
        response = ecr.create_repository(repositoryName=name)

        repo_name = response["repository"]["repositoryName"]
        repo_uri = response["repository"]["repositoryUri"]
        repo_registry_id = response["repository"]["registryId"]

        # Retrieve that same repo with cloudknot
        dr = ck.aws.DockerRepo(name=name)

        assert dr.name == repo_name
        assert dr.repo_uri == repo_uri
        assert dr.repo_registry_id == repo_registry_id

        # Confirm that the docker repo is in the config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert name in config.options(repo_section_name)

        # Clobber the docker repo
        dr.clobber()

        retry = tenacity.Retrying(
            wait=tenacity.wait_exponential(max=16),
            stop=tenacity.stop_after_delay(180),
            retry=tenacity.retry_unless_exception_type((
                ecr.exceptions.RepositoryNotFoundException,
                botocore.exceptions.ClientError,
            )),
        )

        # Assert that it was removed from AWS
        with pytest.raises((
                ecr.exceptions.RepositoryNotFoundException,
                botocore.exceptions.ClientError,
        )):
            retry.call(ecr.describe_repositories, repositoryNames=[name])

        # Assert that it was removed from the config file
        # If we just re-read the config file, config will keep the union
        # of the in-memory values and the file values, updating the
        # intersection of the two with the file values. So we must clear
        # config and then re-read the file (see the standalone configparser
        # sketch after this example)
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert name not in config.options(repo_section_name)

        # Now create a new repo using only cloudknot
        name = get_testing_name()
        dr = ck.aws.DockerRepo(name=name)

        # Confirm that it exists on AWS and retrieve its properties
        retry = tenacity.Retrying(
            wait=tenacity.wait_exponential(max=16),
            stop=tenacity.stop_after_delay(60),
            retry=tenacity.retry_if_exception_type((
                ecr.exceptions.RepositoryNotFoundException,
                botocore.exceptions.ClientError,
            )),
        )

        response = retry.call(ecr.describe_repositories,
                              repositoryNames=[name])

        repo_name = response["repositories"][0]["repositoryName"]
        repo_uri = response["repositories"][0]["repositoryUri"]
        repo_registry_id = response["repositories"][0]["registryId"]

        assert dr.name == repo_name
        assert dr.repo_uri == repo_uri
        assert dr.repo_registry_id == repo_registry_id

        # Confirm that the docker repo is in the config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert name in config.options(repo_section_name)

        # Delete the repo from AWS before clobbering
        ecr.delete_repository(registryId=repo_registry_id,
                              repositoryName=repo_name,
                              force=True)

        # Clobber the docker repo
        dr.clobber()

        retry = tenacity.Retrying(
            wait=tenacity.wait_exponential(max=16),
            stop=tenacity.stop_after_delay(180),
            retry=tenacity.retry_unless_exception_type((
                ecr.exceptions.RepositoryNotFoundException,
                botocore.exceptions.ClientError,
            )),
        )

        # Assert that it was removed from AWS
        with pytest.raises((
                ecr.exceptions.RepositoryNotFoundException,
                botocore.exceptions.ClientError,
        )):
            retry.call(ecr.describe_repositories, repositoryNames=[name])

        # Assert that it was removed from the config file
        # If we just re-read the config file, config will keep the union
        # of the in-memory values and the file values, updating the
        # intersection of the two with the file values. So we must clear
        # config and then re-read the file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert name not in config.options(repo_section_name)
    except Exception:
        response = ecr.describe_repositories()

        # Get all repos with unit test prefix in the name
        repos = [
            r for r in response.get("repositories")
            if UNIT_TEST_PREFIX in r["repositoryName"]
        ]

        # Delete the AWS ECR repo
        for r in repos:
            ecr.delete_repository(
                registryId=r["registryId"],
                repositoryName=r["repositoryName"],
                force=True,
            )

        # Clean up config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)
            try:
                for name in config.options(repo_section_name):
                    if UNIT_TEST_PREFIX in name:
                        config.remove_option(repo_section_name, name)
            except configparser.NoSectionError:
                pass

            with open(config_file, "w") as f:
                config.write(f)

        raise
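
The comment in the example above about clearing config before re-reading refers to how configparser merges: ConfigParser.read() and read_string() update the parser in place, keeping options already in memory even when the file no longer contains them. A minimal, standard-library-only sketch of that behavior (section and option names are illustrative):

import configparser

config = configparser.ConfigParser()
config.read_string("[docker-repos]\nstale-repo = uri\n")

# Re-reading content that no longer lists 'stale-repo' does not remove it;
# read()/read_string() merge file values into the in-memory state.
config.read_string("[docker-repos]\nfresh-repo = uri\n")
assert "stale-repo" in config.options("docker-repos")

# A fresh parser reflects only what is actually in the file.
config = configparser.ConfigParser()
config.read_string("[docker-repos]\nfresh-repo = uri\n")
assert "stale-repo" not in config.options("docker-repos")
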
Example #5
def test_get_profile(bucket_cleanup):
    try:
        old_credentials_file = os.environ["AWS_SHARED_CREDENTIALS_FILE"]
    except KeyError:
        old_credentials_file = None

    try:
        old_aws_config_file = os.environ["AWS_CONFIG_FILE"]
    except KeyError:
        old_aws_config_file = None

    try:
        old_ck_config_file = os.environ["CLOUDKNOT_CONFIG_FILE"]
    except KeyError:
        old_ck_config_file = None

    ref_dir = op.join(data_path, "profiles_ref_data")
    ck_config_with_profile = op.join(ref_dir, "cloudknot_with_profile")
    ck_config_without_profile = op.join(ref_dir, "cloudknot_without_profile")

    shutil.copy(ck_config_with_profile, ck_config_with_profile + ".bak")
    shutil.copy(ck_config_without_profile, ck_config_without_profile + ".bak")
    try:
        os.environ["CLOUDKNOT_CONFIG_FILE"] = ck_config_with_profile

        assert ck.get_profile() == "profile_from_cloudknot_config"

        os.environ["CLOUDKNOT_CONFIG_FILE"] = ck_config_without_profile

        config_file = op.join(ref_dir, "config")
        os.environ["AWS_CONFIG_FILE"] = config_file

        cred_file = op.join(ref_dir, "credentials_without_default")
        os.environ["AWS_SHARED_CREDENTIALS_FILE"] = cred_file

        assert ck.get_profile(fallback=None) is None
        assert ck.get_profile() == "from-env"

        cred_file = op.join(ref_dir, "credentials_with_default")
        os.environ["AWS_SHARED_CREDENTIALS_FILE"] = cred_file

        assert ck.get_profile() == "default"
    finally:
        shutil.move(ck_config_with_profile + ".bak", ck_config_with_profile)
        shutil.move(ck_config_without_profile + ".bak",
                    ck_config_without_profile)

        if old_credentials_file:
            os.environ["AWS_SHARED_CREDENTIALS_FILE"] = old_credentials_file
        else:
            try:
                del os.environ["AWS_SHARED_CREDENTIALS_FILE"]
            except KeyError:
                pass

        if old_aws_config_file:
            os.environ["AWS_CONFIG_FILE"] = old_aws_config_file
        else:
            try:
                del os.environ["AWS_CONFIG_FILE"]
            except KeyError:
                pass

        if old_ck_config_file:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = old_ck_config_file
        else:
            try:
                del os.environ["CLOUDKNOT_CONFIG_FILE"]
            except KeyError:
                pass

        ck.refresh_clients()
Example #6
def test_get_profile(bucket_cleanup):
    try:
        old_credentials_file = os.environ['AWS_SHARED_CREDENTIALS_FILE']
    except KeyError:
        old_credentials_file = None

    try:
        old_aws_config_file = os.environ['AWS_CONFIG_FILE']
    except KeyError:
        old_aws_config_file = None

    try:
        old_ck_config_file = os.environ['CLOUDKNOT_CONFIG_FILE']
    except KeyError:
        old_ck_config_file = None

    ref_dir = op.join(data_path, 'profiles_ref_data')
    ck_config_with_profile = op.join(ref_dir, 'cloudknot_with_profile')
    ck_config_without_profile = op.join(ref_dir, 'cloudknot_without_profile')

    shutil.copy(ck_config_with_profile, ck_config_with_profile + '.bak')
    shutil.copy(ck_config_without_profile, ck_config_without_profile + '.bak')
    try:
        os.environ['CLOUDKNOT_CONFIG_FILE'] = ck_config_with_profile

        assert ck.get_profile() == 'profile_from_cloudknot_config'

        os.environ['CLOUDKNOT_CONFIG_FILE'] = ck_config_without_profile

        config_file = op.join(ref_dir, 'config')
        os.environ['AWS_CONFIG_FILE'] = config_file

        cred_file = op.join(ref_dir, 'credentials_without_default')
        os.environ['AWS_SHARED_CREDENTIALS_FILE'] = cred_file

        assert ck.get_profile(fallback=None) is None
        assert ck.get_profile() == 'from-env'

        cred_file = op.join(ref_dir, 'credentials_with_default')
        os.environ['AWS_SHARED_CREDENTIALS_FILE'] = cred_file

        assert ck.get_profile() == 'default'
    finally:
        shutil.move(ck_config_with_profile + '.bak', ck_config_with_profile)
        shutil.move(ck_config_without_profile + '.bak',
                    ck_config_without_profile)

        if old_credentials_file:
            os.environ['AWS_SHARED_CREDENTIALS_FILE'] = old_credentials_file
        else:
            try:
                del os.environ['AWS_SHARED_CREDENTIALS_FILE']
            except KeyError:
                pass

        if old_aws_config_file:
            os.environ['AWS_CONFIG_FILE'] = old_aws_config_file
        else:
            try:
                del os.environ['AWS_CONFIG_FILE']
            except KeyError:
                pass

        if old_ck_config_file:
            os.environ['CLOUDKNOT_CONFIG_FILE'] = old_ck_config_file
        else:
            try:
                del os.environ['CLOUDKNOT_CONFIG_FILE']
            except KeyError:
                pass

        ck.refresh_clients()
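
The repeated try/del/except KeyError blocks that restore environment variables in test_get_profile above can be written more compactly with dict.pop and a default. A small sketch of an equivalent helper (the helper name is illustrative, not part of the original tests):

import os


def restore_env(name, old_value):
    """Restore an environment variable, or unset it if it had no prior value."""
    if old_value:
        os.environ[name] = old_value
    else:
        # pop with a default is a no-op when the variable is already unset
        os.environ.pop(name, None)
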
def cleanup(bucket_cleanup):
    """Use this fixture to delete all unit testing resources
    regardless of of the failure or success of the test"""
    yield None
    iam = ck.aws.clients['iam']
    ec2 = ck.aws.clients['ec2']
    batch = ck.aws.clients['batch']
    ecs = ck.aws.clients['ecs']
    config_file = ck.config.get_config_file()
    section_suffix = ck.get_profile() + ' ' + ck.get_region()
    jq_section_name = 'job-queues ' + section_suffix
    ce_section_name = 'compute-environments ' + section_suffix
    jd_section_name = 'job-definitions ' + section_suffix
    roles_section_name = 'roles ' + ck.get_profile() + ' global'
    vpc_section_name = 'vpc ' + section_suffix
    sg_section_name = 'security-groups ' + section_suffix

    retry = tenacity.Retrying(wait=tenacity.wait_exponential(max=16),
                              stop=tenacity.stop_after_delay(120),
                              retry=tenacity.retry_if_exception_type(
                                  batch.exceptions.ClientException))

    # Clean up job queues from AWS
    # ----------------------------
    # Find all unit testing job queues
    response = batch.describe_job_queues()

    job_queues = [{
        'name': d['jobQueueName'],
        'arn': d['jobQueueArn'],
        'state': d['state'],
        'status': d['status']
    } for d in response.get('jobQueues')]

    while response.get('nextToken'):
        response = batch.describe_job_queues(
            nextToken=response.get('nextToken'))

        job_queues = job_queues + [{
            'name': d['jobQueueName'],
            'arn': d['jobQueueArn'],
            'state': d['state'],
            'status': d['status']
        } for d in response.get('jobQueues')]

    unit_test_JQs = list(
        filter(lambda d: UNIT_TEST_PREFIX in d['name'], job_queues))

    enabled = list(filter(lambda d: d['state'] == 'ENABLED', unit_test_JQs))

    for jq in enabled:
        ck.aws.wait_for_job_queue(name=jq['name'], max_wait_time=180)
        retry.call(batch.update_job_queue,
                   jobQueue=jq['arn'],
                   state='DISABLED')

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

    requires_deletion = list(
        filter(lambda d: d['status'] not in ['DELETED', 'DELETING'],
               unit_test_JQs))

    for jq in requires_deletion:
        ck.aws.wait_for_job_queue(name=jq['name'], max_wait_time=180)

        # Finally, delete the job queue
        retry.call(batch.delete_job_queue, jobQueue=jq['arn'])

        # Clean up config file
        try:
            config.remove_option(jq_section_name, jq['name'])
        except configparser.NoSectionError:
            pass

    with open(config_file, 'w') as f:
        config.write(f)

    # Clean up compute environments from AWS
    # --------------------------------------
    # Find all unit testing compute environments
    response = batch.describe_compute_environments()

    comp_envs = [{
        'name': d['computeEnvironmentName'],
        'arn': d['computeEnvironmentArn'],
        'state': d['state'],
        'status': d['status']
    } for d in response.get('computeEnvironments')]

    while response.get('nextToken'):
        response = batch.describe_compute_environments(
            nextToken=response.get('nextToken'))

        comp_envs = comp_envs + [{
            'name': d['computeEnvironmentName'],
            'arn': d['computeEnvironmentArn'],
            'state': d['state'],
            'status': d['status']
        } for d in response.get('computeEnvironments')]

    unit_test_CEs = list(
        filter(lambda d: UNIT_TEST_PREFIX in d['name'], comp_envs))

    enabled = list(filter(lambda d: d['state'] == 'ENABLED', unit_test_CEs))

    for ce in enabled:
        ck.aws.wait_for_compute_environment(arn=ce['arn'],
                                            name=ce['name'],
                                            log=False)

        # Set the compute environment state to 'DISABLED'
        retry.call(batch.update_compute_environment,
                   computeEnvironment=ce['arn'],
                   state='DISABLED')

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

    for ce in unit_test_CEs:
        # Then disassociate from any job queues
        response = batch.describe_job_queues()
        associated_queues = list(
            filter(
                lambda q: ce['arn'] in [
                    c['computeEnvironment']
                    for c in q['computeEnvironmentOrder']
                ], response.get('jobQueues')))

        for queue in associated_queues:
            arn = queue['jobQueueArn']
            name = queue['jobQueueName']

            # Disable submissions to the queue
            if queue['state'] == 'ENABLED':
                ck.aws.wait_for_job_queue(name=name,
                                          log=True,
                                          max_wait_time=180)
                retry.call(batch.update_job_queue,
                           jobQueue=arn,
                           state='DISABLED')

            # Delete the job queue
            if queue['status'] not in ['DELETED', 'DELETING']:
                ck.aws.wait_for_job_queue(name=name,
                                          log=True,
                                          max_wait_time=180)
                retry.call(batch.delete_job_queue, jobQueue=arn)

            # Clean up config file
            try:
                config.remove_option(jq_section_name, name)
            except configparser.NoSectionError:
                pass

    requires_deletion = list(
        filter(lambda d: d['status'] not in ['DELETED', 'DELETING'],
               unit_test_CEs))

    for ce in requires_deletion:
        # Now get the associated ECS cluster
        response = batch.describe_compute_environments(
            computeEnvironments=[ce['arn']])
        cluster_arn = response.get('computeEnvironments')[0]['ecsClusterArn']

        # Get container instances
        response = ecs.list_container_instances(cluster=cluster_arn)
        instances = response.get('containerInstanceArns')

        for i in instances:
            ecs.deregister_container_instance(cluster=cluster_arn,
                                              containerInstance=i,
                                              force=True)

        # With no argument, retry_if_exception_type() retries on any Exception
        retry_if_exception = tenacity.Retrying(
            wait=tenacity.wait_exponential(max=16),
            stop=tenacity.stop_after_delay(120),
            retry=tenacity.retry_if_exception_type())
        retry_if_exception.call(ecs.delete_cluster, cluster=cluster_arn)

        ck.aws.wait_for_compute_environment(arn=ce['arn'],
                                            name=ce['name'],
                                            log=False)

        retry.call(batch.delete_compute_environment,
                   computeEnvironment=ce['arn'])

        # Clean up config file
        try:
            config.remove_option(ce_section_name, ce['name'])
        except configparser.NoSectionError:
            pass

    with open(config_file, 'w') as f:
        config.write(f)

    # Clean up job definitions from AWS
    # ---------------------------------
    # Find all unit testing job definitions
    response = batch.describe_job_definitions(status='ACTIVE')

    jds = [{
        'name': d['jobDefinitionName'],
        'arn': d['jobDefinitionArn']
    } for d in response.get('jobDefinitions')]

    unit_test_jds = list(filter(lambda d: UNIT_TEST_PREFIX in d['name'], jds))

    while response.get('nextToken'):
        response = batch.describe_job_definitions(
            status='ACTIVE', nextToken=response.get('nextToken'))

        jds = [{
            'name': d['jobDefinitionName'],
            'arn': d['jobDefinitionArn']
        } for d in response.get('jobDefinitions')]

        unit_test_jds = unit_test_jds + list(
            filter(lambda d: UNIT_TEST_PREFIX in d['name'], jds))

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

        for jd in unit_test_jds:
            # Deregister the job definition
            retry.call(batch.deregister_job_definition,
                       jobDefinition=jd['arn'])

            # Clean up config file
            try:
                config.remove_option(jd_section_name, jd['name'])
            except configparser.NoSectionError:
                pass

        with open(config_file, 'w') as f:
            config.write(f)

    # Clean up security_groups from AWS
    # ---------------------------------
    # Find all unit test security groups
    ec2_retry = tenacity.Retrying(wait=tenacity.wait_exponential(max=16),
                                  stop=tenacity.stop_after_delay(60),
                                  retry=tenacity.retry_if_exception_type(
                                      ec2.exceptions.ClientError))

    response = ec2.describe_security_groups()
    sgs = [{
        'name': d['GroupName'],
        'id': d['GroupId']
    } for d in response.get('SecurityGroups')]
    unit_test_sgs = filter(lambda d: UNIT_TEST_PREFIX in d['name'], sgs)

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

        for sg in unit_test_sgs:
            # Delete the security group
            ec2_retry.call(ec2.delete_security_group, GroupId=sg['id'])

            # Clean up config file
            try:
                config.remove_option(sg_section_name, sg['id'])
            except configparser.NoSectionError:
                pass

        with open(config_file, 'w') as f:
            config.write(f)

    # Clean up VPCs from AWS
    # ----------------------
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

        # Find all VPCs with a Name tag key
        response = ec2.describe_vpcs(Filters=[{
            'Name': 'tag-key',
            'Values': ['Name']
        }])

        for vpc in response.get('Vpcs'):
            # Test if the unit test prefix is in the 'Name' tag value
            name_tag = [d['Value'] for d in vpc['Tags']
                        if d['Key'] == 'Name'][0]
            if UNIT_TEST_PREFIX in name_tag:
                # Retrieve and delete subnets
                response = ec2.describe_subnets(
                    Filters=[{
                        'Name': 'vpc-id',
                        'Values': [vpc['VpcId']]
                    }])

                subnets = [d['SubnetId'] for d in response.get('Subnets')]

                for subnet_id in subnets:
                    ec2_retry.call(ec2.delete_subnet, SubnetId=subnet_id)

                response = ec2.describe_network_acls(
                    Filters=[{
                        'Name': 'vpc-id',
                        'Values': [vpc['VpcId']]
                    }, {
                        'Name': 'default',
                        'Values': ['false']
                    }])

                network_acl_ids = [
                    n['NetworkAclId'] for n in response.get('NetworkAcls')
                ]

                # Delete the network ACL
                for net_id in network_acl_ids:
                    ec2_retry.call(ec2.delete_network_acl, NetworkAclId=net_id)

                response = ec2.describe_route_tables(
                    Filters=[{
                        'Name': 'vpc-id',
                        'Values': [vpc['VpcId']]
                    }, {
                        'Name': 'association.main',
                        'Values': ['false']
                    }])

                route_table_ids = [
                    rt['RouteTableId'] for rt in response.get('RouteTables')
                ]

                # Delete the route table
                for rt_id in route_table_ids:
                    ec2_retry.call(ec2.delete_route_table, RouteTableId=rt_id)

                # Detach and delete the internet gateway
                response = ec2.describe_internet_gateways(
                    Filters=[{
                        'Name': 'attachment.vpc-id',
                        'Values': [vpc['VpcId']]
                    }])

                gateway_ids = [
                    g['InternetGatewayId']
                    for g in response.get('InternetGateways')
                ]

                for gid in gateway_ids:
                    ec2_retry.call(ec2.detach_internet_gateway,
                                   InternetGatewayId=gid,
                                   VpcId=vpc['VpcId'])
                    ec2_retry.call(ec2.delete_internet_gateway,
                                   InternetGatewayId=gid)

                # Delete the VPC
                ec2_retry.call(ec2.delete_vpc, VpcId=vpc['VpcId'])

                # Clean up config file
                try:
                    config.remove_option(vpc_section_name, vpc['VpcId'])
                except configparser.NoSectionError:
                    pass

        with open(config_file, 'w') as f:
            config.write(f)

    # Clean up roles from AWS
    # -----------------------
    # Find all unit test roles
    response = iam.list_roles()
    role_names = [d['RoleName'] for d in response.get('Roles')]
    unit_test_roles = filter(lambda n: UNIT_TEST_PREFIX in n, role_names)

    for role_name in unit_test_roles:
        # Remove instance profiles
        response = iam.list_instance_profiles_for_role(RoleName=role_name)
        for ip in response.get('InstanceProfiles'):
            iam.remove_role_from_instance_profile(
                InstanceProfileName=ip['InstanceProfileName'],
                RoleName=role_name)
            iam.delete_instance_profile(
                InstanceProfileName=ip['InstanceProfileName'])

        # Detach policies from role
        response = iam.list_attached_role_policies(RoleName=role_name)
        for policy in response.get('AttachedPolicies'):
            iam.detach_role_policy(RoleName=role_name,
                                   PolicyArn=policy['PolicyArn'])

        # Delete role
        iam.delete_role(RoleName=role_name)

    # Clean up config file
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
        for role_name in config.options(roles_section_name):
            if UNIT_TEST_PREFIX in role_name:
                config.remove_option(roles_section_name, role_name)
        with open(config_file, 'w') as f:
            config.write(f)
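
The cleanup and cleanup_repos generators above follow the pytest fixture teardown pattern: everything placed after the yield runs once the test finishes, whether it passed or failed. The fixture decorators are presumably applied elsewhere in the original module; a minimal sketch of the pattern with a hypothetical fixture name:

import pytest


@pytest.fixture
def example_teardown_fixture():
    # Setup (if any) goes here; the test body runs at the yield
    yield None
    # Teardown statements after the yield run once the test completes,
    # regardless of whether it passed or failed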