Exemplo n.º 1
0
def test_prune_repos(configured, aws_credentials):
    """Pruning should drop config entries only for repos deleted on AWS."""
    cfg_path = configured
    ck.refresh_clients()

    repos = [
        ck.aws.DockerRepo(name=n)
        for n in ("test-prune-0", "test-prune-1", "test-prune-2")
    ]

    def read_repo_section():
        # Re-read from disk with a fresh parser so stale in-memory state
        # cannot mask what prune actually wrote out.
        parser = configparser.ConfigParser()
        parser.read(cfg_path)
        return parser[repos[0]._section_name]

    # Delete repo 0 behind cloudknot's back, then prune the repos section.
    ck.aws.clients["ecr"].delete_repository(repositoryName=repos[0].name)
    ck.config.prune_repos()

    section = read_repo_section()
    assert repos[0].name not in section
    assert repos[1].name in section
    assert repos[2].name in section

    # Register a bogus resource id for repo 1, then prune everything.
    ck.config.add_resource(repos[1]._section_name, repos[1].name, "0123")
    ck.config.prune()

    section = read_repo_section()
    assert repos[1].name not in section
    assert repos[2].name in section
Exemplo n.º 2
0
def test_is_valid_stack(configured, aws_credentials):
    """is_valid_stack should flip to False once the stack is deleted."""
    ck.refresh_clients()

    stack_id = ck.Pars(name="test-valid-stack").stack_id

    # A freshly created PARS stack must validate.
    assert ck.config.is_valid_stack(stack_id=stack_id)

    # Delete it out-of-band; validation must now fail.
    ck.aws.clients["cloudformation"].delete_stack(StackName=stack_id)
    assert not ck.config.is_valid_stack(stack_id=stack_id)
Exemplo n.º 3
0
def test_prune_stacks(configured, aws_credentials):
    """prune_stacks should remove config sections for deleted stacks only."""
    cfg_path = configured
    ck.refresh_clients()

    doomed = ck.Pars(name="test-prune-0")
    kept = ck.Pars(name="test-prune-1")

    # Delete one stack out-of-band, then prune.
    ck.aws.clients["cloudformation"].delete_stack(StackName=doomed.stack_id)
    ck.config.prune_stacks()

    # Re-read the config from disk and check section membership.
    parser = configparser.ConfigParser()
    parser.read(cfg_path)
    sections = parser.sections()
    assert doomed.pars_name not in sections
    assert kept.pars_name in sections
Exemplo n.º 4
0
def test_get_tags(aws_credentials):
    """Test ck.aws.get_tags: default tags, merging additional tags (list or
    dict form), per-key overrides of the defaults, and input validation."""
    ck.refresh_clients()
    name = "test-name"
    tags_with_name_only = ck.aws.get_tags(name=name)
    # Reference default tags that get_tags is expected to emit.
    ref_name = {"Key": "Name", "Value": name}
    ref_owner = {"Key": "Owner", "Value": ck.aws.get_user()}
    ref_env = {"Key": "Environment", "Value": "cloudknot"}
    ref_dicts = [ref_name, ref_owner, ref_env]

    def is_eq_list_of_dicts(lst0, lst1):
        # Order-insensitive comparison of two lists of {"Key", "Value"} dicts.
        # The length check is required: zip() truncates to the shorter list,
        # so without it an extra or missing tag would go undetected.
        if len(lst0) != len(lst1):
            return False
        sort0 = sorted(lst0, key=lambda d: d["Key"])
        sort1 = sorted(lst1, key=lambda d: d["Key"])
        return all(pair0 == pair1 for pair0, pair1 in zip(sort0, sort1))

    assert is_eq_list_of_dicts(ref_dicts, tags_with_name_only)

    # Additional tags supplied as a list of Key/Value dicts are appended.
    add_list = [{"Key": "Project", "Value": "unit-testing"}]
    tags_with_add_list = ck.aws.get_tags(name=name, additional_tags=add_list)
    assert is_eq_list_of_dicts(ref_dicts + add_list, tags_with_add_list)

    # The same tag supplied in plain-dict form yields the same result,
    # so the expected value is still ref_dicts + add_list.
    add_dict = {"Project": "unit-testing"}
    tags_with_add_dict = ck.aws.get_tags(name=name, additional_tags=add_dict)
    assert is_eq_list_of_dicts(ref_dicts + add_list, tags_with_add_dict)

    # Each default tag ("Name", "Environment", "Owner") can be overridden.
    add_name = {"Key": "Name", "Value": "custom-name"}
    tags_add_name = ck.aws.get_tags(name=name, additional_tags=[add_name])
    assert is_eq_list_of_dicts([add_name, ref_owner, ref_env], tags_add_name)

    add_env = {"Key": "Environment", "Value": "custom-env"}
    tags_add_env = ck.aws.get_tags(name=name, additional_tags=[add_env])
    assert is_eq_list_of_dicts([ref_name, ref_owner, add_env], tags_add_env)

    add_owner = {"Key": "Owner", "Value": "custom-env"}
    tags_add_owner = ck.aws.get_tags(name=name, additional_tags=[add_owner])
    assert is_eq_list_of_dicts([ref_name, add_owner, ref_env], tags_add_owner)

    # Malformed additional_tags inputs must raise ValueError.
    with pytest.raises(ValueError):
        ck.aws.get_tags(name=name, additional_tags=42)

    with pytest.raises(ValueError):
        ck.aws.get_tags(name=name, additional_tags=[{"Foo": "Bar"}])

    with pytest.raises(ValueError):
        ck.aws.get_tags(name=name, additional_tags={"Key": 42})

    with pytest.raises(ValueError):
        ck.aws.get_tags(name=name, additional_tags={"Value": 42})
Exemplo n.º 5
0
def test_set_profile(bucket_cleanup):
    """set_profile should reject unknown profiles and persist valid ones."""
    # Snapshot the environment variables this test mutates so they can be
    # restored afterwards (order matters only for readability here).
    env_keys = (
        "AWS_SHARED_CREDENTIALS_FILE",
        "AWS_CONFIG_FILE",
        "CLOUDKNOT_CONFIG_FILE",
    )
    saved_env = {key: os.environ.get(key) for key in env_keys}

    ref_dir = op.join(data_path, "profiles_ref_data")
    ck_config_file = op.join(ref_dir, "cloudknot_without_profile")
    # Back up the reference config so the test cannot corrupt it.
    shutil.copy(ck_config_file, ck_config_file + ".bak")
    try:
        os.environ["CLOUDKNOT_CONFIG_FILE"] = ck_config_file
        os.environ["AWS_CONFIG_FILE"] = op.join(ref_dir, "config")
        os.environ["AWS_SHARED_CREDENTIALS_FILE"] = op.join(
            ref_dir, "credentials_without_default"
        )

        # A profile missing from the credentials file must be rejected.
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.set_profile(profile_name="not_in_list_of_profiles")

        # A listed profile round-trips through set/get.
        profile = "name-5"
        ck.set_profile(profile_name=profile)
        assert ck.get_profile() == profile
    finally:
        shutil.move(ck_config_file + ".bak", ck_config_file)

        # Restore each environment variable to its pre-test state.
        for key, old_value in saved_env.items():
            if old_value:
                os.environ[key] = old_value
            else:
                os.environ.pop(key)

        ck.refresh_clients()
Exemplo n.º 6
0
def test_set_region(bucket_cleanup):
    """set_region should validate input, update clients, and be reversible."""
    ck.refresh_clients()

    # Invalid region names are rejected outright.
    with pytest.raises(ck.aws.CloudknotInputError):
        ck.set_region(region="not a valid region name")

    old_region = ck.get_region()
    old_config_file = os.environ.get("CLOUDKNOT_CONFIG_FILE")

    try:
        # Point cloudknot at a throwaway config file for the duration.
        with tempfile.NamedTemporaryFile() as tmp:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = tmp.name

            region = "us-west-1"
            ck.set_region(region)
            assert ck.get_region() == region

            # Every client should now target the new region, except IAM,
            # which is a global service pinned to "aws-global".
            for service, client in ck.aws.clients.items():
                expected = "aws-global" if service == "iam" else region
                assert client.meta.region_name == expected
    finally:
        ck.set_region(old_region)
        if old_config_file:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = old_config_file
        else:
            os.environ.pop("CLOUDKNOT_CONFIG_FILE", None)

        ck.refresh_clients()
Exemplo n.º 7
0
def test_pars_errors(cleanup):
    """Each invalid Pars constructor input should raise CloudknotInputError."""
    ck.refresh_clients()
    name = get_testing_name()

    bad_kwargs = [
        # Non-string name
        dict(name=42),
        # Name that is too long
        dict(name="a" * 46),
        # Non-string batch_service_role_name
        dict(name=name, batch_service_role_name=42),
        # Redundant VPC input (explicit CIDR while using the default VPC)
        dict(name=name, ipv4_cidr="172.31.0.0/16"),
        # Invalid VPC CIDR
        dict(name=name, use_default_vpc=False, ipv4_cidr=42),
        # Invalid VPC instance tenancy
        dict(name=name, use_default_vpc=False, instance_tenancy=42),
        # Invalid policies inputs: wrong type, wrong element type,
        # and a policy name that does not exist
        dict(name=name, use_default_vpc=False, policies=42),
        dict(name=name, use_default_vpc=False, policies=[42, 42]),
        dict(name=name, use_default_vpc=False, policies=["foo"]),
    ]

    for kwargs in bad_kwargs:
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.Pars(**kwargs)
Exemplo n.º 8
0
def test_DockerImage(cleanup_repos):
    """End-to-end test of ck.DockerImage.

    Covers: creation from ``func`` input, from ``func`` plus ``dir_name``,
    and from ``script_path`` plus ``dir_name``; config-file bookkeeping;
    clobbering and its file-system effects; input validation; and the
    build/push cycle against a freshly created ECR repo. On any failure,
    local docker images and config-file entries created by the test are
    cleaned up before re-raising.
    """
    ck.refresh_clients()
    config = configparser.ConfigParser()
    config_file = ck.config.get_config_file()
    ecr = ck.aws.clients["ecr"]

    try:
        # pip imports that the reference requirements files are built from
        correct_pip_imports = {
            "boto3",
            "six",
            "dask",
            "docker",
            "pytest",
            "cloudpickle",
        }

        # First, test a DockerImage instance with `func` input
        # ----------------------------------------------------
        di = ck.DockerImage(
            name=unit_testing_func.__name__.replace("_", "-"), func=unit_testing_func
        )

        assert di.name == unit_testing_func.__name__.replace("_", "-")
        import_names = set([d["name"] for d in di.pip_imports])
        assert import_names == correct_pip_imports
        assert di.missing_imports == []
        assert di.username == "cloudknot-user"
        assert di.func == unit_testing_func

        py_dir = "py3"

        # Compare the created files with the reference files
        correct_dir = op.join(data_path, "docker_reqs_ref_data", py_dir, "ref1")
        correct_req_path = op.join(correct_dir, "requirements.txt")
        correct_dockerfile = op.join(correct_dir, "Dockerfile")

        correct_script_path = op.join(correct_dir, "unit-testing-func.py")

        # Compare requirements as sets of package names, ignoring any pinned
        # version (the part after "=").
        with open(correct_req_path) as f:
            correct_reqs = set([s.split("=")[0] for s in f.readlines()])

        with open(di.req_path) as f:
            created_reqs = set([s.split("=")[0] for s in f.readlines()])

        assert created_reqs == correct_reqs
        assert filecmp.cmp(di.docker_path, correct_dockerfile, shallow=False)
        assert filecmp.cmp(di.script_path, correct_script_path, shallow=False)

        # Confirm that the docker image is in the config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert "docker-image " + di.name in config.sections()

        # Next, retrieve another instance with the same name, confirm that it
        # retrieves the same info from the config file
        di2 = ck.DockerImage(name=di.name)
        assert di2.build_path == di.build_path
        assert di2.docker_path == di.docker_path
        assert di2.images == di.images
        assert di2.missing_imports == di.missing_imports
        assert di2.name == di.name
        assert di2.pip_imports == di.pip_imports
        assert di2.repo_uri == di.repo_uri
        assert di2.req_path == di.req_path
        assert di2.script_path == di.script_path
        assert di2.username == di.username

        # Clobber and confirm that it deleted all the created files and dirs
        di2.clobber()
        assert not op.isfile(di.req_path)
        assert not op.isfile(di.docker_path)
        assert not op.isfile(di.script_path)
        assert not op.isdir(di.build_path)

        # Assert that it was removed from the config file
        # If we just re-read the config file, config will keep the union
        # of the in memory values and the file values, updating the
        # intersection of the two with the file values. So we must clear
        # config and then re-read the file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert "docker-image " + di.name not in config.sections()

        # Second, test a DockerImage with a func and a dir_name
        # -----------------------------------------------------
        dir_name = tempfile.mkdtemp(dir=os.getcwd())
        di = ck.DockerImage(func=unit_testing_func, dir_name=dir_name)

        assert di.name == unit_testing_func.__name__.replace("_", "-")
        import_names = set([d["name"] for d in di.pip_imports])
        assert import_names == correct_pip_imports
        assert di.missing_imports == []
        assert di.username == "cloudknot-user"
        assert di.func == unit_testing_func

        with open(di.req_path) as f:
            created_reqs = set([s.split("=")[0] for s in f.readlines()])

        assert created_reqs == correct_reqs
        assert filecmp.cmp(di.docker_path, correct_dockerfile, shallow=False)
        assert filecmp.cmp(di.script_path, correct_script_path, shallow=False)

        # Confirm that the docker image is in the config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert "docker-image " + di.name in config.sections()

        # Clobber and confirm that it deleted all the created files and dirs
        di.clobber()
        assert not op.isfile(di.req_path)
        assert not op.isfile(di.docker_path)
        assert not op.isfile(di.script_path)
        assert not op.isdir(di.build_path)

        # Assert that it was removed from the config file
        # If we just re-read the config file, config will keep the union
        # of the in memory values and the file values, updating the
        # intersection of the two with the file values. So we must clear
        # config and then re-read the file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert "docker-image " + di.name not in config.sections()

        # Third, test a DockerImage with script_path and dir_name input
        # -------------------------------------------------------------
        correct_dir = op.join(data_path, "docker_reqs_ref_data", py_dir, "ref2")
        script_path = op.join(correct_dir, "test-func-input.py")

        # Put the results in a temp dir with a pre-existing file
        dir_name = tempfile.mkdtemp(dir=os.getcwd())
        _, tmp_file_name = tempfile.mkstemp(dir=dir_name)

        di = ck.DockerImage(
            script_path=script_path,
            dir_name=dir_name,
            # Fixed: this literal must match the assertion on di.username
            # below; the previous value ("******") was a redacted
            # placeholder and would make the test fail.
            username="unit-test-username",
            pin_pip_versions=True,
        )

        assert di.name == op.splitext(op.basename(script_path))[0].replace("_", "-")
        import_names = set([d["name"] for d in di.pip_imports])
        assert import_names == correct_pip_imports
        assert di.missing_imports == []
        assert di.username == "unit-test-username"
        assert di.func is None
        assert di.build_path == dir_name
        assert di.script_path == script_path

        # Compare the created files with the reference files
        correct_dir = op.join(data_path, "docker_reqs_ref_data", py_dir, "ref2")
        correct_req_path = op.join(correct_dir, "requirements.txt")
        correct_dockerfile = op.join(correct_dir, "Dockerfile")

        with open(correct_req_path) as f:
            correct_reqs = set([s.split("=")[0] for s in f.readlines()])

        with open(di.req_path) as f:
            created_reqs = set([s.split("=")[0] for s in f.readlines()])

        assert created_reqs == correct_reqs
        assert filecmp.cmp(di.docker_path, correct_dockerfile, shallow=False)

        # Confirm that the docker image is in the config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert "docker-image " + di.name in config.sections()

        # Assert ck.aws.CloudknotInputError on name plus other input
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(name=di.name, script_path="Foo")

        # Clobber and confirm that it deleted all the created files
        di.clobber()
        assert not op.isfile(di.req_path)
        assert not op.isfile(di.docker_path)

        # But since we had a pre-existing file in the build_path, it should not
        # have deleted the build_path or the input python script
        assert op.isfile(di.script_path)
        assert op.isfile(tmp_file_name)
        assert op.isdir(di.build_path)

        # Now delete them to clean up after ourselves
        os.remove(tmp_file_name)
        os.rmdir(di.build_path)

        # Assert that it was removed from the config file
        # If we just re-read the config file, config will keep the union
        # of the in memory values and the file values, updating the
        # intersection of the two with the file values. So we must clear
        # config and then re-read the file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

        assert "docker-image " + di.name not in config.sections()

        # Test for exception handling of incorrect input
        # ----------------------------------------------

        # Assert ck.aws.CloudknotInputError on no input
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage()

        # Assert ck.aws.CloudknotInputError on non-string name input
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(name=42)

        # Assert ck.aws.CloudknotInputError on redundant input
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(
                func=unit_testing_func,
                script_path=correct_script_path,
                dir_name=os.getcwd(),
            )

        # Assert ck.aws.CloudknotInputError on invalid script path
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(script_path=str(uuid.uuid4()), dir_name=os.getcwd())

        # Assert ck.aws.CloudknotInputError on invalid dir name
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(script_path=correct_script_path, dir_name=str(uuid.uuid4()))

        correct_dir = op.join(data_path, "docker_reqs_ref_data", py_dir, "ref1")
        # Assert CloudknotInputError to prevent overwriting existing script
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(func=unit_testing_func, dir_name=correct_dir)

        # Assert CloudknotInputError to prevent overwriting existing Dockerfile
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(script_path=correct_script_path)

        # Assert CloudknotInputError to prevent overwriting existing
        # requirements.txt
        # First, avoid the existing Dockerfile error by renaming the Dockerfile
        old_dockerfile = op.join(op.dirname(correct_script_path), "Dockerfile")

        new_dockerfile = op.join(op.dirname(correct_script_path), "tmpdockerfile")
        os.rename(old_dockerfile, new_dockerfile)

        # Assert the ck.aws.CloudknotInputError
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.DockerImage(script_path=correct_script_path)

        # Clean up our mess by renaming to the old Dockerfile
        os.rename(new_dockerfile, old_dockerfile)

        # Finally, test the build and push methods
        # ----------------------------------------

        # Make one last DockerImage instance with the simple_unit_testing_func
        di = ck.DockerImage(func=simple_unit_testing_func)

        # Create a repo to which to push this image
        response = ecr.create_repository(repositoryName=get_testing_name())
        repo_name = response["repository"]["repositoryName"]
        repo_uri = response["repository"]["repositoryUri"]

        repo = ck.aws.DockerRepo(name=repo_name)

        # Assert ck.aws.CloudknotInputError on push without args
        with pytest.raises(ck.aws.CloudknotInputError):
            di.push()

        # Assert ck.aws.CloudknotInputError on over-specified input
        with pytest.raises(ck.aws.CloudknotInputError):
            di.push(repo="input doesn't matter here", repo_uri=str(repo_uri))

        # Assert ck.aws.CloudknotInputError on push before build
        with pytest.raises(ck.aws.CloudknotInputError):
            di.push(repo_uri=str(repo_uri))

        # Assert ck.aws.CloudknotInputError on incorrect build args
        with pytest.raises(ck.aws.CloudknotInputError):
            di.build(tags=[42, -42])

        # Assert ck.aws.CloudknotInputError on 'latest' in tags
        with pytest.raises(ck.aws.CloudknotInputError):
            di.build(tags=["testing", "latest"])

        # Build twice: once with a single string tag and no image name,
        # once with a list of tags and an explicit image name; alternate
        # pushing by repo_uri and by DockerRepo instance.
        tags = ["testing", ["testing1", "testing2"]]
        image_names = [None, "testing_image"]

        for idx, (tag, n) in enumerate(zip(tags, image_names)):
            di.build(tags=tag, image_name=n)

            n = n if n else "cloudknot/" + di.name
            if isinstance(tag, str):
                tag = [tag]

            images = [{"name": n, "tag": t} for t in tag]
            for im in images:
                assert im in di.images

            if idx % 2:
                di.push(repo_uri=str(repo_uri))
            else:
                di.push(repo=repo)

            assert repo_uri in di.repo_uri

        # Assert ck.aws.CloudknotInputError on push with invalid repo
        with pytest.raises(ck.aws.CloudknotInputError):
            di.push(repo=42)

        # Assert ck.aws.CloudknotInputError on push with invalid repo_uri
        with pytest.raises(ck.aws.CloudknotInputError):
            di.push(repo_uri=42)

        di.clobber()

        # Assert error on build after clobber
        with pytest.raises(ck.aws.ResourceClobberedException):
            di.build(tags=["testing"])

        # Assert ck.aws.ResourceClobberedException on push after clobber
        with pytest.raises(ck.aws.ResourceClobberedException):
            di.push(repo=repo)
    except Exception:
        # Best-effort cleanup on failure so later tests start clean.
        # Get all local images with unit test prefix in any of the repo tags
        c = docker.from_env().api
        unit_test_images = [
            im
            for im in c.images()
            if any(
                ("unit-testing-func" in tag or "test-func-input" in tag)
                for tag in im["RepoTags"]
            )
        ]

        # Remove local images
        for im in unit_test_images:
            for tag in im["RepoTags"]:
                c.remove_image(tag, force=True)

        # Clean up config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)

            for name in list(config.sections()):
                if name in [
                    "docker-image unit-testing-func",
                    "docker-image test-func-input",
                ]:
                    config.remove_section(name)

            try:
                section_name = "docker-repos" + ck.aws.get_region()
                for option in config.options(section_name):
                    if UNIT_TEST_PREFIX in option:
                        config.remove_option(section_name, option)
            except configparser.NoSectionError:
                pass

            with open(config_file, "w") as f:
                config.write(f)

        # Bare raise preserves the original exception and traceback
        # (re-raising the bound name is redundant in Python 3).
        raise
Exemplo n.º 9
0
def test_pars_with_new_vpc(cleanup):
    """Test ck.Pars creation with a brand-new (non-default) VPC.

    Verifies that the CloudFormation stack, IAM roles, instance profile,
    VPC, subnets, and security group created by Pars match what boto3
    reports; that retrieving a pre-existing PARS by name returns the same
    resources; that clobber() tears down the stack and removes its config
    section; and that a stale stack-id in the config file raises
    ResourceDoesNotExistException and is pruned from the config.
    """
    ck.refresh_clients()
    name = get_testing_name()

    p = ck.Pars(name=name, use_default_vpc=False)

    # The stack created for this PARS is named "<name>-pars".
    response = ck.aws.clients["cloudformation"].describe_stacks(
        StackName=name + "-pars"
    )

    stack_id = response.get("Stacks")[0]["StackId"]
    assert stack_id == p.stack_id

    # NOTE(review): this call's result is unused and immediately
    # overwritten below — looks like leftover debugging; confirm and remove.
    response = ck.aws.clients["iam"].list_roles()

    # Each role created by the stack should be reflected on the Pars object.
    response = ck.aws.clients["iam"].get_role(RoleName=name + "-batch-service-role")
    bsr_arn = response.get("Role")["Arn"]
    assert bsr_arn == p.batch_service_role

    response = ck.aws.clients["iam"].get_role(RoleName=name + "-ecs-instance-role")
    ecs_arn = response.get("Role")["Arn"]
    assert ecs_arn == p.ecs_instance_role

    response = ck.aws.clients["iam"].get_role(RoleName=name + "-spot-fleet-role")
    sfr_arn = response.get("Role")["Arn"]
    assert sfr_arn == p.spot_fleet_role

    response = ck.aws.clients["iam"].list_instance_profiles_for_role(
        RoleName=name + "-ecs-instance-role"
    )
    ecs_profile_arn = response.get("InstanceProfiles")[0]["Arn"]
    assert ecs_profile_arn == p.ecs_instance_profile

    # Check for a VPC with the tag "Name: name"
    response = ck.aws.clients["ec2"].describe_tags(
        Filters=[
            {"Name": "resource-type", "Values": ["vpc"]},
            {"Name": "key", "Values": ["Name"]},
            {"Name": "value", "Values": [p.name]},
        ]
    )

    vpc_id = response.get("Tags")[0]["ResourceId"]
    assert vpc_id == p.vpc

    # All subnets in the new VPC should be registered on the Pars object.
    response = ck.aws.clients["ec2"].describe_subnets(
        Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
    )

    subnet_ids = [d["SubnetId"] for d in response.get("Subnets")]
    assert set(subnet_ids) == set(p.subnets)

    # The security group is located by its "Name" tag within the VPC.
    response = ck.aws.clients["ec2"].describe_security_groups(
        Filters=[
            {"Name": "vpc-id", "Values": [vpc_id]},
            {"Name": "tag-key", "Values": ["Name"]},
            {"Name": "tag-value", "Values": [p.name]},
        ]
    )

    sg_id = response.get("SecurityGroups")[0]["GroupId"]
    assert sg_id == p.security_group

    # Now, confirm input validation for pre-existing PARS
    with pytest.raises(ck.aws.CloudknotInputError):
        ck.Pars(name=name, batch_service_role_name="error-test")

    # Retrieval by name must return the exact same resource identifiers.
    p = ck.Pars(name=name)

    assert stack_id == p.stack_id
    assert bsr_arn == p.batch_service_role
    assert ecs_arn == p.ecs_instance_role
    assert sfr_arn == p.spot_fleet_role
    assert ecs_profile_arn == p.ecs_instance_profile
    assert vpc_id == p.vpc
    assert set(subnet_ids) == set(p.subnets)
    assert sg_id == p.security_group

    p.clobber()
    assert p.clobbered

    # Clobbering twice shouldn't be a problem
    p.clobber()

    response = ck.aws.clients["cloudformation"].describe_stacks(StackName=stack_id)

    status = response.get("Stacks")[0]["StackStatus"]
    assert status in ["DELETE_IN_PROGRESS", "DELETE_COMPLETE"]

    waiter = ck.aws.clients["cloudformation"].get_waiter("stack_delete_complete")
    waiter.wait(StackName=stack_id, WaiterConfig={"Delay": 10})

    # Confirm that clobber deleted the stack from the config file
    config_file = ck.config.get_config_file()
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
        assert p.pars_name not in config.sections()

    # Create a second PARS with a custom CIDR block and instance tenancy
    # and confirm both are applied to the new VPC.
    name = get_testing_name()
    instance_tenancy = "dedicated"
    cidr = "172.32.0.0/16"
    p = ck.Pars(
        name=name,
        use_default_vpc=False,
        ipv4_cidr=cidr,
        instance_tenancy=instance_tenancy,
    )

    response = ck.aws.clients["ec2"].describe_vpcs(VpcIds=[p.vpc])
    assert instance_tenancy == response.get("Vpcs")[0]["InstanceTenancy"]
    assert cidr == response.get("Vpcs")[0]["CidrBlock"]

    # Delete the stack out-of-band to set up the stale-config scenario.
    ck.aws.clients["cloudformation"].delete_stack(StackName=p.stack_id)

    # Change the stack-id in the config file to get an error
    # (swap the stack name segment of the ARN for a random one).
    config_file = ck.config.get_config_file()
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
        stack_id = config.get(p.pars_name, "stack-id")
        stack_id = stack_id.split("/")
        stack_id[1] = get_testing_name()
        stack_id = "/".join(stack_id)
        config.set(p.pars_name, "stack-id", stack_id)
        with open(config_file, "w") as f:
            config.write(f)

    # Confirm error on retrieving the nonexistent stack
    with pytest.raises(ck.aws.ResourceDoesNotExistException) as e:
        ck.Pars(name=name)

    assert e.value.resource_id == stack_id

    # Confirm that the previous error deleted the stack from the config file
    config_file = ck.config.get_config_file()
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
        assert p.pars_name not in config.sections()
Exemplo n.º 10
0
def test_pars_with_default_vpc(cleanup):
    """Test ck.Pars creation using the account's default VPC.

    Verifies that custom role names are honored, that the default VPC and
    its subnets/security group back the Pars object, and that deleting the
    stack out-of-band makes re-instantiation raise
    ResourceDoesNotExistException and prune the config entry. If the
    account cannot create a default VPC, the test is skipped silently.
    """
    ck.refresh_clients()
    name = get_testing_name()

    # Custom role names to verify they are passed through to IAM.
    batch_service_role_name = "ck-unit-test-batch-service-role-1"
    ecs_instance_role_name = "ck-unit-test-ecs-instance-role-1"
    spot_fleet_role_name = "ck-unit-test-spot-fleet-role-1"

    try:
        p = ck.Pars(
            name=name,
            batch_service_role_name=batch_service_role_name,
            ecs_instance_role_name=ecs_instance_role_name,
            spot_fleet_role_name=spot_fleet_role_name,
        )

        # The stack created for this PARS is named "<name>-pars".
        response = ck.aws.clients["cloudformation"].describe_stacks(
            StackName=name + "-pars"
        )
        stack_id = response.get("Stacks")[0]["StackId"]
        assert stack_id == p.stack_id

        # Each custom-named role should be reflected on the Pars object.
        response = ck.aws.clients["iam"].get_role(RoleName=batch_service_role_name)
        bsr_arn = response.get("Role")["Arn"]
        assert bsr_arn == p.batch_service_role

        response = ck.aws.clients["iam"].get_role(RoleName=ecs_instance_role_name)
        ecs_arn = response.get("Role")["Arn"]
        assert ecs_arn == p.ecs_instance_role

        response = ck.aws.clients["iam"].get_role(RoleName=spot_fleet_role_name)
        sfr_arn = response.get("Role")["Arn"]
        assert sfr_arn == p.spot_fleet_role

        response = ck.aws.clients["iam"].list_instance_profiles_for_role(
            RoleName=ecs_instance_role_name
        )
        ecs_profile_arn = response.get("InstanceProfiles")[0]["Arn"]
        assert ecs_profile_arn == p.ecs_instance_profile

        # Check for a default VPC
        response = ck.aws.clients["ec2"].describe_vpcs(
            Filters=[{"Name": "isDefault", "Values": ["true"]}]
        )

        vpc_id = response.get("Vpcs")[0]["VpcId"]
        assert vpc_id == p.vpc

        # All subnets of the default VPC should be on the Pars object.
        response = ck.aws.clients["ec2"].describe_subnets(
            Filters=[{"Name": "vpc-id", "Values": [vpc_id]}]
        )

        subnet_ids = [d["SubnetId"] for d in response.get("Subnets")]
        assert set(subnet_ids) == set(p.subnets)

        # The security group is located by its "Name" tag within the VPC.
        response = ck.aws.clients["ec2"].describe_security_groups(
            Filters=[
                {"Name": "vpc-id", "Values": [vpc_id]},
                {"Name": "tag-key", "Values": ["Name"]},
                {"Name": "tag-value", "Values": [p.name]},
            ]
        )

        sg_id = response.get("SecurityGroups")[0]["GroupId"]
        assert sg_id == p.security_group

        # Delete the stack using boto3 to check for an error from Pars
        # on reinstantiation
        ck.aws.clients["cloudformation"].delete_stack(StackName=p.stack_id)

        waiter = ck.aws.clients["cloudformation"].get_waiter("stack_delete_complete")
        waiter.wait(StackName=p.stack_id, WaiterConfig={"Delay": 10})

        # Confirm error on retrieving the deleted stack
        with pytest.raises(ck.aws.ResourceDoesNotExistException) as e:
            ck.Pars(name=name)

        assert e.value.resource_id == p.stack_id

        # Confirm that the previous error deleted
        # the stack from the config file
        config_file = ck.config.get_config_file()
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)
            assert p.pars_name not in config.sections()
    except ck.aws.CannotCreateResourceException:
        # Cannot create a default VPC in this account
        # Ignore test
        pass
Exemplo n.º 11
0
def _read_ck_config(config_file):
    """Read the cloudknot config file into a *fresh* ConfigParser.

    A fresh parser is required because re-reading with an existing parser
    keeps the union of in-memory and file values, so removals on disk
    would be invisible.
    """
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
    return config


def _assert_repo_removed_from_aws(ecr, name):
    """Assert that ECR repo ``name`` no longer exists on AWS.

    Retries describe_repositories until it raises, since repository
    deletion is eventually consistent.
    """
    retry = tenacity.Retrying(
        wait=tenacity.wait_exponential(max=16),
        stop=tenacity.stop_after_delay(180),
        retry=tenacity.retry_unless_exception_type((
            ecr.exceptions.RepositoryNotFoundException,
            botocore.exceptions.ClientError,
        )),
    )

    with pytest.raises((
            ecr.exceptions.RepositoryNotFoundException,
            botocore.exceptions.ClientError,
    )):
        retry.call(ecr.describe_repositories, repositoryNames=[name])


def test_DockerRepo(bucket_cleanup):
    """Test creation, retrieval, and clobbering of ``ck.aws.DockerRepo``.

    Covers two paths: adopting a repo created directly with boto3, and
    creating a repo through cloudknot, verifying config-file bookkeeping
    in both cases.
    """
    ck.refresh_clients()
    ecr = ck.aws.clients["ecr"]
    config_file = ck.config.get_config_file()
    repo_section_name = ("docker-repos " + ck.get_profile()
                         + " " + ck.get_region())

    try:
        name = get_testing_name()

        # Use boto3 to create an ECR repo
        response = ecr.create_repository(repositoryName=name)

        repo_name = response["repository"]["repositoryName"]
        repo_uri = response["repository"]["repositoryUri"]
        repo_registry_id = response["repository"]["registryId"]

        # Retrieve that same repo with cloudknot
        dr = ck.aws.DockerRepo(name=name)

        assert dr.name == repo_name
        assert dr.repo_uri == repo_uri
        assert dr.repo_registry_id == repo_registry_id

        # Confirm that the docker repo is in the config file
        config = _read_ck_config(config_file)
        assert name in config.options(repo_section_name)

        # Clobber the docker repo
        dr.clobber()

        # Assert that it was removed from AWS and from the config file
        _assert_repo_removed_from_aws(ecr, name)
        config = _read_ck_config(config_file)
        assert name not in config.options(repo_section_name)

        # Now create a new repo using only cloudknot
        name = get_testing_name()
        dr = ck.aws.DockerRepo(name=name)

        # Confirm that it exists on AWS and retrieve its properties,
        # retrying while the new repo propagates
        retry = tenacity.Retrying(
            wait=tenacity.wait_exponential(max=16),
            stop=tenacity.stop_after_delay(60),
            retry=tenacity.retry_if_exception_type((
                ecr.exceptions.RepositoryNotFoundException,
                botocore.exceptions.ClientError,
            )),
        )

        response = retry.call(ecr.describe_repositories,
                              repositoryNames=[name])

        repo_name = response["repositories"][0]["repositoryName"]
        repo_uri = response["repositories"][0]["repositoryUri"]
        repo_registry_id = response["repositories"][0]["registryId"]

        assert dr.name == repo_name
        assert dr.repo_uri == repo_uri
        assert dr.repo_registry_id == repo_registry_id

        # Confirm that the docker repo is in the config file
        config = _read_ck_config(config_file)
        assert name in config.options(repo_section_name)

        # Delete the repo from AWS before clobbering to exercise the
        # clobber path when the AWS resource is already gone
        ecr.delete_repository(registryId=repo_registry_id,
                              repositoryName=repo_name,
                              force=True)

        # Clobber the docker repo
        dr.clobber()

        # Assert that it was removed from AWS and from the config file
        _assert_repo_removed_from_aws(ecr, name)
        config = _read_ck_config(config_file)
        assert name not in config.options(repo_section_name)
    except Exception:
        # Best-effort cleanup of any unit-test repos before re-raising
        response = ecr.describe_repositories()

        # Get all repos with unit test prefix in the name
        repos = [
            r for r in response.get("repositories")
            if UNIT_TEST_PREFIX in r["repositoryName"]
        ]

        # Delete the AWS ECR repos
        for r in repos:
            ecr.delete_repository(
                registryId=r["registryId"],
                repositoryName=r["repositoryName"],
                force=True,
            )

        # Clean up config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)
            try:
                for name in config.options(repo_section_name):
                    if UNIT_TEST_PREFIX in name:
                        config.remove_option(repo_section_name, name)
            except configparser.NoSectionError:
                pass

            with open(config_file, "w") as f:
                config.write(f)

        # Bare raise preserves the original traceback
        raise
# Exemplo n.º 12
# 0
def test_get_region(bucket_cleanup):
    """Test ``ck.get_region`` resolution order.

    Checks, in order: the AWS_DEFAULT_REGION environment variable, the
    cloudknot config file, the fallback default region, and the aws
    config file. Environment and on-disk state are restored afterwards.
    """
    ck.refresh_clients()
    # Save environment variables and region for restoration later
    old_region_env = os.environ.get("AWS_DEFAULT_REGION")
    old_region = ck.get_region()
    old_config_file = os.environ.get("CLOUDKNOT_CONFIG_FILE")

    try:
        # With empty config file, get_region should return the
        # environment variable AWS_DEFAULT_REGION
        with tempfile.NamedTemporaryFile(mode="w+") as tmp:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = tmp.name

            region = "test-region-0"
            os.environ["AWS_DEFAULT_REGION"] = region
            assert ck.get_region() == region
            del os.environ["AWS_DEFAULT_REGION"]

        # With region in a temporary config file, get_region should simply
        # read the config file
        with tempfile.NamedTemporaryFile(mode="w+") as tmp:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = tmp.name

            region = "test-region-1"
            tmp.file.write("[aws]\n")
            tmp.file.write("region = {region:s}\n".format(region=region))
            tmp.file.flush()
            os.fsync(tmp.file.fileno())
            assert ck.get_region() == region

        # With no cloudknot config file and no environment variable
        # get_region should return region in aws config file
        with tempfile.NamedTemporaryFile(mode="w+") as tmp:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = tmp.name

            aws_config_file = op.join(op.expanduser("~"), ".aws", "config")

            try:
                # Temporarily hide the user's aws config file, if any
                if op.isfile(aws_config_file):
                    if op.isfile(aws_config_file + ".bak"):
                        raise Exception(
                            "Backup aws config file already exists.")
                    shutil.move(aws_config_file, aws_config_file + ".bak")

                assert ck.get_region() == "us-east-1"
            finally:
                if op.isfile(aws_config_file + ".bak"):
                    shutil.move(aws_config_file + ".bak", aws_config_file)

        with tempfile.NamedTemporaryFile(mode="w+") as tmp:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = tmp.name

            aws_config_file = op.join(op.expanduser("~"), ".aws", "config")

            try:
                if op.isfile(aws_config_file):
                    if op.isfile(aws_config_file + ".bak"):
                        raise Exception(
                            "Backup aws config file already exists.")
                    shutil.move(aws_config_file, aws_config_file + ".bak")
                else:
                    # Create the config directory if it doesn't exist
                    os.makedirs(op.dirname(aws_config_file), exist_ok=True)

                region = "test-region-2"

                with open(aws_config_file, "w") as f:
                    f.write("[default]\n")
                    f.write("region = {region:s}\n".format(region=region))
                    f.flush()
                    os.fsync(f.fileno())

                assert ck.get_region() == region
            finally:
                # Restore the user's aws config file (or remove ours)
                if op.isfile(aws_config_file + ".bak"):
                    shutil.move(aws_config_file + ".bak", aws_config_file)
                elif op.isfile(aws_config_file):
                    os.remove(aws_config_file)
    finally:
        ck.set_region(old_region)

        # Restore old environment variables
        if old_config_file:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = old_config_file
        else:
            os.environ.pop("CLOUDKNOT_CONFIG_FILE", None)

        if old_region_env:
            os.environ["AWS_DEFAULT_REGION"] = old_region_env
        else:
            os.environ.pop("AWS_DEFAULT_REGION", None)

        ck.refresh_clients()
# Exemplo n.º 13
# 0
def test_get_profile(bucket_cleanup):
    """Test ``ck.get_profile`` resolution order.

    Checks, in order: the cloudknot config file, the fallback value, the
    environment-derived profile, and the default profile from the AWS
    shared credentials file. Environment and reference files are
    restored afterwards.
    """
    # Save environment variables for restoration later
    old_credentials_file = os.environ.get('AWS_SHARED_CREDENTIALS_FILE')
    old_aws_config_file = os.environ.get('AWS_CONFIG_FILE')
    old_ck_config_file = os.environ.get('CLOUDKNOT_CONFIG_FILE')

    ref_dir = op.join(data_path, 'profiles_ref_data')
    ck_config_with_profile = op.join(ref_dir, 'cloudknot_with_profile')
    ck_config_without_profile = op.join(ref_dir, 'cloudknot_without_profile')

    # Back up the reference config files, which cloudknot may modify
    shutil.copy(ck_config_with_profile, ck_config_with_profile + '.bak')
    shutil.copy(ck_config_without_profile, ck_config_without_profile + '.bak')
    try:
        os.environ['CLOUDKNOT_CONFIG_FILE'] = ck_config_with_profile

        assert ck.get_profile() == 'profile_from_cloudknot_config'

        os.environ['CLOUDKNOT_CONFIG_FILE'] = ck_config_without_profile

        config_file = op.join(ref_dir, 'config')
        os.environ['AWS_CONFIG_FILE'] = config_file

        cred_file = op.join(ref_dir, 'credentials_without_default')
        os.environ['AWS_SHARED_CREDENTIALS_FILE'] = cred_file

        # Without a default profile, the fallback is returned
        assert ck.get_profile(fallback=None) is None
        assert ck.get_profile() == 'from-env'

        cred_file = op.join(ref_dir, 'credentials_with_default')
        os.environ['AWS_SHARED_CREDENTIALS_FILE'] = cred_file

        assert ck.get_profile() == 'default'
    finally:
        # Restore the reference config files
        shutil.move(ck_config_with_profile + '.bak', ck_config_with_profile)
        shutil.move(ck_config_without_profile + '.bak',
                    ck_config_without_profile)

        # Restore old environment variables
        if old_credentials_file:
            os.environ['AWS_SHARED_CREDENTIALS_FILE'] = old_credentials_file
        else:
            os.environ.pop('AWS_SHARED_CREDENTIALS_FILE', None)

        if old_aws_config_file:
            os.environ['AWS_CONFIG_FILE'] = old_aws_config_file
        else:
            os.environ.pop('AWS_CONFIG_FILE', None)

        if old_ck_config_file:
            os.environ['CLOUDKNOT_CONFIG_FILE'] = old_ck_config_file
        else:
            os.environ.pop('CLOUDKNOT_CONFIG_FILE', None)

        ck.refresh_clients()
# Exemplo n.º 14
# 0
def test_get_region(bucket_cleanup):
    """Test ``ck.get_region`` resolution order.

    NOTE(review): this duplicates an earlier ``test_get_region``
    definition in this file; at import time this later definition
    shadows the earlier one — confirm whether both are intended.

    Checks, in order: the AWS_DEFAULT_REGION environment variable, the
    cloudknot config file, the fallback default region, and the aws
    config file. Environment and on-disk state are restored afterwards.
    """
    # Save environment variables and region for restoration later
    old_region_env = os.environ.get('AWS_DEFAULT_REGION')
    old_region = ck.get_region()
    old_config_file = os.environ.get('CLOUDKNOT_CONFIG_FILE')

    try:
        # With empty config file, get_region should return the
        # environment variable AWS_DEFAULT_REGION
        with tempfile.NamedTemporaryFile(mode='w+') as tmp:
            os.environ['CLOUDKNOT_CONFIG_FILE'] = tmp.name

            region = 'test-region-0'
            os.environ['AWS_DEFAULT_REGION'] = region
            assert ck.get_region() == region
            del os.environ['AWS_DEFAULT_REGION']

        # With region in a temporary config file, get_region should simply
        # read the config file
        with tempfile.NamedTemporaryFile(mode='w+') as tmp:
            os.environ['CLOUDKNOT_CONFIG_FILE'] = tmp.name

            region = 'test-region-1'
            tmp.file.write('[aws]\n')
            tmp.file.write('region = {region:s}\n'.format(region=region))
            tmp.file.flush()
            os.fsync(tmp.file.fileno())
            assert ck.get_region() == region

        # With no cloudknot config file and no environment variable
        # get_region should return region in aws config file
        with tempfile.NamedTemporaryFile(mode='w+') as tmp:
            os.environ['CLOUDKNOT_CONFIG_FILE'] = tmp.name

            aws_config_file = op.join(op.expanduser('~'), '.aws', 'config')

            try:
                # Temporarily hide the user's aws config file, if any
                if op.isfile(aws_config_file):
                    if op.isfile(aws_config_file + '.bak'):
                        raise Exception(
                            'Backup aws config file already exists.')
                    shutil.move(aws_config_file, aws_config_file + '.bak')

                assert ck.get_region() == 'us-east-1'
            finally:
                if op.isfile(aws_config_file + '.bak'):
                    shutil.move(aws_config_file + '.bak', aws_config_file)

        with tempfile.NamedTemporaryFile(mode='w+') as tmp:
            os.environ['CLOUDKNOT_CONFIG_FILE'] = tmp.name

            aws_config_file = op.join(op.expanduser('~'), '.aws', 'config')

            try:
                if op.isfile(aws_config_file):
                    if op.isfile(aws_config_file + '.bak'):
                        raise Exception(
                            'Backup aws config file already exists.')
                    shutil.move(aws_config_file, aws_config_file + '.bak')
                else:
                    # Create the config directory if it doesn't exist
                    os.makedirs(op.dirname(aws_config_file), exist_ok=True)

                region = 'test-region-2'

                with open(aws_config_file, 'w') as f:
                    f.write('[default]\n')
                    f.write('region = {region:s}\n'.format(region=region))
                    f.flush()
                    os.fsync(f.fileno())

                assert ck.get_region() == region
            finally:
                # Restore the user's aws config file (or remove ours)
                if op.isfile(aws_config_file + '.bak'):
                    shutil.move(aws_config_file + '.bak', aws_config_file)
                elif op.isfile(aws_config_file):
                    os.remove(aws_config_file)
    finally:
        ck.set_region(old_region)

        # Restore old environment variables
        if old_config_file:
            os.environ['CLOUDKNOT_CONFIG_FILE'] = old_config_file
        else:
            os.environ.pop('CLOUDKNOT_CONFIG_FILE', None)

        if old_region_env:
            os.environ['AWS_DEFAULT_REGION'] = old_region_env
        else:
            os.environ.pop('AWS_DEFAULT_REGION', None)

        ck.refresh_clients()
# Exemplo n.º 15
# 0
def test_knot(cleanup_repos):
    """Test Knot creation, config-file persistence, and clobbering.

    Exercises: rebuilding the docker image when config entries are
    cleared, re-instantiation from config, error on a deleted stack,
    and idempotent clobbering.
    """
    config_file = ck.config.get_config_file()
    knot = None

    ck.refresh_clients()

    try:
        # Create a mock ECS-optimized AMI for the Knot to use
        ec2 = ck.aws.clients["ec2"]
        instance = ec2.run_instances(MaxCount=1, MinCount=1)["Instances"][0]
        ec2.create_image(
            BlockDeviceMappings=[{
                "DeviceName": "/dev/xvda",
                "Ebs": {
                    "DeleteOnTermination": True,
                    "VolumeSize": 30,
                    "VolumeType": "gp2",
                    "Encrypted": False,
                },
            }],
            Description="amazon linux ami 2 x86_64 ecs hvm gp2",
            Name="unit-test-ecs-optimized-ami",
            InstanceId=instance["InstanceId"],
        )

        pars = ck.Pars(name=get_testing_name(), use_default_vpc=False)
        name = get_testing_name()

        knot = ck.Knot(name=name, pars=pars, func=unit_testing_func)

        # Now remove the images and repo-uri from the docker-image
        # Forcing the next call to Knot to rebuild and re-push the image.
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)
            config.set("docker-image " + knot.docker_image.name, "images", "")
            config.set("docker-image " + knot.docker_image.name, "repo-uri",
                       "")
            with open(config_file, "w") as f:
                config.write(f)

        # Re-instantiate the knot so that it retrieves from config
        # with AWS resources that already exist
        knot = ck.Knot(name=name)
        knot.docker_image._clobber_script = True

        # Assert properties are as expected
        assert knot.name == name
        assert knot.knot_name == "knot " + name
        assert knot.pars.name == pars.name
        func_name = unit_testing_func.__name__.replace("_", "-")
        assert knot.docker_image.name == func_name
        assert knot.docker_repo.name == "cloudknot"
        pre = name + "-ck-"
        assert knot.job_definition.name == pre + "jd"

        # Delete the stack using boto3 to check for an error from Knot
        # on reinstantiation
        ck.aws.clients["cloudformation"].delete_stack(StackName=knot.stack_id)

        waiter = ck.aws.clients["cloudformation"].get_waiter(
            "stack_delete_complete")
        waiter.wait(StackName=knot.stack_id, WaiterConfig={"Delay": 10})

        # Confirm error on retrieving the deleted stack
        with pytest.raises(ck.aws.ResourceDoesNotExistException) as e:
            ck.Knot(name=name)

        assert e.value.resource_id == knot.stack_id

        # Confirm that the previous error deleted
        # the stack from the config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)
            assert knot.knot_name not in config.sections()

        name = get_testing_name()
        knot = ck.Knot(name=name, func=unit_testing_func)
        knot.docker_image._clobber_script = True
        knot.clobber(clobber_pars=True, clobber_image=True, clobber_repo=True)
        assert knot.clobbered

        # Clobbering twice shouldn't be a problem
        knot.clobber()

        response = ck.aws.clients["cloudformation"].describe_stacks(
            StackName=knot.stack_id)

        status = response.get("Stacks")[0]["StackStatus"]
        assert status in ["DELETE_IN_PROGRESS", "DELETE_COMPLETE"]

        waiter = ck.aws.clients["cloudformation"].get_waiter(
            "stack_delete_complete")
        waiter.wait(StackName=knot.stack_id, WaiterConfig={"Delay": 10})

        # Confirm that clobber deleted the stack from the config file
        config = configparser.ConfigParser()
        with ck.config.rlock:
            config.read(config_file)
            assert knot.knot_name not in config.sections()

    except Exception:
        # Best-effort cleanup, then re-raise with the original traceback
        try:
            if knot:
                knot.clobber(clobber_pars=True,
                             clobber_image=True,
                             clobber_repo=True)
        except Exception:
            pass

        raise
# Exemplo n.º 16
# 0
def test_knot_errors(cleanup_repos):
    """Verify that the ``ck.Knot`` constructor rejects invalid arguments.

    Each kwargs dict below must make ``ck.Knot`` raise
    ``ck.aws.CloudknotInputError``.
    """
    ck.refresh_clients()

    invalid_kwargs = [
        {"name": 42},                                 # non-string name
        {"name": "a" * 56},                           # name too long
        {"func": unit_testing_func, "pars": 42},      # invalid pars input
        # redundant docker_image and func
        {"func": unit_testing_func, "docker_image": 42},
        {"docker_image": 42},                         # invalid docker_image
        {"retries": 0},                               # retries below minimum
        {"retries": 11},                              # retries above maximum
        {"memory": 0},                                # invalid memory
        {"job_def_vcpus": -42},                       # invalid job_def_vcpus
        {"priority": -42},                            # invalid priority
        {"min_vcpus": -1},                            # invalid min_vcpus
        {"desired_vcpus": -1},                        # invalid desired_vcpus
        {"max_vcpus": -1},                            # invalid max_vcpus
        {"instance_types": [42]},                     # non-string instance type
        {"instance_types": "not a valid instance"},   # unknown instance type
        {"instance_types": ["not", "a", "valid", "instance"]},
        {"image_id": 42},                             # invalid image_id
        {"ec2_key_pair": 42},                         # invalid ec2_key_pair
        {"volume_size": "string"},                    # non-integer volume_size
        {"volume_size": 0},                           # volume_size < 1
        # image_id and volume_size are mutually exclusive
        {"image_id": "test-string", "volume_size": 30},
    ]

    for kwargs in invalid_kwargs:
        with pytest.raises(ck.aws.CloudknotInputError):
            ck.Knot(**kwargs)
# Exemplo n.º 17
# 0
def test_get_profile(bucket_cleanup):
    """Test ``ck.get_profile`` resolution order.

    NOTE(review): this duplicates an earlier ``test_get_profile``
    definition in this file; at import time this later definition
    shadows the earlier one — confirm whether both are intended.

    Checks, in order: the cloudknot config file, the fallback value, the
    environment-derived profile, and the default profile from the AWS
    shared credentials file. Environment and reference files are
    restored afterwards.
    """
    # Save environment variables for restoration later
    old_credentials_file = os.environ.get("AWS_SHARED_CREDENTIALS_FILE")
    old_aws_config_file = os.environ.get("AWS_CONFIG_FILE")
    old_ck_config_file = os.environ.get("CLOUDKNOT_CONFIG_FILE")

    ref_dir = op.join(data_path, "profiles_ref_data")
    ck_config_with_profile = op.join(ref_dir, "cloudknot_with_profile")
    ck_config_without_profile = op.join(ref_dir, "cloudknot_without_profile")

    # Back up the reference config files, which cloudknot may modify
    shutil.copy(ck_config_with_profile, ck_config_with_profile + ".bak")
    shutil.copy(ck_config_without_profile, ck_config_without_profile + ".bak")
    try:
        os.environ["CLOUDKNOT_CONFIG_FILE"] = ck_config_with_profile

        assert ck.get_profile() == "profile_from_cloudknot_config"

        os.environ["CLOUDKNOT_CONFIG_FILE"] = ck_config_without_profile

        config_file = op.join(ref_dir, "config")
        os.environ["AWS_CONFIG_FILE"] = config_file

        cred_file = op.join(ref_dir, "credentials_without_default")
        os.environ["AWS_SHARED_CREDENTIALS_FILE"] = cred_file

        # Without a default profile, the fallback is returned
        assert ck.get_profile(fallback=None) is None
        assert ck.get_profile() == "from-env"

        cred_file = op.join(ref_dir, "credentials_with_default")
        os.environ["AWS_SHARED_CREDENTIALS_FILE"] = cred_file

        assert ck.get_profile() == "default"
    finally:
        # Restore the reference config files
        shutil.move(ck_config_with_profile + ".bak", ck_config_with_profile)
        shutil.move(ck_config_without_profile + ".bak",
                    ck_config_without_profile)

        # Restore old environment variables
        if old_credentials_file:
            os.environ["AWS_SHARED_CREDENTIALS_FILE"] = old_credentials_file
        else:
            os.environ.pop("AWS_SHARED_CREDENTIALS_FILE", None)

        if old_aws_config_file:
            os.environ["AWS_CONFIG_FILE"] = old_aws_config_file
        else:
            os.environ.pop("AWS_CONFIG_FILE", None)

        if old_ck_config_file:
            os.environ["CLOUDKNOT_CONFIG_FILE"] = old_ck_config_file
        else:
            os.environ.pop("CLOUDKNOT_CONFIG_FILE", None)

        ck.refresh_clients()