Example #1
def test_get_cloud_provider_invalid(tmpdir):
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            """
# needs to be called `cloud/openstack` because
# that's what the example cluster below requires
[cloud/openstack]
provider = invalid
auth_url = http://openstack.example.com:5000/v2.0
username = ${USER}
password = XXXXXX
project_name = test
    """
            + make_config_snippet("cluster", "example_openstack")
            + make_config_snippet("login", "ubuntu", keyname='test', valid_path=ssh_key_path)
            + make_config_snippet("setup", "slurm_setup")
        )
    conf = load_config_files(config_path)
    assert 'invalid' not in conf['cloud']
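Note: all of these examples come from elasticluster's configuration test
suite. They use `load_config_files` / `make_creator` (presumably imported
from `elasticluster.conf`) plus a test-suite helper, `make_config_snippet`,
that returns a canned INI fragment for a given section kind and name. A
minimal stand-in is sketched below purely so the examples read as
self-contained; the `_SNIPPETS` table and its contents are assumptions, not
elasticluster code.

# Hypothetical stand-in for the test-suite helper; only the `login/ubuntu`
# fragment is spelled out here, other (kind, name) pairs are elided.
_SNIPPETS = {
    ('login', 'ubuntu'): """
[login/ubuntu]
image_user = ubuntu
image_sudo = True
user_key_name = {keyname}
user_key_private = {valid_path}
user_key_public = {valid_path}
""",
}

def make_config_snippet(kind, name, *extra_lines, **kwargs):
    # Look up the canned fragment, substitute the keyword values, and
    # append any extra configuration lines the caller wants to inject.
    snippet = _SNIPPETS[(kind, name)].format(**kwargs)
    if extra_lines:
        snippet += '\n'.join(extra_lines) + '\n'
    return snippet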
Example #2
def test_get_cloud_provider_openstack(tmpdir):
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            """
[cloud/openstack]
provider = openstack
auth_url = http://openstack.example.com:5000/v2.0
username = ${USER}
password = XXXXXX
project_name = test
    """
            + make_config_snippet("cluster", "example_openstack")
            + make_config_snippet("login", "ubuntu", keyname='test', valid_path=ssh_key_path)
            + make_config_snippet("setup", "slurm_setup_old")
        )
    creator = make_creator(config_path)
    cloud = creator.create_cloud_provider('example_openstack')
    from elasticluster.providers.openstack import OpenStackCloudProvider
    assert isinstance(cloud, OpenStackCloudProvider)
Example #3
def test_invalid_ssh_to(tmpdir):
    """
    Drop cluster definition with an invalid `ssh_to=` line.
    """
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            make_config_snippet("cluster", "example_openstack",
                                'ssh_to=non-existent') +
            make_config_snippet("cloud", "openstack") +
            make_config_snippet("login",
                                "ubuntu",
                                keyname='test_invalid_ssh_to',
                                valid_path=ssh_key_path) +
            make_config_snippet("setup", "slurm_setup_old"))
        config_file.flush()
    creator = make_creator(config_path)
    # ERROR: Cluster `example_openstack` is configured to SSH into nodes of kind `non-existent`, but no such kind is defined
    with raises(ConfigurationError):
        creator.create_cluster('slurm')
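The comment above records the exact error text. To pin the message down in
the assertion itself, pytest's `raises` also accepts a `match=` regular
expression; a hedged variant of the final check (not the upstream test):

    # match= is re.search()-ed against the stringified exception
    with raises(ConfigurationError, match='no such kind is defined'):
        creator.create_cluster('slurm')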
Example #4
def test_pr_378(tmpdir):
    wd = tmpdir.strpath
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            make_config_snippet("cloud", "google")
            # reported by @ikhaja in PR #378
            + """
[login/google]
image_user=my_username
image_user_sudo=root
image_sudo=True
user_key_name=elasticluster
user_key_private=~/.ssh/google_compute_engine
user_key_public=~/.ssh/google_compute_engine.pub
            """
            # FIXME: the std `cluster/*` snippet cannot set `login=` and `cloud=`
            + """
[cluster/slurm]
cloud=google
login=google
setup=slurm_setup
security_group=default
image_id=https://www.googleapis.com/compute/v1/projects/jda-labs---decision-science-01/global/images/image-python-ubuntu
flavor=n1-standard-1
master_nodes=1
worker_nodes=4
ssh_to=master
    """
            + make_config_snippet("setup", "slurm_setup")
        )
        config_file.flush()
    with patch('os.path.expanduser') as expanduser:
        # since `os.path.expanduser` is called from within
        # `_expand_config_file_list()` we need to provide the right return
        # value for it, as non-existent files will be removed from the list
        expanduser.return_value = config_path
        creator = make_creator(config_path)
        # check that `os.path.expanduser` has been called on the `user_key_*` values
        expanduser.assert_any_call('~/.ssh/google_compute_engine.pub')
        expanduser.assert_any_call('~/.ssh/google_compute_engine')
        # check that actual configured values have been expanded
        cluster = creator.create_cluster("slurm")
        assert os.path.isabs(cluster.user_key_public)
        assert os.path.isabs(cluster.user_key_private)
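Patching `os.path.expanduser` with a blanket `return_value` hands
`config_path` back to every caller, including the expansion of the
`user_key_*` values, so the `isabs()` checks pass trivially (`tmpdir` paths
are absolute). A narrower alternative, sketched here as an assumption rather
than the upstream test, delegates to the real function for everything except
the fake key paths:

    import os.path
    from mock import patch  # or: from unittest.mock import patch

    real_expanduser = os.path.expanduser

    def fake_expanduser(path):
        # fake only the SSH-key paths; any other path (e.g. during
        # config-file discovery) still goes through the real implementation
        if path.startswith('~/.ssh/'):
            return config_path
        return real_expanduser(path)

    with patch('os.path.expanduser', side_effect=fake_expanduser) as expanduser:
        creator = make_creator(config_path)
        expanduser.assert_any_call('~/.ssh/google_compute_engine')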
Example #5
def test_issue_376(tmpdir):
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            # reported by @marcbrisson in issue #376
            """
[cluster/slurm]
cloud=google
login=ubuntu
setup=slurm_setup
security_group=default
image_id=https://www.googleapis.com/compute/v1/projects/jda-labs---decision-science-01/global/images/image-python-ubuntu
flavor=n1-standard-1
master_nodes=1
worker_nodes=4
ssh_to=master
image_userdata=
boot_disk_size=20

[cluster/slurm/master]
flavor=n1-standard-2
boot_disk_size=100
    """ + make_config_snippet("cloud", "google") +
            make_config_snippet("login",
                                "ubuntu",
                                keyname='test_issue_376',
                                valid_path=ssh_key_path) +
            make_config_snippet("setup", "slurm_setup"))
        config_file.flush()
    creator = make_creator(config_path)
    cluster = creator.create_cluster('slurm')
    # "master" nodes take values from their specific config section
    assert cluster.nodes['master'][0].flavor == 'n1-standard-2'
    assert cluster.nodes['master'][0].extra['boot_disk_size'] == '100'
    # "worker" nodes take values from the cluster defaults
    assert cluster.nodes['worker'][0].flavor == 'n1-standard-1'
    # FIXME: Actually, does this imply that the `boot_disk_size` value
    # defined at cluster level is not propagated to "worker" nodes?
    assert 'boot_disk_size' not in cluster.nodes['worker'][0].extra
Example #6
def test_gce_accelerator1(tmpdir):
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            make_config_snippet("cluster", "example_google",
                                '[cluster/example_google/misc]',
                                'accelerator_count=1')
            #             # ask for one GPU
            #             """
            # [cluster/slurm]
            # cloud=google
            # login=ubuntu
            # setup=slurm_setup
            # security_group=default
            # image_id=**not important**
            # flavor=n1-standard-1
            # master_nodes=1
            # worker_nodes=4
            # ssh_to=master

            # [cluster/slurm/worker]
            # accelerator_count=1
            #     """
            + make_config_snippet("cloud", "google") +
            make_config_snippet("login",
                                "ubuntu",
                                keyname='test_gce_accelerator',
                                valid_path=ssh_key_path) +
            make_config_snippet("setup", "misc_setup"))
        config_file.flush()
    creator = make_creator(config_path)
    cluster = creator.create_cluster('example_google')
    # "master" nodes take values from their specific config section
    #assert cluster.nodes['master'][0].extra['accelerator_count'] == 0
    # "worker" nodes take values from the cluster defaults
    assert 'accelerator_count' in cluster.nodes['misc'][0].extra
    assert cluster.nodes['misc'][0].extra['accelerator_count'] == 1
Example #7
def test_gce_accelerator2(tmpdir):
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            # ask for two GPU on `worker` nodes only
            """
[cluster/test]
cloud=google
login=ubuntu
setup=slurm_setup
security_group=default
image_id=**not important**
flavor=n1-standard-1
master_nodes=1
worker_nodes=4
ssh_to=master

[cluster/test/worker]
accelerator_count=2
    """ + make_config_snippet("cloud", "google") +
            make_config_snippet("login",
                                "ubuntu",
                                keyname='test_gce_accelerator',
                                valid_path=ssh_key_path) +
            make_config_snippet("setup", "slurm_setup"))
        config_file.flush()
    creator = make_creator(config_path)
    cluster = creator.create_cluster('test')
    # "master" nodes take values from their specific config section
    assert cluster.nodes['master'][0].extra['accelerator_count'] == 0
    # "worker" nodes take values from the cluster defaults
    assert 'accelerator_count' in cluster.nodes['worker'][0].extra
    assert cluster.nodes['worker'][0].extra['accelerator_count'] == 2
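The assertions above only inspect the first node of each kind; a hedged
tightening that walks every node (same semantics, not the upstream test):

    # every worker carries the per-kind override, every master the default
    for node in cluster.nodes['worker']:
        assert node.extra['accelerator_count'] == 2
    for node in cluster.nodes['master']:
        assert node.extra['accelerator_count'] == 0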
Example #8
def test_issue_415(tmpdir):
    """
    Drop cluster definition if not all node kinds are present in the `setup/*` section.
    """
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            # reported by @dirkpetersen in issue #415
            """
[cluster/gce-slurm]
cloud=google
#login=ubuntu
login=google
setup=slurm_setup_old
security_group=default
image_id=ubuntu-1604-xenial-v20170307
flavor=n1-standard-1
frontend_nodes=1
worker_nodes=2
image_userdata=
ssh_to=frontend
            """ + make_config_snippet("cloud", "google") +
            make_config_snippet("login",
                                "ubuntu",
                                keyname='test_issue_415',
                                valid_path=ssh_key_path) +
            make_config_snippet("setup", "slurm_setup_old"))
        config_file.flush()
    creator = make_creator(config_path)
    # ERROR: Configuration section `cluster/gce-slurm` references non-existing login section `google`. Dropping cluster definition.
    with raises(ConfigurationError):
        creator.create_cluster('gce-slurm')
Example #9
def test_default_setup_provider_is_ansible(tmpdir):
    wd = tmpdir.strpath
    ssh_key_path = os.path.join(wd, 'id_rsa.pem')
    with open(ssh_key_path, 'w+') as ssh_key_file:
        # don't really care about SSH key, just that the file exists
        ssh_key_file.write('')
        ssh_key_file.flush()
    config_path = os.path.join(wd, 'config.ini')
    with open(config_path, 'w+') as config_file:
        config_file.write(
            make_config_snippet("cloud", "openstack")
            + make_config_snippet("cluster", "example_openstack", 'setup=setup_no_ansible')
            + make_config_snippet("login", "ubuntu", keyname='test', valid_path=ssh_key_path)
            # *note:* no `provider=` line here
            + """
[setup/setup_no_ansible]
frontend_groups = slurm_master
compute_groups = slurm_worker
    """
        )
    creator = make_creator(config_path)
    setup = creator.create_setup_provider('example_openstack')
    from elasticluster.providers.ansible_provider import AnsibleSetupProvider
    assert isinstance(setup, AnsibleSetupProvider)
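For contrast, writing the same section with the provider spelled out should
behave identically, since Ansible is elasticluster's default setup provider;
a hedged fragment (the section name here is made up):

        config_file.write("""
[setup/setup_with_explicit_provider]
provider = ansible
frontend_groups = slurm_master
compute_groups = slurm_worker
""")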