Example #1
File: custom.py Project: yolocs/azure-cli
def create_cluster(cmd, client,  # pylint: disable=too-many-locals
                   resource_group, cluster_name, json_file=None, location=None, user_name=None,
                   ssh_key=None, password=None, image='UbuntuLTS', vm_size=None, min_nodes=0, max_nodes=None,
                   nfs_name=None, nfs_resource_group=None, nfs_mount_path='nfs', azure_file_share=None,
                   afs_mount_path='afs', container_name=None, container_mount_path='bfs', account_name=None,
                   account_key=None, raw=False):
    if json_file:
        with open(json_file) as f:
            json_obj = json.load(f)
            params = _get_deserializer()('ClusterCreateParameters', json_obj)
    else:
        params = models.ClusterCreateParameters(None, None, None)
    params = _update_cluster_create_parameters_with_env_variables(cmd.cli_ctx, params, account_name, account_key)
    params = _update_user_account_settings(params, user_name, ssh_key, password)
    if location:
        params.location = location
    if not params.location:
        raise CLIError('Please provide location for cluster creation.')
    params = _update_nodes_information(params, image, vm_size, min_nodes, max_nodes)
    if nfs_name:
        file_server = client.file_servers.get(nfs_resource_group if nfs_resource_group else resource_group, nfs_name)
        params = _add_nfs_to_cluster_create_parameters(params, file_server.id, nfs_mount_path)
    if azure_file_share:
        params = _add_azure_file_share_to_cluster_create_parameters(cmd.cli_ctx, params, azure_file_share,
                                                                    afs_mount_path, account_name, account_key)
    if container_name:
        params = _add_azure_container_to_cluster_create_parameters(cmd.cli_ctx, params, container_name,
                                                                   container_mount_path, account_name, account_key)
    return client.clusters.create(resource_group, cluster_name, params, raw=raw)
Example #2
def prepare_batch_ai_workspace(client, service, config):
    # Create Batch AI workspace (wait for it before creating the cluster)
    client.workspaces.create(config.workspace_resource_group,
                             config.workspace,
                             config.location).result()

    # Create GPU cluster
    parameters = models.ClusterCreateParameters(
        # VM size. Use N-series for GPU
        vm_size=config.workspace_vm_size,
        # Configure the ssh users
        user_account_settings=models.UserAccountSettings(
            admin_user_name=config.admin,
            admin_user_password=config.admin_password),
        # Number of VMs in the cluster
        scale_settings=models.ScaleSettings(
            manual=models.ManualScaleSettings(target_node_count=config.workspace_node_count)
        ),
        # Configure each node in the cluster
        node_setup=models.NodeSetup(
            # Mount shared volumes to the host
            mount_volumes=models.MountVolumes(
                azure_file_shares=[
                    models.AzureFileShareReference(
                        account_name=config.storage_account_name,
                        credentials=models.AzureStorageCredentialsInfo(
                            account_key=config.storage_account_key),
                        azure_file_url='https://{0}/{1}'.format(
                            service.primary_endpoint, config.workspace_file_share),
                        relative_mount_path=config.workspace_relative_mount_path)],
            ),
        ),
    )
    client.clusters.create(config.workspace_resource_group, config.workspace, config.workspace_cluster, parameters).result()
Example #3
File: custom.py Project: zackliu/azure-cli
def create_cluster(cmd, client,  # pylint: disable=too-many-locals
                   resource_group, cluster_name, json_file=None, location=None, user_name=None,
                   ssh_key=None, password=None, generate_ssh_keys=None, image=None, custom_image=None,
                   use_auto_storage=False, vm_size=None, vm_priority='dedicated', target=None, min_nodes=None,
                   max_nodes=None, subnet=None, nfs_name=None, nfs_resource_group=None, nfs_mount_path='nfs',
                   azure_file_share=None, afs_mount_path='afs', container_name=None, container_mount_path='bfs',
                   account_name=None, account_key=None, setup_task=None, setup_task_output=None):
    if generate_ssh_keys:
        _generate_ssh_keys()
        if ssh_key is None:
            ssh_key = _get_default_ssh_public_key_location()
    _ensure_resource_not_exist(client.clusters, resource_group, cluster_name)
    _verify_subnet(client, subnet, nfs_name, nfs_resource_group or resource_group)
    if json_file:
        with open(json_file) as f:
            json_obj = json.load(f)
            params = _get_deserializer()('ClusterCreateParameters', json_obj)
    else:
        # noinspection PyTypeChecker
        params = models.ClusterCreateParameters()
    if params.node_setup:
        params.node_setup.mount_volumes = _patch_mount_volumes(
            cmd.cli_ctx, params.node_setup.mount_volumes, account_name, account_key)
    params = _update_user_account_settings(params, user_name, ssh_key, password)
    params.location = location or _get_resource_group_location(cmd.cli_ctx, resource_group)
    params = _update_nodes_information(params, image, custom_image, vm_size, vm_priority, target, min_nodes, max_nodes)
    if nfs_name or azure_file_share or container_name:
        params.node_setup = params.node_setup or models.NodeSetup()
    mount_volumes = params.node_setup.mount_volumes if params.node_setup else None
    if nfs_name:
        file_server = client.file_servers.get(nfs_resource_group or resource_group, nfs_name)
        mount_volumes = _add_nfs_to_mount_volumes(mount_volumes, file_server.id, nfs_mount_path)
    if azure_file_share:
        mount_volumes = _add_azure_file_share_to_mount_volumes(cmd.cli_ctx, mount_volumes, azure_file_share,
                                                               afs_mount_path, account_name, account_key)
    if container_name:
        mount_volumes = _add_azure_container_to_mount_volumes(cmd.cli_ctx, mount_volumes, container_name,
                                                              container_mount_path, account_name, account_key)
    if use_auto_storage:
        auto_storage_account, auto_storage_key = _configure_auto_storage(cmd.cli_ctx, params.location)
        mount_volumes = _add_azure_file_share_to_mount_volumes(
            cmd.cli_ctx, mount_volumes, AUTO_STORAGE_SHARE_NAME, AUTO_STORAGE_SHARE_PATH,
            auto_storage_account, auto_storage_key)
        mount_volumes = _add_azure_container_to_mount_volumes(
            cmd.cli_ctx, mount_volumes, AUTO_STORAGE_CONTAINER_NAME, AUTO_STORAGE_CONTAINER_PATH,
            auto_storage_account, auto_storage_key)
    if mount_volumes:
        if params.node_setup is None:
            params.node_setup = models.NodeSetup()
        params.node_setup.mount_volumes = mount_volumes
    if subnet:
        params.subnet = models.ResourceId(id=subnet)
    if setup_task:
        params = _add_setup_task(setup_task, setup_task_output, params)
    return client.clusters.create(resource_group, cluster_name, params)
Example #4
 def test_experiments_isolation(self, resource_group, location):
     self.client.workspaces.create(resource_group.name, 'first',
                                   location).result()
     self.client.workspaces.create(resource_group.name, 'second',
                                   location).result()
     # Create a cluster, two experiments and a job in each experiment
     for workspace in ['first', 'second']:
         cluster = self.client.clusters.create(
             resource_group.name,
             workspace,
             'cluster',
             parameters=models.ClusterCreateParameters(
                 vm_size='STANDARD_D1',
                 scale_settings=models.ScaleSettings(
                     manual=models.ManualScaleSettings(
                         target_node_count=0)),
                 user_account_settings=models.UserAccountSettings(
                     admin_user_name=helpers.ADMIN_USER_NAME,
                     admin_user_password=helpers.ADMIN_USER_PASSWORD),
                 vm_priority='lowpriority')).result()
         for experiment in ['exp1', 'exp2']:
             self.client.experiments.create(resource_group.name, workspace,
                                            experiment).result()
             self.client.jobs.create(
                 resource_group.name,
                 workspace,
                 experiment,
                 'job',
                 parameters=models.JobCreateParameters(
                     cluster=models.ResourceId(id=cluster.id),
                     node_count=1,
                     std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT',
                     custom_toolkit_settings=models.CustomToolkitSettings(
                         command_line='true'))).result()
     # Delete exp1 in the first workspace
     self.client.experiments.delete(resource_group.name, 'first',
                                    'exp1').result()
     # Ensure the experiment was actually deleted
     self.assertRaises(
         CloudError, lambda: self.client.experiments.get(
             resource_group.name, 'first', 'exp1'))
     for workspace in ['first', 'second']:
         # Ensure the clusters are not affected
         self.client.clusters.get(resource_group.name, workspace, 'cluster')
         # Ensure the other experiments are not affected
         for experiment in ['exp1', 'exp2']:
             if workspace == 'first' and experiment == 'exp1':
                 continue
             self.client.experiments.get(resource_group.name, workspace,
                                         experiment)
             job = self.client.jobs.get(resource_group.name, workspace,
                                        experiment, 'job')
             # And check that the jobs are not terminated
             self.assertEqual(job.execution_state,
                              models.ExecutionState.queued)
Example #5
def cluster_parameters_for(config, container_settings, volumes):
    return models.ClusterCreateParameters(
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(offer='UbuntuServer',
                                                  publisher='Canonical',
                                                  sku='16.04-LTS',
                                                  version='16.04.201708151')),
        location=config.location,
        vm_size=config.vm_type,
        user_account_settings=models.UserAccountSettings(
            admin_user_name=config.admin_user['name'],
            admin_user_password=config.admin_user['password']),
        scale_settings=models.ScaleSettings(manual=models.ManualScaleSettings(
            target_node_count=config.node_count)),
        node_setup=models.NodeSetup(mount_volumes=volumes))
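
The helper above only builds the parameters object; a minimal usage sketch, assuming the pre-workspace clusters.create(resource_group, cluster_name, parameters) signature used elsewhere on this page, with placeholder resource group and cluster names:

client.clusters.create(
    'my-resource-group',   # placeholder resource group name
    'my-cluster',          # placeholder cluster name
    cluster_parameters_for(config, container_settings, volumes)).result()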
Example #6
    def create_cluster(client,
                       location,
                       resource_group,
                       cluster_name,
                       vm_size,
                       target_nodes,
                       storage_account,
                       storage_account_key,
                       file_servers=None,
                       file_systems=None,
                       subnet_id=None,
                       setup_task_cmd=None,
                       setup_task_env=None,
                       setup_task_secrets=None):
        """Creates a cluster with given parameters and mounted Azure Files

        :param BatchAIManagementClient client: client instance.
        :param str location: location.
        :param str resource_group: resource group name.
        :param str cluster_name: name of the cluster.
        :param str vm_size: vm size.
        :param int target_nodes: number of nodes.
        :param str storage_account: name of the storage account.
        :param str storage_account_key: storage account key.
        :param list(models.FileServerReference) file_servers: file servers.
        :param list(models.UnmanagedFileServerReference) file_systems: file systems.
        :param str setup_task_cmd: setup task command line.
        :param dict[str, str] setup_task_env: environment variables for the setup task.
        :param dict[str, str] setup_task_secrets: environment variables with secret values for the setup task; the
                                                  server doesn't return their values in get cluster responses.
        :param str subnet_id: virtual network subnet id.
        :return models.Cluster: the created cluster
        """
        Helpers._create_file_share(storage_account, storage_account_key)
        setup_task = None
        if setup_task_cmd:
            setup_task = models.SetupTask(
                command_line=setup_task_cmd,
                environment_variables=[
                    models.EnvironmentVariable(name=k, value=v)
                    for k, v in setup_task_env.items()
                ],
                secrets=[
                    models.EnvironmentVariableWithSecretValue(name=k, value=v)
                    for k, v in setup_task_secrets.items()
                ],
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(
                    Helpers.AZURE_FILES_MOUNTING_PATH))
        client.workspaces.create(resource_group,
                                 Helpers.DEFAULT_WORKSPACE_NAME,
                                 location).result()
        return client.clusters.create(
            resource_group,
            Helpers.DEFAULT_WORKSPACE_NAME,
            cluster_name,
            parameters=models.ClusterCreateParameters(
                vm_size=vm_size,
                scale_settings=models.ScaleSettings(
                    manual=models.ManualScaleSettings(
                        target_node_count=target_nodes)),
                node_setup=models.NodeSetup(
                    mount_volumes=models.MountVolumes(
                        azure_file_shares=[
                            models.AzureFileShareReference(
                                azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
                                    storage_account, Helpers.AZURE_FILES_NAME),
                                relative_mount_path=Helpers.AZURE_FILES_MOUNTING_PATH,
                                account_name=storage_account,
                                credentials=models.AzureStorageCredentialsInfo(
                                    account_key=storage_account_key),
                            )
                        ],
                        file_servers=file_servers,
                        unmanaged_file_systems=file_systems),
                    setup_task=setup_task),
                subnet=subnet_id,
                user_account_settings=models.UserAccountSettings(
                    admin_user_name=Helpers.ADMIN_USER_NAME,
                    admin_user_password=Helpers.ADMIN_USER_PASSWORD),
                vm_priority='lowpriority')).result()
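
A hedged usage sketch for the helper above, assuming it is exposed as a static method on the Helpers class (as the body implies); every concrete value below is a placeholder rather than something taken from the original tests:

cluster = Helpers.create_cluster(
    client,                                   # BatchAIManagementClient instance
    location='eastus2',                       # placeholder region
    resource_group='my-resource-group',       # placeholder resource group
    cluster_name='demo-cluster',              # placeholder cluster name
    vm_size='STANDARD_D1',
    target_nodes=1,
    storage_account='mystorageaccount',       # placeholder storage account
    storage_account_key='<storage-account-key>',
    setup_task_cmd='echo setup-done',
    setup_task_env={'SAMPLE_VAR': 'value'},
    setup_task_secrets={'SAMPLE_SECRET': 'value'})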
Example #7
cluster_name = 'shwarscluster'
relative_mount_point = 'azurefileshare'

parameters = models.ClusterCreateParameters(
    location='northeurope',
    vm_size='STANDARD_NC6',
    user_account_settings=models.UserAccountSettings(
        admin_user_name="shwars",
        admin_user_password="******"),
    scale_settings=models.ScaleSettings(
        manual=models.ManualScaleSettings(target_node_count=1)
    ),
    node_setup=models.NodeSetup(
        # Mount shared volumes to the host
        mount_volumes=models.MountVolumes(
            azure_file_shares=[
                models.AzureFileShareReference(
                    account_name=storage_account_name,
                    credentials=models.AzureStorageCredentialsInfo(
                        account_key=storage_account_key),
                    azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
                        storage_account_name, fileshare),
                    relative_mount_path=relative_mount_point)],
        ),
    ),
)

client.clusters.create(resource_group_name, cluster_name, parameters).result()

cluster = client.clusters.get(resource_group_name, cluster_name)
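
The get call above returns whatever state the cluster is currently in, even while nodes are still being allocated. A minimal polling sketch, assuming the same client, resource_group_name, and cluster_name are in scope and the pre-workspace azure-mgmt-batchai models used in this example:

import time

# Poll until the cluster reaches a steady allocation state
cluster = client.clusters.get(resource_group_name, cluster_name)
while cluster.allocation_state != models.AllocationState.steady:
    time.sleep(10)
    cluster = client.clusters.get(resource_group_name, cluster_name)
print('Cluster is steady with {0} node(s) allocated'.format(cluster.current_node_count))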