Example #1
0
def _update_nodes_information(params, image, vm_size, min_nodes, max_nodes):
    """Updates cluster's nodes information.

    :param models.ClusterCreateParameters params: cluster create parameters.
    :param str or None image: image.
    :param str or None vm_size: VM size.
    :param int min_nodes: min number of nodes.
    :param int or None max_nodes: max number of nodes.
    :return models.ClusterCreateParameters: updated parameters.
    :raise CLIError: if no VM size is available or no scale settings can be
        determined from the arguments or the configuration file.
    """
    result = copy.deepcopy(params)
    if vm_size:
        result.vm_size = vm_size
    if not result.vm_size:
        raise CLIError('Please provide VM size')
    if image:
        result.virtual_machine_configuration = models.VirtualMachineConfiguration(_get_image_reference_or_die(image))
    # Only override scale settings coming from the configuration file when the
    # caller actually provided node counts: min_nodes == max_nodes is also true
    # when both are None, and must not clobber existing settings with a manual
    # scale whose target_node_count is None.
    if min_nodes is not None and min_nodes == max_nodes:
        result.scale_settings = models.ScaleSettings(manual=models.ManualScaleSettings(min_nodes))
    elif max_nodes is not None:
        result.scale_settings = models.ScaleSettings(auto_scale=models.AutoScaleSettings(min_nodes, max_nodes))
    if not result.scale_settings or (not result.scale_settings.manual and not result.scale_settings.auto_scale):
        raise CLIError('Please provide scale setting for the cluster via configuration file or via --min and --max '
                       'parameters.')
    return result
Example #2
0
def set_cluster_auto_scale_parameters(client, resource_group, cluster_name,
                                      min_nodes, max_nodes):
    """Switches the cluster into auto-scale mode with the given node range.

    :param client: BatchAI clusters operations client.
    :param str resource_group: resource group name.
    :param str cluster_name: name of the cluster.
    :param int min_nodes: minimum number of nodes.
    :param int max_nodes: maximum number of nodes.
    :return: the update operation result returned by the client.
    """
    auto_scale = models.AutoScaleSettings(min_nodes, max_nodes)
    settings = models.ScaleSettings(auto_scale=auto_scale)
    return client.update(resource_group, cluster_name, scale_settings=settings)
    def test_auto_scaling(self, resource_group, location, storage_account,
                          storage_account_key):
        """Tests auto-scaling.

        Creates a zero-node cluster, switches it into auto-scale mode
        (0..1 nodes), submits a job and verifies BatchAI scales the cluster
        up to run the job and back down to zero afterwards.
        """
        # Create the cluster with no nodes.
        cluster = helpers.create_cluster(self.client, location,
                                         resource_group.name,
                                         self.cluster_name, 'STANDARD_D1', 0,
                                         storage_account.name,
                                         storage_account_key)

        # Switch the cluster into auto-scale mode
        self.client.clusters.update(
            resource_group.name,
            self.cluster_name,
            scale_settings=models.ScaleSettings(
                auto_scale=models.AutoScaleSettings(minimum_node_count=0,
                                                    maximum_node_count=1)))

        # Submit a task. BatchAI must increase the number of nodes to execute the task.
        self.assertCanRunJobOnHost(resource_group,
                                   location,
                                   cluster.id,
                                   timeout_sec=helpers.AUTO_SCALE_TIMEOUT_SEC)

        # Verify that cluster downsized to zero since there are no more jobs for it
        self.assertEqual(
            helpers.wait_for_nodes(self.is_live, self.client,
                                   resource_group.name, self.cluster_name, 0,
                                   helpers.NODE_STARTUP_TIMEOUT_SEC), 0)
        # Clean up: delete the cluster and block until deletion completes.
        self.client.clusters.delete(resource_group.name,
                                    self.cluster_name).result()
def prepare_batch_ai_workspace(client, service, config):
    """Creates the Batch AI workspace and its GPU cluster described by config.

    :param client: BatchAI management client.
    :param service: file service exposing primary_endpoint for the share URL.
    :param config: configuration object with workspace, cluster, admin and
        storage settings.
    """
    # Create Batch AI workspace
    client.workspaces.create(config.workspace_resource_group,
                             config.workspace,
                             config.location)

    # Azure file share that every node mounts under the configured path.
    share_url = 'https://{0}/{1}'.format(service.primary_endpoint,
                                         config.workspace_file_share)
    azure_file_share = models.AzureFileShareReference(
        account_name=config.storage_account_name,
        credentials=models.AzureStorageCredentialsInfo(
            account_key=config.storage_account_key),
        azure_file_url=share_url,
        relative_mount_path=config.workspace_relative_mount_path)

    # GPU cluster definition. Use N-series VM size for GPU workloads.
    cluster_parameters = models.ClusterCreateParameters(
        vm_size=config.workspace_vm_size,
        # Configure the ssh users
        user_account_settings=models.UserAccountSettings(
            admin_user_name=config.admin,
            admin_user_password=config.admin_password),
        # Fixed number of VMs in the cluster
        scale_settings=models.ScaleSettings(
            manual=models.ManualScaleSettings(
                target_node_count=config.workspace_node_count)),
        # Each node mounts the shared Azure file share
        node_setup=models.NodeSetup(
            mount_volumes=models.MountVolumes(
                azure_file_shares=[azure_file_share])))

    client.clusters.create(config.workspace_resource_group, config.workspace,
                           config.workspace_cluster,
                           cluster_parameters).result()
Example #5
0
 def assertCanResizeCluster(self, resource_group, target):
     """Asserts the cluster can be manually resized to *target* nodes.

     Issues a manual-scale update, waits until the node count reaches
     *target*, then checks that remote login information is reported for
     every node.
     """
     self.client.clusters.update(resource_group.name, Helpers.DEFAULT_WORKSPACE_NAME, self.cluster_name,
                                 scale_settings=models.ScaleSettings(
                                     manual=models.ManualScaleSettings(target_node_count=target)))
     self.assertEqual(
         Helpers.wait_for_nodes(self.is_live, self.client, resource_group.name, self.cluster_name, target,
                                Helpers.NODE_STARTUP_TIMEOUT_SEC),
         target)
     Helpers.assert_remote_login_info_reported_for_nodes(self, self.client, resource_group.name,
                                                         self.cluster_name, target)
Example #6
0
 def test_experiments_isolation(self, resource_group, location):
     """Tests that deleting an experiment does not affect its siblings.

     Creates two workspaces, each with one cluster and two experiments
     (each experiment holding one job), deletes 'exp1' in the first
     workspace, and verifies that only that experiment is gone while the
     clusters, the remaining experiments and their queued jobs are intact.
     """
     self.client.workspaces.create(resource_group.name, 'first',
                                   location).result()
     self.client.workspaces.create(resource_group.name, 'second',
                                   location).result()
     # Create a cluster, two experiments and a job in each experiment
     for workspace in ['first', 'second']:
         cluster = self.client.clusters.create(
             resource_group.name,
             workspace,
             'cluster',
             parameters=models.ClusterCreateParameters(
                 vm_size='STANDARD_D1',
                 scale_settings=models.ScaleSettings(
                     manual=models.ManualScaleSettings(
                         target_node_count=0)),
                 user_account_settings=models.UserAccountSettings(
                     admin_user_name=helpers.ADMIN_USER_NAME,
                     admin_user_password=helpers.ADMIN_USER_PASSWORD),
                 vm_priority='lowpriority')).result()
         for experiment in ['exp1', 'exp2']:
             self.client.experiments.create(resource_group.name, workspace,
                                            experiment).result()
             # 'true' is a no-op command; the cluster has zero nodes, so the
             # job stays queued for the duration of the test.
             self.client.jobs.create(
                 resource_group.name,
                 workspace,
                 experiment,
                 'job',
                 parameters=models.JobCreateParameters(
                     cluster=models.ResourceId(id=cluster.id),
                     node_count=1,
                     std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT',
                     custom_toolkit_settings=models.CustomToolkitSettings(
                         command_line='true'))).result()
     # Delete exp1 in the first workspace
     self.client.experiments.delete(resource_group.name, 'first',
                                    'exp1').result()
     # Ensure the experiment was actually deleted
     self.assertRaises(
         CloudError, lambda: self.client.experiments.get(
             resource_group.name, 'first', 'exp1'))
     for workspace in ['first', 'second']:
         # Ensure the clusters are not affected
         self.client.clusters.get(resource_group.name, workspace, 'cluster')
         # Ensure the other experiments are not affected
         for experiment in ['exp1', 'exp2']:
             if workspace == 'first' and experiment == 'exp1':
                 continue
             self.client.experiments.get(resource_group.name, workspace,
                                         experiment)
             job = self.client.jobs.get(resource_group.name, workspace,
                                        experiment, 'job')
             # And check the job are not terminated
             self.assertEqual(job.execution_state,
                              models.ExecutionState.queued)
Example #7
0
def _get_scale_settings(initial_count, min_count, max_count):
    """Returns scale settings for a cluster with given parameters.

    :param int or None initial_count: initial number of nodes.
    :param int or None min_count: min number of nodes for auto-scale.
    :param int or None max_count: max number of nodes for auto-scale.
    :return models.ScaleSettings or None: scale settings, or None when no
        counts were provided so they should be taken from the config file.
    :raise CLIError: if only one of min/max is provided, or min > max.
    """
    if not initial_count and not min_count and not max_count:
        # Get from the config file
        return None
    # Providing exactly one of min/max is an error: auto-scale needs both.
    if (min_count is None) != (max_count is None):
        raise CLIError('You need to either provide both min and max node counts or not provide any of them')
    if min_count is not None and max_count is not None and min_count > max_count:
        raise CLIError('Maximum nodes count must be greater or equal to minimum nodes count')
    if min_count == max_count:
        # Fixed-size cluster: use manual scaling with whichever count is given.
        if min_count is None or initial_count == min_count:
            return models.ScaleSettings(
                manual=models.ManualScaleSettings(target_node_count=initial_count))
        if initial_count is None:
            return models.ScaleSettings(
                manual=models.ManualScaleSettings(target_node_count=min_count)
            )
    return models.ScaleSettings(
        auto_scale=models.AutoScaleSettings(
            minimum_node_count=min_count,
            maximum_node_count=max_count,
            initial_node_count=initial_count or 0))
Example #8
0
def cluster_parameters_for(config, container_settings, volumes):
    """Builds ClusterCreateParameters for *config* with the given volumes.

    NOTE(review): *container_settings* is accepted but not referenced by
    this builder — confirm whether it is still needed by callers.
    """
    ubuntu_image = models.ImageReference(offer='UbuntuServer',
                                         publisher='Canonical',
                                         sku='16.04-LTS',
                                         version='16.04.201708151')
    admin_account = models.UserAccountSettings(
        admin_user_name=config.admin_user['name'],
        admin_user_password=config.admin_user['password'])
    manual_scale = models.ManualScaleSettings(
        target_node_count=config.node_count)
    return models.ClusterCreateParameters(
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=ubuntu_image),
        location=config.location,
        vm_size=config.vm_type,
        user_account_settings=admin_account,
        scale_settings=models.ScaleSettings(manual=manual_scale),
        node_setup=models.NodeSetup(mount_volumes=volumes))
Example #9
0
def resize_cluster(client, resource_group, cluster_name, target):
    """Resizes the cluster to *target* nodes using manual scaling."""
    manual = models.ManualScaleSettings(target_node_count=target)
    return client.update(resource_group, cluster_name,
                         scale_settings=models.ScaleSettings(manual=manual))
Example #10
0
    def create_cluster(client,
                       location,
                       resource_group,
                       cluster_name,
                       vm_size,
                       target_nodes,
                       storage_account,
                       storage_account_key,
                       file_servers=None,
                       file_systems=None,
                       subnet_id=None,
                       setup_task_cmd=None,
                       setup_task_env=None,
                       setup_task_secrets=None):
        """Creates a cluster with given parameters and mounted Azure Files

        :param BatchAIManagementClient client: client instance.
        :param str location: location.
        :param str resource_group: resource group name.
        :param str cluster_name: name of the cluster.
        :param str vm_size: vm size.
        :param int target_nodes: number of nodes.
        :param str storage_account: name of the storage account.
        :param str storage_account_key: storage account key.
        :param list(models.FileServerReference) file_servers: file servers.
        :param list(models.UnmanagedFileServerReference) file_systems: file systems.
        :param str setup_task_cmd: start task cmd line.
        :param dict[str, str] setup_task_env: environment variables for start task.
        :param dict[str, str] setup_task_secrets: environment variables with secret values for start task, server doesn't
                                                  return values for these environment variables in get cluster responses.
        :param str subnet_id: virtual network subnet id.
        :return models.Cluster: the created cluster
        """
        Helpers._create_file_share(storage_account, storage_account_key)
        setup_task = None
        if setup_task_cmd:
            # A setup task command may be given without env vars or secrets;
            # guard against None so .items() does not raise AttributeError.
            setup_task = models.SetupTask(
                command_line=setup_task_cmd,
                environment_variables=[
                    models.EnvironmentVariable(name=k, value=v)
                    for k, v in (setup_task_env or {}).items()
                ],
                secrets=[
                    models.EnvironmentVariableWithSecretValue(name=k, value=v)
                    for k, v in (setup_task_secrets or {}).items()
                ],
                std_out_err_path_prefix='$AZ_BATCHAI_MOUNT_ROOT/{0}'.format(
                    Helpers.AZURE_FILES_MOUNTING_PATH))
        # The cluster lives in the default workspace; create it first.
        client.workspaces.create(resource_group,
                                 Helpers.DEFAULT_WORKSPACE_NAME,
                                 location).result()
        return client.clusters.create(
            resource_group,
            Helpers.DEFAULT_WORKSPACE_NAME,
            cluster_name,
            parameters=models.ClusterCreateParameters(
                vm_size=vm_size,
                scale_settings=models.ScaleSettings(
                    manual=models.ManualScaleSettings(
                        target_node_count=target_nodes)),
                node_setup=models.NodeSetup(
                    mount_volumes=models.MountVolumes(
                        azure_file_shares=[
                            models.AzureFileShareReference(
                                azure_file_url=
                                'https://{0}.file.core.windows.net/{1}'.format(
                                    storage_account, Helpers.AZURE_FILES_NAME),
                                relative_mount_path=Helpers.
                                AZURE_FILES_MOUNTING_PATH,
                                account_name=storage_account,
                                credentials=models.AzureStorageCredentialsInfo(
                                    account_key=storage_account_key),
                            )
                        ],
                        file_servers=file_servers,
                        unmanaged_file_systems=file_systems),
                    setup_task=setup_task),
                subnet=subnet_id,
                user_account_settings=models.UserAccountSettings(
                    admin_user_name=Helpers.ADMIN_USER_NAME,
                    admin_user_password=Helpers.ADMIN_USER_PASSWORD),
                vm_priority='lowpriority')).result()
Example #11
0
# Upload the MNIST data files and the training script to the 'data' directory
# of the file share. NOTE(review): assumes `filesystem`, `fileshare` and the
# local z:/script/ files were set up earlier in the file — confirm.
for f in ['Train-28x28_cntk_text.txt', 'Test-28x28_cntk_text.txt', 'ConvNet_MNIST.py']:
  filesystem.create_file_from_path(fileshare, "data", f, "z:/script/"+f)

## Create Cluster

cluster_name = 'shwarscluster'
relative_mount_point = 'azurefileshare'

# Single-node GPU cluster (STANDARD_NC6) with the Azure file share mounted on
# each node under the relative mount point defined above.
parameters = models.ClusterCreateParameters(
    location='northeurope',
    vm_size='STANDARD_NC6',
    user_account_settings=models.UserAccountSettings(
         admin_user_name="shwars",
         admin_user_password="******"),
    scale_settings=models.ScaleSettings(
         manual=models.ManualScaleSettings(target_node_count=1)
     ),
    node_setup=models.NodeSetup(
        # Mount shared volumes to the host
         mount_volumes=models.MountVolumes(
             azure_file_shares=[
                 models.AzureFileShareReference(
                     account_name=storage_account_name,
                     credentials=models.AzureStorageCredentialsInfo(
         account_key=storage_account_key),
         azure_file_url='https://{0}.file.core.windows.net/{1}'.format(
               storage_account_name, fileshare),
                  relative_mount_path = relative_mount_point)],
         ),
    ),
)