Example #1
def create_pool(batch_client, pool_id, vm_size, vm_count, app_files):
    """Creates an Azure Batch pool with the specified id.

    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param str pool_id: The id of the pool to create.
    :param str vm_size: vm size (sku)
    :param int vm_count: number of vms to allocate
    :param list app_files: The list of all the other scripts to upload.
    """
    # pick the latest supported 14.04 sku for UbuntuServer
    sku_to_use, image_ref_to_use = \
        common.helpers.select_latest_verified_vm_image_with_node_agent_sku(
            batch_client, 'Canonical', 'UbuntuServer', '14.04')
    user = batchmodels.AutoUserSpecification(
        scope=batchmodels.AutoUserScope.pool,
        elevation_level=batchmodels.ElevationLevel.admin)
    task_commands = get_list_from_file('configs/start_commands')
    print(task_commands)
    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated=vm_count,
        start_task=batchmodels.StartTask(
            command_line=common.helpers.wrap_commands_in_shell(
                'linux', task_commands),
            user_identity=batchmodels.UserIdentity(auto_user=user),
            resource_files=app_files,
            wait_for_success=True))

    common.helpers.create_pool_if_not_exist(batch_client, pool)
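# `get_list_from_file` is not defined in this example. A minimal sketch, under
# the assumption that 'configs/start_commands' holds one shell command per
# line (the helper's behavior is inferred, not taken from the sample's repo):
def get_list_from_file(path):
    # Read a text file and return its non-empty lines as command strings.
    with open(path) as f:
        return [line.strip() for line in f if line.strip()]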
Example #2
def pool_create():
    image_reference = batchmodel.ImageReference(
        publisher=config_azure['batch_pool_image_publisher'],
        offer=config_azure['batch_pool_image_offer'],
        sku=config_azure['batch_pool_image_sku'])

    vm_config = batchmodel.VirtualMachineConfiguration(
        image_reference=image_reference,
        node_agent_sku_id=config_azure['batch_pool_node_agent_sku'])

    vm_start_task = batchmodel.StartTask(
        command_line=
        '/bin/bash -c "sudo yum -y install epel-release; sudo yum -y install python36 python36-devel python36-tools; sudo python36 -m ensurepip; sudo yum -y install openmpi openmpi-devel; sudo env MPICC=/usr/lib64/openmpi/bin/mpicc pip3 install mpi4py numpy; sudo pip3 --yes uninstall azure azure-common azure-storage; sudo pip3 install azure-storage azure-batch"',
        user_identity=batchmodel.UserIdentity(
            auto_user=batchmodel.AutoUserSpecification(
                scope=batchmodel.AutoUserScope.pool,
                elevation_level=batchmodel.ElevationLevel.admin)),
        wait_for_success=True)

    batch_service.pool.add(pool=batchmodel.PoolAddParameter(
        id=config_azure['batch_pool_name'],
        vm_size=config_azure['batch_pool_vm_size'],
        virtual_machine_configuration=vm_config,
        target_dedicated_nodes=config_azure[
            'batch_pool_target_dedicated_nodes'],
        enable_inter_node_communication=True,
        start_task=vm_start_task),
                           raw=True)
Example #3
def create_pool_and_wait_for_nodes(batch_client, block_blob_client, pool_id,
                                   vm_size, vm_count):
    """Creates an Azure Batch pool with the specified id.

    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param block_blob_client: The storage block blob client to use.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str pool_id: The id of the pool to create.
    :param str vm_size: vm size (sku)
    :param int vm_count: number of vms to allocate
    :rtype: list
    :return: list of `batchserviceclient.models.ComputeNode`
    """
    # pick the latest supported 14.04 sku for UbuntuServer
    sku_to_use, image_ref_to_use = \
        common.helpers.select_latest_verified_vm_image_with_node_agent_sku(
            batch_client, 'Canonical', 'UbuntuServer', '14.04')

    # upload start task script
    block_blob_client.create_container(_CONTAINER_NAME, fail_on_exist=False)
    sas_url = common.helpers.upload_blob_and_create_sas(
        block_blob_client, _CONTAINER_NAME, _STARTTASK_RESOURCE_FILE,
        _STARTTASK_SHELL_SCRIPT_PATH,
        datetime.datetime.utcnow() + datetime.timedelta(hours=1))

    # create pool with start task
    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        enable_inter_node_communication=True,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated=vm_count,
        start_task=batchmodels.StartTask(
            command_line=_STARTTASK_RESOURCE_FILE,
            run_elevated=True,
            wait_for_success=True,
            resource_files=[
                batchmodels.ResourceFile(file_path=_STARTTASK_RESOURCE_FILE,
                                         blob_source=sas_url)
            ]),
    )
    common.helpers.create_pool_if_not_exist(batch_client, pool)

    # because we want all nodes to be available before any tasks are assigned
    # to the pool, here we will wait for all compute nodes to reach idle
    nodes = common.helpers.wait_for_all_nodes_state(
        batch_client, pool,
        frozenset((batchmodels.ComputeNodeState.starttaskfailed,
                   batchmodels.ComputeNodeState.unusable,
                   batchmodels.ComputeNodeState.idle)))

    # ensure all nodes are idle
    if any(node.state != batchmodels.ComputeNodeState.idle for node in nodes):
        raise RuntimeError('node(s) of pool {} not in idle state'.format(
            pool.id))

    return nodes
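# A usage sketch (not part of the sample): construct the two clients and
# create the pool. The account names, keys, and URLs are placeholders, and
# the `batch_url` keyword is `base_url` in older azure-batch releases.
import azure.batch as batch
import azure.batch.batch_auth as batch_auth
from azure.storage.blob import BlockBlobService

credentials = batch_auth.SharedKeyCredentials('mybatchaccount', '<batch-key>')
batch_client = batch.BatchServiceClient(
    credentials, batch_url='https://mybatchaccount.westus.batch.azure.com')
block_blob_client = BlockBlobService(
    account_name='mystorageaccount', account_key='<storage-key>')

nodes = create_pool_and_wait_for_nodes(
    batch_client, block_blob_client, 'mypool', 'STANDARD_D2_V2', vm_count=2)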
Example #4
def create_pool(batch_service_client, pool_id, users):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    # The start task installs git, Python 2.7, R, and the required Python
    # packages (pandas, rpy2, argparse, numpy, scipy) on each node, using
    # the named 'admin' user account.

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        user_accounts=users,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="18.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=_POOL_VM_SIZE,
        target_dedicated_nodes=_DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=_LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            #command_line="/bin/bash -c \"git clone https://github.com/uiuc-arc/probfuzz.git; \
            #        cd probfuzz/; ./install_java.sh; ./install.sh\"",
            # command_line="/bin/bash -c \"apt-get update\"",
            command_line="/bin/bash -c \"sudo apt-get -y update; \
                sudo apt-get install -y git; \
                sudo apt-get install -y python2.7; \
                sudo apt-get install -y python-pip; \
                sudo apt-get install -y bc; \
                sudo apt-get install -y r-base; \
                sudo pip2 --no-cache-dir install pandas; \
                sudo pip2 install rpy2==2.8.6;\
                sudo pip2 install argparse;\
                sudo pip2 install numpy;\
                sudo pip2 install scipy;\
            \"",
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(user_name="admin"),
            #user_identity=batchmodels.UserIdentity(
            #    auto_user=batchmodels.AutoUserSpecification(
            #    scope=batchmodels.AutoUserScope.pool,
            #    elevation_level=batchmodels.ElevationLevel.admin)),
        ),
        max_tasks_per_node=_MAX_TASKS_PER_NODE)
    batch_service_client.pool.add(new_pool)
Example #5
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    # The start task installs pip, monitoring tools, the azure-storage-blob,
    # pyspark, and pandas Python packages, and a JRE on each node, using an
    # administrator auto-user identity.
    task_commands = [
        "apt-get update",
        "apt-get -y install python3-pip",
        "apt -y install htop",
        "apt -y install iftop",
        "pip3 install azure-storage-blob",
        "pip3 install pyspark",
        "pip3 install pandas",
        "apt -y install openjdk-8-jre-headless"
    ]

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(
                publisher="Canonical",
                offer="UbuntuServer",
                sku="18.04-LTS",
                version="latest"
            ),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=POOL_VM_SIZE,
        target_dedicated_nodes=DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            command_line=wrap_commands_in_shell('linux',task_commands),
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        )
    )

    batch_service_client.pool.add(new_pool)
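# `wrap_commands_in_shell` is used throughout these examples but never shown.
# A sketch of the two-argument variant, modeled on the Azure Batch samples
# helper; the copy in your repo may differ in detail.
def wrap_commands_in_shell(ostype, commands):
    if ostype.lower() == 'linux':
        # Run all commands in one bash invocation, aborting on the first error.
        return "/bin/bash -c 'set -e; set -o pipefail; {}; wait'".format(
            '; '.join(commands))
    if ostype.lower() == 'windows':
        return 'cmd.exe /c "{}"'.format('&'.join(commands))
    raise ValueError('unknown ostype: {}'.format(ostype))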
Example #6
def generate_cluster_start_task(
    core_base_operations,
    zip_resource_file: batch_models.ResourceFile,
    cluster_id: str,
    gpu_enabled: bool,
    docker_repo: str = None,
    docker_run_options: str = None,
    file_shares: List[models.FileShare] = None,
    mixed_mode: bool = False,
    worker_on_master: bool = True,
):
    """
        This will return the start task object for the pool to be created.
        :param str cluster_id: Id of the cluster (used for uploading the resource files)
        :param zip_resource_file: Resource file object pointing to the zip file containing scripts to run on the node
    """

    resource_files = [zip_resource_file]
    spark_web_ui_port = constants.DOCKER_SPARK_WEB_UI_PORT
    spark_worker_ui_port = constants.DOCKER_SPARK_WORKER_UI_PORT
    spark_job_ui_port = constants.DOCKER_SPARK_JOB_UI_PORT

    spark_container_name = constants.DOCKER_SPARK_CONTAINER_NAME
    spark_submit_logs_file = constants.SPARK_SUBMIT_LOGS_FILE

    # TODO use certificate
    environment_settings = (
        __get_secrets_env(core_base_operations) + [
            batch_models.EnvironmentSetting(name="SPARK_WEB_UI_PORT",
                                            value=spark_web_ui_port),
            batch_models.EnvironmentSetting(name="SPARK_WORKER_UI_PORT",
                                            value=spark_worker_ui_port),
            batch_models.EnvironmentSetting(name="SPARK_JOB_UI_PORT",
                                            value=spark_job_ui_port),
            batch_models.EnvironmentSetting(name="SPARK_CONTAINER_NAME",
                                            value=spark_container_name),
            batch_models.EnvironmentSetting(name="SPARK_SUBMIT_LOGS_FILE",
                                            value=spark_submit_logs_file),
            batch_models.EnvironmentSetting(
                name="AZTK_GPU_ENABLED", value=helpers.bool_env(gpu_enabled)),
        ] + __get_docker_credentials(core_base_operations) +
        _get_aztk_environment(cluster_id, worker_on_master, mixed_mode))

    # start task command
    command = __cluster_install_cmd(zip_resource_file, gpu_enabled,
                                    docker_repo, docker_run_options,
                                    file_shares)

    return batch_models.StartTask(
        command_line=helpers.wrap_commands_in_shell(command),
        resource_files=resource_files,
        environment_settings=environment_settings,
        user_identity=POOL_ADMIN_USER_IDENTITY,
        wait_for_success=True,
        max_task_retry_count=2,
    )
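# `helpers.bool_env` encodes a Python boolean as the string an
# EnvironmentSetting value requires. A plausible sketch (the real AZTK helper
# may differ):
def bool_env(value):
    return "true" if value else "false"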
Example #7
    def create_pool(self, pool_id):
        """
        Creates a pool of compute nodes with the specified OS settings.

        :param str pool_id: An ID for the new pool.
        :param dict config: Configuration details.
        """
        if pool_id in self.active_pools or self.pool_exists(pool_id):
            return

        self.logger.info("creating pool {}".format(pool_id))

        pool_config = self.config['pools'][pool_id]

        sku_to_use, image_ref_to_use = self.__get_vm_image_and_node_agent_sku(
            pool_config)

        start_vm_commands = None
        if pool_config.get('create_vm_commands', None):
            start_vm_commands = self.__create_commands(
                pool_config['create_vm_commands'])

        user = batchmodels.AutoUserSpecification(
            scope=batchmodels.AutoUserScope.pool,
            elevation_level=batchmodels.ElevationLevel.admin)

        vm_configuration = batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            node_agent_sku_id=sku_to_use,
        )

        vm_start_task = batchmodels.StartTask(
            command_line=self.__wrap_commands_in_shell('linux',
                                                       start_vm_commands),
            user_identity=batchmodels.UserIdentity(auto_user=user),
            wait_for_success=True)

        new_pool = batchmodels.PoolAddParameter(
            id=pool_id,
            virtual_machine_configuration=vm_configuration,
            vm_size=pool_config['pool_vm_size'],
            enable_auto_scale=True,
            auto_scale_formula=pool_config['auto_scale_formula'],
            auto_scale_evaluation_interval=datetime.timedelta(minutes=5),
            start_task=vm_start_task,
            max_tasks_per_node=pool_config['max_tasks_per_node'],
        )

        try:
            self.batch_client.pool.add(new_pool)
        except batchmodels.BatchErrorException as err:
            self.__print_batch_exception(err)
            raise

        self.active_pools.add(pool_id)
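# The auto-scale formula above comes from configuration. An illustrative
# formula (an assumption, not taken from this sample's config) that sizes the
# pool to the number of pending tasks, capped at 10 dedicated nodes:
EXAMPLE_AUTO_SCALE_FORMULA = '''
pending = max($PendingTasks.GetSample(TimeInterval_Minute * 5, 50));
$TargetDedicatedNodes = min(10, pending);
$NodeDeallocationOption = taskcompletion;
'''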
Example #8
def generate_cluster_start_task(
        spark_client,
        zip_resource_file: batch_models.ResourceFile,
        gpu_enabled: bool,
        docker_repo: str = None,
        file_shares: List[aztk_models.FileShare] = None):
    """
        This will return the start task object for the pool to be created.
        :param str cluster_id: Id of the cluster (used for uploading the resource files)
        :param zip_resource_file: Resource file object pointing to the zip file containing scripts to run on the node
    """

    resource_files = [zip_resource_file]
    spark_web_ui_port = constants.DOCKER_SPARK_WEB_UI_PORT
    spark_worker_ui_port = constants.DOCKER_SPARK_WORKER_UI_PORT
    spark_jupyter_port = constants.DOCKER_SPARK_JUPYTER_PORT
    spark_job_ui_port = constants.DOCKER_SPARK_JOB_UI_PORT
    spark_rstudio_server_port = constants.DOCKER_SPARK_RSTUDIO_SERVER_PORT

    # TODO use certificate
    environment_settings = [
        batch_models.EnvironmentSetting(
            name="BATCH_ACCOUNT_KEY", value=spark_client.batch_config.account_key),
        batch_models.EnvironmentSetting(
            name="BATCH_ACCOUNT_URL", value=spark_client.batch_config.account_url),
        batch_models.EnvironmentSetting(
            name="STORAGE_ACCOUNT_NAME", value=spark_client.blob_config.account_name),
        batch_models.EnvironmentSetting(
            name="STORAGE_ACCOUNT_KEY", value=spark_client.blob_config.account_key),
        batch_models.EnvironmentSetting(
            name="STORAGE_ACCOUNT_SUFFIX", value=spark_client.blob_config.account_suffix),
        batch_models.EnvironmentSetting(
            name="SPARK_WEB_UI_PORT", value=spark_web_ui_port),
        batch_models.EnvironmentSetting(
            name="SPARK_WORKER_UI_PORT", value=spark_worker_ui_port),
        batch_models.EnvironmentSetting(
            name="SPARK_JUPYTER_PORT", value=spark_jupyter_port),
        batch_models.EnvironmentSetting(
            name="SPARK_JOB_UI_PORT", value=spark_job_ui_port),
        batch_models.EnvironmentSetting(
            name="SPARK_RSTUDIO_SERVER_PORT", value=spark_rstudio_server_port),
    ] + __get_docker_credentials(spark_client)

    # start task command
    command = __cluster_install_cmd(zip_resource_file, gpu_enabled, docker_repo, file_shares)

    return batch_models.StartTask(
        command_line=helpers.wrap_commands_in_shell(command),
        resource_files=resource_files,
        environment_settings=environment_settings,
        user_identity=POOL_ADMIN_USER_IDENTITY,
        wait_for_success=True)
Example #9
def create_pool_and_wait_for_node(batch_client, pool_id, vm_size, vm_count,
                                  sha1_cert_tp):
    """Creates an Azure Batch pool with the specified id.

    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param str pool_id: The id of the pool to create.
    :param str vm_size: vm size (sku)
    :param int vm_count: number of vms to allocate
    :param str sha1_cert_tp: sha1 cert thumbprint for cert ref
    """
    # pick the latest supported 16.04 sku for UbuntuServer
    sku_to_use, image_ref_to_use = \
        common.helpers.select_latest_verified_vm_image_with_node_agent_sku(
            batch_client, 'Canonical', 'UbuntuServer', '16.04')

    # create start task commands
    # 1. update repository
    # 2. install blobxfer pre-requisites
    # 3. pip install blobxfer python script
    start_task_commands = [
        'apt-get update',
        'apt-get install -y build-essential libssl-dev libffi-dev ' +
        'libpython-dev python-dev python-pip', 'pip install --upgrade blobxfer'
    ]

    user = batchmodels.AutoUserSpecification(
        scope=batchmodels.AutoUserScope.pool,
        elevation_level=batchmodels.ElevationLevel.admin)
    # create pool with start task and cert ref with visibility of task
    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated=vm_count,
        start_task=batchmodels.StartTask(
            command_line=common.helpers.wrap_commands_in_shell(
                'linux', start_task_commands),
            user_identity=batchmodels.UserIdentity(auto_user=user),
            wait_for_success=True),
        certificate_references=[
            batchmodels.CertificateReference(
                sha1_cert_tp,
                'sha1',
                visibility=[batchmodels.CertificateVisibility.task])
        ],
    )
    common.helpers.create_pool_if_not_exist(batch_client, pool)
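# The certificate referenced by `sha1_cert_tp` must already exist in the Batch
# account. A sketch of uploading one from a local PFX file; the file name and
# password are placeholders, and `batch_client`/`batchmodels` are the same
# objects used in the example above.
import base64

with open('mycert.pfx', 'rb') as f:
    pfx_data = base64.b64encode(f.read()).decode('utf-8')

batch_client.certificate.add(batchmodels.CertificateAddParameter(
    thumbprint=sha1_cert_tp,
    thumbprint_algorithm='sha1',
    data=pfx_data,
    certificate_format=batchmodels.CertificateFormat.pfx,
    password='<pfx-password>'))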
Example #10
def create_processing_pool(batch_service_client, start_task):
    """
    Creates a pool of compute nodes with the specified OS settings.
    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str start_task: The start task command line to run on each node.

    """
    LOGGER.info(f'Creating pool [{PROCESSING_POOL_ID}]...')

    image_ref_to_use = get_image_reference()

    container_registry = \
        batch_models.ContainerRegistry(
            registry_server=REGISTRY_SERVER,
            user_name=REGISTRY_ACCOUNT_USER,
            password=REGISTRY_ACCOUNT_PASSWORD)

    container_conf = batch_models.ContainerConfiguration(
            container_image_names=[DOCKER_CONTAINER_URL],
            container_registries=[container_registry])

    new_pool = batch_models.PoolAddParameter(
            id=PROCESSING_POOL_ID,
            virtual_machine_configuration=
            batch_models.VirtualMachineConfiguration(
                image_reference=image_ref_to_use,
                container_configuration=container_conf,
                node_agent_sku_id=VM_AGENT_SKU),
            vm_size=PROCESSING_POOL_VM_SIZE,
            start_task=batch_models.StartTask(
                command_line=start_task,
                user_identity=batch_models.UserIdentity(
                    auto_user=batch_models.AutoUserSpecification(
                        scope='pool',
                        elevation_level='admin'))
                    ),
            enable_auto_scale=True,
            auto_scale_evaluation_interval=datetime.timedelta(
                minutes=PROCESSING_POOL_SCALE_INTERVAL_MINUTES),
            auto_scale_formula=PROCESSING_POOL_SCALE_FORMULA)
    try:
        batch_service_client.pool.add(new_pool)
        LOGGER.info("Processing Pool Created")
    except batch_models.BatchErrorException as err:
        if 'The specified pool already exists.' in err.error.message.value:
            LOGGER.info("Pool already exists...")
        else:
            raise
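# `get_image_reference` is not shown. A hypothetical stand-in returning the
# same Ubuntu 18.04 Marketplace image used elsewhere in these examples:
def get_image_reference():
    return batch_models.ImageReference(
        publisher='canonical',
        offer='ubuntuserver',
        sku='18.04-lts',
        version='latest')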
Example #11
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    # The start task installs Docker CE on each node from the Docker
    # repository, using an administrator auto-user identity.

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="16.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 16.04"),
        vm_size=os.environ['POOL_VM_SIZE'],
        target_dedicated_nodes=os.environ['DEDICATED_POOL_NODE_COUNT'],
        target_low_priority_nodes=os.environ['LOW_PRIORITY_POOL_NODE_COUNT'],
        start_task=batchmodels.StartTask(
            command_line="/bin/bash -c \"apt-get update && \
                apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&\
                curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
                add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable' && \
                apt-get update && \
                apt-get install -y docker-ce\"",
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        ))

    batch_service_client.pool.add(new_pool)
Example #12
def create_pool_and_wait_for_nodes(batch_client, block_blob_client, pool_id,
                                   vm_size, vm_count):

    sku_to_use, image_ref_to_use = common.helpers.select_latest_verified_vm_image_with_node_agent_sku(
        batch_client, 'Canonical', 'UbuntuServer', '14.04')

    # upload start task script
    block_blob_client.create_container(_CONTAINER_NAME, fail_on_exist=False)
    sas_url = common.helpers.upload_blob_and_create_sas(
        block_blob_client, _CONTAINER_NAME, _STARTTASK_RESOURCE_FILE,
        _STARTTASK_SHELL_SCRIPT_PATH,
        datetime.datetime.utcnow() + datetime.timedelta(hours=1))

    # create pool and execute starttask
    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        enable_inter_node_communication=True,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated=vm_count,
        start_task=batchmodels.StartTask(
            command_line=_STARTTASK_RESOURCE_FILE,
            run_elevated=True,
            wait_for_success=True,
            resource_files=[
                batchmodels.ResourceFile(file_path=_STARTTASK_RESOURCE_FILE,
                                         blob_source=sas_url)
            ]),
    )
    common.helpers.create_pool_if_not_exist(batch_client, pool)

    # because we want all nodes to be available before any tasks are assigned
    # to the pool, here we will wait for all compute nodes to reach idle
    nodes = common.helpers.wait_for_all_nodes_state(
        batch_client, pool,
        frozenset((batchmodels.ComputeNodeState.starttaskfailed,
                   batchmodels.ComputeNodeState.unusable,
                   batchmodels.ComputeNodeState.idle)))

    # ensure all nodes are idle
    if any(node.state != batchmodels.ComputeNodeState.idle for node in nodes):
        raise RuntimeError('node(s) of pool {} not in idle state'.format(
            pool.id))

    return nodes
Example #13
def create_pool(batch_service_client, pool_id, application_files):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    # The start task runs starttask.cmd, downloaded with the application
    # files, using an administrator auto-user identity.

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(
                publisher="MicrosoftWindowsServer",
                offer="WindowsServer",
                sku="2016-Datacenter",
                version="latest"
            ),
            node_agent_sku_id="batch.node.windows amd64"),
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            command_line="cmd /c starttask.cmd",
            resource_files=application_files,
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        )
    )

    batch_service_client.pool.add(new_pool)
Example #14
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    # The start task downloads and unpacks SPAdes 3.14.0 on each node, using
    # an administrator auto-user identity.

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="18.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            command_line=
            "/bin/bash -c \"apt-get update && apt-get install wget && wget http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Linux.tar.gz && tar -xf SPAdes-3.14.0-Linux.tar.gz\"",
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        ))

    batch_service_client.pool.add(new_pool)
Example #15
def create_pool(batch_service_client: batch.BatchServiceClient,
                pool_id: str,
                publisher: str = "Canonical",
                offer: str = "UbuntuServer",
                sku: str = "18.04-LTS") -> None:
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :param pool_id: An ID for the new pool.
    :param publisher: Marketplace image publisher
    :param offer: Marketplace image offer
    :param sku: Marketplace image sku
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher=publisher,
                                                       offer=offer,
                                                       sku=sku,
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            command_line=
            "/bin/bash -c \"apt-get update && apt-get -y install python3.7 python3-pip\"",
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        ))
    batch_service_client.pool.add(new_pool)
Example #16
def createBatchPool(batch_client, pool_id):

    start_cmd = "/bin/bash -c \"apt-get install -y python3-pip python3-venv\""
    admin = batchmodels.UserIdentity(
        auto_user=batchmodels.AutoUserSpecification(elevation_level='admin'))

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="18.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size='STANDARD_A2m_v2',  # VM type/size: STANDARD_A2m_v2 = 16 GB; Standard_E4_v3 = 32 GB
        target_dedicated_nodes=1,  # pool node count
        start_task=batchmodels.StartTask(command_line=start_cmd,
                                         user_identity=admin))
    batch_client.pool.add(new_pool)
Example #17
def create_pool(batch_client, container_conf, container_settings, image_ref_to_use, pool_id, sku_to_use,
                vm_count, vm_size):

    start_task_settings = container_settings
    start_task_settings.working_directory = ContainerWorkingDirectory.container_image_default
    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            container_configuration=container_conf,
            node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        max_tasks_per_node=1,
        target_dedicated_nodes=vm_count,
        start_task=batchmodels.StartTask(
            command_line="",
            wait_for_success=True,
            container_settings=start_task_settings),
    )
    azure_helpers.create_pool_if_not_exist(batch_client, pool)
Example #18
def create_pool(batch_client, block_blob_client, pool_id, vm_size, vm_count):
    """Creates an Azure Batch pool with the specified id.

    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param block_blob_client: The storage block blob client to use.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str pool_id: The id of the pool to create.
    :param str vm_size: vm size (sku)
    :param int vm_count: number of vms to allocate
    """
    # pick the latest supported 14.04 sku for UbuntuServer
    sku_to_use, image_ref_to_use = \
        common.helpers.select_latest_verified_vm_image_with_node_agent_sku(
            batch_client, 'Canonical', 'UbuntuServer', '14.04')

    block_blob_client.create_container(
        _CONTAINER_NAME,
        fail_on_exist=False)

    sas_url = common.helpers.upload_blob_and_create_sas(
        block_blob_client,
        _CONTAINER_NAME,
        _SIMPLE_TASK_NAME,
        _SIMPLE_TASK_PATH,
        datetime.datetime.utcnow() + datetime.timedelta(hours=1))

    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated=vm_count,
        start_task=batchmodels.StartTask(
            command_line="python " + _SIMPLE_TASK_NAME,
            resource_files=[batchmodels.ResourceFile(
                            file_path=_SIMPLE_TASK_NAME,
                            blob_source=sas_url)]))

    common.helpers.create_pool_if_not_exist(batch_client, pool)
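# `upload_blob_and_create_sas` is another helper from the samples' common
# module. A sketch against the legacy azure-storage SDK (the one that provides
# BlockBlobService); details may differ from the shipped helper.
from azure.storage.blob import BlobPermissions

def upload_blob_and_create_sas(block_blob_client, container_name, blob_name,
                               file_path, expiry):
    block_blob_client.create_blob_from_path(container_name, blob_name,
                                            file_path)
    sas_token = block_blob_client.generate_blob_shared_access_signature(
        container_name, blob_name, permission=BlobPermissions.READ,
        expiry=expiry)
    # Return a full URL the Batch node can download the blob from.
    return block_blob_client.make_blob_url(container_name, blob_name,
                                           sas_token=sas_token)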
Example #19
def create_commit_pool(batch_service_client):
    """
    Creates a pool of compute nodes with the specified OS settings.
    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`

    """
    LOGGER.info(f'Creating pool [{COMMIT_POOL_ID}]...')

    image_ref_to_use = batch_models.ImageReference(
            publisher='canonical',
            offer='ubuntuserver',
            sku='18.04-lts',
            version='latest')

    new_pool = batch_models.PoolAddParameter(
            id=COMMIT_POOL_ID,
            virtual_machine_configuration=
            batch_models.VirtualMachineConfiguration(
                image_reference=image_ref_to_use,
                node_agent_sku_id="batch.node.ubuntu 18.04"),
            vm_size=COMMIT_POOL_VM_SIZE,
            start_task=batch_models.StartTask(
                command_line=COMMIT_POOL_START_TASK,
                user_identity=batch_models.UserIdentity(
                    auto_user=batch_models.AutoUserSpecification(
                        scope='pool',
                        elevation_level='admin'))
                    ),
            enable_auto_scale=True,
            auto_scale_evaluation_interval=datetime.timedelta(
                minutes=COMMIT_POOL_SCALE_INTERVAL_MINUTES),
            auto_scale_formula=COMMIT_POOL_SCALE_FORMULA)
    try:
        batch_service_client.pool.add(new_pool)
        LOGGER.info("Commit Pool Created")
    except batch_models.BatchErrorException as err:
        if 'The specified pool already exists.' in err.error.message.value:
            LOGGER.info("Pool already exists...")
        else:
            raise
Example #20
    def _create_start_task(command, resource_files):
        """The start task is run each node as it joins the pool, and when it's rebooted or re-imaged to prep for
        any jobs later. The script is run "shared" directory that all tasks that run on the node have access to.

        Args:
            command: a string containing all the commands to run as the start task
            resource_files: list of file references (of type azure.batch.models.ResourceFile)
                from Azure Storage to download to each node

        Returns:
            azure.batch.models.StartTask object
        """

        user = batch_models.AutoUserSpecification(
            scope=batch_models.AutoUserScope.pool,
            elevation_level=batch_models.ElevationLevel.admin)
        return batch_models.StartTask(
            command_line=command,
            user_identity=batch_models.UserIdentity(auto_user=user),
            wait_for_success=True,
            resource_files=resource_files)
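    # Hypothetical usage from within the defining class (the method takes no
    # `self`, so it is presumably a @staticmethod): build a start task that
    # installs one package and downloads no resource files.
    start_task = _create_start_task(
        command='/bin/bash -c "apt-get update && apt-get install -y unzip"',
        resource_files=[])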
Example #21
def create_pool(batch_client, block_blob_client, pool_id, vm_size, vm_count):
    """Creates an Azure Batch pool with the specified id.
    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param block_blob_client: The storage block blob client to use.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    :param str pool_id: The id of the pool to create.
    :param str vm_size: vm size (sku)
    :param int vm_count: number of vms to allocate
    """
    # pick the latest supported 18.04 sku for UbuntuServer
    sku_to_use, image_ref_to_use = \
        common.helpers.select_latest_verified_vm_image_with_node_agent_sku(
            batch_client, 'Canonical', 'UbuntuServer', '18.04')

    block_blob_client.create_container(_CONTAINER_NAME, fail_on_exist=False)

    sas_url = common.helpers.upload_blob_and_create_sas(
        block_blob_client, _CONTAINER_NAME, _SIMPLE_TASK_NAME,
        _SIMPLE_TASK_PATH, _EXPIRY_TIME)

    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated_nodes=vm_count,
        start_task=batchmodels.StartTask(
            command_line=
            '/bin/bash -c \"sudo apt-get -y update && export DEBIAN_FRONTEND=noninteractive && sudo apt-get install -y python3-pip && sudo pip3 install numpy statsmodels pmdarima\"',
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        ))

    common.helpers.create_pool_if_not_exist(batch_client, pool)
Example #22
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    user = batchmodels.UserIdentity(
        auto_user=batchmodels.AutoUserSpecification(
            scope=batchmodels.AutoUserScope.pool,
            elevation_level=batchmodels.ElevationLevel.admin))

    try:
        new_pool = batch.models.PoolAddParameter(
            id=pool_id,
            virtual_machine_configuration=batchmodels.
            VirtualMachineConfiguration(
                image_reference=batchmodels.ImageReference(
                    publisher="Canonical",
                    offer="UbuntuServer",
                    sku="16.04-LTS",
                    version="latest"),
                node_agent_sku_id="batch.node.ubuntu 16.04"),
            vm_size=_POOL_VM_SIZE,
            target_dedicated_nodes=_POOL_NODE_COUNT,
            start_task=batchmodels.StartTask(
                command_line=
                "/bin/bash -c \"sudo apt-get update && sudo apt-get -y install python3-pip build-essential libssl-dev libffi-dev python3-dev && sudo pip3 install azure;\"",
                user_identity=user,
                wait_for_success=True))
        batch_service_client.pool.add(new_pool)
    except batchmodels.batch_error.BatchErrorException as err:
        print_batch_exception(err)
Example #23
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))

    # The start task downloads ilastik to each node's shared directory and
    # unpacks it there.

    command_line = "/bin/bash -c \"wget {} -P $AZ_BATCH_NODE_SHARED_DIR && cd $AZ_BATCH_NODE_SHARED_DIR && tar xjf ilastik*.tar.bz2\"".format(
        _ILASTIK_DOWNLOAD_URL)

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="16.04.0-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 16.04"),
        vm_size=_POOL_VM_SIZE,
        target_dedicated_nodes=_DEDICATED_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            command_line=command_line,
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        ))
    batch_service_client.pool.add(new_pool)
Example #24
def create_pool(batch_client, block_blob_client, pool_id, vm_size, vm_count):
    block_blob_client.create_container(CONTAINER_NAME, fail_on_exist=False)

    sku_to_use, image_ref_to_use = select_latest_vm_image_with_node_agent_sku(
        batch_client, 'Canonical', 'UbuntuServer', '18.04')

    sas_url = upload_blob_and_create_sas(
        block_blob_client, CONTAINER_NAME, TASK_NAME, TASK_PATH,
        datetime.datetime.utcnow() + datetime.timedelta(hours=1))

    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated_nodes=vm_count,
        start_task=batchmodels.StartTask(command_line="python3 " + TASK_NAME,
                                         resource_files=[
                                             batchmodels.ResourceFile(
                                                 file_path=TASK_NAME,
                                                 blob_source=sas_url)
                                         ]))

    create_pool_if_not_exist(batch_client, pool)
Example #25
    def test_batch_update_pools(self, **kwargs):
        client = self.create_sharedkey_client(**kwargs)
        # Test Create Paas Pool
        test_paas_pool = models.PoolAddParameter(
            id=self.get_resource_name('batch_paas_'),
            vm_size='small',
            cloud_service_configuration=models.CloudServiceConfiguration(
                os_family='5'
            ),
            start_task=models.StartTask(
                command_line="cmd.exe /c \"echo hello world\"",
                resource_files=[models.ResourceFile('https://blobsource.com', 'filename.txt')],
                environment_settings=[models.EnvironmentSetting('ENV_VAR', 'env_value')],
                user_identity=models.UserIdentity(
                    auto_user=models.AutoUserSpecification(
                        elevation_level=models.ElevationLevel.admin
                    )
                )
            )
        )
        response = client.pool.add(test_paas_pool)
        self.assertIsNone(response)

        # Test Upgrade Pool OS
        self.assertBatchError(
            "PoolVersionEqualsUpgradeVersion",
            client.pool.upgrade_os,
            test_paas_pool.id,
            "*"
        )

        # Test Update Pool Parameters
        params = models.PoolUpdatePropertiesParameter([], [], [models.MetadataItem('foo', 'bar')])
        response = client.pool.update_properties(test_paas_pool.id, params)
        self.assertIsNone(response)

        # Test Patch Pool Parameters
        params = models.PoolPatchParameter(metadata=[models.MetadataItem('foo2', 'bar2')])
        response = client.pool.patch(test_paas_pool.id, params)
        self.assertIsNone(response)

        # Test Pool Exists
        response = client.pool.exists(test_paas_pool.id)
        self.assertTrue(response)

        # Test Get Pool
        pool = client.pool.get(test_paas_pool.id)
        self.assertIsInstance(pool, models.CloudPool)
        self.assertEqual(pool.id, test_paas_pool.id)
        self.assertEqual(pool.state, models.PoolState.active)
        self.assertEqual(pool.allocation_state, models.AllocationState.steady)
        self.assertEqual(pool.cloud_service_configuration.os_family, '5')
        self.assertEqual(pool.vm_size, 'small')
        self.assertIsNone(pool.start_task)
        self.assertEqual(pool.metadata[0].name, 'foo2')
        self.assertEqual(pool.metadata[0].value, 'bar2')

        # Test Get Pool with OData Clauses
        options = models.PoolGetOptions(select='id,state', expand='stats')
        pool = client.pool.get(test_paas_pool.id, options)
        self.assertIsInstance(pool, models.CloudPool)
        self.assertEqual(pool.id, test_paas_pool.id)
        self.assertEqual(pool.state, models.PoolState.active)
        self.assertIsNone(pool.allocation_state)
        self.assertIsNone(pool.vm_size)

        # Test Delete Pool
        response = client.pool.delete(test_paas_pool.id)
        self.assertIsNone(response)
Example #26
def create_or_update_pool(batch_service_client, pool_id, num_tasks):
    """
    Creates or updates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    :param int num_tasks: the number of tasks that will be added to the pool later (used to compute the number of nodes).
    """
    logging.info('Creating pool [{}]...'.format(pool_id))

    maxNumberofVMs = 25
    pool_size = min(maxNumberofVMs, num_tasks)

    if not batch_service_client.pool.exists(pool_id):
        new_pool = batch.models.PoolAddParameter(
            id=pool_id,
            virtual_machine_configuration=batchmodels.
            VirtualMachineConfiguration(
                image_reference=batchmodels.ImageReference(
                    publisher="Canonical",
                    offer="UbuntuServer",
                    sku="18.04-LTS",
                    version="latest"),
                node_agent_sku_id="batch.node.ubuntu 18.04"),
            vm_size=os.environ["_POOL_VM_SIZE"],
            enable_auto_scale=True,
            auto_scale_evaluation_interval=timedelta(days=0,
                                                     hours=0,
                                                     minutes=5),
            auto_scale_formula='''// Sets the initial pool size:
initialPoolSize = {poolsize};
$TargetLowPriorityNodes = initialPoolSize;
// "Mon, 06 Oct 2014 10:20:00 GMT" represents the datetime that this autoscale formula starts to evaluate. This is an arbitrary value here.
lifespan = time() - time("Mon, 06 Oct 2014 10:20:00 GMT");
// Representing 15 minutes
span = TimeInterval_Minute * 15;
// Representing 10 minutes
startup = TimeInterval_Minute * 10;
ratio = 50;

// After 10 minutes, obtains the max value of the number of running and active tasks within the past 15 minutes.
// If both values are 0 (indicating that no tasks were running or active in the last 15 minutes), the pool size is set to 0.
// If either value is greater than zero, no change is made.
$TargetLowPriorityNodes = (lifespan > startup ? (max($RunningTasks.GetSample(span, ratio), 
$ActiveTasks.GetSample(span, ratio)) == 0 ? 0 : $TargetLowPriorityNodes) : initialPoolSize );
$NodeDeallocationOption = taskcompletion;'''.format(poolsize=pool_size),
            start_task=batchmodels.StartTask(
                command_line=
                "/bin/bash -c \"add-apt-repository ppa:deadsnakes/ppa && apt-get update && apt-get install -y ffmpeg python3.7 python3.7-venv python3.7-dev && apt-get install -y python3-pip && apt-get install -f && python3.7 -m pip install --upgrade pip setuptools wheel && pip3 install python-dateutil && pip3 install psutil && pip3 install requests && pip3 install tzlocal && pip3 install tesla-dashcam\"",
                wait_for_success=True,
                user_identity=batchmodels.UserIdentity(
                    auto_user=batchmodels.AutoUserSpecification(
                        scope=batchmodels.AutoUserScope.pool,
                        elevation_level=batchmodels.ElevationLevel.admin)),
            ))
        batch_service_client.pool.add(new_pool)
        logging.info('Pool created.')
    else:
        logging.info('Pool already exists! Resizing..')
        pool_resize_parameter = batch.models.PoolResizeParameter(
            target_dedicated_nodes=0, target_low_priority_nodes=pool_size)
        batch_service_client.pool.resize(pool_id, pool_resize_parameter)
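# An autoscale formula can be dry-run against an existing autoscale-enabled
# pool before it is saved; a minimal sketch using the service-side evaluator:
result = batch_service_client.pool.evaluate_auto_scale(
    pool_id, auto_scale_formula='$TargetLowPriorityNodes = 1;')
print(result.results)  # the variable assignments the service computed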
Example #27
def create_job_schedule(batch_client, job_schedule_id, vm_size, vm_count,
                        block_blob_client):
    """Creates an Azure Batch pool and job schedule with the specified ids.

    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param str job_schedule_id: The id of the job schedule to create
    :param str vm_size: vm size (sku)
    :param int vm_count: number of vms to allocate
    :param block_blob_client: The storage block blob client to use.
    :type block_blob_client: `azure.storage.blob.BlockBlobService`
    """
    cloud_service_config = batchmodels.CloudServiceConfiguration(os_family='6')

    user_id = batchmodels.UserIdentity(
        auto_user=batchmodels.AutoUserSpecification(
            elevation_level=_USER_ELEVATION_LEVEL))

    python_download = batchmodels.ResourceFile(http_url=_PYTHON_DOWNLOAD,
                                               file_path='python373.exe')

    pool_info = batchmodels.PoolInformation(
        auto_pool_specification=batchmodels.AutoPoolSpecification(
            auto_pool_id_prefix="JobScheduler",
            pool=batchmodels.PoolSpecification(
                vm_size=vm_size,
                target_dedicated_nodes=vm_count,
                cloud_service_configuration=cloud_service_config,
                start_task=batchmodels.StartTask(
                    command_line=common.helpers.wrap_commands_in_shell(
                        'windows', ['{}'.format(_PYTHON_INSTALL)]),
                    resource_files=[python_download],
                    wait_for_success=True,
                    user_identity=user_id)),
            keep_alive=False,
            pool_lifetime_option=batchmodels.PoolLifetimeOption.job))

    sas_url = common.helpers.upload_blob_and_create_sas(
        block_blob_client, _CONTAINER_NAME, _SIMPLE_TASK_NAME,
        _SIMPLE_TASK_PATH,
        datetime.datetime.utcnow() + datetime.timedelta(minutes=30))

    job_spec = batchmodels.JobSpecification(
        pool_info=pool_info,
        # Terminate job once all tasks under it are complete to allow for a new
        # job to be created under the schedule
        on_all_tasks_complete=batchmodels.OnAllTasksComplete.terminate_job,
        job_manager_task=batchmodels.JobManagerTask(
            id="JobManagerTask",
            command_line=common.helpers.wrap_commands_in_shell(
                'windows', ['python {}'.format(_SIMPLE_TASK_NAME)]),
            resource_files=[
                batchmodels.ResourceFile(file_path=_SIMPLE_TASK_NAME,
                                         http_url=sas_url)
            ]))

    do_not_run_after = datetime.datetime.utcnow() \
        + datetime.timedelta(minutes=30)

    schedule = batchmodels.Schedule(
        do_not_run_after=do_not_run_after,
        recurrence_interval=datetime.timedelta(minutes=10))

    scheduled_job = batchmodels.JobScheduleAddParameter(
        id=job_schedule_id, schedule=schedule, job_specification=job_spec)

    batch_client.job_schedule.add(cloud_job_schedule=scheduled_job)
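# When the schedule is no longer needed, deleting it also removes the jobs
# and tasks created under it; a one-line sketch using the same client:
batch_client.job_schedule.delete(job_schedule_id)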
Example #28
    def check_or_create_pool(self, id=None):
        if id is None:
            id = self.config.get('POOL', 'id')

        self.pool_id = id

        if self.client.pool.exists(id):
            found_job = False
            # Update the Job ID here
            for job in self.client.job.list():
                if job.pool_info.pool_id == self.pool_id:
                    self.job_id = job.id
                    found_job = True
                    break
            if not found_job:
                self.start_mc_server_job_pool()  # Restart jobs for this pool - this is necessary!
            return self.client.pool.get(id)

        api_port = self.config.get('POOL', 'api_port')
        min_count = self.config.get('POOL', 'mincount')

        image_reference = batchmodels.ImageReference(
            virtual_machine_image_id=
            "/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/polycraft-game/providers/Microsoft.Compute/galleries/polycraftImgGallery/images/polycraftBestGameServerV1/versions/1.0.0"
        )

        vmc = batchmodels.VirtualMachineConfiguration(
            image_reference=image_reference,
            node_agent_sku_id="batch.node.ubuntu 18.04")

        users = [
            batchmodels.UserAccount(
                name='azureuser',
                password='******',
                elevation_level=batchmodels.ElevationLevel.admin),
            # batchmodels.UserAccount(
            #     name='pool-nonadmin',
            #     password='******',
            #     elevation_level=batchmodels.ElevationLevel.non_admin)
        ]

        # Thank you Ask Ubuntu https://askubuntu.com/a/373478
        wait_for_locks = 'while sudo fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do echo "Waiting for release of apt locks"; sleep 2; done; '

        # NOTE: Always use DOUBLE QUOTES within commands, as Azure wraps the
        # entire command line in single quotes.
        start_task = batchmodels.StartTask(
            command_line=helpers.wrap_commands_in_shell(
                'linux',
                [
                    'whoami',
                    'printenv',
                    'usermod -aG sudo azureuser',
                    'sudo systemctl disable --now apt-daily.timer',
                    'sudo systemctl disable --now apt-daily-upgrade.timer',
                    'sudo systemctl daemon-reload',
                    'cd /home/polycraft',
                    'chmod -R 777 *',
                    'rm /home/polycraft/oxygen/mods/*.jar',
                    'cd /home/polycraft/oxygen/',
                    'echo "[DEBUG] removing helium..."',
                    'ls -l',
                    f'sudo rm -rf /home/polycraft/oxygen/{self.config.get("SERVER","worldName")}',
                    'sudo rm -f *.zip',
                    'echo "[DEBUG] removed helium?"',
                    'ls -l',
                    # Stop the crontabs from running
                    'sudo rm /var/spool/cron/crontabs/*',
                    # Taken from: https://stackoverflow.com/questions/45269225/ansible-playbook-fails-to-lock-apt/51919678#51919678
                    'sudo systemd-run --property="After=apt-daily.service apt-daily-upgrade.service" --wait /bin/true',
                    'sudo apt-get -y purge unattended-upgrades',
                    'sudo apt-get -y update',
                    wait_for_locks +
                    'sudo apt-get install software-properties-common -y',
                    # 'while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do sleep 1; done; sudo apt-add-repository universe',
                    wait_for_locks + 'sudo apt-add-repository universe',
                    # Mount the Polycraft Game FileShare
                    wait_for_locks +
                    'sudo apt-get install cifs-utils -y && sudo mkdir -p /mnt/PolycraftGame/',
                    f'mount -t cifs //polycraftbestbatch.file.core.windows.net/best-batch-round-1-test /mnt/PolycraftGame -o vers=3.0,username={self.credentials.get("Storage", "storageaccountname")},password={self.credentials.get("Storage", "storageaccountkey")},dir_mode=0777,file_mode=0777,serverino && ls /mnt/PolycraftGame',
                    # Copy the default world file to the right folder
                    f'cp /mnt/PolycraftGame/{self.config.get("SERVER","fileShareFolder")}/worlds/{self.config.get("SERVER","worldZipName")}.tar.gz /home/polycraft/oxygen/',
                    'cd /home/polycraft/oxygen/',
                    # 'sudo rm -r helium',
                    f'gzip -d /home/polycraft/oxygen/{self.config.get("SERVER","worldZipName")}.tar.gz',
                    'echo "[DEBUG] extracting the tar"',
                    'ls -l',
                    f'sudo tar -xf {self.config.get("SERVER","worldZipName")}.tar',
                    'echo "[DEBUG] extracted the tar"',
                    'ls -l',
                    # 'sudo mv helium-backup-0924 helium',
                    f'sudo mv helium {self.config.get("SERVER","worldName")}',  # TODO Remove this once we finalize the server name?
                    f'chmod -R 777 {self.config.get("SERVER","worldName")}/',  #  NOTE: The folder inside here is called helium!
                    'echo "[DEBUG] Adjusted permissions for helium?"',
                    'ls -l',
                ]),
            wait_for_success=True,
            # user_accounts=users,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        )

        net_config = batchmodels.NetworkConfiguration(
            # subnet_id="/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/vnet-eastus-azurebatch/providers/Microsoft.Network/virtualNetworks/vnet-eastus-azurebatch/subnets/main-batch-subnet",
            endpoint_configuration=batchmodels.
            PoolEndpointConfiguration(inbound_nat_pools=[
                batchmodels.InboundNATPool(
                    name='minecraftServer',
                    protocol='tcp',
                    backend_port=25565,
                    frontend_port_range_start=44000,
                    frontend_port_range_end=44099,
                    network_security_group_rules=[
                        batchmodels.NetworkSecurityGroupRule(
                            priority=199,
                            access='allow',
                            source_address_prefix='*'),
                    ]),
                batchmodels.InboundNATPool(
                    name='api_port',
                    protocol='tcp',
                    backend_port=int(api_port)
                    if api_port and api_port.isdecimal() else 9007,
                    frontend_port_range_start=44500,
                    frontend_port_range_end=44599,
                    network_security_group_rules=[
                        # batchmodels.NetworkSecurityGroupRule(
                        #     priority=170,
                        #     access='allow',
                        #     source_address_prefix='192.168.1.0/24'      # TODO: is this the right subnet?
                        # ),
                        batchmodels.NetworkSecurityGroupRule(
                            priority=198,
                            access='allow',  # 'deny'
                            source_address_prefix=
                            '*'  # TODO: only allow access to the right ports
                        )
                    ]),
            ]))

        pool = batchmodels.PoolAddParameter(
            id=pool_id,
            vm_size=self.config.get('POOL', 'vm_size'),
            target_dedicated_nodes=int(min_count)
            if min_count and min_count.isdecimal() else 1,
            virtual_machine_configuration=vmc,
            start_task=start_task,
            user_accounts=users,
            network_configuration=net_config)

        helpers.create_pool_if_not_exist(self.client, pool)
        self.start_mc_server_job_pool(pool.target_dedicated_nodes)
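For reference, the `wrap_commands_in_shell` helper these examples rely on is roughly the following (a sketch based on the Azure Batch samples helper; the single-quote wrapping on Linux is why the NOTE above insists on double quotes inside commands):

def wrap_commands_in_shell(ostype, commands):
    """Wraps a list of commands into a single shell command line."""
    if ostype.lower() == 'linux':
        # The whole sequence runs inside one single-quoted bash -c string,
        # so the embedded commands must stick to double quotes.
        return '/bin/bash -c \'set -e; set -o pipefail; {}; wait\''.format(
            ';'.join(commands))
    elif ostype.lower() == 'windows':
        return 'cmd.exe /c "{}"'.format('&'.join(commands))
    else:
        raise ValueError('unknown ostype: {}'.format(ostype))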
def create_pool_and_wait_for_node(batch_client, pool_id, vm_size, vm_count,
                                  sha1_cert_tp):
    """Creates an Azure Batch pool with the specified id.

    :param batch_client: The batch client to use.
    :type batch_client: `batchserviceclient.BatchServiceClient`
    :param str pool_id: The id of the pool to create.
    :param str vm_size: vm size (sku)
    :param int vm_count: number of vms to allocate
    :param str sha1_cert_tp: sha1 cert thumbprint for cert ref
    """
    # pick the latest supported 16.04 sku for UbuntuServer
    sku_to_use, image_ref_to_use = \
        common.helpers.select_latest_verified_vm_image_with_node_agent_sku(
            batch_client, 'Canonical', 'UbuntuServer', '16.04')

    # create start task commands
    # 1. update repository
    # 2. install blobxfer pre-requisites
    # 3. pip install blobxfer python script
    start_task_commands = [
        'apt-get update',
        'apt-get install -y build-essential libssl-dev libffi-dev '
        'libpython-dev python-dev python-pip',
        'pip install --upgrade blobxfer'
    ]

    user = batchmodels.AutoUserSpecification(
        scope=batchmodels.AutoUserScope.pool,
        elevation_level=batchmodels.ElevationLevel.admin)
    # create pool with start task and cert ref with visibility of task
    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use, node_agent_sku_id=sku_to_use),
        vm_size=vm_size,
        target_dedicated_nodes=vm_count,
        start_task=batchmodels.StartTask(
            command_line=common.helpers.wrap_commands_in_shell(
                'linux', start_task_commands),
            user_identity=batchmodels.UserIdentity(auto_user=user),
            wait_for_success=True),
        certificate_references=[
            batchmodels.CertificateReference(
                thumbprint=sha1_cert_tp,
                thumbprint_algorithm='sha1',
                visibility=[batchmodels.CertificateVisibility.task])
        ],
    )
    common.helpers.create_pool_if_not_exist(batch_client, pool)
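    # (Sketch note, based on documented Batch behavior: on Linux nodes a
    # certificate referenced with task visibility is installed under the
    # directory named by the AZ_BATCH_CERTIFICATES_DIR environment variable,
    # which task commands can read, e.g. via
    # os.environ['AZ_BATCH_CERTIFICATES_DIR'].)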

    # because we want all nodes to be available before any tasks are assigned
    # to the pool, here we will wait for all compute nodes to reach idle
    nodes = common.helpers.wait_for_all_nodes_state(
        batch_client, pool,
        frozenset((batchmodels.ComputeNodeState.start_task_failed,
                   batchmodels.ComputeNodeState.unusable,
                   batchmodels.ComputeNodeState.idle)))
    # ensure all nodes are idle
    if any(node.state != batchmodels.ComputeNodeState.idle for node in nodes):
        raise RuntimeError(
            'node(s) of pool {} not in idle state'.format(pool_id))
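The `wait_for_all_nodes_state` helper used above polls the pool until every allocated node reaches one of the requested states; a rough sketch of its behavior:

import time

def wait_for_all_nodes_state(batch_client, pool, node_state):
    # Poll until the pool has allocated its target number of nodes and
    # every node has reached one of the given states.
    while True:
        # Re-fetch the pool so resize errors surface early.
        pool = batch_client.pool.get(pool.id)
        if pool.resize_errors is not None:
            raise RuntimeError(
                'resize error encountered for pool {}'.format(pool.id))
        nodes = list(batch_client.compute_node.list(pool.id))
        if (len(nodes) >= pool.target_dedicated_nodes and
                all(node.state in node_state for node in nodes)):
            return nodes
        time.sleep(10)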
Example #30
0
    def create_pool(self,
                    pool_id,
                    vm_size,
                    target_dedicated,
                    target_low_priority,
                    batch_image_spec,
                    starttask_cmd,
                    starttask_url,
                    starttask_script,
                    sp_cert_thumb,
                    app_licenses=None,
                    disable_remote_access=True,
                    app_pkgs=None,
                    subnet_id=None,
                    app_insights_app_key=None,
                    app_insights_instrumentation_key=None):
        """Creates an Azure Batch pool with the given image spec, start
        task and service-principal certificate reference; optionally adds
        application licenses, Application Insights environment settings,
        a subnet, and NAT rules that block RDP and SSH."""

        pool = batchmodels.PoolAddParameter(
            id=pool_id,
            display_name=pool_id,
            vm_size=vm_size,
            target_dedicated_nodes=target_dedicated,
            target_low_priority_nodes=target_low_priority,
            virtual_machine_configuration=batch_image_spec.
            get_virtual_machine_configuration(),
            application_package_references=app_pkgs,
            certificate_references=[
                batchmodels.CertificateReference(sp_cert_thumb, 'sha1')
            ])

        if app_licenses:
            pool.application_licenses = app_licenses

        pool.start_task = batchmodels.StartTask(
            command_line=starttask_cmd,
            max_task_retry_count=3,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
            wait_for_success=True,
            resource_files=[
                batchmodels.ResourceFile(starttask_url, starttask_script)
            ])

        if app_insights_app_key and app_insights_instrumentation_key:
            pool.start_task.environment_settings = [
                batchmodels.EnvironmentSetting('APP_INSIGHTS_APP_ID',
                                               app_insights_app_key),
                batchmodels.EnvironmentSetting(
                    'APP_INSIGHTS_INSTRUMENTATION_KEY',
                    app_insights_instrumentation_key)
            ]

        if subnet_id:
            pool.network_configuration = batchmodels.NetworkConfiguration(
                subnet_id=subnet_id)

        if disable_remote_access:
            if pool.network_configuration is None:
                pool.network_configuration = batchmodels.NetworkConfiguration()
            endpoint_config = batchmodels.PoolEndpointConfiguration(
                inbound_nat_pools=[
                    batchmodels.InboundNATPool(
                        'DisableRDP',
                        batchmodels.InboundEndpointProtocol.tcp,
                        3389,
                        60000,
                        60099,
                        network_security_group_rules=[
                            batchmodels.NetworkSecurityGroupRule(
                                150, batchmodels.
                                NetworkSecurityGroupRuleAccess.deny, '*')
                        ]),
                    batchmodels.InboundNATPool(
                        'DisableSSH',
                        batchmodels.InboundEndpointProtocol.tcp,
                        22,
                        61000,
                        61099,
                        network_security_group_rules=[
                            batchmodels.NetworkSecurityGroupRule(
                                151, batchmodels.
                                NetworkSecurityGroupRuleAccess.deny, '*')
                        ])
                ])
            pool.network_configuration.endpoint_configuration = endpoint_config

        try:
            client = self._get_batch_client()
            client.pool.add(pool)
        except batchmodels.BatchErrorException as be:
            if be.error:
                print('Error creating pool, code={}, message={}'.format(
                    be.error.code, be.error.message))
                if be.error.values:
                    for e in be.error.values:
                        print('Key={}, Value={}'.format(e.key, e.value))
            raise
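Note that recent releases of the azure-batch SDK generate keyword-only model constructors, so the positional `InboundNATPool` and `NetworkSecurityGroupRule` calls above may need to be spelled out. A sketch of the equivalent keyword form for the SSH rule, using the same `batchmodels` alias as above:

# Same pool/rule as 'DisableSSH' above, written with explicit keywords.
batchmodels.InboundNATPool(
    name='DisableSSH',
    protocol=batchmodels.InboundEndpointProtocol.tcp,
    backend_port=22,
    frontend_port_range_start=61000,
    frontend_port_range_end=61099,
    network_security_group_rules=[
        batchmodels.NetworkSecurityGroupRule(
            priority=151,
            access=batchmodels.NetworkSecurityGroupRuleAccess.deny,
            source_address_prefix='*')
    ])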