def get_virtual_machine_configuration(self):
    """Build the pool's VirtualMachineConfiguration.

    A custom image wins when ``self.image_id`` is set; otherwise the
    marketplace image described by publisher/offer/sku/version is used.
    """
    if self.image_id:
        ref = batchmodels.ImageReference(
            virtual_machine_image_id=self.image_id)
    else:
        ref = batchmodels.ImageReference(
            publisher=self.image_publisher,
            offer=self.image_offer,
            sku=self.image_sku,
            version=self.image_version,
        )
    return batchmodels.VirtualMachineConfiguration(
        image_reference=ref,
        node_agent_sku_id=self.node_agent_sku_id,
    )
def create_pool(self, size, name):
    """Create and deploy a new pool.

    Called on job submission by submission.py.
    TODO: Support auto-scale formula.
    """
    image = self.environment.get_image()
    # The environment's image dict doubles as ImageReference kwargs once
    # the node agent SKU entry has been popped off.
    agent_sku = image.pop('node_sku_id')
    pool_id = 'Maya_Pool_{}'.format(uuid.uuid4())
    vm_config = models.VirtualMachineConfiguration(
        image_reference=models.ImageReference(**image),
        node_agent_sku_id=agent_sku)
    self._log.info("Creating new pool '{}' with {} VMs.".format(name, size))
    # size is a (dedicated, low-priority) pair of counts.
    pool_spec = models.PoolAddParameter(
        id=pool_id,
        display_name="Maya Pool for {}".format(name),
        resize_timeout=datetime.timedelta(minutes=30),
        application_licenses=self.environment.get_application_licenses(),
        vm_size=self.environment.get_vm_sku(),
        virtual_machine_configuration=vm_config,
        target_dedicated_nodes=int(size[0]),
        target_low_priority_nodes=int(size[1]),
        max_tasks_per_node=1)
    self._call(self.batch.pool.add, pool_spec)
    self._log.debug("Successfully created pool.")
    return {"poolId": pool_id}
def pool_create():
    """Create the Azure Batch pool described by the global ``config_azure``.

    The start task provisions every (CentOS/yum-based) node with Python 3.6,
    OpenMPI + mpi4py/numpy, and the Azure storage/batch SDKs, running as an
    elevated pool-scope auto user so package installation succeeds. The pool
    enables inter-node communication for MPI workloads.
    """
    image_reference = batchmodel.ImageReference(
        publisher=config_azure['batch_pool_image_publisher'],
        offer=config_azure['batch_pool_image_offer'],
        sku=config_azure['batch_pool_image_sku'])
    vm_config = batchmodel.VirtualMachineConfiguration(
        image_reference=image_reference,
        node_agent_sku_id=config_azure['batch_pool_node_agent_sku'])
    # BUG FIX: pip has no global --yes option; it belongs to the `uninstall`
    # subcommand ("pip3 uninstall --yes ..."). The previous form
    # ("pip3 --yes uninstall ...") makes pip exit with a usage error and the
    # start task never finishes provisioning.
    vm_start_task = batchmodel.StartTask(
        command_line='/bin/bash -c "sudo yum -y install epel-release; '
                     'sudo yum -y install python36 python36-devel python36-tools; '
                     'sudo python36 -m ensurepip; '
                     'sudo yum -y install openmpi openmpi-devel; '
                     'sudo env MPICC=/usr/lib64/openmpi/bin/mpicc pip3 install mpi4py numpy; '
                     'sudo pip3 uninstall --yes azure azure-common azure-storage; '
                     'sudo pip3 install azure-storage azure-batch"',
        user_identity=batchmodel.UserIdentity(
            auto_user=batchmodel.AutoUserSpecification(
                scope=batchmodel.AutoUserScope.pool,
                elevation_level=batchmodel.ElevationLevel.admin)),
        wait_for_success=True)
    batch_service.pool.add(
        pool=batchmodel.PoolAddParameter(
            id=config_azure['batch_pool_name'],
            vm_size=config_azure['batch_pool_vm_size'],
            virtual_machine_configuration=vm_config,
            target_dedicated_nodes=config_azure[
                'batch_pool_target_dedicated_nodes'],
            enable_inter_node_communication=True,
            start_task=vm_start_task),
        raw=True)
def __get_vm_image_and_node_agent_sku(self, pool_config):
    """Select the latest verified image that Azure Batch supports given a
    publisher, offer and sku (starts with filter).

    Get the node agent SKU and image reference for the virtual machine
    configuration.
    For more information about the virtual machine configuration, see:
    https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    :param dict pool_config: vm configuration
    :rtype: tuple
    :return: (node agent sku id to use, vm image ref to use)
    :raises ValueError: if no verified marketplace image matches the
        requested publisher/offer/sku prefix.
    """
    if pool_config.get("node_resource_id", None):
        # Custom image path: the caller supplies the agent SKU directly.
        image_ref_to_use = batchmodels.ImageReference(
            virtual_machine_image_id=pool_config["node_resource_id"])
        sku_to_use = pool_config['node_os_sku']
    else:
        publisher = pool_config['node_os_publisher']
        offer = pool_config['node_os_offer']
        sku_starts_with = pool_config['node_os_sku']
        # get verified vm image list and node agent sku ids from service
        node_agent_skus = self.batch_client.account.list_node_agent_skus()
        # pick the latest supported sku
        skus_to_use = [
            (sku, image_ref) for sku in node_agent_skus
            for image_ref in sorted(sku.verified_image_references,
                                    key=lambda item: item.sku)
            if image_ref.publisher.lower() == publisher.lower() and
            image_ref.offer.lower() == offer.lower() and
            image_ref.sku.startswith(sku_starts_with)
        ]
        # BUG FIX: an empty match previously surfaced as a bare IndexError
        # on skus_to_use[0]; raise a descriptive error instead.
        if not skus_to_use:
            raise ValueError(
                "No verified VM image found for publisher={} offer={} "
                "sku prefix={}".format(publisher, offer, sku_starts_with))
        # skus are listed in reverse order, pick first for latest
        sku_to_use, image_ref_to_use = skus_to_use[0]
        sku_to_use = sku_to_use.id
    return sku_to_use, image_ref_to_use
def create_pool(batch_client, name_pool, number_nodes=0, cmd_s_task=None,
                rule_scale_pool=None):
    """Create an auto-scaling Ubuntu 18.04 pool named *name_pool*.

    Sizing is driven entirely by *rule_scale_pool* (an auto-scale formula
    re-evaluated every 5 minutes). ``number_nodes`` and ``cmd_s_task`` are
    accepted for interface compatibility but are currently unused.
    """
    # Marketplace image plus the matching Batch node agent for 18.04.
    vm_configuration = models.VirtualMachineConfiguration(
        image_reference=models.ImageReference(
            offer='UbuntuServer',
            publisher='Canonical',
            sku='18.04-LTS',
            version='latest',
            virtual_machine_image_id=None),
        node_agent_sku_id='batch.node.ubuntu 18.04')
    # Pool definition: fixed VM size, node count managed by auto-scale.
    pool_spec = models.PoolAddParameter(
        id=name_pool,
        vm_size='standard_d1_v2',
        virtual_machine_configuration=vm_configuration,
        enable_inter_node_communication=True,
        enable_auto_scale=True,
        auto_scale_formula=rule_scale_pool,
        auto_scale_evaluation_interval='PT5M')
    batch_client.pool.add(pool_spec)
def create_pool(batch_service_client, pool_id, vm_size, imageName, versions,
                auto_scale_formula,
                subscription_id="ad49354a-6ce2-4dae-a51d-b6907372f608",
                resource_group="BrowseCloud"):
    """
    Creates an auto-scaling Windows pool from a custom managed image.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    :param str vm_size: Azure VM size for the pool's nodes.
    :param str imageName: Name of the managed image to boot nodes from.
    :param versions: Application package versions of "browsecloudtrainer"
        to deploy onto the pool.
    :param str auto_scale_formula: Batch auto-scale formula for the pool.
    :param str subscription_id: Subscription owning the managed image.
        Defaults preserve the previous hard-coded value.
    :param str resource_group: Resource group owning the managed image.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # GENERALIZED: subscription and resource group were hard-coded inline;
    # they are now defaulted parameters, keeping old call sites working.
    image_id = ("/subscriptions/{}/resourceGroups/{}/providers/"
                "Microsoft.Compute/images/{}").format(
                    subscription_id, resource_group, imageName)
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(
                virtual_machine_image_id=image_id),
            node_agent_sku_id="batch.node.windows amd64"),
        vm_size=vm_size,
        start_task=None,
        enable_auto_scale=True,
        auto_scale_formula=auto_scale_formula,
        application_package_references=[
            batchmodels.ApplicationPackageReference(
                application_id="browsecloudtrainer", version=version)
            for version in versions],
        # PT5M is the smallest evaluation interval Batch allows.
        auto_scale_evaluation_interval=timedelta(minutes=5)
    )
    batch_service_client.pool.add(new_pool)
def create_pool(pool_id, batch_service_client=None):
    """
    Creates a pool of compute nodes.

    Parameters
    ==========
    pool_id: str, identifier for the pool
    batch_service_client: azure.batch.BatchServiceClient, A Batch service
        client; one is created on the fly when not supplied.
    """
    print("Creating pool [{}]...".format(pool_id))
    if not batch_service_client:
        batch_service_client = create_batch_client()
    # Ubuntu 18.04 marketplace image with its matching node agent.
    ubuntu = batchmodels.ImageReference(
        publisher="Canonical",
        offer="UbuntuServer",
        sku="18.04-LTS",
        version="latest",
    )
    pool_spec = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=ubuntu,
            node_agent_sku_id="batch.node.ubuntu 18.04",
        ),
        vm_size=config["pool_vm_size"],
        target_low_priority_nodes=config["pool_low_priority_node_count"],
        target_dedicated_nodes=config["pool_dedicated_node_count"],
    )
    batch_service_client.pool.add(pool_spec)
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of Ubuntu 16.04 compute nodes.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Pools of Linux marketplace-image nodes are documented at:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    ubuntu_1604 = batchmodels.ImageReference(
        publisher="Canonical",
        offer="UbuntuServer",
        sku="16.04-LTS",
        version="latest")
    pool_spec = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=ubuntu_1604,
            node_agent_sku_id="batch.node.ubuntu 16.04"),
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._POOL_NODE_COUNT)
    batch_service_client.pool.add(pool_spec)
def create_pool(batch_service_client: batch.BatchServiceClient,
                config: Dict[str, str]) -> None:
    """
    Creates a container-enabled pool of compute nodes with the specified
    OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param config: Configuration mapping with POOL_ID, POOL_VM_SIZE,
        POOL_NODE_COUNT and TASK_SLOTS_PER_NODE entries (all strings).
    """
    print("Creating pool [{}]...".format(config['POOL_ID']))
    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    new_pool = batch.models.PoolAddParameter(
        id=config['POOL_ID'],
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(
                publisher="microsoft-azure-batch",
                offer="ubuntu-server-container",
                sku='16-04-lts',
                version="latest",
            ),
            node_agent_sku_id="batch.node.ubuntu 16.04",
            container_configuration=create_container_config(config),
        ),
        vm_size=config['POOL_VM_SIZE'],
        # BUG FIX: config is Dict[str, str], so these values arrive as
        # strings while the Batch API expects integer counts — coerce them.
        target_dedicated_nodes=int(config['POOL_NODE_COUNT']),
        task_slots_per_node=int(config['TASK_SLOTS_PER_NODE']),
    )
    batch_service_client.pool.add(new_pool)
def test_batch_create_pool_with_blobfuse_mount(self, **kwargs): client = self.create_sharedkey_client(**kwargs) # Test Create Iaas Pool test_iaas_pool = models.PoolAddParameter( id=self.get_resource_name('batch_iaas_'), vm_size='Standard_A1', virtual_machine_configuration=models.VirtualMachineConfiguration( image_reference=models.ImageReference( publisher='MicrosoftWindowsServer', offer='WindowsServer', sku='2016-Datacenter-smalldisk' ), node_agent_sku_id='batch.node.windows amd64', windows_configuration=models.WindowsConfiguration(enable_automatic_updates=True)), task_scheduling_policy=models.TaskSchedulingPolicy(node_fill_type=models.ComputeNodeFillType.pack), mount_configuration=[models.MountConfiguration( azure_blob_file_system_configuration=models.AzureBlobFileSystemConfiguration( account_name='test', container_name='https://test.blob.core.windows.net:443/test-container', relative_mount_path='foo', account_key='fake_account_key' ) )] ) response = client.pool.add(test_iaas_pool) self.assertIsNone(response) mount_pool = client.pool.get(test_iaas_pool.id) self.assertIsNotNone(mount_pool.mount_configuration) self.assertEqual(len(mount_pool.mount_configuration), 1) self.assertIsNotNone(mount_pool.mount_configuration[0].azure_blob_file_system_configuration) self.assertIsNone(mount_pool.mount_configuration[0].nfs_mount_configuration)
def create_pool(batch_service_client, pool_id, users):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    :param users: User accounts to create on every node; the start task
        runs as the account named "admin", which must be among them.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    # The start task installs Python 2.7 tooling (pandas, rpy2, numpy, scipy,
    # R) on each node, running as the "admin" user account.
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        user_accounts=users,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="18.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=_POOL_VM_SIZE,
        target_dedicated_nodes=_DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=_LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            #command_line="/bin/bash -c \"git clone https://github.com/uiuc-arc/probfuzz.git; \
            #    cd probfuzz/; ./install_java.sh; ./install.sh\"",
            # command_line="/bin/bash -c \"apt-get update\"",
            # NOTE(review): "sudo apt-get install git" below has no -y flag —
            # it may block on a confirmation prompt in this non-interactive
            # start task; confirm whether git actually gets installed.
            command_line="/bin/bash -c \"sudo apt-get -y update; \
            sudo apt-get install git; \
            sudo apt-get install -y python2.7; \
            sudo apt-get install -y python-pip; \
            sudo apt-get install -y bc; \
            sudo apt-get install -y r-base; \
            sudo pip2 --no-cache-dir install pandas; \
            sudo pip2 install rpy2==2.8.6;\
            sudo pip2 install argparse;\
            sudo pip2 install numpy;\
            sudo pip2 install scipy;\
            \"",
            wait_for_success=True,
            # Runs as the named "admin" user account (from `users`), not an
            # auto-user; the elevated auto-user alternative is kept below.
            user_identity=batchmodels.UserIdentity(user_name="admin"),
            #user_identity=batchmodels.UserIdentity(
            #    auto_user=batchmodels.AutoUserSpecification(
            #        scope=batchmodels.AutoUserScope.pool,
            #        elevation_level=batchmodels.ElevationLevel.admin)),
        ),
        max_tasks_per_node=_MAX_TASKS_PER_NODE)
    batch_service_client.pool.add(new_pool)
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of Ubuntu 18.04 nodes provisioned for PySpark work.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Node provisioning: python3 + pip, monitoring tools, the blob SDK,
    # pyspark/pandas, and a JRE for Spark. Runs elevated via an auto user.
    task_commands = [
        "apt-get update",
        "apt-get -y install python3-pip",
        "apt -y install htop",
        "apt -y install iftop",
        "pip3 install azure-storage-blob",
        "pip3 install pyspark",
        "pip3 install pandas",
        "apt -y install openjdk-8-jre-headless"
    ]
    bootstrap = batchmodels.StartTask(
        command_line=wrap_commands_in_shell('linux', task_commands),
        wait_for_success=True,
        user_identity=batchmodels.UserIdentity(
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)),
    )
    pool_spec = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(
                publisher="Canonical",
                offer="UbuntuServer",
                sku="18.04-LTS",
                version="latest"
            ),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=POOL_VM_SIZE,
        target_dedicated_nodes=DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=bootstrap
    )
    batch_service_client.pool.add(pool_spec)
def test_batch_network_configuration(self, **kwargs): client = self.create_aad_client(**kwargs) # Test Create Pool with Network Config network_config = models.NetworkConfiguration( endpoint_configuration=models.PoolEndpointConfiguration( inbound_nat_pools=[ models.InboundNATPool( name="TestEndpointConfig", protocol=models.InboundEndpointProtocol.udp, backend_port=64444, frontend_port_range_start=60000, frontend_port_range_end=61000, network_security_group_rules=[ models.NetworkSecurityGroupRule( priority=150, access=models.NetworkSecurityGroupRuleAccess.allow, source_address_prefix='*' ) ] ) ] ) ) virtual_machine_config = models.VirtualMachineConfiguration( node_agent_sku_id="batch.node.ubuntu 16.04", image_reference=models.ImageReference( publisher="Canonical", offer="UbuntuServer", sku="16.04-LTS") ) pool = models.PoolAddParameter( id=self.get_resource_name('batch_network_'), target_dedicated_nodes=1, vm_size='Standard_A1', virtual_machine_configuration=virtual_machine_config, network_configuration=network_config ) client.pool.add(pool) network_pool = client.pool.get(pool.id) while self.is_live and network_pool.allocation_state != models.AllocationState.steady: time.sleep(10) network_pool = client.pool.get(pool.id) # Test Compute Node Config nodes = list(client.compute_node.list(pool.id)) self.assertEqual(len(nodes), 1) self.assertIsInstance(nodes[0], models.ComputeNode) self.assertEqual(len(nodes[0].endpoint_configuration.inbound_endpoints), 2) self.assertEqual(nodes[0].endpoint_configuration.inbound_endpoints[0].name, 'TestEndpointConfig.0') self.assertEqual(nodes[0].endpoint_configuration.inbound_endpoints[0].protocol.value, 'udp')
def create_pool(config, batch_service_client):
    """
    Creates a container-enabled pool of Ubuntu 16.04 compute nodes.

    :param config: Configuration object with POOL_*, REGISTRY_* and
        DOCKER_CONTAINER attributes.
    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    """
    # Batch's container-ready marketplace image; see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    image_ref_to_use = models.ImageReference(
        publisher="microsoft-azure-batch",
        offer="ubuntu-server-container",
        sku="16-04-lts",
        version="latest",
    )
    # Private registries need credentials; a public image does not.
    if config.REGISTRY_USERNAME:
        registry = batch.models.ContainerRegistry(
            user_name=config.REGISTRY_USERNAME,
            password=config.REGISTRY_PASSWORD,
            registry_server=config.REGISTRY_SERVER,
        )
        container_conf = batch.models.ContainerConfiguration(
            container_image_names=[config.DOCKER_CONTAINER],
            container_registries=[registry],
        )
    else:
        container_conf = batch.models.ContainerConfiguration(
            container_image_names=[config.DOCKER_CONTAINER]
        )
    pool_spec = batch.models.PoolAddParameter(
        id=config.POOL_ID,
        virtual_machine_configuration=batch.models.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            container_configuration=container_conf,
            node_agent_sku_id="batch.node.ubuntu 16.04",
        ),
        vm_size=config.POOL_VM_SIZE,
        target_dedicated_nodes=config.POOL_NODE_COUNT,
        target_low_priority_nodes=config.POOL_LOW_PRIORITY_NODE_COUNT,
    )
    batch_service_client.pool.add(pool_spec)
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    Pool sizing comes from the POOL_VM_SIZE, DEDICATED_POOL_NODE_COUNT and
    LOW_PRIORITY_POOL_NODE_COUNT environment variables; the start task
    installs Docker CE on every node under an elevated auto user.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="16.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 16.04"),
        vm_size=os.environ['POOL_VM_SIZE'],
        # BUG FIX: os.environ values are strings; the Batch service expects
        # integer node counts, so coerce explicitly.
        target_dedicated_nodes=int(os.environ['DEDICATED_POOL_NODE_COUNT']),
        target_low_priority_nodes=int(os.environ['LOW_PRIORITY_POOL_NODE_COUNT']),
        start_task=batchmodels.StartTask(
            command_line="/bin/bash -c \"apt-get update && \
                apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&\
                curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - && \
                add-apt-repository 'deb [arch=amd64] https://download.docker.com/linux/ubuntu xenial stable' && \
                apt-get update && \
                apt-get install -y docker-ce\"",
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        ))
    batch_service_client.pool.add(new_pool)
def create_pool(batch_service_client, pool_id, application_files):
    """
    Creates a Windows Server 2016 pool whose start task runs starttask.cmd.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    :param application_files: Resource files staged onto each node before
        the start task runs.
    """
    print('Creating pool [{}]...'.format(pool_id))
    windows_image = batchmodels.ImageReference(
        publisher="MicrosoftWindowsServer",
        offer="WindowsServer",
        sku="2016-Datacenter",
        version="latest"
    )
    # The start task runs elevated (pool-scope auto user) and must complete
    # before the node accepts tasks.
    bootstrap = batchmodels.StartTask(
        command_line="cmd /c starttask.cmd",
        resource_files=application_files,
        wait_for_success=True,
        user_identity=batchmodels.UserIdentity(
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)),
    )
    pool_spec = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=windows_image,
            node_agent_sku_id="batch.node.windows amd64"),
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=bootstrap
    )
    batch_service_client.pool.add(pool_spec)
def create_pool(batch_service_client, pool_id, resource_files):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    :param resource_files: Resource files staged to each node for the
        start task (in addition to the task file copied below).
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    # Start-task commands: copy the task script into the node's shared
    # directory, then install Miniconda there so all tasks share one env.
    task_commands = [
        'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(config._TASK_FILE),
        '''wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda.sh && \
    bash miniconda.sh -b -p $AZ_BATCH_NODE_SHARED_DIR/miniconda && \
    export PATH="$AZ_BATCH_NODE_SHARED_DIR/miniconda/bin:$PATH" && \
    source "$AZ_BATCH_NODE_SHARED_DIR/miniconda/bin/activate" && \
    conda install -y -c anaconda -c conda-forge \
    pip'''
    ]
    image_ref = batchmodels.ImageReference(publisher="Canonical",
                                           offer="UbuntuServer",
                                           sku="18.04-LTS",
                                           version="latest")
    vm_config = batchmodels.VirtualMachineConfiguration(
        image_reference=image_ref,
        node_agent_sku_id="batch.node.ubuntu 18.04")
    # Low-priority nodes only; the start task is not marked
    # wait_for_success, so tasks may start before provisioning finishes.
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=vm_config,
        vm_size=config._POOL_VM_SIZE,
        target_low_priority_nodes=config._POOL_NODE_COUNT,
        start_task=batch.models.StartTask(command_line=wrap_commands_in_shell(
            'linux', task_commands), resource_files=resource_files))
    batch_service_client.pool.add(new_pool)
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    The start task installs wget and downloads/unpacks SPAdes 3.14.0 on
    every node, running elevated and blocking task scheduling until done.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="18.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=batchmodels.StartTask(
            # BUG FIX: "apt-get install wget" had no -y, so it would block
            # on a confirmation prompt in this non-interactive start task.
            command_line=
            "/bin/bash -c \"apt-get update && apt-get install -y wget && wget http://cab.spbu.ru/files/release3.14.0/SPAdes-3.14.0-Linux.tar.gz && tar -xf SPAdes-3.14.0-Linux.tar.gz\"",
            wait_for_success=True,
            user_identity=batchmodels.UserIdentity(
                auto_user=batchmodels.AutoUserSpecification(
                    scope=batchmodels.AutoUserScope.pool,
                    elevation_level=batchmodels.ElevationLevel.admin)),
        ))
    batch_service_client.pool.add(new_pool)
def create_pool(batch_service_client: batch.BatchServiceClient,
                pool_id: str,
                publisher: str = "Canonical",
                offer: str = "UbuntuServer",
                sku: str = "18.04-LTS") -> None:
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :param pool_id: An ID for the new pool.
    :param publisher: Marketplace image publisher
    :param offer: Marketplace image offer
    :param sku: Marketplace image SKU
    """
    print('Creating pool [{}]...'.format(pool_id))
    # The start task installs Python 3.7 and pip, elevated, and must
    # succeed before the node becomes schedulable.
    bootstrap = batchmodels.StartTask(
        command_line=
        "/bin/bash -c \"apt-get update && apt-get -y install python3.7 python3-pip\"",
        wait_for_success=True,
        user_identity=batchmodels.UserIdentity(
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)),
    )
    pool_spec = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher=publisher,
                                                       offer=offer,
                                                       sku=sku,
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=config._POOL_VM_SIZE,
        target_dedicated_nodes=config._DEDICATED_POOL_NODE_COUNT,
        target_low_priority_nodes=config._LOW_PRIORITY_POOL_NODE_COUNT,
        start_task=bootstrap)
    batch_service_client.pool.add(pool_spec)
def createBatchPool(batch_client, pool_id):
    """Create a single-node Ubuntu 18.04 pool with pip/venv preinstalled."""
    # Elevated auto user so the start task can install system packages.
    admin_identity = batchmodels.UserIdentity(
        auto_user=batchmodels.AutoUserSpecification(elevation_level='admin'))
    setup_task = batchmodels.StartTask(
        command_line="/bin/bash -c \"apt-get install -y python3-pip python3-venv\"",
        user_identity=admin_identity)
    pool_spec = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="18.04-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        # VM Type/Size — STANDARD_A2m_v2: 16 GB (Standard_E4_v3 would be 32 GB)
        vm_size='STANDARD_A2m_v2',
        target_dedicated_nodes=1,  # pool node count
        start_task=setup_task)
    batch_client.pool.add(pool_spec)
def create_commit_pool(batch_service_client):
    """
    Creates the commit pool of compute nodes with the specified OS settings.

    The pool auto-scales per COMMIT_POOL_SCALE_FORMULA and runs
    COMMIT_POOL_START_TASK elevated on each node. Creating a pool that
    already exists is treated as success.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    """
    # BUG FIX: this function creates COMMIT_POOL_ID, but the log line
    # previously reported PROCESSING_POOL_ID.
    LOGGER.info(f'Creating pool [{COMMIT_POOL_ID}]...')
    image_ref_to_use = batch_models.ImageReference(
        publisher='canonical',
        offer='ubuntuserver',
        sku='18.04-lts',
        version='latest')
    new_pool = batch_models.PoolAddParameter(
        id=COMMIT_POOL_ID,
        virtual_machine_configuration=
        batch_models.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            node_agent_sku_id="batch.node.ubuntu 18.04"),
        vm_size=COMMIT_POOL_VM_SIZE,
        start_task=batch_models.StartTask(
            command_line=COMMIT_POOL_START_TASK,
            user_identity=batch_models.UserIdentity(
                auto_user=batch_models.AutoUserSpecification(
                    scope='pool',
                    elevation_level='admin'))
        ),
        enable_auto_scale=True,
        auto_scale_evaluation_interval=datetime.timedelta(
            minutes=COMMIT_POOL_SCALE_INTERVAL_MINUTES),
        auto_scale_formula=COMMIT_POOL_SCALE_FORMULA)
    try:
        batch_service_client.pool.add(new_pool)
        LOGGER.info("Commit Pool Created")
    except batch_models.BatchErrorException as err:
        # An already-existing pool is benign; anything else propagates.
        if 'The specified pool already exists.' in err.error.message.value:
            LOGGER.info("Pool already exists...")
        else:
            raise
def create_pool(batch_service_client, pool_id):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    try:
        new_pool = batch.models.PoolAddParameter(
            id=pool_id,
            virtual_machine_configuration=batchmodels.
            VirtualMachineConfiguration(
                image_reference=batchmodels.ImageReference(
                    publisher="Canonical",
                    offer="UbuntuServer",
                    sku="16.04-LTS",
                    version="latest"),
                node_agent_sku_id="batch.node.ubuntu 16.04"),
            vm_size=_POOL_VM_SIZE,
            target_dedicated_nodes=_POOL_NODE_COUNT,
            start_task=batchmodels.StartTask(
                command_line=
                "/bin/bash -c \"sudo apt-get update && sudo apt-get -y install python3-pip build-essential libssl-dev libffi-dev python3-dev && sudo pip3 install azure;\"",
                # NOTE(review): `user` is not defined in this function —
                # presumably a module-level UserIdentity; confirm it exists,
                # otherwise this raises NameError before the pool is added.
                user_identity=user,
                wait_for_success=True))
        batch_service_client.pool.add(new_pool)
    except batchmodels.batch_error.BatchErrorException as err:
        # Batch errors are reported but deliberately not re-raised here.
        print_batch_exception(err)
def create_pool(batch_service_client, pool_id):
    """
    Creates an Ubuntu 16.04 pool whose start task installs ilastik.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    """
    print('Creating pool [{}]...'.format(pool_id))
    # The start task downloads and installs ilastik on each node and cd
    # into the ilastik directory (which resides in the nodes shared directory)
    command_line = "/bin/bash -c \"wget {} -P $AZ_BATCH_NODE_SHARED_DIR && cd $AZ_BATCH_NODE_SHARED_DIR && tar xjf ilastik*.tar.bz2\"".format(
        _ILASTIK_DOWNLOAD_URL)
    bootstrap = batchmodels.StartTask(
        command_line=command_line,
        wait_for_success=True,
        user_identity=batchmodels.UserIdentity(
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)),
    )
    pool_spec = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=batchmodels.ImageReference(publisher="Canonical",
                                                       offer="UbuntuServer",
                                                       sku="16.04.0-LTS",
                                                       version="latest"),
            node_agent_sku_id="batch.node.ubuntu 16.04"),
        vm_size=_POOL_VM_SIZE,
        target_dedicated_nodes=_DEDICATED_POOL_NODE_COUNT,
        start_task=bootstrap)
    batch_service_client.pool.add(pool_spec)
def create_pool(batch_service_client, blob_client, pool_id, config):
    """Create an auto-scaling pool of compute nodes from config settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param blob_client: Blob storage client used to build URLs for the
        start task's resource files.
    :param str pool_id: An ID for the new pool.
    :param dict config: Configuration details (image, VM size, autoscale
        formula, start commands, data-disk sizes, start resources).
    """
    # Either use a pre-built custom image (IMAGE_ID/SKU env vars) or select
    # the latest verified Marketplace image for the configured
    # publisher/offer/sku.
    if "IMAGE_ID" in os.environ:
        image_ref_to_use = batchmodels.ImageReference(
            virtual_machine_image_id=os.environ["IMAGE_ID"])
        sku_to_use = os.environ["SKU"]
    else:
        sku_to_use, image_ref_to_use = \
            select_latest_verified_vm_image_with_node_agent_sku(
                batch_service_client,
                config['node_os_publisher'],
                config['node_os_offer'],
                config['node_os_sku'])

    account_name = os.environ['AZURE_STORAGE_ACCOUNT']
    account_key = _get_blob_key(account_name)

    # For more information about the virtual machine configuration, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    # Build start-task shell commands, substituting storage credentials.
    start_vm_commands = None
    if config.get('create_vm_commands', None):
        start_vm_commands = _create_commands(config['create_vm_commands'])
        start_vm_commands = [
            command.format(accountname=account_name, accountkey=account_key)
            for command in start_vm_commands
        ]

    data_disks = None
    if config['data_disk_sizes']:
        # BUGFIX: dict.iteritems() is Python 2 only; this file uses Python 3
        # features elsewhere (f-strings), so iteritems() would raise
        # AttributeError. Use items(). Keys are the disk LUNs, values sizes
        # in GB.
        data_disks = [
            batchmodels.DataDisk(lun, disk_size)
            for lun, disk_size in config['data_disk_sizes'].items()
        ]

    resource_files = []
    if config['start_resources']:
        # Same iteritems() -> items() fix as above.
        for vm_file_name, blob_path in config['start_resources'].items():
            container_name, blob_name = unpack_path(blob_path)
            res_url = generate_blob_url(blob_client, container_name, blob_name)
            resource_files.append(
                batch.models.ResourceFile(res_url, vm_file_name))

    # Run the start task as an elevated pool-scope auto-user.
    user = batchmodels.AutoUserSpecification(
        scope=batchmodels.AutoUserScope.pool,
        elevation_level=batchmodels.ElevationLevel.admin)

    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            node_agent_sku_id=sku_to_use,
            data_disks=data_disks),
        vm_size=config['pool_vm_size'],
        enable_auto_scale=True,
        auto_scale_formula=config['auto_scale_formula'],
        auto_scale_evaluation_interval=datetime.timedelta(minutes=5),
        start_task=batch.models.StartTask(
            command_line=wrap_commands_in_shell('linux', start_vm_commands),
            user_identity=batchmodels.UserIdentity(auto_user=user),
            resource_files=resource_files,
            wait_for_success=True),
        max_tasks_per_node=config['max_tasks_per_node'],
    )
    try:
        batch_service_client.pool.add(new_pool)
    except batchmodels.batch_error.BatchErrorException as err:
        # Log the service error details, then propagate to the caller.
        print_batch_exception(err)
        raise
def test_batch_create_pools(self, **kwargs):
    """End-to-end exercise of pool creation variants against the service.

    Covers: node-agent SKU listing, an IaaS (Windows) pool, pool node
    counts, network-configuration and custom-image pools (both expected
    to be rejected with InvalidPropertyValue for the fake subscription
    IDs), OS-disk and data-disk pools, application licenses, and pool
    listing with max-results / filter options. Call order matters: later
    assertions (e.g. node counts, list filters) depend on which pools
    were successfully created earlier.
    """
    client = self.create_sharedkey_client(**kwargs)
    # Test List Node Agent SKUs
    response = client.account.list_node_agent_skus()
    response = list(response)
    self.assertTrue(len(response) > 1)
    self.assertEqual(response[-1].id, "batch.node.windows amd64")
    self.assertEqual(response[-1].os_type.value, "windows")
    self.assertTrue(len(response[-1].verified_image_references) > 1)

    # Test Create Iaas Pool
    users = [
        models.UserAccount('test-user-1', 'kt#_gahr!@aGERDXA'),
        models.UserAccount('test-user-2', 'kt#_gahr!@aGERDXA',
                           models.ElevationLevel.admin)
    ]
    test_iaas_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_iaas_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='MicrosoftWindowsServer',
                offer='WindowsServer',
                sku='2016-Datacenter-smalldisk'
            ),
            node_agent_sku_id='batch.node.windows amd64',
            windows_configuration=models.WindowsConfiguration(True)),
        task_scheduling_policy=models.TaskSchedulingPolicy(
            models.ComputeNodeFillType.pack),
        user_accounts=users
    )
    # pool.add returns None on success.
    response = client.pool.add(test_iaas_pool)
    self.assertIsNone(response)

    # Test list pool node count
    counts = list(client.account.list_pool_node_counts())
    self.assertIsNotNone(counts)
    self.assertEqual(len(counts), 1)
    self.assertEqual(counts[0].pool_id, test_iaas_pool.id)
    self.assertIsNotNone(counts[0].dedicated)
    # Nodes have not been allocated yet, so both counts are zero.
    self.assertEqual(counts[0].dedicated.total, 0)
    self.assertEqual(counts[0].low_priority.total, 0)

    # Test Create Pool with Network Configuration
    # (fake subscription/vnet — the service is expected to reject it)
    network_config = models.NetworkConfiguration(
        '/subscriptions/00000000-0000-0000-0000-000000000000'
        '/resourceGroups/test'
        '/providers/Microsoft.Network'
        '/virtualNetworks/vnet1'
        '/subnets/subnet1')
    test_network_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_network_'),
        vm_size='Standard_A1',
        network_configuration=network_config,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04')
    )
    self.assertBatchError('InvalidPropertyValue', client.pool.add,
                          test_network_pool,
                          models.PoolAddOptions(timeout=45))

    # Test Create Pool with Custom Image
    # (fake image resource ID — also expected to be rejected)
    test_image_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_image_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                virtual_machine_image_id="/subscriptions/00000000-0000-0000-0000-000000000000"
                                         "/resourceGroups/test"
                                         "/providers/Microsoft.Compute"
                                         "/images/FakeImage"
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04'
        )
    )
    self.assertBatchError('InvalidPropertyValue', client.pool.add,
                          test_image_pool,
                          models.PoolAddOptions(timeout=45))

    # Test Create Pool with OSDisk
    os_disk = models.OSDisk(caching=models.CachingType.read_write)
    test_osdisk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_osdisk_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            os_disk=os_disk)
    )
    response = client.pool.add(test_osdisk_pool)
    self.assertIsNone(response)
    # Round-trip: the OS disk caching mode should be echoed back by GET.
    osdisk_pool = client.pool.get(test_osdisk_pool.id)
    self.assertEqual(osdisk_pool.virtual_machine_configuration.os_disk.caching,
                     os_disk.caching)

    # Test Create Pool with Data Disk
    data_disk = models.DataDisk(lun=1, disk_size_gb=50)
    test_disk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_disk_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_disk_pool)
    self.assertIsNone(response)
    disk_pool = client.pool.get(test_disk_pool.id)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].lun, 1)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].disk_size_gb, 50)

    # Test Create Pool with Application Licenses
    test_app_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_app_'),
        vm_size='Standard_A1',
        application_licenses=["maya"],
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_app_pool)
    self.assertIsNone(response)
    app_pool = client.pool.get(test_app_pool.id)
    self.assertEqual(app_pool.application_licenses[0], "maya")

    # Test List Pools without Filters
    pools = list(client.pool.list())
    self.assertTrue(len(pools) > 1)

    # Test List Pools with Maximum
    options = models.PoolListOptions(max_results=1)
    pools = client.pool.list(options)
    pools.next()
    self.assertEqual(len(pools.current_page), 1)

    # Test List Pools with Filter
    options = models.PoolListOptions(
        filter='startswith(id,\'batch_app_\')',
        select='id,state',
        expand='stats')
    pools = list(client.pool.list(options))
    self.assertEqual(len(pools), 1)
def configure_pool(
    self,
    pool_id: str,
    vm_size: Optional[str] = None,
    vm_publisher: Optional[str] = None,
    vm_offer: Optional[str] = None,
    sku_starts_with: Optional[str] = None,
    vm_sku: Optional[str] = None,
    vm_version: Optional[str] = None,
    vm_node_agent_sku_id: Optional[str] = None,
    os_family: Optional[str] = None,
    os_version: Optional[str] = None,
    display_name: Optional[str] = None,
    target_dedicated_nodes: Optional[int] = None,
    use_latest_image_and_sku: bool = False,
    **kwargs,
) -> PoolAddParameter:
    """
    Configures a pool (builds a PoolAddParameter; does not create it).

    :param pool_id: A string that uniquely identifies the Pool within the Account
    :param vm_size: The size of virtual machines in the Pool.
    :param display_name: The display name for the Pool
    :param target_dedicated_nodes: The desired number of dedicated Compute Nodes in the Pool.
    :param use_latest_image_and_sku: Whether to use the latest verified vm image and sku
    :param vm_publisher: The publisher of the Azure Virtual Machines Marketplace Image.
        For example, Canonical or MicrosoftWindowsServer.
    :param vm_offer: The offer type of the Azure Virtual Machines Marketplace Image.
        For example, UbuntuServer or WindowsServer.
    :param sku_starts_with: The start name of the sku to search
    :param vm_sku: The name of the virtual machine sku to use
    :param vm_version: The version of the virtual machine
    :type vm_version: str
    :param vm_node_agent_sku_id: The node agent sku id of the virtual machine
    :param os_family: The Azure Guest OS family to be installed on the virtual
        machines in the Pool. If set (and use_latest_image_and_sku is False),
        a cloud service configuration is used instead of a VM configuration.
    :param os_version: The OS family version
    """
    # Branch 1: look up the newest verified Marketplace image + node agent
    # SKU and build a VirtualMachineConfiguration from it.
    if use_latest_image_and_sku:
        self.log.info(
            'Using latest verified virtual machine image with node agent sku'
        )
        sku_to_use, image_ref_to_use = self._get_latest_verified_image_vm_and_sku(
            publisher=vm_publisher, offer=vm_offer,
            sku_starts_with=sku_starts_with)
        pool = batch_models.PoolAddParameter(
            id=pool_id,
            vm_size=vm_size,
            display_name=display_name,
            virtual_machine_configuration=batch_models.
            VirtualMachineConfiguration(image_reference=image_ref_to_use,
                                        node_agent_sku_id=sku_to_use),
            target_dedicated_nodes=target_dedicated_nodes,
            **kwargs,
        )
    # Branch 2: PaaS-style pool; the vm_* image parameters are ignored here.
    elif os_family:
        self.log.info(
            'Using cloud service configuration to create pool, virtual machine configuration ignored'
        )
        pool = batch_models.PoolAddParameter(
            id=pool_id,
            vm_size=vm_size,
            display_name=display_name,
            cloud_service_configuration=batch_models.
            CloudServiceConfiguration(os_family=os_family,
                                      os_version=os_version),
            target_dedicated_nodes=target_dedicated_nodes,
            **kwargs,
        )
    # Branch 3: explicit Marketplace image reference from the vm_* params.
    # NOTE(review): if the caller omits the vm_* params in this branch the
    # ImageReference is built from all-None values — presumably rejected by
    # the service; confirm expected caller behavior.
    else:
        self.log.info(
            'Using virtual machine configuration to create a pool')
        pool = batch_models.PoolAddParameter(
            id=pool_id,
            vm_size=vm_size,
            display_name=display_name,
            virtual_machine_configuration=batch_models.
            VirtualMachineConfiguration(
                image_reference=batch_models.ImageReference(
                    publisher=vm_publisher,
                    offer=vm_offer,
                    sku=vm_sku,
                    version=vm_version,
                ),
                node_agent_sku_id=vm_node_agent_sku_id,
            ),
            target_dedicated_nodes=target_dedicated_nodes,
            **kwargs,
        )
    return pool
import configparser import logging.config import datetime import time import io import azure.batch.batch_service_client as batch import azure.batch.models as batchmodels from scarface_utils.azure_utils.azure_authentication import AzureAuthentication UBUNTU_IMAGE_CONFIG = batchmodels.VirtualMachineConfiguration( image_reference=batchmodels.ImageReference(publisher="Canonical", offer="UbuntuServer", sku="18.04-LTS", version="latest"), node_agent_sku_id="batch.node.ubuntu 18.04") class AzureBatch(object): """ This class exposes utility methods to interact with AzureBatch Service """ def __init__(self, batch_client=None, config_file=None, logger=None): self.logger = logger or logging.getLogger(__name__) self.config_file = config_file self.batch_client = batch_client @classmethod def from_config_file(cls, config_file): # type: (str) -> AzureBatch
def check_or_create_pool(self, id=None):
    """Return the existing pool `id`, or create it (plus its job) if absent.

    If the pool already exists, also locate the job bound to it and record
    its ID on self (restarting the server job if none is found). Otherwise
    build and create the pool from a custom gallery image, with a start
    task that installs dependencies and stages the game world, and NAT
    pools exposing the Minecraft server and API ports.
    """
    if id is None:
        id = self.config.get('POOL', 'id')

    self.pool_id = id

    if self.client.pool.exists(id):
        found_job = False
        # Update the Job ID here
        for job in self.client.job.list():
            if job.pool_info.pool_id == self.pool_id:
                self.job_id = job.id
                found_job = True
                break
        if not found_job:
            self.start_mc_server_job_pool()  # Restart Jobs for this pool - this is necessary!
        return self.client.pool.get(id)

    api_port = self.config.get('POOL', 'api_port')
    min_count = self.config.get('POOL', 'mincount')

    # Custom Shared Image Gallery image containing the game server.
    image_reference = batchmodels.ImageReference(
        virtual_machine_image_id=
        "/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/polycraft-game/providers/Microsoft.Compute/galleries/polycraftImgGallery/images/polycraftBestGameServerV1/versions/1.0.0"
    )

    vmc = batchmodels.VirtualMachineConfiguration(
        image_reference=image_reference,
        node_agent_sku_id="batch.node.ubuntu 18.04")

    # Named admin account available on every node.
    users = [
        batchmodels.UserAccount(
            name='azureuser',
            password='******',
            elevation_level=batchmodels.ElevationLevel.admin),
        # batchmodels.UserAccount(
        #     name='pool-nonadmin',
        #     password='******',
        #     elevation_level=batchmodels.ElevationLevel.non_admin)
    ]

    # Shell prefix that blocks until apt/dpkg locks are free, so apt-get
    # calls below don't race the unattended-upgrade daemons.
    # Thank you Ask Ubuntu https://askubuntu.com/a/373478
    wait_for_locks = 'while sudo fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do echo "Waiting for release of apt locks"; sleep 2; done; '

    # NOTE: Always use DOUBLE QUOTES within commands as azure prepends the
    # entire string with a single quote.
    start_task = batchmodels.StartTask(
        command_line=helpers.wrap_commands_in_shell(
            'linux',
            [
                'whoami',
                'printenv',
                'usermod -aG sudo azureuser',
                # Disable the apt timers that would otherwise hold the
                # dpkg lock during provisioning.
                'sudo systemctl disable --now apt-daily.timer',
                'sudo systemctl disable --now apt-daily-upgrade.timer',
                'sudo systemctl daemon-reload',
                'cd /home/polycraft',
                'chmod -R 777 *',
                'rm /home/polycraft/oxygen/mods/*.jar',
                'cd /home/polycraft/oxygen/',
                'echo "[DEBUG] removing helium..."',
                'ls -l',
                # Remove any stale world folder baked into the image.
                f'sudo rm -rf /home/polycraft/oxygen/{self.config.get("SERVER","worldName")}',
                'sudo rm -f *.zip',
                'echo "[DEBUG] removed helium?"',
                'ls -l',
                # Stop the crontabs from running
                'sudo rm /var/spool/cron/crontabs/*',
                # Taken from: https://stackoverflow.com/questions/45269225/ansible-playbook-fails-to-lock-apt/51919678#51919678
                'sudo systemd-run --property="After=apt-daily.service apt-daily-upgrade.service" --wait /bin/true',
                'sudo apt-get -y purge unattended-upgrades',
                'sudo apt-get -y update',
                wait_for_locks + 'sudo apt-get install software-properties-common -y',
                # 'while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do sleep 1; done; sudo apt-add-repository universe',
                wait_for_locks + 'sudo apt-add-repository universe',
                # Mount the Polycraft Game FileShare
                wait_for_locks + 'sudo apt-get install cifs-utils -y && sudo mkdir -p /mnt/PolycraftGame/',
                f'mount -t cifs //polycraftbestbatch.file.core.windows.net/best-batch-round-1-test /mnt/PolycraftGame -o vers=3.0,username={self.credentials.get("Storage", "storageaccountname")},password={self.credentials.get("Storage", "storageaccountkey")},dir_mode=0777,file_mode=0777,serverino && ls /mnt/PolycraftGame',
                # Copy the default world file to the right folder
                f'cp /mnt/PolycraftGame/{self.config.get("SERVER","fileShareFolder")}/worlds/{self.config.get("SERVER","worldZipName")}.tar.gz /home/polycraft/oxygen/',
                'cd /home/polycraft/oxygen/',
                # 'sudo rm -r helium',
                f'gzip -d /home/polycraft/oxygen/{self.config.get("SERVER","worldZipName")}.tar.gz',
                'echo "[DEBUG] extracting the tar"',
                'ls -l',
                f'sudo tar -xf {self.config.get("SERVER","worldZipName")}.tar',
                'echo "[DEBUG] extracted the tar"',
                'ls -l',
                # 'sudo mv helium-backup-0924 helium',
                # The archive extracts to "helium"; rename it to the
                # configured world name.
                f'sudo mv helium {self.config.get("SERVER","worldName")}',  # TODO Remove this once we finalize the server name?
                f'chmod -R 777 {self.config.get("SERVER","worldName")}/',  # NOTE: The folder inside here is called helium!
                'echo "[DEBUG] Adjusted permissions for helium?"',
                'ls -l',
            ]),
        wait_for_success=True,
        # user_accounts=users,
        user_identity=batchmodels.UserIdentity(
            # user_name='azureuser',
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)
            # ),
        ),
    )

    # Expose the game and API ports through inbound NAT pools; frontend
    # ranges allow multiple nodes behind one public endpoint.
    net_config = batchmodels.NetworkConfiguration(
        # subnet_id="/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/vnet-eastus-azurebatch/providers/Microsoft.Network/virtualNetworks/vnet-eastus-azurebatch/subnets/main-batch-subnet",
        endpoint_configuration=batchmodels.
        PoolEndpointConfiguration(inbound_nat_pools=[
            batchmodels.InboundNATPool(
                name='minecraftServer',
                protocol='tcp',
                backend_port=25565,
                frontend_port_range_start=44000,
                frontend_port_range_end=44099,
                network_security_group_rules=[
                    batchmodels.NetworkSecurityGroupRule(
                        priority=199,
                        access='allow',
                        source_address_prefix='*'),
                ]),
            batchmodels.InboundNATPool(
                name='api_port',
                protocol='tcp',
                # Fall back to 9007 when the configured value is missing
                # or not a plain decimal string.
                backend_port=int(api_port)
                if api_port and api_port.isdecimal() else 9007,
                frontend_port_range_start=44500,
                frontend_port_range_end=44599,
                network_security_group_rules=[
                    # batchmodels.NetworkSecurityGroupRule(
                    #     priority=170,
                    #     access='allow',
                    #     source_address_prefix='192.168.1.0/24'  # TODO: is this the right subnet?
                    # ),
                    batchmodels.NetworkSecurityGroupRule(
                        priority=198,
                        access='allow',  # 'deny'
                        source_address_prefix=
                        '*'  # TODO: only allow access to the right ports
                    )
                ]),
        ]))

    pool = batchmodels.PoolAddParameter(
        id=id,
        vm_size=self.config.get('POOL', 'vm_size'),
        # Same decimal-string fallback as api_port: default to one node.
        target_dedicated_nodes=int(min_count)
        if min_count and min_count.isdecimal() else 1,
        virtual_machine_configuration=vmc,
        start_task=start_task,
        user_accounts=users,
        network_configuration=net_config)

    helpers.create_pool_if_not_exist(self.client, pool)
    self.start_mc_server_job_pool(pool.target_dedicated_nodes)
def createUbuntu1604Pool(batch_client, pool_id, startup_shell_cmds,
                         resource_files, node_dedicated_count=0,
                         node_low_prio_count=1, node_VM_size='STANDARD_A1',
                         vm_image_id=None):
    """Create an Ubuntu 16.04 Batch pool running the given startup commands.

    :param batch_client: A Batch service client.
    :param str pool_id: An ID for the new pool.
    :param startup_shell_cmds: Shell command(s) for the start task; a single
        string is accepted and wrapped in a list.
    :param resource_files: Resource files for the start task.
    :param int node_dedicated_count: Number of dedicated nodes.
    :param int node_low_prio_count: Number of low-priority nodes.
    :param str node_VM_size: VM size for the pool's nodes.
    :param vm_image_id: Optional custom VM image resource ID; when None the
        verified Marketplace image for the ubuntu 16.04 node agent is used.
    """
    # BUGFIX: the original tested `startup_shell_cmds == str`, comparing the
    # value against the type object itself — always False — so a bare string
    # was never wrapped in a list and ';'.join() below would interleave ';'
    # between individual characters. isinstance() is the correct check.
    if isinstance(startup_shell_cmds, str):
        if startup_shell_cmds == "":
            startup_shell_cmds = "echo"  # no-op placeholder command
        startup_shell_cmds = [startup_shell_cmds]

    logging.info("creating pool")

    # Run the start task as an elevated pool-scope auto-user.
    user = batchmodels.AutoUserSpecification(
        scope=batchmodels.AutoUserScope.pool,
        elevation_level=batchmodels.ElevationLevel.admin)

    # Fail fast on any command (-e) or pipe failure (-o pipefail).
    full_command = '/bin/bash -c \'set -e; set -o pipefail; {}; wait\''.format(
        ';'.join(startup_shell_cmds))

    start_task = batch.models.StartTask(
        command_line=full_command,
        user_identity=batchmodels.UserIdentity(auto_user=user),
        wait_for_success=True,
        resource_files=resource_files)

    # VM image config: Marketplace image by default, custom image if given.
    if vm_image_id is None:
        for sku in batch_client.account.list_node_agent_skus():
            if sku.id == 'batch.node.ubuntu 16.04':
                sku_to_use = sku.id
                image_ref_to_use = sku.verified_image_references
        vm_configuration = batchmodels.VirtualMachineConfiguration(
            image_reference=image_ref_to_use[0],
            node_agent_sku_id=sku_to_use)
    else:
        print("using custom image: %s" % vm_image_id)
        custom_image = batchmodels.ImageReference(
            offer='UbuntuServer',
            publisher='Canonical',
            sku='16.04-LTS',
            version='latest',
            virtual_machine_image_id=vm_image_id,
        )
        print(custom_image)
        vm_configuration = batchmodels.VirtualMachineConfiguration(
            node_agent_sku_id='batch.node.ubuntu 16.04',
            image_reference=custom_image)

    # Gather pool params and create the pool.
    new_pool = batch.models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=vm_configuration,
        vm_size=node_VM_size,
        enable_auto_scale=False,
        target_dedicated_nodes=node_dedicated_count,
        target_low_priority_nodes=node_low_prio_count,
        start_task=start_task)

    batch_client.pool.add(new_pool)
    logging.info("pool creation finished")
def create_pool_with_containers(batch_service_client, pool_id, resource_files,
                                publisher, offer, sku):
    """
    Creates a pool of compute nodes with the specified OS settings.

    :param batch_service_client: A Batch service client.
    :type batch_service_client: `azure.batch.BatchServiceClient`
    :param str pool_id: An ID for the new pool.
    :param list resource_files: A collection of resource files for the pool's
    start task.
    :param str publisher: Marketplace image publisher
    :param str offer: Marketplace image offer
    :param str sku: Marketplace image sku
    """
    print('Creating pool [{}]...'.format(pool_id))

    # Create a new pool of Linux compute nodes using an Azure Virtual Machines
    # Marketplace image. For more information about creating pools of Linux
    # nodes, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/

    # Specify the commands for the pool's start task. The start task is run
    # on each node as it joins the pool, and when it's rebooted or re-imaged.
    # We use the start task to prep the node for running our task script.
    task_commands = [
        # Copy the python_tutorial_task.py script to the "shared" directory
        # that all tasks that run on the node have access to. Note that
        # we are using the -p flag with cp to preserve the file uid/gid,
        # otherwise since this start task is run as an admin, it would not
        # be accessible by tasks run as a non-admin user.
        #'wget https://packages.microsoft.com/config/ubuntu/16.04/packages-microsoft-prod.deb',
        #'sudo dpkg -i packages-microsoft-prod.deb',
        #'wget -O azcopy.tar.gz https://aka.ms/downloadazcopylinux64',
        #'tar -xf azcopy.tar.gz',
        #'sudo ./install.sh',
        #'wget https://repo.anaconda.com/archive/Anaconda3-5.1.0-Linux-x86_64.sh -O ~/conda.sh',
        #'bash ~/conda.sh -b -p $AZ_BATCH_NODE_SHARED_DIR/conda',
        #'export PATH="$AZ_BATCH_NODE_SHARED_DIR/conda/bin:$PATH"',
        #'sudo apt-get -y update',
        #'sudo apt-get -y install azcopy',
        'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(_TUTORIAL_TASK_FILE),
        #'cp -p {} $AZ_BATCH_NODE_SHARED_DIR'.format(_ENV_YML_FILE),
        # Stage the model file from blob storage into the shared directory.
        # NOTE(review): this embeds the storage account key in the command
        # line, which is visible in task details — consider a SAS token.
        'azcopy --source https://{0}.blob.core.windows.net/model/ghanamines.h5 --destination $AZ_BATCH_NODE_SHARED_DIR/ghanamines.h5 --source-key {1}'
        .format(_STORAGE_ACCOUNT_NAME, _STORAGE_ACCOUNT_KEY),
        #'sudo $AZ_BATCH_NODE_SHARED_DIR/conda/bin/conda env create -f {}'.format(_ENV_YML_FILE)
    ]

    # Get the node agent SKU and image reference for the virtual machine
    # configuration.
    # For more information about the virtual machine configuration, see:
    # https://azure.microsoft.com/documentation/articles/batch-linux-nodes/
    sku_to_use, image_ref_to_use = \
        common_helpers.select_latest_verified_vm_image_with_node_agent_sku(
            batch_service_client, publisher, offer, sku)

    # Elevated pool-scope auto-user for the start task.
    user = batchmodels.AutoUserSpecification(
        scope=batchmodels.AutoUserScope.pool,
        elevation_level=batchmodels.ElevationLevel.admin)

    # Private container registry credentials and the image to pre-fetch.
    container_reg = batchmodels.ContainerRegistry(user_name=CLIENT_ID,
                                                  password=SECRET,
                                                  registry_server=_ACR_URL)
    container_cfg = batchmodels.ContainerConfiguration(
        container_image_names=[_ACR_IMG_NAME],
        container_registries=[container_reg])

    # NOTE(review): the pool uses the custom VM image below while
    # node_agent_sku_id comes from the Marketplace lookup above — assumes
    # the custom image matches that agent SKU; verify the image
    # (e.g. that it has GPU support) before relying on this.
    my_img_ref = batchmodels.ImageReference(
        virtual_machine_image_id=_CUSTOM_VM_IMG_ID)
    vm_cfg = batchmodels.VirtualMachineConfiguration(
        image_reference=my_img_ref,
        node_agent_sku_id=
        sku_to_use,  #'batch.node.ubuntu 16.04', ## verify that the ghanaimg image has a GPU
        container_configuration=container_cfg)

    task_containersettings = batchmodels.TaskContainerSettings(
        image_name=_ACR_IMG_NAME)

    new_pool = batchmodels.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=vm_cfg,
        vm_size=_POOL_VM_SIZE,
        target_dedicated_nodes=_POOL_NODE_COUNT,
        target_low_priority_nodes=1,
        start_task=batch.models.StartTask(
            command_line=common_helpers.wrap_commands_in_shell(
                'linux', task_commands),
            user_identity=batchmodels.UserIdentity(auto_user=user),
            wait_for_success=True,
            resource_files=resource_files,
            container_settings=task_containersettings))
    try:
        batch_service_client.pool.add(new_pool)
    except batchmodels.batch_error.BatchErrorException as err:
        # Log the service error details, then propagate to the caller.
        print_batch_exception(err)
        raise