def __create_pool_and_job(self, cluster_conf: models.ClusterConfiguration, software_metadata_key: str, start_task, VmImageModel):
    """
    Create the Batch pool and the job that back a cluster.

    The pool id and the job id are both set to the cluster id so that
    either resource can be located from the cluster name alone.

    :param cluster_conf: the configuration object used to create the cluster
    :type cluster_conf: aztk.models.ClusterConfiguration
    :param software_metadata_key: the id of the software being used on the cluster
    :param start_task: the start task for the cluster
    :param VmImageModel: the type of image to provision for the cluster
    :returns: the cluster wrapper from helpers.get_cluster
    """
    # Persist the cluster configuration so it can be read back later.
    self._get_cluster_data(cluster_conf.cluster_id).save_cluster_config(cluster_conf)

    # reuse pool_id as job_id
    pool_id = cluster_conf.cluster_id
    job_id = cluster_conf.cluster_id

    # Get a verified node agent sku for the requested marketplace image
    sku_to_use, image_ref_to_use = \
        helpers.select_latest_verified_vm_image_with_node_agent_sku(
            VmImageModel.publisher, VmImageModel.offer, VmImageModel.sku, self.batch_client)

    # Join the pool to a subnet only when one was configured
    network_conf = None
    if cluster_conf.subnet_id is not None:
        network_conf = batch_models.NetworkConfiguration(
            subnet_id=cluster_conf.subnet_id)

    auto_scale_formula = "$TargetDedicatedNodes={0}; $TargetLowPriorityNodes={1}".format(
        cluster_conf.vm_count, cluster_conf.vm_low_pri_count)

    # Configure the pool
    pool = batch_models.PoolAddParameter(
        id=pool_id,
        virtual_machine_configuration=batch_models.VirtualMachineConfiguration(
            image_reference=image_ref_to_use,
            node_agent_sku_id=sku_to_use),
        vm_size=cluster_conf.vm_size,
        enable_auto_scale=True,
        auto_scale_formula=auto_scale_formula,
        auto_scale_evaluation_interval=timedelta(minutes=5),
        start_task=start_task,
        # Inter-node communication is only enabled when no subnet was
        # supplied (was the non-idiomatic `True if not ... else False`).
        enable_inter_node_communication=not cluster_conf.subnet_id,
        max_tasks_per_node=4,
        network_configuration=network_conf,
        metadata=[
            batch_models.MetadataItem(
                name=constants.AZTK_SOFTWARE_METADATA_KEY, value=software_metadata_key),
            batch_models.MetadataItem(
                name=constants.AZTK_MODE_METADATA_KEY, value=constants.AZTK_CLUSTER_MODE_METADATA)
        ])

    # Create the pool + create user for the pool
    helpers.create_pool_if_not_exist(pool, self.batch_client)

    # Create job
    job = batch_models.JobAddParameter(
        id=job_id,
        pool_info=batch_models.PoolInformation(pool_id=pool_id))

    # Add job to batch
    self.batch_client.job.add(job)

    return helpers.get_cluster(cluster_conf.cluster_id, self.batch_client)
def test_batch_network_configuration(self, **kwargs):
    """Create a pool with an inbound endpoint configuration, then verify
    the inbound endpoints exposed on the allocated compute node."""
    client = self.create_aad_client(**kwargs)

    # Test Create Pool with Network Config
    nsg_rule = models.NetworkSecurityGroupRule(
        priority=150,
        access=models.NetworkSecurityGroupRuleAccess.allow,
        source_address_prefix='*')
    nat_pool = models.InboundNATPool(
        name="TestEndpointConfig",
        protocol=models.InboundEndpointProtocol.udp,
        backend_port=64444,
        frontend_port_range_start=60000,
        frontend_port_range_end=61000,
        network_security_group_rules=[nsg_rule])
    network_config = models.NetworkConfiguration(
        endpoint_configuration=models.PoolEndpointConfiguration(
            inbound_nat_pools=[nat_pool]))

    ubuntu_image = models.ImageReference(
        publisher="Canonical",
        offer="UbuntuServer",
        sku="16.04-LTS")
    virtual_machine_config = models.VirtualMachineConfiguration(
        node_agent_sku_id="batch.node.ubuntu 16.04",
        image_reference=ubuntu_image)

    pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_network_'),
        target_dedicated_nodes=1,
        vm_size='Standard_A1',
        virtual_machine_configuration=virtual_machine_config,
        network_configuration=network_config)
    client.pool.add(pool)

    # Poll (in live runs only) until pool allocation settles.
    network_pool = client.pool.get(pool.id)
    while self.is_live and network_pool.allocation_state != models.AllocationState.steady:
        time.sleep(10)
        network_pool = client.pool.get(pool.id)

    # Test Compute Node Config
    nodes = list(client.compute_node.list(pool.id))
    self.assertEqual(len(nodes), 1)
    node = nodes[0]
    self.assertIsInstance(node, models.ComputeNode)
    endpoints = node.endpoint_configuration.inbound_endpoints
    self.assertEqual(len(endpoints), 2)
    self.assertEqual(endpoints[0].name, 'TestEndpointConfig.0')
    self.assertEqual(endpoints[0].protocol.value, 'udp')
def test_batch_create_pools(self, **kwargs):
    """Pool-creation scenarios against a shared-key client: node agent SKU
    listing, an IaaS (Windows) pool, pool node counts, network/custom-image
    pools (expected to be rejected by the service), OS disk, data disk,
    application licenses, and pool listing with paging and filters.

    NOTE(review): uses positional model constructors (e.g. UserAccount,
    TaskSchedulingPolicy) — this matches an older azure-batch SDK; confirm
    against the pinned SDK version.
    """
    client = self.create_sharedkey_client(**kwargs)

    # Test List Node Agent SKUs
    response = client.account.list_node_agent_skus()
    response = list(response)
    self.assertTrue(len(response) > 1)
    self.assertEqual(response[-1].id, "batch.node.windows amd64")
    self.assertEqual(response[-1].os_type.value, "windows")
    self.assertTrue(len(response[-1].verified_image_references) > 1)

    # Test Create Iaas Pool
    users = [
        models.UserAccount('test-user-1', 'kt#_gahr!@aGERDXA'),
        models.UserAccount('test-user-2', 'kt#_gahr!@aGERDXA',
                           models.ElevationLevel.admin)
    ]
    test_iaas_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_iaas_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='MicrosoftWindowsServer',
                offer='WindowsServer',
                sku='2016-Datacenter-smalldisk'
            ),
            node_agent_sku_id='batch.node.windows amd64',
            windows_configuration=models.WindowsConfiguration(True)),
        task_scheduling_policy=models.TaskSchedulingPolicy(models.ComputeNodeFillType.pack),
        user_accounts=users
    )
    # Successful adds return None.
    response = client.pool.add(test_iaas_pool)
    self.assertIsNone(response)

    # Test list pool node count
    counts = list(client.account.list_pool_node_counts())
    self.assertIsNotNone(counts)
    self.assertEqual(len(counts), 1)
    self.assertEqual(counts[0].pool_id, test_iaas_pool.id)
    self.assertIsNotNone(counts[0].dedicated)
    self.assertEqual(counts[0].dedicated.total, 0)
    self.assertEqual(counts[0].low_priority.total, 0)

    # Test Create Pool with Network Configuration
    # (fake subscription id — the service is expected to reject it)
    network_config = models.NetworkConfiguration('/subscriptions/00000000-0000-0000-0000-000000000000'
                                                 '/resourceGroups/test'
                                                 '/providers/Microsoft.Network'
                                                 '/virtualNetworks/vnet1'
                                                 '/subnets/subnet1')
    test_network_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_network_'),
        vm_size='Standard_A1',
        network_configuration=network_config,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04')
    )
    self.assertBatchError('InvalidPropertyValue', client.pool.add,
                          test_network_pool, models.PoolAddOptions(timeout=45))

    # Test Create Pool with Custom Image (fake image id — expected rejection)
    test_image_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_image_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                virtual_machine_image_id="/subscriptions/00000000-0000-0000-0000-000000000000"
                                         "/resourceGroups/test"
                                         "/providers/Microsoft.Compute"
                                         "/images/FakeImage"
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04'
        )
    )
    self.assertBatchError('InvalidPropertyValue', client.pool.add,
                          test_image_pool, models.PoolAddOptions(timeout=45))

    # Test Create Pool with OSDisk
    os_disk = models.OSDisk(caching=models.CachingType.read_write)
    test_osdisk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_osdisk_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            os_disk=os_disk)
    )
    response = client.pool.add(test_osdisk_pool)
    self.assertIsNone(response)
    # Round-trip: the caching mode must survive the service round trip.
    osdisk_pool = client.pool.get(test_osdisk_pool.id)
    self.assertEqual(osdisk_pool.virtual_machine_configuration.os_disk.caching,
                     os_disk.caching)

    # Test Create Pool with Data Disk
    data_disk = models.DataDisk(lun=1, disk_size_gb=50)
    test_disk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_disk_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_disk_pool)
    self.assertIsNone(response)
    disk_pool = client.pool.get(test_disk_pool.id)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].lun, 1)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].disk_size_gb, 50)

    # Test Create Pool with Application Licenses
    test_app_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_app_'),
        vm_size='Standard_A1',
        application_licenses=["maya"],
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_app_pool)
    self.assertIsNone(response)
    app_pool = client.pool.get(test_app_pool.id)
    self.assertEqual(app_pool.application_licenses[0], "maya")

    # Test List Pools without Filters
    pools = list(client.pool.list())
    self.assertTrue(len(pools) > 1)

    # Test List Pools with Maximum (one page of one result)
    options = models.PoolListOptions(max_results=1)
    pools = client.pool.list(options)
    pools.next()
    self.assertEqual(len(pools.current_page), 1)

    # Test List Pools with Filter
    options = models.PoolListOptions(
        filter='startswith(id,\'batch_app_\')',
        select='id,state',
        expand='stats')
    pools = list(client.pool.list(options))
    self.assertEqual(len(pools), 1)
def check_or_create_pool(self, id=None):
    """Return the Batch pool with the given id, creating it (and its
    server job) when it does not already exist.

    When the pool exists, the matching job id is also recovered from the
    job list; if no job references the pool, the server job pool is
    restarted before returning.

    :param id: pool id; when None it is read from the POOL section of
        self.config.
    """
    if id is None:
        id = self.config.get('POOL', 'id')
    self.pool_id = id
    if self.client.pool.exists(id):
        found_job = False
        # Update the Job ID here: adopt whichever job targets this pool.
        for job in self.client.job.list():
            if job.pool_info.pool_id == self.pool_id:
                self.job_id = job.id
                found_job = True
                break
        if not found_job:
            self.start_mc_server_job_pool()  # Restart Jobs for this pool - this is necessary!
        return self.client.pool.get(id)

    # Pool does not exist: build it from configuration.
    api_port = self.config.get('POOL', 'api_port')
    min_count = self.config.get('POOL', 'mincount')
    # Custom gallery image containing the game server.
    image_reference = batchmodels.ImageReference(
        virtual_machine_image_id=
        "/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/polycraft-game/providers/Microsoft.Compute/galleries/polycraftImgGallery/images/polycraftBestGameServerV1/versions/1.0.0"
    )
    vmc = batchmodels.VirtualMachineConfiguration(
        image_reference=image_reference,
        node_agent_sku_id="batch.node.ubuntu 18.04")
    users = [
        batchmodels.UserAccount(
            name='azureuser',
            password='******',
            elevation_level=batchmodels.ElevationLevel.admin),
        # batchmodels.UserAccount(
        #     name='pool-nonadmin',
        #     password='******',
        #     elevation_level=batchmodels.ElevationLevel.non_admin)
    ]

    # Shell snippet prepended to apt commands so they wait for dpkg/apt
    # locks instead of failing.
    # Thank you Ask Ubuntu https://askubuntu.com/a/373478
    wait_for_locks = 'while sudo fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do echo "Waiting for release of apt locks"; sleep 2; done; '

    # NOTE: Always use DOUBLE QUOTES within commands as azure prepends the entire string with a single quote.
    start_task = batchmodels.StartTask(
        command_line=helpers.wrap_commands_in_shell(
            'linux',
            [
                'whoami',
                'printenv',
                'usermod -aG sudo azureuser',
                # Disable unattended apt timers so they cannot hold locks.
                'sudo systemctl disable --now apt-daily.timer',
                'sudo systemctl disable --now apt-daily-upgrade.timer',
                'sudo systemctl daemon-reload',
                'cd /home/polycraft',
                'chmod -R 777 *',
                'rm /home/polycraft/oxygen/mods/*.jar',
                'cd /home/polycraft/oxygen/',
                'echo "[DEBUG] removing helium..."',
                'ls -l',
                f'sudo rm -rf /home/polycraft/oxygen/{self.config.get("SERVER","worldName")}',
                'sudo rm -f *.zip',
                'echo "[DEBUG] removed helium?"',
                'ls -l',
                # Stop the crontabs from running
                'sudo rm /var/spool/cron/crontabs/*',
                # Taken from: https://stackoverflow.com/questions/45269225/ansible-playbook-fails-to-lock-apt/51919678#51919678
                'sudo systemd-run --property="After=apt-daily.service apt-daily-upgrade.service" --wait /bin/true',
                'sudo apt-get -y purge unattended-upgrades',
                'sudo apt-get -y update',
                wait_for_locks + 'sudo apt-get install software-properties-common -y',
                # 'while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do sleep 1; done; sudo apt-add-repository universe',
                wait_for_locks + 'sudo apt-add-repository universe',
                # Mount the Polycraft Game FileShare
                wait_for_locks + 'sudo apt-get install cifs-utils -y && sudo mkdir -p /mnt/PolycraftGame/',
                f'mount -t cifs //polycraftbestbatch.file.core.windows.net/best-batch-round-1-test /mnt/PolycraftGame -o vers=3.0,username={self.credentials.get("Storage", "storageaccountname")},password={self.credentials.get("Storage", "storageaccountkey")},dir_mode=0777,file_mode=0777,serverino && ls /mnt/PolycraftGame',
                # Copy the default world file to the right folder
                f'cp /mnt/PolycraftGame/{self.config.get("SERVER","fileShareFolder")}/worlds/{self.config.get("SERVER","worldZipName")}.tar.gz /home/polycraft/oxygen/',
                'cd /home/polycraft/oxygen/',
                # 'sudo rm -r helium',
                f'gzip -d /home/polycraft/oxygen/{self.config.get("SERVER","worldZipName")}.tar.gz',
                'echo "[DEBUG] extracting the tar"',
                'ls -l',
                f'sudo tar -xf {self.config.get("SERVER","worldZipName")}.tar',
                'echo "[DEBUG] extracted the tar"',
                'ls -l',
                # 'sudo mv helium-backup-0924 helium',
                f'sudo mv helium {self.config.get("SERVER","worldName")}',  # TODO Remove this once we finalize the server name?
                f'chmod -R 777 {self.config.get("SERVER","worldName")}/',  # NOTE: The folder inside here is called helium!
                'echo "[DEBUG] Adjusted permissions for helium?"',
                'ls -l',
            ]),
        wait_for_success=True,
        # user_accounts=users,
        # Run the start task as an elevated pool-wide auto user.
        user_identity=batchmodels.UserIdentity(
            # user_name='azureuser',
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)
            # ),
        ),
    )

    # Expose the game server and API ports through inbound NAT pools.
    net_config = batchmodels.NetworkConfiguration(
        # subnet_id="/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/vnet-eastus-azurebatch/providers/Microsoft.Network/virtualNetworks/vnet-eastus-azurebatch/subnets/main-batch-subnet",
        endpoint_configuration=batchmodels.PoolEndpointConfiguration(inbound_nat_pools=[
            batchmodels.InboundNATPool(
                name='minecraftServer',
                protocol='tcp',
                backend_port=25565,
                frontend_port_range_start=44000,
                frontend_port_range_end=44099,
                network_security_group_rules=[
                    batchmodels.NetworkSecurityGroupRule(
                        priority=199,
                        access='allow',
                        source_address_prefix='*'),
                ]),
            batchmodels.InboundNATPool(
                name='api_port',
                protocol='tcp',
                # api_port comes from config as a string; fall back to 9007
                # when missing or non-numeric.
                backend_port=int(api_port) if api_port and api_port.isdecimal() else 9007,
                frontend_port_range_start=44500,
                frontend_port_range_end=44599,
                network_security_group_rules=[
                    # batchmodels.NetworkSecurityGroupRule(
                    #     priority=170,
                    #     access='allow',
                    #     source_address_prefix='192.168.1.0/24'  # TODO: is this the right subnet?
                    # ),
                    batchmodels.NetworkSecurityGroupRule(
                        priority=198,
                        access='allow',  # 'deny'
                        source_address_prefix=
                        '*'  # TODO: only allow access to the right ports
                    )
                ]),
        ]))

    pool = batchmodels.PoolAddParameter(
        id=id,
        vm_size=self.config.get('POOL', 'vm_size'),
        # mincount comes from config as a string; default to one node.
        target_dedicated_nodes=int(min_count) if min_count and min_count.isdecimal() else 1,
        virtual_machine_configuration=vmc,
        start_task=start_task,
        user_accounts=users,
        network_configuration=net_config)

    helpers.create_pool_if_not_exist(self.client, pool)
    self.start_mc_server_job_pool(pool.target_dedicated_nodes)
def __submit_job(self,
                 job_configuration,
                 start_task,
                 job_manager_task,
                 autoscale_formula,
                 software_metadata_key: str,
                 vm_image_model,
                 application_metadata):
    """
    Job Submission: submit a job as a job schedule backed by an auto pool.

    :param job_configuration -> aztk_sdk.spark.models.JobConfiguration
    :param start_task -> batch_models.StartTask
    :param job_manager_task -> batch_models.TaskAddParameter
    :param autoscale_formula -> str
    :param software_metadata_key -> str
    :param vm_image_model -> aztk_sdk.models.VmImage
    :param application_metadata: value stored in the job's 'applications'
        metadata item
    :returns: the created CloudJobSchedule (fetched back from the service)
    """
    # Persist the configuration so the job can be inspected later.
    self._get_cluster_data(job_configuration.id).save_cluster_config(job_configuration.to_cluster_config())

    # get a verified node agent sku
    sku_to_use, image_ref_to_use = \
        helpers.select_latest_verified_vm_image_with_node_agent_sku(
            vm_image_model.publisher, vm_image_model.offer, vm_image_model.sku, self.batch_client)

    # set up subnet if necessary
    network_conf = None
    if job_configuration.subnet_id:
        network_conf = batch_models.NetworkConfiguration(
            subnet_id=job_configuration.subnet_id)

    # set up a schedule for a recurring job; the pool lives only as long
    # as the scheduled job (keep_alive=False).
    auto_pool_specification = batch_models.AutoPoolSpecification(
        pool_lifetime_option=batch_models.PoolLifetimeOption.job_schedule,
        auto_pool_id_prefix=job_configuration.id,
        keep_alive=False,
        pool=batch_models.PoolSpecification(
            display_name=job_configuration.id,
            virtual_machine_configuration=batch_models.VirtualMachineConfiguration(
                image_reference=image_ref_to_use,
                node_agent_sku_id=sku_to_use),
            vm_size=job_configuration.vm_size,
            enable_auto_scale=True,
            auto_scale_formula=autoscale_formula,
            auto_scale_evaluation_interval=timedelta(minutes=5),
            start_task=start_task,
            # Inter-node communication is disabled in mixed mode.
            enable_inter_node_communication=not job_configuration.mixed_mode(),
            network_configuration=network_conf,
            max_tasks_per_node=4,
            metadata=[
                batch_models.MetadataItem(
                    name=constants.AZTK_SOFTWARE_METADATA_KEY, value=software_metadata_key),
                batch_models.MetadataItem(
                    name=constants.AZTK_MODE_METADATA_KEY, value=constants.AZTK_JOB_MODE_METADATA)
            ]
        )
    )

    # define job specification
    job_spec = batch_models.JobSpecification(
        pool_info=batch_models.PoolInformation(auto_pool_specification=auto_pool_specification),
        display_name=job_configuration.id,
        on_all_tasks_complete=batch_models.OnAllTasksComplete.terminate_job,
        job_manager_task=job_manager_task,
        metadata=[
            batch_models.MetadataItem(
                name='applications', value=application_metadata)
        ]
    )

    # define schedule (all None -> run once, immediately)
    schedule = batch_models.Schedule(
        do_not_run_until=None,
        do_not_run_after=None,
        start_window=None,
        recurrence_interval=None
    )

    # create job schedule and add task
    setup = batch_models.JobScheduleAddParameter(
        id=job_configuration.id,
        schedule=schedule,
        job_specification=job_spec)

    self.batch_client.job_schedule.add(setup)
    return self.batch_client.job_schedule.get(job_schedule_id=job_configuration.id)
def create_pool(self,
                pool_id,
                vm_size,
                target_dedicated,
                target_low_priority,
                batch_image_spec,
                starttask_cmd,
                starttask_url,
                starttask_script,
                sp_cert_thumb,
                app_licenses=None,
                disable_remote_access=True,
                app_pkgs=None,
                subnet_id=None,
                app_insights_app_key=None,
                app_insights_instrumentation_key=None):
    """Create an Azure Batch pool with a start task, optionally joining a
    subnet, wiring in Application Insights keys, and blocking inbound
    RDP/SSH.

    :param pool_id: id (and display name) for the new pool
    :param vm_size: Azure VM size for the pool's nodes
    :param target_dedicated: number of dedicated nodes
    :param target_low_priority: number of low-priority nodes
    :param batch_image_spec: object providing get_virtual_machine_configuration()
    :param starttask_cmd: command line for the start task
    :param starttask_url: URL of the start-task script resource file
    :param starttask_script: file path the resource file is downloaded to
    :param sp_cert_thumb: SHA1 thumbprint of the service-principal certificate
    :param app_licenses: optional list of application license names
    :param disable_remote_access: when True, add NSG rules denying RDP/SSH
    :param app_pkgs: optional application package references
    :param subnet_id: optional virtual-network subnet to join
    :param app_insights_app_key: optional App Insights application id
    :param app_insights_instrumentation_key: optional App Insights key
    :raises: re-raises BatchErrorException after printing its details
    """
    pool = batchmodels.PoolAddParameter(
        id=pool_id,
        display_name=pool_id,
        vm_size=vm_size,
        target_dedicated_nodes=target_dedicated,
        target_low_priority_nodes=target_low_priority,
        virtual_machine_configuration=batch_image_spec.
        get_virtual_machine_configuration(),
        application_package_references=app_pkgs,
        certificate_references=[
            batchmodels.CertificateReference(sp_cert_thumb, 'sha1')
        ])
    if app_licenses:
        pool.application_licenses = app_licenses

    # Start task runs elevated as a pool-scope auto user and must succeed
    # before the node accepts tasks.
    pool.start_task = batchmodels.StartTask(
        command_line=starttask_cmd,
        max_task_retry_count=3,
        user_identity=batchmodels.UserIdentity(
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)),
        wait_for_success=True,
        resource_files=[
            batchmodels.ResourceFile(starttask_url, starttask_script)
        ])

    # Only pass App Insights settings through when both halves are present.
    if app_insights_app_key and app_insights_instrumentation_key:
        pool.start_task.environment_settings = [
            batchmodels.EnvironmentSetting('APP_INSIGHTS_APP_ID',
                                           app_insights_app_key),
            batchmodels.EnvironmentSetting(
                'APP_INSIGHTS_INSTRUMENTATION_KEY',
                app_insights_instrumentation_key)
        ]

    if subnet_id:
        pool.network_configuration = batchmodels.NetworkConfiguration(
            subnet_id=subnet_id)

    if disable_remote_access:
        # Need a network configuration to hang the endpoint rules off of.
        if pool.network_configuration is None:
            pool.network_configuration = batchmodels.NetworkConfiguration()
        # Deny inbound RDP (3389) and SSH (22) via NSG rules on NAT pools.
        endpoint_config = batchmodels.PoolEndpointConfiguration(
            inbound_nat_pools=[
                batchmodels.InboundNATPool(
                    'DisableRDP',
                    batchmodels.InboundEndpointProtocol.tcp,
                    3389,
                    60000,
                    60099,
                    network_security_group_rules=[
                        batchmodels.NetworkSecurityGroupRule(
                            150, batchmodels.
                            NetworkSecurityGroupRuleAccess.deny, '*')
                    ]),
                batchmodels.InboundNATPool(
                    'DisableSSH',
                    batchmodels.InboundEndpointProtocol.tcp,
                    22,
                    61000,
                    61099,
                    network_security_group_rules=[
                        batchmodels.NetworkSecurityGroupRule(
                            151, batchmodels.
                            NetworkSecurityGroupRuleAccess.deny, '*')
                    ])
            ])
        pool.network_configuration.endpoint_configuration = endpoint_config

    try:
        client = self._get_batch_client()
        client.pool.add(pool)
    except batchmodels.BatchErrorException as be:
        # Print the service's error details before re-raising.
        if be.error:
            print('Error creating pool, code={}, message={}'.format(
                be.error.code, be.error.message))
            if be.error.values:
                for e in be.error.values:
                    print('Key={}, Value={}'.format(e.key, e.value))
        raise
def test_batch_create_pools(self, **kwargs):
    """Pool-creation scenarios against a shared-key client (newer SDK,
    keyword-argument models): supported-image listing, an IaaS (Windows)
    pool, pool node counts, network/gallery-image pools (expected to be
    rejected by the service), data disk, application licenses, Azure Disk
    Encryption, and pool listing with paging and filters."""
    client = self.create_sharedkey_client(**kwargs)

    # Test List Node Agent SKUs
    response = client.account.list_supported_images()
    response = list(response)
    self.assertTrue(len(response) > 1)
    self.assertIsNotNone(response[-1].image_reference)

    # Test Create Iaas Pool
    users = [
        models.UserAccount(name='test-user-1', password='******'),
        models.UserAccount(name='test-user-2', password='******',
                           elevation_level=models.ElevationLevel.admin)
    ]
    test_iaas_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_iaas_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='MicrosoftWindowsServer',
                offer='WindowsServer',
                sku='2016-Datacenter-smalldisk'
            ),
            node_agent_sku_id='batch.node.windows amd64',
            windows_configuration=models.WindowsConfiguration(enable_automatic_updates=True)),
        task_scheduling_policy=models.TaskSchedulingPolicy(node_fill_type=models.ComputeNodeFillType.pack),
        user_accounts=users
    )
    # Successful adds return None.
    response = client.pool.add(test_iaas_pool)
    self.assertIsNone(response)

    # Test list pool node count
    counts = list(client.account.list_pool_node_counts())
    self.assertIsNotNone(counts)
    self.assertEqual(len(counts), 1)
    self.assertEqual(counts[0].pool_id, test_iaas_pool.id)
    self.assertIsNotNone(counts[0].dedicated)
    self.assertEqual(counts[0].dedicated.total, 0)
    self.assertEqual(counts[0].dedicated.leaving_pool, 0)
    self.assertEqual(counts[0].low_priority.total, 0)

    # Test Create Pool with Network Configuration
    # (fake subscription id — the service is expected to reject it)
    #TODO Public IP tests
    network_config = models.NetworkConfiguration(subnet_id='/subscriptions/00000000-0000-0000-0000-000000000000'
                                                 '/resourceGroups/test'
                                                 '/providers/Microsoft.Network'
                                                 '/virtualNetworks/vnet1'
                                                 '/subnets/subnet1')
    test_network_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_network_'),
        vm_size=DEFAULT_VM_SIZE,
        network_configuration=network_config,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04')
    )
    self.assertBatchError('InvalidPropertyValue', client.pool.add,
                          test_network_pool, models.PoolAddOptions(timeout=45))

    # Test Create Pool with a (fake) shared-gallery custom image —
    # also expected to be rejected.
    test_image_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_image_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                virtual_machine_image_id="/subscriptions/00000000-0000-0000-0000-000000000000"
                                         "/resourceGroups/test"
                                         "/providers/Microsoft.Compute"
                                         "/gallery/FakeGallery"
                                         "/images/FakeImage"
                                         "/versions/version"
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04'
        )
    )
    self.assertBatchError('InvalidPropertyValue', client.pool.add,
                          test_image_pool, models.PoolAddOptions(timeout=45))

    # Test Create Pool with Data Disk
    data_disk = models.DataDisk(lun=1, disk_size_gb=50)
    test_disk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_disk_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_disk_pool)
    self.assertIsNone(response)
    disk_pool = client.pool.get(test_disk_pool.id)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].lun, 1)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].disk_size_gb, 50)

    # Test Create Pool with Application Licenses
    test_app_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_app_'),
        vm_size=DEFAULT_VM_SIZE,
        application_licenses=["maya"],
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_app_pool)
    self.assertIsNone(response)
    app_pool = client.pool.get(test_app_pool.id)
    self.assertEqual(app_pool.application_licenses[0], "maya")

    # Test Create Pool with Azure Disk Encryption
    test_ade_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_ade_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            disk_encryption_configuration=models.DiskEncryptionConfiguration(
                targets=[models.DiskEncryptionTarget.temporary_disk]
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04')
    )
    response = client.pool.add(test_ade_pool)
    self.assertIsNone(response)
    # Round-trip: encryption targets must survive the service round trip.
    ade_pool = client.pool.get(test_ade_pool.id)
    self.assertEqual(ade_pool.virtual_machine_configuration.disk_encryption_configuration.targets,
                     [models.DiskEncryptionTarget.temporary_disk])

    # Test List Pools without Filters
    pools = list(client.pool.list())
    self.assertTrue(len(pools) > 1)

    # Test List Pools with Maximum (one page of one result)
    options = models.PoolListOptions(max_results=1)
    pools = client.pool.list(options)
    pools.next()
    self.assertEqual(len(pools.current_page), 1)

    # Test List Pools with Filter
    options = models.PoolListOptions(
        filter='startswith(id,\'batch_app_\')',
        select='id,state',
        expand='stats')
    pools = list(client.pool.list(options))
    self.assertEqual(len(pools), 1)
def get_pool(self, pool_id, vm_size="standard_d15_v2", node_count=0):
    '''
    Creates an Azure Batch pool, or reuses an existing one with the same id.

    :param pool_id: The pool_id of the pool to create
    :type pool_id: string
    :param vm_size: the type of compute nodes in the pool.
        (Defaults to 'standard_d15_v2')
    :type vm_size: string
    :param node_count: The number of compute nodes to initially create in
        the pool. Defaults to 0.
    :type node_count: number
    :rtype: string
    :return: the pool_id of the created (or reused) pool; None when a new
        pool was created (the original code did not return after creation)
    '''
    batch_client = self.batch_client()
    pool_list = list(batch_client.pool.list())
    for pool in pool_list:
        if pool.id == pool_id:
            # We have a pool with this pool_id — log whether it is busy,
            # then reuse it either way.
            node_list = list(batch_client.compute_node.list(pool.id))
            for node in node_list:
                if node.running_tasks_count > 0:
                    logging.info(
                        "pool '{0}' exists and is busy".format(pool_id))
                    break
            return pool.id

    logging.info(
        "pool '{0}' does not exist and will be created".format(pool_id))

    # Start task runs elevated so the startup script can install software.
    user_identity = batchmodels.UserIdentity(
        auto_user=batchmodels.AutoUserSpecification(
            elevation_level='admin'))
    start_task = batchmodels.StartTask(
        # Raw string: the path separator must stay a literal backslash
        # (the original relied on '\s' not being a recognized escape).
        command_line=r"cmd /c %AZ_BATCH_APP_PACKAGE_STARTUP%\startup.bat",
        user_identity=user_identity,
        wait_for_success=True)

    new_pool = batchmodels.PoolAddParameter(id=pool_id,
                                            vm_size=vm_size,
                                            start_task=start_task)
    # NOTE(review): newer azure-batch SDKs renamed this attribute to
    # target_dedicated_nodes — confirm against the pinned SDK version.
    new_pool.target_dedicated = node_count
    new_pool.max_tasks_per_node = 1
    cloud_service_configuration = batchmodels.CloudServiceConfiguration(
        os_family=4)
    new_pool.cloud_service_configuration = cloud_service_configuration
    new_pool.application_package_references = [
        batchmodels.ApplicationPackageReference("anaconda2"),
        batchmodels.ApplicationPackageReference("startup")
    ]

    # Join the pool to the configured vnet's default subnet. The subnet
    # must be in the same region and subscription as the Batch account,
    # and can only be specified for cloudServiceConfiguration pools.
    # Expected form: /subscriptions/{0}/resourceGroups/{1}/providers/{2}/virtualNetworks/{3}/subnets/{4}
    new_pool.network_configuration = batchmodels.NetworkConfiguration(
        self.vnet + "/subnets/default")

    try:
        batch_client.pool.add(new_pool)
    except Exception as e:  # was Python 2 syntax: `except Exception, e`
        # Best-effort: report the failure but do not propagate it,
        # matching the original behavior.
        print(e)