def test_batch_create_pools(self, **kwargs):
    """Integration test covering pool-creation variants against the Batch service.

    Exercises, in order: node-agent SKU listing; an IaaS (Windows) pool with
    pool-level user accounts; pool node counts; pools with network
    configuration, custom image, OS disk, data disk, and application
    licenses; and pool listing with max-results/filter options.

    NOTE(review): statement order matters — recorded-test playback matches
    requests in sequence; do not reorder the service calls.
    """
    client = self.create_sharedkey_client(**kwargs)

    # Test List Node Agent SKUs
    response = client.account.list_node_agent_skus()
    response = list(response)
    self.assertTrue(len(response) > 1)
    # NOTE(review): assumes the Windows SKU is returned last by the service —
    # fragile if the service adds SKUs; verify against current recordings.
    self.assertEqual(response[-1].id, "batch.node.windows amd64")
    self.assertEqual(response[-1].os_type.value, "windows")
    self.assertTrue(len(response[-1].verified_image_references) > 1)

    # Test Create Iaas Pool
    # Two pool-level user accounts: one standard, one elevated to admin
    # (third positional arg is the elevation level).
    users = [
        models.UserAccount('test-user-1', 'kt#_gahr!@aGERDXA'),
        models.UserAccount('test-user-2', 'kt#_gahr!@aGERDXA', models.ElevationLevel.admin)
    ]
    test_iaas_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_iaas_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='MicrosoftWindowsServer',
                offer='WindowsServer',
                sku='2016-Datacenter-smalldisk'
            ),
            node_agent_sku_id='batch.node.windows amd64',
            # Positional True -> presumably enable_automatic_updates; confirm
            # against the models.WindowsConfiguration signature.
            windows_configuration=models.WindowsConfiguration(True)),
        task_scheduling_policy=models.TaskSchedulingPolicy(models.ComputeNodeFillType.pack),
        user_accounts=users
    )
    # pool.add returns None on success.
    response = client.pool.add(test_iaas_pool)
    self.assertIsNone(response)

    # Test list pool node count
    counts = list(client.account.list_pool_node_counts())
    self.assertIsNotNone(counts)
    self.assertEqual(len(counts), 1)
    self.assertEqual(counts[0].pool_id, test_iaas_pool.id)
    self.assertIsNotNone(counts[0].dedicated)
    # Pool was just created — no nodes have been allocated yet.
    self.assertEqual(counts[0].dedicated.total, 0)
    self.assertEqual(counts[0].low_priority.total, 0)

    # Test Create Pool with Network Configuration
    # Fake subnet ID (zeroed subscription) — the add below is expected to be
    # rejected by the service with InvalidPropertyValue.
    network_config = models.NetworkConfiguration('/subscriptions/00000000-0000-0000-0000-000000000000'
                                                 '/resourceGroups/test'
                                                 '/providers/Microsoft.Network'
                                                 '/virtualNetworks/vnet1'
                                                 '/subnets/subnet1')
    test_network_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_network_'),
        vm_size='Standard_A1',
        network_configuration=network_config,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04')
    )
    self.assertBatchError('InvalidPropertyValue',
                          client.pool.add,
                          test_network_pool,
                          models.PoolAddOptions(timeout=45))

    # Test Create Pool with Custom Image
    # Fake image resource ID — also expected to fail with InvalidPropertyValue.
    test_image_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_image_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                virtual_machine_image_id="/subscriptions/00000000-0000-0000-0000-000000000000"
                                         "/resourceGroups/test"
                                         "/providers/Microsoft.Compute"
                                         "/images/FakeImage"
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04'
        )
    )
    self.assertBatchError('InvalidPropertyValue',
                          client.pool.add,
                          test_image_pool,
                          models.PoolAddOptions(timeout=45))

    # Test Create Pool with OSDisk
    os_disk = models.OSDisk(caching=models.CachingType.read_write)
    test_osdisk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_osdisk_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            os_disk=os_disk)
    )
    response = client.pool.add(test_osdisk_pool)
    self.assertIsNone(response)
    # Round-trip: the caching setting must survive a get().
    osdisk_pool = client.pool.get(test_osdisk_pool.id)
    self.assertEqual(osdisk_pool.virtual_machine_configuration.os_disk.caching,
                     os_disk.caching)

    # Test Create Pool with Data Disk
    data_disk = models.DataDisk(lun=1, disk_size_gb=50)
    test_disk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_disk_'),
        vm_size='Standard_A1',
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_disk_pool)
    self.assertIsNone(response)
    disk_pool = client.pool.get(test_disk_pool.id)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].lun, 1)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].disk_size_gb, 50)

    # Test Create Pool with Application Licenses
    test_app_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_app_'),
        vm_size='Standard_A1',
        application_licenses=["maya"],
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='16.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 16.04',
            # Reuses the data_disk defined for the previous pool.
            data_disks=[data_disk])
    )
    response = client.pool.add(test_app_pool)
    self.assertIsNone(response)
    app_pool = client.pool.get(test_app_pool.id)
    self.assertEqual(app_pool.application_licenses[0], "maya")

    # Test List Pools without Filters
    # At least the IaaS/osdisk/disk/app pools created above should be present.
    pools = list(client.pool.list())
    self.assertTrue(len(pools) > 1)

    # Test List Pools with Maximum
    options = models.PoolListOptions(max_results=1)
    pools = client.pool.list(options)
    pools.next()  # fetch the first page
    self.assertEqual(len(pools.current_page), 1)

    # Test List Pools with Filter
    options = models.PoolListOptions(
        filter='startswith(id,\'batch_app_\')',
        select='id,state',
        expand='stats')
    pools = list(client.pool.list(options))
    self.assertEqual(len(pools), 1)
def check_or_create_pool(self, id=None):
    """Return the Batch pool named `id`, creating it (and its job) if absent.

    If the pool exists: record `self.pool_id`, try to locate an existing job
    bound to the pool (recording `self.job_id`), start the server job if no
    job was found, and return the pool. Otherwise, build and create a pool
    from config ('POOL' section), then start the server job.

    NOTE(review): the parameter name `id` shadows the builtin and cannot be
    renamed without breaking keyword callers.
    """
    if id is None:
        id = self.config.get('POOL', 'id')

    self.pool_id = id

    if self.client.pool.exists(id):
        found_job = False
        # Update the Job ID here: reuse the first job already bound to this pool.
        for job in self.client.job.list():
            if job.pool_info.pool_id == self.pool_id:
                self.job_id = job.id
                found_job = True
                break
        if not found_job:
            self.start_mc_server_job_pool()  # Restart Jobs for this pool - this is necessary!
        return self.client.pool.get(id)

    # Pool does not exist — build it from config.
    # Both values come back as strings; parsed with isdecimal() guards below.
    api_port = self.config.get('POOL', 'api_port')
    min_count = self.config.get('POOL', 'mincount')

    # Custom game-server image from a Shared Image Gallery (pinned to 1.0.0).
    image_reference = batchmodels.ImageReference(
        virtual_machine_image_id=
        "/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/polycraft-game/providers/Microsoft.Compute/galleries/polycraftImgGallery/images/polycraftBestGameServerV1/versions/1.0.0"
    )

    vmc = batchmodels.VirtualMachineConfiguration(
        image_reference=image_reference,
        node_agent_sku_id="batch.node.ubuntu 18.04")

    # Pool-level user account with admin elevation (password redacted in source).
    users = [
        batchmodels.UserAccount(
            name='azureuser',
            password='******',
            elevation_level=batchmodels.ElevationLevel.admin),
        # batchmodels.UserAccount(
        #     name='pool-nonadmin',
        #     password='******',
        #     elevation_level=batchmodels.ElevationLevel.non_admin)
    ]

    # Shell prefix that blocks until all apt/dpkg locks are free, so package
    # commands don't race the unattended-upgrade daemons.
    # Thank you Ask Ubuntu https://askubuntu.com/a/373478
    wait_for_locks = 'while sudo fuser /var/lib/dpkg/lock /var/lib/apt/lists/lock /var/cache/apt/archives/lock /var/lib/dpkg/lock-frontend >/dev/null 2>&1; do echo "Waiting for release of apt locks"; sleep 2; done; '

    # Node start task: prepares the game server (permissions, apt setup, file
    # share mount, world extraction). Command ORDER is load-bearing.
    # NOTE: Always use DOUBLE QUOTES within commands as azure prepends the entire string with a single quote.
    start_task = batchmodels.StartTask(
        command_line=helpers.wrap_commands_in_shell(
            'linux',
            [
                'whoami',
                'printenv',
                'usermod -aG sudo azureuser',
                # Disable the apt timers that would otherwise grab the dpkg lock.
                'sudo systemctl disable --now apt-daily.timer',
                'sudo systemctl disable --now apt-daily-upgrade.timer',
                'sudo systemctl daemon-reload',
                'cd /home/polycraft',
                'chmod -R 777 *',
                'rm /home/polycraft/oxygen/mods/*.jar',
                'cd /home/polycraft/oxygen/',
                'echo "[DEBUG] removing helium..."',
                'ls -l',
                f'sudo rm -rf /home/polycraft/oxygen/{self.config.get("SERVER","worldName")}',
                'sudo rm -f *.zip',
                'echo "[DEBUG] removed helium?"',
                'ls -l',
                # Stop the crontabs from running
                'sudo rm /var/spool/cron/crontabs/*',
                # Taken from: https://stackoverflow.com/questions/45269225/ansible-playbook-fails-to-lock-apt/51919678#51919678
                'sudo systemd-run --property="After=apt-daily.service apt-daily-upgrade.service" --wait /bin/true',
                'sudo apt-get -y purge unattended-upgrades',
                'sudo apt-get -y update',
                wait_for_locks + 'sudo apt-get install software-properties-common -y',
                # 'while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do sleep 1; done; sudo apt-add-repository universe',
                wait_for_locks + 'sudo apt-add-repository universe',
                # Mount the Polycraft Game FileShare
                wait_for_locks + 'sudo apt-get install cifs-utils -y && sudo mkdir -p /mnt/PolycraftGame/',
                f'mount -t cifs //polycraftbestbatch.file.core.windows.net/best-batch-round-1-test /mnt/PolycraftGame -o vers=3.0,username={self.credentials.get("Storage", "storageaccountname")},password={self.credentials.get("Storage", "storageaccountkey")},dir_mode=0777,file_mode=0777,serverino && ls /mnt/PolycraftGame',
                # Copy the default world file to the right folder
                f'cp /mnt/PolycraftGame/{self.config.get("SERVER","fileShareFolder")}/worlds/{self.config.get("SERVER","worldZipName")}.tar.gz /home/polycraft/oxygen/',
                'cd /home/polycraft/oxygen/',
                # 'sudo rm -r helium',
                f'gzip -d /home/polycraft/oxygen/{self.config.get("SERVER","worldZipName")}.tar.gz',
                'echo "[DEBUG] extracting the tar"',
                'ls -l',
                f'sudo tar -xf {self.config.get("SERVER","worldZipName")}.tar',
                'echo "[DEBUG] extracted the tar"',
                'ls -l',
                # 'sudo mv helium-backup-0924 helium',
                f'sudo mv helium {self.config.get("SERVER","worldName")}',  # TODO Remove this once we finalize the server name?
                f'chmod -R 777 {self.config.get("SERVER","worldName")}/',  # NOTE: The folder inside here is called helium!
                'echo "[DEBUG] Adjusted permissions for helium?"',
                'ls -l',
            ]),
        wait_for_success=True,
        # user_accounts=users,
        # Run the start task as a pool-scoped auto-user with admin elevation.
        user_identity=batchmodels.UserIdentity(
            # user_name='azureuser',
            auto_user=batchmodels.AutoUserSpecification(
                scope=batchmodels.AutoUserScope.pool,
                elevation_level=batchmodels.ElevationLevel.admin)
            # ),
        ),
    )

    # Inbound NAT pools: game traffic on 25565, API traffic on api_port
    # (falling back to 9007 when the config value is missing/non-numeric).
    net_config = batchmodels.NetworkConfiguration(
        # subnet_id="/subscriptions/889566d5-6e5d-4d31-a82d-b60603b3e50b/resourceGroups/vnet-eastus-azurebatch/providers/Microsoft.Network/virtualNetworks/vnet-eastus-azurebatch/subnets/main-batch-subnet",
        endpoint_configuration=batchmodels.PoolEndpointConfiguration(
            inbound_nat_pools=[
                batchmodels.InboundNATPool(
                    name='minecraftServer',
                    protocol='tcp',
                    backend_port=25565,
                    frontend_port_range_start=44000,
                    frontend_port_range_end=44099,
                    network_security_group_rules=[
                        batchmodels.NetworkSecurityGroupRule(
                            priority=199,
                            access='allow',
                            source_address_prefix='*'),
                    ]),
                batchmodels.InboundNATPool(
                    name='api_port',
                    protocol='tcp',
                    backend_port=int(api_port) if api_port and api_port.isdecimal() else 9007,
                    frontend_port_range_start=44500,
                    frontend_port_range_end=44599,
                    network_security_group_rules=[
                        # batchmodels.NetworkSecurityGroupRule(
                        #     priority=170,
                        #     access='allow',
                        #     source_address_prefix='192.168.1.0/24'  # TODO: is this the right subnet?
                        # ),
                        batchmodels.NetworkSecurityGroupRule(
                            priority=198,
                            access='allow',  # 'deny'
                            source_address_prefix='*'  # TODO: only allow access to the right ports
                        )
                    ]),
            ]))

    pool = batchmodels.PoolAddParameter(
        id=id,
        vm_size=self.config.get('POOL', 'vm_size'),
        # Default to a single dedicated node when mincount is missing/non-numeric.
        target_dedicated_nodes=int(min_count) if min_count and min_count.isdecimal() else 1,
        virtual_machine_configuration=vmc,
        start_task=start_task,
        user_accounts=users,
        network_configuration=net_config)

    helpers.create_pool_if_not_exist(self.client, pool)
    self.start_mc_server_job_pool(pool.target_dedicated_nodes)
output_container_sas_url = get_container_sas_url( blob_client, output_container_name, azureblob.BlobPermissions.WRITE) # Create a Batch service client. We'll now be interacting with the Batch # service in addition to Storage credentials = batchauth.SharedKeyCredentials(_BATCH_ACCOUNT_NAME, _BATCH_ACCOUNT_KEY) batch_client = batch.BatchServiceClient(credentials, batch_url=_BATCH_ACCOUNT_URL) try: users = [ batchmodels.UserAccount( name='admin', password='******', elevation_level=batchmodels.ElevationLevel.admin) ] # Create the pool that will contain the compute nodes that will execute the # tasks. if to_create_pool: create_pool(batch_client, _POOL_ID, users) # Create the job that will run the tasks. if to_create_job: create_job(batch_client, _JOB_ID, resource_files, _POOL_ID) # Add the tasks to the job. Pass the input files and a SAS URL # to the storage container for output files. add_tasks(batch_client, _JOB_ID, input_files, output_container_sas_url)
def test_batch_create_pools(self, **kwargs):
    """Integration test covering pool-creation variants (newer SDK surface).

    Exercises, in order: supported-image listing; an IaaS (Windows) pool with
    pool-level user accounts; pool node counts; pools with network
    configuration, gallery image, data disk, application licenses, and Azure
    Disk Encryption; and pool listing with max-results/filter options.

    NOTE(review): this duplicates the name of the older test method in this
    file — if both live in the same class, the later definition shadows the
    earlier one. Statement order matters for recorded-test playback.
    """
    client = self.create_sharedkey_client(**kwargs)

    # Test List Node Agent SKUs
    response = client.account.list_supported_images()
    response = list(response)
    self.assertTrue(len(response) > 1)
    self.assertIsNotNone(response[-1].image_reference)

    # Test Create Iaas Pool
    # Two pool-level user accounts: one standard, one elevated to admin
    # (passwords redacted in source).
    users = [
        models.UserAccount(name='test-user-1', password='******'),
        models.UserAccount(name='test-user-2', password='******',
                           elevation_level=models.ElevationLevel.admin)
    ]
    test_iaas_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_iaas_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='MicrosoftWindowsServer',
                offer='WindowsServer',
                sku='2016-Datacenter-smalldisk'
            ),
            node_agent_sku_id='batch.node.windows amd64',
            windows_configuration=models.WindowsConfiguration(enable_automatic_updates=True)),
        task_scheduling_policy=models.TaskSchedulingPolicy(node_fill_type=models.ComputeNodeFillType.pack),
        user_accounts=users
    )
    # pool.add returns None on success.
    response = client.pool.add(test_iaas_pool)
    self.assertIsNone(response)

    # Test list pool node count
    counts = list(client.account.list_pool_node_counts())
    self.assertIsNotNone(counts)
    self.assertEqual(len(counts), 1)
    self.assertEqual(counts[0].pool_id, test_iaas_pool.id)
    self.assertIsNotNone(counts[0].dedicated)
    # Pool was just created — no nodes allocated or leaving yet.
    self.assertEqual(counts[0].dedicated.total, 0)
    self.assertEqual(counts[0].dedicated.leaving_pool, 0)
    self.assertEqual(counts[0].low_priority.total, 0)

    # Test Create Pool with Network Configuration
    # TODO Public IP tests
    # Fake subnet ID (zeroed subscription) — the add below is expected to be
    # rejected by the service with InvalidPropertyValue.
    network_config = models.NetworkConfiguration(
        subnet_id='/subscriptions/00000000-0000-0000-0000-000000000000'
                  '/resourceGroups/test'
                  '/providers/Microsoft.Network'
                  '/virtualNetworks/vnet1'
                  '/subnets/subnet1')
    test_network_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_network_'),
        vm_size=DEFAULT_VM_SIZE,
        network_configuration=network_config,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04')
    )
    self.assertBatchError('InvalidPropertyValue',
                          client.pool.add,
                          test_network_pool,
                          models.PoolAddOptions(timeout=45))

    # Test Create Pool with a (fake) Shared Image Gallery image — also
    # expected to fail with InvalidPropertyValue.
    test_image_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_image_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                virtual_machine_image_id="/subscriptions/00000000-0000-0000-0000-000000000000"
                                         "/resourceGroups/test"
                                         "/providers/Microsoft.Compute"
                                         "/gallery/FakeGallery"
                                         "/images/FakeImage"
                                         "/versions/version"
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04'
        )
    )
    self.assertBatchError('InvalidPropertyValue',
                          client.pool.add,
                          test_image_pool,
                          models.PoolAddOptions(timeout=45))

    # Test Create Pool with Data Disk
    data_disk = models.DataDisk(lun=1, disk_size_gb=50)
    test_disk_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_disk_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04',
            data_disks=[data_disk])
    )
    response = client.pool.add(test_disk_pool)
    self.assertIsNone(response)
    # Round-trip: data-disk settings must survive a get().
    disk_pool = client.pool.get(test_disk_pool.id)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].lun, 1)
    self.assertEqual(disk_pool.virtual_machine_configuration.data_disks[0].disk_size_gb, 50)

    # Test Create Pool with Application Licenses
    test_app_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_app_'),
        vm_size=DEFAULT_VM_SIZE,
        application_licenses=["maya"],
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04',
            # Reuses the data_disk defined for the previous pool.
            data_disks=[data_disk])
    )
    response = client.pool.add(test_app_pool)
    self.assertIsNone(response)
    app_pool = client.pool.get(test_app_pool.id)
    self.assertEqual(app_pool.application_licenses[0], "maya")

    # Test Create Pool with Azure Disk Encryption
    test_ade_pool = models.PoolAddParameter(
        id=self.get_resource_name('batch_ade_'),
        vm_size=DEFAULT_VM_SIZE,
        virtual_machine_configuration=models.VirtualMachineConfiguration(
            image_reference=models.ImageReference(
                publisher='Canonical',
                offer='UbuntuServer',
                sku='18.04-LTS'
            ),
            # Encrypt only the temporary disk on each node.
            disk_encryption_configuration=models.DiskEncryptionConfiguration(
                targets=[models.DiskEncryptionTarget.temporary_disk]
            ),
            node_agent_sku_id='batch.node.ubuntu 18.04')
    )
    response = client.pool.add(test_ade_pool)
    self.assertIsNone(response)
    ade_pool = client.pool.get(test_ade_pool.id)
    self.assertEqual(ade_pool.virtual_machine_configuration.disk_encryption_configuration.targets,
                     [models.DiskEncryptionTarget.temporary_disk])

    # Test List Pools without Filters
    pools = list(client.pool.list())
    self.assertTrue(len(pools) > 1)

    # Test List Pools with Maximum
    options = models.PoolListOptions(max_results=1)
    pools = client.pool.list(options)
    pools.next()  # fetch the first page
    self.assertEqual(len(pools.current_page), 1)

    # Test List Pools with Filter
    options = models.PoolListOptions(
        filter='startswith(id,\'batch_app_\')',
        select='id,state',
        expand='stats')
    pools = list(client.pool.list(options))
    self.assertEqual(len(pools), 1)