def test_storage_blob_azcopy_remove(self, resource_group, storage_account_info, test_dir):
    """End-to-end check of `storage remove` (azcopy-backed) on a synced container.

    Repeatedly syncs ``test_dir`` into a fresh container (41 blobs after a
    full sync, per the checks below) and verifies the blob counts left by the
    various remove modes: plain, --recursive, --include-pattern /
    --exclude-pattern, and --include-path.
    """
    storage_account, _ = storage_account_info
    container = self.create_container(storage_account_info)
    # sync directory
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 41))
    # Removing the single 'readme' blob drops the count by one.
    self.cmd('storage remove -c {} -n readme --account-name {}'.format(
        container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 40))
    # Removing 'apple' deletes 10 blobs (40 -> 30).
    self.cmd('storage remove -c {} -n apple --account-name {}'.format(
        container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 30))
    # Non-recursive remove of 'butter' deletes 10 blobs (30 -> 20), leaving
    # nested ones behind.
    self.cmd('storage remove -c {} -n butter --account-name {}'.format(
        container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 20))
    # --recursive removes what is left under 'butter' (20 -> 10).
    self.cmd(
        'storage remove -c {} -n butter --account-name {} --recursive'.format(
            container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 10))
    # Removing a nonexistent path ('duff') leaves the count unchanged.
    self.cmd('storage remove -c {} -n duff --account-name {}'.format(
        container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 10))
    # sync directory (restore the full 41 blobs)
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 41))
    # Excluding "file_*" makes the recursive remove of 'butter' a no-op, i.e.
    # everything under it matches that pattern.
    self.cmd(
        'storage remove -c {} -n butter --account-name {} --recursive --exclude-pattern "file_*"'
        .format(container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 41))
    # Non-recursive with exclude "file_1*": removes 9 blobs (41 -> 32).
    self.cmd(
        'storage remove -c {} -n butter --account-name {} --exclude-pattern "file_1*"'
        .format(container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 32))
    # Recursive with the same exclusion removes 9 more nested blobs (32 -> 23).
    self.cmd(
        'storage remove -c {} -n butter --account-name {} --recursive --exclude-pattern "file_1*"'
        .format(container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 23))
    # sync directory (restore the full set again)
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 41))
    # Recursive include "file_1*": only the 2 matching blobs go (41 -> 39).
    self.cmd(
        'storage remove -c {} -n butter --account-name {} --recursive --include-pattern "file_1*"'
        .format(container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 39))
    # Non-recursive include "file_*": removes the 9 top-level matches (39 -> 30).
    self.cmd(
        'storage remove -c {} -n butter --account-name {} --include-pattern "file_*"'
        .format(container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 30))
    # Recursive include "file_*": removes the 9 remaining nested matches (30 -> 21).
    self.cmd(
        'storage remove -c {} -n butter --account-name {} --recursive --include-pattern "file_*"'
        .format(container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 21))
    # --include-path narrows the remove to 'apple'; include and exclude
    # patterns combine (remove "file*" but keep "file_1*"), 21 -> 12.
    self.cmd(
        'storage remove -c {} --include-path apple --account-name {} --include-pattern "file*" --exclude-pattern "file_1*" --recursive'
        .format(container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 12))
    # Recursive remove with no name wipes the rest of the container.
    self.cmd('storage remove -c {} --account-name {} --recursive'.format(
        container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account),
        checks=JMESPathCheck('length(@)', 0))
def test_storage_account_revoke_delegation_keys(self, resource_group, storage_account):
    """User-delegation SAS tokens stop working after `revoke-delegation-keys`.

    Creates an AAD user, grants it Storage Blob Data Contributor on the
    account, mints user-delegation SAS tokens (``--as-user``), verifies they
    work, then revokes the account's delegation keys and verifies the SAS is
    rejected.
    """
    if self.run_under_service_principal():
        return  # this test delete users which are beyond a SP's capacity, so quit...
    from datetime import datetime, timedelta
    import time
    expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
    account_info = self.get_account_info(resource_group, storage_account)
    c = self.create_container(account_info)
    b = self.create_random_name('blob', 24)
    local_file = self.create_temp_file(128, full_random=False)
    self.kwargs.update({
        'expiry': expiry,
        'account': storage_account,
        'container': c,
        'local_file': local_file,
        'blob': b,
        'rg': resource_group
    })
    result = self.cmd(
        'storage account show -n {account} -g {rg}').get_output_in_json()
    # Account resource id is used as the role-assignment scope below.
    self.kwargs['sc_id'] = result['id']
    user = self.create_random_name('testuser', 15)
    self.kwargs['upn'] = user + '@azuresdkteam.onmicrosoft.com'
    self.cmd(
        'ad user create --display-name tester123 --password Test123456789 --user-principal-name {upn}'
    )
    time.sleep(
        15
    )  # By-design, it takes some time for RBAC system propagated with graph object change
    self.cmd(
        'role assignment create --assignee {upn} --role "Storage Blob Data Contributor" --scope {sc_id}'
    )
    # SAS minted with --as-user / --auth-mode login is backed by a user
    # delegation key rather than the account key.
    container_sas = self.cmd(
        'storage blob generate-sas --account-name {account} -n {blob} -c {container} --expiry {expiry} --permissions '
        'rw --https-only --as-user --auth-mode login -otsv').output
    self.kwargs['container_sas'] = container_sas
    self.cmd(
        'storage blob upload -c {container} -n {blob} -f "{local_file}" --account-name {account} --sas-token {container_sas}'
    )
    blob_sas = self.cmd(
        'storage blob generate-sas --account-name {account} -n {blob} -c {container} --expiry {expiry} --permissions '
        'r --https-only --as-user --auth-mode login -otsv').output
    self.kwargs['blob_sas'] = blob_sas
    # The delegation SAS works before revocation...
    self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}') \
        .assert_with_checks(JMESPathCheck('name', b))
    self.cmd('storage account revoke-delegation-keys -n {account} -g {rg}')
    time.sleep(
        15
    )  # By-design, it takes some time for RBAC system propagated with graph object change
    # ...and is rejected afterwards.
    self.cmd(
        'storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}',
        expect_failure=True)
def test_batchai_cluster_with_file_systems(self, resource_group, storage_account):
    """Create a BatchAI cluster with mounted Azure file share and blob container."""
    # Tests creation of a cluster with mounted file systems defined in config.
    # 1. Create an Azure File Share and Azure Blob Container to mount on the cluster.
    # 2. Create a cluster and verify parameters.
    # 3. Verify that cluster was able to start nodes.
    with self._given_configured_environment(resource_group, storage_account):
        # Create a file share 'share' and blob container 'container' to be mounted on cluster nodes.
        self.cmd('az storage share create -n share')
        self.cmd('az storage container create -n container')
        self.cmd('az batchai workspace create -g {0} -n workspace'.format(resource_group),
                 checks=[JMESPathCheck('name', 'workspace')])
        # The credentialsInfo.accountKey checks expect None: the account key
        # must not be echoed back in the returned cluster resource.
        self.cmd(
            'az batchai cluster create -g {0} -w workspace -n cluster -f {1} '
            '--afs-name share --bfs-name container '
            '-u DemoUser -k {2}'.format(resource_group, _data_file('simple_cluster.json'),
                                        _data_file('key.txt')),
            checks=[
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].accountName', storage_account),
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].azureFileUrl',
                              'https://{0}.file.core.windows.net/share'.format(storage_account)),
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].relativeMountPath', 'afs'),
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].credentialsInfo.accountKey', None),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].accountName', storage_account),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].containerName', 'container'),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].relativeMountPath', 'bfs'),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].credentialsInfo.accountKey', None),
                JMESPathCheck('userAccountSettings.adminUserName', 'DemoUser'),
                JMESPathCheck('userAccountSettings.adminUserPassword', None)])
        # Give file server and cluster to finish preparation.
        time.sleep(NODE_STARTUP_TIME * 2)
        # Check the node in the cluster successfully started - was able to mount nfs and azure filesystem.
        self.cmd('az batchai cluster show -g {0} -w workspace -n cluster'.format(resource_group),
                 checks=[JMESPathCheck('nodeStateCounts.idleNodeCount', 1)])
def test_storage_account_service_endpoints(self, resource_group, storage_account):
    """Exercise account network rules: bypass/default-action, IP and VNet rules."""
    kwargs = {
        'rg': resource_group,
        'acc': storage_account,
        'vnet': 'vnet1',
        'subnet': 'subnet1'
    }
    self.cmd(
        'storage account create -g {rg} -n {acc} --bypass Metrics --default-action Deny'
        .format(**kwargs),
        checks=[
            JMESPathCheck('networkRuleSet.bypass', 'Metrics'),
            JMESPathCheck('networkRuleSet.defaultAction', 'Deny')
        ])
    self.cmd(
        'storage account update -g {rg} -n {acc} --bypass Logging --default-action Allow'
        .format(**kwargs),
        checks=[
            JMESPathCheck('networkRuleSet.bypass', 'Logging'),
            JMESPathCheck('networkRuleSet.defaultAction', 'Allow')
        ])
    # Generic --set with the snake_case property path also flips the default
    # action, while bypass stays as previously set.
    self.cmd(
        'storage account update -g {rg} -n {acc} --set networkRuleSet.default_action=deny'
        .format(**kwargs),
        checks=[
            JMESPathCheck('networkRuleSet.bypass', 'Logging'),
            JMESPathCheck('networkRuleSet.defaultAction', 'Deny')
        ])
    # The subnet is given the Microsoft.Storage service endpoint before being
    # used in a storage network rule.
    self.cmd(
        'network vnet create -g {rg} -n {vnet} --subnet-name {subnet}'.format(**kwargs))
    self.cmd(
        'network vnet subnet update -g {rg} --vnet-name {vnet} -n {subnet} --service-endpoints Microsoft.Storage'
        .format(**kwargs))
    # Add two IP rules (single address and CIDR) plus one virtual-network rule.
    self.cmd(
        'storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.1.2.3'
        .format(**kwargs))
    self.cmd(
        'storage account network-rule add -g {rg} --account-name {acc} --ip-address 25.2.0.0/24'
        .format(**kwargs))
    self.cmd(
        'storage account network-rule add -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'
        .format(**kwargs))
    self.cmd(
        'storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs),
        checks=[
            JMESPathCheck('length(ipRules)', 2),
            JMESPathCheck('length(virtualNetworkRules)', 1)
        ])
    # Remove one rule of each kind and confirm the counts drop accordingly.
    self.cmd(
        'storage account network-rule remove -g {rg} --account-name {acc} --ip-address 25.1.2.3'
        .format(**kwargs))
    self.cmd(
        'storage account network-rule remove -g {rg} --account-name {acc} --vnet-name {vnet} --subnet {subnet}'
        .format(**kwargs))
    self.cmd(
        'storage account network-rule list -g {rg} --account-name {acc}'.format(**kwargs),
        checks=[
            JMESPathCheck('length(ipRules)', 1),
            JMESPathCheck('length(virtualNetworkRules)', 0)
        ])
def test_show_usage(self):
    """Verify account usage reporting is available for the westus region."""
    usage_check = JMESPathCheck('name.value', 'StorageAccounts')
    self.cmd('storage account show-usage -l westus', checks=usage_check)
def verify_blob_upload_and_download(self, group, account, file_size_kb, blob_type,
                                    block_count=0, skip_download=False):
    """Shared driver for blob upload/download round-trips.

    Uploads a ``file_size_kb``-KB temp file as ``blob_type``, checks
    existence/show/SAS/property updates, optionally downloads it back
    (full and ranged), and finally inspects the VCR cassette to confirm
    ``block_count`` Put Block requests were issued.
    """
    local_dir = self.create_temp_dir()
    local_file = self.create_temp_file(file_size_kb)
    blob_name = self.create_random_name(prefix='blob', length=24)
    account_info = self.get_account_info(group, account)
    container = self.create_container(account_info)
    self.storage_cmd('storage blob exists -n {} -c {}', account_info, blob_name, container) \
        .assert_with_checks(JMESPathCheck('exists', False))
    self.storage_cmd('storage blob upload -c {} -f "{}" -n {} --type {}', account_info,
                     container, local_file, blob_name, blob_type)
    self.storage_cmd('storage blob exists -n {} -c {}', account_info, blob_name, container) \
        .assert_with_checks(JMESPathCheck('exists', True))
    self.storage_cmd('storage blob list -c {} -otable --num-results 1', account_info, container)
    show_result = self.storage_cmd('storage blob show -n {} -c {}', account_info, blob_name,
                                   container).get_output_in_json()
    self.assertEqual(show_result.get('name'), blob_name)
    # Only page blobs expose a pageRanges list in `show` output.
    if blob_type == 'page':
        self.assertEqual(type(show_result.get('properties').get('pageRanges')), list)
    else:
        self.assertEqual(show_result.get('properties').get('pageRanges'), None)
    expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
    sas = self.storage_cmd('storage blob generate-sas -n {} -c {} --expiry {} --permissions '
                           'r --https-only', account_info, blob_name, container, expiry).output
    self.assertTrue(sas)
    self.assertIn('sig', sas)
    self.storage_cmd('storage blob update -n {} -c {} --content-type application/test-content',
                     account_info, blob_name, container)
    self.storage_cmd('storage blob show -n {} -c {}', account_info, blob_name, container) \
        .assert_with_checks(
            [JMESPathCheck('properties.contentSettings.contentType', 'application/test-content'),
             JMESPathCheck('properties.contentLength', file_size_kb * 1024)])
    # check that blob properties can be set back to null
    self.storage_cmd('storage blob update -n {} -c {} --content-type ""', account_info,
                     blob_name, container)
    self.storage_cmd('storage blob show -n {} -c {}', account_info, blob_name, container) \
        .assert_with_checks(JMESPathCheck('properties.contentSettings.contentType', None))
    self.storage_cmd('storage blob service-properties show', account_info) \
        .assert_with_checks(JMESPathCheck('hourMetrics.enabled', True))
    if not skip_download:
        downloaded = os.path.join(local_dir, 'test.file')
        self.storage_cmd('storage blob download -n {} -c {} --file "{}"', account_info,
                         blob_name, container, downloaded)
        self.assertTrue(os.path.isfile(downloaded), 'The file is not downloaded.')
        self.assertEqual(file_size_kb * 1024, os.stat(downloaded).st_size,
                         'The download file size is not right.')
        # Ranged download: bytes 10..499 inclusive -> 490 bytes.
        self.storage_cmd('storage blob download -n {} -c {} --file "{}" --start-range 10 --end-range 499',
                         account_info, blob_name, container, downloaded)
        self.assertEqual(490, os.stat(downloaded).st_size,
                         'The download file size is not right.')
    # Verify the requests in cassette to ensure the count of the block requests is expected
    # This portion of validation doesn't verify anything during playback because the recording
    # is fixed.
    def is_block_put_req(request):
        # A Put Block request is a PUT to a container/blob path with both
        # comp=block and a blockid query parameter.
        if request.method != 'PUT':
            return False
        if not re.search('/cont[0-9]+/blob[0-9]+', request.path):
            return False
        comp_block = False
        has_blockid = False
        for key, value in request.query:
            if key == 'comp' and value == 'block':
                comp_block = True
            elif key == 'blockid':
                has_blockid = True
        return comp_block and has_blockid
    requests = self.cassette.requests
    put_blocks = [request for request in requests if is_block_put_req(request)]
    self.assertEqual(block_count, len(put_blocks),
                     'The expected number of block put requests is {} but the actual '
                     'number is {}.'.format(block_count, len(put_blocks)))
def verify_entity_operations(self, account_info, table_name):
    """CRUD and paged-query coverage for `storage entity` on ``table_name``."""
    # Insert an entity with a binary property.
    # NOTE(review): the '[email protected]' token below looks like a scrubbed
    # 'binaryProperty@odata.type' type annotation — confirm against the
    # original recording before relying on it.
    self.storage_cmd(
        'storage entity insert -t {} -e rowkey=001 partitionkey=001 name=test value=something '
        'binaryProperty=AAECAwQF [email protected]=Edm.Binary', account_info, table_name)
    self.storage_cmd('storage entity show -t {} --row-key 001 --partition-key 001',
                     account_info, table_name) \
        .assert_with_checks(JMESPathCheck('name', 'test'),
                            JMESPathCheck('value', 'something'),
                            JMESPathCheck('binaryProperty.value', 'AAECAwQF'))
    # --select projects a subset of properties; unselected ones come back None.
    self.storage_cmd(
        'storage entity show -t {} --row-key 001 --partition-key 001 --select name',
        account_info,
        table_name).assert_with_checks(JMESPathCheck('name', 'test'),
                                       JMESPathCheck('value', None),
                                       JMESPathCheck('binaryProperty.value', None))
    # merge updates the named properties and keeps the rest.
    self.storage_cmd('storage entity merge -t {} -e rowkey=001 partitionkey=001 name=test value=newval',
                     account_info, table_name)
    self.storage_cmd('storage entity show -t {} --row-key 001 --partition-key 001',
                     account_info, table_name) \
        .assert_with_checks(JMESPathCheck('name', 'test'),
                            JMESPathCheck('value', 'newval'),
                            JMESPathCheck('binaryProperty.value', 'AAECAwQF'))
    # replace swaps the whole entity; previous properties disappear.
    self.storage_cmd('storage entity replace -t {} -e rowkey=001 partitionkey=001 cat=hat',
                     account_info, table_name)
    self.storage_cmd('storage entity show -t {} --row-key 001 --partition-key 001',
                     account_info, table_name) \
        .assert_with_checks(JMESPathCheck('cat', 'hat'),
                            JMESPathCheck('name', None),
                            JMESPathCheck('value', None),
                            JMESPathCheck('binaryProperty.value', None))
    # After delete, show must fail.
    self.storage_cmd('storage entity delete -t {} --row-key 001 --partition-key 001',
                     account_info, table_name)
    self.storage_cmd_negative('storage entity show -t {} --row-key 001 --partition-key 001',
                              account_info, table_name)
    # Re-insert two entities to exercise paged query + continuation marker.
    self.storage_cmd('storage entity insert -t {} -e rowkey=001 partitionkey=001 name=test value=something '
                     'binaryProperty=AAECAwQF [email protected]=Edm.Binary', account_info, table_name)
    self.storage_cmd('storage entity insert -t {} -e rowkey=002 partitionkey=002 name=test2 value=something2',
                     account_info, table_name)
    result = self.storage_cmd('storage entity query -t {} --num-results 1', account_info,
                              table_name).get_output_in_json()
    marker = result.get('nextMarker')
    # Resume the query from the returned continuation marker; exactly one
    # entity remains.
    self.storage_cmd('storage entity query -t {} --marker nextpartitionkey={} nextrowkey={}',
                     account_info, table_name, marker.get('nextpartitionkey'),
                     marker.get('nextrowkey')).assert_with_checks(
                         JMESPathCheck('length(items)', 1))
def test_storage_adls_blob(self, resource_group):
    """ADLS Gen2 (hierarchical namespace) blob directory and ACL operations.

    Covers: directory create/exists/show/delete, POSIX permissions and ACLs
    on both blobs and directories, and directory metadata round-trips.

    Fix: removed redundant extra arguments that were silently ignored by
    str.format — `acl` was passed to both `access update` commands whose
    format strings have no placeholder for it, and `directory` was passed
    twice to `directory delete`.
    """
    storage_account = self.create_random_name(prefix='clitestaldsaccount', length=24)
    self.kwargs.update({'sc': storage_account, 'rg': resource_group})
    # Hierarchical namespace must be enabled for directory semantics.
    self.cmd(
        'storage account create -n {sc} -g {rg} --kind StorageV2 --hierarchical-namespace true --https-only '
    )
    account_info = self.get_account_info(resource_group, storage_account)
    container = self.create_container(account_info)
    directory = 'testdirectory'

    # Create a storage blob directory and check its existence
    self.storage_cmd('storage blob directory exists -c {} -d {}', account_info, container,
                     directory) \
        .assert_with_checks(JMESPathCheck('exists', False))
    self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container,
                     directory)
    self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container,
                     directory) \
        .assert_with_checks(JMESPathCheck('exists', True))
    self.storage_cmd('storage fs file list -f {}', account_info, container) \
        .assert_with_checks(JMESPathCheck('length(@)', 1)) \
        .assert_with_checks(JMESPathCheck('[0].isDirectory', True))
    # Directories carry the hdi_isfolder metadata flag.
    self.storage_cmd('storage blob directory show -c {} -d {} ', account_info, container,
                     directory) \
        .assert_with_checks(JMESPathCheck('metadata.hdi_isfolder', "true"))
    self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container,
                     directory) \
        .assert_with_checks(JMESPathCheck('permissions', "rwxr-x---"))

    # Argument validation: Throw error when using existing directory name
    with self.assertRaises(SystemExit):
        self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container,
                         directory)

    # Create a storage blob directory with explicit permissions and umask.
    directory2 = 'testdirectory2'
    self.storage_cmd(
        'storage blob directory create -c {} -d {} --permissions rwxrwxrwx --umask 0000',
        account_info, container, directory2)
    self.storage_cmd('storage blob directory show -c {} -d {} ', account_info, container,
                     directory2) \
        .assert_with_checks(JMESPathCheck('metadata.hdi_isfolder', "true"))
    self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container,
                     directory2) \
        .assert_with_checks(JMESPathCheck('permissions', "rwxrwxrwx"))

    # Storage blob access control
    local_file = self.create_temp_file(128)
    blob = self.create_random_name('blob', 24)
    self.storage_cmd('storage blob upload -c {} -f "{}" -n {}', account_info, container,
                     local_file, blob)
    acl = "user::rwx,group::r--,other::---"
    self.storage_cmd('storage blob access set -c {} -b {} -a "{}"', account_info, container,
                     blob, acl)
    self.storage_cmd('storage blob access show -c {} -b {}', account_info, container, blob) \
        .assert_with_checks(JMESPathCheck('acl', acl))
    self.storage_cmd(
        'storage blob access update -c {} -b {} --permissions "rwxrwxrwx"',
        account_info, container, blob)
    self.storage_cmd('storage blob access show -c {} -b {}', account_info, container, blob) \
        .assert_with_checks(JMESPathCheck('permissions', "rwxrwxrwx"))

    # Storage blob directory access control
    acl = "user::rwx,group::r--,other::---"
    self.storage_cmd(
        'storage blob directory access set -c {} -d {} -a "{}"',
        account_info, container, directory, acl)
    self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container,
                     directory) \
        .assert_with_checks(JMESPathCheck('acl', acl))
    self.storage_cmd(
        'storage blob directory access update -c {} -d {} --permissions "rwxrwxrwx"',
        account_info, container, directory)
    self.storage_cmd('storage blob directory access show -c {} -d {}', account_info, container,
                     directory).assert_with_checks(
                         JMESPathCheck('permissions', "rwxrwxrwx"))

    # Storage blob directory metadata
    self.storage_cmd(
        'storage blob directory metadata update -c {} -d {} --metadata "tag1=value1"',
        account_info, container, directory)
    self.storage_cmd('storage blob directory metadata show -c {} -d {} ', account_info,
                     container, directory) \
        .assert_with_checks(JMESPathCheck('tag1', "value1"))

    # Remove blob directory
    self.storage_cmd(
        'storage blob directory delete -c {} -d {} --recursive', account_info, container,
        directory)
    self.storage_cmd('storage blob directory exists -c {} -d {}', account_info, container,
                     directory) \
        .assert_with_checks(JMESPathCheck('exists', False))
def test_storage_blob_update_service_properties(self, resource_group):
    """Round-trip blob service-properties updates and verify them via `show`."""
    account_name = self.create_random_name(prefix='account', length=24)
    self.cmd('storage account create -n {} -g {} --kind StorageV2'.format(account_name,
                                                                          resource_group))
    account_info = self.get_account_info(resource_group, account_name)
    # Defaults on a fresh StorageV2 account.
    self.storage_cmd('storage blob service-properties show', account_info) \
        .assert_with_checks(JMESPathCheck('staticWebsite.enabled', False),
                            JMESPathCheck('hourMetrics.enabled', True),
                            JMESPathCheck('minuteMetrics.enabled', False),
                            JMESPathCheck('minuteMetrics.includeApis', None),
                            JMESPathCheck('logging.delete', False))
    # Flip each property once, mixing dedicated flags with generic --set paths.
    update_commands = (
        'storage blob service-properties update --static-website --index-document index.html '
        '--404-document error.html',
        'storage blob service-properties update --delete-retention --delete-retention-period 1',
        'storage blob service-properties update --set hourMetrics.enabled=false',
        'storage blob service-properties update --set minuteMetrics.enabled=true minuteMetrics.includeApis=true',
        'storage blob service-properties update --set logging.delete=true',
    )
    for update_command in update_commands:
        self.storage_cmd(update_command, account_info)
    # All updates should be reflected in a single final `show`.
    self.storage_cmd('storage blob service-properties show', account_info) \
        .assert_with_checks(JMESPathCheck('staticWebsite.enabled', True),
                            JMESPathCheck('staticWebsite.errorDocument_404Path', 'error.html'),
                            JMESPathCheck('staticWebsite.indexDocument', 'index.html'),
                            JMESPathCheck('deleteRetentionPolicy.enabled', True),
                            JMESPathCheck('deleteRetentionPolicy.days', 1),
                            JMESPathCheck('hourMetrics.enabled', False),
                            JMESPathCheck('minuteMetrics.enabled', True),
                            JMESPathCheck('minuteMetrics.includeApis', True),
                            JMESPathCheck('logging.delete', True))
def test_storage_adls_blob_directory_move(self, resource_group, test_dir):
    """`storage blob directory move` scenarios on an ADLS Gen2 account.

    Covers moves to: a nonexistent target, a name containing spaces, an
    existing empty directory, an existing nonempty directory (legacy vs posix
    modes), moving a subdirectory out, and argument validation when the
    source is a blob rather than a directory.
    """
    storage_account = self.create_random_name(prefix='clitestaldsaccount', length=24)
    self.kwargs.update({'sc': storage_account, 'rg': resource_group})
    self.cmd(
        'storage account create -n {sc} -g {rg} -l centralus --kind StorageV2 --hierarchical-namespace true '
        ' --https-only')
    account_info = self.get_account_info(resource_group, storage_account)
    container = self.create_container(account_info)
    directory = 'dir'
    des_directory = 'dir1'
    self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container,
                     directory)
    self.storage_cmd(
        'storage blob directory upload -c {} -d {} -s "{}" --recursive', account_info, container,
        directory, os.path.join(test_dir, 'apple'))

    # Move from a directory to a nonexistent directory
    self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('exists', False))
    self.storage_cmd('storage blob directory move -c {} -d {} -s {}', account_info, container,
                     des_directory, directory)
    self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container,
                     directory) \
        .assert_with_checks(JMESPathCheck('exists', False))
    self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('exists', True))
    # 11 entries expected after the move (count taken from the recorded check;
    # presumably the uploaded 'apple' files plus markers — confirm against the
    # test_dir fixture).
    self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('length(@)', 11))

    # Test directory name contains Spaces
    contain_space_dir = 'test move directory'
    # Move directory to contain_space_dir
    self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('exists', True))
    self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container,
                     contain_space_dir) \
        .assert_with_checks(JMESPathCheck('exists', False))
    self.storage_cmd('storage blob directory move -c "{}" -d "{}" -s "{}"', account_info,
                     container, contain_space_dir, des_directory)
    self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container,
                     contain_space_dir) \
        .assert_with_checks(JMESPathCheck('exists', True))
    self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('exists', False))
    # Move contain_space_dir back to directory
    self.storage_cmd('storage blob directory move -c "{}" -d "{}" -s "{}"', account_info,
                     container, des_directory, contain_space_dir)
    self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('exists', True))
    self.storage_cmd('storage blob directory exists -c "{}" -d "{}"', account_info, container,
                     contain_space_dir) \
        .assert_with_checks(JMESPathCheck('exists', False))

    # Move from a directory to a existing empty directory
    directory2 = 'dir2'
    self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container,
                     directory2)
    self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('exists', True))
    self.storage_cmd('storage blob directory move -c {} -d {} -s {}', account_info, container,
                     directory2, des_directory)
    self.storage_cmd('storage blob directory exists -c {} -d {} ', account_info, container,
                     des_directory) \
        .assert_with_checks(JMESPathCheck('exists', False))
    self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container,
                     directory2) \
        .assert_with_checks(JMESPathCheck('length(@)', 12))

    # Move from a directory to a existing nonempty directory with mode "legacy"
    directory3 = 'dir3'
    self.storage_cmd('storage blob directory create -c {} -d {}', account_info, container,
                     directory3)
    self.storage_cmd('storage blob directory upload -c {} -d {} -s "{}"', account_info,
                     container, directory3, os.path.join(test_dir, 'readme'))
    # legacy mode must fail when the target directory is nonempty.
    self.cmd(
        'storage blob directory move -c {} -d {} -s {} --account-name {} --move-mode legacy'
        .format(container, directory3, directory2, storage_account),
        expect_failure=True)
    # Move from a directory to a existing nonempty directory with mode "posix":
    # the source ends up nested inside the target (dir3/dir2 exists afterwards).
    self.storage_cmd(
        'storage blob directory move -c {} -d {} -s {} --move-mode posix',
        account_info, container, directory3, directory2)
    self.storage_cmd('storage blob directory exists -c {} -d {}', account_info, container,
                     '/'.join([directory3, directory2])) \
        .assert_with_checks(JMESPathCheck('exists', True))

    # Move from a subdirectory to a new directory with mode "posix"
    directory4 = "dir4"
    self.storage_cmd(
        'storage blob directory move -c {} -d {} -s {} --move-mode posix',
        account_info, container, directory4, '/'.join([directory3, directory2]))
    self.storage_cmd('storage blob directory list -c {} -d {}', account_info, container,
                     directory4) \
        .assert_with_checks(JMESPathCheck('length(@)', 12))

    # Argument validation: Throw error when source path is blob name
    with self.assertRaises(SystemExit):
        self.storage_cmd('storage blob directory move -c {} -d {} -s {}', account_info,
                         container, directory4, '/'.join([directory3, 'readme']))
def test_storage_fs_soft_delete(self, resource_group, storage_account):
    """Soft-delete flow for datalake paths: delete, list-deleted-path, undelete."""
    account_info = self.get_account_info(resource_group, storage_account)
    container = self.create_file_system(account_info)
    # Prepare
    local_file = self.create_temp_file(1)
    file_name = self.create_random_name(prefix='file', length=24)
    dir_name = 'dir'
    self.storage_cmd('storage fs file upload -f {} -s "{}" -p {} ', account_info, container,
                     local_file, file_name)
    self.assertEqual(
        len(
            self.storage_cmd('storage fs file list -f {}', account_info,
                             container).get_output_in_json()), 1)
    self.storage_cmd('storage fs directory create -f {} -n {} ', account_info, container,
                     dir_name)
    self.assertEqual(
        len(
            self.storage_cmd('storage fs file list -f {}', account_info,
                             container).get_output_in_json()), 2)
    # set delete-policy to enable soft-delete
    self.storage_cmd(
        'storage fs service-properties update --delete-retention --delete-retention-period 2',
        account_info)
    self.storage_cmd('storage fs service-properties show',
                     account_info).assert_with_checks(
                         JMESPathCheck('delete_retention_policy.enabled', True),
                         JMESPathCheck('delete_retention_policy.days', 2))
    # presumably lets the retention-policy change take effect — TODO confirm
    time.sleep(10)
    # soft-delete and check
    self.storage_cmd('storage fs file delete -f {} -p {} -y', account_info, container,
                     file_name)
    self.storage_cmd('storage fs directory delete -f {} -n {} -y', account_info, container,
                     dir_name)
    self.assertEqual(
        len(
            self.storage_cmd('storage fs file list -f {}', account_info,
                             container).get_output_in_json()), 0)
    # presumably waits for deleted paths to appear in the listing — TODO confirm
    time.sleep(60)
    # --path-prefix filters the deleted-path listing down to the directory.
    result = self.storage_cmd(
        'storage fs list-deleted-path -f {} --path-prefix {} ', account_info, container,
        dir_name).get_output_in_json()
    self.assertEqual(len(result), 1)
    result = self.storage_cmd('storage fs list-deleted-path -f {}', account_info, container) \
        .get_output_in_json()
    self.assertEqual(len(result), 2)
    # With --num-results 1 the output still has two elements: one item plus a
    # trailing element carrying 'nextMarker' for continuation (see below).
    result = self.storage_cmd('storage fs list-deleted-path -f {} --num-results 1',
                              account_info, container) \
        .get_output_in_json()
    self.assertEqual(len(result), 2)
    marker = result[-1]['nextMarker']
    # Resume the listing from the continuation marker.
    result = self.storage_cmd('storage fs list-deleted-path -f {} --marker {}', account_info,
                              container, marker) \
        .get_output_in_json()
    self.assertEqual(len(result), 1)
    deleted_version = result[0]["deletionId"]
    # undelete and check
    self.storage_cmd(
        'storage fs undelete-path -f {} --deleted-path-name {} --deletion-id {}', account_info,
        container, file_name, deleted_version)
    self.assertEqual(
        len(
            self.storage_cmd('storage fs file list -f {}', account_info,
                             container).get_output_in_json()), 1)
def test_storage_azcopy_blob_account(self, resource_group, first_account, second_account, test_dir):
    """`az storage copy` (azcopy-backed) using account-name/container flags.

    Covers upload of a single file, a directory, and a wildcard file set;
    download of the same shapes into a temp folder; and service-to-service
    copy of a blob, a virtual directory, and the whole account.
    Blob counts assume the fixture layout of *test_dir* (readme + apple/10
    files + butter/10 file_* files) — TODO confirm against the fixture.
    """
    first_account_info = self.get_account_info(resource_group, first_account)
    second_account_info = self.get_account_info(resource_group, second_account)
    first_container = self.create_container(first_account_info)
    second_container = self.create_container(second_account_info)
    import os
    # Upload a single file
    self.cmd(
        'storage copy --source-local-path "{}" --destination-account-name {} --destination-container {}'
        .format(os.path.join(test_dir, 'readme'), first_account, first_container))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, first_account), checks=JMESPathCheck('length(@)', 1))
    # Upload entire directory
    self.cmd(
        'storage copy --source-local-path "{}" --destination-account-name {} --destination-container {} --recursive'
        .format(os.path.join(test_dir, 'apple'), first_account, first_container))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, first_account), checks=JMESPathCheck('length(@)', 11))
    # Upload a set of files
    self.cmd(
        'storage copy --source-local-path "{}" --destination-account-name {} --destination-container {} --recursive'
        .format(os.path.join(test_dir, 'butter/file_*'), first_account, first_container))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, first_account), checks=JMESPathCheck('length(@)', 21))
    local_folder = self.create_temp_dir()
    # Download a single file
    self.cmd(
        'storage copy --source-account-name {} --source-container {} --source-blob {} --destination-local-path "{}"'
        .format(first_account, first_container, 'readme', local_folder))
    # os.walk-based counts: f counts files, d counts directories.
    self.assertEqual(1, sum(len(f) for r, d, f in os.walk(local_folder)))
    # Download entire directory
    self.cmd(
        'storage copy --source-account-name {} --source-container {} --source-blob {} --destination-local-path "{}" --recursive'
        .format(first_account, first_container, 'apple/', local_folder))
    self.assertEqual(1, sum(len(d) for r, d, f in os.walk(local_folder)))
    self.assertEqual(11, sum(len(f) for r, d, f in os.walk(local_folder)))
    # Download a set of files
    self.cmd(
        'storage copy --source-account-name {} --source-container {} --include-path {} --include-pattern {} --destination-local-path "{}" --recursive'
        .format(first_account, first_container, 'apple', 'file*', local_folder))
    self.assertEqual(3, sum(len(d) for r, d, f in os.walk(local_folder)))
    self.assertEqual(21, sum(len(f) for r, d, f in os.walk(local_folder)))
    # Copy a single blob to another single blob
    self.cmd(
        'storage copy --source-account-name {} --source-container {} --source-blob {} \
        --destination-account-name {} --destination-container {} --preserve-s2s-access-tier false'
        .format(first_account, first_container, 'readme', second_account,
                second_container))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        second_container, second_account), checks=JMESPathCheck('length(@)', 1))
    # Copy an entire directory from blob virtual directory to another blob virtual directory
    self.cmd(
        'storage copy --source-account-name {} --source-container {} --source-blob {} \
        --destination-account-name {} --destination-container {} --recursive --preserve-s2s-access-tier false'
        .format(first_account, first_container, 'apple', second_account,
                second_container))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        second_container, second_account), checks=JMESPathCheck('length(@)', 11))
    # Copy an entire storage account data to another blob account
    self.cmd(
        'storage copy --source-account-name {} --destination-account-name {} --recursive --preserve-s2s-access-tier false'
        .format(first_account, second_account))
    self.cmd(
        'storage container list --account-name {}'.format(second_account),
        checks=JMESPathCheck('length(@)', 2))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, second_account), checks=JMESPathCheck('length(@)', 21))
def test_storage_azcopy_blob_url(self, resource_group, first_account, second_account, test_dir):
    """`az storage copy` (azcopy-backed) addressing blobs by URL (-s/-d).

    Mirrors the account-flag variant: uploads (single file with explicit
    content type, directory, wildcard set), downloads into a temp folder,
    service-to-service copies (blob, virtual directory, whole account),
    and finally uploads a page blob into a managed disk via a write SAS.
    """
    first_account_info = self.get_account_info(resource_group, first_account)
    second_account_info = self.get_account_info(resource_group, second_account)
    first_container = self.create_container(first_account_info)
    second_container = self.create_container(second_account_info)
    first_account_url = 'https://{}.blob.core.windows.net'.format(
        first_account)
    second_account_url = 'https://{}.blob.core.windows.net'.format(
        second_account)
    first_container_url = '{}/{}'.format(first_account_url, first_container)
    second_container_url = '{}/{}'.format(second_account_url, second_container)
    import os
    # Upload a single file
    content_type = "application/json"
    self.cmd('storage copy -s "{}" -d "{}" --content-type {}'.format(
        os.path.join(test_dir, 'readme'), first_container_url, content_type))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, first_account), checks=JMESPathCheck('length(@)', 1))
    # The explicit --content-type must be reflected on the blob.
    self.cmd('storage blob show -n {} -c {} --account-name {}'.format(
        'readme', first_container, first_account),
        checks=[
            JMESPathCheck('properties.contentSettings.contentType', content_type)
        ])
    # Upload entire directory
    self.cmd('storage copy -s "{}" -d "{}" --recursive'.format(
        os.path.join(test_dir, 'apple'), first_container_url))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, first_account), checks=JMESPathCheck('length(@)', 11))
    # Upload a set of files
    self.cmd('storage copy -s "{}" -d "{}" --recursive'.format(
        os.path.join(test_dir, 'butter/file_*'), first_container_url))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, first_account), checks=JMESPathCheck('length(@)', 21))
    local_folder = self.create_temp_dir()
    # Download a single file
    self.cmd('storage copy -s "{}" -d "{}"'.format(
        '{}/readme'.format(first_container_url), local_folder))
    self.assertEqual(1, sum(len(f) for r, d, f in os.walk(local_folder)))
    # Download an entire directory
    self.cmd('storage copy -s "{}" -d "{}" --recursive'.format(
        '{}/apple'.format(first_container_url), local_folder))
    self.assertEqual(1, sum(len(d) for r, d, f in os.walk(local_folder)))
    self.assertEqual(11, sum(len(f) for r, d, f in os.walk(local_folder)))
    # Download a set of files
    self.cmd(
        'storage copy -s "{}" --include-path "apple" --include-pattern file* -d "{}" --recursive'
        .format(first_container_url, local_folder))
    self.assertEqual(3, sum(len(d) for r, d, f in os.walk(local_folder)))
    self.assertEqual(21, sum(len(f) for r, d, f in os.walk(local_folder)))
    # Copy a single blob to another single blob
    self.cmd('storage account show -n {}'.format(second_account),
             checks=[self.check('kind', 'BlockBlobStorage')])
    self.cmd(
        'storage copy -s "{}" -d "{}" --preserve-s2s-access-tier false'.
        format('{}/readme'.format(first_container_url), second_container_url))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        second_container, second_account), checks=JMESPathCheck('length(@)', 1))
    # Copy an entire directory from blob virtual directory to another blob virtual directory
    self.cmd(
        'storage copy -s "{}" -d "{}" --recursive --preserve-s2s-access-tier false'
        .format('{}/apple'.format(first_container_url), second_container_url))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        second_container, second_account), checks=JMESPathCheck('length(@)', 11))
    # Copy an entire storage account data to another blob account
    self.cmd(
        'storage copy -s "{}" -d "{}" --recursive --preserve-s2s-access-tier false'
        .format(first_account_url, second_account_url))
    self.cmd(
        'storage container list --account-name {}'.format(second_account),
        checks=JMESPathCheck('length(@)', 2))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        first_container, second_account), checks=JMESPathCheck('length(@)', 21))
    # Upload to managed disk
    diskname = self.create_random_name(prefix='disk', length=12)
    local_file = self.create_temp_file(20480)
    self.cmd(
        'disk create -n {} -g {} --for-upload --upload-size-bytes 20972032'
        .format(diskname, resource_group))
    # NOTE(review): self.cmd(...) returns an execution-result object, not the
    # SAS string itself — this likely needs `.output` (or JSON extraction)
    # before being formatted into the copy command below; verify.
    sasURL = self.cmd(
        'disk grant-access --access-level Write --duration-in-seconds 3600 -n {} -g {} --query accessSas'
        .format(diskname, resource_group))
    self.cmd('storage copy -s "{}" -d "{}" --blob-type PageBlob'.format(
        local_file, sasURL))
def test_storage_blob_azcopy_sync(self, resource_group, storage_account_info, test_dir):
    """`az storage blob sync` scenarios (azcopy-backed).

    Syncs a directory via connection string and via account name, re-syncs
    after updating / deleting local files and folders, syncs a sub-folder,
    and finally syncs only files matching an include pattern.
    WARNING: this test mutates and deletes parts of *test_dir* — the
    fixture must be per-test. The count 41 assumes the fixture layout
    (readme + apple + butter + duff trees) — TODO confirm.
    """
    storage_account, _ = storage_account_info
    container = self.create_container(storage_account_info)
    # sync directory
    connection_string = self.cmd(
        'storage account show-connection-string -n {} -g {} -otsv'.format(
            storage_account, resource_group)).output
    self.cmd(
        'storage blob sync -s "{}" -c {} --connection-string {}'.format(
            test_dir, container, connection_string))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 41))
    self.cmd('storage blob delete-batch -s {} --account-name {}'.format(
        container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 0))
    # resync container
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 41))
    # update file
    with open(os.path.join(test_dir, 'readme'), 'w') as f:
        f.write('updated.')
    # sync one blob
    # NOTE(review): 87 is presumably the original fixture size of 'readme';
    # after syncing the 8-byte rewrite the length drops to 8.
    self.cmd(
        'storage blob list -c {} --account-name {} --prefix readme'.format(
            container, storage_account),
        checks=JMESPathCheck('[0].properties.contentLength', 87))
    self.cmd('storage blob sync -s "{}" -c {} --account-name {} -d readme'.
             format(os.path.join(test_dir, 'readme'), container, storage_account))
    self.cmd(
        'storage blob list -c {} --account-name {} --prefix readme'.format(
            container, storage_account),
        checks=JMESPathCheck('[0].properties.contentLength', 8))
    # delete one file and sync
    os.remove(os.path.join(test_dir, 'readme'))
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 40))
    # delete one folder and sync
    shutil.rmtree(os.path.join(test_dir, 'apple'))
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 30))
    # sync with another folder
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        os.path.join(test_dir, 'butter'), container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 20))
    # empty the folder and sync
    shutil.rmtree(os.path.join(test_dir, 'butter'))
    shutil.rmtree(os.path.join(test_dir, 'duff'))
    self.cmd('storage blob sync -s "{}" -c {} --account-name {}'.format(
        test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 0))
    # sync a subset of files in a directory
    with open(os.path.join(test_dir, 'test.json'), 'w') as f:
        f.write('updated.')
    self.cmd(
        'storage blob sync -s "{}" -c {} --account-name {} --include-pattern *.json'
        .format(test_dir, container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 1))
    self.cmd('storage blob delete-batch -s {} --account-name {}'.format(
        container, storage_account))
    self.cmd('storage blob list -c {} --account-name {}'.format(
        container, storage_account), checks=JMESPathCheck('length(@)', 0))
def test_storage_blob_lease_operations(self, resource_group, storage_account):
    """Exercise the full blob lease lifecycle.

    Acquire a fixed-duration lease, change its id, renew it, break it,
    and release it, verifying the lease properties reported by
    `storage blob show` after each transition.
    """
    account_info = self.get_account_info(resource_group, storage_account)
    payload_file = self.create_temp_file(128)
    container_name = self.create_container(account_info)
    blob_name = self.create_random_name('blob', 24)
    initial_lease_id = 'abcdabcd-abcd-abcd-abcd-abcdabcdabcd'
    replacement_lease_id = 'dcbadcba-dcba-dcba-dcba-dcbadcbadcba'
    modified_since = '2016-04-01t12:00z'

    def _assert_lease_state(duration, state, status):
        # One-line helper: check the lease triple reported by `blob show`.
        self.storage_cmd('storage blob show -n {} -c {}', account_info,
                         blob_name, container_name) \
            .assert_with_checks(JMESPathCheck('properties.lease.duration', duration),
                                JMESPathCheck('properties.lease.state', state),
                                JMESPathCheck('properties.lease.status', status))

    self.storage_cmd('storage blob upload -c {} -n {} -f "{}"', account_info,
                     container_name, blob_name, payload_file)

    # Acquire: a 60-second fixed lease, guarded by an if-modified-since date.
    self.storage_cmd('storage blob lease acquire --lease-duration 60 -b {} -c {} '
                     '--if-modified-since {} --proposed-lease-id {}', account_info,
                     blob_name, container_name, modified_since, initial_lease_id)
    _assert_lease_state('fixed', 'leased', 'locked')

    # Change the lease id, then renew under the new id.
    self.storage_cmd('storage blob lease change -b {} -c {} --lease-id {} '
                     '--proposed-lease-id {}', account_info, blob_name,
                     container_name, initial_lease_id, replacement_lease_id)
    self.storage_cmd('storage blob lease renew -b {} -c {} --lease-id {}',
                     account_info, blob_name, container_name, replacement_lease_id)
    _assert_lease_state('fixed', 'leased', 'locked')

    # Break: the lease enters 'breaking' but stays locked for the break period.
    self.storage_cmd('storage blob lease break -b {} -c {} --lease-break-period 30',
                     account_info, blob_name, container_name)
    _assert_lease_state(None, 'breaking', 'locked')

    # Release: the blob becomes available again.
    self.storage_cmd('storage blob lease release -b {} -c {} --lease-id {}',
                     account_info, blob_name, container_name, replacement_lease_id)
    _assert_lease_state(None, 'available', 'unlocked')
def test_storage_blob_list_scenarios(self, resource_group, storage_account):
    """`storage blob list` option matrix.

    Covers the default listing, --include snapshots/metadata, paging via
    --num-results / --show-next-marker / --marker, --prefix, --delimiter
    (virtual directory collapsing), and listing from the RA-GRS secondary
    endpoint.
    """
    account_info = self.get_account_info(resource_group, storage_account)
    container = self.create_container(account_info, prefix="con")
    local_file = self.create_temp_file(128)
    # Both blobs live under the virtual directory 'dir/'.
    blob_name1 = "/".join(["dir", self.create_random_name(prefix='blob', length=24)])
    blob_name2 = "/".join(["dir", self.create_random_name(prefix='blob', length=24)])
    # Prepare blob 1
    self.storage_cmd('storage blob upload -c {} -f "{}" -n {} ', account_info,
                     container, local_file, blob_name1)
    # Test
    self.storage_cmd('storage blob list -c {} ', account_info, container) \
        .assert_with_checks(JMESPathCheck('[0].objectReplicationDestinationPolicy', None),
                            JMESPathCheck('[0].objectReplicationSourceProperties', []))
    # Test with include snapshot
    result = self.storage_cmd('storage blob snapshot -c {} -n {} ', account_info,
                              container, blob_name1)\
        .get_output_in_json()
    self.assertIsNotNone(result['snapshot'])
    snapshot = result['snapshot']
    self.storage_cmd('storage blob list -c {} --include s', account_info, container) \
        .assert_with_checks(JMESPathCheck('[0].snapshot', snapshot))
    # Test with include metadata
    self.storage_cmd('storage blob metadata update -c {} -n {} --metadata test=1 ',
                     account_info, container, blob_name1)
    self.storage_cmd('storage blob metadata show -c {} -n {} ', account_info,
                     container, blob_name1)\
        .assert_with_checks(JMESPathCheck('test', '1'))
    self.storage_cmd('storage blob list -c {} --include m', account_info, container) \
        .assert_with_checks(JMESPathCheck('[0].metadata.test', '1'))
    # Prepare blob 2
    self.storage_cmd('storage blob upload -c {} -f "{}" -n {} ', account_info,
                     container, local_file, blob_name2)
    self.storage_cmd('storage blob list -c {} ', account_info, container).assert_with_checks(
        JMESPathCheck('length(@)', 2)
    )
    # Test num_results and next marker
    self.storage_cmd('storage blob list -c {} --num-results 1 ', account_info,
                     container).assert_with_checks(JMESPathCheck('length(@)', 1))
    # --show-next-marker appends a marker entry after the single result,
    # hence index 1 below.
    result = self.storage_cmd('storage blob list -c {} --num-results 1 --show-next-marker',
                              account_info, container).get_output_in_json()
    self.assertIsNotNone(result[1]['nextMarker'])
    next_marker = result[1]['nextMarker']
    # Test with marker
    self.storage_cmd('storage blob list -c {} --marker {} ', account_info, container,
                     next_marker) \
        .assert_with_checks(JMESPathCheck('length(@)', 1))
    # Test with prefix
    self.storage_cmd('storage blob list -c {} --prefix {}', account_info, container, 'dir/') \
        .assert_with_checks(JMESPathCheck('length(@)', 2))
    # Test with delimiter
    # With '/' as delimiter the two blobs collapse into one 'dir/' prefix entry.
    self.storage_cmd('storage blob list -c {} --delimiter "/"', account_info, container) \
        .assert_with_checks(JMESPathCheck('length(@)', 1),
                            JMESPathCheck('[0].name', 'dir/'))
    # Test secondary location
    account_name = account_info[0] + '-secondary'
    account_key = account_info[1]
    self.cmd('storage blob list -c {} --account-name {} --account-key {} '.format(
        container, account_name, account_key)).assert_with_checks(
        JMESPathCheck('length(@)', 2))
def test_storage_blob_container_operations(self, resource_group, storage_account):
    """Container-level command matrix.

    Covers exists/show/list, public-access permission toggling, metadata
    set/clear, the full container lease lifecycle, SAS generation, and
    deletion with --fail-not-exist.
    """
    account_info = self.get_account_info(resource_group, storage_account)
    c = self.create_container(account_info)
    proposed_lease_id = 'abcdabcd-abcd-abcd-abcd-abcdabcdabcd'
    new_lease_id = 'dcbadcba-dcba-dcba-dcba-dcbadcbadcba'
    date = '2016-04-01t12:00z'
    self.storage_cmd('storage container exists -n {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('exists', True))
    # Toggle public access blob -> off and verify each state.
    self.storage_cmd('storage container set-permission -n {} --public-access blob',
                     account_info, c)
    self.storage_cmd('storage container show-permission -n {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('publicAccess', 'blob'))
    self.storage_cmd('storage container set-permission -n {} --public-access off',
                     account_info, c)
    self.storage_cmd('storage container show-permission -n {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('publicAccess', 'off'))
    self.storage_cmd('storage container show -n {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('name', c))
    self.assertIn(c, self.storage_cmd('storage container list --query "[].name"',
                                      account_info).get_output_in_json())
    # Metadata: set two keys, verify, then clear by updating with no metadata.
    self.storage_cmd('storage container metadata update -n {} --metadata foo=bar moo=bak',
                     account_info, c)
    self.storage_cmd('storage container metadata show -n {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('foo', 'bar'), JMESPathCheck('moo', 'bak'))
    self.storage_cmd('storage container metadata update -n {}', account_info, c)
    self.storage_cmd('storage container metadata show -n {}', account_info, c) \
        .assert_with_checks(NoneCheck())
    # test lease operations
    self.storage_cmd('storage container lease acquire --lease-duration 60 -c {} '
                     '--if-modified-since {} --proposed-lease-id {}', account_info, c,
                     date, proposed_lease_id)
    self.storage_cmd('storage container show --name {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('properties.lease.duration', 'fixed'),
                            JMESPathCheck('properties.lease.state', 'leased'),
                            JMESPathCheck('properties.lease.status', 'locked'))
    self.storage_cmd('storage container lease change -c {} --lease-id {} '
                     '--proposed-lease-id {}', account_info, c, proposed_lease_id,
                     new_lease_id)
    self.storage_cmd('storage container lease renew -c {} --lease-id {}',
                     account_info, c, new_lease_id)
    self.storage_cmd('storage container show -n {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('properties.lease.duration', 'fixed'),
                            JMESPathCheck('properties.lease.state', 'leased'),
                            JMESPathCheck('properties.lease.status', 'locked'))
    # Break: lease enters 'breaking' but remains locked for the break period.
    self.storage_cmd('storage container lease break -c {} --lease-break-period 30',
                     account_info, c)
    self.storage_cmd('storage container show --name {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('properties.lease.duration', None),
                            JMESPathCheck('properties.lease.state', 'breaking'),
                            JMESPathCheck('properties.lease.status', 'locked'))
    self.storage_cmd('storage container lease release -c {} --lease-id {}',
                     account_info, c, new_lease_id)
    self.storage_cmd('storage container show --name {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('properties.lease.duration', None),
                            JMESPathCheck('properties.lease.state', 'available'),
                            JMESPathCheck('properties.lease.status', 'unlocked'))
    # A generated SAS token always carries a 'sig=' component.
    self.assertIn('sig=', self.storage_cmd('storage container generate-sas -n {}',
                                           account_info, c).output)
    # verify delete operation
    self.storage_cmd('storage container delete --name {} --fail-not-exist', account_info, c) \
        .assert_with_checks(JMESPathCheck('deleted', True))
    self.storage_cmd('storage container exists -n {}', account_info, c) \
        .assert_with_checks(JMESPathCheck('exists', False))
def test_storage_blob_restore(self, resource_group, storage_account):
    """Point-in-time blob restore.

    Enables change feed, soft delete, versioning, and a 1-day restore
    policy; creates two containers with four blobs each and deletes the
    containers; then restores specific blob ranges (and finally the whole
    account) to a point a few seconds in the past, both waited and
    --no-wait. Timing-sensitive: relies on the sleeps below for policy
    propagation and restore completion.
    """
    import time
    # Enable Policy
    self.cmd('storage account blob-service-properties update --enable-change-feed --enable-delete-retention --delete-retention-days 2 --enable-versioning -n {sa}')\
        .assert_with_checks(JMESPathCheck('changeFeed.enabled', True),
                            JMESPathCheck('deleteRetentionPolicy.enabled', True),
                            JMESPathCheck('deleteRetentionPolicy.days', 2))
    self.cmd('storage account blob-service-properties update --enable-restore-policy --restore-days 1 -n {sa} ')
    c1 = self.create_random_name(prefix='containera', length=24)
    c2 = self.create_random_name(prefix='containerb', length=24)
    b1 = self.create_random_name(prefix='blob1', length=24)
    b2 = self.create_random_name(prefix='blob2', length=24)
    b3 = self.create_random_name(prefix='blob3', length=24)
    b4 = self.create_random_name(prefix='blob4', length=24)
    local_file = self.create_temp_file(256)
    account_key = self.cmd('storage account keys list -n {} -g {} --query "[0].value" -otsv'
                           .format(storage_account, resource_group)).output
    # Prepare containers and blobs
    for container in [c1, c2]:
        self.cmd('storage container create -n {} --account-name {} --account-key {}'.format(
            container, storage_account, account_key)) \
            .assert_with_checks(JMESPathCheck('created', True))
        for blob in [b1, b2, b3, b4]:
            self.cmd('storage blob upload -c {} -f "{}" -n {} --account-name {} --account-key {}'.format(
                container, local_file, blob, storage_account, account_key))
        self.cmd('storage blob list -c {} --account-name {} --account-key {}'.format(
            container, storage_account, account_key)) \
            .assert_with_checks(JMESPathCheck('length(@)', 4))
        # Deleting the container is what the restore below must undo.
        self.cmd('storage container delete -n {} --account-name {} --account-key {}'.format(
            container, storage_account, account_key)) \
            .assert_with_checks(JMESPathCheck('deleted', True))
    time.sleep(60)
    # Restore blobs, with specific ranges
    self.cmd('storage account blob-service-properties show -n {sa}') \
        .assert_with_checks(JMESPathCheck('restorePolicy.enabled', True),
                            JMESPathCheck('restorePolicy.days', 1),
                            JMESPathCheckExists('restorePolicy.minRestoreTime'))
    # Restore to 5 seconds in the past (must be after minRestoreTime).
    time_to_restore = (datetime.utcnow() + timedelta(seconds=-5)).strftime('%Y-%m-%dT%H:%MZ')
    # c1/b1 -> c1/b2
    start_range = '/'.join([c1, b1])
    end_range = '/'.join([c1, b2])
    self.cmd('storage blob restore -t {} -r {} {} --account-name {} -g {}'.format(
        time_to_restore, start_range, end_range, storage_account, resource_group), checks=[
        JMESPathCheck('status', 'Complete'),
        JMESPathCheck('parameters.blobRanges[0].startRange', start_range),
        JMESPathCheck('parameters.blobRanges[0].endRange', end_range)])
    # Same restore again, fire-and-forget.
    self.cmd('storage blob restore -t {} -r {} {} --account-name {} -g {} --no-wait'.format(
        time_to_restore, start_range, end_range, storage_account, resource_group))
    time.sleep(90)
    time_to_restore = (datetime.utcnow() + timedelta(seconds=-5)).strftime('%Y-%m-%dT%H:%MZ')
    # c1/b2 -> c2/b3
    start_range = '/'.join([c1, b2])
    end_range = '/'.join([c2, b3])
    self.cmd('storage blob restore -t {} -r {} {} --account-name {} -g {}'.format(
        time_to_restore, start_range, end_range, storage_account, resource_group), checks=[
        JMESPathCheck('status', 'Complete'),
        JMESPathCheck('parameters.blobRanges[0].startRange', start_range),
        JMESPathCheck('parameters.blobRanges[0].endRange', end_range)])
    time.sleep(120)
    # No -r: restore the whole account, fire-and-forget.
    self.cmd('storage blob restore -t {} --account-name {} -g {} --no-wait'.format(
        time_to_restore, storage_account, resource_group))
def test_databricks(self, resource_group):
    """CRUD scenario for Databricks workspaces.

    Creates a default workspace and a fully customized one (managed
    resource group, relay namespace, storage account settings), updates
    tags, shows by name and by ARM resource id, lists, and deletes both.
    """
    self.kwargs.update({
        'workspace_name': 'my-test-workspace',
        'subscription': '00000000-0000-0000-0000-000000000000',
        'custom_workspace_name': 'my-custom-workspace',
        'managed_resource_group': 'custom-managed-rg'
    })
    # Plain workspace with a service-managed resource group.
    self.cmd('az databricks workspace create '
             '--resource-group {rg} '
             '--name {workspace_name} '
             '--location "westus" '
             '--sku standard',
             checks=[JMESPathCheck('name', self.kwargs.get('workspace_name', '')),
                     JMESPathCheck('sku.name', self.kwargs.get('sku.name', 'standard'))])

    expected_managed_rg_id = '/subscriptions/{}/resourceGroups/{}'.format(
        self.kwargs.get('subscription', ''),
        self.kwargs.get('managed_resource_group', ''))
    # Workspace with caller-supplied managed RG, relay namespace and storage settings.
    self.cmd('az databricks workspace create '
             '--resource-group {rg} '
             '--name {custom_workspace_name} '
             '--location "westus" '
             '--sku standard '
             '--managed-resource-group {managed_resource_group} '
             '--relay-namespace-name custom-relay-space '
             '--storage-account-name customdbstorage '
             '--storage-account-sku Standard_LRS',
             checks=[JMESPathCheck('name', self.kwargs.get('custom_workspace_name', '')),
                     JMESPathCheck('parameters.relayNamespaceName.value', 'custom-relay-space'),
                     JMESPathCheck('parameters.storageAccountName.value', 'customdbstorage'),
                     JMESPathCheck('parameters.storageAccountSkuName.value', 'Standard_LRS'),
                     JMESPathCheck('managedResourceGroupId', expected_managed_rg_id)])

    self.cmd('az databricks workspace update '
             '--resource-group {rg} '
             '--name {workspace_name} '
             '--tags type=test',
             checks=[JMESPathCheck('tags.type', 'test')])
    self.cmd('az databricks workspace show '
             '--resource-group {rg} '
             '--name {workspace_name}',
             checks=[JMESPathCheck('name', self.kwargs.get('workspace_name', ''))])

    # Show again, addressing the workspace by its full ARM resource id.
    default_workspace_id = resource_id(
        subscription=self.kwargs.get('subscription', ''),
        resource_group=resource_group,
        namespace='Microsoft.Databricks',
        type='workspaces',
        name=self.kwargs.get('workspace_name', ''))
    self.cmd('az databricks workspace show '
             '--ids {}'.format(default_workspace_id),
             checks=[JMESPathCheck('name', self.kwargs.get('workspace_name', ''))])

    # TODO: subscription-wide `workspace list` returns 502 service-side,
    # so only the resource-group-scoped list is exercised here.
    self.cmd('az databricks workspace list '
             '--resource-group {rg} ',
             checks=[])

    # Tear down both workspaces.
    for name_template in ('{workspace_name}', '{custom_workspace_name}'):
        self.cmd('az databricks workspace delete '
                 '--resource-group {rg} '
                 '--name ' + name_template + ' '
                 '-y',
                 checks=[])
def test_authV2_authclassic(self, resource_group):
    """Classic (v1) webapp authentication settings.

    Creates a plan and webapp, confirms the auth config version is 'v1',
    checks that a fresh app has all classic auth fields unset, then sets
    Facebook-based auth via `webapp auth-classic update` and verifies the
    returned settings.
    """
    webapp_name = self.create_random_name('webapp-authentication-test', 40)
    plan_name = self.create_random_name('webapp-authentication-plan', 40)
    self.cmd('appservice plan create -g {} -n {} --sku S1'.format(
        resource_group, plan_name))
    self.cmd('webapp create -g {} -n {} --plan {}'.format(
        resource_group, webapp_name, plan_name))
    # A new app starts on the classic (v1) auth config.
    self.cmd('webapp auth config-version show -g {} -n {}'.format(
        resource_group, webapp_name)).assert_with_checks(
        [JMESPathCheck('configVersion', 'v1')])
    # testing show command for newly created app and initial fields
    self.cmd('webapp auth-classic show -g {} -n {}'.format(
        resource_group, webapp_name)).assert_with_checks([
            JMESPathCheck('unauthenticatedClientAction', None),
            JMESPathCheck('defaultProvider', None),
            JMESPathCheck('enabled', False),
            JMESPathCheck('tokenStoreEnabled', None),
            JMESPathCheck('allowedExternalRedirectUrls', None),
            JMESPathCheck('tokenRefreshExtensionHours', None),
            JMESPathCheck('runtimeVersion', None),
            JMESPathCheck('clientId', None),
            JMESPathCheck('clientSecretCertificateThumbprint', None),
            JMESPathCheck('allowedAudiences', None),
            JMESPathCheck('issuer', None),
            JMESPathCheck('facebookAppId', None),
            JMESPathCheck('facebookOauthScopes', None)
        ])
    # update and verify
    result = self.cmd(
        'webapp auth-classic update -g {} -n {} --enabled true --action LoginWithFacebook '
        '--token-store false --token-refresh-extension-hours 7.2 --runtime-version 1.2.8 '
        '--aad-client-id aad_client_id --aad-client-secret-certificate-thumbprint aad_thumbprint '
        '--aad-allowed-token-audiences https://audience1 --aad-token-issuer-url https://issuer_url '
        '--facebook-app-id facebook_id --facebook-oauth-scopes public_profile email'
        .format(resource_group, webapp_name)).assert_with_checks([
            JMESPathCheck('unauthenticatedClientAction', 'RedirectToLoginPage'),
            JMESPathCheck('defaultProvider', 'Facebook'),
            JMESPathCheck('enabled', True),
            JMESPathCheck('tokenStoreEnabled', False),
            JMESPathCheck('tokenRefreshExtensionHours', 7.2),
            JMESPathCheck('runtimeVersion', '1.2.8'),
            JMESPathCheck('clientId', 'aad_client_id'),
            JMESPathCheck('clientSecretCertificateThumbprint', 'aad_thumbprint'),
            JMESPathCheck('issuer', 'https://issuer_url'),
            JMESPathCheck('facebookAppId', 'facebook_id')
        ]).get_output_in_json()
    # allowedAudiences is a list, so membership is asserted separately.
    self.assertIn('https://audience1', result['allowedAudiences'])
def test_create_storage_account(self, resource_group, location):
    """Storage account lifecycle.

    Creates an account, verifies check-name conflict, list/show output,
    connection-string protocol handling, tag and SKU updates (including
    clearing tags and generic --set), then deletes the account and
    confirms the name is available again.
    """
    name = self.create_random_name(prefix='cli', length=24)
    self.cmd('az storage account create -n {} -g {} --sku {} -l {}'.format(
        name, resource_group, 'Standard_LRS', location))
    # The name is now taken, so check-name must report a conflict.
    self.cmd('storage account check-name --name {}'.format(name), checks=[
        JMESPathCheck('nameAvailable', False),
        JMESPathCheck('reason', 'AlreadyExists')
    ])
    # Fix: compare against the `location` parameter instead of the
    # hard-coded 'westus', matching the other location checks in this
    # test so it is valid in any region.
    self.cmd('storage account list -g {}'.format(resource_group), checks=[
        JMESPathCheck('[0].location', location),
        JMESPathCheck('[0].sku.name', 'Standard_LRS'),
        JMESPathCheck('[0].resourceGroup', resource_group)
    ])
    self.cmd('az storage account show -n {} -g {}'.format(
        name, resource_group), checks=[
        JMESPathCheck('name', name),
        JMESPathCheck('location', location),
        JMESPathCheck('sku.name', 'Standard_LRS'),
        JMESPathCheck('kind', 'Storage')
    ])
    # `show` also resolves the account without an explicit -g.
    self.cmd('az storage account show -n {}'.format(name), checks=[
        JMESPathCheck('name', name),
        JMESPathCheck('location', location),
        JMESPathCheck('sku.name', 'Standard_LRS'),
        JMESPathCheck('kind', 'Storage')
    ])
    # --protocol http must yield a non-https connection string.
    self.cmd(
        'storage account show-connection-string -g {} -n {} --protocol http'
        .format(resource_group, name), checks=[
            JMESPathCheck("contains(connectionString, 'https')", False),
            JMESPathCheck("contains(connectionString, '{}')".format(name), True)
        ])
    # A bare tag key ('cat') is stored with an empty-string value.
    self.cmd(
        'storage account update -g {} -n {} --tags foo=bar cat'.format(
            resource_group, name),
        checks=JMESPathCheck('tags', {
            'cat': '',
            'foo': 'bar'
        }))
    # `--tags` with no arguments clears all tags; SKU updates in the same call.
    self.cmd(
        'storage account update -g {} -n {} --sku Standard_GRS --tags'.format(
            resource_group, name),
        checks=[
            JMESPathCheck('tags', {}),
            JMESPathCheck('sku.name', 'Standard_GRS')
        ])
    self.cmd('storage account update -g {} -n {} --set tags.test=success'.format(
        resource_group, name),
        checks=JMESPathCheck('tags', {'test': 'success'}))
    self.cmd('storage account delete -g {} -n {} --yes'.format(
        resource_group, name))
    # After deletion the name becomes available again.
    self.cmd('storage account check-name --name {}'.format(name),
             checks=JMESPathCheck('nameAvailable', True))
def test_webapp_up_change_runtime_version(self, resource_group):
    """Verify `webapp up` can switch a linux node webapp between runtime
    versions (10.14 -> 12-lts -> 10.14) on the same plan."""
    import shutil
    import tempfile
    import zipfile

    plan = self.create_random_name('up-nodeplan', 24)
    webapp_name = self.create_random_name('up-nodeapp', 24)
    zip_file_name = os.path.join(TEST_DIR, 'node-Express-up.zip')

    # Extract the sample app into a temp directory and run `webapp up`
    # from inside it.
    temp_dir = tempfile.mkdtemp()
    # Fix: close the archive deterministically instead of leaking the handle.
    with zipfile.ZipFile(zip_file_name, 'r') as zip_ref:
        zip_ref.extractall(temp_dir)
    current_working_dir = os.getcwd()
    up_working_dir = os.path.join(temp_dir, 'myExpressApp')
    os.chdir(up_working_dir)

    try:
        # dryrun: plan the deployment without creating resources
        result = self.cmd(
            'webapp up -n {} -g {} --plan {} --os "linux" --runtime "node|10.14" --sku "S1" --dryrun'
            .format(webapp_name, resource_group, plan)).get_output_in_json()
        self.assertTrue(result['sku'].lower() == 'standard')
        self.assertTrue(result['name'].startswith(webapp_name))
        # Fix: the original used assertTrue(expr, up_working_dir), which
        # treats the expected value as the failure *message* and never
        # compares — assertEqual performs the intended check.
        self.assertEqual(result['src_path'].replace(os.sep + os.sep, os.sep),
                         up_working_dir)
        self.assertTrue(result['runtime_version'] == 'node|10.14')
        self.assertTrue(result['os'].lower() == 'linux')

        # full E2E deployment
        full_result = self.cmd(
            'webapp up -n {} -g {} --plan {} --os "linux" --runtime "node|10.14" --sku "S1"'
            .format(webapp_name, resource_group, plan)).get_output_in_json()
        self.assertTrue(result['name'] == full_result['name'])

        # `webapp up` stores name/group in local context, so -n/-g may be
        # omitted from subsequent commands.
        self.cmd('webapp show', checks=[
            JMESPathCheck('name', webapp_name),
            JMESPathCheck('httpsOnly', True),
            JMESPathCheck('kind', 'app,linux'),
            JMESPathCheck('resourceGroup', resource_group)
        ])

        # change runtime to a newer version and verify it took effect
        full_result = self.cmd(
            'webapp up -n {} -g {} --plan {} --os "linux" --runtime "node|12-lts" --sku "S1"'
            .format(webapp_name, resource_group, plan)).get_output_in_json()
        self.assertTrue(result['name'] == full_result['name'])
        self.cmd('webapp config show', checks=[
            JMESPathCheck('linuxFxVersion', "NODE|12-lts"),
            JMESPathCheck('tags.cli', 'None')
        ])

        # change runtime back to the older version and verify
        full_result = self.cmd(
            'webapp up -n {} -g {} --plan {} --os "linux" --runtime "node|10.14" --sku "S1"'
            .format(webapp_name, resource_group, plan)).get_output_in_json()
        self.assertTrue(result['name'] == full_result['name'])
        self.cmd('webapp config show', checks=[
            JMESPathCheck('linuxFxVersion', "NODE|10.14"),
            JMESPathCheck('tags.cli', 'None')
        ])
    finally:
        # Fix: restore the working directory and remove the temp copy even
        # when an assertion above fails (the original leaked both).
        os.chdir(current_working_dir)
        shutil.rmtree(temp_dir)
def test_storage_create_default_sku(self, resource_group):
    """A storage account created without an explicit --sku defaults to Standard_RAGRS."""
    account_name = self.create_random_name(prefix='cli', length=24)
    self.cmd(
        'az storage account create -n {} -g {}'.format(account_name, resource_group),
        checks=[JMESPathCheck('sku.name', 'Standard_RAGRS')])
def test_webapp_up_python_e2e(self, resource_group):
    """End-to-end `webapp up` for a python app: dryrun, full deploy, then
    verify webapp config, app settings, and the created App Service plan."""
    import shutil
    import tempfile
    import zipfile

    plan = self.create_random_name('up-pythonplan', 24)
    webapp_name = self.create_random_name('up-pythonapp', 24)
    zip_file_name = os.path.join(TEST_DIR, 'python-hello-world-up.zip')

    # Extract the sample app into a temp directory and run `webapp up`
    # from inside it.
    temp_dir = tempfile.mkdtemp()
    # Fix: close the archive deterministically instead of leaking the handle.
    with zipfile.ZipFile(zip_file_name, 'r') as zip_ref:
        zip_ref.extractall(temp_dir)
    current_working_dir = os.getcwd()
    up_working_dir = os.path.join(temp_dir, 'python-docs-hello-world')
    os.chdir(up_working_dir)

    try:
        # dryrun: plan the deployment without creating resources
        result = self.cmd('webapp up -n {} --sku S1 --dryrun'.format(
            webapp_name)).get_output_in_json()
        self.assertTrue(result['sku'].lower() == 'standard')
        self.assertTrue(result['name'].startswith(webapp_name))
        # Fix: the original used assertTrue(expr, up_working_dir), which
        # treats the expected value as the failure *message* and never
        # compares — assertEqual performs the intended check.
        self.assertEqual(result['src_path'].replace(os.sep + os.sep, os.sep),
                         up_working_dir)
        self.assertTrue(result['runtime_version'] == 'python|3.7')
        self.assertTrue(result['os'].lower() == 'linux')

        # full E2E deployment
        full_result = self.cmd(
            'webapp up -n {} --sku S1 -g {} --plan {}'.format(
                webapp_name, resource_group, plan)).get_output_in_json()
        self.assertTrue(result['name'] == full_result['name'])

        # `webapp up` stores name/group in local context, so -n/-g may be
        # omitted from subsequent commands.
        self.cmd('webapp show', checks=[
            JMESPathCheck('name', webapp_name),
            JMESPathCheck('httpsOnly', True),
            JMESPathCheck('kind', 'app,linux'),
            JMESPathCheck('resourceGroup', resource_group)
        ])
        self.cmd('webapp config show', checks=[
            JMESPathCheck('linuxFxVersion', 'PYTHON|3.7'),
            JMESPathCheck('tags.cli', 'None'),
        ])
        self.cmd('webapp config appsettings list', checks=[
            JMESPathCheck('[0].name', 'SCM_DO_BUILD_DURING_DEPLOYMENT'),
            JMESPathCheck('[0].value', 'True')
        ])
        # verify SKU and kind of the App Service plan that was created
        self.cmd('appservice plan show', checks=[
            JMESPathCheck('reserved', True),
            JMESPathCheck('name', plan),
            JMESPathCheck('sku.tier', 'Standard'),
            JMESPathCheck('sku.name', 'S1')
        ])
    finally:
        # Fix: restore the working directory and remove the temp copy even
        # when an assertion above fails (the original leaked both).
        os.chdir(current_working_dir)
        shutil.rmtree(temp_dir)
def test_list_locations(self):
    """`account list-locations` must expose a display name for the westus region."""
    location_checks = [
        JMESPathCheck("[?name=='westus'].displayName | [0]", 'West US')
    ]
    self.cmd('az account list-locations', checks=location_checks)
def test_afd_profile_crud(self, resource_group):
    """Full CRUD lifecycle of an AFD profile: empty list -> create ->
    list/show -> replace tags via update -> usage -> delete -> empty list."""
    # Fresh resource group: no profiles yet.
    self.afd_profile_list_cmd(resource_group,
                              checks=[JMESPathCheck('length(@)', 0)])

    profile_name = self.create_random_name(prefix='profile', length=24)
    self.afd_profile_create_cmd(resource_group, profile_name,
                                tags='tag1=value1 tag2=value2')

    # The new profile shows up in the list with its tags and default SKU.
    self.afd_profile_list_cmd(resource_group, checks=[
        JMESPathCheck('length(@)', 1),
        JMESPathCheck('@[0].location', "Global"),
        JMESPathCheck('@[0].sku.name', 'Standard_AzureFrontDoor'),
        JMESPathCheck('@[0].tags.tag1', 'value1'),
        JMESPathCheck('@[0].tags.tag2', 'value2')])

    self.afd_profile_show_cmd(resource_group, profile_name, checks=[
        JMESPathCheck('location', "Global"),
        JMESPathCheck('sku.name', 'Standard_AzureFrontDoor'),
        JMESPathCheck('length(tags)', 2),
        JMESPathCheck('tags.tag1', 'value1'),
        JMESPathCheck('tags.tag2', 'value2')])

    # Updating tags replaces the previous tag set entirely.
    self.afd_profile_update_cmd(
        resource_group, profile_name, tags='tag3=value3 tag4=value4',
        checks=[
            JMESPathCheck('location', "Global"),
            JMESPathCheck('sku.name', 'Standard_AzureFrontDoor'),
            JMESPathCheck('tags.tag1', None),
            JMESPathCheck('tags.tag2', None),
            JMESPathCheck('tags.tag3', 'value3'),
            JMESPathCheck('tags.tag4', 'value4')])

    self.cmd(f"afd profile usage -g {resource_group} --profile-name {profile_name}",
             checks=[JMESPathCheck('length(@)', 6)])

    # Delete and confirm the group is empty again.
    self.afd_profile_delete_cmd(resource_group, profile_name)
    self.afd_profile_list_cmd(resource_group,
                              checks=[JMESPathCheck('length(@)', 0)])
def test_batchai_auto_scale_scenario(self, resource_group, storage_account):
    """Exercise an auto-scale Batch AI cluster: it must scale up to run a
    submitted job and scale back down to zero nodes afterwards."""
    # Typical usage scenario for auto scale cluster.
    # 1. Create a compute cluster
    # 2. Submit a job
    # 3. The cluster will auto scale to execute the job
    # 4. Examine the job execution results
    # 5. The cluster will down scale
    with self._given_configured_environment(resource_group, storage_account):
        # Create a file share 'share' to be mounted on the cluster
        self.cmd('az storage share create -n share')
        # Create a workspace
        self.cmd('az batchai workspace create -g {0} -n workspace'.format(resource_group),
                 checks=[JMESPathCheck('name', 'workspace')])
        # Create a cluster; the JSON config requests auto-scale between 0
        # and 1 nodes with the Azure file share mounted.
        self.cmd('az batchai cluster create -g {0} -w workspace -n cluster -f {1}'.format(
            resource_group, _data_file('auto_scale_cluster_with_azure_files.json')),
            checks=[
                JMESPathCheck('name', 'cluster'),
                JMESPathCheck('scaleSettings.autoScale.minimumNodeCount', 0),
                JMESPathCheck('scaleSettings.autoScale.maximumNodeCount', 1),
                JMESPathCheck('vmSize', 'STANDARD_D1'),
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].accountName',
                              storage_account),
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].azureFileUrl',
                              'https://{0}.file.core.windows.net/share'.format(storage_account)),
                # secrets must not be echoed back by the service
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].credentialsInfo.accountKey',
                              None),
                JMESPathCheck('userAccountSettings.adminUserName', 'DemoUser'),
                JMESPathCheck('userAccountSettings.adminUserPassword', None)])
        # Create an experiment
        self.cmd('az batchai experiment create -g {0} -w workspace -n experiment'.format(resource_group),
                 checks=[JMESPathCheck('name', 'experiment')])
        # Create the job; it starts queued because the cluster has 0 nodes.
        self.cmd('az batchai job create -c cluster -g {0} -w workspace -e experiment -n job -f {1}'.format(
            resource_group, _data_file('custom_toolkit_job.json')),
            checks=[
                JMESPathCheck('name', 'job'),
                JMESPathCheck('customToolkitSettings.commandLine',
                              'echo hi | tee $AZ_BATCHAI_OUTPUT_OUTPUT/result.txt'),
                JMESPathCheck('executionState', 'queued')])
        # Wait for the cluster to scale up and job completed
        self.cmd('az batchai job wait -g {0} -w workspace -e experiment -n job'.format(resource_group))
        # The job must succeed by this time
        self.cmd('az batchai job show -g {0} -w workspace -e experiment -n job'.format(resource_group),
                 checks=[
                     JMESPathCheck('name', 'job'),
                     JMESPathCheck(
                         'customToolkitSettings.commandLine',
                         'echo hi | tee $AZ_BATCHAI_OUTPUT_OUTPUT/result.txt'),
                     JMESPathCheck('executionState', 'succeeded'),
                     JMESPathCheck('executionInfo.exitCode', 0),
                     JMESPathCheck('executionInfo.errors', None),
                 ])
        # Give cluster a time do down scale
        time.sleep(CLUSTER_RESIZE_TIME)
        # By this time the cluster should not have any nodes
        self.cmd('az batchai cluster show -g {0} -w workspace -n cluster'.format(resource_group),
                 checks=JMESPathCheck('currentNodeCount', 0))
def test_storage_file_batch_upload_scenarios_v2(self, test_dir, storage_account_info):
    """Batch upload/download against a file share: no pattern, glob
    patterns, share-URL destinations, --destination-path, and content
    settings/metadata. File counts are asserted by walking the local
    download folder; the expected totals reflect the `test_dir` fixture
    layout (41 files overall — verify against the fixture if it changes)."""
    # upload without pattern
    src_share = self.create_share(storage_account_info)
    local_folder = self.create_temp_dir()
    self.storage_cmd(
        'storage file upload-batch -s "{}" -d {} --max-connections 3',
        storage_account_info, test_dir, src_share)
    self.storage_cmd('storage file download-batch -s {} -d "{}"',
                     storage_account_info, src_share, local_folder)
    self.assertEqual(41, sum(len(f) for r, d, f in os.walk(local_folder)))

    # upload with pattern apple/*
    src_share = self.create_share(storage_account_info)
    local_folder = self.create_temp_dir()
    self.storage_cmd(
        'storage file upload-batch -s "{}" -d {} --pattern apple/*',
        storage_account_info, test_dir, src_share)
    self.storage_cmd('storage file download-batch -s {} -d "{}"',
                     storage_account_info, src_share, local_folder)
    self.assertEqual(10, sum(len(f) for r, d, f in os.walk(local_folder)))

    # upload with pattern */file_0
    src_share = self.create_share(storage_account_info)
    local_folder = self.create_temp_dir()
    # [:-1] drops the trailing '/' so the URL addresses the share itself
    share_url = self.storage_cmd('storage file url -s {} -p \'\' -otsv',
                                 storage_account_info, src_share).output.strip()[:-1]
    self.storage_cmd(
        'storage file upload-batch -s "{}" -d {} --pattern */file_0',
        storage_account_info, test_dir, share_url)
    self.storage_cmd('storage file download-batch -s {} -d "{}"',
                     storage_account_info, src_share, local_folder)
    self.assertEqual(4, sum(len(f) for r, d, f in os.walk(local_folder)))

    # upload with pattern nonexists/* — nothing matches, nothing uploads
    src_share = self.create_share(storage_account_info)
    local_folder = self.create_temp_dir()
    self.storage_cmd(
        'storage file upload-batch -s "{}" -d {} --pattern nonexists/*',
        storage_account_info, test_dir, src_share)
    self.storage_cmd('storage file download-batch -s {} -d "{}"',
                     storage_account_info, src_share, local_folder)
    self.assertEqual(0, sum(len(f) for r, d, f in os.walk(local_folder)))

    # upload while specifying share path (destination given as a URL)
    src_share = self.create_share(storage_account_info)
    local_folder = self.create_temp_dir()
    share_url = self.storage_cmd('storage file url -s {} -p \'\' -otsv',
                                 storage_account_info, src_share).output.strip()[:-1]
    self.storage_cmd(
        'storage file upload-batch -s "{}" -d {} --pattern */file_0 --destination-path some_dir',
        storage_account_info, test_dir, share_url)
    self.storage_cmd(
        'storage file download-batch -s {} -d "{}" --pattern some_dir*',
        storage_account_info, src_share, local_folder)
    self.assertEqual(4, sum(len(f) for r, d, f in os.walk(local_folder)))

    # upload to specifying share path (nested destination directory)
    src_share = self.create_share(storage_account_info)
    local_folder = self.create_temp_dir()
    sub_dir = 'test_dir/sub_dir'
    self.storage_cmd(
        'storage file upload-batch -s "{}" -d {} --pattern */file_0 --destination-path {} ',
        storage_account_info, test_dir, src_share, sub_dir)
    self.storage_cmd('storage file download-batch -s {} -d "{}"',
                     storage_account_info, src_share + "/" + sub_dir,
                     local_folder)
    self.assertEqual(4, sum(len(f) for r, d, f in os.walk(local_folder)))

    # upload with content settings, then confirm they round-trip via `show`
    src_share = self.create_share(storage_account_info)
    local_folder = self.create_temp_dir()
    self.storage_cmd(
        'storage file upload-batch -s "{}" -d {} --pattern apple/file_0 '
        '--content-cache-control no-cache '
        '--content-disposition attachment '
        '--content-encoding compress '
        '--content-language en-US '
        '--content-type "multipart/form-data;" '
        '--metadata key=val',
        storage_account_info, test_dir, src_share)
    self.storage_cmd('storage file show -s {} -p "{}" ',
                     storage_account_info, src_share, 'apple/file_0').\
        assert_with_checks(JMESPathCheck('name', 'file_0'),
                           JMESPathCheck('properties.contentSettings.cacheControl', 'no-cache'),
                           JMESPathCheck('properties.contentSettings.contentDisposition', 'attachment'),
                           JMESPathCheck('properties.contentSettings.contentEncoding', 'compress'),
                           JMESPathCheck('properties.contentSettings.contentLanguage', 'en-US'),
                           JMESPathCheck('properties.contentSettings.contentType', 'multipart/form-data;'),
                           JMESPathCheck('metadata', {'key': 'val'}))
def test_batchai_config_less_cluster_with_file_systems(self, resource_group, storage_account):
    """Create a Batch AI cluster with mounted file systems defined purely
    via command-line flags (no JSON config file) and verify the node mounts
    them successfully."""
    # Test creation of a cluster with mount file systems defined via command line.
    with self._given_configured_environment(resource_group, storage_account):
        self.cmd('az storage share create -n share')
        self.cmd('az storage container create -n container')
        self.cmd('az batchai workspace create -g {0} -n workspace'.format(resource_group))
        # --afs-name / --bfs-name mount the Azure file share and the blob
        # container; the checks confirm the relative mount paths are 'afs'
        # and 'bfs' respectively.
        self.cmd(
            'az batchai cluster create -g {0} -w workspace -n cluster '
            '-i UbuntuLTS --vm-size STANDARD_D1 --min 1 --max 1 -u DemoUser -k {1} '
            '--afs-name share --bfs-name container'.format(
                resource_group, _data_file('key.txt')),
            checks=[
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].accountName',
                              storage_account),
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].azureFileUrl',
                              'https://{0}.file.core.windows.net/share'.format(storage_account)),
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].relativeMountPath', 'afs'),
                # account keys must never be echoed back by the service
                JMESPathCheck('nodeSetup.mountVolumes.azureFileShares[0].credentialsInfo.accountKey',
                              None),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].accountName',
                              storage_account),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].containerName',
                              'container'),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].relativeMountPath', 'bfs'),
                JMESPathCheck('nodeSetup.mountVolumes.azureBlobFileSystems[0].credentialsInfo.accountKey',
                              None),
                JMESPathCheck('userAccountSettings.adminUserName', 'DemoUser'),
                JMESPathCheck('userAccountSettings.adminUserPassword', None)])
        # Give file server and cluster to finish preparation.
        time.sleep(NODE_STARTUP_TIME * 2)
        # Check the node in the cluster successfully started - was able to mount nfs and azure filesystem.
        self.cmd('az batchai cluster show -g {0} -w workspace -n cluster'.format(resource_group),
                 checks=[JMESPathCheck('nodeStateCounts.idleNodeCount', 1)])
def test_aks_create_with_upgrade(self, resource_group, resource_group_location, sp_name, sp_password):
    """Create an AKS cluster at kubernetes 1.7.7, then upgrade it in place
    to 1.8.1 and verify the reported version."""
    ssh_key_value = self.generate_ssh_keys().replace('\\', '\\\\')
    cluster_name = self.create_random_name('cliakstest', 16)
    dns_name_prefix = self.create_random_name('cliaksdns', 16)
    initial_version = '1.7.7'

    # create the cluster at the older kubernetes version
    create_template = ('aks create -g {} -n {} --dns-name-prefix {} --ssh-key-value {} '
                       '--kubernetes-version {} -l {} --service-principal {} --client-secret {}')
    self.cmd(create_template.format(resource_group, cluster_name, dns_name_prefix,
                                    ssh_key_value, initial_version,
                                    resource_group_location, sp_name, sp_password),
             checks=[
                 JMESPathCheckExists('fqdn'),
                 JMESPathCheck('provisioningState', 'Succeeded')
             ])

    # confirm the cluster's properties before upgrading
    self.cmd('aks show -g {} -n {}'.format(resource_group, cluster_name), checks=[
        JMESPathCheck('type', 'Microsoft.ContainerService/ManagedClusters'),
        JMESPathCheck('name', cluster_name),
        JMESPathCheck('resourceGroup', resource_group),
        JMESPathCheck('agentPoolProfiles[0].count', 3),
        JMESPathCheck('agentPoolProfiles[0].vmSize', 'Standard_D1_v2'),
        JMESPathCheck('dnsPrefix', dns_name_prefix),
        JMESPathCheck('provisioningState', 'Succeeded'),
        JMESPathCheck('kubernetesVersion', '1.7.7')
    ])

    # upgrade in place to a newer kubernetes version
    target_version = '1.8.1'
    upgrade_template = 'aks upgrade -g {} -n {} --kubernetes-version {} --yes'
    self.cmd(upgrade_template.format(resource_group, cluster_name, target_version),
             checks=[JMESPathCheck('provisioningState', 'Succeeded')])

    # the reported version must now reflect the upgrade
    self.cmd('aks show -g {} -n {}'.format(resource_group, cluster_name),
             checks=[JMESPathCheck('kubernetesVersion', '1.8.1')])