def test_container_create_with_acr(self, resource_group,
                                       resource_group_location):
        """Create a container group from an ACR image and verify the
        registry credentials recorded on the group match the ACR server."""
        group_name = self.create_random_name('clicontainer', 16)
        acr_user = '******'
        acr_server = '{}.azurecr.io'.format(acr_user)
        acr_image = '{}/nginx:latest'.format(acr_server)
        acr_password = '******'

        create_cmd = 'container create -g {} -n {} --image {} --registry-password {}'.format(
            resource_group, group_name, acr_image, acr_password)

        expected = [
            JMESPathCheck('name', group_name),
            JMESPathCheck('location', resource_group_location),
            JMESPathCheck('provisioningState', 'Succeeded'),
            JMESPathCheck('osType', 'Linux'),
            JMESPathCheck('containers[0].image', acr_image),
            JMESPathCheck('imageRegistryCredentials[0].server', acr_server),
            JMESPathCheck('imageRegistryCredentials[0].username', acr_user),
            JMESPathCheckExists('containers[0].resources.requests.cpu'),
            JMESPathCheckExists('containers[0].resources.requests.memoryInGb'),
        ]
        self.cmd(create_cmd, checks=expected)
# ---- Example n. 2 (scraped sample-site separator) ----
    def test_container_azure_file_volume_mount(self, resource_group,
                                               resource_group_location):
        """Create a container group with an Azure Files volume and verify
        both the volume definition and the container mount settings."""
        group_name = self.create_random_name('clicontainer', 16)
        share_name = 'testshare'
        account_name = 'ccondemostore1'
        account_key = 'mockstorageaccountkey'
        mount_path = '/mnt/azfile'

        create_cmd = ('container create -g {} -n {} --image nginx --azure-file-volume-share-name {} '
                      '--azure-file-volume-account-name {} --azure-file-volume-account-key {} '
                      '--azure-file-volume-mount-path {}').format(
                          resource_group, group_name, share_name,
                          account_name, account_key, mount_path)

        expected = [
            JMESPathCheck('name', group_name),
            JMESPathCheck('location', resource_group_location),
            JMESPathCheck('provisioningState', 'Succeeded'),
            JMESPathCheck('osType', 'Linux'),
            JMESPathCheckExists('volumes'),
            JMESPathCheckExists('volumes[0].azureFile'),
            JMESPathCheck('volumes[0].azureFile.shareName', share_name),
            JMESPathCheck('volumes[0].azureFile.storageAccountName', account_name),
            JMESPathCheckExists('containers[0].volumeMounts'),
            JMESPathCheck('containers[0].volumeMounts[0].mountPath', mount_path),
        ]
        self.cmd(create_cmd, checks=expected)
 def sf_test_good_cluster_health(self):
     """Verify a healthy cluster reports aggregated, application and node health states."""
     expected = [
         JMESPathCheckExists("aggregatedHealthState"),
         JMESPathCheck("applicationHealthStates[0].name", "fabric:/System"),
         JMESPathCheckExists("nodeHealthStates"),
     ]
     self.cmd("az sf cluster health", checks=expected)
 def sf_test_good_system_service_health(self):
     """Verify the ClusterManagerService system service reports health details."""
     health_cmd = ("az sf service health --service-id "
                   "System/ClusterManagerService")
     self.cmd(health_cmd, checks=[
         JMESPathCheck("name", "fabric:/System/ClusterManagerService"),
         JMESPathCheckExists("partitionHealthStates"),
         JMESPathCheckExists("healthEvents"),
     ])
 def sf_test_good_system_partition_health(self):
     """Verify a well-known system partition reports replica health and events."""
     partition_id = "00000000-0000-0000-0000-000000000001"
     self.cmd("az sf partition health --partition-id " + partition_id,
              checks=[
                  JMESPathCheck("partitionId", partition_id),
                  JMESPathCheckExists("replicaHealthStates"),
                  JMESPathCheckExists("healthEvents"),
              ])
# ---- Example n. 6 (scraped sample-site separator) ----
    def node_list_returns_non_empty_ids_and_names_test(self,
                                                       mock_config_parser):
        """List cluster nodes and check each entry exposes an id and a name."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        self.cmd("az sf node list", checks=[
            JMESPathCheckExists("items[0].id.id"),
            JMESPathCheckExists("items[0].name"),
        ])
# ---- Example n. 7 (scraped sample-site separator) ----
    def cluster_health_returns_aggregated_states_test(self,
                                                      mock_config_parser):
        """Cluster health should include aggregated, application and node states."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        self.cmd("az sf cluster health", checks=[
            JMESPathCheckExists("aggregatedHealthState"),
            JMESPathCheck("applicationHealthStates[0].name", "fabric:/System"),
            JMESPathCheckExists("nodeHealthStates"),
        ])
# ---- Example n. 8 (scraped sample-site separator) ----
    def node_system_type_list_returns_non_empty_list_test(
            self, mock_config_parser):
        """Listing service types of the System app on a node returns entries."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        list_cmd = ("az sf node service-type-list --application-id System "
                    "--node-name {0}").format(test_node_name)
        self.cmd(list_cmd, checks=[
            JMESPathCheckExists("[0].codePackageName"),
            JMESPathCheckExists("[0].serviceTypeName"),
        ])
# ---- Example n. 9 (scraped sample-site separator) ----
    def verify_blob_upload_and_download(self, group, account, file_size_kb, blob_type,
                                        skip_download=False):
        """Round-trip a random ``file_size_kb``-KB file through blob storage.

        Uploads a local temp file to a fresh container as ``blob_type``,
        verifies existence and size via ``storage blob`` commands, and
        (unless ``skip_download``) downloads it back and checks the byte size.
        """
        container = self.create_random_name(prefix='cont', length=24)
        local_dir = self.create_temp_dir()
        local_file = self.create_temp_file(file_size_kb, full_random=True)
        blob_name = self.create_random_name(prefix='blob', length=24)
        # -otsv keeps the key free of JSON quoting in the captured output.
        account_key = self.cmd('storage account keys list -n {} -g {} --query "[0].value" -otsv'
                               .format(account, group)).output

        # Subsequent `storage` commands authenticate through these env vars.
        self.set_env('AZURE_STORAGE_ACCOUNT', account)
        self.set_env('AZURE_STORAGE_KEY', account_key)

        self.cmd('storage container create -n {}'.format(container))

        self.cmd('storage blob exists -n {} -c {}'.format(blob_name, container),
                 checks=JMESPathCheck('exists', False))

        self.cmd('storage blob upload -c {} -f "{}" -n {} --type {}'
                 .format(container, local_file, blob_name, blob_type))

        self.cmd('storage blob exists -n {} -c {}'.format(blob_name, container),
                 checks=JMESPathCheck('exists', True))

        # Only page blobs expose pageRanges; other blob types report None.
        self.cmd('storage blob show -n {} -c {}'.format(blob_name, container),
                 checks=[JMESPathCheck('properties.contentLength', file_size_kb * 1024),
                         JMESPathCheckExists('properties.pageRanges') if blob_type == 'page' else
                         JMESPathCheck('properties.pageRanges', None)])

        if not skip_download:
            downloaded = os.path.join(local_dir, 'test.file')
            self.cmd('storage blob download -n {} -c {} --file "{}"'
                     .format(blob_name, container, downloaded))
            self.assertTrue(os.path.isfile(downloaded), 'The file is not downloaded.')
            self.assertEqual(file_size_kb * 1024, os.stat(downloaded).st_size,
                             'The download file size is not right.')
    def test_storage_blob_sas_permission_scenario(self, resource_group,
                                                  storage_account):
        """
        Test service SAS with stored access policy.

        A stored access policy is defined on a resource container, which can be a blob container, table, queue,
        or file share. The stored access policy can be used to manage constraints for one or more service shared
        access signatures. When you associate a service SAS with a stored access policy, the SAS inherits the
        constraints—the start time, expiry time, and permissions—defined for the stored access policy.
        """
        expiry = (datetime.utcnow() +
                  timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')

        account_info = self.get_account_info(resource_group, storage_account)
        container = self.create_container(account_info)
        local_file = self.create_temp_file(128, full_random=False)
        blob_name = self.create_random_name('blob', 16)
        policy = self.create_random_name('policy', 16)

        self.storage_cmd(
            'storage container policy create -c {} -n {} --expiry {} --permissions acdlrw',
            account_info, container, policy, expiry)
        self.storage_cmd('storage container policy list -c {} ', account_info, container)\
            .assert_with_checks(JMESPathCheckExists('{}.expiry'.format(policy)),
                                JMESPathCheck('{}.permission'.format(policy), 'racwdl'))
        # Fix: dropped the spurious trailing `expiry` argument the original
        # passed here — the command has only two placeholders, so str.format
        # silently ignored it.
        self.storage_cmd('storage container policy show -c {} -n {} ', account_info, container, policy)\
            .assert_with_checks(JMESPathCheckExists('expiry'),
                                JMESPathCheck('permission', 'racwdl'))

        # A SAS generated against the policy inherits its expiry/permissions.
        sas = self.storage_cmd(
            'storage blob generate-sas -n {} -c {} --policy-name {} -otsv ',
            account_info, blob_name, container, policy).output.strip()

        self.storage_cmd(
            'storage blob upload -n {} -c {} -f "{}" --sas-token "{}" ',
            account_info, blob_name, container, local_file, sas)

        # Updating the policy changes what every associated SAS permits.
        self.storage_cmd(
            'storage container policy update -c {} -n {} --permissions acdlr',
            account_info, container, policy)
        self.storage_cmd('storage container policy show -c {} -n {} ', account_info, container, policy)\
            .assert_with_checks(JMESPathCheckExists('expiry'),
                                JMESPathCheck('permission', 'racdl'))
        self.storage_cmd('storage container policy delete -c {} -n {} ',
                         account_info, container, policy)
        self.storage_cmd('storage container policy list -c {} ', account_info, container) \
            .assert_with_checks(NoneCheck())
# ---- Example n. 11 (scraped sample-site separator) ----
    def service_list_returns_system_services_test(self, mock_config_parser):
        """Service listing for the System application should return items."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        self.cmd("az sf service list --application-id System",
                 checks=[JMESPathCheckExists("items")])
# ---- Example n. 12 (scraped sample-site separator) ----
    def node_load_returns_non_empty_load_metrics(self, mock_config_parser):
        """Node load query should include load metric information."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        load_cmd = "az sf node load --node-name {0}".format(test_node_name)
        self.cmd(load_cmd,
                 checks=[JMESPathCheckExists("nodeLoadMetricInformation")])
 def sf_test_good_system_partition_info(self):
     """Query a well-known system partition and verify its identity fields."""
     partition_id = "00000000-0000-0000-0000-000000000001"
     self.cmd("az sf partition info --partition-id " + partition_id,
              checks=[
                  JMESPathCheckExists("ServiceKind"),
                  JMESPathCheck("partitionInformation.id", partition_id),
              ])
# ---- Example n. 14 (scraped sample-site separator) ----
    def cluster_code_version_returns_not_empty_test(self, mock_config_parser):
        """Cluster code-version listing should contain at least one version."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        self.cmd("az sf cluster code-version",
                 checks=[JMESPathCheckExists("[0].codeVersion")])
# ---- Example n. 15 (scraped sample-site separator) ----
    def test_batchai_cluster_with_nfs_and_azure_file_share(
            self, resource_group, storage_account):
        """Tests creation of a cluster with file server and Azure file share.

        1. Create a file server and verify parameters.
        2. Create a cluster and verify parameters.
        3. Verify that cluster was able to start nodes.
        """
        self._configure_environment(resource_group, storage_account)
        # Create a file share 'share' to be mounted on the cluster
        self.cmd('az storage share create -n share')

        self.cmd('batchai file-server create -n nfs -g {0} -c {1}'.format(
            resource_group, _data_file('file_server.json')),
                 checks=[
                     JMESPathCheck('name', 'nfs'),
                     JMESPathCheck('mountSettings.mountPoint', '/mnt/data'),
                     JMESPathCheck('dataDisks.diskCount', 2),
                     JMESPathCheck('dataDisks.diskSizeInGb', 10)
                 ])
        # Secrets (account key, admin password) must not be echoed back in
        # the response, hence the explicit None checks below.
        self.cmd(
            'batchai cluster create -n cluster -g {0} -c {1} --nfs nfs --afs-name share -u alex -k {2}'
            .format(resource_group, _data_file('simple_cluster.json'),
                    _data_file('key.txt')),
            checks=[
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].accountName',
                    storage_account),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].azureFileUrl',
                    'https://{0}.file.core.windows.net/share'.format(
                        storage_account)),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].relativeMountPath',
                    'afs'),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].credentialsInfo.accountKey',
                    None),
                JMESPathCheck('userAccountSettings.adminUserName', 'alex'),
                JMESPathCheck('userAccountSettings.adminUserPassword', None),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.fileServers[0].relativeMountPath',
                    'nfs')
            ])

        # Give file server and cluster time to finish preparation.
        time.sleep(NODE_STARTUP_TIME * 2)

        # Check the node in the cluster successfully started - was able to mount nfs and azure filesystem.
        self.cmd(
            'batchai cluster show -n cluster -g {0}'.format(resource_group),
            checks=[JMESPathCheck('nodeStateCounts.idleNodeCount', 1)])

        # Check the file server reports information about public ip.
        self.cmd(
            'batchai file-server show -n nfs -g {0}'.format(resource_group),
            checks=[JMESPathCheckExists('mountSettings.fileServerPublicIp')])
# ---- Example n. 16 (scraped sample-site separator) ----
    def test_sqlvm_add_and_remove(self, resource_group, resource_group_location, domainvm, sqlvm1, storage_account):
        """Create a SQL VM group, add a SQL VM to it, then remove it.

        Prepares a domain account on the domain controller, domain-joins the
        VM via a quickstart template, creates the group, then exercises
        `sql vm add-to-group` / `remove-from-group`.
        """
        add_account_script = '\"Set-AdUser -UserPrincipalName [email protected] -Identity admin123 -PasswordNeverExpires $true\"'

        # add account to domain controller
        self.cmd('vm run-command invoke -n {} -g {} --command-id RunPowerShellScript --scripts {}'
                 .format('adVM', resource_group, add_account_script))

        parameters_string = ('location={} domainJoinUserName=domain\\\\admin123 domainJoinUserPassword=SecretPassword123 '
                             'domainFQDN=domain.com vmList={}').format(resource_group_location, sqlvm1)

        # join vms to the domain
        self.cmd('group deployment create --name {} -g {} --template-uri {} --parameters {}'
                 .format('joinvms',
                         resource_group,
                         'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/201-vm-domain-join-existing/azuredeploy.json',
                         parameters_string))

        # Create the sqlvm group
        sa = self.cmd('storage account show -n {} -g {}'
                      .format(storage_account, resource_group)).get_output_in_json()

        key = self.cmd('storage account keys list -n {} -g {}'
                       .format(storage_account, resource_group)).get_output_in_json()

        sqlvmgroup = self.cmd('sql vm group create -n {} -g {} -l {} -i {} -s {} -f {} -p {} -k {} -e {} -u {} --bootstrap-acc {}'
                              .format('cligroup', resource_group, resource_group_location, 'SQL2017-WS2016', 'Enterprise',
                                      'domain.com', 'admin123', key[0]['value'], 'admin123', sa['primaryEndpoints']['blob'], 'admin123')).get_output_in_json()

        # test create sqlvm1
        self.cmd('sql vm create -n {} -g {} -l {} --connectivity-type {} --port {} --sql-auth-update-pwd {} --sql-auth-update-username {}'
                 .format(sqlvm1, resource_group, resource_group_location, 'PRIVATE', 1433, 'admin123', 'SecretPassword123'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location)
                 ]).get_output_in_json()

        # Add the VM to the group; the response should reference the group id.
        self.cmd('sql vm add-to-group -n {} -g {} -r {} -p {} -s {} -b {}'
                 .format(sqlvm1, resource_group, sqlvmgroup['id'], 'SecretPassword123', 'SecretPassword123', 'SecretPassword123'),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded"),
                     JMESPathCheckExists('sqlVirtualMachineGroupResourceId')
                 ])

        # Remove from group
        self.cmd('sql vm remove-from-group -n {} -g {}'
                 .format(sqlvm1, resource_group),
                 checks=[
                     JMESPathCheck('name', sqlvm1),
                     JMESPathCheck('location', resource_group_location),
                     JMESPathCheck('provisioningState', "Succeeded")
                 ])
# ---- Example n. 17 (scraped sample-site separator) ----
    def app_health_returns_aggregated_and_name_test(self, mock_config_parser):
        """Application health for System should return its name and an aggregated state."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        self.cmd("az sf application health --application-id \"System\"",
                 checks=[
                     JMESPathCheck("name", "fabric:/System"),
                     JMESPathCheckExists("aggregatedHealthState"),
                 ])
# ---- Example n. 18 (scraped sample-site separator) ----
    def system_partition_health_returns_aggregated_state_test(
            self, mock_config_parser):
        """Partition health for a known system partition has an aggregated state."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        partition_id = "00000000-0000-0000-0000-000000000001"
        self.cmd("az sf partition health --partition-id " + partition_id,
                 checks=[
                     JMESPathCheckExists("aggregatedHealthState"),
                     JMESPathCheck("partitionId", partition_id),
                 ])
# ---- Example n. 19 (scraped sample-site separator) ----
    def service_health_returns_system_services_aggregated_state_test(
            self, mock_config_parser):
        """Service health for FailoverManagerService reports its name and state."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        health_cmd = "az sf service health --service-id System/FailoverManagerService"
        self.cmd(health_cmd, checks=[
            JMESPathCheckExists("aggregatedHealthState"),
            JMESPathCheck("name", "fabric:/System/FailoverManagerService"),
        ])
# ---- Example n. 20 (scraped sample-site separator) ----
    def test_aks_create_with_upgrade(self, resource_group,
                                     resource_group_location):
        """Create an AKS cluster at an old Kubernetes version, then upgrade it.

        Verifies provisioning succeeds at both versions and that `aks show`
        reflects the requested version before and after the upgrade.
        """
        # The region is deliberately overridden (the resource group's own
        # location is not used); the dead first assignment was removed.
        loc = 'westus2'
        ssh_pubkey_file = self.generate_ssh_keys()
        aks_name = self.create_random_name('cliakstest', 16)
        dns_prefix = self.create_random_name('cliaksdns', 16)
        original_k8s_version = '1.7.7'

        # create
        ssh_pubkey_file = ssh_pubkey_file.replace('\\', '\\\\')
        create_cmd = 'aks create -g {} -n {} --dns-name-prefix {} --ssh-key-value {} --kubernetes-version {} -l {}'
        self.cmd(create_cmd.format(resource_group, aks_name, dns_prefix,
                                   ssh_pubkey_file, original_k8s_version, loc),
                 checks=[
                     JMESPathCheckExists('properties.fqdn'),
                     JMESPathCheck('properties.provisioningState', 'Succeeded')
                 ])

        # show
        self.cmd('aks show -g {} -n {}'.format(resource_group, aks_name),
                 checks=[
                     JMESPathCheck(
                         'type', 'Microsoft.ContainerService/ManagedClusters'),
                     JMESPathCheck('name', aks_name),
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('properties.agentPoolProfiles[0].count', 3),
                     JMESPathCheck('properties.agentPoolProfiles[0].vmSize',
                                   'Standard_D2_v2'),
                     JMESPathCheck('properties.dnsPrefix', dns_prefix),
                     JMESPathCheck('properties.provisioningState',
                                   'Succeeded'),
                     # Use the variable rather than repeating the literal so
                     # the version only needs changing in one place.
                     JMESPathCheck('properties.kubernetesVersion',
                                   original_k8s_version)
                 ])

        # upgrade
        new_k8s_version = '1.8.1'
        upgrade_cmd = 'aks upgrade -g {} -n {} --kubernetes-version {} --yes'
        self.cmd(upgrade_cmd.format(resource_group, aks_name, new_k8s_version),
                 checks=[
                     JMESPathCheck('properties.provisioningState', 'Succeeded')
                 ])

        # show again: the cluster must now report the upgraded version.
        self.cmd(
            'aks show -g {} -n {}'.format(resource_group, aks_name),
            checks=[JMESPathCheck('properties.kubernetesVersion',
                                  new_k8s_version)])
# ---- Example n. 21 (scraped sample-site separator) ----
    def replica_health_system_service_returns_aggregated_state_test(
            self, mock_config_parser):
        """Replica health should echo both ids and include an aggregated state."""
        # Point the mocked CLI config at the test endpoint, no TLS material.
        cfg = mock_config_parser.return_value
        cfg.no_verify_setting.return_value = False
        cfg.ca_cert_info.return_value = False
        cfg.connection_endpoint.return_value = test_endpoint
        cfg.cert_info.return_value = False

        health_cmd = ("az sf replica health --replica-id {0} "
                      "--partition-id {1}").format(test_replica_id,
                                                   test_partition_id)
        self.cmd(health_cmd, checks=[
            JMESPathCheck("partitionId", test_partition_id),
            JMESPathCheck("replicaId", test_replica_id),
            JMESPathCheckExists("aggregatedHealthState"),
        ])
# ---- Example n. 22 (scraped sample-site separator) ----
    def test_batchai_configless_cluster_and_nfs_creation(
            self, resource_group, storage_account):
        """Create an NFS file server and a cluster purely from CLI options,
        then verify mount settings and that the node starts."""

        # Test creation of a cluster and nfs without configuration files.
        self._configure_environment(resource_group, storage_account)
        self.cmd('az storage share create -n share')
        self.cmd(
            'az batchai file-server create -n nfs -g {0} -l eastus --vm-size STANDARD_D1 --storage-sku '
            'Standard_LRS --disk-count 2 --disk-size 10 -u alex -p Password_123'
            .format(resource_group))
        # Secrets (account key, admin password) must not be echoed back in
        # the response, hence the explicit None checks below.
        self.cmd(
            'az batchai cluster create -n cluster -g {0} -l eastus --afs-name share --nfs nfs '
            '-i UbuntuLTS --vm-size STANDARD_D1 --min 1 --max 1 -u alex -p Password_123'
            .format(resource_group),
            checks=[
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].accountName',
                    storage_account),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].azureFileUrl',
                    'https://{0}.file.core.windows.net/share'.format(
                        storage_account)),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].relativeMountPath',
                    'afs'),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.azureFileShares[0].credentialsInfo.accountKey',
                    None),
                JMESPathCheck('userAccountSettings.adminUserName', 'alex'),
                JMESPathCheck('userAccountSettings.adminUserPassword', None),
                JMESPathCheck(
                    'nodeSetup.mountVolumes.fileServers[0].relativeMountPath',
                    'nfs')
            ])

        # Give file server and cluster time to finish preparation.
        time.sleep(NODE_STARTUP_TIME * 2)

        # Check the node in the cluster successfully started - was able to mount nfs and azure filesystem.
        self.cmd(
            'batchai cluster show -n cluster -g {0}'.format(resource_group),
            checks=[JMESPathCheck('nodeStateCounts.idleNodeCount', 1)])

        # Check the file server reports information about public ip.
        self.cmd(
            'batchai file-server show -n nfs -g {0}'.format(resource_group),
            checks=[JMESPathCheckExists('mountSettings.fileServerPublicIp')])
    def test_storage_share_rm_with_access_tier(self):
        """Exercise `storage share-rm` create/show/update with access tiers.

        Creates a share without a tier (service default applies), updates it
        to Hot, then creates a second share directly with the Hot tier.
        """
        self.kwargs.update({
            'share': self.create_random_name('share', 24),
            'new_share': self.create_random_name('share', 24)
        })

        # Create without an explicit access tier: the create response does
        # not carry one. (checks passed as lists, not set literals, so the
        # check objects need not be hashable and ordering is deterministic.)
        self.cmd(
            'storage share-rm create --storage-account {sa} -g {rg} -n {share}',
            checks=[
                JMESPathCheck('name', self.kwargs['share']),
                JMESPathCheck('accessTier', None)
            ])

        # `show` surfaces the service-side default tier.
        self.cmd(
            'storage share-rm show --storage-account {sa} -g {rg} -n {share}',
            checks=[
                JMESPathCheck('name', self.kwargs['share']),
                JMESPathCheck('accessTier', 'TransactionOptimized')
            ])

        self.cmd(
            'storage share-rm update --storage-account {sa} -g {rg} -n {share} --access-tier Hot',
            checks=[
                JMESPathCheck('name', self.kwargs['share']),
                JMESPathCheck('accessTier', 'Hot')
            ])

        self.cmd(
            'storage share-rm show --storage-account {sa} -g {rg} -n {share}',
            checks=[
                JMESPathCheck('name', self.kwargs['share']),
                JMESPathCheck('accessTier', 'Hot'),
                JMESPathCheckExists('accessTierChangeTime')
            ])

        # Create with access tier.
        # Fix: the original asserted '[0].name' against the *old* share name;
        # `create` returns a single object for the share just created, so the
        # check must be 'name' against {new_share}.
        self.cmd(
            'storage share-rm create --storage-account {sa} -g {rg} -n {new_share} --access-tier Hot',
            checks=[
                JMESPathCheck('name', self.kwargs['new_share']),
                JMESPathCheck('accessTier', 'Hot')
            ])
# ---- Example n. 24 (scraped sample-site separator) ----
    def test_aks_create_default_service(self, resource_group,
                                        resource_group_location):
        """The simplest `aks create` scenario: create, show, scale up, show."""
        # The region is deliberately overridden (the resource group's own
        # location is not used); the dead first assignment was removed.
        loc = 'westus2'
        ssh_pubkey_file = self.generate_ssh_keys()
        aks_name = self.create_random_name('cliakstest', 16)
        dns_prefix = self.create_random_name('cliaksdns', 16)

        # create
        ssh_pubkey_file = ssh_pubkey_file.replace('\\', '\\\\')
        create_cmd = 'aks create -g {} -n {} --dns-name-prefix {} --ssh-key-value {} -l {}'
        self.cmd(create_cmd.format(resource_group, aks_name, dns_prefix,
                                   ssh_pubkey_file, loc),
                 checks=[
                     JMESPathCheckExists('properties.fqdn'),
                     JMESPathCheck('properties.provisioningState', 'Succeeded')
                 ])

        # show
        self.cmd('aks show -g {} -n {}'.format(resource_group, aks_name),
                 checks=[
                     JMESPathCheck(
                         'type', 'Microsoft.ContainerService/ManagedClusters'),
                     JMESPathCheck('name', aks_name),
                     JMESPathCheck('resourceGroup', resource_group),
                     JMESPathCheck('properties.agentPoolProfiles[0].count', 3),
                     JMESPathCheck('properties.agentPoolProfiles[0].vmSize',
                                   'Standard_D2_v2'),
                     JMESPathCheck('properties.dnsPrefix', dns_prefix)
                 ])

        # scale-up
        self.cmd(
            'aks scale -g {} -n {} --agent-count 5'.format(
                resource_group, aks_name),
            checks=[JMESPathCheck('properties.agentPoolProfiles[0].count', 5)])

        # show again: the new agent count must persist.
        self.cmd(
            'aks show -g {} -n {}'.format(resource_group, aks_name),
            checks=[JMESPathCheck('properties.agentPoolProfiles[0].count', 5)])
# ---- Example n. 25 (scraped sample-site separator) ----
    def sf_test_application_lifecycle(self):
        """End-to-end Service Fabric application lifecycle:
        upload -> provision -> create -> query/health -> delete -> unprovision.
        """
        self.cmd("az sf application upload --path {}".format(
            self.package_path))

        self.cmd("az sf application provision "
                 "--application-type-build-path {}".format(self.package_name))

        # The provisioned type must be visible with the expected version.
        self.cmd("az sf application type",
                 checks=[
                     JMESPathCheck("items[0].name",
                                   self.application_type_name),
                     JMESPathCheck("items[0].version",
                                   self.application_type_version)
                 ])

        self.cmd("az sf application create "
                 "--app-type {} --version {} --name {}".format(
                     self.application_type_name, self.application_type_version,
                     self.application_name))

        self.cmd("az sf application list",
                 checks=[JMESPathCheck("items[0].id", self.application_id)])

        self.cmd("az sf application health "
                 "--application-id {}".format(self.application_id),
                 checks=[
                     JMESPathCheck("name", self.application_name),
                     JMESPathCheckExists("aggregatedHealthState")
                 ])

        # Tear down: delete the instance, then unprovision the type.
        self.cmd("az sf application delete --application-id {}".format(
            self.application_id))

        self.cmd("az sf application unprovision "
                 "--application-type-name {} "
                 "--application-type-version {}".format(
                     self.application_type_name,
                     self.application_type_version))
# Esempio n. 26 (pasted example-listing marker — not code)
    def test_backup_vault(self, resource_group, resource_group_location,
                          vault1, vault2):
        """Exercise Recovery Services vault create/list/delete plus the
        backup-properties (storage redundancy) round-trip on vault1.

        vault1/vault2 are pre-created by the test preparers; vault3/vault4
        are created here, and vault4 is deleted at the end.
        """

        self.kwargs.update({
            'loc': resource_group_location,
            'vault1': vault1,
            'vault2': vault2
        })

        # Create two additional vaults in-line; each must provision cleanly.
        for key in ('vault3', 'vault4'):
            self.kwargs[key] = self.create_random_name('clitest-vault', 50)
            self.cmd('backup vault create -n {' + key + '} -g {rg} -l {loc}',
                     checks=[
                         self.check('name', '{' + key + '}'),
                         self.check('resourceGroup', '{rg}'),
                         self.check('location', '{loc}'),
                         self.check('properties.provisioningState',
                                    'Succeeded')
                     ])

        number_of_test_vaults = 4

        # All four vaults must be visible, both subscription-wide (filtered
        # by resource group) and when listing within the group directly.
        presence_checks = [
            self.check("length([?name == '{vault1}'])", 1),
            self.check("length([?name == '{vault2}'])", 1),
            self.check("length([?name == '{vault3}'])", 1),
            self.check("length([?name == '{vault4}'])", 1),
        ]
        self.cmd('backup vault list',
                 checks=[
                     self.check("length([?resourceGroup == '{rg}'])",
                                number_of_test_vaults)
                 ] + presence_checks)
        self.cmd('backup vault list -g {rg}',
                 checks=[self.check("length(@)", number_of_test_vaults)] +
                 presence_checks)

        # The reported storage model must be one of the known StorageType
        # values, and the vault must still be unlocked for changes.
        redundancy_options = [e.value for e in StorageType]
        show_props_cmd = ('backup vault backup-properties show '
                          '-n {vault1} -g {rg} --query [0]')
        membership_query = "contains({}, properties.storageModelType)".format(
            redundancy_options)
        vault_properties = self.cmd(
            show_props_cmd,
            checks=[
                JMESPathCheckExists(membership_query),
                self.check('properties.storageTypeState', 'Unlocked'),
                self.check('resourceGroup', '{rg}')
            ]).get_output_in_json()

        # Flip redundancy to the opposite setting and verify it sticks.
        current_model = vault_properties['properties']['storageModelType']
        if current_model == StorageType.geo_redundant.value:
            new_storage_model = StorageType.locally_redundant.value
        else:
            new_storage_model = StorageType.geo_redundant.value

        self.kwargs['model'] = new_storage_model
        self.cmd(
            'backup vault backup-properties set -n {vault1} -g {rg} --backup-storage-redundancy {model}'
        )
        # The redundancy change is applied asynchronously server-side.
        time.sleep(300)
        self.cmd(show_props_cmd,
                 checks=[
                     self.check('properties.storageModelType',
                                new_storage_model)
                 ])

        # Deleting vault4 should reduce the group's vault count by one.
        self.cmd('backup vault delete -n {vault4} -g {rg} -y')

        self.cmd('backup vault list',
                 checks=[
                     self.check("length([?resourceGroup == '{rg}'])",
                                number_of_test_vaults - 1),
                     self.check("length([?name == '{vault1}'])", 1),
                     self.check("length([?name == '{vault2}'])", 1),
                     self.check("length([?name == '{vault3}'])", 1)
                 ])
# Esempio n. 27 (pasted example-listing marker — not code)
    def test_storage_blob_url_scenarios(self, resource_group, storage_account):
        """End-to-end scenario for blob commands addressed via --blob-url.

        Covers exists/show/upload/download/set-tier/snapshot/delete/undelete
        against a SAS-token blob URL, with soft-delete enabled so the final
        undelete can restore the blob. Steps are strictly order-dependent.
        """
        account_info = self.get_account_info(resource_group, storage_account)
        container = self.create_container(account_info, prefix="con1")

        # Two local payloads of different sizes (128 KiB and 64 KiB) so the
        # overwrite step is observable via contentLength.
        local_file1 = self.create_temp_file(128)
        local_file2 = self.create_temp_file(64)
        blob_name1 = "/".join(
            ["dir", self.create_random_name(prefix='blob', length=24)])

        # set delete-policy to enable soft-delete
        self.storage_cmd(
            'storage blob service-properties delete-policy update --enable true --days-retained 2',
            account_info)
        self.storage_cmd('storage blob service-properties delete-policy show',
                         account_info).assert_with_checks(
                             JMESPathCheck('enabled', True),
                             JMESPathCheck('days', 2))
        # Prepare blob
        self.storage_cmd('storage blob upload -c {} -f "{}" -n {} ',
                         account_info, container, local_file1, blob_name1)

        # Full-permission (rwad) HTTPS-only SAS, valid one hour; --full-uri
        # returns the complete blob URL used by every command below.
        expiry = (datetime.utcnow() +
                  timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
        blob_uri = self.storage_cmd(
            'storage blob generate-sas -n {} -c {} --expiry {} --permissions '
            'rwad --https-only --full-uri -o tsv', account_info, blob_name1,
            container, expiry).output.strip()

        # Blob is reachable through the SAS URL with its initial properties.
        self.cmd('storage blob exists --blob-url {}'.format(blob_uri),
                 checks=JMESPathCheck('exists', True))
        self.cmd('storage blob show --blob-url {}'.format(blob_uri),
                 checks=[
                     JMESPathCheck('name', blob_name1),
                     JMESPathCheck('properties.contentLength', 128 * 1024),
                     JMESPathCheck('properties.blobTier', 'Hot')
                 ])

        # Overwrite with the smaller payload; size must drop to 64 KiB.
        self.cmd(
            'storage blob upload -f "{}" --blob-url {} --overwrite'.format(
                local_file2, blob_uri))

        self.cmd('storage blob show --blob-url {}'.format(blob_uri),
                 checks=[
                     JMESPathCheck('name', blob_name1),
                     JMESPathCheck('properties.contentLength', 64 * 1024)
                 ])
        # Download via the SAS URL and verify the file exists with the
        # post-overwrite size.
        local_dir = self.create_temp_dir()
        downloaded = os.path.join(local_dir, 'test.file')
        self.cmd('storage blob download --blob-url {} -f "{}"'.format(
            blob_uri, downloaded))
        self.assertTrue(os.path.isfile(downloaded),
                        'The file is not downloaded.')
        self.assertEqual(64 * 1024,
                         os.stat(downloaded).st_size,
                         'The download file size is not right.')

        # Tier change through the SAS URL.
        self.cmd(
            'storage blob set-tier --blob-url {} --tier Cool'.format(blob_uri))
        self.cmd('storage blob show --blob-url {}'.format(blob_uri),
                 checks=[
                     JMESPathCheck('name', blob_name1),
                     JMESPathCheck('properties.contentLength', 64 * 1024),
                     JMESPathCheck('properties.blobTier', "Cool")
                 ])

        # Snapshot, then confirm the container still lists one base blob.
        self.cmd('storage blob snapshot --blob-url {}'.format(blob_uri),
                 checks=JMESPathCheckExists('snapshot'))
        self.storage_cmd('storage blob list -c {}', account_info, container)\
            .assert_with_checks(JMESPathCheck('length(@)', 1))

        # Soft-delete the blob together with its snapshots...
        self.cmd(
            'storage blob delete --blob-url {} --delete-snapshots include '.
            format(blob_uri))

        self.storage_cmd('storage blob list -c {}', account_info,
                         container).assert_with_checks(
                             JMESPathCheck('length(@)', 0))

        # ...and undelete it (possible because soft-delete was enabled above).
        self.cmd('storage blob undelete --blob-url {} '.format(blob_uri))
        self.storage_cmd('storage blob list -c {}', account_info,
                         container).assert_with_checks(
                             JMESPathCheck('length(@)', 1))
 def sf_test_good_system_app_health(self):
     self.cmd("az sf application health --application-id \"System\"",
              checks=[
                  JMESPathCheck("name", "fabric:/System"),
                  JMESPathCheckExists("aggregatedHealthState")
              ])
 def sf_test_good_cluster_config_version(self):
     self.cmd("az sf cluster config-version",
              checks=[JMESPathCheckExists("[0].configVersion")])
 def sf_test_good_cluster_manifest(self):
     self.cmd("az sf cluster manifest",
              checks=[JMESPathCheckExists("manifest")])