Example #1
    def test_enable_encryption_error_cases_handling(
            self, mock_get_keyvault_key_url, mock_compute_client_factory):
        faked_keyvault = '/subscriptions/01234567-1bf0-4dda-aec3-cb9272f09590/resourceGroups/rg1/providers/Microsoft.KeyVault/vaults/v1'
        os_disk = OSDisk(None, OperatingSystemTypes.linux)
        existing_disk = DataDisk(lun=1,
                                 vhd='https://someuri',
                                 name='d1',
                                 create_option=DiskCreateOptionTypes.empty)
        vm = FakedVM(None, [existing_disk], os_disk=os_disk)

        compute_client_mock = mock.MagicMock()
        compute_client_mock.virtual_machines.get.return_value = vm
        mock_compute_client_factory.return_value = compute_client_mock

        mock_get_keyvault_key_url.return_value = 'https://somevaults.vault.azure.net/'

        # throw when VM has disks, but no --volume-type is specified
        with self.assertRaises(CLIError) as context:
            enable('rg1', 'vm1', 'client_id', faked_keyvault, 'client_secret')

        self.assertTrue("supply --volume-type" in str(context.exception))

        # throw when no AAD client secrets
        with self.assertRaises(CLIError) as context:
            enable('rg1', 'vm1', 'client_id', faked_keyvault)

        self.assertTrue("--aad-client-id or --aad-client-cert-thumbprint" in
                        str(context.exception))
Example #2
    def test_disable_encryption_error_cases_handling(self, mock_compute_client_factory, mock_vm_set):  # pylint: disable=unused-argument
        os_disk = OSDisk(None, OperatingSystemTypes.linux)
        existing_disk = DataDisk(lun=1, vhd='https://someuri', name='d1', create_option=DiskCreateOptionTypes.empty)
        vm = FakedVM(None, [existing_disk], os_disk=os_disk)
        vm_extension = VirtualMachineExtension('westus',
                                               settings={'SequenceVersion': 1},
                                               instance_view=VirtualMachineExtensionInstanceView(
                                                   statuses=[InstanceViewStatus(message='Encryption completed successfully')],
                                                   substatuses=[InstanceViewStatus(message='{"os":"Encrypted"}')]))
        vm_extension.provisioning_state = 'Succeeded'
        compute_client_mock = mock.MagicMock()
        compute_client_mock.virtual_machines.get.return_value = vm
        compute_client_mock.virtual_machine_extensions.get.return_value = vm_extension
        mock_compute_client_factory.return_value = compute_client_mock

        # throw on disabling encryption on OS disk of a linux VM
        with self.assertRaises(CLIError) as context:
            disable('rg1', 'vm1', 'OS')

        self.assertTrue("Only data disk is supported to disable on Linux VM" in str(context.exception))

        # throw on disabling encryption on data disk, but os disk is also encrypted
        with self.assertRaises(CLIError) as context:
            disable('rg1', 'vm1', 'DATA')

        self.assertTrue("Disabling encryption on data disk can render the VM unbootable" in str(context.exception))

        # works fine to disable encryption on the data disk when the OS disk was never encrypted
        vm_extension.instance_view.substatuses[0].message = '{}'
        disable('rg1', 'vm1', 'DATA')
Example #3
    def test_attach_new_datadisk_custom_on_vm(self, mock_vm_set, mock_vm_get):
        # pylint: disable=line-too-long
        faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd'
        faked_vhd_uri2 = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d2.vhd'

        # stub to get the vm which has no datadisks
        existing_disk = DataDisk(lun=1,
                                 vhd=faked_vhd_uri,
                                 name='d1',
                                 create_option=DiskCreateOptionTypes.empty)
        vm = FakedVM(None, [existing_disk])
        mock_vm_get.return_value = vm

        # execute
        attach_unmanaged_data_disk('rg1', 'vm1', True, faked_vhd_uri2, None,
                                   'd2', 512, CachingTypes.read_write)

        # assert
        self.assertTrue(mock_vm_get.called)
        mock_vm_set.assert_called_once_with(vm)
        self.assertEqual(len(vm.storage_profile.data_disks), 2)
        data_disk = vm.storage_profile.data_disks[1]
        self.assertEqual(CachingTypes.read_write, data_disk.caching)
        self.assertEqual(DiskCreateOptionTypes.empty, data_disk.create_option)
        self.assertIsNone(data_disk.image)
        # the existing disk uses lun 1, so this verifies the new disk is assigned lun 0
        self.assertEqual(data_disk.lun, 0)
        self.assertEqual(data_disk.vhd.uri, faked_vhd_uri2)
Example #4
def _attach_disk(resource_group_name,
                 vm_name,
                 vhd,
                 create_option,
                 lun=None,
                 disk_name=None,
                 caching=None,
                 disk_size=None):
    vm = _vm_get(resource_group_name, vm_name)
    if disk_name is None:
        file_name = vhd.uri.split('/')[-1]
        disk_name = os.path.splitext(file_name)[0]
    # pylint: disable=no-member
    if lun is None:
        lun = _get_disk_lun(vm.storage_profile.data_disks)
    disk = DataDisk(lun=lun,
                    vhd=vhd,
                    name=disk_name,
                    create_option=create_option,
                    caching=caching,
                    disk_size_gb=disk_size)
    if vm.storage_profile.data_disks is None:
        vm.storage_profile.data_disks = []
    vm.storage_profile.data_disks.append(disk)  # pylint: disable=no-member
    return _vm_set(vm)
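
The _get_disk_lun helper called above is not shown in this example. A minimal sketch of one plausible implementation, assuming it simply picks the smallest LUN not already in use (which matches the lun-0 assertion in Example #3), could look like this:

def _get_disk_lun(data_disks):
    # hypothetical helper: return the smallest LUN not taken by an existing data disk
    existing_luns = {d.lun for d in data_disks} if data_disks else set()
    lun = 0
    while lun in existing_luns:
        lun += 1
    return lun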
Example #5
    def disk_attach():
        # Delete VM
        print('Deleting VM and freeing OS disk from ' + orig_vm_name)
        print('OS Disk Location ' + orig_vm_os_disk)
        result = compute_client.virtual_machines.delete(sys.argv[2], orig_vm_name)
        result.wait()
        # Ensures no lingering lease issues
        time.sleep(5)

        # Attach OS disk to temporary VM
        print('Attaching original OS disk to {0}'.format(vm_name))
        result = compute_client.virtual_machines.create_or_update(
            group_name,
            vm_name,
            VirtualMachine(
                location=orig_vm_location,
                storage_profile=StorageProfile(
                    data_disks=[DataDisk(
                        lun=0,
                        caching=CachingTypes.none,
                        create_option=DiskCreateOptionTypes.attach,
                        name=orig_vm_name,
                        vhd=VirtualHardDisk(uri=orig_vm_os_disk))])))
        result.wait()
Example #6
    def test_deattach_disk_on_vm(self, mock_vm_set, mock_vm_get):
        # pylint: disable=line-too-long
        # stub to get the vm which has no datadisks
        faked_vhd_uri = 'https://your_stoage_account_name.blob.core.windows.net/vhds/d1.vhd'
        existing_disk = DataDisk(lun=1, vhd=faked_vhd_uri, name='d1', create_option=DiskCreateOptionTypes.empty)
        vm = FakedVM(None, [existing_disk])
        mock_vm_get.return_value = vm

        # execute
        detach_data_disk('rg1', 'vm1', 'd1')

        # assert
        self.assertTrue(mock_vm_get.called)
        mock_vm_set.assert_called_once_with(vm)
        self.assertEqual(len(vm.storage_profile.data_disks), 0)
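
The detach_data_disk command exercised by this test is not shown. A minimal sketch of a detach helper consistent with the assertions above, assuming it reuses the hypothetical _vm_get/_vm_set helpers from Example #4, might be:

def detach_data_disk(resource_group_name, vm_name, disk_name):
    # sketch only: drop the disk with the matching name and push the updated VM
    vm = _vm_get(resource_group_name, vm_name)
    vm.storage_profile.data_disks = [d for d in vm.storage_profile.data_disks
                                     if d.name != disk_name]
    return _vm_set(vm)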
Example #7
    def list_attached_disks(self, vm_name):
        vm = self.get_vm(vm_name=vm_name, expand="instanceView")

        disks_in_model = vm.storage_profile.data_disks
        disk_names = [d.name for d in disks_in_model]

        # If there's a disk in the instance view that is not in the model,
        # that disk is stuck. Add it to our list since we need to know
        # about stuck disks.
        for disk_instance in vm.instance_view.disks:
            if disk_instance.name not in disk_names:
                disk = DataDisk(lun=-1, name=disk_instance.name)
                disks_in_model.append(disk)

        return disks_in_model
Example #8
    def attach(self, vm_name, disk):
        vm = self._get_vm(vm_name)
        # find the lun
        luns = ([d.lun for d in vm.storage_profile.data_disks]
                if vm.storage_profile.data_disks else [])
        lun = max(luns) + 1 if luns else 0

        # prepare the data disk
        params = ManagedDiskParameters(
            id=disk.get('id'),
            storage_account_type=disk.get('storage_account_type'))
        data_disk = DataDisk(lun=lun,
                             create_option=DiskCreateOptionTypes.attach,
                             managed_disk=params)
        vm.storage_profile.data_disks.append(data_disk)
        self._update_vm(vm_name, vm)
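
A usage sketch for this attach method; the instance name and the disk dictionary below are purely illustrative:

# hypothetical call; the resource id is a placeholder, not a real disk
manager.attach('vm1', {
    'id': '/subscriptions/<sub-id>/resourceGroups/rg1/providers/Microsoft.Compute/disks/d1',
    'storage_account_type': 'Premium_LRS',
})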
Example #9
    def json_parse(self, compute_client):
        """Parses the local .json file for previously attached disks"""
        with open(self.json_path) as fp:
            ingest = json.load(fp)
            dd = []
            for disk in ingest['storageProfile']['dataDisks']:
                a_disk = DataDisk(lun=disk['lun'],
                                  caching=disk['caching'].lower(),
                                  create_option=DiskCreateOptionTypes.attach,
                                  name=disk['name'],
                                  vhd=VirtualHardDisk(uri=disk['vhd']['uri']))
                dd.append(a_disk)
                print(
                    'Attaching data disk {0} with name {1}, waiting until complete...'
                    .format(a_disk.lun, a_disk.name))

        result = compute_client.virtual_machines.create_or_update(
            self.rg_name, self.vm_name,
            VirtualMachine(location=ingest['location'],
                           storage_profile=StorageProfile(data_disks=dd)))
        result.wait()
        print('All disks should be attached now.')
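
The shape of the .json file that json_parse expects can be inferred from the keys it reads; an illustrative example with placeholder values, written here as the equivalent Python dict, would be:

# illustrative structure of the expected .json file (placeholder values only)
ingest_example = {
    'location': 'westus',
    'storageProfile': {
        'dataDisks': [{
            'lun': 0,
            'caching': 'None',
            'name': 'd1',
            'vhd': {'uri': 'https://<account>.blob.core.windows.net/vhds/d1.vhd'},
        }],
    },
}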
Example #10
    def _attach_or_detach_disk(self,
                               vm_name,
                               vhd_name,
                               vhd_size_in_gibs,
                               lun,
                               detach=False,
                               allow_lun_0_detach=False,
                               is_from_retry=False):
        vmcompute = self.get_vm(vm_name)

        if not detach:
            vhd_url = self._storage_client.make_blob_url(
                self._disk_container, vhd_name + ".vhd")
            print("Attach disk name %s lun %s uri %s" %
                  (vhd_name, lun, vhd_url))
            disk = DataDisk(lun=lun,
                            name=vhd_name,
                            vhd=VirtualHardDisk(vhd_url),
                            caching="None",
                            create_option="attach",
                            disk_size_gb=vhd_size_in_gibs)
            vmcompute.storage_profile.data_disks.append(disk)
        else:
            for i in range(len(vmcompute.storage_profile.data_disks)):
                disk = vmcompute.storage_profile.data_disks[i]
                if disk.name == vhd_name:
                    if disk.lun == 0 and not allow_lun_0_detach:
                        # lun-0 is special, throw an exception if attempting
                        # to detach that disk.
                        raise AzureOperationNotAllowed()

                    print("Detach disk name %s lun %s uri %s" %
                          (disk.name, disk.lun, disk.vhd.uri))
                    del vmcompute.storage_profile.data_disks[i]
                    break

        result = self._update_vm(vm_name, vmcompute)
        start = time.time()
        while True:
            time.sleep(2)
            waited_sec = int(abs(time.time() - start))
            if waited_sec > self._async_timeout:
                raise AzureAsynchronousTimeout()

            if not result.done():
                continue

            updated = self.get_vm(vm_name)

            print("Waited for %s s provisioningState is %s" %
                  (waited_sec, updated.provisioning_state))

            if updated.provisioning_state == "Succeeded":
                print("Operation finshed")
                break

            if updated.provisioning_state == "Failed":
                print("Provisioning ended up in failed state.")

                # Recovery from a failed disk attach-detach operation.
                # For Attach Disk: Detach The Disk then Try To Attach Again
                # For Detach Disk: Call update again, which always sets a tag,
                #                  which forces the service to retry.

                # is_from_retry is checked so we do not get stuck in a loop
                # calling ourselves
                if not is_from_retry and not detach:
                    print(
                        "Detach disk %s after failure, then try attach again" %
                        vhd_name)

                    self._attach_or_detach_disk(vm_name,
                                                vhd_name,
                                                vhd_size_in_gibs,
                                                lun,
                                                detach=True,
                                                allow_lun_0_detach=True,
                                                is_from_retry=True)

                print("Retry disk action for disk %s" % vhd_name)
                result = self._update_vm(vm_name, vmcompute)