# mock_loads is supplied by a mock.patch decorator on the JSON
# deserializer that vm.get_vm_qp uses (the patch target below is an
# assumption; the excerpt omitted the decorator).
@mock.patch('oslo_serialization.jsonutils.loads')
def test_get_vm_qp(self, mock_loads):
    self.apt.helpers = ['helper1', pvm_log.log_helper, 'helper3']

    # Defaults: all quick properties, with error logging left enabled.
    self.assertEqual(mock_loads.return_value,
                     vm.get_vm_qp(self.apt, 'lpar_uuid'))
    self.apt.read.assert_called_once_with(
        'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
        suffix_parm=None)
    mock_loads.assert_called_once_with(self.apt.read.return_value.body)

    self.apt.read.reset_mock()
    mock_loads.reset_mock()

    # Specific qprop, no logging of errors: the log helper must be
    # stripped from the helpers passed to the read.
    self.assertEqual(
        mock_loads.return_value,
        vm.get_vm_qp(self.apt, 'lpar_uuid', qprop='Prop',
                     log_errors=False))
    self.apt.read.assert_called_once_with(
        'LogicalPartition', root_id='lpar_uuid', suffix_type='quick',
        suffix_parm='Prop', helpers=['helper1', 'helper3'])

    # A 404 from the REST API translates to InstanceNotFound.
    resp = mock.MagicMock()
    resp.status = 404
    self.apt.read.side_effect = pvm_exc.HttpError(resp)
    self.assertRaises(exception.InstanceNotFound, vm.get_vm_qp, self.apt,
                      'lpar_uuid', log_errors=False)

    # Any other pypowervm error (no response, or a non-404 status) is
    # re-raised unchanged.
    self.apt.read.side_effect = pvm_exc.Error("message", response=None)
    self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt, 'lpar_uuid',
                      log_errors=False)

    resp.status = 500
    self.apt.read.side_effect = pvm_exc.Error("message", response=resp)
    self.assertRaises(pvm_exc.Error, vm.get_vm_qp, self.apt, 'lpar_uuid',
                      log_errors=False)
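# For reference, a minimal sketch of the contract the test above
# exercises -- NOT the project's actual implementation, which lives in
# nova_powervm.virt.powervm.vm.  The 'LogicalPartition' schema type,
# the helper stripping, and the 404-to-InstanceNotFound translation
# mirror the assertions above; everything else is an assumption.  The
# imports mirror what the test module already uses.
from oslo_serialization import jsonutils
from pypowervm import exceptions as pvm_exc
from pypowervm.helpers import log_helper as pvm_log

from nova import exception


def _get_vm_qp_sketch(adapter, lpar_uuid, qprop=None, log_errors=True):
    kwds = dict(root_id=lpar_uuid, suffix_type='quick', suffix_parm=qprop)
    if not log_errors:
        # Drop the pypowervm log helper so expected failures (e.g.
        # probing for a deleted LPAR) are not logged as errors.
        kwds['helpers'] = [h for h in adapter.helpers
                           if h is not pvm_log.log_helper]
    try:
        resp = adapter.read('LogicalPartition', **kwds)
    except pvm_exc.HttpError as e:
        # Only a 404 maps to InstanceNotFound; anything else (including
        # a pvm_exc.Error carrying no response) propagates unchanged.
        if e.response is not None and e.response.status == 404:
            raise exception.InstanceNotFound(instance_id=lpar_uuid)
        raise
    return jsonutils.loads(resp.body)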
def _detach_vol_for_vio(self, vios_w):
    """Removes the volume from a specific Virtual I/O Server.

    :param vios_w: The VIOS wrapper.
    :return: True if a remove action was done against this VIOS.  False
             otherwise.
    """
    LOG.debug("Detach volume %(vol)s from vios %(vios)s",
              dict(vol=self.volume_id, vios=vios_w.name),
              instance=self.instance)
    device_name = None
    udid = self._get_udid()
    try:
        if udid:
            # This will only work if vios_w has the Storage XAG.
            device_name = vios_w.hdisk_from_uuid(udid)

        if not udid or not device_name:
            # We lost our BDM data.  We'll need to discover it.
            status, device_name, udid = self._discover_volume_on_vios(
                vios_w)

            # Check if the hdisk is in a bad state in the I/O Server.
            # Subsequent scrub code on future deploys will clean this up.
            if not hdisk.good_discovery(status, device_name):
                LOG.warning(
                    "Detach Volume: The backing hdisk for volume "
                    "%(volume_id)s on Virtual I/O Server %(vios)s is "
                    "not in a valid state.  This may be the result of "
                    "an evacuate.",
                    {'volume_id': self.volume_id, 'vios': vios_w.name},
                    instance=self.instance)
                return False

    except Exception:
        LOG.exception(
            "Detach Volume: Failed to find disk on Virtual I/O "
            "Server %(vios_name)s for volume %(volume_id)s. Volume "
            "UDID: %(volume_uid)s.",
            {'vios_name': vios_w.name, 'volume_id': self.volume_id,
             'volume_uid': udid}, instance=self.instance)
        return False

    # We have found the device name.
    LOG.info("Detach Volume: Discovered the device %(hdisk)s "
             "on Virtual I/O Server %(vios_name)s for volume "
             "%(volume_id)s. Volume UDID: %(volume_uid)s.",
             {'hdisk': device_name, 'vios_name': vios_w.name,
              'volume_id': self.volume_id, 'volume_uid': udid},
             instance=self.instance)

    # Add the action to remove the mapping when the stg_ftsk is run.
    partition_id = vm.get_vm_qp(self.adapter, self.vm_uuid,
                                qprop='PartitionID')

    with lockutils.lock(self.volume_id):
        self._add_remove_mapping(partition_id, vios_w.uuid, device_name)

        # Add a step to also remove the hdisk.
        self._add_remove_hdisk(vios_w, device_name)

    # Found a valid element to remove.
    return True
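# Hypothetical usage sketch (not the driver's actual detach flow): a
# caller would typically run _detach_vol_for_vio against every Virtual
# I/O Server in the storage FeedTask's feed and treat "no VIOS removed
# anything" as a failed detach.  `self.stg_ftsk.feed` yielding VIOS
# wrappers, and LOG/self.* coming from the surrounding class, are
# assumptions for illustration.
def _detach_volume_sketch(self):
    removed = False
    for vios_w in self.stg_ftsk.feed:
        # Each VIOS that held a mapping queues its remove actions.
        removed |= self._detach_vol_for_vio(vios_w)
    if not removed:
        LOG.warning("Detach Volume: no mapping found for volume "
                    "%(volume_id)s on any Virtual I/O Server.",
                    {'volume_id': self.volume_id}, instance=self.instance)
    return removed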