def test_check_volume_type_for_lun_qos_fail(self, get_specs, get_ssc, get_vols, driver_log):
    # Verify that _check_volume_type_for_lun raises
    # ManageExistingVolumeTypeMismatch when applying the requested QoS
    # policy group to the LUN fails with NaApiError, and that the
    # failure path logs exactly once (driver_log) without consulting
    # the SSC refresh helper (get_ssc stays uncalled).
    # NOTE(review): get_specs/get_ssc/get_vols/driver_log are injected
    # by mock.patch decorators defined outside this chunk.
    self.zapi_client.connection.set_api_version(1, 20)
    self.library.ssc_vols = ['vol']
    # Extra specs include a NetApp QoS policy group, which triggers the
    # set_lun_qos_policy_group call under test.
    get_specs.return_value = {'specs': 's',
                              'netapp:qos_policy_group': 'qos'}
    get_vols.return_value = [
        ssc_cmode.NetAppVolume(name='name', vserver='vs')]
    mock_lun = block_base.NetAppLun('handle', 'name', '1',
                                    {'Volume': 'name', 'Path': '/vol/lun'})
    # Force the QoS assignment to fail at the ZAPI layer.
    self.zapi_client.set_lun_qos_policy_group = mock.Mock(
        side_effect=netapp_api.NaApiError)
    self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                      self.library._check_volume_type_for_lun,
                      {'vol': 'vol'}, mock_lun, {'ref': 'ref'})
    get_specs.assert_called_once_with({'vol': 'vol'})
    # The netapp:qos_policy_group key is stripped before volume matching.
    get_vols.assert_called_with(['vol'], {'specs': 's'})
    self.assertEqual(0, get_ssc.call_count)
    self.zapi_client.set_lun_qos_policy_group.assert_called_once_with(
        '/vol/lun', 'qos')
    self.assertEqual(1, driver_log.call_count)
def _clone_lun(self, name, new_name, space_reserved=None,
               qos_policy_group_name=None, src_block=0, dest_block=0,
               block_count=0, source_snapshot=None):
    """Clone LUN with the given handle to the new name.

    Clones the LUN named *name* to *new_name* on the same containing
    volume, registers the clone in the in-memory LUN table, and marks
    the containing volume stale so cached volume data gets refreshed.

    :param name: name of the existing source LUN.
    :param new_name: name for the cloned LUN.
    :param space_reserved: space reservation setting for the clone;
        defaults to the driver-wide ``lun_space_reservation`` value.
    :param qos_policy_group_name: optional QoS policy group to apply.
    :param src_block: starting block on the source (sub-LUN clone).
    :param dest_block: starting block on the destination.
    :param block_count: number of blocks to clone (0 = whole LUN).
    :param source_snapshot: optional snapshot to clone from.
    :raises exception.VolumeBackendAPIException: if the clone cannot be
        found on the filer after the clone operation completes.
    """
    # Fall back to the driver-wide reservation policy when the caller
    # does not specify one.
    if not space_reserved:
        space_reserved = self.lun_space_reservation
    metadata = self._get_lun_attr(name, 'metadata')
    volume = metadata['Volume']
    self.zapi_client.clone_lun(volume, name, new_name, space_reserved,
                               qos_policy_group_name=qos_policy_group_name,
                               src_block=src_block, dest_block=dest_block,
                               block_count=block_count,
                               source_snapshot=source_snapshot)
    LOG.debug("Cloned LUN with new name %s", new_name)
    # Look the clone up again to obtain its authoritative attributes.
    lun = self.zapi_client.get_lun_by_args(
        vserver=self.vserver,
        path='/vol/%s/%s' % (volume, new_name))
    # Idiomatic emptiness test (was: len(lun) == 0).
    if not lun:
        msg = _("No cloned LUN named %s found on the filer")
        raise exception.VolumeBackendAPIException(data=msg % new_name)
    clone_meta = self._create_lun_meta(lun[0])
    self._add_lun_to_table(
        block_base.NetAppLun('%s:%s' % (clone_meta['Vserver'],
                                        clone_meta['Path']),
                             new_name,
                             lun[0].get_child_content('size'),
                             clone_meta))
    # The containing volume's space usage changed; flag it stale.
    self._update_stale_vols(
        volume=ssc_cmode.NetAppVolume(volume, self.vserver))
def _create_lun(self, volume_name, lun_name, size, metadata,
                qos_policy_group_name=None):
    """Creates a LUN, handling Data ONTAP differences as needed.

    Delegates creation to the ZAPI client, then records the containing
    volume as stale (its free space changed) so cached volume data is
    refreshed.
    """
    self.zapi_client.create_lun(
        volume_name, lun_name, size, metadata, qos_policy_group_name)
    self._update_stale_vols(
        volume=ssc_cmode.NetAppVolume(volume_name, self.vserver))
def delete_snapshot(self, snapshot):
    """Driver entry point for deleting a snapshot."""
    # Capture the containing volume name before the base class removes
    # the LUN table entry for this snapshot.
    lun_entry = self.lun_table.get(snapshot['name'])
    containing_vol = None
    if lun_entry:
        containing_vol = lun_entry.get_metadata_property('Volume')
    super(NetAppBlockStorageCmodeLibrary, self).delete_snapshot(snapshot)
    # Mark the volume stale only when we actually knew which one held
    # the snapshot's backing LUN.
    if containing_vol:
        self._update_stale_vols(
            volume=ssc_cmode.NetAppVolume(containing_vol, self.vserver))
def delete_volume(self, volume):
    """Driver entry point for destroying existing volumes."""
    # Resolve the containing volume name up front, before the base
    # class deletes the LUN and drops its table entry.
    lun_entry = self.lun_table.get(volume['name'])
    containing_vol = (lun_entry.get_metadata_property('Volume')
                      if lun_entry else None)
    super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
    if containing_vol:
        self._update_stale_vols(
            volume=ssc_cmode.NetAppVolume(containing_vol, self.vserver))
def test_check_volume_type_for_lun_fail(self, get_ssc, get_vols):
    # Verify that _check_volume_type_for_lun raises
    # ManageExistingVolumeTypeMismatch when the LUN's metadata names a
    # containing volume ('fake') that is not among the spec-matching
    # volumes returned by get_vols, and that the SSC data is refreshed
    # exactly once (get_ssc called).
    # NOTE(review): get_ssc/get_vols are injected by mock.patch
    # decorators defined outside this chunk.
    self.library.ssc_vols = ['vol']
    fake_extra_specs = {'specs': 's'}
    get_vols.return_value = [
        ssc_cmode.NetAppVolume(name='name', vserver='vs')]
    mock_lun = block_base.NetAppLun('handle', 'name', '1',
                                    {'Volume': 'fake', 'Path': '/vol/lun'})
    self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                      self.library._check_volume_type_for_lun,
                      {'vol': 'vol'}, mock_lun, {'ref': 'ref'},
                      fake_extra_specs)
    get_vols.assert_called_with(['vol'], {'specs': 's'})
    self.assertEqual(1, get_ssc.call_count)
def delete_volume(self, volume):
    """Driver entry point for destroying existing volumes."""
    # Resolve the containing volume name before the base class deletes
    # the LUN and removes its table entry.
    lun_entry = self.lun_table.get(volume['name'])
    containing_vol = (lun_entry.get_metadata_property('Volume')
                      if lun_entry else None)
    super(NetAppBlockStorageCmodeLibrary, self).delete_volume(volume)
    try:
        qos_policy_group_info = na_utils.get_valid_qos_policy_group_info(
            volume)
    except exception.Invalid:
        # Delete even if there was invalid qos policy specified for the
        # volume.
        qos_policy_group_info = None
    self._mark_qos_policy_group_for_deletion(qos_policy_group_info)
    if containing_vol:
        self._update_stale_vols(
            volume=ssc_cmode.NetAppVolume(containing_vol, self.vserver))
    msg = 'Deleted LUN with name %(name)s and QoS info %(qos)s'
    LOG.debug(msg, {'name': volume['name'],
                    'qos': qos_policy_group_info})
def createNetAppVolume(**kwargs):
    """Build an ssc_cmode.NetAppVolume populated from keyword args.

    'name' and 'vs' are required; every other field defaults to None
    when its keyword is absent.
    """
    get = kwargs.get
    vol = ssc_cmode.NetAppVolume(kwargs['name'], kwargs['vs'])
    # (target mapping, mapping key, kwargs key) — same assignment order
    # as the original hand-written version.
    assignments = (
        (vol.state, 'vserver_root', 'vs_root'),
        (vol.state, 'status', 'status'),
        (vol.state, 'junction_active', 'junc_active'),
        (vol.space, 'size_avl_bytes', 'avl_byt'),
        (vol.space, 'size_total_bytes', 'total_byt'),
        (vol.space, 'space-guarantee-enabled', 'sg_enabled'),
        (vol.space, 'space-guarantee', 'sg'),
        (vol.space, 'thin_provisioned', 'thin'),
        (vol.mirror, 'mirrored', 'mirrored'),
        (vol.qos, 'qos_policy_group', 'qos'),
        (vol.aggr, 'name', 'aggr_name'),
        (vol.aggr, 'junction', 'junction'),
        (vol.sis, 'dedup', 'dedup'),
        (vol.sis, 'compression', 'compression'),
        (vol.aggr, 'raid_type', 'raid'),
        (vol.aggr, 'ha_policy', 'ha'),
        (vol.aggr, 'disk_type', 'disk'),
    )
    for target, key, source in assignments:
        target[key] = get(source)
    return vol
'netapp_unmirrored': u'false', 'pool_name': 'open123', 'reserved_percentage': 0, 'total_capacity_gb': 4.65, 'thin_provisioning_support': True, 'thick_provisioning_support': False, 'provisioned_capacity_gb': 0.93, 'max_over_subscription_ratio': 20.0, 'utilization': 30.0, 'filter_function': 'filter', 'goodness_function': 'goodness', }] FAKE_CMODE_VOLUME = { 'all': [ ssc_cmode.NetAppVolume(name='open123', vserver='vs'), ssc_cmode.NetAppVolume(name='mixed', vserver='vs'), ssc_cmode.NetAppVolume(name='open321', vserver='vs') ], } FAKE_7MODE_VOLUME = { 'all': [ netapp_api.NaElement( etree.XML( """<volume-info xmlns="http://www.netapp.com/filer/admin"> <name>open123</name> </volume-info>""")), netapp_api.NaElement( etree.XML( """<volume-info xmlns="http://www.netapp.com/filer/admin">