def _validate_manage_parameters(self, context, body):
    """Validate and extract the 'share' entity from a manage request.

    :param context: request context, used for service host validation.
    :param body: deserialized request body; must contain a 'share' dict
        with non-empty 'export_path', 'service_host' and 'protocol' keys.
    :returns: the validated 'share' dict with 'share_type_id' added.
    :raises: webob.exc HTTP exceptions mapped from validation failures.
    """
    if not (body and self.is_valid_body(body, 'share')):
        msg = _("Share entity not found in request body")
        raise exc.HTTPUnprocessableEntity(explanation=msg)

    # Immutable tuple: these keys must be present and non-empty.
    required_parameters = ('export_path', 'service_host', 'protocol')
    data = body['share']

    for parameter in required_parameters:
        if parameter not in data:
            msg = _("Required parameter %s not found") % parameter
            raise exc.HTTPUnprocessableEntity(explanation=msg)
        if not data.get(parameter):
            msg = _("Required parameter %s is empty") % parameter
            raise exc.HTTPUnprocessableEntity(explanation=msg)

    # Managed shares must be addressed down to the pool level.
    if not share_utils.extract_host(data['service_host'], 'pool'):
        msg = _("service_host parameter should contain pool.")
        raise exc.HTTPBadRequest(explanation=msg)

    try:
        utils.validate_service_host(
            context, share_utils.extract_host(data['service_host']))
    except exception.ServiceNotFound as e:
        raise exc.HTTPNotFound(explanation=six.text_type(e))
    except exception.PolicyNotAuthorized as e:
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    # NOTE(review): handler added to match the sibling validator in this
    # file — an admin-required failure should surface as 403, not as an
    # unhandled exception.
    except exception.AdminRequired as e:
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.ServiceIsDown as e:
        raise exc.HTTPBadRequest(explanation=six.text_type(e))

    data['share_type_id'] = self._get_share_type_id(
        context, data.get('share_type'))

    return data
def get_pools(self, context, filters=None):
    """Returns a dict of all pools on all hosts HostManager knows about."""
    self._update_host_state_map(context)

    pools = []
    for host, state in self.host_state_map.items():
        for pool_state in state.pools.values():
            qualified_name = share_utils.append_host(
                host, pool_state.pool_name)
            if '@' in qualified_name:
                backend = share_utils.extract_host(
                    qualified_name, level='backend').split('@')[1]
            else:
                backend = None
            entry = {
                'name': qualified_name,
                'host': share_utils.extract_host(
                    qualified_name, level='host'),
                'backend': backend,
                'pool': share_utils.extract_host(
                    qualified_name, level='pool'),
                'capabilities': pool_state.capabilities,
            }
            # Only report pools matching the caller-supplied filters.
            if self._passes_filters(entry, filters):
                pools.append(entry)

    return pools
def host_passes(self, host_state, filter_properties):
    """Return True if host will work with desired consistency group.

    :param host_state: state of the candidate host (includes .host).
    :param filter_properties: may carry 'consistency_group' and
        'cg_support' ('pool' or 'host') describing the target CG.
    :returns: True when the host is a valid placement for the CG.
    """
    cg = filter_properties.get('consistency_group')
    cg_support = filter_properties.get('cg_support')

    # NOTE(ameade): If creating a share not in a CG, then of course the
    # host is valid for the cg.
    if not cg:
        return True

    # NOTE(ameade): If the CG host can only support shares on the same
    # pool, then the only valid pool is that one.
    if cg_support == 'pool' and cg.get('host') == host_state.host:
        return True

    # NOTE(ameade): If the CG host can support shares on the same host,
    # then any pool on that backend will work.
    elif cg_support == 'host':
        cg_backend = share_utils.extract_host(cg['host'])
        host_backend = share_utils.extract_host(host_state.host)
        return cg_backend == host_backend

    # Pass the args lazily instead of pre-formatting with '%' — the
    # interpolation is skipped entirely when debug logging is disabled.
    LOG.debug("Host %(host)s is not compatible with consistency "
              "group %(cg)s",
              {"host": host_state.host, "cg": cg['id']})

    return False
def get_pools(self, context, filters=None):
    """Returns a dict of all pools on all hosts HostManager knows about."""
    self._update_host_state_map(context)

    all_pools = []
    for host_key, state in self.host_state_map.items():
        for pool_obj in state.pools.values():
            full_name = share_utils.append_host(host_key,
                                                pool_obj.pool_name)
            backend_part = None
            if "@" in full_name:
                backend_part = share_utils.extract_host(
                    full_name, level="backend").split("@")[1]
            candidate = {
                "name": full_name,
                "host": share_utils.extract_host(full_name, level="host"),
                "backend": backend_part,
                "pool": share_utils.extract_host(full_name, level="pool"),
                "capabilities": pool_obj.capabilities,
            }
            # Respect the optional caller-supplied filtering.
            if self._passes_filters(candidate, filters):
                all_pools.append(candidate)
    return all_pools
def test_extract_host_with_default_pool(self):
    """use_default_pool_name has no effect for non-'pool' levels."""
    host = 'Host'
    # Default_pool_name doesn't work for level other than 'pool'
    for level in ('host', 'backend'):
        for use_default in (True, False):
            self.assertEqual(
                'Host',
                share_utils.extract_host(host, level, use_default))
def test_extract_host_with_pool(self):
    """Each extraction level picks the right piece of host@backend#pool."""
    host = 'Host@Backend#Pool'
    checks = (
        ('Host@Backend', share_utils.extract_host(host)),
        ('Host', share_utils.extract_host(host, 'host')),
        ('Host@Backend', share_utils.extract_host(host, 'backend')),
        ('Pool', share_utils.extract_host(host, 'pool')),
        ('Pool', share_utils.extract_host(host, 'pool', True)),
    )
    for expected, actual in checks:
        self.assertEqual(expected, actual)
def _weigh_object(self, host_state, weight_properties):
    """Pools with existing share server win."""
    mapping = weight_properties.get('server_pools_mapping', {})
    if not mapping:
        return 0

    admin_ctx = context.get_admin_context()
    backend = utils.extract_host(host_state.host, 'backend')
    pool = utils.extract_host(host_state.host, 'pool')

    # Weigh 1 if any share server on this backend already lives in
    # this host's pool, otherwise 0.
    for server in db_api.share_server_get_all_by_host(admin_ctx, backend):
        server_pools = mapping.get(server['id'], [])
        if any(entry['pool_name'] == pool for entry in server_pools):
            return 1
    return 0
def _validate_manage_share_server_parameters(self, context, body):
    """Validate a manage-share-server request body.

    :param context: request context.
    :param body: deserialized body; must contain a 'share_server' dict
        with non-empty 'host', 'share_network_id' and 'identifier' keys.
    :returns: tuple (identifier, host, share_network, driver_opts).
    :raises: webob.exc HTTP exceptions on validation failure.
    """
    if not (body and self.is_valid_body(body, 'share_server')):
        msg = _("Share Server entity not found in request body")
        raise exc.HTTPUnprocessableEntity(explanation=msg)

    required_parameters = ('host', 'share_network_id', 'identifier')
    data = body['share_server']

    for parameter in required_parameters:
        if parameter not in data:
            msg = _("Required parameter %s not found") % parameter
            raise exc.HTTPBadRequest(explanation=msg)
        if not data.get(parameter):
            msg = _("Required parameter %s is empty") % parameter
            raise exc.HTTPBadRequest(explanation=msg)

    identifier = data['identifier']
    host, share_network_id = data['host'], data['share_network_id']

    # Share servers are addressed at host/backend granularity; a pool
    # suffix would be ambiguous here.
    if share_utils.extract_host(host, 'pool'):
        msg = _("Host parameter should not contain pool.")
        raise exc.HTTPBadRequest(explanation=msg)

    # NOTE(review): webob 'explanation' must be text — render the caught
    # exceptions with six.text_type() instead of passing the exception
    # object itself, matching the other validators in this file.
    try:
        utils.validate_service_host(
            context, share_utils.extract_host(host))
    except exception.ServiceNotFound as e:
        raise exc.HTTPBadRequest(explanation=six.text_type(e))
    except exception.PolicyNotAuthorized as e:
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.AdminRequired as e:
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.ServiceIsDown as e:
        raise exc.HTTPBadRequest(explanation=six.text_type(e))

    try:
        share_network = db_api.share_network_get(
            context, share_network_id)
    except exception.ShareNetworkNotFound as e:
        raise exc.HTTPBadRequest(explanation=six.text_type(e))

    driver_opts = data.get('driver_options')
    if driver_opts is not None and not isinstance(driver_opts, dict):
        msg = _("Driver options must be in dictionary format.")
        raise exc.HTTPBadRequest(explanation=msg)

    return identifier, host, share_network, driver_opts
def test_unknown_is_last(self):
    """host6 sorts to the very end with zero weight."""
    weighed = self._get_weighed_host(self._get_all_hosts(), index=-1)
    self.assertEqual('host6', utils.extract_host(weighed.obj.host))
    self.assertEqual(0.0, weighed.weight)
def migrate_share(self, ctxt, share, dest_host, force_host_copy):
    """Cast 'migrate_share' to the manager owning the share's host."""
    target = utils.extract_host(share['host'])
    client = self.client.prepare(server=target, version='1.6')
    dest_info = {
        'host': dest_host.host,
        'capabilities': dest_host.capabilities,
    }
    client.cast(ctxt, 'migrate_share', share_id=share['id'],
                host=dest_info, force_host_copy=force_host_copy)
def delete_cgsnapshot(self, ctxt, cgsnapshot, host):
    """Cast 'delete_cgsnapshot' to the service running on ``host``."""
    server = utils.extract_host(host)
    client = self.client.prepare(server=server, version='1.5')
    client.cast(ctxt, 'delete_cgsnapshot',
                cgsnapshot_id=cgsnapshot['id'])
def get_pool(self, share):
    """Return pool name where the share resides on.

    :param share: The share hosted by the driver.
    """
    return share_utils.extract_host(share['host'], level='pool')
def create_consistency_group(self, ctxt, cg, host):
    """Cast 'create_consistency_group' to the service on ``host``."""
    server = utils.extract_host(host)
    client = self.client.prepare(server=server, version='1.5')
    client.cast(ctxt, 'create_consistency_group', cg_id=cg['id'])
def test_pool_weight_multiplier_negative(self):
    """With a negative multiplier the server-less host comes out on top."""
    self.flags(pool_weight_multiplier=-1.0)
    props = {
        'server_pools_mapping': {
            'fake_server_id0': [{'pool_name': 'pool1'}],
            'fake_server_id2': [{'pool_name': 'pool3'}],
            'fake_server_id3': [
                {'pool_name': 'pool4a'},
                {'pool_name': 'pool4b'},
            ],
            'fake_server_id4': [
                {'pool_name': 'pool5a'},
                {'pool_name': 'pool5b'},
            ],
        },
    }
    # Raw weights: host1/3/4/5 -> 1*(-1.0); host2 -> 0*(-1.0),
    # so host2 (normalized weight 0.0) wins.
    winner = self._get_weighed_host(self._get_all_hosts(), props)
    self.assertEqual(0.0, winner.weight)
    self.assertEqual('host2@BBB', utils.extract_host(winner.obj.host))
def delete_share_instance(self, context, share_instance, force=False):
    """Cast 'delete_share_instance' to the instance's owning host."""
    server = utils.extract_host(share_instance['host'])
    ctx = self.client.prepare(server=server, version='1.4')
    ctx.cast(context, 'delete_share_instance',
             share_instance_id=share_instance['id'],
             force=force)
def delete_snapshot(self, context, snapshot, host, force=False):
    """Cast 'delete_snapshot' to the service running on ``host``."""
    server = utils.extract_host(host)
    ctx = self.client.prepare(server=server)
    ctx.cast(context, 'delete_snapshot',
             snapshot_id=snapshot['id'], force=force)
def get_pool(self, share):
    """Return the share's pool, querying the backend when absent."""
    pool = share_utils.extract_host(share['host'], level='pool')
    if not pool:
        # Host field carries no pool: resolve it from the backend volume.
        share_name = self._get_valid_share_name(share['id'])
        pool = self._client.get_aggregate_for_volume(share_name)
    return pool
def revert_to_snapshot(self, context, share, snapshot, host, reservations):
    """Cast 'revert_to_snapshot' to the service running on ``host``."""
    server = utils.extract_host(host)
    ctx = self.client.prepare(server=server, version='1.18')
    ctx.cast(context, 'revert_to_snapshot',
             snapshot_id=snapshot['id'],
             reservations=reservations)
def create_share(self, context, share, share_server=None):
    """Is called to create a share."""
    options = self._get_dataset_creation_options(share, is_readonly=False)

    # Build 'zfs create -o opt1 -o opt2 ... <dataset>'.
    cmd = ['create']
    for opt in options:
        cmd += ['-o', opt]
    dataset_name = self._get_dataset_name(share)
    cmd.append(dataset_name)

    ssh_cmd = '%(username)s@%(host)s' % {
        'username': self.configuration.zfs_ssh_username,
        'host': self.service_ip,
    }
    pool_name = share_utils.extract_host(share['host'], level='pool')

    # Persist everything the replication code paths will need later.
    self.private_storage.update(
        share['id'], {
            'entity_type': 'share',
            'dataset_name': dataset_name,
            'ssh_cmd': ssh_cmd,  # used in replication
            'pool_name': pool_name,  # used in replication
            'used_options': ' '.join(options),
        }
    )

    self.zfs(*cmd)

    return self._get_share_helper(
        share['share_proto']).create_exports(dataset_name)
def manage_share(self, ctxt, share, driver_options=None):
    """Cast 'manage_share' to the share's owning host."""
    server = utils.extract_host(share['host'])
    client = self.client.prepare(server=server, version='1.1')
    client.cast(ctxt, 'manage_share', share_id=share['id'],
                driver_options=driver_options)
def create_share_from_snapshot(self, context, share, snapshot,
                               share_server=None):
    """Create a share from a snapshot - clone a snapshot.

    :param context: request context.
    :param share: share model; 'host' must carry a '#pool' suffix.
    :param snapshot: source snapshot to clone from.
    :param share_server: share server with 'backend_details'.
    :returns: export location string for the new share.
    :raises InvalidShare: if the protocol is not NFS/CIFS.
    :raises InvalidHost: if no pool is encoded in the share host.
    """
    share_name = share['id']

    # Normalize once so validation and the protocol dispatch below
    # agree. (Previously a lower-case 'nfs'/'cifs' passed the
    # case-insensitive validation but then matched neither dispatch
    # branch, leaving 'location' unbound -> NameError.)
    share_proto = share['share_proto'].upper()

    # Validate the share protocol
    if share_proto not in ('NFS', 'CIFS'):
        raise exception.InvalidShare(
            reason=(_('Invalid NAS protocol supplied: %s.') %
                    share_proto))

    # Get the pool name from share host field
    pool_name = share_utils.extract_host(share['host'], level='pool')
    if not pool_name:
        message = (_("Pool is not available in the share host %s.") %
                   share['host'])
        raise exception.InvalidHost(reason=message)

    self._share_server_validation(share_server)

    self._allocate_container_from_snapshot(
        share, snapshot, share_server, pool_name)

    if share_proto == 'NFS':
        self._create_nfs_share(share_name, share_server)
        location = ('%(nfs_if)s:/%(share_name)s'
                    % {'nfs_if': share_server['backend_details']['nfs_if'],
                       'share_name': share_name})
    else:  # CIFS — the only other protocol that passed validation.
        location = self._create_cifs_share(share_name, share_server)

    return location
def create_replicated_snapshot(self, context, share, replicated_snapshot):
    """Cast 'create_replicated_snapshot' to the share instance's host."""
    server = utils.extract_host(share['instance']['host'])
    ctx = self.client.prepare(server=server, version='1.11')
    ctx.cast(context, 'create_replicated_snapshot',
             snapshot_id=replicated_snapshot['id'],
             share_id=share['id'])
def shrink_share(self, context, share, new_size):
    """Cast 'shrink_share' to the share instance's host."""
    server = utils.extract_host(share['instance']['host'])
    ctx = self.client.prepare(server=server, version='1.3')
    ctx.cast(context, 'shrink_share', share_id=share['id'],
             new_size=new_size)
def delete_consistency_group(self, ctxt, cg):
    """Cast 'delete_consistency_group' to the CG's owning host."""
    server = utils.extract_host(cg['host'])
    client = self.client.prepare(server=server, version='1.5')
    client.cast(ctxt, 'delete_consistency_group', cg_id=cg['id'])
def ensure_share(self, context, share, share_server=None):
    """Invoked to ensure that given share is exported."""
    dataset_name = self.private_storage.get(share['id'], 'dataset_name')
    if not dataset_name:
        dataset_name = self._get_dataset_name(share)

    pool_name = share_utils.extract_host(share['host'], level='pool')
    out, err = self.zfs('list', '-r', pool_name)

    for entry in self.parse_zfs_answer(out):
        if entry['NAME'] != dataset_name:
            continue
        ssh_cmd = '%(username)s@%(host)s' % {
            'username': self.configuration.zfs_ssh_username,
            'host': self.service_ip,
        }
        self.private_storage.update(share['id'], {'ssh_cmd': ssh_cmd})
        # Re-share the dataset if NFS sharing is enabled on it.
        sharenfs = self.get_zfs_option(dataset_name, 'sharenfs')
        if sharenfs != 'off':
            self.zfs('share', dataset_name)
        return self._get_share_helper(
            share['share_proto']).get_exports(dataset_name)

    # Dataset not present on the pool: the share resource is gone.
    raise exception.ShareResourceNotFound(share_id=share['id'])
def test_default_of_spreading_first(self):
    """Thin-provisioned host2 exposes the most free space and wins."""
    hostinfo_list = self._get_all_hosts()

    # Usable free space per host (floor of capacity minus used and
    # reserved, scaled by the over-subscription ratio where thin):
    #   host1: thick, floor(1024 - 1024*0.1) = 921.0            -> 0.40
    #   host2: thin x2.0, floor(2048*2.0 - 1748 - 2048*0.1) = 2143.0
    #          -> 1.0
    #   host3: thick, floor(256 - 512*0) = 256.0                -> 0.08
    #   host4: thin x1.0, floor(2048*1.0 - 1848 - 2048*0.05) = 97.0
    #          -> 0.0
    #   host5: thin x1.5, floor(2048*1.5 - 1548 - 2048*0.05) = 1421.0
    #          -> 0.65
    #   host6: thick, free = inf                                -> 0.0
    # so, host2 should win:
    weighed_host = self._get_weighed_host(hostinfo_list)
    self.assertEqual(1.0, weighed_host.weight)
    self.assertEqual('host2', utils.extract_host(weighed_host.obj.host))
def _get_pool_project_share_name(self, share):
    """Return the (pool, project, share name) triple for ``share``."""
    return (share_utils.extract_host(share['host'], level='pool'),
            self._default_project,
            share['name'])
def create_share(self, share, share_server=None):
    """Create a share on a backend pool.

    Allocates a filesystem in the pool named in the share's host field,
    polls until the filesystem reports a healthy running status, then
    exposes it over the requested protocol.

    :param share: share model; 'host' must carry a '#pool' suffix.
    :param share_server: unused here.
    :returns: export location for the new share.
    :raises InvalidHost: if no pool is present or known on the backend.
    :raises InvalidShare: if filesystem or share creation fails.
    """
    share_name = share['name']
    share_proto = share['share_proto']

    pool_name = share_utils.extract_host(share['host'], level='pool')
    if not pool_name:
        msg = _("Pool is not available in the share host field.")
        raise exception.InvalidHost(reason=msg)

    result = self.helper._find_all_pool_info()
    poolinfo = self.helper._find_pool_info(pool_name, result)
    if not poolinfo:
        msg = (_("Can not find pool info by pool name: %s") % pool_name)
        raise exception.InvalidHost(reason=msg)

    fs_id = None
    # We sleep here to ensure the newly created filesystem can be read.
    wait_interval = self._get_wait_interval()
    timeout = self._get_timeout()

    try:
        fs_id = self.allocate_container(share, poolinfo)
        fs = self.helper._get_fs_info_by_id(fs_id)
        end_time = time.time() + timeout

        # Poll until the filesystem is healthy or the deadline passes.
        while not (self.check_fs_status(fs['HEALTHSTATUS'],
                                        fs['RUNNINGSTATUS'])
                   or time.time() > end_time):
            time.sleep(wait_interval)
            fs = self.helper._get_fs_info_by_id(fs_id)

        if not self.check_fs_status(fs['HEALTHSTATUS'],
                                    fs['RUNNINGSTATUS']):
            raise exception.InvalidShare(
                reason=(_('Invalid status of filesystem: %(health)s '
                          '%(running)s.')
                        % {'health': fs['HEALTHSTATUS'],
                           'running': fs['RUNNINGSTATUS']}))
    except Exception as err:
        # Best-effort cleanup of the half-created filesystem.
        if fs_id is not None:
            self.helper._delete_fs(fs_id)
        # NOTE(review): added the missing space between sentences in
        # the user-visible failure message ("...share X.Reason: ...").
        message = (_('Failed to create share %(name)s. '
                     'Reason: %(err)s.')
                   % {'name': share_name, 'err': err})
        raise exception.InvalidShare(reason=message)

    try:
        self.helper._create_share(share_name, fs_id, share_proto)
    except Exception as err:
        if fs_id is not None:
            self.helper._delete_fs(fs_id)
        raise exception.InvalidShare(
            reason=(_('Failed to create share %(name)s. Reason: %(err)s.')
                    % {'name': share_name, 'err': err}))

    location = self._get_location_path(share_name, share_proto)
    return location
def migration_cancel(self, context, src_share_instance, dest_instance_id):
    """Cast 'migration_cancel' to the source instance's host."""
    server = utils.extract_host(src_share_instance['host'])
    ctx = self.client.prepare(server=server, version='1.12')
    ctx.cast(context, 'migration_cancel',
             src_instance_id=src_share_instance['id'],
             dest_instance_id=dest_instance_id)
def update_share_replica(self, context, share_replica):
    """Cast 'update_share_replica' to the replica's host."""
    server = utils.extract_host(share_replica['host'])
    ctx = self.client.prepare(server=server, version='1.8')
    ctx.cast(context, 'update_share_replica',
             share_replica_id=share_replica['id'],
             share_id=share_replica['share_id'])
def get_backend_info_for_share(self, share_obj):
    """Return (volume name, vserver, backend name) for ``share_obj``."""
    # NOTE(review): 'backend_name' is not one of the levels exercised by
    # the extract_host tests in this tree ('host'/'backend'/'pool') —
    # confirm share_utils.extract_host supports it.
    backend_name = share_utils.extract_host(
        share_obj['host'], level='backend_name')
    config = get_backend_configuration(backend_name)
    # Prefer the vserver recorded on the share; fall back to config.
    vserver = (self.get_vserver_from_share(share_obj)
               or config.netapp_vserver)
    volume_name = self._get_backend_volume_name(config, share_obj)
    return volume_name, vserver, backend_name
def update_access_for_instances(self, context, dest_host,
                                share_instance_ids,
                                share_server_id=None):
    """Cast 'update_access_for_instances' to ``dest_host``'s service."""
    server = utils.extract_host(dest_host)
    ctx = self.client.prepare(server=server, version='1.21')
    ctx.cast(context, 'update_access_for_instances',
             share_instance_ids=share_instance_ids,
             share_server_id=share_server_id)
def manage_existing(self, share, driver_options):
    """Bring an existing backend folder under management.

    Validates the export location, confirms the folder exists on the
    share's pool and has a size limit set, renames the folder to the
    share id, and returns its size and export locations.
    """
    protocol = share['share_proto'].lower()
    pool_name = share_utils.extract_host(share['host'], level='pool')
    pool_data = self._get_share_pool_data(pool_name)
    volume_name = self._extract_lv_name(pool_data)
    input_location = share['export_locations'][0]['path']
    share_name = share['id'].replace('-', '')

    ch_ip, folder_name = self._parse_location(input_location, protocol)

    if not self._check_channel_ip(ch_ip):
        msg = _('Export location ip: [%(ch_ip)s] '
                'is incorrect, please use data port ip.') % {
                    'ch_ip': ch_ip}
        LOG.error(msg)
        raise exception.InfortrendNASException(err=msg)

    if not self._check_share_exist(pool_name, folder_name):
        msg = _('Can not find folder [%(folder_name)s] '
                'in pool [%(pool_name)s].') % {
                    'folder_name': folder_name,
                    'pool_name': pool_name}
        LOG.error(msg)
        raise exception.InfortrendNASException(err=msg)

    share_path = pool_data['path'] + folder_name
    self._ensure_protocol_on(share_path, protocol, share_name)

    share_size = self._get_share_size(
        pool_data['id'], pool_name, folder_name)
    if not share_size:
        msg = _('Folder [%(folder_name)s] has no size limitation, '
                'please set it first for Openstack management.') % {
                    'folder_name': folder_name}
        LOG.error(msg)
        raise exception.InfortrendNASException(err=msg)

    # rename folder name
    command_line = ['folder', 'options', pool_data['id'],
                    volume_name, '-k', folder_name, share_name]
    self._execute(command_line)

    location = self._export_location(
        share_name, protocol, pool_data['path'])

    LOG.info('Successfully Manage Infortrend Share [%(folder_name)s], '
             'Size: [%(size)s G], Protocol: [%(share_proto)s], '
             'new name: [%(share_name)s].', {
                 'folder_name': folder_name,
                 'size': share_size,
                 'share_proto': protocol,
                 'share_name': share_name})

    return {'size': share_size, 'export_locations': location}
def unmanage(self, share):
    """Stop managing a share; the backend folder is left untouched."""
    pool_name = share_utils.extract_host(share['host'], level='pool')
    share_name = share['id'].replace('-', '')

    if not self._check_share_exist(pool_name, share_name):
        LOG.warning('Share [%(share_name)s] does not exist.', {
            'share_name': share_name})
        return

    LOG.info('Successfully Unmanaged Share [%(share)s].', {
        'share': share['id']})
def create_share_replica(self, context, share_replica, host, request_spec,
                         filter_properties):
    """Cast 'create_share_replica' to the service on ``host``."""
    server = utils.extract_host(host)
    ctx = self.client.prepare(server=server, version='1.8')
    # Request spec may contain objects; make it RPC-serializable.
    spec = jsonutils.to_primitive(request_spec)
    ctx.cast(context, 'create_share_replica',
             share_replica_id=share_replica['id'],
             request_spec=spec,
             filter_properties=filter_properties,
             share_id=share_replica['share_id'])
def host_passes(self, host_state, filter_properties):
    """Return True if new share's host is compatible with snapshot's host.

    A share created from a snapshot may land on another pool or backend
    only when one of these holds:
      - the candidate backend is the same as the snapshot's backend, or
      - both are members of the same replication_domain.
    """
    spec = filter_properties.get('request_spec', {})
    snapshot_id = spec.get('snapshot_id')
    snapshot_host = spec.get('snapshot_host')

    if None in [snapshot_id, snapshot_host]:
        # NOTE(silvacarlose): if the request does not contain a
        # snapshot_id or a snapshot_host, the user is not creating a
        # share from a snapshot and we don't need to filter out the host.
        return True

    snapshot_backend = share_utils.extract_host(snapshot_host, 'backend')
    snapshot_rep_domain = filter_properties.get('replication_domain')
    host_backend = share_utils.extract_host(host_state.host, 'backend')
    host_rep_domain = host_state.replication_domain

    # Same backend
    if host_backend == snapshot_backend:
        return True
    # Same replication domain
    if snapshot_rep_domain and snapshot_rep_domain == host_rep_domain:
        return True

    msg = ("The parent's snapshot %(snapshot_id)s back end and "
           "replication domain don't match with the back end and "
           "replication domain of the Host %(host)s.")
    LOG.debug(msg, {"snapshot_id": snapshot_id, "host": host_state.host})
    return False
def migration_start(self, context, share, dest_host, force_host_copy,
                    notify):
    """Cast 'migration_start' to the share instance's current host."""
    server = utils.extract_host(share['instance']['host'])
    ctx = self.client.prepare(server=server, version='1.6')
    dest_info = {'host': dest_host.host,
                 'capabilities': dest_host.capabilities}
    ctx.cast(context, 'migration_start', share_id=share['id'],
             host=dest_info, force_host_copy=force_host_copy,
             notify=notify)
def migrate_share(self, ctxt, share, dest_host, force_host_copy):
    """Cast 'migrate_share' to the service that owns the share."""
    server = utils.extract_host(share['host'])
    prepared = self.client.prepare(server=server, version='1.6')
    prepared.cast(
        ctxt, 'migrate_share',
        share_id=share['id'],
        host={'host': dest_host.host,
              'capabilities': dest_host.capabilities},
        force_host_copy=force_host_copy)
def extend_share(self, share, new_size, share_server=None):
    """Extend the share's filesystem to ``new_size`` GiB."""
    # The pool must be encoded in the share host field.
    pool_name = share_utils.extract_host(share['host'], level='pool')
    if not pool_name:
        message = (_("Pool is not available in the share host %s.") %
                   share['host'])
        raise exception.InvalidHost(reason=message)

    # Backend API takes the size in KiB.
    self._get_context('FileSystem').extend(
        share['id'], pool_name, new_size * units.Ki)
def create_replica_pair(self, context, host, local_share_info,
                        remote_device_wwn, remote_fs_id,
                        local_replication):
    """Synchronously call 'create_replica_pair' on ``host``'s service."""
    server = utils.extract_host(host)
    ctx = self.client.prepare(server=server, version='1.0')
    return ctx.call(
        context, 'create_replica_pair',
        local_share_info=local_share_info,
        remote_device_wwn=remote_device_wwn,
        remote_fs_id=remote_fs_id,
        local_replication=local_replication,
    )
def test_default_of_spreading_first(self):
    """The host with the most usable free capacity gets weight 1.0."""
    hostinfo_list = self._get_all_hosts()

    # host1: free_capacity_gb=1024, free=1024*(1-0.1)
    # host2: free_capacity_gb=300, free=300*(1-0.1)
    # host3: free_capacity_gb=512, free=512
    # host4: free_capacity_gb=200, free=200*(1-0.05)
    # so, host1 should win:
    weighed_host = self._get_weighed_host(hostinfo_list)
    # assertEqual(expected, observed) — argument order made consistent
    # with the other weigher tests in this file.
    self.assertEqual(1.0, weighed_host.weight)
    self.assertEqual('host1', utils.extract_host(weighed_host.obj.host))
def create_share(self, context, share, share_server=None):
    """Create a new share instance."""
    name = self.generate_share_name(share)
    size = share['size']
    proto = share['share_proto']
    pool = share_utils.extract_host(share['host'], level='pool')

    self.assistant.create_share(name, pool, size, proto)
    return self.assistant.get_export_locations(name, proto)
def create_share_instance(self, context, share_instance, host,
                          request_spec, filter_properties,
                          snapshot_id=None):
    """Cast 'create_share_instance' to the scheduler-chosen host."""
    server = utils.extract_host(host)
    ctx = self.client.prepare(server=server, version='1.4')
    # Request spec may contain objects; make it RPC-serializable.
    spec = jsonutils.to_primitive(request_spec)
    ctx.cast(context, 'create_share_instance',
             share_instance_id=share_instance['id'],
             request_spec=spec,
             filter_properties=filter_properties,
             snapshot_id=snapshot_id)
def test_capacity_weight_multiplier_2(self, cap_thin, cap_thin_key,
                                      winner):
    """Doubling the multiplier doubles weights; the winner is unchanged."""
    self.flags(capacity_weight_multiplier=2.0)
    hosts = self._get_all_hosts()  # pylint: disable=no-value-for-parameter

    # For the 1st case, {'capabilities:thin_provisioning': '<is> True'},
    # the doubled free-space figures are:
    #   host1: thick, floor(1024-1024*0.1)=921.0, *2=1842.0 -> 0.81
    #   host2: thin x2.0, floor(2048*2.0-1748-2048*0.1)=2143.0,
    #          *2=4286.0 -> 2.0
    #   host3: thin=[False], floor(256-512*0)=256.0, *2=512.0 -> 0.16
    #   host4: thin=[True] x1.0, floor(2048*1.0-1848-2048*0.05)=97.0,
    #          *2=194.0 -> 0.0
    #   host5: thin=[True, False] x1.5,
    #          floor(2048*1.5-1548-2048*0.05)=1421.0, *2=2842.0 -> 1.29
    #   host6: thick, free=inf -> 0.0
    # so, host2 should win:
    weight_properties = {
        'size': 1,
        'share_type': {
            'extra_specs': {
                cap_thin_key: cap_thin,
            }
        }
    }
    weighed_host = self._get_weighed_host(
        hosts, weight_properties=weight_properties)
    self.assertEqual(2.0, weighed_host.weight)
    self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
def provide_share_server(self, context, share_instance, share_network_id,
                         snapshot_id=None):
    """Synchronously ask the instance's host for a share server."""
    server = utils.extract_host(share_instance['host'])
    ctx = self.client.prepare(server=server, version='1.12')
    return ctx.call(context, 'provide_share_server',
                    share_instance_id=share_instance['id'],
                    share_network_id=share_network_id,
                    snapshot_id=snapshot_id)
def test_capacity_weight_multiplier_negative_1(self, cap_thin,
                                               cap_thin_key, winner):
    """A negative multiplier inverts the ranking of usable free space."""
    self.flags(capacity_weight_multiplier=-1.0)
    hostinfo_list = self._get_all_hosts()

    # For the 1st case, {'capabilities:thin_provisioning': '<is> True'},
    # the negated free-space figures are:
    #   host1: thick, floor(1024-1024*0.1)=921.0, neg=-921.0 -> -0.40
    #   host2: thin x2.0, floor(2048*2.0-1748-2048*0.1)=2143.0,
    #          neg=-2143.0 -> -1.0
    #   host3: thin=[False], floor(256-512*0)=256.0, neg=-256.0 -> -0.08
    #   host4: thin=[True] x1.0, floor(2048*1.0-1848-2048*0.05)=97.0,
    #          neg=-97.0 -> 0.0
    #   host5: thin=[True, False] x1.5,
    #          floor(2048*1.5-1548-2048*0.05)=1421.0, neg=-1421.0 -> -0.65
    #   host6: thick, free=inf, neg=-inf -> 0.0
    # so, host4 should win:
    weight_properties = {
        'size': 1,
        'share_type': {
            'extra_specs': {
                cap_thin_key: cap_thin,
            }
        }
    }
    weighed_host = self._get_weighed_host(
        hostinfo_list, weight_properties=weight_properties)
    self.assertEqual(0.0, weighed_host.weight)
    self.assertEqual(winner, utils.extract_host(weighed_host.obj.host))
def share_server_migration_check(self, context, share_server_id,
                                 dest_host, writable, nondisruptive,
                                 preserve_snapshots,
                                 new_share_network_id):
    """Synchronously check server-migration feasibility on ``dest_host``."""
    server = utils.extract_host(dest_host)
    ctx = self.client.prepare(server=server, version='1.21')
    return ctx.call(context, 'share_server_migration_check',
                    share_server_id=share_server_id,
                    dest_host=dest_host,
                    writable=writable,
                    nondisruptive=nondisruptive,
                    preserve_snapshots=preserve_snapshots,
                    new_share_network_id=new_share_network_id)
def test_capacity_weight_multiplier2(self):
    """With multiplier 2.0 the best host's weight doubles to 2.0."""
    self.flags(capacity_weight_multiplier=2.0)
    hostinfo_list = self._get_all_hosts()

    # host1: free_capacity_gb=1024, free=1024*(1-0.1)*2
    # host2: free_capacity_gb=300, free=300*(1-0.1)*2
    # host3: free_capacity_gb=512, free=512*2
    # host4: free_capacity_gb=200, free=200*(1-0.05)*2
    # so, host1 should win:
    weighed_host = self._get_weighed_host(hostinfo_list)
    # assertEqual(expected, observed) — argument order made consistent
    # with the other weigher tests in this file.
    self.assertEqual(2.0, weighed_host.weight)
    self.assertEqual('host1', utils.extract_host(weighed_host.obj.host))
def delete_replicated_snapshot(self, context, replicated_snapshot, host,
                               share_id=None, force=False):
    """Cast 'delete_replicated_snapshot' to the service on ``host``."""
    server = utils.extract_host(host)
    ctx = self.client.prepare(server=server, version='1.11')
    ctx.cast(context, 'delete_replicated_snapshot',
             snapshot_id=replicated_snapshot['id'],
             share_id=share_id,
             force=force)
def promote_share_replica(self, ctxt, share_replica_id, host,
                          share_id=None):
    """Cast 'promote_share_replica' to the service on ``host``."""
    server = utils.extract_host(host)
    client = self.client.prepare(server=server, version='1.8')
    client.cast(ctxt, 'promote_share_replica',
                share_replica_id=share_replica_id,
                share_id=share_id)
def test_choose_pool_with_existing_share_server(self):
    """The only pool already holding a share server should win."""
    # Raw weights: host2 -> 1*(1.0); host1/3/4/5 -> 0*(1.0),
    # so, host2 should win:
    best = self._get_weighed_host(self._get_all_hosts())
    self.assertEqual(1.0, best.weight)
    self.assertEqual('host2@BBB', utils.extract_host(best.obj.host))
def get_pool(self, share):
    """Return the share's pool name, asking the backend when absent."""
    pool_name = share_utils.extract_host(share['host'], level='pool')
    if pool_name:
        return pool_name

    # Host field carries no pool: look the share up on the array and
    # resolve its filesystem's pool. The lookup result gets its own name
    # so it does not shadow the 'share' argument.
    share_url_type = self.helper._get_share_url_type(share['share_proto'])
    backend_share = self.helper._get_share_by_name(share['name'],
                                                   share_url_type)
    if not backend_share:
        return None
    pool = self.helper._get_fs_info_by_id(backend_share['FSID'])
    return pool['POOLNAME']
def _validate_manage_parameters(self, context, body):
    """Validate a manage-share request body and return the share dict.

    Ensures the 'share' entity exists, that the required keys are
    present and non-empty, that the service host includes a pool, and
    that the hosting service is up; resolves the requested share type.
    """
    if not (body and self.is_valid_body(body, 'share')):
        raise exc.HTTPUnprocessableEntity(
            explanation=_("Share entity not found in request body"))

    data = body['share']
    for key in ('export_path', 'service_host', 'protocol'):
        if key not in data:
            raise exc.HTTPUnprocessableEntity(
                explanation=_("Required parameter %s not found") % key)
        if not data.get(key):
            raise exc.HTTPUnprocessableEntity(
                explanation=_("Required parameter %s is empty") % key)

    if not share_utils.extract_host(data['service_host'], 'pool'):
        raise exc.HTTPBadRequest(
            explanation=_("service_host parameter should contain pool."))

    try:
        utils.validate_service_host(
            context, share_utils.extract_host(data['service_host']))
    except exception.ServiceNotFound as e:
        raise exc.HTTPNotFound(explanation=six.text_type(e))
    except exception.PolicyNotAuthorized as e:
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.AdminRequired as e:
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.ServiceIsDown as e:
        raise exc.HTTPBadRequest(explanation=six.text_type(e))

    data['share_type_id'] = self._get_share_type_id(
        context, data.get('share_type'))

    return data
def migration_check_compatibility(self, context, source_share,
                                  destination_share, share_server=None,
                                  destination_share_server=None):
    """Checks compatibility between self.host and destination host.

    Shares can only be migrated within the same volume group (the pool
    part of the host string), so source and destination pools must
    match for the migration to be compatible.

    :returns: dict of migration capability flags; 'compatible' is False
        when the shares are in different volume groups.
    """
    compatible = False
    destination_host = destination_share['host']
    source_host = source_share['host']
    destination_vg = share_utils.extract_host(destination_host,
                                              level='pool')
    source_vg = share_utils.extract_host(source_host, level='pool')

    if destination_vg != source_vg:
        msg = ("Cannot migrate share %(shr)s between "
               "%(src)s and %(dest)s, they must be in the same volume "
               "group.")
        msg_args = {
            'shr': source_share['id'],
            'src': source_share['host'],
            'dest': destination_host,
        }
        # LOG.error, not LOG.exception: there is no active exception
        # here, so LOG.exception would append a bogus traceback
        # ("NoneType: None") to the log record.
        LOG.error(msg, msg_args)
    else:
        compatible = True

    compatibility = {
        'compatible': compatible,
        'writable': True,
        'nondisruptive': False,
        'preserve_metadata': True,
        'preserve_snapshots': False,
    }

    return compatibility
def check_update_share_network_security_service(
        self, context, dest_host, share_network_id,
        new_security_service_id, current_security_service_id=None):
    """Cast a security-service update check to the destination host.

    Fire-and-forget RPC: asks the share manager on ``dest_host``
    whether ``share_network_id`` can switch to the new security
    service.
    """
    target = self.client.prepare(server=utils.extract_host(dest_host),
                                 version='1.22')
    target.cast(
        context, 'check_update_share_network_security_service',
        share_network_id=share_network_id,
        new_security_service_id=new_security_service_id,
        current_security_service_id=current_security_service_id)
def _get_share_pnsp(self, share):
    """Return (pool, share_name, share_size, share_proto) for *share*.

    AS13000 only accepts names consisting of letters, numbers and
    underscores, beginning with a letter and shorter than 32
    characters, so the backend name is derived from the share ID:
    prefixed with 'share_' and with '-' converted to '_'.
    """
    pool = share_utils.extract_host(share['host'], level='pool')
    name = self._format_name('share_%s' % share['share_id'])
    size = share['size']
    proto = share['share_proto'].lower()
    return pool, name, size, proto
def delete_share_replica(self, ctxt, share_replica_id, host,
                         share_id=None, force=False):
    """Cast a delete_share_replica request to the replica's host."""
    call_context = self.client.prepare(
        server=utils.extract_host(host), version='1.8')
    call_context.cast(
        ctxt, 'delete_share_replica',
        share_replica_id=share_replica_id,
        share_id=share_id,
        force=force,
    )
def migration_check_compatibility(
        self, context, source_share, destination_share,
        share_server=None, destination_share_server=None):
    """Is called to test compatibility with destination backend.

    The destination is considered compatible only when its configured
    share driver is also the Dummy driver.
    """
    backend_name = share_utils.extract_host(
        destination_share['host'], level='backend_name')
    config = get_backend_configuration(backend_name)
    is_dummy = 'Dummy' in config.share_driver
    return {
        'compatible': is_dummy,
        'writable': is_dummy,
        'preserve_metadata': is_dummy,
        'nondisruptive': False,
        'preserve_snapshots': is_dummy,
    }
def test_pool_weight_multiplier_positive(self):
    """A positive multiplier scales the winning host's weight.

    Expected weights with pool_weight_multiplier=2.0:
      host1, host3, host4, host5 -> 0 * 2.0
      host2                      -> 1 * 2.0
    """
    self.flags(pool_weight_multiplier=2.0)
    best = self._get_weighed_host(self._get_all_hosts())
    self.assertEqual(2.0, best.weight)
    self.assertEqual('host2@BBB', utils.extract_host(best.obj.host))
def _evict_unauthorized_clients(self, share, access_rules,
                                share_server=None):
    """Remove backend access entries not present in *access_rules*.

    Compares the clients currently granted access on the backend with
    the desired ``access_rules`` and deletes the extras (host IPs for
    NFS, user ACLs for CIFS). Removal failures are logged and skipped
    so a single bad entry does not abort the whole sweep.
    """
    pool_name = share_utils.extract_host(share['host'], level='pool')
    pool_data = self._get_share_pool_data(pool_name)
    share_proto = share['share_proto'].lower()
    share_name = share['id'].replace('-', '')
    share_path = pool_data['path'] + share_name

    # Set of clients that should KEEP access; O(1) membership tests.
    allowed = {access['access_to'] for access in access_rules}

    if share_proto == 'nfs':
        self._evict_unauthorized_nfs_clients(share_path, allowed)
    elif share_proto == 'cifs':
        self._evict_unauthorized_cifs_clients(share_path, allowed)

def _evict_unauthorized_nfs_clients(self, share_path, allowed):
    """Drop NFS host entries on *share_path* not listed in *allowed*."""
    command_line = ['share', 'status', '-f', share_path]
    rc, nfs_status = self._execute(command_line)
    host_list = nfs_status[0]['nfs_detail']['hostList']
    # '*' is the wildcard entry, never evicted here.
    current_ips = [host['host'] for host in host_list
                   if host['host'] != '*']
    for ip in current_ips:
        if ip in allowed:
            continue
        command_line = ['share', 'options', share_path, 'nfs', '-c', ip]
        try:
            self._execute(command_line)
        except exception.InfortrendNASException:
            # Best effort: log and continue with the remaining entries.
            LOG.exception(_("Failed to remove share access rule %s"), ip)

def _evict_unauthorized_cifs_clients(self, share_path, allowed):
    """Drop CIFS user ACL entries on *share_path* not in *allowed*."""
    command_line = ['acl', 'get', share_path]
    rc, cifs_status = self._execute(command_line)
    current_users = [rule['name'] for rule in cifs_status
                     if rule['name']]
    for user in current_users:
        if user in allowed:
            continue
        command_line = ['acl', 'delete', share_path, '-u', user]
        try:
            self._execute(command_line)
        except exception.InfortrendNASException:
            # Best effort: log and continue with the remaining entries.
            LOG.exception(_("Failed to remove share access rule %s"),
                          user)