def _retype_2_2(self, ctxt, volume, new_type, diff, host):
    """Convert a volume to a new volume type without data migration.

    The fast (in-place) retype path is taken only when the destination
    host's backend name matches this driver's backend; otherwise False
    is returned so Cinder falls back to a migration-based retype.

    :param ctxt: security context (unused)
    :param volume: volume to retype
    :param new_type: destination volume type
    :param diff: difference between the two types (logged only)
    :param host: destination host dict with a 'capabilities' entry
    :returns: True if the fast retype was performed, False otherwise
    """
    LOG.debug("Retype called\n"
              "Volume: %(volume)s\n"
              "NewType: %(new_type)s\n"
              "Diff: %(diff)s\n"
              "Host: %(host)s\n",
              {'volume': volume, 'new_type': new_type, 'diff': diff,
               'host': host})

    def _apply_volume_placement(vol_params):
        # Volume-level placement update shared by both retype branches.
        # placement_policy supersedes placement_mode on 3.3.0.0+.
        if datc.dat_version_gte(self.datera_version, '3.3.0.0'):
            vol_params['placement_policy'] = {
                'path': '/placement_policies/{}'.format(
                    new_pol.get('placement_policy'))}
        dvol.set(tenant=tenant, **vol_params)

    # We'll take the fast route only if the types share the same backend
    # And that backend matches this driver
    old_pol = self._get_policies_for_resource(volume)
    new_pol = self._get_policies_for_volume_type(new_type)
    if (host['capabilities']['volume_backend_name'].lower() ==
            self.backend_name.lower()):
        LOG.debug("Starting fast volume retype")

        if old_pol.get('template') or new_pol.get('template'):
            # NOTE(review): this warns and continues (it does not abort)
            # — that matches the original behavior; the template itself
            # is left untouched by the fast retype.
            LOG.warning(
                "Fast retyping between template-backed volume-types "
                "unsupported. Type1: %s, Type2: %s",
                volume['volume_type_id'], new_type)

        self._update_qos_2_2(volume, new_pol, clear_old=True)
        tenant = self.get_tenant(volume['project_id'])
        dvol = self.cvol_to_dvol(volume, tenant=tenant)
        # Only replica_count ip_pool requires offlining the app_instance
        if (new_pol['replica_count'] != old_pol['replica_count'] or
                new_pol['ip_pool'] != old_pol['ip_pool']):
            with self._offline_flip_2_2(volume):
                # ip_pool is Storage Instance level
                ai = self.cvol_to_ai(volume, tenant=tenant)
                si = ai.storage_instances.list(tenant=tenant)[0]
                ip_pool = datc.get_ip_pool(new_pol)
                si_params = {
                    'ip_pool': {
                        'path': ('/access_network_ip_pools/'
                                 '{}'.format(ip_pool))},
                }
                si.set(tenant=tenant, **si_params)
                # placement_mode and replica_count are Volume level
                _apply_volume_placement({
                    'placement_mode': new_pol['placement_mode'],
                    'replica_count': new_pol['replica_count'],
                })
        elif (new_pol['placement_mode'] != old_pol['placement_mode'] or
                new_pol['placement_policy'] !=
                old_pol['placement_policy']):
            # Placement-only change: no offline flip required
            _apply_volume_placement(
                {'placement_mode': new_pol['placement_mode']})
        self._add_vol_meta_2_2(volume)
        return True
    else:
        LOG.debug("Couldn't fast-retype volume between specified types")
        return False
def _create_volume_2_2(self, volume):
    """Create the Datera app_instance backing a new Cinder volume.

    Builds the app_instance request either from a template referenced
    by the volume-type policies or from an explicit single storage
    instance / single volume layout, then creates it under the
    volume's tenant and applies QoS and metadata.

    :param volume: the Cinder volume to provision
    """
    policies = self._get_policies_for_resource(volume)
    replicas = int(policies['replica_count'])
    si_name = 'storage-1'
    vol_name = 'volume-1'
    template = policies['template']
    placement = policies['placement_mode']
    ppolicy = policies['placement_policy']
    ip_pool = datc.get_ip_pool(policies)
    name = datc.get_name(volume)

    if template:
        # Template-backed creation: the template supplies the storage
        # layout.  uuid is intentionally omitted in this mode.
        app_params = {
            'create_mode': 'openstack',
            'name': name,
            'app_template': {
                'path': '/app_templates/{}'.format(template)},
        }
        if self._support_template_override_2_2():
            # Only the volume size is overridden inside the template
            app_params['template_override'] = {
                'storage_instances': {
                    si_name: {
                        'volumes': {
                            vol_name: {
                                'size': str(volume['size'])}}}}}
    else:
        # Explicit layout: one storage instance containing one volume
        vol_spec = {
            'name': vol_name,
            'size': volume['size'],
            'replica_count': replicas,
            'snapshot_policies': [],
        }
        # placement_policy supersedes placement_mode on 3.3.0.0+
        if datc.dat_version_gte(self.datera_version, '3.3.0.0'):
            vol_spec['placement_policy'] = {
                'path': '/placement_policies/{}'.format(ppolicy)}
        else:
            vol_spec['placement_mode'] = placement
        app_params = {
            'create_mode': 'openstack',
            'uuid': str(volume['id']),
            'name': name,
            'access_control_mode': 'deny_all',
            'storage_instances': [{
                'name': si_name,
                'ip_pool': {
                    'path': ('/access_network_ip_pools/'
                             '{}'.format(ip_pool))},
                'volumes': [vol_spec],
            }],
        }

    tenant = self.create_tenant(volume['project_id'])
    self.api.app_instances.create(tenant=tenant, **app_params)
    self._update_qos_2_2(volume, policies)
    self._add_vol_meta_2_2(volume)
def _create_export_2_2(self, context, volume, connector):
    """Export the volume's app_instance and grant the connector access.

    Offlines the app_instance, optionally points its storage instance
    at the right access-network IP pool, brings it back online, then
    registers the connector's initiator (creating it if needed) in
    each storage instance's ACL and configures CHAP when enabled.

    :param context: security context (unused)
    :param volume: the Cinder volume being exported
    :param connector: connector dict; 'ip' and 'initiator' are optional
    """
    tenant = self.get_tenant(volume['project_id'])
    ai = self.cvol_to_ai(volume, tenant=tenant)
    data = {'admin_state': 'offline', 'force': True}
    ai.set(tenant=tenant, **data)
    si = ai.storage_instances.list(tenant=tenant)[0]
    policies = self._get_policies_for_resource(volume)
    if connector and connector.get('ip'):
        # Case where volume_type has non default IP Pool info
        ip_pool = datc.get_ip_pool(policies)
        if ip_pool != 'default':
            initiator_ip_pool_path = self.api.access_network_ip_pools.get(
                ip_pool).path
        # Fallback to trying reasonable IP based guess
        else:
            initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_2(
                connector['ip'], tenant)

        ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}}
        # Template-backed app_instances manage their own network config,
        # so only set the pool on non-template instances
        if not ai.app_template["path"]:
            si.set(tenant=tenant, **ip_pool_data)
    data = {'admin_state': 'online'}
    ai.set(tenant=tenant, **data)
    # Check if we've already setup everything for this volume
    storage_instances = ai.storage_instances.list(tenant=tenant)
    # Handle adding initiator to product if necessary
    # Then add initiator to ACL
    if connector and connector.get('initiator'):
        initiator_name = "OpenStack-{}".format(str(uuid.uuid4())[:8])
        initiator = connector['initiator']
        dinit = None
        try:
            # We want to make sure the initiator is created under the
            # current tenant rather than using the /root one
            dinit = self.api.initiators.get(initiator, tenant=tenant)
            if dinit.tenant != tenant:
                raise dexceptions.ApiNotFoundError(
                    "Initiator {} was not found under tenant {} "
                    "[{} != {}]".format(
                        initiator, tenant, dinit.tenant, tenant))
        except dexceptions.ApiNotFoundError:
            # TODO(_alastor_): Take out the 'force' flag when we fix
            # DAT-15931
            data = {'id': initiator, 'name': initiator_name,
                    'force': True}
            try:
                dinit = self.api.initiators.create(tenant=tenant, **data)
            except dexceptions.ApiConflictError:
                # A conflict means another request created the
                # initiator first; fetch the existing one instead of
                # leaving dinit as None (which previously crashed on
                # the dinit['path'] access below).
                dinit = self.api.initiators.get(initiator, tenant=tenant)
        initiator_path = dinit['path']
        # Create ACL with initiator group as reference for each
        # storage_instance in app_instance
        # TODO(_alastor_): We need to avoid changing the ACLs if the
        # template already specifies an ACL policy.
        for si in storage_instances:
            existing_acl = si.acl_policy.get(tenant=tenant)
            data = {}
            # Grabbing only the 'path' key from each existing initiator
            # within the existing acl. eacli --> existing acl initiator
            eacli = []
            for acl in existing_acl['initiators']:
                eacli.append({'path': acl['path']})
            data['initiators'] = eacli
            data['initiators'].append({"path": initiator_path})
            # Grabbing only the 'path' key from each existing initiator
            # group within the existing acl. eaclig --> existing
            # acl initiator group
            eaclig = []
            for acl in existing_acl['initiator_groups']:
                eaclig.append({'path': acl['path']})
            data['initiator_groups'] = eaclig
            si.acl_policy.set(tenant=tenant, **data)
    if self.use_chap_auth:
        for si in storage_instances:
            data = {
                'type': 'chap',
                'target_user_name': self.chap_username,
                'target_pswd': self.chap_password,
            }
            si.auth.set(tenant=tenant, **data)
    # Check to ensure we're ready for go-time
    self._si_poll_2_2(volume, si, tenant)
    self._add_vol_meta_2_2(volume, connector=connector)