def _create_cloned_volume_2_1(self, volume, src_vref):
        """Create a clone of ``src_vref`` via the Datera 2.1 API."""
        policies = self._get_policies_for_resource(volume)
        tenant = self._create_tenant(volume)
        store_name, vol_name = self._scrape_template(policies)

        clone_src = "/" + datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name).format(datc._get_name(src_vref['id']))
        body = {
            'create_mode': 'openstack',
            'name': datc._get_name(volume['id']),
            'uuid': str(volume['id']),
            'clone_volume_src': {'path': clone_src},
        }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](), 'post', body=body, api_version='2.1',
            tenant=tenant)

        # Grow the clone when the requested size exceeds the source size.
        if volume['size'] > src_vref['size']:
            self._extend_volume_2_1(volume, volume['size'])

        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        volume_type = self._get_volume_type_obj(volume)
        vtype = volume_type['name'] if volume_type else None
        self._store_metadata(
            url,
            {datc.M_TYPE: vtype,
             datc.M_CLONE: datc._get_name(src_vref['id'])},
            "create_cloned_volume_2_1", tenant)
    def _extend_volume_2_1(self, volume, new_size):
        """Extend ``volume`` to ``new_size`` via the Datera 2.1 API.

        Volumes bound to an app template cannot be resized (current
        product limitation); those are logged and skipped.
        """
        tenant = self._create_tenant(volume)
        policies = self._get_policies_for_resource(volume)
        template = policies['template']
        if template:
            # FIX: pass log-format values as a single dict argument.
            # Logger.warning() does not accept arbitrary keyword
            # arguments, so the previous ``volume=..., template=...``
            # call raised TypeError whenever this branch was hit.
            LOG.warning(_LW("Volume size not extended due to template binding:"
                            " volume: %(volume)s, template: %(template)s"),
                        {'volume': volume, 'template': template})
            return

        # Offline App Instance, if necessary
        reonline = False
        app_inst = self._issue_api_request(
            datc.URL_TEMPLATES['ai_inst']().format(
                datc._get_name(volume['id'])),
            api_version='2.1', tenant=tenant)
        if app_inst['data']['admin_state'] == 'online':
            reonline = True
            self._detach_volume_2_1(None, volume)
        # Change Volume Size
        app_inst = datc._get_name(volume['id'])
        data = {
            'size': new_size
        }
        store_name, vol_name = self._scrape_template(policies)
        self._issue_api_request(
            datc.URL_TEMPLATES['vol_inst'](
                store_name, vol_name).format(app_inst),
            method='put',
            body=data,
            api_version='2.1',
            tenant=tenant)
        # Online Volume, if it was online before
        if reonline:
            self._create_export_2_1(None, volume, None)
 def _manage_existing_2_1(self, volume, existing_ref):
     """Rename an existing backend app_instance so Cinder manages it.

     Only volumes created under the requesting tenant can be managed in
     the v2.1 API.  Eg.  If tenant A is the tenant for the volume to be
     managed, it must also be tenant A that makes this request.
     This will be fixed in a later API update.
     """
     tenant = self._create_tenant(volume)
     existing_ref = existing_ref['source-name']
     if existing_ref.count(":") not in (2, 3):
         raise exception.ManageExistingInvalidReference(
             _("existing_ref argument must be of this format: "
               "tenant:app_inst_name:storage_inst_name:vol_name or "
               "app_inst_name:storage_inst_name:vol_name"))
     try:
         (tenant, app_inst_name, storage_inst_name,
             vol_name) = existing_ref.split(":")
     except ValueError:
         # FIX: unpacking a 3-element split into 4 targets raises
         # ValueError, not TypeError, so the tenant-less reference
         # format was never actually handled before.
         app_inst_name, storage_inst_name, vol_name = existing_ref.split(
             ":")
         tenant = None
     LOG.debug("Managing existing Datera volume %s  "
               "Changing name to %s",
               datc._get_name(volume['id']), existing_ref)
     data = {'name': datc._get_name(volume['id'])}
     self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
         app_inst_name), method='put', body=data, api_version='2.1',
         tenant=tenant)
 def _manage_existing_2_1(self, volume, existing_ref):
     """Rename an existing backend app_instance so Cinder manages it.

     Only volumes created under the requesting tenant can be managed in
     the v2.1 API.  Eg.  If tenant A is the tenant for the volume to be
     managed, it must also be tenant A that makes this request.
     This will be fixed in a later API update.
     """
     tenant = self._create_tenant(volume)
     existing_ref = existing_ref['source-name']
     if existing_ref.count(":") not in (2, 3):
         raise exception.ManageExistingInvalidReference(
             _("existing_ref argument must be of this format: "
               "tenant:app_inst_name:storage_inst_name:vol_name or "
               "app_inst_name:storage_inst_name:vol_name"))
     try:
         (tenant, app_inst_name, storage_inst_name,
             vol_name) = existing_ref.split(":")
     except ValueError:
         # FIX: unpacking a 3-element split into 4 targets raises
         # ValueError, not TypeError, so the tenant-less reference
         # format was never actually handled before.
         app_inst_name, storage_inst_name, vol_name = existing_ref.split(
             ":")
         tenant = None
     LOG.debug("Managing existing Datera volume %s  "
               "Changing name to %s",
               datc._get_name(volume['id']), existing_ref)
     data = {'name': datc._get_name(volume['id'])}
     self._issue_api_request(datc.URL_TEMPLATES['ai_inst']().format(
         app_inst_name), method='put', body=data, api_version='2.1',
         tenant=tenant)
    def _detach_volume_2_1(self, context, volume, attachment=None):
        """Offline the app_instance backing ``volume`` and clean its ACLs."""
        tenant = self._create_tenant(volume)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        try:
            self._issue_api_request(
                url, method='put',
                body={'admin_state': 'offline', 'force': True},
                api_version='2.1', tenant=tenant)
        except exception.NotFound:
            msg = _LI("Tried to detach volume %s, but it was not found in the "
                      "Datera cluster. Continuing with detach.")
            LOG.info(msg, volume['id'])
        # TODO(_alastor_): Make acl cleaning multi-attach aware
        self._clean_acl_2_1(volume, tenant)

        try:
            self._store_metadata(url, {}, "detach_volume_2_1", tenant)
        except exception.NotFound:
            # Object already gone -- probably deleting/detaching an
            # already deleted instance, so nothing to record.
            pass
    def _extend_volume_2(self, volume, new_size):
        """Extend ``volume`` to ``new_size`` via the Datera v2 API."""
        # Current product limitation:
        # If app_instance is bound to template resizing is not possible
        # Once policies are implemented in the product this can go away
        policies = self._get_policies_for_resource(volume)
        template = policies['template']
        if template:
            # FIX: Logger.warning() rejects arbitrary keyword arguments;
            # the format values must be supplied as one dict positional
            # argument or this branch raises TypeError.
            LOG.warning(
                "Volume size not extended due to template binding:"
                " volume: %(volume)s, template: %(template)s",
                {'volume': volume, 'template': template})
            return

        # Offline App Instance, if necessary
        reonline = False
        app_inst = self._issue_api_request(
            datc.URL_TEMPLATES['ai_inst']().format(datc._get_name(
                volume['id'])),
            api_version='2')
        if app_inst['admin_state'] == 'online':
            reonline = True
            self._detach_volume_2(None, volume)
        # Change Volume Size
        app_inst = datc._get_name(volume['id'])
        data = {'size': new_size}
        store_name, vol_name = self._scrape_template(policies)
        self._issue_api_request(datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name).format(app_inst),
                                method='put',
                                body=data,
                                api_version='2')
        # Online Volume, if it was online before
        if reonline:
            self._create_export_2(None, volume, None)
    def _create_volume_from_snapshot_2(self, volume, snapshot):
        """Create a new volume cloned from ``snapshot`` (v2 API)."""
        policies = self._get_policies_for_resource(snapshot)
        store_name, vol_name = self._scrape_template(policies)

        snap_temp = datc.URL_TEMPLATES['vol_inst'](store_name,
                                                   vol_name) + '/snapshots'
        snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
        snapshots = self._issue_api_request(snapu,
                                            method='get',
                                            api_version='2')
        # Locate the backend timestamp key for this snapshot's uuid.
        found_ts = None
        for ts, snap in snapshots.items():
            if snap['uuid'] == snapshot['id']:
                found_ts = ts
                break
        if found_ts is None:
            raise exception.NotFound

        snap_url = (snap_temp + '/{}').format(
            datc._get_name(snapshot['volume_id']), found_ts)

        # Wait until the snapshot is available before cloning from it.
        self._snap_poll(snap_url)

        app_params = {
            'create_mode': 'openstack',
            'uuid': str(volume['id']),
            'name': datc._get_name(volume['id']),
            'clone_src': "/" + snap_url,
        }
        self._issue_api_request(datc.URL_TEMPLATES['ai'](),
                                method='post',
                                body=app_params,
                                api_version='2')
# Example #8
    def _delete_snapshot_2(self, snapshot):
        """Delete the backend snapshot matching ``snapshot`` (v2 API)."""
        policies = self._get_policies_for_resource(snapshot)
        store_name, vol_name = self._scrape_template(policies)

        snap_temp = datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name) + '/snapshots'
        snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
        snapshots = self._issue_api_request(snapu, method='get',
                                            api_version='2')

        try:
            # Find the timestamp key whose entry matches this uuid.
            match_ts = next((ts for ts, snap in snapshots.items()
                             if snap['uuid'] == snapshot['id']), None)
            if match_ts is None:
                raise exception.NotFound
            self._issue_api_request((snapu + '/{}').format(match_ts),
                                    method='delete',
                                    api_version='2')
        except exception.NotFound:
            msg = ("Tried to delete snapshot %s, but was not found in "
                   "Datera cluster. Continuing with delete.")
            LOG.info(msg, datc._get_name(snapshot['id']))
    def _extend_volume_2_1(self, volume, new_size):
        """Extend ``volume`` to ``new_size`` via the Datera 2.1 API.

        Volumes bound to an app template cannot be resized (current
        product limitation); those are logged and skipped.
        """
        tenant = self._create_tenant(volume)
        policies = self._get_policies_for_resource(volume)
        template = policies['template']
        if template:
            # FIX: Logger.warning() rejects arbitrary keyword arguments;
            # the format values must be supplied as one dict positional
            # argument or this branch raises TypeError.
            LOG.warning("Volume size not extended due to template binding:"
                        " volume: %(volume)s, template: %(template)s",
                        {'volume': volume, 'template': template})
            return

        # Offline App Instance, if necessary
        reonline = False
        app_inst = self._issue_api_request(
            datc.URL_TEMPLATES['ai_inst']().format(
                datc._get_name(volume['id'])),
            api_version='2.1', tenant=tenant)
        if app_inst['data']['admin_state'] == 'online':
            reonline = True
            self._detach_volume_2_1(None, volume)
        # Change Volume Size
        app_inst = datc._get_name(volume['id'])
        data = {
            'size': new_size
        }
        store_name, vol_name = self._scrape_template(policies)
        self._issue_api_request(
            datc.URL_TEMPLATES['vol_inst'](
                store_name, vol_name).format(app_inst),
            method='put',
            body=data,
            api_version='2.1',
            tenant=tenant)
        # Online Volume, if it was online before
        if reonline:
            self._create_export_2_1(None, volume, None)
    def _delete_snapshot_2_1(self, snapshot):
        """Delete the backend snapshot matching ``snapshot`` (2.1 API)."""
        tenant = self._create_tenant(snapshot)
        policies = self._get_policies_for_resource(snapshot)
        store_name, vol_name = self._scrape_template(policies)

        snap_temp = datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name) + '/snapshots'
        snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
        snapshots = self._issue_api_request(snapu,
                                            method='get',
                                            api_version='2.1',
                                            tenant=tenant)

        try:
            # Find the backend snapshot entry matching this uuid.
            target = next((snap for snap in snapshots['data']
                           if snap['uuid'] == snapshot['id']), None)
            if target is None:
                raise exception.NotFound
            self._issue_api_request(
                (snapu + '/{}').format(target['timestamp']),
                method='delete',
                api_version='2.1',
                tenant=tenant)
        except exception.NotFound:
            msg = _LI("Tried to delete snapshot %s, but was not found in "
                      "Datera cluster. Continuing with delete.")
            LOG.info(msg, datc._get_name(snapshot['id']))
    def _create_volume_2_1(self, volume):
        """Create the backing app_instance for ``volume`` (2.1 API)."""
        tenant = self._create_tenant(volume)
        policies = self._get_policies_for_resource(volume)
        num_replicas = int(policies['replica_count'])
        storage_name = policies['default_storage_name']
        volume_name = policies['default_volume_name']
        template = policies['template']

        if template:
            app_params = {
                'create_mode': "openstack",
                # 'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'app_template': '/app_templates/{}'.format(template),
            }
        else:
            vol_spec = {
                'name': volume_name,
                'size': volume['size'],
                'replica_count': num_replicas,
                'snapshot_policies': [],
            }
            app_params = {
                'create_mode': "openstack",
                'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'access_control_mode': 'deny_all',
                'storage_instances': [
                    {'name': storage_name, 'volumes': [vol_spec]},
                ],
            }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](),
            'post',
            body=app_params,
            api_version='2.1',
            tenant=tenant)
        self._update_qos_2_1(volume, policies, tenant)

        # Record the volume type (when known) plus driver header data.
        metadata = {}
        volume_type = self._get_volume_type_obj(volume)
        if volume_type:
            metadata[datc.M_TYPE] = volume_type['name']
        metadata.update(self.HEADER_DATA)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        self._store_metadata(url, metadata, "create_volume_2_1", tenant)
# Example #12
 def _delete_volume_2(self, volume):
     """Detach then delete the app_instance backing ``volume`` (v2)."""
     self.detach_volume(None, volume)
     app_inst = datc._get_name(volume['id'])
     url = datc.URL_TEMPLATES['ai_inst']().format(app_inst)
     try:
         self._issue_api_request(url, method='delete', api_version='2')
     except exception.NotFound:
         # Already gone on the backend; treat delete as successful.
         LOG.info("Tried to delete volume %s, but it was not found in the "
                  "Datera cluster. Continuing with delete.",
                  datc._get_name(volume['id']))
# Example #13
    def _initialize_connection_2_1(self, volume, connector):
        """Online the app_instance and return iSCSI connection info."""
        # Onlining the app_instance onlines all storage_instances too.
        multipath = connector.get('multipath', False)
        tenant = self._create_tenant(volume)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        app_inst = self._issue_api_request(
            url, method='put', body={'admin_state': 'online'},
            api_version='2.1', tenant=tenant)['data']
        # Only the first storage instance is used for the connection.
        si = app_inst["storage_instances"][0]

        portal = si['access']['ips'][0] + ':3260'
        iqn = si['access']['iqn']
        if multipath:
            ips = si['access']['ips']
            iqns = [iqn for _ in ips]
            portals = [ip + ':3260' for ip in ips]
            lunids = [self._get_lunid() for _ in ips]
            conn_data = {
                'target_discovered': False,
                'target_iqn': iqn,
                'target_iqns': iqns,
                'target_portal': portal,
                'target_portals': portals,
                'target_lun': self._get_lunid(),
                'target_luns': lunids,
                'volume_id': volume['id'],
                'discard': False,
            }
        else:
            conn_data = {
                'target_discovered': False,
                'target_iqn': iqn,
                'target_portal': portal,
                'target_lun': self._get_lunid(),
                'volume_id': volume['id'],
                'discard': False,
            }
        result = {'driver_volume_type': 'iscsi', 'data': conn_data}

        self._store_metadata(url, {}, "initialize_connection_2_1", tenant)
        return result
 def _delete_volume_2(self, volume):
     """Detach then delete the app_instance backing ``volume`` (v2)."""
     self.detach_volume(None, volume)
     app_inst = datc._get_name(volume['id'])
     try:
         self._issue_api_request(
             datc.URL_TEMPLATES['ai_inst']().format(app_inst),
             method='delete',
             api_version='2')
     except exception.NotFound:
         # Already gone on the backend; treat delete as successful.
         msg = _LI("Tried to delete volume %s, but it was not found in the "
                   "Datera cluster. Continuing with delete.")
         LOG.info(msg, datc._get_name(volume['id']))
# Example #15
 def _manage_existing_2(self, volume, existing_ref):
     """Rename an existing backend app_instance so Cinder manages it."""
     existing_ref = existing_ref['source-name']
     if existing_ref.count(":") != 2:
         raise exception.ManageExistingInvalidReference(
             _("existing_ref argument must be of this format:"
               "app_inst_name:storage_inst_name:vol_name"))
     # Only the app_instance portion of the reference is needed.
     app_inst_name = existing_ref.partition(":")[0]
     LOG.debug("Managing existing Datera volume %s.  "
               "Changing name to %s",
               datc._get_name(volume['id']),
               existing_ref)
     self._issue_api_request(
         datc.URL_TEMPLATES['ai_inst']().format(app_inst_name),
         method='put',
         body={'name': datc._get_name(volume['id'])},
         api_version='2')
    def _create_volume_2_1(self, volume):
        """Create the backing app_instance for ``volume`` (2.1 API)."""
        tenant = self._create_tenant(volume)
        policies = self._get_policies_for_resource(volume)
        num_replicas = int(policies['replica_count'])
        storage_name = policies['default_storage_name']
        volume_name = policies['default_volume_name']
        template = policies['template']
        placement = policies['placement_mode']

        if template:
            app_params = {
                'create_mode': "openstack",
                # 'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'app_template': '/app_templates/{}'.format(template),
            }
        else:
            vol_spec = {
                'name': volume_name,
                'size': volume['size'],
                'placement_mode': placement,
                'replica_count': num_replicas,
                'snapshot_policies': [],
            }
            app_params = {
                'create_mode': "openstack",
                'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'access_control_mode': 'deny_all',
                'storage_instances': [
                    {'name': storage_name, 'volumes': [vol_spec]},
                ],
            }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](),
            'post',
            body=app_params,
            api_version='2.1',
            tenant=tenant)
        self._update_qos_2_1(volume, policies, tenant)
# Example #17
 def _update_qos_2_1(self, resource, policies, tenant):
     """Replace the resource's performance policy with QoS values.

     QoS policy keys all end in "max"; zero values are dropped so they
     are never sent to the backend.
     """
     url = datc.URL_TEMPLATES['vol_inst'](
         policies['default_storage_name'],
         policies['default_volume_name']) + '/performance_policy'
     url = url.format(datc._get_name(resource['id']))
     type_id = resource.get('volume_type_id', None)
     if type_id is None:
         return
     # Keep only QOS policies (key ends with "max") with non-zero values.
     fpolicies = {k: int(v)
                  for k, v in policies.items() if k.endswith("max")}
     fpolicies = {k: v for k, v in fpolicies.items() if v > 0}
     if not fpolicies:
         return
     # Delete any existing performance policy, then create the new one.
     self._issue_api_request(url,
                             'delete',
                             api_version='2.1',
                             tenant=tenant)
     self._issue_api_request(url,
                             'post',
                             body=fpolicies,
                             api_version='2.1',
                             tenant=tenant)
    def _clean_acl_2_1(self, volume, tenant):
        """Remove the ACL binding, its initiator group and (when unused)
        the initiator itself for ``volume``'s storage instance (2.1 API).
        """
        policies = self._get_policies_for_resource(volume)

        store_name, _ = self._scrape_template(policies)

        acl_url = (datc.URL_TEMPLATES["si_inst"](
            store_name) + "/acl_policy").format(datc._get_name(volume['id']))
        try:
            # Look up the first initiator group bound to the ACL, then
            # the first initiator (member) inside that group.
            initiator_group = self._issue_api_request(
                acl_url, api_version='2.1', tenant=tenant)['data'][
                    'initiator_groups'][0]['path']
            initiator_iqn_path = self._issue_api_request(
                initiator_group.lstrip("/"), api_version='2.1', tenant=tenant)[
                    "data"]["members"][0]["path"]
            # Clear out ACL and delete initiator group
            self._issue_api_request(acl_url,
                                    method="put",
                                    body={'initiator_groups': []},
                                    api_version='2.1',
                                    tenant=tenant)
            self._issue_api_request(initiator_group.lstrip("/"),
                                    method="delete",
                                    api_version='2.1',
                                    tenant=tenant)
            # Only delete the initiator itself when no other ACL still
            # references it.
            if not self._check_for_acl_2(initiator_iqn_path):
                self._issue_api_request(initiator_iqn_path.lstrip("/"),
                                        method="delete",
                                        api_version='2.1',
                                        tenant=tenant)
        except (IndexError, exception.NotFound):
            # IndexError: the ACL has no initiator groups/members;
            # NotFound: the object is already gone. Either way there is
            # nothing left to clean.
            LOG.debug("Did not find any initiator groups for volume: %s",
                      volume)
# Example #19
    def _clean_acl_2(self, volume):
        """Remove the ACL binding, its initiator group and (when unused)
        the initiator itself for ``volume``'s storage instance (v2 API).
        """
        policies = self._get_policies_for_resource(volume)

        store_name, _ = self._scrape_template(policies)

        acl_url = (datc.URL_TEMPLATES["si_inst"](
            store_name) + "/acl_policy").format(datc._get_name(volume['id']))
        try:
            initiator_group = self._issue_api_request(
                acl_url, api_version='2')['initiator_groups'][0]
            # CONSISTENCY FIX: every other request in this method pins
            # api_version='2'; this lookup previously omitted it and
            # would fall back to the driver default version.
            initiator_iqn_path = self._issue_api_request(
                initiator_group.lstrip("/"), api_version='2')["members"][0]
            # Clear out ACL and delete initiator group
            self._issue_api_request(acl_url,
                                    method="put",
                                    body={'initiator_groups': []},
                                    api_version='2')
            self._issue_api_request(initiator_group.lstrip("/"),
                                    method="delete",
                                    api_version='2')
            # Only delete the initiator itself when no other ACL still
            # references it.
            if not self._check_for_acl_2(initiator_iqn_path):
                self._issue_api_request(initiator_iqn_path.lstrip("/"),
                                        method="delete",
                                        api_version='2')
        except (IndexError, exception.NotFound):
            LOG.debug("Did not find any initiator groups for volume: %s",
                      volume)
    def _get_size_2_1(self, volume, tenant=None, app_inst=None, si_name=None,
                      vol_name=None):
        """Helper method for getting the size of a backend object

        If app_inst is provided, we'll just parse the dict to get
        the size instead of making a separate http request
        """
        policies = self._get_policies_for_resource(volume)
        if not si_name:
            si_name = policies['default_storage_name']
        if not vol_name:
            vol_name = policies['default_volume_name']
        if not app_inst:
            vol_url = datc.URL_TEMPLATES['ai_inst']().format(
                datc._get_name(volume['id']))
            app_inst = self._issue_api_request(
                vol_url, api_version='2.1', tenant=tenant)['data']
        # Accept either the raw API response or its 'data' payload.
        if 'data' in app_inst:
            app_inst = app_inst['data']
        found_si = next((si for si in app_inst['storage_instances']
                         if si['name'] == si_name), None)
        found_vol = None
        for vol in found_si['volumes']:
            if vol['name'] == vol_name:
                found_vol = vol
        return found_vol['size']
    def _get_size_2_1(self, volume, tenant=None, app_inst=None, si_name=None,
                      vol_name=None):
        """Helper method for getting the size of a backend object

        If app_inst is provided, we'll just parse the dict to get
        the size instead of making a separate http request
        """
        policies = self._get_policies_for_resource(volume)
        # Fall back to the policy defaults when names are not supplied.
        si_name = si_name if si_name else policies['default_storage_name']
        vol_name = vol_name if vol_name else policies['default_volume_name']
        if not app_inst:
            vol_url = datc.URL_TEMPLATES['ai_inst']().format(
                datc._get_name(volume['id']))
            app_inst = self._issue_api_request(
                vol_url, api_version='2.1', tenant=tenant)['data']
        # Callers may pass either the raw API response or its 'data'
        # payload; normalize to the payload dict.
        if 'data' in app_inst:
            app_inst = app_inst['data']
        sis = app_inst['storage_instances']
        found_si = None
        for si in sis:
            if si['name'] == si_name:
                found_si = si
                break
        # NOTE(review): if no storage instance matches si_name, found_si
        # stays None and the loop below raises TypeError -- presumably
        # callers always pass a valid name; confirm.
        found_vol = None
        for vol in found_si['volumes']:
            if vol['name'] == vol_name:
                found_vol = vol
        size = found_vol['size']
        return size
# Example #22
 def _manage_existing_2(self, volume, existing_ref):
     """Rename an existing backend app_instance so Cinder manages it."""
     existing_ref = existing_ref['source-name']
     if existing_ref.count(":") != 2:
         raise exception.ManageExistingInvalidReference(
             _("existing_ref argument must be of this format:"
               "app_inst_name:storage_inst_name:vol_name"))
     # Only the app_instance portion of the reference is needed.
     app_inst_name = existing_ref.partition(":")[0]
     LOG.debug(
         "Managing existing Datera volume %s.  "
         "Changing name to %s", datc._get_name(volume['id']), existing_ref)
     self._issue_api_request(
         datc.URL_TEMPLATES['ai_inst']().format(app_inst_name),
         method='put',
         body={'name': datc._get_name(volume['id'])},
         api_version='2')
# Example #23
    def _create_volume_2(self, volume):
        """Create the backing app_instance for ``volume`` (v2 API).

        The volume ID becomes the app_instance name; the storage
        instance and volume get the standard names from policies.
        """
        policies = self._get_policies_for_resource(volume)
        num_replicas = int(policies['replica_count'])
        storage_name = policies['default_storage_name']
        volume_name = policies['default_volume_name']
        template = policies['template']

        if template:
            app_params = {
                'create_mode': "openstack",
                # 'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'app_template': '/app_templates/{}'.format(template),
            }
        else:
            vol_spec = {
                'name': volume_name,
                'size': volume['size'],
                'replica_count': num_replicas,
                'snapshot_policies': {},
            }
            app_params = {
                'create_mode': "openstack",
                'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'access_control_mode': 'deny_all',
                'storage_instances': {
                    storage_name: {
                        'name': storage_name,
                        'volumes': {volume_name: vol_spec},
                    },
                },
            }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](),
            'post',
            body=app_params,
            api_version='2')
        self._update_qos(volume, policies)
# Example #24
    def _create_volume_2_1(self, volume):
        """Create the backing app_instance for ``volume`` (2.1 API)."""
        tenant = self._create_tenant(volume)
        policies = self._get_policies_for_resource(volume)
        num_replicas = int(policies['replica_count'])
        storage_name = policies['default_storage_name']
        volume_name = policies['default_volume_name']
        template = policies['template']
        placement = policies['placement_mode']

        if template:
            app_params = {
                'create_mode': "openstack",
                # 'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'app_template': '/app_templates/{}'.format(template),
            }
        else:
            app_params = {
                'create_mode': "openstack",
                'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'access_control_mode': 'deny_all',
                'storage_instances': [{
                    'name': storage_name,
                    'volumes': [{
                        'name': volume_name,
                        'size': volume['size'],
                        'placement_mode': placement,
                        'replica_count': num_replicas,
                        'snapshot_policies': [],
                    }],
                }],
            }
        self._issue_api_request(datc.URL_TEMPLATES['ai'](),
                                'post',
                                body=app_params,
                                api_version='2.1',
                                tenant=tenant)
        self._update_qos_2_1(volume, policies, tenant)
Exemple #25
0
 def _unmanage_2(self, volume):
     """Release a volume from Cinder management (v2 API).

     The backend app_instance is renamed with the unmanaged prefix so it
     survives on the cluster but is no longer tracked by Cinder.
     """
     LOG.debug("Unmanaging Cinder volume %s.  Changing name to %s",
               volume['id'], datc._get_unmanaged(volume['id']))
     rename_body = {'name': datc._get_unmanaged(volume['id'])}
     url = datc.URL_TEMPLATES['ai_inst']().format(
         datc._get_name(volume['id']))
     self._issue_api_request(url, method='put', body=rename_body,
                             api_version='2')
Exemple #26
0
 def _unmanage_2(self, volume):
     """Stop managing *volume*: rename its backend app_instance."""
     target = datc._get_unmanaged(volume['id'])
     LOG.debug("Unmanaging Cinder volume %s.  Changing name to %s",
               volume['id'], target)
     self._issue_api_request(
         datc.URL_TEMPLATES['ai_inst']().format(
             datc._get_name(volume['id'])),
         method='put',
         body={'name': target},
         api_version='2')
Exemple #27
0
    def _create_cloned_volume_2(self, volume, src_vref):
        """Clone an existing volume via the v2 API.

        Creates a new app_instance whose 'clone_src' points at the source
        volume path, then grows it if the new volume is larger.
        """
        policies = self._get_policies_for_resource(volume)
        store_name, vol_name = self._scrape_template(policies)

        src_path = "/" + datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name).format(datc._get_name(src_vref['id']))
        body = {
            'create_mode': 'openstack',
            'name': datc._get_name(volume['id']),
            'uuid': str(volume['id']),
            'clone_src': src_path,
        }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](), 'post', body=body, api_version='2')

        # Clones inherit the source size; extend when a larger volume
        # was requested.
        if volume['size'] > src_vref['size']:
            self._extend_volume_2(volume, volume['size'])
Exemple #28
0
    def _create_volume_2(self, volume):
        """Create a volume via the v2 Datera API.

        The volume id becomes the app_instance name; the storage instance
        and volume carry the policy-defined default names.
        """
        policies = self._get_policies_for_resource(volume)
        replicas = int(policies['replica_count'])
        storage_name = policies['default_storage_name']
        volume_name = policies['default_volume_name']
        template = policies['template']

        if template:
            app_params = {
                'create_mode': "openstack",
                # 'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'app_template': '/app_templates/{}'.format(template)
            }
        else:
            # v2 uses name-keyed dicts (not lists) for nested resources.
            app_params = {
                'create_mode': "openstack",
                'uuid': str(volume['id']),
                'name': datc._get_name(volume['id']),
                'access_control_mode': 'deny_all',
                'storage_instances': {
                    storage_name: {
                        'name': storage_name,
                        'volumes': {
                            volume_name: {
                                'name': volume_name,
                                'size': volume['size'],
                                'replica_count': replicas,
                                'snapshot_policies': {}
                            }
                        }
                    }
                }
            }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](), 'post', body=app_params,
            api_version='2')
        self._update_qos(volume, policies)
Exemple #29
0
    def _create_volume_from_snapshot_2_1(self, volume, snapshot):
        """Create a volume from *snapshot* via the v2.1 API.

        Maps the Cinder snapshot uuid to its backend timestamp, waits for
        the snapshot to become available, clones it into a new
        app_instance, and extends the result if a larger volume was
        requested.
        """
        tenant = self._create_tenant(volume)
        policies = self._get_policies_for_resource(snapshot)
        store_name, vol_name = self._scrape_template(policies)

        snap_temp = datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name) + '/snapshots'
        snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
        snapshots = self._issue_api_request(
            snapu, method='get', api_version='2.1', tenant=tenant)

        # The backend keys snapshots by timestamp; find ours by uuid.
        for snap in snapshots['data']:
            if snap['uuid'] == snapshot['id']:
                found_ts = snap['utc_ts']
                break
        else:
            raise exception.NotFound

        snap_url = (snap_temp + '/{}').format(
            datc._get_name(snapshot['volume_id']), found_ts)
        self._snap_poll_2_1(snap_url, tenant)

        body = {
            'create_mode': 'openstack',
            'uuid': str(volume['id']),
            'name': datc._get_name(volume['id']),
            'clone_snapshot_src': {'path': "/" + snap_url},
        }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](), method='post', body=body,
            api_version='2.1', tenant=tenant)

        if volume['size'] > snapshot['volume_size']:
            self._extend_volume_2_1(volume, volume['size'])
Exemple #30
0
    def _create_cloned_volume_2(self, volume, src_vref):
        """Clone *src_vref* into a new volume (v2 API)."""
        policies = self._get_policies_for_resource(volume)
        store_name, vol_name = self._scrape_template(policies)

        src_name = datc._get_name(src_vref['id'])
        src = "/" + datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name).format(src_name)
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](),
            'post',
            body={
                'create_mode': 'openstack',
                'name': datc._get_name(volume['id']),
                'uuid': str(volume['id']),
                'clone_src': src,
            },
            api_version='2')

        # Grow the clone when the requested size exceeds the source's.
        if volume['size'] > src_vref['size']:
            self._extend_volume_2(volume, volume['size'])
Exemple #31
0
    def _initialize_connection_2(self, volume, connector):
        """Online the app_instance and return iSCSI connection info (v2).

        Onlining the app_instance onlines all of its storage_instances.
        Returns a single-path or multipath connection dict depending on
        the connector's 'multipath' flag.
        """
        multipath = connector.get('multipath', False)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        data = {'admin_state': 'online'}
        # Pin api_version to '2' for consistency with the other *_2
        # methods in this driver, instead of relying on the module-level
        # API_VERSION constant.
        app_inst = self._issue_api_request(url,
                                           method='put',
                                           body=data,
                                           api_version='2')
        storage_instances = app_inst["storage_instances"]
        si_names = list(storage_instances.keys())
        # All targets come from the first storage instance.
        access = storage_instances[si_names[0]]['access']
        ips = access['ips']

        portal = ips[0] + ':3260'
        iqn = access['iqn']
        if multipath:
            portals = [p + ':3260' for p in ips]
            iqns = [iqn for _ in ips]
            lunids = [self._get_lunid() for _ in ips]

            return {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_iqns': iqns,
                    'target_portal': portal,
                    'target_portals': portals,
                    'target_lun': self._get_lunid(),
                    'target_luns': lunids,
                    'volume_id': volume['id'],
                    'discard': False
                }
            }
        else:
            return {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_portal': portal,
                    'target_lun': self._get_lunid(),
                    'volume_id': volume['id'],
                    'discard': False
                }
            }
    def _create_volume_from_snapshot_2_1(self, volume, snapshot):
        """Build a new volume by cloning *snapshot* (v2.1 API)."""
        tenant = self._create_tenant(volume)
        policies = self._get_policies_for_resource(snapshot)
        store_name, vol_name = self._scrape_template(policies)

        snap_temp = datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name) + '/snapshots'
        snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
        snapshots = self._issue_api_request(
            snapu, method='get', api_version='2.1', tenant=tenant)

        # Locate the backend timestamp for this snapshot's uuid.
        found_ts = None
        for snap in snapshots['data']:
            if snap['uuid'] == snapshot['id']:
                found_ts = snap['utc_ts']
                break
        if found_ts is None:
            raise exception.NotFound

        snap_url = (snap_temp + '/{}').format(
            datc._get_name(snapshot['volume_id']), found_ts)
        self._snap_poll_2_1(snap_url, tenant)

        app_params = {
            'create_mode': 'openstack',
            'uuid': str(volume['id']),
            'name': datc._get_name(volume['id']),
            'clone_snapshot_src': {'path': "/" + snap_url},
        }
        self._issue_api_request(
            datc.URL_TEMPLATES['ai'](), method='post', body=app_params,
            api_version='2.1', tenant=tenant)

        if volume['size'] > snapshot['volume_size']:
            self._extend_volume_2_1(volume, volume['size'])
    def _delete_snapshot_2_1(self, snapshot):
        """Delete a backend snapshot via the v2.1 API.

        Both a missing parent volume and a missing snapshot are treated
        as success, per Cinder's delete semantics.
        """
        tenant = self._create_tenant(snapshot)
        policies = self._get_policies_for_resource(snapshot)

        store_name, vol_name = self._scrape_template(policies)

        snap_temp = datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name) + '/snapshots'
        snapu = snap_temp.format(datc._get_name(snapshot['volume_id']))
        snapshots = []
        try:
            snapshots = self._issue_api_request(snapu,
                                                method='get',
                                                api_version='2.1',
                                                tenant=tenant)
        except exception.NotFound:
            # Parent volume already gone on the backend: nothing to delete.
            msg = ("Tried to delete snapshot %s, but parent volume %s was "
                   "not found in Datera cluster. Continuing with delete.")
            LOG.info(msg,
                     datc._get_name(snapshot['id']),
                     datc._get_name(snapshot['volume_id']))
            return

        try:
            # Snapshots are keyed by timestamp on the backend; match ours
            # by uuid, then delete by timestamp.
            for snap in snapshots['data']:
                if snap['uuid'] == snapshot['id']:
                    url_template = snapu + '/{}'
                    url = url_template.format(snap['timestamp'])
                    self._issue_api_request(
                        url,
                        method='delete',
                        api_version='2.1',
                        tenant=tenant)
                    break
            else:
                raise exception.NotFound
        except exception.NotFound:
            msg = ("Tried to delete snapshot %s, but was not found in "
                   "Datera cluster. Continuing with delete.")
            LOG.info(msg, datc._get_name(snapshot['id']))
    def _initialize_connection_2_1(self, volume, connector):
        """Online the app_instance and return iSCSI connection info (v2.1).

        Onlining the app_instance onlines all of its storage_instances.
        When the volume-type policy requests round_robin, the portal is
        picked randomly between the first two access IPs.
        """
        multipath = connector.get('multipath', False)
        tenant = self._create_tenant(volume)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        app_inst = self._issue_api_request(
            url, method='put', body=data, api_version='2.1', tenant=tenant)[
            'data']
        storage_instances = app_inst["storage_instances"]
        si = storage_instances[0]
        ips = si['access']['ips']

        # randomize portal chosen
        choice = 0
        policies = self._get_policies_for_resource(volume)
        # Guard against single-IP storage instances: the unguarded
        # random.randint(0, 1) could index past the end of 'ips'.
        if policies["round_robin"] and len(ips) > 1:
            choice = random.randint(0, 1)
        portal = ips[choice] + ':3260'
        iqn = si['access']['iqn']
        if multipath:
            portals = [p + ':3260' for p in ips]
            iqns = [iqn for _ in ips]
            lunids = [self._get_lunid() for _ in ips]

            result = {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_iqns': iqns,
                    'target_portal': portal,
                    'target_portals': portals,
                    'target_lun': self._get_lunid(),
                    'target_luns': lunids,
                    'volume_id': volume['id'],
                    'discard': False}}
        else:
            result = {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_portal': portal,
                    'target_lun': self._get_lunid(),
                    'volume_id': volume['id'],
                    'discard': False}}

        return result
Exemple #35
0
 def _detach_volume_2(self, context, volume, attachment=None):
     """Offline the volume's app_instance and clean up its ACLs (v2)."""
     url = datc.URL_TEMPLATES['ai_inst']().format(
         datc._get_name(volume['id']))
     offline_body = {'admin_state': 'offline', 'force': True}
     try:
         self._issue_api_request(url, method='put', body=offline_body,
                                 api_version='2')
     except exception.NotFound:
         # Already gone on the backend; treat detach as a no-op.
         msg = ("Tried to detach volume %s, but it was not found in the "
                "Datera cluster. Continuing with detach.")
         LOG.info(msg, volume['id'])
     # TODO(_alastor_): Make acl cleaning multi-attach aware
     self._clean_acl_2(volume)
Exemple #36
0
    def _extend_volume_2(self, volume, new_size):
        """Resize a volume to *new_size* via the v2 API.

        Template-bound app_instances cannot be resized (product
        limitation), so those are skipped with a warning.  The instance
        is taken offline for the resize and brought back online if it
        was online beforehand.
        """
        # Current product limitation:
        # If app_instance is bound to template resizing is not possible
        # Once policies are implemented in the product this can go away
        policies = self._get_policies_for_resource(volume)
        template = policies['template']
        if template:
            # Fix: pass the mapping positionally for %-interpolation;
            # logging.warning() rejects arbitrary keyword arguments
            # (volume=/template= raised TypeError at runtime).
            LOG.warning("Volume size not extended due to template binding:"
                        " volume: %(volume)s, template: %(template)s",
                        {'volume': volume, 'template': template})
            return

        # Offline App Instance, if necessary
        reonline = False
        app_inst = self._issue_api_request(
            datc.URL_TEMPLATES['ai_inst']().format(
                datc._get_name(volume['id'])),
            api_version='2')
        if app_inst['admin_state'] == 'online':
            reonline = True
            self._detach_volume_2(None, volume)

        # Change Volume Size
        store_name, vol_name = self._scrape_template(policies)
        vol_url = datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name).format(datc._get_name(volume['id']))
        self._issue_api_request(vol_url,
                                method='put',
                                body={'size': new_size},
                                api_version='2')

        # Online Volume, if it was online before
        if reonline:
            self._create_export_2(None, volume, None)
Exemple #37
0
    def _create_snapshot_2(self, snapshot):
        """Take a backend snapshot via the v2 API and wait until it is
        available.
        """
        policies = self._get_policies_for_resource(snapshot)
        store_name, vol_name = self._scrape_template(policies)

        url = (datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name) + '/snapshots').format(
                datc._get_name(snapshot['volume_id']))

        snap = self._issue_api_request(
            url, method='post', body={'uuid': snapshot['id']},
            api_version='2')
        # Poll until the new snapshot (keyed by timestamp) is ready.
        self._snap_poll("/".join((url, snap['timestamp'])))
 def _update_qos(self, resource, policies):
     """Push QoS (performance_policy) settings for *resource* (v2 API).

     Only policy keys ending in "max" are QoS values; zero values are
     dropped so the backend is not sent no-op limits.
     """
     url = datc.URL_TEMPLATES['vol_inst'](
         policies['default_storage_name'],
         policies['default_volume_name']) + '/performance_policy'
     url = url.format(datc._get_name(resource['id']))
     type_id = resource.get('volume_type_id', None)
     if type_id is None:
         return
     # Filter for just QOS policies in result. All of their keys
     # should end with "max"
     fpolicies = {k: int(v)
                  for k, v in policies.items() if k.endswith("max")}
     # Filter all 0 values from being passed
     fpolicies = {k: v for k, v in fpolicies.items() if v > 0}
     if fpolicies:
         self._issue_api_request(url, 'post', body=fpolicies,
                                 api_version='2')
Exemple #39
0
    def _get_size_2(self, volume, app_inst=None, si_name=None, vol_name=None):
        """Helper method for getting the size of a backend object

        If app_inst is provided, we'll just parse the dict to get
        the size instead of making a separate http request
        """
        policies = self._get_policies_for_resource(volume)
        si_name = si_name or policies['default_storage_name']
        vol_name = vol_name or policies['default_volume_name']
        if not app_inst:
            vol_url = datc.URL_TEMPLATES['ai_inst']().format(
                datc._get_name(volume['id']))
            app_inst = self._issue_api_request(vol_url)
        return app_inst['storage_instances'][si_name]['volumes'][vol_name][
            'size']
Exemple #40
0
 def _detach_volume_2(self, context, volume, attachment=None):
     """Force the app_instance offline, then remove its ACL (v2 API)."""
     try:
         self._issue_api_request(
             datc.URL_TEMPLATES['ai_inst']().format(
                 datc._get_name(volume['id'])),
             method='put',
             body={'admin_state': 'offline', 'force': True},
             api_version='2')
     except exception.NotFound:
         # A missing backend volume makes detach a no-op.
         msg = ("Tried to detach volume %s, but it was not found in the "
                "Datera cluster. Continuing with detach.")
         LOG.info(msg, volume['id'])
     # TODO(_alastor_): Make acl cleaning multi-attach aware
     self._clean_acl_2(volume)
Exemple #41
0
    def _get_size_2(self, volume, app_inst=None, si_name=None, vol_name=None):
        """Helper method for getting the size of a backend object

        If app_inst is provided, we'll just parse the dict to get
        the size instead of making a separate http request
        """
        policies = self._get_policies_for_resource(volume)
        if not si_name:
            si_name = policies['default_storage_name']
        if not vol_name:
            vol_name = policies['default_volume_name']
        if not app_inst:
            app_inst = self._issue_api_request(
                datc.URL_TEMPLATES['ai_inst']().format(
                    datc._get_name(volume['id'])))
        vol = app_inst['storage_instances'][si_name]['volumes'][vol_name]
        return vol['size']
Exemple #42
0
    def _retype_2_1(self, ctxt, volume, new_type, diff, host):
        """Fast-retype *volume* to *new_type* when both types live on this
        backend; otherwise decline so Cinder falls back to migration.

        Returns True on success, False when a fast retype is not possible.
        """
        LOG.debug("Retype called\n"
                  "Volume: %(volume)s\n"
                  "NewType: %(new_type)s\n"
                  "Diff: %(diff)s\n"
                  "Host: %(host)s\n",
                  {'volume': volume, 'new_type': new_type, 'diff': diff,
                   'host': host})
        # We'll take the fast route only if the types share the same backend
        # And that backend matches this driver
        old_pol = self._get_policies_for_resource(volume)
        new_pol = self._get_policies_for_volume_type(new_type)
        if (host['capabilities']['vendor_name'].lower() !=
                self.backend_name.lower()):
            LOG.debug("Couldn't fast-retype volume between specified types")
            return False

        LOG.debug("Starting fast volume retype")
        if old_pol.get('template') or new_pol.get('template'):
            LOG.warning(
                "Fast retyping between template-backed volume-types "
                "unsupported.  Type1: %s, Type2: %s",
                volume['volume_type_id'], new_type)

        tenant = self._create_tenant(volume)
        self._update_qos_2_1(volume, new_pol, tenant)
        vol_params = {
            'placement_mode': new_pol['placement_mode'],
            'replica_count': new_pol['replica_count'],
        }
        url = datc.URL_TEMPLATES['vol_inst'](
            old_pol['default_storage_name'],
            old_pol['default_volume_name']).format(
                datc._get_name(volume['id']))
        self._issue_api_request(url, method='put', body=vol_params,
                                api_version='2.1', tenant=tenant)
        return True
    def _create_snapshot_2_1(self, snapshot):
        """Take a backend snapshot via the v2.1 API and wait for it."""
        tenant = self._create_tenant(snapshot)
        policies = self._get_policies_for_resource(snapshot)
        store_name, vol_name = self._scrape_template(policies)

        url = (datc.URL_TEMPLATES['vol_inst'](
            store_name, vol_name) + '/snapshots').format(
                datc._get_name(snapshot['volume_id']))

        snap = self._issue_api_request(
            url, method='post', body={'uuid': snapshot['id']},
            api_version='2.1', tenant=tenant)
        # v2.1 responses wrap the payload in a 'data' envelope.
        self._snap_poll_2_1("/".join((url, snap['data']['timestamp'])),
                            tenant)
Exemple #44
0
    def _initialize_connection_2(self, volume, connector):
        """Online the app_instance and build the iSCSI connection dict (v2).

        Onlining the app_instance onlines every storage_instance under it.
        """
        multipath = connector.get('multipath', False)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        app_inst = self._issue_api_request(
            url, method='put', body={'admin_state': 'online'},
            api_version='2')
        storage_instances = app_inst["storage_instances"]
        # Targets come from the first storage instance.
        first_si = storage_instances[list(storage_instances.keys())[0]]
        access = first_si['access']

        portal = access['ips'][0] + ':3260'
        iqn = access['iqn']
        if multipath:
            ips = access['ips']
            portals = [ip + ':3260' for ip in ips]
            iqns = [iqn] * len(ips)
            lunids = [self._get_lunid() for _ in ips]

            return {
                'driver_volume_type': 'iscsi',
                'data': {
                    'target_discovered': False,
                    'target_iqn': iqn,
                    'target_iqns': iqns,
                    'target_portal': portal,
                    'target_portals': portals,
                    'target_lun': self._get_lunid(),
                    'target_luns': lunids,
                    'volume_id': volume['id'],
                    'discard': False}}
        return {
            'driver_volume_type': 'iscsi',
            'data': {
                'target_discovered': False,
                'target_iqn': iqn,
                'target_portal': portal,
                'target_lun': self._get_lunid(),
                'volume_id': volume['id'],
                'discard': False}}
Exemple #45
0
 def _si_poll(self, volume, policies):
     """Block until the volume's storage_instance reports 'available'.

     Polls once per second up to TIMEOUT attempts and raises
     VolumeDriverException when the resource never becomes ready.
     """
     # Initial 4 second sleep required for some Datera versions
     eventlet.sleep(datc.DEFAULT_SI_SLEEP_API_2)
     TIMEOUT = 10
     check_url = datc.URL_TEMPLATES['si_inst'](
         policies['default_storage_name']).format(
             datc._get_name(volume['id']))
     for _ in range(TIMEOUT):
         si = self._issue_api_request(check_url, api_version='2')
         if si['op_state'] == 'available':
             # Fix: the previous retry-counter check raised even when the
             # resource became available on the final attempt.
             break
         eventlet.sleep(1)
     else:
         raise exception.VolumeDriverException(
             message=_('Resource not ready.'))
Exemple #46
0
 def _si_poll(self, volume, policies):
     """Poll the storage_instance until its op_state is 'available'.

     Retries up to TIMEOUT times with a 1s pause; raises
     VolumeDriverException if the resource never becomes ready.
     """
     # Initial 4 second sleep required for some Datera versions
     eventlet.sleep(datc.DEFAULT_SI_SLEEP_API_2)
     TIMEOUT = 10
     check_url = datc.URL_TEMPLATES['si_inst'](
         policies['default_storage_name']).format(
             datc._get_name(volume['id']))
     for _ in range(TIMEOUT):
         si = self._issue_api_request(check_url, api_version='2')
         if si['op_state'] == 'available':
             # Fix: the previous retry-counter check raised even when the
             # resource became available on the final attempt.
             break
         eventlet.sleep(1)
     else:
         raise exception.VolumeDriverException(
             message=_('Resource not ready.'))
    def _create_tenant(self, volume=None):
        """Resolve and lazily create the Datera tenant for *volume*.

        Behavior by config:
          - tenant_id unset: no tenant (returns None).
          - tenant_id == "map": derive the tenant name from the volume's
            project_id (None when no volume is given).
          - otherwise: use tenant_id verbatim.
        The tenant is created on the backend (conflict-tolerant) before
        being returned.
        """
        if self.tenant_id is None:
            tenant = None
        elif self.tenant_id.lower() == "map":
            # Convert dashless uuid to uuid with dashes
            # Eg: 0e33e95a9b154d348c675a1d8ea5b651 -->
            #       0e33e95a-9b15-4d34-8c67-5a1d8ea5b651
            tenant = (datc._get_name(str(uuid.UUID(volume["project_id"])))
                      if volume else None)
        else:
            tenant = self.tenant_id

        if tenant:
            self._issue_api_request(
                'tenants', method='post', body={'name': tenant},
                conflict_ok=True, api_version='2.1')
        return tenant
    def _create_tenant(self, volume=None):
        """Return the configured Datera tenant, creating it if needed.

        A tenant_id of None means "no tenant"; the special value "map"
        derives the tenant name from the volume's project_id.
        """
        # Create the Datera tenant if specified in the config
        # Otherwise use the tenant provided
        if self.tenant_id is None:
            tenant = None
        elif self.tenant_id.lower() == "map" and volume:
            # Convert dashless uuid to uuid with dashes
            # Eg: 0e33e95a9b154d348c675a1d8ea5b651 -->
            #       0e33e95a-9b15-4d34-8c67-5a1d8ea5b651
            tenant = datc._get_name(str(uuid.UUID(volume["project_id"])))
        elif self.tenant_id.lower() == "map" and not volume:
            tenant = None
        else:
            tenant = self.tenant_id

        if not tenant:
            return tenant
        # Conflict-tolerant create: an existing tenant is fine.
        self._issue_api_request(
            'tenants', method='post', body={'name': tenant},
            conflict_ok=True, api_version='2.1')
        return tenant
    def _retype_2_1(self, ctxt, volume, new_type, diff, host):
        """Attempt an in-place ("fast") retype on this backend.

        Returns True when QoS/placement/replica settings were updated in
        place; False when the host belongs to a different backend.
        """
        LOG.debug(
            "Retype called\n"
            "Volume: %(volume)s\n"
            "NewType: %(new_type)s\n"
            "Diff: %(diff)s\n"
            "Host: %(host)s\n", {
                'volume': volume,
                'new_type': new_type,
                'diff': diff,
                'host': host
            })
        # We'll take the fast route only if the types share the same backend
        # And that backend matches this driver
        current_policies = self._get_policies_for_resource(volume)
        target_policies = self._get_policies_for_volume_type(new_type)
        same_backend = (host['capabilities']['vendor_name'].lower() ==
                        self.backend_name.lower())
        if not same_backend:
            LOG.debug("Couldn't fast-retype volume between specified types")
            return False

        LOG.debug("Starting fast volume retype")
        if (current_policies.get('template') or
                target_policies.get('template')):
            LOG.warning(
                "Fast retyping between template-backed volume-types "
                "unsupported.  Type1: %s, Type2: %s",
                volume['volume_type_id'], new_type)

        tenant = self._create_tenant(volume)
        self._update_qos_2_1(volume, target_policies, tenant)
        url = datc.URL_TEMPLATES['vol_inst'](
            current_policies['default_storage_name'],
            current_policies['default_volume_name']).format(
                datc._get_name(volume['id']))
        self._issue_api_request(
            url,
            method='put',
            body={
                'placement_mode': target_policies['placement_mode'],
                'replica_count': target_policies['replica_count'],
            },
            api_version='2.1',
            tenant=tenant)
        return True
    def _clean_acl_2_1(self, volume, tenant):
        """Remove the ACL policy and initiator group for *volume* (v2.1).

        Looks up the first initiator group attached to the storage
        instance's acl_policy, clears the policy, then deletes the group.
        Missing ACLs or groups (IndexError / NotFound) are treated as
        already clean.
        """
        policies = self._get_policies_for_resource(volume)

        store_name, _ = self._scrape_template(policies)

        acl_url = (datc.URL_TEMPLATES["si_inst"](
            store_name) + "/acl_policy").format(datc._get_name(volume['id']))
        try:
            # Only the first attached initiator group is considered.
            initiator_group = self._issue_api_request(
                acl_url, api_version='2.1', tenant=tenant)['data'][
                    'initiator_groups'][0]['path']
            # TODO(_alastor_): Re-enable this when we get a force-delete
            # option on the /initiators endpoint
            # initiator_iqn_path = self._issue_api_request(
            #     initiator_group.lstrip("/"), api_version='2.1',
            #     tenant=tenant)[
            #         "data"]["members"][0]["path"]
            # Clear out ACL and delete initiator group
            self._issue_api_request(acl_url,
                                    method="put",
                                    body={'initiator_groups': []},
                                    api_version='2.1',
                                    tenant=tenant)
            self._issue_api_request(initiator_group.lstrip("/"),
                                    method="delete",
                                    api_version='2.1',
                                    tenant=tenant)
            # TODO(_alastor_): Re-enable this when we get a force-delete
            # option on the /initiators endpoint
            # if not self._check_for_acl_2_1(initiator_iqn_path):
            #     self._issue_api_request(initiator_iqn_path.lstrip("/"),
            #                             method="delete",
            #                             api_version='2.1',
            #                             tenant=tenant)
        except (IndexError, exception.NotFound):
            LOG.debug("Did not find any initiator groups for volume: %s",
                      volume)
Exemple #51
0
    def _create_export_2(self, context, volume, connector):
        """Export *volume* through the v2 API.

        Brings the app_instance online, ensures the connector's initiator
        exists on the product (creating it and a per-volume initiator group
        if needed), attaches that group to every storage_instance ACL,
        assigns an IP pool based on policy or the connector IP, and finally
        polls until the storage_instance is ready.

        :param context: Cinder request context (unused here).
        :param volume: volume being exported.
        :param connector: connection properties; 'initiator' (iSCSI IQN)
                          and 'ip' keys are consulted when present.
        """
        # Online volume in case it hasn't been already
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        self._issue_api_request(url, method='put', body=data, api_version='2')
        # Check if we've already setup everything for this volume
        url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id'])))
        storage_instances = self._issue_api_request(url, api_version='2')
        # Handle adding initiator to product if necessary
        # Then add initiator to ACL
        policies = self._get_policies_for_resource(volume)

        store_name, _ = self._scrape_template(policies)

        if (connector and
                connector.get('initiator') and
                not policies['acl_allow_all']):
            initiator_name = "OpenStack_{}_{}".format(
                self.driver_prefix, str(uuid.uuid4())[:4])
            initiator_group = datc.INITIATOR_GROUP_PREFIX + volume['id']
            initiator = connector['initiator']
            # The v2 response is a mapping keyed by IQN, so a direct
            # membership test replaces the original linear scan.
            current_initiators = self._issue_api_request(
                'initiators', api_version='2')
            # If we didn't find a matching initiator, create one
            if initiator not in current_initiators:
                data = {'id': initiator, 'name': initiator_name}
                # Try and create the initiator
                # If we get a conflict, ignore it because race conditions
                self._issue_api_request("initiators",
                                        method="post",
                                        body=data,
                                        conflict_ok=True,
                                        api_version='2')
            # Create initiator group with initiator in it
            initiator_path = "/initiators/{}".format(initiator)
            initiator_group_path = "/initiator_groups/{}".format(
                initiator_group)
            ig_data = {'name': initiator_group, 'members': [initiator_path]}
            self._issue_api_request("initiator_groups",
                                    method="post",
                                    body=ig_data,
                                    conflict_ok=True,
                                    api_version='2')
            # Create ACL with initiator group as reference for each
            # storage_instance in app_instance
            # TODO(_alastor_): We need to avoid changing the ACLs if the
            # template already specifies an ACL policy.
            for si_name in storage_instances.keys():
                acl_url = (datc.URL_TEMPLATES['si']() +
                           "/{}/acl_policy").format(
                    datc._get_name(volume['id']), si_name)
                existing_acl = self._issue_api_request(acl_url,
                                                       method="get",
                                                       api_version='2')
                data = {}
                data['initiators'] = existing_acl['initiators']
                data['initiator_groups'] = existing_acl['initiator_groups']
                data['initiator_groups'].append(initiator_group_path)
                self._issue_api_request(acl_url,
                                        method="put",
                                        body=data,
                                        api_version='2')

        if connector and connector.get('ip'):
            try:
                # Case where volume_type has non default IP Pool info
                if policies['ip_pool'] != 'default':
                    initiator_ip_pool_path = self._issue_api_request(
                        "access_network_ip_pools/{}".format(
                            policies['ip_pool']), api_version='2')['path']
                # Fallback to trying reasonable IP based guess
                else:
                    initiator_ip_pool_path = self._get_ip_pool_for_string_ip(
                        connector['ip'])

                ip_pool_url = datc.URL_TEMPLATES['si_inst'](
                    store_name).format(datc._get_name(volume['id']))
                ip_pool_data = {'ip_pool': initiator_ip_pool_path}
                self._issue_api_request(ip_pool_url,
                                        method="put",
                                        body=ip_pool_data,
                                        api_version='2')
            except exception.DateraAPIException:
                # Datera product 1.0 support
                pass

        # Check to ensure we're ready for go-time
        self._si_poll(volume, policies)
    def _create_export_2_1(self, context, volume, connector):
        """Export *volume* through the v2.1 API (offline/online variant).

        NOTE(review): this definition is shadowed by a second
        ``_create_export_2_1`` later in the file, so Python binds only the
        later one; kept intact pending a decision on which variant to keep.

        Forces the app_instance offline to apply the IP-pool selection,
        brings it back online, registers the connector's initiator and a
        freshly named initiator group, attaches the group to every
        storage_instance ACL, and polls until ready.

        :param context: Cinder request context (unused here).
        :param volume: volume being exported.
        :param connector: connection properties; 'initiator' and 'ip' keys
                          are consulted when present.
        """
        tenant = self._create_tenant(volume)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        data = {
            'admin_state': 'offline',
            'force': True
        }
        self._issue_api_request(
            url, method='put', body=data, api_version='2.1', tenant=tenant)
        policies = self._get_policies_for_resource(volume)
        store_name, _ = self._scrape_template(policies)
        if connector and connector.get('ip'):
            # Case where volume_type has non default IP Pool info
            if policies['ip_pool'] != 'default':
                initiator_ip_pool_path = self._issue_api_request(
                    "access_network_ip_pools/{}".format(
                        policies['ip_pool']),
                    api_version='2.1',
                    tenant=tenant)['path']
            # Fallback to trying reasonable IP based guess
            else:
                initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_1(
                    connector['ip'])

            ip_pool_url = datc.URL_TEMPLATES['si_inst'](
                store_name).format(datc._get_name(volume['id']))
            ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}}
            self._issue_api_request(ip_pool_url,
                                    method="put",
                                    body=ip_pool_data,
                                    api_version='2.1',
                                    tenant=tenant)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        self._issue_api_request(
            url, method='put', body=data, api_version='2.1', tenant=tenant)
        # Check if we've already setup everything for this volume
        url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id'])))
        storage_instances = self._issue_api_request(
            url, api_version='2.1', tenant=tenant)
        # Handle adding initiator to product if necessary
        # Then add initiator to ACL
        if (connector and
                connector.get('initiator') and
                not policies['acl_allow_all']):
            initiator_name = "OpenStack_{}_{}".format(
                self.driver_prefix, str(uuid.uuid4())[:4])
            # NOTE(review): unlike the sibling variants, this one derives the
            # group name from a random UUID per call instead of the volume id.
            initiator_group = datc.INITIATOR_GROUP_PREFIX + str(uuid.uuid4())
            initiator = connector['initiator']
            # The original guarded this POST with an always-False ``found``
            # flag (its lookup loop had been removed); the dead guard is
            # dropped — the POST always runs and conflict_ok absorbs
            # already-exists races.
            data = {'id': initiator, 'name': initiator_name}
            self._issue_api_request("initiators",
                                    method="post",
                                    body=data,
                                    conflict_ok=True,
                                    api_version='2.1',
                                    tenant=tenant)
            # Create initiator group with initiator in it
            initiator_path = "/initiators/{}".format(initiator)
            initiator_group_path = "/initiator_groups/{}".format(
                initiator_group)
            ig_data = {'name': initiator_group,
                       'members': [{'path': initiator_path}]}
            self._issue_api_request("initiator_groups",
                                    method="post",
                                    body=ig_data,
                                    conflict_ok=True,
                                    api_version='2.1',
                                    tenant=tenant)
            # Create ACL with initiator group as reference for each
            # storage_instance in app_instance
            # TODO(_alastor_): We need to avoid changing the ACLs if the
            # template already specifies an ACL policy.
            for si in storage_instances['data']:
                acl_url = (datc.URL_TEMPLATES['si']() +
                           "/{}/acl_policy").format(
                    datc._get_name(volume['id']), si['name'])
                existing_acl = self._issue_api_request(acl_url,
                                                       method="get",
                                                       api_version='2.1',
                                                       tenant=tenant)['data']
                data = {}
                data['initiators'] = existing_acl['initiators']
                data['initiator_groups'] = existing_acl['initiator_groups']
                data['initiator_groups'].append({"path": initiator_group_path})
                self._issue_api_request(acl_url,
                                        method="put",
                                        body=data,
                                        api_version='2.1',
                                        tenant=tenant)
        # Check to ensure we're ready for go-time
        self._si_poll_2_1(volume, policies, tenant)
    def _create_export_2_1(self, context, volume, connector):
        """Export *volume* through the v2.1 API.

        NOTE(review): this is the second definition of ``_create_export_2_1``
        in the file and therefore the one Python actually binds.

        Brings the app_instance online, ensures the connector's initiator
        exists (creating it and a per-volume initiator group if needed),
        attaches that group to every storage_instance ACL, assigns an IP
        pool based on policy or the connector IP, polls for readiness, and
        records export metadata.

        :param context: Cinder request context (unused here).
        :param volume: volume being exported.
        :param connector: connection properties; 'initiator' (iSCSI IQN)
                          and 'ip' keys are consulted when present.
        """
        tenant = self._create_tenant(volume)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        data = {
            'admin_state': 'online'
        }
        self._issue_api_request(
            url, method='put', body=data, api_version='2.1', tenant=tenant)
        # Check if we've already setup everything for this volume
        url = (datc.URL_TEMPLATES['si']().format(datc._get_name(volume['id'])))
        storage_instances = self._issue_api_request(
            url, api_version='2.1', tenant=tenant)
        # Handle adding initiator to product if necessary
        # Then add initiator to ACL
        policies = self._get_policies_for_resource(volume)

        store_name, _ = self._scrape_template(policies)

        if (connector and
                connector.get('initiator') and
                not policies['acl_allow_all']):
            initiator_name = "OpenStack_{}_{}".format(
                self.driver_prefix, str(uuid.uuid4())[:4])
            initiator_group = datc.INITIATOR_GROUP_PREFIX + volume['id']
            initiator = connector['initiator']
            # The response is a mapping keyed by IQN, so a direct membership
            # test replaces the original linear scan.
            current_initiators = self._issue_api_request(
                'initiators', api_version='2.1', tenant=tenant)
            # If we didn't find a matching initiator, create one
            if initiator not in current_initiators:
                data = {'id': initiator, 'name': initiator_name}
                # Try and create the initiator
                # If we get a conflict, ignore it because race conditions
                self._issue_api_request("initiators",
                                        method="post",
                                        body=data,
                                        conflict_ok=True,
                                        api_version='2.1',
                                        tenant=tenant)
            # Create initiator group with initiator in it
            initiator_path = "/initiators/{}".format(initiator)
            initiator_group_path = "/initiator_groups/{}".format(
                initiator_group)
            ig_data = {'name': initiator_group,
                       'members': [{'path': initiator_path}]}
            self._issue_api_request("initiator_groups",
                                    method="post",
                                    body=ig_data,
                                    conflict_ok=True,
                                    api_version='2.1',
                                    tenant=tenant)
            # Create ACL with initiator group as reference for each
            # storage_instance in app_instance
            # TODO(_alastor_): We need to avoid changing the ACLs if the
            # template already specifies an ACL policy.
            for si in storage_instances['data']:
                acl_url = (datc.URL_TEMPLATES['si']() +
                           "/{}/acl_policy").format(
                    datc._get_name(volume['id']), si['name'])
                existing_acl = self._issue_api_request(acl_url,
                                                       method="get",
                                                       api_version='2.1',
                                                       tenant=tenant)['data']
                data = {}
                data['initiators'] = existing_acl['initiators']
                data['initiator_groups'] = existing_acl['initiator_groups']
                data['initiator_groups'].append({"path": initiator_group_path})
                self._issue_api_request(acl_url,
                                        method="put",
                                        body=data,
                                        api_version='2.1',
                                        tenant=tenant)

        if connector and connector.get('ip'):
            # Case where volume_type has non default IP Pool info
            if policies['ip_pool'] != 'default':
                initiator_ip_pool_path = self._issue_api_request(
                    "access_network_ip_pools/{}".format(
                        policies['ip_pool']),
                    api_version='2.1',
                    tenant=tenant)['path']
            # Fallback to trying reasonable IP based guess.
            # BUGFIX: use the 2.1 helper (matches the {'path': ...} payload
            # shape below); the original called the v2.0
            # _get_ip_pool_for_string_ip here.
            else:
                initiator_ip_pool_path = self._get_ip_pool_for_string_ip_2_1(
                    connector['ip'])

            ip_pool_url = datc.URL_TEMPLATES['si_inst'](
                store_name).format(datc._get_name(volume['id']))
            ip_pool_data = {'ip_pool': {'path': initiator_ip_pool_path}}
            self._issue_api_request(ip_pool_url,
                                    method="put",
                                    body=ip_pool_data,
                                    api_version='2.1',
                                    tenant=tenant)

        # Check to ensure we're ready for go-time
        self._si_poll_2_1(volume, policies, tenant)
        url = datc.URL_TEMPLATES['ai_inst']().format(
            datc._get_name(volume['id']))
        metadata = {}
        # TODO(_alastor_): Figure out what we want to post with a create_export
        # call
        self._store_metadata(url, metadata, "create_export_2_1", tenant)