Example #1
    def _get_manageable_resource_info(self, cinder_resources, resource_type,
                                      marker, limit, offset, sort_keys,
                                      sort_dirs):
        entries = []
        lvs = self.vg.get_volumes()
        cinder_ids = [resource['id'] for resource in cinder_resources]

        for lv in lvs:
            is_snap = self.vg.lv_is_snapshot(lv['name'])
            if ((resource_type == 'volume' and is_snap)
                    or (resource_type == 'snapshot' and not is_snap)):
                continue

            if resource_type == 'volume':
                potential_id = volume_utils.extract_id_from_volume_name(
                    lv['name'])
            else:
                unescaped_name = self._unescape_snapshot(lv['name'])
                potential_id = volume_utils.extract_id_from_snapshot_name(
                    unescaped_name)
            lv_info = {
                'reference': {
                    'source-name': lv['name']
                },
                'size': int(math.ceil(float(lv['size']))),
                'cinder_id': None,
                'extra_info': None
            }

            if potential_id in cinder_ids:
                lv_info['safe_to_manage'] = False
                lv_info['reason_not_safe'] = 'already managed'
                lv_info['cinder_id'] = potential_id
            elif self.vg.lv_is_open(lv['name']):
                lv_info['safe_to_manage'] = False
                lv_info['reason_not_safe'] = '%s in use' % resource_type
            else:
                lv_info['safe_to_manage'] = True
                lv_info['reason_not_safe'] = None

            if resource_type == 'snapshot':
                origin = self.vg.lv_get_origin(lv['name'])
                lv_info['source_reference'] = {'source-name': origin}

            entries.append(lv_info)

        return volume_utils.paginate_entries_list(entries, marker, limit,
                                                  offset, sort_keys, sort_dirs)
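
Each of these examples leans on volume_utils.extract_id_from_volume_name to
decide whether a backend object is already known to Cinder. A minimal sketch of
that helper's behavior, assuming the default 'volume-%s' name template (in
Cinder the template comes from CONF.volume_name_template):

    import re

    VOLUME_NAME_TEMPLATE = 'volume-%s'  # assumption: the default template

    def extract_id_from_volume_name(vol_name):
        # Turn the template into a regex with a capture group for the id,
        # e.g. 'volume-%s' -> 'volume-(?P<uuid>.+)'.
        regex = re.compile(
            VOLUME_NAME_TEMPLATE.replace('%s', r'(?P<uuid>.+)'))
        match = regex.match(vol_name)
        # Template-conformant names yield the embedded id; anything else
        # yields None.
        return match.group('uuid') if match else None

So a name like 'volume-8e9c3f0a-1b2d-4c5e-8f7a-0123456789ab' yields its
embedded UUID, while an arbitrary LV name yields None; Example #4 below uses
exactly that to skip non-volume LUNs.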
Example #2
    def _add_manageable_volume(self, kv, manageable_volumes, cinder_ids):
        cfg = kv['value']
        if kv['key'].find('@') >= 0:
            # Keys containing '@' refer to snapshots; skip them here.
            return
        image_id = volume_utils.extract_id_from_volume_name(cfg['name'])
        image_info = {
            'reference': {
                'source-name': cfg['name']
            },
            'size': int(math.ceil(float(cfg['size']) / units.Gi)),
            'cinder_id': None,
            'extra_info': None,
        }
        if image_id in cinder_ids:
            image_info['cinder_id'] = image_id
            image_info['safe_to_manage'] = False
            image_info['reason_not_safe'] = 'already managed'
        else:
            image_info['safe_to_manage'] = True
            image_info['reason_not_safe'] = None
        manageable_volumes.append(image_info)
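
One detail worth noting: cfg['size'] appears to be in bytes (it is divided by
units.Gi), and Cinder reports sizes in whole GiB, so the conversion rounds
partial gigabytes up. A quick check, assuming units.Gi == 2 ** 30 as in
oslo.utils:

    import math

    Gi = 2 ** 30  # oslo.utils' units.Gi

    for size_bytes in (Gi, Gi + 1, 5 * Gi):
        print(size_bytes, '->', int(math.ceil(float(size_bytes) / Gi)), 'GiB')
    # 1073741824 -> 1 GiB, 1073741825 -> 2 GiB, 5368709120 -> 5 GiB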
Example #3
    def manage_existing(self, volume, existing_ref):
        """Manages an existing LV.

        Renames the LV to match the expected name for the volume.
        Error checking done by manage_existing_get_size is not repeated.
        """
        lv_name = existing_ref['source-name']
        self.vg.get_volume(lv_name)

        vol_id = volume_utils.extract_id_from_volume_name(lv_name)
        if volume_utils.check_already_managed_volume(vol_id):
            raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)

        # Attempt to rename the LV to match the OpenStack internal name.
        try:
            self.vg.rename_volume(lv_name, volume['name'])
        except processutils.ProcessExecutionError as exc:
            exception_message = (_("Failed to rename logical volume %(name)s, "
                                   "error message was: %(err_msg)s") % {
                                       'name': lv_name,
                                       'err_msg': exc.stderr
                                   })
            raise exception.VolumeBackendAPIException(data=exception_message)
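
For context, a hypothetical call sequence for the manage flow (the driver
instance, LV name, and volume name below are made up for illustration; as the
docstring notes, manage_existing_get_size performs the error checking that
manage_existing skips):

    # Hypothetical: adopt a pre-existing LV under Cinder's control.
    existing_ref = {'source-name': 'legacy-data-lv'}
    volume = {'name': 'volume-8e9c3f0a-1b2d-4c5e-8f7a-0123456789ab'}

    size = driver.manage_existing_get_size(volume, existing_ref)
    driver.manage_existing(volume, existing_ref)  # renames the LV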
Example #4
    def _get_pool_stats(self, filter_function=None, goodness_function=None):
        """Retrieve pool (Data ONTAP flexvol) stats.

        Pool statistics are assembled from static driver capabilities, the
        Storage Service Catalog of flexvol attributes, and real-time capacity
        and controller utilization metrics.  The pool name is the flexvol name.
        """

        pools = []

        ssc = self.ssc_library.get_ssc()
        if not ssc:
            return pools

        # Utilization and performance metrics require cluster-scoped
        # credentials
        if self.using_cluster_credentials:
            # Get up-to-date node utilization metrics just once
            self.perf_library.update_performance_cache(ssc)

            # Get up-to-date aggregate capacities just once
            aggregates = self.ssc_library.get_ssc_aggregates()
            aggr_capacities = self.zapi_client.get_aggregate_capacities(
                aggregates)
        else:
            aggr_capacities = {}

        for ssc_vol_name, ssc_vol_info in ssc.items():

            pool = dict()

            # Add storage service catalog data
            pool.update(ssc_vol_info)

            # Add driver capabilities and config info
            pool['QoS_support'] = self.using_cluster_credentials
            pool['multiattach'] = True
            pool['online_extend_support'] = True
            pool['consistencygroup_support'] = True
            pool['consistent_group_snapshot_enabled'] = True
            pool['reserved_percentage'] = self.reserved_percentage
            pool['max_over_subscription_ratio'] = (
                self.max_over_subscription_ratio)

            # Add up-to-date capacity info
            capacity = self.zapi_client.get_flexvol_capacity(
                flexvol_name=ssc_vol_name)

            size_total_gb = capacity['size-total'] / units.Gi
            pool['total_capacity_gb'] = na_utils.round_down(size_total_gb)

            size_available_gb = capacity['size-available'] / units.Gi
            pool['free_capacity_gb'] = na_utils.round_down(size_available_gb)

            if self.configuration.netapp_driver_reports_provisioned_capacity:
                luns = self.zapi_client.get_lun_sizes_by_volume(ssc_vol_name)
                provisioned_cap = 0
                for lun in luns:
                    lun_name = lun['path'].split('/')[-1]
                    # Count only LUNs whose names match the volume name
                    # template; snapshot LUNs do not, so they are excluded.
                    if volume_utils.extract_id_from_volume_name(lun_name):
                        provisioned_cap += lun['size']
                pool['provisioned_capacity_gb'] = na_utils.round_down(
                    float(provisioned_cap) / units.Gi)

            if self.using_cluster_credentials:
                dedupe_used = self.zapi_client.get_flexvol_dedupe_used_percent(
                    ssc_vol_name)
            else:
                dedupe_used = 0.0
            pool['netapp_dedupe_used_percent'] = na_utils.round_down(
                dedupe_used)

            aggregate_name = ssc_vol_info.get('netapp_aggregate')
            aggr_capacity = aggr_capacities.get(aggregate_name, {})
            pool['netapp_aggregate_used_percent'] = aggr_capacity.get(
                'percent-used', 0)

            # Add utilization data
            utilization = self.perf_library.get_node_utilization_for_pool(
                ssc_vol_name)
            pool['utilization'] = na_utils.round_down(utilization)
            pool['filter_function'] = filter_function
            pool['goodness_function'] = goodness_function

            # Add replication capabilities/stats
            pool.update(self.get_replication_backend_stats(self.configuration))

            pools.append(pool)

        return pools
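
The provisioned-capacity loop above relies on the same name-template trick:
for a LUN basename that does not follow the volume template,
extract_id_from_volume_name returns None (falsy), so the LUN is skipped. A
quick illustration with hypothetical basenames:

    # Hypothetical basenames as they might come back from lun['path'].
    names = [
        'volume-8e9c3f0a-1b2d-4c5e-8f7a-0123456789ab',    # counted
        'snapshot-77aa88bb-ccdd-4eef-9900-112233445566',  # skipped (None)
    ]
    for name in names:
        print(name, '->', volume_utils.extract_id_from_volume_name(name))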