Exemplo n.º 1
0
 def test_extract_id_from_volume_name_no_match(self, conf_mock):
     """Names not produced by the template yield no volume id."""
     conf_mock.volume_name_template = '%s-volume'
     # Neither a bare UUID nor an arbitrary string matches '%s-volume'.
     for vol_name in ('d8cd1feb-2dcc-404d-9b15-b86fe3bec0a1',
                      'blahblahblah'):
         self.assertIsNone(
             volume_utils.extract_id_from_volume_name(vol_name))
Exemplo n.º 2
0
 def test_extract_id_from_volume_name_no_match(self, conf_mock):
     """A name that does not follow the template maps to None."""
     conf_mock.volume_name_template = '%s-volume'
     # A bare UUID lacks the '-volume' suffix required by the template.
     self.assertIsNone(
         volume_utils.extract_id_from_volume_name(
             'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'))
     # An arbitrary string matches neither the template nor a UUID.
     self.assertIsNone(
         volume_utils.extract_id_from_volume_name('blahblahblah'))
Exemplo n.º 3
0
    def _get_manageable_resource_info(self, cinder_resources, resource_type,
                                      marker, limit, offset, sort_keys,
                                      sort_dirs):
        """List ZFS volumes or snapshots available for Cinder management.

        Runs ``zfs list`` over the configured zpool and builds one entry
        per resource, flagging those whose name maps to an id Cinder
        already manages as unsafe to manage again.

        :param cinder_resources: resources Cinder currently manages; their
            ids mark entries as already managed
        :param resource_type: 'volume' or 'snapshot'
        :param marker: last item of the previous page
        :param limit: maximum number of entries to return
        :param offset: number of items to skip after the marker
        :param sort_keys: keys to sort the paginated result by
        :param sort_dirs: sort directions matching sort_keys
        :returns: paginated list of entry dicts
        :raises VolumeBackendAPIException: if 'zfs list' fails
        """
        entries = []
        cinder_ids = [resource['id'] for resource in cinder_resources]

        root_helper = utils.get_root_helper()
        try:
            # -H: no header, -p: exact (parsable) numbers, -r: recurse
            # under the zpool; output is one "<name>\t<volsize>" per line.
            out, err = self._execute('zfs',
                                     'list',
                                     '-r',
                                     '-H',
                                     '-p',
                                     '-t',
                                     resource_type,
                                     '-oname,volsize',
                                     self.zpool,
                                     root_helper=root_helper,
                                     run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            exception_message = (_("Failed to zfs list, "
                                   "error message was: %s") %
                                 six.text_type(exc.stderr))
            raise exception.VolumeBackendAPIException(data=exception_message)

        for entry in out.splitlines():
            name, size = entry.strip().split('\t')
            if resource_type == 'volume':
                potential_id = volutils.extract_id_from_volume_name(name)
            else:
                potential_id = volutils.extract_id_from_snapshot_name(name)

            # volsize is reported in bytes (-p); round up to whole GiB.
            info = {
                'reference': {
                    'source-name': name
                },
                'size': int(math.ceil(float(size) / units.Gi)),
                'cinder_id': None,
                'extra_info': None
            }

            if potential_id in cinder_ids:
                info['safe_to_manage'] = False
                info['reason_not_safe'] = 'already managed'
                info['cinder_id'] = potential_id
            else:
                info['safe_to_manage'] = True
                info['reason_not_safe'] = None

            if resource_type == 'snapshot':
                # Snapshot names look like "<zpool>/<zvol>@<snapshot>";
                # normalizing '@' to '/' lets one split recover all parts.
                zpool, zvol, snapshot = name.replace('@', '/').split('/')
                info['source_reference'] = {'source-name': zvol}

            entries.append(info)

        return volutils.paginate_entries_list(entries, marker, limit, offset,
                                              sort_keys, sort_dirs)
Exemplo n.º 4
0
    def manage_existing(self, volume, existing_vol_ref):
        """Manages an existing volume.

        The specified Cinder volume is to be taken into Cinder management.
        The driver will verify its existence and then rename it to the
        new Cinder volume name. It is expected that the existing volume
        reference is an NFS share point and some [/path]/volume;
        e.g., 10.10.32.1:/openstack/vol_to_manage
        or 10.10.32.1:/openstack/some_directory/vol_to_manage

        :param volume: cinder volume to manage
        :param existing_vol_ref: driver-specific information used to identify a
        volume
        :returns: the provider location
        :raises: VolumeBackendAPIException
        """

        # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
        (nfs_share, nfs_mount, vol_name
         ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)

        LOG.info(_LI("Asked to manage NFS volume %(vol)s, "
                     "with vol ref %(ref)s."),
                 {'vol': volume.id,
                  'ref': existing_vol_ref['source-name']})

        # Refuse to re-manage a file whose name already maps to a volume
        # id Cinder tracks.
        vol_id = utils.extract_id_from_volume_name(vol_name)
        if utils.check_already_managed_volume(vol_id):
            raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)

        # Validate the share against the volume's pool — presumably raises
        # on mismatch; confirm against _check_pool_and_share.
        self._check_pool_and_share(volume, nfs_share)

        if vol_name == volume.name:
            LOG.debug("New Cinder volume %(vol)s name matches reference name: "
                      "no need to rename.", {'vol': volume.name})
        else:
            # Rename on the local NFS mount point, then open up permissions
            # so Cinder services can access the renamed file.
            src_vol = os.path.join(nfs_mount, vol_name)
            dst_vol = os.path.join(nfs_mount, volume.name)
            try:
                self._try_execute("mv", src_vol, dst_vol, run_as_root=False,
                                  check_exit_code=True)
                LOG.debug("Setting newly managed Cinder volume name "
                          "to %(vol)s.", {'vol': volume.name})
                self._set_rw_permissions_for_all(dst_vol)
            except (OSError, processutils.ProcessExecutionError) as err:
                msg = (_("Failed to manage existing volume "
                         "%(name)s, because rename operation "
                         "failed: Error msg: %(msg)s.") %
                       {'name': existing_vol_ref['source-name'],
                        'msg': six.text_type(err)})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        return {'provider_location': nfs_share}
Exemplo n.º 5
0
    def manage_existing(self, volume, existing_vol_ref):
        """Manages an existing volume.

        The specified Cinder volume is to be taken into Cinder management.
        The driver will verify its existence and then rename it to the
        new Cinder volume name. It is expected that the existing volume
        reference is an NFS share point and some [/path]/volume;
        e.g., 10.10.32.1:/openstack/vol_to_manage
        or 10.10.32.1:/openstack/some_directory/vol_to_manage

        :param volume: cinder volume to manage
        :param existing_vol_ref: driver-specific information used to identify a
                                 volume
        :returns: the provider location
        :raises VolumeBackendAPIException:
        """

        # Attempt to find NFS share, NFS mount, and volume path from vol_ref.
        (nfs_share, nfs_mount, vol_name
         ) = self._get_share_mount_and_vol_from_vol_ref(existing_vol_ref)

        LOG.info("Asked to manage NFS volume %(vol)s, "
                 "with vol ref %(ref)s.",
                 {'vol': volume.id,
                  'ref': existing_vol_ref['source-name']})

        # If the referenced name already decodes to a Cinder-managed volume
        # id, managing it again would duplicate it — refuse.
        vol_id = utils.extract_id_from_volume_name(vol_name)
        if utils.check_already_managed_volume(vol_id):
            raise exception.ManageExistingAlreadyManaged(volume_ref=vol_name)

        # Validate the share belongs to this volume's pool — presumably
        # raises on mismatch; confirm against _check_pool_and_share.
        self._check_pool_and_share(volume, nfs_share)

        if vol_name == volume.name:
            LOG.debug("New Cinder volume %(vol)s name matches reference name: "
                      "no need to rename.", {'vol': volume.name})
        else:
            # Rename on the local mount, then widen permissions so the
            # renamed file stays accessible to Cinder services.
            src_vol = os.path.join(nfs_mount, vol_name)
            dst_vol = os.path.join(nfs_mount, volume.name)
            try:
                self._try_execute("mv", src_vol, dst_vol, run_as_root=False,
                                  check_exit_code=True)
                LOG.debug("Setting newly managed Cinder volume name "
                          "to %(vol)s.", {'vol': volume.name})
                self._set_rw_permissions_for_all(dst_vol)
            except (OSError, processutils.ProcessExecutionError) as err:
                msg = (_("Failed to manage existing volume "
                         "%(name)s, because rename operation "
                         "failed: Error msg: %(msg)s.") %
                       {'name': existing_vol_ref['source-name'],
                        'msg': six.text_type(err)})
                LOG.error(msg)
                raise exception.VolumeBackendAPIException(data=msg)
        return {'provider_location': nfs_share}
Exemplo n.º 6
0
    def _get_manageable_resource_info(self, cinder_resources, resource_type,
                                      marker, limit, offset, sort_keys,
                                      sort_dirs):
        """Describe backend LVs of ``resource_type`` for manage listings.

        Walks every LV in the volume group, skips those of the other
        resource type, and reports for each remaining LV whether it is
        safe to bring under Cinder management.
        """
        managed_ids = {resource['id'] for resource in cinder_resources}
        entries = []

        for lv in self.vg.get_volumes():
            lv_name = lv['name']
            is_snap = self.vg.lv_is_snapshot(lv_name)
            # Only report LVs that match the requested resource type.
            if ((resource_type == 'volume' and is_snap)
                    or (resource_type == 'snapshot' and not is_snap)):
                continue

            if resource_type == 'volume':
                potential_id = volutils.extract_id_from_volume_name(lv_name)
            else:
                potential_id = volutils.extract_id_from_snapshot_name(
                    self._unescape_snapshot(lv_name))

            info = {
                'reference': {
                    'source-name': lv_name
                },
                'size': int(math.ceil(float(lv['size']))),
                'cinder_id': None,
                'extra_info': None
            }

            if potential_id in managed_ids:
                info['safe_to_manage'] = False
                info['reason_not_safe'] = 'already managed'
                info['cinder_id'] = potential_id
            elif self.vg.lv_is_open(lv_name):
                # An open LV is in active use and must not be renamed.
                info['safe_to_manage'] = False
                info['reason_not_safe'] = '%s in use' % resource_type
            else:
                info['safe_to_manage'] = True
                info['reason_not_safe'] = None

            if resource_type == 'snapshot':
                info['source_reference'] = {
                    'source-name': self.vg.lv_get_origin(lv_name)}

            entries.append(info)

        return volutils.paginate_entries_list(entries, marker, limit, offset,
                                              sort_keys, sort_dirs)
Exemplo n.º 7
0
    def manage_existing(self, volume, existing_ref):
        """Manages an existing ZVOL.

        Renames the ZVOL to match the expected name for the volume.
        Error checking done by manage_existing_get_size is not repeated.

        :param volume: cinder volume to manage
        :param existing_ref: dict whose 'source-name' names the existing ZVOL
        :raises ManageExistingAlreadyManaged: if the ZVOL name already maps
            to a volume Cinder manages
        :raises VolumeBackendAPIException: if the ZVOL cannot be listed or
            renamed
        """
        root_helper = utils.get_root_helper()
        zvol_name = existing_ref['source-name']
        zvol_src = self._zfs_volume({'name': zvol_name})
        zvol_dst = self._zfs_volume(volume)

        # Verify the source ZVOL exists before attempting the rename.
        try:
            self._execute('zfs',
                          'list',
                          '-H',
                          zvol_src,
                          root_helper=root_helper,
                          run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            # Message typo corrected: "retrive" -> "retrieve".
            exception_message = (_("Failed to retrieve zvol %(name)s, "
                                   "error message was: %(err_msg)s") % {
                                       'name': zvol_name,
                                       'err_msg': exc.stderr
                                   })
            raise exception.VolumeBackendAPIException(data=exception_message)

        vol_id = volutils.extract_id_from_volume_name(zvol_name)
        if volutils.check_already_managed_volume(vol_id):
            raise exception.ManageExistingAlreadyManaged(volume_ref=zvol_name)

        # Attempt to rename the ZVOL to match the OpenStack internal name.
        try:
            self._execute('zfs',
                          'rename',
                          zvol_src,
                          zvol_dst,
                          root_helper=root_helper,
                          run_as_root=True)
        except processutils.ProcessExecutionError as exc:
            exception_message = (_("Failed to rename logical volume %(name)s, "
                                   "error message was: %(err_msg)s") % {
                                       'name': zvol_name,
                                       'err_msg': exc.stderr
                                   })
            raise exception.VolumeBackendAPIException(data=exception_message)
Exemplo n.º 8
0
Arquivo: lvm.py Projeto: NetApp/cinder
    def _get_manageable_resource_info(self, cinder_resources, resource_type,
                                      marker, limit, offset, sort_keys,
                                      sort_dirs):
        """Build manageability entries for LVs of the requested type."""
        cinder_ids = set(resource['id'] for resource in cinder_resources)
        entries = []

        for lv in self.vg.get_volumes():
            name = lv['name']
            is_snap = self.vg.lv_is_snapshot(name)
            # Skip LVs belonging to the other resource kind.
            if ((resource_type == 'volume' and is_snap) or
                    (resource_type == 'snapshot' and not is_snap)):
                continue

            if resource_type == 'volume':
                potential_id = volutils.extract_id_from_volume_name(name)
            else:
                potential_id = volutils.extract_id_from_snapshot_name(
                    self._unescape_snapshot(name))

            # Decide manageability up front, then assemble the entry.
            if potential_id in cinder_ids:
                safe, reason, cinder_id = False, 'already managed', potential_id
            elif self.vg.lv_is_open(name):
                safe, reason, cinder_id = (False,
                                           '%s in use' % resource_type,
                                           None)
            else:
                safe, reason, cinder_id = True, None, None

            lv_info = {'reference': {'source-name': name},
                       'size': int(math.ceil(float(lv['size']))),
                       'cinder_id': cinder_id,
                       'extra_info': None,
                       'safe_to_manage': safe,
                       'reason_not_safe': reason}

            if resource_type == 'snapshot':
                lv_info['source_reference'] = {
                    'source-name': self.vg.lv_get_origin(name)}

            entries.append(lv_info)

        return volutils.paginate_entries_list(entries, marker, limit, offset,
                                              sort_keys, sort_dirs)
Exemplo n.º 9
0
    def _get_manageable_resource_info(
        self, cinder_resources, resource_type, marker, limit, offset, sort_keys, sort_dirs
    ):
        """Report each matching LV along with whether it is safe to manage."""
        entries = []
        cinder_ids = {resource["id"] for resource in cinder_resources}

        for lv in self.vg.get_volumes():
            name = lv["name"]
            is_snap = self.vg.lv_is_snapshot(name)
            # Only LVs of the requested kind are reported.
            if (resource_type == "volume" and is_snap) or (resource_type == "snapshot" and not is_snap):
                continue

            if resource_type == "volume":
                potential_id = volutils.extract_id_from_volume_name(name)
            else:
                potential_id = volutils.extract_id_from_snapshot_name(self._unescape_snapshot(name))

            lv_info = {
                "reference": {"source-name": name},
                "size": int(math.ceil(float(lv["size"]))),
                "cinder_id": None,
                "extra_info": None,
            }

            lv_info["safe_to_manage"] = False
            if potential_id in cinder_ids:
                lv_info["reason_not_safe"] = "already managed"
                lv_info["cinder_id"] = potential_id
            elif self.vg.lv_is_open(name):
                # In-use LVs must not be renamed out from under their users.
                lv_info["reason_not_safe"] = "%s in use" % resource_type
            else:
                lv_info["safe_to_manage"] = True
                lv_info["reason_not_safe"] = None

            if resource_type == "snapshot":
                lv_info["source_reference"] = {"source-name": self.vg.lv_get_origin(name)}

            entries.append(lv_info)

        return volutils.paginate_entries_list(entries, marker, limit, offset, sort_keys, sort_dirs)
Exemplo n.º 10
0
    def manage_existing(self, volume, existing_ref):
        """Manages an existing LV.

        Renames the LV to match the expected name for the volume.
        Error checking done by manage_existing_get_size is not repeated.
        """
        lv_name = existing_ref["source-name"]
        # Ensure the LV exists in the volume group.
        self.vg.get_volume(lv_name)

        if volutils.check_already_managed_volume(
            volutils.extract_id_from_volume_name(lv_name)
        ):
            raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)

        # Attempt to rename the LV to match the OpenStack internal name.
        try:
            self.vg.rename_volume(lv_name, volume["name"])
        except processutils.ProcessExecutionError as exc:
            raise exception.VolumeBackendAPIException(
                data=_("Failed to rename logical volume %(name)s, " "error message was: %(err_msg)s")
                % {"name": lv_name, "err_msg": exc.stderr}
            )
Exemplo n.º 11
0
    def manage_existing(self, volume, existing_ref):
        """Take an existing LV under Cinder management.

        The LV named by ``existing_ref['source-name']`` is renamed to the
        Cinder-internal volume name; size/error checks performed by
        manage_existing_get_size are not repeated here.
        """
        lv_name = existing_ref['source-name']
        # Verifies the LV exists in the volume group.
        self.vg.get_volume(lv_name)

        vol_id = volutils.extract_id_from_volume_name(lv_name)
        already_managed = volutils.check_already_managed_volume(vol_id)
        if already_managed:
            raise exception.ManageExistingAlreadyManaged(volume_ref=lv_name)

        # Rename the LV to the OpenStack internal name.
        try:
            self.vg.rename_volume(lv_name, volume['name'])
        except processutils.ProcessExecutionError as exc:
            msg = (_("Failed to rename logical volume %(name)s, "
                     "error message was: %(err_msg)s")
                   % {'name': lv_name, 'err_msg': exc.stderr})
            raise exception.VolumeBackendAPIException(data=msg)
Exemplo n.º 12
0
 def test_extract_id_from_volume_name_id_vol_pattern(self, conf_mock):
     """The id is recovered from a '<id>-volume' style name."""
     conf_mock.volume_name_template = '%s-volume'
     expected_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'
     name = conf_mock.volume_name_template % expected_id
     self.assertEqual(expected_id,
                      volume_utils.extract_id_from_volume_name(name))
Exemplo n.º 13
0
    def _get_manageable_resource_info(self, cinder_resources, resource_type,
                                      marker, limit, offset, sort_keys,
                                      sort_dirs):
        """Gets the resources on the backend available for management by Cinder.

        Receives the parameters from "get_manageable_volumes" and
        "get_manageable_snapshots" and gets the available resources

        :param cinder_resources: A list of resources in this host that Cinder
        currently manages
        :param resource_type: If it's a volume or a snapshot
        :param marker: The last item of the previous page; we return the
        next results after this value (after sorting)
        :param limit: Maximum number of items to return
        :param offset: Number of items to skip after marker
        :param sort_keys: List of keys to sort results by (valid keys
        are 'identifier' and 'size')
        :param sort_dirs: List of directions to sort by, corresponding to
        sort_keys (valid directions are 'asc' and 'desc')

        :returns: list of dictionaries, each specifying a volume or snapshot
        (resource) in the host, with the following keys:
            - reference (dictionary): The reference for a resource,
            which can be passed to "manage_existing_snapshot".
            - size (int): The size of the resource according to the storage
              backend, rounded up to the nearest GB.
            - safe_to_manage (boolean): Whether or not this resource is
            safe to manage according to the storage backend.
            - reason_not_safe (string): If safe_to_manage is False,
              the reason why.
            - cinder_id (string): If already managed, provide the Cinder ID.
            - extra_info (string): Any extra information to return to the
            user
            - source_reference (string): Similar to "reference", but for the
              snapshot's source volume.
        """

        entries = []
        exports = {}
        bend_rsrc = {}
        cinder_ids = [resource.id for resource in cinder_resources]

        # Map each service's export path (hdp) to its filesystem label.
        for service in self.config['services']:
            exp_path = self.config['services'][service]['hdp']
            exports[exp_path] = (
                self.config['services'][service]['export']['fs'])

        for exp in exports.keys():
            # bend_rsrc has all the resources in the specified exports
            # volumes {u'172.24.54.39:/Export-Cinder':
            #   ['volume-325e7cdc-8f65-40a8-be9a-6172c12c9394',
            # '     snapshot-1bfb6f0d-9497-4c12-a052-5426a76cacdc','']}
            bend_rsrc[exp] = self._get_volumes_from_export(exp)
            mnt_point = self._get_mount_point_for_share(exp)

            for resource in bend_rsrc[exp]:
                # Ignoring resources of unwanted types.  A '.' in the name
                # or a 'snapshot' prefix marks the file as a snapshot —
                # presumably the '.' form is the non-legacy naming; confirm
                # against the driver's snapshot naming scheme.
                if ((resource_type == 'volume' and
                     ('.' in resource or 'snapshot' in resource))
                        or (resource_type == 'snapshot' and '.' not in resource
                            and 'snapshot' not in resource)):
                    continue

                path = '%s/%s' % (exp, resource)
                mnt_path = '%s/%s' % (mnt_point, resource)
                size = self._get_file_size(mnt_path)

                rsrc_inf = {
                    'reference': {
                        'source-name': path
                    },
                    'size': size,
                    'cinder_id': None,
                    'extra_info': None
                }

                if resource_type == 'volume':
                    potential_id = utils.extract_id_from_volume_name(resource)
                elif 'snapshot' in resource:
                    # This is for the snapshot legacy case
                    potential_id = utils.extract_id_from_snapshot_name(
                        resource)
                else:
                    # Non-legacy snapshot names carry the id after a dot.
                    potential_id = resource.split('.')[1]

                # When a resource is already managed by cinder, it's not
                # recommended to manage it again. So we set safe_to_manage =
                # False. Otherwise, it is set safe_to_manage = True.
                if potential_id in cinder_ids:
                    rsrc_inf['safe_to_manage'] = False
                    rsrc_inf['reason_not_safe'] = 'already managed'
                    rsrc_inf['cinder_id'] = potential_id
                else:
                    rsrc_inf['safe_to_manage'] = True
                    rsrc_inf['reason_not_safe'] = None

                # If it's a snapshot, we try to get its source volume. However,
                # this search is not reliable in some cases. So, if it's not
                # possible to return a precise result, we return unknown as
                # source-reference, throw a warning message and fill the
                # extra-info.
                if resource_type == 'snapshot':
                    if 'snapshot' not in resource:
                        origin = self._get_snapshot_origin_from_name(resource)
                        # Strip the name prefix to recover the bare id:
                        # 16 chars for an 'unmanage'-style prefix, 7 for
                        # 'volume-' — TODO confirm these offsets against
                        # _get_snapshot_origin_from_name's output format.
                        if 'unmanage' in origin:
                            origin = origin[16:]
                        else:
                            origin = origin[7:]
                        rsrc_inf['source_reference'] = {'id': origin}
                    else:
                        path = path.split(':')[1]
                        origin = self._get_snapshot_origin(path, exports[exp])

                        if not origin:
                            # if origin is empty, the file is not a clone
                            continue
                        elif len(origin) == 1:
                            origin = origin[0].split('/')[2]
                            origin = utils.extract_id_from_volume_name(origin)
                            rsrc_inf['source_reference'] = {'id': origin}
                        else:
                            # Multiple candidate origins: ambiguous owner.
                            LOG.warning(
                                "Could not determine the volume "
                                "that owns the snapshot %(snap)s",
                                {'snap': resource})
                            rsrc_inf['source_reference'] = {'id': 'unknown'}
                            rsrc_inf['extra_info'] = ('Could not determine '
                                                      'the volume that owns '
                                                      'the snapshot')

                entries.append(rsrc_inf)

        return utils.paginate_entries_list(entries, marker, limit, offset,
                                           sort_keys, sort_dirs)
Exemplo n.º 14
0
 def test_extract_id_from_volume_name_id_vol_pattern(self, conf_mock):
     """A name rendered from the template round-trips back to its id."""
     conf_mock.volume_name_template = '%s-volume'
     vol_id = 'd8cd1feb-2dcc-404d-9b15-b86fe3bec0a1'
     # Render the name exactly as the configured template would.
     vol_name = conf_mock.volume_name_template % vol_id
     self.assertEqual(vol_id,
                      volume_utils.extract_id_from_volume_name(vol_name))
Exemplo n.º 15
0
    def _get_manageable_resource_info(self, cinder_resources, resource_type,
                                      marker, limit, offset, sort_keys,
                                      sort_dirs):
        """Gets the resources on the backend available for management by Cinder.

        Receives the parameters from "get_manageable_volumes" and
        "get_manageable_snapshots" and gets the available resources

        :param cinder_resources: A list of resources in this host that Cinder
        currently manages
        :param resource_type: If it's a volume or a snapshot
        :param marker: The last item of the previous page; we return the
        next results after this value (after sorting)
        :param limit: Maximum number of items to return
        :param offset: Number of items to skip after marker
        :param sort_keys: List of keys to sort results by (valid keys
        are 'identifier' and 'size')
        :param sort_dirs: List of directions to sort by, corresponding to
        sort_keys (valid directions are 'asc' and 'desc')

        :returns: list of dictionaries, each specifying a volume or snapshot
        (resource) in the host, with the following keys:
            - reference (dictionary): The reference for a resource,
            which can be passed to "manage_existing_snapshot".
            - size (int): The size of the resource according to the storage
              backend, rounded up to the nearest GB.
            - safe_to_manage (boolean): Whether or not this resource is
            safe to manage according to the storage backend.
            - reason_not_safe (string): If safe_to_manage is False,
              the reason why.
            - cinder_id (string): If already managed, provide the Cinder ID.
            - extra_info (string): Any extra information to return to the
            user
            - source_reference (string): Similar to "reference", but for the
              snapshot's source volume.
        """

        entries = []
        exports = {}
        bend_rsrc = {}
        cinder_ids = [resource.id for resource in cinder_resources]

        # Map each service's export path (hdp) to its filesystem label.
        for service in self.config['services']:
            exp_path = self.config['services'][service]['hdp']
            exports[exp_path] = (
                self.config['services'][service]['export']['fs'])

        for exp in exports.keys():
            # bend_rsrc has all the resources in the specified exports
            # volumes {u'172.24.54.39:/Export-Cinder':
            #   ['volume-325e7cdc-8f65-40a8-be9a-6172c12c9394',
            # '     snapshot-1bfb6f0d-9497-4c12-a052-5426a76cacdc','']}
            bend_rsrc[exp] = self._get_volumes_from_export(exp)
            mnt_point = self._get_mount_point_for_share(exp)

            for resource in bend_rsrc[exp]:
                # Ignoring resources of unwanted types: file names carry a
                # 'volume'/'snapshot' prefix, so the other kind is skipped.
                if ((resource_type == 'volume' and 'snapshot' in resource) or
                        (resource_type == 'snapshot' and
                            'volume' in resource)):
                    continue
                path = '%s/%s' % (exp, resource)
                mnt_path = '%s/%s' % (mnt_point, resource)
                size = self._get_file_size(mnt_path)

                rsrc_inf = {'reference': {'source-name': path},
                            'size': size, 'cinder_id': None,
                            'extra_info': None}

                if resource_type == 'volume':
                    potential_id = utils.extract_id_from_volume_name(resource)
                else:
                    potential_id = utils.extract_id_from_snapshot_name(
                        resource)

                # When a resource is already managed by cinder, it's not
                # recommended to manage it again. So we set safe_to_manage =
                # False. Otherwise, it is set safe_to_manage = True.
                if potential_id in cinder_ids:
                    rsrc_inf['safe_to_manage'] = False
                    rsrc_inf['reason_not_safe'] = 'already managed'
                    rsrc_inf['cinder_id'] = potential_id
                else:
                    rsrc_inf['safe_to_manage'] = True
                    rsrc_inf['reason_not_safe'] = None

                # If it's a snapshot, we try to get its source volume. However,
                # this search is not reliable in some cases. So, if it's not
                # possible to return a precise result, we return unknown as
                # source-reference, throw a warning message and fill the
                # extra-info.
                if resource_type == 'snapshot':
                    # Drop the host part of "host:/path" before querying.
                    path = path.split(':')[1]
                    origin = self._get_snapshot_origin(path, exports[exp])

                    if not origin:
                        # if origin is empty, the file is not a clone
                        continue
                    elif len(origin) == 1:
                        # Single origin: extract the volume id from the
                        # third path component of the origin entry.
                        origin = origin[0].split('/')[2]
                        origin = utils.extract_id_from_volume_name(origin)
                        rsrc_inf['source_reference'] = {'id': origin}
                    else:
                        # Multiple candidate origins: the owner is ambiguous.
                        LOG.warning(_LW("Could not determine the volume that "
                                        "owns the snapshot %(snap)s"),
                                    {'snap': resource})
                        rsrc_inf['source_reference'] = {'id': 'unknown'}
                        rsrc_inf['extra_info'] = ('Could not determine the '
                                                  'volume that owns the '
                                                  'snapshot')

                entries.append(rsrc_inf)

        return utils.paginate_entries_list(entries, marker, limit, offset,
                                           sort_keys, sort_dirs)