Code example #1
File: lib_base.py  Project: jkasarherou/manila
    def _allocate_container(self, share, vserver_client):
        """Create new share on aggregate."""
        share_name = self._get_valid_share_name(share["id"])

        # Get Data ONTAP aggregate name as pool name.
        pool_name = share_utils.extract_host(share["host"], level="pool")
        if pool_name is None:
            msg = _("Pool is not available in the share host field.")
            raise exception.InvalidHost(reason=msg)

        extra_specs = share_types.get_extra_specs_from_share(share)
        extra_specs = self._remap_standard_boolean_extra_specs(extra_specs)
        self._check_extra_specs_validity(share, extra_specs)
        provisioning_options = self._get_provisioning_options(extra_specs)

        LOG.debug(
            "Creating share %(share)s on pool %(pool)s with " "provisioning options %(options)s",
            {"share": share_name, "pool": pool_name, "options": provisioning_options},
        )
        vserver_client.create_volume(
            pool_name,
            share_name,
            share["size"],
            snapshot_reserve=self.configuration.netapp_volume_snapshot_reserve_percent,
            **provisioning_options
        )
Code example #2
File: driver.py  Project: bswartz/manila
    def create_share(self, context, share, share_server=None):
        """Create a CephFS volume.

        :param context: A RequestContext.
        :param share: A Share.
        :param share_server: Always None for CephFS native.
        :return: The export locations dictionary.
        """
        requested_proto = share['share_proto'].upper()
        supported_proto = (
            self.configuration.cephfs_protocol_helper_type.upper())
        if (requested_proto != supported_proto):
            msg = _("Share protocol %s is not supported.") % requested_proto
            raise exception.ShareBackendException(msg=msg)

        # `share` is a Share
        msg = _("create_share {be} name={id} size={size}"
                " share_group_id={group}")
        LOG.debug(msg.format(
            be=self.backend_name, id=share['id'], size=share['size'],
            group=share['share_group_id']))

        extra_specs = share_types.get_extra_specs_from_share(share)
        data_isolated = extra_specs.get("cephfs:data_isolated", False)

        size = self._to_bytes(share['size'])

        # Create the CephFS volume
        cephfs_volume = self.volume_client.create_volume(
            cephfs_share_path(share), size=size, data_isolated=data_isolated)

        return self.protocol_helper.get_export_locations(share, cephfs_volume)
Code example #3
File: hpe_3par_driver.py  Project: dinghb/manila
    def create_share_from_snapshot(self,
                                   context,
                                   share,
                                   snapshot,
                                   share_server=None):
        """Is called to create share from snapshot."""

        fpg, vfs, ips = self._get_pool_location(share, share_server)

        protocol = share['share_proto']
        extra_specs = share_types.get_extra_specs_from_share(share)

        path = self._hpe3par.create_share_from_snapshot(
            share['id'],
            protocol,
            extra_specs,
            share['project_id'],
            snapshot['share_id'],
            snapshot['id'],
            fpg,
            vfs,
            ips,
            size=share['size'],
            comment=self.build_share_comment(share))

        return self._hpe3par.build_export_locations(protocol, ips, path)
Code example #4
File: cephfs_native.py  Project: ajarr/manila
    def create_share(self, context, share, share_server=None):
        """Create a CephFS volume.

        :param context: A RequestContext.
        :param share: A Share.
        :param share_server: Always None for CephFS native.
        :return: The export locations dictionary.
        """

        # `share` is a Share
        LOG.debug(
            "create_share {be} name={id} size={size} cg_id={cg}".format(
                be=self.backend_name, id=share["id"], size=share["size"], cg=share["consistency_group_id"]
            )
        )

        extra_specs = share_types.get_extra_specs_from_share(share)
        data_isolated = extra_specs.get("cephfs:data_isolated", False)

        size = self._to_bytes(share["size"])

        # Create the CephFS volume
        volume = self.volume_client.create_volume(self._share_path(share), size=size, data_isolated=data_isolated)

        # To mount this you need to know the mon IPs and the path to the volume
        mon_addrs = self.volume_client.get_mon_addrs()

        export_location = "{addrs}:{path}".format(addrs=",".join(mon_addrs), path=volume["mount_path"])

        LOG.info(
            _LI("Calculated export location for share %(id)s: %(loc)s"), {"id": share["id"], "loc": export_location}
        )

        return {"path": export_location, "is_admin_only": False, "metadata": {}}
Code example #5
    def create_share(self, context, share, share_server=None):
        """Create a CephFS volume.

        :param context: A RequestContext.
        :param share: A Share.
        :param share_server: Always None for CephFS native.
        :return: The export locations dictionary.
        """
        requested_proto = share['share_proto'].upper()
        supported_proto = (
            self.configuration.cephfs_protocol_helper_type.upper())
        if (requested_proto != supported_proto):
            msg = _("Share protocol %s is not supported.") % requested_proto
            raise exception.ShareBackendException(msg=msg)

        # `share` is a Share
        msg = _("create_share {be} name={id} size={size}"
                " share_group_id={group}")
        LOG.debug(msg.format(
            be=self.backend_name, id=share['id'], size=share['size'],
            group=share['share_group_id']))

        extra_specs = share_types.get_extra_specs_from_share(share)
        data_isolated = extra_specs.get("cephfs:data_isolated", False)

        size = self._to_bytes(share['size'])

        # Create the CephFS volume
        cephfs_volume = self.volume_client.create_volume(
            cephfs_share_path(share), size=size, data_isolated=data_isolated,
            mode=self._cephfs_volume_mode)

        return self.protocol_helper.get_export_locations(share, cephfs_volume)
Code example #6
    def _allocate_container(self, share, vserver_client):
        """Create new share on aggregate."""
        share_name = self._get_valid_share_name(share['id'])

        # Get Data ONTAP aggregate name as pool name.
        pool_name = share_utils.extract_host(share['host'], level='pool')
        if pool_name is None:
            msg = _("Pool is not available in the share host field.")
            raise exception.InvalidHost(reason=msg)

        extra_specs = share_types.get_extra_specs_from_share(share)
        self._check_extra_specs_validity(share, extra_specs)
        provisioning_options = self._get_provisioning_options(extra_specs)

        LOG.debug(
            'Creating share %(share)s on pool %(pool)s with '
            'provisioning options %(options)s', {
                'share': share_name,
                'pool': pool_name,
                'options': provisioning_options
            })
        vserver_client.create_volume(pool_name, share_name, share['size'],
                                     **provisioning_options)
Code example #7
File: cephfs_native.py  Project: ponychou/manila
    def delete_share(self, context, share, share_server=None):
        extra_specs = share_types.get_extra_specs_from_share(share)
        data_isolated = extra_specs.get("cephfs:data_isolated", False)

        self.volume_client.delete_volume(self._share_path(share),
                                         data_isolated=data_isolated)
        self.volume_client.purge_volume(self._share_path(share),
                                        data_isolated=data_isolated)
Code example #8
    def test_get_extra_specs_from_share(self):
        expected = self.fake_extra_specs
        self.mock_object(share_types, "get_share_type_extra_specs", mock.Mock(return_value=expected))

        spec_value = share_types.get_extra_specs_from_share(self.fake_share)

        self.assertEqual(expected, spec_value)
        share_types.get_share_type_extra_specs.assert_called_once_with(self.fake_share_type_id)
Code example #9
File: cephfs_native.py  Project: vponomaryov/manila
    def delete_share(self, context, share, share_server=None):
        extra_specs = share_types.get_extra_specs_from_share(share)
        data_isolated = extra_specs.get("cephfs:data_isolated", False)

        self.volume_client.delete_volume(self._share_path(share),
                                         data_isolated=data_isolated)
        self.volume_client.purge_volume(self._share_path(share),
                                        data_isolated=data_isolated)
Code example #10
File: test_share_types.py  Project: openstack/manila
    def test_get_extra_specs_from_share(self):
        expected = self.fake_extra_specs
        self.mock_object(share_types, 'get_share_type_extra_specs',
                         mock.Mock(return_value=expected))

        spec_value = share_types.get_extra_specs_from_share(self.fake_share)

        self.assertEqual(expected, spec_value)
        share_types.get_share_type_extra_specs.assert_called_once_with(
            self.fake_share_type_id)
Code example #11
File: driver.py  Project: ISCAS-VDI/manila-base
    def _get_dataset_creation_options(self, share, is_readonly=False):
        """Returns list of options to be used for dataset creation."""
        options = ['quota=%sG' % share['size']]
        extra_specs = share_types.get_extra_specs_from_share(share)

        dedupe_set = False
        dedupe = extra_specs.get('dedupe')
        if dedupe:
            dedupe = strutils.bool_from_string(
                dedupe.lower().split(' ')[-1], default=dedupe)
            if (dedupe in self.common_capabilities['dedupe']):
                options.append('dedup=%s' % ('on' if dedupe else 'off'))
                dedupe_set = True
            else:
                raise exception.ZFSonLinuxException(msg=_(
                    "Cannot use requested '%(requested)s' value of 'dedupe' "
                    "extra spec. It does not fit allowed value '%(allowed)s' "
                    "that is configured for backend.") % {
                        'requested': dedupe,
                        'allowed': self.common_capabilities['dedupe']})

        compression_set = False
        compression_type = extra_specs.get('zfsonlinux:compression')
        if compression_type:
            if (compression_type == 'off' and
                    False in self.common_capabilities['compression']):
                options.append('compression=off')
                compression_set = True
            elif (compression_type != 'off' and
                    True in self.common_capabilities['compression']):
                options.append('compression=%s' % compression_type)
                compression_set = True
            else:
                raise exception.ZFSonLinuxException(msg=_(
                    "Cannot use value '%s' of extra spec "
                    "'zfsonlinux:compression' because compression is disabled "
                    "for this backend. Set extra spec 'compression=True' to "
                    "make scheduler pick up appropriate backend."
                ) % compression_type)

        for option in self.dataset_creation_options or []:
            if any(v in option for v in (
                    'readonly', 'sharenfs', 'sharesmb', 'quota')):
                continue
            if 'dedup' in option and dedupe_set is True:
                continue
            if 'compression' in option and compression_set is True:
                continue
            options.append(option)
        if is_readonly:
            options.append('readonly=on')
        else:
            options.append('readonly=off')
        return options
Code example #12
    def allow_access(self, context, share, access, share_server=None):
        """Allow access to the share."""

        extra_specs = None
        if 'NFS' == share['share_proto']:  # Avoiding DB call otherwise
            extra_specs = share_types.get_extra_specs_from_share(share)

        self._hpe3par.allow_access(share['project_id'], share['id'],
                                   share['share_proto'], extra_specs,
                                   access['access_type'], access['access_to'],
                                   access['access_level'], self.fpg, self.vfs)
Code example #13
File: gpfs.py  Project: dinghb/manila
    def get_export_options(self, share, access, helper):
        """Get the export options."""
        extra_specs = share_types.get_extra_specs_from_share(share)
        if helper == 'KNFS':
            export_options = extra_specs.get('knfs:export_options')
        elif helper == 'CES':
            export_options = extra_specs.get('ces:export_options')
        else:
            export_options = None

        options = self._get_validated_opt_list(export_options)
        options.append(self.get_access_option(access))
        return ','.join(options)
Code example #14
File: hp_3par_driver.py  Project: alinbalutoiu/manila
    def create_share(self, context, share, share_server=None):
        """Is called to create share."""

        ip = self.share_ip_address

        protocol = share["share_proto"]
        extra_specs = share_types.get_extra_specs_from_share(share)

        path = self._hp3par.create_share(
            share["project_id"], share["id"], protocol, extra_specs, self.fpg, self.vfs, size=share["size"]
        )

        return self._build_export_location(protocol, ip, path)
Code example #15
File: lib_base.py  Project: jkasarherou/manila
    def _manage_container(self, share, vserver_client):
        """Bring existing volume under management as a share."""

        protocol_helper = self._get_helper(share)
        protocol_helper.set_client(vserver_client)

        volume_name = protocol_helper.get_share_name_for_share(share)
        if not volume_name:
            msg = _("Volume could not be determined from export location " "%(export)s.")
            msg_args = {"export": share["export_location"]}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        share_name = self._get_valid_share_name(share["id"])
        aggregate_name = share_utils.extract_host(share["host"], level="pool")

        # Get existing volume info
        volume = vserver_client.get_volume_to_manage(aggregate_name, volume_name)
        if not volume:
            msg = _("Volume %(volume)s not found on aggregate %(aggr)s.")
            msg_args = {"volume": volume_name, "aggr": aggregate_name}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        # Ensure volume is manageable
        self._validate_volume_for_manage(volume, vserver_client)

        # Validate extra specs
        extra_specs = share_types.get_extra_specs_from_share(share)
        try:
            self._check_extra_specs_validity(share, extra_specs)
            self._check_aggregate_extra_specs_validity(aggregate_name, extra_specs)
        except exception.ManilaException as ex:
            raise exception.ManageExistingShareTypeMismatch(reason=six.text_type(ex))
        provisioning_options = self._get_provisioning_options(extra_specs)

        debug_args = {"share": share_name, "aggr": aggregate_name, "options": provisioning_options}
        LOG.debug("Managing share %(share)s on aggregate %(aggr)s with " "provisioning options %(options)s", debug_args)

        # Rename & remount volume on new path
        vserver_client.unmount_volume(volume_name)
        vserver_client.set_volume_name(volume_name, share_name)
        vserver_client.mount_volume(share_name)

        # Modify volume to match extra specs
        vserver_client.manage_volume(aggregate_name, share_name, **provisioning_options)

        # Save original volume info to private storage
        original_data = {"original_name": volume["name"], "original_junction_path": volume["junction-path"]}
        self.private_storage.update(share["id"], original_data)

        # When calculating the size, round up to the next GB.
        return int(math.ceil(float(volume["size"]) / units.Gi))
Code example #16
File: hpe_3par_driver.py  Project: sapcc/manila
    def allow_access(self, context, share, access, share_server=None):
        """Allow access to the share."""

        extra_specs = None
        if 'NFS' == share['share_proto']:  # Avoiding DB call otherwise
            extra_specs = share_types.get_extra_specs_from_share(share)

        self._hpe3par.allow_access(share['project_id'],
                                   share['id'],
                                   share['share_proto'],
                                   extra_specs,
                                   access['access_type'],
                                   access['access_to'],
                                   access['access_level'],
                                   self.fpg,
                                   self.vfs)
Code example #17
File: hpe_3par_driver.py  Project: vponomaryov/manila
    def update_access(self, context, share, access_rules, add_rules,
                      delete_rules, share_server=None):
        """Update access to the share."""
        extra_specs = None
        if 'NFS' == share['share_proto']:  # Avoiding DB call otherwise
            extra_specs = share_types.get_extra_specs_from_share(share)

        fpg, vfs, ips = self._get_pool_location(share, share_server)
        self._hpe3par.update_access(share['project_id'],
                                    share['id'],
                                    share['share_proto'],
                                    extra_specs,
                                    access_rules,
                                    add_rules,
                                    delete_rules,
                                    fpg,
                                    vfs)
Code example #18
File: hpe_3par_driver.py  Project: dinghb/manila
    def update_access(self,
                      context,
                      share,
                      access_rules,
                      add_rules,
                      delete_rules,
                      share_server=None):
        """Update access to the share."""
        extra_specs = None
        if 'NFS' == share['share_proto']:  # Avoiding DB call otherwise
            extra_specs = share_types.get_extra_specs_from_share(share)

        fpg, vfs, ips = self._get_pool_location(share, share_server)
        self._hpe3par.update_access(share['project_id'], share['id'],
                                    share['share_proto'], extra_specs,
                                    access_rules, add_rules, delete_rules, fpg,
                                    vfs)
Code example #19
File: cephfs_native.py  Project: dinghb/manila
    def create_share(self, context, share, share_server=None):
        """Create a CephFS volume.

        :param context: A RequestContext.
        :param share: A Share.
        :param share_server: Always None for CephFS native.
        :return: The export locations dictionary.
        """

        # `share` is a Share
        msg = _("create_share {be} name={id} size={size}"
                " share_group_id={group}")
        LOG.debug(
            msg.format(be=self.backend_name,
                       id=share['id'],
                       size=share['size'],
                       group=share['share_group_id']))

        extra_specs = share_types.get_extra_specs_from_share(share)
        data_isolated = extra_specs.get("cephfs:data_isolated", False)

        size = self._to_bytes(share['size'])

        # Create the CephFS volume
        volume = self.volume_client.create_volume(self._share_path(share),
                                                  size=size,
                                                  data_isolated=data_isolated)

        # To mount this you need to know the mon IPs and the path to the volume
        mon_addrs = self.volume_client.get_mon_addrs()

        export_location = "{addrs}:{path}".format(addrs=",".join(mon_addrs),
                                                  path=volume['mount_path'])

        LOG.info(_LI("Calculated export location for share %(id)s: %(loc)s"), {
            "id": share['id'],
            "loc": export_location
        })

        return {
            'path': export_location,
            'is_admin_only': False,
            'metadata': {},
        }
Code example #20
    def create_share(self, context, share, share_server=None):
        """Is called to create share."""

        ip = self._get_share_ip(share_server)

        protocol = share['share_proto']
        extra_specs = share_types.get_extra_specs_from_share(share)

        path = self._hpe3par.create_share(
            share['project_id'],
            share['id'],
            protocol,
            extra_specs,
            self.fpg,
            self.vfs,
            size=share['size'],
            comment=self.build_share_comment(share))

        return self._build_export_location(protocol, ip, path)
Code example #21
File: hpe_3par_driver.py  Project: sapcc/manila
    def create_share(self, context, share, share_server=None):
        """Is called to create share."""

        ip = self._get_share_ip(share_server)

        protocol = share['share_proto']
        extra_specs = share_types.get_extra_specs_from_share(share)

        path = self._hpe3par.create_share(
            share['project_id'],
            share['id'],
            protocol,
            extra_specs,
            self.fpg, self.vfs,
            size=share['size'],
            comment=self.build_share_comment(share)
        )

        return self._hpe3par.build_export_location(protocol, ip, path)
Code example #22
File: lib_base.py  Project: tomegathericon/manila
    def _allocate_container(self, share, vserver_client):
        """Create new share on aggregate."""
        share_name = self._get_valid_share_name(share['id'])

        # Get Data ONTAP aggregate name as pool name.
        pool_name = share_utils.extract_host(share['host'], level='pool')
        if pool_name is None:
            msg = _("Pool is not available in the share host field.")
            raise exception.InvalidHost(reason=msg)

        extra_specs = share_types.get_extra_specs_from_share(share)
        self._check_extra_specs_validity(share, extra_specs)
        provisioning_options = self._get_provisioning_options(extra_specs)

        LOG.debug('Creating share %(share)s on pool %(pool)s with '
                  'provisioning options %(options)s',
                  {'share': share_name, 'pool': pool_name,
                   'options': provisioning_options})
        vserver_client.create_volume(pool_name, share_name,
                                     share['size'],
                                     **provisioning_options)
Code example #23
File: hp_3par_driver.py  Project: sajuptpm/manila
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        """Is called to create share from snapshot."""

        ip = self.share_ip_address

        protocol = share['share_proto']
        extra_specs = share_types.get_extra_specs_from_share(share)

        path = self._hp3par.create_share_from_snapshot(
            share['id'],
            protocol,
            extra_specs,
            snapshot['share']['project_id'],
            snapshot['share']['id'],
            snapshot['share']['share_proto'],
            snapshot['id'],
            self.fpg,
            self.vfs
        )

        return self._build_export_location(protocol, ip, path)
Code example #24
File: gpfs.py  Project: vkmc/manila
    def get_export_options(self, share, access, helper, options_not_allowed):
        """Get the export options."""
        extra_specs = share_types.get_extra_specs_from_share(share)
        if helper == 'KNFS':
            export_options = extra_specs.get('knfs:export_options')
        elif helper == 'CES':
            export_options = extra_specs.get('ces:export_options')
        else:
            export_options = None

        if export_options:
            options = export_options.lower().split(',')
        else:
            options = []

        invalid_options = [
            option for option in options if option in options_not_allowed
        ]

        if invalid_options:
            raise exception.InvalidInput(reason='Invalid export_option %s as '
                                         'it is set by access_type.' %
                                         invalid_options)

        if access['access_level'] == constants.ACCESS_LEVEL_RO:
            if helper == 'KNFS':
                options.append(constants.ACCESS_LEVEL_RO)
            elif helper == 'CES':
                options.append('access_type=ro')
        else:
            if helper == 'KNFS':
                options.append(constants.ACCESS_LEVEL_RW)
            elif helper == 'CES':
                options.append('access_type=rw')

        return ','.join(options)
Code example #25
File: hpe_3par_driver.py  Project: vponomaryov/manila
    def create_share_from_snapshot(self, context, share, snapshot,
                                   share_server=None):
        """Is called to create share from snapshot."""

        fpg, vfs, ips = self._get_pool_location(share, share_server)

        protocol = share['share_proto']
        extra_specs = share_types.get_extra_specs_from_share(share)

        path = self._hpe3par.create_share_from_snapshot(
            share['id'],
            protocol,
            extra_specs,
            share['project_id'],
            snapshot['share_id'],
            snapshot['id'],
            fpg,
            vfs,
            ips,
            size=share['size'],
            comment=self.build_share_comment(share)
        )

        return self._hpe3par.build_export_locations(protocol, ips, path)
Code example #26
File: gpfs.py  Project: NetApp/manila
    def get_export_options(self, share, access, helper, options_not_allowed):
        """Get the export options."""
        extra_specs = share_types.get_extra_specs_from_share(share)
        if helper == 'KNFS':
            export_options = extra_specs.get('knfs:export_options')
        elif helper == 'CES':
            export_options = extra_specs.get('ces:export_options')
        else:
            export_options = None

        if export_options:
            options = export_options.lower().split(',')
        else:
            options = []

        invalid_options = [
            option for option in options if option in options_not_allowed
        ]

        if invalid_options:
            raise exception.InvalidInput(reason='Invalid export_option %s as '
                                                'it is set by access_type.'
                                                % invalid_options)

        if access['access_level'] == constants.ACCESS_LEVEL_RO:
            if helper == 'KNFS':
                options.append(constants.ACCESS_LEVEL_RO)
            elif helper == 'CES':
                options.append('access_type=ro')
        else:
            if helper == 'KNFS':
                options.append(constants.ACCESS_LEVEL_RW)
            elif helper == 'CES':
                options.append('access_type=rw')

        return ','.join(options)
Code example #27
    def _manage_container(self, share, vserver_client):
        """Bring existing volume under management as a share."""

        protocol_helper = self._get_helper(share)
        protocol_helper.set_client(vserver_client)

        volume_name = protocol_helper.get_share_name_for_share(share)
        if not volume_name:
            msg = _('Volume could not be determined from export location '
                    '%(export)s.')
            msg_args = {'export': share['export_location']}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        share_name = self._get_valid_share_name(share['id'])
        aggregate_name = share_utils.extract_host(share['host'], level='pool')

        # Get existing volume info
        volume = vserver_client.get_volume_to_manage(aggregate_name,
                                                     volume_name)
        if not volume:
            msg = _('Volume %(volume)s not found on aggregate %(aggr)s.')
            msg_args = {'volume': volume_name, 'aggr': aggregate_name}
            raise exception.ManageInvalidShare(reason=msg % msg_args)

        # Ensure volume is manageable
        self._validate_volume_for_manage(volume, vserver_client)

        # Validate extra specs
        extra_specs = share_types.get_extra_specs_from_share(share)
        try:
            self._check_extra_specs_validity(share, extra_specs)
            self._check_aggregate_extra_specs_validity(aggregate_name,
                                                       extra_specs)
        except exception.ManilaException as ex:
            raise exception.ManageExistingShareTypeMismatch(
                reason=six.text_type(ex))
        provisioning_options = self._get_provisioning_options(extra_specs)

        debug_args = {
            'share': share_name,
            'aggr': aggregate_name,
            'options': provisioning_options
        }
        LOG.debug('Managing share %(share)s on aggregate %(aggr)s with '
                  'provisioning options %(options)s', debug_args)

        # Rename & remount volume on new path
        vserver_client.unmount_volume(volume_name)
        vserver_client.set_volume_name(volume_name, share_name)
        vserver_client.mount_volume(share_name)

        # Modify volume to match extra specs
        vserver_client.manage_volume(aggregate_name, share_name,
                                     **provisioning_options)

        # Save original volume info to private storage
        original_data = {
            'original_name': volume['name'],
            'original_junction_path': volume['junction-path']
        }
        self.private_storage.update(share['id'], original_data)

        # When calculating the size, round up to the next GB.
        return int(math.ceil(float(volume['size']) / units.Gi))
Code example #28
File: gpfs.py  Project: dinghb/manila
    def _get_share_opts(self, share):
        """Get a list of NFS options from the share's share type."""
        extra_specs = share_types.get_extra_specs_from_share(share)
        opts_list = self._get_validated_opt_list(
            extra_specs.get('ces:export_options'))
        return opts_list
Code example #29
    def manage_existing(self, share, driver_options):
        """Manages a share that exists on backend."""
        if share['share_proto'].lower() == 'nfs':
            # 10.0.0.1:/share/example
            LOG.info(
                "Share %(shr_path)s will be managed with ID "
                "%(shr_id)s.", {
                    'shr_path': share['export_locations'][0]['path'],
                    'shr_id': share['id']
                })

            old_path_info = share['export_locations'][0]['path'].split(
                ':/share/')

            if len(old_path_info) == 2:
                ip = old_path_info[0]
                share_name = old_path_info[1]
            else:
                msg = _("Incorrect path. It should have the following format: "
                        "IP:/share/share_name.")
                raise exception.ShareBackendException(msg=msg)
        else:
            msg = _('Invalid NAS protocol: %s') % share['share_proto']
            raise exception.InvalidInput(reason=msg)

        if ip != self.configuration.qnap_share_ip:
            msg = _("The NAS IP %(ip)s is not configured.") % {'ip': ip}
            raise exception.ShareBackendException(msg=msg)

        existing_share = self.api_executor.get_share_info(
            self.configuration.qnap_poolname, vol_label=share_name)
        if existing_share is None:
            msg = _("The share %s trying to be managed was not found on "
                    "backend.") % share['id']
            raise exception.ManageInvalidShare(reason=msg)

        extra_specs = share_types.get_extra_specs_from_share(share)
        qnap_thin_provision = share_types.parse_boolean_extra_spec(
            'thin_provisioning',
            extra_specs.get("thin_provisioning")
            or extra_specs.get('capabilities:thin_provisioning') or 'true')
        qnap_compression = share_types.parse_boolean_extra_spec(
            'compression',
            extra_specs.get("compression")
            or extra_specs.get('capabilities:compression') or 'true')
        qnap_deduplication = share_types.parse_boolean_extra_spec(
            'dedupe',
            extra_specs.get("dedupe") or extra_specs.get('capabilities:dedupe')
            or 'false')
        qnap_ssd_cache = share_types.parse_boolean_extra_spec(
            'qnap_ssd_cache',
            extra_specs.get("qnap_ssd_cache")
            or extra_specs.get("capabilities:qnap_ssd_cache") or 'false')
        LOG.debug(
            'qnap_thin_provision: %(qnap_thin_provision)s '
            'qnap_compression: %(qnap_compression)s '
            'qnap_deduplication: %(qnap_deduplication)s '
            'qnap_ssd_cache: %(qnap_ssd_cache)s', {
                'qnap_thin_provision': qnap_thin_provision,
                'qnap_compression': qnap_compression,
                'qnap_deduplication': qnap_deduplication,
                'qnap_ssd_cache': qnap_ssd_cache
            })
        if (qnap_deduplication and not qnap_thin_provision):
            msg = _("Dedupe cannot be enabled without thin_provisioning.")
            LOG.debug('Dedupe cannot be enabled without thin_provisioning.')
            raise exception.InvalidExtraSpec(reason=msg)

        vol_no = existing_share.find('vol_no').text
        vol = self.api_executor.get_specific_volinfo(vol_no)
        vol_size_gb = math.ceil(float(vol.find('size').text) / units.Gi)

        share_dict = {
            'sharename': share_name,
            'old_sharename': share_name,
            'thin_provision': qnap_thin_provision,
            'compression': qnap_compression,
            'deduplication': qnap_deduplication,
            'ssd_cache': qnap_ssd_cache,
            'share_proto': share['share_proto']
        }
        self.api_executor.edit_share(share_dict)

        _metadata = {}
        _metadata['volID'] = vol_no
        _metadata['volName'] = share_name
        _metadata['thin_provision'] = qnap_thin_provision
        _metadata['compression'] = qnap_compression
        _metadata['deduplication'] = qnap_deduplication
        _metadata['ssd_cache'] = qnap_ssd_cache
        self.private_storage.update(share['id'], _metadata)

        LOG.info(
            "Share %(shr_path)s was successfully managed with ID "
            "%(shr_id)s.", {
                'shr_path': share['export_locations'][0]['path'],
                'shr_id': share['id']
            })

        export_locations = self._get_location_path(
            share_name, share['share_proto'], self.configuration.qnap_share_ip,
            vol_no)

        return {'size': vol_size_gb, 'export_locations': export_locations}
Code example #30
    def create_share(self, context, share, share_server=None):
        """Create a new share."""
        LOG.debug('share: %s', share.__dict__)
        extra_specs = share_types.get_extra_specs_from_share(share)
        LOG.debug('extra_specs: %s', extra_specs)
        qnap_thin_provision = share_types.parse_boolean_extra_spec(
            'thin_provisioning',
            extra_specs.get("thin_provisioning")
            or extra_specs.get('capabilities:thin_provisioning') or 'true')
        qnap_compression = share_types.parse_boolean_extra_spec(
            'compression',
            extra_specs.get("compression")
            or extra_specs.get('capabilities:compression') or 'true')
        qnap_deduplication = share_types.parse_boolean_extra_spec(
            'dedupe',
            extra_specs.get("dedupe") or extra_specs.get('capabilities:dedupe')
            or 'false')
        qnap_ssd_cache = share_types.parse_boolean_extra_spec(
            'qnap_ssd_cache',
            extra_specs.get("qnap_ssd_cache")
            or extra_specs.get("capabilities:qnap_ssd_cache") or 'false')
        LOG.debug(
            'qnap_thin_provision: %(qnap_thin_provision)s '
            'qnap_compression: %(qnap_compression)s '
            'qnap_deduplication: %(qnap_deduplication)s '
            'qnap_ssd_cache: %(qnap_ssd_cache)s', {
                'qnap_thin_provision': qnap_thin_provision,
                'qnap_compression': qnap_compression,
                'qnap_deduplication': qnap_deduplication,
                'qnap_ssd_cache': qnap_ssd_cache
            })

        share_proto = share['share_proto']

        # User could create two shares with the same name on horizon.
        # Therefore, we should not use displayname to create shares on NAS.
        create_share_name = self._gen_random_name("share")
        # If share name exists, need to change to another name.
        created_share = self.api_executor.get_share_info(
            self.configuration.qnap_poolname, vol_label=create_share_name)
        LOG.debug('created_share: %s', created_share)
        if created_share is not None:
            msg = (_("The share name %s is used by other share on NAS.") %
                   create_share_name)
            LOG.error(msg)
            raise exception.ShareBackendException(msg=msg)

        if (qnap_deduplication and not qnap_thin_provision):
            msg = _("Dedupe cannot be enabled without thin_provisioning.")
            LOG.debug('Dedupe cannot be enabled without thin_provisioning.')
            raise exception.InvalidExtraSpec(reason=msg)
        self.api_executor.create_share(share,
                                       self.configuration.qnap_poolname,
                                       create_share_name,
                                       share_proto,
                                       qnap_thin_provision=qnap_thin_provision,
                                       qnap_compression=qnap_compression,
                                       qnap_deduplication=qnap_deduplication,
                                       qnap_ssd_cache=qnap_ssd_cache)
        created_share = self._get_share_info(create_share_name)
        volID = created_share.find('vol_no').text
        # Use private_storage to record volume ID and Name created in the NAS.
        LOG.debug('volID: %(volID)s '
                  'volName: %(create_share_name)s', {
                      'volID': volID,
                      'create_share_name': create_share_name
                  })
        _metadata = {
            'volID': volID,
            'volName': create_share_name,
            'thin_provision': qnap_thin_provision,
            'compression': qnap_compression,
            'deduplication': qnap_deduplication,
            'ssd_cache': qnap_ssd_cache
        }
        self.private_storage.update(share['id'], _metadata)

        return self._get_location_path(create_share_name, share['share_proto'],
                                       self.configuration.qnap_share_ip, volID)
Code example #31
File: qnap.py  Project: openstack/manila
    def create_share(self, context, share, share_server=None):
        """Create a new share."""
        LOG.debug('share: %s', share.__dict__)
        extra_specs = share_types.get_extra_specs_from_share(share)
        LOG.debug('extra_specs: %s', extra_specs)
        qnap_thin_provision = share_types.parse_boolean_extra_spec(
            'thin_provisioning', extra_specs.get("thin_provisioning") or
            extra_specs.get('capabilities:thin_provisioning') or 'true')
        qnap_compression = share_types.parse_boolean_extra_spec(
            'compression', extra_specs.get("compression") or
            extra_specs.get('capabilities:compression') or 'true')
        qnap_deduplication = share_types.parse_boolean_extra_spec(
            'dedupe', extra_specs.get("dedupe") or
            extra_specs.get('capabilities:dedupe') or 'false')
        qnap_ssd_cache = share_types.parse_boolean_extra_spec(
            'qnap_ssd_cache', extra_specs.get("qnap_ssd_cache") or
            extra_specs.get("capabilities:qnap_ssd_cache") or 'false')
        LOG.debug('qnap_thin_provision: %(qnap_thin_provision)s '
                  'qnap_compression: %(qnap_compression)s '
                  'qnap_deduplication: %(qnap_deduplication)s '
                  'qnap_ssd_cache: %(qnap_ssd_cache)s',
                  {'qnap_thin_provision': qnap_thin_provision,
                   'qnap_compression': qnap_compression,
                   'qnap_deduplication': qnap_deduplication,
                   'qnap_ssd_cache': qnap_ssd_cache})

        share_proto = share['share_proto']

        # User could create two shares with the same name on horizon.
        # Therefore, we should not use displayname to create shares on NAS.
        create_share_name = self._gen_random_name("share")
        # If share name exists, need to change to another name.
        created_share = self.api_executor.get_share_info(
            self.configuration.qnap_poolname,
            vol_label=create_share_name)
        LOG.debug('created_share: %s', created_share)
        if created_share is not None:
            msg = (_("The share name %s is used by other share on NAS.") %
                   create_share_name)
            LOG.error(msg)
            raise exception.ShareBackendException(msg=msg)

        if (qnap_deduplication and not qnap_thin_provision):
            msg = _("Dedupe cannot be enabled without thin_provisioning.")
            LOG.debug('Dedupe cannot be enabled without thin_provisioning.')
            raise exception.InvalidExtraSpec(reason=msg)
        self.api_executor.create_share(
            share,
            self.configuration.qnap_poolname,
            create_share_name,
            share_proto,
            qnap_thin_provision=qnap_thin_provision,
            qnap_compression=qnap_compression,
            qnap_deduplication=qnap_deduplication,
            qnap_ssd_cache=qnap_ssd_cache)
        created_share = self._get_share_info(create_share_name)
        volID = created_share.find('vol_no').text
        # Use private_storage to record volume ID and Name created in the NAS.
        LOG.debug('volID: %(volID)s '
                  'volName: %(create_share_name)s',
                  {'volID': volID,
                   'create_share_name': create_share_name})
        _metadata = {'volID': volID,
                     'volName': create_share_name,
                     'thin_provision': qnap_thin_provision,
                     'compression': qnap_compression,
                     'deduplication': qnap_deduplication,
                     'ssd_cache': qnap_ssd_cache}
        self.private_storage.update(share['id'], _metadata)

        return self._get_location_path(create_share_name,
                                       share['share_proto'],
                                       self.configuration.qnap_share_ip,
                                       volID)
Code example #32
File: qnap.py  Project: openstack/manila
    def manage_existing(self, share, driver_options):
        """Manages a share that exists on backend."""
        if share['share_proto'].lower() == 'nfs':
            # 10.0.0.1:/share/example
            LOG.info("Share %(shr_path)s will be managed with ID "
                     "%(shr_id)s.",
                     {'shr_path': share['export_locations'][0]['path'],
                      'shr_id': share['id']})

            old_path_info = share['export_locations'][0]['path'].split(
                ':/share/')

            if len(old_path_info) == 2:
                ip = old_path_info[0]
                share_name = old_path_info[1]
            else:
                msg = _("Incorrect path. It should have the following format: "
                        "IP:/share/share_name.")
                raise exception.ShareBackendException(msg=msg)
        else:
            msg = _('Invalid NAS protocol: %s') % share['share_proto']
            raise exception.InvalidInput(reason=msg)

        if ip != self.configuration.qnap_share_ip:
            msg = _("The NAS IP %(ip)s is not configured.") % {'ip': ip}
            raise exception.ShareBackendException(msg=msg)

        existing_share = self.api_executor.get_share_info(
            self.configuration.qnap_poolname,
            vol_label=share_name)
        if existing_share is None:
            msg = _("The share %s trying to be managed was not found on "
                    "backend.") % share['id']
            raise exception.ManageInvalidShare(reason=msg)

        extra_specs = share_types.get_extra_specs_from_share(share)
        qnap_thin_provision = share_types.parse_boolean_extra_spec(
            'thin_provisioning', extra_specs.get("thin_provisioning") or
            extra_specs.get('capabilities:thin_provisioning') or 'true')
        qnap_compression = share_types.parse_boolean_extra_spec(
            'compression', extra_specs.get("compression") or
            extra_specs.get('capabilities:compression') or 'true')
        qnap_deduplication = share_types.parse_boolean_extra_spec(
            'dedupe', extra_specs.get("dedupe") or
            extra_specs.get('capabilities:dedupe') or 'false')
        qnap_ssd_cache = share_types.parse_boolean_extra_spec(
            'qnap_ssd_cache', extra_specs.get("qnap_ssd_cache") or
            extra_specs.get("capabilities:qnap_ssd_cache") or 'false')
        LOG.debug('qnap_thin_provision: %(qnap_thin_provision)s '
                  'qnap_compression: %(qnap_compression)s '
                  'qnap_deduplication: %(qnap_deduplication)s '
                  'qnap_ssd_cache: %(qnap_ssd_cache)s',
                  {'qnap_thin_provision': qnap_thin_provision,
                   'qnap_compression': qnap_compression,
                   'qnap_deduplication': qnap_deduplication,
                   'qnap_ssd_cache': qnap_ssd_cache})
        if (qnap_deduplication and not qnap_thin_provision):
            msg = _("Dedupe cannot be enabled without thin_provisioning.")
            LOG.debug('Dedupe cannot be enabled without thin_provisioning.')
            raise exception.InvalidExtraSpec(reason=msg)

        vol_no = existing_share.find('vol_no').text
        vol = self.api_executor.get_specific_volinfo(vol_no)
        vol_size_gb = math.ceil(float(vol.find('size').text) / units.Gi)

        share_dict = {
            'sharename': share_name,
            'old_sharename': share_name,
            'thin_provision': qnap_thin_provision,
            'compression': qnap_compression,
            'deduplication': qnap_deduplication,
            'ssd_cache': qnap_ssd_cache,
            'share_proto': share['share_proto']
        }
        self.api_executor.edit_share(share_dict)

        _metadata = {}
        _metadata['volID'] = vol_no
        _metadata['volName'] = share_name
        _metadata['thin_provision'] = qnap_thin_provision
        _metadata['compression'] = qnap_compression
        _metadata['deduplication'] = qnap_deduplication
        _metadata['ssd_cache'] = qnap_ssd_cache
        self.private_storage.update(share['id'], _metadata)

        LOG.info("Share %(shr_path)s was successfully managed with ID "
                 "%(shr_id)s.",
                 {'shr_path': share['export_locations'][0]['path'],
                  'shr_id': share['id']})

        export_locations = self._get_location_path(
            share_name,
            share['share_proto'],
            self.configuration.qnap_share_ip,
            vol_no)

        return {'size': vol_size_gb, 'export_locations': export_locations}
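
All of the snippets above follow the same basic pattern: the driver fetches the share type's extra specs with share_types.get_extra_specs_from_share(share), interprets individual specs (often via share_types.parse_boolean_extra_spec with a default), and passes the result on to the backend call. Below is a minimal sketch of that pattern; the ExampleDriver class and its _provision helper are hypothetical and shown for illustration only, not part of any real manila driver.

from manila.share import share_types


class ExampleDriver(object):
    """Hypothetical driver skeleton illustrating extra-spec handling."""

    def create_share(self, context, share, share_server=None):
        # Look up the extra specs attached to the share's share type.
        extra_specs = share_types.get_extra_specs_from_share(share)

        # Interpret a boolean spec, falling back to the scoped
        # 'capabilities:' variant and then to a default of 'true'.
        thin_provision = share_types.parse_boolean_extra_spec(
            'thin_provisioning',
            extra_specs.get('thin_provisioning')
            or extra_specs.get('capabilities:thin_provisioning')
            or 'true')

        # Hand the interpreted options to the backend-specific call
        # (_provision is a placeholder, not a real manila API).
        return self._provision(share['id'], share['size'],
                               thin_provision=thin_provision)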