Example 1
    def NodeStageVolume(self, request, context):
        Utils.validate_params_exists(
            request, ['volume_id', 'staging_target_path', 'volume_capability'])

        volume_id = request.volume_id
        staging_target_path = request.staging_target_path
        volume_capability = request.volume_capability
        secrets = request.secrets
        publish_context = request.publish_context
        volume_context = request.volume_context

        reqJson = MessageToJson(request)
        self.logger.debug(
            'NodeStageVolume called with request: {}'.format(reqJson))

        access_mode = volume_capability.access_mode.mode
        access_type = self._get_block_or_mount_volume(request)

        nvmesh_volume_name = volume_id
        block_device_path = Utils.get_nvmesh_block_device_path(
            nvmesh_volume_name)

        # run nvmesh attach locally
        requested_nvmesh_access_mode = Consts.AccessMode.to_nvmesh(access_mode)
        Utils.nvmesh_attach_volume(nvmesh_volume_name,
                                   requested_nvmesh_access_mode)
        Utils.wait_for_volume_io_enabled(nvmesh_volume_name)

        if access_type == Consts.VolumeAccessType.MOUNT:
            mount_request = volume_capability.mount
            self.logger.info(
                'Requested Mounted FileSystem Volume with fs_type={}'.format(
                    mount_request.fs_type))
            fs_type = mount_request.fs_type or 'ext4'
            mount_flags = []

            if mount_request.mount_flags:
                for flag in mount_request.mount_flags.split(' '):
                    mount_flags.append(flag)

            FileSystemManager.format_block_device(block_device_path, fs_type)

            if FileSystemManager.is_mounted(staging_target_path):
                self.logger.warning(
                    'path {} is already mounted'.format(staging_target_path))

            FileSystemManager.mount(source=block_device_path,
                                    target=staging_target_path)

        elif access_type == Consts.VolumeAccessType.BLOCK:
            self.logger.info('Requested Block Volume')
            # We do not mount here; NodePublishVolume will bind-mount directly from the block device to the publish_path.
            # This is because Kubernetes automatically creates a directory at the staging_path.

        else:
            self.logger.info('Unknown AccessType {}'.format(access_type))

        return NodeStageVolumeResponse()
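
The staging flow above relies on FileSystemManager.format_block_device and FileSystemManager.mount, whose implementations are not part of this example. Purely as an assumption about how such a helper could be built, a mount wrapper might shell out to mount(8) along these lines (the function name and signature are illustrative, not the driver's actual code):

import subprocess

def mount(source, target, fs_type=None, flags=None):
    # Illustrative sketch only: build a mount(8) command line and run it.
    cmd = ['mount']
    if fs_type:
        cmd += ['-t', fs_type]
    if flags:
        cmd += flags
    cmd += [source, target]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode != 0:
        raise OSError('mount of {} on {} failed: {}'.format(source, target, stderr))
    return stdout, stderr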
Example 2
    def NodePublishVolume(self, request, context):
        # NodePublishVolume: mounts the volume from the staging path to the target path.
        Utils.validate_params_exists(request, ['volume_id', 'target_path'])

        volume_id = request.volume_id
        nvmesh_volume_name = volume_id
        staging_target_path = request.staging_target_path
        publish_path = request.target_path
        volume_capability = request.volume_capability
        access_mode = volume_capability.access_mode.mode
        readonly = request.readonly
        access_type = self._get_block_or_mount_volume(request)

        block_device_path = Utils.get_nvmesh_block_device_path(
            nvmesh_volume_name)

        reqJson = MessageToJson(request)
        self.logger.debug(
            'NodePublishVolume called with request: {}'.format(reqJson))

        if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
            raise DriverError(
                StatusCode.NOT_FOUND,
                'nvmesh volume {} was not found under /dev/nvmesh/'.format(
                    nvmesh_volume_name))

        flags = []

        # K8s bug workaround: the readonly flag is not sent to CSI, so we also try to infer it from the AccessMode
        if readonly or access_mode == Consts.AccessMode.MULTI_NODE_READER_ONLY:
            flags.append('-o ro')

        if access_type == Consts.VolumeAccessType.BLOCK:
            # create an empty file for bind mount
            with open(publish_path, 'w'):
                pass

            # bind directly from block device to publish_path
            self.logger.debug(
                'NodePublishVolume trying to bind mount as block device {} to {}'
                .format(block_device_path, publish_path))
            FileSystemManager.bind_mount(source=block_device_path,
                                         target=publish_path,
                                         flags=flags)
        else:
            self.logger.debug(
                'NodePublishVolume trying to bind mount {} to {}'.format(
                    staging_target_path, publish_path))
            FileSystemManager.bind_mount(source=staging_target_path,
                                         target=publish_path,
                                         flags=flags)

        return NodePublishVolumeResponse()
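
The NOT_FOUND check above suggests that attached NVMesh volumes appear as block devices under /dev/nvmesh/. Under that assumption, a minimal sketch of Utils.get_nvmesh_block_device_path and Utils.is_nvmesh_volume_attached could look like this (illustrative only, not necessarily the driver's actual implementation):

import os

NVMESH_DEV_DIR = '/dev/nvmesh/'

def get_nvmesh_block_device_path(nvmesh_volume_name):
    # NVMesh exposes attached volumes as device nodes under /dev/nvmesh/.
    return os.path.join(NVMESH_DEV_DIR, nvmesh_volume_name)

def is_nvmesh_volume_attached(nvmesh_volume_name):
    # Treat the volume as attached when its block device node exists.
    return os.path.exists(get_nvmesh_block_device_path(nvmesh_volume_name))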
Example 3
    def NodeExpandVolume(self, request, context):
        # If this function was called, assume the Controller already verified that this volume is a filesystem-mounted volume,
        # so we resize the file system here.
        volume_id = request.volume_id
        volume_path = request.volume_path
        capacity_range = request.capacity_range

        reqJson = MessageToJson(request)
        self.logger.debug(
            'NodeExpandVolume called with request: {}'.format(reqJson))

        zone, nvmesh_vol_name = Utils.zone_and_vol_name_from_co_id(
            request.volume_id)
        block_device_path = Utils.get_nvmesh_block_device_path(nvmesh_vol_name)
        self.logger.debug(
            'NodeExpandVolume zone: {} nvmesh_vol_name: {} block_device_path: {}'
            .format(zone, nvmesh_vol_name, block_device_path))

        fs_type = FileSystemManager.get_fs_type(block_device_path)
        self.logger.debug('fs_type={}'.format(fs_type))

        attempts_left = 20
        resized = False
        while not resized and attempts_left:
            exit_code, stdout, stderr = FileSystemManager.expand_file_system(
                block_device_path, fs_type)
            if 'Nothing to do!' in stderr:
                block_device_size = FileSystemManager.get_block_device_size(
                    block_device_path)
                self.logger.warning(
                    'File System not resized. block device size is {}'.format(
                        block_device_size))
                attempts_left = attempts_left - 1
                Utils.interruptable_sleep(2)
            else:
                resized = True

        if not attempts_left:
            raise DriverError(
                StatusCode.INTERNAL,
                'Back-Off trying to expand {} FileSystem on volume {}'.format(
                    fs_type, block_device_path))

        self.logger.debug(
            'Finished Expanding File System of type {} on volume {}'.format(
                fs_type, block_device_path))
        return NodeExpandVolumeResponse()
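
The retry loop above expects FileSystemManager.expand_file_system to return (exit_code, stdout, stderr) and watches stderr for the 'Nothing to do!' message that resize2fs prints when the file system already fills the device. A minimal sketch of such a helper, assuming it simply dispatches on fs_type (illustrative, not the driver's actual implementation):

import subprocess

def expand_file_system(block_device_path, fs_type):
    # Illustrative sketch: pick a resize tool based on the file system type
    # and return (exit_code, stdout, stderr) as the retry loop expects.
    if fs_type.startswith('ext'):
        cmd = ['resize2fs', block_device_path]
    elif fs_type == 'xfs':
        # xfs_growfs takes the mount point rather than the block device;
        # a real implementation would need to resolve it first.
        cmd = ['xfs_growfs', block_device_path]
    else:
        raise ValueError('unsupported fs_type: {}'.format(fs_type))

    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    return proc.returncode, stdout.decode(), stderr.decode()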
Example 4
    def NodeStageVolume(self, request, context):
        Utils.validate_params_exists(
            request, ['volume_id', 'staging_target_path', 'volume_capability'])

        zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(
            request.volume_id)

        staging_target_path = request.staging_target_path
        volume_capability = request.volume_capability
        secrets = request.secrets
        publish_context = request.publish_context
        volume_context = request.volume_context

        reqJson = MessageToJson(request)
        self.logger.debug(
            'NodeStageVolume called with request: {}'.format(reqJson))

        access_mode = volume_capability.access_mode.mode
        access_type = self._get_block_or_mount_volume(request)

        block_device_path = Utils.get_nvmesh_block_device_path(
            nvmesh_volume_name)

        if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
            # run nvmesh attach locally
            requested_nvmesh_access_mode = Consts.AccessMode.to_nvmesh(
                access_mode)
            Utils.nvmesh_attach_volume(nvmesh_volume_name,
                                       requested_nvmesh_access_mode)

        try:
            Utils.wait_for_volume_io_enabled(nvmesh_volume_name)

            if access_type == Consts.VolumeAccessType.MOUNT:
                mount_request = volume_capability.mount
                self.logger.info(
                    'Requested Mounted FileSystem Volume with fs_type={}'.
                    format(mount_request.fs_type))
                fs_type = mount_request.fs_type or Consts.FSType.EXT4

                mount_permissions, mount_options = self._parse_mount_options(
                    mount_request)
                mkfs_options = volume_context.get('mkfsOptions', '')
                FileSystemManager.format_block_device(block_device_path,
                                                      fs_type, mkfs_options)

                if FileSystemManager.is_mounted(staging_target_path):
                    self.logger.warning('path {} is already mounted'.format(
                        staging_target_path))

                FileSystemManager.mount(source=block_device_path,
                                        target=staging_target_path,
                                        mount_options=mount_options)
                FileSystemManager.chmod(
                    mount_permissions or Consts.DEFAULT_MOUNT_PERMISSIONS,
                    staging_target_path)
            elif access_type == Consts.VolumeAccessType.BLOCK:
                self.logger.info('Requested Block Volume')
                # We do not mount here; NodePublishVolume will bind-mount directly from the block device to the publish_path.
                # This is because Kubernetes automatically creates a directory at the staging_path.

            else:
                self.logger.info('Unknown AccessType {}'.format(access_type))
        except Exception as staging_err:
            # Cleanup - un-mount and detach the volume
            try:
                if FileSystemManager.is_mounted(staging_target_path):
                    FileSystemManager.umount(staging_target_path)

                Utils.nvmesh_detach_volume(nvmesh_volume_name)
            except Exception as cleanup_err:
                self.logger.warning(
                    'Failed to clean up and detach the device after attach succeeded but staging failed. Error: %s'
                    % cleanup_err)

            # Re-raise the initial exception
            raise staging_err

        self.logger.debug(
            'NodeStageVolume finished successfully for request: {}'.format(
                reqJson))
        return NodeStageVolumeResponse()
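
_parse_mount_options is not shown in this example; it returns a permissions value that is later passed to chmod plus the remaining mount options. One plausible sketch, written as a plain function and under the unconfirmed assumption that a purely numeric entry in the CSI mount_flags is treated as the requested permissions while everything else is passed through as a mount option:

def _parse_mount_options(mount_request):
    # Hypothetical sketch: split mount_flags into a chmod-style permission
    # entry (a numeric flag such as '0777') and the remaining mount options.
    # The real driver may use a different scheme.
    mount_permissions = None
    mount_options = []
    for flag in mount_request.mount_flags:
        if flag.isdigit():
            mount_permissions = flag
        else:
            mount_options.append(flag)
    return mount_permissions, mount_options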
Example 5
    def NodePublishVolume(self, request, context):
        # NodePublishVolume: mounts the volume from the staging path to the target path.
        Utils.validate_params_exists(request, ['volume_id', 'target_path'])

        zone, nvmesh_volume_name = Utils.zone_and_vol_name_from_co_id(
            request.volume_id)
        staging_target_path = request.staging_target_path
        publish_path = request.target_path
        volume_capability = request.volume_capability
        access_mode = volume_capability.access_mode.mode
        readonly = request.readonly
        access_type = self._get_block_or_mount_volume(request)
        volume_context = request.volume_context
        podInfo = self._extract_pod_info_from_volume_context(volume_context)

        # K8s bug workaround: the readonly flag is not sent to CSI, so we also try to infer it from the AccessMode
        is_readonly = readonly or access_mode == Consts.AccessMode.MULTI_NODE_READER_ONLY

        block_device_path = Utils.get_nvmesh_block_device_path(
            nvmesh_volume_name)

        reqJson = MessageToJson(request)
        self.logger.debug(
            'NodePublishVolume called with request: {}'.format(reqJson))
        self.logger.debug('NodePublishVolume podInfo: {}'.format(podInfo))

        if not Utils.is_nvmesh_volume_attached(nvmesh_volume_name):
            raise DriverError(
                StatusCode.NOT_FOUND,
                'nvmesh volume {} was not found under /dev/nvmesh/'.format(
                    nvmesh_volume_name))

        requested_mount_permissions, mount_options = self._parse_mount_options(
            volume_capability.mount)

        if is_readonly:
            mount_options.append('ro')

        if access_type == Consts.VolumeAccessType.BLOCK:
            # create an empty file for bind mount of a block device
            with open(publish_path, 'w'):
                pass

            # bind directly from block device to publish_path
            self.logger.debug(
                'NodePublishVolume trying to bind mount as block device {} to {}'
                .format(block_device_path, publish_path))
            FileSystemManager.bind_mount(source=block_device_path,
                                         target=publish_path,
                                         mount_options=mount_options)
        else:
            self.logger.debug(
                'NodePublishVolume creating directory for bind mount at {}'.
                format(publish_path))
            # create an empty dir for bind mount of a file system
            if not os.path.isdir(publish_path):
                os.makedirs(publish_path)

            self.logger.debug(
                'NodePublishVolume trying to bind mount {} to {}'.format(
                    staging_target_path, publish_path))
            FileSystemManager.bind_mount(source=staging_target_path,
                                         target=publish_path,
                                         mount_options=mount_options)

        if not is_readonly:
            FileSystemManager.chmod(
                requested_mount_permissions
                or Consts.DEFAULT_MOUNT_PERMISSIONS, publish_path)

        self.logger.debug(
            'NodePublishVolume finished successfully for request: {}'.format(
                reqJson))
        return NodePublishVolumeResponse()
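
_extract_pod_info_from_volume_context is not shown either. When the CSIDriver object enables podInfoOnMount, Kubernetes adds csi.storage.k8s.io/* keys to the volume_context of NodePublishVolume; a sketch that reads those keys could look like this (the plain-function form and the returned dictionary shape are assumptions, not the driver's actual helper):

def _extract_pod_info_from_volume_context(volume_context):
    # Hypothetical sketch: pull the pod metadata that Kubernetes injects
    # into volume_context when podInfoOnMount is enabled on the CSIDriver.
    return {
        'pod_name': volume_context.get('csi.storage.k8s.io/pod.name'),
        'pod_namespace': volume_context.get('csi.storage.k8s.io/pod.namespace'),
        'pod_uid': volume_context.get('csi.storage.k8s.io/pod.uid'),
        'service_account': volume_context.get('csi.storage.k8s.io/serviceAccount.name'),
    }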