Example #1
    def NodePublishVolume(self, request, context):
        start_time = time.time()
        hostvol = request.volume_context.get("hostvol", "")
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)
        pvpath = request.volume_context.get("path", "")
        pvtype = request.volume_context.get("pvtype", "")
        pvpath_full = os.path.join(mntdir, pvpath)

        logging.debug(logf(
            "Received the mount request",
            volume=request.volume_id,
            hostvol=hostvol,
            pvpath=pvpath,
            pvtype=pvtype
        ))

        mount_glusterfs(hostvol, mntdir)
        logging.debug(logf(
            "Mounted Hosting Volume",
            pv=request.volume_id,
            hostvol=hostvol,
            mntdir=mntdir,
        ))
        # Mount the PV
        # TODO: Handle Volume capability mount flags
        mount_volume(pvpath_full, request.target_path, pvtype, fstype=None)
        logging.info(logf(
            "Mounted PV",
            volume=request.volume_id,
            pvpath=pvpath,
            pvtype=pvtype,
            hostvol=hostvol,
            duration_seconds=time.time() - start_time
        ))
        return csi_pb2.NodePublishVolumeResponse()
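Every variant in this listing builds its log lines with the logf helper imported from the kadalu code base. Its implementation is not part of these excerpts; the sketch below is only an illustration of what such a key=value formatter could look like, so the call pattern above reads on its own (the real helper's output format may differ).

# Hypothetical sketch of a logf-style helper; NOT the kadalu implementation,
# only an illustration of the structured-logging call pattern used above.
def logf(msg, **kwargs):
    """Append key=value pairs to a log message."""
    if not kwargs:
        return msg
    fields = " ".join(f"{key}={value}" for key, value in sorted(kwargs.items()))
    return f"{msg}\t[{fields}]"

# Example:
#   logging.debug(logf("Mounted Hosting Volume", pv="pvc-1", hostvol="pool-1"))
# would produce: Mounted Hosting Volume	[hostvol=pool-1 pv=pvc-1]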
Example #2
File: main.py    Project: vatsa287/kadalu
def mount_storage():
    """
    Mount storage if any volumes exist after a pod reboot
    """
    if os.environ.get("CSI_ROLE", "-") != "provisioner":
        logging.debug("Volume need to be mounted on only provisioner pod")
        return

    host_volumes = get_pv_hosting_volumes({})
    for volume in host_volumes:
        if volume["type"] == "External" and volume["k_format"] == "non-native":
            # Skip mounting external non-native volumes so that
            # kadalu-quotad does not set quota xattrs on them
            continue
        hvol = volume["name"]
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hvol)
        try:
            mount_glusterfs(volume, mntdir)
        except CommandException:
            logging.error(logf("Unable to mount volume", hvol=hvol))
            continue
        logging.info(logf("Volume is mounted successfully", hvol=hvol))
    return
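mount_storage() catches a CommandException raised by mount_glusterfs when the mount command fails. The exception class itself is not shown in these excerpts; the assumed minimal shape below exists only to make the except clause readable in isolation and may not match the real class in kadalu.

# Assumed shape only; the real CommandException in kadalu may carry
# different or additional fields (return code, stdout, stderr, ...).
class CommandException(Exception):
    def __init__(self, ret, out, err):
        super().__init__(f"command failed: ret={ret} err={err}")
        self.ret = ret
        self.out = out
        self.err = err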
Example #3
    def NodePublishVolume(self, request, context):
        start_time = time.time()
        if not request.volume_id:
            errmsg = "Volume ID is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        if not request.target_path:
            errmsg = "Target path is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        if not request.volume_capability:
            errmsg = "Volume capability is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        if not request.volume_context:
            errmsg = "Volume context is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        hostvol = request.volume_context.get("hostvol", "")
        pvpath = request.volume_context.get("path", "")
        pvtype = request.volume_context.get("pvtype", "")
        voltype = request.volume_context.get("type", "")
        gserver = request.volume_context.get("gserver", None)
        gvolname = request.volume_context.get("gvolname", None)
        options = request.volume_context.get("options", None)

        # Storage volfile options
        storage_options = request.volume_context.get("storage_options", "")
        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)

        pvpath_full = os.path.join(mntdir, pvpath)

        logging.debug(
            logf("Received a valid mount request",
                 request=request,
                 voltype=voltype,
                 hostvol=hostvol,
                 pvpath=pvpath,
                 pvtype=pvtype,
                 pvpath_full=pvpath_full,
                 storage_options=storage_options))

        volume = {
            'name': hostvol,
            'g_volname': gvolname,
            'g_host': gserver,
            'g_options': options,
            'type': voltype,
        }

        mountpoint = mount_glusterfs(volume, mntdir, storage_options, True)

        if voltype == "External":
            logging.debug(
                logf("Mounted Volume for PV",
                     volume=volume,
                     mntdir=mntdir,
                     storage_options=storage_options))
            # return csi_pb2.NodePublishVolumeResponse()

        # When 'storage_options' is configured, the mountpoint and the
        # volfile path change; update pvpath_full accordingly.
        if storage_options != "":
            pvpath_full = os.path.join(mountpoint, pvpath)

        logging.debug(
            logf("Mounted Hosting Volume",
                 pv=request.volume_id,
                 hostvol=hostvol,
                 mntdir=mntdir))
        # Mount the PV
        # TODO: Handle Volume capability mount flags
        if mount_volume(pvpath_full, request.target_path, pvtype, fstype=None):
            logging.info(
                logf("Mounted PV",
                     volume=request.volume_id,
                     pvpath=pvpath,
                     pvtype=pvtype,
                     hostvol=hostvol,
                     target_path=request.target_path,
                     duration_seconds=time.time() - start_time))
        else:
            errmsg = "Unable to bind PV to target path"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
        return csi_pb2.NodePublishVolumeResponse()
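The four guard blocks at the top of this variant repeat the same error/set_details/set_code/return sequence. They could be folded into one loop; the sketch below is not the project's code, it only reuses the field names and error messages already checked above.

import logging

import grpc

# Sketch: consolidate the repeated request checks into a single helper.
def validate_publish_request(request, context):
    """Return True if all required fields are set; otherwise record an
    INVALID_ARGUMENT error on the gRPC context and return False."""
    checks = [
        (request.volume_id, "Volume ID is empty and must be provided"),
        (request.target_path, "Target path is empty and must be provided"),
        (request.volume_capability, "Volume capability is empty and must be provided"),
        (request.volume_context, "Volume context is empty and must be provided"),
    ]
    for value, errmsg in checks:
        if not value:
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return False
    return True

# NodePublishVolume would then start with:
#   if not validate_publish_request(request, context):
#       return csi_pb2.NodePublishVolumeResponse()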
Example #4
    def NodePublishVolume(self, request, context):
        start_time = time.time()
        hostvol = request.volume_context.get("hostvol", "")
        pvpath = request.volume_context.get("path", "")
        pvtype = request.volume_context.get("pvtype", "")
        voltype = request.volume_context.get("type", "")
        gserver = request.volume_context.get("gserver", None)
        gvolname = request.volume_context.get("gvolname", None)
        options = request.volume_context.get("options", None)

        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)

        pvpath_full = os.path.join(mntdir, pvpath)

        logging.debug(
            logf("Received the mount request",
                 volume=request.volume_id,
                 voltype=voltype,
                 hostvol=hostvol,
                 pvpath=pvpath,
                 pvtype=pvtype))

        if voltype == "External":
            # If no separate PV Path, use the whole volume as PV
            if pvpath == "":
                mount_glusterfs_with_host(gvolname, request.target_path,
                                          gserver, options)

                logging.debug(
                    logf("Mounted Volume for PV",
                         volume=request.volume_id,
                         mntdir=request.target_path,
                         pvpath=gserver,
                         options=options))
                return csi_pb2.NodePublishVolumeResponse()

        volume = {
            'name': hostvol,
            'g_volname': gvolname,
            'g_host': gserver,
            'g_options': options,
            'type': voltype,
        }

        mount_glusterfs(volume, mntdir)

        logging.debug(
            logf(
                "Mounted Hosting Volume",
                pv=request.volume_id,
                hostvol=hostvol,
                mntdir=mntdir,
            ))
        # Mount the PV
        # TODO: Handle Volume capability mount flags
        mount_volume(pvpath_full, request.target_path, pvtype, fstype=None)
        logging.info(
            logf("Mounted PV",
                 volume=request.volume_id,
                 pvpath=pvpath,
                 pvtype=pvtype,
                 hostvol=hostvol,
                 duration_seconds=time.time() - start_time))
        return csi_pb2.NodePublishVolumeResponse()
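The External branch in this variant takes a different path depending on whether the volume_context carries a PV sub-path. The two hypothetical contexts below (all values are made up for illustration; only the keys match what the method reads) show the distinction: an empty "path" mounts the whole external Gluster volume directly on target_path, while a non-empty "path" mounts the hosting volume first and then mounts the sub-path as the PV.

# Illustrative volume_context payloads; every value here is hypothetical.

# Whole external Gluster volume used as the PV: "path" is empty, so the
# method mounts gvolname from gserver straight onto request.target_path.
external_whole_volume = {
    "type": "External",
    "hostvol": "ext-store",
    "path": "",
    "pvtype": "subvol",
    "gserver": "gluster1.example.com",
    "gvolname": "gvol1",
    "options": "log-level=DEBUG",
}

# Sub-directory PV carved out of a hosting volume: "path" is set, so the
# hosting volume is mounted under HOSTVOL_MOUNTDIR and pvpath_full is
# mounted onto request.target_path.
subdir_pv = {
    "type": "Replica3",
    "hostvol": "storage-pool-1",
    "path": "subvol/ab/cd/pvc-1234",
    "pvtype": "subvol",
}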
Example #5
    def NodePublishVolume(self, request, context):
        start_time = time.time()
        if not request.volume_id:
            errmsg = "Volume ID is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        if not request.target_path:
            errmsg = "Target path is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        if not request.volume_capability:
            errmsg = "Volume capability is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        if not request.volume_context:
            errmsg = "Volume context is empty and must be provided"
            logging.error(errmsg)
            context.set_details(errmsg)
            context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            return csi_pb2.NodePublishVolumeResponse()

        hostvol = request.volume_context.get("hostvol", "")
        pvpath = request.volume_context.get("path", "")
        pvtype = request.volume_context.get("pvtype", "")
        voltype = request.volume_context.get("type", "")
        gserver = request.volume_context.get("gserver", None)
        gvolname = request.volume_context.get("gvolname", None)
        options = request.volume_context.get("options", None)

        mntdir = os.path.join(HOSTVOL_MOUNTDIR, hostvol)

        pvpath_full = os.path.join(mntdir, pvpath)

        logging.debug(logf(
            "Received a valid mount request",
            request=request,
            voltype=voltype,
            hostvol=hostvol,
            pvpath=pvpath,
            pvtype=pvtype
        ))

        if voltype == "External":
            # If no separate PV Path, use the whole volume as PV
            if pvpath == "":
                mount_glusterfs_with_host(gvolname, request.target_path, gserver, options, True)

                logging.debug(logf(
                    "Mounted Volume for PV",
                    volume=request.volume_id,
                    mntdir=request.target_path,
                    pvpath=gserver,
                    options=options
                ))
                return csi_pb2.NodePublishVolumeResponse()

        volume = {
            'name': hostvol,
            'g_volname': gvolname,
            'g_host': gserver,
            'g_options': options,
            'type': voltype,
        }

        mount_glusterfs(volume, mntdir, True)

        logging.debug(logf(
            "Mounted Hosting Volume",
            pv=request.volume_id,
            hostvol=hostvol,
            mntdir=mntdir,
        ))
        # Mount the PV
        # TODO: Handle Volume capability mount flags
        mount_volume(pvpath_full, request.target_path, pvtype, fstype=None)
        logging.info(logf(
            "Mounted PV",
            volume=request.volume_id,
            pvpath=pvpath,
            pvtype=pvtype,
            hostvol=hostvol,
            duration_seconds=time.time() - start_time
        ))
        return csi_pb2.NodePublishVolumeResponse()
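All five variants time the mount by hand with start_time = time.time() and a duration_seconds field in the final log call. A small context manager expressing the same measurement (a sketch, not something kadalu ships) could look like this:

import time
from contextlib import contextmanager

# Sketch of a timing helper equivalent to the manual start_time /
# duration_seconds pattern used in the examples above.
@contextmanager
def timed():
    """Yield a dict whose "seconds" key holds the elapsed time on exit."""
    elapsed = {"seconds": 0.0}
    start = time.time()
    try:
        yield elapsed
    finally:
        elapsed["seconds"] = time.time() - start

# Usage:
#   with timed() as took:
#       ...mount work...
#   logging.info(logf("Mounted PV", duration_seconds=took["seconds"]))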