def mountdata(self, req):
    """Mount the NAS data path for a primary storage.

    Lays out the common directory structure, mounts cmd.url on
    cmd.dataPath unless it is already mounted there, and returns a
    response carrying the storage capacity.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AliyunNasResponse()

    naslinux.createCommonPath(cmd.mountPath, cmd.basePath)

    # Only mount (and log) when the data path is not yet backed by cmd.url.
    data_mounted = naslinux.is_mounted(cmd.dataPath, cmd.url)
    if not data_mounted:
        linux.mount(cmd.url, cmd.dataPath, cmd.options)
        logger.debug(http.path_msg(self.MOUNT_DATA_PATH, 'mounted %s on %s' % (cmd.url, cmd.dataPath)))

    rsp.mounted = True
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def remount(url, path, options=None):
    """Ensure *url* is mounted at *path*, remounting if already mounted.

    When nothing is mounted at *path* a fresh mount is performed.
    Otherwise `mount -o remount` runs under a 180s timeout so an
    unresponsive NFS server cannot block the caller indefinitely;
    a timeout or failure raises an exception.
    """
    if not is_mounted(path):
        linux.mount(url, path, options)
        return

    remount_cmd = shell.ShellCmd('timeout 180 mount -o remount %s' % path)
    remount_cmd(False)
    rc = remount_cmd.return_code
    if rc == 124:
        # exit status 124 means timeout(1) killed the mount command
        raise Exception('unable to access the mount path[%s] of the nfs primary storage[url:%s] in 180s, timeout' % (path, url))
    if rc != 0:
        remount_cmd.raise_error()
def mount(self, req):
    """Mount an NFS export for a primary storage and report capacity."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()

    url, mount_point = cmd.url, cmd.mountPath
    # raises when the URL is not a valid NFS export specification
    linux.is_valid_nfs_url(url)

    if not linux.is_mounted(mount_point, url):
        linux.mount(url, mount_point, cmd.options)

    self.mount_path[cmd.uuid] = mount_point
    logger.debug(http.path_msg(self.MOUNT_PATH, 'mounted %s on %s' % (url, mount_point)))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def mount(self, req):
    """Mount an NFS export (forced to NFSv4) and report capacity."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()

    # validate before touching the mount table
    linux.is_valid_nfs_url(cmd.url)

    already_mounted = linux.is_mounted(cmd.mountPath, cmd.url)
    if not already_mounted:
        linux.mount(cmd.url, cmd.mountPath, cmd.options, "nfs4")

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug(http.path_msg(self.MOUNT_PATH, 'mounted %s on %s' % (cmd.url, cmd.mountPath)))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def remount(self, req):
    """Remount the primary storage mount point.

    Mounts cmd.url freshly when cmd.mountPath is not currently backed by
    it; otherwise issues `mount -o remount`.

    Fix: the remount now runs under a 180s timeout.  The previous bare
    `shell.call('mount -o remount ...')` could hang the agent thread
    forever on an unresponsive NFS server; the other remount
    implementations in this code base already use this timeout pattern.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    if not linux.is_mounted(cmd.mountPath, cmd.url):
        linux.mount(cmd.url, cmd.mountPath, cmd.options)

    o = shell.ShellCmd('timeout 180 mount -o remount %s' % cmd.mountPath)
    o(False)
    if o.return_code == 124:
        # exit status 124: timeout(1) killed the command
        raise Exception('unable to access the mount path[%s] of the nfs primary storage[url:%s] in 180s, timeout' % (cmd.mountPath, cmd.url))
    elif o.return_code != 0:
        o.raise_error()

    self.mount_path[cmd.uuid] = cmd.mountPath
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def remount(url, path, options=None):
    """Mount *url* at *path* if needed, otherwise remount it in place.

    The remount is wrapped in `timeout 180`; exit code 124 (killed by
    timeout) is turned into a descriptive exception, any other non-zero
    exit raises via the shell helper.
    """
    if not is_mounted(path):
        # nothing mounted yet: a plain mount is sufficient
        linux.mount(url, path, options)
        return

    cmd = shell.ShellCmd('timeout 180 mount -o remount %s' % path)
    cmd(False)
    if cmd.return_code == 124:
        raise Exception(
            'unable to access the mount path[%s] of the nfs primary storage[url:%s] in 180s, timeout' % (path, url))
    elif cmd.return_code != 0:
        cmd.raise_error()
def migrate_bits(self, req):
    """Copy a folder from this NFS primary storage to another one.

    When the destination storage is not mounted (cmd.isMounted is false)
    it is temporarily mounted as NFSv4 under a private tempdir, which is
    cleaned up afterwards.  After copying, md5 sums of the source and
    destination trees are compared; a mismatch marks the response failed.

    Fix: the cleanup log call passed a single tuple for two %s
    placeholders (`logger.warn(fmt, (a, b))`), which makes the logging
    call itself raise a formatting error exactly when rmdir fails; it now
    formats the message correctly.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = NfsToNfsMigrateBitsRsp()

    mount_path = cmd.mountPath
    dst_folder_path = cmd.dstFolderPath
    temp_dir = None
    try:
        if not cmd.isMounted:
            linux.is_valid_nfs_url(cmd.url)
            temp_dir = tempfile.mkdtemp()
            # dst folder is absolute path -- prefix both with the tempdir
            mount_path = temp_dir + mount_path
            dst_folder_path = temp_dir + dst_folder_path
            if not linux.is_mounted(mount_path, cmd.url):
                linux.mount(cmd.url, mount_path, cmd.options, "nfs4")

        # Report task progress based on flow chain for now
        # To get more accurate progress, we need to report from here someday

        # begin migration, then check md5 sums
        shell.call("mkdir -p %s; cp -r %s/* %s; sync" % (dst_folder_path, cmd.srcFolderPath, dst_folder_path))
        src_md5 = shell.call("find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % cmd.srcFolderPath)
        dst_md5 = shell.call("find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % dst_folder_path)
        if src_md5 != dst_md5:
            rsp.error = "failed to copy files from %s to %s, md5sum not match" % (cmd.srcFolderPath, dst_folder_path)
            rsp.success = False

        if not cmd.isMounted:
            linux.umount(mount_path)
    finally:
        if temp_dir is not None:
            # grep exit != 0 means nothing is mounted under temp_dir any more
            return_code = shell.run("mount | grep '%s'" % temp_dir)
            if return_code != 0:
                # in case dir is not empty
                try:
                    os.rmdir(temp_dir)
                except OSError as e:
                    # BUGFIX: was logger.warn(fmt, (temp_dir, str(e)))
                    logger.warn("delete temp_dir %s failed: %s" % (temp_dir, str(e)))
            else:
                logger.warn("temp_dir %s still had mounted destination primary storage, skip cleanup operation" % temp_dir)

    return jsonobject.dumps(rsp)
def update_mount_point(self, req):
    """Re-point cmd.mountPath from the old NFS export to a new one (NFSv4)."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = UpdateMountPointResponse()

    new_url = cmd.newMountPoint
    linux.is_valid_nfs_url(new_url)

    if not linux.is_mounted(cmd.mountPath, new_url):
        # umount old one
        if linux.is_mounted(cmd.mountPath, cmd.oldMountPoint):
            linux.umount(cmd.mountPath)
        # mount new
        linux.mount(new_url, cmd.mountPath, cmd.options, "nfs4")

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug('updated the mount path[%s] mounting point from %s to %s' % (cmd.mountPath, cmd.oldMountPoint, new_url))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def update_mount_point(self, req):
    """Swap the export behind cmd.mountPath from the old URL to the new one."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = UpdateMountPointResponse()
    linux.is_valid_nfs_url(cmd.newMountPoint)

    needs_switch = not linux.is_mounted(cmd.mountPath, cmd.newMountPoint)
    if needs_switch:
        # detach the old export first, if it is the one currently mounted
        if linux.is_mounted(cmd.mountPath, cmd.oldMountPoint):
            linux.umount(cmd.mountPath)
        # then attach the new export at the same path
        linux.mount(cmd.newMountPoint, cmd.mountPath, cmd.options)

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug('updated the mount path[%s] mounting point from %s to %s' % (cmd.mountPath, cmd.oldMountPoint, cmd.newMountPoint))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def init(self, req):
    '''
    cmd.url --> domain:/ps-[uuid]
    cmd.mountPath --> /opt/ps
    cmd.common --> /opt/ps/commons
    cmd.data --> /opt/ps/datas
    cmd.dirs --> []
    '''
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = InitResponse()
    linux.is_valid_nfs_url(cmd.url)

    # example:
    #   1. mount url: /opt/ps
    #   2. mkdir /opt/ps/ps-[uuid]
    #   3. mkdir /opt/ps/ps-[uuid]/commons/xxx.. (such as heartbeat, cache, ..)
    # at last we get url:/ps-[uuid] for hosts mount
    domain = cmd.url.split(':')[0] + ":/"
    ps_dir = cmd.url.split(':')[1].lstrip('/')
    basedir = os.path.join(cmd.mountPath, ps_dir)

    # refuse to proceed when cmd.mountPath is already bound to a different url
    if linux.is_mounted(path=cmd.mountPath) and not naslinux.is_mounted(cmd.mountPath, cmd.url):
        raise Exception('mountPath[%s] already mount to another url' % cmd.mountPath)

    # mount the NAS root, create the storage layout, then release the root
    linux.mount(domain, cmd.mountPath, cmd.options)
    shell.call('mkdir -p %s' % basedir)
    for sub_dir in cmd.dirs:
        shell.call('mkdir -p %s' % os.path.join(basedir, sub_dir))
    linux.umount(cmd.mountPath)

    # mount the ps-specific export onto the commons directory
    common_dir = os.path.join(cmd.mountPath, cmd.common)
    data_dir = os.path.join(cmd.mountPath, cmd.data)
    shell.call('mkdir -p %s' % common_dir)
    shell.call('mkdir -p %s' % data_dir)
    linux.mount(cmd.url, common_dir, cmd.options)

    rsp.mounted = True
    self.mount_path[cmd.uuid] = common_dir
    self._set_capacity_to_response(cmd.uuid, rsp)
    self.uuid = cmd.uuid
    return jsonobject.dumps(rsp)
def remount(self, req):
    """Remount the storage path, mounting it first when it is absent.

    The remount runs under a 180s timeout; timing out or failing raises.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    if not linux.is_mounted(cmd.mountPath, cmd.url):
        linux.mount(cmd.url, cmd.mountPath, cmd.options)

    remount_cmd = shell.ShellCmd('timeout 180 mount -o remount %s' % cmd.mountPath)
    remount_cmd(False)
    if remount_cmd.return_code == 124:
        # 124: timeout(1) killed the command
        raise Exception('unable to access the mount path[%s] of the nfs primary storage[url:%s] in 180s, timeout' % (cmd.mountPath, cmd.url))
    if remount_cmd.return_code != 0:
        remount_cmd.raise_error()

    self.mount_path[cmd.uuid] = cmd.mountPath
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def mount(self, req):
    """Mount cmd.url as NFSv4 and verify file locking works on the mount."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    if not linux.is_mounted(cmd.mountPath, cmd.url):
        linux.mount(cmd.url, cmd.mountPath, cmd.options, "nfs4")

    # Probe flock support on the fresh mount; if the NFS server/options do
    # not support locks, undo the mount and fail loudly.
    with tempfile.TemporaryFile(dir=cmd.mountPath) as probe:
        try:
            fcntl.flock(probe.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            linux.umount(cmd.mountPath)
            raise Exception('File lock unavailable on NFS: {}, mount options: {}'.format(cmd.url, cmd.options))

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug(http.path_msg(self.MOUNT_PATH, 'mounted %s on %s' % (cmd.url, cmd.mountPath)))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def migrate_bits(self, req):
    """Migrate a folder to another NFS primary storage using rsync.

    When the destination is not already mounted (cmd.isMounted false), it
    is temporarily mounted as NFSv4 under a private tempdir.  rsync's
    --progress output is streamed through a scratch file (PFILE) and
    parsed to report percent progress, then md5 sums of both trees are
    compared (cmd.filtPaths entries are excluded from both the copy and
    the checksums).

    Fix: the cleanup log call passed one tuple for two %s placeholders
    (`logger.warn(fmt, (a, b))`), so the logging call itself raised a
    formatting error exactly when rmdir failed; it now formats correctly.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = NfsToNfsMigrateBitsRsp()

    mount_path = cmd.mountPath
    dst_folder_path = cmd.dstFolderPath
    temp_dir = None

    # scratch file that captures rsync's --progress output
    fd, PFILE = tempfile.mkstemp()
    os.close(fd)
    f = open(PFILE, 'r')
    try:
        if not cmd.isMounted:
            linux.is_valid_nfs_url(cmd.url)
            temp_dir = tempfile.mkdtemp()
            # dst folder is absolute path -- prefix both with the tempdir
            mount_path = temp_dir + mount_path
            dst_folder_path = temp_dir + dst_folder_path
            if not linux.is_mounted(mount_path, cmd.url):
                linux.mount(cmd.url, mount_path, cmd.options, "nfs4")

        # begin migration, then check md5 sums
        linux.mkdir(dst_folder_path)
        t_shell = traceable_shell.get_shell(cmd)
        rsync_excludes = ""
        md5_excludes = ""
        if cmd.filtPaths:
            for filtPath in cmd.filtPaths:
                # filtPath cannot start with '/', because it must be a relative path
                if filtPath.startswith('/'):
                    filtPath = filtPath[1:]
                if filtPath != '':
                    rsync_excludes = rsync_excludes + " --exclude=%s" % filtPath
                    md5_excludes = md5_excludes + " ! -path %s/%s" % (cmd.srcFolderPath, filtPath)

        # dry-run rsync to learn the total byte count for percent reporting
        total_size = int(shell.call("rsync -aznv %s/ %s %s | grep -o -P 'total size is \K\d*'" % (cmd.srcFolderPath, dst_folder_path, rsync_excludes)))
        stage = get_task_stage(cmd)
        reporter = Report.from_spec(cmd, "MigrateVolume")

        def _get_progress(synced):
            # Parse new rsync --progress lines from PFILE: lines ending in
            # '\n' with a 100% mark are finished files (added to `synced`);
            # a '\r'-terminated line is the file currently being written.
            def get_written(regex):
                matcher = re.match(regex, line)
                return int(matcher.group(1)) if matcher else 0

            lines = f.readlines()
            writing = 0
            for line in lines:
                if line[1] == ' ' and line[-1] == '\n':
                    synced += get_written(r'\s.*?(\d+)\s+100%')
                elif line[-1] == '\r' and line[1] == ' ':
                    writing = get_written(r'.*?(\d+)\s+\d+%[^\r]*\r$')
            reporter.progress_report(get_exact_percent(float(synced + writing) / total_size * 100, stage))
            return synced

        t_shell.bash_progress_1("rsync -az --progress %s/ %s %s > %s" % (cmd.srcFolderPath, dst_folder_path, rsync_excludes, PFILE), _get_progress)

        src_md5 = t_shell.call("find %s -type f %s -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % (cmd.srcFolderPath, md5_excludes))
        dst_md5 = t_shell.call("find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % dst_folder_path)
        if src_md5 != dst_md5:
            rsp.error = "failed to copy files from %s to %s, md5sum not match" % (cmd.srcFolderPath, dst_folder_path)
            rsp.success = False

        if not cmd.isMounted:
            linux.umount(mount_path)
    finally:
        if temp_dir is not None:
            # grep exit != 0 means nothing is mounted under temp_dir any more
            return_code = shell.run("mount | grep '%s'" % temp_dir)
            if return_code != 0:
                # in case dir is not empty
                try:
                    os.rmdir(temp_dir)
                except OSError as e:
                    # BUGFIX: was logger.warn(fmt, (temp_dir, str(e)))
                    logger.warn("delete temp_dir %s failed: %s" % (temp_dir, str(e)))
            else:
                logger.warn("temp_dir %s still had mounted destination primary storage, skip cleanup operation" % temp_dir)
        f.close()
        linux.rm_file_force(PFILE)

    return jsonobject.dumps(rsp)