def is_mounted(path=None, url=None):
    """Check whether *path*/*url* is mounted, tolerating both NFS url spellings.

    Without a url this is a plain mount-table check.  With a url, a miss is
    retried with the ":/" separator rewritten to "://" so that both notations
    of the same export ("host:/path" vs "host://path") match.
    """
    if linux.is_mounted(path, url):
        return True
    if not url:
        return False
    # retry with the alternative separator form of the same export url
    return linux.is_mounted(path, url.replace(":/", "://"))
def setup_heartbeat_file(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) rsp = SetupMountablePrimaryStorageHeartbeatResponse() for hb in cmd.heartbeatFilePaths: hb_dir = os.path.dirname(hb) mount_path = os.path.dirname(hb_dir) if not linux.is_mounted(mount_path): rsp.error = '%s is not mounted, setup heartbeat file[%s] failed' % ( mount_path, hb) rsp.success = False return jsonobject.dumps(rsp) for hb in cmd.heartbeatFilePaths: t = self.heartbeat_timer.get(hb, None) if t: t.cancel() hb_dir = os.path.dirname(hb) if not os.path.exists(hb_dir): os.makedirs(hb_dir, 0755) t = thread.timer(cmd.heartbeatInterval, self._heartbeat_func, args=[hb], stop_on_exception=False) t.start() self.heartbeat_timer[hb] = t logger.debug('create heartbeat file at[%s]' % hb) return jsonobject.dumps(rsp)
def update_mount_point(self, req):
    """Move the mount path from the old NFS mounting point to a new one (nfs4)."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = UpdateMountPointResponse()
    linux.is_valid_nfs_url(cmd.newMountPoint)

    if not linux.is_mounted(cmd.mountPath, cmd.newMountPoint):
        # detach the stale mounting point before attaching the new one
        if linux.is_mounted(cmd.mountPath, cmd.oldMountPoint):
            linux.umount(cmd.mountPath)
        linux.mount(cmd.newMountPoint, cmd.mountPath, cmd.options, "nfs4")

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug('updated the mount path[%s] mounting point from %s to %s' % (cmd.mountPath, cmd.oldMountPoint, cmd.newMountPoint))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def update_mount_point(self, req):
    """Move the mount path from the old NFS mounting point to a new one."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = UpdateMountPointResponse()
    linux.is_valid_nfs_url(cmd.newMountPoint)

    new_point_mounted = linux.is_mounted(cmd.mountPath, cmd.newMountPoint)
    if not new_point_mounted:
        # drop the old mounting point first, if it is still attached
        if linux.is_mounted(cmd.mountPath, cmd.oldMountPoint):
            linux.umount(cmd.mountPath)
        linux.mount(cmd.newMountPoint, cmd.mountPath, cmd.options)

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug('updated the mount path[%s] mounting point from %s to %s' % (cmd.mountPath, cmd.oldMountPoint, cmd.newMountPoint))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def try_remount_fs():
    # Best-effort recovery loop: keep retrying the remount until the storage
    # is reachable again or this fencer run is superseded.
    # NOTE(review): mount_path, ps_uuid, created_time, url, options and the
    # helper functions come from the enclosing scope (closure).
    if mount_path_is_nfs(mount_path):
        # an NFS remount requires the client-side services to be running
        shell.run("systemctl start nfs-client.target")
    while self.run_filesystem_fencer(ps_uuid, created_time):
        if linux.is_mounted(
                path=mount_path) and touch_heartbeat_file():
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug(
                "fs[uuid:%s] is reachable again, report to management" % ps_uuid)
            break
        try:
            logger.debug(
                'fs[uuid:%s] is unreachable, it will be remounted after 180s' % ps_uuid)
            time.sleep(180)
            # re-check after the long sleep: a newer fencer may have replaced this one
            if not self.run_filesystem_fencer(
                    ps_uuid, created_time):
                break
            linux.remount(url, mount_path, options)
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug(
                "remount fs[uuid:%s] success, report to management" % ps_uuid)
            break
        except:
            # deliberate catch-all: one failed remount attempt must not end the loop
            logger.warn(
                'remount fs[uuid:%s] fail, try again soon' % ps_uuid)
            # processes still holding the dead mount would block the next remount
            kill_progresses_using_mount_path(mount_path)
    logger.debug('stop remount fs[uuid:%s]' % ps_uuid)
def ping(self, req):
    """Verify the NFS primary storage mount is alive by touching a test file.

    Raises an exception when the mount path is missing/unmounted, or when the
    touch does not finish within 60 seconds (hung NFS server).
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    # idiom fix: membership test directly on the dict instead of .keys()
    if cmd.uuid not in self.mount_path:
        self.mount_path[cmd.uuid] = cmd.mountPath
    mount_path = self.mount_path[cmd.uuid]
    # if nfs service stops, os.path.isdir would hang -- use the timeout variant
    if not linux.timeout_isdir(mount_path) or not linux.is_mounted(
            path=mount_path):
        raise Exception(
            'the mount path[%s] of the nfs primary storage[uuid:%s] is not existing'
            % (mount_path, cmd.uuid))

    test_file = os.path.join(mount_path,
                             '%s-ping-test-file' % uuidhelper.uuid())
    touch = shell.ShellCmd('timeout 60 touch %s' % test_file)
    touch(False)
    if touch.return_code == 124:
        # 124 is the exit status `timeout` uses when the command timed out
        raise Exception(
            'unable to access the mount path[%s] of the nfs primary storage[uuid:%s] in 60s, timeout'
            % (mount_path, cmd.uuid))
    elif touch.return_code != 0:
        touch.raise_error()

    linux.rm_file_force(test_file)
    return jsonobject.dumps(NfsResponse())
def setup_heartbeat_file(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) rsp = SetupMountablePrimaryStorageHeartbeatResponse() for hb in cmd.heartbeatFilePaths: hb_dir = os.path.dirname(hb) mount_path = os.path.dirname(hb_dir) if not linux.is_mounted(mount_path): rsp.error = '%s is not mounted, setup heartbeat file[%s] failed' % (mount_path, hb) rsp.success = False return jsonobject.dumps(rsp) for hb in cmd.heartbeatFilePaths: t = self.heartbeat_timer.get(hb, None) if t: t.cancel() hb_dir = os.path.dirname(hb) if not os.path.exists(hb_dir): os.makedirs(hb_dir, 0755) t = thread.timer(cmd.heartbeatInterval, self._heartbeat_func, args=[hb], stop_on_exception=False) t.start() self.heartbeat_timer[hb] = t logger.debug('create heartbeat file at[%s]' % hb) return jsonobject.dumps(rsp)
def umount(self, req):
    """Unmount the NFS primary storage; always reply success (best-effort).

    A failed umount is only logged -- the management node treats unmount as
    idempotent and must not fail on it.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = UnmountResponse()
    if linux.is_mounted(path=cmd.mountPath):
        ret = linux.umount(cmd.mountPath)
        if not ret:
            logger.warn(http.path_msg(self.UNMOUNT_PATH, 'unmount %s from %s failed' % (cmd.mountPath, cmd.url)))
        else:
            # fix: only log "umounted" when the umount actually succeeded;
            # previously this debug line fired even after a failed umount
            logger.debug(http.path_msg(self.UNMOUNT_PATH, 'umounted %s from %s' % (cmd.mountPath, cmd.url)))
    return jsonobject.dumps(rsp)
def prepare_heartbeat_dir(): heartbeat_dir = os.path.join(mount_path, "zs-heartbeat") if not mounted_by_zstack or linux.is_mounted(mount_path): if not os.path.exists(heartbeat_dir): os.makedirs(heartbeat_dir, 0755) else: if os.path.exists(heartbeat_dir): linux.rm_dir_force(heartbeat_dir) return heartbeat_dir
def test_unmount_always_success(self):
    """Unmounting a path that was never mounted must still report success."""
    cmd = nfs_primarystorage_plugin.UnmountCmd()
    cmd.url = NFS_URL
    cmd.mountPath = '/not_mounted_path'
    callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.UNMOUNT_PATH])
    raw_reply = http.json_dump_post(callurl, cmd)
    rsp = jsonobject.loads(raw_reply)
    self.assertTrue(rsp.success, rsp.error)
    self.assertFalse(linux.is_mounted(path=cmd.mountPath))
def mount(self):
    """Mount the test NFS export on a fresh unique path and verify it mounted."""
    cmd = nfs_primarystorage_plugin.MountCmd()
    cmd.url = self.NFS_URL
    cmd.mountPath = os.path.join('/mnt', uuidhelper.uuid())
    callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.MOUNT_PATH])
    raw_reply = http.json_dump_post(callurl, cmd)
    rsp = jsonobject.loads(raw_reply)
    self.assertTrue(rsp.success, rsp.error)
    self.assertTrue(linux.is_mounted(cmd.url, cmd.mountPath))
def test_unmount_always_success(self):
    """Unmount of a never-mounted path must still succeed (aliyun agent)."""
    cmd = nfs_primarystorage_plugin.UnmountCmd()
    cmd.url = NFS_URL
    cmd.mountPath = '/not_mounted_path'
    callurl = aliyunagent._build_url_for_test([nfs_primarystorage_plugin.UNMOUNT_PATH])
    reply = http.json_dump_post(callurl, cmd)
    rsp = jsonobject.loads(reply)
    self.assertTrue(rsp.success, rsp.error)
    self.assertFalse(linux.is_mounted(path=cmd.mountPath))
def remount(self, req):
    """Remount the NFS primary storage, mounting it first when absent."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    if not linux.is_mounted(cmd.mountPath, cmd.url):
        linux.mount(cmd.url, cmd.mountPath, cmd.options)
    shell.call('mount -o remount %s' % cmd.mountPath)

    self.mount_path[cmd.uuid] = cmd.mountPath
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def mount(self, req):
    """Mount an NFS url as nfs4 on the requested path and report capacity."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    already_mounted = linux.is_mounted(cmd.mountPath, cmd.url)
    if not already_mounted:
        linux.mount(cmd.url, cmd.mountPath, cmd.options, "nfs4")

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug(http.path_msg(self.MOUNT_PATH, 'mounted %s on %s' % (cmd.url, cmd.mountPath)))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def mount(self, req):
    """Mount an NFS url on the requested path and report capacity."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    mounted = linux.is_mounted(cmd.mountPath, cmd.url)
    if not mounted:
        linux.mount(cmd.url, cmd.mountPath, cmd.options)

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug(http.path_msg(self.MOUNT_PATH, 'mounted %s on %s' % (cmd.url, cmd.mountPath)))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def migrate_bits(self, req):
    """Copy a folder tree between NFS primary storages and verify it via md5.

    When the destination is not mounted yet, it is temporarily mounted under
    a fresh temp dir which is cleaned up in the finally block.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = NfsToNfsMigrateBitsRsp()
    mount_path = cmd.mountPath
    dst_folder_path = cmd.dstFolderPath
    temp_dir = None
    try:
        if not cmd.isMounted:
            linux.is_valid_nfs_url(cmd.url)
            temp_dir = tempfile.mkdtemp()
            # dst folder is absolute path
            mount_path = temp_dir + mount_path
            dst_folder_path = temp_dir + dst_folder_path
            if not linux.is_mounted(mount_path, cmd.url):
                linux.mount(cmd.url, mount_path, cmd.options, "nfs4")

        # Report task progress based on flow chain for now
        # To get more accurate progress, we need to report from here someday

        # begin migration, then check md5 sums
        shell.call("mkdir -p %s; cp -r %s/* %s; sync" % (dst_folder_path, cmd.srcFolderPath, dst_folder_path))
        src_md5 = shell.call(
            "find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % cmd.srcFolderPath)
        dst_md5 = shell.call(
            "find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % dst_folder_path)
        if src_md5 != dst_md5:
            rsp.error = "failed to copy files from %s to %s, md5sum not match" % (cmd.srcFolderPath, dst_folder_path)
            rsp.success = False

        if not cmd.isMounted:
            linux.umount(mount_path)
    finally:
        if temp_dir is not None:
            # grep exits 0 when the temp dir still appears in the mount table
            return_code = shell.run("mount | grep '%s'" % temp_dir)
            if return_code != 0:
                # in case dir is not empty
                try:
                    os.rmdir(temp_dir)
                except OSError as e:
                    # fix: the original passed one tuple as a single lazy-format
                    # argument to two %s placeholders, which makes the logging
                    # call raise a formatting error; interpolate explicitly.
                    logger.warn("delete temp_dir %s failed: %s" % (temp_dir, str(e)))
            else:
                logger.warn("temp_dir %s still had mounted destination primary storage, skip cleanup operation" % temp_dir)
    return jsonobject.dumps(rsp)
def init(self, req):
    '''
    Initialize a NAS-backed primary storage: create its directory layout on
    the NAS root, then mount the per-storage export for actual use.

    cmd.url --> domain:/ps-[uuid]
    cmd.mountPath --> /opt/ps
    cmd.common --> /opt/ps/commons
    cmd.data --> /opt/ps/datas
    cmd.dirs --> []
    '''
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = InitResponse()
    linux.is_valid_nfs_url(cmd.url)
    '''
    example:
      1. mount url: /opt/ps
      2. mkdir /opt/ps/ps-[uuid]
      3. mkdir /opt/ps/ps-[uuid]/commons/xxx.. (such as heartbeat, cache, ..)
    at last we get url:/ps-[uuid] for hosts mount
    '''
    # split "domain:/ps-uuid" into the NAS domain root and the export dir name
    domain = cmd.url.split(':')[0] + ":/"
    psDir = cmd.url.split(':')[1].lstrip('/')
    basedir = os.path.join(cmd.mountPath, psDir)
    '''
    check if mounted {cmd.mountPath}
    '''
    # refuse to proceed if the mount path is already bound to a different url
    if linux.is_mounted(path=cmd.mountPath) and not naslinux.is_mounted(cmd.mountPath, cmd.url):
        raise Exception('mountPath[%s] already mount to another url' % cmd.mountPath)

    # temporarily mount the NAS domain root to create the storage layout
    linux.mount(domain, cmd.mountPath, cmd.options)
    shell.call('mkdir -p %s' % basedir)
    for dir in cmd.dirs:
        shell.call('mkdir -p %s' % os.path.join(basedir, dir))
    linux.umount(cmd.mountPath)

    # mount the per-storage export on the "common" dir for day-to-day use
    common_dir = os.path.join(cmd.mountPath, cmd.common)
    data_dir = os.path.join(cmd.mountPath, cmd.data)
    shell.call('mkdir -p %s' % common_dir)
    shell.call('mkdir -p %s' % data_dir)
    linux.mount(cmd.url, common_dir, cmd.options)
    rsp.mounted = True

    self.mount_path[cmd.uuid] = common_dir
    self._set_capacity_to_response(cmd.uuid, rsp)
    self.uuid = cmd.uuid
    return jsonobject.dumps(rsp)
def ping(self, req):
    """Probe the NFS primary storage by creating and removing a test file."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    mount_path = self.mount_path[cmd.uuid]
    # os.path.isdir can hang on a dead NFS server; use the timeout-guarded check
    mount_alive = linux.timeout_isdir(mount_path) and linux.is_mounted(path=mount_path)
    if not mount_alive:
        raise Exception('the mount path[%s] of the nfs primary storage[uuid:%s] is not existing' % (mount_path, cmd.uuid))

    test_file = os.path.join(mount_path, '%s-ping-test-file' % uuidhelper.uuid())
    touch = shell.ShellCmd('timeout 60 touch %s' % test_file)
    touch(False)
    if touch.return_code == 124:
        # exit status 124 means `timeout` killed the touch command
        raise Exception('unable to access the mount path[%s] of the nfs primary storage[uuid:%s] in 60s, timeout' % (mount_path, cmd.uuid))
    if touch.return_code != 0:
        touch.raise_error()

    linux.rm_file_force(test_file)
    return jsonobject.dumps(NfsResponse())
def remount(self, req):
    """Remount the NFS share, bounding the remount call to 180 seconds."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    if not linux.is_mounted(cmd.mountPath, cmd.url):
        linux.mount(cmd.url, cmd.mountPath, cmd.options)

    remount_cmd = shell.ShellCmd('timeout 180 mount -o remount %s' % cmd.mountPath)
    remount_cmd(False)
    if remount_cmd.return_code == 124:
        # exit status 124 means `timeout` killed the remount
        raise Exception('unable to access the mount path[%s] of the nfs primary storage[url:%s] in 180s, timeout' % (cmd.mountPath, cmd.url))
    if remount_cmd.return_code != 0:
        remount_cmd.raise_error()

    self.mount_path[cmd.uuid] = cmd.mountPath
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def mount(self, req):
    """Mount an nfs4 export and verify advisory file locking works on it.

    When flock is unavailable the fresh mount is undone and the request
    fails, since lock-dependent features would silently misbehave otherwise.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = MountResponse()
    linux.is_valid_nfs_url(cmd.url)

    if not linux.is_mounted(cmd.mountPath, cmd.url):
        linux.mount(cmd.url, cmd.mountPath, cmd.options, "nfs4")
        # probe NFS locking with a throwaway file on the new mount
        with tempfile.TemporaryFile(dir=cmd.mountPath) as probe:
            try:
                fcntl.flock(probe.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)
            except IOError:
                linux.umount(cmd.mountPath)
                raise Exception(
                    'File lock unavailable on NFS: {}, mount options: {}'.
                    format(cmd.url, cmd.options))

    self.mount_path[cmd.uuid] = cmd.mountPath
    logger.debug(
        http.path_msg(self.MOUNT_PATH,
                      'mounted %s on %s' % (cmd.url, cmd.mountPath)))
    self._set_capacity_to_response(cmd.uuid, rsp)
    return jsonobject.dumps(rsp)
def try_remount_fs():
    # Retry loop: keep attempting to remount the fenced filesystem until it is
    # reachable again or this fencer instance has been superseded.
    # NOTE(review): mount_path, ps_uuid, created_time, url, options and the
    # helper functions are closure variables from the enclosing scope.
    if mount_path_is_nfs(mount_path):
        # the NFS client services must be up for a remount to succeed
        shell.run("systemctl start nfs-client.target")
    while self.run_fencer(ps_uuid, created_time):
        if linux.is_mounted(path=mount_path) and touch_heartbeat_file():
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug("fs[uuid:%s] is reachable again, report to management" % ps_uuid)
            break
        try:
            logger.debug('fs[uuid:%s] is unreachable, it will be remounted after 180s' % ps_uuid)
            time.sleep(180)
            # re-check after the wait: a newer fencer may have replaced this one
            if not self.run_fencer(ps_uuid, created_time):
                break
            linux.remount(url, mount_path, options)
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug("remount fs[uuid:%s] success, report to management" % ps_uuid)
            break
        except:
            # deliberate catch-all so one failed attempt never stops the retry loop
            logger.warn('remount fs[uuid:%s] fail, try again soon' % ps_uuid)
            # kill whatever still holds the dead mount so the next remount can work
            kill_progresses_using_mount_path(mount_path)
    logger.debug('stop remount fs[uuid:%s]' % ps_uuid)
def check_nfs_mounted(self, mount_path):
    """Raise if *mount_path* does not currently hold an NFS mount."""
    if linux.is_mounted(mount_path):
        return
    raise Exception('NFS not mounted on: %s' % mount_path)
def migrate_bits(self, req):
    # Copy a folder tree between NFS primary storages with rsync, reporting
    # live progress parsed from rsync's --progress output, then verify the
    # copy by comparing aggregated md5 sums of both trees.
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = NfsToNfsMigrateBitsRsp()
    mount_path = cmd.mountPath
    dst_folder_path = cmd.dstFolderPath
    temp_dir = None
    # temp file capturing rsync's progress output; re-read in _get_progress
    fd, PFILE = tempfile.mkstemp()
    os.close(fd)
    f = open(PFILE, 'r')
    try:
        if not cmd.isMounted:
            linux.is_valid_nfs_url(cmd.url)
            temp_dir = tempfile.mkdtemp()
            # dst folder is absolute path
            mount_path = temp_dir + mount_path
            dst_folder_path = temp_dir + dst_folder_path
            if not linux.is_mounted(mount_path, cmd.url):
                linux.mount(cmd.url, mount_path, cmd.options, "nfs4")

        # begin migration, then check md5 sums
        linux.mkdir(dst_folder_path)
        t_shell = traceable_shell.get_shell(cmd)
        rsync_excludes = ""
        md5_excludes = ""
        if cmd.filtPaths:
            for filtPath in cmd.filtPaths:
                # filtPath cannot start with '/', because it must be a relative path
                if filtPath.startswith('/'):
                    filtPath = filtPath[1:]
                if filtPath != '':
                    rsync_excludes = rsync_excludes + " --exclude=%s" % filtPath
                    md5_excludes = md5_excludes + " ! -path %s/%s" % (
                        cmd.srcFolderPath, filtPath)

        # dry-run rsync (-n) just to learn the total byte count for percentages
        total_size = int(
            shell.call(
                "rsync -aznv %s/ %s %s | grep -o -P 'total size is \K\d*'" %
                (cmd.srcFolderPath, dst_folder_path, rsync_excludes)))
        stage = get_task_stage(cmd)
        reporter = Report.from_spec(cmd, "MigrateVolume")

        def _get_progress(synced):
            # Called periodically during the rsync: `synced` accumulates bytes
            # of files already fully transferred; `writing` is the in-flight
            # byte count of the file currently being copied.
            def get_written(regex):
                matcher = re.match(regex, line)
                return int(matcher.group(1)) if matcher else 0

            lines = f.readlines()
            writing = 0
            for line in lines:
                # a '\n'-terminated line means a file reached 100%
                if line[1] == ' ' and line[-1] == '\n':
                    synced += get_written(r'\s.*?(\d+)\s+100%')
                # a '\r'-terminated line is an in-progress update
                elif line[-1] == '\r' and line[1] == ' ':
                    writing = get_written(r'.*?(\d+)\s+\d+%[^\r]*\r$')
            reporter.progress_report(
                get_exact_percent(
                    float(synced + writing) / total_size * 100, stage))
            return synced

        t_shell.bash_progress_1(
            "rsync -az --progress %s/ %s %s > %s" %
            (cmd.srcFolderPath, dst_folder_path, rsync_excludes, PFILE),
            _get_progress)

        src_md5 = t_shell.call(
            "find %s -type f %s -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum"
            % (cmd.srcFolderPath, md5_excludes))
        dst_md5 = t_shell.call(
            "find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum"
            % dst_folder_path)
        if src_md5 != dst_md5:
            rsp.error = "failed to copy files from %s to %s, md5sum not match" % (
                cmd.srcFolderPath, dst_folder_path)
            rsp.success = False
        if not cmd.isMounted:
            linux.umount(mount_path)
    finally:
        if temp_dir is not None:
            # grep exits 0 when temp_dir still appears in the mount table
            return_code = shell.run("mount | grep '%s'" % temp_dir)
            if return_code != 0:
                # in case dir is not empty
                try:
                    os.rmdir(temp_dir)
                except OSError as e:
                    # NOTE(review): the tuple is passed as ONE lazy-format arg
                    # against two %s placeholders -- this logging call will
                    # mis-format; should be %-interpolated or passed as two
                    # separate args. Left as-is (doc-only change).
                    logger.warn("delete temp_dir %s failed: %s", (temp_dir, str(e)))
            else:
                logger.warn(
                    "temp_dir %s still had mounted destination primary storage, skip cleanup operation"
                    % temp_dir)
        f.close()
        linux.rm_file_force(PFILE)
    return jsonobject.dumps(rsp)
def heartbeat_file_fencer(mount_path, ps_uuid, mounted_by_zstack, url, options):
    # Self-fencer for a filesystem primary storage: periodically touch a
    # heartbeat file on the mount; after cmd.maxAttempts consecutive failures,
    # kill the VMs using the storage and try to recover the mount.
    # NOTE(review): `cmd`, `kill_vm`, `kill_and_umount`, `clean_network_config`,
    # `kill_progresses_using_mount_path`, `mount_path_is_nfs`, `UmountException`
    # come from the enclosing scope.

    def try_remount_fs():
        # retry remounting until the fs is reachable or this fencer is superseded
        if mount_path_is_nfs(mount_path):
            shell.run("systemctl start nfs-client.target")
        while self.run_fencer(ps_uuid, created_time):
            if linux.is_mounted(
                    path=mount_path) and touch_heartbeat_file():
                self.report_storage_status([ps_uuid], 'Connected')
                logger.debug(
                    "fs[uuid:%s] is reachable again, report to management"
                    % ps_uuid)
                break
            try:
                logger.debug(
                    'fs[uuid:%s] is unreachable, it will be remounted after 180s'
                    % ps_uuid)
                time.sleep(180)
                # re-check after the wait: a newer fencer may have replaced this one
                if not self.run_fencer(ps_uuid, created_time):
                    break
                linux.remount(url, mount_path, options)
                self.report_storage_status([ps_uuid], 'Connected')
                logger.debug(
                    "remount fs[uuid:%s] success, report to management"
                    % ps_uuid)
                break
            except:
                # catch-all: one failed attempt must not end the retry loop
                logger.warn(
                    'remount fs[uuid:%s] fail, try again soon' % ps_uuid)
                kill_progresses_using_mount_path(mount_path)
        logger.debug('stop remount fs[uuid:%s]' % ps_uuid)

    def after_kill_vm():
        # umount only applies to storage that zstack itself mounted
        if not killed_vm_pids or not mounted_by_zstack:
            return
        try:
            kill_and_umount(mount_path, mount_path_is_nfs(mount_path))
        except UmountException:
            # if the killed VM processes are still alive, the fs is wedged;
            # dump diagnostics and ask the operator to force-umount
            if shell.run('ps -p %s' % ' '.join(killed_vm_pids)) == 0:
                virsh_list = shell.call(
                    "timeout 10 virsh list --all || echo 'cannot obtain virsh list'"
                )
                logger.debug("virsh_list:\n" + virsh_list)
                logger.error(
                    'kill vm[pids:%s] failed because of unavailable fs[mountPath:%s].'
                    ' please retry "umount -f %s"' %
                    (killed_vm_pids, mount_path, mount_path))
                return

    def touch_heartbeat_file():
        # returns True when the heartbeat file could be touched in time
        touch = shell.ShellCmd(
            'timeout %s touch %s' %
            (cmd.storageCheckerTimeout, heartbeat_file_path))
        touch(False)
        if touch.return_code != 0:
            logger.warn(
                'unable to touch %s, %s %s' %
                (heartbeat_file_path, touch.stderr, touch.stdout))
        return touch.return_code == 0

    def prepare_heartbeat_dir():
        heartbeat_dir = os.path.join(mount_path, "zs-heartbeat")
        if not mounted_by_zstack or linux.is_mounted(mount_path):
            # storage usable: make sure the heartbeat directory exists
            if not os.path.exists(heartbeat_dir):
                os.makedirs(heartbeat_dir, 0755)
        else:
            # storage gone: drop the stale heartbeat directory
            if os.path.exists(heartbeat_dir):
                linux.rm_dir_force(heartbeat_dir)
        return heartbeat_dir

    heartbeat_file_dir = prepare_heartbeat_dir()
    heartbeat_file_path = os.path.join(
        heartbeat_file_dir, 'heartbeat-file-kvm-host-%s.hb' % cmd.hostUuid)
    created_time = time.time()
    self.setup_fencer(ps_uuid, created_time)
    try:
        failure = 0
        while self.run_fencer(ps_uuid, created_time):
            time.sleep(cmd.interval)
            if touch_heartbeat_file():
                # heartbeat ok: reset the consecutive-failure counter
                failure = 0
                continue

            failure += 1
            if failure == cmd.maxAttempts:
                logger.warn(
                    'failed to touch the heartbeat file[%s] %s times, we lost the connection to the storage,'
                    'shutdown ourselves' %
                    (heartbeat_file_path, cmd.maxAttempts))
                self.report_storage_status([ps_uuid], 'Disconnected')
                # Permissive strategy only reports; it never kills VMs
                if cmd.strategy == 'Permissive':
                    continue

                killed_vms = kill_vm(cmd.maxAttempts, [mount_path], True)
                if len(killed_vms) != 0:
                    self.report_self_fencer_triggered(
                        [ps_uuid], ','.join(killed_vms.keys()))
                    clean_network_config(killed_vms.keys())
                killed_vm_pids = killed_vms.values()
                after_kill_vm()
                # only attempt recovery for storage zstack mounted itself
                if mounted_by_zstack and not linux.is_mounted(
                        mount_path):
                    try_remount_fs()
                    prepare_heartbeat_dir()

        logger.debug('stop heartbeat[%s] for filesystem self-fencer' %
                     heartbeat_file_path)
    except:
        # keep the fencer thread from dying silently; log the traceback
        content = traceback.format_exc()
        logger.warn(content)