def wipe_fs(disks, expected_vg=None):
    for disk in disks:
        exists_vg = None
        r = bash.bash_r("pvdisplay %s | grep %s" % (disk, expected_vg))
        if r == 0:
            continue
        r, o = bash.bash_ro("pvs --nolocking --noheading -o vg_name %s" % disk)
        if r == 0 and o.strip() != "":
            exists_vg = o.strip()
        backup = backup_super_block(disk)
        if bash.bash_r("grep %s %s" % (expected_vg, backup)) == 0:
            raise Exception("found vg uuid in superblock backup while not found in lvm command!")

        need_flush_mpath = False
        bash.bash_roe("partprobe -s %s" % disk)
        cmd_type = bash.bash_o("lsblk %s -oTYPE | grep mpath" % disk)
        if cmd_type.strip() != "":
            need_flush_mpath = True
        bash.bash_roe("wipefs -af %s" % disk)
        if need_flush_mpath:
            bash.bash_roe("multipath -f %s && systemctl restart multipathd.service && sleep 1" % disk)

        if exists_vg is not None:
            logger.debug("found vg %s exists on this pv %s, start wipe" % (exists_vg, disk))
            try:
                drop_vg_lock(exists_vg)
                remove_device_map_for_vg(exists_vg)
            finally:
                pass
def install_drbd():
    mod_installed = bash.bash_r("lsmod | grep drbd") == 0
    mod_exists = bash.bash_r("modinfo drbd") == 0
    utils_installed = bash.bash_r("rpm -ql drbd-utils || rpm -ql drbd84-utils") == 0
    basearch = platform.machine()
    releasever = bash.bash_o("awk '{print $3}' /etc/zstack-release").strip()
    # utils_exists holds the return code of ls: 0 means the package file is on the DVD
    utils_exists, o = bash.bash_ro(
        "ls /opt/zstack-dvd/{}/{}/Packages/drbd-utils*".format(basearch, releasever))

    if mod_installed and utils_installed:
        return

    if not mod_installed:
        if mod_exists:
            bash.bash_errorout("modprobe drbd")
        else:
            raise Exception("drbd mod not installed and not exists!")

    if not utils_installed:
        if utils_exists == 0:
            bash.bash_errorout("rpm -ivh %s" % o)
        else:
            raise Exception("drbd utils not installed and not exists!")
def config_drbd():
    bash.bash_r("sed -i 's/usage-count yes/usage-count no/g' /etc/drbd.d/global_common.conf")
    bash.bash_r("iptables -I INPUT -p tcp -m tcp --dport 20000:30000 -j ACCEPT")
def get_config_path_from_name(name):
    if bash.bash_r("drbdadm dump %s" % name) == 0:
        return bash.bash_o(
            "drbdadm dump %s | grep 'defined at' | awk '{print $4}'" % name).split(":")[0]
    if bash.bash_r("ls /etc/drbd.d/%s.res" % name) == 0:
        return "/etc/drbd.d/%s.res" % name
    raise Exception("can not find drbd resource %s" % name)
def clean_imagestore_cache(self, cachedir):
    if not cachedir or not os.path.exists(cachedir):
        return

    cdir = os.path.join(os.path.realpath(cachedir), "zstore-cache")
    cmdstr = "find %s -type f -name image -links 1 -exec unlink {} \;" % cdir
    bash_r(cmdstr)
    cmdstr = "find %s -depth -mindepth 1 -type d -empty -exec rmdir {} \;" % cdir
    bash_r(cmdstr)
def is_multipath_running():
    r = bash.bash_r("multipath -t")
    if r != 0:
        return False

    r = bash.bash_r("pgrep multipathd")
    if r != 0:
        return False

    return True
def examine_lockspace(lockspace):
    r = bash.bash_r("sanlock client examine -s %s" % lockspace)
    if r != 0:
        logger.warn("sanlock examine %s failed, return %s" % (lockspace, r))
        return r

    r = bash.bash_r("sanlock direct read_leader -s %s" % lockspace)
    if r != 0:
        logger.warn("sanlock read leader %s failed, return %s" % (lockspace, r))
    return r
def deactive_lv(path, raise_exception=True):
    if not lv_exists(path):
        return
    if not lv_is_active(path):
        return

    if raise_exception:
        bash.bash_errorout("lvchange -an %s" % path)
    else:
        bash.bash_r("lvchange -an %s" % path)

    if lv_is_active(path):
        raise RetryException("lv %s is still active after lvchange -an" % path)
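# A minimal usage sketch, not part of the original module: because deactive_lv() raises
# RetryException when the LV stays active, callers would typically wrap it with the
# linux.retry decorator used elsewhere in these sources. The times/sleep_time values and
# the wrapper name below are illustrative assumptions only.
@linux.retry(times=10, sleep_time=random.uniform(0.1, 3))
def deactive_lv_with_retry(path, raise_exception=True):
    # retried until the LV is actually inactive or the retry budget is exhausted
    deactive_lv(path, raise_exception)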
def config_lvm_filter(files):
    if not os.path.exists(LVM_CONFIG_PATH):
        raise Exception("can not find lvm config path: %s, config lvm failed" % LVM_CONFIG_PATH)

    vgs = bash.bash_o("vgs --nolocking -oname --noheading").splitlines()
    filter_str = 'filter=["r|\\/dev\\/cdrom|"'
    for vg in vgs:
        filter_str += ', "r\\/dev\\/mapper\\/%s.*\\/"' % vg.strip()
    filter_str += ']'

    for file in files:
        bash.bash_r("sed -i 's/.*\\b%s.*/%s/g' %s/%s" % ("filter", filter_str, LVM_CONFIG_PATH, file))
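# For reference, with two VGs named vg1 and vg2 (made-up names for illustration) the
# line written into each lvm config file by the sed command above would look like:
#
#   filter=["r|\/dev\/cdrom|", "r\/dev\/mapper\/vg1.*\/", "r\/dev\/mapper\/vg2.*\/"]
#
# i.e. the cdrom and the device-mapper paths of the listed VGs are rejected by LVM scans.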
def create_template_from_volume(self, req):
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()
    volume_abs_path = translate_absolute_path_from_install_path(cmd.volumePath)
    install_abs_path = translate_absolute_path_from_install_path(cmd.installPath)

    if cmd.sharedVolume:
        lvm.do_active_lv(volume_abs_path, lvm.LvmlockdLockType.SHARE, True)

    with lvm.RecursiveOperateLv(volume_abs_path, shared=cmd.sharedVolume, skip_deactivate_tags=[IMAGE_TAG]):
        virtual_size = linux.qcow2_virtualsize(volume_abs_path)
        total_size = 0
        compress = False
        for qcow2 in linux.qcow2_get_file_chain(volume_abs_path):
            if bash.bash_r("qemu-img check %s | grep compressed" % volume_abs_path) == 0:
                compress = True
            total_size += int(lvm.get_lv_size(qcow2))
        if total_size > virtual_size:
            total_size = virtual_size

        if bash.bash_r("qemu-img info --backing-chain %s | grep compress" % volume_abs_path) == 0:
            compress = True

        if not lvm.lv_exists(install_abs_path):
            lvm.create_lv_from_absolute_path(
                install_abs_path, total_size,
                "%s::%s::%s" % (VOLUME_TAG, cmd.hostUuid, time.time()))
        with lvm.OperateLv(install_abs_path, shared=False, delete_when_exception=True):
            linux.create_template(volume_abs_path, install_abs_path, compress)
            logger.debug('successfully created template[%s] from volume[%s]' % (cmd.installPath, cmd.volumePath))

            if cmd.compareQcow2 is True:
                logger.debug("comparing qcow2 between %s and %s" % (volume_abs_path, install_abs_path))
                bash.bash_errorout("time qemu-img compare %s %s" % (volume_abs_path, install_abs_path))
                logger.debug("confirmed qcow2 %s and %s are identical" % (volume_abs_path, install_abs_path))

    rsp.totalCapacity, rsp.availableCapacity = lvm.get_vg_size(cmd.vgUuid)
    return jsonobject.dumps(rsp)
def down(self):
    r, o, e = bash.bash_roe("drbdadm down %s" % self.name)
    if r == 0:
        return
    if "conflicting use of device-minor" in o + e:
        logger.debug("detect conflicting use of device-minor! %s" % e)
        return
    if 0 == bash.bash_r("cat /proc/drbd | grep '^%s: cs:Unconfigured'" % self.config.local_host.minor):
        return
    if 1 == bash.bash_r("cat /proc/drbd | grep '^%s: '" % self.config.local_host.minor):
        return
    raise Exception("demote resource %s failed: %s, %s, %s" % (self.name, r, o, e))
def is_multipath(dev_name):
    if not is_multipath_running():
        return False

    r = bash.bash_r("multipath /dev/%s -l | grep mpath" % dev_name)
    if r == 0:
        return True
    return False
def _get_file_actual_size(self, path):
    ret = bash.bash_r("rbd info %s | grep -q fast-diff" % path)
    # if no fast-diff supported and not xsky ceph, skip actual size check
    if ret != 0 and not ceph.is_xsky():
        return None

    # use json format result first
    r, jstr = bash.bash_ro("rbd du %s --format json" % path)
    if r == 0 and bool(jstr):
        total_size = 0
        result = jsonobject.loads(jstr)
        if result.images is not None:
            for item in result.images:
                total_size += int(item.used_size)
            return total_size

    r, size = bash.bash_ro(
        "rbd du %s | awk 'END {if(NF==3) {print $3} else {print $4,$5} }' | sed s/[[:space:]]//g" % path,
        pipe_fail=True)
    if r != 0:
        return None
    size = size.strip()
    if not size:
        return None
    return sizeunit.get_size(size)
def check_gl_lock(raise_exception=False):
    r = bash.bash_r("lvmlockctl -i | grep 'LK GL'")
    if r == 0:
        return

    logger.debug("can not find any gl lock")
    r, o = bash.bash_ro("lvmlockctl -i | grep 'lock_type=sanlock' | awk '{print $2}'")
    if len(o.strip().splitlines()) != 0:
        for i in o.strip().splitlines():
            if i == "":
                continue
            r, o, e = bash.bash_roe("lvmlockctl --gl-enable %s" % i)
            if r != 0:
                raise Exception("failed to enable gl lock on vg: %s, %s, %s" % (i, o, e))

    r, o = bash.bash_ro("vgs --nolocking --noheadings -Svg_lock_type=sanlock -oname")
    result = []
    for i in o.strip().split("\n"):
        if i != "":
            result.append(i)
    if len(result) == 0:
        if raise_exception is True:
            raise Exception("can not find any sanlock shared vg")
        else:
            return

    r, o, e = bash.bash_roe("lvmlockctl --gl-enable %s" % result[0])
    if r != 0:
        raise Exception("failed to enable gl lock on vg: %s" % result[0])
def vg_lock_is_adding(vgUuid):
    # NOTE(weiw): this means vg locking is adding rather than complete
    return_code = bash.bash_r("sanlock client status | grep -E 's lvm_%s.*\\:0 ADD'" % vgUuid)
    if return_code == 0:
        raise RetryException("vg %s lock space is starting" % vgUuid)
    return False
def iscsi_logout(self, req):
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()

    iqns = cmd.iscsiTargets
    if iqns is None or len(iqns) == 0:
        iqns = shell.call(
            "timeout 10 iscsiadm -m discovery --type sendtargets --portal %s:%s | awk '{print $2}'"
            % (cmd.iscsiServerIp, cmd.iscsiServerPort)).strip().splitlines()

    if iqns is None or len(iqns) == 0:
        rsp.iscsiTargetStructList = []
        return jsonobject.dumps(rsp)

    for iqn in iqns:
        r = bash.bash_r("iscsiadm -m session | grep %s:%s | grep %s"
                        % (cmd.iscsiServerIp, cmd.iscsiServerPort, iqn))
        if r == 0:
            shell.call('timeout 10 iscsiadm --mode node --targetname "%s" -p %s:%s --logout'
                       % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))
        shell.call('timeout 10 iscsiadm -m node -o delete -T "%s" -p %s:%s'
                   % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))

    return jsonobject.dumps(rsp)
def check_vg_status(vgUuid, check_timeout, check_pv=True):
    # type: (str, int, bool) -> tuple[bool, str]
    # 1. examine sanlock lock
    # 2. check the consistency of volume group
    # 3. check pv missing
    # 4. check vg attr
    return_code = bash.bash_r("sanlock client status | grep -E 's lvm_%s.*\\:0 ADD'" % vgUuid)
    if return_code == 0:
        logger.debug("lockspace for vg %s is adding, skip run fencer" % vgUuid)
        return True, ""

    lock_space = get_lockspace(vgUuid)
    if lock_space == "":
        s = "can not find lockspace of %s" % vgUuid
        logger.warn(s)
        return False, s

    r, s = check_sanlock_renewal_failure(lock_space)
    if r is False:
        return r, s

    r, s = check_sanlock_status(lock_space)
    if r is False:
        return r, s

    # if examine_lockspace(lock_space) != 0:
    #     return False, "examine lockspace %s failed" % lock_space
    #
    # if set_sanlock_event(lock_space) != 0:
    #     return False, "sanlock set event on lock space %s failed" % lock_space

    if not check_pv:
        return True, ""

    return check_pv_status(vgUuid, check_timeout)
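# A minimal usage sketch, not from the original source: how a storage fencer round might
# consume check_vg_status(). The function name fencer_round, the default timeout and the
# escalation comment are illustrative assumptions only.
def fencer_round(vg_uuid, storage_timeout=60):
    healthy, reason = check_vg_status(vg_uuid, storage_timeout, check_pv=True)
    if not healthy:
        logger.warn("vg %s failed health check: %s" % (vg_uuid, reason))
        # a real fencer would escalate here, e.g. stop VMs whose volumes live on this vg
    return healthy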
def add_pv(vg_uuid, disk_path, metadata_size):
    bash.bash_errorout("vgextend --metadatasize %s %s %s" % (metadata_size, vg_uuid, disk_path))
    if bash.bash_r("pvs --nolocking --readonly %s | grep %s" % (disk_path, vg_uuid)) != 0:
        raise Exception("disk %s not added to vg %s after vgextend" % (disk_path, vg_uuid))
def list_local_up_drbd(vgUuid):
    if bash.bash_r("drbd-overview | grep -v %s" % DrbdNetState.Unconfigured) == 1:
        return []
    names = bash.bash_o(
        "drbd-overview | grep -v %s | awk -F ':' '{print $2}' | awk '{print $1}'"
        % DrbdNetState.Unconfigured).strip().splitlines()
    return [DrbdResource(name) for name in names]
def vg_lock_exists(vgUuid):
    return_code = bash.bash_r("lvmlockctl -i | grep %s" % vgUuid)
    if return_code != 0:
        raise RetryException("can not find lock space for vg %s via lvmlockctl" % vgUuid)
    elif vg_lock_is_adding(vgUuid) is True:
        raise RetryException("lock space for vg %s is adding" % vgUuid)
    else:
        return True
def config_lvmlockd_by_sed():
    cmd = shell.ShellCmd(
        "sed -i 's/.*ExecStart=.*/ExecStart=\\/usr\\/sbin\\/lvmlockd --daemon-debug --sanlock-timeout %s/g' /usr/lib/systemd/system/lvm2-lvmlockd.service"
        % SANLOCK_IO_TIMEOUT)
    cmd(is_exception=False)

    if bash.bash_r("grep StandardOutput /usr/lib/systemd/system/lvm2-lvmlockd.service") != 0:
        cmd = shell.ShellCmd(
            "sed -i '/ExecStart/a StandardOutput=%s' /usr/lib/systemd/system/lvm2-lvmlockd.service"
            % LVMLOCKD_LOG_FILE_PATH)
        cmd(is_exception=False)

    if bash.bash_r("grep StandardError /usr/lib/systemd/system/lvm2-lvmlockd.service") != 0:
        cmd = shell.ShellCmd(
            "sed -i '/ExecStart/a StandardError=%s' /usr/lib/systemd/system/lvm2-lvmlockd.service"
            % LVMLOCKD_LOG_FILE_PATH)
        cmd(is_exception=False)

    linux.sync()
    cmd = shell.ShellCmd("systemctl daemon-reload")
    cmd(is_exception=False)
def is_slave_of_multipath(dev_path):
    # type: (str) -> bool
    if not is_multipath_running():
        return False

    r = bash.bash_r("multipath %s -l | grep policy" % dev_path)
    if r == 0:
        return True
    return False
def get_name_from_config_path(config_path):
    """
    :type config_path: str
    """
    if bash.bash_r("head -n 1 %s" % config_path) == 0:
        return bash.bash_o("head -n 1 %s | awk '{print $2}'" % config_path).strip()
    else:
        return config_path.split("/")[-1].split(".")[0]
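# Illustrative round trip, not part of the original source: get_config_path_from_name()
# and get_name_from_config_path() act as inverses for a resource whose config lives in
# /etc/drbd.d and starts with a "resource <name> {" line. The resource name "r0" below
# is an assumption for the example.
#
#   path = get_config_path_from_name("r0")        # e.g. /etc/drbd.d/r0.res
#   assert get_name_from_config_path(path) == "r0"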
def test_network_ok_to_peer(peer_address, via_dev=None):
    if not via_dev:
        via_dev = bash.bash_o("ip -o r get %s | awk '{print $3}'" % peer_address).strip()
    for i in range(5):
        recv = bash.bash_r("timeout 2 arping -w 1 -b %s -I %s -c 1" % (peer_address, via_dev))
        if recv == 0:
            return True
    return False
def check_lv_on_pv_valid(vgUuid, pvUuid, lv_path=None):
    pv_name = bash.bash_o(
        "timeout -s SIGKILL 10 pvs --noheading --nolocking -oname -Spv_uuid=%s" % pvUuid).strip()
    one_active_lv = lv_path if lv_path is not None else bash.bash_o(
        "timeout -s SIGKILL 10 lvs --noheading --nolocking -opath,devices,tags " +
        "-Sactive=active %s | grep %s | grep %s | awk '{print $1}' | head -n1"
        % (vgUuid, pv_name, VOLUME_TAG)).strip()
    if one_active_lv == "":
        return True

    r = bash.bash_r("qemu-img info %s" % one_active_lv)
    if r != 0:
        return False
    return True
def enable_multipath(self, req):
    rsp = AgentRsp()
    lvm.enable_multipath()

    r = bash.bash_r("grep '^[[:space:]]*alias' /etc/multipath.conf")
    if r == 0:
        bash.bash_roe("sed -i 's/^[[:space:]]*alias/#alias/g' /etc/multipath.conf")
        bash.bash_roe("systemctl reload multipathd")

    linux.set_fail_if_no_path()
    return jsonobject.dumps(rsp)
def self_test_is_running(bus, device):
    r = bash.bash_r(
        "smartctl -l selftest -d megaraid,%s /dev/bus/%s | grep 'Self-test routine in progress'"
        % (device, bus))
    if r == 0:
        return

    r, o, e = bash.bash_roe("smartctl -a /dev/bus/%s -d megaraid,%s" % (bus, device))
    if "Self-test routine in progress" in o + e:
        return

    raise RetryException("can not find self test in progress on drive megaraid,%s (bus %s)" % (device, bus))
def install_drbd():
    mod_installed = bash.bash_r("lsmod | grep drbd") == 0
    mod_exists = bash.bash_r("modinfo drbd") == 0
    utils_installed = bash.bash_r("rpm -ql drbd-utils || rpm -ql drbd84-utils") == 0
    # utils_exists holds the return code of ls: 0 means the package file is on the DVD
    utils_exists, o = bash.bash_ro("ls /opt/zstack-dvd/Packages/drbd-utils*")

    if mod_installed and utils_installed:
        return

    if not mod_installed:
        if mod_exists:
            bash.bash_errorout("modprobe drbd")
        else:
            raise Exception("drbd mod not installed and not exists!")

    if not utils_installed:
        if utils_exists == 0:
            bash.bash_errorout("rpm -ivh %s" % o)
        else:
            raise Exception("drbd utils not installed and not exists!")
def is_multipath(dev_name):
    if not is_multipath_running():
        return False

    r = bash.bash_r("multipath /dev/%s -l | grep policy" % dev_name)
    if r == 0:
        return True

    slaves = shell.call("ls /sys/class/block/%s/slaves/" % dev_name).strip().split("\n")
    if slaves is not None and len(slaves) > 0:
        if len(slaves) == 1 and slaves[0] == "":
            return False
        return True
    return False
def create_vg_if_not_found(vgUuid, diskPaths, hostUuid, forceWipe=False):
    @linux.retry(times=5, sleep_time=random.uniform(0.1, 3))
    def find_vg(vgUuid, raise_exception=True):
        cmd = shell.ShellCmd(
            "timeout 5 vgscan --ignorelockingfailure; vgs --nolocking %s -otags | grep %s"
            % (vgUuid, INIT_TAG))
        cmd(is_exception=False)
        if cmd.return_code != 0 and raise_exception:
            raise RetryException("can not find vg %s with tag %s" % (vgUuid, INIT_TAG))
        elif cmd.return_code != 0:
            return False
        return True

    try:
        find_vg(vgUuid)
    except RetryException as e:
        if forceWipe is True:
            running_vm = bash.bash_o("virsh list | grep running | awk '{print $2}'").strip().split()
            if running_vm != [] and running_vm[0] != "":
                for vm in running_vm:
                    bash.bash_r("virsh destroy %s" % vm)
            r = bash.bash_r("drbdadm down all")
            if r == 0:
                bash.bash_r("mkdir -p %s" % BACKUP_DIR)
                bash.bash_r("mv /etc/drbd.d/*.res %s" % BACKUP_DIR)
            lvm.wipe_fs(diskPaths, vgUuid)

        cmd = shell.ShellCmd(
            "vgcreate -qq --addtag '%s::%s::%s::%s' --metadatasize %s %s %s"
            % (INIT_TAG, hostUuid, time.time(), bash.bash_o("hostname").strip(),
               DEFAULT_VG_METADATA_SIZE, vgUuid, " ".join(diskPaths)))
        cmd(is_exception=False)
        logger.debug("created vg %s, ret: %s, stdout: %s, stderr: %s"
                     % (vgUuid, cmd.return_code, cmd.stdout, cmd.stderr))
        if cmd.return_code == 0 and find_vg(vgUuid, False) is True:
            return True

        try:
            if find_vg(vgUuid) is True:
                return True
        except RetryException as ee:
            raise Exception("can not find vg %s with disks: %s and create vg return: %s %s %s "
                            % (vgUuid, diskPaths, cmd.return_code, cmd.stdout, cmd.stderr))
        except Exception as ee:
            raise ee
    except Exception as e:
        raise e

    return False
def download_image_from_imagestore(self, cmd):
    rsp = AgentResponse()
    name, imageid = self._parse_image_reference(cmd.imageInstallPath)
    cmdstr = '%s -url %s:%s pull -installpath %s %s:%s' % (
        self.ZSTORE_CLI_PATH, cmd.hostname, self.ZSTORE_DEF_PORT,
        cmd.cacheInstallPath, name, imageid)
    logger.debug('pulling %s:%s from image store' % (name, imageid))
    ret = bash_r(cmdstr)
    if ret != 0:
        rsp.success = False
        rsp.error = "failed to download image from imagestore to baremetal image cache"
    else:
        logger.debug('%s:%s pulled to baremetal pxeserver' % (name, imageid))
    return rsp
def get_multipath_dmname(dev_name):
    # if is multipath dev, return;
    # if is one of multipath paths, return multipath dev(dm-xxx);
    # else return None
    slaves = shell.call("ls /sys/class/block/%s/slaves/" % dev_name).strip().splitlines()
    if slaves is not None and len(slaves) > 0 and slaves[0].strip() != "":
        return dev_name

    r = bash.bash_r("multipath /dev/%s -l | grep policy" % dev_name)
    if r != 0:
        return None

    o = bash.bash_o(
        "multipath -l /dev/%s | head -n1 | awk -F 'dm' '{print $2}' | awk '{print $1}'"
        % dev_name).strip()
    return "dm%s" % o
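# Illustrative usage, not from the original source; the device names below are
# assumptions for the example.
#
#   get_multipath_dmname("dm-2")  # already a multipath dev (has slaves) -> "dm-2"
#   get_multipath_dmname("sdb")   # a path member of a multipath dev     -> e.g. "dm-2"
#   get_multipath_dmname("sda")   # not related to multipath             -> None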
def config_lvmlockd_by_sed():
    cmd = shell.ShellCmd(
        "sed -i 's/.*ExecStart=.*/ExecStart=\\/usr\\/sbin\\/lvmlockd --daemon-debug --sanlock-timeout %s/g' /usr/lib/systemd/system/lvm2-lvmlockd.service"
        % SANLOCK_IO_TIMEOUT)
    cmd(is_exception=False)

    if bash.bash_r("grep StandardOutput /usr/lib/systemd/system/lvm2-lvmlockd.service") != 0:
        cmd = shell.ShellCmd(
            "sed -i '/ExecStart/a StandardOutput=%s' /usr/lib/systemd/system/lvm2-lvmlockd.service"
            % LVMLOCKD_LOG_FILE_PATH)
        cmd(is_exception=False)

    if bash.bash_r("grep StandardError /usr/lib/systemd/system/lvm2-lvmlockd.service") != 0:
        cmd = shell.ShellCmd(
            "sed -i '/ExecStart/a StandardError=%s' /usr/lib/systemd/system/lvm2-lvmlockd.service"
            % LVMLOCKD_LOG_FILE_PATH)
        cmd(is_exception=False)

    cmd = shell.ShellCmd("sync")
    cmd(is_exception=False)
def _get_file_actual_size(self, path):
    ret = bash.bash_r("rbd info %s | grep -q fast-diff" % path)
    # if no fast-diff supported and not xsky ceph, skip actual size check
    if ret != 0 and not isXsky():
        return None

    r, size = bash.bash_ro("rbd du %s | tail -1 | awk '{ print $3 }'" % path)
    if r != 0:
        return None
    size = size.strip('\t\n ')
    return sizeunit.get_size(size)
def get_running_vm_root_volume_on_pv(vgUuid, pvUuids, checkIo=True):
    # 1. get "-drive ... -device ... bootindex=1"
    # 2. get "-boot order=dc ... -drive id=drive-virtio-disk"
    # 3. make sure io has error
    # 4. filter for pv
    out = bash.bash_o("pgrep -a qemu-kvm | grep %s" % vgUuid).strip().split("\n")
    if len(out) == 0:
        return []

    vms = []
    for o in out:
        vm = VmStruct()
        vm.pid = o.split(" ")[0]
        vm.cmdline = o.split(" ", 3)[-1]
        vm.uuid = o.split(" -uuid ")[-1].split(" ")[0]
        if "bootindex=1" in vm.cmdline:
            vm.root_volume = vm.cmdline.split("bootindex=1")[0].split(" -drive file=")[-1].split(",")[0]
        elif " -boot order=dc" in vm.cmdline:
            # TODO(weiw): maybe support scsi volume as boot volume one day
            vm.root_volume = vm.cmdline.split("id=drive-virtio-disk0")[0].split(" -drive file=")[-1].split(",")[0]
        else:
            logger.warn("found strange vm[pid: %s, cmdline: %s], can not find boot volume" % (vm.pid, vm.cmdline))
            continue

        r = bash.bash_r("qemu-img info --backing-chain %s" % vm.root_volume)
        if checkIo is True and r == 0:
            logger.debug("volume %s for vm %s io success, skipped" % (vm.root_volume, vm.uuid))
            continue

        out = bash.bash_o("virsh dumpxml %s | grep \"source file='/dev/\"" % vm.uuid).strip().splitlines()
        if len(out) != 0:
            for file in out:
                vm.volumes.append(file.strip().split("'")[1])

        if is_volume_on_pvs(vm.root_volume, pvUuids, True):
            vms.append(vm)

    return vms
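# Illustrative parsing example, not from the original source: for a qemu-kvm process whose
# command line contains something like
#   ... -uuid 8f2e... -drive file=/dev/vg1/rootvol,if=none,id=drive-virtio-disk0 ... bootindex=1 ...
# the code above extracts root_volume == "/dev/vg1/rootvol". The uuid, vg and LV names here
# are made up for the example.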
def lv_is_active(path):
    # NOTE(weiw): use readonly to get active may return 'unknown'
    r = bash.bash_r("lvs --nolocking --noheadings %s -oactive | grep -w active" % path)
    return r == 0
def lv_exists(path):
    r = bash.bash_r("lvs --nolocking %s" % path)
    return r == 0
def compare(src, dst):
    return bash.bash_r("cmp %s %s" % (src, dst)) == 0
def iscsi_login(self, req):
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = IscsiLoginRsp()

    @linux.retry(times=5, sleep_time=1)
    def discovery_iscsi(iscsiServerIp, iscsiServerPort):
        r, o, e = bash.bash_roe(
            "timeout 10 iscsiadm -m discovery --type sendtargets --portal %s:%s"
            % (iscsiServerIp, iscsiServerPort))
        if r != 0:
            raise RetryException("can not discovery iscsi portal %s:%s, cause %s"
                                 % (iscsiServerIp, iscsiServerPort, e))
        return [i.strip().split(" ")[-1] for i in o.splitlines()]

    @linux.retry(times=20, sleep_time=1)
    def wait_iscsi_mknode(iscsiServerIp, iscsiServerPort, iscsiIqn, e=None):
        disks_by_dev = bash.bash_o("ls /dev/disk/by-path | grep %s:%s | grep %s"
                                   % (iscsiServerIp, iscsiServerPort, iscsiIqn)).strip().splitlines()
        sid = bash.bash_o("iscsiadm -m session | grep %s:%s | grep %s | awk '{print $2}'"
                          % (iscsiServerIp, iscsiServerPort, iscsiIqn)).strip("[]\n ")
        if sid == "" or sid is None:
            err = "sid not found, this may because chap authentication failed"
            if e is not None and e != "":
                err += " ,error: %s" % e
            raise RetryException(err)

        disks_by_iscsi = bash.bash_o("iscsiadm -m session -P 3 --sid=%s | grep Lun" % sid).strip().splitlines()
        if len(disks_by_dev) < len(disks_by_iscsi):
            raise RetryException(
                "iscsiadm says there are [%s] disks[%s] but only found [%s] disks on /dev/disk[%s], "
                "so not all disks logged in, it may recover after a while so check and login again"
                % (len(disks_by_iscsi), disks_by_iscsi, len(disks_by_dev), disks_by_dev))

    path = "/var/lib/iscsi/nodes"
    self.clean_iscsi_cache_configuration(path, cmd.iscsiServerIp, cmd.iscsiServerPort)

    iqns = cmd.iscsiTargets
    if iqns is None or len(iqns) == 0:
        try:
            iqns = discovery_iscsi(cmd.iscsiServerIp, cmd.iscsiServerPort)
        except Exception as e:
            current_hostname = shell.call('hostname')
            current_hostname = current_hostname.strip(' \t\n\r')
            rsp.error = "login iscsi server %s:%s on host %s failed, because %s" % \
                        (cmd.iscsiServerIp, cmd.iscsiServerPort, current_hostname, e.message)
            rsp.success = False
            return jsonobject.dumps(rsp)

    if iqns is None or len(iqns) == 0:
        rsp.iscsiTargetStructList = []
        return jsonobject.dumps(rsp)

    for iqn in iqns:
        t = IscsiTargetStruct()
        t.iqn = iqn
        try:
            if cmd.iscsiChapUserName and cmd.iscsiChapUserPassword:
                bash.bash_o('iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.authmethod --value=CHAP'
                            % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))
                bash.bash_o('iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.username --value=%s'
                            % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort, cmd.iscsiChapUserName))
                bash.bash_o('iscsiadm --mode node --targetname "%s" -p %s:%s --op=update --name node.session.auth.password --value=%s'
                            % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort, cmd.iscsiChapUserPassword))
            r, o, e = bash.bash_roe('iscsiadm --mode node --targetname "%s" -p %s:%s --login'
                                    % (iqn, cmd.iscsiServerIp, cmd.iscsiServerPort))
            wait_iscsi_mknode(cmd.iscsiServerIp, cmd.iscsiServerPort, iqn, e)
        finally:
            if bash.bash_r("ls /dev/disk/by-path | grep %s:%s | grep %s"
                           % (cmd.iscsiServerIp, cmd.iscsiServerPort, iqn)) != 0:
                rsp.iscsiTargetStructList.append(t)
            else:
                disks = bash.bash_o("ls /dev/disk/by-path | grep %s:%s | grep %s"
                                    % (cmd.iscsiServerIp, cmd.iscsiServerPort, iqn)).strip().splitlines()
                for d in disks:
                    t.iscsiLunStructList.append(self.get_disk_info_by_path(d.strip()))
                rsp.iscsiTargetStructList.append(t)

    return jsonobject.dumps(rsp)