def fact(self, req):
    """Collect host facts: OS info, qemu-img/libvirt versions, IPv4 addresses and CPU virtualization flag."""
    rsp = HostFactResponse()
    rsp.osDistribution, rsp.osVersion, rsp.osRelease = platform.dist()
    # to be compatible with both `2.6.0` and `2.9.0(qemu-kvm-ev-2.9.0-16.el7_4.8.1)`
    qemu_img_version = shell.call(
        "qemu-img --version | grep 'qemu-img version' | cut -d ' ' -f 3 | cut -d '(' -f 1"
    )
    qemu_img_version = qemu_img_version.strip('\t\r\n ,')
    # every non-loopback IPv4 address configured on the host
    ipV4Addrs = shell.call(
        "ip addr | grep -w inet | grep -v 127.0.0.1 | awk '{print $2}' | cut -d/ -f1"
    )
    rsp.qemuImgVersion = qemu_img_version
    rsp.libvirtVersion = self.libvirt_version
    rsp.ipAddresses = ipV4Addrs.splitlines()
    if IS_AARCH64:
        # FIXME how to check vt of aarch64?
        rsp.hvmCpuFlag = 'vt'
        rsp.cpuModelName = "Unknown"
    else:
        # vmx = Intel VT-x, svm = AMD-V; grep exit code 0 means the flag is present
        if shell.run('grep vmx /proc/cpuinfo') == 0:
            rsp.hvmCpuFlag = 'vmx'
        if not rsp.hvmCpuFlag:
            if shell.run('grep svm /proc/cpuinfo') == 0:
                rsp.hvmCpuFlag = 'svm'
        rsp.cpuModelName = self._get_host_cpu_model()
    return jsonobject.dumps(rsp)
def ping(self, req):
    """Ping the ceph storage: verify the mon address is still in the monmap, then probe pool writability with a tiny rados object."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = PingRsp()
    facts = bash_o('ceph -s -f json')
    mon_facts = jsonobject.loads(facts)
    found = False
    for mon in mon_facts.monmap.mons:
        if cmd.monAddr in mon.addr:
            found = True
            break
    if not found:
        rsp.success = False
        rsp.failure = "MonAddrChanged"
        rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                    'Reconnect the ceph primary storage' \
                    ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
        return jsonobject.dumps(rsp)
    # testImagePath is "<pool>/<object-name>"
    pool, objname = cmd.testImagePath.split('/')
    # write a small object to prove the pool accepts writes
    create_img = shell.ShellCmd("echo zstack | rados -p '%s' put '%s' -" % (pool, objname))
    create_img(False)
    if create_img.return_code != 0:
        rsp.success = False
        rsp.failure = 'UnableToCreateFile'
        rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
    else:
        # best-effort cleanup of the probe object
        shell.run("rados -p '%s' rm '%s'" % (pool, objname))
    return jsonobject.dumps(rsp)
def check_docker(self):
    """Restart the docker service when the docker0 FORWARD iptables rule is missing."""
    probe = "ip addr show docker0 > /dev/null && /sbin/iptables-save | grep -q 'FORWARD.*docker0'"
    if shell.run(probe) == 0:
        return
    logger.warn("cannot find docker iptables rule, restart docker server!")
    shell.run("systemctl restart docker")
def update_os(self, req):
    """Update the host OS from the zstack-mn/qemu-kvm-ev-mn yum repos, validating yum and both repos first."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    if not cmd.excludePackages:
        exclude = ""
    else:
        exclude = "--exclude=" + cmd.excludePackages
    yum_cmd = "yum --enablerepo=* clean all && yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn %s update -y" % exclude
    rsp = UpdateHostOSRsp()
    # each precondition failure short-circuits with a specific error message
    if shell.run("which yum") != 0:
        rsp.success = False
        rsp.error = "no yum command found, cannot update host os"
    elif shell.run(
            "yum --disablerepo=* --enablerepo=zstack-mn repoinfo") != 0:
        rsp.success = False
        rsp.error = "no zstack-mn repo found, cannot update host os"
    elif shell.run(
            "yum --disablerepo=* --enablerepo=qemu-kvm-ev-mn repoinfo"
    ) != 0:
        rsp.success = False
        rsp.error = "no qemu-kvm-ev-mn repo found, cannot update host os"
    elif shell.run(yum_cmd) != 0:
        rsp.success = False
        rsp.error = "failed to update host os using zstack-mn,qemu-kvm-ev-mn repo"
    else:
        logger.debug("successfully run: %s" % yum_cmd)
    return jsonobject.dumps(rsp)
def ping(self, req):
    """Ping the ceph primary storage: verify the mon address, then probe writability by creating a 1MB test rbd image."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = PingRsp()
    facts = bash_o('ceph -s -f json')
    mon_facts = jsonobject.loads(facts)
    found = False
    for mon in mon_facts.monmap.mons:
        if cmd.monAddr in mon.addr:
            found = True
            break
    if not found:
        rsp.success = False
        rsp.failure = "MonAddrChanged"
        rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                    'Reconnect the ceph primary storage' \
                    ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
        return jsonobject.dumps(rsp)
    create_img = shell.ShellCmd('rbd create %s --image-format 2 --size 1' % cmd.testImagePath)
    create_img(False)
    # 'File exists' means a previous probe image was left behind; treat that as success
    if create_img.return_code != 0 and 'File exists' not in create_img.stderr and 'File exists' not in create_img.stdout:
        rsp.success = False
        rsp.failure = 'UnableToCreateFile'
        rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
    else:
        shell.run('rbd rm %s' % cmd.testImagePath)
    return jsonobject.dumps(rsp)
def _migrate_volume_snapshot(self, parent_uuid, snapshot_uuid, snapshot_size,
                             src_snapshot_path, dst_install_path, dst_mon_addr,
                             dst_mon_user, dst_mon_passwd, dst_mon_port):
    """Stream an rbd snapshot diff to a remote ceph cluster over ssh and verify with md5.

    Returns 0 on success, the shell return code on transfer failure, or -1 on md5 mismatch.
    """
    src_snapshot_path = self._normalize_install_path(src_snapshot_path)
    dst_install_path = self._normalize_install_path(dst_install_path)
    # tee >(md5sum ...) computes a checksum on each side of the pipe without
    # re-reading the (potentially huge) stream
    if parent_uuid == "":
        # full export: no parent snapshot to diff from
        ret = shell.run(
            'rbd export-diff %s - | tee >(md5sum >/tmp/%s_src_md5) | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' %
            (src_snapshot_path, snapshot_uuid, dst_mon_passwd, dst_mon_user,
             dst_mon_addr, dst_mon_port, snapshot_uuid, dst_install_path))
    else:
        # incremental export relative to the parent snapshot
        ret = shell.run(
            'rbd export-diff --from-snap %s %s - | tee >(md5sum >/tmp/%s_src_md5) | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' %
            (parent_uuid, src_snapshot_path, snapshot_uuid, dst_mon_passwd,
             dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid,
             dst_install_path))
    if ret != 0:
        return ret
    src_md5 = self._read_file_content('/tmp/%s_src_md5' % snapshot_uuid)
    dst_md5 = shell.call(
        'sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' %
        (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port,
         snapshot_uuid))
    if src_md5 != dst_md5:
        return -1
    else:
        return 0
def _get_origin_format(path):
    """Read the first 0x9007 bytes of an image at `path` (http/ftp, sftp pipe, or local file)
    and detect its format; returns "raw" when too short to contain a qcow2 header.

    NOTE(review): `pipe_path` and `scp_to_pipe_cmd` are not defined here — presumably
    this is a nested function closing over an enclosing scope; confirm against the caller.
    """
    # enough bytes to cover the qcow2 header region inspected by get_image_format_from_buf
    qcow2_length = 0x9007
    if path.startswith('http://') or path.startswith(
            'https://') or path.startswith('ftp://'):
        resp = urllib2.urlopen(path)
        qhdr = resp.read(qcow2_length)
        resp.close()
    elif path.startswith('sftp://'):
        fd, tmp_file = tempfile.mkstemp()
        # drain only the header bytes from the named pipe, then kill the writer
        get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (
            qcow2_length, pipe_path, tmp_file)
        clean_cmd = "pkill -f %s" % pipe_path
        shell.run(
            '%s & %s && %s' %
            (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
        qhdr = os.read(fd, qcow2_length)
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
    else:
        resp = open(path)
        qhdr = resp.read(qcow2_length)
        resp.close()
    if len(qhdr) < qcow2_length:
        return "raw"
    return get_image_format_from_buf(qhdr)
def cancel_sftp_download(self, req):
    """Cancel an in-flight sftp download: kill the transfer processes and remove the temp and final rbd images."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()

    def check():
        # True when the image is gone from the pool
        return shell.run("rbd ls %s | grep -q %s" % (pool, image_name)) != 0

    def remove(target_name):
        # `rbd info || rbd rm` — only attempt removal when the image exists
        return shell.run("rbd info {0}/{1} || rbd rm {0}/{1}".format(
            pool, target_name)) == 0

    pool, image_name = self._parse_install_path(
        cmd.primaryStorageInstallPath)
    tmp_image_name = 'tmp-%s' % image_name
    if check():
        return jsonobject.dumps(rsp)
    for image in (tmp_image_name, image_name):
        # kill any process still writing the image before deleting it
        shell.run("pkill -9 -f '%s'" % image)
        linux.wait_callback_success(remove, image, timeout=30)
    if not check():
        rsp.set_err("remove image %s/%s fail" % (pool, image_name))
    return jsonobject.dumps(rsp)
def connect(self, req):
    """Handle host connect: configure Intel EPT, record host identity/URLs, and apply iptables rules."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = ConnectResponse()
    # page table extension
    if shell.run('lscpu | grep -q -w GenuineIntel') == 0:
        new_ept = False if cmd.pageTableExtensionDisabled else True
        rsp.error = self._set_intel_ept(new_ept)
        if rsp.error is not None:
            rsp.success = False
            return jsonobject.dumps(rsp)
    self.host_uuid = cmd.hostUuid
    self.config[kvmagent.HOST_UUID] = self.host_uuid
    self.config[kvmagent.SEND_COMMAND_URL] = cmd.sendCommandUrl
    Report.serverUuid = self.host_uuid
    Report.url = cmd.sendCommandUrl
    logger.debug(http.path_msg(self.CONNECT_PATH, 'host[uuid: %s] connected' % cmd.hostUuid))
    rsp.libvirtVersion = self.libvirt_version
    rsp.qemuVersion = self.qemu_version
    # create udev rule
    self.handle_usb_device_events()
    # ignore unhandled MSR accesses in KVM when requested by the management node
    ignore_msrs = 1 if cmd.ignoreMsrs else 0
    shell.run("/bin/echo %s > /sys/module/kvm/parameters/ignore_msrs" % ignore_msrs)
    vm_plugin.cleanup_stale_vnc_iptable_chains()
    apply_iptables_result = self.apply_iptables_rules(cmd.iptablesRules)
    rsp.iptablesSucc = apply_iptables_result
    return jsonobject.dumps(rsp)
def try_remount_fs():
    """Keep retrying to remount a fenced filesystem every 180s while its fencer is active.

    Closure: `mount_path`, `ps_uuid`, `created_time`, `url` and `options` come from
    the enclosing scope.
    """
    if mount_path_is_nfs(mount_path):
        shell.run("systemctl start nfs-client.target")
    while self.run_filesystem_fencer(ps_uuid, created_time):
        # already back? just report and stop retrying
        if linux.is_mounted(
                path=mount_path) and touch_heartbeat_file():
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug(
                "fs[uuid:%s] is reachable again, report to management" %
                ps_uuid)
            break
        try:
            logger.debug(
                'fs[uuid:%s] is unreachable, it will be remounted after 180s' %
                ps_uuid)
            time.sleep(180)
            # fencer may have been cancelled during the sleep
            if not self.run_filesystem_fencer(
                    ps_uuid, created_time):
                break
            linux.remount(url, mount_path, options)
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug(
                "remount fs[uuid:%s] success, report to management" %
                ps_uuid)
            break
        except:
            # remount failed; loop and try again on the next iteration
            logger.warn(
                'remount fs[uuid:%s] fail, try again soon' % ps_uuid)
            kill_progresses_using_mount_path(mount_path)
    logger.debug('stop remount fs[uuid:%s]' % ps_uuid)
def create_snapshot(self, req):
    """Create an rbd snapshot, optionally skipping when it already exists, and report its size."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    spath = self._normalize_install_path(cmd.snapshotPath)
    do_create = True
    if cmd.skipOnExisting:
        # snapshot path is "<image>@<snap-name>"
        image_name, sp_name = spath.split('@')
        o = shell.call('rbd --format json snap ls %s' % image_name)
        o = jsonobject.loads(o)
        for s in o:
            # name_ is the jsonobject accessor for the "name" field — TODO confirm
            if s.name_ == sp_name:
                do_create = False
    if do_create:
        o = shell.ShellCmd('rbd snap create %s' % spath)
        o(False)
        if o.return_code != 0:
            # roll back a possibly half-created snapshot before raising
            shell.run("rbd snap rm %s" % spath)
            o.raise_error()
    rsp = CreateSnapshotRsp()
    rsp.size = self._get_file_size(spath)
    self._set_capacity_to_response(rsp)
    return jsonobject.dumps(rsp)
def scan_host(self, req):
    """Ping-scan a host with nmap and classify the connection as success, failure, or unstable.

    First probes `cmd.times` rounds; if only some succeed, re-probes `cmd.successTimes`
    rounds to decide whether the link is stable.
    """
    rsp = ScanRsp()
    success = 0
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    for i in range(0, cmd.times):
        if shell.run("nmap -sP -PI %s | grep 'Host is up'" % cmd.ip) == 0:
            success += 1
        time.sleep(cmd.interval)
    if success == cmd.successTimes:
        rsp.result = self.RET_SUCCESS
        return jsonobject.dumps(rsp)
    if success == 0:
        rsp.result = self.RET_FAILURE
        return jsonobject.dumps(rsp)
    # WE SUCCEED A FEW TIMES, IT SEEMS THE CONNECTION NOT STABLE
    success = 0
    for i in range(0, cmd.successTimes):
        if shell.run("nmap -sP -PI %s | grep 'Host is up'" % cmd.ip) == 0:
            success += 1
        time.sleep(cmd.successInterval)
    if success == cmd.successTimes:
        rsp.result = self.RET_SUCCESS
        return jsonobject.dumps(rsp)
    rsp.result = self.RET_NOT_STABLE
    return jsonobject.dumps(rsp)
def _cleanup_conntrack(self, ips=None, ip_version="ipv4"):
    """Flush conntrack entries — per destination IP when `ips` is given, otherwise all of them."""
    if not ips:
        shell.run("sudo conntrack -D")
        logger.debug('clean up conntrack -D')
        return
    for address in ips:
        shell.run("sudo conntrack -d %s -f %s -D" % (address, ip_version))
        logger.debug('clean up conntrack -d %s -D' % address)
def cancel_download_from_sftp(self, req):
    """Cancel an sftp download: kill processes touching the install path and delete the partial bits."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()
    # kill whatever is still writing to the install path
    shell.run("pkill -9 -f '%s'" % cmd.primaryStorageInstallPath)
    self.do_delete_bits(cmd.primaryStorageInstallPath)
    return jsonobject.dumps(rsp)
def check_tools():
    """Return True when both the `ceph` and `rbd` command-line tools are installed."""
    ceph_rc = shell.run('which ceph')
    rbd_rc = shell.run('which rbd')
    return ceph_rc == 0 and rbd_rc == 0
def delete_qos_rules(target_interface):
    """Remove the inbound-QoS tc qdiscs from the ifb device and the target interface."""
    if not target_interface:
        return
    # delete ifb interface tc rules
    shell.run("tc qdisc del dev %s root >/dev/null 2>&1" % QOS_IFB)
    # delete target interface tc rules
    shell.run("tc qdisc del dev %s ingress >/dev/null 2>&1" % target_interface)
def clean_convert(self, req):
    """Kill any conversion processes working under the per-VM dir and delete it."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()
    clean_up_path = os.path.join(cmd.storagePath, cmd.srcVmUuid)
    # stop writers before removing the directory
    shell.run("pkill -9 -f '%s'" % clean_up_path)
    linux.rm_dir_force(clean_up_path)
    return jsonobject.dumps(rsp)
def clean_convert(self, req):
    """Kill any conversion processes working under the per-VM dir and delete it.

    Fix: the path handed to `/bin/rm -rf` was concatenated unquoted, which broke
    (and was unsafe) for paths containing whitespace or shell metacharacters,
    while the sibling `pkill` invocation already quoted it. Quote both uses.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()
    clean_up_path = os.path.join(cmd.storagePath, cmd.srcVmUuid)
    # stop writers before removing the directory
    shell.run("pkill -9 -f '%s'" % clean_up_path)
    shell.run("/bin/rm -rf '%s'" % clean_up_path)
    return jsonobject.dumps(rsp)
def _cleanup_conntrack(self, ips=None, ip_version="ipv4"):
    """Flush conntrack entries — per destination IP when `ips` is given, otherwise all entries."""
    if ips:
        for ip in ips:
            shell.run("conntrack -d %s -f %s -D" % (ip, ip_version))
            logger.debug('clean up conntrack -d %s -D' % ip)
    else:
        # no IPs specified: flush the whole table
        shell.run("conntrack -D")
        logger.debug('clean up conntrack -D')
def cancel_download_from_sftp(self, req):
    """Cancel an sftp download: kill processes writing to the absolute install path, then delete the bits."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()
    install_abs_path = translate_absolute_path_from_install_path(cmd.primaryStorageInstallPath)
    shell.run("pkill -9 -f '%s'" % install_abs_path)
    self.do_delete_bits(cmd.primaryStorageInstallPath)
    return jsonobject.dumps(rsp)
def delete_lv(path, raise_exception=True):
    """Delete a logical volume and its meta-LV; returns lvremove's return code, or None if the LV is absent."""
    # remove meta-lv if any
    if lv_exists(get_meta_lv_path(path)):
        shell.run("lvremove -y %s" % get_meta_lv_path(path))
    if not lv_exists(path):
        return
    cmd = shell.ShellCmd("lvremove -y %s" % path)
    # raise on failure only when the caller asked for it
    cmd(is_exception=raise_exception)
    return cmd.return_code
def cancel_download_from_kvmhost(self, req):
    """Cancel a download from another KVM host: kill the transfer and remove the partial file."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = kvmagent.AgentResponse()
    install_abs_path = cmd.primaryStorageInstallPath
    shell.run("pkill -9 -f '%s'" % install_abs_path)
    linux.rm_file_force(cmd.primaryStorageInstallPath)
    return jsonobject.dumps(rsp)
def do_sftp_download(self, cmd, pool, image_name):
    """Download an image over ssh into a temporary rbd image, detect its format,
    and install it as `pool/image_name` (converting qcow2 to raw rbd if needed).
    """
    hostname = cmd.hostname
    prikey = cmd.sshKey
    port = cmd.sshPort
    # optional bandwidth throttle via pv
    if cmd.bandWidth is not None:
        bandWidth = 'pv -q -L %s |' % cmd.bandWidth
    else:
        bandWidth = ''
    tmp_image_name = 'tmp-%s' % image_name
    prikey_file = linux.write_to_temp_file(prikey)

    # rollback: drop the temp image if the overall operation fails later
    @rollbackable
    def _0():
        tpath = "%s/%s" % (pool, tmp_image_name)
        shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))
    _0()
    try:
        # clear any stale temp image, then stream the remote file into rbd
        shell.run('rbd rm %s/%s' % (pool, tmp_image_name))
        shell.call('set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i %s root@%s cat %s | %s rbd import --image-format 2 - %s/%s' %
                   (port, prikey_file, hostname, remote_shell_quote(cmd.backupStorageInstallPath), bandWidth, pool, tmp_image_name))
    finally:
        os.remove(prikey_file)

    @rollbackable
    def _1():
        shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
    _1()
    file_format = shell.call(
        "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" %
        (pool, tmp_image_name))
    file_format = file_format.strip()
    if file_format not in ['qcow2', 'raw']:
        raise Exception('unknown image format: %s' % file_format)
    # replace any previous final image
    shell.run('rbd rm %s/%s' % (pool, image_name))
    if file_format == 'qcow2':
        conf_path = None
        try:
            # force format-2 rbd images via a temporary ceph.conf override
            with open('/etc/ceph/ceph.conf', 'r') as fd:
                conf = fd.read()
                conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                conf_path = linux.write_to_temp_file(conf)
            shell.call('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (
                pool, tmp_image_name, pool, image_name, conf_path))
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
        finally:
            if conf_path:
                os.remove(conf_path)
    else:
        # raw images just need a rename
        shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))
def lichbd_export(src_path, dst_path):
    """Export a lichbd volume from src_path to dst_path; removes the partial destination and raises on failure."""
    protocol = get_protocol()
    export_cmd = lichbdfactory.get_lichbd_version_class().LICHBD_CMD_VOL_EXPORT
    shellcmd = call_try(export_cmd + ' %s %s -p %s 2>/dev/null' % (src_path, dst_path, protocol))
    if shellcmd.return_code != 0:
        # clean up the partially written destination before raising
        shell.run("rm -rf %s" % dst_path)
        raise_exp(shellcmd)
    return shellcmd
def config_qos(self, req):
    """Configure inbound bandwidth QoS for v2v traffic: mirror vCenter-facing interfaces
    onto an ifb device with an htb rate limit, then add per-host tc filters for every
    VMware host currently being converted by virt-v2v.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()
    # ensure the ifb device exists and is up before attaching qdiscs to it
    shell.run("modprobe ifb; ip link set %s up" % QOS_IFB)
    if cmd.vCenterIps:
        interface_setup_rule = []

        def set_up_qos_rules(target_interface):
            # a bare number in tc class use bytes as unit
            config_qos_cmd = "tc qdisc add dev {0} ingress;" \
                             "tc filter add dev {0} parent ffff: protocol ip u32 match " \
                             "u32 0 0 flowid 1:1 action mirred egress redirect dev {1};" \
                             "tc qdisc del dev {1} root >/dev/null 2>&1;" \
                             "tc qdisc add dev {1} root handle 1: htb;" \
                             "tc class add dev {1} parent 1: classid 1:1 htb rate {2} burst 100m" \
                .format(target_interface, QOS_IFB, cmd.inboundBandwidth)
            return shell.run(config_qos_cmd)

        for vcenter_ip in cmd.vCenterIps:
            # the vCenter address may be a hostname; fall back to resolving it
            interface = self._get_network_interface_to_ip_address(vcenter_ip)
            if interface is None:
                interface = self._get_network_interface_to_ip_address(self._get_ip_address_to_domain(vcenter_ip))
            if interface and interface not in interface_setup_rule:
                if set_up_qos_rules(interface) == 0:
                    interface_setup_rule.append(interface)
                else:
                    logger.debug("Failed to set up qos rules on interface %s" % interface)
                    continue
        list_url_cmd = shell.ShellCmd("ps aux | grep '[v]irt-v2v' | grep -v convert.ret | awk '{print $13}'")
        list_url_cmd(False)
        limited_interface = []
        if list_url_cmd.return_code == 0 and list_url_cmd.stdout:
            # will get a url format like
            # vpx://administrator%[email protected]/Datacenter-xxx/Cluster-xxx/127.0.0.1?no_verify=1
            for url in list_url_cmd.stdout.split('\n'):
                vmware_host_ip = url.split('/')[-1].split('?')[0]
                interface = self._get_network_interface_to_ip_address(vmware_host_ip)
                if interface:
                    cmdstr = "tc filter replace dev %s protocol ip parent 1: prio 1 u32 match ip src %s/32 flowid 1:1" \
                             % (QOS_IFB, vmware_host_ip)
                    if shell.run(cmdstr) != 0:
                        logger.debug("Failed to set up tc filter on interface %s for ip %s" % (interface, vmware_host_ip))
                    else:
                        limited_interface.append(interface)
    return jsonobject.dumps(rsp)
def clean(self, req):
    """Kill processes using the cleanup path (per-VM dir or whole storage path) and remove it."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()
    # no VM uuid means clean the whole storage path
    if not cmd.srcVmUuid:
        cleanUpPath = cmd.storagePath
    else:
        cleanUpPath = os.path.join(cmd.storagePath, cmd.srcVmUuid)
    shell.run("pkill -9 -f '%s'" % cleanUpPath)
    linux.rm_dir_force(cleanUpPath)
    return jsonobject.dumps(rsp)
def add_interface_to_bridge(self, req):
    """Move a physical interface to the requested bridge, detaching it from its current bridge first."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = kvmagent.AgentResponse()
    # find which bridge (if any) currently owns the interface
    oldbr = shell.call("""brctl show | awk '$4 == "%s" {print $1}'""" % cmd.physicalInterfaceName).strip()
    if oldbr == cmd.bridgeName:
        return jsonobject.dumps(rsp)
    if oldbr:
        shell.run("brctl delif %s %s" % (oldbr, cmd.physicalInterfaceName))
    shell.check_run("brctl addif %s %s" % (cmd.bridgeName, cmd.physicalInterfaceName))
    return jsonobject.dumps(rsp)
def fact(self, req):
    """Collect host facts: OS info, qemu-img/libvirt versions, IPs, product names, and CPU model/frequency."""
    rsp = HostFactResponse()
    rsp.osDistribution, rsp.osVersion, rsp.osRelease = platform.dist()
    # to be compatible with both `2.6.0` and `2.9.0(qemu-kvm-ev-2.9.0-16.el7_4.8.1)`
    qemu_img_version = shell.call("qemu-img --version | grep 'qemu-img version' | cut -d ' ' -f 3 | cut -d '(' -f 1")
    qemu_img_version = qemu_img_version.strip('\t\r\n ,')
    # non-loopback IPv4 addresses, excluding interfaces whose names end in "zs"
    ipV4Addrs = shell.call("ip addr | grep -w inet | grep -v 127.0.0.1 | awk '!/zs$/{print $2}' | cut -d/ -f1")
    system_product_name = shell.call('dmidecode -s system-product-name').strip()
    baseboard_product_name = shell.call('dmidecode -s baseboard-product-name').strip()
    rsp.qemuImgVersion = qemu_img_version
    rsp.libvirtVersion = self.libvirt_version
    rsp.ipAddresses = ipV4Addrs.splitlines()
    # prefer the system product name, fall back to the baseboard, then 'unknown'
    rsp.systemProductName = system_product_name if system_product_name else baseboard_product_name
    if not rsp.systemProductName:
        rsp.systemProductName = 'unknown'
    if IS_AARCH64:
        # FIXME how to check vt of aarch64?
        rsp.hvmCpuFlag = 'vt'
        cpu_model = None
        try:
            cpu_model = self._get_host_cpu_model()
        except AttributeError:
            logger.debug("maybe XmlObject has no attribute model, use uname -p to get one")
        if cpu_model is None:
            # last element of uname is the processor type
            cpu_model = os.uname()[-1]
        rsp.cpuModelName = cpu_model
        rsp.hostCpuModelName = "aarch64"
        cpuMHz = shell.call("lscpu | awk '/max MHz/{ print $NF }'")
        # in case lscpu doesn't show cpu max mhz
        cpuMHz = "2500.0000" if cpuMHz.strip() == '' else cpuMHz
        rsp.cpuGHz = '%.2f' % (float(cpuMHz) / 1000)
    else:
        # vmx = Intel VT-x, svm = AMD-V
        if shell.run('grep vmx /proc/cpuinfo') == 0:
            rsp.hvmCpuFlag = 'vmx'
        if not rsp.hvmCpuFlag:
            if shell.run('grep svm /proc/cpuinfo') == 0:
                rsp.hvmCpuFlag = 'svm'
        rsp.cpuModelName = self._get_host_cpu_model()
        # first line: model name; second line: current cpu MHz
        host_cpu_info = shell.call("grep -m2 -P -o '(model name|cpu MHz)\s*:\s*\K.*' /proc/cpuinfo").splitlines()
        host_cpu_model_name = host_cpu_info[0]
        rsp.hostCpuModelName = host_cpu_model_name
        transient_cpuGHz = '%.2f' % (float(host_cpu_info[1]) / 1000)
        # prefer the static frequency embedded in the model name (e.g. "... @ 2.40GHz")
        static_cpuGHz_re = re.search('[0-9.]*GHz', host_cpu_model_name)
        rsp.cpuGHz = static_cpuGHz_re.group(0)[:-3] if static_cpuGHz_re else transient_cpuGHz
    return jsonobject.dumps(rsp)
def delete_lv(path, raise_exception=True): logger.debug("deleting lv %s" % path) # remove meta-lv if any if lv_exists(get_meta_lv_path(path)): shell.run("lvremove -y %s" % get_meta_lv_path(path)) if not lv_exists(path): return if raise_exception: o = bash.bash_errorout("lvremove -y %s" % path) else: o = bash.bash_o("lvremove -y %s" % path) return o
def clean(self, req):
    """Kill processes using the cleanup path (per-VM dir or whole storage path) and remove it.

    Fix: the path handed to `/bin/rm -rf` was concatenated unquoted, which broke
    (and was unsafe) for paths containing whitespace or shell metacharacters,
    while the sibling `pkill` invocation already quoted it. Quote both uses.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentRsp()
    # no VM uuid means clean the whole storage path
    if not cmd.srcVmUuid:
        cleanUpPath = cmd.storagePath
    else:
        cleanUpPath = os.path.join(cmd.storagePath, cmd.srcVmUuid)
    shell.run("pkill -9 -f '%s'" % cleanUpPath)
    cmdstr = "/bin/rm -rf '%s'" % cleanUpPath
    shell.run(cmdstr)
    return jsonobject.dumps(rsp)
def lichbd_export(src_path, dst_path):
    """Export a lichbd volume from src_path to dst_path; removes the partial destination and raises on failure."""
    shellcmd = None
    protocol = get_protocol()
    shellcmd = call_try(
        lichbdfactory.get_lichbd_version_class().LICHBD_CMD_VOL_EXPORT +
        ' %s %s -p %s 2>/dev/null' % (src_path, dst_path, protocol))
    if shellcmd.return_code == 0:
        return shellcmd
    else:
        # clean up the partially written destination before raising
        shell.run("rm -rf %s" % dst_path)
        raise_exp(shellcmd)
def lichbd_copy(src_path, dst_path):
    """Copy a lichbd volume from src_path to dst_path; cleans up the destination and raises on failure."""
    shellcmd = None
    protocol = get_protocol()
    shellcmd = call_try(lichbdfactory.get_lichbd_version_class().LICHBD_CMD_VOL_COPY+' %s %s -p %s 2>/dev/null' % (src_path, dst_path, protocol))
    if shellcmd.return_code == 0:
        return shellcmd
    else:
        # a ":"-prefixed destination is a local filesystem path — TODO confirm convention
        if dst_path.startswith(":"):
            shell.run("rm -rf %s" % (dst_path.lstrip(":")))
        else:
            lichbd_rm(dst_path)
        raise_exp(shellcmd)
def lichbd_copy(src_path, dst_path):
    """Copy a lichbd volume from src_path to dst_path; cleans up the destination and raises on failure."""
    shellcmd = None
    protocol = get_protocol()
    shellcmd = call_try(
        lichbdfactory.get_lichbd_version_class().LICHBD_CMD_VOL_COPY +
        ' %s %s -p %s 2>/dev/null' % (src_path, dst_path, protocol))
    if shellcmd.return_code == 0:
        return shellcmd
    else:
        # a ":"-prefixed destination is a local filesystem path — TODO confirm convention
        if dst_path.startswith(":"):
            shell.run("rm -rf %s" % (dst_path.lstrip(":")))
        else:
            lichbd_rm(dst_path)
        raise_exp(shellcmd)
def _set_intel_ept(self, new_ept):
    """Enable/disable Intel EPT by reloading kvm-intel; returns an error string or None on success.

    Also persists the setting to /etc/modprobe.d so it survives reboots.
    """
    error = None
    old_ept = self._get_intel_ept()
    if new_ept != old_ept:
        # %d turns the boolean into 1/0 for the module parameter
        param = "ept=%d" % new_ept
        # reloading kvm-intel fails while VMs are running, hence the message
        if shell.run("modprobe -r kvm-intel") != 0 or shell.run("modprobe kvm-intel %s" % param) != 0:
            error = "failed to reload kvm-intel, please stop the running VM on the host and try again."
        else:
            with open('/etc/modprobe.d/intel-ept.conf', 'w') as writer:
                writer.write("options kvm_intel %s" % param)
            logger.info("_set_intel_ept(%s) OK." % new_ept)
    if error is not None:
        logger.warn("_set_intel_ept: %s" % error)
    return error
def cp(self, req):
    """Copy an rbd volume (rbd cp / deep cp) with progress reporting; removes the partial destination on failure."""
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    src_path = self._normalize_install_path(cmd.srcPath)
    dst_path = self._normalize_install_path(cmd.dstPath)
    if cmd.sendCommandUrl:
        Report.url = cmd.sendCommandUrl
    report = Report(cmd.threadContext, cmd.threadContextStack)
    report.processType = "CephCpVolume"
    _, PFILE = tempfile.mkstemp()
    # progress file gets rbd's terminal output; map its 0-100% into this stage's range
    stage = (cmd.threadContext['task-stage'], "10-90")[cmd.threadContext['task-stage'] is None]

    def _get_progress(synced):
        if not Report.url:
            return synced
        logger.debug("getProgress in ceph-agent")
        # extract the last percentage (0-100) printed by rbd
        percent = shell.call(
            "tail -1 %s | grep -o '1\?[0-9]\{1,2\}%%' | tail -1" % PFILE).strip(' \t\n\r%')
        if percent and Report.url:
            report.progress_report(get_exact_percent(percent, stage), "report")
        return synced

    def _get_cp_cmd():
        # `deep cp` also copies snapshots; only available on newer rbd releases
        return "deep cp" if shell.run(
            "rbd help deep cp > /dev/null") == 0 else "cp"

    t_shell = traceable_shell.get_shell(cmd)
    _, _, err = t_shell.bash_progress_1(
        self._wrap_shareable_cmd(
            cmd, 'rbd %s %s %s 2> %s' %
            (_get_cp_cmd(), src_path, dst_path, PFILE)), _get_progress)
    if os.path.exists(PFILE):
        os.remove(PFILE)
    if err:
        # remove the partially copied destination before surfacing the error
        shell.run('rbd rm %s' % dst_path)
        raise err
    rsp = CpRsp()
    rsp.size = self._get_file_size(dst_path)
    self._set_capacity_to_response(rsp)
    return jsonobject.dumps(rsp)
def check_libguestfs():
    """Run libguestfs-test-tool and record a failure on the enclosing `rsp`.

    Closure: `rsp` comes from the enclosing scope; presumably this is a nested
    helper inside a request handler — confirm against the caller.
    """
    cmd = "/usr/bin/libguestfs-test-tool > {} 2>&1".format(LIBGUESTFS_TEST_LOG_PATH)
    if shell.run(cmd) != 0:
        rsp.success = False
        rsp.error = "libguestfs test failed, log file: %s" % LIBGUESTFS_TEST_LOG_PATH
        return jsonobject.dumps(rsp)
def _migrate_image(self, image_uuid, image_size, src_install_path,
                   dst_install_path, dst_mon_addr, dst_mon_user,
                   dst_mon_passwd, dst_mon_port):
    """Stream an rbd image to a remote ceph cluster over ssh and verify with md5.

    Returns 0 on success, the shell return code on transfer failure, or -1 on md5 mismatch.
    """
    src_install_path = self._normalize_install_path(src_install_path)
    dst_install_path = self._normalize_install_path(dst_install_path)
    # remote side imports the stream and records its own md5 for verification
    ssh_cmd, tmp_file = linux.build_sshpass_cmd(
        dst_mon_addr, dst_mon_passwd,
        'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s' %
        (image_uuid, dst_install_path), dst_mon_user, dst_mon_port)
    rst = shell.run(
        "rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | %s" %
        (src_install_path, image_uuid, ssh_cmd))
    linux.rm_file_force(tmp_file)
    if rst != 0:
        return rst
    src_md5 = self._read_file_content('/tmp/%s_src_md5' % image_uuid)
    dst_md5 = linux.sshpass_call(dst_mon_addr, dst_mon_passwd,
                                 'cat /tmp/%s_dst_md5' % image_uuid,
                                 dst_mon_user, dst_mon_port)
    if src_md5 != dst_md5:
        return -1
    else:
        return 0
def deploy_2ha(scenarioConfig, scenarioFile):
    """Deploy a two-node management HA setup (zsha2) across the first two hosts of the scenario,
    using the third host as the time server and the environment-provided HA VIP.
    """
    mn_ip1 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 0).ip_
    mn_ip2 = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 1).ip_
    node3_ip = get_host_by_index_in_scenario_file(scenarioConfig, scenarioFile, 2).ip_
    vip = os.environ['zstackHaVip']
    # bind each management node to its own IP and allow traffic to the VIP
    change_ip_cmd1 = "zstack-ctl change_ip --ip=" + mn_ip1
    ssh.execute(change_ip_cmd1, mn_ip1, "root", "password", False, 22)
    iptables_cmd1 = "iptables -I INPUT -d " + vip + " -j ACCEPT"
    ssh.execute(iptables_cmd1, mn_ip1, "root", "password", False, 22)
    change_ip_cmd2 = "zstack-ctl change_ip --ip=" + mn_ip2
    ssh.execute(change_ip_cmd2, mn_ip2, "root", "password", False, 22)
    iptables_cmd2 = "iptables -I INPUT -d " + vip + " -j ACCEPT"
    ssh.execute(iptables_cmd2, mn_ip2, "root", "password", False, 22)
    # the woodpecker VM's own 172.20.x.x address locates the local artifact dir
    woodpecker_vm_ip = shell.call("ip r | grep src | grep '^172.20' | head -1 | awk '{print $NF}'").strip()
    zsha2_path = "/home/%s/zsha2" % woodpecker_vm_ip
    ssh.scp_file(zsha2_path, "/root/zsha2", mn_ip1, "root", "password")
    ssh.execute("chmod a+x /root/zsha2", mn_ip1, "root", "password", False, 22)
    zstack_hamon_path = "/home/%s/zstack-hamon" % woodpecker_vm_ip
    ssh.scp_file(zstack_hamon_path, "/root/zstack-hamon", mn_ip1, "root", "password")
    ssh.execute("chmod a+x /root/zstack-hamon", mn_ip1, "root", "password", False, 22)
    cmd = '/root/zsha2 install-ha -nic br_zsn0 -gateway 172.20.0.1 -slave "root:password@' + mn_ip2 + '" -vip ' + vip + ' -time-server ' + node3_ip + ' -db-root-pw zstack.mysql.password -yes'
    test_util.test_logger("deploy 2ha by cmd: %s" % (cmd))
    ssh_cmd = 'sshpass -p password ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null'
    # skip installation when zsha2 is already running on the first node
    if shell.run("%s %s zsha2 status" % (ssh_cmd, mn_ip1)) != 0:
        ret, output, stderr = ssh.execute(cmd, mn_ip1, "root", "password", False, 22)
        test_util.test_logger("cmd=%s; ret=%s; output=%s; stderr=%s" % (cmd, ret, output, stderr))
        if ret != 0:
            test_util.test_fail("deploy 2ha failed")
def image_already_pushed(self, hostname, imf):
    """Return True when the image already exists on the remote image store host."""
    install_path = self._build_install_path(imf.name, imf.id)
    info_cmd = '%s -url %s:%s info %s' % (
        self.ZSTORE_CLI_PATH, hostname, self.ZSTORE_DEF_PORT, install_path)
    # zero exit status from the `info` query means the image is present
    return shell.run(info_cmd) == 0
def retry_if_needed(ret):
    """Raise RetryException to rerun the v2v long job when guestfs launch failed, up to max_retry_times.

    Closure: `retry_counter`, `max_retry_times`, `log_path` and `cmd` come from
    the enclosing scope.
    """
    if ret == 0:
        return
    # only retry for the known-transient guestfs launch failure
    if retry_counter[0] != max_retry_times and shell.run("grep -q 'guestfs_launch failed' %s" % log_path) == 0:
        retry_counter[0] += 1
        raise RetryException(
            "launch guestfs failed, rerun v2v longjob %s" % cmd.longJobUuid)
def update_dependency(self, req):
    """Install the kvmagent dependency list from the zstack-mn/qemu-kvm-ev-mn yum repos, validating both first."""
    rsp = UpdateDependencyRsp()
    yum_cmd = "yum --enablerepo=* clean all && yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install `cat /var/lib/zstack/dependencies` -y"
    # each precondition failure short-circuits with a specific error message
    if shell.run("which yum") != 0:
        rsp.success = False
        rsp.error = "no yum command found, cannot update kvmagent dependencies"
    elif shell.run("yum --disablerepo=* --enablerepo=zstack-mn repoinfo") != 0:
        rsp.success = False
        rsp.error = "no zstack-mn repo found, cannot update kvmagent dependencies"
    elif shell.run("yum --disablerepo=* --enablerepo=qemu-kvm-ev-mn repoinfo") != 0:
        rsp.success = False
        rsp.error = "no qemu-kvm-ev-mn repo found, cannot update kvmagent dependencies"
    elif shell.run(yum_cmd) != 0:
        rsp.success = False
        rsp.error = "failed to update kvmagent dependencies using zstack-mn,qemu-kvm-ev-mn repo"
    else:
        logger.debug("successfully run: %s" % yum_cmd)
    return jsonobject.dumps(rsp)
def set_up_qos_rules(target_interface):
    """Mirror ingress traffic from target_interface onto the ifb device and rate-limit it with htb.

    Closure: `QOS_IFB` and `cmd.inboundBandwidth` come from the enclosing scope.
    Returns the shell return code (0 on success).
    """
    # a bare number in tc class use bytes as unit
    config_qos_cmd = "tc qdisc add dev {0} ingress;" \
                     "tc filter add dev {0} parent ffff: protocol ip u32 match " \
                     "u32 0 0 flowid 1:1 action mirred egress redirect dev {1};" \
                     "tc qdisc del dev {1} root >/dev/null 2>&1;" \
                     "tc qdisc add dev {1} root handle 1: htb;" \
                     "tc class add dev {1} parent 1: classid 1:1 htb rate {2} burst 100m" \
        .format(target_interface, QOS_IFB, cmd.inboundBandwidth)
    return shell.run(config_qos_cmd)
def _get_origin_format(path):
    """Read the first 0x9007 bytes of an image at `path` (http/ftp, sftp pipe, or local file)
    and detect its format; returns "raw" when too short to contain a qcow2 header.

    NOTE(review): `pipe_path` and `scp_to_pipe_cmd` are not defined here — presumably
    this is a nested function closing over an enclosing scope; confirm against the caller.
    """
    # enough bytes to cover the qcow2 header region inspected by get_image_format_from_buf
    qcow2_length = 0x9007
    if path.startswith('http://') or path.startswith('https://') or path.startswith('ftp://'):
        resp = urllib2.urlopen(path)
        qhdr = resp.read(qcow2_length)
        resp.close()
    elif path.startswith('sftp://'):
        fd, tmp_file = tempfile.mkstemp()
        # drain only the header bytes from the named pipe, then kill the writer
        get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (qcow2_length, pipe_path, tmp_file)
        clean_cmd = "pkill -f %s" % pipe_path
        shell.run('%s & %s && %s' % (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
        qhdr = os.read(fd, qcow2_length)
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
    else:
        resp = open(path)
        qhdr = resp.read(qcow2_length)
        resp.close()
    if len(qhdr) < qcow2_length:
        return "raw"
    return get_image_format_from_buf(qhdr)
def try_remount_fs():
    """Keep retrying to remount a fenced filesystem every 180s while its fencer is active.

    Closure: `mount_path`, `ps_uuid`, `created_time`, `url` and `options` come from
    the enclosing scope.
    """
    if mount_path_is_nfs(mount_path):
        shell.run("systemctl start nfs-client.target")
    while self.run_fencer(ps_uuid, created_time):
        # already back? just report and stop retrying
        if linux.is_mounted(path=mount_path) and touch_heartbeat_file():
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug("fs[uuid:%s] is reachable again, report to management" % ps_uuid)
            break
        try:
            logger.debug('fs[uuid:%s] is unreachable, it will be remounted after 180s' % ps_uuid)
            time.sleep(180)
            # fencer may have been cancelled during the sleep
            if not self.run_fencer(ps_uuid, created_time):
                break
            linux.remount(url, mount_path, options)
            self.report_storage_status([ps_uuid], 'Connected')
            logger.debug("remount fs[uuid:%s] success, report to management" % ps_uuid)
            break
        except:
            # remount failed; loop and try again on the next iteration
            logger.warn('remount fs[uuid:%s] fail, try again soon' % ps_uuid)
            kill_progresses_using_mount_path(mount_path)
    logger.debug('stop remount fs[uuid:%s]' % ps_uuid)
def _migrate_image(self, image_uuid, image_size, src_install_path, dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
    """Stream an rbd image to a remote ceph cluster over sshpass/ssh and verify with md5.

    Returns 0 on success, the shell return code on transfer failure, or -1 on md5 mismatch.
    """
    src_install_path = self._normalize_install_path(src_install_path)
    dst_install_path = self._normalize_install_path(dst_install_path)
    # tee >(md5sum ...) records a checksum on each side without re-reading the stream
    rst = shell.run("rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | sshpass -p %s ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s -p %s 'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s'" % (src_install_path, image_uuid, linux.shellquote(dst_mon_passwd), dst_mon_user, dst_mon_addr, dst_mon_port, image_uuid, dst_install_path))
    if rst != 0:
        return rst
    src_md5 = self._read_file_content('/tmp/%s_src_md5' % image_uuid)
    dst_md5 = shell.call("sshpass -p %s ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s -p %s 'cat /tmp/%s_dst_md5'" % (linux.shellquote(dst_mon_passwd), dst_mon_user, dst_mon_addr, dst_mon_port, image_uuid))
    if src_md5 != dst_md5:
        return -1
    else:
        return 0
def migrate_bits(self, req):
    """Copy bits between NFS primary storages (mounting the destination if needed) and verify with md5 sums.

    Fix: the failure log call passed a single tuple for two %s placeholders
    (`logger.warn(fmt, (a, b))`), which raises a formatting error inside logging
    instead of emitting the message; use explicit %-interpolation instead.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = NfsToNfsMigrateBitsRsp()
    mount_path = cmd.mountPath
    dst_folder_path = cmd.dstFolderPath
    temp_dir = None
    try:
        if not cmd.isMounted:
            linux.is_valid_nfs_url(cmd.url)
            temp_dir = tempfile.mkdtemp()
            # dst folder is absolute path
            mount_path = temp_dir + mount_path
            dst_folder_path = temp_dir + dst_folder_path
            if not linux.is_mounted(mount_path, cmd.url):
                linux.mount(cmd.url, mount_path, cmd.options, "nfs4")
        # Report task progress based on flow chain for now
        # To get more accurate progress, we need to report from here someday
        # begin migration, then check md5 sums
        shell.call("mkdir -p %s; cp -r %s/* %s; sync" % (dst_folder_path, cmd.srcFolderPath, dst_folder_path))
        # order-independent digest: md5 of the sorted per-file md5 list
        src_md5 = shell.call(
            "find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % cmd.srcFolderPath)
        dst_md5 = shell.call(
            "find %s -type f -exec md5sum {} \; | awk '{ print $1 }' | sort | md5sum" % dst_folder_path)
        if src_md5 != dst_md5:
            rsp.error = "failed to copy files from %s to %s, md5sum not match" % (cmd.srcFolderPath, dst_folder_path)
            rsp.success = False
        if not cmd.isMounted:
            linux.umount(mount_path)
    finally:
        if temp_dir is not None:
            return_code = shell.run("mount | grep '%s'" % temp_dir)
            if return_code != 0:
                # in case dir is not empty
                try:
                    os.rmdir(temp_dir)
                except OSError as e:
                    logger.warn("delete temp_dir %s failed: %s" % (temp_dir, str(e)))
            else:
                logger.warn("temp_dir %s still had mounted destination primary storage, skip cleanup operation" % temp_dir)
    return jsonobject.dumps(rsp)
def cancel_sftp_download(self, req):
    """Cancel an in-flight sftp download and remove its target rbd images.

    Kills any process whose command line mentions the temp or final image
    name, then waits for both images to be removed from the pool.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    rsp = AgentResponse()

    pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
    tmp_image_name = 'tmp-%s' % image_name

    def image_gone():
        # True when the final image no longer shows up in the pool
        return shell.run("rbd ls %s | grep -q %s" % (pool, image_name)) != 0

    def drop_image(target_name):
        # success when the image does not exist or was removed
        return shell.run("rbd info {0}/{1} || rbd rm {0}/{1}".format(pool, target_name)) == 0

    # nothing to cancel if the image never materialized
    if image_gone():
        return jsonobject.dumps(rsp)

    for victim in (tmp_image_name, image_name):
        shell.run("pkill -9 -f '%s'" % victim)
        linux.wait_callback_success(drop_image, victim, timeout=30)

    if not image_gone():
        rsp.set_err("remove image %s/%s fail" % (pool, image_name))
    return jsonobject.dumps(rsp)
def after_kill_vm():
    """After VMs were killed, umount the fs and try to remount it.

    Does nothing unless VMs were actually killed and the path was mounted
    by zstack.  If the umount fails while killed VMs are still alive, log
    diagnostics and bail out without remounting.
    """
    if not killed_vm_pids or not mounted_by_zstack:
        return

    try:
        kill_and_umount(mount_path, mount_path_is_nfs(mount_path))
    except UmountException:
        # ps exits 0 when at least one of the killed pids is still alive
        pids_alive = shell.run('ps -p %s' % ' '.join(killed_vm_pids)) == 0
        if pids_alive:
            virsh_list = shell.call("timeout 10 virsh list --all || echo 'cannot obtain virsh list'")
            logger.debug("virsh_list:\n" + virsh_list)
            logger.error('kill vm[pids:%s] failed because of unavailable fs[mountPath:%s].'
                         ' please retry "umount -f %s"' % (killed_vm_pids, mount_path, mount_path))
            # leave the fs alone; remounting now would be unsafe
            return
    try_remount_fs()
def update_os(self, req):
    """Update the host OS from the zstack-mn/qemu-kvm-ev-mn yum repos.

    Runs a series of preflight checks (yum present, both repos reachable)
    and finally the update itself; the first failing step sets the error
    on the response and stops.
    """
    cmd = jsonobject.loads(req[http.REQUEST_BODY])
    exclude = "--exclude=" + cmd.excludePackages if cmd.excludePackages else ""
    yum_cmd = "yum --enablerepo=* clean all && yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn %s update -y" % exclude
    rsp = UpdateHostOSRsp()

    # (shell command, error message on non-zero exit) — evaluated in order,
    # exactly matching the original if/elif chain's short-circuiting
    steps = (
        ("which yum", "no yum command found, cannot update host os"),
        ("yum --disablerepo=* --enablerepo=zstack-mn repoinfo", "no zstack-mn repo found, cannot update host os"),
        ("yum --disablerepo=* --enablerepo=qemu-kvm-ev-mn repoinfo", "no qemu-kvm-ev-mn repo found, cannot update host os"),
        (yum_cmd, "failed to update host os using zstack-mn,qemu-kvm-ev-mn repo"),
    )
    for step_cmd, err in steps:
        if shell.run(step_cmd) != 0:
            rsp.success = False
            rsp.error = err
            break
    else:
        logger.debug("successfully run: %s" % yum_cmd)
    return jsonobject.dumps(rsp)
def deploy_vbmc(host_ips):
    """Install virtualbmc on every host in the space-separated *host_ips*.

    Hosts that already have the ``vbmc`` binary in PATH are skipped.
    Installation pulls libvirt libs/devel from the local DVD repo,
    bootstraps pip from the internal mirror and installs a pinned
    virtualbmc tarball.
    """
    # loop-invariant: the ssh option prefix is identical for every host,
    # so build it once instead of on each iteration
    ssh_cmd = 'sshpass -p password ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null'
    for host_ip in host_ips.strip().split(' '):
        test_util.test_logger('Candidate host ip is %s' % host_ip)
        if shell.run("%s %s which vbmc" % (ssh_cmd, host_ip)) != 0:
            # vbmc needs the libvirt bindings: libs first, then devel
            # (strings flattened to one line; the original's backslash
            # continuations embedded source indentation into the command,
            # which the remote shell ignores anyway)
            shell.call('%s %s yum --disablerepo=epel install -y /opt/zstack-dvd/Extra/qemu-kvm-ev/libvirt-4.1.0/libvirt-libs-*' % (ssh_cmd, host_ip))
            shell.call('%s %s yum --disablerepo=epel install -y /opt/zstack-dvd/Extra/qemu-kvm-ev/libvirt-4.1.0/libvirt-devel-*' % (ssh_cmd, host_ip))
            # bootstrap pip, then install the pinned virtualbmc snapshot
            shell.call('%s %s wget http://192.168.200.100/mirror/scripts/get-pip.py' % (ssh_cmd, host_ip))
            shell.call('%s %s python get-pip.py' % (ssh_cmd, host_ip))
            shell.call('%s %s wget http://192.168.200.100/mirror/scripts/virtualbmc_190223.tar.gz' % (ssh_cmd, host_ip))
            shell.call('%s %s "tar zxvf virtualbmc_190223.tar.gz && cd virtualbmc && pip install *"' % (ssh_cmd, host_ip))
            test_util.test_logger('Virtualbmc has been deployed on Host')
def setup_static_ip(scenario_file):
    """Assign static IPs to scenario VM nics on the 10.x test network.

    *scenario_file* is the path of a scenario XML file.  For every VM nic
    whose IP starts with "10", run zs-network-setting over ssh on the VM's
    management IP, targeting the bridge ``br_zsn1`` when it exists and the
    plain ``zsn1`` nic otherwise.
    """
    ssh_cmd = 'sshpass -p password ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null'
    # 'with' closes the file on exit; the original's explicit fd.close()
    # inside the block was redundant and has been dropped
    with open(scenario_file, 'r') as fd:
        xmlstr = fd.read()
    # parse into a new name instead of shadowing the path parameter
    scenario = xmlobject.loads(xmlstr)
    for vm in xmlobject.safe_list(scenario.vms.vm):
        mnip = vm.managementIp_
        if not xmlobject.has_element(vm, 'ips'):
            continue
        for ip in xmlobject.safe_list(vm.ips.ip):
            nic_ip = ip.ip_
            if not nic_ip.startswith("10"):
                continue
            # prefer the bridge when present on the VM, else the raw nic
            if shell.run("%s %s 'ip a|grep br_zsn1'" % (ssh_cmd, mnip)) == 0:
                nic = "br_zsn1"
            else:
                nic = "zsn1"
            netmask = "255.255.255.0"
            # '|exit 0' makes the call best-effort on the remote side
            shell.call("%s %s zs-network-setting -i %s %s %s|exit 0" % (ssh_cmd, mnip, nic, nic_ip, netmask))
    return
def storage_gateway_fencer(gw):
    """Ping the storage gateway forever; halt this host after repeated misses.

    Every cmd.interval seconds the gateway *gw* is probed with nmap.  A
    reply resets the miss streak; cmd.maxAttempts consecutive misses mean
    the storage is unreachable, so the host shuts itself down.
    """
    misses = 0
    try:
        while True:
            time.sleep(cmd.interval)
            # any reply resets the failure streak
            if shell.run("nmap -sP -PI %s | grep 'Host is up'" % gw) == 0:
                misses = 0
                continue
            misses += 1
            if misses == cmd.maxAttempts:
                logger.warn('failed to ping storage gateway[%s] %s times, we lost connection to the storage,'
                            'shutdown ourselves' % (gw, cmd.maxAttempts))
                shell.call('halt')
    except:
        # best-effort fencer thread: log anything unexpected and exit quietly
        logger.warn(traceback.format_exc())
def heartbeat_file_fencer(heartbeat_file_path):
    """Touch the heartbeat file forever; halt this host after repeated failures.

    Every cmd.interval seconds the file at *heartbeat_file_path* is touched
    with a 15s timeout.  A successful touch resets the miss streak;
    cmd.maxAttempts consecutive failures mean the storage is gone, so the
    host shuts itself down.
    """
    try:
        misses = 0
        while True:
            time.sleep(cmd.interval)
            # a successful touch proves the storage is still writable
            if shell.run('timeout 15 touch %s; exit $?' % heartbeat_file_path) == 0:
                misses = 0
                continue
            misses += 1
            if misses == cmd.maxAttempts:
                logger.warn('failed to touch the heartbeat file[%s] %s times, we lost the connection to the storage,'
                            'shutdown ourselves' % (heartbeat_file_path, cmd.maxAttempts))
                shell.call('halt')
    except:
        # best-effort fencer thread: log anything unexpected and exit quietly
        logger.warn(traceback.format_exc())