def _get_members_from_conf(cls):
    """Return the member host names parsed from ETCD_INITIAL_CLUSTER in /etc/etcd/etcd.conf."""
    cmd = "cat /etc/etcd/etcd.conf | grep '^ETCD_INITIAL_CLUSTER[ =]' | awk -F'\"' '{print $2}'"
    out, err = _exec_shell1(cmd, p=True)
    cluster = out.strip()
    if not cluster:
        raise Exp(errno.EPERM, "not find init cluster, please check it !")
    # each entry looks like "host=http://ip:port"; keep only the host part
    return [entry.split('=')[0] for entry in cluster.split(',') if entry]
def get_part_size_sgdisk(self, part):
    """Return the human-readable size of *part* from sgdisk output, or None if not listed."""
    if not part.startswith("/dev/"):
        part = "/dev/" + part
    if not self.is_part(part):
        raise Exp(errno.EINVAL, os.strerror(errno.EINVAL))
    dev, partnum = re.match(r'(\D+)(\d+)', part).group(1, 2)
    for row in self.dev_check_sgdisk(dev).splitlines():
        hit = re.search(r'\s*(\d+)\s+\d+\s+\d+\s+(\S+ \S+B)\s+\S+\s+.*', row)
        if hit is None or hit.group(1) != partnum:
            continue
        # normalize e.g. "100.0 MiB" -> "100.0MB"
        return hit.group(2).replace(' ', '').replace('i', '')
    return None
def test_exec(cmd):
    """Run *cmd* through the shell and raise Exp on a non-zero exit status.

    A KeyboardInterrupt kills the child and exits the whole process
    with EINTR.
    """
    p = subprocess.Popen(cmd, shell=True)
    try:
        # no pipes are attached, so communicate() returns (None, None);
        # it is used here only to wait for the child to finish
        res = p.communicate()
        ret = p.wait()
        if (ret == 0):
            pass
        else:
            msg = "exec cmd: %s, fail: %s" % (cmd, str(res))
            raise Exp(ret, msg)
    except KeyboardInterrupt as err:
        dwarn("interupted")
        p.kill()
        exit(errno.EINTR)

# script entry: run the test cases when this module is executed
test_coredump()
test_invalid_rw()
def etcd_is_health(cls):
    """
    :return: return 1: etcd cluster health
             return 0: etcd cluster unhealth
    """
    admin = cls.get_admin().strip()
    (out, err, stat) = _exec_remote(admin, u"etcdctl cluster-health")
    if err.strip() != '':
        raise Exp(errno.EPERM, "%s:%s" % (admin, err.strip()))
    if "cluster is healthy" not in out:
        _dwarn("Etcd status is unhealth, please check etcd status")
        return 0
    return 1
def del_conf(abs_conf_path, params_list):
    """Remove exports/clients from the ganesha conf that are no longer in *params_list*.

    abs_conf_path: path of the export conf file to edit.
    params_list:   list of {export_path: clients} dicts describing the desired shares.
    NOTE(review): the grep below reads the global CONF_PATH, not abs_conf_path —
    confirm that is intentional.
    """
    export_set = set()
    _exec = "cat %s | grep Path" % (CONF_PATH)
    try:
        result, err = exec_shell(_exec, p=True, need_return=True, timeout=60)
    except Exception as e:
        raise Exp(e.errno, str(e))
    with open(abs_conf_path, 'r') as fp:
        content = fp.read()
    # deduplicate the desired export paths
    for export in params_list:
        export_path_uss = export.keys()[0].strip(' ')
        export_set.add(export_path_uss)
    path_list = result.split('\n')
    for l_path in path_list:
        path = l_path.split('=')
        if len(path) == 2:
            export_path_conf = path[1].split(';')[0].strip(' ')
            export_id = get_export_id_by_path(content, export_path_conf)
            # check whether the path in the conf file still exists in the shares;
            # if not, drop the whole export
            if export_path_conf not in export_set:
                delete_export_by_id(content, export_id)
                exportmgr = ExportMgr(SERVICE, '/org/ganesha/nfsd/ExportMgr',
                                      'org.ganesha.nfsd.exportmgr')
                status, msg = exportmgr.RemoveExport(export_id)
                status_message(status, msg)
                continue
            # check whether each client in the conf file still exists in the
            # shares; if not, drop that client and push the updated export
            share_clients = get_clients_from_share(params_list, export_path_conf)
            conf_clients = get_clients_by_path(content, export_path_conf)
            for c_client in conf_clients:
                c_Cli = c_client.strip(' ')
                if c_Cli not in share_clients:
                    delete_client_by_host(content, export_id, c_Cli)
                    exportmgr = ExportMgr(SERVICE, '/org/ganesha/nfsd/ExportMgr',
                                          'org.ganesha.nfsd.exportmgr')
                    export_expression = build_expression(export_id)
                    status, msg = exportmgr.UpdateExport(
                        abs_conf_path, export_expression)
                    status_message(status, msg)
def group_get(groupname, groupid):
    """Look up *groupname* via `sdfs.group -G` and verify its name/id.

    Returns (0, err_output) on success, (errno, errmsg) on failure.
    Raises the original exception when it carries no error message.
    """
    try:
        cmd = ["sdfs.group", "-G", groupname]
        out, err = exec_pipe1(cmd, 0, False)
        print(out)
        for line in out.splitlines():
            name = line.split('\t')[0].split(':')[1]
            nameid = line.split('\t')[1].split(':')[1]
            # BUG FIX: the command output fields are strings while callers may
            # pass groupid as an int -- compare as strings
            if name == groupname and nameid == str(groupid):
                print('get group success!')
            else:
                raise Exp(1, "sdfs.group get group fail! \n")
        return (0, err)
    except Exception as e:
        # BUG FIX: the original accessed e.err unconditionally (AttributeError
        # for non-Exp exceptions) and silently returned None when it was falsy
        errmsg = getattr(e, 'err', None)
        if errmsg:
            return (getattr(e, 'errno', 1), errmsg)
        raise
def is_swap(self, dev):
    """Return True when *dev* is an active swap device listed in /proc/swaps."""
    if not dev.startswith("/dev/"):
        dev = "/dev/" + dev
    if not stat.S_ISBLK(os.lstat(dev).st_mode):
        raise Exp(errno.EINVAL, '%s not a block device' % dev)
    with open('/proc/swaps', 'rb') as proc_swaps:
        # skip the header line
        entries = proc_swaps.readlines()[1:]
    for entry in entries:
        fields = entry.split()
        if len(fields) < 3:
            continue
        swaps_dev = fields[0]
        # only resolvable path entries can match a /dev/... device
        if swaps_dev.startswith('/') and os.path.exists(swaps_dev):
            if os.path.realpath(swaps_dev) == dev:
                return True
    return False
def get_dev_interface(self, dev):
    """Return the interface type of *dev* ('ATA' or 'SATA') parsed from
    `hdparm -I`, or None when it cannot be determined."""
    if not dev.startswith("/dev/"):
        dev = "/dev/" + dev
    if not self.is_dev(dev):
        raise Exp(errno.EINVAL, os.strerror(errno.EINVAL))
    interface = None
    try:
        res, err = _exec_pipe1(["hdparm", "-I", dev], 0, False)
        for line in res.splitlines():
            if re.search('ATA device', line) is not None:
                interface = 'ATA'
            # a serial transport line refines the answer to SATA
            if re.search(r'\s*Transport:\s*Serial', line) is not None:
                interface = 'SATA'
    except Exception:
        # hdparm may be missing or fail on virtual devices: best effort
        pass
    # BUG FIX: the original computed `interface` but never returned it,
    # so callers always received None
    return interface
def locks_test():
    """Run LOCKTEST with 10 workers on NFS_LOCK_FILE and verify all workers succeeded.

    Raises Exp when the output reports fewer than all workers passing.
    """
    worker_num = 10
    worker_prefix1 = "%d process of" % worker_num
    worker_prefix2 = "%d process of %d" % (worker_num, worker_num)
    # 10 processes lock file test; success lines look like
    # "10 process of 10 successfully ran test : WRITE ON A READ LOCK"
    cmd = "%s -n %d -f %s" % (LOCKTEST, worker_num, NFS_LOCK_FILE)
    try:
        stdout, stderr = exec_shell(cmd, need_return=True)
        stdout_list = stdout.split('\n')
        for line in stdout_list:
            print(line)
            if ((0 == line.find(worker_prefix1)) and (-1 == line.find(worker_prefix2))):
                # BUG FIX: a bare `raise` with no active exception is itself an
                # error; raise an Exp so the handler below can report it
                raise Exp(errno.EPERM, "not all workers succeeded: %s" % line)
    except Exp as e:
        ret = e.errno
        raise Exp(ret, "%s failed. ret: %d, %s" % (cmd, ret, e))
def test_exec(cmd):
    """Execute *cmd* in a shell; raise Exp with the captured output on failure.

    A KeyboardInterrupt kills the child and exits the process with EINTR.
    """
    p = subprocess.Popen(cmd, shell=True)
    try:
        ret = p.wait()
        stdout, stderr = p.communicate()
        ret = p.returncode
        if (ret == 0):
            return
        # assemble the failure report from command and captured streams
        msg = "cmd: " + cmd
        msg += "\nstdout: " + str(stdout)
        msg += "\nstderr: " + str(stderr)
        raise Exp(ret, msg)
    except KeyboardInterrupt as err:
        dwarn("interupted")
        p.kill()
        exit(errno.EINTR)
def del_cachedev(self, cachedev):
    """Tear down a bcache cache device: drop its core devices, stop the
    cache set, then wipe the superblock."""
    if not self.is_cachedev(cachedev):
        raise Exp(errno.EINVAL, '%s not a cachedev\n' % cachedev)
    cset_uuid = self.get_cset_uuid_by_dev(cachedev)
    coredevs = self.list_coredevs_by_cachedev(cachedev)
    for coredev in coredevs:
        if len(coredev) != 0:
            self.del_coredev_dangerously(coredev)
    if not self.is_all_deleted(coredevs):
        return
    # all core devices are gone: stop the cache set and zero the superblock
    _exec_shell1('echo 1 > /sys/fs/bcache/%s/stop' % cset_uuid, p=True)
    time.sleep(1)
    _exec_shell1('dd if=/dev/zero of=%s count=1 bs=1M oflag=direct' % cachedev, p=True)
def tenant_add(self, name, password, port):
    """Create a minio tenant: make its config directory and write a
    config.json carrying *name*/*password* as access/secret keys."""
    if self._check_exists(name, port):
        raise Exp(errno.EEXIST, "name or port has been used, please check it!")
    # create tenant config directory
    abs_path = "%s/%s_%s" % (MINIO_CONF_PATH, name, port)
    os.mkdir(abs_path)
    # copy the template config.json into place
    exec_shell("cp /opt/minio/config.json %s" % (abs_path), p=False)
    # patch accessKey / secretKey in the copied config
    json_file = "%s/config.json" % (abs_path)
    data = json_load(json_file)
    data["credential"]["accessKey"] = name
    data["credential"]["secretKey"] = password
    json_store(data, json_file)
def dns_list(self, name=None):
    """Print the 'www' record address for *name*'s zone file, or list all
    user-defined zone names from /etc/named.rfc1912.zones when no name is given."""
    if name is not None:
        domain_conf = "/var/named/%s.zone" % (name)
        if not os.path.isfile(domain_conf):
            raise Exp(errno.ENOENT, "not such domain name : %s" % (name))
        cmd = "grep 'www' %s | awk '{print $4}'" % (domain_conf)
        out, err = exec_shell(cmd, p=False, need_return=True)
        print(out.strip())
        return
    # find where the stock reverse zone sits so user zones after it can be listed
    cmd = "grep -n '0.in-addr.arpa' /etc/named.rfc1912.zones | awk -F':' '{print $1}'"
    out, err = exec_shell(cmd, p=False, need_return=True)
    line_num = out.strip()
    cmd = "tail -n +%s /etc/named.rfc1912.zones| grep 'zone ' | awk 'NR > 1 {print $2}' | awk -F'\"' '{print $2}'" % (line_num)
    out, err = exec_shell(cmd, p=False, need_return=True)
    for zone in out.strip().split('\n'):
        print("%s" % (zone))
def poolid(self, pool):
    """Return the pool id parsed from `sdfs stat /<pool>` output.

    Retries forever on EAGAIN/EBUSY/ENONET, and up to retry_max times on
    other errors before raising Exp.
    """
    cmd = "sdfs stat /%s | grep fileid | awk -F '-' '{print $2}'" % (pool)
    # BUG FIX: retry/retry_max were referenced below but never initialized,
    # so a non-retryable error raised NameError instead of the intended Exp
    retry = 0
    retry_max = 10
    while (1):
        try:
            (out, err) = exec_shell(cmd, need_return=True)
            break
        except Exp as e:
            ret = e.errno
            if (ret in [errno.EAGAIN, errno.EBUSY, errno.ENONET]):
                time.sleep(1)
                dwarn("get poolid fail, %s, retry" % (e))
                continue
            else:
                if (retry > retry_max):
                    raise Exp(ret, "get pool id fail: ret: %d, %s" % (ret, e))
                else:
                    time.sleep(1)
                    retry = retry + 1
    # BUG FIX: the parsed id was computed but never returned; strip the
    # trailing newline from the shell pipeline output
    return out.strip()
def configure_global():
    """Build the smb.conf [global] section (AD / LDAP aware).

    NOTE(review): this looks like the first part of a longer function —
    mode/real/real_realm/netbios are initialized here but consumed later;
    confirm against the full source.
    """
    # template for the [global] section; {placeholders} are filled in
    # depending on the configured security mode
    content = """
[global]
{net}
server string = this is centos
security = {mode}
{realm}
encrypt passwords = yes
workgroup = {workgroup}
winbind enum groups = yes
winbind enum users = yes
winbind separator = /
winbind use default domain = yes
template homedir = /home/%U
template shell = /bin/bash
{imap_range}
{imap_domain}
{passdb_backend}
log level = 3
max log size = 500000
aio read size = 16384
aio write size = 16384
cache directory = /dev/shm/sdfs/locks/cache
lock directory = /dev/shm/sdfs/lock
max connections = 0
"""
    mode = None
    real = ''
    errcode = 0
    real_realm = ''
    netbios = get_netbios()
    # read the AD attribute; its absence is not an error
    _exec_attr = "sdfs.attr -g ad /system"
    try:
        out, __ = exec_shell(_exec_attr, need_return=True)
    except Exp, e:
        errcode = e.errno
        # exit codes 126 and 2 just mean the AD attribute is not set yet;
        # anything else is a real failure
        if errcode != 126 and errcode != 2:
            raise Exp(1, "sdfs.attr get ad info failed\n")
def is_mounted(self, dev):
    """Return the mount point of *dev* from /proc/mounts, or None if not mounted."""
    if not dev.startswith("/dev/"):
        dev = "/dev/" + dev
    if not self.is_block(dev):
        raise Exp(errno.EINVAL, '%s not a block device' % dev)
    with open('/proc/mounts', 'rb') as proc_mounts:
        for entry in proc_mounts:
            fields = entry.split()
            if len(fields) < 3:
                continue
            source, mountpoint = fields[0], fields[1]
            if not (source.startswith('/') and os.path.exists(source)):
                continue
            # match either the literal source or its resolved symlink target
            if source == dev or os.path.realpath(source) == dev:
                return mountpoint
    return None
def user_get(username, uid=0, gid=0, pwd=None):
    """Fetch *username* via `sdfs.user -G` and verify name, uid, gid and password.

    Returns (0, err_output) on success, (errno, errmsg) on failure.
    Raises the original exception when it carries no error message.
    """
    try:
        cmd = ["sdfs.user", "-G", username]
        out, err = exec_pipe1(cmd, 0, True)
        lines = out.split('\n')
        name = lines[0].split(':')[1]
        nameuid = lines[1].split(':')[1]
        namegid = lines[2].split(':')[1]
        namepwd = lines[3].split(':')[1]
        # BUG FIX: output fields are strings while uid/gid default to ints,
        # so the original comparison could never succeed -- compare as strings
        if name == username and namegid == str(gid) and \
           nameuid == str(uid) and namepwd == pwd:
            print('get group success!')
        else:
            raise Exp(1, "sdfs.group get group fail! \n")
        return (0, err)
        # BUG FIX: an unreachable `print '\n'` after the return was removed
    except Exception as e:
        # BUG FIX: the original accessed e.err unconditionally (AttributeError
        # for non-Exp exceptions) and silently returned None when it was falsy
        errmsg = getattr(e, 'err', None)
        if errmsg:
            return (getattr(e, 'errno', 1), errmsg)
        raise
def health(node):
    """Run sdfs.health for all services, appending output to the recovery log.

    Retries forever on EAGAIN/EBUSY/ENONET and up to retry_max times on
    other errors before raising Exp.
    """
    retry, retry_max = 0, 10
    cmd = "%s/app/bin/sdfs.health -s all >> %s/recovery.log 2>&1" % (TEST_PATH, LOG_PATH)
    while True:
        try:
            (out, err) = exec_shell(cmd, p=True, need_return=True)
            return
        except Exp as e:
            ret = e.errno
            if ret in [errno.EAGAIN, errno.EBUSY, errno.ENONET]:
                time.sleep(1)
                dwarn("health fail, %s, retry" % (e))
                continue
            if retry > retry_max:
                raise Exp(ret, "_scan fail: ret: %d, %s" % (ret, e))
            time.sleep(1)
            retry += 1
def get_dev_cache(self, dev):
    """Return the write-cache state of *dev* read via `hdparm -W`.

    'Enabled'/'Disabled' for flag 1/0, the raw flag value for anything
    else, or None when no write-caching line is found.
    """
    if not dev.startswith("/dev/"):
        dev = "/dev/" + dev
    if not self.is_dev(dev):
        raise Exp(errno.EINVAL, os.strerror(errno.EINVAL))
    (out, err) = _exec_pipe1(["hdparm", "-W", dev], 0, False)
    cache = None
    for line in out.splitlines():
        m = re.search(r'write-caching\s+=\s+(\d)\s+\(\S+\)', line)
        if m is not None:
            cache = m.group(1)
    if cache is None:
        return None
    return {'1': 'Enabled', '0': 'Disabled'}.get(cache, cache)
def get_dev_info(self, dev):
    '''
    Collect the raid/array info for logical device *dev* from the cached
    self.all_raid tree, e.g. {'pds': '1', 'raid': '0', 'dev': '/dev/sda'}.

    Raises Exp(EINVAL) when *dev* is not found on any adapter.
    '''
    dev_info = {}
    all_raid = self.all_raid
    # walk adapters -> arrays -> logical drives looking for *dev*
    for adp in all_raid:
        for array in all_raid[adp]:
            if array == 'bbu_info':
                # adapter-level BBU entry, not an array
                continue
            for type in all_raid[adp][array]:
                if type == 'logical':
                    for drive, drive_info in all_raid[adp][array][
                            'logical'].iteritems():
                        if dev in drive_info.values():
                            # copy the logical-drive fields, then enrich with
                            # array/adapter level information
                            for key in drive_info:
                                dev_info[key] = drive_info[key]
                            dev_info['pds'] = str(
                                len(all_raid[adp][array]['physical']))
                            dev_info['array'] = array
                            dev_info['smart_support'] = all_raid[adp][
                                array]['smart_support']
                            if 'smart_path' in all_raid[adp][array]:
                                dev_info['smart_path'] = all_raid[adp][
                                    array]['smart_path']
                            dev_info['ld'] = drive
                            dev_info['bbu_info'] = all_raid[adp][
                                'bbu_info']
                            dev_info['disk'] = []
                            # record the inquiry string of every physical disk
                            # backing this array
                            disk_list = all_raid[adp][array]['physical']
                            for disk in disk_list:
                                disk_info = disk_list.get(disk)
                                #dev_info['disk'][disk_info['inq']] = disk_info
                                dev_info['disk'].append(disk_info['inq'])
    if len(dev_info) == 0:
        raise Exp(errno.EINVAL, "%s in Adapter not found" % (dev))
    return dev_info
def member_del(self, hosts, proxy=False):
    """Remove *hosts* (comma separated) from the etcd cluster one by one.

    proxy: accepted for interface compatibility; not used here.
    Raises Exp(EPERM) when `etcdctl member remove` fails for a host.
    """
    origin_cluster_list = self._etcd_get_init_cluster().keys()
    new_cluster_list = origin_cluster_list
    new_del_list = []
    # BUG FIX: the original removed entries from del_list while iterating
    # it, which skips the element following every removed host; build the
    # filtered list instead
    del_list = []
    for host in hosts.split(','):
        if host not in new_cluster_list:
            _dwarn("host:%s is not in etcd cluster, please check it!" % (host))
            continue
        # etcd member remove host
        new_cluster_list.remove(host)
        del_list.append(host)
    for host in del_list:
        if len(host) == 0:
            continue
        _dmsg("wait etcd for health, host:%s" % (host))
        self._etcd_health_wait()
        _dmsg("etcd is health, begin check is permit to remove... host:%s" % (host))
        if not self.etcd_check_member_remove_permition(host):
            _dwarn("host:%s not permit to remove. pelease check it!\n" % (host))
            new_cluster_list.append(host)
            continue
        _dmsg("host:%s permit to remove, begin remove..." % (host))
        try:
            cmd = "etcdctl member list | grep %s | awk '{print $1}' | awk -F':' '{print $1}'| awk -F'[' '{print $1}'| xargs etcdctl member remove" % (host)
            self._exec_node(new_cluster_list[0], cmd)
            _sysinfo("etcd member remove %s ok !" % (host))
            new_del_list.append(host)
        except Exp as e:
            _syserror("etcd member remove %s fail ! errmsg:%s" % (host, e.err))
            raise Exp(errno.EPERM, "etcd member remove %s fail, %s" % (host, e.err))
def get_dev_info(self, dev):
    """Return the virtual-drive info dict for *dev*, enriched with adapter,
    BBU and per-physical-disk details.

    Raises Exp(EINVAL) when *dev* is not found on its adapter.
    NOTE(review): dev_info aliases the cached self.all_raid entry, so the
    adp_*/bbu_info/disk_info keys added below are written back into the
    cache — confirm that is intentional.
    """
    dev_info = {}
    (dev_adp, dev_vd) = self.get_dev_vd(dev)
    all_raid = self.all_raid
    for adp in all_raid:
        for vd in all_raid[adp]:
            if dev_adp == adp and dev_vd == vd:
                dev_info = all_raid[adp][vd]
                # adapter-level details
                dev_info['adp_name'] = all_raid[adp]['name']
                dev_info['adp_memory'] = all_raid[adp]['memory']
                dev_info['adp_type'] = all_raid[adp]['type']
                dev_info['bbu_info'] = all_raid[adp]['bbu_info']
                # expand each backing physical disk
                dev_info['disk_info'] = {}
                for disk in dev_info['disk']:
                    disk_info = self.get_disk_info(disk)
                    dev_info['disk_info'][disk] = disk_info
    if len(dev_info) == 0:
        raise Exp(errno.EINVAL, "%s in Adapter %s not found" % (dev, dev_adp))
    return dev_info
def get_dev_serialno(self, dev):
    """Return a list with the model number and serial number of *dev*
    parsed from `hdparm -I`, best effort.

    Missing fields are simply absent from the list; an empty list is
    returned when hdparm fails or reports nothing.
    """
    if not dev.startswith("/dev/"):
        dev = "/dev/" + dev
    if not self.is_dev(dev):
        raise Exp(errno.EINVAL, os.strerror(errno.EINVAL))
    serialno = []
    try:
        res, err = _exec_pipe1(["hdparm", "-I", dev], 0, False)
        for line in res.splitlines():
            m = re.search(r'Model Number:\s*(\S+)', line)
            if m is not None:
                serialno.append(m.group(1))
            m = re.search(r'Serial Number:\s*(\S+)', line)
            if m is not None:
                serialno.append(m.group(1))
            # Firmware Revision intentionally not collected (kept disabled
            # in the original)
    except Exception:
        # hdparm may be unavailable: fall through with what we have
        pass
    # BUG FIX: the collected values were computed but never returned
    return serialno
def set_raid_policy(self, dev, cache):
    """Apply the cache policy tokens in *cache* to the raid device *dev*.

    Recognized tokens (validated by __set_raid_policy_check):
      SMARTPATH                     - enable HP SmartPath (if supported)
      CACHED / DIRECT / "r/w" ratio - raid controller cache on/off/ratio
      ENDSKCACHE / DISDSKCACHE      - physical disk cache on/off
      NOCACHEDBADBBU / CACHEDBADBBU - cache behaviour when the BBU is bad
    """
    cachearr = self.__set_raid_policy_check(cache)
    dev_info = self.get_dev_info(dev)
    smartpath = None
    raid_cache = None
    disk_cache = None
    ratio = None
    badbbu_cache = None
    for i in cachearr:
        if i == 'SMARTPATH':
            # SmartPath can only be enabled when the controller supports it
            # and it is not already on
            if dev_info['smart_support'] == 'True':
                if 'smart_path' not in dev_info:
                    smartpath = 'enable'
                elif dev_info['smart_path'] == 'disable':
                    smartpath = 'enable'
            else:
                raise Exp(errno.EINVAL, 'cannot suport smartpath')
        elif i == 'CACHED' or i == 'DIRECT' or '/' in i:
            # any controller-cache setting implies SmartPath must be off
            if dev_info['smart_support'] == 'True':
                if 'smart_path' in dev_info and dev_info[
                        'smart_path'] == 'enable':
                    smartpath = 'disable'
            if i == 'CACHED':
                raid_cache = 'enable'
            elif i == 'DIRECT':
                raid_cache = 'disable'
            else:
                # "read/write" ratio token, e.g. "25/75"
                raid_cache = 'enable'
                ratio = i.split('/')
        elif i == 'ENDSKCACHE' or i == 'DISDSKCACHE':
            disk_cache = 'enable' if i == 'ENDSKCACHE' else 'disable'
        elif i == 'NOCACHEDBADBBU' or i == 'CACHEDBADBBU':
            badbbu_cache = 'enable' if i == 'CACHEDBADBBU' else 'disable'
    self.__set_raid_cache(dev_info, smartpath, raid_cache, disk_cache,
                          ratio, badbbu_cache)
def _get_src(self, op):
    """Pack the requested component ("etc", "app" or "samba") into a /tmp
    tarball and return the tarball path."""
    if op not in ["etc", "app", "samba"]:
        raise Exp(errno.EINVAL, "not support %s" % (op))
    if op == "samba":
        src_file = "/usr/local/samba"
    else:
        src_file = os.path.join(self.config.home, op)
    src_tar = "/tmp/uss_%s.tar.gz" % (op)
    steps = [
        "rm -rf %s" % (src_tar),
        "cd %s" % (src_file),
        "tar czf %s *" % (src_tar),
    ]
    if op == "samba":
        # samba runtime state must not end up in the tarball
        steps[2] += " --exclude=./private/* --exclude=./var/* >/dev/null 2>&1"
    exec_shell(" && ".join(steps))
    return src_tar
def member_add(self, hosts):
    """Add *hosts* (comma separated) to the etcd cluster one at a time.

    Each new host is registered with the cluster and then reconfigured via
    node.py in "existing" state; on failure the member is rolled back and
    Exp is re-raised.
    NOTE(review): add_list is never used, and new_cluster_list aliases
    origin_cluster_list rather than copying it — confirm both are intended.
    """
    origin_cluster_list = self._etcd_get_init_cluster().keys()
    new_cluster_list = origin_cluster_list
    add_list = []
    for host in hosts.split(','):
        if host in new_cluster_list:
            _dwarn("host:%s is already in etcd cluster, please check it!" % (host))
            continue
        else:
            # add etcd member one by one
            self._etcd_health_wait()
            self._etcd_add_member(host)
            new_cluster_list.append(host)
            try:
                # reconfigure the new node to join the existing cluster
                cmd = "python %s/app/admin/node.py etcd --state %s --hosts %s" % (self.config.home, "existing", ','.join(new_cluster_list))
                self._exec_node(host, cmd)
            except Exp, e:
                # roll back the member we just registered
                self._etcd_del_member(host)
                _syserror("etcd set conf on host:%s fail, errmsg:%s" % (host, e.err))
                raise Exp(e.errno, "etcd set conf on host:%s fail, errmsg:%s" % (host, e.err))
def add_raid0(self, disk, force):
    """Create a raid0 logical drive on *disk* (matched by inquiry string)
    via hpacucli; the disk must be new (not yet part of any array)."""
    disk_info = None
    new_disk = self.get_new_disk()
    for adp in new_disk:
        for phy in new_disk[adp]:
            if disk == new_disk[adp][phy]['inq']:
                disk_info = new_disk[adp][phy]
                disk_info['adp'] = adp
                disk_info['phy'] = phy
    if disk_info is None:
        raise Exp(errno.EINVAL, "'" + disk + "' not a new disk")
    # serialize hpacucli invocations across processes
    fd = _lock_file1("/var/run/fusionstack_raid_hpacucli.lock")
    res = _exec_pipe2([
        self.cmd, 'ctrl', 'slot=' + disk_info['adp'], 'create',
        'type=ld', 'drives=' + disk_info['phy'], 'raid=0'
    ], 0, True, stdin='y')
    _unlock_file1(fd)
def _str2dict(s): """ [root@test02 build]# /sysy/yfs/app/bin/uss.configdump globals.clustername:none globals.hostname:test02 globals.home:/sysy/yfs globals.check_mountpoint:0 """ if (s[-1] == '\n'): s = s[:-1] a = s.split('\n') d = {} for i in a: p = i.split(':') if (d.get(p[0])): raise Exp(errno.EEXIST, "dup key exist") try: d[p[0].strip()] = i[i.index(":") + 1:].strip() except IndexError as err: print("str %s" % (s)) raise return d
assert (type(able) == type(True)) try: domain, user, passwd = self._ad_get() except Exp, e: if (e.errno == 126): domain = None user = None passwd = None else: raise if able: if None in [domain, user, passwd]: raise Exp( 1, "domain %s user %s passwd %s invalid" % (domain, user, passwd)) self._ad_enable(domain, user, passwd) else: if None in [domain, user, passwd]: dmsg("domain %s user %s passwd %s, skip disable" % (domain, user, passwd)) return None self._ad_disable(domain, user, passwd) def _ldap_enable(self, server, dn): cmd = "pkill -9 winbindd;pkill -9 nscd" cmd = cmd + ' ;authconfig --disablewinbind --disablewins --disablesssd --disablenis --enableldap --enableldapauth --ldapserver="%s" --ldapbasedn="%s" --enablemkhomedir --update' % ( server, dn) cmd = cmd + " ;systemctl start nslcd" print cmd
def raid_flush(self):
    """Flush the RAID controller cache; only MegaRAID controllers are supported."""
    if self.raid_type != "MegaRAID":
        raise Exp(errno.EPERM, "now only support MegaRAID")
    self.raid_tool.raid_cache_flush()