def build(build_server, wait=True):
    '''
    pull the latest uniio repos (uniio, sysmgmt, nasmgmt, uniio-ui) from github on build server
    build on server
    '''
    global g_runtime_dir
    gitcmd = get_gitcmd()
    a, b, c, git_ssh_identityfile = g_conf["build_server"]   # [IP, username, password, git_ssh_identityfile]
    shs = []
    cos = []
    if g_uioonly:
        repos = ['uniio']
    else:
        repos = ['uniio', 'uniio-ui', 'sysmgmt', 'nasmgmt']
    # build all repos in parallel
    for repo in repos:
        sh = build_server.newshell()
        if not sh:
            return None
        shs.append(sh)
        # prepare shell
        cos.append(sh.exe("cd /tmp", wait=False))
        cos.append(sh.exe("export GIT_SSH_COMMAND=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=%s -o ProxyCommand='ssh -q -W %%h:%%p evidence.orcadt.com'\"" % (git_ssh_identityfile), wait=False))
        # clone repo
        checkout = g_conf["%s_checkout" % (repo)] if g_conf.has_key("%s_checkout" % (repo)) else "default"
        cmd = "[[ -e '%s/%s' ]] && { cd %s/%s && git fetch; } || { %s clone --recurse-submodules [email protected]:uniio/%s.git %s/%s; }" \
              % (g_runtime_dir, repo, g_runtime_dir, repo, gitcmd, repo, g_runtime_dir, repo)
        cos.append(sh.exe(cmd, wait=False))
        if checkout != "default":   # checkout desired branch or tag or commit
            cmd = "cd %s/%s && %s checkout %s" % (g_runtime_dir, repo, gitcmd, checkout)
            cos.append(sh.exe(cmd, wait=False))
        cmd = "cd %s/%s && %s pull --no-edit || true" % (g_runtime_dir, repo, gitcmd)
        cos.append(sh.exe(cmd, wait=False))
        cmd = "cd %s/%s && %s log --pretty=format:'%%h|%%ci|%%an|%%s' | head -8 || true" % (g_runtime_dir, repo, gitcmd)
        cos.append(sh.exe(cmd, wait=False))
        # cmake and make the repo (the cmake cmdobj is appended too, so its result is checked below)
        if repo == "uniio-ui":
            cos.append(sh.exe("cd %s/%s && mkdir -p build_debug && cd build_debug && cmake3 .." % (g_runtime_dir, repo), wait=False))
            cos.append(sh.exe("cd %s/%s/build_debug && rm -f *.rpm && make -j20 package" % (g_runtime_dir, repo), wait=False))
        else:
            cos.append(sh.exe("cd %s/%s && mkdir -p build && cd build && cmake3 -DCMAKE_BUILD_TYPE=Release .." % (g_runtime_dir, repo), wait=False))
            cos.append(sh.exe("cd %s/%s/build && rm -f *.rpm && make -j20 package" % (g_runtime_dir, repo), wait=False))
    if wait:
        for co in cos:
            if not co.succ():
                common.log("failed build command: '%s'" % (co.cmdline), 1)
                return None
    return cos
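# Example (hypothetical values) of the configuration entries build() consumes,
# assuming auto.json follows the shape referenced elsewhere in this script:
#
#   "build_server":      ["192.168.1.50", "root", "secret", "/root/.ssh/id_rsa"],
#   "uniio_checkout":    "release-2.1",   # optional; stays on the default branch if absent
#   "uniio-ui_checkout": "a1b2c3d",       # optional; a branch, tag, or commit all work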
def discard_drives(federation_targets, wait=True):
    cos = []
    for t in federation_targets:
        cos.append(t.exe("while read d; do wipefs -f -a \${d}; dd if=/dev/zero of=\${d} bs=1M count=16; blkdiscard \${d} & done < <(lsblk -lpn -o NAME | grep -w 'sd.' | grep -v $(mount | grep -w / | awk '{print $1}' | sed 's/[0-9]//g')) && wait", wait=wait))
    if wait:
        for co in cos:
            if not co.succ():
                common.log("failed when discarding drives.")
                return None
    return cos
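# A sketch of what the pipeline above selects, assuming the root filesystem
# lives on /dev/sda1: lsblk lists the whole disks /dev/sda, /dev/sdb, ...;
# the grep -v drops the root disk (its partition digits stripped by sed),
# so /dev/sdb, /dev/sdc, ... get wiped, zeroed, and discarded in parallel.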
def perf_test(client_targets, federation_targets, fill=0):
    '''
    run performance test.
    start fio workload from clients, collect counter logs at the same time,
    fill: fill the luns first, for a given time
    return when all jobs are done.
    '''
    # print threadtable.ini contents on all federation servers
    for t in federation_targets:
        t.exe("cat /etc/objblk/threadtable.ini")
    status_str = ""
    jobdesc, fio_job_dir, fio_cos, fio_driver = fio_run(client_targets, fill)
    if not fio_cos:
        return False
    counter_log_dir, counter_log_path, counter_cos = counter_log(jobdesc, federation_targets)
    # wait for jobs to end
    fio_fail = None
    for fio_co in fio_cos:
        if not fio_co.succ():
            fio_fail = fio_co
    if fio_fail:
        status_str += ".FIO_FAIL_on_%s" % (fio_driver.address)
    counter_fail = None
    for co in counter_cos:
        if not co.succ():
            counter_fail = co
    if counter_fail:
        status_str += ".COUNTER_FAIL_on_%s" % (counter_fail.shell.t.address)
    iscsi_out(client_targets)
    # download fio logs from fio driver node
    localtime = time.localtime()
    date_str = "%d-%02d-%02d" % (localtime.tm_year, localtime.tm_mon, localtime.tm_mday)
    time_str = "%02d.%02d.%02d" % (localtime.tm_hour, localtime.tm_min, localtime.tm_sec)
    logdir = "perflogs/%s/%s.%s_%s%s" % (date_str, jobdesc, date_str, time_str, status_str)
    me.exe("rm -rf %s" % (logdir))
    me.exe("mkdir -p %s" % (logdir))
    if not fio_driver.download(logdir, "%s/*" % (fio_job_dir)):
        return False
    # download counter logs and cpu data svg files from federation nodes
    for t in federation_targets:
        counterdir = "%s/counter_%s" % (logdir, t.address)
        svgdir = "%s/cpudata_%s" % (logdir, t.address)
        me.exe("mkdir -p %s" % (counterdir))
        if g_cpudata:
            me.exe("mkdir -p %s" % (svgdir))
        if not t.download(counterdir, counter_log_path):
            return False
        if g_cpudata and not t.download(svgdir, counter_log_dir + "/*%s*.svg" % (jobdesc)):
            return False
    # dump a copy of the config file to the logdir
    with open("%s/settings.json" % (logdir), 'w') as f:
        json.dump(g_conf, f, indent=2)
    common.log("DONE EEPERFTEST.\n%s\nlog location: %s" % ("-" * 60, os.path.join(os.getcwd(), logdir)))
    showfio(logdir)
    return True
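# Example of the resulting log layout (hypothetical job and addresses),
# assuming a default randrw job run on 2019-07-01 at 10:15:30 with no failures:
#
#   perflogs/2019-07-01/randrw.qd3.njobs1.80dup.60comp.random_dist.60sec.2019-07-01_10.15.30/
#       settings.json          # copy of the config used for this run
#       <fio logs downloaded from the driver node>
#       counter_10-0-0-11/     # counter logs, one directory per federation node
#       cpudata_10-0-0-11/     # flame-graph svg files (only with --cpudata)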
def counter_log(jobdesc, federation_targets):
    '''
    start logging counter values using 'server/counters.sh' on federation nodes
    collect cpu data into flame graphs every 1 hour ( if applicable )
    return: counter_log_dir, counter_log_path, cmdobjs
        counter_log_dir: the directory that contains counter logs and svg files
        counter_log_path: counter log location on each node
        cmdobjs: the command objects tracking the counters.sh command
    '''
    # numjobs and iodepth may be replaced by 'runfio.sh'
    njobs = g_conf["fio_numjobs"] if g_conf.has_key("fio_numjobs") else 1
    qdepth = g_conf["fio_iodepth"] if g_conf.has_key("fio_iodepth") else 3
    nj_str = g_conf["runfio_jobs"] if g_conf.has_key("runfio_jobs") else str(njobs)
    qd_str = g_conf["runfio_qdepth"] if g_conf.has_key("runfio_qdepth") else str(qdepth)
    nj_str = re.sub('\s+', ',', nj_str.strip())
    qd_str = re.sub('\s+', ',', qd_str.strip())
    runtime = g_conf["fio_runtime"] if g_conf.has_key("fio_runtime") else 60
    dur = len(nj_str.split(',')) * len(qd_str.split(',')) * runtime
    cmdobjs = []
    # collect counter logs
    counter_log_dir = "%s/counter_logs" % (g_runtime_dir)
    counter_log_path = "%s/%s.%ddur.log" % (counter_log_dir, jobdesc, dur)
    for t in federation_targets:
        t.exe("mkdir -p %s" % (counter_log_dir))
        sh = t.newshell()
        if not sh:
            return None, None, None
        cmd = "%s/uio_scripts/server/counters.sh %d > %s" % (g_runtime_dir, dur, counter_log_path)
        cmdobjs.append(sh.exe(cmd, wait=False))
        common.log("long task running on '%s': %s" % (t, cmd))
    if g_cpudata:
        # collect cpu data with 'server/collect_cpu.sh' once at start;
        # if the duration is more than 1 hour, collect cpu data again every hour
        every = 3600
        num_collects = dur / every + 1
        for t in federation_targets:
            sh = t.newshell()   # a new shell for cpu collection, so no serialization with the counter shell
            if not sh:
                return None, None, None
            # stacking commands in the same shell serializes all of them
            cmdobjs.append(sh.exe("cd %s" % (counter_log_dir), wait=False))
            for elapse in [every * i for i in range(num_collects)]:
                prefix = "when%ds.%s" % (elapse, jobdesc)
                cmdobjs.append(sh.exe("sleep %d" % (elapse if elapse == 0 else every), wait=False))
                cmd = "%s/uio_scripts/server/collect_cpu.sh cio_array -w %s -t 30" % (g_runtime_dir, prefix)
                cmdobjs.append(sh.exe(cmd, wait=False))
                cmd = "%s/uio_scripts/server/collect_cpu.sh -w %s -t 30" % (g_runtime_dir, prefix)
                cmdobjs.append(sh.exe(cmd, wait=False))
    return counter_log_dir, counter_log_path, cmdobjs
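# Worked example of the duration estimate above (hypothetical config values):
# runfio_jobs = "1 4 8" and runfio_qdepth = "1 16" with fio_runtime = 60 gives
# dur = 3 * 2 * 60 = 360 seconds, so counters.sh logs for 360s and, with
# --cpudata, cpu data is collected once at t=0 (360 / 3600 + 1 = 1 round).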
def fio_server(client_targets):
    cos = []
    # restart the fio server on every client
    for t in client_targets:
        cos.append(t.exe("killall -9 fio || true", wait=False))
        cos.append(t.exe("fio --server --daemonize=/tmp/fio.pid", wait=False))
    for co in cos:
        if not co.succ():
            common.log("failed restarting fio server.")
            return False
    return True
def push_topology(federation_targets):
    t = None
    for n in federation_targets:
        if fab_running(n):
            t = n
            break
    if not t:
        common.log("fabric manager is not running on federation nodes.", 1)
        return False
    # push the topology
    if not t.exe("cioctl topology %s" % (g_conf["topology"])).succ():
        return False
    if not t.exe("cioctl portal --management_ip %s --iscsi_ip %s" % (g_conf["management_ip"], g_conf["iscsi_ip"])).succ():
        return False
    return True
def shutdown_cluster(federation_targets, force=True, wait=True):
    # shutdown cluster
    cos = []
    for t in federation_targets:
        if not (array_running(t) or fab_running(t)):
            cos.append(t.exe("echo uniio is not running.", wait=False))
            continue
        cmd = "%s/uio_scripts/server/init_cluster.sh %s -s" % (g_runtime_dir, "-f" if force else "")
        cos.append(t.exe(cmd, wait=False))
    if wait:
        for co in cos:
            if not co.succ():
                common.log("failed when sending shutdown cmd to uniio.")
                return None
    return cos
def boot_cluster(federation_targets):
    # start objmgr and objmgr-fab on all uniio nodes
    cos = []
    for t in federation_targets:
        cmd = "%s/uio_scripts/server/init_cluster.sh -b" % (g_runtime_dir)
        cos.append(t.exe(cmd, wait=False))
    for co in cos:
        if not co.succ():
            common.log("failed when starting uniio.")
            return False
    for t in federation_targets:
        # give fabricmanager time to become ready to accept the topology
        if not t.wait_alive(8080, 600):
            common.log("fabricmanager did not come up within 600s. port: 8080")
            return False
    if not attach_luns(federation_targets):
        return False
    return True
def prep_targets():
    '''
    * build 3 target lists: client_targets, federation_targets, build_server
    * upload uio_scripts to all targets
    * return 3 target lists as a tuple: (client_targets, federation_targets, build_server)
    '''
    global g_runtime_dir
    # node defs: [ (IP, username, password), ... ]
    client_node_def = g_conf["client_nodes"]
    federation_node_def = g_conf["federation_nodes"]
    build_node_def = g_conf["build_server"]   # [IP, username, password, git_ssh_identityfile]
    client_targets = []
    for n in client_node_def:
        t = gettarget(n[0], username=n[1], password=n[2], svc="ssh", timeout=60)
        if t:
            client_targets.append(t)
        else:
            client_targets = []
            break
    federation_targets = []
    for n in federation_node_def:
        t = gettarget(n[0], username=n[1], password=n[2], svc="ssh", timeout=60)
        if t:
            federation_targets.append(t)
        else:
            federation_targets = []
            break
    build_server = gettarget(build_node_def[0], username=build_node_def[1], password=build_node_def[2], svc="ssh", timeout=60)
    # upload uio_scripts to targets
    if client_targets and federation_targets:
        for t in client_targets + federation_targets:
            if not t.exe("mkdir -p %s" % (g_runtime_dir)):
                return None
            if not t.upload("%s" % (g_rootdir), g_runtime_dir):
                return None
    else:
        common.log("failed to connect to some of the nodes.")
    return (client_targets, federation_targets, build_server)
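# Example (hypothetical addresses and credentials) of the node definitions
# prep_targets() reads from the configuration file:
#
#   "client_nodes":     [["10.0.0.21", "root", "secret"],
#                        ["10.0.0.22", "root", "secret"]],
#   "federation_nodes": [["10.0.0.11", "root", "secret"],
#                        ["10.0.0.12", "root", "secret"]],
#   "build_server":     ["10.0.0.50", "root", "secret", "/root/.ssh/id_rsa"]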
def create_luns(client_targets, federation_targets, numluns=0):
    # get client iscsi initiator iqns for mapping
    iqns = {}   # { address : iqn }
    for t in client_targets:
        iqns[t.address] = t.exe("awk -F'=' '{print \$2}' /etc/iscsi/initiatorname.iscsi").stdout.strip()
    t = None
    for n in federation_targets:
        if fab_running(n):
            t = n
            break
    if not t:
        common.log("fabric manager is not running on any federation node.", 1)
        return False
    # create luns
    num_luns = numluns if numluns else (g_conf["num_luns"] if g_conf.has_key("num_luns") else 18)
    for i in range(num_luns):
        if not t.exe("cioctl create lun%d %dG" % (i, g_conf["lunsize_G"])).succ():
            return False
        if not t.exe("cioctl iscsi target create --name tgt-%d" % (i)).succ():
            return False
    # create initiator groups
    inames = []
    for address in iqns.keys():
        iqn = iqns[address]
        addr = address.replace(".", "-")
        iname = "i%s" % (addr)
        inames.append(iname)
        if not t.exe("cioctl iscsi initiator create --name %s --iqn %s" % (iname, iqn)).succ():
            return False
        if not t.exe("cioctl iscsi initiatorgroup create --name ig%s --initiators i%s" % (addr, addr)).succ():
            return False
    if g_fullmap:
        if not t.exe("cioctl iscsi initiatorgroup create --name igall --initiators %s" % (','.join(inames))).succ():
            return False
    # create mappings: luns are evenly mapped to clients
    num_igs = len(iqns)
    luns_per_client = max(num_luns / len(iqns), 1)   # avoid a zero divisor when there are fewer luns than clients
    for i in range(num_luns):
        address = iqns.keys()[(i / luns_per_client) % num_igs]
        addr = address.replace(".", "-")
        igroup = 'igall' if g_fullmap else "ig%s" % (addr)
        if not t.exe("cioctl iscsi mapping create --blockdevice lun%d --target tgt-%d --initiatorgroup %s" % (i, i, igroup)).succ():
            return False
    return True
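# Worked example of the round-robin mapping above (hypothetical setup):
# 18 luns and 3 clients gives luns_per_client = 6, so lun0..lun5 map to one
# client's initiator group, lun6..lun11 to the next, lun12..lun17 to the last
# (client order follows iqns.keys() and is not guaranteed). With --fullmap
# every lun maps to 'igall', i.e. to all clients at once.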
def detach_luns(federation_targets):
    t = None
    for n in federation_targets:
        if fab_running(n):
            t = n
            break
    if not t:
        common.log("fabric manager is not running on any federation node.", 1)
        return False
    luns = t.exe("cioctl list | grep GB | awk '{print \$2}' | grep -v '^-'").getlist()
    for lun in luns:
        if not t.exe("cioctl detach --ignore_session_check %s" % (lun)).succ():
            return False
    snaps = t.exe("cioctl snapshot list | grep GiB | awk '{print \$2}'").getlist()
    for snap in snaps:
        if not t.exe("cioctl detach --ignore_session_check %s" % (snap)).succ():
            return False
    return True
def init_backend(federation_targets, force=True, wait=True):
    if not federation_targets:
        common.log("federation nodes are None.")
        return False
    raw_disk_size_G = g_conf["raw_disk_size_G"] if g_conf.has_key("raw_disk_size_G") else None
    cos = []
    for t in federation_targets:
        if raw_disk_size_G:
            cmd = "%s/uio_scripts/server/init_backend.sh -O init -G 300 -S %d" % (g_runtime_dir, raw_disk_size_G)
        else:
            cmd = "%s/uio_scripts/server/init_backend.sh -O init -G 300" % (g_runtime_dir)
        cos.append(t.exe(cmd, wait=False))
    if wait:
        for co in cos:
            if not co.succ():
                common.log("failed when initializing the uniio backend.")
                return None
    return cos
def clear_luns(client_targets, federation_targets):
    iscsi_ip = g_conf["iscsi_ip"]
    cos = []
    for t in client_targets:
        cos.append(t.exe("iscsiadm -m node --logout", wait=False))
        cos.append(t.exe("iscsiadm -m session -u", wait=False))
        cos.append(t.exe("iscsiadm -m discoverydb -t sendtargets -p %s:3260 -o delete" % (iscsi_ip), wait=False))
    for co in cos:
        co.wait()
    t = None
    for n in federation_targets:
        if fab_running(n):
            t = n
            break
    if not t:
        common.log("fabric manager is not running on any federation node.", 1)
        return False
    mappings = t.exe("cioctl iscsi mapping list | grep iqn | awk '{print \$2}'").getlist()
    for mapping in mappings:
        if not t.exe("cioctl iscsi mapping delete --ignore_session_check --blockdevice %s --yes-i-really-really-mean-it" % (mapping)).succ():
            return False
    targets = t.exe("cioctl iscsi target list | grep iqn | awk '{print \$2}'").getlist()
    for tgt in targets:
        if not t.exe("cioctl iscsi target delete --name %s --yes-i-really-really-mean-it" % (tgt)).succ():
            return False
    snapshots = t.exe("cioctl snapshot list | grep GiB | awk '{print \$2}'").getlist()
    for snap in snapshots:
        if not t.exe("cioctl detach %s" % (snap)).succ():
            return False
    luns = t.exe("cioctl list | grep GB | awk '{print \$2}' | grep -v '^-'").getlist()
    for lun in luns:
        if not t.exe("cioctl delete %s" % (lun)).succ():
            return False
    igs = t.exe("cioctl iscsi initiatorgroup list | grep -E '.+-[0-9]+-' | awk '{print \$2}'").getlist()
    for ig in igs:
        if not t.exe("cioctl iscsi initiatorgroup delete --name %s --yes-i-really-really-mean-it" % (ig)).succ():
            return False
    initiators = t.exe("cioctl iscsi initiator list | grep -E '.+-[0-9]+-' | awk '{print \$2}'").getlist()
    for it in initiators:
        if not t.exe("cioctl iscsi initiator delete --name %s --yes-i-really-really-mean-it" % (it)).succ():
            return False
    return True
def build_bin(build_server, wait=True):
    '''
    pull the latest uniio repo from github on build server
    build cio_array, cio_array.sym and ioperftest on server
    '''
    global g_runtime_dir
    gitcmd = get_gitcmd()
    a, b, c, git_ssh_identityfile = g_conf["build_server"]   # [IP, username, password, git_ssh_identityfile]
    sh = build_server.newshell()
    if not sh:
        return None
    cos = []
    cos.append(sh.exe("cd /tmp", wait=False))
    cos.append(sh.exe("export GIT_SSH_COMMAND=\"ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=%s -o ProxyCommand='ssh -q -W %%h:%%p evidence.orcadt.com'\"" % (git_ssh_identityfile), wait=False))
    # git clone uniio repo
    repo = "uniio"
    checkout = g_conf["%s_checkout" % (repo)] if g_conf.has_key("%s_checkout" % (repo)) else "default"
    cmd = "[[ -e '%s/%s' ]] && { cd %s/%s && git fetch; } || { %s clone --recurse-submodules [email protected]:uniio/%s.git %s/%s; }" \
          % (g_runtime_dir, repo, g_runtime_dir, repo, gitcmd, repo, g_runtime_dir, repo)
    cos.append(sh.exe(cmd, wait=False))
    if checkout != "default":   # checkout desired branch or tag or commit
        cmd = "cd %s/%s && %s checkout %s" % (g_runtime_dir, repo, gitcmd, checkout)
        cos.append(sh.exe(cmd, wait=False))
    cmd = "cd %s/%s && %s pull --no-edit || true" % (g_runtime_dir, repo, gitcmd)
    cos.append(sh.exe(cmd, wait=False))
    cmd = "cd %s/%s && %s log --pretty=format:'%%h|%%ci|%%an|%%s' | head -8 || true" % (g_runtime_dir, repo, gitcmd)
    cos.append(sh.exe(cmd, wait=False))
    # cmake the repo
    cos.append(sh.exe("cd %s/%s && mkdir -p build && cd build && cmake3 -DCMAKE_BUILD_TYPE=Release .." % (g_runtime_dir, repo), wait=False))
    # make the binaries
    cos.append(sh.exe("cd %s/%s/build && make -j20 cio_array cio_array.sym ioperftest" % (g_runtime_dir, repo), wait=False))
    if wait:
        for co in cos:
            if not co.succ():
                common.log("failed build bin. command: '%s'" % (co.cmdline), 1)
                return None
    return cos
def attach_luns(federation_targets):
    t = None
    for n in federation_targets:
        if fab_running(n):
            t = n
            break
    if not t:
        common.log("fabric manager is not running on any federation node.", 1)
        return False
    # the lun list may be briefly empty right after boot; retry a few times
    luns = []
    for i in range(3):
        luns = t.exe("cioctl list | grep GB | awk '{print \$2}' | grep -v '^-'").getlist()
        if luns:
            break
        time.sleep(5)
    for lun in luns:
        if not t.exe("cioctl attach %s" % (lun)).succ():
            return False
    snaps = t.exe("cioctl snapshot list | grep GiB | awk '{print \$2}'").getlist()
    for snap in snaps:
        if not t.exe("cioctl attach %s" % (snap)).succ():
            return False
    return True
def update_cluster(federation_targets, build_server, force=True):
    cos_shutdown = shutdown_cluster(federation_targets, force=force, wait=False)
    if not cos_shutdown:
        common.log("failed when shutting down uniio.")
        return False
    cos_backend = init_backend(federation_targets, force=force, wait=False)
    if not cos_backend:
        common.log("failed when sending reinit backend command.")
        return False
    if g_binonly:
        if not replace_bin(federation_targets, build_server, True):
            return False
    else:
        if not replace_rpm(federation_targets, build_server, True):
            return False
    # wait for backend to be reinitialized
    for co in cos_shutdown + cos_backend:
        if not co.succ():
            common.log("failed when updating cluster. cmd: %s" % (co.cmdline))
            return False
    if not init_cluster(federation_targets):
        return False
    return True
def replace_rpm(federation_targets, build_server, force=True):
    '''
    build the latest uniio on build_server and replace rpms on federation nodes
    '''
    if not build_server:
        common.log("failed to replace rpms. build server is None.", 1)
        return False
    if not federation_targets:
        common.log("failed to replace rpms. uniio servers are None.", 1)
        return False
    cos_build = build(build_server, wait=False)
    if not cos_build:
        return False
    # reinitialize backend
    if not shutdown_cluster(federation_targets, force=True, wait=True):
        return False
    # wait for the build job to end
    for co in cos_build:
        if not co.succ():
            common.log("failed when building.")
            return False
    # download from build server and upload rpm packages to federation nodes:
    me.exe("rm -rf /tmp/rpms && mkdir /tmp/rpms")
    if not build_server.download("/tmp/rpms/", "%s/uniio/build/object-array-*.rpm" % (g_runtime_dir)):   # uniio rpms
        return False
    if not g_uioonly:
        if not build_server.download("/tmp/rpms/", "%s/nasmgmt/build/object-array-nasmgmt-*.rpm" % (g_runtime_dir)):   # nasmgmt rpms
            return False
        if not build_server.download("/tmp/rpms/", "%s/sysmgmt/build/object-array-sysmgmt-*.rpm" % (g_runtime_dir)):   # sysmgmt rpms
            return False
        if not build_server.download("/tmp/rpms/", "%s/uniio-ui/build_debug/object-array-uniio-ui-*.rpm" % (g_runtime_dir)):   # uniio-ui rpms
            return False
    for t in federation_targets:
        t.exe("rm -rf /tmp/rpms && mkdir -p /tmp/rpms")
        if not t.upload("/tmp/rpms/*.rpm", "/tmp/rpms/"):
            return False
    cos = []
    for t in federation_targets:
        cmd = "%s/uio_scripts/server/init_cluster.sh %s --replace=/tmp/rpms" % (g_runtime_dir, '-f' if force else "")
        cos.append(t.exe(cmd, wait=False))
    for co in cos:
        if not co.succ():
            common.log("failed when replacing uniio packages.")
            return False
    return True
def replace_bin(federation_targets, build_server, force=True):
    '''
    if g_binonly is a local file, simply upload the file and replace '/opt/uniio/sbin/cio_array'
    if g_binonly is not a local file, build the latest cio_array, cio_array.sym on build_server
    and replace the binaries on federation nodes
    '''
    if not federation_targets:
        common.log("failed to replace binaries. uniio servers are None.", 1)
        return False
    if me.is_path_executable(g_binonly):
        # use a local binary file to update the federation
        # wait for the backend to get reinitialized
        if not shutdown_cluster(federation_targets, force=True, wait=True):
            return False
        for t in federation_targets:
            if not t.upload(g_binonly, "/opt/uniio/sbin/cio_array"):
                return False
    else:
        if not build_server:
            common.log("failed to replace binaries. build server is None.", 1)
            return False
        cos_build = build_bin(build_server, wait=False)
        if not cos_build:
            return False
        # reinitialize backend
        if not shutdown_cluster(federation_targets, force=True, wait=True):
            return False
        # wait for the build task to complete
        for co in cos_build:
            if not co.succ():
                common.log("failed when building uniio binaries.")
                return False
        # download from build server and upload the binaries to federation nodes:
        bins = ('cio_array', 'cio_array.sym', 'ioperftest')
        for b in bins:
            if not build_server.download("/tmp/", "%s/uniio/build/%s" % (g_runtime_dir, b)):
                return False
        for t in federation_targets:
            for b in bins:
                if not t.upload("/tmp/%s" % (b), "/opt/uniio/sbin/"):
                    return False
    return True
def init_cluster(federation_targets, force=True):
    if not federation_targets:
        common.log("federation nodes are None.")
        return False
    if not shutdown_cluster(federation_targets, force):
        return False
    # restart uniio
    cos = []
    for t in federation_targets:
        cmd = "%s/uio_scripts/server/init_cluster.sh -i" % (g_runtime_dir)
        cos.append(t.exe(cmd, wait=False))
    for co in cos:
        if not co.succ():
            common.log("failed when initializing uniio.")
            return False
    for t in federation_targets:
        # give fabricmanager time to become ready to accept the topology
        if not t.wait_alive(8080, 600):
            common.log("fabricmanager did not come up within 600s. port: 8080")
            return False
    if not push_topology(federation_targets):
        return False
    return True
if g_threadtable:
    if not replace_threadtable(federation_targets, g_threadtable):
        exit(1)
if g_boot_only:
    if not boot_cluster(federation_targets):
        exit(1)
if g_init_backend_only:
    clear_clients(client_targets)
    if not shutdown_cluster(federation_targets, force=True):
        exit(1)
    if not init_backend(federation_targets, force=g_force):
        exit(1)
if g_update:
    if not update_cluster(federation_targets, build_server, force=True):
        exit(1)
if g_init:
    if not init_cluster(federation_targets, force=True):
        exit(1)
if g_createluns:
    if not create_luns(client_targets, federation_targets, g_createluns):
        exit(1)
if g_perftest:
    if (g_init or g_update) and (not g_createluns):
        if not create_luns(client_targets, federation_targets, 0):
            exit(1)
    if not perf_test(client_targets, federation_targets, g_fill):
        exit(1)
# discard_drives(federation_targets)
dur = time.time() - start
common.log("DONE. REALTIME: %d seconds." % (dur))
exit(0)
def fio_build_job_contents(client_target, fill=0):
    """
    generate fio job file contents for 'runfio.sh' for a given client
    return: jobdesc, fio_job_content
        jobdesc: string to summarize the job ( may be used as prefix of job filename )
        fio_job_content: content string for the fio job definition file
    """
    fio_job_content = "[global]"
    fio_job_content += "\nwrite_bw_log=xxx"     # later xxx will be replaced by runfio.sh
    fio_job_content += "\nwrite_lat_log=xxx"    # later xxx will be replaced by runfio.sh
    fio_job_content += "\nwrite_iops_log=xxx"   # later xxx will be replaced by runfio.sh
    fio_job_content += "\nlog_avg_msec=10000"
    fio_job_content += "\nioengine=libaio"
    fio_job_content += "\ndirect=1"
    # fio_job_content += "\nsync=1"
    fio_job_content += "\nbs=4k"
    dist = g_conf["fio_random_distribution"] if g_conf.has_key("fio_random_distribution") else "random"
    fio_job_content += "\nrandom_distribution=%s" % (dist)
    duprate = g_conf["fio_dedupe_percentage"] if g_conf.has_key("fio_dedupe_percentage") else 80
    if fill:
        duprate = 0
    if duprate != 0:
        fio_job_content += "\ndedupe_percentage=%d" % (duprate)
    else:
        # no deduplicable data, small changes to every fio buffer
        fio_job_content += "\nscramble_buffers=1"
    comprate = g_conf["fio_buffer_compress_percentage"] if g_conf.has_key("fio_buffer_compress_percentage") else 60
    if fill:
        comprate = 0
    if comprate != 0:
        fio_job_content += "\nbuffer_compress_percentage=%d" % (comprate)
    else:
        # no compressible data, refill every fio buffer
        fio_job_content += "\nrefill_buffers=1"
    fio_job_content += "\ngroup_reporting=1"
    runtime = g_conf["fio_runtime"] if g_conf.has_key("fio_runtime") else 60
    if fill:
        runtime = fill
    fio_job_content += "\nruntime=%d" % (runtime)
    fio_job_content += "\ntime_based=1"
    fio_job_content += "\nramp_time=%d" % (g_conf["fio_ramp_time"] if g_conf.has_key("fio_ramp_time") else 60)
    # numjobs and iodepth may be replaced by 'runfio.sh'
    njobs = g_conf["fio_numjobs"] if g_conf.has_key("fio_numjobs") else 1
    fio_job_content += "\nnumjobs=%d" % (njobs)
    qdepth = g_conf["fio_iodepth"] if g_conf.has_key("fio_iodepth") else 3
    fio_job_content += "\niodepth=%d" % (qdepth)
    fio_job_content += "\n"
    rw = g_conf["fio_rw"] if g_conf.has_key("fio_rw") else "randrw"
    nj_str = g_conf["runfio_jobs"] if g_conf.has_key("runfio_jobs") else str(njobs)
    qd_str = g_conf["runfio_qdepth"] if g_conf.has_key("runfio_qdepth") else str(qdepth)
    jobdesc = "%s.qd%s.njobs%s.%ddup.%dcomp.%s_dist.%dsec" % \
              (rw, re.sub(',| ', '-', qd_str.strip()), re.sub(',| ', '-', nj_str.strip()),
               duprate, comprate, dist, runtime)
    if fill:
        rw = "write"
    # get UNIIO iscsi luns on the client
    client_target.exe("lsscsi")   # list lun info
    cmd = "lsblk -p -o name,vendor | grep -w UNIIO | grep sd | awk '{print \$1}'"
    uio_devs = client_target.exe(cmd).getlist()
    if not uio_devs:
        # no uniio iscsi devices on this client
        common.log("no uniio iscsi devices on this client: %s" % (client_target.address), 1)
        return None, None
    for dev in uio_devs:
        fio_job_content += "\n"
        if rw[:6] == "sepjob":
            # no read/write mix constraint; define separate jobs for read and write
            if rw[7:] == "rw":
                # separate jobs for sequential read and write
                fio_job_content += "\n[%s_read]" % (dev)
                fio_job_content += "\nrw=read"
                fio_job_content += "\nfilename=%s" % (dev)
                fio_job_content += "\n[%s_write]" % (dev)
                fio_job_content += "\nrw=write"
                fio_job_content += "\nfilename=%s" % (dev)
            else:
                # separate jobs for random read and write
                fio_job_content += "\n[%s_read]" % (dev)
                fio_job_content += "\nrw=randread"
                fio_job_content += "\nfilename=%s" % (dev)
                fio_job_content += "\n[%s_write]" % (dev)
                fio_job_content += "\nrw=randwrite"
                fio_job_content += "\nfilename=%s" % (dev)
        else:
            # actual fio-supported rw types
            fio_job_content += "\n[%s]" % (dev)
            fio_job_content += "\nfilename=%s" % (dev)
            fio_job_content += "\nrw=%s" % (rw)
            if rw.strip().find('rw') >= 0:
                # mixed read/write
                fio_job_content += "\nrwmixread=%d" % (g_conf["fio_rwmixread"] if g_conf.has_key("fio_rwmixread") else 80)
                fio_job_content += "\nrwmixwrite=%d" % (g_conf["fio_rwmixwrite"] if g_conf.has_key("fio_rwmixwrite") else 20)
    return jobdesc, fio_job_content
def handleopts():
    global g_conf, g_runtime_dir, g_force, g_shutdown_only, g_boot_only, g_update, g_init, g_perftest, \
           g_binonly, g_fullmap, g_cpudata, g_fill, g_createluns, g_delluns_only, g_ref, \
           g_init_backend_only, g_uioonly, g_threadtable
    conf_file = "%s/auto.json" % (os.path.dirname(os.path.realpath(__file__)))
    try:
        # "initbackend" added to the long options so '--initbackend' matches '-d' below
        options, args = getopt.gnu_getopt(sys.argv[1:], "hc:fsbuipd",
                                          ["help", "configfile=", "force", "shutdown", "boot", "initbackend",
                                           "update", "init", "perftest", "binonly=", "uioonly", "fullmap",
                                           "cpudata", "fill=", "createluns=", "deleteluns", "ref=", "threadtable="])
    except getopt.GetoptError as err:
        usage(err)
    for o, a in options:
        if o in ('-h', '--help'):
            usage()
        if o in ('-c', '--configfile'):   # match the long option actually defined above
            conf_file = a
        if o in ('-f', '--force'):
            g_force = True
        if o in ('-s', '--shutdown'):
            g_shutdown_only = True
        if o in ('-b', '--boot'):
            g_boot_only = True
        if o in ('-d', '--initbackend'):
            g_init_backend_only = True
        if o in ('-u', '--update'):
            g_update = True
        if o in ('', '--ref'):
            g_ref = a
        if o in ('', '--binonly'):
            g_binonly = a
        if o in ('', '--uioonly'):
            g_uioonly = True
        if o in ('-i', '--init'):
            g_init = True
        if o in ('-p', '--perftest'):
            g_perftest = True
        if o in ('', '--fullmap'):
            g_fullmap = True
        if o in ('', '--cpudata'):
            g_cpudata = True
        if o in ('', '--fill'):
            g_fill = int(a)
        if o in ('', '--createluns'):
            g_createluns = int(a)
        if o in ('', '--deleteluns'):
            g_delluns_only = True
        if o in ('', '--threadtable'):
            g_threadtable = a
    # load configuration file
    try:
        f = open(conf_file)
    except IOError:
        common.log("can not open configuration file '%s'." % conf_file, 1)
        return None
    jstr = f.read()
    if not jstr:
        common.log("can not read configuration file '%s'." % conf_file, 1)
        return None
    conf = json.loads(jstr)
    if not conf:
        common.log("can not parse configuration file '%s'." % conf_file, 1)
        return None
    g_runtime_dir = conf["runtime_dir"]
    if g_ref:
        conf["uniio_checkout"] = g_ref   # a git commit to checkout
    if g_binonly and g_binonly != 'build' and (not me.is_command_executable(g_binonly)):
        usage("%s does not exist or is not executable." % (g_binonly))
    if g_threadtable and ((not os.path.exists(g_threadtable)) or (not os.path.isfile(g_threadtable))):
        usage("%s does not exist or is not a regular file." % (g_threadtable))
    g_conf = conf
    return conf
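# Example invocations (hypothetical; assuming the script is run as ./auto.py,
# options per handleopts() above):
#
#   ./auto.py -u -p                          # build, update the cluster, then run the perf test
#   ./auto.py -i --createluns=18 -p          # reinit the cluster, create 18 luns, run the perf test
#   ./auto.py -p --fill=600 --cpudata        # fill the luns for 600s first, collect flame graphs
#   ./auto.py -c /path/to/other.json -s -f   # force-shutdown using an alternate config file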