def check_upgrade():
    """Self-update: fetch the agent script from the server, compare checksums
    (ignoring the PIFM_HOST line), and install/restart when they differ."""
    # Download the authoritative copy of this agent from the server.
    server_file = curl(PIFM_HOST + '/client_agent/pifm_agent.py')
    # Checksum both copies with the PIFM_HOST line stripped, so a differing
    # server address alone does not trigger an upgrade.
    server_sum = awk(
        md5sum(
            grep(server_file, '-v', '^PIFM_HOST')
        ), '{print $1}'
    )
    local_sum = awk(
        md5sum(
            grep('-v', '^PIFM_HOST', OUR_SCRIPT)
        ), '{print $1}'
    )
    if str(server_sum) != str(local_sum):
        logging.info(
            "server: {server}, local: {local}, should update.".format(
                server=server_sum, local=local_sum
            )
        )
        # Stage the downloaded copy to a temp path.
        with open(TMP_SCRIPT, 'w') as f:
            f.write(str(server_file))
        # NOTE(review): this sed rewrites OUR_SCRIPT, but it is the staged
        # TMP_SCRIPT that gets compiled and installed below — the intended
        # target looks like TMP_SCRIPT; confirm before relying on this.
        sed('-i',
            "0,/def/ s#http://pi_director#{myhost}#".format(myhost=PIFM_HOST),
            OUR_SCRIPT
        )
        # Only install the new copy if it byte-compiles cleanly.
        status = python('-m', 'py_compile', TMP_SCRIPT)
        if (status.exit_code == 0):
            shutil.copy(TMP_SCRIPT, OUR_SCRIPT)
            os.chmod(OUR_SCRIPT, 0755)  # Python 2 octal literal
            sys.exit(0)  # exit so a supervisor restarts the new version
def map_rule(tdb_path: str, sparql_rule: str, rule_name: str = None,
             dump_file: str = None, sparql_vars: dict = None,
             namespaces: XNamespaceManager = DEFAULT_NAMESPACES):
    """Run a SPARQL rule against a TDB2 store and dump the result rows.

    :param tdb_path: TDB2 database location (passed as ``--loc``).
    :param sparql_rule: SPARQL text; ``${key}`` placeholders are substituted
        from ``sparql_vars`` before execution.
    :param rule_name: label for progress output; extracted from the rule text
        when not given, falling back to ``<Unknown>``.
    :param dump_file: destination for TSV result rows (header stripped, each
        row terminated with ``.``).
    :param sparql_vars: placeholder substitutions. Default is ``None`` (no
        substitutions) instead of ``{}`` to avoid the shared mutable default
        argument pitfall; passing an explicit dict behaves as before.
    :param namespaces: prefix declarations prepended to the query.
    :raises ChildProcessError: when tdb2.tdbquery fails.
    """
    jena_home = get_jena_home()
    rule_name = extract_rule_name(sparql_rule, rule_name)
    if not rule_name:
        rule_name = '<Unknown>'
    # resolve placeholders
    if sparql_vars:
        for key in sparql_vars:
            sparql_rule = sparql_rule.replace("${%s}" % key, sparql_vars[key])
    if namespaces:
        sparql_rule = namespaces.to_sparql() + "\n" + sparql_rule
    print(f"Applying '{rule_name}'", file=stderr)
    try:
        tdb_sh = sh.Command(jena_home + "/bin/tdb2.tdbquery")
        tdb_sh = tdb_sh("--loc=%s" % tdb_path, "--results=tsv", "--query=-",
                        _piped=True, _in=sparql_rule)
        # NR > 1 skips the TSV header row.
        sh.awk(tdb_sh, 'NR > 1 { print $0 "." }', _out=dump_file)
    except Exception as ex:
        raise ChildProcessError(
            "Error: while running the query:\n%s " % sparql_rule) from ex
def wifi(self):
    """Return "<ssid> <ip>" for the active interface, or "::" when none."""
    self.__update_network_interface()
    iface = self.__network_interface
    if not iface:
        return "::"
    # SSID from networksetup, IPv4 address from ifconfig.
    ssid = sh.awk(sh.networksetup("-getairportnetwork", iface),
                  '{print $4}').strip()
    addr = sh.awk(sh.ifconfig(iface), '$1=="inet" {print $2}').strip()
    return ssid + " " + addr
def ls():
    """List keychain entry names that carry the MACOS_KEYCHAIN_PREFIX."""
    captured = io.StringIO()
    sh.awk(
        sh.grep(sh.security("dump-keychain"), "0x00000007"),
        "-F=",
        "{print $2}",
        _out=captured,
    )
    prefix = f'"{MACOS_KEYCHAIN_PREFIX}: '
    names = []
    for line in captured.getvalue().strip().split("\n"):
        if line.startswith(prefix):
            # drop the prefix and the trailing quote character
            names.append(line.split(prefix)[1][:-1])
    return names
def get_local_socket():
    """Best-effort discovery of the local MySQL socket path via netstat.

    Returns the socket path, or None when no existing path is found (so
    callers can make it a required parameter instead).
    """
    # try to determine the mysql socket path
    # Raw strings: the awk pattern contains "\." which is an invalid escape
    # sequence in a plain Python literal (DeprecationWarning on modern Python);
    # the byte content is unchanged.
    local_socket = ""
    if "linux" in sys.platform:
        local_socket = str(
            awk(netstat('-ln'), r'/mysql(.*)?\.sock/ { print $9 }')).strip()
    elif sys.platform == "darwin":
        local_socket = str(
            awk(netstat('-an'), r'/mysql(.*)?\.sock/ { print $5 }')).strip()
    # if we don't find a file, make it a required parameter
    if not os.path.exists(local_socket):
        local_socket = None
    return local_socket
def findContainer(name):
    """Return the ID of the first running container matching `name`, or ""."""
    container = ""
    try:
        # docker ps | grep name | head -n 1 | awk '{print $1}'
        container = sh.awk("{print $1}",
                           _in=sh.head("-n 1",
                                       _in=sh.grep(name, _in=sh.docker.ps())))
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates;
        # print(...) works on both Python 2 and Python 3.
        print("container not available")
    return container.rstrip()
def _get_files(file_type, file_index):
    """Staged-file paths (from /tmp/git_hook) whose extension is `file_type`,
    taking column `file_index` of each line and dropping paths under the
    module-level `except_paths` prefixes. Returns None when grep is empty."""
    # -P: Perl regex; _ok_code accepts grep's exit 1 ("no matches") as success.
    files = awk(grep(cat("/tmp/git_hook"), "-P", "(A|M).*.%s$" % file_type,
                     _ok_code=[0, 1]),
                "{print $%s}" % file_index, _iter=True)
    # NOTE(review): with _iter=True this is a lazy command object; the truth
    # test below presumably reflects command state rather than output
    # emptiness — confirm against the sh library version in use.
    if not files:
        return None
    exten = ".%s" % file_type
    # Trim anything after the extension (e.g. quoting/rename artifacts).
    files = [
        file_path[:file_path.rindex(exten) + len(exten)] for file_path in files
    ]
    if not except_paths:
        return files
    except_files = []
    for file_path in files:
        for except_path in except_paths:
            if file_path not in except_files and file_path.startswith(
                    except_path):
                except_files.append(file_path)
    return [file_path for file_path in files if file_path not in except_files]
def findImage(repository, tag):
    """Return the image ID of the first `repository` image matching `tag`."""
    container = ""
    try:
        # docker images repo | grep tag | head -n 1 | awk '{print $3}'
        container = sh.awk("{print $3}",
                           _in=sh.head("-n 1",
                                       _in=sh.grep(tag,
                                                   _in=sh.docker("images",
                                                                 repository))))
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates;
        # print(...) works on both Python 2 and Python 3.
        print("container not available")
    return container.rstrip()
def make_ip_window():
    """Draw the curses window that reports the sandbox IP or network errors."""
    H, W = screen.getmaxyx()
    # // : getmaxyx() returns ints and curses wants int coordinates; plain /
    # would yield a float on Python 3 (identical result on Python 2 ints).
    ip_win = screen.subwin(H // 2, W, H // 2 - HINT_WIDTH, 0)
    ip_win.box()
    try:
        # First address getent resolves for our own hostname.
        ip = sh.head(sh.awk(sh.getent("ahosts", sh.hostname().strip()),
                            "{print $1}"), n="1").strip()
        if ip == "127.0.0.1":
            # Loopback means DHCP never handed out a real address.
            raise DHCPMisconfiguration()
    except sh.ErrorReturnCode:
        ip_win.addstr(1, 2, "===================================")
        ip_win.addstr(2, 2, "Connectivity issues detected!")
        ip_win.addstr(3, 2, "===================================")
        ip_win.addstr(4, 2, "Check VM setup instructions")
        ip_win.addstr(6, 2, "For details, see VM setup instructions")
    except DHCPMisconfiguration:
        ip_win.addstr(1, 2, "===================================")
        ip_win.addstr(2, 2, "Connectivity issues detected!")
        ip_win.addstr(3, 2, "===================================")
        ip_win.addstr(4, 2, "Check connection of Host-only interface")
        ip_win.addstr(5, 2, "and check DHCP is enabled for it")
        ip_win.addstr(7, 2, "For details, see VM setup instructions")
    else:
        ip_win.addstr(1, 2, "To initiate your Hortonworks Sandbox session,")
        ip_win.addstr(2, 2, "please open a browser and enter this address ")
        ip_win.addstr(3, 2, "in the browser's address field: ")
        ip_win.addstr(4, 2, "http://%s/" % ip)
def main():
    """Report missing HDFS output files for a shapesim run: compare what
    `hadoop fs -ls` actually finds against every expected ring-trial URL."""
    options,args = parser.parse_args(sys.argv[1:])
    if len(args) < 3:
        parser.print_help()
        sys.exit(45)
    run=args[0]
    is2 = int(args[1])
    ie_or_is2n = int(args[2])
    # NOTE(review): argument semantics inferred from names only — confirm
    # against the shapesim module.
    conf=shapesim.read_config(run)
    simconf = shapesim.read_config(conf['sim'])
    # Wildcard pattern matching every trial's output file.
    pattern=shapesim.get_output_url(run, is2, ie_or_is2n, itrial='*', fs='hdfs')
    # Column 8 of `hadoop fs -ls` output is the file path.
    flist=awk(hadoop('fs','-ls',pattern), '{print $8}').split()
    nring = simconf['nring']
    for i in xrange(nring):  # Python 2 code (xrange, print statement)
        f=shapesim.get_output_url(run, is2, ie_or_is2n, itrial=i, fs='hdfs')
        # hadoop ls output carries no scheme prefix; strip ours to compare.
        f=f.replace('hdfs://','')
        if f not in flist:
            print f
def make_ip_window():
    """Draw the curses window that reports the sandbox IP or network errors."""
    H, W = screen.getmaxyx()
    # // : getmaxyx() returns ints and curses wants int coordinates; plain /
    # would yield a float on Python 3 (identical result on Python 2 ints).
    ip_win = screen.subwin(H // 2, W, H // 2 - HINT_WIDTH, 0)
    ip_win.box()
    try:
        # First address getent resolves for our own hostname.
        ip = sh.head(
            sh.awk(
                sh.getent("ahosts", sh.hostname().strip()),
                "{print $1}"),
            n="1").strip()
        if ip == "127.0.0.1":
            # Loopback means DHCP never handed out a real address.
            raise DHCPMisconfiguration()
    except sh.ErrorReturnCode:
        ip_win.addstr(1,2,"===================================")
        ip_win.addstr(2,2,"Connectivity issues detected!")
        ip_win.addstr(3,2,"===================================")
        ip_win.addstr(4,2,"Check VM setup instructions")
        ip_win.addstr(6,2,"For details, see VM setup instructions")
    except DHCPMisconfiguration:
        ip_win.addstr(1,2,"===================================")
        ip_win.addstr(2,2,"Connectivity issues detected!")
        ip_win.addstr(3,2,"===================================")
        ip_win.addstr(4,2,"Check connection of Host-only interface")
        ip_win.addstr(5,2,"and check DHCP is enabled for it")
        ip_win.addstr(7,2,"For details, see VM setup instructions")
    else:
        ip_win.addstr(1,2,"To initiate your Hortonworks Sandbox session,")
        ip_win.addstr(2,2,"please open a browser and enter this address ")
        ip_win.addstr(3,2,"in the browser's address field: ")
        ip_win.addstr(4,2,"http://%s/" % ip)
def run(self):
    """Print "free/total" disk space as reported by `df --total`."""
    def query(column):
        # END {print $1} picks the value from df's trailing "total" row.
        return sh.awk(sh.df("-h", f"--output={column}", "--total"),
                      "END {print $1}").rstrip()
    free = query('avail')
    total = query('size')
    title_text = title('free disk space')
    print(f'{title_text}{free}/{total}')
def mounted(dir=None):
    """True when `dir` (default: ./files) is a mount point per /proc/mounts."""
    try:
        target = dir if dir else os.path.join(os.getcwd(), 'files')
        target = os.path.realpath(target)
        # Column 2 of /proc/mounts is the mount point.
        for mount_point in list(sh.awk(sh.cat("/proc/mounts"), "{print $2}")):
            if target == mount_point.strip():
                return True
        return False
    except sh.ErrorReturnCode:
        return False
def vcf_to_plink(baseo):
    # Changes from the VCF output from pyrad to a plink file that has been
    # cleaned up for LD and Taxon_coverate (missing data).
    # NOTE(review): this reads the module-level `basename` (plus threads/mind/
    # geno/maf) while taking `baseo` only as the final output prefix — confirm
    # that mixing the global input prefix with the parameter is intentional.
    sh.vcftools(shlex.split(f"--vcf {basename}.vcf --out {basename} --plink"))
    # Replace first column of map file to 1 to avoid errors concening too many
    # Chromosomes in plink. Plink treats each locci as a chromosome with the
    # way the map file is encoded. This sets all locci on Chrom1.
    # This does not affect anything because we are not doing any chromosomal
    # analysis. Just analyzing a SNP dataset.
    maphandle=f"{basename}_temp.map"
    sh.awk("$1=1", f"{basename}.map", _out=maphandle)
    sh.mv(f"{basename}_temp.map",f"{basename}.map")
    # Prunes the dataset for LD with indep-pairwise.
    # Removes locci with minimum allele freq of under 5% with maf
    # (ie removes fixed alleles).
    sh.plink2(shlex.split(f"--file {basename} --threads {threads} --indep-pairwise 50 10 0.1"))
    # MAF minor allele freq and mind is percent missing data cutoff before
    # individual is removed
    sh.plink2(shlex.split(f"--file {basename} --threads {threads} --extract plink.prune.in --mind {mind} --geno {geno} --maf {maf} --recode --out {baseo}"))
    # sh.plink2(shlex.split(f"--file {basename} --threads {threads} --extract plink.prune.in --mind {mind} --geno {geno} --recode --out {baseo}"))
    return
def findContainer(name):
    """Return the ID of the first running container matching `name`, or ""."""
    container = ""
    try:
        # docker ps | grep name | head -n 1 | awk '{print $1}'
        container = sh.awk("{print $1}",
                           _in=sh.head("-n 1",
                                       _in=sh.grep(name, _in=sh.docker.ps())))
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates;
        # print(...) works on both Python 2 and Python 3.
        print("container not available")
    return container.rstrip()
def memory(self):
    """Return "<free-memory%> <top-process>" from memory_pressure and top."""
    mAvailMem = sh.awk(sh.memory_pressure(), '/percent/ {print $5}').strip()
    # r"\s+": raw string — "\s" is an invalid escape in a plain literal
    # (DeprecationWarning on modern Python); byte content unchanged.
    # Collapse repeated whitespace in top's second-to-last output line.
    mTopProcess = re.sub(r"\s+", " ",
                         sh.top("-l", "1", "-o", "mem", "-U", "hasky", "-n1",
                                "-stats", "COMMAND,MEM").split("\n")[-2])
    return mAvailMem + " <" + mTopProcess + ">"
def _get_swarm_service_state(service_name):
    """Current state column of a swarm service's newest task, or "Not present"."""
    docker = sh.Command("docker")
    try:
        # docker service ps | head -2 | tail -1 | awk '{print $6}'
        state = sh.awk(
            sh.tail(sh.head(docker(["service", "ps", service_name]), "-2"),
                    "-1"),
            "{print $6}").strip(' \t\n\r\'')
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates.
        state = "Not present"
    return state
def parse_pull_args(desc=None):
    """Build and parse CLI arguments for pulling from a remote MySQL database.

    The local socket is auto-detected from netstat where possible; all other
    connection settings fall back to the defaults below.
    """
    parser = argparse.ArgumentParser(
        description=desc,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # some defaults for the local database
    dl_user = '******'
    dl_database = 'things_downstream'
    dl_password = '******'
    # try to determine the mysql socket path
    # Raw strings: the awk pattern contains "\." which is an invalid escape
    # sequence in a plain Python literal; byte content unchanged.
    dl_socket = ""
    if "linux" in sys.platform:
        dl_socket = str(awk(netstat('-ln'),
                            r'/mysql(.*)?\.sock/ { print $9 }')).strip()
    elif sys.platform == "darwin":
        dl_socket = str(awk(netstat('-an'),
                            r'/mysql(.*)?\.sock/ { print $5 }')).strip()
    # if we don't find a file, make it a required parameter
    if not os.path.exists(dl_socket):
        dl_socket = None
    # some defaults for the remote database
    # typically this would be a remote server--using localhost for testing
    dr_database = 'things_upstream'
    dr_user = '******'
    dr_password = '******'
    dr_host = '127.0.0.1'
    parser.add_argument('--local-user', default=dl_user)
    parser.add_argument('--local-password', default=dl_password)
    parser.add_argument('--local-database', default=dl_database)
    parser.add_argument('--local-socket', default=dl_socket)
    parser.add_argument('-u', '--remote-user', default=dr_user)
    parser.add_argument('-p', '--remote-password', default=dr_password)
    parser.add_argument('-o', '--remote-host', default=dr_host)
    parser.add_argument('-d', '--remote-database', default=dr_database)
    parser.add_argument('-c', '--cipher')
    return parser.parse_args()
def evaluate_mem(_pid, logfile):
    """
    Use the ps command for profiling memory for now: append the RSS of
    process `_pid` to `logfile` once per second, forever.
    """
    # print(...) call form works on both Python 2 and Python 3.
    print("Start benchmarking the RSS memory usage, result will be in " + logfile)
    with open(logfile, "w") as wf:
        while 1:
            # ps aux | tr -s ' ' | awk: pick the RSS column ($6) of the pid.
            info = sh.awk(sh.tr(sh.ps("aux"), "-s", "' '"),
                          "-vpid={}".format(_pid),
                          "{if ($2==pid) {print $6}}").strip()
            wf.write("{:<12s}: {:10s}\n".format(str(datetime.datetime.now()),
                                                info))
            wf.flush()
            # FIXME: ugly solution here
            time.sleep(1)
def get_memory_usage():
    """RSS of this process in MB on Linux; the string 'NA' elsewhere
    (e.g. on Windows, where the ps/awk pipeline is unavailable)."""
    usage = 'NA'
    if platform.system() == 'Linux':
        import sh
        # Sum the RSS column of `ps u -p <pid>` and convert KiB -> MB.
        usage = float(sh.awk(sh.ps('u', '-p', os.getpid()),
                             '{sum=sum+$6}; END {print sum/1024}'))
    return usage
def ipv4_nslookup(ip):
    """Reverse-resolve `ip` and return the hostname with its trailing dot
    (and last label) stripped, shell-pipeline style."""
    ip = ipaddress.ip_address(ip)  # validates the input (raises ValueError)
    from sh import awk, cut, nslookup, rev
    # nslookup ${ip} | cut -d= -f2- | awk '{print $1;}' | rev | cut -d. -f2- | rev
    hostname = str(
        rev(
            cut(rev(awk(cut(nslookup(str(ip)), "-d=", "-f2-"), "{print $1;}")),
                "-d.", "-f2-"))).strip()
    if hostname:
        return hostname
    # NOTE(review): raising StopIteration from a plain function is unusual
    # (inside a generator it would become RuntimeError per PEP 479); callers
    # probably want LookupError — confirm before changing, as they may catch
    # StopIteration today.
    raise StopIteration()
def _get_swarm_node_ip(container_name):
    """Wait until the swarm service is Running, then resolve its node's IP.

    Polls every 5 seconds, giving up (and exiting) after 12 attempts.
    """
    docker = sh.Command("docker")
    state = "unknown"
    count = 0
    while state != "Running":
        # print(...) call form works on both Python 2 and Python 3.
        print("Waiting for container deployment...")
        time.sleep(5)
        state = _get_swarm_service_state(container_name)
        count += 1
        if count == 12:
            print("Error: could not get swarm node IP")
            exit(1)
    # service ps -> node name (column 4) -> `host` lookup -> last field = IP.
    ip = sh.awk(
        sh.xargs(
            sh.awk(
                sh.tail(
                    sh.head(docker(["service", "ps", container_name]), "-2"),
                    "-1"), "{print $4}"),
            "host"),
        "{print $NF}").strip(' \t\n\r\'')
    return ip
def get_all_ingress(self):
    """Names of ingresses in the namespace worth backing up, skipping the
    per-session and per-app ingresses (and blank lines)."""
    listing = sh.awk(
        sh.grep(self.kubectl('get', 'ingress', '-n', self.namespace),
                'ingress'),
        '{print $1}')
    backup_ingress = []
    for name in listing.split('\n'):
        skip = ('anaconda-session-ingress-' in name
                or 'anaconda-app-ingress-' in name
                or name == '')
        if not skip:
            backup_ingress.append(name)
    return backup_ingress
def findImage(repository, tag):
    """Return the image ID of the `repository` image whose tag equals `tag`."""
    container = ""
    try:
        # One "id:tag" line per image; scan for an exact tag match.
        output = sh.awk("{print $3\":\"$2}", _in=sh.docker("images", repository))
        for row in output.split('\n'):
            containerId, containerTag = row.split(':')
            if containerTag == tag:
                container = containerId
                break
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates;
        # print(...) works on both Python 2 and Python 3.
        print("container not available")
    return container.rstrip()
def device_list():
    """
    Get udid from iPhone and iPad plugged into the computer.
    Returns a list.
    """
    # TODO: separate iPhone and iPad
    # Extract the USB blocks for iPad/iPhone up to their Serial line.
    raw = sh.sed(sh.system_profiler("SPUSBDataType"), "-n", "-e",
                 '/iPad/,/Serial/p', "-e", '/iPhone/,/Serial/p')
    serials = sh.awk(sh.grep(raw, "Serial Number:"), "-F", ": ",
                     "{print $2}").split('\n')
    return [serial for serial in serials if serial]
def get_commit_files(file_type):
    """Paths of added/modified files with extension `file_type` in the staged
    git diff, with anything after the extension trimmed off."""
    system("git diff --cached --name-status > /tmp/git_hook")
    # "(A|M)" — the alternation must be grouped; the previous "A|M.*..." made
    # grep -P match ANY line containing "A" (sibling helpers use "(A|M)" too).
    # _ok_code accepts grep's exit 1 ("no matches") as success.
    files = awk(
        grep(
            cat("/tmp/git_hook"),
            "-P",
            "(A|M).*.%s$" % file_type,
            _ok_code=[0, 1]
        ),
        "{print $2}",
        _iter=True
    )
    exten = ".%s" % file_type
    return [path[:path.rindex(exten) + len(exten)] for path in files]
def __get_host_count(self, host_type=""):
    """
    Get the current number of VMs running that match host_type string.
    Exits when host_type is empty.
    """
    if not host_type:
        sys.exit("Can't count non-existant host_type")
    # virsh list --all | grep host_type | awk '{print $2}' | wc -l
    matching = sh.grep(sh.virsh('list', '--all'), '%s' % host_type)
    hosts = sh.wc(sh.awk(matching, '{print $2}'), '-l')
    return str(hosts).rstrip()
def get_read_counts(sra_accession):
    "return read counts for fastq files 1 and 2"
    counts = []
    for mate in (1, 2):
        fastq = "{}_{}.fastq.gz".format(sra_accession, mate)
        # four lines per FASTQ record, hence line count / 4
        count = int(
            sh.awk(
                sh.zcat(fastq),
                "{s++}END{print s/4}",
                _piped=True,
            ).strip())
        counts.append(count)
    return counts[0], counts[1]
def find_process_pid(process_line, child_process=False):
    """PID(s) of ps lines matching `process_line` (case-insensitive),
    excluding the grep itself; raises AssertionError when nothing matches."""
    # auxfww adds the process-tree view so children can be matched too.
    ps_opts = 'auxfww' if child_process else 'auxww'
    try:
        candidates = sh.grep(sh.ps(ps_opts, _piped=True, _tty_out=False),
                             "-ie", process_line)
        without_grep = sh.grep(candidates, '-v', 'grep')
        pid = sh.awk(without_grep, "{print $2}")
    except sh.ErrorReturnCode:
        raise AssertionError("Cannot find process pid")
    return pid.strip()
def findImage(repository, tag):
    """Return the image ID of the `repository` image whose tag equals `tag`."""
    container = ""
    try:
        # One "id:tag" line per image; scan for an exact tag match.
        output = sh.awk("{print $3\":\"$2}",
                        _in=sh.docker("images", repository))
        for row in output.split('\n'):
            containerId, containerTag = row.split(':')
            if containerTag == tag:
                container = containerId
                break
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates;
        # print(...) works on both Python 2 and Python 3.
        print("container not available")
    return container.rstrip()
def max_file_hash(n=10, short=False):
    """The n largest objects from `git verify-pack -v`; with short=True only
    the object hashes, otherwise the full verify-pack lines."""
    pack_path = glob('.git/objects/pack/*.idx')
    if not pack_path:
        # No pack index yet — let git build one, then retry.
        git.gc()
        pack_path = glob('.git/objects/pack/*.idx')
    limit = '-{0:d}'.format(n)
    if short:
        biggest = tail(sort(git('verify-pack', '-v', pack_path), '-k', '3'),
                       '-n', limit)
        return awk(biggest, '{print $1}')
    return tail(sort(git('verify-pack', '-v', pack_path), '-k', '3', '-n'),
                limit)
def mount_vm(self, img_src="", mount_path=""):
    """ Mount img_src at mount_path with kpartx """
    # Fail fast on bad paths before touching the device mapper.
    if not os.path.exists(img_src):
        sys.exit("Invalid image to mount: %s" % (img_src))
    if not os.path.exists(mount_path):
        sys.exit("Invalid mount path: %s" % (mount_path))
    # All device-mapper work runs under sudo via sh's context manager.
    with sh.sudo:
        # Create /dev/mapper entries for the image's partitions.
        sh.kpartx('-a', img_src)
        # First non-deleted mapping name from `kpartx -l`.
        mount_device=sh.head(sh.awk(sh.grep(sh.kpartx('-l', img_src), '-v', 'deleted'), '{print $1}'), '-1')
        sh.mount("/dev/mapper/%s" % (mount_device.rstrip()), mount_path)
def grep_package(self, name, pattern=None):
    """[(package, version)] pairs matching `name` in the package index;
    returns [] on any failure (best-effort lookup)."""
    query = (pattern if pattern else "{0}").format(name)
    try:
        records = grep_dctrl('--field', 'Package,Provides',
                             '--show-field', 'Package,Version',
                             '--eregex', '--ignore-case',
                             '--pattern', query,
                             os.path.join(self.path, self.index_file))
        # Pair each Package line with the Version line that follows it.
        merged = awk(records, '/Package/{p=$2;next} /Version/{print p " " $2}')
        return [row.rstrip().split(' ', 1) for row in merged]
    except Exception as err:
        #print(str(err))
        return []
def check_disks(hdd, pool):
    """Compare disks reported by fdisk against the tracked 'hdds' file and
    add any new ones to `pool`.

    Returns True when new disks were added, False when nothing changed
    (and None when the tracking file is missing).
    """
    try:
        with open(sys.path[0]+'/hdds','r') as file:
            hard_drives = [x.strip() for x in file.readlines()]
        hdd_list = sh.awk(sh.grep(sh.fdisk('-l'),'^Disk /dev/s'),'{print $2}').split(':')
        hdd_list = [x.strip() for x in hdd_list if x != '\n']
        if hdd_list == hard_drives:
            return False
        # distinct loop variable: the original shadowed the `hdd` parameter
        for disk in hdd_list:
            if disk not in hard_drives:
                add_hdd(disk, pool)
        return True
    except IOError:
        print('Could not find file that tracks hdds, does "hdds" exist in {loc}?'.format(loc=sys.path[0]))
def containers():
    """ Return list of container (names) instanciated """
    from io import StringIO
    captured = StringIO()
    try:
        # docker ps -a | awk '{print $NF}' | tail -n+2  (skip the header row)
        tail(awk(docker('ps', '-a'), '{print $NF}'), '-n+2', _out=captured)
    except Exception as err:
        log.error(err)
    return captured.getvalue().split()
def disk_space_monitoring(perc):
    '''
    Print "usage% filesystem" for file systems fuller than `perc` percent.
    With bash it looks like on disk_space_monitoring.sh.
    '''
    # sh runs commands without a shell, so the grep pattern must NOT carry
    # shell quoting: the previous "'^Filesystem|tmpfs'" passed literal quote
    # characters as part of the pattern and never filtered anything.
    output = awk(grep(df('-Ph'), "-vE", "^Filesystem|tmpfs"),
                 '{ print $5,$1 }')
    for data in output:
        splitter = data.split('%')[0]
        try:
            perc_data = int(splitter)
            if perc_data > perc:
                print(data)
        except ValueError:
            # non-numeric usage column — skip the line
            continue
def __trafficPerSecond(self):
    """RX/TX rates in kB/s since the previous call, from netstat byte counters.

    Fix: the def was missing `self` even though the body uses self throughout,
    so calling it as a method raised TypeError.
    """
    # NR==2: the data row; columns 7/10 are the interface byte counters.
    self.__flow_new = sh.awk(
        sh.netstat("-I", self.__network_interface, "-bn"),
        'NR==2 {print $7,$10}').strip().split(" ")
    self.__flow_new = [int(i) for i in self.__flow_new]
    self.__stamp_new = time.time()
    d = self.__stamp_new - self.__stamp_old
    rRate = (self.__flow_new[0] - self.__flow_old[0]) / 1000 / d
    tRate = (self.__flow_new[1] - self.__flow_old[1]) / 1000 / d
    # Roll the counters forward for the next measurement interval.
    self.__flow_old[0] = self.__flow_new[0]
    self.__flow_old[1] = self.__flow_new[1]
    self.__stamp_old = self.__stamp_new
    return (rRate, tRate)
def grep_package(self, name, pattern="(^|-){0}$"):
    """[(package, version)] pairs for `name` from the local Packages.gz;
    returns [] on any failure (best-effort lookup)."""
    try:
        return [
            line.rstrip().split(' ', 1)
            for line in awk(
                grep_dctrl(
                    zcat(self.local_packages_gz),
                    '-F', 'Package',
                    '-e', pattern.format(name),
                    '-s', 'Package,Version'
                ),
                # pair each Package line with its following Version line
                '/Package/{p=$2;next} /Version/{print p " " $2}'
            )
        ]
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates
        return []
def get_current_brightness(self):
    """
    Returns the current brightness (float) set with xrandr;
    if an error occurs returns None
    """
    try:
        xrandr_output = sh.xrandr('--verbose')
        # -im 1: first case-insensitive "brightness" line only
        line_with_brightness = sh.grep(xrandr_output, '-im', '1', 'brightness')
        brightness = sh.awk(line_with_brightness, '{print $2}')
        return float(brightness)
    except sh.CommandNotFound:
        print('[error] missing tool')
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates
        print('[error] something went wrong')
    return None
def get_display_name(self):
    """
    Returns the name (str) of the primary display found via xrandr;
    if an error occurs, returns None
    """
    try:
        xrandr_output = sh.xrandr()
        # -im 1: first case-insensitive "primary" line only
        line_with_display = sh.grep(xrandr_output, '-im', '1', 'primary')
        display = sh.awk(line_with_display, '{print $1}')
        return display.strip()
    except sh.CommandNotFound:
        print('[error] missing tool')
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates
        print('[error] something went wrong')
    return None
def check_disks(hdd, pool):
    """Compare disks reported by fdisk against the tracked 'hdds' file and
    add any new ones to `pool`.

    Returns True when new disks were added, False when nothing changed
    (and None when the tracking file is missing).
    """
    try:
        with open(sys.path[0] + '/hdds', 'r') as file:
            hard_drives = [x.strip() for x in file.readlines()]
        hdd_list = sh.awk(sh.grep(sh.fdisk('-l'), '^Disk /dev/s'),
                          '{print $2}').split(':')
        hdd_list = [x.strip() for x in hdd_list if x != '\n']
        if hdd_list == hard_drives:
            return False
        # distinct loop variable: the original shadowed the `hdd` parameter
        for disk in hdd_list:
            if disk not in hard_drives:
                add_hdd(disk, pool)
        return True
    except IOError:
        print(
            'Could not find file that tracks hdds, does "hdds" exist in {loc}?'
            .format(loc=sys.path[0]))
def grep_package(self, name, pattern=None):
    """[(package, version)] pairs for `name` from the cached package index;
    returns [] on any failure (best-effort lookup)."""
    pattern = pattern if pattern else "(^|-){0}$"
    try:
        return [
            line.rstrip().split(' ', 1)
            for line in awk(
                grep_dctrl(
                    zcat(os.path.join(self.cache_dir, self.index_file)),
                    '-F', 'Package',
                    '-e', pattern.format(name),
                    '-s', 'Package,Version'
                ),
                # pair each Package line with its following Version line
                '/Package/{p=$2;next} /Version/{print p " " $2}'
            )
        ]
    except Exception:
        # except Exception (not bare except) so Ctrl-C still propagates
        return []
def cleanup_sessions_deployments(process):
    """Delete every anaconda-app / anaconda-session deployment via kubectl."""
    # Grab all sessions and deployments and remove them
    deployments = []
    try:
        deployments = sh.awk(
            sh.grep(
                process.kubectl('get', 'deployments'),
                'anaconda-app-\|anaconda-session-'  # noqa
            ),
            '{print $1}')
    except Exception:
        # Ok if exception thrown as it means nothing was found
        pass
    for name in deployments:
        process.kubectl('delete', 'deployment', name.strip())
    return
def grep_package(self, name, pattern=None):
    """[(package, version)] pairs matching `name` in the package index;
    returns [] on any failure (best-effort lookup)."""
    query = (pattern if pattern else "{0}").format(name)
    try:
        records = grep_dctrl(
            '--field', 'Package,Provides',
            '--show-field', 'Package,Version',
            '--eregex', '--ignore-case',
            '--pattern', query,
            os.path.join(self.path, self.index_file)
        )
        # Pair each Package line with the Version line that follows it.
        merged = awk(records,
                     '/Package/{p=$2;next} /Version/{print p " " $2}')
        return [row.rstrip().split(' ', 1) for row in merged]
    except Exception as err:
        #print(str(err))
        return []
def make_status_window():
    """Draw the curses status window reporting setup/service failures based
    on whether the sandbox obtained a usable (non-loopback) IP address."""
    Height, Width = screen.getmaxyx()
    # Python 2 integer division via "/" — these are int coordinates.
    status_win = screen.subwin(Height / 4 - Width_Factor / 2 , Width, Height / 4 , 0)
    status_win.box()
    try:
        # First address getent resolves for our own hostname.
        ip = sh.head(sh.awk(sh.getent("ahosts", sh.hostname().strip()),"{print $1}"),n="1").strip()
        if ip == "127.0.0.1":
            raise NetworkMisconfiguredException()
    except sh.ErrorReturnCode:
        status_win.addstr(1,2,"MapR-Platfora-Sandbox-For-Hadoop setup did not succseed.")
        raise ServiceFailedtoStartException()
        make_error_window()  # NOTE(review): unreachable after the raise above
    except NetworkMisconfiguredException:
        make_error_window()
    else:
        # NOTE(review): the no-exception path reports "failed to start" —
        # this looks inverted relative to similar status windows; confirm.
        status_win.addstr(1,2,"MapR Services failed to start.", curses.A_BOLD)
        make_error_window()
def dhclient(iface, enable, script=None):
    """Kill any dhclient bound to `iface`, then optionally relaunch it
    (with `script` as the -sf hook when given)."""
    # Disable the dhcp client and flush interface
    try:
        pids = sh.awk(
            sh.grep(sh.grep(sh.ps("ax"), iface, _timeout=5),
                    "dhclient", _timeout=5),
            "{print $1}")
        for pid in pids.split():
            sh.kill(pid)
    except Exception as e:
        _logger.info("Failed to stop dhclient: %s" % e)
        pass
    if not enable:
        return
    if script:
        sh.dhclient("-nw", "-sf", script, iface, _bg=True)
    else:
        sh.dhclient("-nw", iface, _bg=True)
def dhclient(iface, enable, script=None):
    """Stop dhclient processes for `iface`; restart in the background when
    `enable` is set (using `script` as the -sf hook when given)."""
    # Disable the dhcp client and flush interface
    try:
        matched = sh.grep(sh.ps("ax"), iface, _timeout=5)
        matched = sh.grep(matched, "dhclient", _timeout=5)
        for pid in sh.awk(matched, "{print $1}").split():
            sh.kill(pid)
    except Exception as e:
        _logger.info("Failed to stop dhclient: %s" % e)
        pass
    if enable:
        if script:
            sh.dhclient("-nw", "-sf", script, iface, _bg=True)
        else:
            sh.dhclient("-nw", iface, _bg=True)
def get_remote_file_list(job_base_dir, allowed_file_exts):
    """
    Gets a list of all file names with the allowed extensions from a
    .listing file generated by wget.

    Note, if the structure of the .listing file ever changes, this
    method will fail.

    Args:
        job_base_dir (str): the directory path where the .listing file
            is located
        allowed_file_exts (str): set of allowed file extensions

    Returns:
        list: a list of filename strings

    Raises:
        Exception: any exception generated while trying to read the
            .listing file
    """
    try:
        # Fields 9..NF of each listing line form the (possibly spaced)
        # file name, same as:
        # awk '{out=""; for(i=9;i<=NF;i++){out=out" "$i}; print out}' .listing
        listing = sh.awk(
            '{out=""; for(i=9;i<=NF;i++){out=out" "$i}; print out}',
            job_base_dir + '.listing')
        names = [entry.strip() for entry in listing]
        file_list = []
        for ext in list(allowed_file_exts):
            pattern = "*." + str(ext)
            for f_name in fnmatch.filter(names, pattern):
                file_list.append(f_name)
        return file_list
    except BaseException:
        LOGGER.error("Error getting file names from .listing file at " +
                     str(job_base_dir + '.listing') + "\n" +
                     str(traceback.format_exc()))
        raise
def make_status_window():
    """Draw the curses status window reporting setup/service failures based
    on whether the sandbox obtained a usable (non-loopback) IP address."""
    Height, Width = screen.getmaxyx()
    # Python 2 integer division via "/" — these are int coordinates.
    status_win = screen.subwin(Height / 4 - Width_Factor / 2, Width,
                               Height / 4, 0)
    status_win.box()
    try:
        # First address getent resolves for our own hostname.
        ip = sh.head(sh.awk(sh.getent("ahosts", sh.hostname().strip()),
                            "{print $1}"), n="1").strip()
        if ip == "127.0.0.1":
            raise NetworkMisconfiguredException()
    except sh.ErrorReturnCode:
        status_win.addstr(1, 2, "_MAPR_BANNER_NAME_ setup did not succseed.")
        raise ServiceFailedtoStartException()
        make_error_window()  # NOTE(review): unreachable after the raise above
    except NetworkMisconfiguredException:
        make_error_window()
    else:
        # NOTE(review): the no-exception path reports "failed to start" —
        # this looks inverted relative to similar status windows; confirm.
        status_win.addstr(1, 2, "MapR Services failed to start.",
                          curses.A_BOLD)
        make_error_window()
def _get_files(file_type, file_index):
    """Staged git files (column `file_index`) with extension `file_type`,
    minus any path under the module-level `except_paths` prefixes."""
    # -P: Perl regex so "(A|M)" groups the alternation. The previous "-e"
    # selected basic regex, where "(A|M)" matches the literal text "(A|M)"
    # and the pattern never matched real status lines (the sibling helper
    # uses -P with the same pattern).
    files = awk(
        grep(
            cat("/tmp/git_hook"),
            "-P",
            "(A|M).*.%s$" % file_type,
            _ok_code = [0, 1]
        ),
        "{print $%s}" % file_index,
        _iter = True
    )
    if not files:
        return None
    exten = ".%s" % file_type
    # Trim anything after the extension.
    files = [file_path[:file_path.rindex(exten) + len(exten)]
             for file_path in files]
    if not except_paths:
        return files
    except_files = []
    for file_path in files:
        for except_path in except_paths:
            if file_path not in except_files and file_path.startswith(except_path):
                except_files.append(file_path)
    return [file_path for file_path in files if file_path not in except_files]
# encoding: utf-8 # from http://amoffat.github.io/sh/ from contextlib import contextmanager import sh from threading import Thread import time # ps aux | grep ssh | awk '{print $11}' print(sh.awk(sh.grep(sh.ps('aux'), 'ssh'), '{print $11}')) print(sh.grep(sh.docker.version(), 'Version:')) # most recent file print(sh.tail(sh.ls('-lrt'), '-n 1')) @contextmanager def parallel_delayed_run(callable, *args, **kwargs): delay = kwargs.get('delay', 0.5) def delayed_run(): time.sleep(delay) callable(*args) th = Thread(target=delayed_run) th.start() try: yield except sh.SignalException: print('Interrupted !') th.join()
def full_space():
    """Total quota in bytes: 4th line, 3rd column of `quota`, given in KiB."""
    kib = sh.sed(sh.awk(sh.quota(), '{print $3}'), '-n', '4p')
    return int(kib) * 1024
def used_space():
    """Used quota in bytes: 4th line, 1st column of `quota`, given in KiB."""
    kib = sh.sed(sh.awk(sh.quota(), '{print $1}'), '-n', '4p')
    return int(kib) * 1024
from sh import uname, sed, dpkg, awk, grep

# Get our current kernel version
# $(uname -r | sed -r 's/-[a-z]+//')
kernel_version = sed(uname('-r'), '-r', 's/-[a-z]+//').strip()
print('kernel version:', kernel_version)

# get & filter package list: installed ("ii") linux-image / linux-headers
# packages that do NOT belong to the running kernel
installed = dpkg('-l', 'linux-image-[0-9]*', 'linux-headers-[0-9]*',
                 _env={'COLUMNS': '200'})
candidates = awk(installed, '/ii/{print $2}')
packages = grep(candidates, '-ve', kernel_version)
print(packages.strip())
#!/usr/bin/env python import sys import time import getopt import pexpect from pexpect import pxssh from sh import awk,sed,cat,tr,echo # read and parse config file zz_full.info, # generate alias_to_ip,intra_to_ip,number_dv dicts, # and ip_list list: # hostname alise list alias = list(awk((sed(cat('zz_full.info'), "1d")), "{print $1}")) alias = [ str(x.rstrip()) for x in alias ] alias = filter(None, alias) # Of course, you can merge the two above statements: # alias = filter(None, [ str(x.rstrip()) for x in alias ]) # or, the three above statements: # alias = filter(None, # [ str(x.rstrip()) for x in list(awk((sed(cat('zz_full.info'), "1d")), "{print $1}"))]) # intranet ip list intra = list(awk((sed(cat('zz_full.info'), "1d")), "{print $6}")) intra = [ str(x.rstrip()) for x in intra ] intra = filter(None, intra) # public ip list ip_list = list(awk((sed(cat('zz_full.info'), "1d")), "{print $3}")) ip_list = [ str(x.rstrip()) for x in ip_list ] ip_list = filter(None, ip_list) # deal with gameserver number
def get_used_space(self):
    """Bytes used under gs://cloudfusion, per `gsutil du -s`."""
    self.logger.debug("retrieving used space")
    du_output = gsutil('du', '-s', 'gs://cloudfusion')
    # first column of `du -s` is the byte count
    return int(awk(du_output, '{print $1}'))
#!/usr/bin/env python import sys import getopt import pexpect from pexpect import pxssh from sh import awk username = '******' password = '******' # active gameserver alias active_alias = list(awk(awk('$14 ~ /^0$/ { print $0 }', 'zz_full.info'), '{print $1}')) active_alias = map(lambda x: str(x.rstrip()), active_alias) # active gameserver ip(public) active_ip = list(awk(awk('$14 ~ /^0$/ { print $0 }', 'zz_full.info'), '{print $11}')) active_ip = map(lambda x: str(x.rstrip()), active_ip) # mysql slave login passwd sql_passwd = list(awk(awk('$14 ~ /^0$/ { print $0 }', 'zz_full.info'), '{print $16}')) sql_passwd = map(lambda x: str(x.rstrip()), sql_passwd) # data structure number = map(lambda x:x.lstrip('z_'), active_alias) alias_ip_passwd = dict(zip(active_alias, zip(number,active_ip, sql_passwd))) alias_index = sorted(alias_ip_passwd) def single_gameserver(num, hostname, passwd): try: # cmd = '''\ # mysql -uroot -S /tmp/mysqlzz{0}.sock -e "show databases;" > /dev/null 2>&1;[ $(echo $?) == 0 ] && \ # mysql -uroot -S /tmp/mysqlzz{0}.sock -e "show slave status\G" | grep "Running" | sed "s/^ *//" || \