def unblock(self):
    '''Tear down the switchyard pf firewall state (macOS pfctl).

    Flushes the rules installed under the "switchyard" pf anchor, then
    releases the pf reference token saved in self._token.
    '''
    # Flush all rules in our private anchor only; other pf rules are untouched.
    st,output = getstatusoutput("pfctl -a switchyard -Fr") # flush rules
    log_debug("Flushing rules: {}".format(output))
    # Release our pf enable reference (token from an earlier pfctl -E).
    st,output = getstatusoutput("pfctl -X {}".format(self._token))
    log_info("Releasing pf: {}".format(output.replace('\n', '; ')))
def backup():
    """Copy every (source, destination) pair in the module-level ``paths``.

    Regular files are copied with ``cp -u`` (only when newer); directories
    with ``cp -urv``.  A missing destination directory is created first;
    a source that no longer exists is skipped.  Results are printed.
    """
    for (opath, spath) in paths:
        # BUG FIX: the original used the bare expression `next` here, which
        # just references the builtin and does nothing; `continue` is what
        # actually skips a missing source path.
        if not os.path.exists(opath):
            continue
        if not os.path.exists(spath):
            subprocess.getstatusoutput('mkdir -p ' + spath)
        isfile = os.path.isfile(opath)
        isdir = os.path.isdir(opath)
        if isfile:
            filename = opath.split('/')[-1]
            cmd = 'cp -u {0} {1}'.format(opath, spath)
            (status, output) = subprocess.getstatusoutput(cmd)
            print('status: %-3s dir: %-6s' % (status, filename))
            if status:  # non-zero exit: show the error output, indented
                print(' ' * 3, output)
        elif isdir:
            dirname = os.path.dirname(opath).split('/')[-1]
            cmd = 'cp -urv {0} {1}'.format(opath, spath)
            (status, output) = subprocess.getstatusoutput(cmd)
            print('status: %-3s dir: %-8s' % (status, dirname))
            if status:
                print(' ' * 3, output)
        else:
            # Neither file nor directory (socket, broken symlink, ...).
            print('ignored \t %s' % (opath))
def check_status():
    '''Report the state of the update and polling subprocesses.

    Returns a (update_status, polling_status) pair of human-readable
    strings.  When a process has exited, its stderr is read once and
    cached in the module-level *_err_msg globals.  As a side effect the
    docs are regenerated by running DOCS_CMD.
    '''
    global update_err_msg, polling_err_msg
    # update_proc / polling_proc are module-level Popen handles (or None).
    if not update_proc:
        update_status = 'not started'
    elif update_proc.poll() is None:  # poll() is None while still running
        update_status = 'running({})'.format(update_proc.pid)
    else: # dead
        update_status = 'exited({})'.format(update_proc.returncode)
        if update_err_msg == '': # haven't pulled stderr out yet; read once
            update_err_msg = update_proc.stderr.read().decode('utf-8')
    if not polling_proc:
        polling_status = 'not started'
    elif polling_proc.poll() is None:
        polling_status = 'running({})'.format(polling_proc.pid)
    else: # dead
        polling_status = 'exited({})'.format(polling_proc.returncode)
        if polling_err_msg == '': # haven't pulled stderr out yet; read once
            polling_err_msg = polling_proc.stderr.read().decode('utf-8')
    # regenerate docs
    subprocess.getstatusoutput(DOCS_CMD)
    return update_status, polling_status
def main():
    '''Render the status/control HTML page.

    Shows the current git branch and HEAD, the state of the update and
    polling subprocesses (via check_status()), and any captured stderr.
    '''
    (st, branch) = subprocess.getstatusoutput(BRANCH_CMD)
    (st, head) = subprocess.getstatusoutput(HEAD_CMD)
    update_status, polling_status = check_status()
    # BUG FIX: the label read "pollinging proc"; corrected to "polling proc".
    return '''\
<html><body>
<form action="/checkout" method="post">
current branch: {branch}
<input name="branch" type="text"/>
<input value="checkout" type="submit"/></form><br/>
current HEAD: {head}
<a href="/hook"><button type="button">pull and restart</button></a><br/><br/>
update proc: {update_status}<br/>
<pre>{update_err_msg}</pre><br/>
polling proc: {polling_status}<br/>
<pre>{polling_err_msg}</pre><br/>
<a href="/docs/">docs</a>
</body></html>
'''.format(branch=branch, head=head,
           update_status=update_status, update_err_msg=update_err_msg,
           polling_status=polling_status, polling_err_msg=polling_err_msg)
def ams_config():
    """Produce per-environment, per-port copies of the built war directory.

    For every (port, environment) combination this copies the versioned
    build output and patches web.xml (run mode) and applicationContext.xml
    (port) with sed, verifying each edit with grep; any verification
    failure logs an error and calls exit_script().
    """
    ams_unzip()
    project_war = '%s/%s/target/%s' % (svn_path, project_name, project_name)
    project_war_ver = '%s/%s/target/%s_%s' % (svn_path, project_name, project_name, ver)
    os.system('cp -Rf %s %s_%s' % (project_war, project_war, ver))
    for i in ['28080', '29080']:
        for j in ['test', 'staging', 'production']:
            project_war_ver_env_port = '%s_%s_%s_%s' % (project_war, ver, j, i)
            web_xml = '%s/WEB-INF/web.xml' % (project_war_ver_env_port)
            applicationContext_xml = '%s/WEB-INF/classes/applicationContext.xml' % project_war_ver_env_port
            os.system('cp -Rf %s %s' % (project_war_ver, project_war_ver_env_port))
            # BUG FIX: \1 and \3 were unescaped in the original strings, so
            # Python turned them into control characters (\x01, \x03) instead
            # of passing literal \1 / \3 backreferences through to sed.
            if j == 'production' or j == 'staging':
                os.system("sed -r -i 's/(<param-value>)(development|test|production)(<\\/param-value>)/\\1production\\3/g' %s" % web_xml)
                status, output = subprocess.getstatusoutput('grep -E "<param-value>production</param-value>" %s' % web_xml)
                if status != 0:
                    logg.error("项目的web.xml配置文件修改错误:%s" % web_xml)
                    exit_script()
            else:
                os.system("sed -r -i 's/(<param-value>)(development|test|production)(<\\/param-value>)/\\1%s\\3/g' %s" % (j, web_xml))
                status, output = subprocess.getstatusoutput('grep -E "<param-value>%s</param-value>" %s' % (j, web_xml))
                if status != 0:
                    logg.error("项目的web.xml配置文件修改错误:%s" % web_xml)
                    exit_script()
            # BUG FIX: the original had `ii == 28080` (a comparison, not an
            # assignment), leaving ii stale from the previous loop iteration
            # (or undefined) for the 29080 case.
            if i == '28080':
                ii = 29080
            elif i == '29080':
                ii = 28080
            # Swap the template's "other" port for this copy's port.
            os.system("sed -i -r 's/(property name=\"port\" value=\")%s/\\1%s/g' %s" % (ii, i, applicationContext_xml))
            status, output = subprocess.getstatusoutput('grep -E "name=\"port\" *value=\"%s\"" %s' % (i, applicationContext_xml))
            if status != 0:
                logg.error("项目的配置文件修改错误:%s" % applicationContext_xml)
                exit_script()
def svn_update():
    """Check out (first run) or update the project SVN working copy, then
    do the same for the shared "common inc" resources when ``need_inc``
    exists.  Exits via exit_script() on any SVN failure.
    """
    logg.info('开始更新代码')
    if not os.path.exists('%s/%s/.svn' % (svn_path, project_name)):
        status, output = subprocess.getstatusoutput('svn co %s %s/%s' % (repos, svn_path, project_name))
    else:
        status, output = subprocess.getstatusoutput('svn up -r %s %s/%s' % (ver, svn_path, project_name))
    if status == 0:
        logg.info('----svn update 成功')
    else:
        logg.error(output)
        logg.error('----svn update 失败')
        exit_script()
    if os.path.exists(need_inc):
        # BUG FIX: the original tested the literal string
        # '%s/%s/src/main/webapp/inc/.svn/' — the %-args were missing — so
        # the fresh-checkout branch was taken on every run.
        if not os.path.exists('%s/%s/src/main/webapp/inc/.svn/' % (svn_path, project_name)):
            shell_cmd = '%s co http://svn/fenqi.d.xiaonei.com/fronted/xn.inc %s/%s/src/main/webapp/inc' % (SVN, svn_path, project_name)
        else:
            shell_cmd = '%s up %s/%s/src/main/webapp/inc' % (SVN, svn_path, project_name)
        status, output = subprocess.getstatusoutput(shell_cmd)
        if status == 0:
            logg.info('----update common inc 成功')
        else:
            logg.error(output)
            logg.error('----update common inc 失败')
            exit_script()
    else:
        logg.info("----不需要处理common inc")
    logg.info("结束更新代码 \n")
def setEnvironmentDarwin():
    """Configure PYTHONPATH on macOS so Homebrew's opencv bindings are found.

    Locates the Homebrew prefix (returning early if brew is absent), warns
    when opencv is not installed, derives the brew site-packages path for
    the current python, exports it into this process's environment, and
    appends an export line to ~/.bashrc when missing.
    """
    brew_prefix = subprocess.getstatusoutput('brew --prefix')[1]
    if not brew_prefix:
        return
    print("homebrew installed")
    opencv_version = subprocess.getstatusoutput('brew ls --versions opencv')[1]
    if not opencv_version:
        print("please install opencv by \n'brew install homebrew/science/opencv'")
    else:
        print("your opencv version is %s" % opencv_version)
    # "Python 2.7.10" -> "python2.7"
    python_version = subprocess.getstatusoutput('python --version')[1].split(" ")[1]
    python_version = "python" + ".".join(python_version.split(".")[:-1])
    print(python_version)
    PYTHONPATH = os.path.join(brew_prefix, "lib", python_version, "site-packages")
    print(PYTHONPATH)
    bashrc_fname = os.path.expanduser("~/.bashrc")
    bashrc_lines = open(bashrc_fname).read()
    if 'PYTHONPATH' not in os.environ:
        os.environ['PYTHONPATH'] = PYTHONPATH
        print(os.environ['PYTHONPATH'])
    # Persist the export for future shells if it is not in .bashrc yet.
    if os.environ['PYTHONPATH'] not in bashrc_lines:
        fp = open(bashrc_fname, "a")
        fp.write("\nexport PYTHONPATH=%s\n" % os.environ['PYTHONPATH'])
        fp.close()
    if PYTHONPATH not in os.environ['PYTHONPATH']:
        # BUG FIX: str.join takes a single iterable — the original passed two
        # arguments, which raises TypeError.  PYTHONPATH entries are joined
        # with os.pathsep (':'), not spaces.
        os.environ['PYTHONPATH'] = os.pathsep.join([PYTHONPATH, os.environ['PYTHONPATH']])
        print(os.environ['PYTHONPATH'])
def create_machine(project_name):
    """Create docker-machine environment and create if neccessary.

    The project name is normalized (spaces/underscores -> dashes) into a
    machine name; a virtualbox-driver machine is created only when no
    existing machine name contains it.  Activation instructions are
    echoed either way.
    """
    # docker-machine names use dashes; normalize whitespace/underscores.
    tool_name = re.sub(r'[\s_]', '-', project_name)
    machines = subprocess.getstatusoutput('docker-machine ls -q')
    machines = machines[1].split('\n')  # output half of the (status, output) pair
    msg = """Activate with either:
$ eval $(docker-machine env {envname})
or if you have docker-machine bash completion:
$ docker-machine use {envname}
""".format(envname=tool_name)
    # Substring match: any existing machine containing the name counts.
    if not [i for i in machines if tool_name in i]:
        click.echo(
            "Creating: docker-machine env {}".format(tool_name)
        )
        status = subprocess.getstatusoutput(
            'docker-machine create --driver virtualbox {}'.format(tool_name))
        if status[0] == 0:
            click.secho("Docker-machine env `{}` created.".format(tool_name),
                        fg='green')
            click.secho(msg, fg='yellow')
        else:
            # Creation failed; show docker-machine's error output in red.
            click.secho(status[1], fg='red')
    else:
        click.secho("Docker-machine environment ready.", fg='green')
        click.secho(msg, fg='yellow')
def intltool_version():
    ''' Return the version of intltool as a tuple.

    Returns (0, 0, 0) when intltool cannot be queried on win32, and None
    when intltool-update is missing or misbehaves elsewhere.
    '''
    if sys.platform == 'win32':
        # Let perl extract the x.y.z version from intltool-update's output.
        cmd = ["perl", "-e print qx(intltool-update --version) =~ m/(\d+.\d+.\d+)/;"]
        try:
            ver, ret = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE, shell=True).communicate()
            ver = ver.decode("utf-8")
            if ver > "":
                version_str = ver
            else:
                return (0, 0, 0)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt.
            return (0, 0, 0)
    else:
        cmd = 'intltool-update --version 2> /dev/null' # pathological case
        retcode, version_str = subprocess.getstatusoutput(cmd)
        if retcode != 0:
            return None
        cmd = 'intltool-update --version 2> /dev/null | head -1 | cut -d" " -f3'
        retcode, version_str = subprocess.getstatusoutput(cmd)
        if retcode != 0: # unlikely but just barely imaginable, so leave it
            return None
    return tuple([int(num) for num in version_str.split('.')])
def getEnv( self ):
    """Build the environment dict required to run the configured compiler.

    For MSVC, runs the matching vcvarsall.bat and captures the resulting
    variables via `set`; for Intel, does the same with compilervars.bat;
    for MinGW only Path is forwarded.  Returns an empty dict when no
    known compiler applies.
    """
    out = dict( )
    if compiler.isMSVC( ):
        # Map compiler release -> the env var pointing at its Common7\Tools dir.
        compilerDirs = { "msvc2010": "VS100COMNTOOLS", "msvc2012": "VS110COMNTOOLS", "msvc2013": "VS120COMNTOOLS", "msvc2015": "VS140COMNTOOLS" }
        architectures = { "x86": "x86", "x64": "amd64", "x64_cross": "x86_amd64" }
        crossmodifier = ""
        if not compiler.isNative():
            crossmodifier="_cross"
        # Run vcvarsall.bat for the target arch, silence its banner, then
        # dump the resulting environment with `set` and capture it.
        status, result = subprocess.getstatusoutput( "\"%s\\..\\..\\VC\\vcvarsall.bat\" %s > NUL && set" % ( os.getenv( compilerDirs[ compiler.getCompilerName( ) ] ), architectures[ compiler.architecture( ) + crossmodifier ]) )
        if status != 0:
            print( "Failed to setup msvc compiler", file = sys.stderr )
        out = self.stringToEnv( result )
    elif compiler.isIntel( ):
        architectures = { "x86": "ia32", "x64": "intel64" }
        programFiles = os.getenv( "ProgramFiles(x86)" ) or os.getenv( "ProgramFiles" )
        status, result = subprocess.getstatusoutput( "\"%s\\Intel\\Composer XE\\bin\\compilervars.bat\" %s > NUL && set" % ( programFiles, architectures[ compiler.architecture( ) ]) )
        if status != 0:
            print( "Failed to setup intel compiler", file = sys.stderr )
        out = self.stringToEnv( result )
    elif compiler.isMinGW( ):
        # MinGW needs no vendor setup script; just pass the search path on.
        out = { "Path": os.getenv( "Path" ) }
    return out
def __init__(self, interfaces, rules):
    """Prepare iptables-based traffic blocking for the given interfaces.

    Saves the current iptables state for later restore, translates each
    rule into iptables commands, and — for the special rule 'all' — also
    records and raises each interface's arp_ignore sysctl.
    """
    super().__init__(interfaces, rules)
    self._intf = interfaces
    # Snapshot the full iptables state so unblock() can restore it.
    st,output = getstatusoutput("iptables-save")
    self._saved_iptables = output
    self._arpignore = {}
    self._rulecmds = [ 'iptables -F', 'iptables -t raw -F' ]
    # --protocol {} -i {} --port {}
    doall = False
    for r in rules:
        cmds = self._parse_rule(r)
        self._rulecmds.extend(cmds)
        if r == 'all':
            doall = True
    if doall:
        badintf = []
        for intf in interfaces:
            # Remember each interface's current arp_ignore value so it can be
            # restored later; interfaces lacking the sysctl are dropped.
            st,output = getstatusoutput('sysctl net.ipv4.conf.{}.arp_ignore'.format(intf))
            if st != 0:
                badintf.append(intf)
                continue
            self._arpignore[intf] = int(output.split()[-1])
            # arp_ignore=8 suppresses ARP replies on the interface.
            st,output = getstatusoutput('sysctl -w net.ipv4.conf.{}.arp_ignore=8'.format(intf))
        for intf in badintf:
            self._intf.remove(intf) # alias of interfaces, so just remove
                                    # from self._intf
    log_debug("Rules to install: {}".format(self._rulecmds))
def install_splash(newsplashfile, msg=instmsg):
    """Replace the Eclipse splash screen image with *newsplashfile*.

    Finds the installed splash.bmp via pacman's file list, backs up any
    existing image into progdir (timestamped), then copies the new image
    into place, creating the directory when needed.  Exits the process
    with status 1 on any failure.
    """
    status, splashdata = getstatusoutput("pacman -Ql eclipse")
    splash = ""
    # pacman -Ql prints one "<pkg> <path>" line per owned file.
    for line in splashdata.split("\n"):
        if line.endswith("splash.bmp"):
            splash = line
            break
    try:
        splashfile = splash.split()[1]  # the path field of the matched line
    except IndexError:
        print("Unable to find eclipse splash image (got %s)" % (splash))
        exit(1)
    esplashdir = os.path.dirname(splashfile)
    # Timestamp like 2020-1-2-3-4-5 for a unique backup file name.
    today = "-".join(map(str, localtime()[0:6]))
    backupfile = os.path.join(progdir, "splash-backup-%s.bmp" % (today))
    if os.path.exists(splashfile):
        # Back up the existing splash image before overwriting it.
        if 0 != getstatusoutput("mv -f %s %s" % (splashfile, backupfile))[0]:
            print(ver + "\n" + req)
            exit(1)
        print(msg, end=" ")
        if 0 != getstatusoutput("cp %s %s" % (newsplashfile, splashfile))[0]:
            print()
            print("Unable to install splash image to %s/" % (esplashdir))
            exit(1)
    else:
        # No existing image: make sure the target directory exists first.
        splashdir = os.path.dirname(splashfile)
        if 0 != getstatusoutput("mkdir -p %s" % (splashdir))[0]:
            print(ver + "\n" + req)
            exit(1)
        print(msg, end=" ")
        if 0 != getstatusoutput("cp %s %s" % (newsplashfile, splashfile))[0]:
            print()
            print("Unable to install splash image to %s/" % (esplashdir))
            exit(1)
    print("done")
def startPIGPIO(self):
    """Ensure the pigpiod daemon is running and connect a local pigpio.pi().

    Starts pigpiod via sudo when it is not already running.  Returns True
    when the daemon is up (even if instantiating pigpio.pi() failed, which
    is only logged, matching the original behavior); False when the daemon
    could not be started.
    """
    # The legacy `commands` module (py2) and `subprocess` (py3) expose the
    # same getstatusoutput(); the original duplicated the whole sequence
    # once per Python major version.
    if sys.version_info[0] < 3:
        import commands as _cmds
    else:
        import subprocess as _cmds
    status, process = _cmds.getstatusoutput('sudo pidof pigpiod')
    if status:  # non-zero status: it wasn't running, so start it
        self.LogInfo ("pigpiod was not running")
        _cmds.getstatusoutput('sudo pigpiod')   # try to start it
        time.sleep(0.5)
        # check it again
        status, process = _cmds.getstatusoutput('sudo pidof pigpiod')
    if not status:  # if it was started successfully (or was already running)...
        pigpiod_process = process
        self.LogInfo ("pigpiod is running, process ID is {} ".format(pigpiod_process))
        try:
            pi = pigpio.pi()  # local GPIO only
            self.LogInfo ("pigpio's pi instantiated")
        except Exception as e:
            start_pigpiod_exception = str(e)
            self.LogError ("problem instantiating pi: {}".format(start_pigpiod_exception))
    else:
        self.LogError ("start pigpiod was unsuccessful.")
        return False
    return True
def _test_unique_prompt():
    """Manual smoke test: chained ssh/telnet logins and user switching,
    logging the whole session to remote.log and timing the run."""
    import time
    time_start = time.time()
    LOG_FILE = 'remote.log'
    import subprocess
    subprocess.getstatusoutput('rm -fr %s' % LOG_FILE)  # start with a fresh log
    log_file = open(LOG_FILE, 'wb+')
    h1 = SshHost('sunfire', 'geouser', 'geouser', logfile=log_file)
    h1.login(True)
    ssh_path = h1.docmd('which ssh')
    h1.switch_user('root', 'geoprobe', True)
    # Hop to xformer2 through h1; retry with the explicit ssh path when the
    # first login attempt fails (negative return).
    h2 = SshHost('xformer2', 'geo', 'geo.geo', h1)
    iret = h2.login(True)
    if(iret<0):
        h2 = SshHost('xformer2', 'geo', 'geo.geo', h1, ssh_path=ssh_path)
        iret = h2.login(True)
    h2.switch_user('root','inetinet')
    h2.logout()
    # Log in again and run a command as root this time.
    h2.login()
    h2.switch_user('root','inetinet')
    uptime = h2.docmd('uptime')
    print('uptime of xformer2: ', uptime)
    h2.quit_user()
    h2.logout()
    # Exercise the same hop over telnet as well (still proxied through h1).
    h2 = TelnetHost('xformer2', 'geo', 'geo.geo', h1)
    h2.login(True)
    h2.logout()
    h1.logout()
    log_file.close()
    time_end = time.time()
    print('done in %s minutes.' % str((time_end-time_start)/60.0))
def main(args):
    """Compile the fractal file named in args[0] with profiling flags and
    run the resulting `proftest` binary, printing compiler and program
    output.  Raises Exception when either step reports a failure."""
    pc = PC()
    pc.add_func_path("../formulas")
    pc.load_formula_file("gf4d.frm")
    pc.load_formula_file("gf4d.cfrm")
    pc.compiler_name = "g++"
    pc.leave_dirty = True  # keep intermediate files for inspection
    f = fractal.T(pc)
    f.loadFctFile(open(args[0]))
    outfile = f.compile()
    cfile = outfile[:-2] + "c"  # swap the extension to .c
    # compile the stub and the c file to create a program to profile
    files = " ".join(pc.cfiles + [cfile])
    # -g -pg enable gprof profiling; -O3 matches production optimization.
    cmd = "%s %s %s -o %s %s" % \
        (pc.compiler_name, files, "-g -pg -O3 -Ic -lpthread", "proftest", "")
    print(cmd)
    (status,output) = subprocess.getstatusoutput(cmd)
    if status != 0:
        raise Exception(
            "Error reported by C compiler:%s" % output)
    print(output)
    # compiled - hurrah! run it
    (status,output) = subprocess.getstatusoutput("./proftest")
    if status != 0:
        raise Exception(
            "Error reported by program:%s" % output)
    print(output)
def build_one_container(image_name, memory, code_address):
    """Start one container from the given image.

    :param image_name: logical image name (mapped to the real image name)
    :param memory: memory limit in megabytes
    :param code_address: URL of the code zip, handed to /tmp/start.sh
    :return: {"code": 0, "result": {...}} on success, {"code": 10004} on
             any docker failure.

    NOTE(review): image_name and code_address are interpolated directly
    into a shell command — callers must pass trusted values only (shell
    injection risk).
    """
    port = get_one_able_post()
    system_image = get_realname_from_image_name(image_name)
    command = "docker run -d -m %dm -p %d:80 %s /bin/bash /tmp/start.sh '%s'" % (int(memory), port, system_image, code_address)
    code, _ = subprocess.getstatusoutput(command)
    if code != 0:
        return {"code": 10004}
    # Look up the id of the container bound to the chosen host port.
    code, result = subprocess.getstatusoutput("docker ps | grep -v grep |grep 0.0.0.0:%d|awk -F ' ' '{print $1}'" % port)
    if code != 0:
        return {"code": 10004}
    # Persist the new container's port/id/config so it can be managed later.
    obj = ContainerModel(port=port, container_id=result, memory=memory,
                         code_address=code_address, image_name=system_image,
                         create_time=datetime.datetime.now())
    obj.save()
    return {"code": 0, "result": {"containerId": result, "port": port}}
def model_by_ip(ip_addr):
    """Get model switch by snmp. Return name of switch as name of module in switch packadge"""

    def _allied_8000s(ip):
        # AT-8000S family: a second snmpwalk distinguishes port counts.
        _, detail = getstatusoutput('snmpwalk -v 1 -c mrtg ' + \
            ip + ' mib-2.47.1.1.1.1.7.68420352')
        for marker, name in (('8000S/16', 'AT8000_16'),
                             ('8000S/24', 'AT8000_24'),
                             ('8000S/48', 'AT8000_48')):
            if marker in detail:
                return name
        raise NotImplementedError('Unknown model Allied Telesis')

    def _cisco_3550(ip):
        # Catalyst 3550 family: a second snmpwalk distinguishes port counts.
        _, detail = getstatusoutput('snmpwalk -v1 -c mrtg ' + \
            ip + ' SNMPv2-SMI::mib-2.47.1.1.1.1.2.1')
        for marker, name in (('3550 24', 'C3550_24'),
                             ('3550 48', 'C3550_48'),
                             ('3550 10', 'C3550_12')):
            if marker in detail:
                return name
        raise NotImplementedError('Unknown model Cisco 3550')

    status, out = getstatusoutput('snmpwalk -v 1 -c mrtg ' + \
        ip_addr + ' sysDescr.0')
    if status != 0:
        raise NotImplementedError('no answer from ip: ' + ip_addr)

    # Ordered marker -> model dispatch; callables need a follow-up query.
    dispatch = (
        ('8000S', _allied_8000s),
        ('8012', 'AT8012'),
        ('AT-9924SP', 'AT_9924SP'),
        ('MES-3124F', 'MES_3124F'),
        ('MES-1024', 'MES_1024'),
        ('MES-3124', 'MES_3124'),
        ('MES-3024', 'MES_3024'),
        ('MES-2124', 'MES_2124'),
        ('ES-2024A', 'ES_2024A'),
        ('ES-2108', 'ES_2108'),
        ('DGS-3627G', 'DGS_3627G'),
        ('DES-2108', 'DES_2108'),
        ('C3550', _cisco_3550),
        ('3Com SuperStack 3 Switch 3250', '3Com_SuperStack_50'),
        ('3Com SuperStack 3 Switch 3226', '3Com_SuperStack_26'),
    )
    for marker, target in dispatch:
        if marker in out:
            return target(ip_addr) if callable(target) else target
    raise NotImplementedError('switch with ip: {0} is {1}'.format(ip_addr, 'UNKNOWN'))
def unblock(self):
    """Undo the iptables/arp_ignore setup: flush the tables populated by
    this object, restore the iptables state saved at construction time,
    and put each interface's arp_ignore sysctl back to its old value."""
    # clear switchyard tables, load up saved state
    log_info("Restoring saved iptables state")
    st,output = getstatusoutput("iptables -F")
    st,output = getstatusoutput("iptables -t raw -F")
    # Feed the previously captured iptables-save dump back in.
    st,output = _sendcmd(["iptables-restore"], self._saved_iptables)
    for intf in self._intf:
        st,output = getstatusoutput('sysctl -w net.ipv4.conf.{}.arp_ignore={}'.format(intf, self._arpignore[intf]))
def resize_all(allpics):
    """Regenerate thumb/small renditions for every picture in *allpics*.

    Calls read_cmd(index, resolution) for each picture index and asserts
    success, then removes the temporary jpg files left behind.
    """
    for index, _image in enumerate(allpics):
        for resolution in ["thumb", "small"]:
            exit_code = read_cmd(str(index), resolution)
            # BUG FIX: the failure message referenced an undefined name
            # `output` (NameError on failure); report the code we have.
            assert exit_code == 0, "Could not read " + str(index) + " from db, exit code: " + str(exit_code)
    getstatusoutput("rm *small.jpg")
    getstatusoutput("rm *thumb.jpg")
    print("Resized all pictures to all resolutions correctly")
def move_datadir(self):
    """Move the MySQL datadir aside to /tmp/mysql and recreate an empty one.

    When a stale self.tmpdir exists it is emptied first.  Returns True on
    success, False on any failure.  The move-and-recreate sequence the
    original duplicated across both branches lives in a local helper.
    """
    def _relocate():
        # Move the datadir to self.tmp, then recreate an empty datadir.
        try:
            shutil.move(self.datadir, self.tmp)
            print("Moved datadir to /tmp/mysql ...")
        except shutil.Error as err:
            print("Error occurred while moving datadir")
            print(err)
            return False
        print("Creating an empty data directory ...")
        status2, output2 = subprocess.getstatusoutput(self.mkdir_command)
        if status2 == 0:
            print("/var/lib/mysql Created! ...")
            return True
        print("Error while creating datadir")
        print(output2)
        return False

    print("####################################################################################################")
    print("Moving MySQL datadir to /tmp/mysql: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -#")
    print("####################################################################################################")
    time.sleep(3)
    if os.path.isdir(self.tmpdir):
        # A previous run left /tmp/mysql behind; empty it before moving.
        status, output = subprocess.getstatusoutput('rm -rf %s' % self.tmpdir)
        if status != 0:
            print("Could not delete /tmp/mysql directory")
            print(output)
            return False
        print("Emptied /tmp/mysql directory ...")
    return _relocate()
def get_cpu_speed(self):
    """Return the average maximum CPU frequency as a human-readable string.

    Reads cpuinfo_max_freq for every core, falling back to
    scaling_available_frequencies; raises InformationMissing when neither
    sysfs source is readable.
    """
    # BUG FIX: the original parsed the command output *before* checking the
    # exit status, so a failed `cat` raised ValueError on the error text
    # instead of falling back / raising InformationMissing.
    code, raw = subprocess.getstatusoutput("cat /sys/devices/system/cpu/cpu*/cpufreq/cpuinfo_max_freq")
    if code != 0:
        code, raw = subprocess.getstatusoutput("cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_available_frequencies |cut -d\ -f 1")
        if code != 0:
            raise InformationMissing()
    # sysfs reports kHz; scale to Hz before averaging.
    speed = [int(elem)*1000 for elem in raw.split("\n")]
    return self.transform_unit(sum(speed)/len(speed))+"Hz"
def _test_read_write_helper(self, url, content_expected):
    """Round-trip *content_expected* through the unity server at *url*,
    verify the read-back matches, then delete the HDFS file if present."""
    url = util._make_internal_url(url)
    glconnect.get_unity().__write__(url, content_expected)
    content_read = glconnect.get_unity().__read__(url)
    # assertEquals is a deprecated alias; assertEqual is the supported name.
    self.assertEqual(content_read, content_expected)
    # clean up the file we wrote
    status, output = commands.getstatusoutput('hadoop fs -test -e ' + url)
    # BUG FIX: `status is 0` relied on CPython small-int identity; use ==.
    if status == 0:
        commands.getstatusoutput('hadoop fs -rm ' + url)
def main():
    """Tutorial walkthrough of the ways to run shell commands from Python:
    os.system, os.popen, subprocess, and the legacy commands module."""
    cmd = 'ls'
    d = commands.getoutput(cmd)
    # This call is not available on Python 3.
    # os.system merely runs the command in a sub-shell; it cannot capture the
    # command's output — "t = system('pwd')" does not give you the text, only
    # the exit status.  When run from a terminal the output prints directly.
    os.system('pwd')  # 'pwd' must be quoted here
    # The captured value is only the return code (an int): 0 means success,
    # anything else is an error code.
    t = os.system('pwd')
    # os.popen runs the command AND returns an object wrapping its output, so
    # the result can be stored in a variable for further processing.
    tmp = os.popen('ls *.py').readlines()
    # NOTE: when the command's arguments or output contain non-ASCII text,
    # prefer subprocess — os.popen can hit encoding errors.
    # 1. If you don't care about the output and just want the program run,
    #    subprocess.call is the typical choice.
    # 2. If you need to capture the output, use subprocess.Popen.
    # Important difference between the two:
    # subprocess.call blocks waiting for the command to finish, while Popen
    # returns immediately; Python keeps executing until you wait()/communicate()
    # for the child's result.
    import subprocess
    subprocess.call(["echo", "arg1", "arg2"], shell=True)
    # Capturing status and output: subprocess.PIPE connects the child's
    # streams into a pipe we can read (stderr folded into stdout here).
    p = subprocess.Popen('ls', shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    for line in p.stdout.readlines():
        print (line.decode('utf8'))
    retval = p.wait()
    if PY3:
        subprocess.getoutput('date')  # 'date' is a shell command
        # e.g. Out[4]: '2016-05-24 Tue 09:12:06 CST'
        subprocess.getstatusoutput('date')
        # e.g. Out[5]: (0, '2016-05-24 Tue 09:12:22 CST')
    else:
        # Python 2: use the commands module instead.
        import commands
        # Return only the command's output, discarding the exit status.
        commands.getoutput("date")
        # Runs cmd via os.popen as "{ cmd ; } 2>&1" and returns a
        # (status, result) tuple; result includes stdout and stderr.
        commands.getstatusoutput("date")
        # Returns the result of "ls -ld file"; the argument must be a file.
        commands.getstatus(file)
def create_account(username):
    """Provision a cloud account: create the user, mount their volume,
    export it over NFS and give them ownership of it.

    Returns True only when the initial useradd succeeded; the follow-up
    commands are best-effort and their results are merely printed.

    NOTE(review): *username* is interpolated into shell commands without
    quoting/escaping — callers must pass a validated name (injection risk).
    """
    status = subprocess.os.system('useradd --home-dir /cloud/'+username+' '+ username)
    print(subprocess.getstatusoutput('mount /dev/cloud/' + username + ' /cloud/' + username))
    # NOTE(review): the >> redirection is performed by the current process's
    # shell, so this append to /etc/exports requires running as root already.
    print(subprocess.getstatusoutput('echo \"/cloud/' + username + ' 192.168.122.1/0.0.0.0(rw,no_root_squash)\" ' + '>> /etc/exports'))
    print(subprocess.getstatusoutput('systemctl restart nfs-server'))
    print(subprocess.getstatusoutput('chown '+username+' /cloud/' + username + ' -R'))
    if status == 0:
        return True
    else:
        return False
def convert(infile, outfile=None):
    """Optimize a GIF with gifsicle.

    With no *outfile* the file is rewritten in place (--batch); otherwise
    gifsicle's output is redirected into *outfile*.  Returns True when
    gifsicle exited successfully, False otherwise.
    """
    if outfile is None:
        command = "gifsicle --batch " + str(infile)
    else:
        command = "gifsicle " + str(infile) + " > " + outfile
    status, _ = subprocess.getstatusoutput(command)
    return status == 0
def __init__(self):
    # Number of logical CPU cores.
    self.cores = multiprocessing.cpu_count()
    # CPU model name from /proc/cpuinfo; fall back to "general" when the
    # lookup fails (non-Linux system, missing file, ...).
    status, self.name = subprocess.getstatusoutput('grep "model name" /proc/cpuinfo -m 1')
    self.name = self.name[self.name.find(":") + 1:].strip() if status == 0 else "general"
    # Collapse runs of whitespace in the model name to single spaces.
    while self.name.find('  ') > -1:
        self.name = self.name.replace('  ', ' ')
    # CPU flags via the cpuinfo2cpuflags-x86 helper; default to a baseline
    # x86 flag set when the tool is unavailable.
    status, self.flags = subprocess.getstatusoutput('cpuinfo2cpuflags-x86')
    self.flags = self.flags[self.flags.find("\"") + 1: -1] if status == 0 else "mmx sse sse2 sse3 ssse3"
def stop_tail_log(case_id, case_result):
    """Kill every `tail` process started for the current test case, then
    archive the collected tail logs under the case id.

    taillogs maps log name -> loginfo where loginfo[1] is the pid string
    of the running tail ("" when it was never started / already stopped).
    """
    for logname in taillogs:
        loginfo = taillogs[logname]
        if loginfo[1] != "":
            status, output = subprocess.getstatusoutput("kill -9 " + loginfo[1])
            loginfo[1] = ""  # mark the tail as stopped
        else:
            log.error("pid of " + logname + " was not found.")
    #if case_result != 0:
    # Move the tail-log directory into a per-case directory (done
    # unconditionally; the result-based filter above is disabled).
    status, output = subprocess.getstatusoutput("mv " + case_output_path + "/log/tail " + case_output_path + "/log/" + case_id)
def inc_backup(self):
    """Take an incremental backup with the configured xtrabackup tool.

    The first incremental is based on the most recent full backup; later
    ones are based on the most recent incremental.  Returns True on
    success, False otherwise.
    """
    recent_bck = self.recent_full_backup_file()
    recent_inc = self.recent_inc_backup_file()
    # The two original branches were identical except for the
    # --incremental-basedir; compute it once and run one shared command.
    if recent_inc == 0:  # no incremental yet: base on the last full backup
        basedir = '%s/%s' % (self.full_dir, recent_bck)
    else:
        basedir = '%s/%s' % (self.inc_dir, recent_inc)
    # (For MariaDB Galera Cluster substitute self.maria_xtrabck for
    # self.xtrabck, as in the original's commented-out variants.)
    args = '%s %s %s --incremental %s --incremental-basedir %s' % (
        self.backup_tool, self.myuseroption, self.xtrabck, self.inc_dir, basedir)
    status, output = subprocess.getstatusoutput(args)
    if status == 0:
        print(output[-27:])
        return True
    print("INCREMENT BACKUP FAILED!")
    time.sleep(5)
    print(output)
    return False
def tearDown(self):
    """Best-effort test cleanup: destroy and undefine the VM on the remote
    hypervisor over ssh, then delete its rbd volume.  Command results are
    captured but deliberately ignored — failures must not fail teardown."""
    cmd = 'ssh %s virsh destroy %s' % (self.h1.ipv4, self.vm1.uuid)
    r, info = subprocess.getstatusoutput(cmd)
    cmd = 'ssh %s virsh undefine %s' % (self.h1.ipv4, self.vm1.uuid)
    r, info = subprocess.getstatusoutput(cmd)
    # Remove the VM's backing image from the ceph pool.
    cmd1 = 'ssh %s rbd rm %s/%s' % (self.cp1.host.host, self.cp1.pool, self.vm1.uuid)
    r1, info1 = subprocess.getstatusoutput(cmd1)
def cache_dir_locator(base_path, options):
    """Find browser "Cache" directories under *base_path*.

    *options* selects the filter: 'firefox_only', 'chrome_only', or 'all'.
    Returns the (status, output) tuple from running `find` (optionally
    piped through grep), or an error string for unrecognized options.
    """
    # BUG FIX: the original built "find <base_path>-name Cache ..." with no
    # space after the path, so find searched a bogus path; the ''.join()
    # wrapped around an already-complete string was also a no-op.
    com = 'find ' + base_path + ' -name Cache -type d'
    if 'firefox_only' in options:
        com += ' | grep firefox'
    elif 'chrome_only' in options:
        com += ' | grep chrome'
    elif 'all' in options:
        return subprocess.getstatusoutput(com)
    else:
        return 'OPTIONS ERROR ON cache_dir_locator'
    return subprocess.getstatusoutput(com)
def install_yum_deps(deps_to_install: List[str]) -> None:
    """Install *deps_to_install* with yum and perform first-time RHEL/CentOS
    setup: yum repo configuration, pip3 bootstrap, PostgreSQL path symlinks,
    initdb, a vendored pg_hba.conf, and tsearch dictionary links.
    """
    print(WARNING + "RedHat support is still experimental.")
    run_as_root(["./scripts/lib/setup-yum-repo"])
    # Hack specific to unregistered RHEL system.  The moreutils
    # package requires a perl module package, which isn't available in
    # the unregistered RHEL repositories.
    #
    # Error: Package: moreutils-0.49-2.el7.x86_64 (epel)
    #        Requires: perl(IPC::Run)
    yum_extra_flags = []  # type: List[str]
    if vendor == "rhel":
        exitcode, subs_status = subprocess.getstatusoutput(
            "sudo subscription-manager status")
        if exitcode == 1:
            # TODO this might overkill since `subscription-manager` is already
            # called in setup-yum-repo
            if "Status" in subs_status:
                # The output is well-formed
                yum_extra_flags = ["--skip-broken"]
            else:
                print(
                    "Unrecognized output. `subscription-manager` might not be available"
                )
    run_as_root(["yum", "install", "-y", *yum_extra_flags, *deps_to_install])
    if "rhel" in os_families():
        # This is how a pip3 is installed to /usr/bin in CentOS/RHEL
        # for python35 and later.
        run_as_root(["python36", "-m", "ensurepip"])
        # `python36` is not aliased to `python3` by default
        run_as_root(["ln", "-nsf", "/usr/bin/python36", "/usr/bin/python3"])
    postgresql_dir = f"pgsql-{POSTGRESQL_VERSION}"
    for cmd in ["pg_config", "pg_isready", "psql"]:
        # Our tooling expects these PostgreSQL scripts to be at
        # well-known paths.  There's an argument for eventually
        # making our tooling auto-detect, but this is simpler.
        run_as_root([
            "ln", "-nsf", f"/usr/{postgresql_dir}/bin/{cmd}", f"/usr/bin/{cmd}"
        ])
    # From here, we do the first-time setup/initialization for the PostgreSQL database.
    pg_datadir = f"/var/lib/pgsql/{POSTGRESQL_VERSION}/data"
    pg_hba_conf = os.path.join(pg_datadir, "pg_hba.conf")
    # We can't just check if the file exists with os.path, since the
    # current user likely doesn't have permission to read the
    # pg_datadir directory.
    if subprocess.call(["sudo", "test", "-e", pg_hba_conf]) == 0:
        # Skip setup if it has been applied previously
        return
    run_as_root(
        [
            f"/usr/{postgresql_dir}/bin/postgresql-{POSTGRESQL_VERSION}-setup",
            "initdb"
        ],
        sudo_args=["-H"],
    )
    # Use vendored pg_hba.conf, which enables password authentication.
    run_as_root([
        "cp", "-a", "puppet/zulip/files/postgresql/centos_pg_hba.conf",
        pg_hba_conf
    ])
    # Later steps will ensure PostgreSQL is started
    # Link in tsearch data files
    run_as_root([
        "ln",
        "-nsf",
        "/usr/share/myspell/en_US.dic",
        f"/usr/pgsql-{POSTGRESQL_VERSION}/share/tsearch_data/en_us.dict",
    ])
    run_as_root([
        "ln",
        "-nsf",
        "/usr/share/myspell/en_US.aff",
        f"/usr/pgsql-{POSTGRESQL_VERSION}/share/tsearch_data/en_us.affix",
    ])
def writesco(tones_dict, base_name):
    """Write an RTcmix WAVETABLE score for the notes in *tones_dict*.

    tones_dict holds parallel lists under 'times', 'notes', 'durs', 'amps'
    and 'pans'; an optional 'cmixInstalled' key overrides the CMIX command
    autodetection.  Returns the score file name, "<base_name>.sco".
    """
    score_name = base_name + '.sco'
    print(score_name)
    # Use a context manager so the file is closed even on error (the
    # original leaked the handle when an exception escaped).
    with open("./" + score_name, 'w') as f_out:
        # YOU MUST DELETE THE SOUND FILE BEFORE RUNNING (either with python or with -clobber )
        # BUG FIX: the original omitted the newline after set_option(...),
        # fusing it with rtsetparams(...) onto one malformed score line.
        f_out.write("set_option(\"clobber = on\")\n")
        f_out.write("rtsetparams(44100, 2)\n")
        f_out.write("reset(44100)\n")  # reset(44100) makes it very very smooth
        f_out.write("load(\"WAVETABLE\")\n")
        # ---------------------- CHECK IF CMIX COMMAND IS INSTALLED ----------
        # only use rtoutput if the CMIX command is found.
        cmixStatus, cmixResult = sp.getstatusoutput("CMIX")
        # cmixInstalled can also be passed in from the notebook; when present
        # it overrides the sp.getstatusoutput("CMIX") result.
        if 'cmixInstalled' in tones_dict:
            if tones_dict['cmixInstalled']:
                cmixStatus = 0
            else:
                cmixStatus = 127
        if cmixStatus == 0:
            output_string = 'rtoutput(\"' + base_name + '.wav\")\n'
            print("CMIX found.")
            print(output_string)
            f_out.write(output_string)
        else:
            print("CMIX not found; rtoutput() will not be used in score.")
        # --------------------------------------------------------------------
        f_out.write("waveform = maketable(\"wave\", 1000, 1.0, 0.4, 0.2)\n")
        f_out.write("ampenv = maketable(\"window\", 1000, \"hamming\")\n")
        # Score lines: WAVETABLE(start, dur, amp, freq, pan [0 left, 1 right],
        # table_handle).  tab_han names which waveform table to use.
        tab_han = 'waveform'
        times = tones_dict['times']
        notes = tones_dict['notes']
        durs = tones_dict['durs']
        amps = tones_dict['amps']
        pans = tones_dict['pans']
        for i, note_val in enumerate(notes):
            t_start = times[i]
            dur = durs[i]
            freq = note_val  # coming in from enumerate
            amp = amps[i]
            pan = pans[i]
            note_string = 'WAVETABLE(' + str(t_start) + ', ' \
                + str(dur) + ', ' + str(amp) + '*ampenv' + ', ' \
                + str(freq) + ', ' + str(pan) + ', ' \
                + tab_han + ')\n'
            f_out.write(note_string)
    return score_name
def test_fox():
    """Test on fox"""
    # prg and fox are module-level paths: the script under test and the
    # fox.txt input fixture.
    rv, out = getstatusoutput(f'{prg} {fox}')
    assert rv == 0
    # wc-style counts: 1 line, 9 words, 45 bytes for fox.txt.
    assert out.rstrip() == ' 1 9 45 ../inputs/fox.txt'
def run(self, cmd):
    """runs a system command and returns an array of lines of the output

    Appends the repo/plugin switches configured on self to cmd, executes
    it (via subprocess.getstatusoutput on old Pythons, Popen otherwise),
    validates the result via self.check_returncode, and returns the
    output split into lines.  Aborts via end() on internal errors.
    """
    if not cmd:
        end(UNKNOWN, "Internal python error - " \
            + "no cmd supplied for run function")
    # Translate this object's settings into yum command-line switches.
    if self.no_cache_update:
        cmd += " -C"
    if self.enable_repo:
        for repo in self.enable_repo.split(","):
            cmd += " --enablerepo=%s" % repo
    if self.disable_repo:
        for repo in self.disable_repo.split(","):
            cmd += " --disablerepo=%s" % repo
    if self.disable_plugin:
        # --disableplugin can take a comma separated list directly
        cmd += " --disableplugin=%s" % self.disable_plugin
    if self.yum_config:
        for repo in self.yum_config.split(","):
            cmd += " --config=%s" % repo
    self.vprint(3, "running command: %s" % cmd)
    if OLD_PYTHON:
        self.vprint(3, "subprocess not available, probably old python " \
            + "version, using shell instead")
        os.environ['LANG'] = "en_US"
        returncode, stdout = subprocess.getstatusoutput(cmd)
        if returncode >= 256:
            # Wait-style status word: the real exit code is in the high byte.
            # BUG FIX: use integer division -- "/" produces a float on
            # Python 3, breaking later comparisons against integer codes.
            returncode = returncode // 256
    else:
        try:
            env = {'LANG': 'en_US'}
            process = Popen(cmd.split(),
                            stdin=PIPE,
                            stdout=PIPE,
                            stderr=STDOUT,
                            env=env)
        except OSError as error:
            error = str(error)
            if error == "No such file or directory":
                end(UNKNOWN, "Cannot find utility '%s'" % cmd.split()[0])
            end(UNKNOWN, "Error trying to run utility '%s' - %s" \
                % (cmd.split()[0], error))
        output = process.communicate()
        # for using debug outputs, either do not comment above line or explicitly set exit code below
        #output = [open(os.path.dirname(__file__) + '/test_input.txt').read(), '']
        returncode = process.returncode
        stdout = output[0]
        # decode bytes to string for Python 3
        stdout = stdout.decode("utf-8")
    if not stdout:
        end(UNKNOWN, "No output from utility '%s'" % cmd.split()[0])
    self.vprint(3, "Returncode: '%s'\nOutput: '%s'" \
        % (returncode, stdout))
    output = str(stdout).split("\n")
    self.check_returncode(returncode, output)
    return output
#!/usr/bin/python36
# Minimal CGI endpoint that appends a host entry to the Ansible inventory
# and echoes the command's (status, output) pair back as the response body.
print("content-type:text/html")
print("\n")
import subprocess as sp

# Renamed from "str" so the builtin str() is not shadowed.
host_entry = "abcde"
# NOTE(review): the ">>" redirection is performed by the CGI user's shell,
# not under sudo, so this fails unless that user can write the file; and
# interpolating values into a shell string is injection-prone -- prefer
# subprocess.run([...], shell=False) with an explicit privileged helper.
p = sp.getstatusoutput("sudo echo {} >> /etc/ansible/hosts".format(host_entry))
print(p[0])
print(p[1])
def run(protein: str, expected: str) -> None:
    """ Run test """
    # Execute the program with the protein argument and compare output.
    exit_code, captured = getstatusoutput(f'{RUN} {protein}')
    assert exit_code == 0
    assert captured.rstrip() == expected
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--cr', default='https://cr.deepin.io/changes')
    parser.add_argument('--changeid', required=True, type=int)
    parser.add_argument('--patchset', default=1, type=int)
    parser.add_argument('--parserjson', default='project_list.json')
    args = parser.parse_args()

    # Gerrit REST endpoint for the raw (base64-encoded) patch text.
    urlpath = '%(cr)s/%(changeid)d/revisions/%(patchset)s/patch' % {
        "cr": args.cr,
        "changeid": args.changeid,
        "patchset": args.patchset
    }
    cmd = 'curl %s | base64 -d' % urlpath
    status, output = subprocess.getstatusoutput(cmd)
    if status != 0:
        # BUG FIX: CalledProcessError requires (returncode, cmd); the old
        # call passed a lone message string and raised TypeError instead
        # of the intended error.
        raise subprocess.CalledProcessError(status, cmd)

    # Collect only the hunk that patches args.parserjson out of the
    # unified diff: start after its "+++ b/<file>" header, stop at the
    # next "diff --git" header.
    start = False
    lines = []
    for line in output.split('\n'):
        if line.startswith('+++ b/%s' % args.parserjson):
            start = True
            continue
        if start:
            if line.startswith('diff --git'):
                start = False
        if start:
            lines.append(line)
def copyfile(src, dest, newmtime = None, sstat = None):
    """
    Copies a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.  Returns true on success and false on failure.
    """
    #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    # Stat the source; callers may supply a pre-computed lstat result.
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        logger.warning("copyfile: stat of %s failed (%s)" % (src, e))
        return False

    # Work out whether dest exists; fall back to stat'ing its parent so
    # dstat is still usable below.
    destexists = 1
    try:
        dstat = os.lstat(dest)
    except:
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Remove an existing symlink at dest so we replace the link,
            # not its target.
            try:
                os.unlink(dest)
                destexists = 0
            except Exception as e:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate the link itself rather than copy
        # the pointed-to file.
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            return os.lstat(dest)
        except Exception as e:
            logger.warning("copyfile: failed to create symlink %s to %s (%s)" % (dest, target, e))
            return False

    if stat.S_ISREG(sstat[stat.ST_MODE]):
        # Regular file: copy to a temporary "#new" name, then rename over
        # dest so readers never observe a half-written file.
        try:
            srcchown = False
            if not os.access(src, os.R_OK):
                # Make sure we can read it
                srcchown = True
                os.chmod(src, sstat[stat.ST_MODE] | stat.S_IRUSR)

            # For safety copy then move it over.
            shutil.copyfile(src, dest + "#new")
            os.rename(dest + "#new", dest)
        except Exception as e:
            logger.warning("copyfile: copy %s to %s failed (%s)" % (src, dest, e))
            return False
        finally:
            # Undo the temporary chmod (and restore timestamps) if we had
            # to make the source readable above.
            if srcchown:
                os.chmod(src, sstat[stat.ST_MODE])
                os.utime(src, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
    else:
        #we don't yet handle special, so we need to fall back to /bin/mv
        a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
        if a[0] != 0:
            logger.warning("copyfile: failed to copy special file %s to %s (%s)" % (src, dest, a))
            return False # failure

    # Re-apply ownership, permissions and timestamps from the source.
    try:
        os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
        os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
    except Exception as e:
        logger.warning("copyfile: failed to chown/chmod %s (%s)" % (dest, e))
        return False

    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
def rsh(cmd):
    """Execute *cmd* through the shell; return its (status, output) pair."""
    import subprocess
    result = subprocess.getstatusoutput(cmd)
    return result
''' import pytest, subprocess, time, os, allure, sys, random # 获取项目路径 BASEDIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__))) sys.path.append(BASEDIR) from common import models, logger from common.ui_operation import S20i_uiOpration from config import settings, U3_settings from config.config import global_config import uiautomator2 as u2 # from pywinauto import application from openpyxl import load_workbook # 启动切口工具 subprocess.getstatusoutput(" ".join(["start", settings.Qualcomm_SwitchCom_file])) # 获取项目名称 project_name = os.path.basename(__file__).split('.')[0] # 创建log对象 log_name = '.'.join(['_'.join([project_name, time.strftime('%Y%m%d%H%M%S', time.localtime())]), "log"]) log_file = os.path.join(BASEDIR, "logs", log_name) log = logger.Logger(screen_output=True, log_level='logging.INFO', log_file=log_file).create_logger() def screenShot(d, title): ''' 在uiautomator2的截图函数上封装的截图方法 :param d:uiautomator2对象 title: 自定义截图的名称 :return: 返回截图的路径
def movefile(src, dest, newmtime = None, sstat = None):
    """Moves a file from src to dest, preserving all permissions and
    attributes; mtime will be preserved even when moving across
    filesystems.  Returns true on success and false on failure.  Move is
    atomic.
    """
    #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
    # Stat the source; callers may supply a pre-computed lstat result.
    try:
        if not sstat:
            sstat = os.lstat(src)
    except Exception as e:
        print("movefile: Stating source file failed...", e)
        return None

    # Work out whether dest exists; fall back to stat'ing its parent so
    # the device comparison below still has a dstat to use.
    destexists = 1
    try:
        dstat = os.lstat(dest)
    except OSError:
        dstat = os.lstat(os.path.dirname(dest))
        destexists = 0

    if destexists:
        if stat.S_ISLNK(dstat[stat.ST_MODE]):
            # Remove an existing symlink at dest so we replace the link,
            # not its target; failure here is tolerated (best effort).
            try:
                os.unlink(dest)
                destexists = 0
            except OSError:
                pass

    if stat.S_ISLNK(sstat[stat.ST_MODE]):
        # Source is a symlink: recreate the link itself at dest.
        try:
            target = os.readlink(src)
            if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
                os.unlink(dest)
            os.symlink(target, dest)
            #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
            os.unlink(src)
            return os.lstat(dest)
        except Exception as e:
            print("movefile: failed to properly create symlink:", dest, "->", target, e)
            return None

    # Try a plain rename first when both paths are on the same device.
    renamefailed = 1
    if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
        try:
            # os.rename needs to know the dest path ending with file name
            # so append the file name to a path only if it's a dir specified
            srcfname = os.path.basename(src)
            destpath = os.path.join(dest, srcfname) if os.path.isdir(dest) \
                       else dest
            os.rename(src, destpath)
            renamefailed = 0
        except Exception as e:
            # BUG FIX: the old code tested "e[0] != errno.EXDEV", but
            # exceptions are not subscriptable on Python 3 -- inspect
            # e.errno instead (getattr, since not every Exception has it).
            if getattr(e, 'errno', None) != errno.EXDEV:
                # Some random error.
                print("movefile: Failed to move", src, "to", dest, e)
                return None
            # Invalid cross-device-link 'bind' mounted or actually Cross-Device

    if renamefailed:
        # Cross-device move: copy then unlink the source.
        didcopy = 0
        if stat.S_ISREG(sstat[stat.ST_MODE]):
            try:
                # For safety copy then move it over.
                shutil.copyfile(src, dest + "#new")
                os.rename(dest + "#new", dest)
                didcopy = 1
            except Exception as e:
                print('movefile: copy', src, '->', dest, 'failed.', e)
                return None
        else:
            #we don't yet handle special, so we need to fall back to /bin/mv
            a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
            if a[0] != 0:
                print("movefile: Failed to move special file:" + src + "' to '" + dest + "'", a)
                return None # failure
        try:
            if didcopy:
                os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
                os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
                os.unlink(src)
        except Exception as e:
            print("movefile: Failed to chown/chmod/unlink", dest, e)
            return None

    # Apply the requested mtime, or carry the source's over.
    if newmtime:
        os.utime(dest, (newmtime, newmtime))
    else:
        os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
        newmtime = sstat[stat.ST_MTIME]
    return newmtime
def subTimes(self, filename):
    """Count the lines (newline characters) in *filename*.

    Records the name on self.filename and returns the count as an int.
    Equivalent to the old ``cat <file> | wc -l`` pipeline (a final
    unterminated line is NOT counted), but without spawning a shell,
    a useless ``cat``, or risking quoting problems with odd filenames.
    """
    self.filename = filename
    times = 0
    # Count b'\n' in fixed-size binary chunks so huge files stay cheap.
    with open(filename, 'rb') as fh:
        for chunk in iter(lambda: fh.read(1 << 16), b''):
            times += chunk.count(b'\n')
    return times
# Run the docs build from the directory containing this script.
dname = os.path.dirname(abspath)
os.chdir(dname)


def cmd(s):
    """Echo *s*, run it through the shell, and abort the build on failure."""
    print("")
    print(s)
    print("-------------------------------------")
    r = os.system(s)
    if r != 0:
        print("Exit build due to previous error")
        exit(-1)


# Get the current branch name
status, br = subprocess.getstatusoutput("git branch | grep '*'")
br = re.sub('\* ', '', br)  # strip the "* " marker from git's output
urlpath = re.sub('release/', '', br)  # docs URLs omit the "release/" prefix

# Be sure the github links point to the right branch
f = open("header.rst", "w")
f.write(".. |github_link_base| replace:: https://github.com/lvgl/docs/blob/" + br)
f.close()

# Rewrite html_baseurl in conf.py in place to match this branch.
base_html = "html_baseurl = 'https://docs.lvgl.io/" + urlpath + "/en/html/'"
os.system("sed -i \"s|html_baseurl = .*|" + base_html + "|\" conf.py")

# Build flags consumed further down the script (0 = disabled).
clean = 0
trans = 0
def execute(self):
    """Compile (avr-gcc) and/or upload a hex file to an Atmega328p board.

    In 'compileupload' mode: compile self.fname, produce .hex and .lst
    artifacts, then fall through to 'upload' mode.  In 'upload' mode
    (when the serial port is connected): reset the board via RTS and
    flash the hex file.  Progress is reported through self.logThis;
    self.finished is emitted when done.
    """
    if self.mode == 'compileupload':
        try:
            import subprocess
            # Strip the extension to get the output binary's base name.
            fname = '.'.join(self.fname.split('.')[:-1])
            cmd = 'avr-gcc -Wall -O2 -mmcu=%s -o "%s" "%s"' % (
                'atmega328p', fname, self.fname)
            self.logThis.emit(
                '''<span style="color:green;">Compiling for Atmega328p (Nano)</span>'''
            )
            print(cmd)
            res = subprocess.getstatusoutput(cmd)
            if res[0] != 0:
                # Compilation failed: report and bail out early.
                self.logThis.emit(
                    '''<span style="color:red;">Compile Error: %s</span>''' %
                    res[1])
                self.finished.emit()
                return
            else:
                self.logThis.emit(
                    '''<span style="color:white;">%s</span><br>''' % res[1])
            # Extract the flashable Intel-hex image from the ELF binary.
            cmd = 'avr-objcopy -j .text -j .data -O ihex "%s" "%s.hex"' % (
                fname, fname)
            res = subprocess.getstatusoutput(cmd)
            self.logThis.emit(
                '''<span style="color:white;">%s</span><br>''' % res[1])
            # Produce an assembly listing for reference.
            cmd = 'avr-objdump -S "%s" > "%s.lst"' % (fname, fname)
            res = subprocess.getstatusoutput(cmd)
            self.logThis.emit(
                '''<span style="color:white;">%s</span><br>''' % res[1])
            if self.fname[-2:] in ['.c', '.C']:
                self.fname = self.fname[:-2] + '.hex'  #Replace .c with .hex
            # Hand off to the upload stage below.
            self.mode = 'upload'
            self.logThis.emit(
                '''<span style="color:green;">Generated Hex File</span>'''
            )
            self.logThis.emit(
                '''<span style="color:green;">Finished Compiling: Generated Hex File</span>'''
            )
        except Exception as err:
            self.logThis.emit(
                '''<span style="color:red;">Failed to Compile:%s</span>''' %
                str(err))
    if self.p.connected:
        if self.mode == 'upload':
            try:
                # Pulse RTS to reset the board into its bootloader.
                self.p.fd.setRTS(0)
                time.sleep(0.01)
                self.p.fd.setRTS(1)
                time.sleep(0.4)
                dude = uploader.Uploader(self.p.fd,
                                         hexfile=self.fname,
                                         logger=self.logThis)
                dude.program()
                dude.verify()
                # Reset again so the freshly flashed sketch starts.
                self.p.fd.setRTS(0)
                time.sleep(0.01)
                self.p.fd.setRTS(1)
                time.sleep(0.2)
                self.p.get_version()
                self.logThis.emit(
                    '''<span style="color:green;">Finished upload</span>'''
                )
            except Exception as err:
                self.logThis.emit(
                    '''<span style="color:red;">Failed to upload</span>'''
                )
    # NOTE(review): source formatting was mangled; finished is assumed to
    # be emitted unconditionally at the end of execute() -- confirm.
    self.finished.emit()
def choiceForExec():
    """Interactive CLI menu: prompt for an action and dispatch it.

    Shells out for install/run/stop actions, delegates DB and API work to
    the functions module, and reports results with colorama colours.
    Exceptions are captured to Sentry via capture_exception().
    """
    questions = [{
        'type': 'list',
        'name': 'choiceForExec',
        'message': 'What would you like to do? ',
        'choices': [
            "Install Dependencies", 'Run the GraphQL APP',
            'Stop the GraphQL app', 'Update the Database',
            'Create SQL Tables in a new Database', "Check API Call",
            "Get Stats"
        ]
    }]
    answers = prompt(questions)
    if answers["choiceForExec"] == "Install Dependencies":
        try:
            # Option 1. Does pipenv install
            command = 'pipenv install'
            subprocess.getstatusoutput(command)
            print(Fore.GREEN + 'Dependencies installed successfully')
        except Exception as e:
            capture_exception(e)
            print(Fore.RED + 'Could not install dependencies. Install pipenv manually')
    elif answers["choiceForExec"] == 'Run the GraphQL APP':
        try:
            # Runs gunicorn app
            execution = 'cd {} && gunicorn -w 3 -k uvicorn.workers.UvicornWorker graphql-app:app -b 0.0.0.0:8000 '.format(
                app)
            print(
                Fore.GREEN +
                "App is running successfully. Go to https://graphql.itsezsid.com or "
                "0.0.0.0:8000 to access the GraphQL endpoint" + Style.RESET_ALL)
            subprocess.getstatusoutput(execution)
        except KeyboardInterrupt:
            try:
                # Stops gunicorn using pkill [Kills Process]
                execution = 'cd {} && pkill gunicorn'.format(app)
                subprocess.getstatusoutput(execution)
                print(Fore.GREEN + "Gunicorn Stopped Successfully" + Style.RESET_ALL)
            except Exception as e:
                capture_exception(e)
                # BUG FIX: the original ended with "+ +Style.RESET_ALL";
                # unary plus on a string raises TypeError whenever this
                # handler runs.  Same fix applied in the handlers below.
                print(Fore.RED + "Unable To stop Gunicorn Directly.\n" +
                      Fore.GREEN +
                      "Please enter the app directory and run 'pkill gunicorn'" +
                      Style.RESET_ALL)
        except Exception:
            # Narrowed from a bare "except:" so SystemExit still propagates.
            print(Fore.RED + "Unable To Run Gunicorn Directly.\n" +
                  Fore.GREEN + "Please enter the app directory "
                  "and run 'gunicorn -w 3 -k "
                  "uvicorn.workers.UvicornWorker "
                  "graphql-app:app -b "
                  "0.0.0.0:8000'" + Style.RESET_ALL)
    elif answers["choiceForExec"] == 'Stop the GraphQL app':
        try:
            # Stops gunicorn using pkill [Kills Process]
            execution = 'cd {} && pkill gunicorn'.format(app)
            subprocess.getstatusoutput(execution)
            print(Fore.GREEN + "Gunicorn Stopped Successfully" + Style.RESET_ALL)
        except Exception as e:
            capture_exception(e)
            print(Fore.RED + "Unable To stop Gunicorn Directly.\n" +
                  Fore.GREEN +
                  "Please enter the app directory and run 'pkill gunicorn'" +
                  Style.RESET_ALL)
    elif answers["choiceForExec"] == 'Update the Database':
        try:
            # Runs the update function
            functions.updateDb()
            print(Fore.GREEN + "Database Updated Successfully" + Style.RESET_ALL)
        except Exception as e:
            capture_exception(e)
            print(Fore.RED + "Database couldnt be updated . Check .env file")
    elif answers["choiceForExec"] == 'Create SQL Tables in a new Database':
        try:
            # Runs the insert function
            functions.insertDb()
            print(Fore.GREEN + "Database Updated Successfully" + Style.RESET_ALL)
        except Exception as e:
            capture_exception(e)
            print(Fore.RED + "Database couldnt be updated . Check .env file")
    elif answers["choiceForExec"] == 'Check API Call':
        try:
            # Makes the API call for testing
            data = functions.apiCall()
            print(json.dumps(data, indent=2))
            print(Fore.GREEN + "Command ran successfully")
        except Exception as e:
            capture_exception(e)
            print(Fore.RED + "Command Failed" + Style.RESET_ALL)
    elif answers["choiceForExec"] == 'Get Stats':
        try:
            # Runs the stats_prompt() function
            stats_prompt()
        except Exception as e:
            capture_exception(e)
            print(Fore.RED + 'Could not get stats' + Style.RESET_ALL)
def test_stdin():
    """Test on stdin"""
    # Feed the fox fixture via stdin redirection instead of an argument.
    exit_code, captured = getstatusoutput(f'{prg} < {fox}')
    assert exit_code == 0
    expected = ' 1 9 45 <stdin>'
    assert captured.rstrip() == expected
def main():
    """Interactive menu loop: install / uninstall / inspect a kubeadm cluster."""
    try:
        system_info()
        while (True):
            try:
                # Print the menu banner and options, then read a choice
                # (either the option number or its name).
                print(
                    "\033[1;36m >>>>>>>>>>>>>>>>>>>>>>>>功能菜单<<<<<<<<<<<<<<<<<<<<<<<<<<< \033[0m"
                )
                print('\n' * 2)
                print(" 1.install_kubernetes ")
                print(" 2.unstall_kubernetes")
                print(" 3.show_kubernetes ")
                print(" 4.quit ")
                print('\n')
                input_Function = input('请输入执行的选项序号或功能名称: ')
                print('\n')
                if input_Function == 'install_kubernetes' or input_Function == '1':
                    # Install the master first; each node install only
                    # proceeds when the previous step reported 'success'.
                    return_master = install_master()
                    if return_master == 'success':
                        install_node1_return = install_node1()
                        if install_node1_return == 'success':
                            install_node2_return = install_node2()
                            show_k8s()
                elif input_Function == 'unstall_kubernetes' or input_Function == '2':
                    # Locate the ansible binary, run "kubeadm reset" plus
                    # package removal against each host inventory, then
                    # delete the inventory files.
                    ansible_url = os.popen('which ansible')
                    ansible_temp = ansible_url.read()
                    ansible = ansible_temp.split()[0]
                    uninstall_kubeadm_master = subprocess.getstatusoutput(
                        " {0} -i /opt/k8s-admin/scripts/master_hosts all -m 'shell' -a 'kubeadm reset -f && yum remove -y kubelet kubeadm kubectl ' "
                        .format(ansible))
                    uninstall_kubeadm_node1 = subprocess.getstatusoutput(
                        " {0} -i /opt/k8s-admin/scripts/node1_hosts all -m 'shell' -a 'kubeadm reset -f && yum remove -y kubelet kubeadm kubectl ' "
                        .format(ansible))
                    uninstall_kubeadm_node2 = subprocess.getstatusoutput(
                        " {0} -i /opt/k8s-admin/scripts/node2_hosts all -m 'shell' -a 'kubeadm reset -f && yum remove -y kubelet kubeadm kubectl ' "
                        .format(ansible))
                    rm_hosts = subprocess.getstatusoutput(
                        "rm -rf /opt/k8s-admin/scripts/master_hosts node1_hosts node2_hosts "
                    )
                    print('\033[1;32m kubernetes集群卸载完成!!!!!! \033[0m')
                elif input_Function == 'show_kubernetes' or input_Function == '3':
                    # Only show status when all three inventories exist,
                    # i.e. a cluster was installed by this tool.
                    if os.path.exists(
                            '/opt/k8s-admin/scripts/master_hosts'
                    ) and os.path.exists(
                            '/opt/k8s-admin/scripts/node1_hosts'
                    ) and os.path.exists('/opt/k8s-admin/scripts/node2_hosts'):
                        show_k8s()
                    else:
                        print(
                            "\033[1;31m 未找到集群信息,请先安装集群,再执行查看集群状态操作!!!! \033[0m"
                        )
                elif input_Function == 'quit' or input_Function == '4':
                    sys.exit(1)
                else:
                    print('\n')
                    print('\033[1;33m 请输入正确的功能按键,如需退出,请输入quit!!! \033[0m')
            except KeyboardInterrupt:
                # Ctrl-C inside the menu: prompt to quit via the menu.
                print('\n')
                print('\033[1;31m 如需退出,请输入quit!!! \033[0m')
    except KeyboardInterrupt:
        # Ctrl-C outside the inner loop: report forced interruption.
        print('\n')
        print('\033[1;31m 程序被强制中断!!!!!!!! \033[0m')
def test_two():
    """Test on two"""
    # A two-line fixture should report 2 lines, 2 words, 4 bytes.
    exit_code, captured = getstatusoutput(f'{prg} {two_lines}')
    assert exit_code == 0
    expected = ' 2 2 4 ./inputs/two.txt'
    assert captured.rstrip() == expected
def test_usage() -> None:
    """ Usage """
    # Running with no arguments must fail and emit a usage message.
    exit_code, captured = getstatusoutput(RUN)
    assert exit_code != 0
    assert captured.lower().startswith('usage:')
# 28/4/20 - Stephen Kay, University of Regina #Short script to test reading in of config file # Import relevant packages import uproot as up import numpy as np import pandas as pd import ROOT import scipy import scipy.integrate as integrate import matplotlib.pyplot as plt import sys, math, os, subprocess sys.path.insert(0, 'python/') ConfigFileName = sys.argv[1] USER = subprocess.getstatusoutput("whoami") # Grab user info for file finding HOST = subprocess.getstatusoutput("hostname") if ("farm" in HOST[1]): REPLAYPATH = "/group/c-pionlt/USERS/%s/hallc_replay_lt" % USER[1] elif ("lark.phys.uregina" in HOST[1]): REPLAYPATH = "/home/%s/work/JLab/hallc_replay_lt" % USER[1] ConfigFile = "%s/UTIL_PROTON/config/%s" % (REPLAYPATH, ConfigFileName) print("Using cuts defined in %s" % ConfigFile) Configf = open(ConfigFile) for line in Configf: if ("Timing_Cuts" in line): array = line.split("=") TimingTmp = array[1] TimingCutFile = "%s/UTIL_PROTON/config/%s" % (REPLAYPATH, TimingTmp) if ("PID_Cuts" in line):
def __run_cmd__(self, line): result = subprocess.getstatusoutput(line) if result[0] != 0: raise Exception(result[1]) return result[1]
def init_data(self):
    """Load the full contents of the configured Hive table into a DataFrame."""
    # Dump the table via the hive CLI (-S suppresses hive's own chatter).
    exit_status, raw_rows = subprocess.getstatusoutput(
        "hive -S -e 'select * from " + self.__tn + "'")
    # Clean the raw text and wrap it in a typed Spark DataFrame.
    cleaned_rows = self.cleaning_init_data(raw_rows)
    return self.__spark_instance.createDataFrame(cleaned_rows, self.data_columns)
def RunCmd(name, autoOpen=False):
    """runs a system command and returns an array of lines of the output"""
    exit_status, captured = subprocess.getstatusoutput(name)
    # Hand the output off line by line to the display helper.
    ListOutput(captured.split('\n'), autoOpen)
Or maybe just come up with a better approach altogether """ import subprocess feedback = '' off_set = 0 increment = 100000 ranges = { 'sched_a': (784000000, 999000000), 'sched_b': (739000000, 999000000), } for table_name in ranges: key_min, key_max = ranges[table_name] for chunk_min in range(key_min, key_max, increment): chunk_max = chunk_min + increment - 1 insert = """psql -c " INSERT INTO newdownload.%s SELECT * FROM frn.%s WHERE %s_sk BETWEEN %d AND %d " cfdm""" % (table_name, table_name, table_name, chunk_min, chunk_max) print(insert) (exit_code, feedback) = subprocess.getstatusoutput(insert) log_insert = """psql -c " INSERT INTO synch_successes (table_name, begin_sk, end_sk, success, err_msg) VALUES ('%s', %d, %d, %s, '%s') " cfdm """ % ( table_name, chunk_min, chunk_max, (not exit_code), feedback.replace("'", "''")) print(log_insert) (exit_code, feedback) = subprocess.getstatusoutput(log_insert)
import subprocess as sp

# Ping every address in ip.txt (one per line), append each result to
# ping_log.txt, and finish with a reachable/unreachable summary.
normal = 0    # reachable hosts
bad = 0       # unreachable hosts
count = 0     # total addresses tested
Bad_IP = []   # addresses that failed

# Context managers close both files even on error; the explicit close()
# calls of the original are no longer needed (ip_list.close() inside the
# with-block was redundant anyway).
with open('ping_log.txt', 'w+', encoding='utf-8') as ping_log, \
        open('ip.txt', 'rt', encoding='utf-8') as ip_list:
    for raw_ip in ip_list.readlines():
        # BUG FIX: strip the trailing newline before interpolating the
        # address into the shell command; previously the raw line
        # (newline included) was passed straight to the shell.
        cur_ip = raw_ip.strip()
        if not cur_ip:
            continue  # skip blank lines instead of pinging ""
        status, result = sp.getstatusoutput("ping -w 10 " + cur_ip)
        print(result)
        ping_log.write(result + '\n')
        if status == 0:
            normal += 1
        else:
            bad += 1
            Bad_IP.append(cur_ip)
        count += 1
    ping_log.write("\n\n共计ping测{A}个IP,其中可达{B}个,不可达{C}个,不可达设备为:\n{D}".format(
        A=count, B=normal, C=bad, D=Bad_IP))
costLim=costLim, print_cost=True, learning_rate=learning_rate, guessPar=parameters) tmp1 = samplePS(x, np.max(y), parameters, qNom, eps, theta, num_points) # next point to try tmp2 = cosyrun(tmp1, index) x = np.concatenate((x, [tmp1]), axis=0) y = np.concatenate((y, [tmp2]), axis=None) #print(i, x, y) #print(parameters) return 0 # Removing files from older runs cmd = 'rm -f temp*.pdf' failure, output = commands.getstatusoutput(cmd) cmd = 'rm -f simpleOptimization*.pdf' failure, output = commands.getstatusoutput(cmd) cmd = 'rm -f results*.txt' failure, output = commands.getstatusoutput(cmd) pool = multiprocessing.Pool() # Take as many processes as possible for index in range(0, numSim): initial = np.asarray( [random.uniform(qNom[i] * 0.8, qNom[i] * 1.2) for i in range(7)]) pool.apply_async(optim, [initial, index]) pool.close() pool.join()
def execute_command(command):
    """Run *command* in a shell and return only its exit status."""
    exit_status, _output = subprocess.getstatusoutput(command)
    return exit_status
def verify(path, rm_broken):
    """
    Verify converted GeoTIFF files are (Geo)TIFF with cloud optimized
    compatible structure.

    Optionally delete any directories (and their content) that contain
    broken GeoTIFFs

    PATH may be either a directory to recursively check, or a file with
    a list of filenames to check
    """
    # Worker rank/size so the file list can be sharded across MPI jobs.
    job_rank, job_size = _mpi_init()
    broken_files = set()
    path = Path(path)
    if path.is_dir():
        # Non-lazy recursive search for geotiffs
        gtiff_file_list = Path(path).rglob("*.[tT][iI][fF]")
        gtiff_file_list = list(gtiff_file_list)
    else:
        # Read filenames to check from a file
        # NOTE(review): entries read this way are str, not Path, yet
        # .parent is used on them below -- confirm callers wrap to Path.
        with path.open() as fin:
            gtiff_file_list = [line.strip() for line in fin]

    if job_size == 1 and sys.stdout.isatty():
        # Not running in parallel, display a TQDM progress bar
        iter_wrapper = partial(tqdm, disable=None, desc='Checked GeoTIFFs', unit='file')
    elif job_size == 1:
        iter_wrapper = iter
    else:
        # Running in parallel, only process every nth file
        iter_wrapper = nth_by_mpi

    for geotiff_file in iter_wrapper(gtiff_file_list):
        # TODO: Call directly instead of instanciating more python interpreters!
        command = f"python3 {VALIDATE_GEOTIFF_CMD} {geotiff_file}"
        exitcode, output = subprocess.getstatusoutput(command)
        validator_output = output.split('/')[-1]

        # If no metadata file does not exists after cog conversion then add the tiff file to the broken files set
        if not list(geotiff_file.parent.rglob('*.yaml')):
            LOG.error("No YAML file created for GeoTIFF file",
                      error=validator_output,
                      filename=geotiff_file)
            broken_files.add(geotiff_file)

        if exitcode == 0:
            LOG.debug(validator_output)
        else:
            # Log and remember broken COG
            LOG.error("Invalid GeoTIFF file",
                      error=validator_output,
                      filename=geotiff_file)
            broken_files.add(geotiff_file)

    if rm_broken:
        # Delete directories containing broken files
        # Prevent deleting directories out from under another worker that's checking files within.
        from mpi4py import MPI
        comm = MPI.COMM_WORLD
        gathered = comm.gather(broken_files, root=0)
        if job_rank == 0:
            # Root merges every worker's broken set before deleting.
            broken_files = set().union(*gathered)
        else:
            # Non-root workers are done once they have reported.
            assert gathered is None
            return

        broken_directories = set(file.parent for file in broken_files)
        for directory in broken_directories:
            LOG.info('Deleting directory', dir=directory)
            shutil.rmtree(directory)
def test_one():
    """Test on one"""
    # A one-line fixture should report 1 line, 1 word, 2 bytes.
    exit_code, captured = getstatusoutput(f'{prg} {one_line}')
    assert exit_code == 0
    expected = ' 1 1 2 ./inputs/one.txt'
    assert captured.rstrip() == expected
def test_empty():
    """Test on empty"""
    # An empty fixture should report all-zero counts.
    exit_code, captured = getstatusoutput(f'{prg} {empty}')
    assert exit_code == 0
    expected = ' 0 0 0 ./inputs/empty.txt'
    assert captured.rstrip() == expected