def mergeFiles(files, mergeName):
    """Concatenate and sort the given files into *mergeName* via the shell.

    Returns mergeName when at least one file name was given, else "".
    """
    joined = " ".join(files)
    if not joined:
        return ""
    # NOTE(review): names are interpolated unquoted into a shell pipeline;
    # paths containing spaces or metacharacters will break this.
    commands.getstatusoutput("echo %s | xargs cat | sort > %s" % (joined, mergeName))
    return mergeName
def validate_virt(self):
    """validate if virtualization is installed/supported"""
    # prlctl must be on PATH, otherwise Parallels is not installed.
    if commands.getstatusoutput("which prlctl")[0] != 0:
        raise VirtException("prlctl not found")
    # NOTE(review): a zero status from this grep appears to mean a stale
    # entry for the VM exists, so it is deleted first -- confirm.
    probe = 'prlctl list %s | grep -qsv "UUID"' % self.VBOX_NAME
    if commands.getstatusoutput(probe)[0] == 0:
        self.run_or_die("prlctl delete %s" % self.VBOX_NAME)
def inst_node_modules(url, module): if module == 'iotivity-node': sub_str = url.split('/') for child_str in sub_str: if child_str.endswith('zip'): os.system('unzip %s >/dev/null 2>&1' % child_str) os.system('mv iotivity-node-%s iotivity-node' % \ child_str.strip('.zip')) os.system('rm %s*' % child_str) os.chdir('/tmp/%s' % module) inst_node_cmd = 'npm install' inst_node_status = commands.getstatusoutput(inst_node_cmd) if inst_node_status[0] != 0: print '\nRe-install node modules using root!' inst_node = commands.getstatusoutput('sudo npm install') if inst_node[0] != 0: print '%s \n Install node modules failed! Please check it!' % \ inst_node[1] sys.exit(1) if module == "iotivity-node": inst_grunt_cli_cmd = 'npm install grunt-cli' inst_grunt_cli_status = commands.getstatusoutput(inst_grunt_cli_cmd) if inst_grunt_cli_status[0] != 0: print '\nInstall grunt-cli failed! %s' % inst_grunt_cli_status[1] sys.exit(1) else: print '\nInstall grunt-cli done!'
def setup(arch_type,hg='all',web=False):
    """Install prerequisites: check wget, build the bundled scikit-learn
    0.17 (for joblib), fetch the UCSC tools and, unless *web* is set,
    download the UCSC data for the requested genome build(s).

    arch_type : architecture string passed to get_ucsc_tools
    hg        : 'hg19', 'hg38' or 'all'
    web       : when True, stop after the tools step (web-only install)
    """
    prog_dir = os.path.dirname(os.path.abspath(__file__))
    prog_get = 'wget'
    print '1) Check wget'
    cmd='which '+prog_get
    print 'CMD:',cmd
    out=getstatusoutput(cmd)
    print out[1]
    if out[0]!=0:
        print >> sys.stderr, "ERROR: Command wget not available."
        print
        sys.exit(1)
    print '\n2) Compile and check scikit-learn-0.17 for joblib'
    cmd='cd '+prog_dir+'/tools/; tar -xzvf scikit-learn-0.17.tar.gz;'
    cmd=cmd+'cd scikit-learn-0.17; python setup.py install --install-lib='+prog_dir+'/tools'
    print 'CMD:',cmd
    out=getstatusoutput(cmd)
    print out[1]
    if out[0]!=0:
        print >> sys.stderr, "ERROR: scikit-learn istallation failed."
        sys.exit(1)
    # Verify joblib is importable from the freshly built local install.
    cmd='cd '+prog_dir+'/tools/; python -c \'from sklearn.externals import joblib\''
    print 'CMD:',cmd
    out=getstatusoutput(cmd)
    print out[1]
    if out[0]!=0:
        print >> sys.stderr, "ERROR: joblib library not available."
        sys.exit(1)
    print '\n3) Download UCSC Tools'
    out=get_ucsc_tools(arch_type)
    # NOTE(review): status 65280 is 255<<8 in commands' encoding; it is
    # tolerated here -- presumably an expected tool exit code. Confirm.
    if out[0]!=0 and out[0]!=65280:
        print >> sys.stderr,'ERROR: Incorrect architecture check your system or compile it.'
        print
        sys.exit(1)
    if web: sys.exit(0)
    dcount=0
    print '\n4) Download UCSC Data. It can take several minutes depending on the newtork speed.'
    if (hg=='all' or hg=='hg19'):
        # All three hg19 files must download successfully (dcount==3).
        out=get_ucsc_data('hg19','hg19.2bit','bigZips')
        if out[0]==0: dcount+=1
        biofold='http://snps.biofold.org/PhD-SNPg/ucsc'
        out=get_ucsc_data('hg19','hg19.phyloP46way.primate.bw','',biofold)
        if out[0]==0: dcount+=1
        out=get_ucsc_data('hg19','hg19.100way.phyloP100way.bw','phyloP100way')
        if out[0]==0: dcount+=1
        if dcount<3:
            print >> sys.stderr, 'ERROR: Problem in downloading hg19 data.'
            sys.exit(1)
    dcount=0
    if (hg=='all' or hg=='hg38'):
        # Same rule for hg38: all three downloads must succeed.
        out=get_ucsc_data('hg38','hg38.2bit','bigZips')
        if out[0]==0: dcount+=1
        out=get_ucsc_data('hg38','hg38.phyloP7way.bw','phyloP7way')
        if out[0]==0: dcount+=1
        out=get_ucsc_data('hg38','hg38.phyloP100way.bw','phyloP100way')
        if out[0]==0: dcount+=1
        if dcount<3:
            print >> sys.stderr, 'ERROR: Problem in downloading hg38 data'
            sys.exit(1)
    print ' Downloaded UCSC data'
def send_cmd_info():
    """Daemon loop: read the shell-history log, turn each command entry
    into a JSON record and stream it to the collector at
    SERVER_IP:SERVER_PORT, sleeping DETECT_TIME seconds between rounds.
    """
    global SERVER_IP,SERVER_PORT,DETECT_TIME
    sk=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
    sec=86400  # NOTE(review): unused constant (one day in seconds)
    res,hostname=commands.getstatusoutput('hostname')
    while True:
        # begin/end delimit new entries; `script` extracts one entry from `log`.
        begin,end,script,log=parse_history_log_file('/tmp/.history_cmd.log')
        try:
            # Fresh connection per round; closed in the finally below.
            sk=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
            sk.connect((SERVER_IP,SERVER_PORT))
            for i in range(begin,end):
                cmd=script+' '+str(i)+' '+log
                res,info=commands.getstatusoutput(cmd)
                if info:
                    # Fields are '@_@'-separated: time, user, ip, command.
                    str_list=info.split('@_@')
                    dicts={'type':'cmd'}
                    dicts['time']=str_list[0].rstrip(' ')
                    dicts['user']=str_list[1]
                    dicts['ip']=str_list[2]
                    dicts['cmd']=str_list[3]
                    dicts['host']=hostname
                    json=simplejson.dumps(dicts)
                    # Skip records with no source ip or user.
                    if dicts['ip'] == '' or dicts['user'] == '':
                        continue
                    sk.sendall(json)
        except Exception as e:
            print e
        finally:
            sk.close()
        time.sleep(DETECT_TIME)
def bi_fold(seq_1, seq_2, local_pairing=False, mfe=True): import random randID = random.randint(10000,999999) seq_1_fn = "/tmp/seq_%s_1.fa" % (randID, ) seq_2_fn = "/tmp/seq_%s_2.fa" % (randID, ) ct_fn = "/tmp/ss_%s.ct" % (randID, ) build_Single_Seq_Fasta(seq_1, seq_1_fn) build_Single_Seq_Fasta(seq_2, seq_2_fn) if not local_pairing: CMD = "bifold-smp --intramolecular %s %s %s > /dev/null" % (seq_1_fn, seq_2_fn, ct_fn) else: CMD = "bifold-smp %s %s %s > /dev/null" % (seq_1_fn, seq_2_fn, ct_fn) print CMD os.system(CMD) return_code, return_string = commands.getstatusoutput( "grep ENERGY %s | wc -l" % (ct_fn, ) ) structure_number = int( return_string.strip() ) structure_list = [] ct2dot_cmd = "ct2dot %s %d /dev/stdout" for idx in range(structure_number): if mfe and idx == 1: break return_code, return_string = commands.getstatusoutput( ct2dot_cmd % (ct_fn, idx+1) ) lines = return_string.split('\n') energy = float(lines[0].strip().split()[-2]) structure = return_string.split()[8] structure_list.append( (energy, lines[2]) ) cur_seq = seq_1 + "III" + seq_2 os.system("rm %s %s %s" % (seq_1_fn, seq_2_fn, ct_fn)) if mfe: return structure_list[0][1] else: return cur_seq, structure_list
def test_install_uninstall_repeatedly(self):
    """Stress test: install/uninstall the iterative apk in a loop for
    `runtime` seconds while sysmon.sh records system metrics."""
    setUp()
    global testName, runtime
    testName = 'test_install_uninstall_repeatedly'
    runtime = 7200  # seconds (2 hours)
    pre_time = time.time()
    # Launch the monitoring script in the background for the whole run.
    sysmon_path = ConstPath + '/sysmon.sh'
    sysmon_cmd = sysmon_path + ' ' + testName + ' ' + str(runtime) + ' org.xwalk.iterative'
    subprocess.Popen(args=sysmon_cmd, shell=True)
    i = 0
    while True:
        i = i + 1
        cmd = 'adb -s ' + device + ' install -r ' + ConstPath + '/../iterative*.apk'
        inststatus = commands.getstatusoutput(cmd)
        elapsed_time = time.time() - pre_time
        if inststatus[0] == 0:
            if elapsed_time >= runtime:
                #kill process
                print i, elapsed_time, 'Process finished'
                uninststatus = commands.getstatusoutput('adb -s ' + device + ' uninstall org.xwalk.iterative')
                self.assertEquals(uninststatus[0], 0)
                break
            else:
                # Uninstall and go around again until the time budget runs out.
                uninststatus = commands.getstatusoutput('adb -s ' + device + ' uninstall org.xwalk.iterative')
                self.assertEquals(uninststatus[0], 0)
                print i, elapsed_time, 'Continue'
                time.sleep(3)
        else:
            self.assertFalse(True, 'Install apk failed')
            #print 'Install apk failed'
            break
def updateReleaseInfoFromTc(configuration,release, arch=environ["SCRAM_ARCH"]):
    """Fetch the Tag Collector's external-tags XML for *release*/*arch*
    (via wget, falling back to curl) and fold each <external> tag into
    *configuration*, coercing values to the type already stored there.

    NOTE(review): the `arch` default is evaluated once at import time
    from SCRAM_ARCH -- later environment changes are not picked up.
    """
    url = "'https://cmstags.cern.ch/tc/public/ReleaseExternalsXML?release="+release+"&architecture="+arch+"'"
    cmd = 'wget --no-check-certificate -nv -o /dev/null -O- '+url
    error, result = getstatusoutput ('which wget')
    if error:
        # wget unavailable -- use curl instead.
        cmd = 'curl -L -k --stderr /dev/null '+url
    error, result = getstatusoutput (cmd)
    if error:
        print result
        return
    regex = re.compile('^\s*<external\s+external="([^"]+)"\s+tag="([^"]+)"\s*/>\s*$')
    rxBoolFalse = re.compile('^(f|false|0|)$', re.I)
    for line in result.split('\n'):
        m = regex.match(line)
        if m:
            var = m.group(1).strip()
            val = m.group(2).strip()
            if val == '-': val=''
            if configuration.has_key(var):
                # Coerce the incoming string to the existing value's type.
                vtype = type(configuration[var])
                if vtype == type(True):
                    if rxBoolFalse.match(val):
                        val = False
                    else:
                        val = True
                elif vtype == type([]):
                    if val:
                        val = val.split(',')
                    else:
                        val = []
            else:
                print 'ERROR: ---> Tag Collector Invalid key :',var
            # RelValArgs additionally gets the process count appended.
            if var == 'RelValArgs':
                val = val + ' --nproc %s ' % cmsRunProcessCount
            print '---> Tag Collector: ',var,'=',val
            configuration[var]=val
    return
def delete_external_route(self, subnet):
    """Remove the host route for *subnet* that points through the
    router's port on the external network.

    Walks: subnet gateway IP -> gateway port -> router -> router's port
    on the external net -> `route delete` for the subnet CIDR.
    Returns 1 for backward compatibility with existing callers.
    """
    ports = self.shell.quantum.list_ports()['ports']
    gw_ip = subnet['gateway_ip']
    subnet_id = subnet['id']
    # 1. Find the port associated with the subnet's gateway
    gw_port = None
    for port in ports:
        for fixed_ip in port['fixed_ips']:
            if fixed_ip['subnet_id'] == subnet_id and fixed_ip['ip_address'] == gw_ip:
                gw_port = port
                break
        if gw_port is not None:
            break
    # BUGFIX: previously a missing gateway port raised NameError on
    # `gw_port`; bail out gracefully instead.
    if gw_port is None:
        return 1
    # 2. Find the router associated with that port
    router_id = gw_port['device_id']
    router = self.shell.quantum.show_router(router_id)['router']
    ext_net = router['external_gateway_info']['network_id']
    # 3. Find the port associated with this router and on the external net
    ip_address = None
    for port in ports:
        if port['device_id'] == router_id and port['network_id'] == ext_net:
            ip_address = port['fixed_ips'][0]['ip_address']
    # 4. Delete the route to the subnet through the port from step 3
    if ip_address:
        cmd = "route delete -net %s" % (subnet['cidr'])
        commands.getstatusoutput(cmd)
    return 1
def get_packages_from_ftp(root, suite, component, architecture):
    """
    Returns an object containing apt_pkg-parseable data collected by
    aggregating Packages.gz files gathered for each architecture.

    @type root: string
    @param root: path to ftp archive root directory

    @type suite: string
    @param suite: suite to extract files from

    @type component: string
    @param component: component to extract files from

    @type architecture: string
    @param architecture: architecture to extract files from

    @rtype: TagFile
    @return: apt_pkg class containing package data
    """
    (fd, temp_file) = temp_filename()
    main_pkgs = "%s/dists/%s/%s/binary-%s/Packages.gz" % (root, suite, component, architecture)
    (result, output) = commands.getstatusoutput("gunzip -c %s > %s" % (main_pkgs, temp_file))
    if (result != 0):
        fubar("Gunzip invocation failed!\n%s\n" % (output), result)
    # Append debian-installer packages, when present, to the same file.
    di_pkgs = "%s/dists/%s/%s/debian-installer/binary-%s/Packages.gz" % (root, suite, component, architecture)
    if os.path.exists(di_pkgs):
        (result, output) = commands.getstatusoutput("gunzip -c %s >> %s" % (di_pkgs, temp_file))
        if (result != 0):
            fubar("Gunzip invocation failed!\n%s\n" % (output), result)
    packages = open_file(temp_file)
    Packages = apt_pkg.TagFile(packages)
    os.unlink(temp_file)
    return Packages
def to_curl(self):
    """Render this request AST as a `curl` command line.

    Query-string, header and body options may be literal values
    (ValueNode) or shell commands (ShellNode) whose output is
    substituted into the request.
    """
    url = self.ast.left.url
    method = self.ast.method.name
    params = {}
    headers = {}
    body = {}
    for option in self.ast.right.options if self.ast.right else []:
        if isinstance(option.key, QueryStringNode):
            if isinstance(option.value, ValueNode):
                params[option.key.key] = option.value.value
            elif isinstance(option.value, ShellNode):
                params[option.key.key] = commands.getstatusoutput(option.value.value)[1]
        elif isinstance(option.key, HeaderNode):
            if isinstance(option.value, ValueNode):
                headers[option.key.key] = option.value.value
            elif isinstance(option.value, ShellNode):
                headers[option.key.key] = commands.getstatusoutput(option.value.value)[1]
        elif isinstance(option.key, BodyNode):
            if isinstance(option.value, ValueNode):
                body[option.key.key] = option.value.value
            elif isinstance(option.value, ShellNode):
                body[option.key.key] = commands.getstatusoutput(option.value.value)[1]
    # BUGFIX: the branches were inverted -- when the URL already contained
    # '?' the params were appended with no separator, and a bare '?' was
    # appended even when there were no params at all.  Join with '&' when a
    # query string is already present, '?' otherwise, and only if needed.
    if params:
        url = url + ('&' if '?' in url else '?') + urllib.urlencode(params)
    headers = ['-H "{k}: {v}"'.format(k=k, v=v) for k, v in headers.items()]
    body = ['-d "{k}={v}"'.format(k=k, v=v) for k, v in body.items()]
    return '''
curl -X {method} {headers} {data} "{url}"
'''.format(url=url, method=method, headers=" ".join(headers), data=" ".join(body))
def get_candidates_size(img_folder):
    """Re-encode images 1.jpg..100.jpg from *img_folder* with several
    JPEG codecs and return the accumulated output sizes as a 6-tuple:
    (standard, optimized, arithmetic, progressive, mozjpeg, packJPG).
    """
    total_std_size = 0
    total_opt_size = 0
    total_ari_size = 0
    total_pro_size = 0
    total_moz_size = 0
    total_pjg_size = 0
    for i in range(1, 101):
        # Baseline: re-emit the coefficients untouched.
        os.system("/opt/libjpeg-turbo/bin/jpegtran -outputcoef t " + img_folder + "/" + str(i) + ".jpg temp.jpg")
        total_std_size += os.path.getsize("temp.jpg")
        # Huffman-table optimization.
        c = commands.getstatusoutput("/opt/libjpeg-turbo/bin/jpegtran -optimize " + img_folder + "/" + str(i) + ".jpg temp.jpg")
        total_opt_size += os.path.getsize("temp.jpg")
        # Arithmetic coding.
        c = commands.getstatusoutput("/opt/libjpeg-turbo/bin/jpegtran -arithmetic " + img_folder + "/" + str(i) + ".jpg temp.jpg")
        total_ari_size += os.path.getsize("temp.jpg")
        # Progressive scan script.
        c = commands.getstatusoutput("/opt/libjpeg-turbo/bin/jpegtran -progressive " + img_folder + "/" + str(i) + ".jpg temp.jpg")
        total_pro_size += os.path.getsize("temp.jpg")
        # mozjpeg (timed; its stdout is redirected into temp.jpg).
        c = commands.getstatusoutput("time -p /opt/mozjpeg/bin/jpegtran " + img_folder + "/" + str(i) + ".jpg > temp.jpg")
        total_moz_size += os.path.getsize("temp.jpg")
        # packJPG writes <name>.pjg next to the source image.
        c = commands.getstatusoutput("./packJPG " + img_folder + "/" + str(i) + ".jpg")
        total_pjg_size += os.path.getsize(img_folder + "/" + str(i) + ".pjg")
        os.system("rm " + img_folder + "/" + str(i) + ".pjg")
    return total_std_size, total_opt_size, total_ari_size, total_pro_size, total_moz_size, total_pjg_size
def __generateAllGraphicsForGroups( self, graphicType ):
    """
        @summary : Generated groups graphics based on the
                   specified graphicType.

        @param graphicType : "daily", "weekly", "monthly", "yearly"

        @raise Exception: When graphicType is unknown.
    """
    configParameters = StatsConfigParameters( )
    configParameters.getAllParameters()
    # Maps each graphic type onto the flag generateRRDGraphics.py expects.
    supportedGraphicTypes = { "daily": "-d", "weekly":"-w", "monthly":"-m", "yearly":"-y" }
    if graphicType not in supportedGraphicTypes:
        raise Exception( "Unsupported graphicType detected in __generateAllGraphicsForGroups" )
    else:
        for group in configParameters.groupParameters.groups:
            groupMembers, groupMachines, groupProducts, groupFileTypes = configParameters.groupParameters.getAssociatedParametersInStringFormat( group )
            # Flatten the list-style machine string into a bare comma list.
            groupMachines = str(groupMachines).replace( "[", "" ).replace( "]", "" ).replace( "'", "" ).replace( '"','' )
            if graphicType == "daily":
                # Daily graphics go through generateGnuGraphics.py.
                commands.getstatusoutput( '%sgenerateGnuGraphics.py -g %s -c %s --combineClients --copy -d "%s" -m %s -f %s -p %s -s 24 --outputLanguage %s' %( self.paths.STATSBIN, group, groupMembers, self.timeOfRequest, groupMachines, groupFileTypes, groupProducts, self.outputLanguage ) )
                #print '%sgenerateGnuGraphics.py -g %s -c %s --combineClients --fixedCurrent --copy -d "%s" -m %s -f %s -p %s -s 24 --language %s' %( self.paths.STATSBIN, group, groupMembers, self.timeOfRequest, groupMachines, groupFileTypes, groupProducts, self.outputLanguage )
            else:
                # All other periods go through generateRRDGraphics.py.
                commands.getoutput("%sgenerateRRDGraphics.py %s --copy -f %s --machines '%s' -c %s --date '%s' --fixedCurrent --language %s" %( self.paths.STATSBIN, supportedGraphicTypes[ graphicType], groupFileTypes, groupMachines, group, self.timeOfRequest, self.outputLanguage ) )
                print "%sgenerateRRDGraphics.py %s --copy -f %s --machines '%s' -c %s --date '%s' --fixedCurrent --language %s" %( self.paths.STATSBIN, supportedGraphicTypes[ graphicType], groupFileTypes, groupMachines, group, self.timeOfRequest, self.outputLanguage )
def clean(): print "Removing temporary files..." sys.stdout.flush() failure, output = commands.getstatusoutput("rm covar*") failure, output = commands.getstatusoutput("rm *.out") print "done."
def reattachSlaves(self):
    """Re-discover running slave.py processes that belong to this
    cluster (matched by cluster_id and owning user) and record their
    PIDs in self.slave_pids.  Returns True on success, False otherwise.
    """
    username = commands.getstatusoutput('whoami')
    if username[0] == 0:
        if self.cluster_id == '':
            log.info("no cluster ID yet")
            return False
        cmd = 'ps -fC python | grep slave.py | grep ' + self.cluster_id
        response = commands.getstatusoutput(cmd)
        self.slave_pids = []
        # commands encodes exit codes as status<<8, so 256 == exit code 1
        # (grep found no match).
        if response[0] == 256:
            #no slaves running
            return True
        if response[0] == 0:
            lines = response[1].split('\n')
            for line in lines:
                #print line
                # ps -f columns: UID PID ... -- keep PIDs owned by us.
                sline = line.split()
                if username[1] == sline[0]:
                    self.slave_pids.append(sline[1])
            #print self.slave_pids
            return True
        else:
            print "bad response for ps %s" % response[1]
    else:
        print "bad response for whoami %s" % username[1]
    return False
def test_monitorLoadBalancer(self): """ test monitoring of loadbalancers for libra """ # Create our loadbalancer self.create_result, self.actual_status, self.lb_id, self.lb_addr = self.driver.create_lb(self.lb_name, self.nodes, self.algorithm, self.bad_statuses) self.logging.info('load balancer id: %s' %self.lb_id) self.logging.info('load balancer ip addr: %s' %self.lb_addr) lbaas_utils.validate_loadBalancer(self) # use fabric to stop libra_worker self.logging.info("Stopping libra_worker on lb: %s address: %s" %(self.lb_id, self.lb_addr)) cmd = "fab --no-pty -H %s stop_libra_worker" %(self.lb_addr) status, output = commands.getstatusoutput(cmd) #if self.args.verbose: self.logging.info("Command: %s" %cmd) self.logging.info("Status: %s" %status) self.logging.info("Output: %s" %output) # check api to see if ERROR state is set / libra_worker failure is detected self.logging.info("Wait / poll api server for loadbalancer to be set to ERROR status...") lbaas_utils.wait_for_active_status(self, self.lb_id, active_wait_time=240, desired_status='ERROR') # restart libra_worker self.logging.info("Starting libra_worker on lb: %s address: %s" %(self.lb_id, self.lb_addr)) cmd = "fab --no-pty -H %s start_libra_worker" %(self.lb_addr) status, output = commands.getstatusoutput(cmd) #if self.args.verbose: self.logging.info("Command: %s" %cmd) self.logging.info("Status: %s" %status) self.logging.info("Output: %s" %output) # check api to ensure lb state is properly reset self.logging.info("Wait / poll api server for loadbalancer to be set to ACTIVE status...") lbaas_utils.wait_for_active_status(self, self.lb_id, active_wait_time=240, desired_status='ACTIVE') # one final lb validation lbaas_utils.validate_loadBalancer(self)
def lns(spath,dpath):
    """Force-create a symlink: remove *dpath*, then `ln -s spath dpath`.

    Returns the (status, output) pair from the `ln` invocation, or a
    (1, "missing path") error tuple when either argument is empty.
    """
    if not (spath and dpath):
        return (1,"缺少路径")
    # Remove any existing target first so ln -s cannot fail on it.
    commands.getstatusoutput("rm -rf {dpath}".format(dpath=dpath))
    return commands.getstatusoutput("ln -s {spath} {dpath}".format(spath=spath,dpath=dpath))
def getHostActivityFromDevice(ip,listHostPublic):
    """Query a device via SNMP for its active-host table and map each MAC
    address to its IP address and (when known) public host name.

    ip             : address of the SNMP device to walk
    listHostPublic : dict mapping IP address -> host name
    Returns {mac: {'ipAddress': ..., 'name': ...}}.
    """
    command = "snmpwalk -v 1 -c public %s %s" % (ip,ids.activehost['macAddress'])
    output = commands.getstatusoutput(command)
    command = "snmpwalk -v 1 -c public %s %s" % (ip,ids.activehost['ipAddress'])
    output1 = commands.getstatusoutput(command)
    resultMacs = output[1].split("\n")
    resultIps = output1[1].split("\n")
    dictHostAlive = {}
    for lineMac in resultMacs:
        if len(lineMac.split("Hex-STRING:")) == 2:
            data = parser.changeMacFormat(lineMac.split("Hex-STRING:")[1])
            # The OID suffix ties the MAC row to its IP row below.
            OidData = lineMac.split("Hex-STRING:")[0].replace(ids.activehost['macAddress'],"")
            # BUGFIX: the original tested `data is not dictHostAlive.keys`,
            # which compares against the bound method object and is always
            # true; use a real membership test instead.
            if data not in dictHostAlive:
                dictHostAlive[data] = {}
            for lineIp in resultIps:
                if len(lineIp.split("IpAddress:")) == 2:
                    if OidData in lineIp:
                        addr = lineIp.split("IpAddress:")[1].replace(" ","")
                        dictHostAlive[data]['ipAddress'] = addr
                        if addr in listHostPublic.keys():
                            dictHostAlive[data]['name'] = listHostPublic[dictHostAlive[data]['ipAddress']]
                        else:
                            dictHostAlive[data]['name'] = None
    return dictHostAlive
def cvs_sync_files(self): """ Copy files from git into each CVS branch and add them. Extra files found in CVS will then be deleted. A list of CVS safe files is used to protect critical files both from being overwritten by a git file of the same name, as well as being deleted after. """ # Build the list of all files we will copy from git to CVS. debug("Searching for git files to copy to CVS:") files_to_copy = self._list_files_to_copy() for branch in self.cvs_branches: print("Syncing files with CVS branch [%s]" % branch) branch_dir = os.path.join(self.working_dir, self.project_name, branch) new, copied, old = \ self._sync_files(files_to_copy, branch_dir) os.chdir(branch_dir) # For entirely new files we need to cvs add: for add_file in new: commands.getstatusoutput("cvs add %s" % add_file) # Cleanup obsolete files: for cleanup_file in old: # Can't delete via full path, must not chdir: run_command("cvs rm -Rf %s" % cleanup_file)
def host_power_operation():
    """Keep exactly two idle (VM-free) hosts powered on: wake shut-off
    hosts when fewer than two are free, shut down extras when more than
    two are free.  Exceptions are logged via log_exception()."""
    logger.debug("\nIn host power operation function\n-----------------------------------\n")
    livehosts = current.db(current.db.host.status == HOST_STATUS_UP).select()
    freehosts=[]
    try:
        # A host is "free" when it runs no VMs.
        for host_data in livehosts:
            if not has_running_vm(host_data.host_ip.private_ip):
                freehosts.append(host_data.host_ip.private_ip)
        freehostscount = len(freehosts)
        if(freehostscount == 2):
            logger.debug("Everything is Balanced. Green Cloud :)")
        elif(freehostscount < 2):
            logger.debug("Urgently needed "+str(2-freehostscount)+" more live hosts.")
            newhosts = current.db(current.db.host.status == HOST_STATUS_DOWN).select()[0:(2-freehostscount)] #Select only Shutoff hosts
            for host_data in newhosts:
                logger.debug("Sending magic packet to "+host_data.host_name)
                host_power_up(host_data)
        # BUGFIX: this branch compared the *list* `freehosts` to 2 (always
        # true in Python 2's mixed-type ordering) instead of the count.
        elif(freehostscount > 2):
            logger.debug("Sending shutdown signal to total "+str(freehostscount-2)+" no. of host(s)")
            extrahosts=freehosts[2:]
            for host_data in extrahosts:
                # NOTE(review): freehosts holds private-IP *strings*, yet this
                # loop accesses host_data.host_ip / update_record as if it were
                # a DB row -- a likely latent bug whose AttributeError is
                # swallowed by the bare except below. TODO confirm intent.
                logger.debug("Moving any dead vms to first running host")
                migrate_all_vms_from_host(host_data.host_ip.private_ip)
                logger.debug("Sending kill signal to " + host_data.host_ip.private_ip)
                commands.getstatusoutput("ssh root@" + host_data.host_ip.private_ip + " shutdown -h now")
                host_data.update_record(status=HOST_STATUS_DOWN)
    except:
        log_exception()
    return
def testCmdLength(): # normal list total_length = [[0 for col in range(3)]for row in range(6)] total_length[0][0] = 'python cat.py -t 20' total_length[0][1] = 20 total_length[1][0] = 'python cat.py -t 20 -M 30' total_length[1][1] = 20 total_length[2][0] = 'python cat.py -t 20 -m 30' total_length[2][1] = 20 total_length[3][0] = 'python cat.py -t 20 -M 50 -m 30' total_length[3][1] = 20 # exception list length_error = [[0 for col in range(3)]for row in range(6)] length_error[0][0] = 'python cat.py -t' length_error[0][1] = 20 length_error[1][0] = 'python cat.py -M' length_error[1][1] = 20 length_error[2][0] = 'python cat.py -m' length_error[2][1] = 20 #normal Test for i in range(4): status,output = commands.getstatusoutput(total_length[i][0]) if status != 0: print 'the command ' + total_length[i][0] + ' result is ' + str(status) + '-' + output if len(output) != total_length[i][1]: print 'the command ' + total_length[i][0] + ' length is ' + len(output) #exception Test for i in range(3): status,output = commands.getstatusoutput(length_error[i][0]) if status == 0: print 'the command ' + length_error[i][0] + ' result is ' + str(status) + '-' + output
def main(argv=sys.argv): def exit(): print print 'Usage: cactus.py <path> [create|build|serve|deploy]' print print ' create: Create a new website skeleton at path' print ' build: Rebuild your site from source files' print ' serve: Serve you website at local development server' print ' deploy: Upload and deploy your site to S3' print sys.exit() if len(argv) < 3: exit() # Handy shortcut for editing in TextMate if argv[2] == 'mate': commands.getstatusoutput('mate %s' % argv[1]) return if argv[2] not in ['create', 'build', 'serve', 'deploy']: exit() path = os.path.abspath(sys.argv[1]) for p in ['pages', 'static', 'templates']: if not os.path.isdir(os.path.join(path, p)): print 'This does not look like a cactus project (missing "%s" subfolder)' % p sys.exit() getattr(Site(path), argv[2])()
def test_local_iput_interrupt_directory(self):
    """Interrupt a recursive iput mid-transfer via its restart file,
    then confirm the transfer restarts and completes for every file."""
    # local setup
    datadir = "newdatadir"
    output = commands.getstatusoutput('mkdir ' + datadir)
    datafiles = ["file1", "file2", "file3", "file4", "file5", "file6", "file7"]
    # Create seven 20 MB zero-filled files to give iput enough work to interrupt.
    for datafilename in datafiles:
        print "-------------------"
        print "creating " + datafilename + "..."
        localpath = datadir + "/" + datafilename
        output = commands.getstatusoutput('dd if=/dev/zero of=' + localpath + ' bs=1M count=20')
        print output[1]
        assert output[0] == 0, "dd did not successfully exit"
    rf = "collectionrestartfile"
    # assertions
    iputcmd = "iput -X " + rf + " -r " + datadir
    if os.path.exists(rf):
        os.unlink(rf)
    self.admin.interrupt_icommand(iputcmd, rf, 10) # once restartfile reaches 10 bytes
    assert os.path.exists(rf), rf + " should now exist, but did not"
    output = commands.getstatusoutput('cat ' + rf)
    print " restartfile [" + rf + "] contents --> [" + output[1] + "]"
    self.admin.assert_icommand("ils -L " + datadir, 'STDOUT', datadir) # just to show contents
    self.admin.assert_icommand(iputcmd, 'STDOUT', "File last completed") # confirm the restart
    # Every file must have made it into the collection after the restart.
    for datafilename in datafiles:
        self.admin.assert_icommand("ils -L " + datadir, 'STDOUT', datafilename) # should be listed
    # local cleanup
    output = commands.getstatusoutput('rm -rf ' + datadir)
    output = commands.getstatusoutput('rm ' + rf)
def main():
    """For each params file under $PERFEXPDIR/src/examples/params: copy
    it into place as params.txt, load every parameter group, run model
    validation in the AIX measurement environment and plot the result."""
    # Get params and examples directories
    paramsdir = os.environ.get("PERFEXPDIR") + '/src/examples/params'
    examplesdir = os.environ.get("PERFEXPDIR") + '/src/examples'
    # Save contents of params directory
    dirList=os.listdir(paramsdir)
    # Iterate through all 'params' files
    for fname in dirList:
        if not fname.startswith('.'):
            # Copy params.txt file to src/examples/
            filename = paramsdir + '/' + fname
            cpcmd = 'cp ' + filename + ' ' + examplesdir + '/params.txt'
            commands.getstatusoutput(cpcmd)
            # Get values from params file: each *Params class reads its own
            # section from the copied params.txt.
            modParams = RARMAParams()
            modParams._processConfigFile()
            ansParams = ANSParams()
            ansParams._processConfigFile()
            meParams = MEParams()
            meParams._processConfigFile()
            genParams = GENParams()
            genParams._processConfigFile()
            # Build the measurement environment, run the RARMA model
            # validation and plot the resulting series.
            vm = AIXMeasurementEnv()
            plotter = Plotter()
            model = RARMA()
            xdata,ydata = vm.validateModel(model)
            plotter.generatePlot(xdata,ydata)
def checkOutBoost(self):
    """Fetch and unpack the Boost dependency described in
    self.dependencies['boost'] ("URL[,REV[,*]]") into DepSrcDir/boost.
    Skipped when no URL is given, the installed copy is to be used
    ('*' marker), or repo interaction is disabled."""
    # Get the dependency Config
    if "boost" not in self.dependencies:
        URL = None
        REV = None
        USE_INSTALLED = True
    else:
        depCfg = self.dependencies["boost"].split(",")
        URL = depCfg[0]
        if len(depCfg)<2 or depCfg[1] == "":
            REV = "HEAD"  # NOTE(review): REV is parsed but never used below
        else:
            REV = depCfg[1]
        # A trailing '*' field means "use the locally installed Boost".
        if len(depCfg)>=3 and depCfg[2] == "*":
            USE_INSTALLED = True
        else:
            USE_INSTALLED = False
    if not URL or USE_INSTALLED or self.repoInter == "no":
        self.addLog("*Not downloading Boost")
        return
    # Start from a clean checkout directory.
    self.addLog(commands.getstatusoutput("rm -rf " + os.path.join(self.DepSrcDir,"boost")))
    os.chdir(self.DepSrcDir)
    if self.openInstallation:
        self.addLog("*Downloading Boost to trunk ("+URL+":"+REV+")")
        self.addLog(commands.getstatusoutput("rm -rf " + os.path.split(URL)[-1]))
        self.addLog(commands.getstatusoutput("wget " + URL ))
    else:
        self.addLog("*Using boost in SVN Repo")
    # Unpack the tarball and strip the ".tar*" suffix to get "boost".
    # NOTE(review): in the SVN-repo branch the tarball is assumed to be
    # already present in DepSrcDir -- confirm.
    self.addLog(commands.getstatusoutput("tar xfz " + os.path.split(URL)[-1]))
    self.addLog(commands.getstatusoutput("mv " + os.path.split(URL)[-1][0:os.path.split(URL)[-1].rfind(".tar")] + " boost" ))
    os.chdir(install.currentDir)
def check(prefectFile,inputFile,toCheck):
    """Compile the participant's Verilog with the testbench, run it in
    the sandbox and compare its output to the answer file (whitespace
    and newlines stripped).

    Returns (score, verdict): (100, 'Accepted') or (0, <reason>).
    """
    # inputFile is actually the test bench file
    # prefectFile is the answer file
    # toCheck is uploaded by the participant
    if prefectFile == None or inputFile == None or toCheck == None:
        return 0, 'Insufficient arguments'
    COMPILE_COMMAND = "iverilog -o /tmp/output.out "+ toCheck + " " + inputFile
    # Executable is in the /tmp/output.out
    status,output = commands.getstatusoutput(COMPILE_COMMAND)
    if status!=0:
        return 0, 'Compile error, testbench and uploaded file does not match'
    # Sandbox limits: -m memory (KB), -t timeout (s); output captured in file.
    EXECUTE_COMMAND = 'sandbox -a3 -m 524288 -t 2 -o /tmp/submission.output -- /tmp/output.out'
    # The output of the executable is in the /tmp/submission.output
    # NOTE(review): the sandbox exit status is ignored here -- a runtime
    # failure or timeout falls through to output comparison. Confirm.
    status,output = commands.getstatusoutput(EXECUTE_COMMAND)
    with open(str(prefectFile), 'r') as f:
        answer = f.read().replace('\n', '').replace(' ', '')
    with open('/tmp/submission.output', 'r') as f:
        output = f.read().replace('\n', '').replace(' ', '')
    if output == answer:
        return 100,'Accepted'
    else:
        return 0,'Wrong Answer'
def snapshot_test(cmd):
    """
    Subcommand 'qemu-img snapshot' test.

    @param cmd: qemu-img base command.
    """
    cmd += " snapshot"
    # Create two snapshots in the image.
    for i in range(2):
        sn_name = "snapshot%d" % i
        crtcmd = cmd + " -c %s %s" % (sn_name, image_name)
        s, o = commands.getstatusoutput(crtcmd)
        if s != 0:
            raise error.TestFail("Create snapshot failed via command: %s;"
                                 "Output is: %s" % (crtcmd, o))
        logging.info("Created snapshot '%s' in '%s'", sn_name, image_name)
    # Both snapshots must appear in the listing.
    listcmd = cmd + " -l %s" % image_name
    s, o = commands.getstatusoutput(listcmd)
    if not ("snapshot0" in o and "snapshot1" in o and s == 0):
        raise error.TestFail("Snapshot created failed or missed;"
                             "snapshot list is: \n%s" % o)
    # Delete both snapshots again.
    for i in range(2):
        sn_name = "snapshot%d" % i
        delcmd = cmd + " -d %s %s" % (sn_name, image_name)
        s, o = commands.getstatusoutput(delcmd)
        if s != 0:
            raise error.TestFail("Delete snapshot '%s' failed: %s"
                                 % (sn_name, o))
def test_irodsFs_issue_2252(self):
    """Regression test (irods issue 2252): copying a large file into a
    fuse-mounted collection must make it visible to ls."""
    # =-=-=-=-=-=-=-
    # set up a fuse mount
    mount_point = "fuse_mount_point"
    if not os.path.isdir(mount_point):
        os.mkdir(mount_point)
    os.system("irodsFs " + mount_point)
    # Create a 100 MB zero-filled source file.
    largefilename = "big_file.txt"
    output = commands.getstatusoutput('dd if=/dev/zero of=' + largefilename + ' bs=1M count=100')
    # =-=-=-=-=-=-=-
    # use system copy to put some data into the mount mount
    # and verify that it shows up in the ils
    cmd = "cp ./" + largefilename + " ./" + mount_point + "; ls ./" + mount_point + "/" + largefilename
    output = commands.getstatusoutput(cmd)
    out_str = str(output)
    print("results[" + out_str + "]")
    os.system("rm ./" + largefilename)
    os.system("rm ./" + mount_point + "/" + largefilename)
    # tear down the fuse mount
    os.system("fusermount -uz " + mount_point)
    if os.path.isdir(mount_point):
        os.rmdir(mount_point)
    # The assertion runs after cleanup so the mount is removed either way.
    assert(-1 != out_str.find(largefilename))
def test(path_data):
    """Run sct_propseg on the t2 test image and, when it succeeds, check
    the produced segmentation against the reference one via the Dice
    coefficient (status 99 when below threshold)."""
    # parameters
    folder_data = 't2/'
    file_data = ['t2.nii.gz', 't2_seg.nii.gz']
    dice_threshold = 0.99
    # define command
    cmd = ('sct_propseg -i ' + path_data + folder_data + file_data[0]
           + ' -c t2'
           + ' -mesh'
           + ' -cross'
           + ' -centerline-binary'
           + ' -v 1')
    # run command
    status, output = commands.getstatusoutput(cmd)
    # if command ran without error, test integrity
    if status == 0:
        # compute dice coefficient between generated image and image from database
        cmd = 'sct_dice_coefficient -i ' + path_data + folder_data + file_data[1] + ' -d ' + file_data[1]
        status, output = commands.getstatusoutput(cmd)
        # parse output and compare to acceptable threshold
        dice = float(output.split('3D Dice coefficient = ')[1].split('\n')[0])
        if dice < dice_threshold:
            status = 99
    return status, output
def _create_thumbnail(self, path, second=30):
    """
    Create thumbnail from movie file.

    Returns thumbnail creation succeed or failed.
    """
    thumbnail_dir = os.path.dirname(path)
    if not os.path.exists(thumbnail_dir):
        os.makedirs(thumbnail_dir)
    # Copy the movie into a temp file with the right extension so ffmpeg
    # can detect the container format.
    f = open(self.file.path, "rb")
    tmp = tempfile.NamedTemporaryFile(suffix=".%s" % self.extension, delete=False)
    tmp.write(f.read())
    tmp.close()
    f.close()
    # Only the *name* of this temp file is used as ffmpeg's output path.
    thumbnail = tempfile.NamedTemporaryFile()
    thumbnail.close()
    ffmpeg = """ffmpeg -ss %(sec)s -vframes 1 -i "%(movie)s" -f image2 "%(output)s" """
    kwargs = {
        "sec": second,
        "movie": tmp.name,
        "width": self.width,
        "height": self.height,
        "output": thumbnail.name,
    }
    try:
        # Walk backwards from `second` in 10s steps: short clips may have
        # no frame at the requested offset.
        for sec in xrange(0, second, 10):
            kwargs["sec"] = second - sec
            try:
                commands.getstatusoutput(ffmpeg % kwargs)[0]
            except:
                return False
            if os.path.exists(thumbnail.name):
                shutil.move(thumbnail.name, path)
                return True
        return False
    finally:
        # BUGFIX: the delete=False movie copy was never removed, leaking a
        # full copy of the file on every call.
        if os.path.exists(tmp.name):
            os.unlink(tmp.name)
int(x) - 1 for x in options.cols1.split(',') ] chr_col_2, position_col_2, forward_col_2, reverse_col_2 = [ int(x) - 1 for x in options.cols2.split(',') ] in_fname, out_fname = args except: doc_optparse.exception() # Sort through a tempfile first temp_file = tempfile.NamedTemporaryFile(mode="r") environ['LC_ALL'] = 'POSIX' commandline = "sort -f -n -k %d -k %d -k %d -o %s %s" % ( chr_col_1 + 1, start_col_1 + 1, end_col_1 + 1, temp_file.name, in_fname) errorcode, stdout = commands.getstatusoutput(commandline) coverage = CoverageWriter( out_stream=open(out_fname, "a"), chromCol=chr_col_2, positionCol=position_col_2, forwardCol=forward_col_2, reverseCol=reverse_col_2, ) temp_file.seek(0) interval = io.NiceReaderWrapper(temp_file, chrom_col=chr_col_1, start_col=start_col_1, end_col=end_col_1, strand_col=strand_col_1, fix_strand=True)
sampling_period = int( config_section_map("PyConnect", logger_main)['sampling_period']) reconnection = int( config_section_map("PyConnect", logger_main)['reconnection']) if ssl == 'True': ssl = True else: ssl = False logger_main.info('Reading Configuration File. Ok.') except Exception as e: logger_main.error('Reading Configuration File. Error: %s', e) CONN = False while CONN == False: args_thread = Queue() idps = commands.getstatusoutput( 'python -m serial.tools.list_ports | grep "USB" | cut -c 12') if idps[0] == 0: ports = str(idps[1]).split('\n') if ((len(ports) == 1) or ('No' in ports)): print 'Error: no hay puertos serie RS232 conectados.' logger_main.error('Error: Not Ports Serie RS232 Found.') else: CONN = True print 'Ok: se han reconocidos puertos conectados.' logger_main.info('Ok. Ports Found.') N_THREADS = amout_threads threads = [] for i in range(N_THREADS): args_thread.put(i + 1) args_thread.put(host) args_thread.put(port)
def systemd_unit_dir():
    """Return systemd's system unit directory as reported by pkg-config,
    or None when systemd is not available."""
    status, output = commands.getstatusoutput(
        'pkg-config --variable systemdsystemunitdir systemd')
    if not status and output:
        return output.strip()
    return None  # systemd not found
def systemd_tmpfiles_dir():
    """Return systemd's tmpfiles.d directory, or None when systemd is absent."""
    # There is no dedicated pkg-config variable for tmpfiles.d, so derive
    # the location from systemd's installation prefix instead.
    rc, prefix = commands.getstatusoutput('pkg-config --variable prefix systemd')
    if rc == 0 and prefix:
        return prefix.strip() + '/lib/tmpfiles.d'
    # systemd not found.
    return None
def ptx_next_state(self, state):
    """Compute the next architectural state for one PTX instruction.

    Decodes the instruction at state['pc'], and for supported opcodes
    (ADD/SUB/MUL/MOV/BRA/BAR/LD/ST) either updates the state locally
    (BRA/BAR/ST) or builds a small PTX probe program, submits it to a
    Slurm cluster, and reads the result back from the slurm-<id>.out
    file (LD/MOV/ALU ops).  Unsupported opcodes just advance the pc.

    NOTE(review): structure below is reconstructed from collapsed
    source; nesting of some branches is inferred — verify against the
    original file.
    """
    # --- fetch and decode -------------------------------------------------
    mem = state['mem']
    pc = state['pc']
    instruction = mem[pc / 4]  # word-addressed instruction memory
    print instruction
    #pc += 4
    #state['pc'] = pc
    opcode = self.OPCODE_MASK & instruction
    opcode = opcode >> instruction_format.OPCODE_BIT_BOT
    dst = self.DST_MASK & instruction
    dst = dst >> instruction_format.DST_BIT_BOT
    src0 = self.SRC0_MASK & instruction
    src0 = src0 >> instruction_format.SRC0_BIT_BOT
    src1 = self.SRC1_MASK & instruction
    src1 = src1 >> instruction_format.SRC1_BIT_BOT
    base = self.BASE_MASK & instruction
    base = base >> instruction_format.BASE_BIT_BOT
    pred = self.P_REG_MASK & instruction
    pred = pred >> self.P_REG_BIT
    bra = (self.BRA_MASK & instruction) >> instruction_format.IMM_BIT_BOT
    # --- load register books (pickled lists of PTX register names) -------
    # test_program accumulates 'mov' instructions that seed every general
    # register with its current simulated value.
    test_program = []
    general_reg_book_file = 'general_reg_book'
    general_reg_book_obj = open(general_reg_book_file)
    general_reg_book = pickle.load(general_reg_book_obj)
    for general_reg in general_reg_book:
        test_program.append('mov.s32 ' + general_reg + ',' + str(state[general_reg]) + '; ')
    reg_book_file = 'reg_book'
    reg_book_obj = open(reg_book_file, 'r')
    reg_book = pickle.load(reg_book_obj)
    instruction_book_file = 'instruction_book'
    instruction_book_obj = open(instruction_book_file, 'r')
    instruction_book = instruction_book_obj.readlines()
    long_int_reg_book_file = 'long_int_reg_book'
    long_int_reg_book_obj = open(long_int_reg_book_file, 'r')
    long_int_reg_book = pickle.load(long_int_reg_book_obj)
    long_int_reg_book_obj.close()
    # reg_book from the pickle is discarded and rebuilt from the other two.
    reg_book = general_reg_book + long_int_reg_book
    # Unsupported opcode: treat as a no-op and advance the pc.
    #if ((opcode != self.OPCODE_MUL) & (opcode != self.OPCODE_ADD) & (opcode != self.OPCODE_SUB)):
    if ((opcode != self.OPCODE_ADD) & (opcode != self.OPCODE_SUB) &
            (opcode != self.OPCODE_BRA) & (opcode != self.OPCODE_BAR) &
            (opcode != self.OPCODE_LD) & (opcode != self.OPCODE_ST) &
            (opcode != self.OPCODE_MOV) & (opcode != self.OPCODE_MUL)):
        state['pc'] = state['pc'] + 4
        return state
    # --- BRA: (optionally predicated) relative branch ---------------------
    if (opcode == self.OPCODE_BRA):
        if base:
            # Predicated branch: take it only if the predicate register is
            # non-zero.
            if pred >= len(reg_book):
                # NOTE(review): `status` is undefined here — this raises
                # NameError; almost certainly meant `return state`.
                return status
            pred_reg_text = reg_book[pred]
            if pred_reg_text not in general_reg_book:
                # NOTE(review): same undefined-name bug as above.
                return status
            pred_reg_data = state[pred_reg_text]
            if pred_reg_data:
                pc += bra
                state['pc'] = pc
        else:
            # Unconditional branch.
            pc += bra
            state['pc'] = pc
        return state
    # Dead code kept as a string literal: an older BAR implementation that
    # ran the barrier state machine on the cluster.
    '''
    if (opcode == self.OPCODE_BAR):
        bar_state = state['bar_state']
        bar_spec = ptxILA.barSpec();
        bar_counter_enter = state['bar_counter_enter']
        bar_counter_exit = state['bar_counter_exit']
        if (bar_state == bar_spec.BAR_FINISH):
            state['pc'] = state['pc'] + 4
        sim_program_line = ''
        sim_program_line += 'mov.u32 %r1, ' + str(bar_state) + ';'
        sim_program_line += 'mov.u32 %r23, ' + str(bar_counter_enter) + ';'
        sim_program_line += 'mov.u32 %r24, ' + str(bar_counter_exit) + '; \n'
        example_sim_program_file = 'tbar.ptx'
        example_sim_program_obj = open(example_sim_program_file, 'r')
        example_sim_program = example_sim_program_obj.readlines()
        sim_program = []
        bar_program_hole = 42
        for i in range(len(example_sim_program)):
            if i == bar_program_hole:
                sim_program.append(sim_program_line)
            else:
                sim_program.append(example_sim_program[i])
        example_sim_program_obj.close()
        sim_program_file = 'tbar.ptx'
        sim_program_obj = open(sim_program_file, 'w')
        for sim_line in sim_program:
            sim_program_obj.write(sim_line)
        sim_program_obj.close()
        (status, output) = commands.getstatusoutput('./dryrun_bar.out')
        print status
        print output
        (status, output) = commands.getstatusoutput('sbatch parallel_bar.cmd')
        print status
        print output
        output_word = output.split()
        taskTag = output_word[3]
        time.sleep(5)
        (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
        while(status == 256):
            time.sleep(5)
            (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
        [bar_state, bar_counter_enter, bar_counter_exit] = output.split()
        bar_state = int(bar_state)
        bar_counter_enter = int(bar_counter_enter)
        bar_counter_exit = int(bar_counter_exit)
        if (bar_counter_enter < 0):
            bar_counter_enter = -bar_counter_enter
            bar_counter_enter = (1<<31) - bar_counter_enter + (1<<31)
        if (bar_counter_exit < 0):
            bar_counter_exit = -bar_counter_exit
            bar_counter_exit = (1<<31) - bar_counter_exit + (1<<31)
        state['bar_state'] = bar_state
        state['bar_counter_enter'] = bar_counter_enter
        state['bar_counter_exit'] = bar_counter_exit
        return state
    '''
    # --- BAR: local barrier state machine ---------------------------------
    # INIT -> ENTER -> WAIT -> EXIT -> FINISH; counters track how many
    # threads have entered/left the barrier (THREAD_NUM total).
    if (opcode == self.OPCODE_BAR):
        bar_state = state['bar_state']
        #bar_counter_enter = state['bar_counter_enter']
        #bar_counter_exit = state['bar_counter_exit']
        bar_spec = ptxILA.barSpec()
        bar_counter_enter = state['bar_counter_enter']
        bar_counter_exit = state['bar_counter_exit']
        if bar_state == bar_spec.BAR_INIT:
            bar_state = bar_spec.BAR_ENTER
        elif bar_state == bar_spec.BAR_FINISH:
            # Barrier complete for this thread: advance past the BAR.
            state['pc'] = state['pc'] + 4
            bar_state = bar_spec.BAR_INIT
        elif bar_state == bar_spec.BAR_ENTER:
            if (bar_counter_exit == 0):
                bar_counter_enter = bar_counter_enter + 1
                if bar_counter_enter == bar_spec.THREAD_NUM:
                    # Last thread in: release everyone.
                    bar_state = bar_spec.BAR_EXIT
                    bar_counter_exit = bar_spec.THREAD_NUM
                else:
                    if bar_counter_enter > bar_spec.THREAD_NUM:
                        state['bar_state'] = bar_spec.BAR_WAIT
                        return state
                    bar_state = bar_spec.BAR_WAIT
        elif bar_state == bar_spec.BAR_WAIT:
            if bar_counter_enter == bar_spec.THREAD_NUM:
                bar_state = bar_spec.BAR_EXIT
        elif bar_state == bar_spec.BAR_EXIT:
            bar_counter_exit -= 1
            bar_state = bar_spec.BAR_FINISH
            if bar_counter_exit < 0:
                state['bar_state'] = bar_spec.BAR_FINISH
                # Encode the negative counter as an unsigned two's-complement
                # value in BAR_COUNTER_EXIT_BITS bits.
                if (bar_counter_exit < 0):
                    bar_counter_exit = -bar_counter_exit
                    bar_counter_exit = (
                        1 << (bar_spec.BAR_COUNTER_EXIT_BITS - 1)) - bar_counter_exit + (
                        1 << (bar_spec.BAR_COUNTER_EXIT - 1))
                state['bar_counter_exit'] = bar_counter_exit
                return state
            if bar_counter_exit == 0:
                # All threads have left: reset the entry counter.
                bar_counter_enter = 0
        state['bar_state'] = bar_state
        state['bar_counter_enter'] = bar_counter_enter
        state['bar_counter_exit'] = bar_counter_exit
        return state
    # Dead code kept as a string literal: micro-step variant of the barrier.
    '''
    if (bar_micro_flag):
        bar_counter_enter = state['bar_counter_enter']
        bar_counter_exit = state['bar_counter_exit']
        if bar_state == bar_spec.BAR_ENTER:
            if (bar_counter_exit == 0):
                bar_counter_enter = bar_counter_enter + 1
                if bar_counter_enter == bar_spec.THREAD_NUM:
                    bar_state = bar_spec.BAR_EXIT
                    bar_counter_exit = bar_spec.THREAD_NUM
            else:
                bar_state = bar_spec.BAR_WAIT
        elif bar_state == bar_spec.BAR_WAIT:
            if bar_counter_enter == bar_spec.THREAD_NUM:
                bar_state = bar_spec.BAR_EXIT
        elif bar_state == bar_spec.BAR_EXIT:
            bar_counter_exit -= 1
            bar_state = bar_spec.BAR_FINISH
            if bar_counter_exit == 0:
                bar_counter_enter = 0
        state['bar_state'] = bar_state
        state['bar_counter_enter'] = bar_counter_enter
        state['bar_counter_exit'] = bar_counter_exit
    else:
        if bar_state == bar_spec.BAR_INIT:
            bar_state = bar_spec.BAR_ENTER
        elif bar_state == bar_spec.BAR_FINISH:
            bar_state = bar_spec.BAR_INIT
            state['pc'] = state['pc'] + 4
        state['bar_state'] = bar_state
        #state['bar_counter_enter'] = bar_counter_enter
        #state['bar_counter_exit'] = bar_counter_exit
    return state
    '''
    # --- all remaining opcodes advance the pc -----------------------------
    pc = pc + 4
    state['pc'] = pc
    # Mnemonic text for this opcode, minus its trailing newline.
    op_text = instruction_book[opcode]
    op_text = op_text[:(len(op_text) - 1)]

    def find_addr(laddr):
        # Map a linear byte address to (kernel-parameter name, base address)
        # using the module-level mem_map table.
        for mem_key in mem_map.keys():
            if (mem_map[mem_key][1]) >= laddr:
                start_addr = mem_map[mem_key][0]
                dmem_name = mem_key
                return [dmem_name, start_addr]
        # NOTE(review): falls off the end (returns None) when no region
        # covers laddr — callers unpack the result, so that would raise.

    # --- LD: replay data memory on the cluster, then load one word --------
    if opcode == self.OPCODE_LD:
        mem = state['dmem']
        default = mem.default
        values = mem.values
        addr = []
        value = []
        # Collect every (byte address, value) pair currently in dmem.
        for (a, v) in values:
            addr.append(a * 4)
            value.append(v)
        dest_text = reg_book[dst]
        self.ldAddr = (self.ldIMM_MASK & instruction) >> instruction_format.IMM_BIT_BOT
        self.ldAddr = (self.ldAddr) << 2  # word index -> byte address
        print 'load_addr' + str(self.ldAddr)
        [dmem_name, start_addr] = find_addr(self.ldAddr)
        item = (self.ldAddr - start_addr) >> 2
        # pre_ld_program stores every known dmem value into global memory
        # so the probe kernel sees the simulated memory contents.
        pre_ld_program = ''
        pre_ld_program += '.reg .b64 %r_sim_ld<3>; .reg .b32 %r_ssim_ld;'
        for i in range(len(addr)):
            pre_addr = addr[i]
            [pre_mem_name, pre_start_addr] = find_addr(pre_addr)
            pre_item = (pre_addr - pre_start_addr) >> 2
            pre_ld_program += 'ld.param.u64 %r_sim_ld1, [' + pre_mem_name + ']; '
            pre_ld_program += 'cvta.to.global.u64 %r_sim_ld2, %r_sim_ld1; '
            pre_ld_program += 'mov.u32 %r_ssim_ld, ' + str(pre_item) + '; '
            pre_ld_program += 'mul.wide.s32 %r_sim_ld1, %r_ssim_ld, 4; '
            pre_ld_program += 'add.s64 %r_sim_ld2, %r_sim_ld1, %r_sim_ld2; '
            pre_ld_program += 'mov.u32 %r_ssim_ld, ' + str(value[i]) + '; '
            pre_ld_program += 'st.global.b32 [%r_sim_ld2], %r_ssim_ld; '
            pre_ld_program += '\n'
        # ld_program performs the actual load into the destination register
        # and mirrors it into %r9 (the register the harness prints).
        ld_program = ''
        ld_program += 'ld.param.u64 %r_sim_ld1, [' + dmem_name + ']; '
        ld_program += 'cvta.to.global.u64 %r_sim_ld2, %r_sim_ld1; '
        ld_program += 'mov.u32 %r_ssim_ld, ' + str(item) + '; '
        ld_program += 'mul.wide.s32 %r_sim_ld1, %r_ssim_ld, 4; '
        ld_program += 'add.s64 %r_sim_ld2, %r_sim_ld1, %r_sim_ld2; '
        ld_program += 'ld.global.b32 ' + dest_text + ',[%r_sim_ld2]; '
        ld_program += 'mov.s32 %r9, ' + dest_text + '; '
        # Splice the generated PTX into the template at the two "holes".
        example_sim_program_file = 't266.ptx'
        example_sim_program_obj = open(example_sim_program_file, 'r')
        example_sim_program = example_sim_program_obj.readlines()
        sim_program = []
        # Prefix the register-seeding moves onto the load sequence.
        for test_program_line in test_program:
            ld_program = test_program_line + ld_program
        for i in range(len(example_sim_program)):
            if i == self.EXAMPLE_PROGRAM_HOLE:
                sim_program.append(ld_program + '\n')
            elif i == self.PRE_LD_HOLE:
                sim_program.append(pre_ld_program)
            else:
                sim_program.append(example_sim_program[i])
        example_sim_program_obj.close()
        sim_program_file = 't266.ptx'
        sim_program_obj = open(sim_program_file, 'w')
        for sim_program_line in sim_program:
            sim_program_obj.write(sim_program_line)
        sim_program_obj.close()
        # Compile/dry-run, submit to Slurm, then poll for the output file
        # (cat exits with status 256 while the file does not exist yet).
        (status, output) = commands.getstatusoutput('./dryrun.out')
        print status
        print output
        (status, output) = commands.getstatusoutput('sbatch parallel.cmd')
        print status
        print output
        output_word = output.split()
        taskTag = output_word[3]  # job id from "Submitted batch job <id>"
        time.sleep(5)
        (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
        while (status == 256):
            time.sleep(5)
            (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
        poutput = int(output)
        # Re-encode negative results as unsigned REG_BITS-wide values.
        if (poutput < 0):
            poutput = -poutput
            poutput = (1 << (instruction_format.REG_BITS - 1)) - poutput + (
                1 << (instruction_format.REG_BITS - 1))
        print 'poutput: ' + str(poutput)
        state[dest_text] = poutput
        # Address never written: the load yields dmem's default value.
        if self.ldAddr not in addr:
            state[dest_text] = default
        return state
    # --- ST: store a register into a copy of data memory ------------------
    if opcode == self.OPCODE_ST:
        dmem = state['dmem']
        dest_text = reg_book[dst]
        st_value = state[dest_text]
        self.stAddr = (self.stIMM_MASK & instruction) >> instruction_format.IMM_BIT_BOT
        self.stAddr = self.stAddr << 2
        print 'store_addr' + str(self.stAddr)
        # Rebuild the memory object rather than mutating it in place.
        outMem = ila.MemValues(instruction_format.MEM_ADDRESS_BITS,
                               instruction_format.DMEM_BITS, dmem.default)
        for (a, v) in dmem.values:
            outMem[a] = v
        outMem[self.stAddr] = st_value
        state['dmem'] = outMem
        return state
    # --- MOV: single-source op, evaluated on the cluster -------------------
    if (opcode == self.OPCODE_MOV):
        dst_text = reg_book[dst]
        if (src0 >= len(reg_book)) | (dst >= len(reg_book)):
            return state
        if dst_text not in general_reg_book:
            return state
        src0_text = reg_book[src0]
        if src0_text not in general_reg_book:
            return state
        if base:
            return state
        src0_data = state[src0_text]
        test_program.append(op_text + '.s32 ' + dst_text + ',' + src0_text + ';')
        test_program.append('mov.s32 %r9, ' + dst_text + ';')
        single_op_program = ''
        for t in test_program:
            single_op_program += t
        single_op_program += '\n'
        example_sim_program_file = 't266.ptx'
        example_sim_program_obj = open(example_sim_program_file, 'r')
        example_sim_program = example_sim_program_obj.readlines()
        sim_program = []
        for i in range(len(example_sim_program)):
            if i == self.EXAMPLE_PROGRAM_HOLE:
                sim_program.append(single_op_program)
            else:
                sim_program.append(example_sim_program[i])
        example_sim_program_obj.close()
        # NOTE(review): rewrites the template file in place — the original
        # template is lost after the first run.
        sim_program_obj = open(example_sim_program_file, 'w')
        for sim_line in sim_program:
            sim_program_obj.write(sim_line)
        sim_program_obj.close()
        (status, output) = commands.getstatusoutput('./dryrun.out')
        print status
        print output
        (status, output) = commands.getstatusoutput('sbatch parallel.cmd')
        print status
        print output
        output_word = output.split()
        taskTag = output_word[3]
        time.sleep(5)
        (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
        while (status == 256):
            time.sleep(5)
            (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
        poutput = int(output)
        if (poutput < 0):
            poutput = -poutput
            poutput = (1 << (instruction_format.REG_BITS - 1)) - poutput + (
                1 << (instruction_format.REG_BITS - 1))
        nxt_state = poutput
        state[dst_text] = nxt_state
        return state
    # --- ADD/SUB/MUL: two-source ALU op, evaluated on the cluster ----------
    dst_text = reg_book[dst]
    if (src0 >= len(reg_book)) | (src1 >= len(reg_book)) | (dst >= len(reg_book)):
        return state
    if dst_text not in reg_book:
        return state
    src0_text = reg_book[src0]
    src1_text = reg_book[src1]
    if (src0_text not in reg_book) | (src1_text not in reg_book):
        return state
    if (base):
        return state
    src0_data = state[src0_text]
    src1_data = state[src1_text]
    test_program.append(op_text + '.s32 ' + dst_text + ', ' + src0_text + ', ' + src1_text + ';')
    print(op_text + ' ' + dst_text + ', ' + src0_text + ', ' + src1_text)
    if dst_text in general_reg_book:
        test_program.append('mov.s32 %r9, ' + dst_text + ';')
    print test_program[-1]
    example_sim_program_file = 't266.ptx'
    example_sim_program_obj = open(example_sim_program_file, 'r')
    example_sim_program = example_sim_program_obj.readlines()
    sim_program = []
    sim_program_first_part = 0
    for i in range(len(example_sim_program)):
        if i != self.EXAMPLE_PROGRAM_HOLE:
            sim_program.append(example_sim_program[i])
        elif i == (self.EXAMPLE_PROGRAM_HOLE + 1):
            # NOTE(review): unreachable — this elif can only be tested when
            # i == EXAMPLE_PROGRAM_HOLE, so it never equals HOLE + 1.
            if dst_text not in general_reg_book:
                sim_program.append('st.local.u32 [%rd8], %rd0;\n')
            else:
                sim_program.append('st.local.u32 [%rd8], %r9;\n')
        else:
            sim_program += test_program
            sim_program += '\n'
    example_sim_program_obj.close()
    sim_program_obj = open(example_sim_program_file, 'w')
    for sim_line in sim_program:
        sim_program_obj.write(sim_line)
    sim_program_obj.close()
    (status, output) = commands.getstatusoutput('./dryrun.out')
    print status
    print output
    (status, output) = commands.getstatusoutput('sbatch parallel.cmd')
    print status
    print output
    output_word = output.split()
    taskTag = output_word[3]
    time.sleep(5)
    (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
    while (status == 256):
        time.sleep(5)
        (status, output) = commands.getstatusoutput('cat slurm-' + taskTag + '.out')
    poutput = int(output)
    if (poutput < 0):
        poutput = -poutput
        poutput = (1 << (instruction_format.REG_BITS - 1)) - poutput + (
            1 << (instruction_format.REG_BITS - 1))
    nxt_state = poutput
    # Clean up the Slurm job's scratch and output files.
    (status, output) = commands.getstatusoutput('rm a_dlin*')
    (status, output) = commands.getstatusoutput('rm ' + 'slurm-' + taskTag + '.out')
    state[dst_text] = nxt_state
    return state
print("Gain payload computing configuration") print(" firstRun = " + str(firstRun)) print(" lastRun = " + str(lastRun)) print(" publish = " + str(publish)) print(" usePCL = " + str(usePCL)) print(" calMode = " + calMode) print(" DQM_dir = " + DQM_dir) print() #go to parent directory = test directory os.chdir("..") #identify last run of the previous calibration if (firstRun <= 0): out = commands.getstatusoutput( "ls /afs/cern.ch/cms/tracker/sistrvalidation/WWW/CalibrationValidation/ParticleGain/ | grep Run_ | tail -n 1" ) firstRun = int(out[1].split('_')[3]) + 1 print("firstRun set to " + str(firstRun)) print() initEnv = 'cd ' + os.getcwd() + ';' initEnv += 'source /afs/cern.ch/cms/cmsset_default.sh' + ';' initEnv += 'eval `scramv1 runtime -sh`' + ';' #Get List of Files to process: NTotalEvents = 0 run = 0 FileList = "" dataCertInfo = dataCert.get()
elif args.destination[1].startswith("^"): dst_id = mapf[args.destination[0]][args.destination[1] [1:]] + INTER_CONST else: dst_id = mapf[args.destination[0]][args.destination[1]] + OUTPORT_CONST elif args.destination: dst_id = mapf[args.destination[0]][args.destination[1]] + OUTPORT_CONST else: dst_id = "" if (args.hop_count): command = command + " -c " + str(args.hop_count) command = command + " " + str(src_id) + " " + str(dst_id) (stat, res) = commands.getstatusoutput(command) print command lines = res.split("\n") str_rules_table = [] final_hs = "" count = 0 for line in lines: line = line.strip() if args.verbose and line.startswith("->"): p1 = line.find("Port:") p2 = line.find("Rules:") if p1 != -1: if p2 == -1: port = inv_mapf[int(line[p1 + 5:].strip())] str_rules_table.append([port]) else:
global BUILD_PARAMETERS (BUILD_PARAMETERS, args) = opts_parser.parse_args() except Exception as e: print "Got wrong options: %s, exit ..." % e sys.exit(1) if not BUILD_PARAMETERS.pkgmode: print "Please add the -m parameter for the pkgmode" sys.exit(1) elif BUILD_PARAMETERS.pkgmode and not BUILD_PARAMETERS.pkgmode in PKG_MODES: print "Wrong pkg-mode, only support: %s, exit ..." % PKG_MODES sys.exit(1) if os.path.exists("cordovaPackage"): os.system("rm -rf cordovaPackage") if BUILD_PARAMETERS.pkgmode == "shared": os.system("./bin/create cordovaPackage com.example.cordovaPackage1 CordovaPackage --xwalk-shared-library") else: os.system("./bin/create cordovaPackage com.example.cordovaPackage1 CordovaPackage") os.chdir("./cordovaPackage") os.system("./cordova/build") os.system("./cordova/run") lsstatus = commands.getstatusoutput("ls ./out/CordovaPackage*.apk") if lsstatus[0] == 0: print "Build Package Successfully" else: print "Build Package Error" pmstatus = commands.getstatusoutput("adb shell pm list packages |grep com.example.cordovaPackage1") if pmstatus[0] == 0: print "Package Name Consistent" else: print "Package Name Inconsistent"
def run_command(command):
    """Run *command* through the shell and return its output as a list of lines.

    Raises:
        Exception: when the command exits with a non-zero status.
    """
    ret_code, output = commands.getstatusoutput(command)
    # BUG FIX: getstatusoutput() returns the raw wait status (exit code 1
    # shows up as 256), so the original `ret_code == 1` test almost never
    # matched and most failures were silently ignored.  Any non-zero
    # status is a failure.
    if ret_code != 0:
        raise Exception("FAILED: %s" % command)
    return output.splitlines()
import commands
import os

if __name__ == "__main__":
    # Ticker symbols to collect data for; add more stocks to watch in here.
    watch_list = {
        #'facebook' : 'FB:US',
        'capitalone' : 'COF:US',
        'apple' : 'AAPL:US',
        'tesaro' : 'TSRO:US',
        'tesla' : 'TSLA:US',
        'pfizer' : 'PFE:US',
        # BUG FIX: the comma after 'BABA:US' was missing, which made the
        # dict literal invalid ('BABA:US' 'nintendo' would otherwise be
        # adjacent string literals followed by a stray colon).
        'alibaba' : 'BABA:US',
        'nintendo' : '7974:JP'
    }
    # Output directories for the scraped data and the generated plots.
    dirs = ['../Bloomberg/', '../Twitter/', '../Graphics/']
    for dir in dirs:
        if not os.path.exists(dir):
            commands.getstatusoutput('mkdir '+dir)
    # lprd := duration twitter being listened to in [Seconds]
    lprd = 1200
    blmbg.collect_stockdata (watch_list, dirs[0])
    twttr.collect_rumors (watch_list, lprd, dirs[1])
    smplt.stock_plot (watch_list,dirs[2],dirs[0])
    smplt.twitter_hist(watch_list,dirs[2],dirs[1])
    print ("... normal end of program")
# this function (in buildtools.py) generates the makefile # it's currently a bit ugly but it'll get cleaned up soon if not os.environ.get('EESKIPBUILDMAKEFILE'): print "build.py: Making the makefile" buildMakefile(CTX) if os.environ.get('EEONLYBUILDMAKEFILE'): sys.exit() ############################################################################### # RUN THE MAKEFILE ############################################################################### numHardwareThreads = 4 if CTX.PLATFORM == "Darwin": numHardwareThreads = 0 output = commands.getstatusoutput("sysctl hw.ncpu") numHardwareThreads = int(string.strip(string.split(output[1])[1])) elif CTX.PLATFORM == "Linux": numHardwareThreads = 0 for line in open('/proc/cpuinfo').readlines(): name_value = map(string.strip, string.split(line, ':', 1)) if len(name_value) != 2: continue name, value = name_value if name == "processor": numHardwareThreads = numHardwareThreads + 1 retval = os.system("make --directory=%s -j%d" % (CTX.OUTPUT_PREFIX, numHardwareThreads)) if retval != 0: sys.exit(-1)
<td><b>Enter size of Basket:</b></td> <td><input type="text" name="size" value=""/> </td></tr> <tr> <td><b>Enter Basket name:</b></td> <td><input type="text" name="obj_n" value=""/></td></tr> <br /> <tr> <td><input class="but" type="submit" value="Launch"/></td></tr> </table> </form> </div> </body> </html> ''' user = commands.getstatusoutput('cat /var/www/html/user_log') conf = cgi.FormContent() size = conf['size'][0] name = conf['obj_n'][0] commands.getstatusoutput('sudo lvcreate --size {} --name {} /dev/cloud'.format( size, name)) commands.getstatusoutput('sleep 3') commands.getstatusoutput('sudo mkfs.ext4 /dev/cloud/{}'.format(name)) commands.getstatusoutput('sleep 3') commands.getstatusoutput('sudo mkdir /media/{}'.format(name)) commands.getstatusoutput('sudo mount /dev/cloud/{0} /media/{1}'.format( name, name)) commands.getstatusoutput('sleep 3') commands.getstatusoutput( 'sudo echo "/media/{} *(rw,no_root_squash)" >> /etc/exports'.format(
def ensure_system(cmd):
    """Run *cmd* through the shell; raise RuntimeError on a non-zero status."""
    rc, out = commands.getstatusoutput(cmd)
    if not rc:
        return
    raise RuntimeError('Failed to run command: %s. rc=%d. output: %s' % (cmd, rc, out))
def vars_def():
    """Parse command-line flags and populate the module-level configuration
    globals for the zabbix-agentd installer.

    Recognised flags: -n <public|internal> (network), -s <server> (access
    server), -O (offline mode).  Prints help and exits when -n or -s is
    missing or invalid.
    """
    global network_ack
    global network_opt
    global acc_server_ack
    global acc_server_opt
    global offline_ack
    global offline_opt
    global zabbix_agentd_user
    global zabbix_agentd_dlfilename
    global zabbix_agentd_install_dir
    global zabbix_agentd_install_path
    global zabbix_agentd_daemon_path
    global zabbix_crond_policy
    global zabbix_agentd_cfgfile_path
    global zabbix_agentd_dlurl
    global zabbix_agentd_cfgfile_hostname
    global zabbix_agentd_cfgfile_sourceip
    global zabbix_agentd_cfgfile_serverip
    # Minimal hand-rolled flag scan: each option's value is the next argv
    # element (no validation that it exists).
    OptionList = sys.argv[1:]
    for num in range(0, len(OptionList)):
        if OptionList[num] == '-n':
            network_ack = True
            network_opt = OptionList[num + 1]
        elif OptionList[num] == '-s':
            acc_server_ack = True
            acc_server_opt = OptionList[num + 1]
        elif OptionList[num] == '-O':
            offline_ack = True
            offline_opt = 'offline'
    # -n is mandatory; pick the download URL by network zone.
    # NOTE(review): both zones currently resolve to the same placeholder URL.
    if network_ack is True:
        if network_opt == 'public':
            zabbix_agentd_dlurl = (
                'http://mirrors.163.com/centos/7/isos/x86_64/0_README.txt')
        elif network_opt == 'internal':
            zabbix_agentd_dlurl = (
                'http://mirrors.163.com/centos/7/isos/x86_64/0_README.txt')
        else:
            HelpInfo()
            sys.exit()
    else:
        HelpInfo()
        sys.exit()
    # -s is mandatory and must carry a value.
    if acc_server_ack is True:
        if acc_server_opt is not None:
            pass
        else:
            HelpInfo()
            sys.exit()
    else:
        HelpInfo()
        sys.exit()
    # Static install-layout configuration derived from the invoking user's
    # home directory.
    zabbix_agentd_user = '******'
    zabbix_agentd_dlfilename = 'zabbix_agentd_static.tar.gz'
    zabbix_agentd_install_dir = commands.getstatusoutput('echo ~')[1]
    zabbix_agentd_install_path = zabbix_agentd_install_dir + '/zabbix_agentd'
    zabbix_agentd_daemon_path = (zabbix_agentd_install_path +
                                 '/zabbix_agentd_daemon.sh')
    # Cron entry that keeps the agent daemon alive (every 10 minutes).
    zabbix_crond_policy = ('*/10 * * * * /bin/sh ' + zabbix_agentd_daemon_path +
                           ' 2>&1 >/dev/null')
    zabbix_agentd_cfgfile_path = (zabbix_agentd_install_path +
                                  '/etc/zabbix_agentd.conf')
    # Placeholder tokens substituted into the config file later.
    zabbix_agentd_cfgfile_hostname = '%change_hostname%'
    zabbix_agentd_cfgfile_sourceip = '%change_sourceip%'
    zabbix_agentd_cfgfile_serverip = '%change_serverip%'
# Build the shell command that will run AutoDock with an unlimited stack.
cmd = '''ulimit -s unlimited ; ''' + AUTODOCK + ''' '''
# Partition argv into: URLs to fetch, our own binary path(s) to discard,
# and everything else, which is forwarded to AutoDock as arguments.
cmds = []
links = []
garbage = []
for thing in sys.argv:
    if thing.startswith("http"):
        links.append(thing)
    elif thing.startswith(CURRENTBIN):
        garbage.append(thing)
    else:
        cmds.append(thing)
# Download each referenced input file into the working directory.
for url in links:
    wCmd = CURL + url
    #print 'getting link ' + wCmd
    stat, out = commands.getstatusoutput(wCmd)
    # Drop any stray index.html produced by the fetch.
    # NOTE(review): nesting inferred from collapsed source — this cleanup
    # may originally sit after the loop rather than inside it.
    if (os.path.exists("index.html")):
        os.remove("index.html")
# Append the remaining argv entries as AutoDock arguments.
for arg in cmds:
    cmd += arg + ''' '''
#print 'running cmd ' + cmd
status, output = commands.getstatusoutput(cmd)
print output
def main():
    """Interactive front-end for the RNA-seq pipeline.

    Loops over: (1) pick a task sheet, (2) pick one or more functions,
    (3) submit/report/resubmit via Slurm, until the user exits.
    """
    # Get rid of queen modules that have finished running
    trash = imp.load_source("trash", "./processing_scripts/trash_collector.py")
    trash.main()
    # Queue wait time
    queue_time = 24
    # If true, will not submit sbatch and return pseudo data instead
    test = False
    # Load progress report module
    report = imp.load_source("report", "./processing_scripts/progress_report.py")
    resubmit = imp.load_source("resubmit", "./processing_scripts/resubmit.py")
    # Make sure all helper shell scripts are executable.
    os.popen("chmod +x ./processing_scripts/*.sh")
    # Banner.
    print ""
    print "********************************************************************************* "
    print "* __ __ __ * "
    print "* ________________ | | | | | | | | | ________________|\\ * "
    print "* | |__| | |__| | |__ |___| \\ * "
    print "* |________________ |\\ | | | | | ________________ / * "
    print "* | \\ | | |__ |__ | |/ * "
    print "* * "
    print "********************************************************************************* "
    print " - THE NORDLAB RNA-SEQ INTERACTIVE PIPELINE EXPERIMENT YIELDER - "
    print "\n\n"
    # Keeps user in the interface, until they choose to exit
    task_exit = False
    while not task_exit:
        # ---- STEP 1: choose a task sheet --------------------------------
        print " ------------------------------------------------------------------------------- "
        print "| STEP 1: PLEASE SELECT THE ID OF YOUR TASK FILE OR EXIT |"
        print " ------------------------------------------------------------------------------- "
        print ""
        # Display all task options
        input_files = os.listdir("./user_tasks")
        input_list = []
        for file in input_files:
            # Skip hidden files and the blank template.
            if file[0] != "." and file != "blank_task.txt":
                input_list.append(file)
        input_list.sort()
        print " ID Task Sheet"
        print " ------------------------------------------------------------------------------- "
        for i in range(0, len(input_list)):
            print " %i = %s" % (i, input_list[i].replace(".txt", ""))
        print ""
        print " exit"
        print " ------------------------------------------------------------------------------- "
        print ""
        task_input = raw_input(" >>> ")
        if task_input == "exit":
            sys.exit(" Bye for now!")
        # Error for non-numeric input
        try:
            int(task_input)
        except ValueError:
            sys.exit(" ERROR: Input is not ID Number.")
        task_input = int(task_input)
        # Error for numeric input not in range
        if task_input >= len(input_list):
            sys.exit(" ERROR: Input is not listed.")
        task_input_path = "./user_tasks/%s" % input_list[task_input]
        # Error for non-existing user input
        if not os.path.isfile(task_input_path):
            sys.exit(" ERROR: Inputted task name does not exist.")
        else:
            print " You selected the input file: %s" % input_list[
                task_input].replace(".txt", "")
        # ---- STEP 2: choose function(s) ---------------------------------
        function_input = ""
        while function_input != "exit":
            # Input function from user
            print "\n\n"
            print " ------------------------------------------------------------------------------- "
            print "| STEP 2: PLEASE SELECT A FUNCTION(S) OR EXIT |"
            print " ------------------------------------------------------------------------------- "
            print " Note: Users can perform functions sequentially with the '->' symbol."
            print " Example: get_slim -> align -> feature"
            print ""
            print " Function Description"
            print " ------------------------------------------------------------------------------- "
            # Display all functions from function table
            # paths.txt format: constructor-filename \t menu-name \t description,
            # with '#' lines treated as comments.
            function_files = open("./user_function_constructors/paths.txt", "r")
            function_options = dict()
            for line in function_files:
                if "#" not in line:
                    line = line.replace("\n", "")
                    data = line.split("\t")
                    name = data[0]
                    input_name = data[1]
                    description = data[2]
                    print " %s = %s" % (input_name, description)
                    function_options[input_name] = name
            function_files.close()
            print ""
            print " exit"
            print " ------------------------------------------------------------------------------- "
            print ""
            function_input = raw_input(" >>> ")
            if function_input == "exit":
                break
            # Parse and load function info
            function_input = function_input.replace(" ", "")
            function_names_list = function_input.split("->")
            function_path_list = []
            # Add time together from all jobs
            time_list = []
            print " You have chosen the following function(s):"
            for name in function_names_list:
                # Error for non-existing user input
                if name not in function_options:
                    sys.exit(
                        " ERROR: Inputted function name does not exist: %s" % name)
                function_input_path = "./user_function_constructors/%s" % function_options[
                    name]
                # Error for non-existing user input
                if not os.path.isfile(function_input_path):
                    sys.exit(
                        " ERROR: Incorrect function path. Please check the input path in ./user_function_constructors/paths.txt"
                    )
                function_path_list.append(function_input_path)
                print " %s" % name
                # Get TIME function from function constructors
                # (a '<TIME>hh:mm:ss' tag embedded in the constructor file).
                time_call = commands.getoutput("grep '<TIME>' %s" % function_input_path)
                time_call = time_call.split("<TIME>")[1]
                time_call = time_call.replace(" ", "")
                time_call = time_call.replace("\n", "")
                time_list.append(time_call)
                # Get AUTO CALLED function from function constructors
                auto_call = commands.getoutput("grep '<AUTO CALL>' %s" % function_input_path)
                auto_call = auto_call.split("<AUTO CALL>")[1]
                auto_call = auto_call.replace(" ", "")
                auto_call = auto_call.replace("\n", "")
                if auto_call != "":
                    auto_path = "./user_function_constructors/%s" % auto_call
                    # Error for non-existing autocall input
                    if not os.path.isfile(auto_path):
                        # NOTE(review): `path` is undefined at this point —
                        # this error branch would raise NameError; probably
                        # meant function_input_path.
                        sys.exit(
                            " ERROR: Incorrect <AUTO CALL> file name. Please check function constructor: %s"
                            % path.rsplit("/", 1)[1])
                    # Get AUTO CALLED function from function constructors
                    time_call = commands.getoutput("grep '<TIME>' %s" % auto_path)
                    time_call = time_call.split("<TIME>")[1]
                    time_call = time_call.replace(" ", "")
                    time_call = time_call.replace("\n", "")
                    time_list.append(time_call)
                    function_path_list.append(auto_path)
                    print " + AUTO CALLED: %s" % auto_call
            # ---- STEP 3: choose an action -------------------------------
            # Input function from user
            print "\n\n"
            print " ------------------------------------------------------------------------------- "
            print "| STEP 3: PLEASE CHOOSE AN ACTION OR EXIT |"
            print " ------------------------------------------------------------------------------- "
            print " Note: If you selected multiple functions using the '->' symbol, the only action"
            print " you may perform is 'submit'."
            print ""
            print " Action Description"
            print " ------------------------------------------------------------------------------- "
            print " submit = submit your chosen function/string of functions to the sbatch queue"
            if "->" not in function_input:
                print " report = check the progress of your chosen function"
                print " resubmit = resubmit a singular sbatch script from your chosen function"
            print ""
            print " exit"
            print " ------------------------------------------------------------------------------- "
            print ""
            action_input = raw_input(" >>> ")
            if action_input != "exit":
                # Go into report progress
                if action_input == "report" and "->" not in function_input:
                    report.main(
                        task_input_path, "./user_function_constructors/%s" %
                        function_options[function_input])
                elif action_input == "resubmit" and "->" not in function_input:
                    resubmit.main(
                        task_input_path, "./user_function_constructors/%s" %
                        function_options[function_input])
                elif action_input == "submit":
                    # Generate string input to give to queen
                    # ('+'-joined constructor paths, trailing '+' stripped).
                    function_string = ""
                    for path in function_path_list:
                        function_string += "%s+" % path
                    print ""
                    print " ------------------------------------------------------------------------------- "
                    print " Constructing your module ..."
                    print ""
                    # Add time together
                    total_secs = 0
                    for tm in time_list:
                        time_parts = [int(s) for s in tm.split(':')]
                        total_secs += (time_parts[0] * 60 +
                                       time_parts[1]) * 60 + time_parts[2]
                    total_secs, sec = divmod(total_secs, 60)
                    hr, m = divmod(total_secs, 60)
                    time_display = "%d:%02d:%02d" % (hr, m, sec)
                    # Pad the sbatch walltime with queue_time extra hours.
                    t = "%d:%02d:%02d" % (hr + queue_time, m, sec)
                    # Sumbit QUEEN module
                    cmd = "sbatch --time=%s ./processing_scripts/queen_submitter.sh %s %s" % (
                        t, task_input_path, function_string[:-1])
                    status = 0
                    ID = "Submitted job as 1738"  # pseudo response used in test mode
                    if not test:
                        status, ID = commands.getstatusoutput(cmd)
                    if status == 0:
                        # sbatch replies "Submitted batch job <id>".
                        ID_split = ID.split(" ")
                        ID = int(ID_split[3])
                        print " Your queen module has been submitted: %i" % ID
                        print " Estimated total run time: %s (+ time in queue)" % time_display
                    else:
                        sys.exit(" ERROR:\n%s" % ID)
                else:
                    sys.exit(" ERROR: Chosen action does not exist.")
                # Animated "returning" indicator before looping to step 2.
                print ""
                sys.stdout.write(" Returning to step 2 ")
                sys.stdout.flush()
                time.sleep(1)
                for i in range(0, 3):
                    sys.stdout.write(".")
                    sys.stdout.flush()
                    time.sleep(1)
                print ""
        # ---- choose another task sheet or quit --------------------------
        print ""
        print " ------------------------------------------------------------------------------- "
        print " Would you like to select a different dataset to work with? (y/n)\n"
        more_task = raw_input(" >>> ")
        if more_task == "y":
            task_exit = False
        elif more_task == "n":
            task_exit = True
        else:
            sys.exit(" Incorrect input. Exiting program...")
    print " Bye for now!"
import datetime
import imdb
import sys

#
# CODE
#

# Setup some vars
imdbScaper = imdb.IMDb()
discIndex = ""
movieName = ""

# Execute the info gathering
# Save output into /tmp/ for interpreting 3 or 4 lines later
commands.getstatusoutput('makemkvcon -r info > %s' % MKV_TEMP_OUTPUT)

# Open the info file from /tmp/
tempFile = open(MKV_TEMP_OUTPUT, 'r')

# Check to see if there is a disc in the system
#
# For every line in the output
# If the first 4 characters are DRV:
# Check to see if a device is specified
# If so, get the 1st and 6th element of the array
for line in tempFile.readlines():
    if line[:4] == "DRV:":
        if "/dev/" in line:
            # makemkvcon -r emits comma-separated drive records; field 0 is
            # "DRV:<index>", so strip the prefix to get the disc index.
            drive = line.split(',')
            discIndex = drive[0].replace("DRV:", "")
def __init__(self, name=None, imgname=None, keeptb=False, fakeplatform=None):
    """Set up a virtual-switch test bench.

    Either attaches to an existing named switch container (``name`` given)
    or spins up a fresh base container plus a switch container from
    ``imgname``. In both cases 32 VirtualServer peers are created and the
    redis socket mount is prepared.

    :param name: existing virtual-switch container to attach to, or None.
    :param imgname: docker image for a newly created switch container.
    :param keeptb: keep the test bench (skip cleanup) when True.
    :param fakeplatform: value for the fake_platform env var, if any.
    :raises NameError: kernel team module missing, or named container absent.
    """
    # Daemon groups expected inside the switch container.
    self.basicd = ['redis-server', 'rsyslogd']
    self.swssd = ['orchagent', 'intfmgrd', 'neighsyncd', 'portsyncd',
                  'vlanmgrd', 'vrfmgrd', 'portmgrd']
    self.syncd = ['syncd']
    self.rtd = ['fpmsyncd', 'zebra', 'staticd']
    self.teamd = ['teamsyncd', 'teammgrd']
    self.natd = ['natsyncd', 'natmgrd']
    self.alld = self.basicd + self.swssd + self.syncd + self.rtd + self.teamd + self.natd
    self.client = docker.from_env()
    self.appldb = None

    # BUG FIX: subprocess.check_call raises CalledProcessError on failure and
    # only ever *returns* 0, so the original `check_call(...) != 0` guard was
    # dead code. subprocess.call returns the exit status, making the guard live.
    if subprocess.call(["/sbin/modprobe", "team"]) != 0:
        raise NameError("cannot install kernel team module")

    self.ctn = None
    # Keep the test bench alive when requested; otherwise clean up on teardown.
    self.cleanup = not keeptb

    if name is not None:
        # Attach to an existing virtual switch container by name.
        for ctn in self.client.containers.list():
            if ctn.name == name:
                self.ctn = ctn
                (status, output) = commands.getstatusoutput(
                    "docker inspect --format '{{.HostConfig.NetworkMode}}' %s" % name)
                # NetworkMode is "container:<id>"; keep the id part.
                ctn_sw_id = output.split(':')[1]
                # Never clean up a pre-existing container we merely attached to.
                self.cleanup = False
        if self.ctn is None:
            raise NameError("cannot find container %s" % name)

        # Resolve the base (network-owning) container from its id.
        for ctn in self.client.containers.list():
            if ctn.id == ctn_sw_id or ctn.name == ctn_sw_id:
                ctn_sw_name = ctn.name

        (status, output) = commands.getstatusoutput(
            "docker inspect --format '{{.State.Pid}}' %s" % ctn_sw_name)
        self.ctn_sw_pid = int(output)

        # Create the 32 virtual servers attached to the base container netns.
        self.servers = []
        for i in range(32):
            server = VirtualServer(ctn_sw_name, self.ctn_sw_pid, i)
            self.servers.append(server)

        self.mount = "/var/run/redis-vs/{}".format(ctn_sw_name)

        self.net_cleanup()
        self.ctn_restart()
    else:
        # Fresh setup: base container provides the shared network namespace.
        self.ctn_sw = self.client.containers.run(
            'debian:jessie', privileged=True, detach=True,
            command="bash", stdin_open=True)
        (status, output) = commands.getstatusoutput(
            "docker inspect --format '{{.State.Pid}}' %s" % self.ctn_sw.name)
        self.ctn_sw_pid = int(output)

        # Create the 32 virtual servers attached to the base container netns.
        self.servers = []
        for i in range(32):
            server = VirtualServer(self.ctn_sw.name, self.ctn_sw_pid, i)
            self.servers.append(server)

        # Mount redis into a directory unique to this base container.
        self.mount = "/var/run/redis-vs/{}".format(self.ctn_sw.name)
        ensure_system("mkdir -p {}".format(self.mount))

        self.environment = ["fake_platform={}".format(fakeplatform)] if fakeplatform else []

        # Create the virtual switch container sharing the base container network.
        self.ctn = self.client.containers.run(
            imgname, privileged=True, detach=True,
            environment=self.environment,
            network_mode="container:%s" % self.ctn_sw.name,
            volumes={self.mount: {'bind': '/var/run/redis', 'mode': 'rw'}})

    self.redis_sock = self.mount + '/' + "redis.sock"
    self.check_ctn_status_and_db_connect()
def standalone_main():
    """Search PO.DAAC for granules of a dataset and download spatially
    subsetted copies via OPeNDAP, using curl or wget.

    Reads options (shortname, date range, bounding box, grid stride) from
    the command line; exits via sys.exit on any unrecoverable condition.
    """
    # get command line options:
    options=parseoptions()
    shortname = options.shortname
    date0 = options.date0
    if options.date1==-1:
        # Single-day query when no end date supplied.
        date1 = date0
    else:
        date1 = options.date1
    if len(date0) != 8:
        sys.exit('\nStart date should be in format yyyymmdd !\nProgram will exit now !\n')
    if len(date1) != 8:
        sys.exit('\nEnd date should be in format yyyymmdd !\nProgram will exit now !\n')

    # Split yyyymmdd into components for the web-service time filter.
    year0=date0[0:4]; month0=date0[4:6]; day0=date0[6:8];
    year1=date1[0:4]; month1=date1[4:6]; day1=date1[6:8];
    timeStr = '&startTime='+year0+'-'+month0+'-'+day0+'&endTime='+year1+'-'+month1+'-'+day1

    box = list( options.box )
    ig = options.gridpoints

    print ('\nPlease wait while program searching for the granules ...\n')

    # Build the granule-search URL. NOTE(review): in both branches the first
    # wsurl assignment (with bbox) is immediately overwritten by the second —
    # looks like a deliberate leftover/disabled variant; preserved as-is.
    if options.alltime:
        wsurl = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+'&bbox='+str(box[0])+','+str(box[2])+','+str(box[1])+','+str(box[3])+'&itemsPerPage=1&sortBy=timeAsc&format=atom'
        wsurl = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+'&itemsPerPage=1&sortBy=timeAsc&format=atom'
        #wsurl = PODAAC_WEB+'/ws/search/granule?shortName='+shortname+timeStr+'&bbox='+str(box[0])+','+str(box[2])+','+str(box[1])+','+str(box[3])+'&itemsPerPage=1&sortBy=timeAsc&format=atom'
    else:
        wsurl = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+timeStr+'&itemsPerPage=1&sortBy=timeAsc'
        #wsurl = PODAAC_WEB+'/ws/search/granule?shortName='+shortname+timeStr+'&itemsPerPage=1&sortBy=timeAsc'

    # Python 2/3 compatible URL fetch.
    if sys.version_info >= (3,0):
        response = urllib.request.urlopen(wsurl)
    else:
        response = urllib.urlopen(wsurl)
    data = response.read()

    if (len(data.splitlines()) == 1):
        sys.exit('No granules found for dataset: '+shortname+'\nProgram will exit now !\n')

    # Count OPeNDAP links in the Atom feed; keep the last href as a sample.
    numGranules = 0
    doc = minidom.parseString(data)
    for arrays in doc.getElementsByTagName('link'):
        names = arrays.getAttribute("title")
        if names == 'OPeNDAP URL':
            numGranules = numGranules + 1
            href = arrays.getAttribute("href")
            #if numGranules > 0:
            #  break
    if numGranules == 0 and len(data.splitlines()) < 30:
        sys.exit('No granules found for dataset: '+shortname+'\nProgram will exit now !\n')
    elif numGranules == 0 and len(data.splitlines()) > 30:
        sys.exit('No OpenDap access for dataset: '+shortname+'\nProgram will exit now !\n')

    # Inspect one granule's DDX metadata to learn grid shape and axis order.
    samplefile = href.rsplit( ".", 1 )[ 0 ] + '.ddx'

    variable_list = []
    lon_order = 'X'
    lat_order = 'Y'
    nt = 1 #time dimension
    nd = 1 #depth dimension
    if sys.version_info >= (3,0):
        doc = minidom.parse(urllib.request.urlopen(samplefile))
    else:
        doc = minidom.parse(urllib.urlopen(samplefile))

    # Grid elements: pick up lat/lon sizes (ni/nj), axis order, and the list
    # of data variables to subset.
    for arrays in doc.getElementsByTagName('Grid'):
        names = arrays.getAttribute("name")
        if names == 'lat' or names == 'latitude':
            for dimensions in arrays.getElementsByTagName("dimension"):
                size = dimensions.getAttribute("size")
                name = dimensions.getAttribute("name")
                ni = int(size)
            for attrs in arrays.getElementsByTagName("Attribute"):
                aname = attrs.getAttribute("name")
                if aname == 'axis':
                    for nodes in attrs.getElementsByTagName("value"):
                        for cn in nodes.childNodes:
                            lat_order = cn.nodeValue
        elif names == 'lon' or names == 'longitude':
            for dimensions in arrays.getElementsByTagName("dimension"):
                size = dimensions.getAttribute("size")
                name = dimensions.getAttribute("name")
                nj = int(size)
            for attrs in arrays.getElementsByTagName("Attribute"):
                aname = attrs.getAttribute("name")
                if aname == 'axis':
                    for nodes in attrs.getElementsByTagName("value"):
                        for cn in nodes.childNodes:
                            lon_order = cn.nodeValue
        else:
            variable_list.append(names)

    # Map elements: same lookups, plus optional time/depth dimensions.
    # ntime/ndepth act as existence flags probed via NameError below.
    for arrays in doc.getElementsByTagName('Map'):
        names = arrays.getAttribute("name")
        if names == 'lat' or names == 'latitude':
            for dimensions in arrays.getElementsByTagName("dimension"):
                size = dimensions.getAttribute("size")
                name = dimensions.getAttribute("name")
                ni = int(size)
            for attrs in arrays.getElementsByTagName("Attribute"):
                aname = attrs.getAttribute("name")
                if aname == 'axis':
                    for nodes in attrs.getElementsByTagName("value"):
                        for cn in nodes.childNodes:
                            lat_order = cn.nodeValue
        if names == 'lon' or names == 'longitude':
            for dimensions in arrays.getElementsByTagName("dimension"):
                size = dimensions.getAttribute("size")
                name = dimensions.getAttribute("name")
                nj = int(size)
            for attrs in arrays.getElementsByTagName("Attribute"):
                aname = attrs.getAttribute("name")
                if aname == 'axis':
                    for nodes in attrs.getElementsByTagName("value"):
                        for cn in nodes.childNodes:
                            lon_order = cn.nodeValue
        if names == 'time':
            ntime = 1
            for dimensions in arrays.getElementsByTagName("dimension"):
                size = dimensions.getAttribute("size")
                name = dimensions.getAttribute("name")
                nt = int(size)
        if names == 'depth':
            ndepth = 1
            for dimensions in arrays.getElementsByTagName("dimension"):
                size = dimensions.getAttribute("size")
                name = dimensions.getAttribute("name")
                nd = int(size)

    try:
        ni # does a exist in the current namespace
    except NameError:
        sys.exit('Granule file format may not be in netcdf or no latitude or longitude info for dataset: '+shortname+'\n')

    #****************************************************************************
    #*** Default southernmost_latitude, northernmost_latitude *******************
    #*** and westernmost_longitude, easternmost_longitude     *******************
    #****************************************************************************
    lat_sort = 'A'
    lon_sort = 'A'
    lat0 = -90.0
    lat1 = 90.0
    lon0 = -180.0
    lon1 = 180.0
    if "AQUARIUS" in shortname:
        # AQUARIUS products use a 0..360 longitude convention.
        lon0 = 0.0
        lon1 = 360.0
    if "MODIS" in shortname:
        # MODIS latitudes are sorted descending.
        lat_sort = 'D'
    #****************************************************************************
    # Override defaults with the dataset's global attributes when present.
    for arrays in doc.getElementsByTagName('Attribute'):
        names = arrays.getAttribute("name")
        if names == 'southernmost_latitude':
            for nodes in arrays.getElementsByTagName("value"):
                for cn in nodes.childNodes:
                    lat0 = float(cn.nodeValue)
        if names == 'northernmost_latitude':
            for nodes in arrays.getElementsByTagName("value"):
                for cn in nodes.childNodes:
                    lat1 = float(cn.nodeValue)
        if names == 'westernmost_longitude':
            for nodes in arrays.getElementsByTagName("value"):
                for cn in nodes.childNodes:
                    lon0 = float(cn.nodeValue)
        if names == 'easternmost_longitude':
            for nodes in arrays.getElementsByTagName("value"):
                for cn in nodes.childNodes:
                    lon1 = float(cn.nodeValue)

    # NOTE(review): lat0..lon1 always have defaults above, so these NameError
    # probes can never fire; preserved as-is.
    try:
        lat0 # does a exist in the current namespace
    except NameError:
        sys.exit('No southernmost_latitude info for dataset: '+shortname+'\n')
    try:
        lat1 # does a exist in the current namespace
    except NameError:
        sys.exit('No northernmost_latitude info for dataset: '+shortname+'\n')
    try:
        lon0 # does a exist in the current namespace
    except NameError:
        sys.exit('No westernmost_longitude info for dataset: '+shortname+'\n')
    try:
        lon1 # does a exist in the current namespace
    except NameError:
        sys.exit('No easternmost_longitude info for dataset: '+shortname+'\n')

    # Translate the user's bounding box into grid index ranges.
    nlon = nj
    nlat = ni
    dint_lon = (lon1-lon0)/float(nlon)
    dint_lat = (lat1-lat0)/float(nlat)
    [i0,i1]=boundingindex(lon_sort,nlon,lon0,dint_lon,nlon,box[0],box[1])
    [j0,j1]=boundingindex(lat_sort,nlat,lat0,dint_lat,nlat,box[2],box[3])
    if i0>i1 or j0>j1:
        sys.exit('No grid point in your domain box.')

    # modify the max grid indices, as necessary:
    if ig>1:
        i1=max(span(i0,i1,ig))
        j1=max(span(j0,j1,ig))

    #************************************************************************************
    # Choose the OPeNDAP hyperslab dimension order based on axis order and
    # which of time/depth dimensions exist (probed via NameError).
    if lon_order == 'X':
        try:
            ntime # does a exist in the current namespace
            order=[0,2,1]
            try:
                ndepth # does a exist in the current namespace
                order=[0,1,3,2]
            except NameError:
                order=[0,2,1]
        except NameError:
            order=[1,0]
    else:
        try:
            ntime # does a exist in the current namespace
            order=[0,1,2]
            try:
                ndepth # does a exist in the current namespace
                order=[0,1,2,3]
            except NameError:
                order=[0,1,2]
        except NameError:
            order=[0,1]

    #************************************************************************************
    # download size information:
    print (' ')
    print ('Longitude range: %f to %f'%(box[0],box[1]))
    print ('Latitude range: %f to %f'%(box[2],box[3]))
    print (' every %d pixel(s) is obtained'%(ig))
    print (' ')
    print ('grid dimensions will be ( %d x %d )'%(len(span(i0,i1,ig)),len(span(j0,j1,ig))))
    print (' ')
    if sys.version_info >= (3,0):
        r=input('OK to download? [yes or no]: ')
    else:
        r=raw_input('OK to download? [yes or no]: ')
    if len(r)==0 or (r[0]!='y' and r[0]!='Y'):
        print ('... no download')
        sys.exit(0)

    # Check if curl or wget commands exsit on your computer
    if sys.version_info >= (3,0):
        status_curl, result = subprocess.getstatusoutput('which curl')
        status_wget, result = subprocess.getstatusoutput('which wget')
    else:
        status_curl, result = commands.getstatusoutput("which curl")
        status_wget, result = commands.getstatusoutput("which wget")

    # form the index set for the command line:
    try:
        ntime # does a exist in the current namespace
        inx=[[0,1,nt-1],[i0,ig,i1],[j0,ig,j1]]
        try:
            ndepth # does a exist in the current namespace
            inx=[[0,1,nt-1],[0,1,nd-1],[i0,ig,i1],[j0,ig,j1]]
        except NameError:
            inx=[[0,1,nt-1],[i0,ig,i1],[j0,ig,j1]]
    except NameError:
        inx=[[i0,ig,i1],[j0,ig,j1]]
        try:
            ndepth # does a exist in the current namespace
            inx=[[0,1,nd-1],[i0,ig,i1],[j0,ig,j1]]
        except NameError:
            inx=[[i0,ig,i1],[j0,ig,j1]]

    # Build the "[start:stride:stop]..." hyperslab suffix in `order` order.
    index=''
    for i in order:
        index=index+'[%d:%d:%d]'%(inx[i][0],inx[i][1],inx[i][2])

    # main loop: page through search results (bmore is the next page number;
    # 0 means stop) and download a subset of every granule found.
    start = time.time()
    bmore = 1
    while (bmore > 0):
        if (bmore == 1):
            # First page: no startIndex. Same overwritten-bbox pattern as above.
            if options.alltime:
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+'&bbox='+str(box[0])+','+str(box[2])+','+str(box[1])+','+str(box[3])+'&itemsPerPage=%d&sortBy=timeAsc'%itemsPerPage
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+'&itemsPerPage=%d&sortBy=timeAsc'%itemsPerPage
            else:
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+timeStr+'&bbox='+str(box[0])+','+str(box[2])+','+str(box[1])+','+str(box[3])+'&itemsPerPage=%d&sortBy=timeAsc'%itemsPerPage
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+timeStr+'&itemsPerPage=%d&sortBy=timeAsc'%itemsPerPage
        else:
            # Subsequent pages: add startIndex for paging.
            if options.alltime:
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+'&bbox='+str(box[0])+','+str(box[2])+','+str(box[1])+','+str(box[3])+'&itemsPerPage=%d&sortBy=timeAsc&startIndex=%d'%(itemsPerPage, (bmore-1)*itemsPerPage)
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+'&itemsPerPage=%d&sortBy=timeAsc&startIndex=%d'%(itemsPerPage, (bmore-1)*itemsPerPage)
            else:
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+timeStr+'&bbox='+str(box[0])+','+str(box[2])+','+str(box[1])+','+str(box[3])+'&itemsPerPage=%d&sortBy=timeAsc&startIndex=%d'%(itemsPerPage, (bmore-1)*itemsPerPage)
                urllink = PODAAC_WEB+'/ws/search/granule/?shortName='+shortname+timeStr+'&itemsPerPage=%d&sortBy=timeAsc&startIndex=%d'%(itemsPerPage, (bmore-1)*itemsPerPage)
        bmore = bmore + 1

        if sys.version_info >= (3,0):
            response = urllib.request.urlopen(urllink)
        else:
            response = urllib.urlopen(urllink)
        data = response.read()
        doc = minidom.parseString(data)

        numGranules = 0
        for arrays in doc.getElementsByTagName('link'):
            names = arrays.getAttribute("title")
            if names == 'OPeNDAP URL':
                numGranules = numGranules + 1
                href = arrays.getAttribute("href")
                # Strip the .html (or similar) suffix to get the dataset URL.
                ncfile = href.rsplit( ".", 1 )[ 0 ]
                head, tail = os.path.split(ncfile)
                ncout = tail
                # Drop any compression suffix, then insert "_subset" before
                # the final extension for the local output name.
                if ncout.endswith('.bz2') or ncout.endswith('.gz'):
                    ncout = ncout.rsplit( ".", 1 )[ 0 ]
                ncout = ncout.rsplit( ".", 1 )[ 0 ]+'_subset.'+ncout.rsplit( ".", 1 )[ 1 ]
                # Assemble the OPeNDAP constraint expression.
                cmd=ncfile+'.nc?'
                if options.onlySST:
                    cmd=cmd+'analysed_sst'+index+','
                else:
                    for item in variable_list:
                        cmd=cmd+item+index+','
                cmd=cmd[0:(len(cmd)-1)] # remove the extra "," at the end.
                if status_curl == 0:
                    cmd='curl -g "'+cmd+'" -o '+ ncout
                elif status_wget == 0:
                    cmd='wget "'+cmd+'" -O '+ ncout
                else:
                    sys.exit('\nThe script will need curl or wget on the system, please install them first before running the script !\nProgram will exit now !\n')
                os.system( cmd )
                print (ncout + ' download finished !')

        # A short page means we just consumed the last page of results.
        if numGranules < itemsPerPage:
            bmore = 0

    end = time.time()
    print ('Time spend = ' + str(end - start) + ' seconds')
# NOTE(review): `parser`, `commands`, `os`, and `cdms2` are defined/imported
# outside this excerpt — TODO confirm.
args = parser.parse_args()

infile = args.infile
variable = args.variable

### convert conv_units to boolean ###
conv_units = args.conv_units
if conv_units.lower() in ['true', 't', '1', 'on', 'yes', 'y']:
    conv_units = True
else:
    conv_units = False

### if variable not specified get first one listed in file ###
# `cdo showname` prints a header line, then the variable names; take the
# first name on the second line.
if (variable == None or variable == ''):
    variable = commands.getstatusoutput(
        'cdo showname %s' % infile)[1].split('\n')[1].split()[0]

### generate a list of tmp filenames ###
fpath, fname = os.path.split(infile)
tmpfname, extn = os.path.splitext(fname)

# Read the time axis of the chosen variable and the file's global attributes.
tmpInFile = cdms2.open(infile, 'r')
tvar_orig = tmpInFile[variable]
tvals = tvar_orig.getTime().asComponentTime()
noseas = len(tvals)
glob_atts = tmpInFile.attributes

### add the filename as title if no title supplied ###
if args.title == None:
    title = fname
else:
    title = args.title
def Copy(self):
    """Set up a SampleAnalyzer job directory: copy sources, create an empty
    analysis (named interactively), write the Makefile and main, and, when
    showering is enabled, prepare the shower directory.

    Returns True on success, False on any failed step (after logging).
    """
    # Initializing the JobWriter
    jobber = JobWriter(self.main, self.path, False)

    # Writing process
    logging.info(" Creating folder '" + self.path + "'...")
    if not jobber.Open():
        logging.error("job submission aborted.")
        return False

    # Copying SampleAnalyzer
    logging.info(" Copying required 'SampleAnalyzer' source files...")
    if not jobber.CopyLHEAnalysis():
        logging.error(" job submission aborted.")
        return False

    # Writing an empty analysis; the name is sanitized for use as identifier.
    logging.info("Please enter a name for your analysis")
    title = raw_input("Answer: ")
    if title == "":
        title = "user"
    title = title.replace(' ', '_')
    title = title.replace('-', '_')
    logging.info(" Writing an empty analysis...")
    os.system("cd " + self.path + "/Build/SampleAnalyzer; python newAnalyzer.py " + title + " 1")

    # Extracting analysis name: newAnalyzer.py may have altered the name, so
    # read it back from the generated analysisList.h ("Add" registration line).
    file = open(self.path + "/Build/SampleAnalyzer/Analyzer/analysisList.h")
    title = ""
    for line in file:
        if "Add" not in line:
            continue
        words = line.split('"')
        if len(words) >= 3:
            title = words[1]
            break
    file.close()

    # Writing a Makefile
    logging.info(" Writing a 'Makefile'...")
    if not jobber.WriteMakefiles():
        logging.error("job submission aborted.")
        return False

    # Writing Main
    if not jobber.CreateBldDir(analysisName=title, outputName="user.saf"):
        logging.error(" job submission aborted.")
        return False

    if self.main.shower.enable:
        mode = self.main.shower.type
        if self.main.shower.type == 'auto':
            # Auto-detect the shower type by grepping the first LHE file.
            mode = commands.getstatusoutput(
                'less ' + self.main.datasets[0].filenames[0] + ' | grep parton_shower ')
            if mode[0] != 0:
                logging.error(
                    'Cannot retrieve the showering information from the LHE files')
                return False
            mode = (mode[1].split())[0]
        if not jobber.CreateShowerDir(mode):
            logging.error(" job submission aborted.")
            return False

    return True
#!/usr/bin/python2 import cgi import commands print "Content-Type: text/html" print actionIp=cgi.FormContent()['actionIp'][0] outDir=cgi.FormContent()['outDir'][0] f=commands.getstatusoutput("sshpass -p redhat ssh -o stricthostkeychecking=no -l root {} hadoop fs -cat {}/part-00000".format(actionIp,outDir)) print """ <br /> <br /> <br /> """ print f[1]
# Defaults for the batch-submission options; overridden by getopt flags below.
subtoBatch = False
requirementtoBatch = 'type==SLC6_64&&pool>30000'
samplesDB = ''
theExecutable = ''
inputdir = ''
outdir = ''
lumi = 1
cfg_file = ''
split = 1
segment = 0
params = ''
onlytag = 'all'
queuelog = ''
count = 0

# Open per-user scratch scripts for batch and local submission.
# NOTE(review): assumes /tmp/<user>/ already exists — confirm.
who = commands.getstatusoutput('whoami')[1]
SCRIPT = open('/tmp/' + who + '/SCRIPT_Submit2batch.sh', "w")
SCRIPT_L = open('/tmp/' + who + '/SCRIPT_Local.sh', "w")
SCRIPT_L.writelines('#!bin/sh \n\n')
SCRIPT_L.writelines('cd $CMSSW_BASE/src/llvvAnalysis/DMAnalysis/; \n\n')

# Option loop; `opts` comes from a getopt call outside this excerpt, and the
# elif chain continues beyond it.
for o, a in opts:
    if o in ("-?", "-h"):
        usage()
        sys.exit(0)
    elif o in ('-s'):
        # -s: submit to batch; argument selects the queue ("True" -> "2nd").
        subtoBatch = True
        queue = a
        if (queue == "True"):
            queue = "2nd"
    elif o in ('-j'):
        samplesDB = a
    elif o in ('-e'):
        theExecutable = a
def draw_nochannel(tmphdf, png_title, begin_time, end_time):
    """Run the per-instrument "no channel" NCL plot programs on one HDF file,
    then archive the resulting PNGs and refresh the LATEST directory.

    Returns False on plot or copy failure; uses module-level sat/ins/conf/
    my_log/log_tag/time_tag (defined outside this excerpt — TODO confirm).
    """
    # Build one NCL command line per configured plot program.
    cmd = []
    sds_len = len(conf.draw_ncl_new[ins]['ncl_prog_no_channel'])
    for i in xrange(0, sds_len):
        file_out = conf.plot_path + '/' + png_title + conf.draw_ncl_new[ins][
            'ncl_prog_no_channel'][i]['tmp_png']
        file_title = png_title + conf.draw_ncl_new[ins]['ncl_prog_no_channel'][
            i]['tmp_png']
        ncl_name = conf.plot_path + '/' + sat.upper() + '_' + ins.upper(
        ) + '_' + conf.draw_ncl_new[ins]['ncl_prog_no_channel'][i][
            'tmp_png'] + '_4SUB.ncl'
        temp_log = conf.tmp_path + '/monitor.' + log_tag + '.' + conf.draw_ncl_new[
            ins]['ncl_prog_no_channel'][i]['tmp_png'] + '.log'
        # NCL is invoked with quoted 'name="value"' arguments; output and
        # errors are redirected into temp_log.
        temp_cmd = conf.ncl + " 'sat=\"" + sat.upper() + "\"' " \
            + "'instrument=\"" + ins.upper() + "\"" + '\'' \
            + " 'file_in=\"" + tmphdf + "\"' " \
            + " 'file_out=\"" + file_out + "\"' " \
            + " 'file_title=\"" + file_title + "\"' " + ncl_name \
            + ' > ' + temp_log + ' 2>&1'
        print temp_cmd
        cmd.append(temp_cmd)
    #print cmd

    # Run the NCL commands sequentially, logging each status/output.
    timeuse_begin = time.time()
    for cmd_temp in cmd:
        print cmd_temp
        (status, output) = commands.getstatusoutput(cmd_temp)
        common.debug(my_log, log_tag, str(status) + '`' + cmd_temp + '`' + output)
    # pooltest = ThreadPool()
    # ret = pooltest.map(commands.getstatusoutput, cmd )
    # pooltest.close()
    # pooltest.join()
    timeuse_end = time.time()
    timeuse = str(round(timeuse_end - timeuse_begin, 2))
    print timeuse

    # Verify each PNG was produced, archive it by year, and refresh LATEST.
    sds_len = len(conf.draw_ncl_new[ins]['ncl_prog_no_channel'])
    for i in xrange(0, sds_len):
        file_out = conf.plot_path + '/' + png_title + conf.draw_ncl_new[ins][
            'ncl_prog_no_channel'][i]['tmp_png']
        temp_log = conf.tmp_path + '/monitor.' + log_tag + '.' + conf.draw_ncl_new[
            ins]['ncl_prog_no_channel'][i]['tmp_png'] + '.log'
        # check png.OK
        if not common.check_file_exist(file_out + '.png', check_ok=True):
            msg = 'ncl program error: output png file not exist.' + file_out
            print msg
            common.error(my_log, log_tag, time_tag + msg)
            return False
        dest_path = '/hds/assimilation/fymonitor/DATA/IMG/NSMC/' + sat.upper() + '/' + ins.upper() + '/' \
            + conf.draw_ncl_new[ins]['ncl_prog_no_channel'][i]['tmp_png'] + '/'
        arch_path = dest_path + str(end_time[0:4]) + '/'
        latest_path = dest_path + 'LATEST/'
        try:
            shutil.copyfile(
                file_out + '.png',
                arch_path + png_title +
                conf.draw_ncl_new[ins]['ncl_prog_no_channel'][i]['tmp_png'] +
                '.png')
            common.empty_folder(latest_path)
            common.mv_file(
                file_out + '.png',
                latest_path + png_title +
                conf.draw_ncl_new[ins]['ncl_prog_no_channel'][i]['tmp_png'] +
                '.png')
            os.remove(file_out + '.png.OK')
            os.remove(temp_log)
        except:
            # NOTE(review): bare except also swallows unrelated errors.
            msg = 'png created, but cp or mv to dest error'
            print msg
            common.error(my_log, log_tag, time_tag + msg)
            return False

    # Temp-file cleanup is currently disabled (commented out).
    try:
        print 'ssss'
        #os.remove(tmphdf)
        #os.remove(tmphdf + '.txt')
    except OSError, e:
        msg = 'clean tmp file error[' + str(e.args[0]) + ']: ' + e.args[1]
        print msg
        common.warn(my_log, log_tag, time_tag + msg)
# NOTE(review): this excerpt is truncated — the `except` for the `try` below
# and the rest of the loop body are outside this view.
def brute(ip):
    """Attempt an SNMP community-string brute force against `ip`, walking
    with each candidate name from the module-level `names` list."""
    print "\n[+] Attempting BruteForce:",ip
    try:
        for n in names:
            # snmpwalk output captured as lines for inspection downstream.
            response = StringIO.StringIO(commands.getstatusoutput('snmpwalk '+ip+" "+n)[1]).readlines()
def test_command(self):
    """Invoking the codecov CLI (here with `--hep`) must emit the usage banner."""
    status, output = commands.getstatusoutput('codecov --hep')
    self.assertIn('usage: codecov', output)
def list(self): print "> list containers" retvar, ret = commands.getstatusoutput("sudo runc list") return ret
# NOTE(review): fragment — the `if` matching the `else:` below, `usage`, and
# the continuation of the final loop are outside this excerpt.
    user = sys.argv[2]
else:
    # No user argument: argv[1] (and optionally argv[2]) give the ID range.
    beginID = sys.argv[1]
    if (len(sys.argv) == 2):
        endID = sys.argv[1]
    else:
        endID = sys.argv[2]
# IDs look like "computerNN"; the numeric part starts at offset 8.
try:
    beginNumber = int(beginID[8:])
    endNumber = int(endID[8:])
except:
    print "syntax error " + str(sys.argv)
    print usage
    os._exit(0)
print "getting status of machines, please wait..."
# Query power status for the whole range in one `hm` call.
results = commands.getstatusoutput("hm power status range " + beginID + " " + endID)[1].splitlines()
# Default every machine in range to "unknown", then fill from the output
# lines shaped like "computerNN: ON" / "computerNN: OFF".
powerStatuses = {}
for x in range(beginNumber, endNumber + 1):
    powerStatuses["computer" + str(x)] = "unknown"
for result in results:
    if result[result.find(": ") + 2:].upper() == "ON" or result[result.find(": ") + 2:].upper() == "OFF":
        powerStatuses[result[:result.find(":")]] = result[result.find(": ") + 2:]
freeMachines = []
usersMachines = []
for x in range(beginNumber, endNumber + 1):
    # Inspect the machine's kernel symlink (loop continues past this excerpt).
    kernel = commands.getstatusoutput("ls -lha /export/machines/computer" + str(x) + " | grep 'kernel ->'")[1]
#!/usr/bin/python2 print "Content-Type: text/html" print import cgi import commands imageName =cgi.FormContent() ['imagename'][0] cName =cgi.FormContent() ['cname'][0] cNameCheck = commands.getstatusoutput("sudo docker inspect {}".format(cName)) if cNameCheck[0]==0: print "{} container name already exists.".format(cName) else : creationCheck = commands.getstatusoutput("sudo docker run -dit --name {0} {1}".format(cName,imageName)) if creationCheck[0] == 0: print """ <script> alert("Container lauched successfully."); </script> """ else: print """ <script> alert("Faied."); </script> """
def get_one_log(environ, start_response):
    """WSGI handler: respond 200 text/plain with one log rendered by
    get_one_log_html (defined outside this excerpt)."""
    status = '200 OK'
    response_headers = [('Content-type', 'text/plain')]
    start_response(status, response_headers)
    output = get_one_log_html(environ)
    return output


def list(environ, start_response):
    """WSGI dispatcher: empty query string -> log list, otherwise a single log.

    NOTE(review): shadows the builtin `list`; presumably intentional as a
    URL-route name — confirm.
    """
    query_string = environ['QUERY_STRING']
    if query_string == '':
        output = get_log_list(environ, start_response)
    else:
        output = get_one_log(environ, start_response)
    return output
    # Dead code below the return, kept as found:
    #status, output = commands.getstatusoutput('ls /var/log/iptables.log|sort')
    #status = '200 OK'
    #response_headers = [('Content-type', 'text/plain')]
    #start_response(status, response_headers)
    #return output


if __name__ == '__main__':
    # NOTE(review): get_log_list_html is not defined in this excerpt — confirm.
    output = get_log_list_html()
    # NOTE(review): `output` is interpolated unquoted-unescaped into a shell
    # `echo` — breaks on quotes/backticks and is a shell-injection risk.
    status, output = commands.getstatusoutput('echo "' + output + '" > /home/zhangcl/test.htm')