def main():
    # Project folder path
    if argv.count('-d') == 0:
        print('Error: the project folder path is required, format: "-d D:\F28_Dual_DVP".')
        exit()
    else:
        project_directory = argv[argv.index('-d') + 1]
        for i in range(argv.index('-d') + 2, len(argv)):
            if not argv[i].startswith('-'):
                project_directory += (' ' + argv[i])
            else:
                break
        if project_directory[:2] == '.\\':
            project_directory = getcwd() + project_directory[1:]

    # Excel file path
    if argv.count('-f') == 0:
        print('Error: the Excel file path is required, format: "-f D:\Example_v2.0.xlsx".')
        exit()
    else:
        excel_dir = argv[argv.index('-f') + 1]
        for i in range(argv.index('-f') + 2, len(argv)):
            if not argv[i].startswith('-'):
                excel_dir += (' ' + argv[i])
            else:
                break

    # Generate files
    generate_project(excel_dir, project_directory)
def readARG(argv):
    """ reading the arguments """
    global read_chelpg, read_mulliken, read_npa
    read_chelpg = 0
    read_mulliken = 0
    read_npa = 0
    read_all = 0

    if argv.count("-npa"):
        read_npa = 1
    if argv.count("-chelpg"):
        read_chelpg = 1
    if argv.count("-mulliken"):
        read_mulliken = 1
    if argv.count("-all"):
        read_chelpg = 1
        read_mulliken = 1
        read_npa = 1
        read_all = 1

    checkall = read_chelpg + read_mulliken + read_npa + read_all
    if checkall == 0:
        print cr + "\nERROR:" + end + " please select an option.\n\n"

    fname = argv[1]
    return fname
def run(self):
    otpNames = listdir(self.openPath)

    # collation folder path
    if argv.count('-c') == 0:
        collationFolder = self.openPath + '_collation'
    else:
        collationFolder = argv[argv.index('-c') + 1]
    MkDir(collationFolder)

    otpFolders = []
    for otpName in otpNames:
        if isdir(join(self.openPath, otpName)):
            otpFolders.append(join(self.openPath, otpName))

    # classify raw files according to the image type in the file name
    pattern = '(\S+)_(\S+)-(\S+)_(\d+)_(\S+)_(\d+)x(\d+).raw'
    for i in range(len(otpFolders)):
        # get files under the folder
        fileList = GetFileList(otpFolders[i], '.raw')
        for file in fileList:
            fileName = basename(file)
            res = match(pattern, fileName)
            form = res[5]
            targetFolder = join(collationFolder, form)
            MkDir(targetFolder)
            copy(file, targetFolder)
            self.currentCount += 1
            if self.currentCount != barCount:
                self._signal.emit(str(self.currentCount * 100 // barCount))

    self._signal.emit(str(100))
def getArgs(*args):
    """ Gets the arguments and stores them in a dictionary. """
    argDict = {}
    for i in range(len(args)):
        if argv.count(args[i]) != 0:
            if args[i] == '-help':
                argDict[args[i]] = 'on'
            else:
                j = argv.index(args[i])
                argDict[args[i]] = argv[j + 1]
        else:
            argDict[args[i]] = None
    return argDict
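# Hedged usage sketch for a getArgs-style helper: this is an illustrative,
# self-contained reimplementation, not the original module. The flag names
# ('-input', '-output', '-help') are hypothetical.
from sys import argv

def get_args(*flags):
    """Collect flag values from sys.argv into a dict (None when absent)."""
    found = {}
    for flag in flags:
        if argv.count(flag) != 0:
            # '-help' is a bare switch; the other flags take the following token
            found[flag] = 'on' if flag == '-help' else argv[argv.index(flag) + 1]
        else:
            found[flag] = None
    return found

if __name__ == '__main__':
    # e.g. `python example.py -input data.txt -help`
    # -> {'-input': 'data.txt', '-output': None, '-help': 'on'}
    print(get_args('-input', '-output', '-help'))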
def main():
    woman = 500
    man = 500
    years = 20
    if len(argv) == 4:
        woman = int(argv[1])
        man = int(argv[2])
        years = int(argv[3])

    s = Simulator()
    s.build(woman, man, 12 * years)
    collector = s.sim()

    if argv.count('graph'):
        from app import app
        app.data = collector
        app.run(host='0.0.0.0', port=8000, debug=False)
def main():
    # Project folder path
    if argv.count('-p') == 0:
        print('Error: Project folder path, format: "-p D:\Project".')
        exit()
    else:
        project_folder = argv[argv.index('-p') + 1]

    project_data = read_project(project_folder)
    socketmap_data = read_socketmap(project_folder)
    signals_data = read_signals(project_folder)
    limit_data = read_limit(project_folder)
    signalgroups_data = read_signalgroups(project_folder)
    bindefinition_data = read_bindefinition(project_folder)
    dcmeasure_data = read_dcmeasure(project_folder)
    tests_data = read_tests(project_folder)
    uservars_data = read_uservars(project_folder)
    levels_data = read_levels(project_folder)

    write_excel(project_data, socketmap_data, signals_data, limit_data,
                signalgroups_data, bindefinition_data, dcmeasure_data,
                tests_data, uservars_data, levels_data)
        future[i][3].append(len(close))

    # Add future options to frontier
    frontier.extend(future)
    # Sort frontier based on (g(x) + h(x))
    frontier.sort(key=lambda x: x[1])
    # Add current to close
    close.append(curr[0])

# START
print("John Bucknam")
print("HW 3: A* Algorithm")
print('')

# Board is being defined
board = ['b', 'b', 'b', 'n', 'w', 'w', 'w']
if len(argv) == 8 and argv.count('w') == 3 and argv.count('b') == 3 and argv.count('n') == 1:
    board = argv[1:8]
else:
    print("Default: [b, b, b, n, w, w, w]")

print('Problem:' + str(board))

# Start
begin = time.clock()
complete = solve(board)
end = time.clock()

# Print results
print('')
# Time rounded to 1000th msec
print('Time: ' + str(round(((end - begin) * 1000), 3)) + ' msec')
# Total cost
## @brief
## @author ?

import string
from sys import argv, stderr
from os import popen, system
from os.path import exists, dirname, basename, abspath

#assert( len(argv)>2)
pdbname = argv[1]
if (pdbname[-4:] != '.pdb' and pdbname[-7:] != '.pdb.gz'):
    pdbname += '.pdb'

removechain = 0
if argv.count('-nochain'):
    pos = argv.index('-nochain')
    del(argv[pos])
    removechain = 1

ignore_chain = 0
if argv.count('-ignorechain'):
    pos = argv.index('-ignorechain')
    del(argv[pos])
    ignore_chain = 1

chainids = []
if len(argv) > 2:
    chainids = argv[2:]

if len(chainids) > 0 and len(chainids[0]) == 1:
def Help():
    print
    print 'Usage: clean_outfile <file1> <file2> ... [-all] [-fixtag]'
    print ' -all Get decoys marked with F_ (failed filters)'
    print ' -fixtag Make decoy tag match its scoreline.'
    print
    print
    exit()

if len(argv) < 2:
    Help()

fix_tag = 0
if argv.count('-fixtag'):
    pos = argv.index('-fixtag')
    del(argv[pos])
    fix_tag = 1

split_files = 0
if argv.count('-split'):
    pos = argv.index('-split')
    del(argv[pos])
    split_files = 1
num_file = 1

leave_in_dir = 0
if argv.count('-leave_in_dir'):
    pos = argv.index('-leave_in_dir')
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################

##
## Test autoimport of assemblies found in DLLs directory
##

from sys import exit, argv
from iptest.assert_util import *

if argv.count("OKtoRun") == 0 or is_cli == False:
    print "Bailing"
    exit(0)

def test_sanity():
    '''
    Sanity checks. All of these are fairly normal imports and should work.
    '''
    #first check that our native modules are still present and accounted for...
    import binascii
    import _collections
    import copy_reg
    import cPickle
    import cStringIO
def main(pdbname, chainid):
    # remote host for downloading pdbs
    remote_host = 'ws0.nrb'

    shit_stat_insres = False
    shit_stat_altpos = False
    shit_stat_modres = False
    shit_stat_misdns = False  # missing density!

    fastaseq = ""
    pdbfile = ""

    if argv.count('-h'):
        print_help()

    files_to_unlink = []

    if (pdbname[-4:] != '.pdb' and pdbname[-8:] != '.pdb1.gz'):
        pdbname += '.pdb'

    #outfile = string.lower(pdbname[0:4]) + chainid + pdbname[4:]
    outfile = pdbname[0:-4] + "_" + chainid + ".pdb"

    nopdbout = 0
    if argv.count('nopdbout'):
        nopdbout = 1

    removechain = 0
    if argv.count('nochain'):
        removechain = 1

    ignorechain = 0
    if argv.count('ignorechain'):
        ignorechain = 1

    netpdbname = pdbname
    if not exists(netpdbname):
        netpdbname = pdbname

    fixed_pdb = pdbname
    print("Looking for: ", fixed_pdb)
    if os.path.isfile(fixed_pdb):
        print("Found preoptimised or otherwise fixed PDB file. ")
        netpdbname = fixed_pdb
    else:
        print("File %s doesn't exist" % (netpdbname))

    files_to_unlink.append(netpdbname)

    if netpdbname[-3:] == '.gz':
        lines = popen('zcat ' + netpdbname, 'r').readlines()
    else:
        lines = open(netpdbname, 'r').readlines()

    oldresnum = ' '
    count = 1
    modifiedres = ''

    residue_buffer = []
    residue_letter = ''
    residue_invalid = False

    if chainid == '_':
        chainid = ' '

    for i in range(len(lines)):
        line = lines[i]

        if len(line) > 5 and line[:6] == 'ENDMDL':
            break  #Its an NMR model.

        if (chainid == line[21] or ignorechain or removechain):
            line_edit = line
            if line[0:3] == 'TER':
                continue
            elif (line[0:6] == 'HETATM'):
                ok = False

                ## Is it a modified residue ?
                if line[17:20] in modres:
                    ## if so replace it with its canonical equivalent !
                    line_edit = 'ATOM ' + line[6:17] + modres[line[17:20]] + line[20:]
                    modifiedres = modifiedres + line[17:20] + ', '
                    ## dont count MSEs as modiied residues (cos they're so common and get_pdb deal with them previosuly)
                    if line[17:20] != "MSE":
                        shit_stat_modres = True
                    ok = True

                ## other substitution (of atoms mainly)
                if (line[17:20] == 'MSE'):  #Selenomethionine
                    if (line_edit[12:14] == 'SE'):
                        line_edit = line_edit[0:12] + ' S' + line_edit[14:]
                    if len(line_edit) > 75:
                        if (line_edit[76:78] == 'SE'):
                            line_edit = line_edit[0:76] + ' S' + line_edit[78:]

                if not ok:
                    continue  # skip this atom if we havnt found a conversion

            if line_edit[0:4] == 'ATOM':  #or line_edit[0:6] == 'HETATM':
                ##            if line_edit[13:14]=='P': #Nucleic acid? Skip.
                ##                resnum = line_edit[23:26]
                ##                oldresnum = resnum
                ##                while (resnum == oldresnum):
                ##                    print "HERE"
                ##                    i += 1
                ##                    line = lines[i]
                ##                    resnum = line_edit[23:26]

                resnum = line_edit[22:27]
                insres = line[26]
                if insres != ' ':
                    shit_stat_insres = True

                altpos = line[16]
                if altpos != ' ':
                    shit_stat_altpos = True

                ## Is thresidue_letter
                if not resnum == oldresnum:
                    if residue_buffer != []:  ## is there a residue in the buffer ?
                        if not residue_invalid:
                            flag1, fastaseq, pdbfile = check_and_print_pdb(
                                count, residue_buffer, residue_letter, pdbfile, fastaseq)
                            if not flag1:  ## if unsuccessful
                                shit_stat_misdns = True
                            else:
                                count = count + 1

                    residue_buffer = []
                    residue_letter = ''
                    residue_invalid = False

                    longname = line_edit[17:20]
                    if longname in longer_names:
                        residue_letter = longer_names[longname]
                    else:
                        residue_letter = 'X'
                        residue_invalid = True

                    oldresnum = resnum

                ## What does this do ?
                if line_edit[16:17] == 'A':
                    line_edit = line_edit[:16] + ' ' + line_edit[17:]

                if line_edit[16:17] != ' ':
                    continue

                if removechain:
                    line_edit = line_edit[0:21] + ' ' + line_edit[22:]

                residue_buffer.append(line_edit)

    flag1, fastaseq, pdbfile = check_and_print_pdb(count, residue_buffer,
                                                   residue_letter, pdbfile, fastaseq)
    if not flag1:  ## if unsuccessful
        shit_stat_misdns = True
    else:
        count = count + 1

    flag_altpos = "---"
    if shit_stat_altpos:
        flag_altpos = "ALT"
    flag_insres = "---"
    if shit_stat_insres:
        flag_insres = "INS"
    flag_modres = "---"
    if shit_stat_modres:
        flag_modres = "MOD"
    flag_misdns = "---"
    if shit_stat_misdns:
        flag_misdns = "DNS"

    nres = len(fastaseq)

    flag_successful = "OK"
    if nres <= 0:
        flag_successful = "BAD"

    print(netpdbname, pdbname, chainid, "%5d" % nres, flag_altpos, flag_insres,
          flag_modres, flag_misdns, flag_successful)

    if chainid == ' ':
        chainid = '_'

    if nres > 0:
        if (nopdbout == 0):
            #outfile = string.lower( basename(outfile) )
            outfile = outfile.replace('.pdb1.gz', '.pdb')
            outid = open(outfile, 'w')
            outid.write(pdbfile)
            outid.write("TER\n")
            outid.close()

        fastaid = stdout
        fastaid.write('>' + pdbname[0:4] + chainid + '\n')
        fastaid.write(fastaseq)
        fastaid.write('\n')

    if len(files_to_unlink) > 0:
        for file in files_to_unlink:
            os.unlink(file)
def cleanDirectory(dir):
    safeEntries = {}

    if argv.count("-all") == 0 and argv.count("-a") == 0:
        #these files/directories will NOT be removed.
        safeEntries = {
            'ACS_INSTANCE.0': '', 'ACS_INSTANCE.1': '', 'ACS_INSTANCE.2': '',
            'ACS_INSTANCE.3': '', 'ACS_INSTANCE.4': '', 'ACS_INSTANCE.5': '',
            'ACS_INSTANCE.6': '', 'ACS_INSTANCE.7': '', 'ACS_INSTANCE.8': '',
            'ACS_INSTANCE.9': '',
            'USED_CONTAINER_PORTS': '',
            '.acs_command_history': '',
            'ifr_cache.0': '', 'ifr_cache.1': '', 'ifr_cache.2': '',
            'ifr_cache.3': '', 'ifr_cache.4': '', 'ifr_cache.5': '',
            'ifr_cache.6': '', 'ifr_cache.7': '', 'ifr_cache.8': '',
            'ifr_cache.9': ''
        }
        print "Safe directories (i.e., ACS_INSTANCE.*'s) and a few files used by ACS will be"
        print "preserved. To remove everything, provide the '-all' switch to this script."
    else:
        print "Cleaning up ALL directories"
        print "It is ONLY SAFE and recommended to do this after a machine has been rebooted or"
        print "killACS has been run! Use at your own risk."

    ################################################################################################
    try:
        myDir = dir
        #make sure acs temp exists
        if not exists(myDir):
            print "'", myDir, "' does not exist!"
            exit(1)
        #get a list of all entries in it.
        tempList = listdir(myDir)
        #move to it
        chdir(myDir)
        #iterate through every entry
        for entry in tempList:
            #if OK to remove...
            if (not safeEntries.has_key(entry)) and (isdir(entry)):
                try:
                    system("rm -rf " + entry)
                except:
                    print "Unable to remove this directory:", entry
            elif (not safeEntries.has_key(entry)) and (isfile(entry)):
                try:
                    remove(entry)
                except:
                    print "Unable to remove this file:", entry
    except Exception, e:
        print "An exception occurred in acsdataClean.py's main:", e
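# Hedged, more portable variant of the removal loop above: it uses
# shutil.rmtree/os.remove instead of shelling out to `rm -rf`. The names
# (safeEntries, myDir) follow the snippet; wiring this into the ACS script
# itself is an assumption for illustration only.
import os
import shutil

def clean_directory(myDir, safeEntries):
    """Remove every entry in myDir that is not listed in safeEntries."""
    for entry in os.listdir(myDir):
        if entry in safeEntries:
            continue
        path = os.path.join(myDir, entry)
        try:
            if os.path.isdir(path):
                shutil.rmtree(path)
            else:
                os.remove(path)
        except OSError as exc:
            print("Unable to remove %s: %s" % (path, exc))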
    return None

if __name__ == '__main__':
    if len(argv) < 2:
        print('usage: %s (<configuration_filepath>) <mountpoint> (-o <fuse_mount_options>)' % argv[0])
        exit(1)

    fuse_options = {}
    mounting_point = str(argv[1])
    mongofs_argv_size = len(argv)

    if argv.count('-o') > 0:
        # let's construct a dictionary for fuse options
        mongofs_argv_size = argv.index('-o')
        for fuse_opt_arg in argv[mongofs_argv_size + 1].split(','):
            fuse_opt_arg_parts = fuse_opt_arg.split('=')
            if (len(fuse_opt_arg_parts) == 1):
                fuse_opt_arg_parts.append(True)
            fuse_options[fuse_opt_arg_parts[0]] = fuse_opt_arg_parts[1]

    if mongofs_argv_size >= 3:
        configuration_filepath = argv[1]
        Configuration.FILEPATH = configuration_filepath
        mounting_point = str(argv[2])

    if not mounting_point.startswith('/'):
        mounting_point = os.getcwd() + '/' + mounting_point
def main():
    _usage = argv[0] + " docker_host_ip1:port[,docker_host_ip2:port,..,docker_host_ipn] [images names]"

    for _argv in argv:
        if argv.count("--help") or argv.count("-h"):
            print _usage
            exit(0)

    if len(argv) < 2:
        print _usage
        exit(1)

    if len(argv) > 2:
        _image_list = argv[2]
    else:
        _image_list = "all"

    _images_base_dir = "/tmp"
    _images_base_url = "http://9.2.212.67/repo/vmimages/"
    _images_arch = "x86_64"

    if _image_list == "all":
        _images_names = ["nullworkload",
                         "hadoop",
                         "ycsb",
                         "iperf",
                         "netperf",
                         "nuttcp",
                         "fio",
                         "xping",
                         "speccloud_cassandra_2111",
                         "speccloud_hadoop_271"]
    else:
        _images_names = argv[2].split(',')

    _images_cksum = _images_base_url + "/cloudbench/" + _images_arch + "/md5sum.txt"

    for _image in _images_names:
        if not access(_images_base_dir + "/cb_" + _image + ".tar", F_OK):
            _image_url = _images_base_url + "/cloudbench/" + _images_arch + "/cb_" + _image + ".tar"
            _msg = "Downloading from URL \"" + _image_url + "\"..."
            print _msg
            wget.download(_image_url, out=_images_base_dir + "/cb_" + _image + ".tar")
            print " "
            print " "

    _endpoint_list = ''
    for _endpoint in argv[1].split(','):
        _cli = docker.Client(base_url="tcp://" + _endpoint, timeout=600)
        _info = _cli.info()
        if _info["SystemStatus"]:
            for _item in _info["SystemStatus"]:
                if _item[1].count(':') == 1:
                    _endpoint_list += _item[1] + ','

    if len(_endpoint_list):
        _endpoint_list = _endpoint_list[0:-1]
    else:
        _endpoint_list = argv[1]

    _endpoint_port = 17282

    for _endpoint in _endpoint_list.split(','):
        _cli = docker.Client(base_url="tcp://" + _endpoint, timeout=600)
        for _image in _images_names:
            _image = "cb_" + _image
            if not len(_cli.images(name=_image)):
                _image_filename = _images_base_dir + '/' + _image + ".tar"
                _msg = "Loading file \"" + _image_filename + "\" into Docker image store on host \"" + _endpoint + "\"..."
                print _msg
                with open(_image_filename, "rb") as f:
                    _cli.load_image(f)
            else:
                _msg = "Image \"" + _image + " already present on Docker image store on host \"" + _endpoint + "\"..."
                print _msg
elif "-v" == argv[i]: printf(info) elif 0 == len(argv): printf("Please enter the argv!","warning") else: if argv[i - 1] in ["-u", "-d", "-f", "-t", "-p","-a","-i","-m"]: pass else: printf("Can't find argv: " + argv[i],"warning") for para_test in argv: if argv.count(para_test)>1: printf("Have two same argv "+para_test,"error") U = False D = False if 0 == len(result_file): result_file == "" if U ==True and D == True: printf(time.ctime()+"\n","normal") starttime = time.time() scan.dic_scan(url, dictionary, result_file, timeout, proxy,ua,ignore_text) endtime = time.time() printf(time.ctime()+"\n","normal") printf("Use time:"+str(endtime-starttime)[:-13],"normal") elif U == True and D == False:
print "clean_pdb.py <pdb> <chain id> [nopdbout]" print "pdb = file name of the file. Can be with or without the .pdb file handle" print "chain id = The chain id you are interested in. If more than one chain, " print "you can pass the chain id without spaces. For example \"AB\" gets you" print "chain A and B. \"A\" gets you chain A." print "\n", print "chain id = nochain. Removes chain identity from output" print "chain id = ignorechain. Gets all the chains for pdb" print "chain id = rechain. Collapse all chains into one model. Rename chains." print "\n", print "written by Phil Bradley, Rhiju Das, Michael Tyka, TJ Brunette, and James Thompson from the Baker Lab. Edits done by Steven Combs, Sam Deluca and Jordan Willis from the Meiler Lab." sys.exit() if argv.count('-h'): print_help() files_to_unlink = [] try: assert (len(argv) > 2) except AssertionError: print_help() pdbname = argv[1] if argv[2].strip() != "ignorechain" and argv[2].strip( ) != "nochain" and argv[2].strip() != "rechain": chainid = argv[2].upper() else: chainid = argv[2]
#!/usr/bin/python

import string
from sys import argv, stderr
from os import popen, system
from os.path import exists, dirname, basename, abspath

#assert( len(argv)>2)
pdbname = argv[1]
if (pdbname[-4:] != '.pdb' and pdbname[-7:] != '.pdb.gz'):
    pdbname += '.pdb'

removechain = 0
if argv.count('-nochain'):
    pos = argv.index('-nochain')
    del(argv[pos])
    removechain = 1

ignore_chain = 0
if argv.count('-ignorechain'):
    pos = argv.index('-ignorechain')
    del(argv[pos])
    ignore_chain = 1

chainids = []
if len(argv) > 2:
    chainids = argv[2:]

if len(chainids) > 0 and len(chainids[0]) == 1:
    pdbnames = [pdbname]
elif "-v" == argv[i]: printf(info) elif 0 == len(argv): printf("Please enter the argv!", "warning") else: if argv[i - 1] in ["-u", "-d", "-f", "-t", "-p", "-a", "-i", "-m"]: pass else: printf("Can't find argv: " + argv[i], "warning") for para_test in argv: if argv.count(para_test) > 1: printf("Have two same argv " + para_test, "error") U = False D = False if 0 == len(result_file): result_file == "" if U == True and D == True: printf(time.ctime() + "\n", "normal") starttime = time.time() scan.dic_scan(url, dictionary, result_file, timeout, proxy, ua, ignore_text) endtime = time.time() printf(time.ctime() + "\n", "normal") printf("Use time:" + str(endtime - starttime)[:-13], "normal")
    return False

assert (len(argv) > 2)

pdbname = argv[1]
pdbcode = argv[1]
chainid = argv[2]
if (pdbname[-4:] != '.pdb' and pdbname[-8:] != '.pdb1.gz'):
    pdbname += '.pdb'

outfile = pdbname

nopdbout = 0
if argv.count('-nopdbout'):
    nopdbout = 1

removechain = 0
if argv.count('-Aify'):  #-nochain'):
    removechain = 1

ignorechain = 0
if argv.count('-ignorechain'):
    ignorechain = 1

netpdbname = local_pdb_database + pdbname[1:3] + '/' + pdbname[0:4] + '/' + argv[1] + '_0.pdb'
#print "getting the name right:", netpdbname
if not exists(netpdbname):
    netpdbname = pdbname
try: system("rm -rf " + entry) except: print "Unable to remove this directory:", entry elif (not safeEntries.has_key(entry)) and (isfile(entry)): try: remove(entry) except: print "Unable to remove this file:", entry except Exception, e: print "An exception occurred in acsdataClean.py's main:", e if argv.count("-h")!=0 or argv.count("--help")!=0: print "Clean the temporal directories of ACS, as given by $ACS_TMP, or otherwise $ACSDATA/tmp" print "Options:" print "-h or --help : Show this message and then exit.\n" print "-all : acsdataClean will remove all the directory entries under the tmp directory. Without this option, the ACS_INSTANCE.* subdirectories are preserved.\n" print "-other_hosts : acsdataClean will clean all directories located under $ACSDATA/tmp/, thus cleaning also the tmp directories of other hosts sharing the $ACSDATA in the same machine.\n" exit(0) print "Cleaning up ACS temporary directories" myDir = getAcsTmpDirectoryPath() cleanDirectory(myDir) if argv.count("-other_hosts")!=0: print "Cleaning all the possible hosts' temporary directories" myDir = str(environ['ACSDATA']) + '/tmp/' if not exists(myDir): print "'", myDir, "' does not exist!"
def run(self):
    if self.chooseRadio == 'ProjectFolder':
        handlerNames = listdir(self.openPath)
        handlerFolders = []
        for handlerName in handlerNames:
            if isdir(join(self.openPath, handlerName)):
                handlerFolders.append(join(self.openPath, handlerName))

        for handlerFolder in handlerFolders:
            dateFolders = []
            dateNames = listdir(handlerFolder)
            for dateName in dateNames:
                if dateName != 'Analysis' and isdir(join(handlerFolder, dateName)):
                    dateFolders.append(join(handlerFolder, dateName))

            lotnoFolders = []
            for dateFolder in dateFolders:
                lotNames = listdir(dateFolder)
                for lotName in lotNames:
                    if isdir(join(dateFolder, lotName)):
                        lotnoFolders.append(join(dateFolder, lotName))

            fileList = []
            for lotFolder in lotnoFolders:
                # get CSV file under the folder
                fileList.append(GetFileList(lotFolder, '.csv'))
            if not fileList:
                exit()

            # analysis folder path
            if argv.count('-a') == 0:
                analysisFolder = handlerFolder + '\Analysis'
            else:
                analysisFolder = argv[argv.index('-a') + 1]
            MkDir(analysisFolder)

            parseData = []
            for i in range(len(fileList)):
                lotnoData = {}
                tempSiteData = []
                tempSoftbinData = []
                for file in fileList[i]:
                    # parse file
                    tempSiteData.append(ParseFile(file, 1))
                    tempSoftbinData.append(ParseFile(file, 2))
                # get Date
                date = basename(dirname(dirname(fileList[i][0])))
                lotnoData['site data'] = tempSiteData
                lotnoData['swbin data'] = tempSoftbinData
                lotnoData['date'] = date
                lotnoData['lotno'] = GetLotno(fileList[i][0])
                parseData.append(lotnoData)
                self.currentLotCount += 1
                self._signal.emit(str(self.currentLotCount * 100 // totalLotCount))

            handler = basename(handlerFolder)
            analysisFile = join(analysisFolder,
                                handler + '_Total_Analysis' + nowTime + '.xlsx')
            # save data
            SaveData(analysisFile, parseData)
    else:
        dateFolders = []
        dateNames = listdir(self.openPath)
        for dateName in dateNames:
            if dateName != 'Analysis' and isdir(join(self.openPath, dateName)):
                dateFolders.append(join(self.openPath, dateName))

        lotnoFolders = []
        for dateFolder in dateFolders:
            lotNames = listdir(dateFolder)
            for lotName in lotNames:
                if isdir(join(dateFolder, lotName)):
                    lotnoFolders.append(join(dateFolder, lotName))

        fileList = []
        for lotFolder in lotnoFolders:
            # get CSV file under the folder
            fileList.append(GetFileList(lotFolder, '.csv'))
        if not fileList:
            exit()

        # analysis folder path
        if argv.count('-a') == 0:
            analysisFolder = self.openPath + '\Analysis'
        else:
            analysisFolder = argv[argv.index('-a') + 1]
        MkDir(analysisFolder)

        parseData = []
        for i in range(len(fileList)):
            lotnoData = {}
            tempSiteData = []
            tempSoftbinData = []
            for file in fileList[i]:
                # parse file
                tempSiteData.append(ParseFile(file, 1))
                tempSoftbinData.append(ParseFile(file, 2))
            # get Date
            date = basename(dirname(dirname(fileList[i][0])))
            lotnoData['site data'] = tempSiteData
            lotnoData['swbin data'] = tempSoftbinData
            lotnoData['date'] = date
            lotnoData['lotno'] = GetLotno(fileList[i][0])
            parseData.append(lotnoData)
            self.currentLotCount += 1
            self._signal.emit(str(self.currentLotCount * 100 // totalLotCount))

        handler = basename(self.openPath)
        analysisFile = join(analysisFolder,
                            handler + '_Total_Analysis' + nowTime + '.xlsx')
        # save data
        SaveData(analysisFile, parseData)

    self._signal.emit(str(100))
#####################
### DEFAULT MAIN PROC
#####################

graph

if __name__ == '__main__':
    from sys import argv

    setdebuglevel(debug_levels.ERRORS)

    ARGUMENT_LIST_SEPARATOR = ':'

    # Check argument validity
    if ((len(argv) < 4) or not (argv.count(ARGUMENT_LIST_SEPARATOR) == 1)
            or (argv.index(ARGUMENT_LIST_SEPARATOR) >= len(argv))):
        print("Need to supply at least two sets of mrml file names as arguments, separated by a single ':'")
        exit()

    # Get the two argument lists of file names.
    separator_index = argv.index(ARGUMENT_LIST_SEPARATOR)

    # List of fiducial stats representing a single v****a, to be compared to the range.
    # Ignore argv[0], as it's just the filename of this python file.
    range1propslist = load_vaginal_properties(argv[1:separator_index])
    for props in range1propslist:
        pics_correct_and_verify(props)
    [range1propstats, range1fidstats, range1propsdisplay] = get_stats_and_display_from_properties("Range 1", range1propslist)

    # List of fiducial stats representing a range to compare that single one against.
def main():
    mode = argv[1]
    if mode == '-encrypt':  #If encrypting
        message = argv[2]
        cipher = argv[3]
        if cipher == 'caesar':
            rot = int(argv[4])
            print(caesar(None).encrypt(message, rot))
        elif cipher == 'affine':
            a = int(argv[4])
            b = int(argv[5])
            print(affine(None).encrypt(message, a, b))
        elif cipher == 'scytale':
            key = int(argv[4])
            print(scytale(None).encrypt(message, key))
        else:
            raise ValueError('Unsupported cipher: ' + cipher)
    elif mode == '-decrypt':  #If decrypting
        encrypted_text = argv[2]
        cipher = argv[3]
        language = None
        if argv.count('-language') > 0:  #determine if the user gave the language
            language = argv[1 + argv.index('-language')]
        if cipher == 'caesar':
            rot = None
            if argv.count('-args') > 0:
                rot = int(argv[argv.index('-args') + 1])
            if rot == None:
                if language == None:
                    message = ''
                    legitimacy = -1
                    for language in supported_languages:
                        decr = caesar(dictionary(language))
                        trial = decr.crack(encrypted_text)
                        if trial[-1] > legitimacy:
                            message = trial[0]
                    print(message)
                else:
                    print(caesar(dictionary(language)).crack(encrypted_text)[0])
            else:
                print(caesar(None).decrypt(encrypted_text, rot))
        elif cipher == 'affine':
            a = None
            b = None
            if argv.count('-args') > 0:
                a = int(argv[argv.index('-args') + 1])
                b = int(argv[argv.index('-args') + 2])
            if a == None or b == None:
                if language == None:
                    message = ''
                    legitimacy = -1
                    for language in supported_languages:
                        trial = affine(dictionary(language)).crack(encrypted_text)
                        if trial[-1] > legitimacy:
                            message = trial[0]
                    print(message)
                else:
                    print(affine(dictionary(language)).crack(encrypted_text)[0])
            else:
                print(affine(None).decrypt(encrypted_text, a, b))
        elif cipher == 'scytale':
            key = None
            if argv.count('-args') > 0:
                key = int(argv[argv.index('-args') + 1])
            if key is None:
                if language is None:
                    message = ''
                    legitimacy = -1
                    for language in supported_languages:
                        trial = scytale(dictionary(language)).crack(encrypted_text)
                        if trial[-1] > legitimacy:
                            message = trial[0]
                    print(message)
                else:
                    print(scytale(dictionary(language)).crack(message))
        else:
            raise ValueError('Unsupported cipher: ' + cipher)
    else:  #The user gave an incorrect second option
        raise ValueError('Unsupported mode: ' + mode)
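# Hedged, self-contained illustration of the '-decrypt ... caesar -args <rot>'
# path above. The caesar/affine/scytale classes are not shown in the snippet,
# so this uses a minimal stand-in Caesar shift for demonstration only.
from sys import argv

def caesar_decrypt(text, rot):
    """Shift alphabetic characters back by rot positions."""
    out = []
    for ch in text:
        if ch.isalpha():
            base = ord('A') if ch.isupper() else ord('a')
            out.append(chr((ord(ch) - base - rot) % 26 + base))
        else:
            out.append(ch)
    return ''.join(out)

if __name__ == '__main__':
    # e.g. `python cipher_demo.py -decrypt KHOOR caesar -args 3` -> HELLO
    if len(argv) > 3 and argv[1] == '-decrypt' and argv[3] == 'caesar' and argv.count('-args'):
        print(caesar_decrypt(argv[2], int(argv[argv.index('-args') + 1])))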
from sys import argv

# script, first, second, third = argv

print "This is the name of the script argv[0]", argv[0]
print "This is the argv[1]", argv[1]
print "This is the argv[2]", argv[2]
print "This is the argv[3]", argv[3]
print argv.count('3')
print len(argv)
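# Hedged illustration of the snippet above; the argument values ('1', '2', '3')
# are hypothetical. Running `python show_args.py 1 2 3` makes argv equal to
# ['show_args.py', '1', '2', '3'], so argv.count('3') is 1 and len(argv) is 4.
from sys import argv

if __name__ == '__main__':
    print("argv:", argv)
    print("count of '3':", argv.count('3'))  # counts exact string matches only
    print("number of arguments (incl. script name):", len(argv))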
\t-i\tInteractive mode, otherwise uses default values.
\tserver\tServer ip address or hostname.
\tport\tServer port to use.
\tpassword\tServer admin password.
\tmessage\tMessage (string) to send to the server."""
    exit()

if __name__ == '__main__':
    server = "localhost"
    port = config.port
    password = None
    message = None  # "shutdown"+"\n"
    specServer = specPort = specPassword = specMessage = False

    if argv.count("-h") or argv.count("--help"):
        usage()

    if argv.count("-s"):
        p = argv.index("-s")
        if len(argv) > p:
            server = argv[p + 1]
            specServer = True

    if argv.count("-p"):
        p = argv.index("-p")
        if len(argv) > p:
            port = int(argv[p + 1])
            specPort = True

    if argv.count("-w"):
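# Hedged alternative to the manual argv.index()/argv.count() parsing above,
# using argparse from the standard library. The option names (-s, -p, -w)
# mirror the snippet; the defaults shown here are assumptions for illustration.
import argparse

def parse_cli(args=None):
    parser = argparse.ArgumentParser(description="Send an admin message to a server.")
    parser.add_argument("-s", dest="server", default="localhost", help="server ip address or hostname")
    parser.add_argument("-p", dest="port", type=int, default=8000, help="server port to use")
    parser.add_argument("-w", dest="password", default=None, help="server admin password")
    return parser.parse_args(args)

if __name__ == "__main__":
    # e.g. equivalent of `script.py -s example.host -p 4242`
    opts = parse_cli(["-s", "example.host", "-p", "4242"])
    print(opts.server, opts.port, opts.password)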
pdbnames = argv[1:]

#chainid = ' '
#if len(argv)>2:
#    chainid = argv[2]

for pdbname in pdbnames:
    if pdbname.startswith("-"): continue  # skip args

    if not pdbname.endswith('.pdb') and not pdbname.endswith(".gz"):
        pdbname += '.pdb'

    outfile = pdbname
    removechain = 0
    if argv.count('-nochain'):
        removechain = 1

    netpdbname = pdbname
    assert (exists(netpdbname))

    #print 'Reading ... '+netpdbname
    lines = []
    if netpdbname.endswith(".gz"):
        lines = popen("zgrep ' CA ' " + netpdbname).readlines()
    else:
        lines = popen("grep ' CA ' " + netpdbname).readlines()

    if argv.count('-list'):
        print os.path.basename(netpdbname).replace(".gz", "").replace(".pdb", "") + ": ",
from sys import argv

# Expects exactly four arguments after the script name, e.g. "salary 40 25 500"
# (hours * rate + bonus). len(argv) is used for the count check, and the digit
# strings are converted to int before doing arithmetic.
if (len(argv) == 5 and 'salary' in argv[1] and argv[2].isdigit()
        and argv[3].isdigit() and argv[4].isdigit()):
    print((int(argv[2]) * int(argv[3])) + int(argv[4]))
def print_help(): print "clean_pdb.py <pdb> <chain id>" print "pdb = file name of the file. Can be with or without the .pdb file handle" print "chain id = The chain id you are interested in. If more than one chain, " print "you can pass the chain id without spaces. For example \"AB\" gets you" print "chain A and B. \"A\" gets you chain A." print "\n", print "chain id = nochain. Removes chain identity from output" print "chain id = ignorechain. Gets all the chains for pdb" print "\n", print "written by Phil Bradley, Rhiju Das, Michael Tyka, TJ Brunette, and James Thompson from the Baker Lab. Edits done by Steven Combs, Sam Deluca and Jordan Willis from the Meiler Lab." sys.exit() if argv.count('-h'): print_help() files_to_unlink = [] try: assert(len(argv) > 2) except AssertionError: print_help() pdbname = argv[1] if argv[2].strip() != "ignorechain" and argv[2].strip() != "nochain": chainid = argv[2].upper() else: chainid = argv[2] if (pdbname[-4:] != '.pdb' and pdbname[-8:] != '.pdb1.gz'):
# Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.

##
## Test autoimport of assemblies found in DLLs directory
##

from sys import exit, argv
from iptest.assert_util import *

if argv.count("OKtoRun") == 0 or is_cli == False:
    print "Bailing"
    exit(0)

def test_sanity():
    '''
    Sanity checks. All of these are fairly normal imports and should work.
    '''
    #first check that our native modules are still present and accounted for...
    import binascii
    import _collections
    import copy_reg
    import cPickle
    import cStringIO
    import datetime
    import errno
    import exceptions
    import gc
#!/usr/bin/env python
#
# (c) Copyright Rosetta Commons Member Institutions.
# (c) This file is part of the Rosetta software suite and is made available under license.
# (c) The Rosetta software is developed by the contributing members of the Rosetta Commons.
# (c) For more information, see http://www.rosettacommons.org. Questions about this can be
# (c) addressed to University of Washington CoMotion, email: [email protected].

import string
import sys
from sys import argv, stdout
from os.path import exists

length = -1

if argv.count('-symm_type') and argv.count('-nsub'):
    pos = argv.index('-symm_type')
    symm_type_name = argv[pos + 1]
    pos = argv.index('-nsub')
    nsub = argv[pos + 1]
else:
    print ""
    print "usage: make_symmdef_file_denovo.py [options] "
    print ""
    print "example: make_symmdef_file_denovo.py -symm_type cn -nsub 2"
    print "example: make_symmdef_file_denovo.py -symm_type dn -nsub 4"
    print "example: make_symmdef_file_denovo.py -symm_type dn -nsub 4 -slide_type RANDOM -slide_criteria_type CEN_DOCK_SCORE"
    print "example: make_symmdef_file_denovo.py -symm_type cn -nsub 24 -subsystem"
    print ""
    print "common options:"
    print " -symm_type (cn|dn) : The type of symmetry. Currently cyclic or dihedral symmetry"
    print " -nsub <integer> : number of subunits"
#------------------------------------------------------------------------------
'''
Starts a Python Container.
'''
#------------------------------------------------------------------------------
__version__ = "$Id: ACSStartContainerPy.py,v 1.5 2006/07/18 20:11:39 dfugate Exp $"
#------------------------------------------------------------------------------
from Acspy.Util.ACSCorba import getManager
from Acspy.Container import Container
from sys import argv
from time import sleep
#------------------------------------------------------------------------------
if __name__ == "__main__":

    #this ensures that we don't log into manager until it's really
    #up and running.
    while getManager() == None:
        print "Failed to obtain the manager reference. Will keep trying!"
        sleep(3)

    g = Container(argv[1])

    if argv.count("-interactive") == 0 and argv.count("-i") == 0:
        g.run()
    else:
        import atexit
        atexit.register(g.destroyCORBA)
        print "This container is now running in interactive mode."
#------------------------------------------------------------------------------
from amino_acids import longer_names

pdbnames = argv[1:]

#chainid = ' '
#if len(argv)>2:
#    chainid = argv[2]

for pdbname in pdbnames:
    #    if (pdbname[-4:] != '.pdb'):
    #        pdbname += '.pdb'

    outfile = pdbname
    removechain = 0
    if argv.count('-nochain'):
        removechain = 1

    netpdbname = pdbname
    assert (exists(netpdbname))

    #print 'Reading ... '+netpdbname
    lines = open(netpdbname, 'r').readlines()

    #outid = open( outfile, 'w')
    #print 'Writing ... '+pdbname

    #fastafile = pdbname+'.fasta'
    #fastaid = open( fastafile, 'w')
    #print 'Writing ... '+fastafile
pdbnames = argv[1:]

#chainid = ' '
#if len(argv)>2:
#    chainid = argv[2]

for pdbname in pdbnames:
    if pdbname.startswith("-"): continue  # skip args

    if not pdbname.endswith('.pdb') and not pdbname.endswith(".gz"):
        pdbname += '.pdb'

    outfile = pdbname
    removechain = 0
    if argv.count('-nochain'):
        removechain = 1

    netpdbname = pdbname
    assert (exists(netpdbname))

    #print 'Reading ... '+netpdbname
    lines = []
    if netpdbname.endswith(".gz"):
        lines = popen("zcat " + netpdbname).readlines()
    else:
        lines = open(netpdbname, 'r').readlines()

    oldresnum = ' '
    count = 0
    extra = dict()