# Python 2: ljust/rjust/center here are the (long-deprecated) string-module
# functions, i.e. the module assumes `from string import ljust, rjust, center`.
def Atom2pdb(atmobj):
    try:
        if atmobj.valid != True:
            return ''
    except:
        return None
    atm_type = ljust(str(atmobj.atm_type), 6).upper()
    atm_number = rjust(str(atmobj.atm_number), 5)
    if atmobj.atm_name[0].isdigit() and atmobj.atm_name[-1].isdigit():
        atm_name = ' ' + center(str(atmobj.atm_name), 4).upper()
    elif atmobj.atm_name[-1].isdigit():
        atm_name = ' ' + ljust(str(atmobj.atm_name), 4).upper()
    elif (atmobj.atm_name[-1] == '\''):
        atm_name = ' ' + rjust(str(atmobj.atm_name), 4).upper()
    else:
        atm_name = ' ' + center(str(atmobj.atm_name), 4).upper()
    alt_loc = center(str(atmobj.alt_loc), 1)
    resi_name = center(str(atmobj.resi_name), 3).upper() + ' '
    chain = center(str(atmobj.chain), 1).upper()
    resSeq = rjust(str(atmobj.resSeq), 4)
    icode = center(str(atmobj.icode), 1) + 3 * ' '
    pos = (rjust("%.3f" % atmobj.pos[0], 8) +
           rjust("%.3f" % atmobj.pos[1], 8) +
           rjust("%.3f" % atmobj.pos[2], 8))
    occ = rjust(str(atmobj.occ), 6)
    bfac = rjust(str(atmobj.bfac), 5) + 11 * ' '
    elem = center(str(atmobj.elem), 2)
    charge = center(str(atmobj.charge), 2)
    return (atm_type + atm_number + atm_name + alt_loc + resi_name + chain +
            resSeq + icode + pos + occ + bfac + elem + charge + '\n')
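# A minimal usage sketch for Atom2pdb(); _Atom is a hypothetical stand-in
# carrying just the attributes the function reads, and the string-module
# imports noted above are assumed.
class _Atom(object):
    valid = True
    atm_type, atm_number, atm_name = 'ATOM', 1, 'CA'
    alt_loc, resi_name, chain, resSeq, icode = '', 'ALA', 'A', 1, ''
    pos = (11.104, 6.134, -6.504)
    occ, bfac, elem, charge = 1.0, 0.0, 'C', ''

print Atom2pdb(_Atom()),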
def print_data(self, *args):
    for x in args:
        if x == 'raw_data':  # '==', not 'is': identity checks on string literals are unreliable
            # copy before prepending headers so self.data is not mutated on every call
            matrix = [self.data_headers] + self.data
            max_lens = [max([len(str(r[i])) for r in matrix])
                        for i in range(len(matrix[0]))]
            print "\n".join(["".join([string.ljust(str(e), l + 2)
                                      for e, l in zip(r, max_lens)])
                             for r in matrix])
        if x == 'sma':
            for key in self.sma.keys():
                print ''.join([str(key), '-day Simple Moving Average'])
                matrix = []
                for day in range(0, len(self.date)):
                    matrix.append([self.date[day], self.sma[key][day]])
                max_lens = [max([len(str(r[i])) for r in matrix])
                            for i in range(len(matrix[0]))]
                print "\n".join(["".join([string.ljust(str(e), l + 2)
                                          for e, l in zip(r, max_lens)])
                                 for r in matrix])
                print ''
        if x == 'ema':
            for key in self.ema.keys():
                print ''.join([str(key), '-day Exponential Moving Average'])
                matrix = []
                for day in range(0, len(self.date)):
                    matrix.append([self.date[day], self.ema[key][day]])
                max_lens = [max([len(str(r[i])) for r in matrix])
                            for i in range(len(matrix[0]))]
                print "\n".join(["".join([string.ljust(str(e), l + 2)
                                          for e, l in zip(r, max_lens)])
                                 for r in matrix])
                print ''
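# Minimal usage sketch for print_data(); _Quotes is a hypothetical stand-in
# for the host class, wired with just the attributes the method reads.
import string

class _Quotes(object):
    data_headers = ['date', 'close']
    data = [['2015-01-02', 105.2], ['2015-01-05', 103.9]]
    date = ['2015-01-02', '2015-01-05']
    sma = {2: [None, 104.55]}
    ema = {2: [None, 104.33]}
    print_data = print_data

_Quotes().print_data('raw_data', 'sma')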
def checkJobList(jobPars, optPars, cfgPars):
    for job in jobPars:
        if not jobPars[job][0].isdigit():
            print 'Error: number of jobs must be a number'
            sys.exit(-1)
        for i in range(1, int(jobPars[job][0]) + 1):
            if optPars.submit_flag and os.path.isdir(cfgPars['OutPath'] + job + '_' + str(i)):
                print 'Error: Output directories exist'
                print cfgPars['OutPath'] + job + '_' + str(i)
                sys.exit(-1)
            elif optPars.collate_flag and not os.path.isdir(cfgPars['OutPath'] + job + '_' + str(i)):
                print 'Error: Directories to be collated do not exist'
                print cfgPars['OutPath'] + job + '_' + str(i)
                sys.exit(-1)
            elif optPars.delete_flag and not os.path.isdir(cfgPars['OutPath'] + job + '_' + str(i)):
                print 'Error: Directories to be deleted do not exist'
                print cfgPars['OutPath'] + job + '_' + str(i)
                sys.exit(-1)
        if not os.path.isfile(jobPars[job][1]):
            print 'Error: Datalist does not exist'
            print jobPars[job][1]
            sys.exit(-1)
    # Print job list to screen
    for job in jobPars:
        print string.ljust(job, 15), string.rjust(jobPars[job][0], 3), string.ljust(jobPars[job][1], 100)
def GetValues(fullname):
    # Split the full key path into root key and subkey
    name = string.split(fullname, '\\', 1)
    # Open the matching root key; several branches are used
    # so the function stays generic
    if name[0] == 'HKEY_LOCAL_MACHINE':
        key = RegOpenKey(HKEY_LOCAL_MACHINE, name[1], 0, KEY_READ)
    elif name[0] == 'HKEY_CURRENT_USER':
        key = RegOpenKey(HKEY_CURRENT_USER, name[1], 0, KEY_READ)
    elif name[0] == 'HKEY_CLASSES_ROOT':
        key = RegOpenKey(HKEY_CLASSES_ROOT, name[1], 0, KEY_READ)
    elif name[0] == 'HKEY_CURRENT_CONFIG':
        key = RegOpenKey(HKEY_CURRENT_CONFIG, name[1], 0, KEY_READ)
    elif name[0] == 'HKEY_USERS':
        key = RegOpenKey(HKEY_USERS, name[1], 0, KEY_READ)
    else:
        # the original was missing the '%' operator here and then fell
        # through with `key` unbound; report and bail out instead
        print 'err, no key named %s' % name[0]
        return
    # Query the number of values under this key
    info = RegQueryInfoKey(key)
    # Enumerate the values and fetch their data
    for i in range(0, info[1]):
        ValueName = RegEnumValue(key, i)
        # Pad the value name so the output lines up
        print string.ljust(ValueName[0], 20), ValueName[1]
    # Close the opened key
    RegCloseKey(key)
def checkNumberOfRules(testId):
    print string.ljust("Check " + testId, rpadding),
    errorFlag = False
    value = int(readLine())
    if rrPython.getNumberOfRules() != value:
        errorFlag = True
    print passMsg(errorFlag)
def run_all(self):
    """For each file in the test suite, run client program
    assuming each file represents an individual test."""
    try:
        server = Server(self.ini["core"], self.ini["module"])
    except Exception as e:
        print e
        raise RuntimeError("Unknown server: core = {0}, module = {1}".format(
            self.ini["core"], self.ini["module"]))
    if len(self.tests) == 0:
        # nothing to test, exit
        return 0
    server.deploy(self.ini["config"],
                  server.find_exe(self.args.builddir, silent=False),
                  self.args.vardir, self.args.mem, self.args.start_and_exit,
                  self.args.gdb, self.args.valgrind,
                  init_lua=self.ini["init_lua"], silent=False)
    if self.args.start_and_exit:
        print " Start and exit requested, exiting..."
        exit(0)

    longsep = "=============================================================================="
    shortsep = "------------------------------------------------------------"
    print longsep
    print string.ljust("TEST", 48), "RESULT"
    print shortsep
    failed_tests = []
    self.ini["server"] = server

    for test in self.tests:
        sys.stdout.write(string.ljust(test.name, 48))
        # for better diagnostics in case of a long-running test
        sys.stdout.flush()
        test_name = os.path.basename(test.name)
        if test_name in self.ini["disabled"]:
            print "[ skip ]"
        elif not server.debug and test_name in self.ini["release_disabled"]:
            print "[ skip ]"
        elif self.args.valgrind and test_name in self.ini["valgrind_disabled"]:
            print "[ skip ]"
        else:
            test.run(server)
            if not test.passed():
                failed_tests.append(test.name)
    print shortsep
    if len(failed_tests):
        print "Failed {0} tests: {1}.".format(len(failed_tests),
                                              ", ".join(failed_tests))
    server.stop(silent=False)
    server.cleanup()

    if self.args.valgrind and check_valgrind_log(server.valgrind_log):
        print " Error! There were warnings/errors in valgrind log file:"
        print_tail_n(server.valgrind_log, 20)
        return 1
    return len(failed_tests)
def show(info="\n", END=False):
    '''
    Output information in a table-like form: print it on screen and
    also append it to the file named by DEFINE_LOG_FILE.
    '''
    out = str(info)
    outfile = open(DEFINE_LOG_FILE, "a")
    if isinstance(info, int):
        outfile.write("%s\t" % out)
        print string.ljust(out, 5),
    elif isinstance(info, float):
        outfile.write("%s\t" % out)
        print "%.3f" % info,
    elif isinstance(info, list) or isinstance(info, set) or isinstance(info, tuple):
        for e in info:
            show(e)
    elif out.endswith("\n"):
        outfile.write(out)
        print out[:min(len(out) - 1, 78)]
    else:
        outfile.write(str(out) + "\t")
        print string.ljust(out[:min(len(out), 25)], 25),
    if END:
        print ''
        outfile.write("\n")
    outfile.close()
    return ''
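# A minimal usage sketch for show(); DEFINE_LOG_FILE is assumed to be a
# module-level path, set here only for the demo.
DEFINE_LOG_FILE = "show_demo.log"
show("gene_id")     # string: left-justified to 25 columns, row continues
show(42)            # int: left-justified to 5 columns
show(3.14159)       # float: rendered with three decimals
show(END=True)      # close out the row on screen and in the log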
def setAIPS(self, fd, value):
    """ Write parameter value to AIPS TD file opened as fd

    Inputs:
       fd    = open TD file positioned for write
       value = data value dictionary
    """
    ################################################################
    # Is this a "GO" parameter
    code = self.AIPScode
    if (code == ' ') | (code == '*') | (code == '&') | (code == '$'):
        myvalue = value[self.name]  # extract value array from dictionary
        # list and scalar separate
        if myvalue.__class__ == list:  # List
            # AIPS only supports float and strings
            if self.type == float:  # Float
                for x in myvalue:
                    fd.write(struct.pack('f', x))
            elif self.type == str:  # String
                for x in myvalue:
                    # Blank pad to proper length
                    xx = string.ljust(string.strip(x), self.dim[0])
                    fmt = str(self.dim[0]) + 's'
                    fd.write(struct.pack(fmt, xx))
        else:  # Scalar
            if self.type == float:  # Float
                fd.write(struct.pack('f', myvalue))
            elif self.type == str:  # String
                # Blank pad to proper length
                xx = string.ljust(string.strip(myvalue), self.dim[0])
                fmt = str(self.dim[0]) + 's'
                fd.write(struct.pack(fmt, xx))
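# The string case above in miniature: blank-pad to the declared dimension,
# then pack with a fixed-width 's' format (Python 2 string module; the
# value 'NVSS' and width 8 are illustrative only).
import string, struct
dim0 = 8
xx = string.ljust(string.strip("  NVSS  "), dim0)   # -> 'NVSS    '
packed = struct.pack(str(dim0) + 's', xx)
assert packed == 'NVSS    '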
def miscellaneous():
    #
    # alias
    #
    fp.write('#\n# Alias\n#\n')
    results = db.sql('''
        select alias, cdate = convert(char(10), creation_date, 101)
        from MRK_Alias_View
        where _Marker_key = %s
        order by alias
        ''' % (markerKey), 'auto')
    for r in results:
        fp.write(string.ljust(r['alias'], 30) + TAB)
        fp.write(string.ljust(r['cdate'], 15) + CRT)

    #
    # synonym
    #
    fp.write('#\n# Synonym\n#\n')
    results = db.sql('''
        select synonym, cdate = convert(char(10), creation_date, 101)
        from MGI_Synonym
        where _MGIType_key = 2
        and _Object_key = %s
        ''' % (markerKey), 'auto')
    for r in results:
        fp.write(string.ljust(r['synonym'], 30) + TAB)
        fp.write(string.ljust(r['cdate'], 15) + CRT)
def __str__(self):
    output = "Seq: %s\n" % self.seq
    output += "Parent Seq: %s\n" % self.parent_seq
    output += "mutations_coding_region: %s\n" % self.mutations_coding_region
    output += "mutations_noncoding_region: %s\n" % self.mutations_noncoding_region
    output += "mutations_degenerate_region: %s\n" % self.mutations_degenerate_region
    output += "net_fitness_effect: %s\n" % self.net_fitness_effect
    output += "function_effect_backbone: %s\n" % self.function_effect_backbone
    output += "function_effect_fluctuating: %s\n" % self.function_effect_fluctuating
    output += "site_function_vector:\n"
    ## prep the header
    vector_output = "    "
    for funct in range(7):
        vector_output += "%s" % string.ljust(SiteFunction.tostring(funct), 4)
    vector_output += "\n"
    ## output the thing
    for (funct, array) in zip(range(7), self.site_function_vector):
        arr = ""
        for val in array:
            arr += string.ljust(str(val), 4)
        vector_output += "%s%s\n" % (string.ljust(SiteFunction.tostring(funct), 4), arr)
    output += vector_output
    output += "Comment: %s" % self.comment
    return output
def report(self, prefix="", emit=None):
    if not emit:
        emit = logging.info
    if getattr(self, "source", None):
        emit(prefix + "source : %s" % (self.source))
    if getattr(self, "sink", None):
        emit(prefix + "sink   : %s" % (self.sink))
    cur_time = time.time()
    delta = cur_time - self.prev_time
    c, p = self.cur, self.prev
    x = sorted([k for k in c.iterkeys() if "_sink_" in k])
    width_k = max([5] + [len(k.replace("tot_sink_", "")) for k in x])
    width_v = max([20] + [len(str(c[k])) for k in x])
    width_d = max([10] + [len(str(c[k] - p[k])) for k in x])
    width_s = max([10] + [len("%0.1f" % ((c[k] - p[k]) / delta)) for k in x])
    emit(prefix + " %s : %s | %s | %s" %
         (string.ljust("", width_k),
          string.rjust("total", width_v),
          string.rjust("last", width_d),
          string.rjust("per sec", width_s)))
    verbose_set = ["tot_sink_batch", "tot_sink_msg"]
    for k in x:
        if k not in verbose_set or self.opts.verbose > 0:
            emit(prefix + " %s : %s | %s | %s" %
                 (string.ljust(k.replace("tot_sink_", ""), width_k),
                  string.rjust(str(c[k]), width_v),
                  string.rjust(str(c[k] - p[k]), width_d),
                  string.rjust("%0.1f" % ((c[k] - p[k]) / delta), width_s)))
    self.prev_time = cur_time
    self.prev = copy.copy(c)
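# The width computation above, in miniature: pick each column's width from
# the widest value it must hold, then ljust/rjust everything into it.
# The counter values here are illustrative only.
import string

c = {"tot_sink_batch": 12, "tot_sink_msg": 3456}
width_k = max([5] + [len(k.replace("tot_sink_", "")) for k in c])
for k in sorted(c):
    print string.ljust(k.replace("tot_sink_", ""), width_k), c[k]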
def getPDBString(self):
    """
    Returns a string of the new atom type. Uses the ATOM string
    output but changes the first field to either ATOM or HETATM
    as necessary. This is for the pdb representation of the atom.
    The propka30 module depends on this being correct. No touchy!

    Returns
        str: String with ATOM/HETATM field set appropriately
    """
    outstr = self.getCommonStringRep(chainflag=True)
    tstr = "%6.2f" % self.occupancy
    outstr += string.ljust(tstr, 6)[:6]
    tstr = "%6.2f" % self.tempFactor
    outstr += string.rjust(tstr, 6)[:6]
    tstr = self.segID
    outstr += string.ljust(tstr, 4)[:4]
    tstr = self.element
    outstr += string.ljust(tstr, 2)[:2]
    tstr = str(self.charge)
    outstr += string.ljust(tstr, 2)[:2]
    return outstr
def display(self, cols):
    # Create the maxCols list, which holds the max
    # length of the elements in each column.
    maxCols = []
    for i in range(0, len(cols[0])):
        maxCols.append(0)
    for row in cols:
        for i in range(0, len(row)):
            if len(row[i]) > maxCols[i]:
                maxCols[i] = len(row[i])
    # Print the table using the maxCols list to force all
    # columns in the rows to line up. The first column
    # (hostname) is left justified, and the subsequent
    # columns are right justified.
    for row in cols:
        for i in range(0, len(row)):
            if i == 0:
                print string.ljust(row[i], maxCols[i]),
            else:
                print string.rjust(row[i], maxCols[i]),
            if i < len(row) - 1:
                print '\t',
        print
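# Minimal usage sketch for display(); _Demo is a hypothetical holder used
# only to bind the method for the call, and the rows are illustrative.
import string

class _Demo(object):
    display = display

rows = [['hostname', 'ncpu', 'memMB'],
        ['web01', '12', '2048'],
        ['db02', '3', '16384']]
_Demo().display(rows)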
def writeMol2File(self, outFile, whichXyz=None):
    '''writes the data to an already open file. don't close it.'''
    if whichXyz is None:
        whichXyz = range(self.xyzCount)
    for oneXyz in whichXyz:
        outFile.write("@<TRIPOS>MOLECULE\n")
        if self.protName == "fake":  # don't write fake
            outFile.write(self.name + "\n")
        else:
            outFile.write(self.name + " " + self.protName + "\n")
        outFile.write("%5d %5d %5d %5d %5d\n" % (len(self.atomNum),
                      len(self.bondNum), 0, 0, 0))
        outFile.write("SMALL\nUSER_CHARGES\n\n")
        outFile.write("mmff94s_NoEstat = %5.2f\n" % self.inputEnergy[oneXyz])
        outFile.write("@<TRIPOS>ATOM\n")
        for oneAtom in xrange(len(self.atomNum)):
            outFile.write(
                "%7d %6s % 8.4f % 8.4f % 8.4f %5s 1 <0> % 8.4f\n" %
                (self.atomNum[oneAtom], string.ljust(self.atomName[oneAtom], 6),
                 self.atomXyz[oneXyz][oneAtom][0],
                 self.atomXyz[oneXyz][oneAtom][1],
                 self.atomXyz[oneXyz][oneAtom][2],
                 string.ljust(self.atomType[oneAtom], 5),
                 self.atomCharge[oneAtom]))
        outFile.write("@<TRIPOS>BOND\n")
        for oneBond in xrange(len(self.bondNum)):
            outFile.write(
                "%6d%5d%5d %2s\n" %
                (self.bondNum[oneBond], self.bondStart[oneBond],
                 self.bondEnd[oneBond], string.ljust(self.bondType[oneBond], 2)))
def __str__(self):
    # display process details (OLD, doesn't show many details)
    c = string.ljust(self.comm, 20)
    u = string.ljust(self.user, 10)
    t = string.ljust(self.time, 10)
    return ('%s\t%s\t%s\t%s\t%s\t%s' %
            (self.pid, u, c, t, self.pcpu, self.s))
def main():
    # initialize DrQueue client
    client = DrQueueClient()

    # fetch a list of all jobs
    jobs = client.query_job_list()

    # walk through tasks of every job
    for job in jobs:
        tasks = client.query_task_list(job['_id'])
        print("\nJob \"%s\" (ID: %s):" % (job['name'], job['_id']))
        print("Overall status: " + client.job_status(job['_id']))
        print("Task id                              status   owner      completed at")
        for task in tasks:
            tmsg_id = task['msg_id']
            theader = task['header']
            username = theader['username']
            if task['completed'] is None:
                status = "pending"
                print("%s %s %s" % (tmsg_id, string.ljust(status, 8),
                                    string.ljust(username, 10)))
            else:
                result_header = task['result_header']
                result_content = task['result_content']
                status = result_header['status']
                cpl = task['completed']
                print("%s %s %s %i-%02i-%02i %02i:%02i:%02i" %
                      (tmsg_id, string.ljust(status, 8),
                       string.ljust(username, 10),
                       cpl.year, cpl.month, cpl.day,
                       cpl.hour, cpl.minute, cpl.second))
                if result_header['status'] == 'error':
                    print " Error was: " + result_content['evalue']
def disassemble(co, lasti):
    code = co.co_code
    labels = findlabels(code)
    n = len(code)
    i = 0
    while i < n:
        c = code[i]
        op = ord(c)
        if op == SET_LINENO and i > 0:  # '==', not '=' (comparison, not assignment)
            print  # Extra blank line
        if i == lasti:  # same fix here
            print '-->',
        else:
            print '   ',
        if i in labels:
            print '>>',
        else:
            print '  ',
        print string.rjust(`i`, 4),
        print string.ljust(opname[op], 15),
        i = i + 1
        if op >= HAVE_ARGUMENT:
            oparg = ord(code[i]) + ord(code[i + 1]) * 256
            i = i + 2
            print string.rjust(`oparg`, 5),
            if op in hasconst:
                print '(' + `co.co_consts[oparg]` + ')',
            elif op in hasname:
                print '(' + co.co_names[oparg] + ')',
            elif op in hasjrel:
                print '(to ' + `i + oparg` + ')',
        print
def print_self(self):
    print "Printing components of perceptron vectors"
    print string.ljust("feature", 20), "\tw1\tw2\tw3"
    assert (self.ws[1].keys() == self.ws[2].keys())
    assert (self.ws[2].keys() == self.ws[3].keys())
    for feat in self.keys:
        print string.ljust(feat, 20), "\t", self.ws[1][feat], "\t", self.ws[2][feat], "\t", self.ws[3][feat]
def report(self):
    if self.unknown:
        print '--- Unrecognized commands ---'
        cmds = self.unknown.keys()
        cmds.sort()
        for cmd in cmds:
            print string.ljust(cmd, 20), self.unknown[cmd]
def print_items(self, max_items):
    import os
    import string
    from findtorrent.core.colors import colors
    from hurry.filesize import size, si
    cols = int(os.popen('stty size', 'r').read().split()[1])
    print colors.HEADER + \
        'No.  Name' + (cols - 33) * ' ' + 'Size  Files  Seed  Leech'
    print cols * '-'
    for index, item in enumerate(Items.sorted):
        if (max_items != -1 and index + 1 > max_items):
            break
        print colors.INDEX + string.ljust(str(index + 1) + '.', 5) + \
            colors.NAME + string.ljust(item['name'][:cols - 31], cols - 31) + \
            colors.SIZE + string.rjust(size(item['size'], system=si), 6) + \
            colors.FILES + string.rjust(str(item['files'])
                                        .replace('-1', 'N/A'), 7) + \
            colors.SEED + string.rjust(str(item['seed'])
                                       .replace('-1', 'N/A'), 6) + \
            colors.LEECH + string.rjust(str(item['leech'])
                                        .replace('-1', 'N/A'), 7) + \
            colors.ENDC
def usage():
    print _("Usage: %s <options> ...") % sys.argv[0]
    print
    print _("The following options are understood:")

    opt_list = []
    for r in opt_table:
        opt = "--" + r[1]
        if r[0]:
            opt = "-" + r[0] + ", " + opt
        if r[2]:
            opt = opt + "=<" + r[2] + ">"
        opt_list.append([opt + " ", r[3]])

    # By appending [0,0], we ensure that this will work even if
    # opt_list is empty (which it never should be)
    max_len = apply(max, map(lambda x: len(x[0]), opt_list) + [0, 0])

    for opt, desc_str in opt_list:
        if 79 - max_len > 10:
            desc = rcd_util.linebreak(desc_str, 79 - max_len)
        else:
            desc = [desc_str]
        desc_first = desc.pop(0)
        print string.ljust(opt, max_len) + desc_first
        for d in desc:
            print " " * max_len + d
def checkNumberOfRateRules(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    errorFlag = False
    value = int(readLine())
    if rrInstance.model.getNumRateRules() != value:
        errorFlag = True
    print passMsg(errorFlag)
def Point3DCoord():
    "show the coordinates of a selected point"
    pt = Point2D()
    res = kcs_ui.point2D_req("Pick the point to query", pt)
    if res[0] == kcs_util.ok():
        res = kcs_util.tra_coord_ship(pt.X, pt.Y, "")
        if res[0] == 0:
            pt3d = Point3D(res[1], res[2], res[3])
            res, fr, fr_offset = kcs_util.coord_to_pos(1, pt3d.X)
            res, lp_y, lp_y_offset = kcs_util.coord_to_pos(2, pt3d.Y)
            res, lp_z, lp_z_offset = kcs_util.coord_to_pos(3, pt3d.Z)
            fr_offset = round(fr_offset, 2)
            lp_y_offset = round(lp_y_offset, 2)
            lp_z_offset = round(lp_z_offset, 2)
            x = round(pt3d.X, 2)
            y = round(pt3d.Y, 2)
            z = round(pt3d.Z, 2)
            Msg("--" * 25)
            Msg("2D point: " + str(pt))
            Msg("3D coordinates:")
            Msg("X: %s,FR%s %s" % (string.ljust(str(x), width), str(fr), _fmt(fr_offset)))
            Msg("Y: %s,LP%s %s" % (string.ljust(str(y), width), str(lp_y), _fmt(lp_y_offset)))
            Msg("Z: %s,LP%s %s" % (string.ljust(str(z), width), str(lp_z), _fmt(lp_z_offset)))
            Msg("--" * 25)
def write_data(toolbox, data):
    '''Opens the workbook, then the worksheet, then writes the data.'''
    try:
        workbook = excel.Workbooks.Open(toolbox)
    except Exception as e:
        print "Unable to instantiate workbook!"
        print repr(e)
        raise
    if not workbook:
        print "Unable to instantiate workbook!"
        raise IOError("Workbook not found.")

    try:
        sheet = get_sheet('Inputs')
    except Exception as e:
        print "Unable to instantiate worksheet!"
        print repr(e)
        raise
    if not sheet:
        print "Unable to instantiate worksheet!"
        raise IOError("Worksheet not found")

    for k, d in data.items():
        if d.cell:
            print ('{}: writing value {} to cell {}'
                   ''.format(ljust(k, 30), d.value, d.cell))
            sheet.Range(d.cell).Value = d.value
        else:
            print ('{}: skipping value {}, no cell'
                   ''.format(ljust(k, 30), d.value))
    excel.Visible = True
def short_help(self):
    help_str = "\nSupported modes are:\n"
    mode_list = Mode.mode_list()
    for mode in mode_list:
        aliases = mode.aliases()
        s = ''
        if aliases:
            s += ' (%s)' % ', '.join(aliases)
        lhs = '%s%s:' % (mode.__name__, s)
        name_box = 15
        name = string.ljust(lhs, name_box)
        help = toolkit.boxed_string(mode.help(), 60)
        indent = string.ljust('', name_box + 4)
        indented_help = string.replace(help, '\n', '\n' + indent)
        help_str = ("%s    %s%s\n" % (help_str, name, indented_help))
    print ("%s\nType '%s mode --help' for further help on a mode" %
           (help_str, self.get_prog_name()))
def checkGlobalParameterValues(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    ss = rrInstance.model.getGlobalParameterValues()
    compareUpcomingValuesWith(ss, 1E-6)
# metadata
metadata_dir = utils.get_dir_path('models', pathfinder.METADATA_PATH)
metadata_path = metadata_dir + '/%s.pkl' % expid

# logs
logs_dir = utils.get_dir_path('logs', pathfinder.METADATA_PATH)
sys.stdout = logger.Logger(logs_dir + '/%s.log' % expid)
sys.stderr = sys.stdout

print 'Build model'
model = config().build_model()
all_layers = nn.layers.get_all_layers(model.l_out)
all_params = nn.layers.get_all_params(model.l_out)
num_params = nn.layers.count_params(model.l_out)
print '  number of parameters: %d' % num_params
print string.ljust('  layer output shapes:', 36),
print string.ljust('#params:', 10),
print 'output shape:'
for layer in all_layers:
    name = string.ljust(layer.__class__.__name__, 32)
    num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
    num_param = string.ljust(num_param.__str__(), 10)
    print '    %s %s %s' % (name, num_param, layer.output_shape)

train_loss = config().build_objective(model, deterministic=False)
valid_loss = config().build_objective(model, deterministic=True)

learning_rate_schedule = config().learning_rate_schedule
learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
updates = config().build_updates(train_loss, model, learning_rate)
def copy_collection_parent(sources, dest, state_db, args):
    """
    drive the collection copying process by delegating work to a pool
    of worker processes
    """
    # ensure state db has rows for each source/dest pair
    for source in sources:
        state_db.add_source_and_dest(source, dest)

    # space-pad all process names so that tabular output formats line up
    process_names = {repr(source): "%s:%d" % (source['host'], source['port'])
                     for source in sources}
    process_names['parent'] = PARENT_PROCESS_NAME
    max_process_name_len = max(len(name)
                               for name in process_names.itervalues())
    for key in process_names:
        process_names[key] = string.ljust(process_names[key],
                                          max_process_name_len)
    multiprocessing.current_process().name = process_names['parent']

    # -----------------------------------------------------------------------
    # perform initial copy, if it hasn't been done yet
    # -----------------------------------------------------------------------
    in_initial_copy = len(
        state_db.select_by_state(CopyStateDB.STATE_INITIAL_COPY))
    if in_initial_copy and in_initial_copy < len(sources):
        die("prior attempt at initial copy failed; rerun with --restart")
    if in_initial_copy > 0:
        ensure_empty_dest(dest)

        # each worker process copies one shard
        processes = []
        for source in sources:
            name = process_names[repr(source)]
            process = multiprocessing.Process(target=copier.copy_collection,
                                              name=name,
                                              kwargs=dict(
                                                  source=source,
                                                  dest=dest,
                                                  state_path=state_db._path,
                                                  percent=args.percent))
            process.start()
            processes.append(process)

        # wait for all workers to finish
        utils.wait_for_processes(processes)

    # -----------------------------------------------------------------------
    # build indices on main process, since that only needs to be done once
    # -----------------------------------------------------------------------
    waiting_for_indices = len(
        state_db.select_by_state(CopyStateDB.STATE_WAITING_FOR_INDICES))
    if waiting_for_indices and waiting_for_indices < len(sources):
        die("not all initial copies have been completed; rerun with --restart")
    if waiting_for_indices > 0:
        log.info("building indices")
        copier.copy_indexes(sources[0], dest)
        for source in sources:
            state_db.update_state(source, dest,
                                  CopyStateDB.STATE_APPLYING_OPLOG)
"SELECT * from transactionLog where action LIKE '%%CASH_REMOVED%%' and date>=%s and date<=ADDDATE(%s,INTERVAL 1 DAY) order by date", (date1, date2)) else: if len(sys.argv) == 4: date1 = sys.argv[2] date2 = sys.argv[3] cursor.execute( "SELECT * from transactionLog where action LIKE '%%CASH_REMOVED%%' and info LIKE %s and date>=%s and date<=ADDDATE(%s,INTERVAL 1 DAY) order by date", ("%" + sys.argv[1] + "%", date1, date2)) else: print "usage: reportrange.py (string) date1 date2" sys.exit() rows = cursor.fetchall() total = 0 for r in rows: price = r[1] total = total + r[1] price_string = rjust("%.2f" % r[1], 10) info_string = r[4] if 'tostring' in dir(info_string): info_string = info_string.tostring() print "%s | %s | %s" % (r[2], ljust("%s" % (info_string[:50]), 50), price_string) print print "Total Sales: %.2f" % total
def process_results(in_filename, out_file):
    # open the input file
    f = open(in_filename)
    # read in the file's lines
    lines = f.readlines()
    # close the file
    f.close()

    # Set up search patterns
    pat_set = re.compile("set ")
    pat_keep = re.compile(
        'min|typ|num_no_bridge|num_sliver|num_stub|num_miss_via|num_miss_pth')
    pat_root = re.compile('^min|^typ|^num')
    pat_under = re.compile('_')
    pat_equal = re.compile(' *= *')
    pat_notavail = re.compile('N/A')

    attribs_main = {}
    layer_list = []
    longest_name = 0

    # go through each line and create the necessary structures
    for line in lines:
        # Search for the pattern(s) set above, and modify
        # line accordingly
        s_res = pat_set.search(line)
        line = line[s_res.end():]  # keeps everything after the set - set discarded
        s_res = pat_keep.search(line)
        if (s_res):  # this is taken if it is a layer attribute
            s_res = pat_root.search(line)  # contains min or typ?
            if not (s_res):  # This taken if prepended with a layer name
                s_res = pat_under.search(line)
                layer_end = s_res.start()
                layer_name = line[0:layer_end]
                layer_end = layer_end + 1
                if not (layer_list.count(layer_name)):
                    layer_list.append(layer_name)
            else:
                # this is taken if it is a summation (total) from layers
                # (No layer name at beginning)
                layer_name = "Overall"
                layer_end = 0
            s_res = pat_equal.search(line)
            attr_end = s_res.start()
            value_start = s_res.end()
            attr_name = line[layer_end:attr_end]
            if (len(attr_name) > longest_name):
                longest_name = len(attr_name)
            attr_val = line[value_start:].rstrip()
            s_res = pat_notavail.search(attr_val)
            if not (s_res):
                if not attribs_main.has_key(attr_name):
                    temp = dict()
                    temp[layer_name] = attr_val
                    attribs_main[attr_name] = temp.copy()
                else:
                    temp = attribs_main[attr_name].copy()
                    temp[layer_name] = attr_val
                    attribs_main[attr_name] = temp.copy()
        # end of if, matching min and typ
    # end of for

    # Data structure is now populated, and we traverse through it, writing
    # the information in the way that we like.
    out_file.write("\n")
    padding = ' '
    out_file.write(padding + string.ljust("Attribute", longest_name))
    for layer in layer_list:
        out_file.write(string.rjust(layer, 8))
    out_file.write(string.rjust("Overall", 8) + '\n')
    layer_list.append("Overall")
    underlines = "------------------" * 20
    out_file.write(padding + underlines[0:longest_name + 8 * len(layer_list)] + '\n')

    main_keys = attribs_main.keys()
    main_keys.sort()
    for name in main_keys:
        out_file.write(padding + string.ljust(name, longest_name))
        temp_dict = attribs_main[name]
        for layer in layer_list:
            if (temp_dict.get(layer)):
                out_file.write(string.rjust(temp_dict[layer], 8))
            else:
                out_file.write(string.rjust("N/A", 8))
        out_file.write("\n")
    longest_name = 0
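# The nested-dict bookkeeping above, in miniature: attribs_main maps
# attribute name -> {layer name -> value}. dict.setdefault() achieves the
# same thing without the has_key/copy dance (values here are illustrative).
attribs_main = {}
attribs_main.setdefault('min_width', {})['layer1'] = '4.0'
attribs_main.setdefault('min_width', {})['Overall'] = '3.5'
print attribs_main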
def main(n=1, num_filters=8, num_epochs=500, cudnn='no'):
    assert n >= 0
    assert num_filters > 0
    assert num_epochs > 0
    assert cudnn in ['yes', 'no']
    print("Amount of bottlenecks: %d" % n)

    # Load the dataset
    print("Loading data...")
    X_train, y_train, X_val, y_val, X_test, y_test = load_dataset()

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4('inputs')
    target_var = T.ivector('targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions...")
    network = build_cnn(input_var, n, num_filters, cudnn)

    all_layers = lasagne.layers.get_all_layers(network)
    num_params = lasagne.layers.count_params(network)
    num_conv = 0
    num_nonlin = 0
    num_input = 0
    num_batchnorm = 0
    num_elemsum = 0
    num_dense = 0
    num_unknown = 0
    print("  layer output shapes:")
    for layer in all_layers:
        name = string.ljust(layer.__class__.__name__, 32)
        print("    %s %s" % (name, lasagne.layers.get_output_shape(layer)))
        if "Conv2D" in name:
            num_conv += 1
        elif "NonlinearityLayer" in name:
            num_nonlin += 1
        elif "InputLayer" in name:
            num_input += 1
        elif "BatchNormLayer" in name:
            num_batchnorm += 1
        elif "ElemwiseSumLayer" in name:
            num_elemsum += 1
        elif "DenseLayer" in name:
            num_dense += 1
        else:
            num_unknown += 1
    print("  no. of InputLayers: %d" % num_input)
    print("  no. of Conv2DLayers: %d" % num_conv)
    print("  no. of BatchNormLayers: %d" % num_batchnorm)
    print("  no. of NonlinearityLayers: %d" % num_nonlin)
    print("  no. of DenseLayers: %d" % num_dense)
    print("  no. of ElemwiseSumLayers: %d" % num_elemsum)
    print("  no. of Unknown Layers: %d" % num_unknown)
    print("  total no. of layers: %d" % len(all_layers))
    print("  no. of parameters: %d" % num_params)

    # Create a loss expression for training, i.e., a scalar objective we want
    # to minimize (for our multi-class problem, it is the cross-entropy loss):
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
    loss = loss.mean()
    # We could add some weight decay as well here, see lasagne.regularization.

    # Create update expressions for training, i.e., how to modify the
    # parameters at each training step. Here, we'll use Stochastic Gradient
    # Descent (SGD) with Nesterov momentum, but Lasagne offers plenty more.
    params = lasagne.layers.get_all_params(network, trainable=True)

    # several learning rates for low initial learning rates and
    # learning rate annealing (id is epoch)
    learning_rate_schedule = {
        0: 0.0001,  # low initial learning rate as described in paper
        2: 0.01,
        100: 0.001,
        150: 0.0001
    }
    learning_rate = theano.shared(np.float32(learning_rate_schedule[0]))
    updates = lasagne.updates.nesterov_momentum(
        loss, params, learning_rate=learning_rate, momentum=0.9)

    # Create a loss expression for validation/testing. The crucial difference
    # here is that we do a deterministic forward pass through the network,
    # disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)
    test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,
                                                            target_var)
    test_loss = test_loss.mean()
    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function([input_var, target_var], loss, updates=updates)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc])

    # Finally, launch the training loop.
    print("Starting training...")
    # We iterate over epochs:
    for epoch in range(num_epochs):
        if epoch in learning_rate_schedule:
            lr = np.float32(learning_rate_schedule[epoch])
            print("  setting learning rate to %.7f" % lr)
            learning_rate.set_value(lr)
        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()
        for batch in iterate_minibatches(X_train, y_train, 500, shuffle=True):
            inputs, targets = batch
            train_err += train_fn(inputs, targets)
            train_batches += 1

        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(X_val, y_val, 500, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.6f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))

    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    for batch in iterate_minibatches(X_test, y_test, 500, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))
def format_string(sstrs):
    return string.ljust(sstrs, STRLEN)
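# Quick sketch, assuming STRLEN is the module-level field width (12 here
# only for the demo):
import string
STRLEN = 12
print "[%s]" % format_string("abc")   # -> [abc         ]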
# <directory path>/standardize_crd.py <input filename>
import string, sys, math
from string import ljust

if len(sys.argv) < 2:  # argv[0] is the script name, so '< 1' could never trigger
    print 'Need to give the input filename as the argument'
    sys.exit()

inp = open(sys.argv[1], 'r')
out = open('newoutput.cor', 'w')

for each in inp:
    els = each.split()
    if each.startswith('*'):
        pass
    elif len(els) == 1:
        print >> out, "%5d" % (int(els[0]))
    else:
        print >> out, "%5d%5d%5s%5s%10.5f%10.5f%10.5f%5s%5s%10.5f" % (
            int(els[0]), int(els[1]), ljust(' ' + els[2], 5),
            ljust(' ' + els[3], 5), float(els[4]), float(els[5]),
            float(els[6]), ljust(' ' + els[7], 5), ljust(' ' + els[1], 5), 0.0)

# From the charmm documentation:
# ATOMNO RESNO RES TYPE X Y Z SEGID RESID Weighting
# I5 I5 1X A4 1X A4 F10.5 F10.5 F10.5 1X A4 1X A4 F10.5

inp.close()
out.close()
def __str__(self):
    class_str = ''
    for name, value in self.__class__.__dict__.items() + self.__dict__.items():
        class_str += string.ljust(name, 15) + '\t' + str(value) + '\n'
    return class_str
        n_steps=int(math.ceil(total_time / engine.DT)),
        strict=True,
        non_sequences=get_shared_variables()
    )
    #print updates
    #assert len(updates)==0
    return outputs, updates

controller = build_controller()

top_layer = lasagne.layers.MergeLayer(
    incomings=[controller[key] for key in controller if key != "input"]
)
controller_parameters = lasagne.layers.helper.get_all_params(top_layer)

import string
print string.ljust("  layer output shapes:", 26),
print string.ljust("#params:", 10),
print string.ljust("#data:", 10),
print "output shape:"

def comma_seperator(v):
    return '{:,.0f}'.format(v)

all_layers = lasagne.layers.get_all_layers(top_layer)
all_params = lasagne.layers.get_all_params(top_layer, trainable=True)
num_params = sum([np.prod(p.get_value().shape) for p in all_params])

for layer in all_layers[:-1]:
    name = string.ljust(layer.__class__.__name__, 22)
    num_param = sum([np.prod(p.get_value().shape) for p in layer.get_params()])
    num_param = string.ljust(comma_seperator(num_param), 10)
    num_size = string.ljust(comma_seperator(np.prod(layer.output_shape[1:])), 10)
dev = AL.DEFAULT_DEVICE
source_name = ['line', 'microphone', 'digital']
params = al.queryparams(dev)
for i in range(1, len(params), 2):
    params[i] = -1
while 1:
    time.millisleep(100)
    old = params[:]
    al.getparams(dev, params)
    if params != old:
        for i in range(0, len(params), 2):
            if params[i + 1] != old[i + 1]:
                name = al.getname(dev, params[i])
                if params[i] == AL.INPUT_SOURCE:
                    if 0 <= old[i + 1] < len(source_name):
                        oldval = source_name[old[i + 1]]
                    else:
                        oldval = ''
                    newval = source_name[params[i + 1]]
                else:
                    oldval = `old[i + 1]`
                    newval = `params[i + 1]`
                print string.ljust(name, 25),
                print '(' + string.rjust(oldval, 10) + ')',
                print '-->',
                print string.rjust(newval, 10)
        print
def Message(mystr):
    """Returns a header of standard length HDRLEN."""
    return string.ljust(string.upper(mystr), HDRLEN)
def checkComputeSteadyStateValues(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    ss = rrInstance.getSteadyStateValues()
    compareUpcomingValuesWith(ss, 1E-6)
def checkReactionRates(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    ss = rrInstance.model.getReactionRates()
    compareUpcomingValuesWith(ss, 1E-4)
def checkScaledElasticityAmountMatrix(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    roadrunner.Config.setValue(Config.ROADRUNNER_JACOBIAN_MODE,
                               Config.ROADRUNNER_JACOBIAN_MODE_AMOUNTS)
    ee = rrInstance.getScaledElasticityMatrix()
    checkMatrixVsUpcomingText(ee)
def checkBoundarySpeciesConcentrations(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    ss = rrInstance.model.getBoundarySpeciesConcentrations()
    compareUpcomingValuesWith(ss, 1E-6)
def checkScaledFluxControlCoefficientMatrix(rrInstance, testId):
    # Scaled Flux Control matrix (the original comment said "Unscaled")
    print string.ljust("Check " + testId, rpadding),
    st = rrInstance.getScaledFluxControlCoefficientMatrix()
    checkMatrixVsUpcomingText(st)
def checkGetTimeCourseSelectionList(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    words = divide(readLine())
    result = str(rrInstance.selections)
    print passMsg(result == words)
def checkReducedStoichiometryMatrix(rrInstance, testId):
    # Reduced stoichiometry matrix
    print string.ljust("Check " + testId, rpadding),
    st = rrInstance.getReducedStoichiometryMatrix()
    checkMatrixVsUpcomingText(st)
def checkUnscaledElasticityMatrix(rrInstance, testId):
    print string.ljust("Check " + testId, rpadding),
    roadrunner.Config.setValue(Config.ROADRUNNER_JACOBIAN_MODE,
                               Config.ROADRUNNER_JACOBIAN_MODE_CONCENTRATIONS)
    uee = rrInstance.getUnscaledElasticityMatrix()
    checkMatrixVsUpcomingText(uee)
def set_nbname(self, name):
    self.__nbname = string.ljust(name, 17)
def checkLinkMatrix(rrInstance, testId):
    # Link matrix
    print string.ljust("Check " + testId, rpadding),
    st = rrInstance.getLinkMatrix()
    checkMatrixVsUpcomingText(st)
def checkSimulateTimepointsVsIntervals(rrInstance, testId):
    '''
    Third positional argument is number of points.
    Steps keyword argument is number of intervals.
    '''
    print(string.ljust("Check " + testId, rpadding), end="")
    errorFlag = False
    try:
        n1 = rrInstance.simulate(0, 10, steps=1)
        if n1.shape[0] != 2:
            errorFlag = True
        n2 = rrInstance.simulate(0, 10, 2)
        if n2.shape[0] != 2:
            errorFlag = True
    except:
        errorFlag = True

    try:
        m = rrInstance.simulate(0, 100, 51)
        n = rrInstance.simulate(0, 100, points=51)
        if n.shape[0] != m.shape[0]:
            errorFlag = True
        n = rrInstance.simulate(start=0, end=100, points=51)
        if n.shape[0] != m.shape[0]:
            errorFlag = True
        n = rrInstance.simulate(0, 100, steps=50)
        if n.shape[0] != m.shape[0]:
            errorFlag = True
        n = rrInstance.simulate(start=0, end=100, steps=50)
        if n.shape[0] != m.shape[0]:
            errorFlag = True

        if len(rrInstance.model.getFloatingSpeciesIds()) < 1:
            errorFlag = True
        else:
            spec_id = rrInstance.model.getFloatingSpeciesIds()[0]
            m = rrInstance.simulate(0, 100, 51, ['time', spec_id])
            n = rrInstance.simulate(0, 100, points=51, selections=['time', spec_id])
            if n.shape[0] != m.shape[0]:
                errorFlag = True
            n = rrInstance.simulate(start=0, end=100, points=51, selections=['time', spec_id])
            if n.shape[0] != m.shape[0]:
                errorFlag = True
            n = rrInstance.simulate(0, 100, steps=50, selections=['time', spec_id])
            if n.shape[0] != m.shape[0]:
                errorFlag = True
            n = rrInstance.simulate(start=0, end=100, steps=50, selections=['time', spec_id])
            if n.shape[0] != m.shape[0]:
                errorFlag = True
    except:
        errorFlag = True

    print(passMsg(errorFlag))
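# The invariant the test above exercises: simulating K steps (intervals)
# yields K+1 sample points, so points=51 and steps=50 produce the same
# number of output rows.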
def runTester(testDir=None):
    """
    Run a series of tests from a testing dir.

    This enumerates all the files in a directory, and all the files
    ending with '.rrtest' are assumed to be testing files.
    """
    global fHandle

    import os.path as p
    from glob import glob

    if testDir is None:
        testDir = getDefaultTestDir()

    if not p.isdir(testDir):
        raise Exception("{} is NOT a directory".format(testDir))

    files = glob(p.join(testDir, "*.rrtest"))

    unknownTests = 0
    for file in files:
        print "\n\nStarting Test on ", file

        # set the globals, these should be class instance vars...
        fHandle = open(file, 'r')
        testId = jumpToNextTest()

        if testId == '[SBML]':
            sbmlStr, testId = getSBMLStr()
        else:
            Logger.log(Logger.LOG_WARNING, "rrtest file, \"" + file +
                       "\" missing SBML section, ignoring test file")
            continue

        # Load any initialization actions
        if testId == '[INITIALIZATION]':
            testId = jumpToNextTest()
            while testId != '[END_INITIALIZATION]':
                if functions.has_key(testId):
                    func = functions[testId]
                    func(rrInstance, testId)
                else:
                    print 'No initialization function found for ' + testId
                testId = jumpToNextTest()
            testId = jumpToNextTest()

        # create a RoadRunner obj with the sbml from the test file
        rrInstance = roadrunner.RoadRunner(sbmlStr)
        print 'Successfully loaded model.\n'

        # Now start the tests proper
        while testId != '':
            if functions.has_key(testId):
                func = functions[testId]
                func(rrInstance, testId)
            else:
                # getFloatingSpeciesAmountRates
                unknownTests = unknownTests + 1
                print string.ljust(testId, rpadding), 'UNKNOWN TEST'
            testId = jumpToNextTest()

    print "\n\nTotal failed tests:\t", gFailedTests, \
        "\nTotal unknown tests:\t", unknownTests, \
        "\nTotal passed tests:\t", gPassedTests
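# How the check functions above plug into runTester(), in miniature: the
# tester dispatches on rrtest section headers through a name -> function
# map named `functions`. The section names below are a hypothetical sketch,
# not the real table from the harness.
functions = {
    '[Number of Rate Rules]': checkNumberOfRateRules,
    '[Get Time Course Selection List]': checkGetTimeCourseSelectionList,
    '[Boundary Species Concentrations]': checkBoundarySpeciesConcentrations,
}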
def checkGetSteadyStateSelectionList(rrInstance, testId):
    print(string.ljust("Check " + testId, rpadding), end="")
    words = divide(readLine())
    result = str(rrInstance.steadyStateSelections)
    print(passMsg(result == words))
nb_train = np.size(xb_train, axis=0)
ns_train = np.size(xs_train, axis=0)

print "DEBUG: max train values"
print(xb_train.max())

print("Building network ...")
l_in, l_out = config.build_model()

Xt = np.zeros((2048, 64, 1), dtype='float32')

all_layers = nn.layers.get_all_layers(l_out)
num_params = nn.layers.count_params(l_out)
print("  number of parameters: %d" % num_params)
print("  layer output shapes:")
for layer in all_layers:
    name = string.ljust(layer.__class__.__name__, 32)
    print("    %s %s" % (name,
                         nn.layers.get_output(layer, sym_x).eval({sym_x: Xt}).shape))

print("Building cost function ...")
out_train = nn.layers.get_output(l_out, sym_x, deterministic=False)
out_eval = nn.layers.get_output(l_out, sym_x, deterministic=True)

TOL = 1e-5
lambda_reg = config.lambda_reg
params = nn.layers.get_all_params(l_out, regularizable=True)
reg_term = sum(T.sum(p**2) for p in params)
out_train_cutted = T.clip(out_train, TOL, 1 - TOL)
cost = T.mean(utils.Cross_Ent(out_train_cutted, sym_t))
def usage(self):
    """Represent self in a manner suitable for printing during a
    command line usage() function"""
    # See if there ARE any command line options
    opts = filter(lambda x: x.isCmdLine(), self._dict.values())

    # Be really fancy here - use LineWrap module, etc.
    maxwidth = 80
    ret = 'Usage: %s' % self._exec_name
    if not len(opts):
        # Just add description
        ret = ret + '\n\n%s' % self._desc
        return ret

    # XXX We should change this if we ever get to mutually exclusive /
    # cluster options
    opstr = string.join(map(lambda x: '[%s]' % x.paramCmdLine(), opts), ' ')
    pad = len(ret) + 1
    opstr = string.split(LineWrap.wraplines(opstr, maxwidth - pad), '\n')
    ret = ret + ' ' + opstr[0] + '\n'
    for l in opstr[1:]:
        ret = ret + ' ' * pad + l + '\n'

    # Add description
    ret = ret + '\n' + self._desc + '\n'
    ret = ret + '\nCommand line options:\n\n'

    # First pass - find the maxlen of params
    maxpar = max(map(lambda x: len(x.paramDesc()), self._dict.values()))
    # Give an extra line for too long optional parameters
    if maxpar > 24:
        maxpar = 24
    # Reserve some more chars
    res = 6
    for el in opts:
        # LineWrap API changed, emulate old API
        desc = LineWrap.wraplines(el._desc, maxwidth - maxpar - res)
        desc = string.split(desc, '\n')
        parDesc = el.paramDesc()
        if len(parDesc) > maxpar:
            ret = ret + ' %s\n' % parDesc
            ret = ret + ' ' * (maxpar + res - 2) + '- '
        else:
            ret = ret + ' %s - ' % string.ljust(parDesc, maxpar)
        ret = ret + desc[0] + '\n'
        for s in desc[1:]:
            ret = ret + ' ' * (maxpar + res) + s + '\n'
    return ret
def checkFloatingSpeciesConcentrations(rrInstance, testId):
    print(string.ljust("Check " + testId, rpadding), end="")
    ss = rrInstance.model.getFloatingSpeciesConcentrations()
    compareUpcomingValuesWith(ss, 1E-6)
def print_info(self):
    keys = self.info.keys()
    keys.sort()
    for key in keys:
        print " " + string.ljust(key, 18) + " " + str(self.info[key])
def checkStoichiometryMatrix(rrInstance, testId):
    # Stoichiometry matrix
    print(string.ljust("Check " + testId, rpadding), end="")
    st = rrInstance.getFullStoichiometryMatrix()
    checkMatrixVsUpcomingText(st)