def dotjoin(i, machineClass, xnrefs):
    """Emit a Graphviz join node that gathers several transition refs.

    i            -- transition index used to key the global registries
    machineClass -- list of name components identifying the machine
    xnrefs       -- list of refs shaped [exits, node_name, label, cluster]

    Returns (dot_text, join_ref).  With exactly one ref no join node is
    needed and that ref is returned unchanged with empty dot text.
    Side effects: registers the join node and its incoming arcs in the
    module-global TRANSITION_NODES / TRANSITION_ARCS / ARC_TRANSITIONS.
    """
    global TRANSITION_ARCS
    global ARC_TRANSITIONS
    global TRANSITION_NODES
    if len(xnrefs) == 1:
        # no need for a join if there is only one xn ref
        return ("", xnrefs[0])
    elif len(xnrefs) == 0:
        # empty join rendered as a small box
        nodeShape = 'shape=box, width=".05", height=".05", fixedsize=true'
    else:
        # real join rendered as a small filled circle
        nodeShape = 'style=filled, color=black, shape=circle, width=".05", height=".05", fixedsize=true'
    joinName = "JOIN.%s" % i
    joinNodeName = ".".join(machineClass + [joinName])
    # join node declaration
    rv = '"%s" [label="", %s];\n' % (joinNodeName, nodeShape)
    joinRef = [[], joinNodeName, "", ""]
    key = ".".join(machineClass + [str(i)])
    try:
        nodes = TRANSITION_NODES[key]
        arcs = TRANSITION_ARCS[key]
    except KeyError:
        # first node/arc recorded for this transition
        arcs = {}
        nodes = {}
        TRANSITION_ARCS[key] = arcs
        TRANSITION_NODES[key] = nodes
    nodes[joinNodeName] = None
    for ref in xnrefs:
        arc = "%s->%s" % (ref[1], joinNodeName)
        ARC_TRANSITIONS[arc] = key
        arcs[arc] = None
        nodes[ref[1]] = None
        rv = rv + '"%s" -> "%s" [taillabel="%s", ltail="%s", label=""];\n' % (ref[1], joinNodeName, ref[2], ref[3])
    return (rv, joinRef)
def excepthook(type, exc_msg, tb):
    # Custom sys.excepthook installed while running a recipe build step:
    # reports the failure with file:line context, records it in the
    # recipe's buildinfo, then either drops into the debugger or exits.
    # NOTE(review): defined as a closure — 'self' is captured from the
    # enclosing method's scope; confirm against the surrounding class.
    cfg = self.recipe.cfg
    # Restore the default hook first so a failure in here cannot recurse.
    sys.excepthook = sys.__excepthook__
    if cfg.debugRecipeExceptions:
        # Full traceback only in debug mode.
        lines = traceback.format_exception(type, exc_msg, tb)
        print string.joinfields(lines, "")
    if self.linenum is not None:
        prefix = "%s:%s:" % (self.file, self.linenum)
        prefix_len = len(prefix)
        # Only re-prefix the message if it is not already prefixed
        # with file:line (avoids double prefixes on re-raise).
        if str(exc_msg)[:prefix_len] != prefix:
            exc_message = "%s:%s: %s: %s" % (self.file, self.linenum, type.__name__, exc_msg)
            print exc_message
            if self.recipe.buildinfo:
                try:
                    # Best-effort: persist the failure location for later
                    # inspection; never let this mask the real error.
                    buildinfo = self.recipe.buildinfo
                    buildinfo.error = exc_message
                    buildinfo.file = self.file
                    buildinfo.lastline = self.linenum
                    buildinfo.stop()
                except:
                    log.warning("could not write out to buildinfo")
    # Interactive debug session when requested and attached to a tty;
    # otherwise terminate the build with a nonzero status.
    if cfg.debugRecipeExceptions and self.recipe.isatty():
        debugger.post_mortem(tb, type, exc_msg)
    else:
        sys.exit(1)
def addTable(items, cols=COLS):
    """Lay out (ahref, adiv) pairs as an HTML table with *cols* columns.

    items -- sequence of (anchor_html, div_html) pairs
    cols  -- number of table columns per row (default: module COLS)

    Returns (table_html, divs_html): the <div id="table0"> table markup
    and the concatenated div markup, both newline-joined.
    """
    divrecs = []
    tabrecs = []
    names = {'tab': TAB}
    tabrecs.append('<div id="table0" class="gtable" >')
    tabrecs.append('%(tab)s<table>' % names)
    names['tab'] = 2 * TAB
    i = 0
    while i < len(items):
        tabrecs.append('%(tab)s<tr align=left>' % names)
        names['tab'] = 3 * TAB
        for col in range(cols):
            if i >= len(items):
                # ran out of items mid-row
                break
            ahref, adiv = items[i]
            names['item'] = ahref
            tabrecs.append('%(tab)s<td>%(item)s%(tab)s</td>' % names)
            # the matching div content is collected separately
            names['item'] = adiv
            divrecs.append('%(item)s' % names)
            i += 1
        names['tab'] = 2 * TAB
        tabrecs.append('%(tab)s</tr>' % names)
    names['tab'] = TAB
    tabrecs.append('%(tab)s</table>\n' % names)
    tabrecs.append('</div>\n')
    return ('\n'.join(tabrecs), '\n'.join(divrecs))
def translate(compilationUnit, machine, targetDirectory, opts):
    """Translate an ECharts compilation unit into Java source text.

    Returns the complete Java program as a string: generated-file header,
    package declaration, imports, then the translated machine class.
    NOTE(review): the 'machine', 'targetDirectory' and 'opts' parameters
    are unused here ('machine' is immediately shadowed below); kept for
    interface compatibility with callers.
    """
    header = """//
// This file was generated by ech2java. Do not modify.
//
"""
    # package declaration (omitted for the default package)
    package = ""
    if len(getPackage(compilationUnit)) > 0:
        packageString = "package " + ".".join(getPackage(compilationUnit)) + ";\n"
        package = echartsLineReference(getPackageLineColumn(compilationUnit), packageString) + "\n"
    # imports: the echarts runtime plus the unit's own imports
    imports = "import org.echarts.*;\n"
    imports = imports + "import org.echarts.monitor.*;\n"
    for imported in getImports(compilationUnit):
        importedPackage = getImportPackage(imported)
        importModifiers = getImportModifiers(imported)
        importString = "import %s %s;\n" % (" ".join(importModifiers),
                                            ".".join(importedPackage))
        imports = imports + echartsLineReference(getImportLineColumn(imported), importString)
    imports = imports + "\n"
    # the machine itself
    machine = javamachine(compilationUnit)
    # the translated program
    rv = header + package + imports + machine
    return rv
def javaconstructor(constructor, machineName, machineClass):
    """Emit Java source for a machine's two constructors.

    Generates (1) the full constructor taking the user parameters plus the
    framework's parentMachine/machineIndex/machineCode arguments, and
    (2) a top-level convenience constructor that delegates to it with
    null/-1/null defaults.  Returns the Java source as a string.
    """
    # constructor header
    accessString = " ".join(getConstructorAccessModifiers(constructor))
    rv = "%s %s(" % (accessString, machineClass)
    userParamStrings = []
    for param in getConstructorParameters(constructor):
        # param is (type, name)
        userParamStrings.append(javatype(param[0]) + " " + param[1])
    paramStrings = userParamStrings + ["final Machine parentMachine",
                                      "final int machineIndex",
                                      "final MachineCode machineCode"]
    rv = rv + ", ".join(paramStrings) + ") throws Exception {\n"
    rv = rv + "super(%s_states, %s_messageTransitions, %s_messagelessTransitions, %s.class.getName(), parentMachine, machineIndex, machineCode);\n" % \
        (machineName, machineName, machineName, machineClass)
    # constructor body
    rv = rv + javaaction(getConstructorActionBlock(constructor)) + "}\n"
    # constructor called when creating machine as top-level machine
    rv = rv + getConstructorComment(constructor) + "\n"
    rv = rv + "%s %s(" % (accessString, machineClass)
    rv = rv + ", ".join(userParamStrings) + ") throws Exception {\n"
    rv = rv + "this("
    # delegate with the bare argument names plus framework defaults
    userParamStrings = []
    for param in getConstructorParameters(constructor):
        userParamStrings.append(param[1])
    paramStrings = userParamStrings + ["null", "-1", "null"]
    rv = rv + ", ".join(paramStrings) + ");\n"
    return rv + "}\n"
def create_test_tab(num_of_jobs, uid):
    """Write a randomized test crontab to TAB_FILE and parse it back.

    num_of_jobs -- number of cron jobs to generate
    uid         -- user id stored on each returned api.Job

    Returns the list of api.Job objects parsed from the file just written.
    """
    # Will contain parsed auto-generated crontab entries
    cron_strings = {}
    for job_num in range(1, num_of_jobs + 1):
        cron_strings.setdefault(job_num, [])
        create_test_intervals(cron_strings[job_num])
        cron_strings[job_num].append(create_test_commands(job_num, cron_strings[job_num]))
    with open(TAB_FILE, 'w') as tab:
        for line in cron_strings.keys():
            rand = random.randrange(100)
            # Approximately 30% chance that we will include a comment.
            # BUG FIX: was 'rand > 85' / '70 <= rand < 85', which left
            # rand == 85 matching neither branch (29% total, not 30%).
            if rand >= 85:
                tab.write('#')
            if 70 <= rand < 85:
                tab.write('# ')
            # Approximately 15% chance that we will include a blank line
            if 50 <= rand < 70:
                tab.write('\n')
            for item in cron_strings[line]:
                tab.write(item)
    # Will contain assembled job-interval strings
    test_jobs = []
    with open(TAB_FILE, 'r') as tab:
        for job in tab:
            first_char = job[0]
            if first_char != '\n':
                tmp = job.strip().split(' ')
                # first five fields are the cron interval, the rest the command
                interval = ' '.join(tmp[:5])
                cmd = ' '.join(tmp[5:])
                test_jobs.append(api.Job(interval, cmd, uid, datetime.now()))
    return test_jobs
def javatop(machine, machineName):
    """Emit the Java source for a top-level machine class.

    Builds the class declaration (extends/implements clauses), its static
    members, constructors and instance body, and returns it as a string.
    """
    machineClass = machineName
    # superclass defaults to the framework's TransitionMachine
    if getExtendsClause(machine) is None:
        extends = "TransitionMachine"
    else:
        extends = ".".join(getExtendsClause(machine))
    if getImplementsClause(machine) is None:
        implements = ""
    else:
        implementsClasses = [".".join(interface)
                             for interface in getImplementsClause(machine)]
        implements = " implements %s" % ", ".join(implementsClasses)
    rv = getMachineComment(machine) + "\n"
    rv = rv + "public class %s extends %s%s {\n" % (machineClass, extends, implements)
    # Define static members and methods for top-level machine and its
    # submachines
    rv = rv + javastatic(machine, machineName, machineClass)
    rv = rv + "// Declarations for %s\n" % machineClass
    # Define top-level machine constructors
    rv = rv + javaconstructors(machine, machineName, machineClass)
    # Define instance members, methods and subclasses for top-level
    # machine and its submachines
    rv = rv + javabody(machine, machineName, machineClass)
    return rv
def rebuild_lines(self):
    """Rebuild the text lines for this SubBlock from its stored values.

    Produces (and stores on self.lines) a '[ name ]' title line, an
    optional formatted header-comment line, one formatted line per data
    entry in sorted key order, and a trailing blank line.  Returns the
    rebuilt list.
    """
    self.lines = []
    # SubBlock title line
    self.lines.append('[ %s ]\n' % self.name)
    # Header comment line: the first numberHeaderFields fields go through
    # headerFormat; any extra fields are appended space-separated.
    if len(self.headerFields) >= self.numberHeaderFields:
        self.lines.append(
            self.headerFormat % tuple(self.headerFields[0:self.numberHeaderFields])
            + ' '.join(self.headerFields[self.numberHeaderFields:]) + '\n')
    # Data lines, in deterministic (sorted) key order
    for key in sorted(self.data.keys()):
        if len(self.data[key]) >= self.numberDataFields:
            self.lines.append(
                self.dataFormat % tuple(self.data[key][0:self.numberDataFields])
                + ' '.join(self.data[key][self.numberDataFields:]) + '\n')
    # A blank line always terminates the block
    self.lines.append('\n')
    return self.lines
def dotfork(i, b, machineClass, xnrefs):
    """Emit a Graphviz fork node that fans out to the given transition refs.

    i, b         -- transition index and branch-path components for the name
    machineClass -- list of name components identifying the machine
    xnrefs       -- list of refs shaped [exits, node_name, label, cluster]

    Returns (dot_text, fork_ref).  With exactly one ref no fork node is
    needed and that ref is returned unchanged with empty dot text.
    Side effects: registers the fork node and its outgoing arcs in the
    module-global TRANSITION_NODES / TRANSITION_ARCS / ARC_TRANSITIONS.
    """
    global TRANSITION_ARCS
    global ARC_TRANSITIONS
    global TRANSITION_NODES
    if len(xnrefs) == 1:
        # no need for a fork if there is only one xn ref
        return ("", xnrefs[0])
    elif len(xnrefs) == 0:
        # empty fork rendered as a small box
        nodeShape = 'shape=box, width=".05", height=".05", fixedsize=true'
    else:
        # real fork rendered as a small filled circle
        nodeShape = 'style=filled, color=black, shape=circle, width=".05", height=".05", fixedsize=true'
    forkName = "FORK.%s" % "_".join(map(str, [i] + b))
    forkNodeName = ".".join(machineClass + [forkName])
    # fork node declaration
    rv = '"%s" [label="", %s];\n' % (forkNodeName, nodeShape)
    forkRef = [[], forkNodeName, "", ""]
    key = ".".join(machineClass + [str(i)])
    try:
        arcs = TRANSITION_ARCS[key]
        nodes = TRANSITION_NODES[key]
    except KeyError:
        # first node/arc recorded for this transition
        arcs = {}
        nodes = {}
        TRANSITION_ARCS[key] = arcs
        TRANSITION_NODES[key] = nodes
    nodes[forkNodeName] = None
    for ref in xnrefs:
        arc = "%s->%s" % (forkNodeName, ref[1])
        ARC_TRANSITIONS[arc] = key
        arcs[arc] = None
        nodes[ref[1]] = None
        rv = rv + '"%s" -> "%s" [headlabel="%s", lhead="%s", label=""];\n' % (forkNodeName, ref[1], ref[2], ref[3])
    return (rv, forkRef)
def main():
    """Edit or replace the current user's crontab.

    With no file argument: dump the user's jobs to a temp file, open it
    in $EDITOR (falling back to vim), then re-parse and store the result.
    With '-u FILE': parse FILE directly and store its jobs.
    """
    uid = os.getuid()
    if len(sys.argv) < 3:
        # interactive mode: export current jobs and let the user edit them
        jobs_old = api.get_jobs_for_user(uid)
        with tempfile.NamedTemporaryFile('w', delete=False) as temp:
            for job in jobs_old:
                temp.write("%s %s\n" % (job.interval, job.command))
            tb_file = temp.name
        editor = os.getenv('EDITOR')
        if editor is not None:
            os.system("%s %s" % (editor, tb_file))
        else:
            subprocess.call("vim %s" % tb_file, shell=True)
    elif sys.argv[1] == '-u':
        tb_file = sys.argv[2]
    # NOTE(review): with >=3 args and a first argument other than '-u',
    # tb_file is never bound and the open() below raises NameError —
    # confirm intended CLI usage before hardening.
    jobs_new = []
    with open(tb_file, 'r') as tab:
        for job in tab:
            tmp = job.strip().split(' ')
            # first five fields are the cron interval, the rest the command
            interval = ' '.join(tmp[:5])
            cmd = ' '.join(tmp[5:])
            jobs_new.append(api.Job(interval, cmd, uid, datetime.now()))
    if len(sys.argv) < 3:
        # clean up the temp file we created in interactive mode
        os.unlink(tb_file)
    api.set_jobs(jobs_new, uid)
def custom_default_report(id, result, action='', no_table=0,
                          goofy=re.compile('\W').search):
    """Build DTML source for a default tabular report over *result*.

    id       -- report id passed through to the template source builder
    result   -- object exposing _searchable_result_columns()
    action   -- form action passed through
    no_table -- when true, render rows as <p> paragraphs instead of a table
    goofy    -- predicate spotting names that need expr-quoting in DTML
    """
    columns = result._searchable_result_columns()
    __traceback_info__ = columns
    heading = ('<tr>\n%s </tr>' %
               ''.join([' <th>%s</th>\n' % nicify(c['name']) for c in columns]))
    if no_table:
        tr, _tr, td, _td, delim = '<p>', '</p>', '', '', ',\n'
    else:
        tr, _tr, td, _td, delim = '<tr>', '</tr>', '<td>', '</td>', '\n'
    row = []
    for c in columns:
        n = c['name']
        if goofy(n) is not None:
            # names with non-word characters must go through a DTML expr;
            # repr() replaces the original backtick syntax identically
            n = 'expr="_[\'%s]"' % (repr('"' + n)[2:])
        # non-string columns get null="" so empty values render blank
        row.append(' %s<dtml-var %s%s>%s' % (td, n, c['type'] != 's' and ' null=""' or '', _td))
    row = (' %s\n%s\n %s' % (tr, delim.join(row), _tr))
    return custom_default_report_src(
        id=id, heading=heading, row=row, action=action, no_table=no_table)
def custom_default_zpt_report(id, result, action='', no_table=0,
                              goofy=re.compile('\W').search):
    """Build ZPT source for a default tabular report over *result*.

    Mirrors custom_default_report but emits page-template markup
    (tal:replace spans) instead of DTML variables.
    """
    columns = result._searchable_result_columns()
    __traceback_info__ = columns
    heading = ('<tr>\n%s </tr>' %
               ''.join([' <th>%s</th>\n' % nicify(c['name']) for c in columns]))
    if no_table:
        tr, _tr, td, _td, delim = '<p>', '</p>', '', '', ',\n'
    else:
        tr, _tr, td, _td, delim = '<tr>', '</tr>', '<td>', '</td>', '\n'
    row = []
    for c in columns:
        n = c['name']
        # Unlike the DTML variant, goofy-name quoting is deliberately not
        # applied here (the original had it commented out).
        row.append(' %s<span tal:replace="result/%s">%s goes here</span>%s' % (td, n, n, _td))
    row = (' %s\n%s\n %s' % (tr, delim.join(row), _tr))
    return custom_default_zpt_report_src(
        id=id, heading=heading, row=row, action=action, no_table=no_table)
def finalize(self):
    """Assemble the node's final HTML: body text, prologue and epilogue.

    Joins the accumulated body lines into self.text, renders the
    navigation links into a separate buffer, then builds the HTML
    prologue (and, for longer nodes, a link-bearing epilogue).
    """
    length = len(self.lines)
    self.text = ''.join(self.lines)
    self.lines = []
    # the link-writing methods append into self.lines
    self.open_links()
    self.output_links()
    self.close_links()
    links = ''.join(self.lines)
    self.lines = []
    self.prologue = (
        self.DOCTYPE +
        '\n<HTML><HEAD>\n'
        '  <!-- Converted with texi2html and Python -->\n'
        '  <TITLE>' + self.title + '</TITLE>\n'
        '  <LINK REL=Next HREF="' + makefile(self.next) + '" TITLE="' + self.next + '">\n'
        '  <LINK REL=Previous HREF="' + makefile(self.prev) + '" TITLE="' + self.prev + '">\n'
        '  <LINK REL=Up HREF="' + makefile(self.up) + '" TITLE="' + self.up + '">\n'
        '</HEAD><BODY>\n' + links)
    # only long nodes repeat the links at the bottom — presumably so short
    # pages aren't dominated by navigation; confirm threshold intent
    if length > 20:
        self.epilogue = '<P>\n%s</BODY></HTML>\n' % links
def xml_to_tree(filename, class_table=None):
    """Parse an XML source into a tag tree.

    filename    -- a url_directory.BasePath, '-' for stdin, or a file path
    class_table -- optional tag-class mapping for TreeBuildingParser

    Returns the root tag with its .path attribute set to the source path
    (None when reading stdin).  Raises NoXMLFile when the source cannot
    be read.
    """
    p = TreeBuildingParser(class_table=class_table)
    if isinstance(filename, url_directory.BasePath):
        try:
            lines = filename.Readlines()
        except IOError:
            raise NoXMLFile(filename.PathString())
        s = ''.join(lines)
        path = filename
    elif filename == "-":
        # read stdin in chunks until EOF
        s_list = []
        s = sys.stdin.read()
        while s:
            s_list.append(s)
            s = sys.stdin.read()
        s = ''.join(s_list)
        path = None
    else:
        try:
            fp = open(filename, "r")
        except IOError:
            raise NoXMLFile(filename)
        s = fp.read()
        fp.close()
        path = url_directory.FilePath(filename)
    p.feed(s)
    p.close()
    p.cur_tag[0].path = path
    return p.cur_tag[0]
def _ctag(str, hrefs=()):
    """Quote, tag, and escape the text.

    This is a modified version of the 'ctag' function appearing in
    StructuredText.py.  The differences include,

    * it uses _split, so that it avoids escaping text in quotes or in
      math-mode.

    * it processes hrefs.

    * it escapes LaTeX special characters.

    * it doesn't try to find duplicate list items - that got moved into
      LaTeX.
    """
    if str is None:
        str = ''
    # prepend a space so leading markup matches the regex patterns
    str = ' %s' % str
    # _split alternates plain segments (even indices) with quoted /
    # math-mode segments (odd indices); only the even ones are escaped
    str = _split(str)
    for i in xrange(len(str)):
        if not i % 2:
            # wrap LaTeX-special characters in \verb@...@
            str[i] = regsub.gsub(quotable_re, '\\\\verb@\\g<0>@', str[i])
            # backslash-escape the remaining escapable characters
            str[i] = regsub.gsub(slashable_re, '\\\\\\g<0>', str[i])
            # StructuredText emphasis -> LaTeX font switches
            str[i] = regsub.gsub(ST.strong, ' {\\\\bfseries \\1}\\2', str[i])
            str[i] = regsub.gsub(ST.em, ' {\\\\itshape \\1}\\2', str[i])
            # replace each href occurrence with a slanted ref + footnote
            for ref, link in hrefs:
                tag = '{\slshape %s}\\footnote{%s}' % (ref[1:-1], link)
                str[i] = string.joinfields(string.split(str[i], ref), tag)
    # NOTE(review): joinfields with no separator joins on a single space
    return string.joinfields(str)
def docecma():
    """Emit ECMAScript data tables describing the documented machine.

    Serializes dotmachine's global registries (transition arcs/nodes,
    node states, comments, external states) as JavaScript variable
    declarations and returns the script text.
    """
    def _js_array_body(names):
        # render a sequence of names as the body of a JS array literal
        return ", ".join(['"%s"' % x for x in names])

    def _js_quote(s):
        # escape embedded double quotes for a JS string literal
        return s.replace('"', r'\"')

    rv = "var transitionArcs = {};\n"
    for xn in dotmachine.TRANSITION_ARCS.keys():
        rv = rv + "transitionArcs[\"%s\"] = [ " % xn
        rv = rv + _js_array_body(dotmachine.TRANSITION_ARCS[xn].keys())
        rv = rv + " ];\n"
    rv = rv + "\n"
    rv = rv + "var arcTransitions = {};\n"
    for arc in dotmachine.ARC_TRANSITIONS.keys():
        rv = rv + "arcTransitions[\"%s\"] = \"%s\";\n" % (arc, dotmachine.ARC_TRANSITIONS[arc])
    rv = rv + "\n"
    rv = rv + "var transitionNodes = {};\n"
    for xn in dotmachine.TRANSITION_NODES.keys():
        rv = rv + "transitionNodes[\"%s\"] = [ " % xn
        rv = rv + _js_array_body(dotmachine.TRANSITION_NODES[xn].keys())
        rv = rv + " ];\n"
    rv = rv + "\n"
    rv = rv + "var nodeStates = {};\n"
    for node in dotmachine.NODE_STATES.keys():
        rv = rv + "nodeStates[\"%s\"] = \"%s\";\n" % (node, dotmachine.NODE_STATES[node])
    rv = rv + "\n"
    rv = rv + "var transitionComments = {};\n"
    for xn in dotmachine.TRANSITION_COMMENTS.keys():
        rv = rv + "transitionComments[\"%s\"] = \"%s\";\n" % (
            xn, _js_quote(dotmachine.TRANSITION_COMMENTS[xn]))
    rv = rv + "\n"
    rv = rv + "var stateComments = {};\n"
    for state in dotmachine.STATE_COMMENTS.keys():
        rv = rv + "stateComments[\"%s\"] = \"%s\";\n" % (
            state, _js_quote(dotmachine.STATE_COMMENTS[state]))
    rv = rv + "\n"
    rv = rv + "var machineComment = \"%s\";\n" % _js_quote(dotmachine.MACHINE_COMMENT)
    rv = rv + "\n"
    rv = rv + "var externalStates = [ "
    rv = rv + _js_array_body(dotmachine.EXTERNAL_STATES)
    rv = rv + " ];\n"
    return rv
def docstring(self): import string input = [] output = [] for arg in self.argumentList: if arg.flags == ErrorMode or arg.flags == SelfMode: continue if arg.type == None: str = 'void' else: if hasattr(arg.type, 'typeName'): typeName = arg.type.typeName if typeName is None: # Suppressed type continue else: typeName = "?" print "Nameless type", arg.type str = typeName + ' ' + arg.name if arg.mode in (InMode, InOutMode): input.append(str) if arg.mode in (InOutMode, OutMode): output.append(str) if not input: instr = "()" else: instr = "(%s)" % string.joinfields(input, ", ") if not output or output == ["void"]: outstr = "None" else: outstr = "(%s)" % string.joinfields(output, ", ") return instr + " -> " + outstr
def read_gif_header(results, data, pos):
    """Parse the GIF header at *pos* in *data*.

    Returns ((signature, version), bytes_consumed).  Raises ParseError
    when the signature is not 'GIF' or the version is not 87a/89a.
    """
    h, l = header.unpack(data, pos)
    # signature/version come back as character sequences; flatten them
    sig = ''.join(h['signature'])
    ver = ''.join(h['version'])
    if sig != 'GIF':
        raise ParseError('Not a GIF file')
    elif ver not in ('89a', '87a'):
        raise ParseError('Unknown GIF version "%s"' % ver)
    return (sig, ver), l
def provide_data_for_request(request):
    # Django view: turn a previously-stored query result (kept in the
    # session under 'tmp_query_results') into either a JSON graph payload
    # or a downloadable CSV, depending on the requested display type.
    try:
        query_choice = request.POST["query_choice"]
        frequency_choice = request.POST["freq_choice"]
        returned_node_type = request.POST["ret_node_type"]
        display_type = request.POST["display_type"]
        session_data = request.session['tmp_query_results'][str(query_choice)]
        ret_node_queries = session_data['ret_node_queries']
        query_info = session_data['query_info']
        use_query = session_data['use_query']
        node_queries = session_data['node_queries']
        #temp_node_list will be formed from whichever frequency the user choses...
        temp_node_list = session_data['ret_dict'][frequency_choice]
        #we also need node_queries and query_info
        # Title substitution: when the result set is a single Gene or
        # Subject query, fill in '$$result$$' with the display names
        # (abbreviated 'first...last' when more than three).
        if len(ret_node_queries.keys()) == 1 and ret_node_queries.has_key('Gene'):
            if len(ret_node_queries['Gene']) > 3:
                use_title = query_info['title'].replace('$$result$$', ret_node_queries['Gene'][0]['display_name'] + '...' + ret_node_queries['Gene'][-1]['display_name'])
            else:
                use_title = query_info['title'].replace('$$result$$', string.joinfields(map(lambda x: x['display_name'], ret_node_queries['Gene']), ','))
        elif len(ret_node_queries.keys()) == 1 and ret_node_queries.has_key('Subject'):
            if len(ret_node_queries['Subject']) > 3:
                use_title = query_info['title'].replace('$$result$$', ret_node_queries['Subject'][0]['display_name'] + '...' + ret_node_queries['Subject'][-1]['display_name'])
            else:
                use_title = query_info['title'].replace('$$result$$', string.joinfields(map(lambda x: x['display_name'], ret_node_queries['Subject']), ','))
        else:
            use_title = 'ERROR: Unknown title...'
        #convert the IDs to nodes
        ret_nodes = core.get_nodes(temp_node_list, returned_node_type, request, missing_param="skip")
        #too many nodes to render efficiently, will allow user to download csv file...
        if display_type == "Download":
            return download(request, use_title, query_info['text'], use_query, node_queries, ret_nodes, len(temp_node_list))
            #ret_dict = {'is_graph':False, 'graph':{}, 'title':'Sorry, your query is too large to display.  However, you may download a text version.'}
        else:
            # group the nodes for display; ret_nodes appears to be an
            # array-like with .tolist() — confirm against core.get_nodes
            ret_nodes = core.apply_grouping2({'nodes':ret_nodes, 'links':[]}, [])['nodes']
            ret_dict = {'is_graph':True, 'graph':{'nodes':ret_nodes.tolist(), 'links':[]}, 'title':use_title}
        return HttpResponse(json.dumps(ret_dict),mimetype="application/json")
    except Exception as e:
        # NOTE(review): broad catch turns any failure into a 500 after
        # printing; consider logging with traceback instead.
        print e
        return HttpResponseServerError()
def docmachinehtml(compilationUnit, machine, targetDirectory, documentHeader):
    """Write the per-machine HTML documentation page.

    Emits <name>.html in targetDirectory: a framed page with navigation
    buttons and an iframe embedding the machine's SVG diagram.
    NOTE(review): the 'machine' parameter is unused here; kept for
    interface compatibility with callers.
    """
    name = getMachineName(compilationUnit)
    package = getPackage(compilationUnit)
    # relative path from machine's html directory to root html
    # directory (one '..' per package component)
    rootPath = "/".join(len(package) * [".."])
    htmlPath = targetDirectory + os.sep + name + ".html"
    htmlBody = '''
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN"
 "http://www.w3.org/TR/html4/strict.dtd">
<HTML>
<HEAD>
<LINK rel="stylesheet" type="text/css" href="%s/docmachine.css" />
<TITLE>%s</TITLE>
</HEAD>
<BODY>
<table width="100%%" height="100%%" border="0" cellpadding="0" cellspacing="0">
<tr> <td>
<div id="buttons_outer"> <div id="buttons_inner"> <div id="buttons">
<ul id="mainlevel-nav">
<li><a href="%s/overview-summary.html" class="mainlevel-nav">Overview</a></li>
<li><a href="package-summary.html" class="mainlevel-nav">Package</a></li>
</ul>
</div> </div> </div></td></tr>
<tr><td class="outline">
<div class="header">
<span class="left">Machine</span>
<span class="right">%s</span>
</div>
</td></tr>
<tr><td>
<div id="package_name">%s</div>
<div class="contentheading">%s</div>
<div class="summary"></div>
</td></tr>
<tr><td height="100%%">
<input class="button" value="New Window" type="submit" onclick="window.open('%s')">
<div class="machine-svg">
<iframe src="%s" id="svg" name="svg" height="100%%" width="100%%" frameborder="0" marginwidth="0" marginheight="0"></iframe>
</div>
</td></tr>
</table>
</BODY>
</HTML>
''' % (rootPath, name, rootPath, documentHeader, ".".join(package),
       name, name + ".svg", name + ".svg")
    # open() replaces the removed py2 file() builtin; context manager
    # guarantees the handle is closed even on write errors
    with open(htmlPath, 'w') as htmlFile:
        htmlFile.write(htmlBody)
def sendmail(subject, body): try: ses = boto.connect_ses(AWS_ACCESS_KEY_ID, AWS_SECRET_KEY) ses.send_email('*****@*****.**', str(subject), str(body), '*****@*****.**', format='text') except: import string, sys, traceback exc_type, exc_value, exc_traceback = sys.exc_info() f = traceback.format_exception(exc_type, exc_value, exc_traceback) print string.joinfields(f, '')
def FormattedSummarization(log_summary, task_range):
    # Print a per-turn summary of the log: for every turn, a table whose
    # first row holds primary/secondary formatted input and whose second
    # row holds primary/secondary formatted output.
    top_left_list = log_summary.FindInterestingElements(PrimaryFormattedInput, task_range = task_range)[1]
    top_right_list = log_summary.FindInterestingElements(SecondaryFormattedInput, task_range = task_range)[1]
    bottom_left_list = log_summary.FindInterestingElements(PrimaryFormattedOutput, task_range = task_range)[1]
    bottom_right_list = log_summary.FindInterestingElements(SecondaryFormattedOutput, task_range = task_range)[1]
    for turn_id in range(len(top_left_list)):
        # Each table will have four cells. The first row is
        # input primary, input secondary, the second row
        # is output primary, empty.
        top_right_subset = top_right_list[turn_id][1]
        # In a user turn, add a concept annotation node if there is none,
        # and put it somewhere useful.
        if top_right_list[turn_id][0].turn_anchor.GetValue() == "user":
            for eclass, elt in top_right_subset:
                if eclass is ConceptAnnotation:
                    break
            else:
                # for/else: runs only when no ConceptAnnotation was found.
                # Don't forget to give this thing a parent,
                # so that session.cgi can anchor the
                # annotation somewhere. Or, better yet,
                # give it an utterance ID.
                # When I put it in the "right place", I need to
                # get the appropriate turn ID not from the length
                # of the list I'm working through, but rather
                # the actual location of the interesting tag.
                true_turn_id, tidx = top_left_list[turn_id][0].turn_anchor.GetLocation()
                dp = CreateInterestingTag(log_summary.log_dir,
                                          {"type_annotation": "concept",
                                           "turnid": true_turn_id,
                                           "dtype": "string"},
                                          "GC_DATA", "")
                log_summary.PlaceInTurn(None, dp, top_right_list[turn_id][0].turn_anchor)
                top_right_subset.append((ConceptAnnotation, dp))
        # Render each element through its originating datapoint's formatter.
        top_left = map(lambda x: x[1].original_dp.Format(x[1]), top_left_list[turn_id][1])
        top_right = map(lambda x: x[1].original_dp.Format(x[1]), top_right_subset)
        bottom_left = map(lambda x: x[1].original_dp.Format(x[1]), bottom_left_list[turn_id][1])
        bottom_right = map(lambda x: x[1].original_dp.Format(x[1]), bottom_right_list[turn_id][1])
        table = []
        # Only emit rows (and the table itself) when there is content.
        if top_left or top_right:
            table.append([string.joinfields(top_left, "\n"), string.joinfields(top_right, "\n")])
        if bottom_left or bottom_right:
            table.append([string.joinfields(bottom_left, "\n"), string.joinfields(bottom_right, "\n")])
        if table:
            log_summary.io.PrintTitle("Turn %d (%s)" % (turn_id, top_left_list[turn_id][0].turn_anchor.GetValue()), log_summary.log_dir.formatter)
            log_summary.io.PrintTable(None, table, log_summary.log_dir.formatter)
def get_group_R(self,G,R,R1,R2):
    # Cluster within-group relations.
    #   G -- tab-delimited file mapping group -> entry id (one per line)
    #   R -- tab-delimited relation file (first line is a description)
    # Writes R+".clusters" with one line per cluster:
    #   Family, Cluster_id, Size, comma-joined Seq_ids.
    # NOTE(review): parameters R1 and R2 are unused — confirm whether
    # they are vestigial or intended for a filtering step.
    print ("Group :",G)
    print ("Relation:",R)
    # entry id as key, group as value
    print ("Read groups...")
    gdict = {}
    inp = open(G,"r")
    inl = inp.readline()
    while inl != "":
        L = self.rnlb(inl).split("\t")
        if gdict.has_key(L[1]):
            print ("multiple assign:",L[1])
        else:
            gdict[L[1]] = L[0]
        inl = inp.readline()
    # group as key, a list of [id1,id2] as value
    print ("Read relations...")
    edict = {}
    inp = open(R,"r")
    #first line is description
    inp.readline()
    inl = inp.readline()
    while inl != "":
        L = self.rnlb(inl).split("\t")
        # verify if this pair is from the same group
        if gdict[L[0]] == gdict[L[1]]:
            if edict.has_key(gdict[L[0]]):
                edict[gdict[L[0]]].append([L[0],L[1]])
            else:
                edict[gdict[L[0]]] = [[L[0],L[1]]]
        inl = inp.readline()
    # go through groups and get clusters
    print ("Cluster each group...")
    oup1 = open(R+".clusters","w")
    oup1.write("Family\tCluster_id\tSize\tSeq_id\n")
    for i in edict:
        print (">",i)
        plist = edict[i]
        # write this group's pairs to a scratch file consumed by
        # self.get_relations (single shared temp name, not parallel-safe)
        oup2 = open("TMP.R","w")
        for j in plist:
            oup2.write("%s\n" % string.joinfields(j,"\t"))
        oup2.close()
        clusters = self.get_relations("TMP.R")
        c = 1
        for j in clusters:
            oup1.write("%s\t%i\t%i\t%s\n" % (i,c,len(j), string.joinfields(j,",")))
            c += 1
    oup1.close()
    print ("Done!" )
def seqcon(seq, to):
    """Convert sequence *seq* to the container type exemplified by *to*.

    *to* is an example value selecting the target type:
      ''  -> string: the elements of seq joined together
      []  -> list of seq's elements
      ()  -> tuple of seq's elements
    Any other *to* raises TypeError.
    """
    if to == '':
        # str.join accepts any iterable of strings (including a string,
        # which joins its own characters), so no type dispatch is needed
        return ''.join(seq)
    if to == []:
        # replaces the py2-only map(None, seq) idiom
        return list(seq)
    if to == ():
        return tuple(seq)
    raise TypeError
def pairs_lines(self):
    """Build the [ pairs ] section lines from the stored pairs data.

    Returns a list of text lines: the section title, a header comment
    (first three header fields column-formatted, extras space-joined),
    one formatted line per pair in sorted key order, and a blank line.
    """
    lines = []
    lines.append('[ pairs ]\n')
    lines.append('; %6s %6s %6s ' % tuple(self.pairs_headers[0:3])
                 + ' '.join(self.pairs_headers[3:]) + '\n')
    # deterministic output: iterate keys in sorted order
    for key in sorted(self.pairs.keys()):
        lines.append('%8s %6s %6s ' % tuple(self.pairs[key][0:3])
                     + ' '.join(self.pairs[key][3:]) + '\n')
    lines.append('\n')
    return lines
def dihedrals_lines(self):
    """Build the [ dihedrals ] section lines from the stored data.

    (Docstring fixed: it previously said '[ angles ]'.)  Returns the
    section title, a header comment (first five header fields
    column-formatted, extras space-joined), one formatted line per
    dihedral in sorted key order, and a blank line.
    """
    lines = []
    lines.append('[ dihedrals ]\n')
    lines.append('; %6s %6s %6s %6s %6s ' % tuple(self.dihedrals_headers[0:5])
                 + ' '.join(self.dihedrals_headers[5:]) + '\n')
    # deterministic output: iterate keys in sorted order
    for key in sorted(self.dihedrals.keys()):
        lines.append('%8s %6s %6s %6s %6s ' % tuple(self.dihedrals[key][0:5])
                     + ' '.join(self.dihedrals[key][5:]) + '\n')
    lines.append('\n')
    return lines
def getheadertext(self, pred=None):
    """Return the raw header text, optionally filtered by field name.

    With no predicate, all header lines are returned joined.  Otherwise
    *pred* is called with each lower-cased header field name; a header
    line — together with any whitespace-indented continuation lines that
    follow it — is kept iff pred returned true for its field.
    """
    if not pred:
        return ''.join(self.headers)
    headers = []
    hit = 0
    for line in self.headers:
        # a line starting with whitespace continues the previous header,
        # so it inherits the previous line's hit/miss decision
        if line[0] not in string.whitespace:
            i = line.find(':')
            if i > 0:
                hit = pred(line[:i].lower())
        if hit:
            headers.append(line)
    return ''.join(headers)
def describe_gif_file (stuff):
    # Print a human-readable report of a parsed GIF file.
    # *stuff* is a (parsed_dict, file_length) pair as produced by the
    # GIF parser elsewhere in this module.
    stuff, length = stuff
    print 'GIF Version: %s' % stuff['header'][1]
    print 'File Length: %d' % length
    data = stuff['data']
    screen = stuff['logical screen']
    global_color_table = screen['global color table']
    print 'Width: %d Height: %d' % (screen['width'], screen['height'])
    print 'Colors: %d' % (len(global_color_table))
    print 'Number of Blocks: %d' % len(data)
    # data is a list of (block_kind, payload) pairs
    for i in range(len(data)):
        thing = data[i]
        print '=' *50
        print 'Block: %d' % i
        if thing[0] == 'graphic block':
            # payload is (graphic control extension, (type, info))
            gce, (type, info) = thing[1]
            if gce:
                if gce['transparent color flag']:
                    # transparent index resolves through the global table
                    print 'Transparent Color: R:%3d G:%3d B:%3d' % (
                        global_color_table.table[gce['transparent color index']]
                        )
            if type == 'image':
                id = info['image descriptor']
                lct = info['local color table']
                lzw_size = info['lzw code size']
                print 'Image Data: %d blocks, %d bytes' % (
                    len(info['image data'].blocks),
                    info['image data'].length
                    )
                print 'Local Color Table: ',
                if lct:
                    print 'yes, %d colors' % len(lct)
                else:
                    print 'no'
                print 'Interlaced: ',
                if id['interlace flag']:
                    print 'yes'
                else:
                    print 'no'
                print 'LZW starting code size: %d' % lzw_size
            elif type == 'text':
                plain_text_extension.describe (info)
        elif thing[0] == 'application extension':
            print 'Application Extension'
            ae = thing[1]
            # identifier bytes rendered as uppercase hex, concatenated
            print 'identifier %s' % (
                string.joinfields (
                    map (lambda x: string.upper(hex(x)[2:]), ae['identifier']),
                    ''))
            print 'Sample from first Data Block: %s' % repr(ae['data'].blocks[0])
        elif thing[0] == 'comment extension':
            print 'Comment: "%s"' % string.joinfields (thing[1]['data'].blocks, '')
def update_col(self, old_col_name, new_col_name, col_type, col_desc, col_group = None):
    # Rename/retype a column: update its row in col_definitions, then
    # rebuild the 'animals' table (SQLite cannot rename a column in
    # place) by renaming it aside, recreating it with the new column
    # definitions, copying the data across, and dropping the old table.
    # NOTE(review): every statement below is built by raw string
    # interpolation — no escaping or parameterization — so none of the
    # arguments may come from untrusted input.
    self.__validate_col_type(col_type)
    self.__validate_col_name(new_col_name)
    connection = self.connection
    # this is used for the insert select query during col renaming
    import string
    old_col_names = self.__column_names()
    old_col_sql = string.joinfields(old_col_names,sep=",")
    # update column in column_definitions table
    sql_args = col_desc, col_type, col_group, new_col_name, old_col_name
    cursor = connection.cursor()
    cursor.execute("""
        UPDATE col_definitions
        SET col_description = '%s' , col_type = '%s' , col_group = '%s' , col_name = '%s'
        WHERE col_name = '%s';""" % sql_args)
    connection.commit()
    # this is used for the insert select query during col renaming:
    # rebuild "name type" fragments from the refreshed column info
    new_col_info_rows = self.col_info()
    new_col_list = []
    for col_entry in new_col_info_rows:
        col_entry_name = col_entry['col_name']
        col_entry_type = col_entry['col_type']
        col_entry_list = [col_entry_name, col_entry_type]
        new_col_list.append(string.joinfields(col_entry_list,sep=" "))
    new_col_creation_sql = string.joinfields(new_col_list,sep=", ")
    new_col_names = self.__column_names()
    new_col_insertion_sql = string.joinfields(new_col_names,sep=",")
    cursor = connection.cursor()
    cursor.execute(""" ALTER TABLE animals RENAME to temp_animals;""" )
    cursor = connection.cursor()
    cursor.execute(""" CREATE TABLE animals (%s); """ % new_col_creation_sql )
    # copy data: new column order on the left, old order on the right
    sql_args = new_col_insertion_sql, old_col_sql
    cursor.execute(""" INSERT INTO animals (%s) SELECT %s FROM temp_animals; """ % sql_args )
    cursor.execute(""" DROP TABLE temp_animals;""" )
    connection.commit()
def read_1seq(self, fasta):
    """Read a single-record FASTA file and return [id, sequence].

    fasta -- path to a FASTA file whose first line is the '>' header

    The id is the header line (via self.rmlb, which strips the line
    break) minus its leading '>'; the sequence is all remaining lines
    concatenated with line breaks removed.

    BUG FIX: the original assigned the CRLF-stripped text to an unused
    variable ('sense') and then split only on '\\n', leaving stray '\\r'
    characters in the returned sequence when line endings were mixed.
    """
    inp = open(fasta, "r")
    inl = inp.readlines()
    inp.close()  # original leaked the file handle
    idx = self.rmlb(inl[0])[1:]
    seq = "".join(inl[1:])
    # get rid of line breaks; sometimes \n and \r\n are mixed
    seq = seq.replace("\r\n", "")
    seq = seq.replace("\n", "")
    return [idx, seq]
from scipy import zeros, array import re def mask(charlist): """Construct a mask suitable for string.translate, which marks letters in charlist as "t" and ones not as "b" """ mask = "" for i in range(256): if chr(i) in charlist: mask = mask + "t" else: mask = mask + "b" return mask ascii7bit = string.joinfields(map(chr, range(32, 127)), "") + "\r\n\t\b" ascii7bit_mask = mask(ascii7bit) class FileFormat_AlphaomegaAscii: """ CLASS: FileFormat_AlphaomegaAscii DESCR: Reads ASCII-exported data files from the 'Alpha Omega' software """ def get_channel_maps(self, ascfile): channel_name_map = {} channel_num_map = {} channel_sr_map = {} channel_count = {} while 1: ascline = ascfile.readline()
def sixpack(self,seq,lenT,lenT2,met,inc,oup0="",oup1="",oup2="",verbose=1): if verbose: if oup1 == "": print "\nSequence:",seq else: print "\nSequence:",seq[0] print "lenT :",lenT print "lenT2 :",lenT2 print "Met_flag:",met print "TL_inc :",inc if inc % 3 != 0: print "ERR: TL_inc has to be multiple of 3" if oup1 == "": sys.exit(0) else: return 1 ################### # split() start # seq_index,ori,ntseq,TL_seq,frame,lenT,met,antisense,antisense length ################### def split(idx,ori,ntseq,s,f,a=0,alen=0,verbose=1): # L coord as key, [R_coord,pep,nt] as value #print s sdict = {} s = s.split("*") #print s c = f countT = 0 if verbose: print " split_orf:",len(s) for i in s: if verbose and countT != 0 and countT % 10000 == 0: print " %i x 10k" % (countT/10000) countT += 1 #print "ORF:",i if i == "": c += 3 continue EXC = ["?","X"] #EXC = ["X"] # if orf starts or ends in ? or X, this orf will be disgarded if len(i) >= lenT and i[0] not in EXC and i[-1] not in EXC: iL = len(i) if met == 1 and i.find("M") != -1: m = i.find("M") mprime = m x = i[m:] l = len(x) #print m,x,l while bisect([lenT,lenT2+1],l) == 2: #print " .." mprime = x[1:].find("M") m += mprime + 1 x = x[1:][mprime:] l = len(x) # len(x) is between lenT and lenT2 if bisect([lenT,lenT2],l) == 1: if a: ntL = c+m*3+1 # ntseq coord ntR = c+(iL+1)*3 coordL = alen-(c+iL*3)+1-3 # sense strand coordR = alen-(c+m*3+1)+1 # sense else: ntL = c+m*3+1 # nt seq coord ntR = c+(iL+1)*3 coordL = c+m*3+1 # sense strand coordR = c+(iL+1)*3 if len(x) > 0: sdict[coordL] = [coordR,x,ntseq[ntL-1:ntR]] #print "IN :",x #print " ",ntL,ntR,coordL,coordR else: #print "OUT:",x pass elif met == 0: # NOTICE THAT lenT2 is not applied. 
# antisense if a: sdict[alen-(c+iL*3)+1-3] = [alen-(c+1)+1,i, ntseq[c+1-1:c+(iL+1)*3]] # sense else: sdict[c+1] = [c+(iL+1)*3,i,ntseq[c+1-1:c+(iL+1)*3]] """ PROBLEM HERE, INACTIVETED FOR NOW elif met == 2: m = i.find("M") # initial position for MET n = m # variable aa position for Met x = i while m != -1: x = x[m:] if len(x) >= lenT: # antisense if a: coordL = alen-(c+iL*3)+1 coordR = alen-(c+n*3+1)+1 # deal with ? and X if x.find("?") != -1: ... # sense else: coordL = c+n*3+1 coordR = c+iL*3 if len(x) > 0: sdict[coordL] = [coordR,x] else: break x = x[1:] m = x.find("M") n += m+1 """ c += len(i)*3 + 3 skeys = sdict.keys() skeys.sort() if verbose: print " qualified:",len(sdict) for i in skeys: #print idx,ori,f,i,sdict[i][0] #print sdict[i][1] #print sdict[i][2] # Get rid of sequences that have ? or X if sdict[i][1].find("?") != -1 or sdict[i][1].find("X") != -1: continue # 06/-6,07 for ORFs involving the 1st or the last nucleotide, # the coordinates are -3 or +3 more than they should. This is # not so much of a problem when working with chromosomes, but # an issue when looking to multiple contigs. The following # is to check if there is discrepancy between the length of sub # seq and if they are the beginning or ending entries. If so, # coordinates are corrected accordingly. 
err = 0 modi = 0 if i < 0: if i == -2 and sdict[i][0] == len(sdict[i][2]): modi = 1 else: " ERR:",idx,ori,f,i,sdict[i][0] #print sdict[i][0]-i+1, len(ntseq) if not modi: if sdict[i][0]-i+1 > len(ntseq): if sdict[i][0]-len(ntseq) == 2 and \ sdict[i][0]-i+1-3 == len(ntseq): sdict[i][0] -= 3 else: " ERR:",idx,ori,f,i,sdict[i][0] else: if sdict[i][0]-modi+1 > len(ntseq): if sdict[i][0]-len(ntseq) == 2 and \ sdict[i][0]-modi+1-3 == len(ntseq): sdict[i][0] -= 3 else: " ERR:",idx,ori,f,i,sdict[i][0] if not err: oup0.write(">%s|%s|%i|%i-%i\n%s\n" % \ (idx,ori,f,i,sdict[i][0],sdict[i][2])) oup1.write(">%s|%s|%i|%i-%i\n%s\n" % \ (idx,ori,f,i,sdict[i][0],sdict[i][1])) oup2.write("%s\t%s\t%i\t%i\t%i\n" % \ (idx,ori,f,i,sdict[i][0])) ################### # split() end ################### if verbose: print "Read sense strand..." # output stream not specified, must be direct call if oup1 == "": inp = open(seq,"r") inl = inp.readlines() idx = self.rmlb(inl[0])[1:] sense = string.joinfields(inl[1:],"") oup0 = open("%s_T%i-%im%i.cds" % (idx,lenT,lenT2,met),"w") oup1 = open("%s_T%i-%im%i.pep" % (idx,lenT,lenT2,met),"w") oup2 = open("%s_T%i-%im%i.coord" % (idx,lenT,lenT2,met),"w") oup2.write("SeqID\tOri\tFrame\tL\tR\n") # batch call, seq passed is a list with [idx,sequence] else: idx = seq[0] sense = seq[1] # get rid of line breaks, sometimes \n and \r\n are mixed. if sense.find("\r\n") != -1: sense = string.joinfields(sense.split("\r\n"),"") if sense.find("\n") != -1: sense = string.joinfields(sense.split("\n"),"") if verbose: print "Reverse..." antis = self.reverse2(sense) if verbose: print "Complement..." antis = self.complement(antis) # sense, frame 0,1,2 if verbose: print "Translate.." 
print " sense, frame 0" s0 = self.translate_passed(sense,0,inc)[0] split(idx,"+",sense,s0,0,0,0,verbose) if verbose: print " sense, frame 1" s1 = self.translate_passed(sense,1,inc)[0] split(idx,"+",sense,s1,1,0,0,verbose) if verbose: print " sense, frame 2" s2 = self.translate_passed(sense,2,inc)[0] split(idx,"+",sense,s2,2,0,0,verbose) # antisense, frame 0,1,2 if verbose: print " antisense, frame 0" a0 = self.translate_passed(antis,0,inc)[0] split(idx,"-",antis,a0,0,1,len(antis),verbose) if verbose: print " antisense, frame 1" a1 = self.translate_passed(antis,1,inc)[0] split(idx,"-",antis,a1,1,1,len(antis),verbose) if verbose: print " antisense, frame 2" a2 = self.translate_passed(antis,2,inc)[0] split(idx,"-",antis,a2,2,1,len(antis),verbose) #oup1.close() #oup2.close() if verbose: print "Done!"
def __str__(self):
    """Render the address list as a comma-separated header string."""
    # str.join replaces the deprecated string.joinfields (same result).
    return ", ".join(map(dump_address_pair, self.addresslist))
def list_handle_src(self, attrs):
    """Kick off an async image load for the 'src' attribute.

    No-op when image loading is disabled in the browser preferences.
    On success the fetched image object is stored in attrs['type'].
    """
    if not self.app.prefs.GetBoolean("browser", "load-images"):
        return
    # ''.join(...split()) strips ALL whitespace embedded in the URL
    # (replaces deprecated string.split/string.joinfields).
    src = ''.join(attrs['src'].split())
    image = self.context.get_async_image(src, self.reload)
    if image:
        attrs['type'] = image
def ellipse(self, x, y, rlong, rshort, posangle, mode='P'):
    """ Draw an ellipse with centre at (x,y) and long radius rlong and
    short radius rshort. position_angle is the angle between the
    long axis and the positive x-axis in radians.
    'mode' determines how it is drawn:
    'P': only draw border with pen
    'F': only fill interior
    'PF': fill interior with fill gray value and draw border with
    pen gray value
    """
    # Bezier "magic number" used to approximate a quarter arc with a
    # cubic curve.
    arc_magic = ARC_MAGIC
    sp = check_zero(sin(posangle))
    cp = check_zero(cos(posangle))
    x = check_zero(x)
    y = check_zero(y)
    rlong = check_zero(rlong)
    rshort = check_zero(rshort)
    # translate
    strlist = ['q', '1 0 0 1', str(x), str(y), 'cm']
    # rotate
    strlist += [str(cp), str(sp), str(-sp), str(cp), '0 0 cm']
    lstr = str
    # Four cubic Bezier segments (one per quadrant), each anchored with
    # the arc_magic control points, closed with 'h'.
    strlist += [
        lstr(rlong), '0 m',
        lstr(rlong), lstr(rshort * arc_magic),
        lstr(rlong * arc_magic), lstr(rshort),
        '0', lstr(rshort), 'c',
        lstr(-rlong * arc_magic), lstr(+rshort),
        lstr(-rlong), lstr(+rshort * arc_magic),
        lstr(-rlong), '0 c',
        lstr(-rlong), lstr(-rshort * arc_magic),
        lstr(-rlong * arc_magic), lstr(-rshort),
        '0', lstr(-rshort), 'c',
        lstr(+rlong * arc_magic), lstr(-rshort),
        lstr(+rlong), lstr(-rshort * arc_magic),
        lstr(+rlong), '0 c h'
    ]
    # Map mode to the PDF paint operator: S stroke, f fill, B both.
    if mode == 'P':
        m = 'S'
    elif mode == 'F':
        m = 'f'
    elif mode == 'PF':
        m = 'B'
    else:
        print 'PDFDrawing.ellipse: invalid mode'
        m = 'S'
        pass
    strlist += [m, 'Q ']
    # joinfields with default separator => operators joined by spaces.
    self.drawingtext.write(string.joinfields(strlist))
    pass
def dotrootmachine(self, compilationUnit):
    """Return the fully-qualified dotted name of the unit's root machine."""
    name = getMachineName(compilationUnit)
    package = getPackage(compilationUnit)
    # str.join replaces the deprecated string.joinfields (same result).
    return ".".join(package + [name])
def getgroups(self):
    """Return the textbox contents as whitespace-separated tokens,
    one per line with continuation-style single-space indents."""
    data = self.textbox.get("1.0", END)
    # str.split/str.join replace the deprecated string module functions.
    return "\n ".join(data.split())
# Emacs-style regex fragments (old "regex" module syntax: \| alternation,
# \( \) groups) describing C tokens.
CommentEnd = '\*/'
Hexnumber = '0[xX][0-9a-fA-F]*[uUlL]*'
Octnumber = '0[0-7]*[uUlL]*'
Decnumber = '[1-9][0-9]*[uUlL]*'
Intnumber = Hexnumber + '\|' + Octnumber + '\|' + Decnumber
Exponent = '[eE][-+]?[0-9]+'
Pointfloat = '\([0-9]+\.[0-9]*\|\.[0-9]+\)\(' + Exponent + '\)?'
Expfloat = '[0-9]+' + Exponent
Floatnumber = Pointfloat + '\|' + Expfloat
Number = Floatnumber + '\|' + Intnumber

# Anything else is an operator -- don't list this explicitly because of '/*'

# Identifier/String/Char/CommentStart are defined earlier in this file.
OutsideComment = (Identifier, Number, String, Char, CommentStart)
OutsideCommentPattern = '\(' + string.joinfields(OutsideComment, '\|') + '\)'
OutsideCommentProgram = regex.compile(OutsideCommentPattern)

InsideComment = (Identifier, Number, CommentEnd)
InsideCommentPattern = '\(' + string.joinfields(InsideComment, '\|') + '\)'
InsideCommentProgram = regex.compile(InsideCommentPattern)

def initfixline():
    # Reset the tokenizer to "outside comment" state before a run.
    global Program
    Program = OutsideCommentProgram

def fixline(line):
    # (continues beyond this view)
    global Program
##    print '-->', `line`
def parsescript(isi):
    """Compile a template (text with embedded <! ... !> / <? ... ?>
    Python sections, ^-escape lines, @-inline output and $(...) string
    interpolation markers) into executable Python source that appends
    its output via put().
    """
    inpython=0
    tab=2
    res=['put=echo.text.append\ntry:\n']
    ctab='  '
    isfinal=0
    for ln in isi.split(chr(10)):
        lns=ln.strip()
        # NOTE(review): '#' lines fall through to normal handling; the
        # "pass" branch is a no-op as written.
        if lns<>'' and lns[0]=='#':
            pass
        if lns=='<FINALIZE>':
            # everything after <FINALIZE> goes into the finally: block
            isfinal=1
            inpython=1
            res.append('finally:\n')
            ctab='  '
            tab=2
        elif lns in ('<!>','<!!>'):
            inpython,tab=0,ln.index('<!')+2;ctab=' '*tab
        elif lns=='<!':
            inpython=1;ctab=' '*tab
        elif lns=='!>':
            inpython,tab=0,ln.index('!')+2;ctab=' '*tab
        elif lns in ('<?>','<??>'):
            inpython,tab=0,ln.index('<?')+2;ctab=' '*tab
        elif lns=='<?':
            inpython=1;ctab=' '*tab
        elif lns=='?>':
            inpython,tab=0,ln.index('?')+2;ctab=' '*tab
        else:
            if not inpython:
                # '^' at line start: emit the rest as raw Python
                if ln<>'' and '^' in ln and lns.index('^')==0:
                    res+=ctab+lns[1:]+'\n'
                    continue
                if lns<>'':
                    res.append(ctab+'put("'+parseline(ln)+' ")\n')
                else:
                    res.append(ctab+'put(\'\')\n')
            else:
                # inside a Python section: '@' (outside quotes) switches
                # the remainder of the line back to template output
                if ln<>'' and '@' in ln:
                    ps=0
                    n=0
                    fail=1
                    for ch in ln:
                        if ch in ("'",'"'):ps=not ps
                        if ch=='@' and ps==0:
                            p=lns.index('@')
                            tab1=ln.index('@')
                            if p==0:
                                res.append('  '+' '*tab1+'put("'+parseline(lns[1:])+' ")\n')
                            else:
                                res.append('  '+ln[0:tab1]+'put("'+parseline(ln[tab1+1:])+' ")\n')
                            fail=0
                            break
                    if fail:res.append('  '+ln+chr(10))
                else:
                    res.append('  '+ln+chr(10))
                continue
    # get string
    if not isfinal:res.append('finally:pass#')
    data,ld,ldd='','',''
    mystr=[]
    instring,invar,de=0,0,0
    # Character-by-character pass: find "..." literals (tracking $()
    # interpolation nesting so quotes inside it are skipped) and run
    # each through parsestring().
    for d in string.joinfields(res,''):
        if instring and d=='$':
            invar=1
        elif instring and invar==1:
            if d=='(':
                invar,de=2,0
            else:
                invar=0
        if instring and invar==2:
            if d=='(':de+=1
            if d==')':de-=1
            if de==0:invar=0
        # unescaped quote toggles string state
        if invar==0 and ((d=='"' and (not "\\" in ld+ldd))):
            if instring:
                instring=0
                astr+=d
                data+=parsestring(astr)
            else:
                instring,astr=1,d
            ld,ldd=d,ld
            continue
        if instring :
            astr+=d
        else:
            data+=d
        ld,ldd=d,ld
    if instring:
        astr+=d
        data+=parsestring(astr)
    return data
def getrawheader(self, name):
    """Return the raw text of the first header matching *name*
    (prefix stripped, continuation lines included), or None."""
    # renamed local: "list" shadowed the builtin
    matched = self.getfirstmatchingheader(name)
    if not matched:
        return None
    # drop the "Name:" prefix from the first physical line
    matched[0] = matched[0][len(name) + 1:]
    # ''.join replaces the deprecated string.joinfields.
    return ''.join(matched)
def writelines(self, lines):
    """Append *lines* to the buffer, CRLF-separated and CRLF-terminated."""
    # '\r\n'.join replaces the deprecated string.joinfields.
    self.data = self.data + '\r\n'.join(lines) + '\r\n'
def replace(str, old, new):  # global substitution
    """Return *str* with every occurrence of *old* replaced by *new*."""
    # str methods replace the deprecated string.splitfields/joinfields;
    # split-around-old then rejoin-with-new is the same substitution.
    # (parameter name "str" kept for interface compatibility, although
    # it shadows the builtin)
    return new.join(str.split(old))
def process(me,conn,data,keepalive,port=0,addr=0):
    """Handle one HTTP request: parse headers, multipart uploads and
    query strings, dispatch to me.servePage(), then assemble and return
    the full response (status line + headers + body) as one string.
    """
    global running
    # reset per-request worker state
    me.data.globals=globals()
    me.data.onfinish=None
    me.data.addr=addr
    me.data.httpheader=''
    me.data.headers={}
    me.data.PYERRORLOG=''
    me.data.workerno = me.no
    me.data.cookie.clear()
    me.data.recho.clear()
    me.data.xecho.clear()
    me.sessionstarted = False
    me.data.timer0 = time.time()
    me.data.form.clear()
    me.host.clear()
    me.data.socketPort= port
    me.data.basepath=base
    filemethod=0
    # multipart/form-data upload?
    if '; boundary=' in data:
        filemethod=1
    # split header block from body
    if '\r\n\r\n' in data:
        data,chunk=data.split('\r\n\r\n',1)
    else:
        chunk=''
    data = data.split('\r\n')
    host = me.host
    for item in data:
        pitem=item.split(': ')
        if len(pitem)==2:
            host[pitem[0]]=pitem[1]
    request = data[0].split(' ')
    if len(request)<=1:
        return "HTTP/1.1 400 ERROR";
    url = request[1].split('?',1)
    query = ''
    if filemethod:
        # read the remainder of the multipart body from the socket
        left=int(host['Content-Length'].strip()) #-len(chunk)
        boundary=host['Content-Type'].strip().split('boundary=')[1]
        datasplit=chunk
        while len(datasplit)<left:
            datasplit=datasplit+conn.recv(left)
        data=(datasplit).split('--'+boundary+'\r\n')
        boundary2='--'+boundary
        for chunk in data:
            chunksplit=chunk.split('\r\n\r\n',1)
            if len(chunksplit[0])<10:continue
            heads=chunksplit[0].split('\r\n')[0].split('form-data; ')[1]
            filename=0
            # SECURITY NOTE(review): exec on text taken straight from the
            # Content-Disposition header executes attacker-controlled
            # input; this should be replaced with real header parsing.
            exec(heads.replace('\\','\\\\'))
            if filename:
                afile=tDummy()
                afile.filename=filename
                afile.data=chunksplit[1]
                me.data.form[name]=afile
            else:
                isi=chunksplit[1].split(boundary2)[0][:-2]
                me.data.form[name]=isi
    else:
        if len(url)==2:
            query=(url[1])
        if request[0]=='POST':
            query=(data[len(data)-1])
    url = url[0].replace('.php','.py')
    durl=os.path.split(url)
    if not durl[1]:url=os.path.join(durl[0],'index.py')
    me.data.recho.exts=os.path.splitext(url)[1]
    fn = me.servePage(url,host,query)
    me.url = url
    if me.data.onfinish:
        try:me.data.onfinish()
        except:pass
    # servePage returns either a filename (str) or a page object
    tipestr = type(fn) is str
    if tipestr:
        exts=os.path.splitext(url)[1]
    else:
        exts=fn.exts
    exts=exts.lower()
    hdr,com=SVdefaultHeader.get(exts,SVdefaultHeader['.html'])
    res=["HTTP/1.1 200 OK"]
    resh={}
    resh["Content-Type"]=hdr
    resh["Server"]=servername
    resh["Connection"]="Keep-Alive"
    r=''
    if exts=='.py':
        # dynamic pages: emit cookies and disable caching
        if (len(me.data.cookie)>0):
            cook=""
            for k in me.data.cookie.keys():
                if cook:cook+=';\nSet-Cookie: '
                cook+=k+'='+me.data.cookie[k]
            resh['Set-Cookie']=cook
            printlog( "Set Cookie:",cook)
            me.data.cookie.clear()
        resh['Cache-Control']='no-store, no-cache, must-revalidate, post-check=1, pre-check=1'
    for k in me.data.headers:
        resh[k]=me.data.headers[k]
    if tipestr:
        if webroot.fileExists(fn):
            # static file: honour If-None-Match with an mtime-based ETag
            tm=webroot.getFileTime(fn)
            #tx='"%s%x"'%(fn,tm)
            tx='"%x"'%(tm)
            if host.get('If-None-Match','')==tx:return "HTTP/1.1 304 OK\n"
            #res+='last-modified: %s\n'%datetime.datetime.fromtimestamp(tm).strftime("%a, %d %b %Y %H:%M:%S GMT")
            resh['etag']=tx
            resh['Cache-Control']='max-age=18000, must-revalidate'
            resh['Expires']=(datetime.datetime.today()+datetime.timedelta(days=365)).strftime("%a, %d %b %Y %H:%M:%S GMT")
            r = webroot.fileRead(fn)
        elif fn=='log.sys':
            # built-in endpoint exposing the in-memory server log
            global serverlog
            r=string.joinfields(serverlog,"\n")
            resh["Content-Type"]="text/plain"
        else:return "HTTP/1.1 404 OK\n"
    else:r = string.joinfields(fn.text,'')
    #if verbose:print res
    if com:
        if 'javascript' in hdr:r=compressjs(r)
    #r,m=compress(r)
    #resh['Content-Encoding']=m
    resh['Content-Length']=str(len(r))
    # merge headers
    for k in resh:
        res.append("%s: %s" % (k,resh[k]))
    #res.append('\n')
    h=string.joinfields(res,'\n')
    me.data.TEMP = 0
    return "%s\n\n%s" % (h,r)
def exclass(x):
    """Split a declaration like "class Foo Bar" into
    (upper-cased kind, remainder): ("CLASS", "Foo Bar")."""
    # str methods replace the bare string-module functions
    # (split/upper/joinfields); behavior is unchanged.
    parts = x.split()
    ctype = parts[0].upper()
    cname = ' '.join(parts[1:])
    return (ctype, cname)
def putheader(self, header, *args):
    """Send one MIME header line; extra args become tab-indented
    continuation lines per RFC 822 folding."""
    # local renamed from "str" (shadowed the builtin); '\r\n\t'.join
    # replaces the deprecated string.joinfields.
    line = '%s: %s\r\n' % (header, '\r\n\t'.join(args))
    self.send(line)
import string
from SGMLLexer import *

# entity reference: "&name" or "&#num", optionally ';'-terminated
# (old "regex" module syntax)
_entref_exp = regex.compile("&\(\(#\|\)[a-zA-Z0-9][-.a-zA-Z0-9]*\)\(;\|\)")

# named character entities; every "#<n>" maps to chr(n)
_named_chars = {'#re': '\r', '#rs': '\n', '#space': ' '}
for i in range(256):
    _named_chars["#" + ` i `] = chr(i)

# build a table suitable for string.translate()
# (maps every whitespace character to a plain space)
_chartable = map(chr, range(256))
for i in range(256):
    if chr(i) in string.whitespace:
        _chartable[i] = " "
_chartable = string.joinfields(_chartable, '')

def replace(data, entities=None):
    """Perform general entity replacement on a string.
    """
    # (function continues beyond this view)
    data = string.translate(data, _chartable)
    if '&' in data and entities:
        value = None
        pos = _entref_exp.search(data)
        while pos >= 0 and pos + 1 < len(data):
            ref, term = _entref_exp.group(1, 3)
            if entities.has_key(ref):
                value = entities[ref]
            elif _named_chars.has_key(string.lower(ref)):
                value = _named_chars[string.lower(ref)]
def _get_new_ufn(self, path=None, content_type=None, lock=1):
    """ Create a new unique filename

    Builds the repository directory path (sliced / hashed / custom,
    per module configuration), expands the FILE_FORMAT placeholders
    (%t time, %c counter, %u user, %p path, %n name, %e extension) and
    bumps the counter until the name is unused.  Returns the path as a
    list of components, the filename last.
    """
    id = self._get_zodb_id()
    # hack so the files are not named copy_of_foo
    if COPY_OF_PROTECTION:
        id = copy_of_protect(id)
    # get name and extension components from id
    pos = string.rfind(id, '.')
    if (pos + 1):
        id_name = id[:pos]
        id_ext = id[pos:]
    else:
        id_name = id
        id_ext = ''
    if not content_type:
        content_type = self.content_type
    if REPOSITORY_EXTENSIONS in (MIMETYPE_APPEND, MIMETYPE_REPLACE) and not id_ext:
        mime_ext = guess_extension(content_type)
        if mime_ext is not None:
            # don't change extensions of unknown binaries and text files
            if not (content_type in config.unknown_types and id_ext):
                if REPOSITORY_EXTENSIONS == MIMETYPE_APPEND:
                    # don't append the same extension twice
                    if id_ext != mime_ext:
                        id_name = id_name + id_ext
                id_ext = mime_ext
    # generate directory structure
    if path is not None:
        rel_url_list = path
    else:
        rel_url_list = self._get_zodb_path()
    dirs = []
    if REPOSITORY == SYNC_ZODB:
        dirs = rel_url_list
    elif REPOSITORY in (SLICED, SLICED_REVERSE, SLICED_HASH):
        if REPOSITORY == SLICED_HASH:
            # increase distribution by including the path in the hash
            hashed = ''.join(list(rel_url_list) + [id_name])
            temp = base64.encodestring(sha.new(hashed).digest())[:-1]
            temp = temp.replace('/', '_')
            temp = temp.replace('+', '_')
        elif REPOSITORY == SLICED_REVERSE:
            temp = list(id_name)
            temp.reverse()
            temp = ''.join(temp)
        else:
            temp = id_name
        # take SLICE_DEPTH slices of SLICE_WIDTH chars; pad with '_'
        # when the name is too short
        for i in range(SLICE_DEPTH):
            if len(temp) < SLICE_WIDTH * (SLICE_DEPTH - i):
                dirs.append(SLICE_WIDTH * '_')
            else:
                dirs.append(temp[:SLICE_WIDTH])
                temp = temp[SLICE_WIDTH:]
    elif REPOSITORY == CUSTOM:
        method = aq_acquire(self, CUSTOM_METHOD)
        dirs = method(rel_url_list, id)
    if NORMALIZE_CASE == NORMALIZE:
        dirs = [d.lower() for d in dirs]
    # make directories
    dirpath = self._fsname(dirs)
    if not os.path.isdir(dirpath):
        umask = os.umask(REPOSITORY_UMASK)
        try:
            os.makedirs(dirpath)
        finally:
            os.umask(umask)
    # generate file name
    fileformat = FILE_FORMAT
    # time/counter (%t)
    if string.find(fileformat, "%t") >= 0:
        fileformat = string.replace(fileformat, "%t", "%c")
        counter = int(DateTime().strftime('%m%d%H%M%S'))
    else:
        counter = 0
    if string.find(fileformat, "%c") == -1:
        raise ValueError("Invalid file format '%s'" % FILE_FORMAT)
    # user (%u)
    if string.find(fileformat, "%u") >= 0:
        if (getattr(self, 'REQUEST', None) is not None and
            self.REQUEST.has_key('AUTHENTICATED_USER')):
            user = getSecurityManager().getUser().getUserName()
            fileformat = string.replace(fileformat, "%u", user)
        else:
            fileformat = string.replace(fileformat, "%u", "")
    # path (%p)
    if string.find(fileformat, "%p") >= 0:
        temp = string.joinfields(rel_url_list, "_")
        fileformat = string.replace(fileformat, "%p", temp)
    # file and extension (%n and %e)
    if string.find(fileformat, "%n") >= 0 or string.find(fileformat, "%e") >= 0:
        fileformat = string.replace(fileformat, "%n", id_name)
        fileformat = string.replace(fileformat, "%e", id_ext)
    # lock the directory
    if lock:
        self._dir__lock(dirpath)
    # search for unique filename
    if counter:
        fn = join(dirpath, string.replace(fileformat, "%c", ".%s" % counter))
    else:
        fn = join(dirpath, string.replace(fileformat, "%c", ''))
    while isfile(fn) or isfile(fn + '.undo') or isfile(fn + '.tmp'):
        counter = counter + 1
        fn = join(dirpath, string.replace(fileformat, "%c", ".%s" % counter))
    if counter:
        fileformat = string.replace(fileformat, "%c", ".%s" % counter)
    else:
        fileformat = string.replace(fileformat, "%c", '')
    dirs.append(fileformat)
    return dirs
#! /usr/bin/env python
def main():
    """Scan the classes in classlist.txt, extract their methods and
    data members, and write one txt + html report per class."""
    print "mkvomit.py\n"
    # read classlist.txt
    filename = '%sPhysicsTools/TheNtupleMaker/plugins/classlist.txt' % LOCALBASE
    classlist = map(strip, open(filename).readlines())
    clist = []
    for name in classlist:
        # first field is dropped; the rest is the class name
        t = split(name)
        name = joinfields(t[1:], ' ')
        headers = findHeaders(name)
        if len(headers) == 0:
            continue
        clist.append((name, headers[0]))
    clist.sort()
    classlist = clist
    #-------------------------------------------------
    # Loop over classes to be scanned
    #-------------------------------------------------
    # Make sure html and txt directories exist
    os.system("mkdir -p html; mkdir -p txt")
    count = 0
    for index, (classname, header) in enumerate(classlist):
        # Create full pathname to header
        file = LOCALBASE + header
        if not os.path.exists(file):
            file = BASE + header
            if not os.path.exists(file):
                print "** file %s not found" % file
                continue
        file = os.path.abspath(file)
        header = file
        fullname = classname
        # For now ignore templates
        if find(fullname, '<') > -1:
            continue
        # Get methods and/or datamembers and write them out
        # Initialize map to contain info about classes, methods & datamembers
        k = rfind(header, "/src/")  # search from right
        if k > 0:
            header = header[k + 5:]
        filestem = replace(header, 'interface/', '')
        filestem = split(filestem, '.h')[0]
        filestem = replace(filestem, '/', '.')
        db = {'version': VERSION, 'filestem': filestem, 'header': header}
        db['scopes'] = {}
        db['methods'] = {}
        db['datamembers'] = {}
        db['classname'] = fullname
        db['classlist'] = []
        db['baseclassnames'] = []
        db['signature'] = {}
        classMethods(fullname, db)
        db['baseclassnames'] = []
        classDataMembers(fullname, db)
        nmeth = len(db['methods'])
        ndata = len(db['datamembers'])
        if nmeth > 0 or ndata > 0:
            count += 1
            print "%5d\t%s" % (count, fullname)
            # report file named after the unqualified class name
            cname = split(fullname, '::').pop()
            methfilename = "txt/%s.%s.txt" % (filestem, cname)
            out = open(methfilename, 'w')
            printHeader(db, out)
            printMethods(db, out)
            printDataMembers(db, out)
            out.close()
            writeHTML(db, methfilename)
        else:
            print "*** no methods found for %s" % fullname
    # Create index.html
    print "\ncreating html/index.html.."
    os.system("mkindex.py")
    print "\n\tmkvomit.py is done!\n"
# NOTE(review): fragment -- the enclosing function's "def" line is above
# this view; lines/fmem/imm/immutils/address come from that scope.
# Parses a hex dump from "lines" into fmem, then byte-compares it
# against live memory at "address", logging the first mismatch.
for line in lines:
    line = line.strip().split(" ")
    for number in line:
        try:
            fmem.append(chr(int(number, 16)))
        except ValueError:
            # skip tokens that are not hex bytes
            continue
fmem = fmem
mem = imm.readMemory(address, len(fmem))
for a in range(0, len(fmem)):
    try:
        if fmem[a] != mem[a]:
            imm.setStatusBar( "Unmatched: Check log window for the dump")
            imm.log("Unmatched at offset: %d" % a)
            imm.log(" File: %s" % immutils.prettyhexprint( string.joinfields(fmem[a:a + 8], "")))
            imm.log(" Mem : %s" % immutils.prettyhexprint(mem[a:a + 8]))
            return 0x0
    except IndexError:
        # memory read was shorter than the file dump
        imm.setStatusBar("Unmatched: Check log window for the dump")
        imm.log( "Unmatch: Different string sizes= File: %d Memory: %d" % (len(fmem), len(mem)))
        return 0x0
imm.setStatusBar("Match!")
imm.log("Match!")
return 0
def setgroups(self, data):
    """Normalize whitespace-separated groups to one per line and load
    them into the textbox, replacing its current contents."""
    # str.split/str.join replace the deprecated string module functions.
    words = data.split()
    data = "\n".join(words)
    self.textbox.delete("1.0", END)
    self.textbox.insert("1.0", data)
# -*- Mode: Python; tab-width: 4 -*- import socket import string import time import http_date now = http_date.build_http_date(time.time()) cache_request = string.joinfields([ 'GET / HTTP/1.0', 'If-Modified-Since: %s' % now, ], '\r\n') + '\r\n\r\n' nocache_request = 'GET / HTTP/1.0\r\n\r\n' def get(request, host='', port=80): s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect(host, port) s.send(request) while 1: d = s.recv(8192) if not d: break s.close() class timer: def __init__(self): self.start = time.time()
def formatOptions(options):
    """Collapse a newline-separated options blob into a single
    colon-separated string, trimming whitespace around each entry."""
    # str methods replace the deprecated string-module functions
    # (strip/split/joinfields); behavior is unchanged.
    return ':'.join(line.strip() for line in options.strip().split('\n'))
def read(self):
    # Note: no size argument -- read until EOF only!
    # ''.join replaces the deprecated string.joinfields.
    return ''.join(self.readlines())
def createParameters(wspace, filename):
    """Read a whitespace-delimited parameter table and create the
    corresponding variables in the RooFit workspace *wspace*.

    Rows starting with 'N' become observations; others become constants,
    with '_hat' rows also spawning a free (un-hatted) parameter.  Adds
    the parameter of interest 'mu'.  Returns the number of bins.
    """
    if not os.path.exists(filename):
        sys.exit("** can't find file %s" % filename)
    # read table, filter out comments and blank lines,
    # then split into fields
    records = map(split,
                  # filter out blank lines and lines that start with #
                  filter(lambda x: (x != '') and (x[0] != '#'),
                         # read table and strip away leading and trailing
                         # whitespace
                         map(strip, open(filename).readlines())))
    # create parameters, indexed by the bin number (start counting at 1)
    observations = []
    constants = []
    variables = []
    for t in records:
        name = t[0]  # variable name
        values = map(atof, t[1:])  # values
        for ii, x in enumerate(values):
            # indexed name
            varname = '%s%d' % (name, ii + 1)
            # create some reasonable bounds
            xmin = 0
            jj = int(x)
            xmax = 4 * (jj + 1)
            # create parameter
            cmd = '%s[%f, %f, %f]' % (varname, x, xmin, xmax)
            wspace.factory(cmd)
            if name[0] == 'N':
                observations.append(cmd)
            else:
                constants.append(cmd)
                wspace.var(varname).setConstant()
            # if this is a "hat" variable, create the
            # associated un-hatted parameter, which are
            # not to be made constant since they are the
            # unknowns.
            if find(varname, '_hat') > -1:
                varname = replace(varname, '_hat', '')
                cmd = '%s[%f, %f, %f]' % (varname, x, xmin, xmax)
                variables.append(cmd)
                wspace.factory(cmd)
    # parameter of interest
    cmd = 'mu[1, 0, 4]'
    variables.append(cmd)
    wspace.factory(cmd)
    wspace.var('mu').SetTitle('#mu')
    print "\nobservations =>\n\t%s" % joinfields(observations, '\n\t')
    print "\nconstants =>\n\t%s" % joinfields(constants, '\n\t')
    print "\nvariables =>\n\t%s" % joinfields(variables, '\n\t')
    nbins = len(records[0][1:])
    return nbins
def writelines(self, list):
    """Write all strings in *list* with one underlying write() call."""
    # ''.join replaces the deprecated string.joinfields.
    # (parameter name "list" kept for interface compatibility, although
    # it shadows the builtin)
    self.write(''.join(list))
def run_cgi(self):
    """Execute a CGI script."""
    # cgi_info = (directory, script-path-with-optional-query)
    dir, rest = self.cgi_info
    i = string.rfind(rest, '?')
    if i >= 0:
        rest, query = rest[:i], rest[i + 1:]
    else:
        query = ''
    # everything after the first '/' is extra PATH_INFO
    i = string.find(rest, '/')
    if i >= 0:
        script, rest = rest[:i], rest[i:]
    else:
        script, rest = rest, ''
    scriptname = dir + '/' + script
    scriptfile = self.translate_path(scriptname)
    if not os.path.exists(scriptfile):
        self.send_error(404, "No such CGI script (%s)" % ` scriptname `)
        return
    if not os.path.isfile(scriptfile):
        self.send_error(
            403, "CGI script is not a plain file (%s)" % ` scriptname `)
        return
    if not executable(scriptfile):
        self.send_error(
            403, "CGI script is not executable (%s)" % ` scriptname `)
        return
    nobody = nobody_uid()
    self.send_response(200, "Script output follows")
    self.wfile.flush()  # Always flush before forking
    pid = os.fork()
    if pid != 0:
        # Parent: wait for the child and log a nonzero exit status
        pid, sts = os.waitpid(pid, 0)
        if sts:
            self.log_error("CGI script exit status x%x" % sts)
        return
    # Child: build the CGI environment, drop privileges, wire the
    # request/response streams to stdin/stdout, then exec the script.
    try:
        # Reference: http://hoohoo.ncsa.uiuc.edu/cgi/env.html
        # XXX Much of the following could be prepared ahead of time!
        env = {}
        env['SERVER_SOFTWARE'] = self.version_string()
        env['SERVER_NAME'] = self.server.server_name
        env['GATEWAY_INTERFACE'] = 'CGI/1.1'
        env['SERVER_PROTOCOL'] = self.protocol_version
        env['SERVER_PORT'] = str(self.server.server_port)
        env['REQUEST_METHOD'] = self.command
        uqrest = urllib.unquote(rest)
        env['PATH_INFO'] = uqrest
        env['PATH_TRANSLATED'] = self.translate_path(uqrest)
        env['SCRIPT_NAME'] = scriptname
        if query:
            env['QUERY_STRING'] = query
        host = self.address_string()
        if host != self.client_address[0]:
            env['REMOTE_HOST'] = host
        env['REMOTE_ADDR'] = self.client_address[0]
        # AUTH_TYPE
        # REMOTE_USER
        # REMOTE_IDENT
        env['CONTENT_TYPE'] = self.headers.type
        length = self.headers.getheader('content-length')
        if length:
            env['CONTENT_LENGTH'] = length
        # fold Accept headers (including continuation lines) into one
        # comma-separated value
        accept = []
        for line in self.headers.getallmatchingheaders('accept'):
            if line[:1] in string.whitespace:
                accept.append(string.strip(line))
            else:
                accept = accept + string.split(line[7:])
        env['HTTP_ACCEPT'] = string.joinfields(accept, ',')
        ua = self.headers.getheader('user-agent')
        if ua:
            env['HTTP_USER_AGENT'] = ua
        # XXX Other HTTP_* headers
        import regsub
        decoded_query = regsub.gsub('+', ' ', query)
        try:
            os.setuid(nobody)
        except os.error:
            pass
        os.dup2(self.rfile.fileno(), 0)
        os.dup2(self.wfile.fileno(), 1)
        print scriptfile, script, decoded_query
        os.execve(scriptfile, [script, decoded_query], env)
    except:
        self.server.handle_error(self.request, self.client_address)
        os._exit(127)
def nextClicked(self, *args):
    """Handler for the wizard's Next button: apply the current module,
    track reboot requirements, advance the notebook page, and switch
    Next to Finish on the last page."""
    try:
        module = self.moduleList[self.notebook.get_current_page()]
    except:
        pass
    if self.autoscreenshot:
        self.takeScreenShot()
    result = None
    #Call the apply method if it exists
    try:
        result = module.apply(self.notebook)
    except:
        # show the traceback in a dialog instead of crashing the wizard
        import exceptionWindow
        (ty, value, tb) = sys.exc_info()
        lst = traceback.format_exception(ty, value, tb)
        text = string.joinfields(lst, "")
        result = exceptionWindow.ExceptionWindow(module, text)
        pass
    # Store the name of every module that requires a reboot. This allows
    # us to remove a single module if the user moves back and forth through
    # the UI while still knowing that other modules still require reboot.
    if hasattr(module, "needsReboot") and module.needsReboot:
        print "adding needsReboot for %s" % module.moduleName
        self.needsReboot.append(module.moduleName)
    elif hasattr(module, "needsReboot") and not module.needsReboot:
        if module.moduleName in self.needsReboot:
            print "removing needsReboot for %s" % module.moduleName
            self.needsReboot.remove(module.moduleName)
    # record the current page as the new previous page
    self.prevPage = self.moduleNameToIndex[module.__module__][0]
    if result != None:
        pgNum = self.moduleNameToIndex[module.__module__][0]
        self.pageHistory.append(pgNum)
        # print "self.pageHistory: %s" % self.pageHistory
        if self.nextPage:
            # an explicit target page was queued; jump there
            self.notebook.set_current_page(self.nextPage)
            module = self.moduleList[self.nextPage]
            self.nextPage = None
        else:
            self.notebook.next_page()
            module = self.moduleList[self.notebook.get_current_page()]
        #Call setPointer to make the left hand pointer move to the correct pointer
        self.setPointer(self.moduleNameToIndex[module.__module__][1])
        if "grabFocus" in dir(module):
            #If the module needs to grab the focus, let it
            module.grabFocus()
    else:
        #Something went wrong in the module. Don't advance
        return
    #Check to see if we're on the last page.
    tmp = self.notebook.get_nth_page(self.notebook.get_current_page() + 1)
    if not tmp:
        # last page: rewire Next as Finish and adjust key handling
        self.nextButton.disconnect(self.nextHandler)
        self.nextHandler = self.nextButton.connect('clicked', self.finishClicked)
        self.nextButton.set_label(_("_Finish"))
        self.nextButton.set_use_underline(True)
        self.win.disconnect(self.winHandler)
        self.winHandler = self.win.connect("key-release-event", self.closeRelease)
        self.backButton.set_sensitive(True)
def writelines(self, lines):
    """Write *lines* CRLF-separated and CRLF-terminated in one call."""
    # '\r\n'.join replaces the deprecated string.joinfields.
    self.write('\r\n'.join(lines) + '\r\n')
def loadModules(self):
    """Discover, import, and launch all firstboot UI modules.

    Scans self.modulePath for *.py files, imports each one that defines a
    childWindow class, filters modules by their opt-out hooks (skipme,
    needsnetwork, moduleClass vs. reconfig mode), orders them by
    runPriority, launches each module's GUI, and wires the resulting
    pages into the notebook and the left-hand sidebar.
    """
    self.moduleList = []
    self.moduleDict = {}

    # Generate a list of all of the module files (which becomes the list of
    # all non-hidden files in the directory with extensions other than .py.
    files = os.listdir(self.modulePath)
    lst = []
    for f in files:
        if f[0] == '.':
            continue
        if f[-3:] != ".py":
            continue
        lst.append(f[:-3])

    # Import each module, and filter out those
    for module in lst:
        # Dynamically import the module and, if it exposes a childWindow
        # class, instantiate it into the local name `obj`.
        cmd = ("import %s\nif %s.__dict__.has_key('childWindow'):"
               "obj = %s.childWindow()") % (module, module, module)
        try:
            exec(cmd)
        except:
            print _("module import of %s failed: %s") % (module, sys.exc_type)
            continue  # if the exec fails, skip this module

        # If the exec imported the module but found no childWindow, `obj`
        # was never bound; this self-assignment raises NameError in that
        # case so the module is skipped.
        try:
            obj = obj
        except NameError:
            continue

        # XXX - hack to allow firstboot to pass in the parent class into language
        # this will allow the language module to change firstboot's current locale
        if module == "language" or hasattr(obj, "needsparent"):
            obj.passInParent(self)

        # if a module decides not to run, skip it first before trying any
        # of the other hooks. bz #158095
        if hasattr(obj, "skipme"):
            # the module itself has decided for some reason that
            # that it should not be shown, so skip it
            continue

        # if the module needs network access, and we dont have it, skip
        # the module
        if hasattr(obj, "needsnetwork"):
            if not self.checkNetwork():
                # we need a way to run some stuff from the modules
                # if they have no network
                if hasattr(obj, "noNetwork"):
                    obj.noNetwork()
                continue

        # If the module defines a moduleClass, it has to match the mode
        # we're starting up in, otherwise it's always used.  Add it to
        # a dictionary keyed by the module's declared priority.
        if hasattr(obj, "moduleClass"):
            if (self.doReconfig and (obj.moduleClass == "reconfig")):
                self.moduleDict[float(obj.runPriority)] = obj
            elif (not self.doReconfig and (obj.moduleClass != "reconfig")):
                self.moduleDict[float(obj.runPriority)] = obj
        else:
            self.moduleDict[float(obj.runPriority)] = obj

    # Get the list of module priorities, sort them to determine a run
    # order, and build a list with the modules in the proper order.
    modulePriorityList = self.moduleDict.keys()
    modulePriorityList.sort()

    # Add the modules to the proper lists.
    pages = 0
    sidebarIndex = -1
    self.moduleNameToIndex = {}

    for priority in modulePriorityList:
        # Add the module to the list of modules.
        module = self.moduleDict[priority]

        # Launch the module's GUI.  launch() is expected to return either
        # None (skip this module) or a (vbox, pix, title) triple; any
        # exception is shown in the exception window and the module is
        # still considered (debug path) or skipped (non-debug path).
        vbox = None
        if self.doDebug:
            try:
                print "calling", module.moduleName
                result = module.launch(self.doDebug)
                if result is None:
                    continue
                else:
                    vbox, pix, title = result
            except:
                import exceptionWindow
                (ty, value, tb) = sys.exc_info()
                lst = traceback.format_exception(ty, value, tb)
                text = string.joinfields(lst, "")
                exceptionWindow.ExceptionWindow(module, text)
                pass
        else:
            try:
                result = module.launch()
                if result is None:
                    continue
                else:
                    vbox, pix, title = result
            except:
                import exceptionWindow
                (ty, value, tb) = sys.exc_info()
                lst = traceback.format_exception(ty, value, tb)
                text = string.joinfields(lst, "")
                exceptionWindow.ExceptionWindow(module, text)
                pass
                continue

        if vbox and title:
            # If it launched, add it to the module list.
            self.moduleList.append(module)

            # Build the large page title shown above the module's content.
            title_label = gtk.Label("")
            title_label.set_alignment(0.0, 0.5)
            title_label.set_markup("<span foreground='#000000' size='30000' font_family='Helvetica'><b>%s</b></span>" % (_(title)))

            titleBox = gtk.HBox()
            if pix:
                titleBox.pack_start(pix, False)
            titleBox.pack_start(title_label, True)
            titleBox.set_spacing(8)

            # Prepend the title bar to the module's vbox.
            vbox.pack_start(titleBox, False)
            vbox.reorder_child(titleBox, 0)

            if self.lowRes:
                # If we're in 640x480 mode, remove the title bars
                vbox.remove(vbox.get_children()[0])

            # If the module has a name, add it to the list of labels
            if hasattr(module, "moduleName"):
                if not hasattr(module, "noSidebar") or getattr(module, "noSidebar") == False:
                    # Build the sidebar row: a blank pointer image plus the
                    # (translated) module name.
                    hbox = gtk.HBox(False, 5)
                    pix = functions.imageFromFile("pointer-blank.png")
                    label = gtk.Label("")
                    label.set_markup("<span foreground='#FFFFFF'><b>%s</b></span>" % (_(module.moduleName)))
                    label.set_alignment(0.0, 0.5)

                    # Wrap the lines if they're too long
                    label.set_line_wrap(True)
                    (w, h) = self.leftEventBox.get_size_request()
                    label.set_size_request((int)(w * 0.8), -1)

                    # Make sure the arrow is at the top of any wrapped line
                    alignment = gtk.Alignment(yalign=0.2)
                    alignment.add(pix)

                    hbox.pack_start(alignment, False)
                    hbox.pack_end(label, True)

                    self.leftVBox.pack_start(hbox, False, True, 3)
                    sidebarIndex += 1

                # we need a non tranlated name for each module so we can
                # jump around
                self.moduleNameToIndex[module.__module__] = (pages, sidebarIndex)
                self.notebook.append_page(vbox, gtk.Label(_(module.moduleName)))
            else:
                self.notebook.append_page(vbox, gtk.Label(" "))
                self.moduleNameToIndex["unamemodule-%s" % pages] = (pages, sidebarIndex)

            pages += 1