def generate_bot(target_name, param_set):
    source_name = param_set[0]
    source_file_name = source_name + ".py"
    source_dir = os.path.join("pokerbots", "player", source_name)
    target_file_name = target_name + ".py"
    target_dir = os.path.join("pokerbots", "player", target_name)
    target_full_file_name = os.path.join(target_dir, target_file_name)
    target_init_py_name = os.path.join(target_dir, "__init__.py")
    target_hand_evaluator_name = os.path.join(target_dir, "hand_evaluator.py")

    # Must copy every time, since the source can be different now
    # first, delete any existing bot
    shutil.rmtree(target_dir, True)
    # copy the new bot over
    shutil.copytree(source_dir, target_dir)
    # rename the template file to the final file
    os.rename(os.path.join(target_dir, source_file_name), target_full_file_name)

    # also need to replace the line where we create a bot
    # commas after print statement are important!
    # TODO: This is really fragile, should match regex too
    for line in fileinput.input(target_full_file_name, inplace=1):
        if fileinput.filelineno() == 5:
            print "class %s:" % (target_name,)
        elif fileinput.filelineno() == 6:
            # TODO: Use all parameters
            print "    def __init__(self, param1=%s, param2=%s, param5=%s, param6=%s, param7=%s, param8=%s, param9=%s):" % param_set[1:]
        elif fileinput.filelineno() == 11:
            print "        self.name = \"%s\"" % (target_name,)
        else:
            print line,
def parseTag(inputfilename, outputfilename, searchExp):
    fin = fileinput.input(inputfilename, inplace=0,
                          openhook=fileinput.hook_encoded(fileencoding))
    fout = codecs.open(outputfilename, "w", fileencoding)
    isblock = 0
    for line in fin:
        newline = line
        isfirst = searchExp in line
        islast = "\tMedium;" in line
        issingleline = isfirst and islast  # and "," in line
        fixquotes = 0
        if issingleline:
            fixquotes = "\t" in extractThirdField(line)
            # If there is a comma on the third field, quote it!
            if fixquotes:
                newline = leftQuoteThirdField(line)
                newline = rightQuoteThirdField(newline)
                print "%d: %s" % (fileinput.filelineno(), newline)
            # print "%d:(issingle):%s" % (fileinput.filelineno(), newline)
        if (not issingleline) and (isfirst and not islast):
            # newline = reverseReplace(line, searchExp, searchExp + '"', 1)
            newline = leftQuoteThirdField(line)
            print "quoting left"
            isblock = 1
        if (not issingleline) and (not isfirst and islast and isblock):
            newline = reverseReplace(line, "\tMedium;", '"' + "\tMedium;", 1)
            print "quoting right"
            isblock = 0
        # TODO: Fix the single line comma bug
        fout.write(newline)
        if issingleline:
            print "%d: %s" % (fileinput.filelineno(), newline)
    fout.close()
def main():
    """Parse stream of requests and insert into MongoDB collection.

    This script will accept input from either stdin or one or more files as
    arguments. Requests that are unparseable or whose IP address cannot be
    mapped to a country are skipped and written as is to separate log files.
    """
    for line in fileinput.input():
        try:
            request = parse(line)
        except apachelog.ApacheLogParserError:
            # log unparseable requests
            req_log.error(line.strip('\n'))
            continue
        if request is not None:
            request = str_to_dt(request)
            try:
                request = add_country(request)
            except (pygeoip.GeoIPError, KeyError):
                # log unresolveable IP addresses
                ip_log.error(line.strip('\n'))
                continue
            request = req_to_url(request)
            insert(collection, request)
    lines = fileinput.filelineno()
    if not lines:
        sys.exit("No requests to process")
    print("{0} requests processed".format(fileinput.filelineno()))
def test_state_is_None(self):
    """Tests fileinput.filelineno() when fileinput._state is None.

    Ensure that it raises RuntimeError with a meaningful error message
    and does not modify fileinput._state.
    """
    fileinput._state = None
    with self.assertRaises(RuntimeError) as cm:
        fileinput.filelineno()
    self.assertEqual(("no active input()",), cm.exception.args)
    self.assertIsNone(fileinput._state)
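# A minimal standalone sketch (not part of the test suite above) of the behaviour
# that test pins down: with no active input() stream, the module-level
# fileinput.filelineno() raises RuntimeError("no active input()").
import fileinput

try:
    # No fileinput.input() has been opened, so there is no active state to query.
    fileinput.filelineno()
except RuntimeError as exc:
    print(exc)  # -> no active input()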
def check_files(files, verbose):
    in_multiline = False
    multiline_start = 0
    multiline_line = ""
    logical_line = ""
    token = False

    prev_file = None
    prev_line = ""
    prev_lineno = 0

    for line in fileinput.input(files):
        if fileinput.isfirstline():
            # if in_multiline when the new file starts then we didn't
            # find the end of a heredoc in the last file.
            if in_multiline:
                print_error('E012: heredoc did not end before EOF',
                            multiline_line,
                            filename=prev_file, filelineno=multiline_start)
                in_multiline = False

            # last line of a previous file should always end with a
            # newline
            if prev_file and not prev_line.endswith('\n'):
                print_error('E004: file did not end with a newline',
                            prev_line,
                            filename=prev_file, filelineno=prev_lineno)

            prev_file = fileinput.filename()

            if verbose:
                print "Running bash8 on %s" % fileinput.filename()

        # NOTE(sdague): multiline processing of heredocs is interesting
        if not in_multiline:
            logical_line = line
            token = starts_multiline(line)
            if token:
                in_multiline = True
                multiline_start = fileinput.filelineno()
                multiline_line = line
                continue
        else:
            logical_line = logical_line + line
            if not end_of_multiline(line, token):
                continue
            else:
                in_multiline = False

        check_no_trailing_whitespace(logical_line)
        check_indents(logical_line)
        check_for_do(logical_line)
        check_if_then(logical_line)
        check_function_decl(logical_line)

        prev_line = logical_line
        prev_lineno = fileinput.filelineno()
def main():
    urlHashMap = {}
    urlMapFile = '{0}/{1}'.format(args.outputDir, URL_MAP_FILE)
    if not os.path.exists(args.outputDir):
        os.makedirs(args.outputDir)
    if os.path.exists(urlMapFile):
        with open(urlMapFile, 'r') as f:
            for line in f:
                parts = line.rstrip().split('\t')
                if len(parts) != 2:
                    print "Invalid line in url map file: {0}".format(line)
                    continue
                urlHashMap[parts[0]] = parts[1]
    urlsScraped = 0
    for line in fileinput.input(args.inputFile, mode='r'):
        if args.numUrls > 0 and fileinput.filelineno() >= args.numUrls:
            break
        parts = line.rstrip().split('\t')
        if len(parts) < args.columnIndex:
            print "Invalid line, column index beyond total number of columns: line no {0}: {1}".format(fileinput.filelineno(), line)
            continue
        url = parts[args.columnIndex]
        if len(url) == 0:
            print "Empty URL: line no {0}: {1}".format(fileinput.filelineno(), line)
            continue
        print "Downloading {0} at line {1}".format(url, fileinput.filelineno())
        urlHash = hashlib.sha1(url).hexdigest()
        if urlHash not in urlHashMap:
            urlHashMap[urlHash] = url
        outFilepath = "{0}/{1}.html".format(args.outputDir, urlHash)
        if os.path.exists(outFilepath):
            print "File already exists for {0}: {1}".format(url, outFilepath)
            continue
        try:
            page = urllib.urlopen(url).read()
        except IOError as e:
            print "Unable to download URL: {0}, exception: {1}".format(url, e)
            continue
        with open(outFilepath, 'w') as outFile:
            outFile.write(page)
        urlsScraped += 1
    print "Downloaded {0} URLs".format(urlsScraped)
    with open(urlMapFile, 'w') as f:
        [f.write("{0}\t{1}\n".format(urlHash, url)) for urlHash, url in urlHashMap.iteritems()]
def backport(rootdir="."):
    for folder, subs, files in os.walk(rootdir):
        for filename in files:
            src_filename = os.path.join(folder, filename)
            # Skip non python files
            if not src_filename.endswith(".py"):
                continue
            if (__file__ and
                    os.path.basename(src_filename) == os.path.basename(__file__)):
                continue
            print(src_filename)
            last_class = ""
            for line in fileinput.input(src_filename, inplace=True):
                if fileinput.filelineno() == 1:
                    if line.startswith("#!"):
                        print(line, end="")
                        print("from __future__ import unicode_literals")
                    else:
                        print("from __future__ import unicode_literals")
                        print(line, end="")
                    continue
                if line.strip().startswith("class"):
                    last_class = line.strip().split()[1]
                    last_class = re.match(r'([a-zA-Z0-9]+)', last_class).group(1)
                if "__str__(" in line:
                    line = line.replace("__str__(", "__unicode__(")
                if "super()" in line:
                    old_super = "super({}, self)".format(last_class)
                    line = line.replace("super()", old_super)
                print(line, end="")
def process_file(fname):
    # Builds `guard_stack` list for each line of a file as
    # [['!defined (CONFIG_DISABLE_yyy_BUILTIN)', ...], ...]
    # meaning that all the listed guards (conditionals) have to hold for the
    # line to be kept by the preprocessor.
    guard_stack = []

    for line in fileinput.input(fname):
        if_match = re.match('^# *if(.*)', line)
        elif_match = re.match('^# *elif(.*)', line)
        else_match = re.match('^# *else', line)
        endif_match = re.match('^# *endif', line)

        if if_match is not None:
            guard_stack.append([process_guard(if_match.group(1))])
        elif elif_match is not None:
            guards = guard_stack[-1]
            guards[-1] = '!(%s)' % guards[-1]
            guards.append(process_guard(elif_match.group(1)))
        elif else_match is not None:
            guards = guard_stack[-1]
            guards[-1] = '!(%s)' % guards[-1]
        elif endif_match is not None:
            guard_stack.pop()

        lnum = fileinput.filelineno()
        process_line(fname, lnum, line, guard_stack)

    if guard_stack:
        print('warning: {fname}: unbalanced preprocessor conditional '
              'directives (analysis finished with no closing `#endif` '
              'for {guard_stack})'
              .format(fname=fname, guard_stack=guard_stack))
def load_data(filenames, coarse=False):
    """
    Load samples from one or more files where the format is:

    COARSE_CATEGORY:fine_category some sample data blah blah

    This is a custom data loader based on the `load_files` function in this code:
    https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/base.py
    """
    data = []
    target = []
    fine_target = []

    if coarse:
        data_re = re.compile(r'(\w+):(\w+) (.+)')
    else:
        data_re = re.compile(r'(\w+:\w+) (.+)')

    for line in fileinput.input(filenames):
        d = data_re.match(line)
        if not d:
            raise Exception("Invalid format in file {} at line {}"
                            .format(fileinput.filename(), fileinput.filelineno()))
        if coarse:
            target.append(d.group(1))
            fine_target.append(d.group(2))
            data.append(d.group(3))
        else:
            target.append(d.group(1))
            data.append(d.group(2))

    return Bunch(
        data=numpy.array(data),
        target=numpy.array(target),
        target_names=set(target),
    )
def metadataforfile(filename, filedir):
    '''Read metadata from the first 6 lines of the asc file'''
    metadata = [None] * 6
    nrows = ncols = None
    x = y = None
    cellsize = None
    nodata_value = None
    print filedir + filename
    for line in fileinput.input([filedir + filename]):
        arg, val = str.split(line)
        print arg, val
        if arg == "nrows":
            nrows = int(val)
            metadata[0] = nrows
        if arg == "ncols":
            ncols = int(val)
            metadata[1] = ncols
        if arg == "xllcorner":
            x = float(val)
            metadata[3] = x
        if arg == "yllcorner":
            y = float(val)
            metadata[4] = y
        if arg == "cellsize":
            cellsize = float(val)
            metadata[2] = cellsize
        if arg == "NODATA_value":
            nodata_value = float(val)
            metadata[5] = nodata_value
        if fileinput.filelineno() >= 6:
            break
    fileinput.close()
    return metadata
def t08():
    """Use fileinput to implement grep-like functionality"""
    import re
    pattern = re.compile(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}')
    for line in fileinput.input('data.txt', backup='.bak', inplace=1):
        if pattern.search(line):
            print(fileinput.filename(), fileinput.filelineno(), line)
def main():
    typesAsPrints = collections.defaultdict(list)
    ansiRegex = re.compile('^\"(.*)\".*?Line = (\d*).*?(\(.*?ANSI_TYPE_AS_PRINT.*?\))')
    for line in sys.stdin:
        m = ansiRegex.search(line)
        if m:
            print "Adding usage: %s to %s" % (m.group(1), m.group(2))
            typesAsPrints[m.group(1)].append(int(m.group(2)))

    typeRegex = re.compile('t\s*y\s*p\s*e', re.IGNORECASE)

    # The original method for processing files, as used by Mark to correct TYPE_AS_PRINT
    for filename, lines in typesAsPrints.items():
        if not os.path.exists(filename):
            print "Warning: file %s does not exist" % filename
            continue
        print "Processing file %s, line %s" % (filename, lines)
        for line in fileinput.input(os.path.join(filename), inplace=True):
            if fileinput.filelineno() in lines:
                subst_line = typeRegex.sub('print', line.rstrip(), count=1)
                for l in split_fortran_line_at_72(subst_line):
                    print l,
            else:
                print line,
def test_missing_debug_statements(self):
    # Exclude explicit debug statements written in the code
    exclude = {
        'regex.py': [240, 241],
    }

    message = "\nFound a missing debug statement at line %d of file %r: %r"
    filename = None
    file_excluded = []
    files = (
        glob.glob(os.path.join(self.source_dir, '*.py')) +
        glob.glob(os.path.join(self.source_dir, 'validators/*.py'))
    )

    for line in fileinput.input(files):
        if fileinput.isfirstline():
            filename = fileinput.filename()
            file_excluded = exclude.get(os.path.basename(filename), [])
        lineno = fileinput.filelineno()
        if lineno in file_excluded:
            continue
        match = self.missing_debug.search(line)
        self.assertIsNone(
            match,
            message % (lineno, filename, match.group(0) if match else None))
def main():
    """Parse stream of requests and insert into MongoDB collection.

    This script will accept input from either stdin or one or more files as
    arguments. Two loggers control logging--one general purpose logger for the
    application and one for logging requests that fail to make it through the
    pipeline. The latter is configured to route different kinds of failures to
    different streams as configured. The failed requests will be logged
    unmodified, as they entered the pipeline, to make later attempts at
    processing easier. Failure to send any requests through the pipeline will
    result in an exit status of 1.
    """
    req_buffer = []
    for line in fileinput.input():
        try:
            request = process(line)
        except apachelog.ApacheLogParserError:
            # log unparseable requests
            req_log.error(line.strip(), extra={'err_type': 'REQUEST_ERROR'})
            continue
        except requests.exceptions.RequestException:
            req_log.error(line.strip(), extra={'err_type': 'DSPACE_ERROR'})
            continue
        except Exception, e:
            log.error(e, extra={'inputfile': fileinput.filename(),
                                'inputline': fileinput.filelineno()})
            continue
        if request:
            req_buffer.append(request)
            if len(req_buffer) > 999:
                insert(collection, req_buffer)
                req_buffer = []
def sample(self, files, print_every=1000):
    """
    Determines the set of sample records from the file names given.

    @param self        the object
    @param files       list of filenames. Reads from STDIN if "-" is specified.
    @param print_every Write to STDERR the record number every print_every lines.
                       Defaults to 1000. Set to 0 to disable printing altogether.
    """
    recnum = 0
    try:
        for ln in fileinput.input(files):
            if self.header and fileinput.filelineno() == 1:
                self.head_rec = ln
            else:
                recnum += 1
                if print_every > 0 and recnum % print_every == 0:
                    sys.stderr.write("%d\r" % recnum)
                if recnum <= self.num_samples:
                    self.samples.append(ln)
                else:
                    idx = int(random.random() * recnum)
                    if idx < self.num_samples:
                        self.samples[idx] = ln
    except IOError, msg:
        raise Usage("Problem reading from file '%s':\n%s"
                    % (fileinput.filename(), msg))
def output():
    for event in filtered_events():
        # strip out level and get ready for filtering
        for level, event_items in event['Items']:
            # levels 1 and 2 both contain key-value pairs in the same format 'Key' 'Value'
            # print('Items', [i for l, i in event_items])
            current_group = [parse_line(i) for l, i in event_items]
            # process level 2 before level 1 because...?
            if level == 2:
                # get a (possibly empty) list of wanted member names from a
                # (possibly non-existent) attribute and keep only those members
                # of the event that are on the list
                if not filtered_attributes:
                    # level 2 occurs before level 1 in an event. Something odd
                    # about this, but recover
                    filtered_attributes = [{'Name': 'Level2BeforeLevel1',
                                            'Value': (fileinput.filename(),
                                                      fileinput.filelineno())}]
                parent = filtered_attributes[-1]['Name']
                members_wanted = attribute_list.get(parent)
                # handle members
                if members_wanted:
                    filtered_members = [r for r in current_group
                                        if r['Name'] in members_wanted]
                    if filtered_members:
                        # attach members to previous attribute
                        filtered_attributes[-1]['Members'] = filtered_members
            if level == 1:
                # handle attributes
                # note name of last attribute: this is the parent of any members
                # in the next level 2 (member) group
                parent = current_group[-1]['Name']
                filtered_attributes = [r for r in current_group
                                       if r['Name'] in attribute_list]
        # commit changes to attributes of the current event including members
        # of this attribute
        event.update({'Attributes': filtered_attributes})
        del event['Items']
        yield event
def realInsert(tableName, insert_data_and_dataType_arr):
    # Must include a delete flag
    table_def_path = root_path + current_database + "/" + tableName
    table_status_path = table_def_path + "_stat"
    table_dat_path = table_def_path + "_dat"
    f = open(table_dat_path, 'a')
    s = ""
    for k, v in enumerate(insert_data_and_dataType_arr):
        if k % 2 == 0:
            if k != 0:
                s += ('|' + str(v))
            else:
                s += str(v)
    # This is the delete flag; 1 means deleted
    s += "|0\n"
    print s
    f.write(s)
    f.close()
    # Increment the count in the stat file
    for line in fileinput.input(table_status_path, inplace=1):
        if fileinput.filelineno() == 1:
            print int(line) + 1,
def all_lines(args, params, linef, silent=False, pipe=True):
    input = fileinput.input(args)
    if pipe:
        # correct behaviour over pipes, i.e. finish execution on SIGPIPE
        import signal
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    while True:
        try:
            line = input.next().strip()
            if '' == line:
                continue
            try:
                info = PipeInfo(fileinput.filename(), fileinput.filelineno())
                js = json.loads(line)
                if not type(js) is dict:
                    eprint('Non dictionary at file:', info.file,
                           'line:', info.line, silent=silent)
                    continue
                ret = linef(js, params, info)
                if ret:
                    yield ret
            except ValueError as e:
                eprint(str(e), 'file:', info.file, 'line:', info.line,
                       silent=silent)
        except IOError as e:
            eprint(str(e), silent=silent)
        except StopIteration:
            break
def _main():
    parser = optparse.OptionParser(usage="usage: %prog [options] [<file>...]",
                                   description=__doc__)
    parser.add_option("-d", "--delimiter", dest="delimiter", default='\t',
                      help="delimiter between defline and sequence"
                      " [default TAB]", metavar="STRING")
    parser.add_option("-D", "--defline", dest="defline",
                      choices=('before', 'after', 'omit'), default="after",
                      help="position of defline with respect to sequence, one"
                      " of 'before', 'after' [default], or 'omit'",
                      metavar="POSITION")
    parser.add_option("-i", "--inverse", dest="inverse", action="store_true",
                      help="do the inverse transformation (flat to FASTA)")
    DEFAULT_WRAP = 80
    parser.add_option("-w", "--wrap", dest="wrap", type="int",
                      default=DEFAULT_WRAP,
                      help="for --inverse, wrap sequence to specified width"
                      " [default %s, 0 means don't wrap at all]" % DEFAULT_WRAP,
                      metavar="COLUMNS")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="be verbose")
    parser.add_option("--copyright", action="store_true", dest="copyright",
                      help="print copyright and exit")
    options, args = parser.parse_args()

    if options.wrap < 0:
        parser.print_help()
        sys.exit(1)

    if not options.inverse:
        if not args:
            files = [sys.stdin]
        else:
            files = [open(fn) for fn in args]
        for f in files:
            for locusname, defline, sequence in greylag.read_fasta_file(f):
                write_flattened_locus(options, defline, sequence)
    else:
        for line in fileinput.input(args):
            if options.defline != 'omit':
                parts = line.split(options.delimiter, 1)
                if len(parts) < 2:
                    error("input line lacks delimiter")
                if options.defline == 'before':
                    defline, sequence = parts
                else:
                    sequence, defline = parts
            else:
                sequence = line
                defline = "%s:%s" % (fileinput.filename(), fileinput.filelineno())
            sequence = sequence.strip()
            print defline.strip()
            if options.wrap:
                for start in range(0, len(sequence), options.wrap):
                    print sequence[start:start+options.wrap]
            else:
                print sequence
def parse_filter(filter):
    if filter == "$filename":
        return lambda i: [fileinput.filename()]
    elif filter == "$lineno":
        return lambda i: [fileinput.lineno()]
    elif filter == "$filelineno":
        return lambda i: [fileinput.filelineno()]
    else:
        return lambda i: jpath(filter, i)
def __grep_substring(self):
    try:
        for line in input(self.file_names):
            if re.search(self.regex, line, re.I):
                print("{}: {} : {}".format(filename(), filelineno(), line))
    except Exception as e:
        print(e)
    finally:
        pass
def process_normal_line(self, line):
    """process a normal line and check whether it is the start of a new block"""
    for f in re_source_block_formats:
        if f.start.match(line):
            self.add_block_lines()
            self.format = f
            self.lineno = fileinput.filelineno()

    self.lines.append(line)
def editLine(filename, username, increment):
    setFlag = 0
    lineCount = getLineCount(filename)
    print(lineCount, end="")
    dataFile = open(filename, "r")
    for line in dataFile:
        lineScan = string.rstrip(line)
        lineScan = string.split(lineScan)
        if lineScan[0] == username:
            setFlag = 1
    dataFile.close()
    if setFlag == 1:
        """Search for the line in question"""
        for line in fileinput.input("witch", inplace=1):
            """Format the line so the whole username is line[0]"""
            lineTemp = string.rstrip(line)
            lineTemp = string.split(lineTemp)
            if username == lineTemp[0]:
                newVal = int(lineTemp[1]) + increment
                if newVal < 0:
                    newVal = 0
                if fileinput.filelineno() == lineCount:
                    newLine = (lineTemp[0] + " " + str(newVal) + " " +
                               datetime.strftime(getCurrentDateTime(),
                                                 "%d/%m/%Y/%H:%M:%S"))
                else:
                    newLine = (lineTemp[0] + " " + str(newVal) + " " +
                               datetime.strftime(getCurrentDateTime(),
                                                 "%d/%m/%Y/%H:%M:%S") + "\n")
                line = line.replace(line, newLine)
                print(line, end="")
            else:
                print(line, end="")
    else:
        createdLine = ("\n" + username + " " + str(increment) + " " +
                       datetime.strftime(getCurrentDateTime(),
                                         "%d/%m/%Y/%H:%M:%S"))
        dataFile = open(filename, "a")
        dataFile.write(createdLine)
def substitute_line_in_file(path, line_no, new_str):
    # line_no is 1-based
    bak_ext = '.bak' + str(random.randint(1000, 9999))
    input_object = fileinput.input(path, inplace=True, backup=bak_ext)
    for line in input_object:
        if fileinput.filelineno() == line_no:
            print new_str.rstrip("\n\r")
        else:
            print line.rstrip("\n\r")
    return path + bak_ext
def processinput(self):
    for line in self.fileinput.input():
        try:
            self.process(line)
        except AwkUnhandledLine, e:
            raise AwkUnhandledLine("Don't understand line %d of file %s: %s"
                                   % (fileinput.filelineno(),
                                      fileinput.filename(), line))
def __init__(self, config_files):
    self.directives = []
    raw_lines = fileinput.input(config_files)
    for raw_line in raw_lines:
        if self.ignored_line(raw_line):
            continue
        directive = Directive.from_raw_line(raw_line,
                                            inpfile=fileinput.filename(),
                                            lineno=fileinput.filelineno())
        heapq.heappush(self.directives, (len(directive.location), directive))
def print_error(self, error, line, filename=None, filelineno=None):
    if self.should_ignore(error):
        return

    if not filename:
        filename = fileinput.filename()
    if not filelineno:
        filelineno = fileinput.filelineno()

    self.ERRORS = self.ERRORS + 1
    self.log_error(error, line, filename, filelineno)
def print_error(error, line, filename=None, filelineno=None):
    if not filename:
        filename = fileinput.filename()
    if not filelineno:
        filelineno = fileinput.filelineno()

    global ERRORS
    ERRORS = ERRORS + 1
    print("%s: '%s'" % (error, line.rstrip('\n')))
    print(" - %s: L%s" % (filename, filelineno))
def do_it():
    for line in fileinput.input():
        d = {"username": self.name,
             "api_key": self.api_key,
             "machine_name": self.machine_name,
             "line": line,
             "line_number": fileinput.filelineno()}
        result = requests.post(self.api, data=json.dumps(d))
        self.results.append(result)
    fileinput.close()
def genIndex(indexFileName, extension):
    fname = '*.' + extension
    for line in fileinput.input(glob.glob(fname)):
        location = fileinput.filename(), fileinput.filelineno()
        for word in aword.findall(line.lower()):
            if word[0] != '<':
                index.setdefault(word, []).append(location)
    shelf = shelve.open(indexFileName, 'n')
    for word in index:
        shelf[word] = index[word]
    shelf.close()
def report_err(s):
    global err
    print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s))
    err = 1
# To read a specific line from a file
from fileinput import input, filelineno
from pprint import pprint as pp

print("Specific line from a file")
for line in input("password.txt"):
    if filelineno() == 15 and line.rstrip():
        print("{} => {}".format(filelineno(), line))

# Read and write to a file
try:
    fp = open('password.txt')  # default opens it in read mode
    for line in fp:
        # print(line)
        print(line, end='')
    fp.close()
except Exception as e:
    print(e)
finally:
    fp.close()

print()

try:
    fp = open('password.txt', "r")
    users = list()
    for line in fp:
        users.append(line.split(":")[0])
    users.sort()
    for index, user in enumerate(users, 1):  # enumerate by default starts indexing from 0; possible to specify a start index too
def report_cant(builtin):
    sys.stderr.write("%s:%d: x86 builtin %s used, too many replacements\n"
                     % (fileinput.filename(), fileinput.filelineno(), builtin))
# Copied from https://rosettacode.org/wiki/CSV_data_manipulation#Python
import fileinput

changerow, changecolumn, changevalue = 2, 4, '"Spam"'

with fileinput.input('csv_data_manipulation.csv', inplace=True) as f:
    for line in f:
        if fileinput.filelineno() == changerow:
            fields = line.rstrip().split(',')
            fields[changecolumn - 1] = changevalue
            line = ','.join(fields) + '\n'
        print(line, end='')
def __init__(self, message, instead):
    self.message = message
    self.filename = fileinput.filename()
    self.lineno = fileinput.filelineno()
    self.instead = instead
def print_error(error, line):
    global ERRORS
    ERRORS = ERRORS + 1
    print("%s: '%s'" % (error, line.rstrip('\n')))
    print(" - %s: L%s" % (fileinput.filename(), fileinput.filelineno()))
    description="A little helper for upgrading from BOUT++ version 3 to version 4",
    formatter_class=argparse.RawDescriptionHelpFormatter,
    epilog=epilog)

parser.add_argument("-r", "--replace", action="store_true",
                    help="Actually make the fix")
parser.add_argument("files", nargs='+', help="Files to process")

args = parser.parse_args()

# Loops over all lines across all files
for line in fileinput.input(files=args.files, inplace=args.replace):
    filename = fileinput.filename()
    line_num = fileinput.filelineno()

    # Apply the transformations and then update the line if we're doing a replacement
    new_line = fix_nonmembers(line, filename, line_num, args.replace)
    line = new_line if args.replace else line

    new_line = fix_subscripts(line, filename, line_num, args.replace)
    line = new_line if args.replace else line

    new_line = fix_coordinates(line, filename, line_num, args.replace)
    line = new_line if args.replace else line

    new_line = fix_local_mesh_size(line, filename, line_num, args.replace)
    line = new_line if args.replace else line

    new_line = throw_warnings(line, filename, line_num)
def check_files(self, files, verbose):
    in_multiline = False
    multiline_start = 0
    multiline_line = ""
    logical_line = ""
    token = False

    prev_file = None
    prev_line = ""
    prev_lineno = 0

    # NOTE(mrodden): magic; replace with proper
    # report class when necessary
    report = self

    for fname in files:
        for line in fileinput.input(fname):
            if fileinput.isfirstline():
                # if in_multiline when the new file starts then we didn't
                # find the end of a heredoc in the last file.
                if in_multiline:
                    report.print_error(
                        'E012: heredoc did not end before EOF',
                        multiline_line,
                        filename=prev_file, filelineno=multiline_start)
                    in_multiline = False

                # last line of a previous file should always end with a
                # newline
                if prev_file and not prev_line.endswith('\n'):
                    report.print_error(
                        'E004: file did not end with a newline',
                        prev_line,
                        filename=prev_file, filelineno=prev_lineno)

                prev_file = fileinput.filename()

                if verbose:
                    print("Running bashate on %s" % fileinput.filename())

            # NOTE(sdague): multiline processing of heredocs is interesting
            if not in_multiline:
                logical_line = line
                token = starts_multiline(line)
                if token:
                    in_multiline = True
                    multiline_start = fileinput.filelineno()
                    multiline_line = line
                    continue
            else:
                logical_line = logical_line + line
                if not end_of_multiline(line, token):
                    continue
                else:
                    in_multiline = False

            check_no_trailing_whitespace(logical_line, report)
            check_indents(logical_line, report)
            check_for_do(logical_line, report)
            check_if_then(logical_line, report)
            check_function_decl(logical_line, report)

            prev_line = logical_line
            prev_lineno = fileinput.filelineno()
def report_error(self, msg):
    name = fileinput.filename()
    line = fileinput.filelineno()
    self.errors.append("%s:%d: %s" % (name, line, msg))
def iter_files_lines(files_list):
    openhook = fileinput.hook_encoded(encoding="utf-8", errors="surrogateescape")
    with fileinput.input(files_list, openhook=openhook) as line_iter:
        for line in line_iter:
            yield (fileinput.filename(), fileinput.filelineno(), line)
          row['Units'], lowerlevel, upperlevel, Detection,
          lower_Exceedance, upper_Exceedance))

os.system("mode con: cols=220")

with fileinput.input(sys.argv[1:]) as fileset:
    reader = csv.reader(fileset)
    for count, row in enumerate(reader):
        if count < 1:
            continue
        elif count == 1:
            fieldnames = row
            print(fieldnames)
            break
    reader = csv.DictReader(fileset, fieldnames=fieldnames)
    for count, row in enumerate(reader):
        if fileinput.filelineno() == 1 or fileinput.filelineno() == 2:
            continue
        EDD.append(row)
        if row['Qualifier'] != 'U':
            Detection = 'Detection'
            DetectionSummary.append(row)
        else:
            Detection = ''
        if row['Matrix'] == 'W':
            if GCTLs.get(row['Parameter'].lower()) != None:
                GCTLno = GCTLs.get(row['Parameter'].lower())
            else:
                GCTLno = ''
            if NADCs.get(row['Parameter'].lower()) != None:
# For each input file in a series, print the file name at the start of the file,
# append the line number to each line, and only take the first 5 lines per file
import fileinput

for line in fileinput.input(inplace=False):
    if fileinput.isfirstline():
        print("# filename:", fileinput.filename(),
              ", isstdin:", fileinput.isstdin())
    if fileinput.filelineno() >= 5:
        fileinput.nextfile()
    line = line.rstrip()
    print('{:<50} # {:2d} # {} # '.format(line, fileinput.lineno(),
                                          fileinput.filelineno()))
fileinput.close()

# Command to run: python3 test.py test.py test.py
# Output:
# # filename: test.py , isstdin: False
# import fileinput # 1 # 1 #
# for line in fileinput.input(inplace=False): # 2 # 2 #
# if fileinput.isfirstline(): # 3 # 3 #
# print("# filename:", fileinput.filename(), # 4 # 4 #
# ", isstdin:", fileinput.isstdin()) # 5 # 5 #
# # filename: test.py , isstdin: False
# import fileinput # 6 # 1 #
# for line in fileinput.input(inplace=False): # 7 # 2 #
# if fileinput.isfirstline(): # 8 # 3 #
# print("# filename:", fileinput.filename(), # 9 # 4 #
# ", isstdin:", fileinput.isstdin()) # 10 # 5 #
def grep_me(pattern, *args):
    for line in input(args):
        if re.search(pattern, line, re.I):
            print('{}:{}:{}'.format(filename(), filelineno(), line), end='')
import fileinput

for line in fileinput.input():
    meta = [fileinput.filename(), fileinput.fileno(),
            fileinput.filelineno(), fileinput.isfirstline(),
            fileinput.isstdin()]
    print(*meta, end=" ")
    print(line, end=" ")
def report_repl(builtin, repl):
    sys.stderr.write("%s:%d: x86 builtin %s used, replaced with %s\n"
                     % (fileinput.filename(), fileinput.filelineno(),
                        builtin, repl))
example: FIXME: plugin loading should be optional
"""
#=====================================================================
__version__ = "$Revision: 1.2 $"
__author__ = "Karsten Hilbert"
__license__ = "GPL v2 or later (details at https://www.gnu.org)"

import string, sys, fileinput

if len(sys.argv) < 2:
    print("Usage: find_todo.py <a_python_script> <a_python_script> ...")
    sys.exit(1)

# print "Searching for places to fix in", sys.argv[1:]

prev_file = ''
for line in fileinput.input():
    curr_file = fileinput.filename()
    if curr_file != prev_file:
        print('=> %s' % curr_file)
        prev_file = curr_file
    line = line.strip()
    if line.find('FIXME') != -1:
        line_no = fileinput.filelineno()
        line = line.replace('\015', '')
        line = line.replace('\012', '')
        print('#%s: %s' % (line_no, line))
import fileinput, sys, string

# take the first argument out of sys.argv and assign it to searchterm
searchterm, sys.argv[1:] = sys.argv[1], sys.argv[2:]

for line in fileinput.input():
    num_matches = string.count(line, searchterm)
    if num_matches:
        print "found '%s' %d times in %s on line %d." % (searchterm, num_matches,
                                                         fileinput.filename(),
                                                         fileinput.filelineno())
def domatch(pattern, *args):
    for line in input(args):
        if re.search(pattern, line, re.I):
            print(filename(), filelineno(), line)
# Rename files
os.rename("sample.txt", "sample_file.txt")
os.remove("dataFile.txt")
os.mkdir("subDir")
os.chdir("Day2")
print(os.getcwd())
os.rmdir("subDir")

### fileinput
# for processing contents of multiple files as a stream of lines
import fileinput

file_names = ['Employee.txt', 'sample_file.txt']
for line in fileinput.input(file_names):
    print("{}. line no {} of file {}: {}".format(fileinput.lineno(),
                                                 fileinput.filelineno(),
                                                 fileinput.filename(), line),
          end='')

# skip files while processing based on conditions
with fileinput.input(files=file_names) as f:
    for line in f:
        print(fileinput.filelineno(), line, end='')
        if (fileinput.filename() == 'dataFile.txt' and fileinput.isfirstline()):
            fileinput.nextfile()

### stat
from stat import *
            if (Status.isPaid(v.status)):
                total += v.amount
        return total

    # New:
    # Cooking:
    # Delivering:
    # Delivered:
    # Canceled:
    # Refunded:
    # Total amount charged
    def printSummary(self):
        summary = {}
        total = self.generateSummary(summary)
        for k, v in summary.iteritems():
            print Status.asString(k).title(), ": ", v
        print "Total Amount Charged: ", total

if __name__ == '__main__':
    Status.init()
    order_proc = OrderParser(True)
    order_eng = OrderEngine()
    for line in fileinput.input():
        order_eng.addOrder(order_proc.parseOrder(fileinput.filelineno(), line))
    order_eng.printSummary()
def main(args):
    global _stash
    ap = argparse.ArgumentParser()
    ap.add_argument('pattern', help='the pattern to match')
    ap.add_argument('files', nargs='*', help='files to be searched')
    ap.add_argument('-i', '--ignore-case', action='store_true',
                    help='ignore case while searching')
    ap.add_argument('-v', '--invert', action='store_true',
                    help='invert the search result')
    ap.add_argument('-c', '--count', action='store_true',
                    help='count the search results instead of normal output')
    ns = ap.parse_args(args)

    flags = 0
    if ns.ignore_case:
        flags |= re.IGNORECASE

    pattern = re.compile(ns.pattern, flags=flags)

    # Do not try to grep directories
    files = [f for f in ns.files if not os.path.isdir(f)]

    fileinput.close()  # in case it is not closed
    try:
        counts = collections.defaultdict(int)
        for line in fileinput.input(files, openhook=fileinput.hook_encoded("utf-8")):
            if bool(pattern.search(line)) != ns.invert:
                if ns.count:
                    counts[fileinput.filename()] += 1
                else:
                    if ns.invert:
                        # optimize: if ns.invert, then no match, so no highlight color needed
                        newline = line
                    else:
                        newline = re.sub(pattern,
                                         lambda m: _stash.text_color(m.group(), 'red'),
                                         line)
                    if fileinput.isstdin():
                        fmt = u'{lineno}: {line}'
                    else:
                        fmt = u'{filename}: {lineno}: {line}'
                    print(fmt.format(filename=fileinput.filename(),
                                     lineno=fileinput.filelineno(),
                                     line=newline.rstrip()))
        if ns.count:
            for filename, count in counts.items():
                fmt = u'{count:6} {filename}'
                print(fmt.format(filename=filename, count=count))
    except Exception as err:
        print("grep: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
    finally:
        fileinput.close()
import fileinput

for line in fileinput.input(files=('a_test.txt', 'guitest.py')):
    print(fileinput.filename(), fileinput.filelineno(), line, end="")
fileinput.close()
#!/usr/bin/env python
import fileinput

for line in fileinput.input():
    print("Filename:", fileinput.filename(),
          "Line number within the file:", fileinput.filelineno(),
          "Line content:", line)
def factory_tests(test_class_builder, testfiles, suffix, check_with_lxml=False):
    """
    Factory function for file based schema/validation cases.

    :param test_class_builder: the test class builder function.
    :param testfiles: a single or a list of testfiles indexes.
    :param suffix: the suffix ('xml' or 'xsd') to consider for cases.
    :param check_with_lxml: if `True` compare with lxml XMLSchema class, reporting \
    anomalies. Works only for XSD 1.0 tests.
    :return: a list of test classes.
    """
    test_classes = {}
    test_num = 0
    debug_mode = False
    line_buffer = []

    test_line_parser = get_test_line_args_parser()

    for line in fileinput.input(testfiles):
        line = line.strip()
        if not line or line[0] == '#':
            if not line_buffer:
                continue
            else:
                raise SyntaxError("Empty continuation at line %d!" % fileinput.filelineno())
        elif '#' in line:
            line = line.split('#', 1)[0].rstrip()

        # Process line continuations
        if line[-1] == '\\':
            line_buffer.append(line[:-1].strip())
            continue
        elif line_buffer:
            line_buffer.append(line)
            line = ' '.join(line_buffer)
            del line_buffer[:]

        test_args = test_line_parser.parse_args(get_test_args(line))
        if test_args.locations is not None:
            test_args.locations = {k.strip('\'"'): v for k, v in test_args.locations}

        test_file = os.path.join(os.path.dirname(fileinput.filename()), test_args.filename)
        if os.path.isdir(test_file):
            logger.debug("Skip %s: is a directory.", test_file)
            continue
        elif os.path.splitext(test_file)[1].lower() != '.%s' % suffix:
            logger.debug("Skip %s: wrong suffix.", test_file)
            continue
        elif not os.path.isfile(test_file):
            logger.error("Skip %s: is not a file.", test_file)
            continue

        test_num += 1

        # Debug mode activation
        if debug_mode:
            if not test_args.debug:
                continue
        elif test_args.debug:
            debug_mode = True
            msg = "Debug mode activated: discard previous %r test classes."
            logger.debug(msg, len(test_classes))
            test_classes.clear()

        if test_args.version == '1.0':
            schema_class = ObservedXMLSchema10 if test_args.inspect else XMLSchema10
            test_class = test_class_builder(
                test_file, test_args, test_num, schema_class, check_with_lxml
            )
        else:
            schema_class = ObservedXMLSchema11 if test_args.inspect else XMLSchema11
            test_class = test_class_builder(
                test_file, test_args, test_num, schema_class, check_with_lxml=False
            )

        test_classes[test_class.__name__] = test_class
        logger.debug("Add XSD %s test class %r.", test_args.version, test_class.__name__)

    if line_buffer:
        raise ValueError("Not completed line continuation at the end!")

    return test_classes
import fileinput

for line in fileinput.input():
    meta = ["Filename: " + str(fileinput.filename()),
            " File descriptor: " + str(fileinput.fileno()),
            " Line number: " + str(fileinput.filelineno()),
            " First line: " + str(fileinput.isfirstline()),
            " Stdin: " + str(fileinput.isstdin()) + " "]
    meta_ljust = [i.ljust(9) for i in meta]
    print(*meta_ljust, end="")
    print(line, end="")
import fileinput

c = fileinput.input('valid.src.id', backup='.bak', inplace=1)
for line in c:
    d = fileinput.filelineno()
    print(d)
def report_warn(s):
    print("%s:%d: %s" % (fileinput.filename(), fileinput.filelineno(), s))
def tests_factory(test_class_builder, suffix='xml'):
    """
    Factory function for file based schema/validation cases.

    :param test_class_builder: the test class builder function.
    :param suffix: the suffix ('xml' or 'xsd') to consider for cases.
    :return: a list of test classes.
    """
    test_classes = {}
    test_num = 0
    debug_mode = False
    line_buffer = []

    test_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    testfiles = [os.path.join(test_dir, 'test_cases/testfiles')]
    if TEST_FACTORY_OPTIONS['extra_cases']:
        package_dir = os.path.dirname(os.path.dirname(test_dir))
        testfiles.extend(glob.glob(os.path.join(package_dir, 'test_cases/testfiles')))

    for line in fileinput.input(testfiles):
        line = line.strip()
        if not line or line[0] == '#':
            if not line_buffer:
                continue
            else:
                raise SyntaxError("Empty continuation at line %d!" % fileinput.filelineno())
        elif '#' in line:
            line = line.split('#', 1)[0].rstrip()

        # Process line continuations
        if line[-1] == '\\':
            line_buffer.append(line[:-1].strip())
            continue
        elif line_buffer:
            line_buffer.append(line)
            line = ' '.join(line_buffer)
            del line_buffer[:]

        test_args = test_line_parser.parse_args(get_test_args(line))
        if test_args.locations is not None:
            test_args.locations = {k.strip('\'"'): v for k, v in test_args.locations}

        test_file = os.path.join(os.path.dirname(fileinput.filename()), test_args.filename)
        if os.path.isdir(test_file):
            logger.debug("Skip %s: is a directory.", test_file)
            continue
        elif os.path.splitext(test_file)[1].lower() != '.%s' % suffix:
            logger.debug("Skip %s: wrong suffix.", test_file)
            continue
        elif not os.path.isfile(test_file):
            logger.error("Skip %s: is not a file.", test_file)
            continue

        test_num += 1

        # Debug mode activation
        if debug_mode:
            if not test_args.debug:
                continue
        elif test_args.debug:
            debug_mode = True
            logger.debug("Debug mode activated: discard previous %r test classes.",
                         len(test_classes))
            test_classes.clear()

        if test_args.version == '1.0':
            schema_class = ObservedXMLSchema10 if test_args.inspect else XMLSchema10
            check_with_lxml = TEST_FACTORY_OPTIONS['check_with_lxml']
        else:
            schema_class = ObservedXMLSchema11 if test_args.inspect else XMLSchema11
            check_with_lxml = False

        test_class = test_class_builder(test_file, test_args, test_num,
                                        schema_class, check_with_lxml)
        test_classes[test_class.__name__] = test_class
        logger.debug("Add XSD %s test class %r.", test_args.version, test_class.__name__)

    if line_buffer:
        raise ValueError("Not completed line continuation at the end!")

    return test_classes
def report_err(s):
    report_error_name_no(fileinput.filename(), fileinput.filelineno(), s)
def process_line(line):
    if '\r'.encode() in line:
        print('file:{0} line:{1}'.format(fileinput.filename(), fileinput.filelineno()))