Example #1
0
def list():
    """Print every line of the current file with its line number (Python 2).

    Relies on module globals -- `size`, `filename`, `number_of_lines`, and
    the `error` helper -- presumably set by the surrounding script; TODO
    confirm.  NOTE(review): shadows the builtin `list`.
    """
    if size == 0:
        # Nothing to list -- delegate to the module's error handler.
        error()
    else:
        for line in fileinput.input(filename):
            # Trailing comma suppresses print's newline (line keeps its own).
            print fileinput.lineno(), line,
        print "---\nYou have", number_of_lines, "items in", filename
def get_test_function_lines(filename, list):
    """Scan *filename* and record test-function boundary lines in *list*.

    list[2] receives the line number of the line containing "bool do_test";
    list[3] receives the line number just past the line containing
    "return true".  Later matches overwrite earlier ones.
    """
    for text in fileinput.input(filename, inplace=0):
        current = fileinput.lineno()
        if "bool do_test" in text:
            list[2] = current
        if "return true" in text:
            list[3] = current + 1
def sum_over_all_tasks(infile, outfile):
    """Write "step;total_workers" rows to *outfile*, summing the per-task
    worker counts found at the end of each data row of *infile*
    (';'-separated log).

    Relies on module globals START_DATA_LINE and MAX_TASKS -- TODO confirm.
    NOTE(review): Python 2 code; the broad except merely prints the error,
    and `f` is never closed.
    """
    f = open(outfile, 'w')
    header = "##;## \n Step; Total workers \n"
    f.write(header)
    last_line = [0 for x in range(2)]
    try:
        for line in fileinput.input(infile):
            # Skip blank lines and everything before the data section.
            if line == '\n' or fileinput.lineno() < START_DATA_LINE:
                continue
            print "line # : ", fileinput.lineno()
            this_line = line.split(";")
            #print this_line
            #ts = line.split(";")[0]
            step = this_line[-(MAX_TASKS +1)] # from right      
            # The last MAX_TASKS fields are the per-task worker counts.
            this_workers = this_line[-MAX_TASKS:]   
            total_workers = 0
            for worker in this_workers:
                total_workers += int(worker)
            out_line = step +";"+ str(total_workers) + "\n"
            #if fileinput.lineno() == START_DATA_LINE:
            #    continue # skip first data line
            #else:
            f.write(out_line)
                #break   
    except Exception, e:
        print e
Example #4
0
def movementCheck(args):
    """Render the 'map' file with the player's position highlighted and
    return True when the square one step in the desired direction is open
    ('E') or a bridge ('B'), else False.

    args: [script, ypos, xpos, direction] -- note that x/y are read swapped
    relative to their names (see the in-code comments); TODO confirm
    against the caller.  First line of 'map' must be "<width>x<height>".
    """
    if len(args) != 4:
        stderr.write("\n\033[1;31mError\033[0m: bad arguments passed in.\n")
        exit(1)

    currentXPosition = int(args[2])
    currentYPosition = int(args[1])
    desiredDirection = args[3]
    desiredPosition = 2*[1] # desiredPosition[xpos, ypos]

    for line in fileinput.input('map'): # for each line in the input file...
        if fileinput.isfirstline(): #get the size of the map
            xwidth = line[:line.find('x')] # grab the first part up to the x
            ywidth = line[line.find('x') + 1:] # grab the part after the x
            xwidth = int(xwidth) # convert the xwidth to an integer
            ywidth = int(ywidth) # convert the ywidth to an integer
            stderr.write("%d x %d\n" % (xwidth, ywidth))
            stderr.write("\t0123456789abcdef\n")
            # NOTE(review): this builds a list of xwidth copies of the int
            # ywidth; each row is then overwritten below -- only safe if the
            # file supplies every row. TODO confirm.
            currentMap = (xwidth)*[ywidth]
        else:
            if fileinput.lineno() > 1:
                currentMap[fileinput.lineno()-2] = list(line)

    # Colourized dump of the map to stderr (red = player, blue = water 'W',
    # yellow = bridge 'B', green = everything else).
    for x in range(int(xwidth)):
        stderr.write("%d\t" %(x))
        for y in range(ywidth):
            #stderr.write("%s" %(currentMap[x][y]))
            if x == currentXPosition and y == currentYPosition:
                stderr.write("\033[1;31m%s\033[0m"%(currentMap[x][y]))
            elif currentMap[x][y] =='W':
                stderr.write("\033[1;34m%s\033[0m"%(currentMap[x][y]))
            elif currentMap[x][y] =='B': # check for bridges
                stderr.write("\033[1;43m%s\033[0m"%(currentMap[x][y]))
            else:
                stderr.write("\033[1;32m%s\033[0m"%(currentMap[x][y]))
        stderr.write("\n")
    #ignore variable names, they are backwards
    if desiredDirection == "left" and currentXPosition > 0:
        desiredPosition[0] = currentXPosition
        desiredPosition[1] = currentYPosition - 1
    elif desiredDirection == "right" and currentXPosition < xwidth:
        desiredPosition[0] = currentXPosition
        desiredPosition[1] = currentYPosition + 1
    elif desiredDirection == "up" and currentYPosition > 0:
        desiredPosition[0] = currentXPosition - 1
        desiredPosition[1] = currentYPosition
    elif desiredDirection == "down" and currentYPosition < ywidth:
        desiredPosition[0] = currentXPosition + 1
        desiredPosition[1] = currentYPosition

    # CHANGED  THE desiredPosition[ ]...IF INPUT  WAS 4 0 right...the desired postition was returning as 0 5, instead of 5 0
    # When trying to move past the upper boundary and left boundary, the desired postition returns -1 instead of an error message 
    # ORIGINAL INDEX VALUES                                             0                   1                              0                   1  
    stderr.write("\nDesired positoin: %d,%d is: %s\n" %(desiredPosition[1], desiredPosition[0], currentMap[desiredPosition[1]][desiredPosition[0]]))

    # NOTE(review): the debug line above indexes [1][0] but the decision
    # below indexes [0][1] -- one of the two is likely wrong; TODO confirm.
    if currentMap[desiredPosition[0]][desiredPosition[1]] == "E" or currentMap[desiredPosition[0]][desiredPosition[1]] == "B":
        acceptable = True
    else:
        acceptable = False
    return(acceptable)
def get_generated_code_lines(filename, list):
    """Locate the generated-code region markers in *filename*.

    list[0] receives the line number just after "//start writing here";
    list[1] receives the line number just before "//end writing here".
    Later matches overwrite earlier ones.
    """
    for text in fileinput.input(filename, inplace=0):
        current = fileinput.lineno()
        if "//start writing here" in text:
            list[0] = current + 1
        if "//end writing here" in text:
            list[1] = current - 1
 def error( self, msg = "" ):
   """Print *msg* (if any) followed by this script's leading '#' comment
   block as usage text, then exit -- status 1 when msg was given, else 0."""
   if ( msg ): print("\n ERROR: %s" % msg)
   print("")
   # Read this script's own source: its header comment doubles as usage text.
   for line in fileinput.input(sys.argv[0]):
     # First non-'#' line ends the header -- exit(1) on error, exit(0) otherwise.
     if ( not re.match( "#", line ) ): sys.exit(msg != "")
     # Print line 3 and lines > 4; lines 1-2 and 4 are skipped -- presumably
     # shebang/decoration; TODO confirm.
     if ((fileinput.lineno() == 3) or (fileinput.lineno() > 4)):
       print( re.sub( "^#", "", line.rstrip("\n") ) )
def find_delta_translation(infile, outfile):
    """Write "ts;step;delta" rows to *outfile*: the Euclidean distance
    between consecutive position samples (fields 4+) in the ';'-separated
    *infile*.

    Relies on module global START_DATA_LINE -- TODO confirm.
    NOTE(review): Python 2 code; the broad except merely prints the error,
    and `f` is never closed.
    """
    f = open(outfile, 'w')
    header = "##;## \n Time; Step; Delta Translation \n"
    f.write(header)
    last_line = [0 for x in range(2)]
    try:
        for line in fileinput.input(infile):
            # Skip blank lines and everything before the data section.
            if line == '\n' or fileinput.lineno() < START_DATA_LINE:
                continue
            #print "line # : ", fileinput.lineno()
            ts = line.split(";")[0]
            step = line.split(";")[2]       
            this_line = line.split(";")[4:]
            #print "last_line: ", last_line
            #print "this_line: ", this_line
            dx = (float(this_line[0]) - float(last_line[0]))
            dy = (float(this_line[1]) - float(last_line[1]))
            #print "dx:%f, dy:%f" %(dx,dy)
            delta_dist = math.sqrt(dx*dx + dy*dy)
            #print delta_dist
            out_line = ts + ";" + step +";"+ str(delta_dist) + "\n"
            last_line = this_line[:]
            # The first data row has no predecessor, so its delta is bogus.
            if fileinput.lineno() == START_DATA_LINE:
                continue # skip first data line
            else:
                f.write(out_line)
                #break   
    except Exception, e:
        print e
Example #8
0
 def test_sort_big_file_numeric(self):
     """Sorting on the first field must yield rows keyed 1..10 in order."""
     join_fields = '0'
     sorter = mod.CSVSorter(self.dialect, join_fields, self.temp_dir, self.temp_dir)
     outfile = sorter.sort_file(self.fqfn)
     assert outfile == self.fqfn + '.sorted'
     for rec in fileinput.input(self.fqfn + '.sorted'):
         fields = rec.split(',')
         print(fields)
         lineno = fileinput.lineno()
         # Row N must lead with key N; the original spelled this out as a
         # ten-branch elif chain.
         assert lineno <= 10, 'too many rows returned'
         assert fields[0] == str(lineno)
     fileinput.close()
def find_delta_sensitization(infile, outfile):
    """Write "ts;step;delta" rows to *outfile*: the summed absolute change
    in the per-task sensitization values (fields 5+) between consecutive
    data rows of the ';'-separated *infile*.

    Relies on module globals DATA_LINE_START and MAX_SHOPTASK, plus `fabs`
    (presumably from math) -- TODO confirm.
    NOTE(review): Python 2 code; the broad except merely prints the error,
    and `f` is never closed.
    """
    f = open(outfile, 'w')
    header = "##;## \n Time; Step; Delta Sensitization \n"
    last_line = [0 for x in range(MAX_SHOPTASK)]
    f.write(header)
    try:
        for line in fileinput.input(infile):
            #print "start iter"
            #if line == '\n' or fileinput.lineno() < DATA_LINE_START:
            if fileinput.lineno() < DATA_LINE_START:
                continue
                #print "line # : ", fileinput.lineno()
            else:
                ts = line.split(";")[0]
                step = line.split(";")[2]       
                this_line = line.split(";")[5:]
                #print "last_line: ", last_line
                #print "this_line: ", this_line
                dt_sz = 0
                for v in range(MAX_SHOPTASK):
                    v1 = float(this_line[v])
                    v2 = float(last_line[v])
                    #print  "idx:%d this:%f last:%f" %(v, v1, v2)
                    dt_sz += fabs(fabs(v1) - fabs(v2))
                #print dt_sz
                #print "end iter"   
                out_line = ts + ";" + step +";"+ str(dt_sz) + "\n"
                last_line = this_line
                # First data row has no predecessor; skip its bogus delta.
                if fileinput.lineno() == DATA_LINE_START:
                    continue # skip first data line
                else:
                    f.write(out_line)                          
    except Exception, e:
        print e
def find_delta_urgency(infile, outfile):
    """Write "ts;step;delta" rows to *outfile*: the summed change in the
    per-task urgency values (fields 2+) between consecutive data rows of
    the ';'-separated *infile*.

    Relies on module globals HEADER_LINE_END and MAX_SHOPTASK -- TODO
    confirm.  NOTE(review): Python 2 code; the broad except merely prints
    the error, and `f` is never closed.
    """
    f = open(outfile, 'w')
    header = "##;## \n Time; Step; DeltaUrgency \n"
    f.write(header)
    last_line = [0 for x in range(MAX_SHOPTASK)]
    dt_urgency = 0
    try:
        for line in fileinput.input(infile):
            print line
            if fileinput.lineno() <= HEADER_LINE_END:
                continue
            else:
                print "line # : ", fileinput.lineno()
                ts = line.split(";")[0]
                step = line.split(";")[1]        
                this_line = line.split(";")[2:]
                #print "last_line: ", last_line[2]
                #print "this_line: ", this_line[2:]
                dt_urgency = 0
                for v in range(MAX_SHOPTASK):                
                    dt_urgency += (float(this_line[v]) - float(last_line[v]))
                #print dt_urgency
                out_line = ts + ";" + step +";"+ str(dt_urgency) + "\n"
                last_line = this_line
                # First data row has no predecessor; skip its bogus delta.
                if fileinput.lineno() == (HEADER_LINE_END + 1):
                    continue # skip first data line
                else:
                    f.write(out_line)                
    except Exception, e:
        print e
Example #11
0
 def test_state_is_None(self):
     """Tests fileinput.lineno() when fileinput._state is None.
        Ensure that it raises RuntimeError with a meaningful error message
        and does not modify fileinput._state"""
     fileinput._state = None
     with self.assertRaises(RuntimeError) as cm:
         fileinput.lineno()
     self.assertEqual(("no active input()",), cm.exception.args)
     self.assertIsNone(fileinput._state)
Example #12
0
File: test.py Project: eddyb/servo
def mutate_random_line(file_name, strategy):
    """Pick one random line of *file_name* matching strategy['regex'],
    rewrite it in place with strategy['replaceString'], and return its
    1-based line number; return -1 when no line matches.

    Python 2 code (print statement).  The inplace=True pass redirects
    stdout into the file, so every line must be re-printed.
    """
    # First pass: collect the line numbers of all candidate lines.
    line_numbers = []
    for line in fileinput.input(file_name):
        if re.search(strategy['regex'], line):
            line_numbers.append(fileinput.lineno())
    if len(line_numbers) == 0:
        return -1
    else:
        mutation_line_number = line_numbers[random.randint(0, len(line_numbers) - 1)]
        # Second pass: rewrite the file, substituting only the chosen line.
        for line in fileinput.input(file_name, inplace=True):
            if fileinput.lineno() == mutation_line_number:
                line = re.sub(strategy['regex'], strategy['replaceString'], line)
            print line.rstrip()
        return mutation_line_number
Example #13
0
def filtered_events():
    """Generator yielding one dict per chosen event in the level-annotated
    log stream produced by levels().

    Each dict carries 'EventName', 'Timestamp', 'Filename', 'lineno' and an
    'Items' groupby of the event's raw lines.  Relies on module helpers
    levels(), chosen_event_header(), event_header() and linelevel().
    Uses the builtin next() instead of calling __next__() directly.
    """
    loglines = levels()
    # skip header on first file
    leveltext = next(loglines)
    # cycle through chosen events
    while True:
        # NOTE(review): under PEP 479 (Python 3.7+) a StopIteration escaping
        # next() here surfaces as RuntimeError rather than ending the
        # generator -- TODO confirm the intended exit path.
        while not chosen_event_header(leveltext):
            leveltext = next(loglines)
        event_lines = []
        # grab wanted events for processing
        level, line = leveltext
        # skip @ sign
        event = line[1:]
        # NOTE: some entries contain spaces in their text, not just between timestamp and text
        # Uses Python 3.3 * syntax for stuffing tuples
        timestamp, *text = event.split()
        event = {'EventName': "".join(text),
                 'Timestamp': timestamp,
                 'Filename' : fileinput.filename(),
                 'lineno'   : fileinput.lineno()}
        # populate it with attributes and members
        leveltext = next(loglines)
        # gather raw lines
        # try block handles case of last event, where the iteration is exhausted by the while loop
        try:
            while not event_header(leveltext):
                event_lines.append(leveltext)
                leveltext = next(loglines)
        except StopIteration:
            pass
        event.update({'Items': itertools.groupby(event_lines, linelevel)})
        yield event
def remove_generated_code(filename, list):
    """Delete lines list[0]..list[1] (inclusive, 1-based) from *filename*
    in place.

    Does nothing when either marker is 0 (unset) or the region is empty
    (list[0] - 1 == list[1]).  NOTE: parameter name `list` shadows the
    builtin but is kept for caller compatibility.
    """
    if list[0] != 0 and list[1] != 0 and list[0] - 1 != list[1]:
        # Hoisted out of the loop (the original rebuilt the range on every
        # line); set membership is O(1) per line.
        doomed = set(range(list[0], list[1] + 1))
        for line in fileinput.input(filename, inplace=1):
            newline = line.rstrip('\r\n')
            if fileinput.lineno() not in doomed:
                print(newline)
Example #15
0
def main(files=None):
    """Sweep-line maximum-overlap solver.

    Input: a header line "n k" followed by "start end value" triples.
    Each triple adds *value* over the inclusive range [start, end]; the
    function prints and returns the maximum total value at any point.
    """
    deltas = {}
    input_count = 0
    for line in fileinput.input(files=files):
        if fileinput.lineno() == 1:
            n, input_count = map(int, line.split(" "))
        else:
            start, end, value = map(int, line.split(" "))
            # +value where the range opens, -value just past where it closes.
            deltas[start] = deltas.get(start, 0) + value
            stop = end + 1
            deltas[stop] = deltas.get(stop, 0) - value

    running = 0
    best = 0
    for position in sorted(deltas.keys()):
        running += deltas[position]
        if running > best:
            best = running
    print(best)
    return best
Example #16
0
def main(files=None):
    """Feed "start end" ranges from *files* into a RangeBST and print and
    return the tree's maximum value.

    The first input line is "n k"; k is only used as the denominator of the
    progress ratio printed every 1000 lines.
    """
    tree = RangeBST()
    input_count = 0
    for line in fileinput.input(files=files):
        if fileinput.lineno() == 1:
            n, input_count = map(int, line.split(" "))
            # float so the progress ratio below is fractional, not integer.
            input_count = float(input_count)
            # BUG FIX: removed `input_count.p()` -- a debug leftover that
            # raised AttributeError on every run (floats have no .p()).
        else:
            cur_range = map(int, line.split(" "))
            tree.add(cur_range)
            if fileinput.lineno() % 1000 == 0:
                print("finish {}".format(fileinput.lineno() / input_count))

    print(tree.max_value)
    return tree.max_value
Example #17
0
 def generateUncovereageSummary(self,bPrintHeader):
     headerList = []
     connectionCoveragePercentList = []
     uncovereageTypeList = []
     for line in fileinput.input(self.strUncoverageAnalysisFilePath):
         lineNo = fileinput.lineno()
         if lineNo ==1:
             headerList = line.strip("\n").split(",")
             headerList = headerList[:-1]
             print headerList
         elif lineNo ==2:
             connectionCoveragePercentList = line.strip("\n").split(",")
             connectionCoveragePercentList = connectionCoveragePercentList[1:-1]
         elif lineNo in range(18,39):
             tempList = line.strip("\n").split(",")
             #remove 1-4 in list
             tempList[2:] = []
             uncovereageTypeList.append(tempList)
     uncovereageTypeList = zip(*uncovereageTypeList)
     #print headerList
     #print byteCoveragePercentList
     summaryList = []
     if bPrintHeader:
         headerList[0] = "IMEI"
         summaryList.append(headerList+list(uncovereageTypeList[0]))
     info = connectionCoveragePercentList+list(uncovereageTypeList[1])
     info.insert(0,self.strUser)
     summaryList.append(info)
     return summaryList
     pass
def find_pct(infile, outfile):    
    """Scan *infile* for the first step at which each task's urgency hits
    zero (its completion step) and record "task;step" rows; afterwards,
    accumulate non-negative urgencies into task_pmw.

    Relies on module globals MAX_TASKS, MAX_STEPS, START_DATA_LINE,
    prod_compl_step and task_pmw -- TODO confirm.
    NOTE(review): `f` is never opened here (and *outfile* is unused), so
    f.write() raises NameError, silently printed by the broad except.
    NOTE(review): eval() on file data is unsafe if the input is untrusted.
    NOTE(review): in the else branch, `u` holds the value from a previous
    iteration -- presumably intentional carry-over; TODO confirm.
    """
    search_pct = [True for x in xrange(MAX_TASKS+1)]
    try:
        for line in fileinput.input(infile):
            if line == '\n' or fileinput.lineno() < START_DATA_LINE:
                continue
            #print "line # : ", fileinput.lineno()
            this_line = line.split(";")
            #print this_line
            #ts = line.split(";")[0]
            step = this_line[-(MAX_TASKS +1)] # from right      
            if(int(step) > MAX_STEPS):
                break  
            urgencies = this_line[-MAX_TASKS:]
            for tid in range(0, MAX_TASKS):
                   if(search_pct[tid]):
                        u = eval(urgencies[tid])
                        if(u == 0):
                            prod_compl_step.append(int(step))
                            search_pct[tid] = False
                            out_line = str(tid+1) +";"+ step + "\n"
                            #print out_line
                            f.write(out_line)
                            #print this_line
                   else:
                       if(u >= 0):
                        task_pmw[tid].append(u)     
                          
        #tid = 0
        #print "task %d maint steps: %d" %(tid+1, len(task_pmw[tid]))    
    except Exception, e:
        print e
Example #19
0
    def parseFile(self, file, maxResolution, merge=None):
        """Parse a PDB-select style listing into {protein_id: protein_tuple}.

        The first three lines are header and skipped.  When *merge* is
        given, it is extended in place and returned, and ids already
        present in it are not re-added (the filter takes preference).
        Only proteins with a resolution in (0, maxResolution] are kept.
        """
        if merge is None:
            result = {}
        else:
            result = merge

        # Compile once so the pattern is not rebuilt while parsing.  Raw
        # string: the regex escapes are not valid Python string escapes.
        regexStr = r'\s+(\d+)\s+([\w]+)\s+(\d+)\s+([-]?\d+\.\d+)\s+(\d+\.\d+)\s+([a-zA-Z]?)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+([,\w\-/(/) \[\]]+)'
        regexPattern = re.compile(regexStr)

        for line in fileinput.input(file):
            # Header occupies the first three lines.
            if fileinput.lineno() < 4:
                continue

            protein = self.parsePDBSelectLine(line, regexPattern)
            if protein is None:
                continue
            # Filter out proteins already in the merge list -- the filter
            # list keeps preference.
            if merge is not None and protein[0] in merge:
                continue
            # Check the resolution range.
            if protein[2] > 0 and protein[2] <= maxResolution:
                result[protein[0]] = protein

        return result
    def replace(self, aDirectoryName, theFileNames):
        """Directory-walk visitor: rewrite every file in *theFileNames*
        (skipping previous backups) in place, substituting
        self.myFindExpression with self.myReplacementText, keeping a backup
        with self.myBackupExt and logging each changed line.

        NOTE(review): presumably Python 2 -- in Python 3 filter/map return
        iterators, which fileinput accepts but the debug log below would
        print as an opaque object; TODO confirm.
        """

        logging.debug("Visting directory %s", aDirectoryName)

        # Create fully quailified paths -- ensuring we do append a redundant OS
        # separator as we build the path and skipping over previously created
        # backup files ...
        theFiles = filter(lambda aFileName: not aFileName.endswith(BACKUP_EXT_SUFFIX),
                        map(aDirectoryName.endswith(os.sep) and
                            (lambda aFileName: aDirectoryName + aFileName) or
                            (lambda aFileName: aDirectoryName + os.sep + aFileName),
                        theFileNames))
        logging.debug("Scanning through %s", theFiles)

        for aLine in fileinput.input(theFiles, inplace=1, backup=self.myBackupExt):

            # Perform the replacement and write out the results.
            aProcessedLine = self.myFindExpression.sub(self.myReplacementText, aLine)
            sys.stdout.write(aProcessedLine)

            # Log changes
            if aLine != aProcessedLine:

                self.myModifiedFiles.add(fileinput.filename())
                logging.info("Replaced file %s line #%s, '%s', with '%s'",  fileinput.filename(), 
                        fileinput.lineno(), aLine.replace(os.linesep, ""), 
                        aProcessedLine.replace(os.linesep, ""))
def plot_urgency(outfile):
    """Plot summed task-urgency deltas (field 2) against step (field 1)
    from the ';'-separated file produced by find_delta_urgency, saving the
    figure as 'delta_urgency_sum_plot' and showing it.

    Uses module global HEADER_LINE_END plus numpy/pylab imported at module
    level.  NOTE(review): despite the x-axis label, x is the step column,
    not a timestamp -- TODO confirm.
    """
    #ts = []
    step = []
    delta_urgency = []
    for line in fileinput.input(outfile):
        # Skip the header block.
        if fileinput.lineno() <= HEADER_LINE_END:
            continue
        else:
            #t = line.split(";")[0]
            s = line.split(";")[1]
            du = line.split(";")[2]
            #ts.append(float(t))
            step.append(int(s))
            delta_urgency.append(float(du))
        
    x = numpy.array(step)
    y = numpy.array(delta_urgency)
    
    pylab.plot(x, y)

    pylab.xlabel('Time Stamp (s)')
    pylab.ylabel('Task Urgency')
    pylab.title('Sum of task urgency changes over time ')
    pylab.grid(True)
    pylab.savefig('delta_urgency_sum_plot')

    pylab.show()
 def replaceString(self, path, search, repl):
     """Rewrite *path* in place, replacing *search* with *repl* on every line.

     With an empty *search*, instead appends *repl* on a new line after
     line 4.  When *repl* is '' lines whose replacement leaves them empty
     (but were not empty before) are dropped entirely.
     """
     for line in fileinput.input(path, inplace=1):
         if search != '': newline = line.strip('\n').replace(search, repl)
         # BUG FIX: the format string was being *called* -- "%s\n%s"(...) --
         # which raised TypeError; the '%' operator was missing.
         elif fileinput.lineno() == 4: newline = "%s\n%s" % (line.strip('\n'), repl)
         else: newline = line.strip('\n')
         if repl == newline == '' and  line.strip('\n') != '':continue
         print(newline)
def parse_input_file(filename, inputs):
    """Append the first four lines of *filename* (newline-stripped) to the
    *inputs* list."""
    for line in fileinput.input(filename, inplace=0):
        newline = line.rstrip('\r\n')
        if fileinput.lineno() > 4:
            break
        else:
            inputs.append(newline)
    # BUG FIX: close explicitly -- breaking out of the loop leaves the
    # stream active, making the next fileinput.input() call raise
    # RuntimeError ("input() already active").
    fileinput.close()
Example #24
0
def countFileRows(filePath):
    '''Count the number of lines in *filePath* (0 for an empty file).
    '''
    import fileinput
    for line_str in fileinput.input(filePath):
        pass
    # Capture the count before closing, then release the stream so later
    # fileinput.input() calls do not fail.
    total = fileinput.lineno()
    fileinput.close()
    return total
Example #25
0
 def mutate(self, file_name):
     """Delete one randomly chosen, brace-balanced if-block (that has no
     else) from *file_name*; return the first deleted line number, or -1
     when no mutable if-block exists.

     Helpers init_variables() and deleteStatements() are defined elsewhere
     in the module -- TODO confirm their contracts.
     NOTE(review): `code_lines[line_to_mutate]` raises IndexError when the
     scan reaches the file's last line -- TODO confirm.
     """
     code_lines = []
     if_blocks = []
     # Pass 1: collect all lines and the line numbers of if-block starts.
     for line in fileinput.input(file_name):
         code_lines.append(line)
         if re.search(self.if_block, line):
             if_blocks.append(fileinput.lineno())
     if len(if_blocks) == 0:
         return -1
     random_index, start_counter, end_counter, lines_to_delete, line_to_mutate = init_variables(if_blocks)
     while line_to_mutate <= len(code_lines):
         current_line = code_lines[line_to_mutate - 1]
         next_line = code_lines[line_to_mutate]
         # An else on this or the next line disqualifies the block: retry
         # with another candidate (or give up if none are left).
         if re.search(self.else_block, current_line) is not None \
                 or re.search(self.else_block, next_line) is not None:
             if_blocks.pop(random_index)
             if len(if_blocks) == 0:
                 return -1
             else:
                 random_index, start_counter, end_counter, lines_to_delete, line_to_mutate = \
                     init_variables(if_blocks)
                 continue
         lines_to_delete.append(line_to_mutate)
         # Track brace balance; when braces close, delete the whole block.
         for ch in current_line:
             if ch == "{":
                 start_counter += 1
             elif ch == "}":
                 end_counter += 1
             if start_counter and start_counter == end_counter:
                 deleteStatements(file_name, lines_to_delete)
                 return lines_to_delete[0]
         line_to_mutate += 1
Example #26
0
	def parse(self, file=None, nowdate=None):
		"""Load an options-portfolio file (Python 2 code: string.atoi etc.).

		Each non-comment, non-blank line holds: grantdate ticker count sold
		strikeprice coolofftime maturetime.  For every grant this appends an
		optionpack tuple to self.portfolio and updates the running totals
		(totalcount, totalfullvest, totalpartvest, totalpending).
		Uses module helpers vested() and findNextVest() -- TODO confirm.
		"""
		if file != None:
			self.portfoliofile=file
		if self.portfoliofile == None:
			raise IOError, "OptionsPortfolio.parse() requires file\n"
		if nowdate != None:
			self.nowtime=time.strptime(nowdate, "%Y%m%d")
		for line in fileinput.input(self.portfoliofile):
			line=string.strip(line)
			if line == "": continue
			if line[0] == "#": continue
			line = line.split('#')[0].strip() # allow line comments
			optionpack = re.split('[\s,]+', line)
			try:
				(grantdate, ticker, count, sold, strikeprice, coolofftime, maturetime) = optionpack
			except:
				raise RuntimeError, "bad file format: %s line %d: %s\n" % (self.portfoliofile, fileinput.lineno(), line)
			count=string.atoi(count)
			sold=string.atoi(sold)
			strikeprice=string.atof(strikeprice)
			coolofftime=string.atoi(coolofftime)
			maturetime=string.atoi(maturetime)
			(fullvested, partialvested, pending) = vested(grantdate, count, strikeprice, coolofftime, maturetime, self.nowtime)
			nextvest = findNextVest(grantdate, count, coolofftime, maturetime, self.nowtime)

			optionpack=(grantdate, count, sold, strikeprice, coolofftime, maturetime, fullvested, partialvested, pending, nextvest)

			self.totalcount += count - sold
			self.totalfullvest += fullvested - sold
			self.totalpartvest += partialvested
			self.portfolio.append(optionpack)
			self.totalpending += pending
Example #27
0
def get(index):
  """Return the line at the 1-based *index* in STACK_FILE (newline kept).

  Raises RuntimeError when the file has fewer than *index* lines.  The
  fileinput stream is always closed before returning or raising.
  """
  try:
    for current in fileinput.input(STACK_FILE):
      if fileinput.lineno() == index:
        return current
  finally:
    fileinput.close()
  raise RuntimeError("The index selected does not exist.")
Example #28
0
def find_str(fname,seek):
	"""Return a result string for every line of *fname* containing *seek*,
	formatted as 'FilePath===><fname>========LineNumber===><n>\\r\\n'."""
	res_list=[]
	for line in fileinput.input(fname):
		# 'in' test instead of .find() != -1; str(...) instead of the
		# C-style (str)(...) cast.
		if seek in line:
			res='FilePath===>'+fname+'========LineNumber===>'+str(fileinput.lineno())+'\r\n'
			res_list.append(res)
	return res_list
Example #29
0
def main():
    """Apply the decorators enabled via the global `args` mapping to every
    line of args['FILE'], maintaining global line counters (Python 2 code).

    Globals: decorators, decorator_dictionary, line_number,
    non_blank_line_number, last_blank -- presumably consumed by the
    decorator callables; TODO confirm.
    NOTE(review): non_blank_line_number is reset to 1 on each file's first
    line even when that line is blank -- TODO confirm intended.
    """
    global decorators
    global decorator_dictionary
    global line_number
    global non_blank_line_number
    global last_blank
    
    # Enable only the decorators whose flag is set in args.
    for index,value in decorator_dictionary.items():
        if args[index]:
            decorators.append(value)
        else:
            pass
    #print decorators
    
    for line in fileinput.input(args['FILE']): # Create FileInput instance to handle files.
        line_number = fileinput.lineno()
        
        if fileinput.isfirstline() == True: # reset count of non_blank_line_number for a new file.
           non_blank_line_number = 1
        elif line.isspace() == False: # if a line is blank.
           non_blank_line_number += 1
     
        
        output_line = line
        for d in decorators: # loop to apply decorators
            output_line = d(output_line)
            
        if line.isspace()==True: # update last_blank to ensure we know if a blank just passed
            last_blank = True
        else:
            last_blank = False
        
        if output_line is not None: # if the line isnt none, print it.
            print output_line,
Example #30
0
def rdfile():
    """Echo the lines of the files named on the command line (or stdin)
    left-padded to 50 columns, with '# <lineno>' appended to non-blank
    lines (Python 2 print statements)."""
    for line in fileinput.input(inplace=False):
        line = line.rstrip()
        lineno = fileinput.lineno()
        if line:
            print '%-50s # %2i' % (line, lineno)
        else:
            # Blank lines pass through unnumbered.
            print '%-50s' % (line)
 def test_state_is_not_None(self):
     """With an active mock state, fileinput.lineno() must delegate to
        _state.lineno() exactly once, return that result unchanged, and
        leave fileinput._state pointing at the very same object."""
     expected = object()
     mock = MockFileInput()
     mock.return_values['lineno'] = expected
     fileinput._state = mock
     result = fileinput.lineno()
     self.assertExactlyOneInvocation(mock, 'lineno')
     self.assertIs(result, expected)
     self.assertIs(fileinput._state, mock)
Example #32
0
 def get_input(self):
     """Read self.project from the first input line, then strictly
     ascending ISO dates into self.holidays; stop at the first blank line.

     Raises ValueError when a date is not greater than the previous one.
     """
     for raw in fileinput.input():
         text = raw.strip()
         if not text:
             break
         if fileinput.lineno() == 1:
             self.project = text
         else:
             day = datetime.strptime(text, '%Y-%m-%d').date()
             if self.holidays and day <= self.holidays[-1]:
                 raise ValueError('Date list must be in ascending order!')
             self.holidays.append(day)
Example #33
0
 def test_sort_file_with_tab_delimiter(self):
     """Tab-delimited files must sort on field 0 just like comma ones."""
     join_fields = '0'
     self.dialect.delimiter = '\t'
     self.fqfn = create_test_file(self.temp_dir, self.dialect.delimiter)
     sorter = mod.CSVSorter(self.dialect, join_fields, self.temp_dir,
                            self.temp_dir)
     outfile = sorter.sort_file(self.fqfn)
     assert outfile == self.fqfn + '.sorted'
     for rec in fileinput.input(self.fqfn + '.sorted'):
         fields = rec.split(self.dialect.delimiter)
         print(fields)
         lineno = fileinput.lineno()
         # Row N must lead with key N; the original spelled this out as a
         # four-branch elif chain.
         assert lineno <= 4, 'too many rows returned'
         assert fields[0] == str(lineno)
     fileinput.close()
Example #34
0
 def grepHIDD(self, file):
     """Collect every HIDD_re match in *file* into the global HIDDs map:
     {hidd_value: ["filename:lineno:column", ...]} (Python 2 code:
     unicode(), print statement)."""
     global HIDDs
     for line in fileinput.input(file):
         #print line
         m = HIDD_re.search(line)
         #print m
         if m:
             # Reference string locates the match: file, line, column.
             ref = "%s:%s:%s" % (fileinput.filename(), fileinput.lineno(),
                                 m.span()[0])
             hidd = unicode(m.groups()[0])
             if self.verbose > 1:
                 print "%s %s" % (ref, hidd)
             HIDDs.setdefault(hidd, []).append(ref)
Example #35
0
def main():
    """Count and print the total number of lines in the files named on the
    command line (head-style CLI; -n/-c are parsed but currently unused)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("-n","--lines", help="-n --lines",type=int)
    parser.add_argument("-c","--byte", help="-c --bytes",type=int)
    parser.add_argument('filenames', nargs='*')
    args = parser.parse_args()
    a=args.lines
    b=args.byte
    c=args.filenames

    # BUG FIX: the loop iterated fileinput.input(a) -- args.lines, an int --
    # instead of the list of filenames.
    for line in fileinput.input(c):
        pass
    print(fileinput.lineno())
Example #36
0
def readRunCmds():
  """Execute each input line as a shell command, printing its output; on
  the first failing command, report file/line/exit-code/output and stop
  (Python 2 print statement in the except branch).

  NOTE(review): shell=True pipes raw input lines through the shell -- that
  is this function's purpose, but it is dangerous with untrusted input.
  """
  try:
    for line in fileinput.input():
      z=subprocess.check_output(line, stderr=subprocess.STDOUT, shell=True)
      print(z)
  except subprocess.CalledProcessError as details:
    print 'In file: {0}, line: {1}, errno: {2}, {3}'.format(
           fileinput.filename(),
           fileinput.lineno(), 
           details.returncode,
           str(details.output).replace('\n',''))
  finally:
    fileinput.close()
Example #37
0
def process_tweets(user_id, app_config=Config):
    """Background task: import a user's Twitter archive into the database.

    Downloads <user_id>.json from Google Cloud Storage, strips the
    'window.YTD.tweet.part0 =' prefix from line 1 so the file parses as
    JSON, persists every valid-dated non-retweet as a Tweet row, marks the
    user's file_status as 2, then deletes the archive from GCS and local
    temp storage.  Helpers is_valid_date/is_retweet/get_month_and_date are
    defined elsewhere -- TODO confirm their contracts.
    """
    from twittermemories import create_app
    from twittermemories.models import User, UserSchema, Tweet, TweetSchema, db
    this_app = create_app(app_config)
    with this_app.app_context():
        # download tweet archive for user
        storage_client = storage.Client.from_service_account_json(
            this_app.config['CLOUD_STORAGE'].GCP_JSON)
        bucket = storage_client.bucket(
            this_app.config['CLOUD_STORAGE'].GCP_STORAGE_BUCKET)
        twitter_archive = bucket.blob(user_id + '.json')
        twitter_archive.download_to_filename(
            this_app.config['CELERY_CONFIG'].TEMPSTORAGE + user_id + '.json')

        # convert archive to traversable json format
        # (inplace=True redirects print back into the file)
        for line in fileinput.input(
                this_app.config['CELERY_CONFIG'].TEMPSTORAGE + user_id +
                '.json',
                inplace=True):
            if fileinput.lineno() == 1:
                print(line.replace('window.YTD.tweet.part0 =', ''), end='')
            else:
                print(line, end='')
        fileinput.close()
        # traverse tweets, pull out relevant info and persist instances
        curr_user = User.query.filter_by(user_id=user_id).first()
        tweetList = json.load(
            open(
                this_app.config['CELERY_CONFIG'].TEMPSTORAGE + user_id +
                '.json', 'r'))

        for tweet in tweetList:
            dateString = tweet['tweet']['created_at']
            if is_valid_date(dateString) and not is_retweet(tweet):
                # persist tweet
                month, date = get_month_and_date(dateString)
                new_tweet = Tweet(tweet_id=tweet['tweet']['id'],
                                  month=month,
                                  day=date,
                                  user=curr_user)
                db.session.add(new_tweet)
                db.session.commit()
        curr_user.file_status = 2
        db.session.commit()

        # delete the file from gcp storage and local storage
        twitter_archive.delete()
        os.remove(
            os.path.join(this_app.config['CELERY_CONFIG'].TEMPSTORAGE,
                         user_id + '.json'))
def estoTeSacaUnaListaDeSlides(slides):
    """Build the slide list from stdin/argv input: horizontal pics become
    slides directly; vertical pics are collected and paired up at the end
    by unirVerticales().  (Pic, Slide, unirVerticales are defined
    elsewhere -- TODO confirm.)"""
    picsVerticales = []
    for line in fileinput.input():
        if not fileinput.isfirstline():  # discard the first (header) line
            separada = line[:len(line) - 1].split(' ')  # split on ' ' (trailing newline dropped)
            fila = fileinput.lineno()  # current line number
            picLeido = Pic(fila - 2, separada)  # build the pic just read (ids start at 0)
            if (picLeido.orientation()
                ):  # vertical: goes to the verticals list
                picsVerticales.append(picLeido)
            else:  # horizontal: straight into the slides
                slides.append(Slide(picLeido))
    # merge the verticals into the slides, maximizing tags
    unirVerticales(slides, picsVerticales)
def run_miss_cache_1():
    """Run the '1.alpha' benchmark under sim-cache with a 1-entry miss
    cache, then post-process the stats file into comma-separated form.

    Side effects: runs the simulator via os.system and rewrites
    results/sim_mc1.out in place.  Returns the legacy status string '0'.
    """
    cmd = './sim-cache -redir:sim '
    sim_file = 'results/sim_mc1.out'
    config = ' -cache:dmcache dmcache:1:l -cache:imcache imcache:1:l'
    bm_dir = ' /home/viraj/Documents/SimpleScalar/simplesim-alpha/benchmarks/'
    bm = '1.alpha '
    bm_in = '</home/viraj/Documents/SimpleScalar/simplesim-alpha/benchmarks/1stmt.i>'
    asm_out = 'OUTmc1'
    run_cmd = cmd + sim_file + config + bm_dir + bm + bm_in + asm_out

    path = '/home/viraj/Documents/SimpleScalar/simplesim-alpha/'

    print ('Running ' + bm + 'for 1 entry Miss Cache config')
    os.system(run_cmd)

    # Locate the second occurrence of "statistics": everything up to and
    # including that line is header material.
    second = "statistics"
    count = 0
    num = 0  # BUGFIX: was unbound when the stats file is empty
    with open(path + sim_file, 'r') as myFile:
        for num, line in enumerate(myFile, 1):
            if second in line:
                count += 1
                if count == 2:
                    break

    # BUGFIX: the original reset a `found` flag on every line, so it only
    # reflected the last line read; strip the header only when the second
    # marker was actually found.
    if count == 2:
        for line in fileinput.input(path + sim_file, inplace=True):
            if fileinput.lineno() > num:
                print (line, end='')

    # Turn "name   value   # comment" lines into comma-separated fields.
    for line in fileinput.input(path + sim_file, inplace=True):
        if " " in line:
            line = line.replace(' ', ',', 1)  # first space -> separator
            line = line.replace(' ', '')      # drop remaining padding
        if "#" in line:
            line = line.replace('#', ',', 1)  # comment marker -> separator
        print (line, end='')
    # BUGFIX: removed unreachable gc.collect() that followed the return.
    return '0'
Example #40
0
def plot_sensitization(infile, outfile):
    ts = []
    step = []
    task1 = []
    task2 = []
    task3 = []
    task4 = []
    x = None
    y1 = None
    y2 = None
    y3 = None
    y4 = None

    for line in fileinput.input(infile):
        if fileinput.lineno() <= HEADER_LINE_END:
            continue
        else:
            tm1 = line.split(";")[0]
            tm2 = line.split(";")[1]
            s = line.split(";")[2]
            tasks = line.split(";")[5:]
            #print tasks
            t1 = tasks[0]
            t2 = tasks[1]
            t3 = tasks[2]
            t4 = tasks[3]
            #ts.append(float(tm1))
            step.append(int(s))
            task1.append(float(t1))
            task2.append(float(t2))
            task3.append(float(t3))
            task4.append(float(t4))

    x = numpy.array(step)
    print "X axis len:", len(x)
    y1 = numpy.array(task1)
    y2 = numpy.array(task2)
    y3 = numpy.array(task3)
    y4 = numpy.array(task4)

    pylab.plot(x, y1, 'r+', x, y2, 'g,', x, y3, 'b--', x, y4, 'k')
    #pylab.ylim(0,1)
    pylab.xlabel('Time Step (s)')
    pylab.ylabel('Sensitization (k)')
    #pylab.title('Task urgencies recorded at Task-Server ')
    pylab.grid(True)
    pylab.legend(('Task1', 'Task2', 'Task3', 'Task4'), loc=2)
    fn = 'Plot' + outfile
    pylab.savefig(fn)
Example #41
0
def statistic_pos(comment_file, target):
    """Count per-POS word frequencies in *comment_file* and print the 20
    most frequent words tagged with the POS *target* (Python 2 code).

    Each line is expected to hold UTF-8, space-separated tokens of the
    form ``word#POS``; the word itself may contain '#', so only the last
    '#'-field is treated as the tag.  Scanning stops after 10**4 lines.
    """
    # Nested counters: {pos: {word: count}}
    pos_statisticistic = {}
    for line in fileinput.input(comment_file):
        # Everything before the last '#' is the word, the rest is its tag.
        for word, pos in [
            (u'#'.join(entry.split(u'#')[:-1]), entry.split(u'#')[-1])
                for entry in line.strip('\n').decode('utf-8').split(' ')
        ]:
            pos_statisticistic[pos] = pos_statisticistic.get(pos, {})
            pos_statisticistic[pos][word] = pos_statisticistic[pos].get(
                word, 0) + 1
        # Cap the scan to bound runtime on huge files.
        if fileinput.lineno() == 10**4:
            break
    fileinput.close()
    # Emit the top-20 words for the requested POS, most frequent first.
    print ','.join([entry['word'] for entry in sorted([{'word':word,'count':count} for word, count in \
       pos_statisticistic.get(target,{}).iteritems()],key=lambda x:x['count'],reverse=True)[:20]])
def sum_translation(infile, outfile):
    """Accumulate the translation column of *infile* over fixed time
    windows of INTERVAL seconds and append one ``time;step;sum`` row per
    window to *outfile* (Python 2 code).

    NOTE(review): ``time_start`` is only initialised in the
    ``fileinput.lineno() == 2`` branch; if HEADER_LINE_END >= 2 that
    branch is unreachable and the first window start stays 0 — confirm.
    NOTE(review): *f* is never closed and the broad ``except`` only
    prints the error, so partial output files are possible.
    """
    time_start = 0
    time_end = 0
    cum_trans = 0
    iter = 1  # window counter (NOTE: shadows the builtin ``iter``)

    f = open(outfile, 'w')
    header = "##;## \n Time; Step; TranslationSum \n"
    f.write(header)
    try:
        for line in fileinput.input(infile):
            # Skip blank lines and the header block.
            if line == '\n' or fileinput.lineno() <= HEADER_LINE_END:
                continue
            print "line # : ", fileinput.lineno()
            ts = line.split(";")[0]
            step = line.split(";")[1]
            u = line.split(";")[2]
            print u
            # First data line: open the first accumulation window.
            if fileinput.lineno() == 2:
                time_start = float(ts)
                time_end = time_start + INTERVAL
                cum_trans = float(u)
                continue
            if float(ts) <= time_end:
                cum_trans += float(u)
            else:
                # Window finished: emit its sum and start the next one.
                print "Cumulative translation:%f at iter %d" % (cum_trans,
                                                                iter)
                outline = ts + ";" + step + ";" + str(cum_trans) + "\n"
                f.write(outline)
                iter += 1
                cum_trans = 0
                time_end = float(ts) + INTERVAL

    except Exception, e:
        print e
Example #43
0
def statistic_description_length(filename):
	"""Per-source statistics of case description lengths (Python 2).

	Reads a tab-separated, UTF-8 case dump, groups description lengths
	by INFOSOURCENAME, prints count/mean/median/min/max per source and
	saves a histogram of all lengths.
	"""
	# {info source name: [description lengths]}
	case_dict = defaultdict(list)

	def get_statistics(data):
		# count, mean, median, min, max — tab separated.
		return u'{0}\t{1:.2f}\t{2}\t{3}\t{4}'.format(len(data),np.mean(data),np.median(data),np.min(data),np.max(data))

	for line in fileinput.input(filename):
		# Progress indicator every 10**4 lines (overwrites itself with \r).
		if fileinput.lineno() % 10**4 == 0: sys.stdout.write(str(fileinput.lineno())+'\r'); sys.stdout.flush()
		TASKID, COORDX, COORDY, INFOSOURCENAME, DISCOVERTIME, SOLVINGTIME, \
		ADDRESS, STREETNAME, DESCRIPTION, ENDRESULT, URGENTDEGREE, USEREVALUATE, \
		INFOBCNAME, INFOSCNAME, INFOZCNAME, CASEENDBCNAME, CASEENDSCNAME = map(lambda x:x.strip(), line.decode('utf-8').split(u'\t'))
		# Strip the "(Pudong)" suffix from the three district name fields.
		INFOBCNAME, INFOSCNAME, INFOZCNAME = map(lambda x:re.sub(ur'\(浦东\)','',x), [INFOBCNAME, INFOSCNAME, INFOZCNAME])
		# Keep only records with valid, non-zero coordinates and a time.
		if COORDX and COORDY and float(COORDX) and float(COORDY) and DISCOVERTIME:
			case_dict[INFOSOURCENAME].append(len(DESCRIPTION))
	fileinput.close()

	lens = []
	for INFOSOURCE, DESCRIPTIONlens in case_dict.iteritems():
		print u'{0}\t{1}'.format(INFOSOURCE, get_statistics(DESCRIPTIONlens))
		lens.extend(DESCRIPTIONlens)
	plt.figure(figsize=(12,5))
	plt.hist(lens, 100, range=(0,99), histtype='stepfilled', facecolor='g', alpha=0.6)
	# Chinese labels: title "description length distribution", x "length", y "frequency".
	plt.title(u'正文长度分布'); plt.xlabel(u'长度'); plt.ylabel(u'频次')
	plt.savefig('./statistic_clustering/description_length_statistics.png')
Example #44
0
def fetch_jobs_stdin():
    """
    Return a list of jobs gathered from a series of JSON objects, one per
    line, presented on stdin. This function is used for testing of the
    flux-jobs utility, and thus, all filtering options are currently
    ignored.
    """
    collected = []
    for raw in fileinput.input("-"):
        try:
            # Both json.loads and JobInfo may raise ValueError on bad input.
            record = JobInfo(json.loads(raw))
        except ValueError as err:
            LOGGER.error("JSON input error: line %d: %s", fileinput.lineno(), str(err))
            sys.exit(1)
        else:
            collected.append(record)
    return collected
Example #45
0
def parse_job(job_name, job_file, a_job_line, logdir):
    """Split *job_file* (or stdin when it is None) into per-job scripts.

    Every *a_job_line* consecutive lines are written to
    ``<logdir>/<base>_<k>.sh`` where *base* is *job_name* without its
    ``.sh`` suffix and *k* is the 1-based chunk number.

    Returns the number of chunk files written.
    """
    # BUGFIX: str.rstrip(".sh") strips *characters*, not the suffix
    # (e.g. "tests.sh" -> "te"); strip the literal suffix instead.
    base = job_name[:-len(".sh")] if job_name.endswith(".sh") else job_name
    with fileinput.input(
            files=job_file if not job_file is None else ('-', )) as in_h:
        job_num = 0
        for one_line in in_h:
            job_num += 1
            job_f = os.path.join(logdir, base + "_" + str(job_num) + ".sh")
            with open(job_f, 'w') as job_h:
                job_h.write(one_line)
                # Pull lines until the chunk boundary; lineno() tracks the
                # module-global fileinput state advanced by next(in_h) too.
                while fileinput.lineno() % a_job_line != 0:
                    nxt = next(in_h, None)
                    if nxt is None:
                        # BUGFIX: file ended mid-chunk; the original raised
                        # an uncaught StopIteration here.
                        break
                    job_h.write(nxt)
        return job_num
Example #46
0
def get_boards():
    """gets board data from input file

    The first line (a count, stored in T) is skipped; blank lines
    terminate a board, all other lines contribute their whitespace-split
    cells to the current board.
    """
    boards = []
    current = []
    for raw in f.input():
        # The first line is a count, not board data.
        if f.lineno() == 1:
            T = raw.rstrip('\n')
            continue
        if raw == "\n":
            # Blank line: the current board is complete.
            boards.append(current)
            current = []
        else:
            current.extend(raw.split())
    return boards
Example #47
0
    def test_format_of_1_line(self):
        """Log one record, read the log file back, and check that it holds
        exactly one line whose colon-separated fields are the logger name,
        level and message."""

        # something isn't writing rows out for python 2.6
        if sys.version_info[:2] == (2, 6):
            return

        self.logger.info('Test1')

        # Consume the file; after the loop ``rec`` holds the last line and
        # fileinput.lineno() the number of lines read.
        # NOTE(review): ``rec`` is unbound if the log file is empty.
        for rec in fileinput.input(os.path.join(self.temp_dir, 'test.log')):
            pass

        assert fileinput.lineno() == 1
        fields = rec.split(':')
        assert fields[1].strip() == '__main__'
        assert fields[2].strip() == 'INFO'
        assert fields[3].strip() == 'Test1'
Example #48
0
 def __add_object(self, key, value):
     """Insert *key* -> *value* at the line selected by the hash function.

     Rewrites the store file in place: every line is echoed untouched
     except the slot line, which is replaced by the JSON-encoded pair.

     Raises KeyValDataStoreException "008" on a duplicate key and "004"
     when the file lock cannot be acquired.
     """
     try:
         fcntl.flock(self.filepath, fcntl.LOCK_EX | fcntl.LOCK_NB)
         hash_value = self.__hash_function(key)
         for line in fileinput.input(self.filepath, inplace=True):
             line = line.strip()
             if fileinput.lineno() != hash_value:
                 print(line)
             else:
                 if line.split()[0] == key:
                     raise KeyValDataStoreException(
                         "008", "Cannot insert duplicate key.")
                 print(json.dumps(key), json.dumps(value))
         fcntl.flock(self.filepath, fcntl.LOCK_UN)
         print("Key added")
     except KeyValDataStoreException:
         # BUGFIX: the duplicate-key error ("008") used to be swallowed by
         # the bare except below and re-raised as a misleading "004".
         raise
     except Exception:
         raise KeyValDataStoreException(
             "004", "Cannot be added, file currently in use.")
def plot_stimuli(outfile):
    """Plot the task-stimuli columns read from *outfile* and save the
    figure as 'Plot<basename>.png', then display it.

    NOTE(review): ts, step and task0..task4 are appended to but never
    initialised here — they must be module-level lists (and accumulate
    across calls); confirm against the enclosing module.
    """
    for line in fileinput.input(outfile):
        # Skip the header block (HEADER_LINE_END is a module constant).
        if fileinput.lineno() <= HEADER_LINE_END:
            continue
        else:
            tm1 = line.split(";")[0]
            tm2 = line.split(";")[1]
            s = line.split(";")[2]
            tasks = line.split(";")[4:]
            t0 = tasks[0]
            t1 = tasks[1]
            t2 = tasks[2]
            t3 = tasks[3]
            t4 = tasks[4]
            ts.append(float(tm1))
            step.append(float(s))
            task0.append(float(t0))
            task1.append(float(t1))
            task2.append(float(t2))
            task3.append(float(t3))
            task4.append(float(t4))

    # x is the sample index, not the recorded time/step values.
    x = numpy.arange(len(task1))
    y0 = numpy.array(task0)
    y1 = numpy.array(task1)
    y2 = numpy.array(task2)
    y3 = numpy.array(task3)
    y4 = numpy.array(task4)

    #pylab.plot(x, y0, 'm.', x, y1, 'r+', x, y2, 'g.',  x, y3, 'b--',\
    # x, y4, 'k')
    pylab.plot( x, y1, 'r+', x, y2, 'g-',  x, y3, 'b.',\
     x, y4, 'k')
    #pylab.ylim(0,1)
    pylab.xlabel('Time Step (s)')
    pylab.ylabel('Task Stimuli')
    #pylab.xlim()
    #pylab.title('Task urgencies recorded at Task-Server ')
    pylab.grid(True)
    #pylab.legend(('RW', 'Task1', 'Task2', 'Task3', 'Task4'))
    fn = 'Plot' + outfile.split('.')[0] + '.png'
    pylab.savefig(fn)

    pylab.show()
Example #50
0
    def GetPrqaLabels(self, verbose):
        """Scan self.filename and return every PRQA label found in it.

        Each label object returned by LocatePrqaLabelsInLine is annotated
        with the line number and filename where it was found.  *verbose*
        is currently unused (kept for interface compatibility).
        """
        # Idiom fixes: `is not None` / truthiness instead of != None / == True.
        assert self.filename is not None
        assert self.IsReadable()

        labels = []
        for line in fileinput.input(self.filename):
            lineLabels = LocatePrqaLabelsInLine(line, self.filename)
            for lineLabel in lineLabels:
                # Record where each label was found.
                lineLabel.lineNo = fileinput.lineno()
                lineLabel.filename = self.filename
            labels.extend(lineLabels)
        fileinput.close()  # release the module-global fileinput state

        return labels
Example #51
0
    def add_disable_pragma(self, filepath, fileline, message_code):
        """
        Add a "Pylint disable" pragma to the specified file line of the
        "patched" temporary copy.

        Raises ValueError when *filepath* lies outside the source tree.
        """
        # Normalise the filepath and check that it's inside the source tree.
        # BUGFIX: a plain str.startswith() prefix test also accepted sibling
        # directories such as "<source>-extra"; compare path components
        # instead (commonpath itself raises ValueError on mismatched
        # absolute/relative paths, the same type callers already handle).
        filepath = os.path.normpath(filepath)
        source_root = os.path.normpath(self._source_path)
        if os.path.commonpath([filepath, source_root]) != source_root:
            raise ValueError("File %s is not inside %s" %
                             (filepath, self._source_path))

        source_file = os.path.relpath(filepath, self._source_path)
        temp_filepath = os.path.join(self._temp_patched_path, source_file)

        # Rewrite the temp copy in place, patching only the target line.
        for line in fileinput.input(temp_filepath, inplace=True):
            if fileinput.lineno() == fileline:
                line = self._insert_comment(line, message_code)
            sys.stdout.write(line)
        fileinput.close()
Example #52
0
    def _logMeasurment(self, event):
        """
        Prints the RSSI values to a CSV file in the row with the given tag ID like:
        reader_id[0],tag_ids[0], 81, 82, 81, etc...
        reader_id[1],tag_ids[1], 120, 119, 118, 123, etc...
        reader_id[0],tag_ids[1], 72, 76, 77, 77, etc...
        reader_id[1],tag_ids[2], 72, 62, 61, etc...
        
        Note: These are sorted by which unique readerID/tagID combination comes first
        """
        try:
            tag = event.getPacket().getDecoding()['data']['tag']
            new_id = False
            # Unique id for each reader/tag combination
            uid = 'R%sT%s' % (event.getPacket().getNodeId(), tag['id'])
            if uid not in self.tag_ids:
                self.tag_ids.append(uid)
                new_id = True

            # Determine which line to write to/update:
            i = self.tag_ids.index(uid)
            if new_id:  # Append a new row
                self.log(tag="Monitor",
                         msg="[INFO ] Reader ID %s found new Tag with ID %s" %
                         (event.getPacket().getNodeId(), tag['id']))
                # First combination overwrites the file ('w'); later ones
                # append ('a'), each prefixed with a newline.
                with open(self.log_file, (i == 0 and 'w') or 'a') as csv:
                    csv.write(
                        "%sReaderID:%s,TagID:%s,%s" %
                        (i > 0 and '\n' or '', event.getPacket().getNodeId(),
                         tag['id'], tag['rssi']))
            else:
                # Append to the end of the row
                # (rewrite the whole file in place, extending only row i;
                # empty lines are dropped on the way through)
                for line in fileinput.input(self.log_file, inplace=1):
                    if fileinput.lineno() - 1 == i:
                        line = "%s,%s\n" % (line.rstrip(), tag['rssi']
                                            )  # Append the new rssi value
                    if len(line.strip()) > 0:
                        sys.stdout.write(line)
            #self.log(event.getPacket().__repr__())
            self.log(tag="Monitor",
                     msg="[DEBUG] UID=%s RSSI=%s " % (uid, tag['rssi']))
        except KeyError:
            # Packets without tag data in their decoding are ignored.
            pass
Example #53
0
def read_syscalls(input, cmd, edit_syscall, error):
    """Parse syscall descriptions from the lines of *input*.

    Feeds each stripped line to the ``process`` state machine; completed
    syscalls are passed to *edit_syscall* and collected together with
    their source location.  Returns ``(syscalls, ok)`` where *ok* is
    False if any line failed to parse.

    NOTE(review): fileinput.filename()/lineno() are only meaningful when
    the caller passes a ``fileinput`` stream as *input* — confirm.
    NOTE(review): *cmd* is unused in this function; the parameter name
    ``input`` shadows the builtin but is part of the public interface.
    """
    import fileinput
    syscalls = []
    state = None
    did_error = False
    for line in input:
        line = line.strip()
        # Advance the state machine; a falsy result signals a parse error
        # reported via *error*.
        state = process(error, line, state)
        if not state:
            did_error = True
            continue

        state_t, state_s = state
        if state_t == process.STATE_DONE:
            # Complete syscall: let the caller adjust it, then record it
            # with the file/line it came from.
            edit_syscall(state_s)
            syscalls.append(
                (fileinput.filename(), fileinput.lineno(), state_s))

    return (syscalls, not did_error)
Example #54
0
def update_cpp_code(filename, inputs, list, finalvarnamelist,
                    finalvartypelist):
    """Rewrite *filename* in place, expanding the line at list[0] into
    generated C++ test-harness code that declares and reads each input
    variable, reads the expected answer, and calls do_test(...).

    NOTE: the parameter named ``list`` shadows the builtin but is part
    of the public interface; list[0]/list[1] are the insertion markers.
    ``inputs[2]`` is assumed to be a comma-separated "Type:name" (or
    "Parameters:Type:name") spec and ``inputs[3]`` a ":"-separated
    output spec — confirm against the caller.  The discovered variable
    names and types are appended to *finalvarnamelist* /
    *finalvartypelist* as a side effect.
    """
    if list[0] != 0 and list[1] != 0:
        for line in fileinput.input(filename, inplace=1):
            newline = line.rstrip('\r\n')
            # Echo every line unchanged except the insertion point.
            if fileinput.lineno() != list[0]:
                print(newline)
            else:
                input_variables = inputs[2].split(",")
                for item in input_variables:
                    vartype = item.split(":")
                    # "Parameters:Type:name" vs plain "Type:name".
                    if vartype[0] == "Parameters":
                        print("        " + vartype[1] + " " + vartype[2] + ";")
                        print("        " + "from_stream(" + vartype[2] + ");")
                        finalvarnamelist.append(vartype[2])
                        finalvartypelist.append(vartype[1])

                    else:
                        print("        " + vartype[0] + " " + vartype[1] + ";")
                        print("        " + "from_stream(" + vartype[1] + ");")
                        finalvarnamelist.append(vartype[1])
                        finalvartypelist.append(vartype[0])
                print("        " + "next_line();")
                output_variables = inputs[3].split(":")
                # The expected answer is read into __answer.
                print("        " + output_variables[1] + " __answer;")
                print("        " + "from_stream(__answer);")
                finalvarnamelist.append("__answer")
                finalvartypelist.append(output_variables[1])
                print("        cases++;")
                print(
                    "        cout << \"  Testcase #\" << cases - 1 << \" ... \";"
                )
                # Build "if( do_test(a,b,...,__answer)) {" from the names
                # collected above (trailing comma stripped).
                conditionString = "if( do_test("
                for vars in finalvarnamelist:
                    conditionString += vars
                    conditionString += ","
                conditionString = conditionString[:-1]
                conditionString += ")) {"
                print("        " + conditionString)
                print("            " + "passed++;")
                print("        }")
                print("        //end writing here")
Example #55
0
 def __remove_object(self, key):
     """Delete *key* from the store by blanking its hashed slot line.

     Rewrites the store file in place, echoing every line except the
     slot line, which is replaced by an empty line when it holds *key*.

     Raises KeyValDataStoreException "006" when the key is not present
     and "005" when the file lock cannot be acquired.
     """
     try:
         fcntl.flock(self.filepath, fcntl.LOCK_EX | fcntl.LOCK_NB)
         hash_value = self.__hash_function(key)
         for line in fileinput.input(self.filepath, inplace=True):
             line = line.strip()
             if fileinput.lineno() != hash_value:
                 print(line)
             else:
                 if line.startswith(json.dumps(key)):
                     print('')
                 else:
                     raise KeyValDataStoreException(
                         "006",
                         "Cannot be deleted, entered key does not exist.")
         fcntl.flock(self.filepath, fcntl.LOCK_UN)
         print("Key removed")
     except KeyValDataStoreException:
         # BUGFIX: the missing-key error ("006") used to be swallowed by
         # the bare except below and re-raised as a misleading "005".
         raise
     except Exception:
         raise KeyValDataStoreException(
             "005", "Cannot be deleted, file currently in use.")
Example #56
0
    def GetPrqaRefs(self, verbose):
        """Scan self.filename and return all PRQA deviation references.

        Each deviation object returned by LocatePrqaInLine is annotated
        with the line number and filename where it was found.  *verbose*
        is currently unused (kept for interface compatibility).
        """
        # Idiom fixes: `is not None` / truthiness instead of != None / == True.
        assert self.filename is not None
        assert self.IsReadable()

        deviations = []
        for line in fileinput.input(self.filename):
            lineDeviations = LocatePrqaInLine(line, self.filename)
            for lineDeviation in lineDeviations:
                # Record where each deviation reference was found.
                lineDeviation.prqaRow = fileinput.lineno()
                lineDeviation.filename = self.filename
            deviations.extend(lineDeviations)
        fileinput.close()  # release the module-global fileinput state

        return deviations
Example #57
0
def fetch_jobs_stdin(args):
    """
    Return a list of jobs gathered from a series of JSON objects, one per
    line, presented on stdin. This function is used for testing of the
    flux-jobs utility, and thus, all filtering options are currently
    ignored.
    """
    import fileinput
    import json

    collected = []
    for raw in fileinput.input("-"):
        try:
            decoded = json.loads(raw)
        except ValueError as err:
            logger.error(
                "JSON input error: line {}: {}".format(fileinput.lineno(), err)
            )
            sys.exit(1)
        else:
            collected.append(decoded)
    return collected
Example #58
0
File: more.py Project: zx110101/00
def more(filenames, pagesize=10, clear=False, fmt='{line}'):
    '''Display content of filenames pagesize lines at a time (cleared if specified) with format fmt for each output line'''

    # NOTE: *fmt* is expanded with locals(), so it may reference any local
    # name below — notably {line}, {lineno}, {filename}, {filelineno} and
    # {pageno}. Renaming these locals would break callers' format strings.
    fileinput.close()  # in case still open
    try:
        pageno = 1
        if clear:
            clear_screen()
        for line in fileinput.input(filenames):
            # lineno is cumulative across all files; filelineno restarts
            # at 1 for each file.
            lineno, filename, filelineno = fileinput.lineno(
            ), fileinput.filename(), fileinput.filelineno()
            print(fmt.format(**locals()), end='')
            # Pause after every full page (pagesize=0 disables paging).
            if pagesize and lineno % pagesize == 0:
                console.alert(
                    'Abort or continue', filename, 'Next page'
                )  # TODO: use less intrusive mechanism than alert
                pageno += 1
                if clear:
                    clear_screen()
    finally:
        fileinput.close()
Example #59
0
    def find_error_log_messages(directory):
        """Parse output log files for error messages

        Parameters
        ----------
        directory : str
            output directory

        """
        # Checked in order: the time-limit message also contains
        # "slurmstepd", so it must be matched first.
        substrings = (
            "DUE TO TIME LIMIT",  # includes slurmstepd, but check this first
            "srun",
            "slurmstepd",
            "Traceback",
        )

        error_files = [
            os.path.join(directory, name)
            for name in os.listdir(directory)
            if name.endswith(".e")
        ]
        if not error_files:
            return

        for line in fileinput.input(error_files):
            for marker in substrings:
                if marker not in line:
                    continue
                yield StructuredLogEvent(
                    source="submitter",
                    category=EVENT_CATEGORY_ERROR,
                    name=EVENT_NAME_ERROR_LOG,
                    message="Detected error message in log.",
                    error=marker,
                    filename=fileinput.filename(),
                    line_number=fileinput.lineno(),
                    text=line.strip(),
                )
                # Only find one match in a single line.
                break
Example #60
0
    def initialize(self, change=None):
        """
        The Builder's main method. It stores all the changes that needs to be made
        in `self.details` for a file. Which would then be used to add Docstrings to.
        """
        result = dict()

        patches = []
        if change:
            patches = change.get('additions')

        for line in fileinput.input(self.filename):
            filename = fileinput.filename()
            lineno = fileinput.lineno()
            keywords = self.config.get('keywords')
            found = false if line.lstrip()[0] in ("#", '"')
            found = found and (
                len(
                    [
                        word.lstrip()
                        for word in line.split(' ')
                        if word.lstrip() in keywords
                    ]
                )
                > 0
            )

            if change and found:
                found = self._is_line_part_of_patches(lineno, line, patches)

            if not self.details.get(filename):
                self.details[filename] = dict()

            if found:
                length = get_file_lines(filename)
                result = self.extract_and_set_information(
                    filename, lineno, line, length
                )
                if self.validate(result):
                    self.details[filename][result.name] = result