def replace(self, aDirectoryName, theFileNames):

    logging.debug("Visiting directory %s", aDirectoryName)

    # Create fully qualified paths -- ensuring we do not append a redundant
    # OS separator as we build the path and skipping over previously created
    # backup files ...
    theFiles = filter(lambda aFileName: not aFileName.endswith(BACKUP_EXT_SUFFIX),
                      map(aDirectoryName.endswith(os.sep) and
                          (lambda aFileName: aDirectoryName + aFileName) or
                          (lambda aFileName: aDirectoryName + os.sep + aFileName),
                          theFileNames))
    logging.debug("Scanning through %s", theFiles)

    for aLine in fileinput.input(theFiles, inplace=1, backup=self.myBackupExt):

        # Perform the replacement and write out the results.
        aProcessedLine = self.myFindExpression.sub(self.myReplacementText, aLine)
        sys.stdout.write(aProcessedLine)

        # Log changes
        if aLine != aProcessedLine:
            self.myModifiedFiles.add(fileinput.filename())
            logging.info("Replaced file %s line #%s, '%s', with '%s'",
                         fileinput.filename(), fileinput.lineno(),
                         aLine.replace(os.linesep, ""),
                         aProcessedLine.replace(os.linesep, ""))
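A minimal standalone sketch of the same in-place pattern, assuming a hypothetical notes.txt: with inplace editing enabled, fileinput redirects sys.stdout into the file being processed, so whatever the loop writes replaces the original content.

import fileinput

# "notes.txt" is a placeholder; backup=".bak" keeps a copy of the original.
for line in fileinput.input("notes.txt", inplace=True, backup=".bak"):
    print(line.replace("foo", "bar"), end="")  # end="" avoids doubling newlines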
Example #2
def parse_gen(**kwargs):

    file = kwargs["file"]
    fileinput = kwargs["fileinput"]

    line = file.readline()
    if line == "":
        return
    l = line.rstrip().split()
    pos = int(l[2])
    #    chrom = int(fileinput.filename().split('/')[1])  # tmp!!!
    ## -2 is a magic number...
    ## instead provide a file with chromosome-file-equivalents
    alleleA = l[3]
    alleleB = l[4]
    if kwargs["chrom"]:
        chrom = kwargs["chrom"]
    else:
        try:
            chrom = int(l[1].split(":")[0])
        except ValueError:
            try:
                chrom = int(fileinput.filename().split("/")[-2])
            except ValueError:
                try:
                    chrom = int(fileinput.filename().split("/")[-1].split(".")[0])
                except ValueError:
                    chrom = int(fileinput.filename().split("/")[-1].split(".")[0][3:])

    yield chrom, pos, alleleA, alleleB, l
Example #3
    def test_state_is_None(self):
        """Tests fileinput.filename() when fileinput._state is None.
           Ensure that it raises RuntimeError with a meaningful error message
           and does not modify fileinput._state"""
        fileinput._state = None
        with self.assertRaises(RuntimeError) as cm:
            fileinput.filename()
        self.assertEqual(("no active input()",), cm.exception.args)
        self.assertIsNone(fileinput._state)
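The error contract exercised by this test can be demonstrated directly; a minimal sketch, standalone rather than part of the test class above:

import fileinput

fileinput.close()        # ensure there is no active input() state
try:
    fileinput.filename()
except RuntimeError as exc:
    print(exc)           # -> no active input()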
Example #4
def check_files(files, verbose):
    in_multiline = False
    multiline_start = 0
    multiline_line = ""
    logical_line = ""
    token = False
    prev_file = None
    prev_line = ""
    prev_lineno = 0

    for line in fileinput.input(files):
        if fileinput.isfirstline():
            # if in_multiline when the new file starts then we didn't
            # find the end of a heredoc in the last file.
            if in_multiline:
                print_error('E012: heredoc did not end before EOF',
                            multiline_line,
                            filename=prev_file, filelineno=multiline_start)
                in_multiline = False

            # last line of a previous file should always end with a
            # newline
            if prev_file and not prev_line.endswith('\n'):
                print_error('E004: file did not end with a newline',
                            prev_line,
                            filename=prev_file, filelineno=prev_lineno)

            prev_file = fileinput.filename()

            if verbose:
                print "Running bash8 on %s" % fileinput.filename()

        # NOTE(sdague): multiline processing of heredocs is interesting
        if not in_multiline:
            logical_line = line
            token = starts_multiline(line)
            if token:
                in_multiline = True
                multiline_start = fileinput.filelineno()
                multiline_line = line
                continue
        else:
            logical_line = logical_line + line
            if not end_of_multiline(line, token):
                continue
            else:
                in_multiline = False

        check_no_trailing_whitespace(logical_line)
        check_indents(logical_line)
        check_for_do(logical_line)
        check_if_then(logical_line)
        check_function_decl(logical_line)

        prev_line = logical_line
        prev_lineno = fileinput.filelineno()
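The isfirstline() check above is the standard idiom for detecting file boundaries when fileinput chains several files into one stream; a condensed sketch of the pattern (the file names are placeholders):

import fileinput

files = ["a.sh", "b.sh"]  # placeholder paths
current = []
for line in fileinput.input(files):
    if fileinput.isfirstline() and current:
        # a new file just started: finish up the previous file's state
        print("previous file had", len(current), "lines")
        current = []
    current.append(line)
# as in the example above, the last file still needs a final flush here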
Example #5
def populate_with_data(data):
    for line in fileinput.input():
        if fileinput.isfirstline():
            ipstr = fileinput.filename()[0:-4]
            peerid = from_ip_to_peerid(ipstr)
            data[peerid] = dict()
        try:
            ary = line[0:-1].split('\t')
        except:
            raise RuntimeError('%s:%d: syntax error: %s' % (
                fileinput.filename(), fileinput.filelineno(), line))
        d = ary[1:]
        for i in [ORIGIN, FAIL, ACCEPT, SENT, RECEIVED]:
            d[i] = int(d[i])
        data[peerid][ary[0]] = d
Example #6
def main(args):
    global _stash
    ap = argparse.ArgumentParser()
    ap.add_argument('pattern', help='the pattern to match')
    ap.add_argument('files', nargs='*', help='files to be searched')
    ap.add_argument('-i', '--ignore-case', action='store_true',
                    help='ignore case while searching')
    ap.add_argument('-v', '--invert', action='store_true',
                    help='invert the search result')
    ap.add_argument('-c', '--count', action='store_true',
                    help='count the search results instead of normal output')
    ns = ap.parse_args(args)

    flags = 0
    if ns.ignore_case:
        flags |= re.IGNORECASE

    pattern = re.compile(ns.pattern, flags=flags)

    # Do not try to grep directories
    files = [f for f in ns.files if not os.path.isdir(f)]

    fileinput.close()  # in case it is not closed
    try:
        counts = collections.defaultdict(int)
        for line in fileinput.input(files, openhook=fileinput.hook_encoded("utf-8")):
            if bool(pattern.search(line)) != ns.invert:
                if ns.count:
                    counts[fileinput.filename()] += 1
                else:
                    if ns.invert:  # optimize: if ns.invert, then no match, so no highlight color needed
                        newline = line
                    else:
                        newline = re.sub(pattern, lambda m: _stash.text_color(m.group(), 'red'), line)
                    if fileinput.isstdin():
                        fmt = u'{lineno}: {line}'
                    else:
                        fmt = u'{filename}: {lineno}: {line}'

                    print(fmt.format(filename=fileinput.filename(),
                                     lineno=fileinput.filelineno(),
                                     line=newline.rstrip()))
                
        if ns.count:
            for filename, count in counts.items():
                fmt = u'{count:6} {filename}'
                print(fmt.format(filename=filename, count=count))
                
    except Exception as err:
        print("grep: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
    finally:
        fileinput.close()
Example #7
def parse_strings_simple():
    for line in fileinput.input():
        phrases = []
        for match in quoteregex.findall(line):
            match = match.strip()
            match = verseregex.sub("", match)
            match = dashregex.sub("", match)

            if not actual_text(match):
                continue
            phrases.append(match)
        if phrases:
            print fileinput.filename() + ": " + " ".join(phrases)
Example #8
def iter_pubs(config_dir):
    """Discover local EventLogging publishers."""
    for line in fileinput.input(iter_files(config_dir)):
        match = re.match(r'tcp://\*:(\d+)', line)
        if match:
            name = os.path.basename(fileinput.filename())
            yield name, match.expand(r'tcp://127.0.0.1:\g<1>')
Example #9
    def sample(self, files, print_every=1000):
        """
        Determines the set of sample records from the file names given.
        @param self the object
        @param files list of filenames.  Reads from STDIN if "-" is specified.
        @param print_every Write to STDERR the record number every print_every
               lines.  Defaults to 1000.  Set to 0 to disable printing
               altogether.
        """
        recnum = 0
        try:
            for ln in fileinput.input(files):
                if self.header and fileinput.filelineno() == 1:
                    self.head_rec = ln
                else:
                    recnum += 1
                    if print_every > 0 and recnum % print_every == 0:
                        sys.stderr.write("%d\r" % recnum)
                    if recnum <= self.num_samples:
                        self.samples.append(ln)
                    else:
                        idx = int(random.random() * recnum)
                        if idx < self.num_samples:
                            self.samples[idx] = ln
        except IOError as msg:
            raise Usage("Problem reading from file '%s':\n%s" %
                        (fileinput.filename(), msg))
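The else branch above is reservoir sampling: once the reservoir holds num_samples records, record number recnum survives with probability num_samples/recnum, which leaves every record an equal chance of ending up in the sample. A self-contained sketch of the core algorithm, with the names below chosen for illustration:

import random

def reservoir_sample(stream, k):
    """Return a uniform random sample of k items from an iterable."""
    sample = []
    for n, item in enumerate(stream, start=1):
        if n <= k:
            sample.append(item)             # fill the reservoir first
        else:
            idx = int(random.random() * n)  # uniform index in [0, n)
            if idx < k:
                sample[idx] = item          # replace with probability k/n
    return sample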
Example #10
def check_files(files, verbose):
    in_multiline = False
    logical_line = ""
    token = False
    for line in fileinput.input(files):
        if verbose and fileinput.isfirstline():
            print "Running bash8 on %s" % fileinput.filename()
        # NOTE(sdague): multiline processing of heredocs is interesting
        if not in_multiline:
            logical_line = line
            token = starts_multiline(line)
            if token:
                in_multiline = True
                continue
        else:
            logical_line = logical_line + line
            if not end_of_multiline(line, token):
                continue
            else:
                in_multiline = False

        check_no_trailing_whitespace(logical_line)
        check_indents(logical_line)
        check_for_do(logical_line)
        check_if_then(logical_line)
Example #11
def clean_clutter_in(files, tabsize=8):
  if not files: return
  n_empty = 0
  for fname in files:
    if not os.path.isdir(fname):
      for line in input(fname, inplace=1):
        if (isfirstline()):
          if (not isstdin()):
            print >> sys.__stdout__, filename() + ':'
          n_empty = 0
        clean_line = line.expandtabs(tabsize).rstrip()
        if (len(clean_line) == 0):
          n_empty += 1
        else:
          for i in xrange(n_empty): sys.stdout.write("\n")
          n_empty = 0
          sys.stdout.write(clean_line)
          sys.stdout.write("\n")
      # explicitly convert Windows linebreaks into Unix linebreaks
      # (kept inside the isdir guard so directories are never opened)
      wfile = open(fname, "r")
      wstr = wfile.read()
      wfile.close()
      ustr = wstr.replace("\r\n", "\n")
      ufile = open(fname, 'wb')
      ufile.write(ustr)
      ufile.close()
Example #12
def all_lines(args, params, linef, silent=False, pipe=True):
    input = fileinput.input(args)
    if pipe:  # correct behaviour over pipes, i.e. finish execution on SIGPIPE
        import signal
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
    while True:
        try:
            line = input.next().strip()
            if '' == line: continue
            try:
                info = PipeInfo(fileinput.filename(), fileinput.filelineno())
                js = json.loads(line)
                if not type(js) is dict:
                    eprint( 'Non dictionary at file:', info.file
                          , 'line:', info.line, silent=silent
                          )
                    continue
                ret = linef(js, params, info)
                if ret: yield ret
            except ValueError as e: eprint( str(e)
                                          , 'file:', info.file
                                          , 'line:', info.line
                                          , silent=silent
                                          )
        except IOError as e: eprint(str(e), silent=silent)
        except StopIteration: break
Example #13
	def __write(self, files, path):
		try:
			dummy = tempfile.NamedTemporaryFile(mode='w+t',
				delete=False)

			files = self.__normalize(files)

			for line in fileinput.input(files):
				dummy.write('%s%s' %
					(fileinput.isfirstline() and '\n' or '', line))

			# close (and therefore flush) the buffer before moving the file
			dummy.close()

			try:
				shutil.move(dummy.name, path)

			except IOError as error:
				self.log(error, True)

		except FileNotFoundError:
			self.log('the file %s was not found' %
				fileinput.filename(), True)

		else:
			self.log('the file %s was built' % path)
Example #14
def main():
    """Parse stream of requests and insert into MongoDB collection.

    This script will accept input from either stdin or one or more files as
    arguments. Two loggers control logging--one general purpose logger for the
    application and one for logging requests that fail to make it through the
    pipeline. The latter is configured to route different kinds of failures to
    different streams as configured. The failed requests will be logged
    unmodified, as they entered the pipeline, to make later attempts at
    processing easier.

    Failure to send any requests through the pipeline will result in an exit
    status of 1.
    """
    req_buffer = []

    for line in fileinput.input():
        try:
            request = process(line)
        except apachelog.ApacheLogParserError:
            # log unparseable requests
            req_log.error(line.strip(), extra={'err_type': 'REQUEST_ERROR'})
            continue
        except requests.exceptions.RequestException:
            req_log.error(line.strip(), extra={'err_type': 'DSPACE_ERROR'})
            continue
        except Exception as e:
            log.error(e, extra={'inputfile': fileinput.filename(),
                                'inputline': fileinput.filelineno()})
            continue
        if request:
            req_buffer.append(request)
        if len(req_buffer) > 999:
            insert(collection, req_buffer)
            req_buffer = []
Example #15
def load_data(filenames, coarse=False):
    """
    Load samples from one or more files where the format is:
    COARSE_CATEGORY:fine_category some sample data blah blah

    This is a custom data loader based on the `load_files` function in this code:
    https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/datasets/base.py
    """
    data = []
    target = []
    fine_target = []
    if coarse:
        data_re = re.compile(r'(\w+):(\w+) (.+)')
    else:
        data_re = re.compile(r'(\w+:\w+) (.+)')

    for line in fileinput.input(filenames):
        d = data_re.match(line)
        if not d:
            raise Exception("Invalid format in file {} at line {}"
                            .format(fileinput.filename(), fileinput.filelineno()))
        if coarse:
            target.append(d.group(1))
            fine_target.append(d.group(2))
            data.append(d.group(3))
        else:
            target.append(d.group(1))
            data.append(d.group(2))

    return Bunch(
        data=numpy.array(data),
        target=numpy.array(target),
        target_names=set(target),
    )
Example #16
def t08():
    """Use fileinput to implement grep-like functionality"""
    import re
    pattern = re.compile(r'\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}')
    # No inplace editing here: with inplace=1 the print() output would be
    # redirected into data.txt and overwrite it instead of going to stdout.
    for line in fileinput.input('data.txt'):
        if pattern.search(line):
            print(fileinput.filename(), fileinput.filelineno(), line)
Example #17
    def is_update_article(self):
        blog_dir = self.db.get_info('user', self.blog_name, 'dir')
        catedir = blog_dir + '/_posts/' + self.article_categories
        if not os.path.exists(catedir):
            return False
        bakdir = os.getcwd()
        os.chdir(catedir)
        print self.article_title
        for line in fileinput.input(os.listdir(catedir)):
            if line.startswith('title:'):
                #if line.find(self.article_title):
                if self.article_title in line:
                    print '==find the blog and get the file name=='
                    self.article_filename = fileinput.filename()
                    print self.article_filename
                    fileinput.close()
                    os.chdir(bakdir)
                    return True
                else:
                    print '==not find the blog and get the file name=='


        fileinput.close()
        os.chdir(bakdir)
        return False
Example #18
    def test_missing_debug_statements(self):
        # Exclude explicit debug statements written in the code
        exclude = {
            'regex.py': [240, 241],
        }

        message = "\nFound a missing debug statement at line %d of file %r: %r"
        filename = None
        file_excluded = []
        files = (
            glob.glob(os.path.join(self.source_dir, '*.py')) +
            glob.glob(os.path.join(self.source_dir, 'validators/*.py'))
        )

        for line in fileinput.input(files):
            if fileinput.isfirstline():
                filename = fileinput.filename()
                file_excluded = exclude.get(os.path.basename(filename), [])
            lineno = fileinput.filelineno()

            if lineno in file_excluded:
                continue

            match = self.missing_debug.search(line)
            self.assertIsNone(match, message % (lineno, filename, match.group(0) if match else None))
Example #19
def filtered_events():
    loglines = levels()
    # skip header on first file
    leveltext = loglines.__next__()
    # cycle through chosen events
    while True:
        while not chosen_event_header(leveltext):
            leveltext = loglines.__next__()
        event_lines = []
        # grab wanted events for processing
        level, line = leveltext
        # skip @ sign
        event = line[1:]
        # NOTE: some entries contain spaces in their text, not just between timestamp and text
        # Uses Python 3.3 * syntax for stuffing tuples
        timestamp, *text = event.split()
        event = {'EventName': "".join(text),
                 'Timestamp': timestamp,
                 'Filename' : fileinput.filename(),
                 'lineno'   : fileinput.lineno()}
        # populate it with attributes and members
        leveltext = loglines.__next__()
        # gather raw lines
        # try block handles case of last event, where the iteration is exhausted by the while loop
        #
        try:
            while not event_header(leveltext):
                event_lines.append(leveltext)
                leveltext = loglines.__next__()
        except StopIteration:
            pass
        event.update({'Items': itertools.groupby(event_lines, linelevel)})
        yield event
Example #20
def output():
    for event in filtered_events():
        # strip out level and get ready for filtering
        filtered_attributes = []  # start empty so a level-2 group arriving first is detected below
        for level, event_items in event['Items']:
            # levels 1 and 2 both contain key-value pairs in the same format 'Key' 'Value'
            #print('Items', [i for l, i in event_items])
            current_group = [parse_line(i) for l, i in event_items]
            # process level 2 before level 1 because...?
            if level == 2:
                # get a (possibly empty) list of wanted member names from a (possibly non-existent) attribute
                # and keep only those members of the event that are on the list
                if not filtered_attributes:
                    # level 2 occurs before level 1 in an event. Something odd about this, but recover
                    filtered_attributes = [{'Name': 'Level2BeforeLevel1',
                                            'Value': (fileinput.filename(), fileinput.filelineno())}]
                parent = filtered_attributes[-1]['Name']
                members_wanted = attribute_list.get(parent)
                # handle members
                if members_wanted:
                    filtered_members = [r for r in current_group
                                        if r['Name'] in members_wanted]
                    if filtered_members:
                        # attach members to previous attribute
                        filtered_attributes[-1]['Members'] = filtered_members
            if level == 1:
                # handle attributes
                # note name of last attribute: this is the parent of any members in the next level 2 (member) group
                parent = current_group[-1]['Name']
                filtered_attributes = [r for r in current_group
                                       if r['Name'] in attribute_list]
        # commit changes to attributes of the current event including members of this attribute
        event.update({'Attributes': filtered_attributes})
        del event['Items']
        yield event
Example #21
def _main():
    parser = optparse.OptionParser(usage="usage: %prog [options] [<file>...]",
                                   description=__doc__)
    parser.add_option("-d", "--delimiter", dest="delimiter", default='\t',
                      help="delimiter between defline and sequence"
                      " [default TAB]", metavar="STRING")
    parser.add_option("-D", "--defline", dest="defline",
                      choices=('before', 'after', 'omit'), default="after",
                      help="position of defline with respect to sequence, one"
                      " of 'before', 'after' [default], or 'omit'",
                      metavar="POSITION")
    parser.add_option("-i", "--inverse", dest="inverse", action="store_true",
                      help="do the inverse transformation (flat to FASTA)")
    DEFAULT_WRAP = 80
    parser.add_option("-w", "--wrap", dest="wrap", type="int",
                      default=DEFAULT_WRAP,
                      help="for --inverse, wrap sequence to specified width"
                      " [default %s, 0 means don't wrap at all]" % DEFAULT_WRAP,
                      metavar="COLUMNS")
    parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
                      help="be verbose")
    parser.add_option("--copyright", action="store_true", dest="copyright",
                      help="print copyright and exit")
    options, args = parser.parse_args()

    if options.wrap < 0:
        parser.print_help()
        sys.exit(1)

    if not options.inverse:
        if not args:
            files = [ sys.stdin ]
        else:
            files = [ open(fn) for fn in args ]

        for f in files:
            for locusname, defline, sequence in greylag.read_fasta_file(f):
                write_flattened_locus(options, defline, sequence)
    else:
        for line in fileinput.input(args):
            if options.defline != 'omit':
                parts = line.split(options.delimiter, 1)
                if len(parts) < 2:
                    error("input line lacks delimiter")
                if options.defline == 'before':
                    defline, sequence = parts
                else:
                    sequence, defline = parts
            else:
                sequence = line
                defline = "%s:%s" % (fileinput.filename(),
                                     fileinput.filelineno())
            sequence = sequence.strip()
            print defline.strip()
            if options.wrap:
                for start in range(0, len(sequence), options.wrap):
                    print sequence[start:start+options.wrap]
            else:
                print sequence
Example #22
def initialstate(fd, line):
    if fd.isfirstline():
        print "Files: %s" % fileinput.filename()
    if not re.match("^#", line):
        return "endofcomment"
    if re.search(r"^#[\s]*LICENSE", line):
        return "licensesection"
    return "initialstate"
Example #23
def check_tidy(src_dir):
    count_lines = 0
    count_empty_lines = 0

    for (dirpath, dirnames, filenames) in os.walk(src_dir):
        if any(d in dirpath for d in skip_dirs):
            continue

        files = [os.path.join(dirpath, name) for name in filenames
                 if is_interesting(name)]

        if not files:
            continue

        contents = ""
        license_checked = False

        for line in fileinput.input(files):

            if '\t' in line:
                report_error('TAB character')
            if '\r' in line:
                report_error('CR character')
            if line.endswith(' \n') or line.endswith('\t\n'):
                report_error('trailing whitespace')
            if not line.endswith('\n'):
                report_error('line end without NEW LINE character')

            if len(line) - 1 > column_limit:
                report_error('line exceeds %d characters' % column_limit)

            if fileinput.isfirstline():
                contents = ""
                license_checked = False

            count_lines += 1
            if not line.strip():
                count_empty_lines += 1

            if len(contents) < 700:
                contents += line
            elif not license_checked:
                if not check_license(contents):
                    report_error_name_line(fileinput.filename(),
                                           1,
                                           'incorrect license')
                license_checked = True

    print
    print "* total lines of code: %d" % count_lines
    print ("* total non-blank lines of code: %d"
           % (count_lines - count_empty_lines))
    print "%s* total errors: %d%s" % (TERM_RED if count_err > 0 else TERM_GREEN,
                                      count_err,
                                      TERM_EMPTY)
    print

    return count_err == 0
Example #24
def parse_filter(filter):
    if filter == "$filename":
        return lambda i: [fileinput.filename()]
    elif filter == "$lineno":
        return lambda i: [fileinput.lineno()]
    elif filter == "$filelineno":
        return lambda i: [fileinput.filelineno()]
    else:
        return lambda i: jpath(filter, i)
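Note the two line counters used above: fileinput.lineno() is cumulative across all files read so far, while fileinput.filelineno() restarts at 1 in each file. A small sketch (the file names are placeholders):

import fileinput

for line in fileinput.input(["a.txt", "b.txt"]):
    # lineno() keeps counting across a.txt into b.txt;
    # filelineno() resets to 1 when b.txt starts.
    print(fileinput.filename(), fileinput.lineno(), fileinput.filelineno())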
Example #25
def main():
    p = optparse.OptionParser(__doc__)
    p.add_option("-n", dest="n", help="number of qual lines to test default:-1"
                 " means test until end of file or until it is possible to"
                 " determine a single file-type",
                 type='int', default=-1)

    opts, args = p.parse_args()

    if len(args) > 1:
        print("Only a single input file is supported.", file=sys.stderr)
        sys.exit(1)

    gmin = 99
    gmax = 0
    valid = []

    err_exit = False

    input_file = fileinput.input(args, openhook=fileinput.hook_compressed)

    for i, line in enumerate(input_file):
        if i == 0:
            input_filename_for_disp = fileinput.filename()

            if fileinput.isstdin():
                input_filename_for_disp = 'STDIN'

            print("# reading qualities from "
                  "{}".format(input_filename_for_disp), file=sys.stderr)

        lmin, lmax = get_qual_range(line.rstrip())

        if lmin < gmin or lmax > gmax:
            gmin, gmax = min(lmin, gmin), max(lmax, gmax)
            valid = get_encodings_in_range(gmin, gmax)

            if len(valid) == 0:
                print("no encodings for range: "
                      "{}".format((gmin, gmax)), file=sys.stderr)
                err_exit = True
                break

            if len(valid) == 1 and opts.n == -1:
                # parsed entire file and found unique guess
                break

        if opts.n > 0 and i > opts.n:
            # parsed up to specified portion; return current guess(es)
            break

    input_file.close()

    if err_exit:
        sys.exit(1)
    else:
        print("{}\t{}\t{}".format(",".join(valid), gmin, gmax))
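fileinput.hook_compressed, used above, transparently opens .gz and .bz2 inputs based on the file extension and falls back to a plain open() for everything else; note that on older Python versions compressed files yield bytes rather than str. A minimal sketch, assuming a hypothetical reads.fastq.gz exists:

import fileinput

count = 0
for line in fileinput.input(["reads.fastq.gz"], openhook=fileinput.hook_compressed):
    count += 1
print(count, "lines read")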
Example #26
def get_data():
    files = glob.glob('/sys/class/net/*/statistics/*')

    metrics = {}
    for line in fileinput.input(files):
        metric_name = fileinput.filename().replace(
            '/sys/class/net/', '').replace('/statistics/', '_')
        metrics[metric_name] = line.strip()
    return metrics
Example #27
    def __grep_substring(self):
        try:
            for line in input(self.file_names):
                if re.search(self.regex, line, re.I):
                    print("{}: {} : {} ".format(filename(), filelineno(), line))
        except Exception as e:
            print(e)
        finally:
            pass
Example #28
def cloth():
  
  #arrays to hold raw input and size/value results 
  input = []
  dims = []

  #recursive function
  def maxVal(X, Y):
    for i in range(1, X):
      for j in range(1, Y):
        if(maxp[i][j] != -1):
          return maxp[i][j]
        else:
          a = 0
          b = 0
          for x in range(0, i):
            if(maxp[x][j]+maxp[i-x][j] > a):
              a = maxp[x][j]+maxp[i-x][j]

          for y in range(0, j):
            if(maxp[i][j-y]+maxp[i][y] > b):
              b = maxp[i][j-y]+maxp[i][y]

          maxp[i][j] = getMax(a, b, price[i][j])
          return maxVal(i-1,j-1)
  
  #Read the txt file line by line
  for line in fileinput.input():
    line = line.split() 
    input.append(line)  

  print "Reading", fileinput.filename()
  
  #set values based on first 3 inputs from the file
  X = int(input[0][0])
  Y = int(input[0][1])
  n = int(input[1][0])
  
  #X*Y for price of possible pieces and max prices
  price = [[-1]*Y for _ in range(X)]
  maxp = [[-1]*Y for _ in range(X)] 
  maxp[0][0] = 0

  #fill the dimension array from the input array
  for i in range(2, len(input)-1):
    dims.append(input[i])
  
  #fill prices from the dimension array
  for i in range(0, n-1):
    if(price[int(dims[i][0])][int(dims[i][1])] < int(dims[i][2])):
      price[int(dims[i][0])][int(dims[i][1])] = int(dims[i][2]) 
  
  #call the recursive max value function based on the size of the cloth
  maxVal(X-1, Y-1)
  
  print "Maximum Return: ", maxp[X-1][Y-1], '\n'
Example #29
    def print_error(self, error, line,
                    filename=None, filelineno=None):
        if self.should_ignore(error):
            return
        if not filename:
            filename = fileinput.filename()
        if not filelineno:
            filelineno = fileinput.filelineno()
        self.ERRORS = self.ERRORS + 1
        self.log_error(error, line, filename, filelineno)
Example #30
def main():
    """Read in files and display them on a pyplot window"""
    plt.figure(1)

    lines = []
    filename = None
    for line in fileinput.input():
        if not filename:
            filename = fileinput.filename()
        if fileinput.isfirstline() and len(lines):
            draw_roc_curve(filename, lines, as_string=True)
            filename = fileinput.filename()
            lines = []
        lines.append(line)

    draw_roc_curve(fileinput.filename(), lines)

    add_roc_labels()
    plt.show()
Example #31
    def initialize(self, change=None):
        """
        The Builder's main method. It stores all the changes that need to be made
        in `self.details` for a file, which are then used to add docstrings.
        """
        result = dict()

        patches = []
        if change:
            patches = change.get("additions")

        fileLines = list(fileinput.input(self.filename))
        i = 0

        for line in fileinput.input(self.filename):
            filename = fileinput.filename()
            lineno = fileinput.lineno()
            keywords = self.config.get("keywords")
            foundList = [
                word.lstrip() for word in line.split(" ")
                if word.lstrip() in keywords
            ]
            found = len(foundList) > 0 and not is_comment(
                line, self.config.get('comments'))
            # Checking an unusual format in method declaration
            if foundList:
                openP = line.count("(")
                closeP = line.count(")")
                if openP == closeP:
                    pass
                else:
                    pos = i
                    while openP != closeP:
                        pos += 1
                        line += fileLines[pos]
                        openP = line.count("(")
                        closeP = line.count(")")
                    lineno = pos + 1
            i = i + 1

            if change and found:
                found = self._is_line_part_of_patches(lineno, line, patches)

            if not self.details.get(filename):
                self.details[filename] = dict()

            if found:
                length = get_file_lines(filename)
                result = self.extract_and_set_information(
                    filename, lineno, line, length)
                if self.validate(result):
                    self.details[filename][result.name] = result
Example #32
def read_data():
    start = time.time()
    data = {}
    files = glob.glob(
        '/media/nickyz/Data/scriptie_data/armin/20170512_[5].csv')
    print(files)
    files.reverse()

    for line in fileinput.input(files=files):
        if fileinput.isfirstline():
            print(time.time() - start)
            print(fileinput.filename())
            start = time.time()
            continue

        splitted = line.split('\t')
        values = json.loads(splitted[0])

        try:
            sourceMac = values['sourceMac']
        except KeyError:
            sourceMac = values['sourcemac']

        try:
            localMac = int(values['localMac'])
        except KeyError:
            localMac = int(values['localmac'])

        signal = int(values['signal'])

        if localMac:
            continue

        try:
            droneId = values['droneId']
        except KeyError:
            droneId = values['droneid']

        timestamp = splitted[2]
        orig_time = datetime.datetime.fromtimestamp(int(
            timestamp[0:-3])).strftime('%Y-%m-%d %H:%M:%S')
        timestamp = datetime.datetime.fromtimestamp(int(
            timestamp[0:-3])).strftime('%Y-%m-%d %H:%M')

        try:
            data[sourceMac].append([timestamp, droneId, signal, orig_time])
        except KeyError:
            data[sourceMac] = [[timestamp, droneId, signal, orig_time]]

    print(time.time() - start)

    return data
Example #33
def readRunCmds():
  try:
    for line in fileinput.input():
      z=subprocess.check_output(line, stderr=subprocess.STDOUT, shell=True)
      print(z)
  except subprocess.CalledProcessError as details:
    print 'In file: {0}, line: {1}, errno: {2}, {3}'.format(
           fileinput.filename(),
           fileinput.lineno(), 
           details.returncode,
           str(details.output).replace('\n',''))
  finally:
    fileinput.close()
Example #34
def check_line(l, s, f, t, done):
    """
    @param l: the line
    @param s: the string that denotes this line is supposed to change
    @param f: from port
    @param t: to port
    @param done: set collecting the names of files that were changed
    @return: True if the line was changed (and printed), False otherwise
    """
    if l.find(s) >= 0 and line_has_port(l, f):
        print l.replace(f, t),
        done.add(fileinput.filename())
        return True
    return False
Example #35
    def grepHIDD(self, file):
        global HIDDs
        for line in fileinput.input(file):
            #print line
            m = HIDD_re.search(line)
            #print m
            if m:
                ref = "%s:%s:%s" % (fileinput.filename(), fileinput.lineno(),
                                    m.span()[0])
                hidd = unicode(m.groups()[0])
                if self.verbose > 1:
                    print "%s %s" % (ref, hidd)
                HIDDs.setdefault(hidd, []).append(ref)
Example #36
def input_stream(files=()):
    """ Handles input files similar to fileinput.
    The advantage of this function is it recovers from errors if one
    file is invalid and proceed with the next file
    """
    fileinput.close()
    try:
        if not files:
            for line in fileinput.input(files):
                yield line, '', fileinput.filelineno()

        else:
            while files:
                thefile = files.pop(0)
                try:
                    for line in fileinput.input(thefile):
                        yield line, fileinput.filename(), fileinput.filelineno()
                except IOError as e:
                    yield None, fileinput.filename(), e
    finally:
        fileinput.close()
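A usage sketch for the generator above (illustrative only): because per-file errors are yielded rather than raised, a caller can log the unreadable file and keep going. Note that files is consumed with pop(0), so a mutable list must be passed:

for line, fname, lineno in input_stream(['good.txt', 'missing.txt']):
    if line is None:
        # the third element holds the IOError for the unreadable file
        print('skipping %s: %s' % (fname, lineno))
        continue
    print('%s:%s: %s' % (fname, lineno, line.rstrip()))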
Example #37
    def test_missing_debug_statements(self):
        message = "\nFound a missing debug statement at line %d of file %r: %r"
        filename = None
        for line in fileinput.input(
                glob.glob(os.path.join(self.source_dir, '*.py'))):
            if fileinput.isfirstline():
                filename = os.path.basename(fileinput.filename())
            lineno = fileinput.filelineno()

            match = self.missing_debug.search(line)
            self.assertIsNone(
                match, message %
                (lineno, filename, match.group(0) if match else None))
Example #38
def file_input_module():
    try:
        for line in fli.input([
                './testfile/testfile_1', './testfile/testfile_2',
                './testfile/testfile_3'
        ]):
            print('{} file contents #{} {}'.format(fli.filename(), fli.filelineno(),
                                                   line.rstrip()))
    except Exception as e:
        print(e)
    finally:
        # print(fli.filename())
        fli.close()
Example #39
def exit_with_error(message, line):
    fn = fileinput.filename()
    ln = fileinput.lineno()
    print(
        'File "{fn}", line {ln}\n'
        "  {line}\n\n"
        "Error: {message}".format(message=message,
                                  line=line.strip(),
                                  fn=fn,
                                  ln=ln),
        file=sys.stderr,
    )
    sys.exit(1)
Example #40
    def test_state_is_not_None(self):
        """Tests fileinput.filename() when fileinput._state is not None.
           Ensure that it invokes fileinput._state.filename() exactly once,
           returns whatever it returns, and does not modify fileinput._state
           to point to a different object."""
        filename_retval = object()
        instance = MockFileInput()
        instance.return_values["filename"] = filename_retval
        fileinput._state = instance
        retval = fileinput.filename()
        self.assertExactlyOneInvocation(instance, "filename")
        self.assertIs(retval, filename_retval)
        self.assertIs(fileinput._state, instance)
Example #41
def main():
    args = get_args()
    if args.version:
        sys.stdout.write('filter_sam version {}\n'.format(__version__))
        sys.exit(0)

    if args.output and args.output != '-':
        sys.stdout = open(args.output, 'w')
    if args.verbose:
        sys.stderr.write('Opened {} for output\n'.format(sys.stdout.name))
    if args.command == 'passtru':
        args.keep_filtered = False
    if args.keep_filtered:
        kept_file = open(args.file_kept, 'w')
        if args.verbose:
            sys.stderr.write('Opened {} for keeping filtered alignments\n'.format(args.file_kept))
    else:
        kept_file = None
    rrna_sizes = {}
    if args.command == 'rrna':
        if not args.size_file:
            sys.stderr.write("Can't filter rRNA without the reference size information.")
            sys.exit(1)
        else:
            with open(args.size_file) as sizef:
                for line in sizef:
                    parsed = line.strip().split('\t')
                    rrna_sizes[parsed[0]] = int(parsed[1])
            if args.verbose:
                sys.stderr.write('Extracted length for {} references from {}\n'.format(len(rrna_sizes), args.size_file))

    with utils.measureTime('Finished filtering {}'.format(','.join(args.files))):
        samfiles = fileinput.input(args.files)
        cur_reads = None
        while True:
            try:
                line = next(samfiles)
            except StopIteration:
                if args.command == 'filter' and cur_reads:
                    sys.stdout.write(cur_reads['lines'])
                break
            if line[0] == '@':
                if not cur_reads:
                    sys.stdout.write(line)
                if fileinput.isfirstline():
                    sys.stderr.write("Processing input file: '{}'\n".format(fileinput.filename()))
                continue
            else:
                cur_reads = process_cur_reads(cur_reads, line, args.command,
                                              args.keep_filtered, kept_file, rrna_sizes)
Example #42
def main(bus, process, name_for_stdin, results_filter, files):
    if name_for_stdin is None and '-' in files:
        return False
    for line in fileinput.input(files):
        parsed_line = line.strip().split(';')
        if parsed_line[-1] not in results_filter:
            continue
        process(
            bus,
            name_for_stdin if fileinput.isstdin()
            else os.path.basename(fileinput.filename()),
            parsed_line[0], parsed_line[1], parsed_line[2],
            [parsed_line[i:i + 2] for i in range(3, len(parsed_line) - 1, 2)])
    return True
Example #43
def main(args):
    ap = argparse.ArgumentParser()
    ap.add_argument('pattern', help='the pattern to match')
    ap.add_argument('files', nargs='*', help='files to be searched')
    ap.add_argument('-i',
                    '--ignore-case',
                    action='store_true',
                    help='ignore case while searching')
    ap.add_argument('-v',
                    '--invert',
                    action='store_true',
                    help='invert the search result')
    ns = ap.parse_args(args)

    flags = 0
    if ns.ignore_case:
        flags |= re.IGNORECASE

    pattern = re.compile(ns.pattern, flags=flags)

    if ns.invert:

        def fn_predicate(test):
            return not test
    else:

        def fn_predicate(test):
            return test

    # Do not try to grep directories
    files = [f for f in ns.files if not os.path.isdir(f)]

    fileinput.close()  # in case it is not closed
    try:
        for line in fileinput.input(files):
            if fn_predicate(pattern.search(line)):
                if fileinput.isstdin():
                    fmt = '{lineno}: {line}'
                else:
                    fmt = '{filename}: {lineno}: {line}'

                print(
                    fmt.format(filename=fileinput.filename(),
                               lineno=fileinput.filelineno(),
                               line=line.rstrip()))
    except Exception as err:
        print("grep: {}: {!s}".format(type(err).__name__, err),
              file=sys.stderr)
    finally:
        fileinput.close()
Example #44
def main():
    if len(sys.argv) > 1 and sys.argv[1] == "+mono":
        del sys.argv[1]
        monomorphize = True
    else:
        monomorphize = False

    if len(sys.argv) > 1 and sys.argv[1] == "+iodelay":
        del sys.argv[1]
        iodelay = True
    else:
        iodelay = False

    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]

        def process_diagnostic(diag):
            print("\n".join(diag.render(only_line=True)))
            if diag.level == "fatal":
                exit()
    else:

        def process_diagnostic(diag):
            print("\n".join(diag.render()))
            if diag.level in ("fatal", "error"):
                exit(1)

    engine = diagnostic.Engine()
    engine.process = process_diagnostic

    buf = source.Buffer("".join(fileinput.input()).expandtabs(),
                        os.path.basename(fileinput.filename()))
    parsed, comments = parse_buffer(buf, engine=engine)
    typed = ASTTypedRewriter(engine=engine,
                             prelude=prelude.globals()).visit(parsed)
    Inferencer(engine=engine).visit(typed)
    ConstnessValidator(engine=engine).visit(typed)
    if monomorphize:
        CastMonomorphizer(engine=engine).visit(typed)
        IntMonomorphizer(engine=engine).visit(typed)
        Inferencer(engine=engine).visit(typed)
    if iodelay:
        IODelayEstimator(engine=engine, ref_period=1e6).visit_fixpoint(typed)

    printer = Printer(buf)
    printer.visit(typed)
    for comment in comments:
        if comment.text.find("CHECK") >= 0:
            printer.rewriter.remove(comment.loc)
    print(printer.rewrite().source)
Example #45
def jsonlint():
    """ lint all files on command line or read from stdin """
    filename = ''
    contents = ''
    failures = 0
    for line in fileinput.input():
        if fileinput.isfirstline() and contents != '':
            failures += process(filename, contents)
            contents = ''
        filename = fileinput.filename()
        contents += line
    if contents != '':
        failures += process(filename, contents)
    return failures
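A plausible entry point for the linter above (a sketch only; the original wiring is not shown, and sys is assumed to be imported):

if __name__ == '__main__':
    # exit non-zero when any file failed to parse
    sys.exit(1 if jsonlint() else 0)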
Example #46
def parse_csv(file, options):
    """Parse handcount data from Boulder spreadsheet into clean tab-separated-values
    Model is the "CountEntry" spreadsheet produced in the
    2010 Boulder audits, converted to a set of csv files by ssconvert.py
    Confused a bit by the fact that some headers are above
    the field they label (Audit Batch ID and Option...)
    and some to the left of it (Contest)

    Skip or ignore the "Entry Master" sheet.

    "Contest:","COUNTY ISSUE 1B [Countywide Open Space Sales and Use Tax Increase and Bond Authorization] Vote For 1",,,,,,,,,,
    ....
    "Audit Batch ID","System ID","Batch Number",,"Date","Count #",,,,,,
    "p300_mb_451",,,,,1,,,,,,
    ....
    "Option","Total 1","Total 2","Total 3","Total 4","Total 5","Total 6","Total 7","Total 8","MVW Count","Machine Count","Diff"
    "YES",59,24,38,17,,,,,138,138,0
    ....
    """

    #election = options.election

    parseCounts = False
    parseBatch = False
    contest = "_unknown_"
    batch = "_unknown_"

    for line in fileinput.input(file):
        fields = line.strip().split(',')

        if fields[0] == '"Contest:"':
            contest = fields[1]
        elif fields[0] == '"Option"':
            parseCounts = True
        elif fields[0] == '"Group Totals"':
            parseCounts = False
        elif fields[0] == '"Audit Batch ID"':
            parseBatch = True
            continue

        if parseBatch:
            batch = fields[0]
            parseBatch = False
            continue

        if parseCounts:
            # print diff, machine count, choice, batch, contest, filename
            print "%s\t%s\t%s\t%s\t%s\t%s" % (fields[11], fields[10],
                                              fields[0], batch, contest,
                                              fileinput.filename())
Example #47
    def check(self, files):
        for line in fileinput.input(files):
            for i, rule in enumerate(self.rules):
                mc = rule.search(line)
                if mc:
                    self.report_error(self.err_msgs[i])

            if fileinput.isfirstline():
                if not CheckLicenser.check(fileinput.filename()):
                    self.report_error('incorrect license')

            self.count_lines += 1
            if not line.strip():
                self.count_empty_lines += 1
Example #48
def TCfilter(files, cutoff):
    allFiles = fileinput.input(files)
    for line in allFiles:
        if line[0:3] == "TC ":
            TC = line.split()[1]
            if TC == "------":
                fileinput.nextfile()
                continue  # no numeric score on this line; skip the comparison
            TC = float(TC)
            if TC < cutoff:
                shutil.copy(fileinput.filename(), out)
            else:
                fileinput.nextfile()
    fileinput.close()
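fileinput.nextfile() (used above) closes the current file so that iteration resumes with the first line of the next one; the remaining lines of the current file are never read. A compact sketch of the skip-file idiom (the file names are placeholders):

import fileinput

for line in fileinput.input(["a.txt", "b.txt"]):
    if line.startswith("#SKIP"):
        fileinput.nextfile()  # abandon the rest of this file
        continue
    print(line.rstrip())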
Example #49
def findValuesWithoutUnits(line,ln):
    numList = re.findall(r"\d+?[\s,.]?\d+[\s\"]", line)
    errindices = []
    for match in re.finditer(r"\d+?[\s,.]?\d+[\s\"]", line):
        errindices.append(match.start())
    l = len(numList)
    if l > 0:
        if 'MaterialFraction' in line:
            return l
        if '<?xml' in line:
            return l
        text = fileinput.filename()+': line# '+str(ln)+' warning: numerical value without units: '
        print(text)
        errorPrint(line,errindices)
    return l
Example #50
def main():
    plt.figure(1)

    lines = []
    filename = None
    for line in fileinput.input():
        if not filename:
            filename = fileinput.filename()
        if fileinput.isfirstline() and len(lines):
            draw_roc_curve(filename, lines)
            filename = fileinput.filename()
            lines = []
        lines.append(line)

    draw_roc_curve(fileinput.filename(), lines)

    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curves')
    plt.legend(loc='best')
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.grid()
    plt.show()
Example #51
def find_occurences(main_file):

    occurences = []
    tex_files = [main_file]
    if len(tex_files) == 0:
        return
    for line in fileinput.input(tex_files):
        if line.find("usepackage") != -1:
            occ = {}
            occ['filename'] = fileinput.filename()
            occ['line_no'] = fileinput.filelineno()
            occ['string'] = line
            occurences.append(occ)

    return occurences
Example #52
def bed_to_gff(args, source, field_type):
    """
    Convert input filename from BED to GFF.
    """
    if field_type is None:
        field_type = "exon"
    for line in fileinput.input(args):
        line = line.strip()
        if source is None:
            if fileinput.isstdin():
                source = "stdin"
            else:
                source = os.path.basename(fileinput.filename())
        output_line = bed_to_gff_line(line, source, field_type)
        sys.stdout.write("%s\n" %(output_line))
Example #53
def main(args):
    global _stash
    ap = argparse.ArgumentParser()
    ap.add_argument('pattern', help='the pattern to match')
    ap.add_argument('files', nargs='*', help='files to be searched')
    ap.add_argument('-i',
                    '--ignore-case',
                    action='store_true',
                    help='ignore case while searching')
    ap.add_argument('-v',
                    '--invert',
                    action='store_true',
                    help='invert the search result')
    ns = ap.parse_args(args)

    flags = 0
    if ns.ignore_case:
        flags |= re.IGNORECASE

    pattern = re.compile(ns.pattern, flags=flags)

    # Do not try to grep directories
    files = [f for f in ns.files if not os.path.isdir(f)]

    fileinput.close()  # in case it is not closed
    try:
        for line in fileinput.input(files):
            if bool(pattern.search(line)) != ns.invert:
                if ns.invert:  # optimize: if ns.invert, then no match, so no highlight color needed
                    newline = line
                else:
                    newline = re.sub(
                        pattern, lambda m: _stash.text_color(m.group(), 'red'),
                        line)
                if fileinput.isstdin():
                    fmt = u'{lineno}: {line}'
                else:
                    fmt = u'{filename}: {lineno}: {line}'

                print(
                    fmt.format(filename=fileinput.filename(),
                               lineno=fileinput.filelineno(),
                               line=newline.rstrip()))
    except Exception as err:
        print("grep: {}: {!s}".format(type(err).__name__, err),
              file=sys.stderr)
    finally:
        fileinput.close()
Example #54
def join_files(_filenames, outfilename, first_headers_only=True,
               headers_count=0):
    """
    function joins many text files
    """
    if len(_filenames) > 0 and outfilename:
        create_dir(fs.dirname(outfilename))
        with open(outfilename, 'w') as fout:
            for line in fileinput.input(_filenames):
                if first_headers_only \
                    and fileinput.filelineno() <= headers_count \
                    and not _filenames[0] == fileinput.filename():
                    continue
                fout.write(line)
        return True
    return False
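A usage sketch for join_files (the file names are illustrative): concatenate two CSV exports while keeping the single header row only from the first file.

# part1.csv and part2.csv are assumed to each begin with one header line
join_files(['part1.csv', 'part2.csv'], 'combined.csv',
           first_headers_only=True, headers_count=1)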
Example #55
    def print_error(self, error, line='', filename=None, filelineno=None):
        if self.should_ignore(error):
            return

        warn = self.should_warn(error)

        if not filename:
            filename = fileinput.filename()
        if not filelineno:
            filelineno = fileinput.filelineno()
        if warn:
            self.warning_count = self.warning_count + 1
        else:
            self.error_count = self.error_count + 1

        self.log_error(error, line, filename, filelineno, warn)
Example #56
def clean_clutter_in(files, tabsize=8):
  if not files: return
  n_empty = 0
  for line in input([ f for f in files if not os.path.isdir(f) ], inplace=1):
    if (isfirstline()):
      if (not isstdin()):
        print >> sys.__stdout__, filename() + ':'
      n_empty = 0
    clean_line = line.expandtabs(tabsize).rstrip()
    if (len(clean_line) == 0):
      n_empty += 1
    else:
      for i in xrange(n_empty): sys.stdout.write("\n")
      n_empty = 0
      sys.stdout.write(clean_line)
      sys.stdout.write("\n")
Example #57
    def parse(self):
        skip = -1
        skipping = None
        title = None
        brand = ''
        ingredients = None
        calories = None
        filename = ''

        for line in fileinput.input():
            if fileinput.isfirstline():
                if title:
                    self.result.append({
                        'name': self.decode(title),
                        'url': urllib.parse.unquote(self.name2url(filename)),
                        'brand': self.decode(brand),
                        'calories': calories,
                        'ingredients': ingredients,
                    })
                title = None
                brand = ''
                ingredients = None
                filename = fileinput.filename()
            if title is None and '<title>' in line:
                title = line.split('>')[1].split(',')[0]
            if not brand and 'brand:' in line:
                brand = line.split("'")[1]
            if skip > 0:
                skip -= 1
            elif skip == 0:
                skip = -1
                if skipping == 'nutri':
                    ingredients = self.getIngredients(self.decode(line))
                elif skipping == 'caloric':
                    calories = self.getCalories(self.decode(line))
                    fileinput.nextfile()
            elif NUTRI_INFO in line:
                skip = 1
                skipping = 'nutri'
            elif CALORIC in line:
                skip = 1
                skipping = 'caloric'
Example #58
def edit_file(*args, **kwargs):
    """

    :param args:
    :param kwargs:
    :return None:
    """
    into_tag = False
    tag_lines_count = 0
    tag_pattern = re.compile(r'(:?' + re.escape(opened_tag) + r'.*?' +
                             re.escape(closed_tag) + r')',
                             flags=re.U)
    for _line in fileinput.input(
            inplace=True,
            mode='rb',
    ):
        line = str(_line, encoding='utf-8')
        result = ''
        inner_list = tag_pattern.split(line)
        for tag in inner_list:
            o_tag_index = tag.find(opened_tag)
            c_tag_index = tag.find(closed_tag)
            # 1. line like '<TAG>%</TAG>'
            if o_tag_index >= 0 and c_tag_index >= 0:
                result += get_tag_text(tag, o_tag_index, c_tag_index)
                continue
            # 2. line like '%<TAG>%'
            elif o_tag_index >= 0 > c_tag_index:
                into_tag = True
            if into_tag:
                tag_lines_count += 1
                if tag_lines_count > TOO_MUCH_INTO_TAG:
                    raise IOError('TOO MUCH INTO TAG: {}:{}'.format(
                        fileinput.filename(),
                        fileinput.filelineno(),
                    ))
                # 3. line like '%</TAG>%'
                if c_tag_index >= 0:
                    into_tag = False
                    tag_lines_count = 0
                result += get_tag_text(tag, o_tag_index, c_tag_index)
            # 4. line like '%'
            else:
                result += tag
        sys.stdout.buffer.write(result.encode())
    fileinput.close()
    return None
Example #59
def common():
    common_parser = argparse.ArgumentParser()
    common_parser.add_argument('-r',
                               '--regex',
                               metavar='REGEX',
                               required=True,
                               help='regular expression')
    common_parser.add_argument('-f',
                               '--files',
                               metavar='FILE',
                               required=True,
                               help="files to search in, STDIN is "
                               "used if no file is specified")
    common_args = common_parser.parse_args()
    for line in fileinput.input(common_args.files):
        match = re.search(common_args.regex, line)
        print(fileinput.filename(), fileinput.filelineno(), match)
Example #60
    def find_all_importing_modules(self):
        self.modules_found_infiles = set()
        file_list = self.python_files_found_dict
        # 1. find all import strings in all files
        # 2. parse all module names in them
        openhook = fileinput.hook_encoded(encoding="utf8", errors=None)
        for line in fileinput.input(files=file_list,
                                    mode="r",
                                    openhook=openhook):
            # print(f"[descriptor={fileinput.fileno():2}]\tfile=[{fileinput.filename()}]\tline=[{fileinput.filelineno()}]\t[{line}]")
            modules_found_inline = self._find_modulenames_set(line)
            self.python_files_found_dict[fileinput.filename()].update(
                modules_found_inline)
            self.modules_found_infiles.update(modules_found_inline)

        self.count_found_modules = len(self.modules_found_infiles)
        return