from fileinput import input as fileinput
from typing import Callable


def replace_line(path: str, match: Callable[[str], bool], replacement: str):
    """Rewrite `path` in place, replacing every line for which `match` is true."""
    for line in fileinput(path, inplace=True):
        if match(line):
            # Preserve the original line ending unless the replacement
            # already ends with it.
            end = ''
            if line.endswith('\r\n'):
                end = '\r\n'
            elif line.endswith('\n'):
                end = '\n'
            if replacement.endswith(end):
                end = ''
            print(replacement, end=end)
        else:
            print(line, end='')
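# Usage sketch for the helper above; the file name and the patterns are
# hypothetical:
replace_line(
    'script.py',
    match=lambda line: line.startswith('#!'),
    replacement='#!/usr/bin/env python3',
)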
import os
from fileinput import input as fileinput
from textwrap import dedent

import pdb4amber


def prepare_mcpb_input(structures, software_version='g09', cut_off=2.8):
    """Get all the substructure files and prepare the MCPB.py input."""
    template = dedent("""
        original_pdb master.pdb
        group_name {name}
        cut_off {cut_off}
        ion_ids {metal_id}
        ion_mol2files {metal_mol2}
        naa_mol2files {residues_mol2}
        frcmod_files {residues_frcmod}
        large_opt 1
        software_version {software_version}
        """)
    # First collect all files in the same master PDB
    pdbfiles = [s['pdb'] for s in structures['metals'] + structures['residues']]
    if 'pdb' in structures['protein']:
        pdbfiles.append(structures['protein']['pdb'])
    with open('master.unfixed.pdb', 'w') as f:
        for line in fileinput(pdbfiles):
            f.write(line)
    # Fix residue numbering issues
    pdb4amber.run(arg_pdbin='master.unfixed.pdb', arg_pdbout='master.pdb')
    name = os.path.basename(os.getcwd())
    with open('mcbp.in', 'w') as f:
        f.write(template.format(
            name=name,
            metal_id=' '.join(map(str, range(1, len(structures['metals']) + 1))),
            metal_mol2=' '.join([s['mol2'] for s in structures['metals']]),
            residues_mol2=' '.join([r['mol2'] for r in structures['residues']]),
            residues_frcmod=' '.join([r['frcmod'] for r in structures['residues']]),
            cut_off=cut_off,
            software_version=software_version,
        ))
    return 'mcbp.in'
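# For illustration only: with one metal ion and two nonstandard residues, the
# template above would render roughly as follows (every value is made up):
#
#   original_pdb master.pdb
#   group_name my_complex
#   cut_off 2.8
#   ion_ids 1
#   ion_mol2files ZN1.mol2
#   naa_mol2files HID1.mol2 CYS2.mol2
#   frcmod_files HID1.frcmod CYS2.frcmod
#   large_opt 1
#   software_version g09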
from fileinput import hook_encoded
from fileinput import input as fileinput

from django.contrib.auth import get_user_model
from django.core.management.base import CommandError


def handle(self, *args, **options):
    count = 0
    UserModel = get_user_model()
    for line in fileinput(args, openhook=hook_encoded("utf-8")):
        try:
            username, password = line.rstrip("\r\n").split(":", 1)
        except ValueError:
            raise CommandError(
                "Invalid input provided. "
                "Format is 'username:password', one per line.")
        try:
            user = UserModel._default_manager.get(
                **{UserModel.USERNAME_FIELD: username})
        except UserModel.DoesNotExist:
            raise CommandError("User '%s' does not exist." % username)
        user.set_password(password)
        user.save()
        count += 1
    return "%d password(s) successfully changed." % count
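# Invocation sketch: `handle` above belongs to a Django management command
# (the command and file names below are assumptions), fed colon-separated
# credentials, one pair per line:
#
#   $ cat passwords.txt
#   alice:n3w-s3cret
#   bob:hunter2
#   $ python manage.py changepasswords passwords.txt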
def main():
    from argparse import ArgumentParser
    from fileinput import input as fileinput
    arg_parser = ArgumentParser()
    arg_parser.add_argument('-e', dest='expression',
                            help='starting expression; if omitted, first defined term is used')
    arg_parser.add_argument('-g', dest='grammar', default=EBNF_GRAMMAR,
                            help='EBNF grammar file')
    arg_parser.add_argument('-v', dest='verbose', default=False, action='store_true',
                            help='show what the parser is doing')
    arg_parser.add_argument('file', default='-', nargs='?',
                            help='text file to be parsed')
    args = arg_parser.parse_args()
    grammar = ''
    with open(args.grammar, 'r') as fd:
        grammar = fd.read()
    parser = create_parser(grammar)
    if args.expression:
        term = args.expression
    else:
        term = PEGParser(EBNF_DEFS).parse(
            grammar, 'Syntax').first_descendant('Definition/Identifier').match
    parser.debug = args.verbose
    contents = ''.join(fileinput(files=args.file))
    parser.parse(contents, term).pretty_print()
from collections import defaultdict
from csv import reader
from fileinput import input as fileinput
from pprint import pprint

import matplotlib.pyplot as plt
import numpy as np
from nltk import word_tokenize
from sklearn.cluster import SpectralClustering
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer

# N_CLUSTERS, COLORS, and wordcount are assumed module-level definitions.


def main():
    with fileinput() as fs:
        data = list(set(
            r[4].replace('AND ', '').replace('OF ', '') for r in reader(fs)))
    vec = CountVectorizer(tokenizer=word_tokenize).fit_transform(data)
    svd = TruncatedSVD().fit_transform(vec)
    fig, axes = plt.subplots(2, 3)
    fig.subplots_adjust(wspace=1)
    for i, ax in enumerate(np.array(axes).flatten()):
        clustering = SpectralClustering(n_clusters=N_CLUSTERS - i).fit_predict(svd)
        labeled = np.append(svd, clustering.reshape(len(data), 1), axis=1)
        clusters = defaultdict(set)
        for c, r in zip(clustering, data):
            clusters[c].add(r)
        pprint(clusters)
        for c, titles in clusters.items():
            # others = set(data) - titles
            wc = wordcount(titles)
            t = labeled[labeled[:, 2] == c]
            ax.scatter(t[:, 0], t[:, 1], c=COLORS[c],
                       label=', '.join(w for w, _ in wc.most_common(3)))
        ax.set_title(f'{N_CLUSTERS - i} Clusters')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        # ax.legend(loc='lower center', bbox_to_anchor=(0.0, -0.3))
    # plt.title('Clustering of %d Job Titles' % len(data))
    # plt.show()
    plt.savefig('clustering.png')
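# A minimal, self-contained sketch of the vectorize -> reduce -> cluster
# pipeline used above; the titles are made up and none of the project
# constants (N_CLUSTERS, COLORS, wordcount) are needed:
from sklearn.cluster import SpectralClustering
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer

titles = ['data engineer', 'data scientist', 'sales manager', 'sales rep']
counts = CountVectorizer().fit_transform(titles)             # sparse term counts
coords = TruncatedSVD(n_components=2).fit_transform(counts)  # dense 2-D points
labels = SpectralClustering(n_clusters=2).fit_predict(coords)
print(list(zip(titles, labels)))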
def main():
    from argparse import ArgumentParser
    from fileinput import input as fileinput
    arg_parser = ArgumentParser()
    arg_parser.add_argument("-e", dest="expression",
                            help="starting expression; if omitted, first defined term is used")
    arg_parser.add_argument("-g", dest="grammar", help="EBNF grammar file")
    arg_parser.add_argument("-v", dest="verbose", default=False, action="store_true",
                            help="show what the parser is doing")
    arg_parser.add_argument("file", default="-", nargs="?",
                            help="text file to be parsed")
    args = arg_parser.parse_args()
    if args.grammar:
        grammar = ""
        with open(args.grammar, "r") as fd:
            grammar = fd.read()
        parser = create_parser(grammar)
        if parser is None:
            print("error: grammar file cannot be parsed")
            exit(1)
        if args.expression:
            if args.expression not in parser.custom_defs:
                print("error: specified expression not defined")
                exit(1)
            term = args.expression
        else:
            term = PEGParser(EBNF_DEFS).parse(
                grammar, "Syntax").first_descendant("Definition/Identifier").match
    else:
        parser = PEGParser(EBNF_DEFS)
        term = "Syntax"
    parser.debug = args.verbose
    contents = "".join(fileinput(files=args.file))
    ast, chars_parsed = parser.partial_parse(contents, term)
    length = len(contents)
    if not ast or chars_parsed != length:
        print("failed: only parsed {} of {} characters\n".format(chars_parsed, length))
        exit(1)
    else:
        ast.pretty_print()
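# Invocation sketch, derived from the argparse flags above; the script and
# file names are assumptions:
#
#   python parser.py -g arithmetic.ebnf -e Expression input.txt
#   python parser.py -v < input.txt   # no -g: input is parsed as EBNF itself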
from fileinput import input as fileinput


def main():
    lines = []
    for line in fileinput():
        lines.append(line.strip())
    print(mergeex(lines))
# Bozo may be set to 1 if the feed has an error (but is still parsable).
# Since I don't own these feeds, there's no need to report this.
if d['bozo'] == 1:
    if (isinstance(d['bozo_exception'], URLError)  # network error
            or isinstance(d['bozo_exception'], SAXException)):  # XML parsing error
        print(f'URLError while parsing feed: {feed_url}')
        print_exception(None, d['bozo_exception'], None, chain=False)
        # These two errors are indicative of a critical parse failure,
        # so there's no value in continuing.
        return []
if d['status'] == 304:  # etag / modified indicates no new data
    return []
if d['status'] == 301:
    print(f'Feed {feed_url} has permanently moved to {d.href}')
    for line in fileinput('feed_list.txt', inplace=True):
        print(line.replace(feed_url, d.href), end='')
    cache[d.href] = cache[feed_url]
    del cache[feed_url]
    feed_url = d.href
if d['status'] == 410:
    print(f'Feed {feed_url} has been permanently deleted')
    for line in fileinput('feed_list.txt', inplace=True):
        if feed_url not in line:
            print(line, end='')
            continue
        print('# (Permanently deleted)')
        print('# ' + line, end='')
    return []
        / (judgements['true positive']
           + judgements['false positive']
           + judgements['false negative']))
    accuracy = ((judgements['true positive'] + judgements['true negative'])
                / sum(judgements.values()))
    quasi_f = 2 * ((precision * recall) / (precision + recall))
    coverage = ((judgements['true positive'] + judgements['incorrectly split'])
                / (judgements['true positive']
                   + judgements['incorrectly split']
                   + judgements['false negative']))
    return precision, recall, accuracy, quasi_f, coverage, error_analysis


if __name__ == '__main__':
    if version_info < (3, 5):
        print("Error: Python >=3.5 required.", file=sys.stderr)
        exit(1)
    args = docopt.docopt(__doc__)
    spl = Splitter(language=args['--lang'], verbose=args['--verbose'], args=args)
    if args['--evaluate']:
        # Fix rounding to make 2.345 mean 2.35
        L = lambda x: int(round(x + 0.001, 2) * 100)
        p, r, a, f, c, E = spl.evaluate(args['<file>'][0])
        print(".{} .{} .{} .{} .{}".format(*map(L, (p, r, a, f, c))), E)
    else:
        for line in fileinput(args['<file>']):
            if not line.strip():
                break
            print(line.strip(), spl.split(line.strip(), output="eval"), sep="\t")
from fileinput import input as fileinput


def main():
    lines = ()
    for line in fileinput():
        lines += line.strip(),
    print(mergeex(lines))
# Note: this `fileinput` appears to be a project-local grammar reader, not the
# stdlib module (stdlib FileInput has no non_terminator()/terminators() methods).
from fileinput import fileinput
from derivation.first import first_set
from derivation.follow import follow_set
from derivation.select import select_set
from derivation.anaslysis import analysis_table

firstTable = {}
followTable = {}
formulas = fileinput("in.txt")
for c in formulas.non_terminator():
    a = first_set(c, formulas, firstTable)
for c in formulas.non_terminator():
    a = follow_set(c, formulas, followTable, firstTable)
selectTable = select_set(formulas, followTable, firstTable)
terminators = formulas.terminators()
analysisTable = analysis_table(terminators, selectTable)


def analysis(s, analysisTable):
    # TODO: simplify this process with state-machine transitions
    print("Step\tStack\tRemaining input\tProduction used")
    stack = ['#', 'E']
    step = 0
    while True:
        if stack[-1] == s[0]:
            print(step + 1, end="\t")