def entry_point(): """ Application starting point which parses command line. Can be used from other modules too. """ import plac plac.call(main)
def test_kwargs():
    def main(opt, arg1, *args, **kw):
        print(opt, arg1)
        return args, kw
    main.__annotations__ = dict(opt=('Option', 'option'))
    argskw = plac.call(main, ['arg1', 'arg2', 'a=1', 'b=2'])
    assert argskw == [('arg2',), {'a': '1', 'b': '2'}], argskw
    argskw = plac.call(main, ['arg1', 'arg2', 'a=1', '-o', '2'])
    assert argskw == [('arg2',), {'a': '1'}], argskw
    expect(SystemExit, plac.call, main, ['arg1', 'arg2', 'a=1', 'opt=2'])
def main(verbose, interactive, multiline, serve, batch, test, fname=None, *extra):
    "Runner for plac tools, plac batch files and plac tests"
    baseparser = plac.parser_from(main)
    if fname is None:
        baseparser.print_help()
    elif sys.argv[1] == fname:  # script mode
        plactool = plac.import_main(fname)
        plactool.prog = os.path.basename(sys.argv[0]) + ' ' + fname
        out = plac.call(plactool, sys.argv[2:], eager=False)
        if plac.iterable(out):
            for output in out:
                print(output)
        else:
            print(out)
    elif interactive or multiline or serve:
        plactool = plac.import_main(fname, *extra)
        plactool.prog = ''
        i = plac.Interpreter(plactool)
        if interactive:
            i.interact(verbose=verbose)
        elif multiline:
            i.multiline(verbose=verbose)
        elif serve:
            i.start_server(serve)
    elif batch:
        run((fname,) + extra, 'execute', verbose)
    elif test:
        run((fname,) + extra, 'doctest', verbose)
        print('run %s plac test(s)' % (len(extra) + 1))
    else:
        baseparser.print_usage()
def call(main, default_level='INFO', stream=sys.stdout, process_names=False):
    '''Enable logging and start up a main method.

    Parameters
    ----------
    main : callable
        The main method to invoke after initialization.
    default_level : str, optional
        Logging level. Defaults to INFO.
    stream : file-like, optional
        Stream for logging output. Defaults to ``sys.stdout``.
    process_names : bool, optional
        If True, include process names in logging output. Defaults to False.
    '''
    enable_default_logging(
        default_level=default_level, stream=stream, process_names=process_names)
    from . import flags
    if flags.PARSER is None:
        return plac.call(main)
    args, rest = parse_known_args()
    if rest:
        logging.debug('unknown arguments: %s', rest)
    logging.debug('running with arguments:')
    kwargs = vars(args)
    for k in sorted(kwargs):
        logging.debug('--%s = %s', k, kwargs[k])
    return main(args)
def manage():
    @plac.annotations(
        config_file=plac.Annotation("paster config file", 'positional', None, str),
        include_file=plac.Annotation(
            "include file, containing test file to run (for pycharm interaction)",
            'option', 'i', str),
    )
    def _manage(config_file, include_file=None, *args):
        """
        manage entrypoint for django, using the paster configfile
        ``*args`` must be kept here to allow dynamic arguments in plac
        """
        # when we're operating in a cli, we don't have a request id, but still
        # want to have a unique id, therefore we'll create one here
        # print('create new uuid: %s' % fallback_uuid())

        # filter out all arguments that have been applied by plac already
        leftover_args = [arg for arg in sys.argv
                         if arg not in (config_file, include_file, '-i', '--include-file')]
        # override ``sys.argv`` as nose or django will directly access them
        sys.argv = leftover_args

        log.debug('bootstrapping from config: %s', config_file)
        boostrap_from_config(config_file)

        if include_file:
            with open(include_file) as f:
                additional_file_arg = f.read().splitlines()[0]
            leftover_args.append(additional_file_arg)

        log.debug('leftover arguments: %s', leftover_args)

        from django.core import management
        from django.db.utils import OperationalError
        try:
            management.execute_from_command_line(leftover_args)
        except OperationalError as e:
            log.error('db error: %s', e)
            from django.db import connection
            cursor = connection.cursor()
            cursor.execute("SELECT * FROM pg_stat_activity")
            for row in cursor.fetchall():
                log.error(' %s', row)
            log.error('ERROR: %s', e)
            # TODO amb: dunno why this happens, but it constantly fails the build
            # raise e

    plac.call(_manage, eager=False)
def entry_point(): """ Application starting point which parses command line. Can be used from other modules too. """ exit_code = plac.call(main) sys.exit(exit_code)
def call_plac(f):
    "Decorator to create a simple CLI from `func` using `plac`"
    name = inspect.currentframe().f_back.f_globals['__name__']
    if name == '__main__':
        import plac
        res = plac.call(f)
        if callable(res):
            res()
    else:
        return f
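# A hedged usage sketch for the decorator above; the `greet` function is
# hypothetical and not part of the original source. When the defining module
# runs as __main__, plac parses sys.argv and invokes the function right away;
# when the module is imported, the function is returned unchanged.
@call_plac
def greet(name: ("name to greet", 'positional')):
    print("Hello, %s!" % name)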
def entry_point(): """ Enter the via setup.py entry_point declaration. Handle UNIX style application exit values """ import plac exitcode = plac.call(main) sys.exit(exitcode)
def main(interactive, *subcommands):
    """
    This script works both interactively and non-interactively.
    Use .help to see the internal commands.
    """
    if interactive:
        plac.Interpreter(ishelve.main).interact()
    else:
        for out in plac.call(ishelve.main, subcommands):
            print(out)
def main(): """ Defined as the `gossamer` command in setup.py. Runs the argument parser and passes settings to :func:`gossamer.main.dispatcher`. """ try: sys.exit(plac.call(initialize)) except KeyboardInterrupt: sys.stdout.write('\n') sys.stdout.flush() sys.exit(1)
def call(obj, arglist=sys.argv[1:], eager=True, config=None,
         default_section=None, gnu=True):
    if gnu:
        plac.ArgumentParser.add_argument = add_gnu_argument

    if config is None:
        return plac.call(obj, arglist=arglist, eager=eager)

    argparser = plac.parser_from(obj)
    argnames = argparser.argspec.args
    defaults = argparser.argspec.defaults

    cp = config_parser_from(obj, config, default_section)
    cfg = dict(zip_longest(argnames, defaults))
    ini_values = _read_config(cp, config, default_section)

    for k in obj.__annotations__.keys():
        a = plac.Annotation.from_(obj.__annotations__[k])
        if a.type and k in ini_values:
            if a.type is type(True):
                try:
                    ini_values[k] = cp._convert_to_boolean(ini_values[k])
                except ValueError:
                    argparser.print_usage(sys.stderr)
                    _print_exit(
                        "{}: error: {}={} failed conversion to <type 'bool'> in:\n{}\n".format(
                            argparser.prog, k, ini_values[k], config))
            else:
                try:
                    ini_values[k] = a.type(ini_values[k])
                except ValueError:
                    argparser.print_usage(sys.stderr)
                    _print_exit(
                        '{}: error: {}={} failed conversion to {} in:\n{}\n'.format(
                            argparser.prog, k, ini_values[k], a.type, config))

    cfg.update(ini_values)

    if sys.version >= '3':
        items = cfg.items()
    else:
        items = cfg.iteritems()
    argparser.set_defaults(**dict((k, v) for k, v in items))

    cmd, result = argparser.consume(arglist)
    if plac.iterable(result) and eager:  # listify the result
        return list(result)
    return result
def main():
    plac.call(entry_point)
yield "%dM iterations" % n x, y = random(), random() if x * x + y * y < 1: counts += 1 yield (4.0 * counts) / npoints def run(self): tasks = self.i.tasks() for t in tasks: t.run() try: total = 0 for task in tasks: total += task.result except: # the task was killed print(tasks) return return total / self.n_cpu if __name__ == "__main__": pc = plac.call(PiCalculator) pc.submit_tasks() try: import time t0 = time.time() print("%f in %f seconds " % (pc.run(), time.time() - t0)) finally: pc.close()
def cli_main():
    # The argument parser is inferred - it also deals with too few or too many func args
    plac.call(main)
    weights = weightsn / len(refs)

    distance_matrix = np.zeros((len(wvoc), len(wvoc)), dtype=np.double)
    for i, o in enumerate(vc.get_feature_names()):
        for j, c in enumerate(vc.get_feature_names()):
            distance_matrix[i, j] = np.sqrt(
                np.sum(((weights[i] * wvvecs[vocabdict[o]]) -
                        (weights[j] * wvvecs[vocabdict[c]]))**2))

    if np.sum(distance_matrix) == 0.0:
        return float('inf')

    v_obj = v_obj.astype(np.double)
    v_desc = v_desc.astype(np.double)
    v_obj /= v_obj.sum()
    v_desc /= v_desc.sum()
    distance_matrix = distance_matrix.astype(np.double)
    # distance_matrix /= distance_matrix.max()
    score = emd(v_obj, v_desc, distance_matrix)

    return score


if __name__ == '__main__':
    import plac
    plac.call(objdescrefs)
def main():
    plac.call(cli)
    # Minimum number of words per sentence
    if len(tokens) < MIN_TOKEN_COUNT:
        return False

    # Most tokens should be words
    if sum([t.is_alpha for t in tokens]) / len(tokens) < MIN_WORD_TOKENS_RATIO:
        return False

    text = spacy_sentence.text

    # Most characters should be letters, not numbers and not special characters
    if sum([c.isalpha() for c in text]) / len(text) < MIN_LETTER_CHAR_RATIO:
        return False

    return True


def process_paper_record(paper_record, out_file, with_body):
    sentences = _paper_record_to_sentences(paper_record, with_body)
    # print(" paper {} with {} sents".format(paper_record['id'], len(sentences)))
    for s in sentences:
        s_text = re.sub(r'\s+', ' ', s.text).strip()
        if s_text != "":
            out_file.write("{}\n".format(s_text))
    if len(sentences) > 0:
        out_file.write("\n")


plac.call(main, sys.argv[1:])
logger.debug("pip package: {}".format(pip_package)) cmd = [ sys.executable, "-m", "pip", "install", "--no-deps", "--no-cache-dir", pip_package, ] logger.debug(" ".join(cmd)) if subprocess.call(cmd, env=os.environ.copy()) == 0: logger.debug("linking: {} to {}".format(model_name, lang)) package_path = get_package_path(model_name) link(model_name, lang, force=True, model_path=package_path) else: raise Exception("Error to download {}".format(lang)) elif lang != value: logger.debug("downloading {}".format(value)) download(value) logger.debug("linking: {} to {}".format(value, lang)) package_path = get_package_path(value) link(value, lang, force=True, model_path=package_path) else: logger.debug("downloading {}".format(value)) download(value) if __name__ == "__main__": plac.call(download_spacy_models, sys.argv[1:])
def main():
    plac.call(run)
# example10.py
import plac


@plac.annotations(
    operator=("The name of an operator", 'positional', None, str, ['add', 'mul']),
    numbers=("A number", 'positional', None, float, None, "n"))
def main(operator, *numbers):
    "A script to add and multiply numbers"
    if operator == 'mul':
        op = float.__mul__
        result = 1.0
    else:  # operator == 'add'
        op = float.__add__
        result = 0.0
    for n in numbers:
        result = op(result, n)
    return result


if __name__ == '__main__':
    print(plac.call(main))
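# Rough usage sketch for example10.py; the shell invocations below are assumed,
# and the outputs are what the code above implies rather than captured output:
#   $ python example10.py add 1 2 3
#   6.0
#   $ python example10.py mul 2 3 4
#   24.0
# The same logic can be exercised programmatically, e.g.
# plac.call(main, ['add', '1', '2', '3']) is expected to return 6.0.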
        ('http://localhost:59125/process?INPUT_TEXT={}' +
         '&INPUT_TYPE=TEXT' +
         '&OUTPUT_TYPE=PHONEMES&LOCALE=en_US').format(tx))
    soup = BeautifulSoup(r.text, 'lxml')
    for sent in soup('s'):
        for token in sent('t'):
            try:
                yield dict(text=token.text.strip(),
                           pos=token['pos'],
                           phonetics=token['ph'])
            except:
                pass


def text8_to_phon():
    show_every = 5000
    dataset = api.load('text8')
    total = 0
    for chunk in dataset:
        for i in range(0, len(chunk), 100):
            text = " ".join(chunk[i:i + 100])
            if text.strip():
                for token in to_phon(text):
                    print(token['text'], token['pos'], token['phonetics'])
            total += 100
            if total % show_every == 0:
                print(total, text, file=sys.stderr)


if __name__ == "__main__":
    plac.call(text8_to_phon)
def cli_main():
    plac.call(main)
def main():
    plac.call(dispatch)
    for file in [
        i
        for s in [glob(x) for x in ["*.py", "dataloader/*.py", "model/*.py"]]
        for i in s
    ]:
        shutil.copy(file, wandb.run.dir)

    trainer = pl.Trainer(gpus=hparams.gpus, logger=[wandb_logger])  # , tb_logger],

    # ------------------------
    # 3 START TESTING
    # ------------------------
    trainer.test(model)


if __name__ == "__main__":
    """
    Script entrypoint.
    """
    # Converting dictionary to namespace
    hyperparams = Namespace(**plac.call(get_hparams, eager=False))
    # Set the evaluation flag in hyperparameters
    hyperparams.eval = True

    # ---------------------
    # RUN TESTING
    # ---------------------
    main(hyperparams)
frontend=("the frontend to be used to interact with the generation program", 'positional', None, str, frontends), cudaminer=(__cudaminer_docstring__, 'positional', None, str), output_scan_interval=(__output_scan_interval_docstring__, 'positional', None, int, ), output_scan_max_count=(__output_scan_max_count_docstring__, 'positional', None, int), hash_rate_count=(__hash_rate_count_docstring__, 'positional', None, int, ), debug=("Turn on debugging output", "flag"), version=("Print information about the version of the software to stdout and exit", "flag"), ) def cudaminer_param_checker(frontend=frontend_default, cudaminer=cudaminer_default, output_scan_interval=output_scan_interval_default, output_scan_max_count=output_scan_max_count_default, hash_rate_count=hash_rate_count_default, debug=debug_default, version=False): """ @args cudaminer %(__cudaminer_docstring__)s @args output_scan_interval %(__output_scan_interval_docstring__)s @args output_scan_max_count %(__output_scan_max_count_docstring__)s @args hash_rate_count %(__hash_rate_count_docstring__)s""" % {"__cudaminer_docstring__": __cudaminer_docstring__, "__output_scan_interval_docstring__": __output_scan_interval_docstring__, "__output_scan_interval_docstring__": __output_scan_interval_docstring__, "__output_scan_max_count_docstring__": __output_scan_max_count_docstring__, "__hash_rate_count_docstring__": __hash_rate_count_docstring__} if version: print(cudaminer_param_checker_globals.app_version_string) return if debug == True: logger.setLevel(logging.DEBUG) ch.setLevel(logging.DEBUG) if frontend == FRONTEND_CONSOLE_GUI: visualize_cudaminer_param_checker_results_console_gui() elif frontend == FRONTEND_WXPYTHON_GUI: visualize_cudaminer_param_checker_results_wxpython_gui() else: raise ValueError("frontend has to be one of '%s', but is '%s'" % (str(frontends), frontend)) if __name__ == "__main__": plac.call(cudaminer_param_checker)
    for rownum in range(data_start_row, s.nrows):
        bind_params = s.row_values(rownum)
        log.debug('DML %r, %r', tmp_sql, bind_params)
        db_cursor.execute(tmp_sql, bind_params)
    db_conn.commit()

    # Only do this if we're not working on an externally-opened db
    if isinstance(outfile, string_types):
        db_cursor.close()
        db_conn.close()


def db2xls(infile, outfile):
    """
    Convert an sqlite db into an xls file.

    Not implemented! Some issues: one needs to be able to figure out
    what the table names are!
    """
    raise NotImplementedError


if __name__ == "__main__":
    if sys.version_info >= (3, ):
        argv = sys.argv
    else:
        fse = sys.getfilesystemencoding()
        argv = [i.decode(fse) for i in sys.argv]
    # Apparently this thing's pretty magical.
    import plac
    plac.call(xls2db, argv[1:])
def script(main):
    """Call a script main function."""
    enable_default_logging()
    plac.call(main)
def _to_jsonld(microdata):
    context = 'http://schema.org'
    properties = 'properties_'
    typestr = 'type'
    jsonld_data = {}
    jsonld_data["@context"] = context
    for data in microdata:
        data = flatten_json.flatten(data)
        for key in data.keys():
            value = data[key]
            if context in value:
                value = value.replace(context + "/", "")
            if properties in key:
                keyn = key.replace(properties, "")
                jsonld_data[keyn] = value
                if typestr in keyn:
                    keyn = keyn.replace(typestr, "@" + typestr)
                    jsonld_data[keyn] = value
            if typestr == key:  # compare by value, not identity
                keyn = key.replace(typestr, "@" + typestr)
                jsonld_data[keyn] = value
        del data
    jsonld_data = flatten_json.unflatten(jsonld_data)
    return [jsonld_data]


if __name__ == "__main__":
    res = plac.call(retrieve_claimreview)
    print(res)
# example9.py
def main(verbose: ('prints more info', 'flag', 'v'),
         dsn: 'connection string'):
    if verbose:
        print('connecting to %s' % dsn)
    # ...


if __name__ == '__main__':
    import plac; plac.call(main)
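# Rough usage sketch for example9.py (assumed invocation; output implied by the code):
#   $ python example9.py -v "dbname=test user=postgres"
#   connecting to dbname=test user=postgres
#   $ python example9.py "dbname=test user=postgres"   # without -v nothing is printed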
    year = int(y)
    month = int(m)
    start = datetime(year, month, 1)
    # roll over the year boundary so December does not raise a ValueError
    if month == 12:
        end = datetime(year + 1, 1, 1)
    else:
        end = datetime(year, month + 1, 1)
    current = start
    n = 0
    while current <= end:
        files = calipso.l1_files(current.year, current.month, current.day, '*')
        if files:
            n += len(files)
            for f in files:
                try:
                    linkedfile = target + '/' + os.path.basename(f)
                    os.symlink(f, linkedfile)
                except OSError:
                    # catch already-linked error silently
                    pass
        current += timedelta(days=1)
    print(n, 'files linked in ', target)
    return


if __name__ == '__main__':
    import plac
    plac.call(rake_caliop_l1)
def main(dry_run: ('Dry run', 'flag', 'd')):
    if dry_run:
        print('Doing nothing')
    else:
        print('Doing something')


if __name__ == '__main__':
    import plac; plac.call(main)
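# Rough usage sketch (the file name is assumed; output implied by the code above):
#   $ python dry_run_example.py -d
#   Doing nothing
#   $ python dry_run_example.py
#   Doing something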
def plac_call_main():
    return plac.call(main)
def main():
    plac.call(generate_hashes)
def plac_call_main():
    try:
        return plac.call(main)
    except KeyboardInterrupt:
        logging.error("Canceled.")
        sys.exit(1)
    plt.plot(group['window-size'], group['ar-train-error'],
             label=' AR train', color='b')
    plt.fill_between(group['window-size'].values,
                     group['np-test-error'].values - group['np-test-std'].values,
                     group['np-test-error'].values + group['np-test-std'].values,
                     alpha=0.1, linewidth=0, color='r')
    plt.plot(group['window-size'], group['np-test-error'],
             label=' Eulerian test', color='r')
    plt.legend(loc='center right')
    plt.suptitle('%i Hour Forecast Performance v. Window Size' % delta_time,
                 fontsize='large')
    plt.xlabel('window size', fontsize='large')
    plt.ylabel('RMSE $W/m^2$', fontsize='large')

    plt.subplot(212)
    for indx, group in gb:
        plt.plot(group['window-size'],
                 100 * (group['np-test-error'].values - group['ar-test-error'].values)
                 / group['np-test-error'].values,
                 label=' AR test', color='g')
        plt.plot(group['window-size'],
                 100 * (group['np-train-error'].values - group['ar-train-error'].values)
                 / group['np-test-error'].values,
                 label=' AR train', color='b')
    plt.xlabel('window size', fontsize='large')
    plt.ylabel('% better than Eulerian', fontsize='large')
    plt.legend(loc='lower right', fontsize='large')
    plt.savefig(use_path[:-4] + '.png')


if __name__ == '__main__':
    # plac.call(plot_performance)
    plac.call(plot_weights)
import jsonlines
from pymongo import MongoClient
import plac
import sys


@plac.annotations(coll_name=("Collection to load into", "option", "c", str),
                  db_name=("Database to load into", "option", "d", str))
def delete(coll_name, db_name="gsr"):
    # quick script for updating Mongo tasks
    client = MongoClient('mongodb://localhost:27017/')
    db = client[db_name]
    coll = db[coll_name]
    count = coll.count()
    conf = input(
        "You're about to delete the collection {0}, which has {1} records. "
        "Please type this name to confirm: ".format(coll_name, count))
    if conf != coll_name:
        print("Bye!")
        sys.exit(0)
    if conf == coll_name:
        coll.delete_many({})
        print("Deleted all records from ", coll_name)


if __name__ == "__main__":
    plac.call(delete)
    def main(self, command, debug=False, local=local, *args):
        if debug:
            logging.basicConfig(level=logging.DEBUG)
        self.local = local
        self.load()
        self.proxies = {}
        if command == self.COMMAND_CONFIGURE:
            plac.call(self.configure, args)
        elif command == self.COMMAND_LOGIN:
            plac.call(self.login, args)
        elif command == self.COMMAND_CREATE_STACK:
            plac.call(self.create_stack, args)
        elif command == self.COMMAND_UPDATE_STACK:
            plac.call(self.update_stack, args)
        elif command == self.COMMAND_UPDATE_STACK_ACL:
            plac.call(self.update_stack_acl, args)
        elif command == self.COMMAND_CREATE_OR_UPDATE_STACK:
            plac.call(self.create_or_update_stack, args)
        elif command == self.COMMAND_GET_STACK_ID:
            plac.call(self.get_stack_id, args)
        elif command == self.COMMAND_UPDATE_REGISTRY:
            plac.call(self.update_registry, args)
        elif command == self.COMMAND_REQUEST:
            plac.call(self.request, args)
def main():
    try:
        plac.call(run)
    except KeyboardInterrupt:
        pass
def test():
    assert plac.call(ishelve.main, ['.clear']) == ['cleared the shelve']
    assert plac.call(ishelve.main, ['a=1']) == ['setting a=1']
    assert plac.call(ishelve.main, ['a']) == ['1']
    assert plac.call(ishelve.main, ['.delete=a']) == ['deleted a']
    assert plac.call(ishelve.main, ['a']) == ['a: not found']
def main():
    plac.call(get_val)
#    Updated: 2018/11/17 09:29:56 by gmonnier         ###   ########.fr       #
#                                                                             #
# *************************************************************************** #

import sys

import numpy as np
import plac

import data


@plac.annotations(
    dataset=plac.Annotation("Dataset", 'positional'),
    alpha=plac.Annotation('Set the learning rate', 'option', 'l', float),
    n_epoch=plac.Annotation('Set the number of epochs', 'option', 'e', int),
)
def learning(dataset, alpha, n_epoch):
    dat = data.Data(dataset, ',', alpha, n_epoch)
    dat.display()
    dat.feature_scaling()
    print("Starting to learn parameters...")
    dat.gradient_descent()
    dat.write_thetas()
    print("Parameters found and stored in file '.parameters'")
    dat.draw_line()
    dat.display_gradient_descent()


if __name__ == "__main__":
    plac.call(learning)
def main():
    plac.call(conv)
if __name__ == '__main__':
    import plac
    import sys
    from spacy.cli import download, link, info, package, train, convert
    from spacy.cli import vocab, profile, evaluate, validate
    from spacy.util import prints

    commands = {
        'download': download,
        'link': link,
        'info': info,
        'train': train,
        'evaluate': evaluate,
        'convert': convert,
        'package': package,
        'vocab': vocab,
        'profile': profile,
        'validate': validate
    }
    if len(sys.argv) == 1:
        prints(', '.join(commands), title="Available commands", exits=1)
    command = sys.argv.pop(1)
    sys.argv[0] = 'spacy %s' % command
    if command in commands:
        plac.call(commands[command])
    else:
        prints("Available: %s" % ', '.join(commands),
               title="Unknown command: %s" % command, exits=1)
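# Rough invocation sketch for the dispatcher above (the file name is assumed;
# the subcommands are the standard spaCy 2.x CLI commands imported above):
#   $ python spacy_cli.py validate
#   $ python spacy_cli.py download en_core_web_sm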
def entry_point():
    exit_code = plac.call(main)
    return exit_code
project_base = Path('/home/jeremy/Projects')
r = Redis()


def create_project(name):
    # note: cur_dir, filename, project, snake_case and run are expected to be
    # defined elsewhere in this module
    project_name = name.lower().replace(' ', '-')
    project_key = f'project:{project_name}'
    document_path = cur_dir.joinpath('content', filename)
    redis_key = f'{project}:content:{snake_case(document_path.stem)}'
    args = ['--standalone']
    args.append(f'-o {str(document_path)}')
    with TemporaryDirectory() as tmpdir:
        tmpfile = Path(tmpdir, filename)
        tmpfile.write_text(input())
        args.append(str(tmpfile))
        print(args)
        try:
            rs = run('pandoc', args, universal_newlines=True)
        except Exception as e:
            print(e)
            return False
    print(f'created document {filename}')
    print(redis_key)


if __name__ == '__main__':
    plac.call(create_document)
def main():
    sys.exit(plac.call(_main))
import plac, asyncio

import ffdl


def main(url_or_id: ("Fanfiction.net URL or just the ID")):
    story_id = ffdl.get_id(url_or_id)
    asyncio.run(ffdl.create_epub(story_id))


if __name__ == "__main__":
    plac.call(main, version=ffdl.__version__)
    def do_deleted(self, line):
        self.addfilter('deleted', useflag('alloc', 0))

    def do_unalloc(self, line):
        self.addfilter('unalloc', useflag('unalloc', 1))

    def do_reset(self, line):
        self.filters = []
        self.setprompt()
        self.tree.clearfilters()

    def do_EOF(self, line):
        sys.exit(0)

    def emptyline(self):
        pass


def main(ddpath, dbpath, mountpoint):
    mytskfuse = tskfuse.TskFuse(ddpath, dbpath)
    filters = Filters(mytskfuse.tree)
    t = multiprocessing.dummy.Process(target=filters.cmdloop)
    t.daemon = True
    t.start()
    fuse.FUSE(mytskfuse, mountpoint, foreground=True, allow_other=True)
    os.system('stty sane')


if __name__ == '__main__':
    plac.call(main, sys.argv[1:])
#@+leo-ver=5-thin
#@+node:maphew.20120622231339.1567: * @file a1.py
''' First attempt at using Plac module '''
import plac


@plac.annotations(
    command=("command to run", 'positional', None, str,
             ['install', 'remove', 'update', 'setup']),
    packages=("list of packages", 'positional', None, str, None, 'pkg'))
def main(command, *packages):
    "Operate on packages"
    yield "Running %s on %s" % (command, packages)
    if command == 'install':
        yield 'Installing %s' % packages
    elif command == 'remove':
        yield 'Removing %s' % packages
    elif command == 'update':
        yield 'Updating package list from mirror'
    elif command == 'setup':
        yield 'Initializing new Osgeo4W install'


if __name__ == '__main__':
    for output in plac.call(main):
        print output
#@-leo
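# Rough usage sketch for a1.py (assumed shell invocation; output follows the yields above,
# shown for a single package):
#   $ python a1.py install gdal
#   Running install on ('gdal',)
#   Installing gdal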
    if stats['failed'] == 0:
        print colored('All %d tests passed' % stats['total'], 'white', 'on_green')
    else:
        print colored('%d test(s) out of %d failed' % (stats['failed'], stats['total']),
                      'white', 'on_red')


def accept_tests(files):
    for f in files:
        corrected_content = ''
        content = open(f).read()
        sections = content.split('++++')
        i = 0
        for section in sections:
            if i > 0:
                corrected_content += "\n++++\n"
            code, _unused = map(strip, section.split('----'))
            tokens_actual = prettyprint(code).strip()
            corrected_content += "%s\n----\n%s" % (code, tokens_actual)
            i += 1
        open(f, 'w').write(corrected_content)
        print colored("Accepted %s" % f, 'green')


matched_files = get_matched_files(files)
run_tests(matched_files) if not accept else accept_tests(matched_files)

if __name__ == '__main__':
    import plac
    plac.call(run)
with msg.loading(f"Loading model '{model}'..."): nlp = spacy.load(model) texts = (eg["text"] for eg in srsly.read_jsonl(data)) n_docs = 0 n_words = 0 start_time = timer() for doc in nlp.pipe(texts): n_docs += 1 n_words += len(doc) end_time = timer() wps = int(n_words / (end_time - start_time)) result = [ ("Docs", f"{n_docs:,}"), ("Words", f"{n_words:,}"), ("Words/s", f"{wps:,}"), ] msg.table(result, widths=(7, 12), aligns=("l", "r")) if __name__ == "__main__": opts = {"train": train_model, "evaluate": evaluate_model, "wps": wps} cmd = sys.argv.pop(1) if cmd not in opts: msg.fail(f"Unknown command: {cmd}", f"Available: {', '.join(opts)}", exits=1) try: plac.call(opts[cmd]) except KeyboardInterrupt: msg.warn("Stopped.", exits=1)
from __future__ import with_statement
from Tkinter import *
from importer3 import FakeImporter


def taskwidget(root, task, tick=500):
    "A Label widget showing the output of a task every 500 ms"
    sv = StringVar(root)
    lb = Label(root, textvariable=sv)

    def show_outlist():
        try:
            out = task.outlist[-1]
        except IndexError:  # no output yet
            out = ''
        sv.set('%s %s' % (task, out))
        root.after(tick, show_outlist)

    root.after(0, show_outlist)
    return lb


def monitor(tasks):
    root = Tk()
    for task in tasks:
        task.run()
        taskwidget(root, task).pack()
    root.mainloop()


if __name__ == '__main__':
    import plac
    with plac.Interpreter(plac.call(FakeImporter)) as i:
        tasks = [i.submit('import_file f1'), i.submit('import_file f2')]
        monitor(tasks)
def overlap_tokens(doc, other_doc):
    """Get the tokens from the original Doc that are also in the comparison Doc."""
    overlap = []
    other_tokens = [token.text for token in other_doc]
    for token in doc:
        if token.text in other_tokens:
            overlap.append(token)
    return overlap


if __name__ == '__main__':
    plac.call(main)


@plac.annotations(
    model=("Model name. Defaults to blank 'en' model.", "option", "m", str),
    output_dir=("Optional output directory", "option", "o", Path),
    n_iter=("Number of training iterations", "option", "n", int))
def main(model=None, output_dir=None, n_iter=100):
    """Load the model, set up the pipeline and train the entity recognizer."""
    if model is not None:
        nlp = spacy.load(model)  # load existing spaCy model
        print("Loaded model '%s'" % model)
    else:
        nlp = spacy.blank('en')  # create blank Language class
        print("Created blank 'en' model")

    # create the built-in pipeline components and add them to the pipeline
    # nlp.create_pipe works for built-ins that are registered with spaCy
    if 'ner' not in nlp.pipe_names:
        ner = nlp.create_pipe('ner')
def entry_point():
    plac.call(main)
    test_tags_fpath = os.path.join(cross_val_folder, 'user_test_tags.dat')
    user_test_tags = load_dict_from_file(test_tags_fpath)

    return user_items_to_filter, user_validation_tags, user_test_tags


@plac.annotations(db_fpath=plac.Annotation('H5 database file', type=str),
                  db_name=plac.Annotation('H5 database name', type=str),
                  cross_val_folder=plac.Annotation(
                      'Folder with cross validation files', type=str),
                  probs_folder=plac.Annotation('Probabilities Folder', type=str))
def main(db_fpath, db_name, cross_val_folder, probs_folder):
    # get cross validation dicts
    user_items_to_filter, user_validation_tags, user_test_tags = \
        load_train_test_validation(cross_val_folder)

    with AnnotReader(db_fpath) as reader:
        reader.change_table(db_name)
        annot_filter = FilteredUserItemAnnotations(user_items_to_filter)
        est = PrecomputedEstimator(probs_folder)
        run_exp(user_validation_tags, user_test_tags, est, annot_filter, reader)


if __name__ == '__main__':
    sys.exit(plac.call(main))
    val_tags_fpath = os.path.join(cross_val_folder, 'user_val_tags.dat')
    user_validation_tags = load_dict_from_file(val_tags_fpath)

    test_tags_fpath = os.path.join(cross_val_folder, 'user_test_tags.dat')
    user_test_tags = load_dict_from_file(test_tags_fpath)

    return user_items_to_filter, user_validation_tags, user_test_tags


@plac.annotations(
    db_fpath=plac.Annotation('H5 database file', type=str),
    db_name=plac.Annotation('H5 database name', type=str),
    cross_val_folder=plac.Annotation('Folder with cross validation files', type=str),
    probs_folder=plac.Annotation('Probabilities Folder', type=str))
def main(db_fpath, db_name, cross_val_folder, probs_folder):
    # get cross validation dicts
    user_items_to_filter, user_validation_tags, user_test_tags = \
        load_train_test_validation(cross_val_folder)

    with AnnotReader(db_fpath) as reader:
        reader.change_table(db_name)
        annot_filter = FilteredUserItemAnnotations(user_items_to_filter)
        est = PrecomputedEstimator(probs_folder)
        run_exp(user_validation_tags, user_test_tags, est, annot_filter, reader)


if __name__ == '__main__':
    sys.exit(plac.call(main))
    rvals = defaultdict(lambda: defaultdict(int))
    plvals = defaultdict(lambda: defaultdict(int))
    prvals = defaultdict(lambda: defaultdict(int))
    roots = defaultdict(int)
    seen_pos = set(['ROOT', 'NONE'])
    for sent in sents:
        rdeps = defaultdict(list)
        for i, (w, p, h, l) in enumerate(sent):
            seen_pos.add(p)
            if i > int(h):
                rdeps[int(h)].append(i)
        for head, children in rdeps.items():
            if head == -1:
                head_pos = 'ROOT'
            else:
                head_pos = sent[head][1]
            sib_pos = 'NONE'
            children.sort()
            for i, child in enumerate(children):
                rvals[head_pos][(sib_pos, sent[child][1])] += 1
                sib_pos = sent[child][1]
    seen_pos = list(sorted(seen_pos))
    for head in seen_pos:
        for sib in seen_pos:
            for child in seen_pos:
                print head, sib, child, rvals[head][(sib, child)]


if __name__ == '__main__':
    plac.call(main)
import plac
import matplotlib.pyplot as plt
from os import path


@plac.annotations(
    in_loc=('Location of input file'),
    out_loc=('Location to save output file')
)
def main(in_loc, out_loc):
    # Empty lists for values of k and corresponding WSSSEs
    ks = []
    WSSSEs = []
    with open(in_loc, 'r') as f:
        # Iterate through lines in file, adding k and WSSSE values to lists
        for line in f:
            line = line.split()
            ks.append(int(line[0]))
            WSSSEs.append(float(line[1]))
    # Plot and save figure
    plt.plot(ks, WSSSEs)
    plt.savefig(path.join(out_loc, 'WSSSEs.png'), bbox_inches='tight')


if __name__ == '__main__':
    plac.call(main)
def test_yield():
    def main():
        for i in (1, 2, 3):
            yield i
    assert plac.call(main, []) == [1, 2, 3]
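# A small companion sketch (not from the original test suite): with eager=False,
# plac.call is expected to hand back the generator itself instead of listifying it.
def test_yield_lazy():
    def main():
        for i in (1, 2, 3):
            yield i
    gen = plac.call(main, [], eager=False)
    assert list(gen) == [1, 2, 3]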