def cli(runs: List[RunEntry], logger: Logger, columns: List[str], column_width: int, porcelain, *args, **kwargs):
    """Render the given runs as a table (or porcelain output) and print it."""
    rendered = string(
        runs=runs,
        columns=columns,
        porcelain=porcelain,
        column_width=column_width,
    )
    logger.print(rendered)
def cli(runs: List[RunEntry], logger: Logger, pprint: bool, depth, *args, **kwargs):
    """Format the runs (optionally pretty-printed, limited to `depth`) and print."""
    output = string(runs=runs, pprint=pprint, depth=depth)
    logger.print(output)
def cli(runs: List[RunEntry], db: DataBase, logger: Logger, key: str, porcelain: bool, *_, **__):
    """Look up `key` on each run and print the result.

    `db` is accepted for interface parity with the other subcommands but is
    not used directly here.
    """
    text = string(runs=runs, key=key, porcelain=porcelain)
    logger.print(text)
def cli(runs: List[RunEntry], flags: List[str], logger: Logger, db: DataBase, prefix: str, path: Optional[PurePath], description: str, *args, **kwargs):
    """Print every line produced by strings() for this operation."""
    lines = strings(
        db=db,
        runs=runs,
        flags=flags,
        prefix=prefix,
        path=path,
        description=description,
    )
    for line in lines:
        logger.print(line)
def cli(runs: List[RunEntry], logger: Logger, exclude: List[str], prefix: str, args: List[str], *_, **__):
    """Reconstruct a spec object from the runs' commands and dump it as JSON to stdout."""
    if not runs:
        logger.exit("No commands found.")
    excluded = set(exclude)
    commands = [Command.from_run(run).exclude(prefix, *args) for run in runs]
    spec = get_spec_obj(
        commands=commands, exclude=excluded, prefix=prefix, logger=logger
    ).dict()
    # Drop falsy entries so the emitted spec stays minimal.
    spec = {key: value for key, value in spec.items() if value}
    print(json.dumps(spec, sort_keys=True, indent=4))
def get_spec_obj(commands: List[Command], exclude: Set[str], prefix: str, logger: Logger):
    """Build a SpecObj summarizing the positionals, optional args, and flags of `commands`.

    Exits via `logger.exit` if the commands do not all share the same
    positional arguments.
    """

    def parse(token):
        # Coerce numeric-looking values to int/float; leave everything else alone.
        try:
            number = float(token)
        except ValueError:
            return token
        return int(number) if number.is_integer() else number

    def take_first(pairs):
        # Keep only the first element of each pair, numerically parsed.
        return tuple(parse(first) for first, _ in pairs)

    def squeeze(value):
        # Unwrap a one-element container, unless that element is itself a tuple.
        try:
            (only,) = value
        except ValueError:
            return value
        return value if isinstance(only, tuple) else only

    positionals = commands[0].positionals
    args = defaultdict(set)
    flags = set()
    for command in commands:
        if command.positionals != positionals:
            logger.exit(
                "Command:",
                commands[0],
                "and",
                command,
                "do not have the same positional arguments:",
                sep="\n",
            )
        for (key, _), values in command.optionals:
            args[key].add(squeeze(take_first(values)))
        flags.add(take_first(command.flags))
    flags = list(flags)
    args = {key: squeeze(list(values)) for key, values in args.items()}
    command = "".join(part for group in positionals for part in group)
    return SpecObj(command=command, args=args, flags=flags)
def get_table(tag, db_path, patterns, smoothing, tensorboard_dir, use_cache):
    """Join tensorboard rewards with run metadata.

    Returns a (field_names, rows) pair, where rows is a lazy iterator of
    namedtuples keyed by the reward paths.
    """
    logger = Logger(quiet=False)
    with DataBase(db_path, logger) as db:
        entries = {entry.path: entry for entry in db.get(patterns)}
        dirs = [Path(tensorboard_dir, path) for path in entries]
        data_points = crawl(
            dirs=dirs, tag=tag, smoothing=smoothing, use_cache=use_cache, quiet=True)
        rewards = {
            event_file.relative_to(tensorboard_dir).parent: data
            for data, event_file in data_points
        }

        def format_flag_name(name):
            # '--foo-bar' -> 'foo_bar' so it is a valid namedtuple field name
            return name.lstrip('-').replace('-', '_')

        commands = [entry.command for entry in entries.values()]
        flag_names = [
            format_flag_name(name)
            for name in parse_flags(commands, delimiter='=').keys()
        ]
        Row = namedtuple('Row', ['reward'] + list(RunEntry.fields()) + flag_names)

        def get_row(path):
            entry = entries[path]  # type: RunEntry
            flags = parse_flags([entry.command], delimiter='=')
            flags = {format_flag_name(key): values.pop() for key, values in flags.items()}
            entry_dict = {str(k): str(v) for k, v in entry.asdict().items()}
            return Row(reward=rewards[path], **entry_dict, **flags)

        return Row._fields, map(get_row, rewards)
def main():
    """Import run entries from a yaml/pickle dump into the run database and list them."""
    parser = argparse.ArgumentParser()
    parser.add_argument("yaml_file", help="input file", type=str)
    parser.add_argument("db_file", nargs="?", default="runs.db", help="output file", type=str)
    # Fix: help text previously said "output file" (copy-paste error).
    parser.add_argument("--column-width", default=100, help="column width for output", type=int)
    args = parser.parse_args()
    # endswith accepts a tuple, replacing the `or` chain.
    if args.yaml_file.endswith(("yml", "yaml")):
        with Path(args.yaml_file).open() as f:
            # SECURITY NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary Python objects from untrusted input; prefer
            # yaml.safe_load if the dump format allows it.
            data = yaml.load(f)
    elif args.yaml_file.endswith("pkl"):
        with Path(args.yaml_file).open("rb") as f:
            data = pickle.load(f)
    else:
        raise RuntimeError("This script works on yaml or pickle files only")
    with DataBase(args.db_file, Logger(quiet=False)) as db:
        for run in yaml_to_run_entry(data):
            db.append(run)
        print(ls.string(db=db))
def main():
    """Import run entries from a yaml/pickle dump into the run database and print the table."""
    parser = argparse.ArgumentParser()
    parser.add_argument('yaml_file', help='input file', type=str)
    parser.add_argument('db_file', nargs='?', default='runs.db', help='output file', type=str)
    # Fix: help text previously said 'output file' (copy-paste error).
    parser.add_argument('--column-width', default=100, help='column width for output', type=int)
    args = parser.parse_args()
    # endswith accepts a tuple, replacing the `or` chain.
    if args.yaml_file.endswith(('yml', 'yaml')):
        with Path(args.yaml_file).open() as f:
            # SECURITY NOTE(review): yaml.load without an explicit Loader can
            # construct arbitrary Python objects from untrusted input; prefer
            # yaml.safe_load if the dump format allows it.
            data = yaml.load(f)
    elif args.yaml_file.endswith('pkl'):
        with Path(args.yaml_file).open('rb') as f:
            data = pickle.load(f)
    else:
        raise RuntimeError('This script works on yaml or pickle files only')
    with DataBase(args.db_file, Logger(quiet=False)) as db:
        for run in yaml_to_run_entry(data):
            db.append(run)
        print(table.string(db=db))
def cli(logger: Logger, runs: List[RunEntry], value_path: Path, prefix: str, args: List[str], *_, **__):
    """Announce which runs are analyzed, then print the correlation strings."""
    print("Analyzing the following runs", *[run.path for run in runs], sep="\n")
    lines = strings(runs=runs, value_path=value_path, prefix=prefix, runsrc_args=args)
    logger.print(*lines, sep="\n")
def cli(runs: List[RunEntry], args: List[str], logger: Logger, db: DataBase, prefix: str, path: Optional[PurePath], description: str, porcelain: bool, *_, **__):
    """Print each reproduce string for the given runs."""
    lines = strings(
        db=db,
        runs=runs,
        args=args,
        prefix=prefix,
        path=path,
        description=description,
        porcelain=porcelain,
    )
    for line in lines:
        logger.print(line)
def main():
    """Run A2C training/evaluation on StarCraft II environments until Ctrl-C.

    NOTE(review): reads a module-level `args` namespace — presumably produced
    by an argument parser elsewhere in this file; confirm before reuse.
    """
    # Screen and minimap share one square resolution.
    size_px = (args.res, args.res)
    env_args = dict(
        map_name=args.map,
        step_mul=args.step_mul,
        game_steps_per_episode=0,
        screen_size_px=size_px,
        minimap_size_px=size_px)
    vis_env_args = env_args.copy()
    vis_env_args['visualize'] = args.vis
    # At most `max_windows` environments get a render window; the rest run headless.
    num_vis = min(args.envs, args.max_windows)
    env_fns = [partial(make_sc2env, **vis_env_args)] * num_vis
    num_no_vis = args.envs - num_vis
    if num_no_vis > 0:
        env_fns.extend([partial(make_sc2env, **env_args)] * num_no_vis)
    envs = SubprocVecEnv(env_fns)
    agent = A2CAgent(args)
    current_epoch = 0
    # Resume from an existing checkpoint unless overwriting was requested.
    if os.path.isfile(args.save_dir + '.pth.tar') and not args.overwrite:
        current_epoch = agent.load_checkpoint()
        print("Restored from last checkpoint at epoch", current_epoch)
    summary_writer = Logger(args.summary_dir)
    runner = A2CRunner(
        envs=envs,
        agent=agent,
        train=args.train,
        summary_writer=summary_writer,
        discount=args.discount,
        n_steps=args.steps_per_batch)
    runner.reset()
    try:
        # Run until interrupted, checkpointing every `save_iters` epochs.
        while True:
            if current_epoch % args.save_iters == 0:
                agent.save_checkpoint(current_epoch)
            result = runner.run_batch(train_summary=True)
            # agent.log(summary_writer, i)
            current_epoch += 1
    except KeyboardInterrupt:
        pass
    envs.close()
    print('mean score: %f' % runner.get_mean_score())
def main(argv=sys.argv[1:]):
    """Entry point for the `runs` command-line tool.

    Loads `.runsrc` configuration (writing defaults when none is found),
    builds an argument parser with one subparser per command, and dispatches
    to the matching module under runs.commands.

    NOTE(review): the default `argv=sys.argv[1:]` is evaluated once at import
    time, so later mutations of sys.argv are not seen — confirm intended.
    """
    # Config values may reference each other via ${section:key} interpolation;
    # the converters expose get_path / get_pure_path_list / get_flag_list.
    config = ConfigParser(
        delimiters=[':'],
        allow_no_value=True,
        interpolation=ExtendedInterpolation(),
        converters=dict(
            _path=Path,
            _pure_path=PurePath,
            _pure_path_list=pure_path_list,
            _flag_list=flag_list))
    config_filename = '.runsrc'
    # Search ancestor directories for an existing config file.
    config_path = find_up(config_filename)
    if config_path:
        config.read(str(config_path))
    else:
        # No config found: fall back to defaults rooted in the current directory.
        config[MAIN] = dict(
            root=Path('.runs').absolute(),
            db_path=Path('runs.db').absolute(),
        )
    parser = argparse.ArgumentParser(
        epilog="The script will ask permission before running, deleting, moving, or "
        "permanently changing anything.")
    parser.add_argument(
        '--quiet', '-q', action='store_true', help='Suppress print output')
    parser.add_argument(
        '--db-path',
        help='path to sqlite file storing run database information.',
        type=Path)
    parser.add_argument(
        '--root',
        help='Custom path to directory where config directories (if any) are automatically '
        'created',
        type=Path)
    parser.add_argument(
        '--dir-names',
        type=pure_path_list,
        help="directories to create and sync automatically with each run")
    parser.add_argument(
        '--assume-yes',
        '-y',
        action='store_true',
        help='Don\'t ask permission before performing operations.')
    subparsers = parser.add_subparsers(dest='dest')
    # Start from the raw [main] section, then overlay the typed/converted values.
    main_config = dict(config[MAIN])
    main_config.update(
        root=config[MAIN].get_path('root'),
        db_path=config[MAIN].get_path('db_path'),
        dir_names=config[MAIN].get_pure_path_list('dir_names', []),
        flags=(config[MAIN].get_flag_list(FLAGS, [])))
    # Give each subcommand parser layered defaults:
    # [DEFAULT] section < [main] section < the subcommand's own config section.
    for subparser in [parser] + [
            adder(subparsers) for adder in [
                new.add_subparser,
                rm.add_subparser,
                mv.add_subparser,
                ls.add_subparser,
                table.add_subparser,
                lookup.add_subparser,
                flags.add_subparser,
                change_description.add_subparser,
                reproduce.add_subparser,
                correlate.add_subparser,
                kill.add_subparser,
            ]
    ]:
        assert isinstance(subparser, argparse.ArgumentParser)
        # The config section name is the subcommand name (last word of prog).
        config_section = subparser.prog.split()[-1]
        assert isinstance(config_section, str)
        subparser.set_defaults(**config['DEFAULT'])
        subparser.set_defaults(**main_config)
        if config_section in config:
            subparser.set_defaults(**config[config_section])
    args = parser.parse_args(args=argv)
    logger = Logger(quiet=args.quiet)
    if not config_path:
        # First run: show the defaults being used, then persist them to .runsrc.
        logger.print('Config file not found. Using default settings:\n')
        for section in config.sections():
            for k, v in config[section].items():
                logger.print('{:20}{}'.format(k + ':', v))
        logger.print()
        msg = 'Writing default settings to ' + config_filename
        logger.print(msg)
        logger.print('-' * len(msg))
        with open(config_filename, 'w') as f:
            config.write(f)
    # Dispatch: e.g. 'change-description' -> runs.commands.change_description.
    module = import_module('runs.commands.' + args.dest.replace('-', '_'))
    kwargs = {k: v for k, v in vars(args).items()}
    try:
        # pluralize flags
        kwargs[FLAGS] = tuple(set(args.flag) | set(main_config[FLAGS]))
    except AttributeError:
        pass
    module.cli(**kwargs)
def open_wrapper(db_path, quiet, *args, **kwargs):
    """Open the run database, then call the wrapped `func` with logger and db injected.

    NOTE(review): `func` comes from an enclosing decorator scope not visible here.
    """
    log = Logger(quiet=quiet)
    with DataBase(db_path, log) as database:
        return func(*args, **kwargs, logger=log, db=database)
def cli(logger: Logger, runs: List[RunEntry], delimiter: str, *_, **__):
    """Print one line per run, with fields joined by `delimiter`."""
    lines = strings(runs=runs, delimiter=delimiter)
    for line in lines:
        logger.print(line)
def cli(logger: Logger, runs: List[RunEntry], path_to_value: Path, *args, **kwargs):
    """Print all lookup strings for the runs, one per line."""
    lines = strings(runs=runs, path_to_value=path_to_value)
    logger.print(*lines, sep='\n')
def cli(runs: List[RunEntry], logger: Logger, pprint: bool, depth, *_, **__):
    """Format the runs (optionally pretty-printed, limited to `depth`) and print."""
    output = string(runs=runs, pprint=pprint, depth=depth)
    logger.print(output)
def cli(logger: Logger, runs: List[RunEntry], delimiter: str, *args, **kwargs):
    """Print one line per run, with fields joined by `delimiter`."""
    lines = strings(runs=runs, delimiter=delimiter)
    for line in lines:
        logger.print(line)