def main():
    """Parse command-line arguments, load the config (with optional -p/--param
    overrides), and run training, possibly as one rank of a multi-machine job."""
    setup_logging()
    config_help = "\n\nConfig parameters:\n\n" + "\n".join(ConfigSchema.help())
    parser = argparse.ArgumentParser(
        epilog=config_help,
        # Needed to preserve line wraps in epilog.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument("config", help="Path to config file")
    parser.add_argument("-p", "--param", action="append", nargs="*")
    parser.add_argument(
        "--rank",
        type=int,
        default=SINGLE_TRAINER,
        help="For multi-machine, this machine's rank",
    )
    opt = parser.parse_args()

    loader = ConfigFileLoader()
    config = loader.load_config(opt.config, opt.param)
    set_logging_verbosity(config.verbose)
    subprocess_init = SubprocessInitializer()
    subprocess_init.register(setup_logging, config.verbose)
    subprocess_init.register(add_to_sys_path, loader.config_dir.name)

    train(config, rank=opt.rank, subprocess_init=subprocess_init)
def main():
    """Parse command-line arguments, flatten any -p/--param overrides, load the
    config, and run training."""
    setup_logging()
    config_help = '\n\nConfig parameters:\n\n' + '\n'.join(ConfigSchema.help())
    parser = argparse.ArgumentParser(
        epilog=config_help,
        # Needed to preserve line wraps in epilog.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('config', help="Path to config file")
    parser.add_argument('-p', '--param', action='append', nargs='*')
    parser.add_argument('--rank', type=int, default=0,
                        help="For multi-machine, this machine's rank")
    opt = parser.parse_args()

    if opt.param is not None:
        overrides = chain.from_iterable(opt.param)  # flatten
    else:
        overrides = None

    loader = ConfigFileLoader()
    config = loader.load_config(opt.config, overrides)
    set_logging_verbosity(config.verbose)
    subprocess_init = SubprocessInitializer()
    subprocess_init.register(setup_logging, config.verbose)
    subprocess_init.register(add_to_sys_path, loader.config_dir.name)

    train(config, rank=Rank(opt.rank), subprocess_init=subprocess_init)
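# Hedged aside, not part of the original scripts: a minimal sketch of why the
# `-p`/`--param` overrides above get flattened with `chain.from_iterable`.
# With `action='append'` and `nargs='*'`, argparse collects one inner list per
# `-p` occurrence, so the config loader needs a single flat iterable of
# `key=value` strings. The `_sketch_*` names and parameter values below are
# illustrative only.
import argparse
from itertools import chain

_sketch_parser = argparse.ArgumentParser()
_sketch_parser.add_argument('-p', '--param', action='append', nargs='*')
_sketch_opt = _sketch_parser.parse_args(
    ['-p', 'num_epochs=5', 'dimension=100', '-p', 'verbose=1']
)
assert _sketch_opt.param == [['num_epochs=5', 'dimension=100'], ['verbose=1']]
assert list(chain.from_iterable(_sketch_opt.param)) == \
    ['num_epochs=5', 'dimension=100', 'verbose=1']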
def main():
    """End-to-end FB15k example: download and extract the dataset, convert it to
    the partitioned format, train embeddings, then run filtered (or, with
    --no-filtered, unfiltered) ranking evaluation."""
    setup_logging()
    parser = argparse.ArgumentParser(description='Example on FB15k')
    parser.add_argument('--config', default=DEFAULT_CONFIG,
                        help='Path to config file')
    parser.add_argument('-p', '--param', action='append', nargs='*')
    parser.add_argument('--data_dir', type=Path, default='data',
                        help='where to save processed data')
    parser.add_argument('--no-filtered', dest='filtered', action='store_false',
                        help='Run unfiltered eval')
    args = parser.parse_args()

    if args.param is not None:
        overrides = chain.from_iterable(args.param)  # flatten
    else:
        overrides = None

    # download data
    data_dir = args.data_dir
    fpath = download_url(FB15K_URL, data_dir)
    extract_tar(fpath)
    print('Downloaded and extracted file.')

    loader = ConfigFileLoader()
    config = loader.load_config(args.config, overrides)
    set_logging_verbosity(config.verbose)
    subprocess_init = SubprocessInitializer()
    subprocess_init.register(setup_logging, config.verbose)
    subprocess_init.register(add_to_sys_path, loader.config_dir.name)
    input_edge_paths = [data_dir / name for name in FILENAMES]
    output_train_path, output_valid_path, output_test_path = config.edge_paths

    convert_input_data(
        config.entities,
        config.relations,
        config.entity_path,
        config.edge_paths,
        input_edge_paths,
        lhs_col=0,
        rhs_col=2,
        rel_col=1,
        dynamic_relations=config.dynamic_relations,
    )

    train_config = attr.evolve(config, edge_paths=[output_train_path])
    train(train_config, subprocess_init=subprocess_init)

    relations = [attr.evolve(r, all_negs=True) for r in config.relations]
    eval_config = attr.evolve(
        config,
        edge_paths=[output_test_path],
        relations=relations,
        num_uniform_negs=0,
    )
    if args.filtered:
        filter_paths = [output_test_path, output_valid_path, output_train_path]
        do_eval(
            eval_config,
            evaluator=FilteredRankingEvaluator(eval_config, filter_paths),
            subprocess_init=subprocess_init,
        )
    else:
        do_eval(eval_config, subprocess_init=subprocess_init)
def main():
    """End-to-end Livejournal example: download the edge list, randomly split it
    into train and test files, convert both, train embeddings, then evaluate on
    the test split."""
    setup_logging()
    parser = argparse.ArgumentParser(description='Example on Livejournal')
    parser.add_argument('--config', default=DEFAULT_CONFIG,
                        help='Path to config file')
    parser.add_argument('-p', '--param', action='append', nargs='*')
    parser.add_argument('--data_dir', type=Path, default='data',
                        help='where to save processed data')
    args = parser.parse_args()

    if args.param is not None:
        overrides = chain.from_iterable(args.param)  # flatten
    else:
        overrides = None

    # download data
    data_dir = args.data_dir
    data_dir.mkdir(parents=True, exist_ok=True)
    fpath = download_url(URL, data_dir)
    fpath = extract_gzip(fpath)
    print('Downloaded and extracted file.')

    # random split file for train and test
    random_split_file(fpath)

    loader = ConfigFileLoader()
    config = loader.load_config(args.config, overrides)
    set_logging_verbosity(config.verbose)
    subprocess_init = SubprocessInitializer()
    subprocess_init.register(setup_logging, config.verbose)
    subprocess_init.register(add_to_sys_path, loader.config_dir.name)

    edge_paths = [data_dir / name for name in FILENAMES.values()]

    convert_input_data(
        config.entities,
        config.relations,
        config.entity_path,
        edge_paths,
        lhs_col=0,
        rhs_col=1,
        rel_col=None,
        dynamic_relations=config.dynamic_relations,
    )

    train_path = [str(convert_path(data_dir / FILENAMES['train']))]
    train_config = attr.evolve(config, edge_paths=train_path)
    train(train_config, subprocess_init=subprocess_init)

    eval_path = [str(convert_path(data_dir / FILENAMES['test']))]
    eval_config = attr.evolve(config, edge_paths=eval_path)
    do_eval(eval_config, subprocess_init=subprocess_init)
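# Hedged aside, not part of the original scripts: the examples above rely on
# `attr.evolve` to derive per-phase configs (train vs. eval) from one loaded
# config. A small sketch of that behavior on a made-up attrs class
# (`_SketchConfig` is illustrative only): `evolve` returns a copy with the
# given fields replaced and leaves the original instance untouched.
import attr

@attr.s(auto_attribs=True)
class _SketchConfig:
    edge_paths: list
    num_uniform_negs: int = 50

_sketch_base = _SketchConfig(edge_paths=['train_partitioned'])
_sketch_eval = attr.evolve(
    _sketch_base, edge_paths=['test_partitioned'], num_uniform_negs=0
)
assert _sketch_base.edge_paths == ['train_partitioned']
assert _sketch_eval.edge_paths == ['test_partitioned']
assert _sketch_eval.num_uniform_negs == 0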
def main():
    """End-to-end Livejournal example: download the edge list, randomly split it
    into train and test, convert the data, train embeddings, and evaluate on the
    held-out split."""
    setup_logging()
    parser = argparse.ArgumentParser(description='Example on Livejournal')
    parser.add_argument('--config', default=DEFAULT_CONFIG,
                        help='Path to config file')
    parser.add_argument('-p', '--param', action='append', nargs='*')
    parser.add_argument('--data_dir', type=Path, default='data',
                        help='where to save processed data')
    args = parser.parse_args()

    # download data
    data_dir = args.data_dir
    data_dir.mkdir(parents=True, exist_ok=True)
    fpath = download_url(URL, data_dir)
    fpath = extract_gzip(fpath)
    print('Downloaded and extracted file.')

    # random split file for train and test
    random_split_file(fpath)

    loader = ConfigFileLoader()
    config = loader.load_config(args.config, args.param)
    set_logging_verbosity(config.verbose)
    subprocess_init = SubprocessInitializer()
    subprocess_init.register(setup_logging, config.verbose)
    subprocess_init.register(add_to_sys_path, loader.config_dir.name)
    input_edge_paths = [data_dir / name for name in FILENAMES]
    output_train_path, output_test_path = config.edge_paths

    convert_input_data(
        config.entities,
        config.relations,
        config.entity_path,
        config.edge_paths,
        input_edge_paths,
        TSVEdgelistReader(lhs_col=0, rhs_col=1, rel_col=None),
        dynamic_relations=config.dynamic_relations,
    )

    train_config = attr.evolve(config, edge_paths=[output_train_path])
    train(train_config, subprocess_init=subprocess_init)

    eval_config = attr.evolve(config, edge_paths=[output_test_path])
    do_eval(eval_config, subprocess_init=subprocess_init)
def main():
    """Load a config file (with optional -p/--param overrides) and print the
    value of a single config parameter."""
    # Late import to avoid circular dependency.
    from torchbiggraph.util import set_logging_verbosity, setup_logging

    setup_logging()
    parser = argparse.ArgumentParser()
    parser.add_argument('config', help="Path to config file")
    parser.add_argument('query', help="Name of param to retrieve")
    parser.add_argument('-p', '--param', action='append', nargs='*')
    opt = parser.parse_args()

    if opt.param is not None:
        overrides = chain.from_iterable(opt.param)  # flatten
    else:
        overrides = None

    loader = ConfigFileLoader()
    config = loader.load_config(opt.config, overrides)
    set_logging_verbosity(config.verbose)

    print(config[opt.query])
def main():
    """Parse command-line arguments, load the config (with optional -p/--param
    overrides), and run evaluation."""
    setup_logging()
    config_help = '\n\nConfig parameters:\n\n' + '\n'.join(ConfigSchema.help())
    parser = argparse.ArgumentParser(
        epilog=config_help,
        # Needed to preserve line wraps in epilog.
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument('config', help="Path to config file")
    parser.add_argument('-p', '--param', action='append', nargs='*')
    opt = parser.parse_args()

    loader = ConfigFileLoader()
    config = loader.load_config(opt.config, opt.param)
    set_logging_verbosity(config.verbose)
    subprocess_init = SubprocessInitializer()
    subprocess_init.register(setup_logging, config.verbose)
    subprocess_init.register(add_to_sys_path, loader.config_dir.name)

    do_eval(config, subprocess_init=subprocess_init)
def main():
    """End-to-end FB15k example: download and extract the dataset, convert it to
    the partitioned format, train embeddings, then run filtered (or, with
    --no-filtered, unfiltered) ranking evaluation."""
    setup_logging()
    parser = argparse.ArgumentParser(description="Example on FB15k")
    parser.add_argument("--config", default=DEFAULT_CONFIG,
                        help="Path to config file")
    parser.add_argument("-p", "--param", action="append", nargs="*")
    parser.add_argument("--data_dir", type=Path, default="data",
                        help="where to save processed data")
    parser.add_argument(
        "--no-filtered",
        dest="filtered",
        action="store_false",
        help="Run unfiltered eval",
    )
    args = parser.parse_args()

    # download data
    data_dir = args.data_dir
    fpath = download_url(FB15K_URL, data_dir)
    extract_tar(fpath)
    print("Downloaded and extracted file.")

    loader = ConfigFileLoader()
    config = loader.load_config(args.config, args.param)
    set_logging_verbosity(config.verbose)
    subprocess_init = SubprocessInitializer()
    subprocess_init.register(setup_logging, config.verbose)
    subprocess_init.register(add_to_sys_path, loader.config_dir.name)
    input_edge_paths = [data_dir / name for name in FILENAMES]
    output_train_path, output_valid_path, output_test_path = config.edge_paths

    convert_input_data(
        config.entities,
        config.relations,
        config.entity_path,
        config.edge_paths,
        input_edge_paths,
        TSVEdgelistReader(lhs_col=0, rhs_col=2, rel_col=1),
        dynamic_relations=config.dynamic_relations,
    )

    train_config = attr.evolve(config, edge_paths=[output_train_path])
    train(train_config, subprocess_init=subprocess_init)

    relations = [attr.evolve(r, all_negs=True) for r in config.relations]
    eval_config = attr.evolve(
        config,
        edge_paths=[output_test_path],
        relations=relations,
        num_uniform_negs=0,
    )
    if args.filtered:
        filter_paths = [output_test_path, output_valid_path, output_train_path]
        do_eval(
            eval_config,
            evaluator=FilteredRankingEvaluator(eval_config, filter_paths),
            subprocess_init=subprocess_init,
        )
    else:
        do_eval(eval_config, subprocess_init=subprocess_init)
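# Assumption, not shown in the excerpts above: each of these scripts would
# normally end with the standard entry-point guard, so the module can be
# imported without side effects and still run from the command line.
if __name__ == "__main__":
    main()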