def parse_args():
    """Build the preprocessing option parser and return the parsed options.

    Also seeds the torch RNG from ``-seed`` and aborts early (via
    ``check_existing_pt_files``) if output files already exist.
    """
    arg_parser = configargparse.ArgumentParser(
        description='preprocess.py',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)

    # All option groups are registered by the shared opts module.
    opts.config_opts(arg_parser)
    opts.add_md_help_argument(arg_parser)
    opts.preprocess_opts(arg_parser)

    parsed = arg_parser.parse_args()

    # Fix the torch seed so any random operations during preprocessing
    # are reproducible.
    torch.manual_seed(parsed.seed)
    check_existing_pt_files(parsed)
    return parsed
def parse_args():
    """Parse command-line / YAML-config options for preprocessing."""
    cli = configargparse.ArgumentParser(
        description='preprocess.py',
        config_file_parser_class=configargparse.YAMLConfigFileParser,
        formatter_class=configargparse.ArgumentDefaultsHelpFormatter)

    # Every option needed for preprocessing lives in the opts module.
    opts.config_opts(cli)
    opts.preprocess_opts(cli)

    options = cli.parse_args()

    # Fixing the seed makes successive rand* calls deterministic so a run
    # can be reproduced (cross-machine determinism is not guaranteed).
    torch.manual_seed(options.seed)
    return options
def parse_args():
    """Parse preprocessing arguments.

    All option groups are registered by the shared ``opts`` module; the
    torch RNG is seeded from ``-seed`` so preprocessing is reproducible.

    Returns:
        argparse.Namespace: the parsed options.
    """
    parser = argparse.ArgumentParser(
        description='preprocess.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Option registration is delegated entirely to the opts module
    # (the previously commented-out ad-hoc -train_dir/-valid_dir/-data_type/
    # -save_data arguments were dead code and have been removed).
    opts.add_md_help_argument(parser)
    opts.preprocess_opts(parser)

    opt = parser.parse_args()
    torch.manual_seed(opt.seed)
    return opt
def parse_args():
    """Parse preprocessing arguments, including two script-local flags."""
    parser = argparse.ArgumentParser(
        description='preprocess.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Shared option groups from the opts module.
    opts.add_md_help_argument(parser)
    opts.preprocess_opts(parser)

    # NOTE(review): '-parrel_run' and 'confomer' look like typos for
    # 'parallel_run' / 'conformer'; kept unchanged for CLI compatibility.
    parser.add_argument('-parrel_run', action='store_true', default=False,
                        help='生成靶标')
    parser.add_argument('-with_3d_confomer', action='store_true', default=False,
                        help='原子特征是否在最后3个维度加上坐标')

    parsed = parser.parse_args()
    torch.manual_seed(parsed.seed)
    check_existing_pt_files(parsed)
    return parsed
# NOTE(review): the statements before the __main__ guard appear to be the
# tail of a main(opt)-style driver whose header is not visible in this
# chunk — they reference `opt`, `src_nfeats` and `tgt_nfeats` defined
# elsewhere; confirm against the full file.

# Build the field objects for source/target, honoring the truncation and
# dynamic_dict options.
fields = inputters.get_fields(
    opt.data_type,
    src_nfeats,
    tgt_nfeats,
    dynamic_dict=opt.dynamic_dict,
    src_truncate=opt.src_seq_length_trunc,
    tgt_truncate=opt.tgt_seq_length_trunc)

# Readers convert raw corpus entries into examples; the source reader is
# chosen by data_type, the target side is always "text".
src_reader = inputters.str2reader[opt.data_type].from_opt(opt)
tgt_reader = inputters.str2reader["text"].from_opt(opt)

logger.info("Building & saving training data...")
train_dataset_files = build_save_dataset(
    'train', fields, src_reader, tgt_reader, opt)

# Validation data is optional: only built when both sides are provided.
if opt.valid_src and opt.valid_tgt:
    logger.info("Building & saving validation data...")
    build_save_dataset('valid', fields, src_reader, tgt_reader, opt)

# Vocabulary is built from the training shards produced above.
logger.info("Building & saving vocabulary...")
build_save_vocab(train_dataset_files, fields, opt)


if __name__ == "__main__":
    parser = ArgumentParser(description='preprocess.py')
    opts.config_opts(parser)
    opts.preprocess_opts(parser)
    opt = parser.parse_args()
    main(opt)
def _get_parser():
    """Return an ArgumentParser populated with all preprocessing options."""
    preprocess_parser = ArgumentParser(description='preprocess.py')
    # Register both shared option groups from the opts module.
    for register in (opts.config_opts, opts.preprocess_opts):
        register(preprocess_parser)
    return preprocess_parser