def _load_all(src):
    """Load the saved vocabularies, POS pipeline, and trained parser model.

    Parameters
    ----------
    src : str
        Path to the saved model weights file. The sibling ``.pkl`` files
        (``word_v.pkl``, ``pos_v.pkl``, ``tag_v.pkl``, ``pos_pp.pkl``) are
        expected in the same directory.

    Returns
    -------
    dict
        Keys ``'word_v'``, ``'pos_v'``, ``'tag_v'``, ``'model'``, ``'pos_pp'``.
    """
    model_path = src
    src = os.path.dirname(src)

    # Vocabularies pickled alongside the model file.
    # (os.path.join used instead of '/'-concatenation for portability.)
    word_v = _load(os.path.join(src, 'word_v.pkl'))
    pos_v = _load(os.path.join(src, 'pos_v.pkl'))
    tag_v = _load(os.path.join(src, 'tag_v.pkl'))
    # POS preprocessing pipeline was saved via torch.save as a dict.
    pos_pp = torch.load(os.path.join(src, 'pos_pp.pkl'))['pipeline']

    # Model hyper-parameters come from the config file; the vocab-dependent
    # sizes are filled in from the freshly loaded vocabularies.
    model_args = ConfigSection()
    ConfigLoader.load_config('cfg.cfg', {'model': model_args})
    model_args['word_vocab_size'] = len(word_v)
    model_args['pos_vocab_size'] = len(pos_v)
    model_args['num_label'] = len(tag_v)

    model = BiaffineParser(**model_args.data)
    model.load_state_dict(torch.load(model_path))
    return {
        'word_v': word_v,
        'pos_v': pos_v,
        'tag_v': tag_v,
        'model': model,
        'pos_pp': pos_pp,
    }
emb_file_name = "/home/yfshao/workdir/parser-data/word_OOVthr_30_100v.txt" # emb_file_name = "/home/yfshao/workdir/word_vector/cc.zh.300.vec" loader = CTBDataLoader() cfgfile = './cfg.cfg' processed_datadir = './save' # Config Loader train_args = ConfigSection() test_args = ConfigSection() model_args = ConfigSection() optim_args = ConfigSection() ConfigLoader.load_config( cfgfile, { "train": train_args, "test": test_args, "model": model_args, "optim": optim_args }) print('trainre Args:', train_args.data) print('test Args:', test_args.data) print('optim Args:', optim_args.data) # Pickle Loader def save_data(dirpath, **kwargs): import _pickle if not os.path.exists(dirpath): os.mkdir(dirpath) for name, data in kwargs.items(): with open(os.path.join(dirpath, name + '.pkl'), 'wb') as f: