def test_model(args):
    """Run a trained model's test routine and persist the updated config.

    Loads the JSON config at ``args.config_path`` (key order preserved),
    wraps it in a ModelConfig, loads the model named by ``config.model``,
    forwards any ``args.additional_params`` to the model's ``test``
    method, and writes the returned config back to the same file.
    """
    resolved_path = os.path.abspath(args.config_path)
    with open(resolved_path) as config_file:
        raw_config = json.load(config_file, object_pairs_hook=OrderedDict)
    config = ModelConfig(**raw_config)

    model = expertise.load_model(config.model)
    # test() returns an updated config; extra CLI params are forwarded as-is.
    config = model.test(config, *args.additional_params)
    config.save(resolved_path)
def prepare_kfold(args, k):
    """Create a per-fold experiment directory and save a fold-specific config.

    Loads the config at ``args.config_path``, creates a subdirectory of the
    current experiment dir named ``<config.name><k>``, points the config's
    ``experiment_dir`` at it, sets ``random_seed`` to the fold index ``k``,
    and saves the updated config inside the new directory.

    Args:
        args: parsed CLI namespace; only ``args.config_path`` is read.
        k: integer fold index; doubles as the fold's random seed.
    """
    config_path = os.path.abspath(args.config_path)

    config = ModelConfig()
    # Load from the resolved path so behavior doesn't depend on the CWD
    # at call time relative to a user-supplied relative path.
    config.update_from_file(config_path)

    old_experiment_dir = config.experiment_dir
    new_experiment_dir = os.path.join(old_experiment_dir, f'{config.name}{k}')
    # makedirs(exist_ok=True) avoids the exists()/mkdir() race and also
    # tolerates a missing parent directory.
    os.makedirs(new_experiment_dir, exist_ok=True)
    config.update(experiment_dir=new_experiment_dir)

    # Join only the FILENAME: joining the raw args.config_path would
    # reproduce any relative subdirectories under new_experiment_dir (which
    # don't exist), and an absolute args.config_path would discard
    # new_experiment_dir entirely (os.path.join semantics).
    new_config_path = os.path.join(new_experiment_dir, os.path.basename(config_path))

    config.update(random_seed=k)
    print('new_config_path', new_config_path)
    config.save(new_config_path)
import argparse
import json
from collections import OrderedDict

from expertise.config import ModelConfig

from .preprocess.textrank import run_textrank
from .models.tfidf.train_tfidf import train
from .models.tfidf.infer_tfidf import infer


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path', help="a config file for a model")
    args = parser.parse_args()

    config = ModelConfig(config_file_path=args.config_path)

    # Each stage receives the ORIGINAL config object; the config each stage
    # returns is persisted back to the user-supplied path before the next
    # stage runs.
    for stage in (run_textrank, train, infer):
        stage_config = stage(config)
        stage_config.save(args.config_path)
# NOTE(review): this chunk starts mid-way through a function body -- the
# enclosing `def` (presumably setup_bert_lookup, which the script below
# calls) is not visible here, so the fragment's indentation depth is
# reconstructed and should be confirmed against the full file.
    for doc_feature in archive_features:
        archive_values.append(get_values(doc_feature))
    # Fall back to a single zero vector when the archive produced no
    # features (768 presumably matches the BERT hidden size -- TODO confirm).
    if len(archive_values) == 0:
        archive_values = [np.zeros(768)]
    result = np.array(archive_values)
    # One (num_docs x feature_dim) tensor per item id.
    bert_lookup[item_id] = torch.Tensor(result)
    return bert_lookup


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('config_path', help="a config file for a model")
    args = parser.parse_args()

    config_path = os.path.abspath(args.config_path)
    experiment_path = os.path.dirname(config_path)

    config = ModelConfig()
    config.update_from_file(config_path)

    # Ensure the experiment's setup/ directory exists before writing to it.
    setup_path = os.path.join(experiment_path, 'setup')
    if not os.path.isdir(setup_path):
        os.mkdir(setup_path)

    # Build the per-item BERT feature lookup and pickle it under setup_dir.
    bert_lookup = setup_bert_lookup(config)
    utils.dump_pkl(os.path.join(config.setup_dir, 'bert_lookup_cls.pkl'), bert_lookup)
import argparse
import json
import os
from collections import OrderedDict

from expertise.config import ModelConfig

from .preprocess.textrank import run_textrank
from .models.tfidf.train_tfidf import train
from .models.tfidf.infer_tfidf import infer


if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('config_path', help="a config file for a model")
    cli_args = arg_parser.parse_args()

    # Read the JSON config with key order preserved, then wrap it.
    resolved_path = os.path.abspath(cli_args.config_path)
    with open(resolved_path) as config_file:
        raw_config = json.load(config_file, object_pairs_hook=OrderedDict)
    config = ModelConfig(**raw_config)

    # Every stage receives the ORIGINAL config; each stage's returned config
    # is saved back to the path exactly as the user supplied it.
    for stage in (run_textrank, train, infer):
        stage(config).save(cli_args.config_path)
import argparse
import json
import os
from collections import OrderedDict

from expertise.config import ModelConfig

from .core import run_textrank


if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    cli.add_argument('config_path', help="a config file for a model")
    parsed = cli.parse_args()

    config_path = os.path.abspath(parsed.config_path)
    with open(config_path) as fp:
        config_data = json.load(fp, object_pairs_hook=OrderedDict)
    config = ModelConfig(**config_data)

    # run_textrank's return value is discarded; the config object itself is
    # what gets persisted afterwards.
    run_textrank(config)
    print('saving', config_path, config)
    config.save(config_path)