import os
import pickle

import torch
from spacy.lang.en import English

# Project-local helpers (setup_device, set_seed_everywhere, spider_utils,
# semQL, IRNet) are assumed to be importable from this repository.


def load_static(args):
    device, n_gpu = setup_device()
    set_seed_everywhere(args.seed, n_gpu)

    schemas_raw, schemas_dict = spider_utils.load_schema(args.data_dir)

    grammar = semQL.Grammar()
    model = IRNet(args, device, grammar)
    model.to(device)

    # load the pre-trained parameters; map_location='cpu' lets the checkpoint
    # load on machines without a GPU
    model.load_state_dict(
        torch.load(args.model_to_load, map_location=torch.device('cpu')))
    model.eval()
    print("Load pre-trained model from '{}'".format(args.model_to_load))

    nlp = English()
    tokenizer = nlp.Defaults.create_tokenizer(nlp)

    with open(os.path.join(args.conceptNet, 'english_RelatedTo.pkl'), 'rb') as f:
        related_to_concept = pickle.load(f)

    with open(os.path.join(args.conceptNet, 'english_IsA.pkl'), 'rb') as f:
        is_a_concept = pickle.load(f)

    return args, grammar, model, nlp, tokenizer, related_to_concept, is_a_concept, schemas_raw, schemas_dict
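# A minimal usage sketch (assumed, not from the repository): build the args
# namespace with the project's CLI parser and load all static artifacts once
# at startup, so they can be reused across inference requests.
if __name__ == '__main__':
    args = read_arguments_manual_inference()
    (args, grammar, model, nlp, tokenizer,
     related_to_concept, is_a_concept,
     schemas_raw, schemas_dict) = load_static(args)
    print("Loaded schemas for {} databases".format(len(schemas_dict)))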
import os
import pickle
import re

import torch
from spacy.lang.en import English


def _find_nums(question):
    # raw string avoids the invalid-escape warning for '\d' on Python 3.6+
    nums = re.findall(r'\d*\.?\d+', question)
    return nums


if __name__ == '__main__':
    args = read_arguments_manual_inference()

    device, n_gpu = setup_device()
    set_seed_everywhere(args.seed, n_gpu)

    schemas_raw, schemas_dict = spider_utils.load_schema(args.data_dir)

    grammar = semQL.Grammar()
    model = IRNet(args, device, grammar)
    model.to(device)

    # load the pre-trained parameters
    model.load_state_dict(torch.load(args.model_to_load))
    # to run on CPU instead of GPU, use this line instead:
    # model.load_state_dict(torch.load(args.model_to_load, map_location=torch.device('cpu')))
    model.eval()
    print("Load pre-trained model from '{}'".format(args.model_to_load))

    nlp = English()
    tokenizer = nlp.Defaults.create_tokenizer(nlp)

    with open(os.path.join(args.conceptNet, 'english_RelatedTo.pkl'), 'rb') as f:
        related_to_concept = pickle.load(f)
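# Illustrative check (not from the repository): the regex-based _find_nums
# above extracts integers and decimals, including bare fractions like ".5".
# The helper name _demo_find_nums is hypothetical.
import re

def _demo_find_nums(question):
    return re.findall(r'\d*\.?\d+', question)

print(_demo_find_nums("list 3 cities with an area over 2.5 or .5"))
# -> ['3', '2.5', '.5']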
experiment_name, output_path = create_experiment_folder(args.model_output_dir, args.exp_name)
print("Run experiment '{}'".format(experiment_name))
write_config_to_file(args, output_path)

device, n_gpu = setup_device()
set_seed_everywhere(args.seed, n_gpu)

sql_data, table_data, val_sql_data, val_table_data = spider_utils.load_dataset(
    args.data_dir, use_small=args.toy)
train_loader, dev_loader = get_data_loader(sql_data, val_sql_data, args.batch_size, True, False)

grammar = semQL.Grammar()
model = IRNet(args, device, grammar)
model.to(device)

# track the model
wandb.watch(model, log='parameters')

num_train_steps = len(train_loader) * args.num_epochs
optimizer, scheduler = build_optimizer_encoder(
    model, num_train_steps,
    args.lr_transformer, args.lr_connection, args.lr_base,
    args.scheduler_gamma)

tb_writer = SummaryWriter(output_path)

global_step = 0
best_acc = 0.0
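# A minimal sketch (assumed, not the repository's actual loop) of how training
# might continue from these initializations. train_one_epoch() and evaluate()
# are hypothetical stand-ins for the project's training and evaluation code.
for epoch in range(args.num_epochs):
    train_loss, global_step = train_one_epoch(
        model, train_loader, optimizer, scheduler, global_step)  # hypothetical
    dev_acc = evaluate(model, dev_loader)                        # hypothetical

    tb_writer.add_scalar('train/loss', train_loss, global_step)
    tb_writer.add_scalar('dev/accuracy', dev_acc, global_step)

    # keep only the checkpoint with the best dev accuracy
    if dev_acc > best_acc:
        best_acc = dev_acc
        torch.save(model.state_dict(), os.path.join(output_path, 'best_model.pt'))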
def _find_nums(sentence):
    # split()/isnumeric() only catches plain integer tokens ("17"); decimals
    # ("3.5") and digits glued to punctuation are missed, unlike the regex
    # variant above
    nums = []
    for word in sentence.split():
        if word.isnumeric():
            nums.append(word)
    return nums


if __name__ == '__main__':
    args = read_arguments_manual_inference()

    device, n_gpu = setup_device()
    set_seed_everywhere(args.seed, n_gpu)

    schemas_raw, schemas_dict = spider_utils.load_schema(args.data_dir)

    grammar = semQL.Grammar()
    model = IRNet(args, device, grammar)
    model.to(device)

    print("loading start")
    print(args.model_to_load)
    print(args.database)

    # load the pre-trained parameters (map_location='cpu' so no GPU is needed)
    model.load_state_dict(torch.load(args.model_to_load, map_location=torch.device('cpu')))
    model.eval()
    print("Load pre-trained model from '{}'".format(args.model_to_load))

    nlp = English()
    tokenizer = nlp.Defaults.create_tokenizer(nlp)

    with open(os.path.join(args.conceptNet, 'english_RelatedTo.pkl'), 'rb') as f:
        related_to_concept = pickle.load(f)
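# Illustrative comparison (not from the repository) of the two _find_nums
# variants: the split()/isnumeric() version misses decimals, which the regex
# version catches.
import re

question = "top 5 rivers longer than 2.5 km"
print([w for w in question.split() if w.isnumeric()])  # ['5']
print(re.findall(r'\d*\.?\d+', question))              # ['5', '2.5']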