def main():
    """Interactively evaluate a trained ELMo language model from stdin.

    Loads the DHA analyzer resources and a ``DataConverter``, restores a
    saved checkpoint from ``model.pt`` (CPU map), then reads one sentence
    per line from stdin and prints the word the model predicts for the
    final position of each sentence.

    Side effects: reads ``model.pt`` and the DHA resource directory from
    fixed local paths; reads stdin; prints to stdout.
    """
    opts = get_test_args()

    print("load data ...")
    # DHA config — analyzer resources come from a hard-coded local path.
    # NOTE(review): consider promoting these to CLI options.
    coll_name = 'default'
    dic_path = '/hanmail/projects/dha/dha_resources-2.9.41-default/'
    dha = pydha.Index()
    dha.init(dic_path, coll_name)
    converter = DataConverter(dha, 'data')

    print("load model ...")
    model = ELMo(opts, [converter.word_vocab_size, converter.char_vocab_size])
    model.load_state_dict(torch.load('model.pt', map_location='cpu'))
    model.eval()

    print("Evaluating ...")
    with torch.no_grad():
        while True:
            try:
                sentence = input()
            except (EOFError, KeyboardInterrupt):
                # Exit cleanly when stdin is exhausted or the user aborts;
                # the original loop could only die with a traceback.
                break
            # Only the id sequences feed the model; the remaining fields
            # (parsed words/POS, char positions/lengths) are unused here.
            word_idx, char_idx, _words, _pos, _cpos, _clen = \
                converter.get_id_sequence(sentence)
            pred = model(word_idx, char_idx)
            # Most likely next word for the last position of the sequence.
            print(converter.id2word[torch.argmax(pred[-1])])
# NOTE(review): fragment — this line begins mid-expression (it closes a
# results-list literal opened on an earlier, unseen line), so the enclosing
# function cannot be safely reformatted here; code is left byte-identical.
# What it visibly does: finishes an OrderedDict of eval metrics (adding AvNA
# for SQuAD v2), logs them, writes TensorBoard visualizations via
# util.visualize, and writes a '<split>_<sub_file>' CSV with header
# ['Id', 'Predicted'] from sub_dict keyed by sorted uuid; the trailing
# __main__ guard calls main(get_test_args()).
('EM', results['EM'])] if args.use_squad_v2: results_list.append(('AvNA', results['AvNA'])) results = OrderedDict(results_list) # Log to console results_str = ', '.join(f'{k}: {v:05.2f}' for k, v in results.items()) log.info(f'{args.split.title()} {results_str}') # Log to TensorBoard tbx = SummaryWriter(args.save_dir) util.visualize(tbx, pred_dict=pred_dict, eval_path=eval_file, step=0, split=args.split, num_visuals=args.num_visuals) # Write submission file sub_path = join(args.save_dir, args.split + '_' + args.sub_file) log.info(f'Writing submission file to {sub_path}...') with open(sub_path, 'w', newline='', encoding='utf-8') as csv_fh: csv_writer = csv.writer(csv_fh, delimiter=',') csv_writer.writerow(['Id', 'Predicted']) for uuid in sorted(sub_dict): csv_writer.writerow([uuid, sub_dict[uuid]]) if __name__ == '__main__': main(get_test_args())
# NOTE(review): fragment — starts inside a function whose `def` line is not
# visible (it ends with `return y_pred`), so only this comment is added and
# the code is left byte-identical. What it visibly does: logs args, scales
# batch_size by GPU count, loads word embeddings from args.word_emb_file,
# builds a DataParallel BiDAF model, restores args.load_path, runs one
# forward pass on cw_idxs/qn_idxs and returns the prediction. The __main__
# section reads a context/question pair from stdin, converts them to feature
# indices via convert_to_features, and calls predict(). NOTE(review): the
# char-index tensors produced here are never passed to predict — verify
# upstream whether that is intentional.
log.info('Args: {}'.format(dumps(vars(args), indent=4, sort_keys=True))) device, gpu_ids = util.get_available_devices() args.batch_size *= max(1, len(gpu_ids)) # Get embeddings log.info('Loading embeddings...') word_vectors = util.torch_from_json(args.word_emb_file) # Get model log.info('Building model...') model = BiDAF(word_vectors=word_vectors, hidden_size=args.hidden_size) model = nn.DataParallel(model, gpu_ids) log.info('Loading checkpoint from {}...'.format(args.load_path)) model = util.load_model(model, args.load_path, gpu_ids, return_step=False) model = model.to(device) model.eval() y_pred = model(cw_idxs, qn_idxs) return y_pred if __name__ == '__main__': context = input("Enter the context: ") question = input("Enter a question: ") data = [context, question] word2idx_dict = json_load(".data/word2idx.json") char2idx_dict = json_load(".data/char2idx.json") is_test = True context_idxs, context_char_idxs, ques_idxs, ques_char_idxs = convert_to_features( get_test_args(), data, word2idx_dict, char2idx_dict, is_test) predict(get_test_args(), context_idxs, ques_idxs)
# NOTE(review): fragment — third-party imports followed by the START of a
# __main__ entry point whose trailing `else:` branch continues past the last
# visible line, so the code is left byte-identical. What it visibly does:
# parses test args, sets up a save dir and logger under a 'test' subdir,
# scales batch_size by GPU count, logs the args, and loads a checkpoint from
# args.load_path when one is given (the `else:` fallback is not visible here).
import pandas as pd from tqdm import tqdm import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as sched import torch.utils.data as data from torch.utils.data import DataLoader, TensorDataset from torch import Tensor if __name__ == '__main__': args = get_test_args() # Set up logging args.save_dir = util.get_save_dir(args.save_dir, args.name, subdir='test') log = util.get_logger(args.save_dir, args.name) device, gpu_ids = util.get_available_devices() args.batch_size *= max(1, len(gpu_ids)) log.info(f'Args: {dumps(vars(args), indent=4, sort_keys=True)}') # Load the checkpoint if given as parameter if args.load_path: log.info(f'Loading checkpoint from {args.load_path}...') model = util.load_model(args.load_path) else:
# NOTE(review): fragment — begins mid-function (the tail writes the last CSV
# rows and ends with `return results['F1']`), so only this comment is added
# and the code is left byte-identical. The __main__ section sweeps a
# ('delete', nb) action over nb in [0, max_steps) and collects the F1 score
# returned by main(get_test_args(), actions=...) into delete_F1_values; the
# large commented-out section is a disabled 'add2'/'substitute' variant of
# the same sweep and is retained as-is (candidate for deletion upstream).
csv_writer.writerow([uuid, sub_dict[uuid]]) return results['F1'] if __name__ == '__main__': max_steps = 2 substitute_F1_values = [] add_F1_values = [] delete_F1_values = [] for nb in range(0, max_steps): proposed_actions = ('delete', nb) delete_F1_values.append(main(get_test_args(), actions=proposed_actions)) # # # # #proposed_actions = ('delete', nb) # #delete_F1_values.append(main(get_test_args(), actions = proposed_actions)) # # print("Computing add2") # for nb in range(0, max_steps): # proposed_actions = ('add2', nb) # add_F1_values.append(main(get_test_args(), actions=proposed_actions)) # print(">>>", nb, " : ", add_F1_values) # print("add OK") # print("result for ",proposed_actions[0], ": ", substitute_F1_values)