Example no. 1
0
    # --- Truncated script fragment: the argparser is created above this view ---
    # CLI flag: GPU device id; -1 selects CPU.
    argparser.add_argument('--gpu',
                           default=-1,
                           type=int,
                           help='Use id of gpu, -1 if cpu.')

    # Parse known args; unrecognized flags are forwarded to Configurable
    # as extra overrides.
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)
    torch.set_num_threads(args.thread)
    parser_config = ParserConfigurable(args.parser_config_file)

    # Restore the dependency-parser vocabulary and build placeholder
    # embeddings from the pretrained-embedding file.
    # NOTE(review): pickle.load is only safe on trusted checkpoints, and the
    # open() handles here are never closed — consider `with open(...)`.
    dep_vocab = pickle.load(open(parser_config.load_vocab_path, 'rb'))
    dep_vec = dep_vocab.create_placeholder_embs(
        parser_config.pretrained_embeddings_file)

    # Rebuild the parser model and load its weights; map_location forces all
    # tensors onto CPU regardless of the device they were saved from.
    parser_model = ParserModel(dep_vocab, parser_config, dep_vec)
    parser_model.load_state_dict(torch.load(parser_config.load_model_path, \
                                map_location=lambda storage, loc: storage))

    # Build the task vocabulary from the training file and persist it.
    vocab = creatVocab(config.train_file, config.min_occur_count)
    vec = vocab.load_pretrained_embs(config.pretrained_embeddings_file)
    pickle.dump(vocab, open(config.save_vocab_path, 'wb'))

    # GPU selection.
    # NOTE(review): `gpu` is not defined anywhere in this fragment — unless it
    # is bound above this view, this line raises NameError; presumably
    # `args.gpu >= 0` alone was intended. Verify against the full file.
    config.use_cuda = False
    gpu_id = -1
    if gpu and args.gpu >= 0:
        torch.cuda.set_device(args.gpu)
        config.use_cuda = True
        print("GPU ID: ", args.gpu)
        gpu_id = args.gpu

    model = BiLSTMModel(vocab, config, parser_config, vec)
    if config.use_cuda:  # body continues past this view
Example no. 2
0
File: test.py — Project: xuihau/G2GTr
        # Truncated fragment: `f` is opened by a `with` statement above this
        # view and holds a pickled parser object.
        # NOTE(review): pickle.load executes arbitrary code on untrusted data —
        # safe only for trusted checkpoints.
        parser = pickle.load(f)
    # Prefer GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    print(80 * "=")
    print("INITIALIZING")
    print(80 * "=")
    start = time.time()
    debug = False
    # Build the test split; `pad_action` presumably maps action labels to
    # padding ids — verify against load_and_preprocess_data_test.
    test_data,pad_action = load_and_preprocess_data_test(opt,parser,debug)

    test_batched = batch_dev_test(test_data, opt.batchsize, parser.NULL, parser.P_NULL,
                                  parser ,no_sort=False)

    model = ParserModel(parser.embedding_shape, device, parser, pad_action, opt)

    # NOTE(review): `checkpoint` is not defined in this fragment — it must be
    # loaded above this view. strict=False silently ignores any key mismatch
    # between checkpoint and model; confirm that is intentional.
    model.load_state_dict(checkpoint['model'],strict=False)

    model = model.to(device)
    print("took {:.2f} seconds\n".format(time.time() - start))

    print(80 * "=")
    print("TESTING")
    print(80 * "=")
    print("Final evaluation on test set", )
    # Disable dropout / batch-norm updates for evaluation.
    model.eval()

    # UAS/LAS come back as fractions and are scaled to percentages below.
    UAS, LAS = validate(model, parser, test_batched, test_data, device, opt.batchsize,pad_action['P'],opt)
    print("- test UAS: {:.2f}".format(UAS * 100.0))
    print("- test LAS: {:.2f}".format(LAS * 100.0))
    print("Done!")
Example no. 3
0
File: run.py — Project: xuihau/G2GTr
    # Truncated fragment: dev_data/test_data, opt, parser, pad_action, device
    # and embeddings are all defined above this view.
    dev_batched = batch_dev_test(dev_data, opt.batchsize, parser.NULL,
                                 parser.P_NULL, parser)
    test_batched = batch_dev_test(test_data, opt.batchsize, parser.NULL,
                                  parser.P_NULL, parser)

    start = time.time()

    # embeddings.shape[0] / embeddings.shape[1] are presumably vocabulary size
    # and embedding dimension — verify against ParserModel's signature.
    model = ParserModel(embeddings.shape[0], device, parser, pad_action, opt,
                        embeddings.shape[1])
    # Report the number of trainable parameters.
    print("number of pars:{}".format(
        sum(p.numel() for p in model.parameters() if p.requires_grad)))
    if opt.pretrained:
        # Warm-start from a previously saved checkpoint.
        # NOTE(review): the path concatenates modelpath + "model.weights" +
        # "pretrained" with no separators — confirm the saver writes the same
        # concatenated name.
        state_dict = torch.load(opt.mainpath + '/output/' +
                                str(opt.modelpath) + "model.weights" +
                                "pretrained")
        model.load_state_dict(state_dict['model'])
        # Release the checkpoint dict before further large allocations.
        del state_dict

    if opt.multigpu:
        print('multi')
        print(torch.cuda.device_count())
        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
            model = nn.DataParallel(model)

    model = model.to(device)

    print("took {:.2f} seconds\n".format(time.time() - start))

    print(80 * "=")  # banner continues past this view
Example no. 4
0
                           # Truncated fragment: the enclosing
                           # argparser.add_argument('--gpu', ...) call opens
                           # above this view.
                           default=-1,
                           type=int,
                           help='Use id of gpu, -1 if cpu.')

    # Parse known args; unrecognized flags are forwarded to Configurable.
    args, extra_args = argparser.parse_known_args()
    config = Configurable(args.config_file, extra_args)
    parser_config = ParserConfigurable(args.parser_config_file)
    torch.set_num_threads(args.thread)

    # Restore the dependency parser: vocabulary first, then weights mapped
    # onto CPU via map_location.
    # NOTE(review): pickle.load / torch.load are safe only on trusted
    # checkpoints; the open() handles here are never closed.
    dep_vocab = pickle.load(open(parser_config.load_vocab_path, 'rb'))
    parser_model = ParserModel(dep_vocab, parser_config)
    dump_model = torch.load(parser_config.load_model_path,
                            map_location=lambda storage, loc: storage)
    # Pull the external-word embedding matrix out of the checkpoint, then
    # delete its key so load_state_dict only sees keys this ParserModel
    # declares (presumably this ParserModel variant does not own
    # extword_embed — verify).
    dep_vec = dump_model["extword_embed.weight"].detach().cpu().numpy()
    del dump_model["extword_embed.weight"]
    parser_model.load_state_dict(dump_model)
    # Re-house the extracted embeddings in a standalone ExtWord module and
    # save both pieces under distinct suffixes for later reuse.
    parser_extembed = ExtWord(dep_vocab, parser_config, dep_vec)
    torch.save(parser_model.state_dict(), config.save_model_path + ".synbasic")
    torch.save(parser_extembed.state_dict(),
               config.save_model_path + ".synvec")
    pickle.dump(dep_vocab, open(config.save_vocab_path + ".syn", 'wb'))

    # Build and persist the task vocabulary and its pretrained embeddings.
    vocab = creatVocab(config.train_file, config.min_occur_count)
    vec = vocab.load_initialize_embs(config.pretrained_embeddings_file)
    pickle.dump(vocab, open(config.save_vocab_path, 'wb'))

    # GPU selection.
    # NOTE(review): `gpu` is not defined anywhere in this fragment — unless it
    # is bound above this view, this line raises NameError; presumably
    # `args.gpu != -1` alone was intended. Verify against the full file.
    config.use_cuda = False
    gpu_id = -1
    if gpu and args.gpu != -1:
        config.use_cuda = True
        torch.cuda.set_device(args.gpu)  # fragment ends mid-branch