# Persist the vocabularies, parse CLI args, build the three models, and
# (optionally) restore pretrained weights, moving everything to GPU when
# both the hardware and the --use-cuda flag allow it.

# Use context managers so the pickle files are flushed and closed even if
# dump() raises (the original `pickle.dump(obj, open(...))` leaked handles).
with open(config.save_vocab_path, 'wb') as vocab_file:
    pickle.dump(vocab, vocab_file)
with open(config.save_char_vocab_path, 'wb') as char_vocab_file:
    pickle.dump(char_vocab, char_vocab_file)

args, extra_args = argparser.parse_known_args()
config = Configurable(args.config_file, extra_args)
torch.set_num_threads(args.thread)

# CUDA is enabled only when the hardware is present (`gpu` -- presumably
# torch.cuda.is_available(), set earlier in the file; TODO confirm) AND the
# user asked for it on the command line.
config.use_cuda = False
if gpu and args.use_cuda:
    config.use_cuda = True
print("\nGPU using status: ", config.use_cuda)
# print(config.use_cuda)

model = ParserModel(vocab, config, vec)
classifier_model = ClassifierModel(config)
char_emb_model = CharEmbModel(char_vocab, config)

if args.use_pretrain:
    # NOTE(review): torch.load() without map_location fails on CPU-only
    # hosts when the checkpoints were saved from GPU -- consider passing
    # map_location='cpu' (or the target device) here.
    model.load_state_dict(torch.load(config.load_model_path))
    classifier_model.load_state_dict(
        torch.load(config.load_classifier_model_path))
    char_emb_model.load_state_dict(torch.load(config.load_char_model_path))
    print("###Load pretrain parser ok.###")

if config.use_cuda:
    torch.backends.cudnn.enabled = True
    model = model.cuda()
    classifier_model = classifier_model.cuda()
    char_emb_model = char_emb_model.cuda()

print(model)
# Build the configuration and the three models, optionally restore the
# pretrained parser weights, move everything to GPU when enabled, wrap the
# models in their task-level drivers, and load the training corpus.

config = Configurable(args.config_file, extra_args)
torch.set_num_threads(args.thread)

# CUDA is enabled only when the hardware is present (`gpu` -- presumably
# torch.cuda.is_available(), set earlier in the file; TODO confirm) AND the
# user asked for it on the command line.
config.use_cuda = False
if gpu and args.use_cuda:
    config.use_cuda = True
print("\nGPU using status: ", config.use_cuda)
# print(config.use_cuda)

model = ParserModel(vocab, config, vec)
if args.use_pretrain:
    # NOTE(review): torch.load() without map_location fails on CPU-only
    # hosts when the checkpoint was saved from GPU -- consider passing
    # map_location='cpu' (or the target device) here.
    model.load_state_dict(torch.load(config.load_model_path))
    print("###Load pretrain parser ok.###")

classifier_model = ClassifierModel(config)
char_emb_model = CharEmbModel(char_vocab, config)

if config.use_cuda:
    torch.backends.cudnn.enabled = True
    model = model.cuda()
    classifier_model = classifier_model.cuda()
    char_emb_model = char_emb_model.cuda()

print(model)
print(classifier_model)
print(char_emb_model)

# Task-level wrappers around the raw nn.Modules.
parser = BiaffineParser(model, vocab.ROOT)
classifier = DomainClassifier(classifier_model)
charEmbedding = CharEmb(char_emb_model)

data = read_corpus(config.train_file, vocab)