# ===== Example #1 =====
        'forget_bias': 1.0,
        'batch_first': True,
        'bidirectional': True
    }))
# CRF layer configured with the label vocabulary.
crf = CRF(Config({'label_vocab': label_vocab}))

# Final linear projection: LSTM hidden size -> one score per label.
linear_conf = Config({
    'in_features': lstm.output_size,
    'out_features': len(label_vocab),
})
output_linear = Linear(linear_conf)
# Restore each component's saved parameters from the checkpoint, keyed by
# component name under state['model'].
for key, component in (
    ('word_embed', word_embed),
    ('char_cnn', char_cnn),
    ('char_highway', char_highway),
    ('lstm', lstm),
    ('crf', crf),
    ('output_linear', output_linear),
):
    component.load_state_dict(state['model'][key])
# Assemble the joint LSTM-CRF tagger from the components built above, using
# the dropout probabilities recorded in the training arguments.
model_kwargs = dict(
    token_vocab=token_vocab,
    label_vocab=label_vocab,
    char_vocab=char_vocab,
    word_embedding=word_embed,
    char_embedding=char_cnn,
    crf=crf,
    lstm=lstm,
    univ_fc_layer=output_linear,
    embed_dropout_prob=train_args['embed_dropout'],
    lstm_dropout_prob=train_args['lstm_dropout'],
    linear_dropout_prob=train_args['linear_dropout'],
    char_highway=char_highway,
)
lstm_crf = LstmCrf(**model_kwargs)
# Load the combined model's saved parameters on top of the per-component ones.
lstm_crf.load_state_dict(state['model']['lstm_crf'])

if use_gpu:
# ===== Example #2 =====
                   char_vocab,
                   word_embedding=word_embed,
                   char_embedding=char_embed,
                   crf=crf,
                   lstm=lstm,
                   univ_fc_layer=linear,
                   embed_dropout_prob=train_args['feat_dropout'],
                   lstm_dropout_prob=train_args['lstm_dropout'],
                   char_highway=char_hw if train_args['use_highway'] else None)

# Restore saved parameters for every component, then the combined model.
model_state = state['model']
for key, module in (
    ('word_embed', word_embed),
    ('char_embed', char_embed),
    ('char_hw', char_hw),
    ('lstm', lstm),
    ('crf', crf),
    ('linear', linear),
    ('lstm_crf', lstm_crf),
):
    module.load_state_dict(model_state[key])

# Move the model onto the GPU when requested.
if use_gpu:
    lstm_crf.cuda()

# Load the evaluation data and convert tokens/labels/chars to indices.
logger.info('Loading data')
parser = ConllParser()
test_set = SeqLabelDataset(data_file, parser=parser)
test_set.numberize(token_vocab, label_vocab, char_vocab)
# Reverse lookups: integer index -> token / label string.
idx_token = {index: token for token, index in token_vocab.items()}
idx_label = {index: label for label, index in label_vocab.items()}
processor = SeqLabelProcessor(gpu=use_gpu)

try:
# ===== Example #3 =====
                   word_embedding=word_embed,
                   char_embedding=char_embed,
                   crf=crf,
                   lstm=lstm,
                   univ_fc_layer=univ_linear,
                   spec_fc_layer=spec_linear,
                   embed_dropout_prob=train_args['feat_dropout'],
                   lstm_dropout_prob=train_args['lstm_dropout'],
                   char_highway=char_hw if train_args['use_highway'] else None)

# Pairs of (checkpoint key, module) — each module's parameters are restored
# from state['model'], finishing with the combined model itself.
component_states = (
    ('word_embed', word_embed),
    ('char_embed', char_embed),
    ('char_hw', char_hw),
    ('lstm', lstm),
    ('crf', crf),
    ('univ_linear', univ_linear),
    ('spec_linear', spec_linear),
    ('lstm_crf', lstm_crf),
)
for key, module in component_states:
    module.load_state_dict(state['model'][key])

# Transfer the model to the GPU if one is in use.
if use_gpu:
    lstm_crf.cuda()

# Read the CoNLL-format evaluation file and numberize it with the vocabs.
logger.info('Loading data')
parser = ConllParser()
test_set = SeqLabelDataset(data_file, parser=parser)
test_set.numberize(token_vocab, label_vocab, char_vocab)
# Inverted vocabularies for mapping predicted indices back to strings.
idx_token = {idx: tok for tok, idx in token_vocab.items()}
idx_label = {idx: lbl for lbl, idx in label_vocab.items()}
processor = SeqLabelProcessor(gpu=use_gpu)