Example #1
0
def load_abs_net(abs_dir):
    """Load a trained CopySumm abstractor and its vocabulary from a model directory.

    Args:
        abs_dir: directory containing ``meta.json``, ``vocab.pkl`` and the
            checkpoints readable by ``load_best_ckpt``.

    Returns:
        Tuple ``(abstractor, word2id)`` — the network with the best
        checkpoint's weights loaded, and the word -> id vocabulary dict.

    Raises:
        AssertionError: if the metadata does not describe a ``base_abstractor``.
    """
    # Context managers ensure the file handles are closed promptly
    # (the original left both files open).
    with open(join(abs_dir, 'meta.json')) as meta_f:
        abs_meta = json.load(meta_f)
    assert abs_meta['net'] == 'base_abstractor'
    abs_args = abs_meta['net_args']
    abs_ckpt = load_best_ckpt(abs_dir)
    with open(join(abs_dir, 'vocab.pkl'), 'rb') as vocab_f:
        word2id = pkl.load(vocab_f)
    abstractor = CopySumm(**abs_args)
    abstractor.load_state_dict(abs_ckpt)
    return abstractor, word2id
Example #2
0
def configure_net(abs_dir):
    """Build a CopySumm network from the metadata and best checkpoint in *abs_dir*.

    Args:
        abs_dir: directory containing ``meta.json`` and the checkpoints
            readable by ``load_best_ckpt``.

    Returns:
        Tuple ``(net, net_args)`` — the loaded network and the constructor
        kwargs it was built with.

    Raises:
        AssertionError: if the metadata does not describe a ``base_abstractor``.
    """
    # The original read and asserted the metadata twice (copy-paste
    # duplication of the load + assert pair); load it exactly once, and
    # close the file with a context manager.
    with open(join(abs_dir, 'meta.json')) as meta_f:
        abs_meta = json.load(meta_f)
    assert abs_meta['net'] == 'base_abstractor'
    net_args = abs_meta['net_args']

    abs_ckpt = load_best_ckpt(abs_dir)
    net = CopySumm(**net_args)
    net.load_state_dict(abs_ckpt)
    return net, net_args
 def __init__(self, abs_dir, max_len=30, cuda=True):
     """Load the abstractor network and vocabulary from *abs_dir*.

     Args:
         abs_dir: directory with ``meta.json``, ``vocab.pkl`` and checkpoints.
         max_len: maximum output length used at decode time.
         cuda: place the network on GPU when True, CPU otherwise.
     """
     # Context managers close the metadata and vocab files promptly
     # (the original left both handles open).
     with open(join(abs_dir, 'meta.json')) as meta_f:
         abs_meta = json.load(meta_f)
     assert abs_meta['net'] == 'base_abstractor'
     abs_args = abs_meta['net_args']
     abs_ckpt = load_best_ckpt(abs_dir)
     with open(join(abs_dir, 'vocab.pkl'), 'rb') as vocab_f:
         word2id = pkl.load(vocab_f)
     abstractor = CopySumm(**abs_args)
     abstractor.load_state_dict(abs_ckpt)
     self._device = torch.device('cuda' if cuda else 'cpu')
     self._net = abstractor.to(self._device)
     self._word2id = word2id
     # Inverse mapping for converting decoded ids back to words.
     self._id2word = {i: w for w, i in word2id.items()}
     self._max_len = max_len
Example #4
0
 def __init__(self, abs_dir, max_len=30, cuda=True):
     """Load the abstractor network and vocabulary from *abs_dir*.

     Args:
         abs_dir: directory with ``meta.json``, ``vocab.pkl`` and checkpoints.
         max_len: maximum output length used at decode time.
         cuda: place the network on GPU when True, CPU otherwise.
     """
     # Context managers close the metadata and vocab files promptly
     # (the original left both handles open).
     with open(join(abs_dir, 'meta.json')) as meta_f:
         abs_meta = json.load(meta_f)
     assert abs_meta['net'] == 'base_abstractor'
     abs_args = abs_meta['net_args']
     abs_ckpt = load_best_ckpt(abs_dir)
     with open(join(abs_dir, 'vocab.pkl'), 'rb') as vocab_f:
         word2id = pkl.load(vocab_f)
     abstractor = CopySumm(**abs_args)
     abstractor.load_state_dict(abs_ckpt)
     self._device = torch.device('cuda' if cuda else 'cpu')
     self._net = abstractor.to(self._device)
     self._word2id = word2id
     # Inverse mapping for converting decoded ids back to words.
     self._id2word = {i: w for w, i in word2id.items()}
     self._max_len = max_len
def configure_net(vocab_size, emb_dim, n_hidden, bidirectional, n_layer):
    """Construct a CopySumm network from explicit hyperparameters.

    Returns:
        Tuple ``(net, net_args)`` — the freshly constructed network and the
        kwargs dict it was built with (kept for later serialization).
    """
    # Collect the constructor kwargs in a single literal rather than
    # assigning keys one by one.
    net_args = {
        'vocab_size': vocab_size,
        'emb_dim': emb_dim,
        'n_hidden': n_hidden,
        'bidirectional': bidirectional,
        'n_layer': n_layer,
    }
    return CopySumm(**net_args), net_args
Example #6
0
def configure_net(vocab_size,
                  emb_dim,
                  n_hidden,
                  bidirectional,
                  n_layer,
                  load_from=None):
    """Construct a CopySumm network, optionally restoring saved weights.

    Args:
        vocab_size, emb_dim, n_hidden, bidirectional, n_layer:
            CopySumm constructor hyperparameters.
        load_from: optional model directory; when given, the best
            checkpoint found there is loaded into the network.

    Returns:
        Tuple ``(net, net_args)`` — the network and its constructor kwargs.
    """
    # Build the kwargs in one literal instead of key-by-key assignment.
    net_args = {
        'vocab_size': vocab_size,
        'emb_dim': emb_dim,
        'n_hidden': n_hidden,
        'bidirectional': bidirectional,
        'n_layer': n_layer,
    }
    net = CopySumm(**net_args)

    if load_from is not None:
        net.load_state_dict(load_best_ckpt(load_from))

    return net, net_args
Example #7
0
def configure_net(vocab_size, emb_dim, n_hidden, bidirectional, n_layer):
    """Construct a CopySumm network with fixed dropout settings.

    Returns:
        Tuple ``(net, net_args)`` — the network and the kwargs dict it was
        built with, including the dropout configuration.
    """
    # One literal gathers both the caller-supplied hyperparameters and the
    # hard-coded dropout rates (0 = no dropout everywhere below).
    net_args = {
        'vocab_size': vocab_size,
        'emb_dim': emb_dim,
        'n_hidden': n_hidden,
        'bidirectional': bidirectional,
        'n_layer': n_layer,
        'dropoute': 0.2,  # dropout to remove words from embedding layer
        'dropouti': 0.2,  # dropout for input embedding layers
        'dropout': 0.2,   # dropout applied to other layers
        'wdrop': 0.2,     # weight dropout on the RNN hidden-to-hidden matrix
        'dropouth': 0.2,  # dropout for rnn layers
    }
    return CopySumm(**net_args), net_args