Example #1
 def __init__(self,
              args=None,
              vocab=None,
              emb_matrix=None,
              model_file=None,
              use_cuda=False,
              training_mode=False):
     self.use_cuda = use_cuda
     self.training_mode = training_mode
     if model_file is not None:
         # load from file
         self.load(model_file, use_cuda)
     else:
         # build model from scratch
         self.args = args
         self.model = None if args['dict_only'] else Seq2SeqModel(
             args,
             emb_matrix=emb_matrix,
             use_cuda=use_cuda,
             training_mode=training_mode)
         self.vocab = vocab
         # dictionary-based expansion lookup
         self.expansion_dict = dict()
     if not self.args['dict_only']:
         self.crit = SequenceLoss(self.vocab.size)
         self.parameters = [
             p for p in self.model.parameters() if p.requires_grad
         ]
         if use_cuda:
             self.model.cuda()
             self.crit.cuda()
         else:
             self.model.cpu()
             self.crit.cpu()
         self.optimizer = get_optimizer(self.args['optim'], self.parameters,
                                        self.args['lr'])
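
The requires_grad filter and the cuda()/cpu() placement above are a standard PyTorch pattern. A self-contained sketch of the same idea, using stand-in modules rather than the project's Seq2SeqModel and SequenceLoss:

 # Stand-alone sketch (not the original project code) of the trainable-parameter
 # filter and device placement used above.
 import torch
 import torch.nn as nn

 model = nn.Linear(8, 4)             # stand-in for Seq2SeqModel
 crit = nn.CrossEntropyLoss()        # stand-in for SequenceLoss
 model.weight.requires_grad_(False)  # e.g. a frozen embedding matrix

 # Only trainable parameters are handed to the optimizer.
 parameters = [p for p in model.parameters() if p.requires_grad]

 use_cuda = torch.cuda.is_available()
 if use_cuda:
     model.cuda()
     crit.cuda()
 else:
     model.cpu()
     crit.cpu()

 optimizer = torch.optim.Adam(parameters, lr=1e-3)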
Example #2
 def __init__(self, args=None, vocab=None, emb_matrix=None, model_file=None, use_cuda=False, training_mode=False):
     self.use_cuda = use_cuda
     self.training_mode = training_mode
     if model_file is not None:
         # load everything from file
         self.load(model_file, use_cuda)
     else:
         # build model from scratch
         self.args = args
         self.model = None if args['dict_only'] else Seq2SeqModel(args, emb_matrix=emb_matrix, use_cuda=use_cuda,
                                                                  training_mode=training_mode)
         self.vocab = vocab
         # dict-based components
         self.word_dict = dict()
         self.composite_dict = dict()
     if not self.args['dict_only']:
         if self.args.get('edit', False):
             self.crit = MixLoss(self.vocab['char'].size, self.args['alpha'])
         else:
             self.crit = SequenceLoss(self.vocab['char'].size)
         self.parameters = [p for p in self.model.parameters() if p.requires_grad]
         if use_cuda:
             self.model.cuda()
             self.crit.cuda()
         else:
             self.model.cpu()
             self.crit.cpu()
         self.optimizer = get_optimizer(self.args['optim'], self.parameters, self.args['lr'])
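
get_optimizer itself is not shown in these examples; a hypothetical sketch of such a helper, mapping the optimizer name in args['optim'] to a torch.optim class (the real helper may support different names and defaults):

 # Hypothetical get_optimizer helper; supported names and defaults are assumptions.
 import torch

 def get_optimizer(name, parameters, lr):
     name = name.lower()
     if name == 'sgd':
         return torch.optim.SGD(parameters, lr=lr, momentum=0.9)
     if name == 'adagrad':
         return torch.optim.Adagrad(parameters, lr=lr)
     if name == 'adam':
         return torch.optim.Adam(parameters, lr=lr)
     if name == 'adamax':
         return torch.optim.Adamax(parameters, lr=lr)
     raise ValueError("Unsupported optimizer: {}".format(name))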
Example #3
 def load(self, filename, use_cuda=False):
     try:
         checkpoint = torch.load(filename, lambda storage, loc: storage)
     except BaseException:
         # re-raise so a failed load is not silently ignored
         raise
     self.args = checkpoint['config']
     self.expansion_dict = checkpoint['dict']
     if not self.args['dict_only']:
         self.model = Seq2SeqModel(self.args, use_cuda=use_cuda)
         self.model.load_state_dict(checkpoint['model'])
     else:
         self.model = None
     self.vocab = Vocab.load_state_dict(checkpoint['vocab'])
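
load() expects a checkpoint dict with 'config', 'dict', 'model', and 'vocab' entries. A self-contained sketch of that layout and the matching save/load round trip (the model, vocab contents, and file name are stand-ins; only the key names mirror the code above):

 # Minimal checkpoint round trip; stand-in contents, same key layout as load().
 import torch
 import torch.nn as nn

 model = nn.Linear(8, 4)                                  # stand-in network
 checkpoint = {
     'model': model.state_dict(),                         # network weights
     'vocab': {'tokens': ['<pad>', '<unk>', 'a', 'b']},   # stand-in vocab state
     'dict': {},                                          # expansion dictionary
     'config': {'dict_only': False, 'optim': 'adam', 'lr': 1e-3},
 }
 torch.save(checkpoint, 'expander_checkpoint.pt')

 # The lambda keeps all tensors on CPU, as in the load() above.
 restored = torch.load('expander_checkpoint.pt', lambda storage, loc: storage)
 model2 = nn.Linear(8, 4)
 model2.load_state_dict(restored['model'])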