Example #1
import os

import torch
import torch.nn as nn
from torch.autograd import Variable

# ConvTokenEmbedder, LstmTokenEmbedder, ElmobiLm and LstmbiLm are assumed to be
# provided by the surrounding project (its token-embedder and encoder modules).
class Model(nn.Module):
  def __init__(self, config, word_emb_layer, char_emb_layer, use_cuda=False):
    super(Model, self).__init__()
    self.use_cuda = use_cuda
    self.config = config

    if config['token_embedder']['name'].lower() == 'cnn':
      self.token_embedder = ConvTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)
    elif config['token_embedder']['name'].lower() == 'lstm':
      self.token_embedder = LstmTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)

    if config['encoder']['name'].lower() == 'elmo':
      self.encoder = ElmobiLm(config, use_cuda)
    elif config['encoder']['name'].lower() == 'lstm':
      self.encoder = LstmbiLm(config, use_cuda)

    self.output_dim = config['encoder']['projection_dim']

  def forward(self, word_inp, chars_package, mask_package):
    token_embedding = self.token_embedder(word_inp, chars_package, (mask_package[0].size(0), mask_package[0].size(1)))
    if self.config['encoder']['name'].lower() == 'elmo':
      mask = Variable(mask_package[0]).cuda() if self.use_cuda else Variable(mask_package[0])
      encoder_output = self.encoder(token_embedding, mask)
      sz = encoder_output.size()
      # Duplicate the token embedding so its width matches the bidirectional
      # encoder output, then stack it on top as the lowest layer of the
      # returned representation.
      token_embedding = torch.cat([token_embedding, token_embedding], dim=2).view(1, sz[1], sz[2], sz[3])
      encoder_output = torch.cat([token_embedding, encoder_output], dim=0)
    elif self.config['encoder']['name'].lower() == 'lstm':
      encoder_output = self.encoder(token_embedding)
    return encoder_output

  def load_model(self, path):
    self.token_embedder.load_state_dict(torch.load(os.path.join(path, 'token_embedder.pkl'),
                                                   map_location=lambda storage, loc: storage))
    self.encoder.load_state_dict(torch.load(os.path.join(path, 'encoder.pkl'),
                                            map_location=lambda storage, loc: storage))
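
For reference, the constructor above only inspects a handful of configuration keys. Below is a minimal sketch of such a config; the key names are taken from the code, while the concrete values (and the embedding-layer arguments) are illustrative assumptions, and the embedder and encoder classes will typically read further keys of their own.

config = {
    'token_embedder': {'name': 'cnn'},   # or 'lstm'
    'encoder': {
        'name': 'elmo',                  # or 'lstm'
        'projection_dim': 512,
    },
}

# Hypothetical instantiation; word_emb_layer and char_emb_layer are assumed to
# be embedding modules built elsewhere in the project.
# model = Model(config, word_emb_layer, char_emb_layer, use_cuda=False)
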
Example #2
    def __init__(self,
                 config,
                 word_emb_layer,
                 char_emb_layer,
                 n_class,
                 use_cuda=False):
        super(Model, self).__init__()
        self.use_cuda = use_cuda
        self.config = config

        if config['token_embedder']['name'].lower() == 'cnn':
            self.token_embedder = ConvTokenEmbedder(config, word_emb_layer,
                                                    char_emb_layer, use_cuda)
        elif config['token_embedder']['name'].lower() == 'lstm':
            self.token_embedder = LstmTokenEmbedder(config, word_emb_layer,
                                                    char_emb_layer, use_cuda)

        if config['encoder']['name'].lower() == 'elmo':
            self.encoder = ElmobiLm(config, use_cuda)
        elif config['encoder']['name'].lower() == 'lstm':
            self.encoder = LstmbiLm(config, use_cuda)

        self.output_dim = config['encoder']['projection_dim']
        if config['classifier']['name'].lower() == 'softmax':
            self.classify_layer = SoftmaxLayer(self.output_dim, n_class)
        elif config['classifier']['name'].lower() == 'cnn_softmax':
            self.classify_layer = CNNSoftmaxLayer(
                self.token_embedder, self.output_dim, n_class,
                config['classifier']['n_samples'],
                config['classifier']['corr_dim'], use_cuda)
        elif config['classifier']['name'].lower() == 'sampled_softmax':
            self.classify_layer = SampledSoftmaxLayer(
                self.output_dim, n_class, config['classifier']['n_samples'],
                use_cuda)
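
Example #2 adds an output classifier on top of the same embedder/encoder setup. The three supported classifier sections could look roughly like the sketch below; only the key names come from the code above, the sample values are assumptions for illustration.

classifier_softmax = {'name': 'softmax'}
classifier_cnn_softmax = {'name': 'cnn_softmax', 'n_samples': 8192, 'corr_dim': 64}
classifier_sampled_softmax = {'name': 'sampled_softmax', 'n_samples': 8192}
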
Example #3
  def __init__(self, config, word_emb_layer, char_emb_layer, use_cuda=False):
    super(Model, self).__init__()
    self.use_cuda = use_cuda
    self.config = config

    if config['token_embedder']['name'].lower() == 'cnn':
      self.token_embedder = ConvTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)
    elif config['token_embedder']['name'].lower() == 'lstm':
      self.token_embedder = LstmTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)

    if config['encoder']['name'].lower() == 'elmo':
      self.encoder = ElmobiLm(config, use_cuda)
    elif config['encoder']['name'].lower() == 'lstm':
      self.encoder = LstmbiLm(config, use_cuda)

    self.output_dim = config['encoder']['projection_dim']
Example #4
import os

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable

# ConvTokenEmbedder, LstmTokenEmbedder, ElmobiLm, LstmbiLm and the classifier
# layers (SoftmaxLayer, CNNSoftmaxLayer, SampledSoftmaxLayer) are assumed to be
# provided by the surrounding project.
class Model(nn.Module):
  def __init__(self, config, word_emb_layer, char_emb_layer, n_class, use_cuda=False):
    super(Model, self).__init__()
    self.use_cuda = use_cuda
    self.config = config

    if config['token_embedder']['name'].lower() == 'cnn':
      self.token_embedder = ConvTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)
    elif config['token_embedder']['name'].lower() == 'lstm':
      self.token_embedder = LstmTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)

    if config['encoder']['name'].lower() == 'elmo':
      self.encoder = ElmobiLm(config, use_cuda)
    elif config['encoder']['name'].lower() == 'lstm':
      self.encoder = LstmbiLm(config, use_cuda)

    self.output_dim = config['encoder']['projection_dim']
    if config['classifier']['name'].lower() == 'softmax':
      self.classify_layer = SoftmaxLayer(self.output_dim, n_class)
    elif config['classifier']['name'].lower() == 'cnn_softmax':
      self.classify_layer = CNNSoftmaxLayer(self.token_embedder, self.output_dim, n_class,
                                            config['classifier']['n_samples'], config['classifier']['corr_dim'],
                                            use_cuda)
    elif config['classifier']['name'].lower() == 'sampled_softmax':
      self.classify_layer = SampledSoftmaxLayer(self.output_dim, n_class, config['classifier']['n_samples'], use_cuda)

  def forward(self, word_inp, chars_inp, mask_package):
    """

    :param word_inp:
    :param chars_inp:
    :param mask_package: Tuple[]
    :return:
    """
    classifier_name = self.config['classifier']['name'].lower()

    if self.training and (classifier_name == 'cnn_softmax' or classifier_name == 'sampled_softmax'):
      self.classify_layer.update_negative_samples(word_inp, chars_inp, mask_package[0])
      self.classify_layer.update_embedding_matrix()

    token_embedding = self.token_embedder(word_inp, chars_inp, (mask_package[0].size(0), mask_package[0].size(1)))
    token_embedding = F.dropout(token_embedding, self.config['dropout'], self.training)

    encoder_name = self.config['encoder']['name'].lower()
    if encoder_name == 'elmo':
      mask = Variable(mask_package[0]).cuda() if self.use_cuda else Variable(mask_package[0])
      encoder_output = self.encoder(token_embedding, mask)
      encoder_output = encoder_output[1]
      # [batch_size, len, hidden_size]
    elif encoder_name == 'lstm':
      encoder_output = self.encoder(token_embedding)
    else:
      raise ValueError('Unknown encoder name: {}'.format(encoder_name))

    encoder_output = F.dropout(encoder_output, self.config['dropout'], self.training)
    # The encoder output concatenates both directions along the hidden
    # dimension; split it into the forward and backward halves.
    forward, backward = encoder_output.split(self.output_dim, 2)

    word_inp = Variable(word_inp)
    if self.use_cuda:
      word_inp = word_inp.cuda()

    mask1 = Variable(mask_package[1]).cuda() if self.use_cuda else Variable(mask_package[1])
    mask2 = Variable(mask_package[2]).cuda() if self.use_cuda else Variable(mask_package[2])

    # mask1 and mask2 are flat indices into the (batch * len) view: the forward
    # states selected by mask1 are scored against the words selected by mask2,
    # and the backward states selected by mask2 against the words selected by mask1.
    forward_x = forward.contiguous().view(-1, self.output_dim).index_select(0, mask1)
    forward_y = word_inp.contiguous().view(-1).index_select(0, mask2)

    backward_x = backward.contiguous().view(-1, self.output_dim).index_select(0, mask2)
    backward_y = word_inp.contiguous().view(-1).index_select(0, mask1)

    return self.classify_layer(forward_x, forward_y), self.classify_layer(backward_x, backward_y)

  def save_model(self, path, save_classify_layer):
    torch.save(self.token_embedder.state_dict(), os.path.join(path, 'token_embedder.pkl'))
    torch.save(self.encoder.state_dict(), os.path.join(path, 'encoder.pkl'))
    if save_classify_layer:
      torch.save(self.classify_layer.state_dict(), os.path.join(path, 'classifier.pkl'))

  def load_model(self, path):
    self.token_embedder.load_state_dict(torch.load(os.path.join(path, 'token_embedder.pkl')))
    self.encoder.load_state_dict(torch.load(os.path.join(path, 'encoder.pkl')))
    self.classify_layer.load_state_dict(torch.load(os.path.join(path, 'classifier.pkl')))
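
To round off Example #4, a brief usage sketch of the persistence helpers above; model is assumed to be an instance of the Model class, and the checkpoint directory is a hypothetical path chosen for illustration.

import os

checkpoint_dir = 'checkpoints/bilm'   # hypothetical output directory
os.makedirs(checkpoint_dir, exist_ok=True)

# Persist the sub-modules. Saving the classifier is optional here; compare
# load_model in Example #1, which restores only the token embedder and the
# encoder for representation-only use.
model.save_model(checkpoint_dir, save_classify_layer=True)

# Restore the same sub-modules later (load_model above expects all three
# files, including classifier.pkl, to be present).
model.load_model(checkpoint_dir)
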