Example #1

import torch.nn as nn

# The embedder, encoder, and classifier helper classes referenced below are
# assumed to be provided by the surrounding project (e.g. ELMoForManyLangs).

class Model(nn.Module):
    def __init__(self,
                 config,
                 word_emb_layer,
                 char_emb_layer,
                 n_class,
                 use_cuda=False):
        super(Model, self).__init__()
        self.use_cuda = use_cuda
        self.config = config

        # Select the character-aware token embedder named in the config.
        if config['token_embedder']['name'].lower() == 'cnn':
            self.token_embedder = ConvTokenEmbedder(config, word_emb_layer,
                                                    char_emb_layer, use_cuda)
        elif config['token_embedder']['name'].lower() == 'lstm':
            self.token_embedder = LstmTokenEmbedder(config, word_emb_layer,
                                                    char_emb_layer, use_cuda)

        # Select the bidirectional language-model encoder.
        if config['encoder']['name'].lower() == 'elmo':
            self.encoder = ElmobiLm(config, use_cuda)
        elif config['encoder']['name'].lower() == 'lstm':
            self.encoder = LstmbiLm(config, use_cuda)

        self.output_dim = config['encoder']['projection_dim']
        # Select the output layer: full softmax, CNN softmax, or sampled softmax.
        if config['classifier']['name'].lower() == 'softmax':
            self.classify_layer = SoftmaxLayer(self.output_dim, n_class)
        elif config['classifier']['name'].lower() == 'cnn_softmax':
            self.classify_layer = CNNSoftmaxLayer(
                self.token_embedder, self.output_dim, n_class,
                config['classifier']['n_samples'],
                config['classifier']['corr_dim'], use_cuda)
        elif config['classifier']['name'].lower() == 'sampled_softmax':
            self.classify_layer = SampledSoftmaxLayer(
                self.output_dim, n_class, config['classifier']['n_samples'],
                use_cuda)
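
For reference, here is a minimal sketch of a config this constructor accepts, reconstructed from the keys it reads above. All concrete values are hypothetical placeholders, and word_emb_layer / char_emb_layer stand in for the embedding-layer objects the project builds elsewhere.

# Hypothetical config; only the keys read by __init__ are shown.
config = {
    'token_embedder': {'name': 'cnn'},
    'encoder': {'name': 'elmo', 'projection_dim': 512},  # placeholder size
    'classifier': {'name': 'softmax'},  # 'cnn_softmax' / 'sampled_softmax' also
                                        # read 'n_samples' (and 'corr_dim' for
                                        # 'cnn_softmax')
}
model = Model(config, word_emb_layer, char_emb_layer, n_class=10000,
              use_cuda=False)  # n_class is a placeholder vocabulary size
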
Example #2

# A variant of the same constructor (imports as in Example #1); this version
# takes no n_class argument.
class Model(nn.Module):
  def __init__(self, config, word_emb_layer, char_emb_layer, use_cuda=False):
    super(Model, self).__init__()
    self.use_cuda = use_cuda
    self.config = config

    if config['token_embedder']['name'].lower() == 'cnn':
      self.token_embedder = ConvTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)
    elif config['token_embedder']['name'].lower() == 'lstm':
      self.token_embedder = LstmTokenEmbedder(config, word_emb_layer, char_emb_layer, use_cuda)

    if config['encoder']['name'].lower() == 'elmo':
      self.encoder = ElmobiLm(config, use_cuda)
    elif config['encoder']['name'].lower() == 'lstm':
      self.encoder = LstmbiLm(config, use_cuda)

    self.output_dim = config['encoder']['projection_dim']