Example 1
    def init(self):
        # embeddings
        initialization.init_embeddings(self.embs)
        # rnn
        initialization.init_rnn(self.rnn, scheme=self.init_rnn)
        # linear
        initialization.init_linear(self.proj)
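
This init() hook applies the project's initialization helpers once the submodules exist. Below is a minimal sketch of how such a hook might be wired up, assuming a hypothetical wrapper module whose embs/rnn/proj attributes match the calls above; the import path, class name, and layer sizes are assumptions, not part of the project:

    import torch.nn as nn
    from pie import initialization  # import path assumed

    class Tagger(nn.Module):  # hypothetical wrapper, for illustration only
        def __init__(self, vocab_size, emb_dim, hidden_size, num_classes,
                     init_rnn='xavier_uniform'):
            super().__init__()
            self.init_rnn = init_rnn
            self.embs = nn.Embedding(vocab_size, emb_dim)
            self.rnn = nn.LSTM(emb_dim, hidden_size, bidirectional=True)
            self.proj = nn.Linear(hidden_size * 2, num_classes)
            # self.init() -- the hook shown above -- would then be called here
            # to apply init_embeddings / init_rnn / init_linear to these layers.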
Example 2
    def __init__(self,
                 num_embeddings,
                 embedding_dim,
                 padding_idx=None,
                 **kwargs):
        self.num_embeddings = num_embeddings
        super().__init__(embedding_dim, hidden_size=embedding_dim, **kwargs)
        self.embedding_dim = embedding_dim * 2  # bidirectional

        self.emb = nn.Embedding(num_embeddings,
                                embedding_dim,
                                padding_idx=padding_idx)
        initialization.init_embeddings(self.emb)
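
Note that self.embedding_dim is deliberately overwritten with embedding_dim * 2 after the super().__init__ call: the inherited recurrent layer runs in both directions, so code that sizes downstream layers should read this attribute rather than the embedding_dim constructor argument.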
Example 3
    def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
                 custom_lstm=False, cell='LSTM', init_rnn='default',
                 num_layers=1, dropout=0.0):
        self.num_embeddings = num_embeddings
        self.num_layers = num_layers
        self.embedding_dim = embedding_dim * 2  # bidirectional
        super().__init__()

        self.emb = nn.Embedding(num_embeddings, embedding_dim, padding_idx=padding_idx)
        initialization.init_embeddings(self.emb)

        if custom_lstm:
            self.rnn = CustomBiLSTM(
                embedding_dim, embedding_dim, num_layers=num_layers, dropout=dropout)
        else:
            self.rnn = getattr(nn, cell)(
                embedding_dim, embedding_dim, bidirectional=True,
                num_layers=num_layers, dropout=dropout if num_layers > 1 else 0)
            initialization.init_rnn(self.rnn, scheme=init_rnn)
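
For reference, a hedged usage sketch of this constructor (the class is instantiated as RNNEmbedding in the later examples; the sizes below are illustrative, not the project's defaults):

    cemb = RNNEmbedding(num_embeddings=120,    # character vocabulary size (illustrative)
                        embedding_dim=150,
                        padding_idx=0,
                        cell='GRU',            # resolved via getattr(nn, cell)
                        num_layers=1)
    # cemb.embedding_dim is 300 (150 * 2), since the recurrent layer is bidirectional.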
Example 4
    def __init__(self,
                 label_encoder,
                 wemb_dim,
                 cemb_dim,
                 hidden_size,
                 num_layers,
                 dropout=0.0,
                 word_dropout=0.0,
                 merge_type='concat',
                 cemb_type='RNN',
                 cemb_layers=1,
                 cell='LSTM',
                 custom_cemb_cell=False,
                 init_rnn='xavier_uniform'):

        self.label_encoder = label_encoder
        self.wemb_dim = wemb_dim
        self.cemb_dim = cemb_dim
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # kwargs
        self.cell = cell
        self.dropout = dropout
        self.word_dropout = word_dropout
        self.merge_type = merge_type
        self.cemb_type = cemb_type
        self.cemb_layers = cemb_layers
        self.custom_cemb_cell = custom_cemb_cell
        super().__init__()

        self.wemb = None
        if self.wemb_dim > 0:
            self.wemb = nn.Embedding(len(label_encoder.word),
                                     wemb_dim,
                                     padding_idx=label_encoder.word.get_pad())
            # init embeddings
            initialization.init_embeddings(self.wemb)

        self.cemb = None
        if cemb_type.upper() == 'RNN':
            self.cemb = RNNEmbedding(len(label_encoder.char),
                                     cemb_dim,
                                     padding_idx=label_encoder.char.get_pad(),
                                     custom_lstm=custom_cemb_cell,
                                     dropout=dropout,
                                     num_layers=cemb_layers,
                                     cell=cell,
                                     init_rnn=init_rnn)
        elif cemb_type.upper() == 'CNN':
            self.cemb = CNNEmbedding(len(label_encoder.char),
                                     cemb_dim,
                                     padding_idx=label_encoder.char.get_pad())

        self.merger = None
        if self.cemb is not None and self.wemb is not None:
            if merge_type.lower() == 'mixer':
                if self.cemb.embedding_dim != self.wemb.embedding_dim:
                    raise ValueError(
                        "EmbeddingMixer needs equal embedding dims")
                self.merger = EmbeddingMixer(wemb_dim)
                in_dim = wemb_dim
            elif merge_type.lower() == 'concat':
                self.merger = EmbeddingConcat()
                in_dim = wemb_dim + self.cemb.embedding_dim
            else:
                raise ValueError("Unknown merge method: {}".format(merge_type))
        elif self.cemb is None:
            in_dim = wemb_dim
        else:
            in_dim = self.cemb.embedding_dim

        # Encoder
        self.encoder = RNNEncoder(in_dim,
                                  hidden_size,
                                  num_layers=num_layers,
                                  cell=cell,
                                  dropout=dropout,
                                  init_rnn=init_rnn)

        # decoders
        self.lm_fwd_decoder = LinearDecoder(label_encoder.word, hidden_size)
        self.lm_bwd_decoder = LinearDecoder(label_encoder.word, hidden_size)
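
The merge branch above fixes the input size of the sentence encoder. A small worked example of the arithmetic (the sizes are illustrative, not the project's defaults):

    wemb_dim, cemb_dim = 64, 32
    cemb_out = cemb_dim * 2               # RNNEmbedding reports embedding_dim * 2 (bidirectional)
    in_dim_concat = wemb_dim + cemb_out   # 128 with merge_type='concat'
    in_dim_mixer = wemb_dim               # 64 with merge_type='mixer'; valid only if cemb_out == wemb_dim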
Example 5
    def init(self):
        initialization.init_embeddings(self.emb)
        for conv in self.convs:
            initialization.init_conv(conv)
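
This is the same post-construction pattern as in Example 1, here for a convolutional character embedding. A minimal sketch of a module it could sit on, with emb/convs matching the calls above; the class shape is inferred from the CNNEmbedding calls in the other examples, and the kernel sizes are assumptions:

    import torch.nn as nn

    class CNNEmbedding(nn.Module):  # shape inferred, not the project's definition
        def __init__(self, num_embeddings, embedding_dim, padding_idx=None,
                     kernel_sizes=(3, 4, 5)):
            super().__init__()
            self.embedding_dim = embedding_dim
            self.emb = nn.Embedding(num_embeddings, embedding_dim,
                                    padding_idx=padding_idx)
            # nn.ModuleList registers each convolution, so the init() loop above sees them
            self.convs = nn.ModuleList([
                nn.Conv1d(embedding_dim, embedding_dim, kernel_size=k, padding=k // 2)
                for k in kernel_sizes])
            # the init() hook shown above would then initialize self.emb and self.convs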
Example 6
    def __init__(self,
                 label_encoder,
                 tasks,
                 wemb_dim,
                 cemb_dim,
                 hidden_size,
                 num_layers,
                 dropout=0.0,
                 word_dropout=0.0,
                 merge_type='concat',
                 cemb_type='RNN',
                 cemb_layers=1,
                 cell='LSTM',
                 custom_cemb_cell=False,
                 scorer='general',
                 include_lm=True,
                 lm_shared_softmax=True,
                 init_rnn='xavier_uniform',
                 linear_layers=1,
                 **kwargs):
        # args
        self.wemb_dim = wemb_dim
        self.cemb_dim = cemb_dim
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # kwargs
        self.cell = cell
        self.dropout = dropout
        self.word_dropout = word_dropout
        self.merge_type = merge_type
        self.cemb_type = cemb_type
        self.cemb_layers = cemb_layers
        self.scorer = scorer
        self.include_lm = include_lm
        self.lm_shared_softmax = lm_shared_softmax
        self.custom_cemb_cell = custom_cemb_cell
        self.linear_layers = linear_layers
        # only during training
        self.init_rnn = init_rnn
        super().__init__(label_encoder, tasks)

        # Embeddings
        self.wemb = None
        if self.wemb_dim > 0:
            self.wemb = nn.Embedding(len(label_encoder.word),
                                     wemb_dim,
                                     padding_idx=label_encoder.word.get_pad())
            # init embeddings
            initialization.init_embeddings(self.wemb)

        self.cemb = None
        if cemb_type.upper() == 'RNN':
            self.cemb = RNNEmbedding(len(label_encoder.char),
                                     cemb_dim,
                                     padding_idx=label_encoder.char.get_pad(),
                                     custom_lstm=custom_cemb_cell,
                                     dropout=dropout,
                                     num_layers=cemb_layers,
                                     cell=cell,
                                     init_rnn=init_rnn)
        elif cemb_type.upper() == 'CNN':
            self.cemb = CNNEmbedding(len(label_encoder.char),
                                     cemb_dim,
                                     padding_idx=label_encoder.char.get_pad())

        self.merger = None
        if self.cemb is not None and self.wemb is not None:
            if merge_type.lower() == 'mixer':
                if self.cemb.embedding_dim != self.wemb.embedding_dim:
                    raise ValueError(
                        "EmbeddingMixer needs equal embedding dims")
                self.merger = EmbeddingMixer(wemb_dim)
                in_dim = wemb_dim
            elif merge_type.lower() == 'concat':
                self.merger = EmbeddingConcat()
                in_dim = wemb_dim + self.cemb.embedding_dim
            else:
                raise ValueError("Unknown merge method: {}".format(merge_type))
        elif self.cemb is None:
            in_dim = wemb_dim
        else:
            in_dim = self.cemb.embedding_dim

        # Encoder
        self.encoder = None
        needs_encoder = False
        for task in self.tasks.values():
            if task['level'] == 'token':
                needs_encoder = True
                break
            elif task.get('context', '').lower() in ('sentence', 'both'):
                needs_encoder = True
                break
        if not needs_encoder:
            print("Model doesn't need sentence encoder, leaving uninitialized")
        else:
            self.encoder = RNNEncoder(in_dim,
                                      hidden_size,
                                      num_layers=num_layers,
                                      cell=cell,
                                      dropout=dropout,
                                      init_rnn=init_rnn)

        # Decoders
        decoders = {}
        for tname, task in self.tasks.items():
            if task['level'].lower() == 'char':
                if self.cemb is None:
                    raise ValueError(
                        "Char-level decoder requires char embeddings")

                # TODO: add sentence context to decoder
                if task['decoder'].lower() == 'linear':
                    decoder = LinearDecoder(label_encoder.tasks[tname],
                                            self.cemb.embedding_dim)
                elif task['decoder'].lower() == 'crf':
                    decoder = CRFDecoder(label_encoder.tasks[tname],
                                         self.cemb.embedding_dim)
                elif task['decoder'].lower() == 'attentional':
                    # get context size
                    context_dim = 0
                    if task['context'].lower() == 'sentence':
                        context_dim = hidden_size * 2  # bidirectional encoder
                    elif task['context'].lower() == 'word':
                        context_dim = wemb_dim
                    elif task['context'].lower() == 'both':
                        context_dim = hidden_size * 2 + wemb_dim

                    decoder = AttentionalDecoder(label_encoder.tasks[tname],
                                                 cemb_dim,
                                                 self.cemb.embedding_dim,
                                                 context_dim=context_dim,
                                                 scorer=scorer,
                                                 num_layers=cemb_layers,
                                                 cell=cell,
                                                 dropout=dropout,
                                                 init_rnn=init_rnn)

                else:
                    raise ValueError(
                        "Unknown decoder type {} for char-level task: {}".
                        format(task['decoder'], tname))

            elif task['level'].lower() == 'token':
                # linear
                if task['decoder'].lower() == 'linear':
                    decoder = LinearDecoder(label_encoder.tasks[tname],
                                            hidden_size * 2,
                                            highway_layers=linear_layers - 1)
                # crf
                elif task['decoder'].lower() == 'crf':
                    decoder = CRFDecoder(label_encoder.tasks[tname],
                                         hidden_size * 2,
                                         highway_layers=linear_layers - 1)

                else:
                    raise ValueError(
                        "Unknown decoder type {} for token-level task: {}".format(
                            task['decoder'], tname))

            self.add_module('{}_decoder'.format(tname), decoder)
            decoders[tname] = decoder

        self.decoders = decoders

        # - LM
        if self.include_lm:
            self.lm_fwd_decoder = LinearDecoder(label_encoder.word,
                                                hidden_size)
            if lm_shared_softmax:
                self.lm_bwd_decoder = self.lm_fwd_decoder
            else:
                self.lm_bwd_decoder = LinearDecoder(label_encoder.word,
                                                    hidden_size)
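
For orientation, a hedged sketch of the per-task configuration this constructor iterates over (the field names 'level', 'decoder', and 'context' are taken from the lookups above; the task names and values are illustrative):

    tasks = {
        'pos':   {'level': 'token', 'decoder': 'crf', 'context': ''},
        'lemma': {'level': 'char', 'decoder': 'attentional', 'context': 'sentence'},
    }
    # 'pos' would get a CRFDecoder over the bidirectional encoder output (hidden_size * 2);
    # 'lemma' would get an AttentionalDecoder whose context_dim is hidden_size * 2,
    # since its context is the sentence-level encoder.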