Example #1
    def __init__(self, n_labels, settings, unfactorized, lonely_only=True):
        super().__init__()
        self.unfactorized = unfactorized

        if lonely_only:
            fnn_input = settings.hidden_lstm * 2
        else:
            fnn_input = settings.hidden_lstm * 2 * 2

        self.label_head_fnn = nn.Linear(fnn_input, settings.dim_mlp)
        self.label_dep_fnn = nn.Linear(fnn_input, settings.dim_mlp)

        self.label_attention = Attention.label_factory(settings.dim_mlp,
                                                       n_labels,
                                                       settings.attention)

        self.dim_lstm = settings.hidden_lstm

        self.dropout_label = settings.dropout_label
        self.dropout_main_ff = settings.dropout_main_ff
        self.locked_dropout = LockedDropout()

        # The factorized variant scores edge existence separately from edge
        # labels, so it needs its own head/dependent FNNs and edge attention.
        if not self.unfactorized:
            self.edge_head_fnn = nn.Linear(fnn_input, settings.dim_mlp)
            self.edge_dep_fnn = nn.Linear(fnn_input, settings.dim_mlp)

            self.edge_attention = Attention.edge_factory(
                settings.dim_mlp, settings.attention)

            self.dropout_edge = settings.dropout_edge
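The Attention.label_factory and Attention.edge_factory calls are project-specific, so their exact interface is not shown in this snippet. Conceptually they score (head, dependent) pairs built from the two MLP outputs. A minimal sketch of a generic biaffine edge scorer in that spirit follows; it is an assumption in the style of Dozat and Manning (2017), not this project's implementation.

import torch
import torch.nn as nn

class BiaffineEdgeScorer(nn.Module):
    # Generic biaffine scorer; a sketch of what Attention.edge_factory
    # presumably builds, not this project's own code.
    def __init__(self, dim_mlp):
        super().__init__()
        self.U = nn.Parameter(torch.empty(dim_mlp + 1, dim_mlp + 1))
        nn.init.xavier_uniform_(self.U)

    def forward(self, head, dep):
        # head, dep: (batch, seq_len, dim_mlp); append a bias feature of ones.
        ones = head.new_ones(*head.shape[:-1], 1)
        head = torch.cat([head, ones], dim=-1)
        dep = torch.cat([dep, ones], dim=-1)
        # score[b, i, j] = dep_i^T U head_j  ->  (batch, seq_len, seq_len)
        return torch.einsum("bid,de,bje->bij", dep, self.U, head)

A label scorer would map each (dependent, head) pair to n_labels scores instead of a single edge score.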
Example #2
    def __init__(self, settings):
        super().__init__()

        fnn_input = settings.hidden_lstm * 2
        self.lstm_to_fnn = nn.Linear(fnn_input, settings.hidden_lstm)

        main_lstm_input = settings.hidden_lstm

        self.lstm = EnhancedLSTM(
            settings.lstm_implementation,
            main_lstm_input,
            settings.hidden_lstm,
            num_layers=settings.layers_lstm,
            ff_dropout=settings.dropout_main_ff,
            recurrent_dropout=settings.dropout_main_recurrent,
            bidirectional=True)

        self.dim_lstm = settings.hidden_lstm
        self.dim_embedding = settings.dim_embedding

        self.dropout_embedding = settings.dropout_embedding
        self.dropout_label = settings.dropout_label
        self.dropout_main_recurrent = settings.dropout_main_recurrent
        self.dropout_main_ff = settings.dropout_main_ff
        self.locked_dropout = LockedDropout()
Example #3
    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 num_layers: int,
                 ff_dropout: float = 0.0,
                 recurrent_dropout: float = 0.0,
                 bidirectional=True) -> None:
        super().__init__()

        self.locked_dropout = LockedDropout()
        # One single-layer LSTM per depth level, so that locked dropout (and,
        # optionally, weight drop) can be applied between and inside layers.
        self.lstms = [
            torch.nn.LSTM(
                input_size if l == 0 else hidden_size * (1 + int(bidirectional)),
                hidden_size,
                num_layers=1,
                dropout=0,
                bidirectional=bidirectional,
                batch_first=True) for l in range(num_layers)
        ]
        if recurrent_dropout:
            self.lstms = [
                WeightDrop(lstm, ['weight_hh_l0'], dropout=recurrent_dropout)
                for lstm in self.lstms
            ]

        self.lstms = torch.nn.ModuleList(self.lstms)
        self.ff_dropout = ff_dropout
        self.num_layers = num_layers
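The constructor above stacks single-layer LSTMs in a ModuleList instead of using one multi-layer nn.LSTM; that is what lets LockedDropout be applied between layers and WeightDrop inside each layer. The real EnhancedLSTM.forward is not part of this example, so the following is only a sketch, assuming padded (batch, seq_len, input_size) tensors and an AWD-LSTM-style locked_dropout(x, rate) call signature.

def run_enhanced_lstm(model, x):
    # Sketch only: the actual forward likely also handles packed sequences
    # and initial hidden states, which are omitted here.
    for i, lstm in enumerate(model.lstms):
        x, _ = lstm(x)                                     # one (bi)LSTM layer
        if i < model.num_layers - 1:                       # no dropout after the last layer
            x = model.locked_dropout(x, model.ff_dropout)  # same mask at every time step
    return x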
Example #4
    def __init__(self, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5,
                 dropouth=0.5, dropouti=0.5, dropoute=0.1, wdrop=0,
                 tie_weights=False, emmbed_func=nn.functional.embedding):
        super(RNNModel, self).__init__()
        self.lockdrop = LockedDropout()
        self.idrop = nn.Dropout(dropouti)
        self.hdrop = nn.Dropout(dropouth)
        self.drop = nn.Dropout(dropout)
        self.encoder = nn.Embedding(ntoken, ninp)
        assert rnn_type in ['LSTM', 'QRNN', 'GRU'], 'RNN type is not supported'
        if rnn_type == 'LSTM':
            # The last layer outputs ninp when weights are tied, so the decoder
            # can reuse the embedding matrix.
            self.rnns = [torch.nn.LSTM(ninp if l == 0 else nhid,
                                       nhid if l != nlayers - 1 else (ninp if tie_weights else nhid),
                                       1, dropout=0)
                         for l in range(nlayers)]
            if wdrop:
                self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop)
                             for rnn in self.rnns]
        elif rnn_type == 'GRU':
            self.rnns = [torch.nn.GRU(ninp if l == 0 else nhid,
                                      nhid if l != nlayers - 1 else ninp,
                                      1, dropout=0)
                         for l in range(nlayers)]
            if wdrop:
                self.rnns = [WeightDrop(rnn, ['weight_hh_l0'], dropout=wdrop)
                             for rnn in self.rnns]
        elif rnn_type == 'QRNN':
            from torchqrnn import QRNNLayer
            self.rnns = [QRNNLayer(input_size=ninp if l == 0 else nhid,
                                   hidden_size=nhid if l != nlayers - 1 else (ninp if tie_weights else nhid),
                                   save_prev_x=True, zoneout=0,
                                   window=2 if l == 0 else 1,
                                   output_gate=True)
                         for l in range(nlayers)]
            for rnn in self.rnns:
                rnn.linear = WeightDrop(rnn.linear, ['weight'], dropout=wdrop)
        print(self.rnns)
        self.rnns = torch.nn.ModuleList(self.rnns)
        self.decoder = nn.Linear(nhid, ntoken)

        # Optionally tie weights as in:
        # "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
        # https://arxiv.org/abs/1608.05859
        # and
        # "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
        # https://arxiv.org/abs/1611.01462
        if tie_weights:
            # if nhid != ninp:
            #    raise ValueError('When using the tied flag, nhid must be equal to emsize')
            self.decoder.weight = self.encoder.weight

        self.init_weights()

        self.rnn_type = rnn_type
        self.ninp = ninp
        self.nhid = nhid
        self.nlayers = nlayers
        self.dropout = dropout
        self.dropouti = dropouti
        self.dropouth = dropouth
        self.dropoute = dropoute
        self.tie_weights = tie_weights

        # Custom embedding with matrix multiplication
        self.embed_func = emmbed_func
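Weight tying works here because of two details: the last RNN layer is sized to ninp (not nhid) whenever tie_weights is set, and assigning decoder.weight = encoder.weight makes the linear layer effectively take ninp-dimensional inputs regardless of its declared in_features. A small standalone sketch of that mechanism, with hypothetical sizes:

import torch
import torch.nn as nn

# Hypothetical sizes, chosen only to illustrate the tying trick.
ntoken, ninp, nhid = 1000, 400, 1150

encoder = nn.Embedding(ntoken, ninp)   # weight shape: (ntoken, ninp)
decoder = nn.Linear(nhid, ntoken)      # weight shape before tying: (ntoken, nhid)

decoder.weight = encoder.weight        # tying: one shared (ntoken, ninp) matrix

hidden = torch.randn(8, ninp)          # output of the (tied) last RNN layer
logits = decoder(hidden)               # works: F.linear uses the shared weight
print(logits.shape)                    # torch.Size([8, 1000])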
Example #5
    def __init__(self, vocabs, external, settings):
        super().__init__()
        self.use_external = not settings.disable_external
        self.use_lemma = not settings.disable_lemma
        self.use_pos = not settings.disable_pos
        self.use_form = not settings.disable_form
        self.use_char = not settings.disable_char
        self.unfactorized = settings.unfactorized
        self.emb_dropout_type = settings.emb_dropout_type

        self.use_elmo = settings.use_elmo

        self.init_embeddings(vocabs, external, settings)

        if self.use_char:
            self.char_model = AbstractCharModel.char_model_factory(
                model_type=settings.char_implementation,
                lstm_type=settings.lstm_implementation,
                char_vocab=vocabs.chars,
                char_emb_size=settings.dim_char_embedding,
                word_emb_size=settings.dim_embedding,
                hidden_size=settings.hidden_char_lstm,
                ff_dropout=settings.dropout_char_ff,
                recurrent_dropout=settings.dropout_recurrent_char,
                dropout_char_linear=settings.dropout_char_linear,
                emb_dropout_type=settings.emb_dropout_type,
            )
        main_lstm_input = (
            settings.dim_embedding *
            (self.use_form + self.use_pos + self.use_lemma + self.use_char) +
            (external.dim * self.use_external + self.use_elmo * 100))
        self.main_lstm_input = main_lstm_input
        self.lstm = EnhancedLSTM(
            settings.lstm_implementation,
            main_lstm_input,
            settings.hidden_lstm,
            num_layers=settings.layers_lstm,
            ff_dropout=settings.dropout_main_ff,
            recurrent_dropout=settings.dropout_main_recurrent,
            bidirectional=True)

        self.dim_lstm = settings.hidden_lstm
        self.dim_embedding = settings.dim_embedding

        self.dropout_embedding = settings.dropout_embedding
        self.dropout_label = settings.dropout_label
        self.dropout_main_recurrent = settings.dropout_main_recurrent
        self.dropout_main_ff = settings.dropout_main_ff
        self.locked_dropout = LockedDropout()
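main_lstm_input is just the sum of whatever embedding blocks are enabled: one dim_embedding-sized block per active feature (form, POS, lemma, char), plus the external vectors and a fixed 100 dimensions when ELMo is used. A quick worked example with hypothetical dimensions:

# Hypothetical settings, only to illustrate the width arithmetic above.
dim_embedding = 100                   # shared size for form/POS/lemma/char blocks
external_dim = 300                    # e.g. pretrained word vectors
use_form = use_pos = use_lemma = use_char = True
use_external, use_elmo = True, True

main_lstm_input = (dim_embedding * (use_form + use_pos + use_lemma + use_char)
                   + external_dim * use_external
                   + use_elmo * 100)  # the ELMo contribution is hard-coded to 100
print(main_lstm_input)                # 100 * 4 + 300 + 100 = 800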
Example #6
    def __init__(self, lstm_type, char_vocab, char_emb_size, word_emb_size,
                 hidden_size, ff_dropout, recurrent_dropout,
                 dropout_char_linear, emb_dropout_type):
        super().__init__()
        self.char_lstm = EnhancedLSTM(lstm_type,
                                      char_emb_size,
                                      hidden_size,
                                      num_layers=1,
                                      ff_dropout=ff_dropout,
                                      recurrent_dropout=recurrent_dropout,
                                      bidirectional=False)
        self.char_embedding = nn.Embedding(len(char_vocab), char_emb_size)
        self.char_transform = torch.nn.Linear(hidden_size, word_emb_size)
        self.dropout_char_linear = dropout_char_linear
        self.locked_dropout = LockedDropout()

        if emb_dropout_type == "replace":
            # Learned replacement vector used in place of dropped embeddings.
            self.drop_token = create_parameter(1, word_emb_size)
        elif emb_dropout_type == "zero":
            # Fixed all-zero replacement vector (not a learned parameter).
            self.drop_token = torch.zeros(1, word_emb_size)
        else:
            raise ValueError(
                f"Unsupported embedding dropout type: {emb_dropout_type}")