Example #1
    def __init__(self,
                 hidden_size,
                 num_labels,
                 crf=False) -> None:
        super().__init__()
        self.classifier = torch.nn.Linear(hidden_size, num_labels)
        self.crf = CRF(num_labels) if crf else None
Example #2
    def __init__(self, hidden_size, num_labels, crf=False) -> None:
        """A linear layer with an optional CRF (:cite:`lafferty2001conditional`) layer on top of it.

        Args:
            hidden_size: Size of hidden states.
            num_labels: Size of tag set.
            crf: ``True`` to enable CRF (:cite:`lafferty2001conditional`).
        """
        super().__init__()
        self.classifier = torch.nn.Linear(hidden_size, num_labels)
        self.crf = CRF(num_labels) if crf else None
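For context, a minimal usage sketch of the head defined above. The wrapper class name LinearCRFHead and all sizes are illustrative assumptions, and the CRF branch is left disabled here because the CRF class's API is not shown in the source.

import torch

# Hypothetical wrapper around the __init__ above; the name LinearCRFHead
# and the sizes below are illustrative, not taken from the source.
class LinearCRFHead(torch.nn.Module):
    def __init__(self, hidden_size, num_labels, crf=False) -> None:
        super().__init__()
        self.classifier = torch.nn.Linear(hidden_size, num_labels)
        self.crf = None  # CRF(num_labels) would be attached here when crf=True

    def forward(self, hidden_states):
        # Project each token's hidden state to per-label scores.
        return self.classifier(hidden_states)

head = LinearCRFHead(hidden_size=768, num_labels=9)
hidden_states = torch.randn(2, 16, 768)  # (batch, tokens, hidden)
logits = head(hidden_states)             # (2, 16, 9)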
Example #3
    def __init__(self,
                 encoder: TransformerEncoder,
                 num_labels,
                 crf=False,
                 secondary_encoder=None) -> None:
        """A shallow tagging model that uses a transformer as the encoder.

        Args:
            encoder: A pretrained transformer.
            num_labels: Size of tag set.
            crf: True to enable CRF.
            secondary_encoder: An optional secondary encoder applied to the
                encoder outputs.
        """
        super().__init__()
        self.encoder = encoder
        self.secondary_encoder = secondary_encoder
        # noinspection PyUnresolvedReferences
        self.classifier = nn.Linear(encoder.transformer.config.hidden_size, num_labels)
        self.crf = CRF(num_labels) if crf else None
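The classifier's input width is read from the wrapped transformer's config, which suggests the Hugging Face transformers convention. A hedged sketch of that lookup, assuming such a model; the checkpoint name is only an example:

from transformers import AutoModel

# Load any Hugging Face-style transformer; its config exposes hidden_size,
# which is what encoder.transformer.config.hidden_size reads above.
transformer = AutoModel.from_pretrained('bert-base-uncased')
hidden_size = transformer.config.hidden_size  # 768 for bert-base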
Example #4
    def __init__(self,
                 n_rels,
                 hidden_size,
                 n_mlp_rel=300,
                 mlp_dropout=0.2,
                 crf=False) -> None:
        super().__init__()
        self.mlp_rel_h = MLP(n_in=hidden_size,
                             n_out=n_mlp_rel,
                             dropout=mlp_dropout)
        self.mlp_rel_d = MLP(n_in=hidden_size,
                             n_out=n_mlp_rel,
                             dropout=mlp_dropout)
        self.rel_attn = Biaffine(n_in=n_mlp_rel,
                                 n_out=n_rels,
                                 bias_x=True,
                                 bias_y=True)
        bias = 1 / math.sqrt(self.rel_attn.weight.size(1))
        nn.init.uniform_(self.rel_attn.weight, -bias, bias)
        self.crf = CRF(n_rels) if crf else None
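The last two lines re-initialize the biaffine weight uniformly within ±1/√fan_in, which keeps the initial relation scores at unit scale. A standalone illustration; the weight shape is an assumption, since Biaffine's internals are not shown in the source:

import math
import torch
import torch.nn as nn

# Illustrative weight of shape (n_rels, n_in + 1, n_in + 1); the +1 accounts
# for bias_x/bias_y. The exact shape inside Biaffine is assumed here.
weight = torch.empty(40, 301, 301)
bound = 1 / math.sqrt(weight.size(1))  # 1 / sqrt(fan_in)
nn.init.uniform_(weight, -bound, bound)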
Example #5
    def __init__(self,
                 embed: Union[nn.Embedding, int],
                 rnn_input,
                 rnn_hidden,
                 n_out,
                 drop=0.5,
                 crf=True,
                 crf_constraints=None):
        super(RNNTaggingModel, self).__init__()

        # the embedding layer
        if isinstance(embed, nn.Module):
            self.embed = embed
            n_embed = embed.embedding_dim
        else:
            self.embed = None
            n_embed = embed

        if rnn_input:
            self.embed_to_rnn = nn.Linear(n_embed, rnn_input)
        else:
            self.embed_to_rnn = None
            rnn_input = n_embed

        # the word-lstm layer
        self.word_lstm = nn.LSTM(input_size=rnn_input,
                                 hidden_size=rnn_hidden,
                                 batch_first=True,
                                 bidirectional=True)

        # the output layer
        self.out = nn.Linear(rnn_hidden * 2, n_out)
        # the CRF layer
        self.crf = CRF(n_out, crf_constraints) if crf else None

        self.drop = nn.Dropout(drop)
        # self.drop = SharedDropout(drop)
        # self.drop = LockedDropout(drop)

        self.reset_parameters()
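A sketch of the data flow these layers imply (embedding lookup done externally, the optional projection skipped, CRF off). The actual forward() is not shown in the source, so this is an assumption with illustrative shapes:

import torch
import torch.nn as nn

n_embed, rnn_hidden, n_out = 100, 128, 9
x = torch.randn(2, 16, n_embed)   # pre-computed word embeddings
lstm = nn.LSTM(n_embed, rnn_hidden, batch_first=True, bidirectional=True)
proj = nn.Linear(rnn_hidden * 2, n_out)
hidden, _ = lstm(x)               # (2, 16, 2 * rnn_hidden)
scores = proj(hidden)             # per-token label scores, (2, 16, n_out)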
Example #6
    def __init__(self,
                 encoder: TransformerEncoder,
                 num_labels,
                 crf=False,
                 secondary_encoder=None,
                 extra_embeddings: EmbeddingDim = None) -> None:
        """A shallow tagging model that uses a transformer as the encoder.

        Args:
            encoder: A pretrained transformer.
            num_labels: Size of tag set.
            crf: True to enable CRF.
            secondary_encoder: An optional secondary encoder applied to the
                encoder outputs.
            extra_embeddings: Extra embeddings which will be concatenated to
                the encoder outputs.
        """
        super().__init__()
        self.encoder = encoder
        self.secondary_encoder = secondary_encoder
        self.extra_embeddings = extra_embeddings
        # noinspection PyUnresolvedReferences
        feature_size = encoder.transformer.config.hidden_size
        if extra_embeddings:
            feature_size += extra_embeddings.get_output_dim()
        self.classifier = nn.Linear(feature_size, num_labels)
        self.crf = CRF(num_labels) if crf else None
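A sketch of the feature concatenation this constructor sets up: the extra embeddings are appended to each token's encoder output before classification. All sizes below are illustrative, and both tensors stand in for module outputs not shown in the source:

import torch
import torch.nn as nn

hidden_size, extra_dim, num_labels = 768, 50, 9
encoder_output = torch.randn(2, 16, hidden_size)  # transformer states (assumed)
extra = torch.randn(2, 16, extra_dim)             # extra_embeddings output (assumed)
features = torch.cat([encoder_output, extra], dim=-1)  # (2, 16, 818)
logits = nn.Linear(hidden_size + extra_dim, num_labels)(features)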