Example #1
    def __init__(self,
                 hidden_size,
                 num_intents,
                 num_slots,
                 dropout=0.0,
                 use_transformer_pretrained=True,
                 **kwargs):
        super().__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)
        self.slot_mlp = MultiLayerPerceptron(
            hidden_size,
            num_classes=num_slots,
            device=self._device,
            num_layers=2,
            activation='relu',
            log_softmax=False,
        )
        self.intent_mlp = MultiLayerPerceptron(
            hidden_size,
            num_classes=num_intents,
            device=self._device,
            num_layers=2,
            activation='relu',
            log_softmax=False,
        )
        if use_transformer_pretrained:
            self.apply(
                lambda module: transformer_weights_init(module, xavier=False))
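
All four examples build their classification heads from the same MultiLayerPerceptron helper imported from the surrounding codebase. As a rough orientation only, the stand-in below sketches what such a helper amounts to, a stack of Linear plus activation layers with an optional final log-softmax; its exact behavior and signature are assumptions, not the original implementation.

# Hedged stand-in for the MultiLayerPerceptron helper used in these examples;
# the real class lives elsewhere in the codebase and may differ in details.
import torch
import torch.nn as nn


class SimpleMLP(nn.Module):
    """(num_layers - 1) Linear + activation blocks, then a projection to num_classes."""

    def __init__(self, hidden_size, num_classes, num_layers=2,
                 activation='relu', log_softmax=True):
        super().__init__()
        acts = {'relu': nn.ReLU(), 'gelu': nn.GELU(), 'tanh': nn.Tanh()}
        layers = []
        for _ in range(num_layers - 1):
            layers += [nn.Linear(hidden_size, hidden_size), acts[activation]]
        layers.append(nn.Linear(hidden_size, num_classes))
        self.net = nn.Sequential(*layers)
        self.log_softmax = log_softmax

    def forward(self, hidden_states):
        logits = self.net(hidden_states)
        return torch.log_softmax(logits, dim=-1) if self.log_softmax else logits


head = SimpleMLP(hidden_size=768, num_classes=10, num_layers=2, log_softmax=False)
print(head(torch.randn(4, 768)).shape)  # torch.Size([4, 10])
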
Example #2
    def __init__(
        self,
        hidden_size,
        punct_num_classes,
        capit_num_classes,
        punct_num_layers=2,
        capit_num_layers=2,
        activation='relu',
        log_softmax=True,
        dropout=0.0,
        use_transformer_pretrained=True,
    ):
        # Pass name up the module class hierarchy.
        super().__init__()
        self.dropout = nn.Dropout(dropout)
        self.punct_mlp = MultiLayerPerceptron(hidden_size, punct_num_classes,
                                              self._device, punct_num_layers,
                                              activation, log_softmax)
        self.capit_mlp = MultiLayerPerceptron(hidden_size, capit_num_classes,
                                              self._device, capit_num_layers,
                                              activation, log_softmax)

        if use_transformer_pretrained:
            self.apply(
                lambda module: transformer_weights_init(module, xavier=False))
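
Example #2 attaches two token-level heads, one for punctuation and one for capitalization, to the same encoder output. The sketch below only shows how such heads are applied per token; it uses plain nn.Linear layers and made-up shapes rather than the classes from the example.

# Usage sketch with assumed shapes and plain Linear heads (not the classes above).
import torch
import torch.nn as nn

hidden_size, punct_num_classes, capit_num_classes = 768, 4, 2
batch_size, seq_len = 8, 32

punct_head = nn.Linear(hidden_size, punct_num_classes)
capit_head = nn.Linear(hidden_size, capit_num_classes)

# Encoder output: one hidden vector per token.
hidden_states = torch.randn(batch_size, seq_len, hidden_size)

punct_logits = punct_head(hidden_states)  # [batch_size, seq_len, punct_num_classes]
capit_logits = capit_head(hidden_states)  # [batch_size, seq_len, capit_num_classes]
print(punct_logits.shape, capit_logits.shape)
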
Example #3
    def __init__(
        self,
        hidden_size,
        num_classes,
        activation='relu',
        log_softmax=True,
        dropout=0.0,
        use_transformer_pretrained=True,
    ):
        super().__init__()
        if activation not in ACT2FN:
            raise ValueError(f'activation "{activation}" not found')
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.act = ACT2FN[activation]
        self.norm = nn.LayerNorm(hidden_size, eps=1e-12)
        self.mlp = MultiLayerPerceptron(
            hidden_size,
            num_classes,
            self._device,
            num_layers=1,
            activation=activation,
            log_softmax=log_softmax,
        )
        self.dropout = nn.Dropout(dropout)
        if use_transformer_pretrained:
            self.apply(
                lambda module: transformer_weights_init(module, xavier=False))
        self.to(self._device)
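
Example #3 puts a transform block (dense projection, activation, layer normalization) in front of the classifier, similar in spirit to BERT-style prediction heads. The constructor does not show the forward pass, so the following sketch of how those layers are typically chained is an assumption, not the original class.

# Assumed forward pass for a head built like Example #3 (sketch only).
import torch
import torch.nn as nn


class TransformThenClassify(nn.Module):
    def __init__(self, hidden_size, num_classes, dropout=0.0):
        super().__init__()
        self.dense = nn.Linear(hidden_size, hidden_size)
        self.act = nn.GELU()
        self.norm = nn.LayerNorm(hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(dropout)
        self.classifier = nn.Linear(hidden_size, num_classes)

    def forward(self, hidden_states):
        # dense -> activation -> layer norm, then dropout and the final projection
        transformed = self.norm(self.act(self.dense(hidden_states)))
        return self.classifier(self.dropout(transformed))


head = TransformThenClassify(hidden_size=768, num_classes=9)
print(head(torch.randn(2, 16, 768)).shape)  # torch.Size([2, 16, 9])
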
Example #4
    def __init__(
        self,
        hidden_size,
        num_classes,
        num_layers=2,
        activation='relu',
        log_softmax=True,
        dropout=0.0,
        use_transformer_pretrained=True,
    ):
        super().__init__()
        self.mlp = MultiLayerPerceptron(
            hidden_size, num_classes, self._device,
            num_layers, activation, log_softmax)
        self.dropout = nn.Dropout(dropout)
        if use_transformer_pretrained:
            self.apply(
                lambda module: transformer_weights_init(module, xavier=False))
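
All four examples finish with self.apply(lambda module: transformer_weights_init(module, xavier=False)), which walks every submodule and re-initializes it. The helper below only illustrates that pattern; the standard deviation and the LayerNorm handling are assumptions, not taken from the examples.

# Illustrative init callback in the style used above (assumed details, not the real helper).
import torch.nn as nn


def simple_transformer_init(module, std=0.02):
    # Normal-initialize Linear/Embedding weights, zero biases, reset LayerNorm.
    if isinstance(module, (nn.Linear, nn.Embedding)):
        module.weight.data.normal_(mean=0.0, std=std)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    elif isinstance(module, nn.LayerNorm):
        module.weight.data.fill_(1.0)
        module.bias.data.zero_()


mlp = nn.Sequential(nn.Linear(768, 768), nn.ReLU(), nn.Linear(768, 5))
mlp.apply(simple_transformer_init)  # nn.Module.apply visits every submodule recursively
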