def __init__(self, tokens_encoder=None, **kwargs):
    super(OneEdgeModel, self).__init__()
    # Fall back to a convolutional word encoder if none is supplied.
    if tokens_encoder is None:
        tokens_encoder = modules.ConvWordsEncoder(**kwargs)
    self._tokens_encoder: nn.Module = tokens_encoder
def __init__(self, tokens_encoder=None, **kwargs):
    super(PooledEdgesModel, self).__init__()
    if tokens_encoder is None:
        tokens_encoder = modules.ConvWordsEncoder(**kwargs)
    self._tokens_encoder: nn.Module = tokens_encoder
    # Reuse the encoder's pooling module to collapse per-edge encodings
    # into a single graph representation.
    self._pool = self._tokens_encoder._pool
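# --- Example (sketch): pooling per-edge encodings ---------------------------
# Self-contained illustration of what the shared _pool module above does.
# AdaptiveMaxPool1d(1) is an assumption taken from the commented-out line in
# GNNModel below; the tensor shapes here are placeholders, not from the
# original code.
import torch
import torch.nn as nn

pool = nn.AdaptiveMaxPool1d(1)
edge_encodings = torch.rand(2, 200, 7)         # 2 graphs, 200-dim encodings, 7 edges each
graph_vec = pool(edge_encodings).squeeze(-1)   # max over edges -> shape (2, 200)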
def __init__(self, tokens_encoder=None, **kwargs):
    super(STAGGModel, self).__init__()
    if tokens_encoder is None:
        tokens_encoder = modules.ConvWordsEncoder(**kwargs)
    self._tokens_encoder: nn.Module = tokens_encoder
    # Map the 9 hand-crafted STAGG features of a candidate graph
    # to a single non-negative score.
    self._weights_layer = nn.Sequential(
        nn.Linear(in_features=9, out_features=1),
        nn.ReLU()
    )
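# --- Example (sketch): scoring STAGG feature vectors ------------------------
# Minimal sketch of the _weights_layer above: a linear layer collapses the
# 9 STAGG features of each candidate graph into one score. The batch size and
# random feature values are placeholders, not from the original code.
import torch
import torch.nn as nn

weights_layer = nn.Sequential(nn.Linear(in_features=9, out_features=1), nn.ReLU())

stagg_features = torch.rand(4, 9)        # 4 candidate graphs, 9 features each
scores = weights_layer(stagg_features)   # -> shape (4, 1), one score per candidate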
def __init__(self, tokens_encoder=None, **kwargs):
    super(GNNModel, self).__init__()
    if tokens_encoder is None:
        tokens_encoder = modules.ConvWordsEncoder(**kwargs)
    self._tokens_encoder: nn.Module = tokens_encoder
    # Question and graph vectors are projected into a shared space of
    # half the encoder's output size.
    self.output_vector_size = tokens_encoder.output_vector_size // 2
    self._gnn: nn.Module = GNN(
        self._tokens_encoder._word_embedding.embedding_dim,
        tokens_encoder.output_vector_size,
        hp_dropout=kwargs.get("hp_dropout", 0.1),
        hp_gated=kwargs.get("hp_gated", True))
    # self._pool = nn.AdaptiveMaxPool1d(1)
    self._question_layer = nn.Sequential(
        nn.Linear(in_features=tokens_encoder.output_vector_size,
                  out_features=self.output_vector_size),
        nn.ReLU())
    self._graph_layer = nn.Sequential(
        nn.Linear(in_features=tokens_encoder.output_vector_size,
                  out_features=self.output_vector_size),
        nn.ReLU())
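# --- Example (sketch): projecting question and graph encodings --------------
# Illustrates the two projection layers built in GNNModel.__init__ above.
# The encoder size (200), candidate count, and the final dot-product scoring
# step are assumptions for demonstration; the real values come from
# tokens_encoder.output_vector_size, and the actual matching logic lives in
# the model's forward pass.
import torch
import torch.nn as nn

encoder_size = 200
output_size = encoder_size // 2

question_layer = nn.Sequential(nn.Linear(encoder_size, output_size), nn.ReLU())
graph_layer = nn.Sequential(nn.Linear(encoder_size, output_size), nn.ReLU())

question_vec = torch.rand(1, encoder_size)   # pooled question encoding
graph_vecs = torch.rand(5, encoder_size)     # 5 candidate graph encodings from the GNN

q = question_layer(question_vec)             # -> (1, output_size)
g = graph_layer(graph_vecs)                  # -> (5, output_size)
scores = g @ q.t()                           # -> (5, 1) similarity score per candidate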