Example #1
0
    def __init__(self,
                 input_size,
                 num_tree_layers,
                 single_hidden_size,
                 depth_embedding_dim=None,
                 normalize=False,
                 dropout=0.):
        """Layered tree recurrence: an LSTM cell (with dropout) over inputs
        concatenated with a per-layer depth signal.

        Args:
            input_size: width of each input vector.
            num_tree_layers: number of tree depth levels; also the width of
                the raw depth signal fed to the optional embedding.
            single_hidden_size: hidden size of the recurrent cell.
            depth_embedding_dim: if given, project the depth signal into
                this many dimensions through a bias-free linear layer.
            normalize: if True, normalize the concatenated input features.
            dropout: dropout probability passed to the recurrent cell.
        """
        super().__init__()

        # Keep the raw configuration around on the instance.
        self.input_size = input_size
        self.num_tree_layers = num_tree_layers
        self.single_hidden_size = single_hidden_size
        self.depth_embedding_dim = depth_embedding_dim
        self.normalize = normalize
        self.dropout = dropout

        if self.depth_embedding_dim is None:
            # No embedding: the depth signal keeps its raw width.
            self.depths_dim = self.num_tree_layers
        else:
            self.depths_dim = self.depth_embedding_dim
            # Bias-free projection of the depth signal.
            self.depth_embeddings = nn.Linear(
                in_features=self.num_tree_layers,
                out_features=self.depth_embedding_dim,
                bias=False)
            init_layers_uniform(min_value=-0.05,
                                max_value=0.05,
                                layers=[self.depth_embeddings])

        if self.normalize:
            self.norm = NormalizationLayer(
                features_num=self.input_size + self.depths_dim)

        # Recurrent core over [input, depth-signal] concatenations.
        self.layered_recurrent = LSTMCellDropout(
            input_size=self.input_size + self.depths_dim,
            hidden_size=self.single_hidden_size,
            dropout=dropout)
Example #2
0
    def __init__(self, num_embeddings, embedding_dim, sparse=False):
        """Embedding lookup table initialized uniformly in [-0.1, 0.1].

        Args:
            num_embeddings: size of the vocabulary (number of rows).
            embedding_dim: width of each embedding vector.
            sparse: whether gradients w.r.t. the weight matrix are sparse.
        """
        super().__init__()
        self.sparse = sparse

        # Underlying torch embedding table; weights re-initialized below.
        self.model = nn.Embedding(
            num_embeddings=num_embeddings,
            embedding_dim=embedding_dim,
            sparse=sparse,
        )
        init_layers_uniform(-0.1, 0.1, [self.model])
Example #3
0
    def __init__(self, method, hidden_size):
        """Attention scorer configuration.

        Args:
            method: scoring scheme; 'general' adds a learned square
                projection, any other value creates no extra parameters.
            hidden_size: dimensionality of the vectors being scored.
        """
        # Zero-argument super() for consistency with the other modules
        # here; equivalent to super(Attn, self).__init__() at runtime.
        super().__init__()

        self.method = method
        self.hidden_size = hidden_size

        if self.method == 'general':
            # Learned hidden_size x hidden_size projection used by the
            # 'general' scoring scheme.
            self.attn = nn.Linear(self.hidden_size, self.hidden_size)
            init_layers_uniform(min_value=-0.05,
                                max_value=0.05,
                                layers=[self.attn])
Example #4
0
    def __init__(self, input_size, output_size, bias=True):
        """Single affine (fully connected) layer, uniformly initialized
        in [-0.05, 0.05].

        Args:
            input_size: number of input features.
            output_size: number of output features.
            bias: whether the linear layer learns an additive bias term.
        """
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size

        # Core linear transform; weights re-initialized just below.
        self.affine = nn.Linear(self.input_size, self.output_size, bias=bias)

        init_layers_uniform(-0.05, 0.05, [self.affine])
Example #5
0
 def __init__(self, features_num):
     """1-D batch normalization over `features_num` features, with its
     parameters passed through init_layers_uniform(-0.05, 0.05).

     Args:
         features_num: number of features (channels) to normalize.
     """
     super().__init__()
     self.features_num = features_num
     # BatchNorm1d across the feature dimension.
     self.norm = torch.nn.BatchNorm1d(features_num)
     init_layers_uniform(min_value=-0.05,
                         max_value=0.05,
                         layers=[self.norm])