Example #1
    def __init__(self,
                 d_model,
                 nhead,
                 dim_feedforward=2048,
                 dropout=0.1,
                 activation: ActivationFunction = F.relu):
        super(TransformerDecoderLayer, self).__init__()

        # Self-attention over the decoder input (target sequence)
        self.self_attn = MultiHeadAttention(d_model, nhead, dropout=dropout)
        # Cross-attention over the encoder output (memory)
        self.multihead_attn = MultiHeadAttention(d_model,
                                                 nhead,
                                                 dropout=dropout)
        # Position-wise feed-forward network
        self.linear1 = Linear(d_model, dim_feedforward)
        self.dropout = torch.nn.Dropout(dropout)
        self.linear2 = Linear(dim_feedforward, d_model)

        # One LayerNorm and one residual dropout per sub-layer
        self.norm1 = LayerNorm(d_model)
        self.norm2 = LayerNorm(d_model)
        self.norm3 = LayerNorm(d_model)
        self.dropout1 = torch.nn.Dropout(dropout)
        self.dropout2 = torch.nn.Dropout(dropout)
        self.dropout3 = torch.nn.Dropout(dropout)

        self.activation = activation
        self.reset_parameters()
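A minimal instantiation sketch, assuming the enclosing TransformerDecoderLayer class also defines the usual forward(tgt, memory) pass and the reset_parameters() method called above (neither is shown in the snippet); the shapes are illustrative:

layer = TransformerDecoderLayer(d_model=512, nhead=8)
tgt = torch.zeros(20, 32, 512)     # (tgt_len, batch, d_model)
memory = torch.zeros(10, 32, 512)  # (src_len, batch, d_model)
out = layer(tgt, memory)           # expected shape: (20, 32, 512)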
Example #2
    def __init__(self,
                 heads,
                 node_channel_in,
                 memory_channel_in,
                 node_channel_mid,  # two-element sequence: channel widths after stages 1 and 2
                 node_channel_out,
                 num_nodes=25,
                 kernel_size=5):
        super().__init__()

        self.temporal_self_attention = TemporalSelfAttention(
            heads=heads,
            embedding_in=num_nodes * node_channel_in,
            embedding_out=num_nodes * node_channel_mid[0])

        self.temporal_input_attention = TemporalInputAttention(
            heads=heads,
            embedding_in=num_nodes * heads * node_channel_mid[0],
            embedding_out=num_nodes * node_channel_mid[1],
            memory_in=num_nodes * memory_channel_in)

        self.spatial_gcn = SpatialGCN(in_channels=heads * node_channel_mid[1],
                                      out_channels=node_channel_out,
                                      kernel_size=kernel_size)

        # A LayerNorm after each stage, over the flattened per-node features
        self.norm_1 = LayerNorm(node_channel_mid[0] * heads * num_nodes)
        self.norm_2 = LayerNorm(node_channel_mid[1] * heads * num_nodes)
        self.norm_3 = LayerNorm(node_channel_out * num_nodes)
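Construction would look roughly like the following; the enclosing class name is not shown in the snippet, so STABlock is a placeholder, and the values are illustrative:

block = STABlock(heads=4,               # STABlock is a hypothetical name
                 node_channel_in=3,
                 memory_channel_in=16,
                 node_channel_mid=(8, 8),
                 node_channel_out=16,
                 num_nodes=25,
                 kernel_size=5)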
Example #3
def NormLayer(type, num_features, num_classes=-1, affine=True):
    if type == 'batchnorm':
        return nn.BatchNorm2d(num_features, affine=affine)
    elif type == 'layernorm':
        return LayerNorm(num_features, affine=affine)
    elif type == 'conditional_batchnorm':
        if num_classes == -1:
            raise ValueError('conditional_batchnorm requires num_classes (got -1)')
        return ConditionalBatchNorm2d(num_features, num_classes, affine=affine)
    else:
        raise ValueError('unknown norm type: {}'.format(type))
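A quick usage sketch; the values are illustrative, and ConditionalBatchNorm2d's forward signature is an assumption (it typically takes the class index alongside the feature map):

norm = NormLayer('batchnorm', num_features=64)
x = torch.randn(8, 64, 32, 32)   # NCHW feature map
y = norm(x)                      # normalized, same shape as x

cond = NormLayer('conditional_batchnorm', num_features=64, num_classes=10)
# Assumed call pattern: cond(x, class_index) -- check the actual
# ConditionalBatchNorm2d implementation for its forward signature.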
Example #4
    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)
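The clones helper isn't shown here; in the Annotated-Transformer-style code this snippet follows, it deep-copies a module N times into an nn.ModuleList. A minimal sketch under that assumption:

import copy
import torch.nn as nn

def clones(module, N):
    # Produce N identical, independently parameterized copies of module.
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])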
Example #5
    def __init__(self, layer, N):
        super().__init__()
        self.layers = clones(layer, N)  # N independent copies of the DecoderLayer
        self.norm = LayerNorm(layer.size)
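The forward pass that usually pairs with this __init__ is not part of the snippet; a sketch assuming the Annotated-Transformer interface, where each DecoderLayer takes (x, memory, src_mask, tgt_mask):

    def forward(self, x, memory, src_mask, tgt_mask):
        # Run the input through each decoder layer in turn,
        # then apply the final LayerNorm to the stack's output.
        for layer in self.layers:
            x = layer(x, memory, src_mask, tgt_mask)
        return self.norm(x)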