def __init__(self):
    super().__init__()
    # Two spatial GCN layers lift the 3-channel joint input to the per-node encoding size.
    self.spatial_gcn_1 = SpatialGCN(3, 12, conf_kernel_size)
    self.spatial_gcn_2 = SpatialGCN(12, conf_encoding_per_node, conf_kernel_size)
    # Three cloned attention + GCN encoder stages.
    self.encoder_1, self.encoder_2, self.encoder_3 = clones(
        AttentionWithGCNEncoder(heads=conf_heads,
                                node_channel_in=conf_encoding_per_node,
                                node_channel_mid=conf_internal_per_node,
                                node_channel_out=conf_encoding_per_node,
                                num_nodes=conf_num_nodes,
                                kernel_size=conf_kernel_size), 3)
    # Three cloned attention + GCN decoder stages.
    self.decoder_1, self.decoder_2, self.decoder_3 = clones(
        AttentionWithGCNDecoder(heads=3,
                                node_channel_in=conf_encoding_per_node,
                                memory_channel_in=conf_encoding_per_node,
                                node_channel_mid=(conf_internal_per_node, conf_internal_per_node),
                                node_channel_out=conf_encoding_per_node,
                                num_nodes=conf_num_nodes,
                                kernel_size=conf_kernel_size), 3)
    # Node generators expand the skeleton one subset at a time: first the ntu_ss_1 joints,
    # then the joints added in ntu_ss_2, then the joints added in ntu_ss_3.
    self.generate_nodes_1 = GenerateNodes(
        total_nodes=conf_num_nodes,
        node_channel_in=conf_encoding_per_node,
        num_seeds=0,
        new_nodes=ntu_ss_1['num_nodes'])
    new_count = ntu_ss_2['num_nodes'] - ntu_ss_1['num_nodes']
    self.generate_nodes_2 = GenerateNodes(
        total_nodes=conf_num_nodes,
        node_channel_in=conf_encoding_per_node,
        num_seeds=ntu_ss_1['num_nodes'],
        new_nodes=new_count)
    new_count = ntu_ss_3['num_nodes'] - ntu_ss_2['num_nodes']
    self.generate_nodes_3 = GenerateNodes(
        total_nodes=conf_num_nodes,
        node_channel_in=conf_encoding_per_node,
        num_seeds=ntu_ss_2['num_nodes'],
        new_nodes=new_count)
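# The constructors in these snippets all rely on a `clones` helper that is not shown.
# A minimal sketch, assuming the usual convention (as in the Annotated Transformer) of
# deep-copying a module into an nn.ModuleList:
import copy

import torch.nn as nn


def clones(module, N):
    # Produce N independent, identically structured copies of the given module.
    return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])

# Because the result is an nn.ModuleList of ordinary modules, unpacking it into individual
# attributes (as with encoder_1..encoder_3 above) still registers each copy's parameters
# with the parent module.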
def __init__(self, layer, N): """ :param layer: a connection layer with two layers :param N: in the paper, N is 6 """ super(Conv_block, self).__init__() self.layers = clones( layer, N ) # each layer have a encodersublayer with two-sublayers, there are 6 layers self.norm = Layernorm(layer.size)
def __init__(self, h, d_model, dropout=0.1): "Take in model size and number of heads." super(MultiHeadedAttention, self).__init__() assert d_model % h == 0 # We assume d_v always equals d_k self.d_k = d_model // h self.h = h self.linears = clones(nn.Linear(d_model, d_model), 4) # input size = 512, output size = 512 self.attn = None self.dropout = nn.Dropout(p=dropout)
def __init__(self, total_nodes, node_channel_in, num_seeds, new_nodes, node_channel_out=3):
    super().__init__()
    # self.linear = nn.Linear(total_nodes*node_channel_in + num_seeds*node_channel_out, node_channel_out)
    # Each new node gets its own linear head; the input is every encoded node's features
    # concatenated with the coordinates of the num_seeds nodes generated so far.
    linear_input_size = total_nodes * node_channel_in + num_seeds * node_channel_out
    self.node_projections = clones(
        nn.Linear(linear_input_size, node_channel_out), new_nodes)
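# How GenerateNodes applies these per-node projection heads is not shown. A plausible
# forward pass, inferred from the input-size arithmetic above (each new node is regressed
# from all encoded node features concatenated with the coordinates of the seed nodes
# generated so far); the (batch, nodes, channels) layout and the concatenation order are
# assumptions, and `import torch` is assumed:
def forward(self, encoded_nodes, seed_coords=None):
    # encoded_nodes: (batch, total_nodes, node_channel_in)
    # seed_coords:   (batch, num_seeds, node_channel_out), or None when num_seeds == 0
    flat = encoded_nodes.flatten(start_dim=1)
    if seed_coords is not None:
        flat = torch.cat([flat, seed_coords.flatten(start_dim=1)], dim=1)
    # One independent linear head per new node, each predicting node_channel_out values.
    new_nodes = [projection(flat) for projection in self.node_projections]
    return torch.stack(new_nodes, dim=1)  # (batch, new_nodes, node_channel_out)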
def __init__(self, h, d_model, dropout=0.1): """ :param h: number of heads :param d_model: dimension of model :param dropout: drop-out rate """ super(MultiHeadAttention, self).__init__() # we assume d_v always equals d_k self.d_k = d_model // h self.h = h self.linears = clones(nn.Linear(d_model, d_model), 4) self.attn = None self.dropout = nn.Dropout(p=dropout)
def __init__(self, layer, N):
    super().__init__()
    self.layers = clones(layer, N)
    self.norm = LayerNorm(layer.size)
def __init__(self, layer, N):
    super().__init__()
    # self.layers holds N cloned DecoderLayer objects.
    self.layers = clones(layer, N)
    self.norm = LayerNorm(layer.size)
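# The forward pass of these encoder/decoder stacks is not included in the snippets. Under the
# usual convention, each cloned layer is applied in sequence and the final output is normalized;
# a sketch for the encoder-style stack (the decoder variant would additionally pass the encoder
# memory and a target mask into each layer):
def forward(self, x, mask):
    # Run the input through every cloned layer in turn, then apply the final LayerNorm.
    for layer in self.layers:
        x = layer(x, mask)
    return self.norm(x)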
def __init__(self, size, self_attn, feed_forward, dropout):
    super(EncoderSublayer, self).__init__()
    self.self_attn = self_attn
    self.feed_forward = feed_forward
    # Two residual sublayer connections: one around self-attention, one around the feed-forward.
    self.sublayer = clones(SublayerConnection(size, dropout), 2)
    self.size = size
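# EncoderSublayer wraps its two sub-modules in SublayerConnection objects, which are not
# defined in these snippets. A sketch assuming the common pre-norm residual formulation
# (normalize, apply the sublayer, dropout, add the residual), reusing the LayerNorm sketched
# earlier:
import torch.nn as nn


class SublayerConnection(nn.Module):
    "Residual connection around an arbitrary sublayer, with layer norm and dropout."

    def __init__(self, size, dropout):
        super().__init__()
        self.norm = LayerNorm(size)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x, sublayer):
        # Pre-norm residual: x + dropout(sublayer(norm(x))).
        return x + self.dropout(sublayer(self.norm(x)))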
def __init__(self, decoder_unit, qty):
    super().__init__()
    self.decoder_units = clones(decoder_unit, qty)
def __init__(self, encoder_unit, qty):
    super().__init__()
    self.encoder_units = clones(encoder_unit, qty)