def __init__(self, conv_dim, m_dim, b_dim, dropout):
    # note: `dropout` is accepted but never forwarded to any sub-layer in this variant
    super(Discriminator, self).__init__()
    graph_conv_dim, aux_dim, linear_dim = conv_dim

    # discriminator
    self.gcn_layer = GraphConvolution(m_dim, graph_conv_dim, b_dim)
    self.agg_layer = GraphAggregation(graph_conv_dim[-1] + m_dim, aux_dim, torch.nn.Tanh())
    self.multi_dense_layer = MultiDenseLayer(aux_dim, linear_dim, torch.nn.Tanh())
    self.output_layer = nn.Linear(linear_dim[-1], 1)
def __init__(self, conv_dim, m_dim, b_dim, z_dim, with_features=False, f_dim=0, dropout_rate=0.):
    super(EncoderVAE, self).__init__()
    graph_conv_dim, aux_dim, linear_dim = conv_dim

    # encoder (same graph-conv backbone as the discriminator)
    self.gcn_layer = GraphConvolution(m_dim, graph_conv_dim, b_dim, with_features, f_dim, dropout_rate)
    self.agg_layer = GraphAggregation(graph_conv_dim[-1] + m_dim, aux_dim, torch.nn.Tanh(),
                                      with_features, f_dim, dropout_rate)
    self.multi_dense_layer = MultiDenseLayer(aux_dim, linear_dim, torch.nn.Tanh(),
                                             dropout_rate=dropout_rate)

    # heads parameterizing the Gaussian posterior q(z | G)
    self.emb_mean = nn.Linear(linear_dim[-1], z_dim)
    self.emb_logvar = nn.Linear(linear_dim[-1], z_dim)
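# A minimal sketch of the matching encode pass. The reparameterization trick is
# standard VAE machinery; the sub-layer call signatures (gcn_layer(node, adj),
# agg_layer(annotations), multi_dense_layer(h)) are assumptions about this
# repo's layer APIs rather than verbatim source, and `torch` is assumed imported.
def forward(self, adj, node):
    h = self.gcn_layer(node, adj)            # per-node embeddings, width graph_conv_dim[-1]
    annotations = torch.cat((h, node), -1)   # skip-connect raw features: graph_conv_dim[-1] + m_dim
    h = self.agg_layer(annotations)          # graph-level embedding, width aux_dim
    h = self.multi_dense_layer(h)
    mu, logvar = self.emb_mean(h), self.emb_logvar(h)
    return self.reparameterize(mu, logvar), mu, logvar

def reparameterize(self, mu, logvar):
    # z = mu + sigma * eps with eps ~ N(0, I): sampling stays differentiable w.r.t. mu and logvar
    std = torch.exp(0.5 * logvar)
    return mu + torch.randn_like(std) * std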
def __init__(self, conv_dim, m_dim, b_dim, with_features=False, f_dim=0, dropout_rate=0.):
    super(Discriminator, self).__init__()
    self.activation_f = torch.nn.Tanh()
    graph_conv_dim, aux_dim, linear_dim = conv_dim

    # discriminator
    self.gcn_layer = GraphConvolution(m_dim, graph_conv_dim, b_dim, with_features, f_dim, dropout_rate)
    self.agg_layer = GraphAggregation(graph_conv_dim[-1] + m_dim, aux_dim, self.activation_f,
                                      with_features, f_dim, dropout_rate)
    self.multi_dense_layer = MultiDenseLayer(aux_dim, linear_dim, self.activation_f,
                                             dropout_rate=dropout_rate)
    self.output_layer = nn.Linear(linear_dim[-1], 1)
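# Hedged sketch of the corresponding discriminator forward pass, under the same
# assumed layer signatures as above. Returning the penultimate embedding h
# alongside the logit is a common MolGAN-style convention (e.g. for feature
# matching) and is assumed here, not taken from the source.
def forward(self, adj, node, activation=None):
    h = self.gcn_layer(node, adj)
    annotations = torch.cat((h, node), -1)   # matches graph_conv_dim[-1] + m_dim above
    h = self.agg_layer(annotations)
    h = self.multi_dense_layer(h)
    output = self.output_layer(h)            # one real/fake logit per graph
    output = activation(output) if activation is not None else output
    return output, h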
def __init__(self, conv_dim, m_dim, b_dim, dropout):
    super(Discriminator, self).__init__()
    graph_conv_dim, aux_dim, linear_dim = conv_dim  # e.g. [[128, 64], 128, [128, 64]]

    # discriminator
    self.gcn_layer = GraphConvolution(m_dim, graph_conv_dim, b_dim, dropout)
    self.agg_layer = GraphAggregation(graph_conv_dim[-1], aux_dim, m_dim, dropout)

    # multi dense layer; note there is no nonlinearity between the Linear
    # layers, so up to dropout this stack composes to a single affine map
    layers = []
    for c0, c1 in zip([aux_dim] + linear_dim[:-1], linear_dim):
        layers.append(nn.Linear(c0, c1))
        layers.append(nn.Dropout(dropout))
    self.linear_layer = nn.Sequential(*layers)

    self.output_layer = nn.Linear(linear_dim[-1], 1)
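# Sketch of a forward pass for this variant, again with assumed sub-layer
# signatures. Here GraphAggregation was built with m_dim as its third argument,
# so it is assumed to consume the conv output concatenated with the raw node
# annotations; the dense stack is the nn.Sequential built above.
def forward(self, adj, node, activation=None):
    h = self.gcn_layer(node, adj)
    h = self.agg_layer(torch.cat((h, node), -1))   # assumed input width graph_conv_dim[-1] + m_dim
    h = self.linear_layer(h)
    output = self.output_layer(h)
    return activation(output) if activation is not None else output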