def __init__(
    self,
    in_feats: int,
    hidden_size: int,
    out_feats: int,
    pooling_layer: int,
    pooling_rates: List[float],
    n_dropout: float = 0.5,
    adj_dropout: float = 0.3,
    activation: str = "elu",
    improved: bool = False,
    aug_adj: bool = False,
):
    super(GraphUnet, self).__init__()
    self.improved = improved
    self.n_dropout = n_dropout
    self.adj_dropout = adj_dropout
    self.act = get_activation(activation)

    # One pooling rate is consumed per pooling layer.
    assert pooling_layer <= len(pooling_rates)
    pooling_rates = [float(x) for x in pooling_rates[:pooling_layer]]

    self.unet = GraphUnetLayer(hidden_size, pooling_layer, pooling_rates, activation, n_dropout, aug_adj)
    # Input and output GCN projections around the U-Net body.
    self.in_gcn = GCNLayer(in_feats, hidden_size)
    self.out_gcn = GCNLayer(hidden_size, out_feats)
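# Example construction of GraphUnet with placeholder sizes (not taken from the
# source); it only illustrates that pooling_rates must supply at least one rate
# per pooling layer, per the assert above.
unet = GraphUnet(
    in_feats=1433,
    hidden_size=64,
    out_feats=7,
    pooling_layer=3,
    pooling_rates=[0.9, 0.7, 0.6],
)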
def __init__(
    self,
    hidden_size: int,
    pooling_layer: int,
    pooling_rates: List[float],
    activation: str = "elu",
    dropout: float = 0.5,
    aug_adj: bool = False,
):
    super(GraphUnetLayer, self).__init__()
    self.dropout = dropout
    self.activation = activation
    self.pooling_layer = pooling_layer
    self.gcn = GCNLayer(hidden_size, hidden_size)
    self.act = get_activation(activation)
    self.down_gnns = nn.ModuleList([GCNLayer(hidden_size, hidden_size) for _ in range(pooling_layer)])
    self.up_gnns = nn.ModuleList([GCNLayer(hidden_size, hidden_size) for _ in range(pooling_layer)])
    self.poolings = nn.ModuleList(
        [Pool(hidden_size, pooling_rates[i], aug_adj, dropout) for i in range(pooling_layer)]
    )
    self.unpoolings = nn.ModuleList([UnPool() for _ in range(pooling_layer)])
def __init__(self, nfeat, nhid, nclass, dropout, pooling_ratio, pooling_layer_type):
    def __get_layer_from_str__(layer_type):
        # Only "gcnconv" is currently supported, so GCNLayer is returned
        # regardless of the requested type.
        return GCNLayer

    super(SAGPoolNetwork, self).__init__()
    self.nfeat = nfeat
    self.nhid = nhid
    self.nclass = nclass
    self.dropout = dropout
    self.pooling_ratio = pooling_ratio

    self.conv_layer_1 = GCNLayer(self.nfeat, self.nhid)
    self.conv_layer_2 = GCNLayer(self.nhid, self.nhid)
    self.conv_layer_3 = GCNLayer(self.nhid, self.nhid)

    self.pool_layer_1 = SAGPoolLayers(self.nhid, Conv=__get_layer_from_str__(pooling_layer_type), ratio=self.pooling_ratio)
    self.pool_layer_2 = SAGPoolLayers(self.nhid, Conv=__get_layer_from_str__(pooling_layer_type), ratio=self.pooling_ratio)
    self.pool_layer_3 = SAGPoolLayers(self.nhid, Conv=__get_layer_from_str__(pooling_layer_type), ratio=self.pooling_ratio)

    # The first linear layer takes nhid * 2 features: the graph readout
    # concatenates two pooled representations per graph.
    self.lin_layer_1 = torch.nn.Linear(self.nhid * 2, self.nhid)
    self.lin_layer_2 = torch.nn.Linear(self.nhid, self.nhid // 2)
    self.lin_layer_3 = torch.nn.Linear(self.nhid // 2, self.nclass)
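# A minimal readout sketch matching the nhid * 2 input of lin_layer_1 above,
# following the usual SAGPool design of concatenating mean- and max-pooled node
# features. This helper is illustrative, not taken from the source.
import torch

def readout_sketch(x: torch.Tensor) -> torch.Tensor:
    # x: [num_nodes, nhid] -> [1, nhid * 2]
    return torch.cat([x.mean(dim=0), x.max(dim=0).values], dim=0).unsqueeze(0)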
def __init__(self, num_features, hidden_size):
    super(VGAE, self).__init__()
    self.num_features = num_features
    self.hidden_size = hidden_size
    # Shared first layer, then separate heads for the posterior mean and variance.
    self.conv1 = GCNLayer(self.num_features, self.hidden_size)
    self.conv2_mean = GCNLayer(self.hidden_size, self.hidden_size)
    self.conv2_var = GCNLayer(self.hidden_size, self.hidden_size)
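# A minimal sketch of how the two heads above are typically combined via the
# reparameterization trick. The log-variance naming follows the usual VGAE
# convention and is an assumption, not taken from the source.
import torch

def reparameterize(mean: torch.Tensor, log_var: torch.Tensor) -> torch.Tensor:
    # z = mu + sigma * eps, eps ~ N(0, I); keeps sampling differentiable
    # with respect to mean and log_var.
    std = torch.exp(0.5 * log_var)
    eps = torch.randn_like(std)
    return mean + std * eps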
def __init__(
    self,
    in_feats,
    hidden_size,
    out_feats,
    num_layers,
    dropout,
    activation="relu",
    residual=False,
    norm=None,
):
    super(GCN, self).__init__()
    shapes = [in_feats] + [hidden_size] * (num_layers - 1) + [out_feats]
    self.layers = nn.ModuleList(
        [
            GCNLayer(
                shapes[i],
                shapes[i + 1],
                # The output layer skips dropout, residual, norm, and activation.
                dropout=dropout if i != num_layers - 1 else 0,
                residual=residual if i != num_layers - 1 else None,
                norm=norm if i != num_layers - 1 else None,
                activation=activation if i != num_layers - 1 else None,
            )
            for i in range(num_layers)
        ]
    )
    self.num_layers = num_layers
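# Example construction using the signature above; the feature and class sizes
# are Cora-like placeholders, not values from the source.
model = GCN(in_feats=1433, hidden_size=64, out_feats=7, num_layers=2, dropout=0.5)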
def __init__(
    self,
    in_feats,
    out_feats,
    hidden_size,
    num_layers,
    dropout=0.5,
    drop_edge_rate=0.1,
    activation="relu",
    norm="batchnorm",
    group=2,
):
    super(RevGCN, self).__init__()
    self.dropout = dropout
    self.drop_edge_rate = drop_edge_rate
    self.num_layers = num_layers
    self.layers = nn.ModuleList()
    self.norm = get_norm_layer(norm, hidden_size)
    self.act = get_activation(activation)
    for i in range(num_layers):
        if i == 0:
            self.layers.append(GCNLayer(in_feats, hidden_size, residual=True))
        elif i == num_layers - 1:
            self.layers.append(GCNLayer(hidden_size, out_feats, residual=True))
        else:
            # Hidden layers operate on hidden_size // group channels each and
            # are wrapped in a reversible block to save activation memory.
            conv = GCNLayer(hidden_size // group, hidden_size // group)
            res_conv = ResGNNLayer(
                conv,
                hidden_size // group,
                activation=activation,
                norm=norm,
                out_norm=norm,
                out_channels=hidden_size // group,
            )
            self.layers.append(RevGNNLayer(res_conv, group))
def __init__(self, nfeat, nhid, nclass, dropout, alpha, t, k, eps, gdctype):
    super(GDC_GCN, self).__init__()
    # preproc params
    self.alpha = alpha
    self.t = t
    self.k = k
    self.eps = eps
    self.gdc_type = gdctype
    self.data = None

    # GCN init
    self.nfeat = nfeat
    self.gc1 = GCNLayer(nfeat, nhid)
    self.gc2 = GCNLayer(nhid, nclass)
    self.dropout = dropout
def __init__(self, num_features, num_classes, hidden_size, num_layers, dropout, norm=None, activation="relu"):
    super(DrGCN, self).__init__()
    self.num_features = num_features
    self.num_classes = num_classes
    self.hidden_size = hidden_size
    self.num_layers = num_layers
    self.dropout = dropout
    shapes = [num_features] + [hidden_size] * (num_layers - 1) + [num_classes]
    self.convs = nn.ModuleList(
        [
            GCNLayer(shapes[layer], shapes[layer + 1], activation=activation, norm=norm)
            for layer in range(num_layers - 1)
        ]
    )
    # The final layer omits activation and norm.
    self.convs.append(GCNLayer(shapes[-2], shapes[-1]))
    # One squeeze-and-excitation block per layer input.
    self.ses = nn.ModuleList(
        [SELayer(shapes[layer], se_channels=int(np.sqrt(shapes[layer]))) for layer in range(num_layers)]
    )
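# A minimal sketch (not from the source) of how the parallel `convs` and `ses`
# lists are typically interleaved in the forward pass: each SE block
# recalibrates features before the matching convolution. Both `graph` and the
# GCNLayer call signature are assumptions here.
def forward_sketch(self, graph, x):
    for se, conv in zip(self.ses, self.convs):
        x = se(x)
        x = conv(graph, x)
    return x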
def __init__(
    self,
    in_feats: int,
    out_feats: int,
    num_layers: int,
    activation: str = "relu",
):
    super(GraceEncoder, self).__init__()
    # Hidden layers are twice as wide as the output embedding.
    shapes = [in_feats] + [2 * out_feats] * (num_layers - 1) + [out_feats]
    self.layers = nn.ModuleList([GCNLayer(shapes[i], shapes[i + 1]) for i in range(num_layers)])
    self.activation = get_activation(activation)
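# A plausible forward pass for the encoder above (a sketch; the GCNLayer call
# signature is an assumption): apply the stored activation after every layer.
def forward_sketch(self, graph, x):
    for layer in self.layers:
        x = self.activation(layer(graph, x))
    return x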
def gcn_model(in_feats, hidden_size, num_layers, out_feats, dropout, residual, norm, activation):
    shapes = [in_feats] + [hidden_size] * (num_layers - 1) + [out_feats]
    return nn.ModuleList(
        [
            GCNLayer(
                shapes[i],
                shapes[i + 1],
                # As in GCN above, the output layer gets no dropout, residual,
                # norm, or activation.
                dropout=dropout if i != num_layers - 1 else 0,
                residual=residual if i != num_layers - 1 else None,
                norm=norm if i != num_layers - 1 else None,
                activation=activation if i != num_layers - 1 else None,
            )
            for i in range(num_layers)
        ]
    )
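# Example call with placeholder dimensions (not from the source):
layers = gcn_model(in_feats=1433, hidden_size=64, num_layers=2, out_feats=7,
                   dropout=0.5, residual=False, norm=None, activation="relu")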
def __init__(self, num_features, num_classes, hidden_size, num_layers, dropout):
    super(DrGCN, self).__init__()
    self.num_features = num_features
    self.num_classes = num_classes
    self.hidden_size = hidden_size
    self.num_layers = num_layers
    self.dropout = dropout
    shapes = [num_features] + [hidden_size] * (num_layers - 1) + [num_classes]
    self.convs = nn.ModuleList(
        [GCNLayer(shapes[layer], shapes[layer + 1]) for layer in range(num_layers)]
    )
    self.ses = nn.ModuleList(
        [SELayer(shapes[layer], se_channels=int(np.sqrt(shapes[layer]))) for layer in range(num_layers)]
    )
def __init__(self, num_edge, num_channels, w_in, w_out, num_class, num_nodes, num_layers):
    super(GTN, self).__init__()
    self.num_edge = num_edge
    self.num_channels = num_channels
    self.num_nodes = num_nodes
    self.w_in = w_in
    self.w_out = w_out
    self.num_class = num_class
    self.num_layers = num_layers
    # Stack of graph transformer layers; only the first is flagged as such.
    layers = []
    for i in range(num_layers):
        layers.append(GTLayer(num_edge, num_channels, num_nodes, first=(i == 0)))
    self.layers = nn.ModuleList(layers)
    self.cross_entropy_loss = nn.CrossEntropyLoss()
    self.gcn = GCNLayer(in_features=self.w_in, out_features=w_out)
    # Per-channel GCN outputs are concatenated before classification.
    self.linear1 = nn.Linear(self.w_out * self.num_channels, self.w_out)
    self.linear2 = nn.Linear(self.w_out, self.num_class)
def __init__(self, in_feats, hidden_size, out_feats, dropout):
    super(Gnn, self).__init__()
    self.conv1 = GCNLayer(in_feats, hidden_size)
    self.conv2 = GCNLayer(hidden_size, out_feats)
    self.dropout = nn.Dropout(dropout)
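# A minimal two-layer forward sketch for Gnn (an assumption, not source code;
# the GCNLayer call signature is hypothetical):
def forward_sketch(self, graph, x):
    x = torch.relu(self.conv1(graph, x))
    x = self.dropout(x)
    return self.conv2(graph, x)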
def __init__(self, num_features, hidden_size, num_classes, dropout):
    super(M3S, self).__init__()
    self.dropout = dropout
    self.gcn1 = GCNLayer(num_features, hidden_size)
    self.gcn2 = GCNLayer(hidden_size, num_classes)
def __init__(self, in_feats, out_feats, hidden_size, num_layers):
    super(JKNet, self).__init__()
    shapes = [in_feats] + [hidden_size] * num_layers
    self.layers = nn.ModuleList(
        [GCNLayer(shapes[i], shapes[i + 1]) for i in range(num_layers)]
    )
    # Jumping-knowledge readout: fc consumes the concatenation of all
    # num_layers intermediate representations.
    self.fc = nn.Linear(hidden_size * num_layers, out_feats)
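# A sketch of the concatenation-style jumping-knowledge forward implied by the
# fc dimensions above (hypothetical; the GCNLayer call signature is assumed):
def forward_sketch(self, graph, x):
    outputs = []
    for layer in self.layers:
        x = layer(graph, x)
        outputs.append(x)
    # Concatenate every layer's output along the feature dimension:
    # [N, hidden_size * num_layers] -> [N, out_feats].
    return self.fc(torch.cat(outputs, dim=1))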