def __init__(self, output_dim, num_node_feats, num_edge_feats,
             latent_dim=[32, 32, 32, 1], k=30,
             conv1d_channels=[16, 32], conv1d_kws=[0, 5]):
    print('Initializing DVGCN')
    super(DVGCN, self).__init__()
    self.latent_dim = latent_dim
    self.output_dim = output_dim
    self.num_node_feats = num_node_feats
    self.num_edge_feats = num_edge_feats
    self.k = k
    self.total_latent_dim = sum(latent_dim)
    conv1d_kws[0] = self.total_latent_dim

    # graph convolution layers
    self.conv_params = nn.ModuleList()
    self.conv_params.append(nn.Linear(num_node_feats, latent_dim[0]))
    for i in range(1, len(latent_dim)):
        self.conv_params.append(nn.Linear(latent_dim[i - 1], latent_dim[i]))

    # 1D convolutions applied after sort pooling
    self.conv1d_params1 = nn.Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
    self.maxpool1d = nn.MaxPool1d(2, 2)
    self.conv1d_params2 = nn.Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)

    # add BatchNorm1d layers
    self.batch_norms = nn.ModuleList()
    for i in range(len(latent_dim)):
        self.batch_norms.append(nn.BatchNorm1d(latent_dim[i]))
    self.batch_norms1 = nn.BatchNorm1d(conv1d_channels[0])
    self.batch_norms2 = nn.BatchNorm1d(conv1d_channels[1])

    dense_dim = int((k - 2) / 2 + 1)
    self.dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]

    if num_edge_feats > 0:
        # map edge features into the latent size of the first layer
        # (latent_dim is a list here, so it must be indexed)
        self.w_e2l = nn.Linear(num_edge_feats, latent_dim[0])
    if output_dim > 0:
        self.out_params = nn.Linear(self.dense_dim, output_dim)

    weights_init(self)
def __init__(self, latent_dim, output_dim, num_node_feats, num_edge_feats, max_lv=3):
    super(EmbedMeanField, self).__init__()
    self.latent_dim = latent_dim
    self.output_dim = output_dim
    self.num_node_feats = num_node_feats
    self.num_edge_feats = num_edge_feats
    self.max_lv = max_lv

    self.w_n2l = nn.Linear(num_node_feats, latent_dim)
    if num_edge_feats > 0:
        self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
    if output_dim > 0:
        self.out_params = nn.Linear(latent_dim, output_dim)

    self.conv_params = nn.Linear(latent_dim, latent_dim)
    weights_init(self)
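# Minimal usage sketch (added for illustration; the hyper-parameter values are
# assumptions, not taken from this repository): EmbedMeanField projects
# num_node_feats-dimensional node attributes into a latent_dim-dimensional
# space, with max_lv presumably the number of mean-field message-passing rounds.
#
#   mean_field = EmbedMeanField(latent_dim=64, output_dim=32,
#                               num_node_feats=16, num_edge_feats=0, max_lv=3)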
def __init__(self, output_dim, num_node_feats, num_edge_feats,
             latent_dim=[32, 32, 32, 1], k=30,
             conv1d_channels=[16, 32], conv1d_kws=[0, 5]):
    print('Initializing GUNet')
    super(GUNet, self).__init__()
    self.latent_dim = latent_dim
    self.output_dim = output_dim
    self.num_node_feats = num_node_feats
    self.num_edge_feats = num_edge_feats
    self.k = k
    self.total_latent_dim = sum(latent_dim)
    conv1d_kws[0] = self.total_latent_dim

    self.conv_params = nn.ModuleList()
    self.conv_params.append(nn.Linear(num_node_feats, latent_dim[0]))
    for i in range(1, len(latent_dim)):
        self.conv_params.append(nn.Linear(latent_dim[i - 1], latent_dim[i]))

    self.conv1d_params1 = nn.Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
    self.maxpool1d = nn.MaxPool1d(2, 2)
    self.conv1d_params2 = nn.Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)

    dense_dim = int((k - 2) / 2 + 1)
    self.dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]

    if num_edge_feats > 0:
        # map edge features into the latent size of the first layer
        # (latent_dim is a list here, so it must be indexed)
        self.w_e2l = nn.Linear(num_edge_feats, latent_dim[0])
    if output_dim > 0:
        self.out_params = nn.Linear(self.dense_dim, output_dim)

    # pooling ratios for the Graph U-Net
    # ks = [4000, 3000, 2000, 1000]
    ks = [0.9, 0.7, 0.6, 0.5]
    # self.gUnet = ops.GraphUnet(ks, num_node_feats, 97).cuda()
    self.gUnet = ops.GraphUnet(ks, num_node_feats, 97)  # 97 == sum of the default latent_dim

    weights_init(self)
def __init__(self, output_dim, num_node_feats, num_edge_feats,
             latent_dim=[32, 32, 32, 1], k=30,
             conv1d_channels=[16, 32], conv1d_kws=[0, 5],
             conv1d_activation='ReLU'):
    print('Initializing DGCNN')
    super(DGCNN, self).__init__()
    self.latent_dim = latent_dim
    self.output_dim = output_dim
    self.num_node_feats = num_node_feats
    self.num_edge_feats = num_edge_feats
    self.k = k
    self.total_latent_dim = sum(latent_dim)
    conv1d_kws[0] = self.total_latent_dim

    self.conv_params = nn.ModuleList()
    self.conv_params.append(nn.Linear(num_node_feats + num_edge_feats, latent_dim[0]))
    for i in range(1, len(latent_dim)):
        self.conv_params.append(nn.Linear(latent_dim[i - 1], latent_dim[i]))

    self.conv1d_params1 = nn.Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
    self.maxpool1d = nn.MaxPool1d(2, 2)
    self.conv1d_params2 = nn.Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1], 1)

    dense_dim = int((k - 2) / 2 + 1)
    self.dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]

    # if num_edge_feats > 0:
    #     self.w_e2l = nn.Linear(num_edge_feats, num_node_feats)
    if output_dim > 0:
        self.out_params = nn.Linear(self.dense_dim, output_dim)

    # look up the activation class by name instead of using eval
    self.conv1d_activation = getattr(nn, conv1d_activation)()

    weights_init(self)
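# Sanity-check sketch (added here for illustration, not part of the original
# code): with the default hyper-parameters above, the flattened size fed into
# the final dense layer works out as follows.
#
#   total_latent_dim = 32 + 32 + 32 + 1 = 97          # per-vertex feature size
#   Conv1d(1, 16, kernel=97, stride=97): k * 97 -> k = 30 positions
#   MaxPool1d(2, 2):                     (30 - 2) // 2 + 1 = 15
#   Conv1d(16, 32, kernel=5, stride=1):  15 - 5 + 1 = 11
#   dense_dim = 11 * 32 = 352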
def __init__(self, input_dim, node_type_num, initial_dim=8,
             latent_dim=[16, 24, 32], max_node=12):
    print('Initializing Policy Nets')
    super(PolicyNN, self).__init__()
    self.latent_dim = latent_dim
    self.input_dim = input_dim
    self.node_type_num = node_type_num
    self.initial_dim = initial_dim

    # self.stop_mlp_hidden = 16
    self.start_mlp_hidden = 16
    self.tail_mlp_hidden = 24

    self.input_mlp = nn.Linear(self.input_dim, initial_dim)

    self.gcns = nn.ModuleList()
    self.layer_num = len(latent_dim)
    self.gcns.append(GCN(self.initial_dim, self.latent_dim[0]))
    for i in range(1, len(latent_dim)):
        self.gcns.append(GCN(self.latent_dim[i - 1], self.latent_dim[i]))
    self.dense_dim = latent_dim[-1]

    # self.stop_mlp1 = nn.Linear(self.dense_dim, self.stop_mlp_hidden)
    # self.stop_mlp_non_linear = nn.ReLU6()
    # self.stop_mlp2 = nn.Linear(self.stop_mlp_hidden, 2)

    # start MLP: scores a single dense_dim-sized node embedding
    self.start_mlp1 = nn.Linear(self.dense_dim, self.start_mlp_hidden)
    self.start_mlp_non_linear = nn.ReLU6()
    self.start_mlp2 = nn.Linear(self.start_mlp_hidden, 1)

    # tail MLP: takes a concatenation of two dense_dim-sized embeddings
    self.tail_mlp1 = nn.Linear(2 * self.dense_dim, self.tail_mlp_hidden)
    self.tail_mlp_non_linear = nn.ReLU6()
    self.tail_mlp2 = nn.Linear(self.tail_mlp_hidden, 1)

    weights_init(self)
def __init__(self, input_size, hidden_size):
    super(MLPRegression, self).__init__()
    self.h1_weights = nn.Linear(input_size, hidden_size)
    self.h2_weights = nn.Linear(hidden_size, 1)
    weights_init(self)
def __init__(self, input_size, num_labels):
    super(LogisticRegression, self).__init__()
    self.linear = nn.Linear(input_size, num_labels)
    weights_init(self)
def __init__(self, latent_dim, output_dim, num_node_feats, num_edge_feats,
             multi_h_emb_weight, max_k=3, max_block=3, dropout=0.3, reg=1):
    print('Attentional Embedding')
    super(AttentionEmbedMeanField, self).__init__()
    self.latent_dim = latent_dim
    self.output_dim = output_dim
    self.num_node_feats = num_node_feats
    self.num_edge_feats = num_edge_feats
    self.dropout = dropout
    self.reg = reg
    self.multi_h_emb_weight = multi_h_emb_weight
    self.max_k = max_k
    self.max_block = max_block

    self.k_hop_embedding = True
    self.graph_level_attention = True

    # layer 1: linear projection of the input X into the latent size
    self.w_n2l = nn.Linear(num_node_feats, latent_dim)
    self.bn1 = nn.BatchNorm1d(latent_dim)

    if num_edge_feats > 0:  # FIXME: never used
        # layer 1.5: if edge features are present, project them into the latent size
        self.w_e2l = nn.Linear(num_edge_feats, latent_dim)
        self.bne1 = nn.BatchNorm1d(latent_dim)
    if output_dim > 0:
        # layer 6: final layer of the model
        self.out_params = nn.Linear(latent_dim, output_dim)
        self.bne6 = nn.BatchNorm1d(output_dim)

    # Two ways to initialize the convolution weights:
    # a) all convolution layers share the same weights:
    # self.conv_params = nn.Linear(latent_dim, latent_dim)  # layer 2, graph convolution between k
    # self.bn2 = nn.BatchNorm1d(latent_dim)
    # b) each convolution layer has its own weights:
    self.conv_params = nn.ModuleList(
        nn.Linear(latent_dim, latent_dim) for i in range(max_k))
    self.bn2 = nn.ModuleList(
        nn.BatchNorm1d(latent_dim) for i in range(max_k))

    # layer 3: node-level attention
    self.k_weight = Parameter(torch.Tensor(self.max_k * latent_dim, latent_dim))
    self.bn3 = nn.BatchNorm1d(latent_dim)

    # layers 4 and 5: graph-level self-attention
    self.att_params_w1 = nn.Linear(latent_dim, latent_dim)
    self.bn4 = nn.BatchNorm1d(latent_dim)
    self.att_params_w2 = nn.Linear(latent_dim, multi_h_emb_weight)
    self.bn5 = nn.BatchNorm1d(multi_h_emb_weight)

    print('K hop convolutional:', self.k_hop_embedding)
    print('graph_level_attention:', self.graph_level_attention)
    print("Max_block", self.max_block)
    print("Dropout:", self.dropout)
    print("max_k:", self.max_k)
    print("multi_head:", self.multi_h_emb_weight)

    weights_init(self)
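# Shape note (added for illustration, not from the original source): the shape
# of k_weight, (max_k * latent_dim, latent_dim), suggests that the max_k
# per-hop node embeddings are concatenated and projected back to latent_dim.
# For example, with latent_dim = 64, max_k = 3 and N nodes:
#
#   per-hop embeddings h_1, h_2, h_3: each (N, 64)
#   cat([h_1, h_2, h_3], dim=1):      (N, 192)
#   k_weight:                         (192, 64)
#   cat(...) @ k_weight:              (N, 64)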
def __init__(self, input_size, hidden_size, num_class):
    super(MLPClassifier, self).__init__()
    self.h1_weights = nn.Linear(input_size, hidden_size)
    self.h2_weights = nn.Linear(hidden_size, num_class)
    weights_init(self)
def __init__(self, num_node_feats, n_heads=4, latent_dim=[32, 32, 32], k=30,
             conv1d_channels=[16, 32], conv1d_kws=[0, 5],
             lstm_hidden=20, embedding_size=3):
    print('\033[31m--Initializing Model...\033[0m\n')
    super(Model, self).__init__()
    self.num_node_feats = num_node_feats
    self.latent_dim = latent_dim

    if args.model == 'concat' or args.model == 'no-att' or args.model == 'gin':
        # 'concat' (attention on the concatenated features), 'no-att' and 'gin':
        # the features of all GCN layers are concatenated
        self.k = k
        self.att_in_size = sum(latent_dim)
    elif args.model == 'separate':
        # separate attention for each layer
        self.k = k * len(latent_dim)
        self.att_in_size = latent_dim[0]
    elif args.model == 'fusion':
        # fusion attention over multiple scales
        self.k = k
        self.att_in_size = latent_dim[0]

    if args.embedding == True:
        self.embedding = nn.Embedding(num_node_feats, embedding_size)

    ''' GCN weights '''
    self.conv_params = nn.ModuleList()
    if args.model == 'gin':
        for i in range(len(latent_dim)):
            if i == 0:
                self.conv_params.append(nn.Linear(num_node_feats, latent_dim[i]))
                self.conv_params.append(nn.Linear(latent_dim[i], latent_dim[i]))
            else:
                self.conv_params.append(nn.Linear(latent_dim[i - 1], latent_dim[i]))
                self.conv_params.append(nn.Linear(latent_dim[i], latent_dim[i]))
    else:
        if args.embedding == True:
            self.conv_params.append(nn.Linear(embedding_size, latent_dim[0]))
        else:
            # X * W => N * F'
            self.conv_params.append(nn.Linear(num_node_feats, latent_dim[0]))
        # latent_dim holds the channel sizes of the graph convolution layers;
        # add the remaining GCN weight matrices
        for i in range(1, len(latent_dim)):
            self.conv_params.append(nn.Linear(latent_dim[i - 1], latent_dim[i]))

    att_out_size = latent_dim[-1]

    ''' Sort pool attention '''
    # the GIN and plain-GCN ('no-att') variants do not use attention heads
    if args.model not in ('gin', 'no-att'):
        self.attention = [
            SpGraphAttentionLayer(in_features=self.att_in_size,
                                  out_features=att_out_size,
                                  layer=len(latent_dim),
                                  concat=True) for _ in range(n_heads)
        ]
        for i in range(n_heads):
            self.attention[i] = self.attention[i].cuda()

    if args.model == 'fusion':
        # 'fusion' does not concatenate the attention heads
        self.att_out_size = att_out_size
    elif args.model == 'no-att':
        # plain GCN: the features of all GCN layers are concatenated;
        # att_out_size here is just the output dimension
        self.att_out_size = sum(latent_dim)
    elif args.model == 'gin':
        self.att_out_size = sum(latent_dim) + num_node_feats
    else:
        # 'separate' and 'concat' both use multi-head GAT, hence * n_heads
        self.att_out_size = att_out_size * n_heads

    conv1d_kws[0] = self.att_out_size

    ''' 2-layer 1D CNN for classification '''
    # the final two 1D conv layers and the dense layer perform the classification
    # sort pooling outputs a tensor of shape (batch_size, 1, k * 97), so
    # kernel_size = 97 and stride = 97 convolve once over each vertex's
    # feature vector (i.e. its WL signature)
    # self.conv1d_params1 = nn.Conv1d(1, conv1d_channels[0], conv1d_kws[0], conv1d_kws[0])
    # self.conv1d_params1 = self.conv1d_params1.cuda()
    # # batch_size * channels[0] * k
    # self.maxpool1d = nn.MaxPool1d(2, 2)
    # # batch_size * channels[0] * ((k - 2) / 2 + 1)
    # self.conv1d_params2 = nn.Conv1d(conv1d_channels[0], conv1d_channels[1], conv1d_kws[1])
    # # the last conv layer has kernel_size 5 and stride 1; its output is
    # # batch_size * channels[1] * ((k - 2) / 2 + 1)
    if args.model != 'gin':
        dense_dim = int((self.k - 2) / 2 + 1)
        if args.concat == 0:  # no concatenation
            self.dense_dim = (dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
        elif args.concat == 1:  # concatenate the 1D-CNN outputs
            self.dense_dim = ((dense_dim - conv1d_kws[1] + 1) * conv1d_channels[1]
                              + dense_dim * conv1d_channels[0])
    else:
        # self.dense_dim = sum(latent_dim) + args.feat_dim
        self.dense_dim = latent_dim[-1]

    # self.lstm = torch.nn.LSTM(self.total_latent_dim, lstm_hidden, 2)
    # self.dense_dim = self.k * lstm_hidden
    # input dimension of the MLP, i.e. the size of
    # logits = conv1d_res.view(len(graph_sizes), -1)

    weights_init(self)
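# Worked example (added for illustration; the args.* values are assumptions):
# with k = 30, latent_dim = [32, 32, 32], conv1d_channels = [16, 32],
# conv1d_kws[1] = 5 and args.model == 'concat':
#
#   dense_dim        = (30 - 2) // 2 + 1 = 15
#   args.concat == 0:  self.dense_dim = (15 - 5 + 1) * 32            = 352
#   args.concat == 1:  self.dense_dim = (15 - 5 + 1) * 32 + 15 * 16  = 592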
def __init__(self, in_dim, out_dim, p=0.3):
    super(GCN, self).__init__()
    self.proj = nn.Linear(in_dim, out_dim)
    self.drop = nn.Dropout(p=p)
    weights_init(self)
def __init__(self, outputDim, numNodeFeats, numEdgeFeats=0,
             latentDims=[32, 32, 32, 1], k=30, poolingType='sort',
             endingLayers='conv1d', conv2dChannel=64,
             conv1dChannels=[16, 32], conv1dKernSz=[0, 5], conv1dMaxPl=[2, 2]):
    """
    Args:
        outputDim: output dimension of the DGCNN. If zero, the output is that of
            the final 1d conv layer; otherwise an extra dense layer is appended
            after the final 1d conv layer to produce exactly this size.
        numNodeFeats, numEdgeFeats: dimensions of the node/edge attributes.
        latentDims: sizes of the graph convolution layers.
        poolingType: how graph vertices are pooled, 'sort' or 'adaptive'.
        endingLayers: 'conv1d' or 'weight_vertices'. NOT used if poolingType is 'adaptive'.
        conv2dChannel: channel dimension of the 2d conv layer before adaptive max
            pooling. NOT used if poolingType is 'sort'. Defaults to 64 to be
            compatible with VGG11.
        conv1dChannels: channel dimensions of the two conv1d layers.
        conv1dKernSz: kernel sizes of the two conv1d layers; conv1dKernSz[0] is
            overwritten with sum(latentDims).
        conv1dMaxPl: max-pool kernel size and stride between the two conv1d layers.
    """
    log.info('Initializing DGCNN')
    super(DGCNN, self).__init__()
    self.latentDims = latentDims
    self.outputDim = outputDim
    self.k = k
    self.totalLatentDim = sum(latentDims)
    conv1dKernSz[0] = self.totalLatentDim

    self.graphConvParams = nn.ModuleList()
    self.graphConvParams.append(nn.Linear(numNodeFeats, latentDims[0]))
    for i in range(1, len(latentDims)):
        self.graphConvParams.append(nn.Linear(latentDims[i - 1], latentDims[i]))

    self.poolingType = poolingType
    if poolingType == 'adaptive':
        log.info('Unify graph sizes with ADAPTIVE pooling')
        self.conv2dParam = nn.Conv2d(in_channels=1,
                                     out_channels=conv2dChannel,
                                     kernel_size=13,
                                     stride=1,
                                     padding=6)
        self.adptPl = nn.AdaptiveMaxPool2d((self.k, self.totalLatentDim))
    else:
        log.info('Unify graph sizes with SORT pooling')
        self.endingLayers = endingLayers
        if endingLayers == 'weight_vertices':
            log.info('Ending with weight vertices layers')
            self.vertexParams = nn.Conv1d(in_channels=1,
                                          out_channels=1,
                                          kernel_size=self.k,
                                          stride=self.k)
            self.denseDim = self.totalLatentDim
        else:  # conv1d if not specified
            log.info('Ending with conv1d since endingLayers not specified')
            self.conv1dParams1 = nn.Conv1d(in_channels=1,
                                           out_channels=conv1dChannels[0],
                                           kernel_size=conv1dKernSz[0],
                                           stride=conv1dKernSz[0])
            self.maxPool1d = nn.MaxPool1d(kernel_size=conv1dMaxPl[0],
                                          stride=conv1dMaxPl[1])
            self.conv1dParams2 = nn.Conv1d(in_channels=conv1dChannels[0],
                                           out_channels=conv1dChannels[1],
                                           kernel_size=conv1dKernSz[1])
            tmp = int((k - conv1dMaxPl[0]) / conv1dMaxPl[1] + 1)
            self.denseDim = (tmp - conv1dKernSz[1] + 1) * conv1dChannels[1]

    if numEdgeFeats > 0:
        # map edge features into the latent size of the first layer
        # (latentDims is a list here, so it must be indexed)
        self.wE2L = nn.Linear(numEdgeFeats, latentDims[0])
    if outputDim > 0:
        self.outParams = nn.Linear(self.denseDim, outputDim)

    weights_init(self)
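# Note on the 'adaptive' branch (added for illustration, not from the original
# source): Conv2d with kernel_size=13, stride=1 and padding=6 preserves spatial
# size (2 * 6 == 13 - 1), so the per-graph feature map, presumably of shape
# (N, totalLatentDim) for a graph with N vertices, keeps its size; the
# AdaptiveMaxPool2d((k, totalLatentDim)) that follows then forces every graph
# to the fixed size (k, totalLatentDim) regardless of N.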