def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index))
    data.x = self.bn1(data.x)
    cluster = voxel_grid(data.pos, data.batch, size=[4, 4])
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv2(data.x, data.edge_index))
    data.x = self.bn2(data.x)
    cluster = voxel_grid(data.pos, data.batch, size=[6, 6])
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv3(data.x, data.edge_index))
    data.x = self.bn3(data.x)
    cluster = voxel_grid(data.pos, data.batch, size=[20, 20])
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv4(data.x, data.edge_index))
    data.x = self.bn4(data.x)
    cluster = voxel_grid(data.pos, data.batch, size=[32, 32])
    x = max_pool_x(cluster, data.x, data.batch, size=32)

    x = x.view(-1, self.fc1.weight.size(1))
    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
def forward(self, data):
    row, col = data.edge_index
    data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * 28 * cutoff) + 0.5
    # print(data.edge_index.shape)
    # print(data.edge_index[:, -20:])
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    row, col = data.edge_index
    data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * 28 * cutoff) + 0.5
    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    row, col = data.edge_index
    data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * 28 * cutoff) + 0.5
    data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

    x = global_mean_pool(data.x, data.batch)
    return self.fc1(x)

    # Unreachable: the function already returned above, so this original classifier
    # head is dead code and kept only for reference.
    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    return F.log_softmax(self.fc2(x), dim=1)
def forward(self, data):
    x, edge_index = data.x, data.edge_index

    x = self.conv1(x, edge_index)
    x = x.relu()
    cluster1 = graclus(edge_index, num_nodes=x.shape[0])
    pooled_1 = data
    pooled_1.x = x
    pooled_1 = max_pool(cluster1, pooled_1)

    edge_index_2 = pooled_1.edge_index
    x2 = pooled_1.x
    x2 = self.conv2(x2, edge_index_2)
    x2 = x2.relu()
    cluster2 = graclus(edge_index_2, num_nodes=x2.shape[0])
    pooled_2 = pooled_1
    pooled_2.x = x2
    pooled_2 = max_pool(cluster2, pooled_2)

    edge_index_3 = pooled_2.edge_index
    x3 = pooled_2.x
    x3 = self.conv3(x3, edge_index_3)
    x3 = x3.relu()
    x3 = self.conv4(x3, edge_index_3)
    x3 = x3.relu()

    x3 = knn_interpolate(x3, pooled_2.pos, pooled_1.pos)
    x3 = torch.cat((x2, x3), dim=1)
    x3 = self.conv5(x3, edge_index_2)
    x3 = x3.relu()

    x3 = knn_interpolate(x3, pooled_1.pos, data.pos)
    x = torch.cat((x, x3), dim=1)

    x = self.lin1(x)
    x = x.relu()
    x = self.lin2(x)
    x = x.relu()
    x = torch.sigmoid(self.out(x))
    return x
def forward(self, data):
    data.x = F.elu(self.bn1(self.conv1(data.x, data.edge_index, data.edge_attr)))
    cluster = voxel_grid(data.pos, data.batch, size=4)
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.block1(data)
    cluster = voxel_grid(data.pos, data.batch, size=6)
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.block2(data)
    cluster = voxel_grid(data.pos, data.batch, size=24)
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.block3(data)
    cluster = voxel_grid(data.pos, data.batch, size=64)
    x = max_pool_x(cluster, data.x, data.batch, size=8)

    # If your torch-geometric version is below roughly 1.3.2 (not all versions were
    # tested), use x.view() instead of x[0].view():
    # x = x.view(-1, self.fc1.weight.size(1))
    x = x[0].view(-1, self.fc1.weight.size(1))

    x = self.fc1(x)
    x = F.elu(x)
    x = self.bn(x)
    x = self.drop_out(x)
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    row, col = data.edge_index
    data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * self.args.cutoff) + 0.5
    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    row, col = data.edge_index
    data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * self.args.cutoff) + 0.5
    data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

    x = global_mean_pool(data.x, data.batch)
    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training, p=self.args.disc_dropout)
    y = self.fc2(x)
    if self.args.wgan:
        return y
    return torch.sigmoid(y)
def forward(self, data):
    data.x = F.elu(self.bn1(self.conv0(data.x, data.edge_index, data.edge_attr)))
    cluster = voxel_grid(data.pos, data.batch, size=[4, 3])
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.conv1(data)
    cluster = voxel_grid(data.pos, data.batch, size=[16, 12])
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.conv2(data)
    cluster = voxel_grid(data.pos, data.batch, size=[30, 23])
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.conv3(data)
    cluster = voxel_grid(data.pos, data.batch, size=[60, 45])
    x = max_pool_x(cluster, data.x, data.batch, size=16)
    # x = max_pool_x(cluster, data.x, data.batch)
    x = x[0].view(-1, self.fc1.weight.size(1))

    x = self.fc1(x)
    x = F.elu(x)
    x = self.bn(x)
    x = self.drop_out(x)
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
def forward(self, data):
    data.x = F.elu(self.conv1a(data.x, data.edge_index, data.edge_attr))
    data.x = F.elu(self.conv1b(data.x, data.edge_index, data.edge_attr))
    # data.x = F.elu(self.conv1c(data.x, data.edge_index, data.edge_attr))
    # data.x = self.bn1(data.x)
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster1 = graclus(data.edge_index, weight, data.x.size(0))
    pos1 = data.pos
    edge_index1 = data.edge_index
    batch1 = data.batch if hasattr(data, 'batch') else None
    # weights1 = bweights(data, cluster1)
    data = max_pool(cluster1, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv2a(data.x, data.edge_index, data.edge_attr))
    data.x = F.elu(self.conv2b(data.x, data.edge_index, data.edge_attr))
    # data.x = F.elu(self.conv2c(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster2 = graclus(data.edge_index, weight, data.x.size(0))
    pos2 = data.pos
    edge_index2 = data.edge_index
    batch2 = data.batch if hasattr(data, 'batch') else None
    # weights2 = bweights(data, cluster2)
    data = max_pool(cluster2, data, transform=T.Cartesian(cat=False))

    # Upsample.
    # data = recover_grid_barycentric(data, weights=weights2, pos=pos2, edge_index=edge_index2,
    #                                 cluster=cluster2, batch=batch2, transform=T.Cartesian(cat=False))
    data.x = F.elu(self.conv3a(data.x, data.edge_index, data.edge_attr))
    data.x = F.elu(self.conv3b(data.x, data.edge_index, data.edge_attr))
    data = recover_grid(data, pos2, edge_index2, cluster2, batch=batch2,
                        transform=T.Cartesian(cat=False))

    # data = recover_grid_barycentric(data, weights=weights1, pos=pos1, edge_index=edge_index1,
    #                                 cluster=cluster1, batch=batch1, transform=T.Cartesian(cat=False))
    data.x = F.elu(self.conv4a(data.x, data.edge_index, data.edge_attr))
    data.x = F.elu(self.conv4b(data.x, data.edge_index, data.edge_attr))
    data = recover_grid(data, pos1, edge_index1, cluster1, batch=batch1,
                        transform=T.Cartesian(cat=False))

    # TODO: handle contract on trainer and evaluator
    data.x = F.elu(self.convout(data.x, data.edge_index, data.edge_attr))
    x = data.x
    # return F.sigmoid(x)
    return x
def forward(self, data):
    x, edge_index = data.x, data.edge_index
    x = self.pre_lin(x) if self.masif_descr else x

    x = self.conv1(x, edge_index)
    x = self.s1(x)
    x = self.conv2(x, edge_index)
    x = self.s2(x)
    x = self.conv3(x, edge_index)
    x = self.s3(x)

    cluster = graclus(edge_index, num_nodes=x.shape[0])
    inter = data
    inter.x = x
    inter = max_pool(cluster, inter)
    interx = self.inters1(self.interconv1(inter.x, inter.edge_index))
    inter = knn_interpolate(interx, inter.pos, data.pos)
    x1 = self.affine1(x)
    x1 += inter

    x = self.conv4(x, edge_index)
    x = self.s4(x)
    x = self.conv5(x, edge_index)
    x = self.s5(x)
    x = self.conv6(x, edge_index)
    x = self.s6(x)

    inter = data
    inter.x = x
    inter = max_pool(cluster, inter)
    interx = self.inters2(self.interconv1(inter.x, inter.edge_index))
    inter = knn_interpolate(interx, inter.pos, data.pos)
    x2 = self.affine1(x)
    x2 += inter

    x = self.conv7(x, edge_index)
    x = self.s7(x)
    x = self.conv8(x, edge_index)
    x = self.s8(x)
    x = self.conv9(x, edge_index)
    x = self.s9(x)

    x = x + x1 + x2
    x = self.conv10(x, edge_index)
    x = self.s10(x)

    x = self.lin1(x)
    x = self.s11(x)
    x = self.lin2(x)
    x = self.s12(x)
    x = self.out(x)
    x = torch.sigmoid(x)
    return x
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data = max_pool(cluster, data, transform=transform)

    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    x, batch = max_pool_x(cluster, data.x, data.batch)

    # x = global_mean_pool(x, batch)
    x_min = torch_scatter.scatter_min(x, batch, dim=0)[0]
    gather_idxs = batch.expand(x.shape[1], -1).t()
    gather_mins = torch.gather(x_min, 0, gather_idxs)
    s = F.relu(-gather_mins)
    x = x + s
    x = self.aggregator(x, batch)
    s_out = self.aggregator(s, batch)
    x = x - s_out

    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    return F.log_softmax(self.fc2(x), dim=1)
def forward(self, data):
    data.x = self.datanorm * data.x
    data.x = self.inputnet(data.x)

    data.edge_index = to_undirected(
        knn_graph(data.x, self.k, data.batch, loop=False, flow=self.edgeconv1.flow))
    data.x = self.edgeconv1(data.x, data.edge_index)
    weight = normalized_cut_2d(data.edge_index, data.x)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data = max_pool(cluster, data)

    data.edge_index = to_undirected(
        knn_graph(data.x, self.k, data.batch, loop=False, flow=self.edgeconv2.flow))
    data.x = self.edgeconv2(data.x, data.edge_index)
    weight = normalized_cut_2d(data.edge_index, data.x)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    x, batch = max_pool_x(cluster, data.x, data.batch)

    x = global_max_pool(x, batch)
    return self.output(x).squeeze(-1)
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch

    if self.encode_edge:
        x = self.atom_encoder(x)
        x = self.conv1(x, edge_index, data.edge_attr)
    else:
        x = self.conv1(x, edge_index)
    x = F.relu(x)
    xs = [global_mean_pool(x, batch)]

    for i, conv in enumerate(self.convs):
        x = F.relu(conv(x, edge_index))
        xs += [global_mean_pool(x, batch)]
        if self.pooling_type != 'none':
            if self.pooling_type == 'complement':
                complement = batched_negative_edges(edge_index=edge_index, batch=batch,
                                                    force_undirected=True)
                cluster = graclus(complement, num_nodes=x.size(0))
            elif self.pooling_type == 'graclus':
                cluster = graclus(edge_index, num_nodes=x.size(0))
            data = Batch(x=x, edge_index=edge_index, batch=batch)
            data = max_pool(cluster, data)
            x, edge_index, batch = data.x, data.edge_index, data.batch

    if not self.no_cat:
        x = self.jump(xs)
    else:
        x = global_mean_pool(x, batch)

    x = F.relu(self.lin1(x))
    x = self.lin2(x)
    return x
def forward(self, data):
    inputs = data.x
    x = self.uconv0(inputs)
    x = self.activate_feature(x, self.bn0)

    if not self.is_strided:
        x = self.kp_conv(pos=(data.points[self.layer_ind], data.points[self.layer_ind]),
                         neighbors=data.list_neigh[self.layer_ind], x=x)
        x = self.activate_feature(x, self.bn1)
    else:
        x = self.kp_conv(pos=(data.points[self.layer_ind], data.points[self.layer_ind + 1]),
                         neighbors=data.list_pool[self.layer_ind], x=x)
        x = self.activate_feature(x, self.bn1)

    x = self.uconv1(x)
    x = self.activate_feature(x, self.bn2)

    if not self.is_strided:
        data.x = x + self.shortcut_op(inputs)
    else:
        data.x = x + self.shortcut_op(max_pool(inputs, data.list_pool[self.layer_ind]))
    return data
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

    x = global_mean_pool(data.x, data.batch)
    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    return F.log_softmax(self.fc2(x), dim=1)
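# Note: many snippets in this collection call a `normalized_cut_2d` helper without
# defining it. A minimal sketch is given below, assuming it follows the helper used in
# the PyTorch Geometric MNIST graclus example: edge weights are derived from 2-D node
# positions and passed to `torch_geometric.utils.normalized_cut`.
import torch
from torch_geometric.utils import normalized_cut


def normalized_cut_2d(edge_index, pos):
    # Weight each edge by the Euclidean distance between its endpoints' 2-D positions,
    # then apply the normalized-cut weighting so graclus favors balanced clusters.
    row, col = edge_index
    edge_attr = torch.norm(pos[row] - pos[col], p=2, dim=1)
    return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0))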
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    cluster = voxel_grid(data.pos, data.batch, size=5, start=0, end=28)
    data = max_pool(cluster, data, transform=transform)

    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    cluster = voxel_grid(data.pos, data.batch, size=7, start=0, end=28)
    data = max_pool(cluster, data, transform=transform)

    data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))
    cluster = voxel_grid(data.pos, data.batch, size=14, start=0, end=27.99)
    x = max_pool_x(cluster, data.x, data.batch, size=4)

    x = x.view(-1, self.fc1.weight.size(1))
    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
def cluster_grid(grid):
    data = grid
    # data.x = conv1(data.x, data.edge_index, data.edge_attr)
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data.batch = None
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
    return data, cluster
def forward(self, data):
    inputs = data.x
    if data.pools[self.layer_ind].shape[1] > 2:
        x = max_pool(inputs, data.pools[self.layer_ind])
    else:
        raise NotImplementedError("implement for list of edges")
        x = None
    data.x = x
    return data
def forward(self, data):
    x, edge_index = data.x, data.edge_index
    x = self.pre_lin(x) if self.masif_descr else x

    x = self.conv1(x, edge_index)
    x = self.s1(x)
    x = self.conv2(x, edge_index)
    x = self.s2(x)
    x = self.conv3(x, edge_index)
    x = self.s3(x)

    cluster1 = graclus(edge_index, num_nodes=x.shape[0])
    inter1 = data
    inter1.x = x
    inter1 = max_pool(cluster1, inter1)

    x = self.s4(self.conv4(inter1.x, inter1.edge_index))
    edge_index = inter1.edge_index
    x = self.conv5(x, edge_index)
    x = self.s5(x)
    x = self.conv6(x, edge_index)
    x = self.s6(x)

    cluster2 = graclus(edge_index, num_nodes=x.shape[0])
    inter2 = inter1
    inter2.x = x
    inter2 = max_pool(cluster2, inter2)

    x = self.s7(self.conv7(inter2.x, inter2.edge_index))
    x = knn_interpolate(x, inter2.pos, inter1.pos)
    x = self.conv8(x, edge_index)
    x = self.s8(x)

    x = knn_interpolate(x, inter1.pos, data.pos)
    edge_index = data.edge_index
    x = self.conv9(x, edge_index)
    x = self.s9(x)
    x = self.conv10(x, edge_index)
    x = self.s10(x)

    x = self.lin1(x)
    x = self.s11(x)
    x = self.lin2(x)
    x = self.s12(x)
    x = self.out(x)
    x = torch.sigmoid(x)
    return x
def forward(self, data):
    x, edge_index_1 = data.x, data.edge_index

    # Define downscaled samples.
    cluster1 = graclus(edge_index_1, num_nodes=x.shape[0])
    downsample_1 = avg_pool(cluster1, data)
    edge_index_2 = downsample_1.edge_index
    cluster2 = graclus(edge_index_2, num_nodes=downsample_1.x.shape[0])
    downsample_2 = avg_pool(cluster2, downsample_1)
    edge_index_3 = downsample_2.edge_index

    x = self.conv1(x, edge_index_1)
    x = self.s1(x)
    inter1 = data
    inter1.x = x
    inter1 = max_pool(cluster1, inter1)

    x2 = inter1.x
    x2 = torch.cat((self.affine1(downsample_1.x), x2), dim=1)
    x2 = self.conv2(x2, edge_index_2)
    x2 = self.s2(x2)
    inter2 = inter1
    inter2.x = x2
    inter2 = max_pool(cluster2, inter2)

    x3 = inter2.x
    x3 = torch.cat((self.affine2(downsample_2.x), x3), dim=1)
    x3 = self.conv3(x3, edge_index_3)
    x3 = self.s3(x3)

    x3 = knn_interpolate(x3, downsample_2.pos, downsample_1.pos)
    x2 = torch.cat((x2, x3), dim=1)
    x2 = knn_interpolate(x2, downsample_1.pos, data.pos)
    x = torch.cat((x, x2), dim=1)

    x = self.conv4(x, edge_index_1)
    x = self.s4(x)
    x = self.conv5(x, edge_index_1)
    x = self.s5(x)

    x = self.s6(self.lin1(x))
    x = self.s7(self.lin2(x))
    return torch.sigmoid(self.out(x))
def forward(self, data):
    data = self.conv1(data)
    cluster = voxel_grid(data.pos, data.batch, size=2)
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.conv2(data)
    cluster = voxel_grid(data.pos, data.batch, size=4)
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data = self.conv3(data)
    cluster = voxel_grid(data.pos, data.batch, size=7)
    x = max_pool_x(cluster, data.x, data.batch, size=25)
    # x = max_pool_x(cluster, data.x, data.batch)
    x = x[0].view(-1, self.fc1.weight.size(1))

    x = self.fc1(x)
    x = F.elu(x)
    x = self.bn(x)
    x = self.drop_out(x)
    x = self.fc2(x)
    return F.log_softmax(x, dim=1)
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

    x = F.elu(self.fc1(data.x))
    x = F.dropout(x, training=self.training, p=self.dropout)
    x = self.fc2(x)
    y = global_mean_pool(x, data.batch)
    if self.wgan:
        return y
    return torch.sigmoid(y)
def forward(self, data):
    x = F.relu(self.conv1(data.x, data.edge_index))
    cluster = graclus(data.edge_index, num_nodes=x.shape[0])
    data = max_pool(cluster, Data(x=x, batch=data.batch, edge_index=data.edge_index))

    x = F.relu(self.conv2(data.x, data.edge_index))
    cluster = graclus(data.edge_index, num_nodes=x.shape[0])
    x, batch = max_pool_x(cluster, x, data.batch)

    x = global_mean_pool(x, batch)
    x = F.relu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    x = self.fc2(x)
    return x
def forward(self, data):
    for i in range(self.layers_num):
        data.x = self.conv_layers[i](data.x, data.pos, data.edge_index)
        if self.use_cluster_pooling:
            weight = normalized_cut_2d(data.edge_index, data.pos)
            cluster = graclus(data.edge_index, weight, data.x.size(0))
            data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = global_mean_pool(data.x, data.batch)
    x = self.fc1(data.x)
    return F.log_softmax(x, dim=1)
def forward(self, data):
    x, edge_index, batch = data.x, data.edge_index, data.batch
    x = F.relu(self.conv1(x, edge_index))
    xs = [global_mean_pool(x, batch)]

    for i, conv in enumerate(self.convs):
        x = F.relu(conv(x, edge_index))
        xs += [global_mean_pool(x, batch)]
        if i % 2 == 0 and i < len(self.convs) - 1:
            cluster = graclus(edge_index, num_nodes=x.size(0))
            data = Batch(x=x, edge_index=edge_index, batch=batch)
            data = max_pool(cluster, data)
            x, edge_index, batch = data.x, data.edge_index, data.batch

    x = self.jump(xs)
    x = F.relu(self.lin1(x))
    x = F.dropout(x, p=0.5, training=self.training)
    x = self.lin2(x)
    return F.log_softmax(x, dim=-1)
def forward(self, data):
    x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
    x = self.conv1(x)
    x = F.elu(x)

    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, x.size(0))
    data.edge_attr = None
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    x = F.elu(self.conv1(x, edge_index, edge_attr))
    x = self.conv2(x, edge_index, edge_attr)
    x = F.elu(self.conv3(x, edge_index, edge_attr))
    x = self.conv4(x, edge_index, edge_attr)
    x = F.elu(self.conv5(x, edge_index, edge_attr))
    x = self.conv6(x, edge_index, edge_attr)

    x = F.dropout(x, training=self.training)
    return F.log_softmax(x, dim=1)
def forward(self, sub_data):
    """Process a polyline vector set given in torch_geometric.data.Data format.

    Args:
        sub_data (Data): [x, y, cluster, edge_index, valid_len]
    """
    x, edge_index = sub_data.x, sub_data.edge_index
    for name, layer in self.layer_seq.named_modules():
        if isinstance(layer, GraphLayerProp):
            x = layer(x, edge_index)
    sub_data.x = x
    out_data = max_pool(sub_data.cluster, sub_data)

    # try:
    assert out_data.x.shape[0] % int(sub_data.time_step_len[0]) == 0
    # except:
    #     from pdb import set_trace; set_trace()

    out_data.x = out_data.x / out_data.x.norm(dim=0)
    return out_data
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
    cluster = graclus(data.edge_index,
                      torch.reshape(data.edge_attr, (data.edge_attr.shape[0],)),
                      data.x.size(0))
    data = max_pool(cluster, data)

    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    cluster = graclus(data.edge_index,
                      torch.reshape(data.edge_attr, (data.edge_attr.shape[0],)),
                      data.x.size(0))
    x, batch = max_pool_x(cluster, data.x, data.batch)

    x = global_mean_pool(x, batch)
    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    return F.log_softmax(self.fc2(x), dim=1)
def test_max_pool():
    cluster = torch.tensor([0, 1, 0, 1, 2, 2])
    x = torch.Tensor([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]])
    pos = torch.Tensor([[0, 0], [1, 1], [2, 2], [3, 3], [4, 4], [5, 5]])
    edge_index = torch.tensor([[0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5],
                               [1, 2, 3, 0, 2, 3, 0, 1, 3, 0, 1, 2, 5, 4]])
    edge_attr = torch.Tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
    batch = torch.tensor([0, 0, 0, 0, 1, 1])

    data = Batch(x=x, pos=pos, edge_index=edge_index, edge_attr=edge_attr, batch=batch)
    data = max_pool(cluster, data, transform=lambda x: x)

    assert data.x.tolist() == [[5, 6], [7, 8], [11, 12]]
    assert data.pos.tolist() == [[1, 1], [2, 2], [4.5, 4.5]]
    assert data.edge_index.tolist() == [[0, 1], [1, 0]]
    assert data.edge_attr.tolist() == [4, 4]
    assert data.batch.tolist() == [0, 0, 1]
def forward(self, graph):
    data = graph
    data.x = torch.cat([data.pos, data.x], dim=1)

    for i, monet_layer in enumerate(self.monet_layers[:-1]):
        data.x = F.relu(monet_layer(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        if i == 0:
            data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

    data.x = self.monet_layers[-1](data.x, data.edge_index, data.edge_attr)

    for linear_layer in self.linear_layers[:-1]:
        x = global_mean_pool(data.x, data.batch)
        x = F.relu(linear_layer(x))
        x = F.dropout(x)

    return F.log_softmax(self.linear_layers[-1](x), dim=1)
def forward(self, data):
    data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))

    # Calculate similarity between nodes.
    weight = normalized_cut_2d(data.edge_index, data.pos)
    # Graph clustering without the need for eigenvectors.
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    # Pool and coarsen the graph: all nodes within the same cluster are represented
    # as one node, then apply the transform.
    data = max_pool(cluster, data, transform=transform)

    # Second conv layer.
    data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    # Max-pool node features according to the clustering defined in `cluster`.
    x, batch = max_pool_x(cluster, data.x, data.batch)
    # Return batch-wise graph-level outputs by averaging node features across the node dimension.
    x = global_mean_pool(x, batch)

    x = F.elu(self.fc1(x))
    x = F.dropout(x, training=self.training)
    return F.log_softmax(self.fc2(x), dim=1)
def forward(self, data):
    inputs = data.x
    x = self.kp_conv0(data.points[self.layer_ind], data.points[self.layer_ind],
                      data.list_neigh[self.layer_ind], inputs)
    x = self.activate_feature(x, self.bn0)

    if not self.is_strided:
        x = self.kp_conv1(data.points[self.layer_ind], data.points[self.layer_ind],
                          data.list_neigh[self.layer_ind], x)
        x = self.activate_feature(x, self.bn1)
        data.x = x + self.shortcut_op(inputs)
    else:
        x = self.kp_conv(data.points[self.layer_ind + 1], data.points[self.layer_ind],
                         data.list_pool[self.layer_ind], x)
        x = self.activate_feature(x, self.bn1)
        shortcut = self.shortcut_op(max_pool(inputs, data.list_pool[self.layer_ind]))
        data.x = x + shortcut
    return data