Example #1
    def forward(self, data):
        data.x = F.elu(
            self.bn1(self.conv0(data.x, data.edge_index, data.edge_attr)))
        cluster = voxel_grid(data.pos, data.batch, size=[4, 3])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv1(data)
        cluster = voxel_grid(data.pos, data.batch, size=[16, 12])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv2(data)
        cluster = voxel_grid(data.pos, data.batch, size=[30, 23])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv3(data)
        cluster = voxel_grid(data.pos, data.batch, size=[60, 45])
        x = max_pool_x(cluster, data.x, data.batch, size=16)
        # x = max_pool_x(cluster, data.x, data.batch)

        x = x[0].view(-1, self.fc1.weight.size(1))
        x = self.fc1(x)
        x = F.elu(x)
        x = self.bn(x)
        x = self.drop_out(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #2
    def __init__(self,
                 data_dir='data/GMNIST',
                 batch_size=32,
                 test_rate=0.2,
                 validation=False,
                 background=False):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.test_rate = test_rate
        self.validation = validation

        train_dataset = _GMNIST(self.data_dir,
                                True,
                                transform=T.Cartesian(),
                                background=background)
        test_dataset = _GMNIST(self.data_dir,
                               False,
                               transform=T.Cartesian(),
                               background=background)

        train = GraphDataset(train_dataset,
                             batch_size=self.batch_size,
                             shuffle=True)
        test = GraphDataset(test_dataset,
                            batch_size=self.batch_size,
                            shuffle=False)

        super(GMNIST, self).__init__(train=train, test=test, val=test)
Example #3
    def forward(self, data):
        row, col = data.edge_index
        data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * 28 *
                                                            cutoff) + 0.5

        # print(data.edge_index.shape)
        # print(data.edge_index[:, -20:])

        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        row, col = data.edge_index
        data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * 28 *
                                                            cutoff) + 0.5

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        row, col = data.edge_index
        data.edge_attr = (data.pos[col] - data.pos[row]) / (2 * 28 *
                                                            cutoff) + 0.5

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

        x = global_mean_pool(data.x, data.batch)
        return self.fc1(x)

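        # NOTE: the lines below are unreachable; the early `return` above
        # bypasses the original classifier head.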
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
Example #4
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index))
        data.x = self.bn1(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[4, 4])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index))
        data.x = self.bn2(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[6, 6])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
        
        data.x = F.elu(self.conv3(data.x, data.edge_index))
        data.x = self.bn3(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[20, 20])
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
        
        data.x = F.elu(self.conv4(data.x, data.edge_index))
        data.x = self.bn4(data.x)
        cluster = voxel_grid(data.pos, data.batch, size=[32, 32])
        x = max_pool_x(cluster, data.x, data.batch, size=32)
        
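        # NOTE: on newer torch-geometric (roughly >= 1.3.2), max_pool_x with a
        # `size` argument returns an (x, batch) tuple, so x[0].view(...) is
        # needed there (see the note in Example #6).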
        x = x.view(-1, self.fc1.weight.size(1))

        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)

        return F.log_softmax(x, dim=1)
Example #5
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        row, col = data.edge_index
        data.edge_attr = (data.pos[col] -
                          data.pos[row]) / (2 * self.args.cutoff) + 0.5

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        row, col = data.edge_index
        data.edge_attr = (data.pos[col] -
                          data.pos[row]) / (2 * self.args.cutoff) + 0.5

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

        x = global_mean_pool(data.x, data.batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training, p=self.args.disc_dropout)
        y = self.fc2(x)

        if self.args.wgan:
            return y

        return torch.sigmoid(y)
Example #6
    def forward(self, data):
        data.x = F.elu(
            self.bn1(self.conv1(data.x, data.edge_index, data.edge_attr)))
        cluster = voxel_grid(data.pos, data.batch, size=4)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.block1(data)
        cluster = voxel_grid(data.pos, data.batch, size=6)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.block2(data)
        cluster = voxel_grid(data.pos, data.batch, size=24)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.block3(data)
        cluster = voxel_grid(data.pos, data.batch, size=64)
        x = max_pool_x(cluster, data.x, data.batch, size=8)

        # If your torch-geometric version is below 1.3.2 (roughly; not all versions were tested), use x.view() instead of x[0].view():
        # x = x.view(-1, self.fc1.weight.size(1))
        x = x[0].view(-1, self.fc1.weight.size(1))
        x = self.fc1(x)
        x = F.elu(x)
        x = self.bn(x)
        x = self.drop_out(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #7
    def forward(self, data):
        data.x = F.elu(self.conv1a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv1b(data.x, data.edge_index, data.edge_attr))
        # data.x = F.elu(self.conv1c(data.x, data.edge_index, data.edge_attr))
        # data.x = self.bn1(data.x)

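        # Keep the pre-pooling graph (pos, edge_index, batch) so recover_grid
        # can upsample back onto it later in this forward pass.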
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster1 = graclus(data.edge_index, weight, data.x.size(0))
        pos1 = data.pos
        edge_index1 = data.edge_index
        batch1 = data.batch if hasattr(data, 'batch') else None
        # weights1 = bweights(data, cluster1)
        data = max_pool(cluster1, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv2b(data.x, data.edge_index, data.edge_attr))
        # data.x = F.elu(self.conv2c(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster2 = graclus(data.edge_index, weight, data.x.size(0))
        pos2 = data.pos
        edge_index2 = data.edge_index
        batch2 = data.batch if hasattr(data, 'batch') else None
        # weights2 = bweights(data, cluster2)
        data = max_pool(cluster2, data, transform=T.Cartesian(cat=False))

        # upsample
        # data = recover_grid_barycentric(data, weights=weights2, pos=pos2, edge_index=edge_index2, cluster=cluster2,
        #                                  batch=batch2, transform=T.Cartesian(cat=False))
        data.x = F.elu(self.conv3a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv3b(data.x, data.edge_index, data.edge_attr))

        data = recover_grid(data,
                            pos2,
                            edge_index2,
                            cluster2,
                            batch=batch2,
                            transform=T.Cartesian(cat=False))

        # data = recover_grid_barycentric(data, weights=weights1, pos=pos1, edge_index=edge_index1, cluster=cluster1,
        #                                  batch=batch1, transform=T.Cartesian(cat=False))
        data.x = F.elu(self.conv4a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv4b(data.x, data.edge_index, data.edge_attr))
        data = recover_grid(data,
                            pos1,
                            edge_index1,
                            cluster1,
                            batch=batch1,
                            transform=T.Cartesian(cat=False))

        # TODO: handle contract on trainer and evaluator
        data.x = F.elu(self.convout(data.x, data.edge_index, data.edge_attr))

        x = data.x

        # return F.sigmoid(x)
        return x
Example #8
    def __init__(self, data_dir=VESSEL_DIR, batch_size=32, test_rate=0.2, annotated_slices=False, pre_transform=None):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.test_rate = test_rate

        train_dataset = _GSVESSEL(self.data_dir, train=True, transform=T.Cartesian(), test_rate=test_rate,
                                   pre_transform=pre_transform)
        test_dataset = _GSVESSEL(self.data_dir, train=False, transform=T.Cartesian(), test_rate=test_rate,
                                  pre_transform=pre_transform)

        train = GraphDataset(train_dataset, batch_size=self.batch_size, shuffle=True)
        test = GraphDataset(test_dataset, batch_size=self.batch_size, shuffle=False)

        super(GSVESSEL, self).__init__(train=train, test=test, val=test)
Example #9
def test_compose():
    transform = T.Compose([T.Cartesian(cat=False), T.Cartesian(cat=True)])
    expected_output = ('Compose([\n'
                       '    Cartesian(cat=False),\n'
                       '    Cartesian(cat=True),\n'
                       '])')
    assert transform.__repr__() == expected_output

    pos = torch.Tensor([[-1, 0], [0, 0], [2, 0]])
    edge_index = torch.LongTensor([[0, 1], [1, 2]])
    data = Data(edge_index=edge_index, pos=pos)
    expected_output = [[0.75, 0.5, 0.75, 0.5], [1, 0.5, 1, 0.5]]

    output = transform(data)
    assert output.edge_attr.tolist() == expected_output
Example #10
def get_train_val_loader(path,
                         train_batch_size=64,
                         val_batch_size=64,
                         val_split=1 / 12):
    train_dataset = MNISTSuperpixels(path, "train", transform=T.Cartesian())
    dataset_size = len(train_dataset)
    indices = list(range(dataset_size))
    split = int(val_split * dataset_size)
    np.random.seed(43)
    np.random.shuffle(indices)
    train_indices = indices[split:]
    val_indices = indices[:split]

    train_sampler = SubsetRandomSampler(train_indices)
    val_sampler = SubsetRandomSampler(val_indices)

    validation_loader = DataLoader(train_dataset,
                                   batch_size=val_batch_size,
                                   sampler=val_sampler,
                                   shuffle=False)
    train_loader = DataLoader(train_dataset,
                              batch_size=train_batch_size,
                              sampler=train_sampler,
                              shuffle=False)
    return train_loader, validation_loader
Example #11
    def __init__(self, heart='case3', mfree=1230, K=6):
        self.path_in = osp.join(osp.dirname(osp.realpath('__file__')), 'data', 'training', heart)
        self.pre_transform = T.KNNGraph(k=K)
        self.transform = T.Cartesian(cat=False)
        self.filename = osp.join(self.path_in, 'raw', heart)
        self.mfree = mfree
Example #12
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=5, start=0, end=28)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=7, start=0, end=28)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=14, start=0, end=27.99)
        x = max_pool_x(cluster, data.x, data.batch, size=4)

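        # NOTE: as in Example #6's note, newer torch-geometric returns a tuple
        # here, so x[0].view(...) may be required instead of x.view(...).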
        x = x.view(-1, self.fc1.weight.size(1))
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #13
def cluster_grid(grid):
    data = grid
    # data.x = conv1(data.x, data.edge_index, data.edge_attr)
    weight = normalized_cut_2d(data.edge_index, data.pos)
    cluster = graclus(data.edge_index, weight, data.x.size(0))
    data.edge_attr = None
    data.batch = None
    data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
    return data, cluster
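
# Hypothetical usage sketch (the `grid` argument stands for any torch_geometric
# Data object with x, pos and edge_index populated):
#   coarse, assignment = cluster_grid(grid)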
Example #14
    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, data.x.size(0))
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))

        x = global_mean_pool(data.x, data.batch)
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)
Example #15
    def forward(self, data):
        data = self.conv1(data)
        cluster = voxel_grid(data.pos, data.batch, size=2)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv2(data)
        cluster = voxel_grid(data.pos, data.batch, size=4)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data = self.conv3(data)
        cluster = voxel_grid(data.pos, data.batch, size=7)
        x = max_pool_x(cluster, data.x, data.batch, size=25)
        # x = max_pool_x(cluster, data.x, data.batch)

        x = x[0].view(-1, self.fc1.weight.size(1))
        x = self.fc1(x)
        x = F.elu(x)
        x = self.bn(x)
        x = self.drop_out(x)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
Example #16
def test_compose():
    transform = T.Compose([T.Cartesian(), T.TargetIndegree()])
    assert transform.__repr__() == ('Compose([\n'
                                    '    Cartesian(cat=True),\n'
                                    '    TargetIndegree(cat=True),\n'
                                    '])')

    pos = torch.tensor([[-1, 0], [0, 0], [2, 0]], dtype=torch.float)
    edge_index = torch.tensor([[0, 1], [1, 2]])
    data = Data(edge_index=edge_index, pos=pos)

    out = transform(data).edge_attr.tolist()
    assert out == [[0.75, 0.5, 1], [1, 0.5, 1]]
Example #17
    def __init__(self,
                 n_neigh=9,
                 rad_neigh=0.1,
                 knn=None,
                 self_loop=True,
                 edge_attr=None,
                 flow='source_to_target'):
        super(GraphReg, self).__init__()
        # defining graph transform
        graph_transform_list = []
        self.del_edge_attr = False
        self.knn = knn
        self.n_neigh = n_neigh
        self.rad_neigh = rad_neigh
        self.self_loop = self_loop
        self.edge_attr = edge_attr
        if self.knn == True:
            graph_transform_list.append(
                T.KNNGraph(n_neigh, loop=self_loop, flow=flow))

        elif self.knn == False:  # 1. Use this branch: select neighbors by radius (the R method) instead of k-NN
            graph_transform_list.append(
                T.RadiusGraph(self.rad_neigh,
                              loop=self_loop,
                              max_num_neighbors=n_neigh,
                              flow=flow))  # 0.15  false  100
        else:
            print("Connectivity of the graph will not be re-generated")

        # edge attr
        if edge_attr is not None:
            self.del_edge_attr = True
            if isinstance(edge_attr, str):
                if edge_attr:
                    edge_attr = [attr.strip() for attr in edge_attr.split('-')]
                else:
                    edge_attr = []
            for attr in edge_attr:
                attr = attr.strip().lower()

                if attr == 'poscart':
                    graph_transform_list.append(
                        T.Cartesian(norm=False, cat=True))

                elif attr == 'posspherical':  # 2. Add spherical attributes to each edge
                    graph_transform_list.append(
                        ext.Spherical(norm=False, cat=True))

                else:
                    raise RuntimeError('{} is not supported'.format(attr))
        self.graph_transform = T.Compose(graph_transform_list)  # 3. Done composing the transform pipeline
Example #18
    def forward(self, data):
        for i in range(self.layers_num):
            data.x = self.conv_layers[i](data.x, data.pos, data.edge_index)

            if self.use_cluster_pooling:
                weight = normalized_cut_2d(data.edge_index, data.pos)
                cluster = graclus(data.edge_index, weight, data.x.size(0))
                data = max_pool(cluster,
                                data,
                                transform=T.Cartesian(cat=False))

        data.x = global_mean_pool(data.x, data.batch)
        x = self.fc1(data.x)

        return F.log_softmax(x, dim=1)
Example #19
    def forward(self, data):
        x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr

        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster = graclus(data.edge_index, weight, x.size(0))
        data.edge_attr = None
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))
        x = F.elu(self.conv1(x, edge_index, edge_attr))
        x = self.conv2(x, edge_index, edge_attr)
        x = F.elu(self.conv3(x, edge_index, edge_attr))
        x = self.conv4(x, edge_index, edge_attr)
        x = F.elu(self.conv5(x, edge_index, edge_attr))
        x = self.conv6(x, edge_index, edge_attr)
        x = F.dropout(x, training=self.training)
        return F.log_softmax(x, dim=1)
Example #20
    def forward(self, graph):
        data = graph
        data.x = torch.cat([data.pos, data.x], dim=1)
        for i, monet_layer in enumerate(self.monet_layers[:-1]):
            data.x = F.relu(
                monet_layer(data.x, data.edge_index, data.edge_attr))
            weight = normalized_cut_2d(data.edge_index, data.pos)
            cluster = graclus(data.edge_index, weight, data.x.size(0))
            if i == 0:
                data.edge_attr = None
            data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = self.monet_layers[-1](data.x, data.edge_index, data.edge_attr)

        x = global_mean_pool(data.x, data.batch)
        for linear_layer in self.linear_layers[:-1]:
            x = F.relu(linear_layer(x))
            x = F.dropout(x, training=self.training)

        return F.log_softmax(self.linear_layers[-1](x), dim=1)
Example #21
def fetch_dataloader(data_dir, batch_size, validation_split):
    transform = T.Cartesian(cat=False)
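    # NOTE: `transform` is constructed here but never applied below; presumably
    # METDataset handles its own transforms internally.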
    dataset = METDataset(data_dir)
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    random_seed = 42
    np.random.seed(random_seed)
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]
    dataloaders = {
        'train':
        DataLoader(dataset,
                   batch_size=batch_size,
                   sampler=SubsetRandomSampler(train_indices)),
        'test':
        DataLoader(dataset,
                   batch_size=batch_size,
                   sampler=SubsetRandomSampler(val_indices))
    }
    return dataloaders
Example #22
def fetch_dataloader(data_dir, batch_size, validation_split):
    transform = T.Cartesian(cat=False)
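    # NOTE: as in Example #21, this `transform` is created but never used here.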
    dataset = METDataset(data_dir)
    #print(dataset)
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(validation_split * dataset_size))
    print(split)
    random_seed = 42
    # fix the random generator for train and test
    # taken from https://pytorch.org/docs/1.5.0/notes/randomness.html
    torch.manual_seed(random_seed)
    train_subset, val_subset = torch.utils.data.random_split(
        dataset, [dataset_size - split, split])
    # On newer PyTorch, pass generator=torch.Generator().manual_seed(random_seed).
    print('length of train/val data: ', len(train_subset), len(val_subset))
    dataloaders = {
        'train': DataLoader(train_subset, batch_size=batch_size,
                            shuffle=False),
        'test': DataLoader(val_subset, batch_size=batch_size, shuffle=False)
    }
    return dataloaders
Example #23
def get_dataset(name, sparse=True, feat_str="deg+ak3+reall", root=None):
    if root is None or root == '':
        path = osp.join(osp.expanduser('~'), 'pyG_data', name)
    else:
        path = osp.join(root, name)
    degree = feat_str.find("deg") >= 0
    onehot_maxdeg = re.findall(r"odeg(\d+)", feat_str)
    onehot_maxdeg = int(onehot_maxdeg[0]) if onehot_maxdeg else None
    k = re.findall(r"an{0,1}k(\d+)", feat_str)
    k = int(k[0]) if k else 0
    groupd = re.findall(r"groupd(\d+)", feat_str)
    groupd = int(groupd[0]) if groupd else 0
    remove_edges = re.findall(r"re(\w+)", feat_str)
    remove_edges = remove_edges[0] if remove_edges else 'none'
    centrality = feat_str.find("cent") >= 0
    coord = feat_str.find("coord") >= 0

    pre_transform = FeatureExpander(degree=degree,
                                    onehot_maxdeg=onehot_maxdeg,
                                    AK=k,
                                    centrality=centrality,
                                    remove_edges=remove_edges,
                                    group_degree=groupd).transform

    if 'MNIST' in name or 'CIFAR' in name:
        if name == 'MNIST_SUPERPIXEL':
            train_dataset = MNISTSuperpixels(path,
                                             True,
                                             pre_transform=pre_transform,
                                             transform=T.Cartesian())
            test_dataset = MNISTSuperpixels(path,
                                            False,
                                            pre_transform=pre_transform,
                                            transform=T.Cartesian())
        else:
            train_dataset = ImageDataset(path,
                                         name,
                                         True,
                                         pre_transform=pre_transform,
                                         coord=coord,
                                         processed_file_prefix="data_%s" %
                                         feat_str)
            test_dataset = ImageDataset(path,
                                        name,
                                        False,
                                        pre_transform=pre_transform,
                                        coord=coord,
                                        processed_file_prefix="data_%s" %
                                        feat_str)
        dataset = (train_dataset, test_dataset)
    elif 'QM9' in name:
        dataset = QM9Ext(path,
                         pre_transform=pre_transform,
                         processed_filename="data_%s.pt" % feat_str)
    elif 'ModelNet' in name:
        pre_transform = FeatureExpander(
            degree=degree,
            onehot_maxdeg=onehot_maxdeg,
            AK=k,
            centrality=centrality,
            remove_edges=remove_edges,
            group_degree=groupd).cloud_point_transform
        train_dataset = ModelNetExT(path,
                                    train=True,
                                    pre_transform=pre_transform,
                                    processed_file_prefix="data_%s" % feat_str)
        test_dataset = ModelNetExT(path,
                                   train=False,
                                   pre_transform=pre_transform,
                                   processed_file_prefix="data_%s" % feat_str)
        dataset = (train_dataset, test_dataset)
    elif 'TOSCA' in name:
        # pre_transform = FeatureExpander(
        #     degree=degree, onehot_maxdeg=onehot_maxdeg, AK=k,
        #     centrality=centrality, remove_edges=remove_edges,
        #     group_degree=groupd).cloud_point_transform
        dataset = TOSCAEXT(path,
                           pre_transform=pre_transform,
                           processed_file_prefix="data_%s" % feat_str)
    else:
        dataset = TUDatasetExt(path,
                               name,
                               pre_transform=pre_transform,
                               use_node_attr=True,
                               processed_filename="data_%s.pt" % feat_str)

        dataset.data.edge_attr = None

    return dataset
Example #24
import os.path as osp

import torch
import torch.nn.functional as F
from torch_geometric.datasets import MNISTSuperpixels
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.nn import SplineConv, voxel_grid, max_pool, max_pool_x

path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'MNIST')
transform = T.Cartesian(cat=False)
train_dataset = MNISTSuperpixels(path, True, transform=transform)
test_dataset = MNISTSuperpixels(path, False, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64)
d = train_dataset


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = SplineConv(d.num_features, 32, dim=2, kernel_size=5)
        self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5)
        self.conv3 = SplineConv(64, 64, dim=2, kernel_size=5)
        self.fc1 = torch.nn.Linear(4 * 64, 128)
        self.fc2 = torch.nn.Linear(128, d.num_classes)

    def forward(self, data):
        data.x = F.elu(self.conv1(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=5, start=0, end=28)
        data.edge_attr = None
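        # [snippet truncated] A plausible completion, hedged: this Net matches
        # Example #12's layer shapes and voxel-grid sizes, whose forward
        # continues as follows.
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv2(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=7, start=0, end=28)
        data = max_pool(cluster, data, transform=T.Cartesian(cat=False))

        data.x = F.elu(self.conv3(data.x, data.edge_index, data.edge_attr))
        cluster = voxel_grid(data.pos, data.batch, size=14, start=0, end=27.99)
        x = max_pool_x(cluster, data.x, data.batch, size=4)

        x = x.view(-1, self.fc1.weight.size(1))  # x[0].view(...) on newer PyG
        x = F.elu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        return F.log_softmax(self.fc2(x), dim=1)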
Example #25
import os.path as osp

import torch
import torch.nn.functional as F
from torch_geometric.datasets import MNISTSuperpixels
from torch_geometric.data import DataListLoader
import torch_geometric.transforms as T
from torch_geometric.nn import SplineConv, global_mean_pool, DataParallel

path = osp.join(osp.dirname(osp.realpath(__file__)), '../../data', 'MNIST')
dataset = MNISTSuperpixels(path, transform=T.Cartesian()).shuffle()
loader = DataListLoader(dataset, batch_size=1024, shuffle=True)


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = SplineConv(dataset.num_features, 32, dim=2, kernel_size=5)
        self.conv2 = SplineConv(32, 64, dim=2, kernel_size=5)
        self.lin1 = torch.nn.Linear(64, 128)
        self.lin2 = torch.nn.Linear(128, dataset.num_classes)

    def forward(self, data):
        print('Inside Model:  num graphs: {}, device: {}'.format(
            data.num_graphs, data.batch.device))

        x, edge_index, edge_attr = data.x, data.edge_index, data.edge_attr
        x = F.elu(self.conv1(x, edge_index, edge_attr))
        x = F.elu(self.conv2(x, edge_index, edge_attr))
        x = global_mean_pool(x, data.batch)
        x = F.elu(self.lin1(x))
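        # [snippet truncated] A plausible ending, hedged: lin2 maps the hidden
        # features to dataset.num_classes, so the usual classification head is
        return F.log_softmax(self.lin2(x), dim=1)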
Example #26
import os.path as osp

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.datasets import MNISTSuperpixels
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.utils import normalized_cut
from torch_geometric.nn import (NNConv, graclus, max_pool, max_pool_x,
                                global_mean_pool)

path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'MNIST')
train_dataset = MNISTSuperpixels(path, True, transform=T.Cartesian())
test_dataset = MNISTSuperpixels(path, False, transform=T.Cartesian())
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64)
d = train_dataset


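# Edge weights for graclus pooling: each edge gets its Euclidean length, which
# normalized_cut then scales by (1 / deg(u) + 1 / deg(v)).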
def normalized_cut_2d(edge_index, pos):
    row, col = edge_index
    edge_attr = torch.norm(pos[row] - pos[col], p=2, dim=1)
    return normalized_cut(edge_index, edge_attr, num_nodes=pos.size(0))


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        n1 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 32))
        self.conv1 = NNConv(d.num_features, 32, n1)
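        # [snippet truncated] A plausible continuation, hedged: NNConv's edge
        # network must emit in_channels * out_channels values (32 * 64 = 2048):
        n2 = nn.Sequential(nn.Linear(2, 25), nn.ReLU(), nn.Linear(25, 2048))
        self.conv2 = NNConv(32, 64, n2)
        self.fc1 = nn.Linear(64, 128)
        self.fc2 = nn.Linear(128, d.num_classes)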
Example #27
from torch.utils.data.dataset import Subset
import torch
import torch.nn as nn
from torch.nn import Sequential, Linear, ReLU
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
from inputsdata import MyOwnDataset
import torch_geometric.transforms as T
from torch_geometric.data import DataLoader
from torch_geometric.utils import normalized_cut
from torch_geometric.nn import voxel_grid, max_pool, max_pool_x, graclus, global_mean_pool, GCNConv




transform = T.Cartesian(cat=False)


class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        
        self.conv1 = GCNConv(1, 64)
        self.bn1 = torch.nn.BatchNorm1d(64)
        
        self.conv2 = GCNConv(64, 128)
        self.bn2 = torch.nn.BatchNorm1d(128)
        
        self.conv3 = GCNConv(128, 256)
        self.bn3 = torch.nn.BatchNorm1d(256)
        
Example #28
model_path = dir_path + "/cmodels/12_global_edge_attr_test/C_300.pt"
dataset_path = dir_path + '/dataset/cartesian/'

dataset_path += str(num) + "s/" if num != -1 else "all_nums"


def pf(data):
    return data.y == num


pre_filter = pf if num != -1 else None

train_dataset = MNISTSuperpixels(dataset_path,
                                 True,
                                 pre_transform=T.Cartesian(),
                                 pre_filter=pre_filter)
train_loader = DataLoader(train_dataset, batch_size=128)

print("loaded data")

pretrained_model = torch.load(model_path, map_location=device)
C = MoNet(25)
C.load_state_dict(pretrained_model.state_dict())

torch.save(pretrained_model.state_dict(),
           "../mnist_superpixels/evaluation/C_state_dict.pt")

print("loaded model)")

# Testing Classification - remember to comment the first return in MoNet.forward()
Example #29
            data_s = self.transform(data_s)
            data_t = self.transform(data_t)

        data = Data(num_nodes=pos_s.size(0))
        for key in data_s.keys:
            data['{}_s'.format(key)] = data_s[key]
        for key in data_t.keys:
            data['{}_t'.format(key)] = data_t[key]

        return data


transform = T.Compose([
    T.Constant(),
    T.KNNGraph(k=8),
    T.Cartesian(),
])
train_dataset = RandomGraphDataset(30, 60, 0, 20, transform=transform)
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True,
                          follow_batch=['x_s', 'x_t'])

path = osp.join('..', 'data', 'PascalPF')
test_datasets = [PascalPF(path, cat, transform) for cat in PascalPF.categories]

device = 'cuda' if torch.cuda.is_available() else 'cpu'
psi_1 = SplineCNN(1, args.dim, 2, args.num_layers, cat=False, dropout=0.0)
psi_2 = SplineCNN(args.rnd_dim, args.rnd_dim, 2, args.num_layers, cat=True,
                  dropout=0.0)
model = DGMC_modified_v2(psi_1, psi_2, num_steps=args.num_steps).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
Example #30
    def forward(self, data):
        # (1/32,V_0/V_1)
        # pre-pool1
        pos1 = data.pos
        edge_index1 = data.edge_index
        x_pre = data.x.clone().detach()
        batch1 = data.batch if hasattr(data, 'batch') else None
        # convolution
        data.x = F.elu(self.conv1a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv1b(data.x, data.edge_index, data.edge_attr))
        # clustering
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster1 = graclus(data.edge_index, weight, data.x.size(0))
        weights1 = pweights(x_pre, cluster1)
        # pooling
        data = avg_pool(cluster1, data, transform=T.Cartesian(cat=False))

        # (32/64,V_1/V_2)

        # pre-pool2
        pos2 = data.pos
        edge_index2 = data.edge_index
        x_pre = data.x.clone().detach()
        batch2 = data.batch if hasattr(data, 'batch') else None
        # convolution
        data.x = F.elu(self.conv2a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv2b(data.x, data.edge_index, data.edge_attr))
        data.x = self.bn2(data.x)
        # clustering
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster2 = graclus(data.edge_index, weight, data.x.size(0))
        weights2 = pweights(x_pre, cluster2)
        # pooling
        data = avg_pool(cluster2, data, transform=T.Cartesian(cat=False))
        pool2 = data.clone()

        # 64/128,V_2/V_3
        # pre-pool3
        pos3 = data.pos
        edge_index3 = data.edge_index
        x_pre = data.x.clone().detach()
        batch3 = data.batch if hasattr(data, 'batch') else None
        # convolution
        data.x = F.elu(self.conv3a(data.x, data.edge_index, data.edge_attr))
        data.x = F.elu(self.conv3b(data.x, data.edge_index, data.edge_attr))
        data.x = self.bn3(data.x)
        # clustering
        weight = normalized_cut_2d(data.edge_index, data.pos)
        cluster3 = graclus(data.edge_index, weight, data.x.size(0))
        weights3 = pweights(x_pre, cluster3)
        # pooling
        data = avg_pool(cluster3, data, transform=T.Cartesian(cat=False))

        # upsample
        # data = recover_grid_barycentric(data, weights=weights2, pos=pos2, edge_index=edge_index2, cluster=cluster2,
        #                                  batch=batch2, transform=None)
        data.x = F.elu(self.score_fr1(data.x, data.edge_index, data.edge_attr))
        data = recover_grid_barycentric(data,
                                        weights=weights3,
                                        pos=pos3,
                                        edge_index=edge_index3,
                                        cluster=cluster3,
                                        batch=batch3,
                                        transform=T.Cartesian(cat=False))
        data.x = F.elu(self.score_fr2(data.x, data.edge_index, data.edge_attr))

        pool2.x = F.elu(
            self.score_pool2(pool2.x, pool2.edge_index, pool2.edge_attr))

        # data = recover_grid_barycentric(data, weights=weights1, pos=pos1, edge_index=edge_index1, cluster=cluster1,
        #                                  batch=batch1, transform=None)
        data.x = data.x + pool2.x
        data = recover_grid_barycentric(data,
                                        weights=weights2,
                                        pos=pos2,
                                        edge_index=edge_index2,
                                        cluster=cluster2,
                                        batch=batch2,
                                        transform=T.Cartesian(cat=False))

        data = recover_grid_barycentric(data,
                                        weights=weights1,
                                        pos=pos1,
                                        edge_index=edge_index1,
                                        cluster=cluster1,
                                        batch=batch1,
                                        transform=T.Cartesian(cat=False))

        data.x = F.elu(self.convout(data.x, data.edge_index, data.edge_attr))

        x = data.x

        return x