Example #1
import torch
from torch_geometric.data import Data
from torch_geometric.transforms import KNNGraph


def test_knn_graph():
    assert KNNGraph().__repr__() == 'KNNGraph(k=6)'

    pos = [[0, 0], [1, 0], [2, 0], [0, 1], [-2, 0], [0, -2]]
    pos = torch.tensor(pos, dtype=torch.float)
    data = Data(pos=pos)

    row, col = KNNGraph(2)(data).edge_index
    expected_row = [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5]
    expected_col = [1, 2, 3, 4, 5, 0, 2, 3, 5, 0, 1, 0, 1, 4, 0, 3, 0, 1]

    assert row.tolist() == expected_row
    assert col.tolist() == expected_col
Example #2
import torch
from torch_geometric.data import Data
from torch_geometric.transforms import KNNGraph


def test_knn_graph():
    assert KNNGraph().__repr__() == 'KNNGraph(k=6)'

    pos = torch.Tensor([[0, 0], [1, 0], [2, 0], [0, 1], [-2, 0], [0, -2]])

    expected_row = [0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5]
    expected_col = [1, 2, 3, 4, 5, 0, 2, 3, 5, 0, 1, 0, 1, 4, 0, 3, 0, 1]

    data = Data(pos=pos)
    data = KNNGraph(k=2)(data)
    assert len(data) == 2
    assert data.pos.tolist() == pos.tolist()
    assert data.edge_index[0].tolist() == expected_row
    assert data.edge_index[1].tolist() == expected_col
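
A minimal standalone sketch of the transform exercised by the two tests above; KNNGraph needs the torch-cluster package at runtime, and the exact edge order and count can vary between torch_geometric versions, so the printout is illustrative only.

import torch
from torch_geometric.data import Data
from torch_geometric.transforms import KNNGraph

# Same six points as in the tests above
pos = torch.tensor([[0, 0], [1, 0], [2, 0], [0, 1], [-2, 0], [0, -2]], dtype=torch.float)
data = KNNGraph(k=2)(Data(pos=pos))
print(data.edge_index)  # [2, num_edges] tensor built from the 2 nearest neighbours of each point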
Example #3
from torch_geometric.transforms import AddSelfLoops, KNNGraph, ToUndirected


def edge_creator(dat):
    # Build a directed 5-NN graph, then symmetrise it, add self-loops and flip the edge direction
    dat = KNNGraph(k=5, loop=False, force_undirected=False)(dat)
    dat.adj_t = None
    dat = ToUndirected()(dat)
    dat = AddSelfLoops()(dat)
    dat.edge_index = dat.edge_index.flip(dims=[0])
    return dat
Example #4
    def __init__(
        self,
        root: str = "data/",
        n_segments: int = 400,
        k: int = 10,
        loop: bool = True,
        transform: Optional[Callable] = None,
        pre_transform: Optional[Callable] = None,
        **kwargs,
    ):
        self.data_dir = root
        self.n_segments = n_segments
        self.k = k
        self.loop = loop
        self.slic_kwargs = kwargs
        # Image -> SLIC superpixel nodes -> k-NN graph
        self.base_transform = T.Compose([
            T.ToTensor(),
            ToSLIC(n_segments=n_segments,
                   add_img=True,
                   compactness=1,
                   **kwargs),
            KNNGraph(k=k, loop=loop),
        ])
        super().__init__(os.path.join(root, "STL10"), transform, pre_transform)
        self.data, self.slices = torch.load(self.processed_paths[0])
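
A hedged sketch of what the base_transform pipeline above does to a single image; the random 96x96 image is made up for illustration, ToSLIC needs scikit-image and KNNGraph needs torch-cluster, and the class wrapping this __init__ is not reproduced here.

import numpy as np
import torchvision.transforms as T
from PIL import Image
from torch_geometric.transforms import KNNGraph, ToSLIC

pipeline = T.Compose([
    T.ToTensor(),                                         # PIL image -> [3, H, W] float tensor
    ToSLIC(n_segments=400, add_img=True, compactness=1),  # pixels -> superpixel nodes with pos and mean-colour x
    KNNGraph(k=10, loop=True),                            # connect each superpixel to its 10 nearest neighbours
])
img = Image.fromarray(np.random.randint(0, 255, (96, 96, 3), dtype=np.uint8))
graph = pipeline(img)
print(graph)  # Data object with x, pos, img and edge_index attributes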
Example #5
    def __init__(self,path,subdir,event,neighbours=6):
        super(duneGraph,self).__init__()

        # Per-view placeholders (each event is stored as three views)
        self.dataFeatures = [0,0,0]
        self.dataPositions = [0,0,0]
        self.truthInfo = truthInfo()

        self.viewNodes = [0,0,0]
        self.nodeFeatures = 0
        self.nodeCoords = 0

        self.neighbours = neighbours
        
        # Get the number of nodes and features from the info file
        infofiles = [open(path+"/"+subdir+"/event_"+event+"_0.info","r"),   
                     open(path+"/"+subdir+"/event_"+event+"_1.info","r"),   
                     open(path+"/"+subdir+"/event_"+event+"_2.info","r")]
        
        # Extract the truth information and graph sizes
        self.readInfoFiles(infofiles)

        # Load the data files for this event
        datafiles = [open(path+"/"+subdir+"/event_"+event+"_0.gz","rb"),    
                     open(path+"/"+subdir+"/event_"+event+"_1.gz","rb"),    
                     open(path+"/"+subdir+"/event_"+event+"_2.gz","rb")]

        # Unpack the data files and fill the feature and position tensors
        for d in range(3):
            elements = np.frombuffer(zlib.decompress(datafiles[d].read()), dtype=np.float32).copy()  # np.fromstring is deprecated; .copy() keeps the array writable
            elements = elements.reshape((self.viewNodes[d],self.nodeFeatures+self.nodeCoords))
            split = np.hsplit(elements,self.nodeCoords)
            pos = split[0]
            features = split[1]
            # Saturate and rescale the two features into roughly the [0, 1] range
            for i in range(len(features)):
                if features[i][0] > 1000:
                    features[i][0] = 1000.
                features[i][0] = features[i][0] / 1000.
                if features[i][1] > 10:
                    features[i][1] = 24.  # values above 10 saturate at the maximum of 24
                features[i][1] = features[i][1] / 24.
                #print(features[i][0],features[i][1])
            self.dataFeatures[d] = torch.from_numpy(features)
            self.dataPositions[d] = torch.from_numpy(pos)
            datafiles[d].close()

        y = torch.zeros([1], dtype=torch.long)
        y[0] = self.getFlavour()

        # Now we make the actual graph data objects with the same prediction tensor
        self.data0 = Data(x=self.dataFeatures[0],pos=self.dataPositions[0],y=y)
        self.data1 = Data(x=self.dataFeatures[1],pos=self.dataPositions[1],y=y)
        self.data2 = Data(x=self.dataFeatures[2],pos=self.dataPositions[2],y=y)

        # Use the KNNGraph to calculate our edges - request self.neighbours edges per node
        edgeFinder = KNNGraph(k=self.neighbours)
        self.data0 = edgeFinder(self.data0)
        self.data1 = edgeFinder(self.data1)
        self.data2 = edgeFinder(self.data2)
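
After the KNNGraph pass, each of data0, data1 and data2 carries x (the rescaled features), pos (the coordinates the nearest-neighbour search ran on), the shared label y, and an edge_index built from the self.neighbours closest nodes in that view.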
Example #6
def edge_creators(iteration):
    if iteration == 1:
        from torch_geometric.transforms import KNNGraph
        return KNNGraph(loop=True)
    
    if iteration == 2:
        from torch_geometric.transforms import KNNGraph, ToUndirected, AddSelfLoops
        def edge_creator(dat):
            # 5-NN graph, symmetrised, with self-loops added and edge direction flipped
            dat = KNNGraph(k=5, loop=False, force_undirected=False)(dat)
            dat.adj_t = None
            dat = ToUndirected()(dat)
            dat = AddSelfLoops()(dat)
            dat.edge_index = dat.edge_index.flip(dims=[0])
            return dat
        return edge_creator
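
A small hedged usage sketch for edge_creators above; the random 3-D points are illustrative only, and torch-cluster must be installed for the KNNGraph call.

import torch
from torch_geometric.data import Data

data = Data(pos=torch.rand(10, 3))  # toy positions, not from the original project
creator = edge_creators(2)          # 5-NN graph, symmetrised, with self-loops, edge direction flipped
data = creator(data)
print(data.edge_index.shape)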
Example #7
from torch_geometric.datasets import ShapeNet
from torch_geometric.transforms import KNNGraph

# # -------- Reading the raw point-cloud dataset ------------
# point_dataset = ShapeNet(root='../datasets/ShapeNet', categories=['Airplane'])
# print(point_dataset)
#
# point_data = point_dataset[0]
# print(point_data)
#
# print('\n')

# -------- Convert to graph data via a transform -----------
graph_dataset = ShapeNet(root='../datasets/ShapeNet',
                         categories=['Airplane'],
                         pre_transform=KNNGraph(k=6))
print(graph_dataset)

graph_data = graph_dataset[0]
print(graph_data)

# Tip: both loading styles write a processed folder under the dataset directory, so remember to delete it whenever you switch between them.
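
Because KNNGraph(k=6) is passed as pre_transform, the 6-nearest-neighbour edge_index is computed once during processing and cached on disk in the processed folder, which is exactly why the tip above says to delete that folder before switching between the raw point-cloud and graph loading styles.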