Example #1
0
def transform_setup(graph_u=False,
                    graph_gcn=False,
                    rotation=180,
                    samplePoints=1024,
                    mesh=False,
                    node_translation=0.01):
    """Build (transform, pretransform) pipelines for point-cloud/graph models.

    Args:
        graph_u: If truthy, build a k-NN graph pipeline with k=graph_u.
        graph_gcn: If truthy, build a GCN pipeline with k=graph_gcn
            (point sampling), or mesh-based edges when `mesh` is truthy.
        rotation: Either a scalar rotation range in degrees (axis then
            defaults to 0, matching T.RandomRotate's own default) or a
            (degrees, axis) pair.
        samplePoints: Number of points sampled from each mesh surface.
        mesh: Falsy for point sampling; truthy to derive edges from faces.
        node_translation: Unused; kept for backward compatibility.

    Returns:
        (transform, pretransform). Previously the (unreachable) fall-through
        branch raised NameError at return; it now yields (None, None).
    """
    # BUG FIX: the original indexed rotation[0]/rotation[1], which crashed
    # on the scalar default (180). Accept a bare angle for compatibility.
    if not hasattr(rotation, '__getitem__'):
        rotation = (rotation, 0)

    # BUG FIX: ensure both names are always bound before the return below.
    transform, pretransform = None, None

    if not graph_u and not graph_gcn:
        # Default transformation for scale normalization, centering,
        # point sampling and rotating.
        pretransform = T.Compose([T.NormalizeScale(), T.Center()])
        transform = T.Compose([
            T.SamplePoints(samplePoints),
            T.RandomRotate(rotation[0], rotation[1])
        ])
        print("pointnet rotation {}".format(rotation))
    elif graph_u:
        pretransform = T.Compose([T.NormalizeScale(), T.Center()])
        transform = T.Compose([
            T.NormalizeScale(),
            T.Center(),
            T.SamplePoints(samplePoints, True, True),
            T.RandomRotate(rotation[0], rotation[1]),
            T.KNNGraph(k=graph_u)
        ])
    elif graph_gcn:
        pretransform = T.Compose([T.NormalizeScale(), T.Center()])

        if mesh:
            # NOTE(review): the original "extraFeatures" branch was
            # byte-identical to the generic mesh branch, so both were
            # collapsed into this single pipeline.
            transform = T.Compose([
                T.RandomRotate(rotation[0], rotation[1]),
                T.GenerateMeshNormals(),
                T.FaceToEdge(True),
                T.Distance(norm=True),
                T.TargetIndegree(cat=True)
            ])
        else:
            transform = T.Compose([
                T.SamplePoints(samplePoints, True, True),
                T.KNNGraph(k=graph_gcn),
                T.Distance(norm=True)
            ])
            print("no mesh")
        print("Rotation {}".format(rotation))
        print("Meshing {}".format(mesh))
    else:
        # Unreachable given the conditions above; kept defensively.
        print('no transform')

    return transform, pretransform
Example #2
0
def get_adj_list(data, max_adj=None):
    """Build a fixed-width adjacency list from a mesh `Data` object.

    Faces are converted to edges, then an N x (max_adj + 1) tensor is built
    where row n holds up to max_adj neighbor indices of node n (padded with
    -1) and the final column stores that node's (possibly clamped) degree.

    Args:
        data: torch_geometric `Data` with `pos` and `face` populated.
        max_adj: Optional cap on neighbors kept per node; defaults to the
            maximum degree found in the mesh.

    Returns:
        (adj_list, edge): the adjacency-list tensor cast to `edge`'s dtype,
        and the edge_index augmented with self-loops.
    """
    data.edge_index = None
    data = T.FaceToEdge(remove_faces=False)(data)
    # Self-loops go into the returned edge_index only; the dense adjacency
    # below is built from the loop-free edges.
    edge, _ = add_self_loops(data.edge_index)

    data = T.ToDense()(data)
    adj_mat = data.adj

    # Per-node neighbor counts, shape N x 1.
    num_list = adj_mat.sum(1).long().unsqueeze(1)

    if max_adj is None:
        max_adj = num_list.max().item()
    else:
        # Clamp counts so no row reports more than max_adj neighbors.
        max_list = torch.full_like(num_list, max_adj).long()
        num_list = torch.where(num_list > max_adj, max_list, num_list)

    # -1 marks unused (padding) neighbor slots.
    adj_list = torch.full_like(adj_mat, -1).long()
    N = data.pos.shape[0]

    for n in range(N):
        adj = adj_mat[n].nonzero()
        num = num_list[n]
        # Keep at most `num` neighbors for node n.
        adj_list[n, :num] = adj.t()[:, :num]

    # Append the degree column, giving shape N x (max_adj + 1).
    adj_list = torch.cat([adj_list[:, :max_adj], num_list],
                         dim=1)  # N * (max_adj + 1)
    return adj_list.type_as(edge), edge
Example #3
0
 def process(self):
     """Read raw PLY meshes, attach labels parsed from the filenames, and
     save the collated dataset to ``self.processed_paths[0]``.

     Filenames are expected to look like ``<category>_model<M>_pose<P>.ply``.
     """
     data_list = []
     f2e = transforms.FaceToEdge(remove_faces=False)
     # The enumerate() index was unused; iterate paths directly.
     for path in tqdm.tqdm(self.raw_paths):
         mesh = torch_geometric.io.read_ply(path)
         # NOTE(review): return value ignored — relies on FaceToEdge
         # mutating `mesh` in place; confirm against the installed PyG.
         f2e(mesh)
         # "<category>_model<M>_pose<P>" -> [*category parts, modelM, poseP]
         tmp = split(path)[1].split(".")[0].split("_")
         model_str, pose_str = tmp[-2], tmp[-1]
         category = "_".join(tmp[:-2])
         mesh.model = int(model_str[5:])  # strip the "model" prefix
         mesh.pose = int(pose_str[4:])    # strip the "pose" prefix
         mesh.y = self.categories.index(category)
         if self.pre_filter is not None and not self.pre_filter(mesh):
             continue
         if self.pre_transform is not None:
             mesh = self.pre_transform(mesh)
         data_list.append(mesh)
     data, slices = self.collate(data_list)
     torch.save((data, slices), self.processed_paths[0])
Example #4
0
 def process(self):
     """Read raw OBJ meshes, derive class/subject labels from the numeric
     filename, and save the collated dataset to ``self.processed_paths[0]``.

     A file named ``<i>.obj`` encodes label ``i % 10`` and subject
     ``i // 10``.
     """
     # Read data into huge `Data` list.
     face2edge = transforms.FaceToEdge(remove_faces=False)
     data_list = []
     num_classes = 10
     for path in tqdm.tqdm(self.raw_paths):
         i = int(basename(path)[:-4])  # strip the ".obj" extension
         mesh = torch_geometric.io.read_obj(path)
         mesh.y = i % num_classes
         # BUG FIX: floor division instead of int(i / num_classes) —
         # exact for all ints, while float division can lose precision.
         mesh.subject = i // num_classes
         # NOTE(review): relies on FaceToEdge mutating `mesh` in place.
         face2edge(mesh)
         mesh.idx = i
         if self.pre_filter is not None and not self.pre_filter(mesh):
             continue
         if self.pre_transform is not None:
             mesh = self.pre_transform(mesh)
         data_list.append(mesh)
     data, slices = self.collate(data_list)
     torch.save((data, slices), self.processed_paths[0])
    def __init__(self, batch_size=32, num_point_samples=4096):
        """Run the preprocessing pipeline once and store the standardised
        coordinates.

        It seems overkill to run the entire pipeline just to normalise the
        landmarks, but it is required to find the bounding box.

        Args:
            batch_size: Mini-batch size forwarded to the parent module.
            num_point_samples: Accepted for interface compatibility; not
                used in this initialiser.
        """

        root = '../data/CurvatureComplete_normalisation_only/'

        # Cached preprocessing parameters are stored next to the data.
        prep_dir = os.path.join(root, 'prep_parameters')

        # only perform preprocessing upto b-norm of the landmarks
        pre_transform = local_transforms.InvertibleCompose(
            [
                # convert mesh faces to graph edges
                ptg_transforms.FaceToEdge(remove_faces=False),
                # keep a copy of the raw curvature before it is rescaled
                local_transforms.LabelCloner('x', 'continuous_curvature'),

                # standardise x so that x ∈ [0,1]
                local_transforms.UnitNormaliseScalar(),

                # propagate curvature feature to local extreme
                local_transforms.PropageteFeaturesToLocalExtremes(c=80),

                # remove vertices based on curvature
                local_transforms.FilterVerts(
                    operator=lambda data: data.x[:, 0] < 0.85),

                # b-normalise
                local_transforms.NormalizeScale(invertible=True),
                local_transforms.LabelCloner('x', 'discrete_curvature'),
            ],
            invertible=True,
            skip_non_invertible=True,
            # load_parameters() presumably restores previously fitted
            # parameters from store_directory — TODO confirm.
            store_directory=prep_dir).load_parameters()

        super().__init__(root,
                         batch_size=batch_size,
                         transform=None,
                         pre_transform=pre_transform)
# CLI hyper-parameters (the parser itself is created earlier in the file).
parser.add_argument('--num_layers', type=int, default=2)
parser.add_argument('--num_steps', type=int, default=10)
parser.add_argument('--lr', type=float, default=0.001)
parser.add_argument('--batch_size', type=int, default=512)
parser.add_argument('--pre_epochs', type=int, default=15)
parser.add_argument('--epochs', type=int, default=15)
parser.add_argument('--runs', type=int, default=20)
parser.add_argument('--test_samples', type=int, default=100)
args = parser.parse_args()

# Keep only non-empty graphs; the second filter additionally drops samples
# whose name starts with '2007' (applied to car/motorbike below).
pre_filter1 = lambda d: d.num_nodes > 0  # noqa
pre_filter2 = lambda d: d.num_nodes > 0 and d.name[:4] != '2007'  # noqa

# Delaunay-triangulate the keypoints, turn faces into edges, then attach
# either distance (isotropic) or Cartesian edge attributes.
# NOTE(review): args.isotropic is used here, but its add_argument call is
# not in this excerpt — confirm it is defined earlier.
transform = T.Compose([
    T.Delaunay(),
    T.FaceToEdge(),
    T.Distance() if args.isotropic else T.Cartesian(),
])

path = osp.join('..', 'data', 'PascalVOC-WILLOW')
pretrain_datasets = []
for category in PascalVOC.categories:
    dataset = PascalVOC(path,
                        category,
                        train=True,
                        transform=transform,
                        pre_filter=pre_filter2
                        if category in ['car', 'motorbike'] else pre_filter1)
    # Pair each category dataset with itself (ValidPairDataset semantics
    # are defined elsewhere — presumably samples valid graph pairs).
    pretrain_datasets += [ValidPairDataset(dataset, dataset, sample=True)]
pretrain_dataset = torch.utils.data.ConcatDataset(pretrain_datasets)
pretrain_loader = DataLoader(pretrain_dataset,
Example #7
0
import os.path as osp

import torch
import torch.nn.functional as F
from torch_geometric.datasets import FAUST
import torch_geometric.transforms as T
from torch_geometric.loader import DataLoader
from torch_geometric.nn import SplineConv

# FAUST dataset: convert mesh faces to edges and give every node a constant
# scalar feature of 1; T.Cartesian() attaches edge attributes at load time.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'FAUST')
pre_transform = T.Compose([T.FaceToEdge(), T.Constant(value=1)])
train_dataset = FAUST(path, True, T.Cartesian(), pre_transform)
test_dataset = FAUST(path, False, T.Cartesian(), pre_transform)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)
d = train_dataset[0]  # reference sample; its node count sizes the model head


class Net(torch.nn.Module):
    """SplineConv network used with the FAUST dataset loaded above
    (forward pass is defined below this excerpt)."""

    def __init__(self):
        """Build six SplineConv layers plus a two-layer linear head whose
        output width equals the node count of the reference sample `d`."""
        super().__init__()
        # All convolutions use 3-D pseudo-coordinates, 5x5x5 spline kernels
        # and sum aggregation.
        self.conv1 = SplineConv(1, 32, dim=3, kernel_size=5, aggr='add')
        self.conv2 = SplineConv(32, 64, dim=3, kernel_size=5, aggr='add')
        self.conv3 = SplineConv(64, 64, dim=3, kernel_size=5, aggr='add')
        self.conv4 = SplineConv(64, 64, dim=3, kernel_size=5, aggr='add')
        self.conv5 = SplineConv(64, 64, dim=3, kernel_size=5, aggr='add')
        self.conv6 = SplineConv(64, 64, dim=3, kernel_size=5, aggr='add')
        self.lin1 = torch.nn.Linear(64, 256)
        # `d` is the module-level reference sample loaded above.
        self.lin2 = torch.nn.Linear(256, d.num_nodes)

    def forward(self, data):
Example #8
0
    def __call__(self, data):
        """Use vertex positions as node features, then turn faces into edges."""
        data.x = data.pos
        converted = T.FaceToEdge()(data)
        return converted
Example #9
0
    return False


class MyTransform(object):
    """Drop mesh faces and install a constant all-ones feature per node."""

    def __call__(self, data):
        # Evaluate num_nodes before clearing `face`, since PyG may infer
        # the node count from the faces when `x` is unset.
        ones = torch.ones(data.num_nodes, 1)
        data.face = None
        data.x = ones
        return data


def norm(x, edge_index):
    """Scale node features by 1 / (degree + 1), with the degree computed
    from the first row of edge_index."""
    # NOTE(review): this `degree` call passes x.device as a 4th argument,
    # which matches older torch_geometric signatures — verify against the
    # installed version (the other `norm` in this file passes only three).
    deg_plus_one = 1 + degree(edge_index[0], x.size(0), x.dtype, x.device)
    return x / deg_plus_one.unsqueeze(-1)


# FAUST dataset: faces -> edges, then constant node features via MyTransform;
# T.Cartesian() attaches edge attributes at load time.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'FAUST')
pre_transform = T.Compose([T.FaceToEdge(), MyTransform()])
train_dataset = FAUST(path, True, T.Cartesian(), pre_transform)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
d = train_dataset[0]  # reference sample from the training set


class Net(torch.nn.Module):
    """SplineConv stack (excerpt; the definition continues beyond this
    snippet)."""

    def __init__(self):
        super(Net, self).__init__()
        # SplineConv layers over 3-D pseudo-coordinates with 5x5x5 kernels;
        # the library's built-in degree normalisation is disabled
        # (norm=False) — the module-level norm() helper is used instead.
        self.conv1 = SplineConv(1, 32, dim=3, kernel_size=5, norm=False)
        self.conv2 = SplineConv(32, 64, dim=3, kernel_size=5, norm=False)
        self.conv3 = SplineConv(64, 64, dim=3, kernel_size=5, norm=False)
        self.conv4 = SplineConv(64, 64, dim=3, kernel_size=5, norm=False)
        self.conv5 = SplineConv(64, 64, dim=3, kernel_size=5, norm=False)
        self.conv6 = SplineConv(64, 64, dim=3, kernel_size=5, norm=False)
        self.fc1 = torch.nn.Linear(64, 256)
Example #10
0
from torch_geometric.utils import degree


class MyTransform(object):
    """Swap mesh faces for a single constant feature column of ones."""

    def __call__(self, data):
        # Capture the node count first: clearing `face` beforehand could
        # change how PyG infers num_nodes when `x` is unset.
        n = data.num_nodes
        feature = torch.ones(n, 1)
        data.face = None
        data.x = feature
        return data


def norm(x, edge_index):
    """Normalise node features by (degree + 1), where the degree is taken
    from the first row of edge_index."""
    smoothing = degree(edge_index[0], x.size(0), x.dtype) + 1
    return x / smoothing.unsqueeze(-1)


# FAUST train/test datasets: faces -> edges, constant node features via
# MyTransform; T.Cartesian() attaches edge attributes at load time.
path = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'FAUST')
pre_transform = T.Compose([T.FaceToEdge(), MyTransform()])
train_dataset = FAUST(path, True, T.Cartesian(), pre_transform)
test_dataset = FAUST(path, False, T.Cartesian(), pre_transform)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1)
d = train_dataset[0]  # reference sample from the training set


class Net(torch.nn.Module):
    """SplineConv stack (excerpt; the definition continues beyond this
    snippet)."""

    def __init__(self):
        super(Net, self).__init__()
        # SplineConv layers with the built-in degree normalisation disabled
        # (norm=False); the module-level norm() helper is defined above.
        self.conv1 = SplineConv(1, 32, dim=3, kernel_size=5, norm=False)
        self.conv2 = SplineConv(32, 64, dim=3, kernel_size=5, norm=False)
        self.conv3 = SplineConv(64, 64, dim=3, kernel_size=5, norm=False)
        self.conv4 = SplineConv(64, 64, dim=3, kernel_size=5, norm=False)
        self.conv5 = SplineConv(64, 64, dim=3, kernel_size=5, norm=False)
Example #11
0
# Run configuration from CLI args (the argparse setup is outside this excerpt).
TRIPLET = False
EPOCH_softmax = args.epoch_softmax
EPOCH_hardloss = args.epoch_hardloss
K = args.wavelet_scales + 1
SAVE_NAME = args.saving_name
CPOINT_NAME = args.loading_name
LEARNING_RATE = args.learning_rate_softmax
WEIGHT_DECAY = args.weight_decay_softmax

path = osp.join(osp.dirname(osp.realpath(__file__)), 'datasets', 'Faust')
path_output = osp.join(osp.abspath('.'), 'outputs', SAVE_NAME)
if not os.path.exists(path_output):
    os.makedirs(path_output)
# NOTE(review): this file handle is opened here and never closed in this
# excerpt — confirm it is closed (or flushed) at the end of the script.
LOG_FOUT = open(path_output + '/log.out', 'w')

# Face-to-edge conversion is the only preprocessing step applied here.
pre_transform = T.FaceToEdge()
train_dataset = FAUST_WAVELET(path, True, None, pre_transform)
test_dataset = FAUST_WAVELET(path, False, None, pre_transform)
train_loader = DataLoader(train_dataset, batch_size=1, shuffle=True)
train_loader_tri = DataListLoader(train_dataset, batch_size=2, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)
d = test_dataset[0]
# Override the node count with the number of correspondence points.
d.num_nodes = args.n_corr_points


class L2Norm(torch.nn.Module):
    """Row-wise Euclidean norm module (forward continues beyond this
    excerpt)."""
    def __init__(self):
        super(L2Norm, self).__init__()
        # An epsilon guard against zero norms is present but disabled:
        # self.eps = 1e-10
    def forward(self, x):
        # Per-row L2 norm of x; the commented "+ self.eps" would guard
        # against division by zero further down.
        norm = torch.sqrt(torch.sum(x * x, dim=1))  # + self.eps
Example #12
0
 def set_mesh_graph(self):
     """Convert the stored mesh's faces to edges with Cartesian edge
     attributes and return its (x, edge_index, edge_attr) triple."""
     pipeline = T.Compose([T.FaceToEdge(), T.Cartesian()])
     graph = pipeline(self.mesh_graph)
     return graph.x, graph.edge_index, graph.edge_attr
Example #13
0
def mesh_edge_subdiv(data):
    """Compute the bookkeeping for a 1-to-4 triangle subdivision.

    Each mesh edge gets one new vertex index (shared edges share the index),
    and every face is replaced by four sub-faces as sketched below.

    Returns:
        pool_idx: E" x 2 tensor; row k holds the endpoints of the edge whose
            new midpoint vertex receives index V + k.
        new_faces: F" x 3 tensor of the four sub-faces per input face.
    """
    # face unpooling
    #                          v0
    #                          /\
    #                      n0 /__\ n2
    #                        /\  /\
    #                       /__\/__\
    #                     v1   n1   v2
    # [v0, v1, v2] -> [v0, n0, n2], [n0, v1, n1], [n1, v2, n2], [n0, n1, n2]
    vert = data.pos  # V * 3
    V = vert.shape[0]

    # use the edge's row index as the dict key (tensors are not hashable
    # as dict keys in the way needed here)
    data.edge_index = None
    data = T.FaceToEdge(remove_faces=False)(data)

    edge_list, _ = data.edge_index.sort(0)  # 2 * E
    edge_sort_list, _ = coalesce(edge_list, None, V, V)  # 2 * E'
    edge_sort_list = edge_sort_list.t()  # E' * 2

    face = data.face.t()  # F * 3
    F = face.shape[0]

    verts = [face[:, i] for i in range(3)]  # [v0 v1 v2]
    edges = [face[:, :2], face[:, 1:],
             face[:, [-1, 0]]]  # [e0(v0, v1), e1(v1, v2), e2(v2, v0)]
    # Sort endpoints so both orientations of an edge map to the same key.
    edges_sort = [edge.sort(1)[0] for edge in edges]

    # Maps an edge's position in edge_sort_list to its new vertex index.
    edge_dict = {}
    # New vertex indices start right after the existing vertices.
    start_index = vert.shape[0]

    new_verts = []
    # Dummy first row; dropped after the loop.
    pool_idx = torch.zeros(1, 2).type_as(edge_list)
    for f in range(F):
        ns = []
        for e in range(3):
            edge_sort = edges_sort[e][f]
            # find_idx is defined elsewhere in this project; presumably it
            # returns the row of edge_sort within edge_sort_list — confirm.
            idx = find_idx(edge_sort, edge_sort_list)
            if idx in edge_dict.keys():
                n = edge_dict[idx]
            else:
                # First time this edge is seen: allocate a fresh vertex
                # index and record the edge endpoints for pooling.
                n = start_index
                edge_dict[idx] = torch.tensor([n]).type_as(edge_list)
                pool_idx = torch.cat([pool_idx, edge_sort.unsqueeze(0)], 0)
                start_index += 1
            ns.append(edge_dict[idx])

        # Accumulate per-corner new-vertex indices across faces.
        if len(new_verts) == 0:
            new_verts.extend(ns)
        else:
            new_verts = [torch.cat([new_verts[i], ns[i]], 0) for i in range(3)]

    pool_idx = pool_idx[1:]  # E" * 2 (drop the dummy row)

    v0, v1, v2 = verts
    n0, n1, n2 = new_verts

    # Assemble the four sub-faces per original triangle (see diagram above).
    new_face0 = torch.stack([v0, n0, n2], 1)
    new_face1 = torch.stack([n0, v1, n1], 1)
    new_face2 = torch.stack([n1, v2, n2], 1)
    new_face3 = torch.stack([n0, n1, n2], 1)
    new_faces = torch.cat([new_face0, new_face1, new_face2, new_face3],
                          0)  # F" * 3

    return pool_idx, new_faces
Example #14
0
    def __init__(self,
                 landmark='IP',
                 batch_size=64,
                 num_point_samples=2048,
                 patch_size=8.):
        """Set up the patch-based dental data module for one landmark.

        Reads the predicted landmarks for the training split, derives
        per-sample weights from the prediction error, and builds the
        invertible preprocessing pipeline plus the per-access transform.

        Args:
            landmark: Landmark name used as the CSV column prefix.
            batch_size: Mini-batch size forwarded to the parent module.
            num_point_samples: Points sampled from each mesh per access.
            patch_size: Size of the geodesic patch extracted around the
                patch center (presumably a radius — confirm against
                ExtractGeodesicPatch).
        """
        root = '../data/PatchBasedCompleteDentalDataModule_{}/'.format(
            landmark)

        self.prep_dir = os.path.join(root, 'prep_parameters')
        # Predicted and ground-truth coordinate columns for this landmark.
        labels = [landmark + '_X', landmark + '_Y', landmark + '_Z']
        gt_labels = [
            'gt_' + landmark + '_X', 'gt_' + landmark + '_Y',
            'gt_' + landmark + '_Z'
        ]

        # read ids used in training
        training_ids = pd.read_csv(os.path.join(
            root, 'normalised_train_data_landmarks.csv'),
                                   header=0,
                                   index_col=0)['StudyID']

        # read predicted landmarks for training examples
        landmarks = pd.concat([
            pd.read_csv(f, header=0, index_col=0)
            for f in glob.glob(os.path.join(root, 'v0*.csv'))
        ])
        # Keep only rows belonging to the training split.
        landmarks = landmarks[landmarks['StudyID'].isin(training_ids.values)]

        # Every training id must have exactly one predicted landmark row.
        assert len(landmarks) == len(training_ids)

        # get ground truth and predicted labels
        gt_landmarks = landmarks[gt_labels].astype(float)
        pred_landmarks = landmarks[labels].astype(float)

        # sample weights are proportional to the error, since these are less likely to occur
        self.sample_weights = torch.from_numpy(
            (pred_landmarks - gt_landmarks.values).abs().sum(1).values)

        # setup preprocessing steps, load them if they were executed before
        pre_transform = local_transforms.InvertibleCompose(
            [
                # convert mesh faces to graph edges
                ptg_transforms.FaceToEdge(remove_faces=False),

                # add edge_attr containing relative euclidean distance
                ptg_transforms.Distance(norm=False, cat=False),
                # extract patch around the predicted landmark position
                local_transforms.ExtractGeodesicPatch(patch_size,
                                                      key='patch_center'),

                # b-normalise
                local_transforms.NormalizeScale(invertible=True),

                # save vertex features
                local_transforms.LabelCloner('x', 'continuous_curvature'),
                ptg_transforms.GenerateMeshNormals(),

                # remove labels used in preprocessing
                local_transforms.LabelCloner('pos', 'mesh_vert'),
                local_transforms.LabelCleaner(['edge_index', 'edge_attr'])
            ],
            invertible=True,
            skip_non_invertible=True,
            store_directory=self.prep_dir).load_parameters()

        # set_patch_centers is defined on this class elsewhere.
        pre_process = self.set_patch_centers

        # Per-access transform: restore curvature features, sample points,
        # merge normals into x, z-normalise, then drop helper labels.
        transform = ptg_transforms.Compose([
            local_transforms.LabelCloner('continuous_curvature', 'x'),
            local_transforms.SamplePoints(num_point_samples,
                                          remove_faces=False,
                                          include_normals=True,
                                          include_features=True),
            local_transforms.MergeLabels('norm'),
            local_transforms.ZNormalise('x'),
            local_transforms.LabelCleaner([
                'mesh_vert', 'norm', 'continuous_curvature', 'patch_center',
                'face', 'mesh_norm', 'mesh_color'
            ])
        ])

        super().__init__(root,
                         batch_size=batch_size,
                         pre_process=pre_process,
                         transform=transform,
                         labels=labels,
                         pre_transform=pre_transform)
Example #15
0
    def __init__(self, batch_size=32, num_point_samples=4096):
        """Set up the curvature-based data module.

        Builds an invertible preprocessing pipeline (curvature filtering,
        normalisation, PCA projection of the landmarks) and a per-access
        sampling transform.

        Args:
            batch_size: Mini-batch size forwarded to the parent module.
            num_point_samples: Points sampled from each mesh per access.
        """
        root = '../data/CurvatureComplete/'

        prep_dir = os.path.join(root, 'prep_parameters')

        # Generate standardised landmarks to setup pca
        # (skip the first column, then reshape to 7 landmarks x 3 coords).
        training_set_landmarks = torch.tensor(
            pd.read_csv(os.path.join(root,
                                     'normalised_train_data_landmarks.csv'),
                        header=0,
                        index_col=0).values[:, 1:].astype('float32')).view(
                            -1, 7, 3)

        pre_transform = local_transforms.InvertibleCompose(
            [
                # convert mesh faces to graph edges
                ptg_transforms.FaceToEdge(remove_faces=False),
                # keep a copy of the raw curvature before it is rescaled
                local_transforms.LabelCloner('x', 'continuous_curvature'),

                # standardise x so that x ∈ [0,1]
                local_transforms.UnitNormaliseScalar(),

                # propagate curvature feature to local extreme
                local_transforms.PropageteFeaturesToLocalExtremes(c=80),

                # remove vertices based on curvature
                local_transforms.FilterVerts(
                    operator=lambda data: data.x[:, 0] < 0.85),

                # b-normalise
                local_transforms.NormalizeScale(invertible=True),
                local_transforms.LabelCloner('x', 'discrete_curvature'),

                # normalise landmarks
                local_transforms.PCAProjectLandmarks(
                    training_set_landmarks, n_components=3, invertible=True),

                # generate vertex features
                ptg_transforms.GenerateMeshNormals(),

                # remove labels used in preprocessing
                local_transforms.LabelCloner('pos', 'mesh_vert'),
                local_transforms.LabelCleaner(['edge_index'])
            ],
            invertible=True,
            skip_non_invertible=True,
            # load_parameters() presumably restores previously fitted
            # parameters from store_directory — TODO confirm.
            store_directory=prep_dir).load_parameters()

        # Per-access transform: restore curvature features, sample points,
        # merge normals into x, z-normalise, then drop helper labels.
        transform = local_transforms.InvertibleCompose(
            [
                local_transforms.LabelCloner('continuous_curvature', 'x'),
                local_transforms.SamplePoints(num_point_samples,
                                              remove_faces=False,
                                              include_normals=True,
                                              include_features=True),
                local_transforms.MergeLabels('norm'),
                local_transforms.ZNormalise('x'),
                local_transforms.LabelCleaner([
                    'mesh_vert', 'norm', 'continuous_curvature', 'face',
                    'mesh_norm', 'discrete_curvature'
                ])
            ],
            skip_non_invertible=True,
            invertible=True)

        super().__init__(root,
                         batch_size=batch_size,
                         transform=transform,
                         pre_transform=pre_transform)