Example #1
    def process(self):
        progress = Progress('Processing', end=self.num_graphs, type='')

        train_data_list, test_data_list = [], []

        for y, category in enumerate(self.categories):
            path = osp.join(self.raw_dir, category)

            for filename in glob.glob('{}/train/*.off'.format(path)):
                data = read_off(filename)
                data.y = torch.LongTensor([y])
                train_data_list.append(data)
                progress.inc()

            for filename in glob.glob('{}/test/*.off'.format(path)):
                data = read_off(filename)
                data.y = torch.LongTensor([y])
                test_data_list.append(data)
                progress.inc()

        train_dataset, train_slices = collate_to_set(train_data_list)
        torch.save((train_dataset, train_slices), self._processed_files[0])

        test_dataset, test_slices = collate_to_set(test_data_list)
        torch.save((test_dataset, test_slices), self._processed_files[1])

        progress.success()
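
All of these snippets revolve around `read_off`, which in PyTorch Geometric parses a single OFF mesh into a `Data` object holding vertex positions and triangle faces. A minimal standalone sketch, assuming `torch_geometric` is installed and that `mesh.off` is a hypothetical file path:

    from torch_geometric.io import read_off

    data = read_off('mesh.off')  # hypothetical path to an OFF mesh
    print(data.pos.shape)        # [num_vertices, 3] float coordinates
    print(data.face.shape)       # [3, num_faces] long vertex indices
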
Example #2
    def process(self):
        data_list = []
        for ip_path in glob.glob('{}/*.off'.format(self.raw_paths[0])):
            ip_data = read_off(ip_path)
            shape_id = osp.basename(ip_path).rsplit('.', 1)[0]
            gt_path = osp.join(self.raw_paths[1], shape_id + '.off')
            gt_data = read_off(gt_path)

            data = Data()
            # shape_id is expected to look like '<name>_<i>_<j>'.
            data.shape_id = torch.tensor(
                [int(shape_id.split('_')[1]),
                 int(shape_id.split('_')[2])])
            data.x = ip_data.pos
            data.gt_x = gt_data.pos
            data.face = ip_data.face
            data.gt_face = gt_data.face

            # if int(shape_id) < 10:
            data_list.append(data)

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        val_split = 0.15
        val_size = int(len(data_list) * val_split)

        torch.save(self.collate(data_list[:len(data_list) - val_size]),
                   self.processed_paths[0])
        torch.save(self.collate(data_list[len(data_list) - val_size:]),
                   self.processed_paths[1])
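
The two `torch.save(self.collate(...))` calls above write a training and a validation split to `self.processed_paths[0]` and `[1]`, which an `InMemoryDataset` derives from its `processed_file_names` property. A hedged sketch of the matching boilerplate; the file names 'train.pt' and 'val.pt' are assumptions, not taken from the original class:

    @property
    def processed_file_names(self):
        return ['train.pt', 'val.pt']  # assumed names: index 0 = train, 1 = val

    def __init__(self, root, train=True, transform=None, pre_transform=None,
                 pre_filter=None):
        super().__init__(root, transform, pre_transform, pre_filter)
        path = self.processed_paths[0 if train else 1]
        self.data, self.slices = torch.load(path)
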
Example #3
    def process(self):
        data_list = []
        for off_path in glob.glob('{}/*.off'.format(self.raw_paths[0])):
            data = read_off(off_path)
            shape_id = osp.basename(off_path).rsplit('.', 1)[0]
            label_path = osp.join(self.raw_paths[1], shape_id + '.seg')
            data.y = read_txt_array(label_path) - 1  # start from 0
            if self.classification is not None:
                # Binary target: does the shape contain the requested class?
                if self.classification in data.y:
                    data.y = torch.tensor([1])
                else:
                    data.y = torch.tensor([0])

            data.shape_id = torch.tensor([int(shape_id)])
            # if int(shape_id) < 10:
            data_list.append(data)

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        val_split = 0.15
        val_size = int(len(data_list) * val_split)

        torch.save(self.collate(data_list[:len(data_list) - val_size]), self.processed_paths[0])
        torch.save(self.collate(data_list[len(data_list) - val_size:]), self.processed_paths[1])
Example #4
    def get_pcd(self, cat_num, model_num):

        path = os.path.join(self.raw_dir, self.categories[cat_num],
                            self.dataset,
                            self.get_model_name(cat_num, model_num))
        data = read_off(path)
        data.y = torch.tensor([cat_num, model_num])
        if self.pre_filter is not None:
            data = self.pre_filter(data)
        if self.pre_transform is not None:
            data = self.pre_transform(data)
        return data
Example #5
    def process_set(self, dataset):
        categories = glob.glob(osp.join(self.raw_dir, '*', ''))
        categories = sorted([x.split(os.sep)[-2] for x in categories])

        data_list = []
        for target, category in enumerate(categories):
            folder = osp.join(self.raw_dir, category, dataset)
            paths = glob.glob('{}/{}_*.off'.format(folder, category))
            for path in paths:
                data = read_off(path)
                data.y = torch.tensor([target])

                pos = data.pos
                face = data.face.contiguous() - 1

                # Randomly keep roughly 10% of the faces.
                num_keep = face.size(1) // 10
                keep = sorted(random.sample(range(face.size(1)), num_keep))
                face = face[:, keep]

                # Re-index the vertices referenced by the remaining faces so
                # that the indices are contiguous, and drop unused positions.
                nodes, face = torch.unique(face, return_inverse=True)
                pos = pos[nodes]

                assert pos.size(1) == 3 and face.size(0) == 3

                edge_index = torch.cat([face[:2], face[1:], face[::2]], dim=1)
                edge_index = to_undirected(edge_index, num_nodes=pos.size(0))

                data.pos = pos
                data.face = face
                data.edge_index = edge_index

                data_list.append(data)

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        return self.collate(data_list)
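
For context, the `to_undirected` helper used above comes from `torch_geometric.utils`; it adds the reversed edges and coalesces duplicates. A tiny self-contained sketch of its behavior:

    import torch
    from torch_geometric.utils import to_undirected

    edge_index = torch.tensor([[0, 1, 2],
                               [1, 2, 0]])
    print(to_undirected(edge_index))
    # tensor([[0, 0, 1, 1, 2, 2],
    #         [1, 2, 0, 2, 0, 1]])
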
Example #6
    def process(self):
        ref_data = read_off(
            osp.join(self.raw_paths[0], 'null', '{}.off'.format(self.cat)))

        train_list = []
        name = '{}_{}_*.off'.format(self.part, self.cat)
        paths = glob.glob(osp.join(self.raw_paths[0], self.part, name))
        paths = [path[:-4] for path in paths]
        paths = sorted(paths, key=lambda e: (len(e), e))

        for path in paths:
            data = read_off('{}.off'.format(path))
            y = read_txt_array('{}.baryc_gt'.format(path))
            data.y = y[:, 0].to(torch.long) - 1
            data.y_baryc = y[:, 1:]
            train_list.append(data)

        test_list = []
        name = '{}_{}_*.off'.format(self.part, self.cat)
        paths = glob.glob(osp.join(self.raw_paths[1], self.part, name))
        paths = [path[:-4] for path in paths]
        paths = sorted(paths, key=lambda e: (len(e), e))

        for path in paths:
            test_list.append(read_off('{}.off'.format(path)))

        if self.pre_filter is not None:
            train_list = [d for d in train_list if self.pre_filter(d)]
            test_list = [d for d in test_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            ref_data = self.pre_transform(ref_data)
            train_list = [self.pre_transform(d) for d in train_list]
            test_list = [self.pre_transform(d) for d in test_list]

        torch.save(ref_data, self.processed_paths[0])
        torch.save(self.collate(train_list), self.processed_paths[1])
        torch.save(self.collate(test_list), self.processed_paths[2])
Example #7
    def process_set(self, dataset):
        categories = glob.glob(osp.join(self.raw_dir, '*', ''))
        categories = sorted([x.split(os.sep)[-2] for x in categories])

        data_list = []
        for target, category in enumerate(categories):
            folder = osp.join(self.raw_dir, category, dataset)
            paths = glob.glob('{}/{}_*.off'.format(folder, category))
            for path in paths:
                data = read_off(path)
                data.y = torch.tensor([target])
                data_list.append(data)

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        return self.collate(data_list)
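
Several of these `process_set` implementations mirror PyTorch Geometric's built-in `ModelNet` dataset. As a hedged usage sketch of how such a collated dataset is typically consumed (the root path and batch size below are arbitrary):

    from torch_geometric.datasets import ModelNet
    from torch_geometric.loader import DataLoader
    from torch_geometric.transforms import SamplePoints

    # ModelNet.process_set() reads the .off meshes much like the snippets above;
    # SamplePoints turns each mesh into a fixed-size point cloud at access time.
    dataset = ModelNet(root='data/ModelNet10', name='10', train=True,
                       transform=SamplePoints(1024))
    loader = DataLoader(dataset, batch_size=32, shuffle=True)

    for batch in loader:
        print(batch)  # e.g. DataBatch(pos=[32768, 3], y=[32], batch=[32768])
        break
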
Example #8
    def process_set(self, dataset):
        if self.cache_pcds:
            data_list = []
            for target, category in enumerate(self.categories):
                folder = os.path.join(self.raw_dir, category, dataset)
                paths = sorted(
                    glob.glob('{}/{}_*.off'.format(folder, category)))
                for path in paths:
                    data = read_off(path)
                    # The last four digits before '.off' give the model number.
                    model_num = int(os.path.basename(path)[-8:-4])
                    data.y = torch.tensor([target, model_num])
                    data_list.append(data)
            if self.pre_filter is not None:
                data_list = [d for d in data_list if self.pre_filter(d)]
            if self.pre_transform is not None:
                data_list = [self.pre_transform(d) for d in data_list]
            return self.collate(data_list)
        else:
            pass
Example #9
    def process_set(self, dataset):
        categories = glob.glob(osp.join(self.raw_dir, '*', ''))
        categories = sorted([x.split(os.sep)[-2] for x in categories])

        data_list = []
        for target, category in enumerate(categories):
            folder = osp.join(self.raw_dir, category, dataset)
            paths = glob.glob('{}/*.off'.format(folder))
            for path in paths:
                data = read_off(path)
                data.pos = data.pos - data.pos.mean(dim=0, keepdim=True)
                data.y = torch.tensor([target])
                data_list.append(data)

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        return self.collate(data_list)
Example #10
    def process_set(self, dataset):
        categories = glob.glob(osp.join(self.raw_dir, '*', ''))
        categories = sorted([x.split(os.sep)[-2] for x in categories])

        poisson = Poisson(1024)
        normalize = normalization.Normalize()
        data_list = []
        for target, category in enumerate(categories):
            folder = osp.join(self.raw_dir, category, dataset)
            paths = glob.glob('{}/{}_*.off'.format(folder, category))
            for path in paths:
                data = read_off(path)
                data.y = torch.tensor([target])
                data = normalize(data)
                data = poisson(data)
                data_list.append(data)

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        return self.collate(data_list)
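
Putting the pieces together, the snippets above are `process()` / `process_set()` hooks of `torch_geometric.data.InMemoryDataset` subclasses. Below is a minimal, self-contained skeleton showing where such a hook fits; the class name `OFFFolderDataset` and the single-split layout are illustrative assumptions, not code from the projects quoted above:

    import glob
    import os.path as osp

    import torch
    from torch_geometric.data import InMemoryDataset
    from torch_geometric.io import read_off


    class OFFFolderDataset(InMemoryDataset):
        """Hypothetical dataset: one sample per .off file placed in raw_dir."""

        def __init__(self, root, transform=None, pre_transform=None,
                     pre_filter=None):
            super().__init__(root, transform, pre_transform, pre_filter)
            # Load the collated (data, slices) tuple written by process().
            self.data, self.slices = torch.load(self.processed_paths[0])

        @property
        def raw_file_names(self):
            return []  # assume the .off files are already in `self.raw_dir`

        @property
        def processed_file_names(self):
            return ['data.pt']

        def download(self):
            pass  # nothing to download in this sketch

        def process(self):
            data_list = []
            for path in sorted(glob.glob(osp.join(self.raw_dir, '*.off'))):
                data = read_off(path)
                if self.pre_filter is not None and not self.pre_filter(data):
                    continue
                if self.pre_transform is not None:
                    data = self.pre_transform(data)
                data_list.append(data)
            torch.save(self.collate(data_list), self.processed_paths[0])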