def make_dataset_by_class(self, dir, class_to_idx, phase, dataset_mode):
    """Collect (mesh_path, label) pairs from a class-per-subdirectory tree.

    Walks each immediate subdirectory of `dir`, keeping mesh files whose
    directory path contains `phase` (e.g. 'train' / 'test') exactly once.

    Args:
        dir: Root dataset directory; each subdirectory is one class.
        class_to_idx: Mapping to the target label. Keyed by subdirectory
            name, or in regression mode by a filename-derived key.
        phase: Substring that must appear exactly once in a file's
            directory path for the file to be included.
        dataset_mode: 'regression' looks the label up by filename key;
            any other value uses the subdirectory name `target`.

    Returns:
        List of (path, label) tuples.
    """
    meshes = []
    dir = os.path.expanduser(dir)
    for target in sorted(os.listdir(dir)):
        d = os.path.join(dir, target)
        if not os.path.isdir(d):
            continue
        for root, _, fnames in sorted(os.walk(d)):
            for fname in sorted(fnames):
                if self.opt.verbose:
                    print("DEBUG fname in meshretrieve ", fname)
                if is_mesh_file(fname) and (root.count(phase) == 1):
                    path = os.path.join(root, fname)
                    if self.opt.verbose:
                        print("DEBUG meshretrieve path ", path)
                    if dataset_mode == 'regression':
                        # Labels come from a metadata file keyed by the stem
                        # of the filename, e.g. CC00839XX23_23710.obj ->
                        # CC00839XX23_23710. splitext strips the extension
                        # regardless of its length (the previous fname[:-4]
                        # silently produced a wrong key for any extension
                        # that is not exactly 3 characters).
                        filename_key = os.path.splitext(fname)[0]
                        # When both hemispheres live in one dataset the stem
                        # ends in '_L' / '_R'; strip the suffix to recover
                        # the metadata key.
                        if filename_key[-1] in ('L', 'R'):
                            filename_key = filename_key[:-2]
                        item = (path, class_to_idx[filename_key])
                    else:
                        item = (path, class_to_idx[target])
                    meshes.append(item)
    return meshes
def make_dataset(path):
    """Return the paths of all mesh files found anywhere under `path`.

    Directories are walked in sorted order; filenames within a directory
    keep the order `os.walk` yields them (NOTE: unlike the sibling
    dataset builders, fnames are not sorted here — preserved as-is).

    Raises:
        AssertionError: if `path` is not an existing directory.
    """
    meshes = []
    assert os.path.isdir(path), '%s is not a valid directory' % path
    for root, _, fnames in sorted(os.walk(path)):
        for fname in fnames:
            if is_mesh_file(fname):
                # Use a fresh local instead of clobbering the `path`
                # parameter (the original shadowed it mid-loop).
                mesh_path = os.path.join(root, fname)
                meshes.append(mesh_path)
    return meshes
def make_target_dataset(dir, subroot, phase):
    """Return mesh paths under dir/subroot whose path contains `phase` once.

    Args:
        dir: Dataset root directory.
        subroot: Name of the class subdirectory to scan; must be a direct
            entry of `dir`.
        phase: Substring that must appear exactly once in a file's
            directory path for the file to be included.

    Returns:
        Sorted list of mesh file paths.

    Exits the process with a non-zero status if `subroot` is missing
    (the original called bare exit(), which exits with status 0 and so
    reported success to shells and CI on an error path).
    """
    meshes = []
    if subroot not in os.listdir(dir):
        print('Error: No class %s in %s' % (subroot, dir))
        exit(1)  # was exit(): status 0 masked the failure
    dir = os.path.join(dir, subroot)
    for root, _, fnames in sorted(os.walk(dir)):
        for fname in sorted(fnames):
            if is_mesh_file(fname) and (root.count(phase) == 1):
                path = os.path.join(root, fname)
                meshes.append(path)
    return meshes
def make_dataset_by_class(dir, class_to_idx, phase):
    """Gather (mesh_path, class_index) pairs from a class-per-folder tree.

    Each immediate subdirectory of `dir` names a class; a mesh file is
    kept when its directory path contains `phase` exactly once, and it is
    labelled with `class_to_idx` of that subdirectory's name.
    """
    dir = os.path.expanduser(dir)
    items = []
    for target in sorted(os.listdir(dir)):
        class_dir = os.path.join(dir, target)
        # Skip stray files sitting next to the class folders.
        if not os.path.isdir(class_dir):
            continue
        for root, _, fnames in sorted(os.walk(class_dir)):
            items.extend(
                (os.path.join(root, fname), class_to_idx[target])
                for fname in sorted(fnames)
                if is_mesh_file(fname) and root.count(phase) == 1
            )
    return items
train_dir = os.path.join(converter.dataset, 'train') train_graph_dir = os.path.join(converter.hybrid_graphs, 'train') mkdir(train_graph_dir) elif converter.portion == 'test': test_npz_dir = os.path.join(converter.dataset, 'test', 'cache') test_dir = os.path.join(converter.dataset, 'test') test_graph_dir = os.path.join(converter.hybrid_graphs, 'test') mkdir(test_graph_dir) meshes = [] classes = np.loadtxt(classes_file) if converter.portion == 'train': for root, _, file_names in os.walk(train_dir): for fname in file_names: if is_mesh_file(fname): mesh_file = os.path.join(root, fname) npz_file = os.path.join(train_npz_dir, fname.split('.')[0] + '_000.npz') seg_file = os.path.join(segmentation_dir, fname.split('.')[0] + '.eseg') soft_seg_file = os.path.join( soft_segmentation_dir, fname.split('.')[0] + '.seseg') face_seg_file = os.path.join(face_segmentation_dir, fname.split('.')[0] + '.eseg') node_seg_file = os.path.join(node_segmentation_dir, fname.split('.')[0] + '.eseg') mesh_files_dict = { 'mesh': mesh_file, 'npz': npz_file,