def run_fuse(self): """ Run fusion. """ assert os.path.exists(self.options.depth_dir) common.makedir(self.options.out_dir) files = self.read_directory(self.options.depth_dir) timer = common.WallTimer() Rs = self.get_views() for filepath in files: # As rendering might be slower, we wait for rendering to finish. # This allows to run rendering and fusing in parallel (more or less). depths = common.read_hdf5(filepath) timer.reset() tsdf = self.fusion(depths, Rs) tsdf = tsdf[0] vertices, triangles = libmcubes.marching_cubes(-tsdf, 0) vertices /= self.options.resolution vertices -= 0.5 off_file = os.path.join(self.options.out_dir, ntpath.basename(filepath)[:-3]) exporter.export_off(vertices, triangles, off_file) print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))
def run_sample(self, filepath):
    """
    Run sampling.
    """
    timer = common.Timer()
    Rs = self.get_views()

    # As rendering might be slower, we wait for rendering to finish.
    # This allows running rendering and fusion in parallel (more or less).
    depths = common.read_hdf5(filepath)

    timer.reset()
    tsdf = self.fusion(depths, Rs)

    xs = np.linspace(-0.5, 0.5, tsdf.shape[0])
    ys = np.linspace(-0.5, 0.5, tsdf.shape[1])
    zs = np.linspace(-0.5, 0.5, tsdf.shape[2])
    tsdf_func = rgi((xs, ys, zs), tsdf)

    modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]
    points = self.get_random_points(tsdf)
    values = tsdf_func(points)
    t_loc, t_scale = self.get_transform(modelname)

    occupancy = (values <= 0.)
    out_file = self.get_outpath(filepath)
    np.savez(out_file, points=points, occupancy=occupancy, loc=t_loc, scale=t_scale)
    print('[Data] wrote %s (%f seconds)' % (out_file, timer.elapsed()))
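# Sketch (illustration only), assuming `rgi` is
# scipy.interpolate.RegularGridInterpolator: the TSDF grid becomes a trilinear
# lookup over [-0.5, 0.5]^3, and points with an interpolated value <= 0 are
# inside the surface. The toy field below is a sphere of radius 0.3.
import numpy as np
from scipy.interpolate import RegularGridInterpolator as rgi

n = 32
xs = np.linspace(-0.5, 0.5, n)
ys = np.linspace(-0.5, 0.5, n)
zs = np.linspace(-0.5, 0.5, n)
X, Y, Z = np.meshgrid(xs, ys, zs, indexing='ij')
tsdf = np.sqrt(X ** 2 + Y ** 2 + Z ** 2) - 0.3  # negative inside the sphere

tsdf_func = rgi((xs, ys, zs), tsdf)
points = np.array([[0.0, 0.0, 0.0],    # inside
                   [0.45, 0.0, 0.0]])  # outside
occupancy = (tsdf_func(points) <= 0.)
print(occupancy)  # [ True False]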
def run_fuse(self, filepath):
    """
    Run fusion.
    """
    timer = common.Timer()
    Rs = self.get_views()

    # As rendering might be slower, we wait for rendering to finish.
    # This allows running rendering and fusion in parallel (more or less).
    depths = common.read_hdf5(filepath)

    timer.reset()
    tsdf = self.fusion(depths, Rs)
    # Pad with a large positive ("outside") value to ensure that the final
    # mesh is indeed watertight.
    tsdf = np.pad(tsdf, 1, 'constant', constant_values=1e6)

    vertices, triangles = libmcubes.marching_cubes(-tsdf, 0)
    # Remove padding offset
    vertices -= 1
    # Normalize to [-0.5, 0.5]^3 cube
    vertices /= self.options.resolution
    vertices -= 0.5

    modelname = os.path.splitext(os.path.splitext(os.path.basename(filepath))[0])[0]
    t_loc, t_scale = self.get_transform(modelname)
    vertices = t_loc + t_scale * vertices

    off_file = self.get_outpath(filepath)
    libmcubes.export_off(vertices, triangles, off_file)
    print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))
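# Sketch (illustration only) of why the padding above closes the mesh: a shape
# that touches the volume border has no zero crossing there, so marching cubes
# would leave it open. Surrounding the grid with one voxel of large positive
# ("outside") values forces a crossing on every side; the resulting vertices
# are shifted by the pad width, hence the `vertices -= 1` before normalizing.
import numpy as np

tsdf = np.full((4, 4, 4), -1.0)  # toy field: "inside" all the way to the border
padded = np.pad(tsdf, 1, 'constant', constant_values=1e6)
print(tsdf.shape, padded.shape)  # (4, 4, 4) (6, 6, 6)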
def __call__(self):
    self.treatment = read_hdf5(self.treatment_file)
    self.control = read_hdf5(self.control_file)
    name_counts = np.unique(np.concatenate(
        [self.treatment['name'], self.control['name']]), return_counts=True)
    common_names = name_counts[0][name_counts[1] == 2]
    if self.names is None:
        self.names = common_names[:self.max_plots]

    from matplotlib.backends.backend_pdf import PdfPages
    self.logger.info('open pdf file: {}'.format(self.outfile))
    make_dir(os.path.dirname(self.outfile))
    with PdfPages(self.outfile) as pdf:
        for seqname in self.names:
            self.logger.info('plot track: {}'.format(seqname))
            pdf.savefig(self.plot_tracks(seqname))
def analyze(self):
    self.icshape = read_hdf5(self.icshape_file)
    self.background = read_hdf5(self.background_file)
    self.target = read_hdf5(self.target_file)
    name_counts = np.unique(np.concatenate([
        self.icshape['name'], self.background['name'], self.target['name']
    ]), return_counts=True)
    common_names = name_counts[0][name_counts[1] == 3]
    if self.names is None:
        self.names = common_names[:self.max_plots]

    from matplotlib.backends.backend_pdf import PdfPages
    self.logger.info('open pdf file: {}'.format(self.outfile))
    make_dir(os.path.dirname(self.outfile))
    with PdfPages(self.outfile) as pdf:
        for seqname in self.names:
            self.logger.info('plot track: {}'.format(seqname))
            pdf.savefig(self.plot_tracks(seqname))
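# Sketch (illustration only) of the intersection idiom used in the two
# functions above: assuming each name occurs at most once per file, a name's
# count in the concatenated array equals the number of files containing it,
# so `counts == 2` (or `== 3` for three files) selects the common names.
# The name arrays below are made up.
import numpy as np

treatment_names = np.array(['tx1', 'tx2', 'tx3'])
control_names = np.array(['tx2', 'tx3', 'tx4'])
names, counts = np.unique(
    np.concatenate([treatment_names, control_names]), return_counts=True)
print(names[counts == 2])  # ['tx2' 'tx3']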
""" timer.reset() mesh = common.Mesh.from_off(scaled_off_path) depths = fusion_tools.render(mesh, Rs) depth_file_path = os.path.join(options.depth_dir, os.path.basename(filepath) + '.h5') common.write_hdf5(depth_file_path, np.array(depths)) print('----- [Depth] wrote %s (%f seconds)' % (depth_file_path, timer.elapsed())) """ Performs TSDF fusion. As rendering might be slower, we wait for rendering to finish. This allows to run rendering and fusing in parallel (more or less). """ depths = common.read_hdf5(depth_file_path) timer.reset() tsdf = fusion_tools.fusion(depths, Rs) tsdf = tsdf[0] vertices, triangles = libmcubes.marching_cubes(-tsdf, 0) vertices /= options.resolution vertices -= 0.5 # vertices = vertices[:, [2, 1, 0]] # print(vertices.shape) off_file = os.path.join(options.out_dir, '0_scaled_' + ntpath.basename(filepath)) libmcubes.export_off(vertices, triangles, off_file)
def run_fuse(self):
    """
    Run fusion.
    """
    assert os.path.exists(self.options.depth_dir)
    common.makedir(self.options.out_dir)
    common.makedir('4_tsdf')

    files = self.read_directory(self.options.depth_dir)
    timer = common.Timer()
    Rs = self.get_views()

    for filepath in files:
        # As rendering might be slower, we wait for rendering to finish.
        # This allows running rendering and fusion in parallel (more or less).
        depths = common.read_hdf5(filepath)

        timer.reset()
        tsdf = self.fusion(depths, Rs)
        tsdf = tsdf[0]

        vertices, triangles = libmcubes.marching_cubes(-tsdf, 0)
        # vertices, triangles, _, _ = measure.marching_cubes_lewiner(-tsdf, 0)
        print(tsdf.shape)
        np.save(os.path.join('4_tsdf', ntpath.basename(filepath)[:-3]).replace('.off', ''), -tsdf)

        vertices /= self.options.resolution
        vertices -= 0.5

        off_file = os.path.join(self.options.out_dir, ntpath.basename(filepath)[:-3])
        libmcubes.export_off(vertices, triangles, off_file)
        print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))

        # Undo the normalization: restore the original sizes stored alongside
        # the scaled model.
        mesh = common.Mesh.from_off(off_file)
        s_t = scipy.io.loadmat(off_file.replace('2_watertight', '1_s_t').replace('.off', '.mat'))
        # scales_ori = (1. / s_t['scales'][0][0], 1. / s_t['scales'][0][1], 1. / s_t['scales'][0][2])
        # translation_ori = (-s_t['translation'][0][0], -s_t['translation'][0][1], -s_t['translation'][0][2])
        sizes_ori = (s_t['sizes'][0][0], s_t['sizes'][0][1], s_t['sizes'][0][2])
        # print(scales, translation)

        min_vals, max_vals = mesh.extents()
        total_min = np.min(np.array(min_vals))
        total_max = np.max(np.array(max_vals))

        # Set the center (although this should usually be the origin already).
        centers = (
            (min_vals[0] + max_vals[0]) / 2,
            (min_vals[1] + max_vals[1]) / 2,
            (min_vals[2] + max_vals[2]) / 2
        )
        # Scale all dimensions equally.
        sizes = (
            total_max - total_min,
            total_max - total_min,
            total_max - total_min
        )
        translation = (-centers[0], -centers[1], -centers[2])

        mesh.translate(translation)
        mesh.scale((sizes_ori[0] / sizes[0], sizes_ori[1] / sizes[1], sizes_ori[2] / sizes[2]))
        mesh.to_off(off_file)
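# Sketch (illustration only) of the center-then-rescale arithmetic above, on a
# toy bounding box; `sizes_ori` is a made-up stand-in for the values read from
# the 1_s_t .mat file.
import numpy as np

vertices = np.array([[0.2, 0.2, 0.2],   # toy mesh: extents [0.2, 0.4] per axis
                     [0.4, 0.4, 0.4]])
min_vals = vertices.min(axis=0)
max_vals = vertices.max(axis=0)
centers = (min_vals + max_vals) / 2
side = max_vals.max() - min_vals.min()  # uniform size, as in run_fuse()

sizes_ori = np.array([2.0, 2.0, 2.0])   # hypothetical original sizes
vertices = (vertices - centers) * (sizes_ori / side)
print(vertices)  # [[-1. -1. -1.]
                 #  [ 1.  1.  1.]]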