def mesh_binary_array(path_str, step_size):
    """
    Carries out marching cubes meshing on the filled binary array with a given step_size.

    Inputs:
        path_str: path to the root image folder, as a string
        step_size: step size for marching cubes
    Returns:
        None
    Outputs:
        .obj file saved in the root image folder
    """
    pathName = Path(str(path_str))
    globbed = pathName.glob('*filled.npy')
    array_list = [x for x in globbed if x.is_file()]
    if str(array_list[0].name).startswith('full'):
        meshName = 'fullSeries_particleMesh.obj'
    elif str(array_list[0].name).startswith('half'):
        meshName = 'halfSeries_particleMesh.obj'
    filledArray = np.load(array_list[0]).astype(np.uint8)
    verts, faces, _, _ = marching_cubes(filledArray,
                                        level=0.5,
                                        spacing=(1, 1, 1),
                                        allow_degenerate=False,
                                        step_size=step_size,
                                        gradient_direction='ascent')
    mcubes.export_obj(verts, faces, str(pathName / meshName))
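# Hedged usage sketch (not from the source): builds a tiny synthetic filled array
# in a temporary folder and meshes it. The folder name and array contents are
# illustrative only; assumes numpy, pathlib.Path, skimage.measure.marching_cubes
# and mcubes are imported as used above.
import tempfile
demo_dir = Path(tempfile.mkdtemp())
demo = np.zeros((32, 32, 32), dtype=np.uint8)
demo[8:24, 8:24, 8:24] = 1                     # a solid cube as the "particle"
np.save(demo_dir / 'fullSeries_filled.npy', demo)
mesh_binary_array(str(demo_dir), step_size=2)  # writes fullSeries_particleMesh.obj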
def convert_voxel(file):
    """
    Converts a .mat file containing a voxel grid into a shape-layer representation.
    """
    import mcubes
    d = sio.loadmat(file)
    voxel = d['voxel']
    shape_layer = encode_shape(voxel, args.numlayers, id1, id2, id3)
    decoded = decode_shape(shape_layer, id1, id2, id3)
    print('%s: %.3f ' % (file, np.sum(np.logical_and(voxel, decoded)) / np.sum(np.logical_or(voxel, decoded))))

    if False:
        output_dir = './test_hsp'
        filename = file.split('/')[-1]
        n_x, n_y, n_z = decoded.shape
        decoded = np.flip(np.flip(np.flip(decoded, 2), 1), 0)
        occ_padded = np.pad(decoded, 1, 'constant', constant_values=0)
        vertices, triangles = mcubes.marching_cubes(occ_padded, 0.5)
        # vertices -= 0.5
        # vertices -= 1
        # vertices /= np.array([n_x-1, n_y-1, n_z-1])
        # vertices = vertices - 0.5
        vertices = (vertices - 0.5) / n_x - 0.5
        print(vertices.shape, triangles.shape)
        mcubes.export_obj(vertices, triangles, output_dir + "/" + filename[:-8] + ".obj")
def ExportMesh(tnsFileName, meshFileName):
    tensor = ReadTNS(tnsFileName)

    # Extract the isosurface
    vertices, triangles = mcubes.marching_cubes(tensor, 0.5)

    # Export the result
    mcubes.export_obj(vertices, triangles, meshFileName)
    print("Mesh file exported")
def write_isosurface(voxel_data, id):
    # voxel_data: a 64^3 numpy array of real values in (0, 1), not boolean values
    print(voxel_data.shape)
    vertices, triangles = mcubes.marching_cubes(voxel_data, 0.2)
    mcubes.export_obj(vertices, triangles, 'demo/' + id + ".obj")
    data = voxel_data.reshape(-1, 1)
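# Hedged usage sketch (not from the source): the Gaussian-blob volume below is
# illustrative only; it just provides a 64^3 array of real values in (0, 1] so
# that the 0.2 isosurface exists. Assumes numpy and mcubes are imported and a
# 'demo/' directory exists.
xx, yy, zz = np.mgrid[:64, :64, :64] / 63.0
blob = np.exp(-((xx - 0.5) ** 2 + (yy - 0.5) ** 2 + (zz - 0.5) ** 2) / 0.05)
write_isosurface(blob, "demo_blob")  # writes demo/demo_blob.obj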
def test_export():
    u = np.zeros((10, 10, 10))
    u[2:-2, 2:-2, 2:-2] = 1.0
    vertices, triangles = mcubes.marching_cubes(u, 0.5)

    mcubes.export_obj(vertices, triangles, "output/test.obj")
    mcubes.export_off(vertices, triangles, "output/test.off")
    mcubes.export_mesh(vertices, triangles, "output/test.dae")
def save_search_result(self):
    import mcubes
    super(OpenGLSearchUI, self).save_search_result()
    model_name = str(self.output_n) + '.obj'
    model_float = self.current_data
    thres = 0.5
    vertices, triangles = mcubes.marching_cubes(model_float, thres)
    mcubes.export_obj(vertices, triangles, self.output_base_path + '/' + model_name)
def process_MSD(root, task='Heart2', num_surf=5):
    img_list, gt_list = get_MSD_list(root, task)
    save_dir = os.path.join(root, task)
    n = len(img_list)
    for i in range(n):
        print('Process img: {}'.format(img_list[i]))
        img = ants.image_read(img_list[i])
        gt = ants.image_read(gt_list[i])
        # iso-resample
        img_ = iso_resample(img, [1.5, 1.5, 1.5], islabel=False)
        gt_ = iso_resample(gt, [1.5, 1.5, 1.5], islabel=True)
        # crop
        img_np = img_.numpy()
        gt_np = gt_.numpy()
        img_np = crop(img_np)
        gt_np = crop(gt_np)
        # normalize
        img_np = normalize(img_np)
        # sample initial surfaces
        for j in tqdm(range(num_surf)):
            gt_dfm = elasticdeform.deform_random_grid(gt_np, 4, 4, 0)
            gt_dfm_smooth = mcubes.smooth_gaussian(gt_dfm, 1)
            v, e = mcubes.marching_cubes(gt_dfm_smooth, 0)
            mcubes.export_obj(
                v, e,
                os.path.join(
                    save_dir, 'surfs_unaligned',
                    '{:0>2d}_{:0>2d}surf_init.obj'.format(i + 1, j + 1)))
        # write image
        img_nii = ants.from_numpy(img_np, img_.origin, img_.spacing, img_.direction,
                                  img_.has_components, img_.is_rgb)
        gt_nii = ants.from_numpy(gt_np, gt_.origin, gt_.spacing, gt_.direction,
                                 gt_.has_components, gt_.is_rgb)
        ants.image_write(
            img_nii,
            os.path.join(save_dir, 'images', '{:0>2d}img.nii'.format(i + 1)))
        ants.image_write(
            gt_nii,
            os.path.join(save_dir, 'labels', '{:0>2d}gt.nii'.format(i + 1)))
        gt_smooth = mcubes.smooth_gaussian(gt_np, 1)
        v, e = mcubes.marching_cubes(gt_smooth, 0)
        mcubes.export_obj(
            v, e,
            os.path.join(save_dir, 'surfs_unaligned',
                         '{:0>2d}surf.obj'.format(i + 1)))
def save_search_result(self):
    import mcubes
    super(OpenGLRandomEvaluationUI, self).save_search_result()
    for i in range(len(self.best_choices)):
        model_name = self.current_output_path + '/' + str(i) + '.obj'
        model_float = self.best_choices[i]['data']
        thres = 0.5
        vertices, triangles = mcubes.marching_cubes(model_float, thres)
        mcubes.export_obj(vertices, triangles, model_name)
    model_name = self.current_output_path + '/target.obj'
    model_float = self.target_data
    thres = 0.5
    vertices, triangles = mcubes.marching_cubes(model_float, thres)
    mcubes.export_obj(vertices, triangles, model_name)
def export_obj(filename, cutout, level=0):
    """
    Converts a dense annotation to an .obj file using marching cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run marching cubes

    Returns:
        None
    """
    if ".obj" not in filename:
        filename = filename + ".obj"

    vs, fs = mcubes.marching_cubes(cutout, level)
    mcubes.export_obj(vs, fs, filename)
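# Hedged usage example (not from the source): exports a synthetic 32^3 sphere
# "annotation". The array and output name are placeholders; only numpy and
# mcubes are assumed, as above.
xx, yy, zz = np.mgrid[:32, :32, :32]
sphere = (((xx - 16) ** 2 + (yy - 16) ** 2 + (zz - 16) ** 2) < 10 ** 2).astype(np.float32)
export_obj("sphere_demo", sphere, level=0.5)  # ".obj" is appended automatically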
def marching_cube(resolution, bounding_box, model_no):
    u = np.load('leabels.npy')
    print(u.shape)

    # Extract the 0.1-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0.1)
    print(np.max(vertices[:, 2]))
    print(bounding_box, resolution)
    for i in range(vertices.shape[0]):
        for j in range(3):
            vertices[i, j] = vertices[i, j] / resolution * (
                bounding_box[2 * j + 1] - bounding_box[2 * j]) + bounding_box[2 * j]
    print(np.max(vertices[:, 1]))
    # mcubes.export_mesh(vertices, triangles, "./dae/sphere_" + str(model_no) + ".dae", "MySphere")
    mcubes.export_obj(vertices, triangles, "./obj/sphere_" + str(model_no) + ".obj")
    return vertices
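# A vectorized equivalent of the per-vertex rescaling loop above (a sketch, not
# from the source): maps marching cubes grid coordinates in [0, resolution] onto
# a bounding box given as [xmin, xmax, ymin, ymax, zmin, zmax]. The helper name
# is hypothetical; assumes numpy as np.
def rescale_vertices(vertices, resolution, bounding_box):
    lo = np.asarray(bounding_box[0::2], dtype=np.float64)  # (xmin, ymin, zmin)
    hi = np.asarray(bounding_box[1::2], dtype=np.float64)  # (xmax, ymax, zmax)
    return vertices / resolution * (hi - lo) + lo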
def main():
    logging.basicConfig(level=logging.INFO)

    args = parse_args()
    filenames = (args.x, args.y, args.z)

    logging.info("Loading images...")
    images = [imread_nochannels(i) > 128 for i in filenames]

    if not args.find_best_transform:
        # Use the user-given transformation
        transf_x = Transformation(args.mirrorx, args.rotx)
        transf_y = Transformation(args.mirrory, args.roty)
        transf_z = Transformation(args.mirrorz, args.rotz)
        transforms = (transf_x, transf_y, transf_z)

        logging.info("Building trip-let volume...")
        volume, mistakes, _ = transform_and_build_volume(transforms, images)
    else:
        logging.info("Finding the best transformation...")
        volume, mistakes, best_transformation = find_best_transform(images)
        logging.info("Best transformation: %s", best_transformation)

    num_mistakes = sum(np.sum(i) for i in mistakes)
    if num_mistakes > 0:
        logging.warning("%d reprojection errors", num_mistakes)

    # Smoothing
    volume, isovalue = smooth(volume, args.smoothing)

    # Marching cubes
    logging.info("Marching cubes...")
    vertices, triangles = mcubes.marching_cubes(volume, isovalue)

    # Center the mesh
    if not args.no_center:
        vertices = vertices - vertices.mean(axis=0)[None]

    # Export the mesh
    logging.info("Exporting...")
    mcubes.export_obj(vertices, triangles, args.o)

    logging.info("Done.")
def test_z(self, config, batch_z, dim):
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")
        return

    dima = self.test_size
    multiplier = int(dim / dima)
    multiplier2 = multiplier * multiplier
    multiplier3 = multiplier * multiplier * multiplier

    # get coords 256
    aux_x = np.zeros([dima, dima, dima], np.int32)
    aux_y = np.zeros([dima, dima, dima], np.int32)
    aux_z = np.zeros([dima, dima, dima], np.int32)
    for i in range(dima):
        for j in range(dima):
            for k in range(dima):
                aux_x[i, j, k] = i * multiplier
                aux_y[i, j, k] = j * multiplier
                aux_z[i, j, k] = k * multiplier
    coords = np.zeros([multiplier3, dima, dima, dima, 3], np.float32)
    for i in range(multiplier):
        for j in range(multiplier):
            for k in range(multiplier):
                coords[i * multiplier2 + j * multiplier + k, :, :, :, 0] = aux_x + i
                coords[i * multiplier2 + j * multiplier + k, :, :, :, 1] = aux_y + j
                coords[i * multiplier2 + j * multiplier + k, :, :, :, 2] = aux_z + k
    coords = (coords + 0.5) / dim * 2.0 - 1.0
    coords = np.reshape(coords, [multiplier3, self.batch_size, 3])

    for t in tqdm(range(batch_z.shape[0])):
        model_float = np.zeros([dim + 2, dim + 2, dim + 2], np.float32)
        for i in tqdm(range(multiplier)):
            for j in range(multiplier):
                for k in range(multiplier):
                    # print(t, i, j, k)
                    minib = i * multiplier2 + j * multiplier + k
                    model_out = self.sess.run(self.zG,
                                              feed_dict={
                                                  self.z_vector: batch_z[t:t + 1],
                                                  self.point_coord: coords[minib],
                                              })
                    model_float[aux_x + i + 1, aux_y + j + 1, aux_z + k + 1] = np.reshape(
                        model_out, [dima, dima, dima])

        img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8)
        img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8)
        img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8)
        cv2.imwrite(config.sample_dir + "/" + str(t) + "_1t.png", img1)
        cv2.imwrite(config.sample_dir + "/" + str(t) + "_2t.png", img2)
        cv2.imwrite(config.sample_dir + "/" + str(t) + "_3t.png", img3)

        thres = 0.5
        vertices, triangles = mcubes.marching_cubes(model_float, thres)
        # mcubes.export_mesh(vertices, triangles, config.sample_dir + "/" + "out" + str(t) + ".dae", str(t))
        mcubes.export_obj(
            vertices, triangles,
            config.sample_dir + "/" + "out" + str(t) + ".obj")
def test(self, config):
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")
        return

    dima = self.test_size
    dim = self.real_size
    multiplier = int(dim / dima)
    multiplier2 = multiplier * multiplier

    for t in range(16):
        model_float = np.zeros(
            [self.real_size + 2, self.real_size + 2, self.real_size + 2],
            np.float32)
        batch_voxels = self.data_voxels[t:t + 1]
        for i in range(multiplier):
            for j in range(multiplier):
                for k in range(multiplier):
                    minib = i * multiplier2 + j * multiplier + k
                    model_out = self.sess.run(self.sG,
                                              feed_dict={
                                                  self.vox3d: batch_voxels,
                                                  self.point_coord: self.coords[minib],
                                              })
                    model_float[self.aux_x + i + 1, self.aux_y + j + 1,
                                self.aux_z + k + 1] = np.reshape(
                                    model_out,
                                    [self.test_size, self.test_size, self.test_size])

        img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8)
        img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8)
        img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8)
        cv2.imwrite(config.sample_dir + "/ae/" + str(t) + "_1t.png", img1)
        cv2.imwrite(config.sample_dir + "/ae/" + str(t) + "_2t.png", img2)
        cv2.imwrite(config.sample_dir + "/ae/" + str(t) + "_3t.png", img3)

        thres = 0.5

        # Generated sample
        vertices, triangles = mcubes.marching_cubes(model_float, thres)
        # mcubes.export_mesh(vertices, triangles, config.sample_dir + "/" + "out" + str(t) + ".dae", str(t))
        mcubes.export_obj(
            vertices, triangles,
            config.sample_dir + "/ae/" + "out" + str(t) + ".obj")

        # Original sample
        batch_voxels = batch_voxels[0, ..., 0]
        vertices, triangles = mcubes.marching_cubes(batch_voxels, thres)
        mcubes.export_obj(
            vertices, triangles,
            config.sample_dir + "/ae/" + "out" + str(t) + "_original" + ".obj")

        print("[sample]")
def test_interp(self, config):
    could_load, checkpoint_counter = self.load(self.checkpoint_dir)
    if could_load:
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")
        return

    interp_size = 8
    idx1 = 0
    idx2 = 3

    batch_voxels1 = self.data_voxels[idx1:idx1 + 1]
    batch_voxels2 = self.data_voxels[idx2:idx2 + 1]

    model_z1 = self.sess.run(self.sE, feed_dict={self.vox3d: batch_voxels1})
    model_z2 = self.sess.run(self.sE, feed_dict={self.vox3d: batch_voxels2})

    batch_z = np.zeros([interp_size, self.z_dim], np.float32)
    for i in range(interp_size):
        batch_z[i] = model_z2 * i / (interp_size - 1) + model_z1 * (
            interp_size - 1 - i) / (interp_size - 1)

    dima = self.test_size
    dim = self.real_size
    multiplier = int(dim / dima)
    multiplier2 = multiplier * multiplier

    for t in range(interp_size):
        model_float = np.zeros(
            [self.real_size + 2, self.real_size + 2, self.real_size + 2],
            np.float32)
        for i in range(multiplier):
            for j in range(multiplier):
                for k in range(multiplier):
                    minib = i * multiplier2 + j * multiplier + k
                    model_out = self.sess.run(self.zG,
                                              feed_dict={
                                                  self.z_vector: batch_z[t:t + 1],
                                                  self.point_coord: self.coords[minib],
                                              })
                    model_float[self.aux_x + i + 1, self.aux_y + j + 1,
                                self.aux_z + k + 1] = np.reshape(
                                    model_out,
                                    [self.test_size, self.test_size, self.test_size])

        img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8)
        img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8)
        img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8)
        cv2.imwrite(config.sample_dir + "/interp/" + str(t) + "_1t.png", img1)
        cv2.imwrite(config.sample_dir + "/interp/" + str(t) + "_2t.png", img2)
        cv2.imwrite(config.sample_dir + "/interp/" + str(t) + "_3t.png", img3)

        thres = 0.5
        vertices, triangles = mcubes.marching_cubes(model_float, thres)
        # mcubes.export_mesh(vertices, triangles, config.sample_dir + "/interp/" + "out" + str(t) + ".dae", str(t))
        mcubes.export_obj(
            vertices, triangles,
            config.sample_dir + "/interp/" + "out" + str(t) + ".obj")

        print("[sample interpolation]")
def worker(cat_mod):
    t0 = time.time()
    cat, mod = cat_mod

    # load loc, scale
    pointcloud_file = os.path.join(raw_pointcloud_dir, cat, mod, 'pointcloud.npz')
    pointcloud = np.load(pointcloud_file)
    raw_loc = pointcloud['loc']
    raw_scale = pointcloud['scale']

    # load upsampled skeleton points and apply scale and translation
    skeleton_file = os.path.join(upsample_skeleton_dir, cat, mod + '.ply')
    skeleton = PlyData.read(skeleton_file)['vertex'].data
    points = skeleton.view(np.dtype('float32')).reshape(-1, 7)[:, 0:3]
    points = ((points * raw_scale) + raw_loc).astype('f4')
    print('raw_scale', raw_scale, 'raw_loc', raw_loc)
    print('the number of points:', len(points))
    points = np.require(points, 'float32', 'C')

    # voxelization
    voxel_data = points * FLAGS.vx_res + (FLAGS.vx_res + 1.0) / 2.0
    # discard voxels that fall outside the vx_res grid dimensions
    xyz = (voxel_data.T).astype(int)
    valid_ix = ~np.any((xyz < 0) | (xyz >= FLAGS.vx_res), 0)
    xyz = xyz[:, valid_ix]
    voxel_data = np.zeros((FLAGS.vx_res, FLAGS.vx_res, FLAGS.vx_res), dtype='bool')
    voxel_data[tuple(xyz)] = True
    print('the number of voxels before maxpool:', voxel_data.sum())

    # max-pooling for coarsening
    with torch.no_grad():
        voxel_data = torch.from_numpy(voxel_data[None, None, :, :, :].astype('float32'))
        voxel_data = voxel_data.cuda()
        voxel_max = maxpool3d(voxel_data)
        voxel_max = torch.squeeze(voxel_max.data).cpu().numpy()
    print('the number of voxels after maxpool:', voxel_max.sum())

    # hole filling
    shape_layer = encode_shape(voxel_max, num_layers=1, id1=id1, id2=id2, id3=id3)
    voxels = decode_shape(shape_layer, id1=id1, id2=id2, id3=id3)
    print('the number of voxels after hole filling:', voxels.sum())

    outdir = os.path.join(outroot, cat, mod)
    if not os.path.exists(outdir):
        os.makedirs(outdir)

    if FLAGS.saveobj:
        # output .obj
        outfile = os.path.join(outdir, '%d_max_fill.obj' % FLAGS.vx_res)
        vertices, faces = mcubes.marching_cubes(voxels, 0)
        mcubes.export_obj(vertices, faces, outfile)
        print(outfile, vertices.shape, faces.shape, time.time() - t0)
    else:
        # output .h5
        outfile = os.path.join(outdir, '%d_max_fill.h5' % FLAGS.vx_res)
        f1 = h5py.File(outfile, 'w')
        f1.create_dataset('occupancies', data=voxels.astype('bool'),
                          compression='gzip', compression_opts=4)
        f1.close()
        print(outfile, voxels.shape, time.time() - t0)
def mise_voxel(get_sdf, bound, initial_voxel_resolution, final_voxel_resolution,
               voxel_size, centroid_diff, save_path, verbose=False):
    '''
    get_sdf: maps query points to SDF values (everything else, e.g. the point
        cloud/embedding, is assumed to be embedded in the function already).
    bound: sample within [-bound, bound] in x, y, z.
    initial/final_voxel_resolution: powers of two giving the voxel resolutions to evaluate at.
    voxel_size: size of each voxel (at the final resolution), determined by the view.
    centroid_diff: offset, if needed.
    '''
    # Number of points to evaluate in a single pass.
    sdf_count_ = 8192

    # Active voxels: voxels whose grid points we want to evaluate.
    active_voxels = []

    # Full voxelization.
    voxelized = np.zeros((final_voxel_resolution, final_voxel_resolution, final_voxel_resolution),
                         dtype=np.float32)

    # Intermediate voxelization. This holds the grid points of the voxels
    # (so it is resolution + 1 in each dimension).
    partial_voxelized = None

    # Initialize active voxels to all voxels at the initial resolution.
    for x in range(initial_voxel_resolution):
        for y in range(initial_voxel_resolution):
            for z in range(initial_voxel_resolution):
                active_voxels.append([x, y, z])
    active_voxels = np.array(active_voxels)

    # Main loop that doubles the resolution each iteration.
    current_voxel_resolution = initial_voxel_resolution
    while current_voxel_resolution <= final_voxel_resolution:
        # print(current_voxel_resolution)

        # Set up the voxelization at this resolution.
        partial_voxelized = np.zeros((current_voxel_resolution + 1,
                                      current_voxel_resolution + 1,
                                      current_voxel_resolution + 1), dtype=np.float32)

        # Get the grid points for this resolution.
        grid_pts = get_grid_points(active_voxels, current_voxel_resolution, bound)
        try:
            pt_splits = np.array_split(grid_pts, grid_pts.shape[0] // sdf_count_)
        except ValueError:
            pt_splits = [grid_pts]
        # print(len(pt_splits))

        # For all points sample the SDF given the point cloud.
        for pts_ in pt_splits:
            sdf_ = get_sdf(pts_)
            for pt_, sdf in zip(np.reshape(pts_, (-1, 3)), np.reshape(sdf_, (-1,))):
                if sdf <= 0.0:
                    # Convert points into grid voxels and set.
                    x_ = int(round(((pt_[0] + bound) / (2 * bound)) * float(current_voxel_resolution)))
                    y_ = int(round(((pt_[1] + bound) / (2 * bound)) * float(current_voxel_resolution)))
                    z_ = int(round(((pt_[2] + bound) / (2 * bound)) * float(current_voxel_resolution)))
                    partial_voxelized[x_, y_, z_] = 1.0

        # Determine filled and active voxels.
        new_active_voxels = []
        for x, y, z in active_voxels:
            if is_occupied(x, y, z, partial_voxelized):
                # Set all associated voxels on in the full voxelization.
                voxels_per_voxel = final_voxel_resolution // current_voxel_resolution

                # Set all corresponding voxels at the full resolution to on.
                for x_ in range(voxels_per_voxel * x, voxels_per_voxel * x + voxels_per_voxel):
                    for y_ in range(voxels_per_voxel * y, voxels_per_voxel * y + voxels_per_voxel):
                        for z_ in range(voxels_per_voxel * z, voxels_per_voxel * z + voxels_per_voxel):
                            voxelized[x_, y_, z_] = 1.0
            elif is_active(x, y, z, partial_voxelized):
                # At the final resolution, just set it as active.
                if current_voxel_resolution == final_voxel_resolution:
                    voxelized[x, y, z] = 1.0
                    continue

                # Scale the voxel position to match the doubled resolution.
                x_base = 2 * x
                y_base = 2 * y
                z_base = 2 * z

                # Add new voxels for the higher resolution: each voxel splits into 8.
                new_active_voxels.append([x_base, y_base, z_base])
                new_active_voxels.append([x_base, y_base, z_base + 1])
                new_active_voxels.append([x_base, y_base + 1, z_base])
                new_active_voxels.append([x_base, y_base + 1, z_base + 1])
                new_active_voxels.append([x_base + 1, y_base, z_base])
                new_active_voxels.append([x_base + 1, y_base, z_base + 1])
                new_active_voxels.append([x_base + 1, y_base + 1, z_base])
                new_active_voxels.append([x_base + 1, y_base + 1, z_base + 1])

        active_voxels = np.array(new_active_voxels)
        current_voxel_resolution = current_voxel_resolution * 2
    # print("Done with extraction.")

    # Padding to prevent holes if the surface goes up to the edge.
    voxels = voxelized
    voxelized = np.pad(voxelized, ((1, 1), (1, 1), (1, 1)), mode='constant')

    # Mesh with mcubes.
    vertices, triangles = mcubes.marching_cubes(voxelized, 0)
    vertices = vertices * voxel_size

    # Center the mesh.
    vertices[:, 0] -= voxel_size * ((final_voxel_resolution / 2) + 1)
    vertices[:, 1] -= voxel_size * ((final_voxel_resolution / 2) + 1)
    vertices[:, 2] -= voxel_size * ((final_voxel_resolution / 2) + 1)
    vertices[:, 0] -= centroid_diff[0]
    vertices[:, 1] -= centroid_diff[1]
    vertices[:, 2] -= centroid_diff[2]

    # save_file = os.path.join(save_path, view + '.off')
    mcubes.export_obj(vertices, triangles, save_path)

    # Display the mesh.
    if verbose:
        gen_mesh = trimesh.load(save_path)
        gen_mesh.show()

    return None  # convert_to_sparse_voxel_grid(voxels, threshold=0.5)
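# Hedged usage sketch (not from the source): an analytic sphere SDF stands in for
# the learned SDF, and the resolutions, output path, and radius are illustrative.
# Assumes numpy and mcubes are imported and the helpers used by mise_voxel
# (get_grid_points, is_occupied, is_active) are available as in the source file.
def sphere_sdf(pts):
    pts = np.reshape(pts, (-1, 3))
    return np.linalg.norm(pts, axis=1) - 0.5  # negative inside a radius-0.5 sphere

mise_voxel(sphere_sdf, bound=1.0,
           initial_voxel_resolution=16, final_voxel_resolution=64,
           voxel_size=2.0 / 64, centroid_diff=np.zeros(3),
           save_path="sphere_mise.obj")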
            combined, (1, 2, 0))  # better orientation of final array for stl generation
        np.save(str(pathName / filledArrayName), reshaped.astype('uint8'))
        erosion = binary_erosion(reshaped).astype(reshaped.dtype)
        shell = (reshaped != erosion).astype(reshaped.dtype)
        verts, faces, normals, values = marching_cubes(reshaped.astype(np.uint8),
                                                       level=0.5,
                                                       spacing=(1, 1, 1),
                                                       allow_degenerate=False,
                                                       step_size=10,
                                                       gradient_direction='ascent')
        mcubes.export_obj(verts, faces, str(pathName / meshName))
        # particleMesh.save(str(pathName / meshName))
        np.save(str(pathName / shellArrayName), shell.astype('uint8'))
    else:
        filledArray = np.load(str(pathName / (filledArrayName + '.npy'))).astype(np.uint8)
        # erosion = binary_erosion(filledArray).astype(np.uint8)
        # shell = (filledArray != erosion).astype(np.uint8)
        verts, faces, normals, values = marching_cubes(filledArray.astype(np.uint8),
                                                       level=0.5,
                                                       spacing=(1, 1, 1),
                                                       allow_degenerate=False,