def on_tri_d_run_clicked(self):
    """
    Export the liver and tumor segmentations as 3D meshes (Collada .dae).
    """
    thickness = 1
    liver = self.liver
    liver_recon = np.zeros([thickness * liver.shape[0], 512, 512])
    for i in range(liver.shape[0]):
        for s in range(thickness):
            # repeat each slice `thickness` times along the z-axis
            liver_recon[thickness * i + s] = liver[i]
    vertices, triangles = mcubes.marching_cubes(liver_recon, 0)
    mcubes.export_mesh(vertices, triangles, "liver.dae", "liver")

    tumor = self.tumor
    tumor_recon = np.zeros([thickness * tumor.shape[0], 512, 512])
    for i in range(tumor.shape[0]):
        for s in range(thickness):
            tumor_recon[thickness * i + s] = tumor[i]
    vertices_2, triangles_2 = mcubes.marching_cubes(tumor_recon, 0)
    mcubes.export_mesh(vertices_2, triangles_2, "tumor.dae", "tumor")
    QMessageBox.information(self, "Info", "3D model exported successfully")
def test_dae3(self, config): could_load, checkpoint_counter = self.load(self.checkpoint_dir) if could_load: print(" [*] Load SUCCESS") else: print(" [!] Load failed...") return dima = self.test_size dim = self.real_size multiplier = int(dim / dima) multiplier2 = multiplier * multiplier for t in range(config.start, min(len(self.data_voxels), config.end)): model_float = np.zeros( [self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32) batch_voxels = self.data_voxels[t:t + 1] out_m, out_b = self.sess.run([self.sE_m, self.sE_b], feed_dict={ self.vox3d: batch_voxels, }) for i in range(multiplier): for j in range(multiplier): for k in range(multiplier): minib = i * multiplier2 + j * multiplier + k model_out = self.sess.run(self.zG, feed_dict={ self.plane_m: out_m, self.plane_b: out_b, self.point_coord: self.coords[minib:minib + 1], }) model_float[self.aux_x + i + 1, self.aux_y + j + 1, self.aux_z + k + 1] = np.reshape( model_out, [ self.test_size, self.test_size, self.test_size ]) vertices, triangles = mcubes.marching_cubes(model_float, 0.5) vertices = (vertices - 0.5) / self.real_size - 0.5 #output prediction write_ply_triangle(config.sample_dir + "/" + str(t) + "_vox.ply", vertices, triangles) vertices, triangles = mcubes.marching_cubes( batch_voxels[0, :, :, :, 0], 0.5) vertices = (vertices - 0.5) / self.real_size - 0.5 #output ground truth write_ply_triangle(config.sample_dir + "/" + str(t) + "_gt.ply", vertices, triangles) print("[sample]")
def test_ae_all(self): self._load_dataset() self.get_devices() self.get_coords_for_training() self.build_model() self.loadCheckpoint() self.bae_model.eval() shape_num = len(self.data_voxels) print("testing samples ", shape_num) multiplier = int(self.config.frame_grid_size / self.config.test_size) multiplier2 = multiplier * multiplier for t in range(shape_num): model_float = np.zeros([ self.config.frame_grid_size + 2, self.config.frame_grid_size + 2, self.config.frame_grid_size + 2 ], np.float32) batch_voxels = self.data_voxels[t:t + 1].astype(np.float32) sq_batch_voxel = np.squeeze(batch_voxels) vertices_gt, triangles_gt = mcubes.marching_cubes( sq_batch_voxel, self.config.sampling_threshold) write_ply_triangle( self.config.sample_dir + "/" + str(t) + "gt_vox.ply", vertices_gt, triangles_gt) batch_voxels = torch.from_numpy(batch_voxels) batch_voxels = batch_voxels.to(self.device) z_vector, _, _ = self.bae_model(batch_voxels, None, None, is_training=False) for i in range(multiplier): for j in range(multiplier): for k in range(multiplier): minib = i * multiplier2 + j * multiplier + k point_coord = self.coords[minib:minib + 1] _, _, net_out = self.bae_model(None, z_vector, point_coord, is_training=False) model_float[self.aux_x + i + 1, self.aux_y + j + 1, self.aux_z + k + 1] = np.reshape( net_out.detach().cpu().numpy(), [ self.test_size, self.test_size, self.test_size ]) vertices, triangles = mcubes.marching_cubes( model_float, self.config.sampling_threshold) vertices = (vertices.astype(np.float32) - 0.5) / self.config.frame_grid_size - 0.5 # output ply sum write_ply_triangle( self.config.sample_dir + "/" + str(t) + "_vox.ply", vertices, triangles) print("[sample]")
def process_MSD(root, task='Heart2', num_surf=5): img_list, gt_list = get_MSD_list(root, task) save_dir = os.path.join(root, task) n = len(img_list) for i in range(n): print('Process img: {}'.format(img_list[i])) img = ants.image_read(img_list[i]) gt = ants.image_read(gt_list[i]) # iso-resample img_ = iso_resample(img, [1.5, 1.5, 1.5], islabel=False) gt_ = iso_resample(gt, [1.5, 1.5, 1.5], islabel=True) # crop img_np = img_.numpy() gt_np = gt_.numpy() img_np = crop(img_np) gt_np = crop(gt_np) # normal img_np = normalize(img_np) # sample surf init for j in tqdm(range(num_surf)): gt_dfm = elasticdeform.deform_random_grid(gt_np, 4, 4, 0) gt_dfm_smooth = mcubes.smooth_gaussian(gt_dfm, 1) v, e = mcubes.marching_cubes(gt_dfm_smooth, 0) mcubes.export_obj( v, e, os.path.join( save_dir, 'surfs_unaligned', '{:0>2d}_{:0>2d}surf_init.obj'.format(i + 1, j + 1))) # write image img_nii = ants.from_numpy(img_np, img_.origin, img_.spacing, img_.direction, img_.has_components, img_.is_rgb) gt_nii = ants.from_numpy(gt_np, gt_.origin, gt_.spacing, gt_.direction, gt_.has_components, gt_.is_rgb) ants.image_write( img_nii, os.path.join(save_dir, 'images', '{:0>2d}img.nii'.format(i + 1))) ants.image_write( gt_nii, os.path.join(save_dir, 'labels', '{:0>2d}gt.nii'.format(i + 1))) gt_smooth = mcubes.smooth_gaussian(gt_np, 1) v, e = mcubes.marching_cubes(gt_smooth, 0) mcubes.export_obj( v, e, os.path.join(save_dir, 'surfs_unaligned', '{:0>2d}surf.obj'.format(i + 1)))
def test_1(self, config, name): multiplier = int(self.real_size / self.test_size) multiplier2 = multiplier * multiplier if config.phase == 0: thres = 0.5 else: thres = 0.99 t = np.random.randint(len(self.data_voxels)) model_float = np.zeros( [self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32) batch_voxels_ = self.data_voxels[t:t + 1].astype(np.float32) batch_voxels = torch.from_numpy(batch_voxels_) batch_voxels = batch_voxels.to(self.device) _, out_m, _, _ = self.bsp_network(batch_voxels, None, None, None, is_training=False) vertices, triangles = mcubes.marching_cubes( batch_voxels_[0, 0, :, :, :], 0.5) vertices = (vertices - 0.5) / self.real_size - 0.5 #output ground truth write_ply_triangle(config.sample_dir + "/" + name + "_gt.ply", vertices, triangles) for i in range(multiplier): for j in range(multiplier): for k in range(multiplier): minib = i * multiplier2 + j * multiplier + k point_coord = self.coords[minib:minib + 1] _, _, _, net_out = self.bsp_network(None, None, out_m, point_coord, is_training=False) if config.phase != 0: net_out = torch.clamp(1 - net_out, min=0, max=1) model_float[self.aux_x + i + 1, self.aux_y + j + 1, self.aux_z + k + 1] = np.reshape( net_out.detach().cpu().numpy(), [ self.test_size, self.test_size, self.test_size ]) vertices, triangles = mcubes.marching_cubes(model_float, thres) vertices = (vertices - 0.5) / self.real_size - 0.5 #output ply sum write_ply_triangle(config.sample_dir + "/" + name + ".ply", vertices, triangles) print("[sample]")
def test_dae3(self, config): if self.checkpoint_manager.latest_checkpoint: self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint) print(" [*] Load SUCCESS") else: print(" [!] Load failed...") return dima = self.test_size dim = self.real_size multiplier = int(dim / dima) multiplier2 = multiplier * multiplier for t in range(config.start, min(len(self.data_voxels), config.end)): model_float = np.zeros( [self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32) batch_voxels = self.data_voxels[t:t + 1] _, out_m, _, _ = self.bsp_network(batch_voxels, None, None, None, is_training=False) for i in range(multiplier): for j in range(multiplier): for k in range(multiplier): minib = i * multiplier2 + j * multiplier + k point_coord = self.coords[minib:minib + 1] _, _, _, model_out = self.bsp_network( None, None, out_m, point_coord, is_training=False) model_float[self.aux_x + i + 1, self.aux_y + j + 1, self.aux_z + k + 1] = np.reshape( model_out, [ self.test_size, self.test_size, self.test_size ]) vertices, triangles = mcubes.marching_cubes(model_float, 0.5) vertices = (vertices - 0.5) / self.real_size - 0.5 #output prediction write_ply_triangle(config.sample_dir + "/" + str(t) + "_vox.ply", vertices, triangles) vertices, triangles = mcubes.marching_cubes( batch_voxels[0, :, :, :, 0], 0.5) vertices = (vertices - 0.5) / self.real_size - 0.5 #output ground truth write_ply_triangle(config.sample_dir + "/" + str(t) + "_gt.ply", vertices, triangles) print("[sample]")
def test_dae3(self, config): #load previous checkpoint if not self.load(): exit(-1) dima = self.test_size dim = self.real_size multiplier = int(dim / dima) multiplier2 = multiplier * multiplier self.bsp_network.eval() for t in range(config.start, min(len(self.data_voxels), config.end)): model_float = np.zeros( [self.real_size + 2, self.real_size + 2, self.real_size + 2], np.float32) batch_voxels_ = self.data_voxels[t:t + 1].astype(np.float32) batch_voxels = torch.from_numpy(batch_voxels_) batch_voxels = batch_voxels.to(self.device) _, out_m, _, _ = self.bsp_network(batch_voxels, None, None, None, is_training=False) for i in range(multiplier): for j in range(multiplier): for k in range(multiplier): minib = i * multiplier2 + j * multiplier + k point_coord = self.coords[minib:minib + 1] _, _, _, model_out = self.bsp_network( None, None, out_m, point_coord, is_training=False) model_float[self.aux_x + i + 1, self.aux_y + j + 1, self.aux_z + k + 1] = np.reshape( model_out.detach().cpu().numpy(), [ self.test_size, self.test_size, self.test_size ]) vertices, triangles = mcubes.marching_cubes(model_float, 0.5) vertices = (vertices - 0.5) / self.real_size - 0.5 #output prediction write_ply_triangle(config.sample_dir + "/" + str(t) + "_vox.ply", vertices, triangles) vertices, triangles = mcubes.marching_cubes( batch_voxels_[0, 0, :, :, :], 0.5) vertices = (vertices - 0.5) / self.real_size - 0.5 #output ground truth write_ply_triangle(config.sample_dir + "/" + str(t) + "_gt.ply", vertices, triangles) print("[sample]")
def test_1(self, config, name): multiplier = int(self.real_size/self.test_size) multiplier2 = multiplier*multiplier if config.phase==0: outG = self.zG thres = 0.5 else: outG = self.zG_max thres = 0.99 t = np.random.randint(len(self.data_voxels)) model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32) batch_voxels = self.data_voxels[t:t+1] out_m, out_b = self.sess.run([self.sE_m, self.sE_b], feed_dict={ self.vox3d: batch_voxels, }) for i in range(multiplier): for j in range(multiplier): for k in range(multiplier): minib = i*multiplier2+j*multiplier+k model_out = self.sess.run(outG, feed_dict={ self.plane_m: out_m, self.plane_b: out_b, self.point_coord: self.coords[minib:minib+1], }) model_float[self.aux_x+i+1,self.aux_y+j+1,self.aux_z+k+1] = np.reshape(model_out, [self.test_size,self.test_size,self.test_size]) vertices, triangles = mcubes.marching_cubes(model_float, thres) vertices = (vertices-0.5)/self.real_size-0.5 write_ply_triangle(config.sample_dir+"/"+name+".ply", vertices, triangles) print("[sample]")
def marching_cubes_completion(points, **kwargs): """ :param points: :param kwargs: :return: """ patch_size = kwargs.get("patch_size", 120) percent_offset = kwargs.get("percent_offset", (0.5, 0.5, 0.45)) percent_patch_size = kwargs.get("percent_patch_size", 0.8) marching_cubes_resolution = kwargs.get("marching_cubes_resolution", 0.5) smooth = kwargs.get("smooth", False) voxel_grid, voxel_center, voxel_resolution, center_point_in_voxel_grid = pc_vox_utils.pc_to_binvox( points=points, patch_size=patch_size, percent_offset=percent_offset, percent_patch_size=percent_patch_size) vertices, faces = mcubes.marching_cubes(voxel_grid.data, marching_cubes_resolution) vertices = pc_vox_utils.rescale_mesh(vertices, voxel_center, voxel_resolution, center_point_in_voxel_grid) ply_data = generate_ply_data(vertices, faces) # If we are smoothing use meshlabserver to smooth over mesh if smooth: ply_data = smooth_ply(ply_data) # Export to plyfile type return ply_data
def binvox_to_ply(voxel_grid, **kwargs): """ :param voxel_grid: :type voxel_grid: binvox_rw.Voxels :param kwargs: :return: """ percent_offset = kwargs.get("percent_offset", (0.5, 0.5, 0.45)) marching_cubes_resolution = kwargs.get("marching_cubes_resolution", 0.5) patch_size = voxel_grid.dims[0] pc_center_in_voxel_grid = (patch_size * percent_offset[0], patch_size * percent_offset[1], patch_size * percent_offset[2]) voxel_resolution = voxel_grid.scale / patch_size center_point_in_voxel_grid = voxel_grid.translate + numpy.array( pc_center_in_voxel_grid) * voxel_resolution vertices, faces = mcubes.marching_cubes(voxel_grid.data, marching_cubes_resolution) vertices = vertices * voxel_resolution - numpy.array( pc_center_in_voxel_grid) * voxel_resolution + numpy.array( center_point_in_voxel_grid) ply_data = generate_ply_data(vertices, faces) # Export to plyfile type return ply_data
def test_all(self, config): self.saver = tf.train.Saver() could_load, checkpoint_counter = self.load(self.checkpoint_dir) if could_load: print(" [*] Load SUCCESS") else: print(" [!] Load failed...") return num_of_points = 4096 add_out = "./test_out/" if not os.path.exists(add_out): os.makedirs(add_out) offset_x = int(self.crop_edge/2) offset_y = int(self.crop_edge/2) test_num = self.data_pixel.shape[0] for t in range(test_num): print(t,test_num) batch_view = self.data_pixel[t,23] batch_view = batch_view[offset_y:offset_y+self.crop_size, offset_x:offset_x+self.crop_size] batch_view = np.reshape(batch_view/255.0, [1,self.crop_size,self.crop_size,1]) model_z = self.sess.run(self.sE, feed_dict={ self.view_test: batch_view, }) model_float = self.z2voxel(model_z) vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold) vertices = (vertices-1)/256-0.5 #save mesh write_ply(add_out+str(t)+".ply", vertices, triangles)
def render(self, batch): pts = batch['pts'] sh = pts.shape inside = batch['inside'][0].bool() pts = pts[0][inside][None] pts = pts.view(sh[0], -1, 1, 3) pts = self.pts_to_can_pts(pts, batch) sp_input = self.prepare_sp_input(batch) grid_coords = self.get_grid_coords(pts, sp_input, batch) grid_coords = grid_coords.view(sh[0], -1, 3) if grid_coords.size(1) < 1024 * 32: alpha = self.net(sp_input, grid_coords) else: alpha = self.batchify_rays(sp_input, grid_coords, 1024 * 32, None) alpha = alpha[0, :, 0].detach().cpu().numpy() cube = np.zeros(sh[1:-1]) inside = inside.detach().cpu().numpy() cube[inside == 1] = alpha cube = np.pad(cube, 10, mode='constant') vertices, triangles = mcubes.marching_cubes(cube, cfg.mesh_th) mesh = trimesh.Trimesh(vertices, triangles) ret = {'cube': cube, 'mesh': mesh} return ret
def main2(): logging.info("(this might take a while...)") samples = 50 t = time.time() # from stl import mesh diameter = 10 radius = diameter / 2 half = radius / 2 # X, Y, Z = np.mgrid[:diameter, :diameter, :diameter] # print "X" # print X # print "Y" # print Y # print "Z" # print Z # u = (X-50)**2 + (Y-50)**2 + (Z-50)**2 - 25**2 + pnoise3(X,Y,Z,octaves=3)*3 # u = (X-radius)**2 + (Y-radius)**2 + (Z-radius)**2 - half**2 #+ pnoise3(X,Y,Z,octaves=3)*3 # u = [[[(X)**2 + (Y)**2 + (Z)**2 - 25**2 + pnoise3(X,Y,Z,octaves=3)*3 for X in range(-100,100)] for Y in range(-100,100)] for Z in range(-100,100)] u = np.ndarray((200, 200, 200)) for X in range(-100, 100): for Y in range(-100, 100): for Z in range(-100, 100): # print v u[X + 100, Y + 100, Z + 100] = ((X) ** 2 + (Y) ** 2 + (Z) ** 2) + pnoise3(X * 0.05, Y * 0.05, Z * 0.05, octaves=3) * 30 # print u # Extract the 0-isosurface logging.debug("u") logging.debug(u) vertices, triangles = mcubes.marching_cubes(u, 60) logging.info("mesh completed in %f seconds" % (time.time() - t)) meshexport.export(vertices, triangles)
def test_sphere(): # Create sphere with radius 25 centered at (50, 50, 50) x, y, z = np.mgrid[:100, :100, :100] levelset = np.sqrt((x - 50)**2 + (y - 50)**2 + (z - 50)**2) - 25 # vertices, triangles = mcubes.marching_cubes(levelset, 0) # mcubes.export_obj(vertices, triangles, 'sphere1.obj') binary_levelset = levelset > 0 smoothed_levelset = mcubes.smooth( binary_levelset, method='constrained', max_iters=500, rel_tol=1e-4 ) vertices, _ = mcubes.marching_cubes(smoothed_levelset, 0.0) # Check all vertices have same distance to (50, 50, 50) dist = np.sqrt(np.sum((vertices - [50, 50, 50])**2, axis=1)) assert dist.min() > 24.5 and dist.max() < 25.5 assert np.all(np.abs(smoothed_levelset - levelset) < 1) assert np.all((smoothed_levelset > 0) == binary_levelset)
def test_z(self, config, batch_z, dim): # GAN # TODO: could_load, checkpoint_counter = self.load(self.checkpoint_dir) if could_load: print(" [*] Load SUCCESS") else: print(" [!] Load failed...") return # load previous checkpoint if os.path.exists(self.checkpoint_path): self.ckpt.restore(tf.train.latest_checkpoint(self.checkpoint_path)) print(" [*] Load SUCCESS") else: print(" [!] Load failed...") for t in range(batch_z.shape[0]): model_z = batch_z[t:t + 1] model_z = tf.convert_to_tensor(model_z) model_float = self.z2voxel(model_z) # img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8) # img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8) # img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8) # cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1) # cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2) # cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3) vertices, triangles = mcubes.marching_cubes( model_float, self.sampling_threshold) vertices = (vertices.astype(np.float32) - 0.5) / self.real_size - 0.5 # vertices = self.optimize_mesh(vertices,model_z) write_ply(config.sample_dir + "/" + "out" + str(t) + ".ply", vertices, triangles) print("[sample Z]")
def test(model, dataset, weights_filepath="weights_current.h5"): model.load_weights(weights_filepath) train_iterator = dataset.iterator(batch_size=batch_size, num_batches=nb_test_batches, flatten_y=False) batch_x, batch_y = train_iterator.next() results_dir = 'results' if not os.path.exists(results_dir): os.mkdir(results_dir) pred = model._predict(batch_x) pred = pred.reshape(batch_size, patch_size, 1, patch_size, patch_size) pred_as_b012c = pred.transpose(0, 3, 4, 1, 2) for i in range(batch_size): v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5) mcubes.export_mesh(v, t, results_dir + '/drill_' + str(i) + '.dae', 'drill') viz.visualize_batch_x(pred, i, str(i), results_dir + "/pred_" + str(i)) viz.visualize_batch_x(batch_x, i, str(i), results_dir + "/input_" + str(i)) viz.visualize_batch_x(batch_y, i, str(i), results_dir + "/expected_" + str(i))
def export(obj_filename, tile, translate) : print("Marching Cubes %s" % obj_filename) # print translate start = time.time() dim = tile.shape v, t = mcubes.marching_cubes(tile, 0.3) print("Reconstruction in %f" % (time.time() - start)) # decimated_obj = "./dec.%s.obj" objFile = open(obj_filename, "w"); # write obj vertices for i in range(len(v)): objFile.write("v %f %f %f\n" % (v[i][0], v[i][1], v[i][2])) # write obj triangles for i in range(len(t)): objFile.write("f %d %d %d\n" % (t[i][0]+1, t[i][1]+1, t[i][2]+1 )) print("\t Exporting %s (%d verts , %d tris) in %fs" % (obj_filename, len(v), len(t), (time.time() - start))) objFile.close() # subprocess.call(["commandlineDecimater", "-M", "AR", "-M", "NF", "-M", "ND:50", "-n", "0.5", "-i", obj_filename, "-o", decimated_obj % name]); translate_obj(obj_filename, translate)
def test_mesh_point(self, config): # load previous checkpoint # This checkpoint file records the most recent checkpoint.. otherwise there is no record. self.load_checkpoint() self.im_network.eval() for t in range(config.start, min(len(self.data_voxels), config.end)): batch_voxels_ = self.data_voxels[t:t + 1].astype(np.float32) batch_voxels = torch.from_numpy(batch_voxels_) batch_voxels = batch_voxels.to(self.device) model_z, _ = self.im_network(batch_voxels, None, None, is_training=False) model_float = self.z2voxel(model_z) vertices, triangles = mcubes.marching_cubes( model_float, self.sampling_threshold) vertices = (vertices.astype(np.float32) - 0.5) / self.real_size - 0.5 # vertices = self.optimize_mesh(vertices,model_z) write_ply_triangle(config.sample_dir + "/" + str(t) + "_vox.ply", vertices, triangles) print("[sample]") # sample surface points sampled_points_normals = sample_points_triangle( vertices, triangles, 4096) np.random.shuffle(sampled_points_normals) write_ply_point_normal( config.sample_dir + "/" + str(t) + "_pc.ply", sampled_points_normals) print("[sample]")
def run_fuse(self): """ Run fusion. """ assert os.path.exists(self.options.depth_dir) common.makedir(self.options.out_dir) files = self.read_directory(self.options.depth_dir) timer = common.WallTimer() Rs = self.get_views() for filepath in files: # As rendering might be slower, we wait for rendering to finish. # This allows to run rendering and fusing in parallel (more or less). depths = common.read_hdf5(filepath) timer.reset() tsdf = self.fusion(depths, Rs) tsdf = tsdf[0] vertices, triangles = libmcubes.marching_cubes(-tsdf, 0) vertices /= self.options.resolution vertices -= 0.5 off_file = os.path.join(self.options.out_dir, ntpath.basename(filepath)[:-3]) exporter.export_off(vertices, triangles, off_file) print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))
def generate_mesh(self, data):
    inputs = data['inputs'].to(self.device)
    logits_list = []
    for points in self.grid_points_split:
        with torch.no_grad():
            logits = self.model(points, inputs)
            logits_list.append(logits.squeeze(0).detach().cpu())
    logits = torch.cat(logits_list, dim=0)
    logits = np.reshape(logits.numpy(), (self.resolution,) * 3)
    # padding to be able to retrieve objects close to the bounding box boundary
    logits = np.pad(logits, ((1, 1), (1, 1), (1, 1)), 'constant', constant_values=0)
    # the network outputs logits, so map the probability threshold into logit
    # space: logit(t) = log(t) - log(1 - t)
    threshold = np.log(self.threshold) - np.log(1. - self.threshold)
    vertices, triangles = mcubes.marching_cubes(logits, threshold)
    # remove translation due to padding
    vertices -= 1
    # rescale to original scale
    step = (self.max - self.min) / (self.resolution - 1)
    vertices = np.multiply(vertices, step)
    vertices += [self.min, self.min, self.min]
    mesh = trimesh.Trimesh(vertices, triangles)
    return mesh
def make_3D_plots(background, filename, fig=None): from mpl_toolkits.mplot3d import Axes3D # PyMCubes package is required for `visual_callback_3d` try: import mcubes except ImportError: raise ImportError("PyMCubes is required for 3D `visual_callback_3d`") # Prepare the visual environment. if fig is None: fig = plt.figure() fig.clf() ax = fig.add_subplot(111, projection='3d') if ax.collections: del ax.collections[0] coords, triangles = mcubes.marching_cubes(background, 0.5) ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=triangles) plt.pause(0.1) fig.savefig( os.path.join('/home/agapi/Desktop/MasterThesis/Morphsnakes', filename + ".png"))
def test_z(self, config, batch_z, dim): could_load, checkpoint_counter = self.load(self.checkpoint_dir) if could_load: print(" [*] Load SUCCESS") else: print(" [!] Load failed...") return for t in range(batch_z.shape[0]): model_float = self.z2voxel(batch_z[t:t + 1]) img1 = np.clip(np.amax(model_float, axis=0) * 256, 0, 255).astype(np.uint8) img2 = np.clip(np.amax(model_float, axis=1) * 256, 0, 255).astype(np.uint8) img3 = np.clip(np.amax(model_float, axis=2) * 256, 0, 255).astype(np.uint8) cv2.imwrite(config.sample_dir + "/" + str(t) + "_1t.png", img1) cv2.imwrite(config.sample_dir + "/" + str(t) + "_2t.png", img2) cv2.imwrite(config.sample_dir + "/" + str(t) + "_3t.png", img3) vertices, triangles = mcubes.marching_cubes( model_float, self.sampling_threshold) write_ply(config.sample_dir + "/" + "out" + str(t) + ".ply", vertices, triangles) print("[sample GAN]")
def __init__(self,segmentation, mtype="DAE"): self.segmentation = segmentation self.image = segmentation.dicomimage threshold=segmentation.getThreshold() if mtype.lower() == "collada" or mtype.lower() == "dae": self.extention=".dae" self.name = segmentation.name +"_" + mtype+ "_Tresh-" + threshold self.vertices, self.triangles = mcubes.marching_cubes(self.segmentation.image_Threshold, 0) mcubes.export_mesh(self.vertices, self.triangles, GlobalData.ModelPath + "/" +self.name + self.extention, self.name) if mtype.lower() == "stl": self.extention=".stl" self.name = segmentation.name + "_" + mtype +"_Tresh-" + threshold self.vertices, self.triangles = mcubes.marching_cubes(self.segmentation.image_Threshold, 0) mcubes.export_mesh(self.vertices, self.triangles, GlobalData.ModelPath + "/" +self.name + self.extention, self.name) daepath = GlobalData.ModelPath + "/" + self.name self.name = daepath
def test_mesh(self, config):
    # load previous checkpoint
    # checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint")
    if os.path.exists(self.checkpoint_path):
        self.ckpt.restore(tf.train.latest_checkpoint(self.checkpoint_path))
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")
        return
    for t in range(config.start, min(len(self.data_voxels), config.end)):
        batch_voxels_ = self.data_voxels[t:t + 1].astype(np.float32)
        # move the channel axis last (NCDHW -> NDHWC) before feeding the network
        batch_voxels_ = batch_voxels_.transpose(0, 2, 3, 4, 1)
        batch_voxels = tf.convert_to_tensor(batch_voxels_)
        model_z, _ = self.im_network(batch_voxels, None, None, training=False)
        model_float = self.z2voxel(model_z)
        vertices, triangles = mcubes.marching_cubes(model_float, self.sampling_threshold)
        vertices = (vertices.astype(np.float32) - 0.5) / self.real_size - 0.5
        # vertices = self.optimize_mesh(vertices, model_z)
        write_ply_triangle(config.sample_dir + "/" + str(t) + "_vox.ply", vertices, triangles)
        print("[sample]")
def npy2point_datagenerator(mask=None, number_points=300, dim=3, crop_size=112, tocrop=False, fps=True):
    """
    Convert a .npy mask to a point cloud (for the data generator).
    :param mask: the ground truth image
    :param number_points: number of points to sample
    :param dim: point dimensionality
    :param crop_size: crop size applied when tocrop is True
    :param tocrop: whether to crop the volume
    :param fps: whether to apply farthest point sampling
    :return: sampled vertices
    """
    import mcubes
    mask = np.where(mask > 0, 1, 0)
    mask = np.moveaxis(mask, -1, 0)
    vertices = np.zeros((number_points, dim))
    if mask.sum() > 50:
        if tocrop:
            mask = crop_volume(mask, crop_size=crop_size)
        mask = np.concatenate([mask, mask, mask], axis=0)
        # vol = mcubes.smooth(mask)
        vertices, triangles = mcubes.marching_cubes(mask, 0)
        if fps and (len(vertices) > 0):
            vertices = graipher(vertices, number_points, dim=dim)
    vertices = np.array(vertices, dtype=int)  # np.int is removed in recent NumPy
    return vertices
def interpolation(self, A=None, B=None): if A == None: name_list = json.load( open( os.path.join(self.data_path, "train_val_test_split", self.category + ".test.json"), "r")) # random.shuffle(name_list) A = name_list[5]['anno_id'] + ".h5" B = name_list[6]['anno_id'] + ".h5" else: A = str(A) + ".h5" B = str(B) + ".h5" with h5py.File(os.path.join(self.data_path, self.category, A), 'r') as fp: A = np.array(np.clip(fp["shape_voxel64"], 0, 1), dtype="float32") with h5py.File(os.path.join(self.data_path, self.category, B), 'r') as fp: B = np.array(np.clip(fp["shape_voxel64"], 0, 1), dtype="float32") save_dir = 'interpolation' if not os.path.exists(save_dir): os.mkdir(save_dir) self.load_ckpt() zA = self.net.getz(torch.tensor(A).unsqueeze(0).cuda()) zB = self.net.getz(torch.tensor(B).unsqueeze(0).cuda()) for i in tqdm(range(11)): latentz = (zA * i + zB * (10 - i)) / 10 d_voxel = np.zeros((64, 64, 64)) for x in range(64): points = [] for y in range(64): for z in range(64): points.append([x, y, z]) points = torch.FloatTensor(points).unsqueeze(0).cuda() / 64.0 output = self.net.testz(latentz, points) cnt = 0 for y in range(64): for z in range(64): d_voxel[x][y][z] = output[cnt] > 0.5 cnt += 1 fig = plt.figure() ax = fig.gca(projection='3d') ax.voxels(d_voxel, facecolors='b', edgecolors='k') plt.savefig(os.path.join(save_dir, "{}_voxel.png".format(i))) vertices, triangles = libmcubes.marching_cubes(d_voxel, 0) mesh = trimesh.Trimesh(vertices, triangles) mesh.export(os.path.join(save_dir, "{}_result.obj".format(i))) mesh = trimesh.smoothing.filter_laplacian(mesh) mesh.export( os.path.join(save_dir, "{}_smooth_result.obj".format(i)))
def export_mesh(model, dataset, upsample, mcubes_threshold=0.005): res = 3 * (upsample * opt.res, ) model.octant_size = model.octant_size * upsample print('Export: calculating occupancy...') mrc_fname = os.path.join(opt.logging_root, opt.experiment_name, f"{opt.experiment_name}.mrc") occupancy = utils.write_occupancy_multiscale_summary(res, dataset, model, None, None, None, None, None, output_mrc=mrc_fname, oversample=upsample, mode='hq') print('Export: running marching cubes...') vertices, faces = mcubes.marching_cubes(occupancy, mcubes_threshold) print('Export: exporting mesh...') out_fname = os.path.join(opt.logging_root, opt.experiment_name, f"{opt.experiment_name}.dae") mcubes.export_mesh(vertices, faces, out_fname)
def voxels_to_mesh(pred_vol, thresh=0.5):
    # pad with an empty border so surfaces touching the volume boundary get closed
    pred_vol_thresholded = np.pad(pred_vol, [(1, 1), (1, 1), (1, 1)], 'constant', constant_values=(0,)) > thresh
    v_all, f_all = mcubes.marching_cubes(pred_vol_thresholded, 0.5)
    v_all = v_all - 1 + 0.5  # undo padding offset
    return v_all, f_all + 1  # 1-indexing
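# A minimal usage sketch for voxels_to_mesh above. The OBJ format expects
# 1-based vertex indices, which is why the function returns f_all + 1.
# write_obj here is a hypothetical helper written only for this example,
# not part of the original source.
def write_obj(path, verts, faces_1_indexed):
    with open(path, "w") as f:
        for v in verts:
            f.write("v {} {} {}\n".format(v[0], v[1], v[2]))
        for tri in faces_1_indexed:
            f.write("f {} {} {}\n".format(int(tri[0]), int(tri[1]), int(tri[2])))

# e.g. verts, faces = voxels_to_mesh(pred_vol); write_obj("pred.obj", verts, faces)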
def mesh_from_logits(self, logits, min, max):
    logits = np.reshape(logits, (self.resolution,) * 3)
    # padding to be able to retrieve objects close to the bounding box boundary
    logits = np.pad(logits, ((1, 1), (1, 1), (1, 1)), 'constant', constant_values=0)
    threshold = np.log(self.threshold) - np.log(1. - self.threshold)
    threshold = 0.5
    # logits = (-1) * logits
    vertices, triangles = mcubes.marching_cubes(logits, threshold)
    # remove translation due to padding
    max = max + self.padding
    min = min - self.padding
    # vertices -= 1
    # rescale to original scale
    step = (max - min) / self.mise_res
    # step = (max - min) / (self.resolution * self.upsampling_steps - 1)
    vertices = np.multiply(vertices, step)
    vertices += [min, min, min]
    # vertices = vertices * self.global_scale
    return trimesh.Trimesh(vertices, triangles)
def mkoutersurf(image, radius, outfile):
    # radius information is currently ignored;
    # it is a little tougher to deal with the morphology in python
    fill = nib.load(image)
    filld = fill.get_data()
    filld[filld == 1] = 255
    gaussian = np.ones((2, 2)) * .25
    image_f = np.zeros((256, 256, 256))
    for slice in range(256):
        temp = filld[:, :, slice]
        image_f[:, :, slice] = convolve(temp, gaussian, 'same')
    image2 = np.zeros((256, 256, 256))
    image2[np.where(image_f <= 25)] = 0
    image2[np.where(image_f > 25)] = 255
    strel15 = generate_binary_structure(3, 1)
    BW2 = grey_closing(image2, structure=strel15)
    thresh = np.max(BW2) / 2
    BW2[np.where(BW2 <= thresh)] = 0
    BW2[np.where(BW2 > thresh)] = 255
    v, f = marching_cubes(BW2, 100)
    v2 = np.transpose(np.vstack((128 - v[:, 0], v[:, 2] - 128, 128 - v[:, 1])))
    write_surface(outfile, v2, f)
def test_mesh(self, config): #load previous checkpoint checkpoint_txt = os.path.join(self.checkpoint_path, "checkpoint") if os.path.exists(checkpoint_txt): fin = open(checkpoint_txt) model_dir = fin.readline().strip() fin.close() self.im_network.load_state_dict(torch.load(model_dir)) print(" [*] Load SUCCESS") else: print(" [!] Load failed...") return self.im_network.eval() for t in range(config.start, min(len(self.data_voxels), config.end)): batch_voxels_ = self.data_voxels[t:t + 1].astype(np.float32) batch_voxels = torch.from_numpy(batch_voxels_) batch_voxels = batch_voxels.to(self.device) model_z, _ = self.im_network(batch_voxels, None, None, is_training=False) model_float = self.z2voxel(model_z) vertices, triangles = mcubes.marching_cubes( model_float, self.sampling_threshold) vertices = (vertices.astype(np.float32) - 0.5) / self.real_size - 0.5 #vertices = self.optimize_mesh(vertices,model_z) write_ply_triangle(config.sample_dir + "/" + str(t) + "_vox.ply", vertices, triangles) print("[sample]")
def make_isomesh(self, val, name="", update=False): """makes a mesh based off the marching cubes algorithm, for given volume data Arguments: val {float} -- value between 0 and 1 that determines where the mesh is drawn. Mesh is an isosurface based off volumetric data. 0 takes the minimum value in the volume and tries to make a surface on that value, 1 takes the maximum. Keyword Arguments: name {str} -- given name for isomesh (default: {""}) update {bool} -- update isomesh or not? (default: {False}) """ if not name: name = self.name + '.dae' elif not name.endswith('.dae'): name = name + '.dae' ipath = self.check_file(name, update) if ipath is None: return print('making isosurface...') start = time.time() field_max = np.amax(self.field.field) field_min = np.amin(self.field.field) isoval = val * (field_max - field_min) + field_min vertices, triangles = mcubes.marching_cubes(self.field.field, isoval) mcubes.export_mesh(vertices, triangles, ipath, "Iso{}".format(val)) end = time.time() print('mesh created, time elapsed = {}s'.format(end - start))
def test_z(self, config, batch_z, dim): could_load, checkpoint_counter = self.load(self.checkpoint_dir) if could_load: print(" [*] Load SUCCESS") else: print(" [!] Load failed...") return for t in range(batch_z.shape[0]): model_z = batch_z[t:t + 1] model_z = torch.from_numpy(model_z) model_z = model_z.to(self.device) model_float = self.z2voxel(model_z) #img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8) #img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8) #img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8) #cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1) #cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2) #cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3) vertices, triangles = mcubes.marching_cubes( model_float, self.sampling_threshold) vertices = (vertices.astype(np.float32) - 0.5) / self.real_size - 0.5 #vertices = self.optimize_mesh(vertices,model_z) write_ply(config.sample_dir + "/" + "out" + str(t) + ".ply", vertices, triangles) print("[sample Z]")
def export_ply(filename, cutout, level=0):
    """
    Converts a dense annotation to a .PLY, using Marching Cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run mcubes

    Returns:
        boolean success
    """
    if ".ply" not in filename:
        filename = filename + ".ply"
    vs, fs = mcubes.marching_cubes(cutout, level)
    with open(filename, 'w') as fh:
        lines = [
            "ply",
            "format ascii 1.0",
            "comment generated by ndio",
            "element vertex " + str(len(vs)),
            "property float32 x",
            "property float32 y",
            "property float32 z",
            "element face " + str(len(fs)),
            "property list uint8 int32 vertex_index",
            "end_header"
        ]
        # every header line, vertex, and face record must end with a newline
        fh.write("\n".join(lines) + "\n")
        for v in vs:
            fh.write("{} {} {}\n".format(v[0], v[1], v[2]))
        for f in fs:
            fh.write("3 {} {} {}\n".format(f[0], f[1], f[2]))
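# Hedged usage sketch for export_ply above; the array contents are illustrative
# only (a solid cube inside a 32^3 volume), not data from the original project.
import numpy as np

dense = np.zeros((32, 32, 32))
dense[8:24, 8:24, 8:24] = 1.0
export_ply("cube_annotation.ply", dense, level=0.5)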
def triangulate_cube(cube):
    """
    :param cube: nd.array NxNxN where cube[i,j,k]==0 defines the algebraic surface
    :return: (list of vertices, list of triangles) of the polygonized algebraic surface
    """
    vert, triang = mcubes.marching_cubes(cube, 0)
    vertn = vert / (len(cube) - 1)  # normalize vertex coordinates to [0, 1]
    return vertn, triang
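# Hedged usage sketch for triangulate_cube above: the zero level set of this
# cube is a sphere of radius N/4 centred in the volume. All names are local to
# this example and not part of the original source.
import numpy as np

N = 64
x, y, z = np.mgrid[:N, :N, :N]
sphere_cube = (x - N / 2)**2 + (y - N / 2)**2 + (z - N / 2)**2 - (N / 4)**2
verts_unit, tris = triangulate_cube(sphere_cube)  # vertices scaled to [0, 1]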
def callback(levelset): counter[0] += 1 if (counter[0] % plot_each) != 0: return if ax.collections: del ax.collections[0] coords, triangles = mcubes.marching_cubes(levelset, 0.5) ax.plot_trisurf(coords[:, 0], coords[:, 1], coords[:, 2], triangles=triangles) plt.pause(0.1)
def plot_3d_probs(Wx,X,Y,Z, cutoff, resolution, ax=None, color='b', alpha=1.0, xlim=None, ylim=None, zlim=None): ''' Create a 3D plot showing an isosurface for a probability value of 'cutoff' The vertices of the polygons are generated using the Marching Cubes algorithm as implemented in the PyMCubes library: https://github.com/pmneila/PyMCubes :param Wx: The probability values on a grid. :param X: The X grid (generated using meshgrid) :param Y: The Y grid (generated using meshgrid) :param Z: The Z grid (generated using meshgrid) :param cutoff: The cutoff value for the isosurface. :param resolution: The width, height and length of the grid (as a single scalar value) :param ax: An axis object. If None, then we create our own :param color: The color for the surface :param alpha: How transparent the surface should be :param xlim: The limits of the x-axis (and data) :param ylim: The limits of the y-axis (and data) :param zlim: The limits of the z-axis (and data) ''' import mcubes vertices, triangles = mcubes.marching_cubes(Wx, cutoff) scaled_vertices = np.array([xlim[0] + (xlim[1] - xlim[0]) * vertices[:,0] / resolution, ylim[0] + (ylim[1] - ylim[0]) * vertices[:,1] / resolution, zlim[0] + (zlim[1] - zlim[0]) * vertices[:,2] / resolution]).T verts = [[scaled_vertices[i] for i in t] for t in triangles] from mpl_toolkits.mplot3d.art3d import Poly3DCollection import matplotlib.pyplot as plt #print "ax 2", ax if ax is None: #print "here3" fig = plt.figure(figsize=(6,6)) ax = fig.add_subplot(1,1,1,projection='3d') ax.add_collection3d(Poly3DCollection(verts, facecolors=color, alpha=alpha)) ax.set_xlim(xlim[0], xlim[1]) ax.set_ylim(ylim[0], ylim[1]) ax.set_zlim(zlim[0], zlim[1]) ax.set_xlabel('$d$') ax.set_ylabel('$\phi$') ax.set_zlabel('$\psi$') return ax
def torus():
    size = 100
    X, Y, Z = np.mgrid[:size, :size, :size]
    r = size / 8
    R = r * 2
    # implicit equation of a torus with tube radius r and ring radius R
    u = ((X - size/2)**2 + (Y - size/2)**2 + (Z - size/2)**2 + R**2 - r**2)**2 - 4 * (R**2) * ((X - size/2)**2 + (Y - size/2)**2)

    # Extract the 0-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0)

    # Export the result to torus1.dae
    mcubes.export_mesh(vertices, triangles, "torus1.dae", "MyTorus")
def export_dae(filename, cutout, level=0): """ Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes). Arguments: filename (str): The filename to write out to cutout (numpy.ndarray): The dense annotation level (int): The level at which to run mcubes Returns: boolean success """ if ".dae" not in filename: filename = filename + ".dae" vs, fs = mcubes.marching_cubes(cutout, level) mcubes.export_mesh(vs, fs, filename, "ndioexport")
def sphere1():
    # Create a data volume (50 x 50 x 50)
    X, Y, Z = np.mgrid[:50, :50, :50]
    u = (X - 25)**2 + (Y - 25)**2 + (Z - 25)**2 - 20**2

    # Extract the 0-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0)
    print(vertices.shape)
    print(triangles.shape)

    # Export the result to sphere3.dae
    mcubes.export_mesh(vertices, triangles, "sphere3.dae", "MySphere")
def plan():
    # Create a data volume (50 x 50 x 50)
    X, Y, Z = np.mgrid[:50, :50, :50]
    u = (3 * X) + (2 * Y) + (1 * Z)

    # Extract the 0-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0)
    print(vertices.shape)
    print(triangles.shape)

    # Export the result to plan.dae
    mcubes.export_mesh(vertices, triangles, "plan.dae", "MyPlane")
def main3(): logging.info("(this might take a while...)") t = time.time() # from stl import mesh samples = 50 radius = samples / 2.0 half = radius / 2.0 array = np.ndarray((samples, samples, samples)) for x in range(samples): for y in range(samples): for z in range(samples): noise = AMPLITUDE * pnoise3(x * FREQUENCY, y * FREQUENCY, z * FREQUENCY, octaves=2) array[x, y, z] = x ** 2 + y ** 2 + z ** 2 - half ** 2 + noise logging.debug("array") logging.debug(array) # Extract the 0-isosurface vertices, triangles = mcubes.marching_cubes(array, 0) logging.info("mesh completed in %f seconds" % (time.time() - t)) meshexport.export(vertices, triangles)
def test(model, dataset, weights_filepath=BEST_WEIGHT_FILE): abs_weights_filepath = '/home/jvarley/3d_conv/keras/examples/' + weights_filepath model.load_weights(abs_weights_filepath) train_iterator = dataset.iterator(batch_size=batch_size, num_batches=nb_test_batches, flatten_y=False) batch_x, batch_y = train_iterator.next() results_dir = DATA_DIR + 'results' if not os.path.exists(results_dir): os.makedirs(results_dir) pred = model._predict(batch_x) pred = pred.reshape(batch_size, patch_size, 1, patch_size, patch_size) pred_as_b012c = pred.transpose(0, 3, 4, 1, 2) for i in range(batch_size): v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5) mcubes.export_mesh(v, t, results_dir + '/toilet_' + str(i) + '.dae', 'drill') viz.visualize_batch_x(pred, i, str(i), results_dir + "/pred_" + str(i)) viz.visualize_batch_x(batch_x, i, str(i), results_dir + "/input_" + str(i)) viz.visualize_batch_x(batch_y, i, str(i), results_dir + "/expected_" + str(i)) # for i in range(batch_size): # viz.visualize_batch_x_y_overlay(batch_x, batch_y, pred, i=i, title=str(i)) # viz.visualize_batch_x(pred, i, 'pred_' + str(i), ) # viz.visualize_batch_x(batch_x, i,'batch_x_' + str(i), ) # viz.visualize_batch_x(batch_y, i, 'batch_y_' + str(i), ) import IPython IPython.embed()
import numpy as np
import mcubes

print("Example 1: Isosurface in NumPy volume...")

# Create a data volume (100 x 100 x 100)
X, Y, Z = np.mgrid[:100, :100, :100]
u = (X - 50)**2 + (Y - 50)**2 + (Z - 50)**2 - 25**2

# Extract the 0-isosurface
vertices1, triangles1 = mcubes.marching_cubes(u, 0)

# Export the result to sphere1.dae
mcubes.export_mesh(vertices1, triangles1, "sphere1.dae", "MySphere")
print("Done. Result saved in 'sphere1.dae'.")

print("Example 2: Isosurface in Python function...")
print("(this might take a while...)")

# Create the volume
def f(x, y, z):
    return x**2 + y**2 + z**2

# Extract the 16-isosurface
vertices2, triangles2 = mcubes.marching_cubes_func(
    (-10, -10, -10), (10, 10, 10),  # Bounds
    100, 100, 100,                  # Number of samples in each dimension
    f,                              # Implicit function
    16)                             # Isosurface value
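# The example above ends after sampling the implicit function; presumably it
# finishes by exporting the second mesh as well. The output filename below is
# an assumption, mirroring Example 1.
mcubes.export_mesh(vertices2, triangles2, "sphere2.dae", "MySphere")
print("Done. Result saved in 'sphere2.dae'.")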
parser.add_argument('--image', dest='image', help="The background image to display") parser.add_argument('--volume', dest='volume', help="The volume to render") parser.add_argument('--obj', dest='obj', help="The file path of the object") args = parser.parse_args() im = scipy.misc.imread(args.image, False, 'RGB') vol = np.fromfile(args.volume, dtype=np.int8) vol = vol.reshape((200,192,192)) vol = vol.astype(float) vertices, triangles = mcubes.marching_cubes(vol, 10) vertices = vertices[:,(2,1,0)] vertices[:,2] *= 0.5 # scale the Z component correctly r = im[:,:,0].flatten() g = im[:,:,1].flatten() b = im[:,:,2].flatten() vcx,vcy = np.meshgrid(np.arange(0,192),np.arange(0,192)) vcx = vcx.flatten() vcy = vcy.flatten() vc = np.vstack((vcx, vcy, r, g, b)).transpose() neigh = NearestNeighbors(n_neighbors=1) neigh.fit(vc[:,:2]) n = neigh.kneighbors(vertices[:,(0,1)], return_distance=False) colour = vc[n,2:].reshape((vertices.shape[0],3)).astype(float) / 255
mask = morphology.binary_opening(mask, iterations=args.opening) if args.closing is not None: mask = morphology.binary_closing(mask, iterations=args.closing) # Label fill if args.max_label: label_objects, nb_labels = ndi.label(mask) sizes = np.bincount(label_objects.ravel()) sizes[0] = 0 # ingnore zero voxel max_label = np.argmax(sizes) max_mask = (label_objects == max_label) mask = max_mask # Extract marching cube surface from mask vertices, triangles = mcubes.marching_cubes(mask, args.value) # Generate mesh mesh = TriMesh_Vtk(triangles.astype(np.int), vertices) # transformation if args.world_lps: rotation = volume_nib.get_affine()[:3,:3] translation = volume_nib.get_affine()[:3,3] voxel_space = nib.aff2axcodes(volume_nib.get_affine()) new_vertice = vertices.dot(rotation) new_vertice = new_vertice + translation # voxel_space -> LPS print str(voxel_space), "-to-> LPS" if voxel_space[0] != 'L':
coordinates[_x,_y,_z,1]=x_grad[_x] coordinates[_x,_y,_z,2]=y_grad[_y] coordinates[_x,_y,_z,3]=z_grad[_z] coordinates[_x,_y,_z,4]=x_grad[_x]**2+y_grad[_y]**2+z_grad[_z]**2 coordinates=coordinates.reshape((sz_x*sz_y*sz_z,coords)) tot_vox = sz_x*sz_y*sz_z voxels = numpy.zeros((tot_vox,4)) for val in xrange(tot_vox): voxels[val,0] = sum( (coordinates[val,1:])**2 ) voxels[val,1] = ((1.0 - coordinates[val,1])+(coordinates[val,2]**2))/2.0 #np.random.random((3)) voxels = voxels.reshape((sz_x,sz_y,sz_z,4)) thresh = 0.5 #verts, faces = measure.marching_cubes(abs(voxels[:,:,:,0]), thresh) _verts,faces = mcubes.marching_cubes(voxels[:,:,:,0],thresh) """ mlab.triangular_mesh([vert[0] for vert in verts], [vert[1] for vert in verts], [vert[2] for vert in verts], faces) # doctest: +SKIP mlab.show() # doctest: +SKIP """ #import pygame #pygame.init() #pygame.display.set_mode((1,1), pygame.OPENGL|pygame.DOUBLEBUF) from OpenGL.GL import * from OpenGL.GLU import *
def main(): # Use NumPy to create a 2D array of complex numbers on [-2,2]x[-2,2] Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005] print 'Y shape: ', Y.shape print 'X shape: ', X.shape Z = X+1j*Y xs = tf.constant(Z.astype("complex64")) zs = tf.Variable(xs) ns = tf.Variable(tf.zeros_like(xs, "float32")) not_diverged = tf.Variable(np.ones(Z.shape, dtype=np.bool)) Z_mod_at_div = tf.Variable(2 * tf.ones_like(xs, "float32")) for i in range(MAX_ITERS): # Compute the new values of z: z^2 + x zs_ = zs*zs + xs # Have we diverged with this new value? cur_mod = tf.complex_abs(zs_) not_diverged_ = cur_mod < 4 # Operation to update the zs and the iteration count. # Note: We keep computing zs after they diverge! This # is very wasteful! There are better, if a little # less simple, ways to do this. ns_ = ns + tf.cast(not_diverged_, "float32") diverged_this_step = tf.logical_and(tf.logical_not(not_diverged_), not_diverged) Z_mod_at_div = tf.select(diverged_this_step, cur_mod, Z_mod_at_div) zs = zs_ ns = ns_ not_diverged = not_diverged_ mus = tf.select(not_diverged, ns, ns + 1 - tf.log(tf.log(Z_mod_at_div)) / np.log(2)) with tf.Session() as sess: tf.initialize_all_variables().run() print 'running!' ns_evaled, Z_mod_at_div_evaled, mus_evaled = sess.run([ns, Z_mod_at_div, mus]) print 'done running!' print Z_mod_at_div_evaled non_zeros_z_mod = np.where(np.abs(Z_mod_at_div_evaled) > 0.01) print non_zeros_z_mod print 'max mod: %f, min mod: %f' % (np.max(Z_mod_at_div_evaled), np.min(Z_mod_at_div_evaled[non_zeros_z_mod])) print 'diff between mus and ns: ' diff = mus_evaled - ns_evaled print diff print 'max: %f, min: %f' % (np.max(diff), np.min(diff)) DisplayFractal(mus_evaled, 'mandelbrot.png') DisplayFractal(ns_evaled, 'mandelbrot_notfrac.png') img_int_ext = interior_exterior_map(ns_evaled) print "img_int_ext.max, %f, img_int_ext.min: %f" % \ (img_int_ext.max(), img_int_ext.min()) location = (X.shape[0] / 2, (4 * X.shape[1]) / 5) radius = (X.shape[0] / 10) ext_radius = (X.shape[0] / 70) img_int_ext_pendant = add_pendant(img_int_ext, location, radius, ext_radius) DisplayFractal(img_int_ext_pendant, "mandelbrot_int_ext_pendant.png") img_int_ext_pendant_noend = np.copy(img_int_ext_pendant) for i in xrange(X.shape[1] / 5): for j in xrange(X.shape[0]): img_int_ext_pendant_noend[j, i] = 1 DisplayFractal(img_int_ext_pendant_noend, "mandelbrot_int_ext_pendant_noend.png") img_int_ext_pendant_noend_bigmiddle = np.copy(img_int_ext_pendant_noend) location_bigmiddle = np.array((X.shape[0] / 2, int(X.shape[1] / 2.5))) radius_bigmiddle = X.shape[1] / 25 for i in xrange(X.shape[1]): for j in xrange(X.shape[0]): dist = norm(np.array((j, i)) - location_bigmiddle) if dist < radius_bigmiddle: img_int_ext_pendant_noend_bigmiddle[j, i] = -1 DisplayFractal(img_int_ext_pendant_noend_bigmiddle, "mandelbrot_int_ext_pendant_noend_bigmiddle.png") print "img_int_ext: " print img_int_ext DisplayFractal(img_int_ext, "mandelbrot_int_ext.png") contours = measure.find_contours(img_int_ext_pendant_noend_bigmiddle, 0.0) len_contours = [len(contour_i) for contour_i in contours] print len_contours contours_sorted_by_size = sorted(contours, key=len) len_contours = [len(contour_i) for contour_i in contours] print len_contours bigcontour = contours_sorted_by_size[0] #embed() #bigcontour_int_ext = contour_to_int_ext_map(bigcontour, X, Y) #embed() #fig, ax = plt.subplots() #ax.imshow(img_int_ext, interpolation='nearest', cmap=plt.cm.gray) #for n, contour in enumerate(contours): #ax.plot(contour[:, 1], contour[:, 0], linewidth=2) #ax.plot(bigcontour[:, 1], 
bigcontour[:, 0], linewidth=2) #ax.axis('image') #ax.set_xticks([]) #ax.set_yticks([]) dwg = svgwrite.Drawing('mandelbrot.svg', profile='tiny') for j in [-1, -2]: contour = contours_sorted_by_size[j] for i in xrange(contour.shape[0] - 1): #print tuple(bigcontour[i]) dwg.add(dwg.line(tuple(contour[i]), tuple(contour[i + 1]), \ stroke=svgwrite.rgb(10, 10, 16, '%'))) #dwg.add(dwg.text('Test', insert=(0, 0.2), fill='red')) dwg.save() #plt.show() #error = 100 * np.abs(mus_evaled - ns_evaled) #print error.shape #error = error.reshape(list(error.shape)+[1]) #print error.shape #error_img = np.concatenate([error, error, error], 2) #error_img = np.uint8(np.clip(error_img, 0, 255)) #scipy.misc.imsave('mandelbrot_errors.png', error_img) # 3d mandelbrot! max_dist = 10 #tsdf = gen_tsdf(img_int_ext_pendant_noend_bigmiddle, max_dist) tsdf = np.load("tsdf_10.npy") #np.save("tsdf_10", tsdf) #plt.imshow(tsdf) #plt.show() # a = -3 / 20 # b = 53 / 20 # c = -1 # for 10.5 at 10, 8.5 at 5, and 1.5 at 1 # with a * x**2 + b * x + c formula for height int_ext_3d_map = gen_int_ext_3d_map_from_tsdf(tsdf, max_dist) #embed() vertices, triangles = mcubes.marching_cubes(int_ext_3d_map, 0) mcubes.export_mesh(vertices, triangles, "mandelbrot_smoothed.dae", "Mandelbrot_pendant") #embed() from mayavi import mlab mlab.triangular_mesh( vertices[:, 0], vertices[:, 1], vertices[:, 2], triangles) mlab.show()
def import_cube_iso(context, report, filepath, iso_val='VOLFRAC', vol_frac=0.7, absolute=100, origin_to_com=False, ): """ Format specification from http://h5cube-spec.readthedocs.io/en/latest/cubeformat.html """ found_mcubes = import_mcubes(context, report) if not found_mcubes: return False bpy.ops.object.select_all(action="DESELECT") with open(filepath, "r") as fin: next(fin) next(fin) ls = next(fin).split() nat = int(ls[0]) dset_ids_present = (nat < 0) nat = abs(nat) origin = Vector(list(map(float, ls[1:4]))) nval = int(ls[-1]) if len(ls) == 5 else 1 if nval != 1 and dset_ids_present: report({'ERROR'}, "{}".filepath + "NVAL != 1 and NAT < 0 is not compatible.") return False nvoxel = np.zeros(3, dtype=int) voxel_vec = np.zeros((3,3)) for i in range(3): n, x, y, z = next(fin).split() nvoxel[i] = int(n) voxel_vec[i,:] = list(map(float, (x, y, z))) if (nvoxel<0).all(): unit = 1 elif (nvoxel<0).any(): msg = ( "{} ".format(filepath) + "seems to contain mixed units (+/- mixed in lines 4-6). " "Please make sure either all units are in Bohr (+) or " "Angstrom (-)." ) report({'ERROR'}, msg) return False else: unit = A_per_Bohr voxel_vec *= unit origin *= unit # skip atom info for n in range(nat): next(fin) if dset_ids_present: orbitals = [] ls = list(map(int, next(fin).split())) m = ls[0] orbitals.extend(ls[1:]) while len(orbital) < m: orbitals.extend(list(map(int, next(fin).split()))) else: m = 1 all_data = np.zeros(np.product(nvoxel)*m*nval) pos = 0 for line in fin: ls = line.split() all_data[pos:(pos+len(ls))] = list(map(float, ls)) pos += len(ls) all_data = all_data.reshape(list(nvoxel)+[m*nval]) all_data = np.rollaxis(all_data, -1, 0) red = (.8, 0, 0) blue = (0, 0, .8) n_gt_1 = (len(all_data) > 1) for n, data in enumerate(all_data): plusminus = (data.min() < 0 and data.max() > 0) for fac in (1, -1): # for positive and negative valued wavefunction part = data[data*fac > 0]*fac if len(part) > 0: if iso_val == 'VOLFRAC': flat = np.sort(part.flatten())[::-1] cs = np.cumsum(flat) # find first index to be larger than the percent volume # we want to enclose cut = cs[-1]*vol_frac idx = np.argmax(cs > cut) # linearly interpolate (this should be good enough for most # purposes) rat = (cs[idx]-cut) / (cs[idx]-cs[idx-1]) iso = rat * (flat[idx-1]-flat[idx]) + flat[idx] elif iso_val == 'ABSOLUTE': iso = absolute # Use marching cubes to obtain the surface mesh of ellipsoids verts, faces = mcubes.marching_cubes(data*fac, iso) # displace by have a voxel to account for the voxel volume verts += np.array((0.5, 0.5, 0.5)) # convert to cartesian coordinates verts = verts.dot(voxel_vec) verts = verts.tolist() faces = faces.astype(int).tolist() base = bpy.path.display_name_from_filepath(filepath) orb = ("_{}".format(n) if n_gt_1 else "") ext = ("_p" if fac == 1 else "_n") if plusminus else "" name = "{}{}{}".format(base, orb, ext) me = bpy.data.meshes.new(name) me.from_pydata(verts, [], faces) ob = bpy.data.objects.new(name, me) context.scene.objects.link(ob) context.scene.objects.active = ob ob.select = True ob.location = origin bpy.ops.object.mode_set(mode='EDIT') bpy.ops.mesh.remove_doubles(threshold=0.06) bpy.ops.mesh.normals_make_consistent() bpy.ops.mesh.delete_loose() bpy.ops.object.mode_set(mode='OBJECT') bpy.ops.object.shade_smooth() mod = ob.modifiers.new("Subsurf", 'SUBSURF') mod.show_viewport = False mod.show_render = False mod = ob.modifiers.new("Remesh", 'REMESH') mod.octree_depth = 8 mod.scale = 0.99 mod.use_smooth_shade = True mod.use_remove_disconnected = False mod.mode = 'SMOOTH' mod.show_viewport 
= False mod.show_render = False if len(ob.material_slots) < 1: ob.data.materials.append(None) # get or create element material per molecule material = bpy.data.materials.get(name) if not material: color = (red if fac == 1 else blue) # get material color from elements list, # and Default if not an element material = mb_utils.new_material(name, color=color) # finally, assign material to first slot. ob.material_slots[0].link = 'DATA' ob.material_slots[0].material = material if origin_to_com: bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY")
def create_mesh(k, intensity):
    volume = data.load_data(k)
    vertices, triangles = mcubes.marching_cubes(volume, intensity)
    print_obj(vertices, triangles)
    # TODO use Catmull-Clark to smooth voxel mesh
    print("#", k, intensity)
def execute_cb(self, goal): start_time = time.time() self._feedback = graspit_shape_completion.msg.CompleteMeshFeedback() self._result = graspit_shape_completion.msg.CompleteMeshResult() rospy.loginfo('Received Msg') single_view_pointcloud_filepath = '/srv/data/shape_completion_data/test_1/pcd_8_310_143723.pcd' point_array = np.asarray(goal.partial_mesh.vertices) pc = np.zeros((len(point_array), 3), np.float32) for i in range(len(point_array)): pc[i][0] = point_array[i].x pc[i][1] = point_array[i].y pc[i][2] = point_array[i].z batch_x = np.zeros((1, self.patch_size, self.patch_size, self.patch_size, 1), dtype=np.float32) batch_x[0, :, :, :, :], voxel_resolution, offset = reconstruction_utils.build_test_from_pc_scaled(pc, self.patch_size) #make batch B2C01 rather than B012C batch_x = batch_x.transpose(0, 3, 4, 1, 2) pred = self.model._predict(batch_x) pred = pred.reshape(1, self.patch_size, 1, self.patch_size, self.patch_size) pred_as_b012c = pred.transpose(0, 3, 4, 1, 2) batch_x = batch_x.transpose(0, 3, 4, 1, 2) mask = reconstruction_utils.get_occluded_voxel_grid(batch_x[0, :, :, :, 0]) completed_region = pred_as_b012c[0, :, :, :, 0] * mask scale = 4 high_res_voxel_grid, voxel_resolution, offset = reconstruction_utils.build_high_res_voxel_grid(pc, scale, self.patch_size) indices = np.mgrid[0:scale*self.patch_size:1, 0:scale*self.patch_size:1, 0:scale*self.patch_size:1] scaled_completed_region = map_coordinates(completed_region, indices/scale, order = 0, mode = 'constant', cval=0.0) output = high_res_voxel_grid[:, :, :, 0] + scaled_completed_region #output = batch_x[0, :, :, :, 0] + completed_region v, t = mcubes.marching_cubes(output, 0.25) if self.smooth_mesh: coarse_mesh = '/srv/data/temp/' + 'coarse.dae' smooth_mesh = '/srv/data/temp/' + 'smooth.off' script_file = '/srv/data/temp/' + 'poisson_remesh.mlx' mcubes.export_mesh(v, t, coarse_mesh, 'model') cmd_string = 'meshlabserver -i ' + coarse_mesh cmd_string = cmd_string + ' -o ' + smooth_mesh cmd_string = cmd_string + ' -s ' + script_file process = subprocess.call(cmd_string, shell=True) off = off_handler.OffHandler() off.read(smooth_mesh) v = off.vertices t = off.faces #viz.visualize_batch_x(pred, 0, str(1), 'results/' + "pred_" + str(1)) #viz.visualize_batch_x(batch_x, 0, str(1), 'results/' + "input_" + str(1)) v *= voxel_resolution v += offset for i in range(len(v)): self._result.completed_mesh.vertices.append(geometry_msgs.msg.Point(v[i, 0], v[i, 1], v[i, 2])) for i in range(len(t)): self._result.completed_mesh.triangles.append(shape_msgs.msg.MeshTriangle((t[i, 0], t[i, 1], t[i, 2]))) end_time = time.time() self._result.completion_time = int(1000*(end_time - start_time)) self._as.set_succeeded(self._result)
def render( voxels, bg_color=[0.5, 0.5, 0.5], angle1=45, angle2=10, save=None, amb=0.2, spec=1.0, shiny=100, lighting=True, diff=0.5, ): global disp_sz, init_done if not init_done: do_init() sz_x, sz_y, sz_z, channels = voxels.shape thresh = 0.5 # print "LIGHTING",lighting # raw_input() # verts, faces = measure.marching_cubes(abs(voxels[:,:,:,0]), thresh) _verts, faces = mcubes.marching_cubes(voxels[:, :, :, 0], thresh) glClearColor(0.0, 0.0, 0.0, 1.0) glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) glMatrixMode(GL_MODELVIEW) if True: # print t # render a helix (this is similar to the previous example, but # this time we'll render to a texture) # initialize projection glClearColor(0.0, 0.0, 0.0, 1.0) glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) glMatrixMode(GL_PROJECTION) glLoadIdentity() gluPerspective(90, 1, 0.01, 1000) gluLookAt(0, 0, sz_z, 0, 0, 0, 0, 1, 0) glMatrixMode(GL_MODELVIEW) glShadeModel(GL_SMOOTH) if lighting: glEnable(GL_COLOR_MATERIAL) glEnable(GL_LIGHTING) glEnable(GL_LIGHT0) glEnable(GL_LIGHT1) glEnable(GL_LIGHT2) light = diff glLightfv(GL_LIGHT0, GL_DIFFUSE, [light, light, light, 1.0]) glLightfv(GL_LIGHT1, GL_DIFFUSE, [light, light, light, 1.0]) glLightfv(GL_LIGHT2, GL_DIFFUSE, [light, light, light, 1.0]) glColorMaterial(GL_FRONT, GL_AMBIENT_AND_DIFFUSE) glLightModelfv(GL_LIGHT_MODEL_AMBIENT, [amb, amb, amb, 1.0]) glMaterialfv(GL_FRONT, GL_SPECULAR, [spec, spec, spec]) glMaterialfv(GL_FRONT, GL_SHININESS, shiny) glLightfv(GL_LIGHT0, GL_POSITION, [0.0, 2.0, -1.0, 0.0]) glLightfv(GL_LIGHT1, GL_POSITION, [10.0, -5.0, 20.0, 0.0]) glLightfv(GL_LIGHT2, GL_POSITION, [-10.0, 0.0, 10.0, 0.0]) glPushMatrix() glRotatef(angle1, 0.0, 1.0, 0.0) glRotatef(angle2, 0.1, 0.0, 0.0) glEnable(GL_CULL_FACE) glEnable(GL_DEPTH_TEST) # glDisable(GL_CULL_FACE) # glDisable(GL_DEPTH_TEST) # Black background for the Helix glClearColor(bg_color[0], bg_color[1], bg_color[2], 1.0) glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Fallback to white # lightZeroPosition = [0.,50.,-2.,1.] # lightZeroColor = [1.8,1.0,0.8,1.0] #green tinged # glLightfv(GL_LIGHT0, GL_POSITION, lightZeroPosition) # glLightf(GL_LIGHT0, GL_CONSTANT_ATTENUATION, 0.1) # glLightf(GL_LIGHT0, GL_LINEAR_ATTENUATION, 0.05) # glEnable(GL_LIGHT0) # The helix # color = [1.0,0.,0.,1.] # glMaterialfv(GL_FRONT,GL_DIFFUSE,color) # glBegin(GL_TRIANGLES); color_idx = np.asarray(_verts, dtype=int) colors = abs(voxels[color_idx[:, 0], color_idx[:, 1], color_idx[:, 2], 1:]) colors = np.clip(colors, 0, 1) colors = hsv_to_rgb(colors) verts = _verts - numpy.array((sz_x / 2, sz_y / 2, sz_z / 2)) # Create an indexed view into the vertex array using the array of three indices for triangles tris = verts[faces] tricols = colors[faces] # print faces.shape if save != None: saveply.save(save, verts, colors, faces) # Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle n = numpy.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0]) # n is now an array of normals per triangle. The length of each normal is dependent the vertices, # we need to normalize these, so that our next step weights each normal equally. 
n = normalize_v3(n) vnorms = numpy.zeros(verts.shape, dtype=numpy.float32) if True: # angle1%2==0: for idx in xrange(len(tris)): face = faces[idx] vnorms[face[0]] += n[idx] vnorms[face[1]] += n[idx] vnorms[face[2]] += n[idx] else: vnorms[faces[:, 0]] += n vnorms[faces[:, 1]] += n vnorms[faces[:, 2]] += n vnorms = normalize_v3(vnorms) verts = numpy.asarray(verts, dtype=numpy.float32) colors = numpy.asarray(colors, dtype=numpy.float32) vnorms = numpy.asarray(vnorms, dtype=numpy.float32) glEnableClientState(GL_VERTEX_ARRAY) glEnableClientState(GL_COLOR_ARRAY) glEnableClientState(GL_NORMAL_ARRAY) glVertexPointer(3, GL_FLOAT, 0, verts) glColorPointer(3, GL_FLOAT, 0, colors) glNormalPointer(GL_FLOAT, 0, vnorms) faces = numpy.asarray(faces, dtype=numpy.uint) glDrawElements(GL_TRIANGLES, faces.flatten().shape[0], GL_UNSIGNED_INT, faces.flatten()) glDisableClientState(GL_VERTEX_ARRAY) glDisableClientState(GL_COLOR_ARRAY) glDisableClientState(GL_NORMAL_ARRAY) glPopMatrix() out = glReadPixels(0, 0, disp_sz, disp_sz, GL_RGB, GL_FLOAT) return out