Code example #1
def test(model, dataset, weights_filepath="weights_current.h5"):

    model.load_weights(weights_filepath)

    train_iterator = dataset.iterator(batch_size=batch_size,
                                      num_batches=nb_test_batches,
                                      flatten_y=False)

    batch_x, batch_y = train_iterator.next()

    results_dir = 'results'
    if not os.path.exists(results_dir):
        os.mkdir(results_dir)

    pred = model._predict(batch_x)
    pred = pred.reshape(batch_size, patch_size, 1, patch_size, patch_size)

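    # Reorder the prediction from [batch, z, channel, x, y] to
    # [batch, x, y, z, channel] so that each [i, :, :, :, 0] slice is a
    # 3-D occupancy grid that marching cubes can consume.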
    pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

    for i in range(batch_size):
        v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5)
        mcubes.export_mesh(v, t, results_dir + '/drill_' + str(i) + '.dae',
                           'drill')
        viz.visualize_batch_x(pred, i, str(i), results_dir + "/pred_" + str(i))
        viz.visualize_batch_x(batch_x, i, str(i),
                              results_dir + "/input_" + str(i))
        viz.visualize_batch_x(batch_y, i, str(i),
                              results_dir + "/expected_" + str(i))
Code example #2
def export_mesh(model, dataset, upsample, mcubes_threshold=0.005):
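    # res is a 3-tuple (N, N, N) with N = upsample * opt.res: the occupancy
    # sampling resolution per axis.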
    res = 3 * (upsample * opt.res, )
    model.octant_size = model.octant_size * upsample

    print('Export: calculating occupancy...')
    mrc_fname = os.path.join(opt.logging_root, opt.experiment_name,
                             f"{opt.experiment_name}.mrc")
    occupancy = utils.write_occupancy_multiscale_summary(res,
                                                         dataset,
                                                         model,
                                                         None,
                                                         None,
                                                         None,
                                                         None,
                                                         None,
                                                         output_mrc=mrc_fname,
                                                         oversample=upsample,
                                                         mode='hq')

    print('Export: running marching cubes...')
    vertices, faces = mcubes.marching_cubes(occupancy, mcubes_threshold)

    print('Export: exporting mesh...')
    out_fname = os.path.join(opt.logging_root, opt.experiment_name,
                             f"{opt.experiment_name}.dae")
    mcubes.export_mesh(vertices, faces, out_fname)
Code example #3
 def make_isomesh(self, val, name="", update=False):
     """makes a mesh based off the marching cubes algorithm, for given volume data
     
     Arguments:
         val {float} -- value between 0 and 1 that determines where the mesh is drawn. The mesh
         is an isosurface of the volumetric data: 0 places the surface at the minimum value
         found in the volume, 1 at the maximum.
     
     Keyword Arguments:
         name {str} -- given name for isomesh (default: {""})
         update {bool} -- update isomesh or not? (default: {False})
     """
     if not name:
         name = self.name + '.dae'
     elif not name.endswith('.dae'):
         name = name + '.dae'
     ipath = self.check_file(name, update)
     if ipath is None:
         return
     print('making isosurface...')
     start = time.time()
     field_max = np.amax(self.field.field)
     field_min = np.amin(self.field.field)
     isoval = val * (field_max - field_min) + field_min
     vertices, triangles = mcubes.marching_cubes(self.field.field, isoval)
     mcubes.export_mesh(vertices, triangles, ipath, "Iso{}".format(val))
     end = time.time()
     print('mesh created, time elapsed = {}s'.format(end - start))
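A hypothetical call, assuming the object wraps a 3-D NumPy array in self.field.field: make_isomesh(0.5) writes the isosurface halfway between the field's minimum and maximum to <name>.dae.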
Code example #5
    def on_tri_d_run_clicked(self):
        """
        Slot documentation goes here.
        """
        thickness = 1

        liver = self.liver
        liver_recon = np.zeros([thickness * liver.shape[0], 512, 512])
        for i in range(liver.shape[0]):
            for s in range(thickness):
                # Stack each slice `thickness` times along the z-axis.
                liver_recon[thickness * i + s] = liver[i]

        vertices, triangles = mcubes.marching_cubes(liver_recon, 0)
        mcubes.export_mesh(vertices, triangles, "liver.dae", "liver")

        tumor = self.tumor
        tumor_recon = np.zeros([thickness * tumor.shape[0], 512, 512])
        for i in range(tumor.shape[0]):
            for s in range(thickness):
                tumor_recon[thickness * i + s] = tumor[i]

        vertices_2, triangles_2 = mcubes.marching_cubes(tumor_recon, 0)
        mcubes.export_mesh(vertices_2, triangles_2, "tumor.dae", "tumor")

        QMessageBox.information(self, "Information",
                                "3D model exported successfully")
Code example #6
File: model.py Project: zebrajack/implicit-decoder
	def test_interp(self, config):
		could_load, checkpoint_counter = self.load(self.checkpoint_dir)
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			return
		
		interp_size = 8
		idx1 = 0
		idx2 = 2
		
		batch_voxels1 = self.data_voxels[idx1:idx1+1]
		batch_voxels2 = self.data_voxels[idx2:idx2+1]
		
		model_z1 = self.sess.run(self.sE,
			feed_dict={
				self.vox3d: batch_voxels1,
			})
		model_z2 = self.sess.run(self.sE,
			feed_dict={
				self.vox3d: batch_voxels2,
			})
		
		batch_z = np.zeros([interp_size,self.z_dim], np.float32)
		for i in range(interp_size):
			batch_z[i] = model_z2*i/(interp_size-1) + model_z1*(interp_size-1-i)/(interp_size-1)
		

		dima = self.test_size
		dim = self.real_size
		multiplier = int(dim/dima)
		multiplier2 = multiplier*multiplier
		
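		# Evaluate the decoder block by block: the (real_size)^3 grid is split
		# into multiplier^3 sub-blocks of test_size^3 query points each.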
		for t in range(interp_size):
			model_float = np.zeros([self.real_size+2,self.real_size+2,self.real_size+2],np.float32)
			for i in range(multiplier):
				for j in range(multiplier):
					for k in range(multiplier):
						minib = i*multiplier2+j*multiplier+k
						model_out = self.sess.run(self.zG,
							feed_dict={
								self.z_vector: batch_z[t:t+1],
								self.point_coord: self.coords[minib],
							})
						model_float[self.aux_x+i+1,self.aux_y+j+1,self.aux_z+k+1] = np.reshape(model_out, [self.test_size,self.test_size,self.test_size])
			img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8)
			img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8)
			img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3)
			
			thres = 0.5
			vertices, triangles = mcubes.marching_cubes(model_float, thres)
			mcubes.export_mesh(vertices, triangles, config.sample_dir+"/"+"out"+str(t)+".dae", str(t))
			
			print("[sample interpolation]")
Code example #7
File: model.py Project: zebrajack/implicit-decoder
	def test_z(self, config, batch_z, dim):
		could_load, checkpoint_counter = self.load(self.checkpoint_dir)
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			return
		
		dima = self.test_size
		multiplier = int(dim/dima)
		multiplier2 = multiplier*multiplier
		multiplier3 = multiplier*multiplier*multiplier
		
		#get coords 256
		aux_x = np.zeros([dima,dima,dima],np.int32)
		aux_y = np.zeros([dima,dima,dima],np.int32)
		aux_z = np.zeros([dima,dima,dima],np.int32)
		for i in range(dima):
			for j in range(dima):
				for k in range(dima):
					aux_x[i,j,k] = i*multiplier
					aux_y[i,j,k] = j*multiplier
					aux_z[i,j,k] = k*multiplier
		coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
		for i in range(multiplier):
			for j in range(multiplier):
				for k in range(multiplier):
					coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
					coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
					coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
		coords = (coords+0.5)/dim*2.0-1.0
		coords = np.reshape(coords,[multiplier3,self.batch_size,3])
		
		for t in range(batch_z.shape[0]):
			model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
			for i in range(multiplier):
				for j in range(multiplier):
					for k in range(multiplier):
						print(t,i,j,k)
						minib = i*multiplier2+j*multiplier+k
						model_out = self.sess.run(self.zG,
							feed_dict={
								self.z_vector: batch_z[t:t+1],
								self.point_coord: coords[minib],
							})
						model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])
			img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8)
			img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8)
			img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3)
			
			thres = 0.5
			vertices, triangles = mcubes.marching_cubes(model_float, thres)
			mcubes.export_mesh(vertices, triangles, config.sample_dir+"/"+"out"+str(t)+".dae", str(t))
			
			print("[sample GAN]")
Code example #8
File: test_mcubes.py Project: zhDai/PyMCubes
def test_export():

    u = np.zeros((10, 10, 10))
    u[2:-2, 2:-2, 2:-2] = 1.0
    vertices, triangles = mcubes.marching_cubes(u, 0.5)

    mcubes.export_obj(vertices, triangles, "output/test.obj")
    mcubes.export_off(vertices, triangles, "output/test.off")
    mcubes.export_mesh(vertices, triangles, "output/test.dae")
Code example #9
File: test01.py Project: jonathanlurie/pythonStuff
def sphere2():
    # Create the volume
    f = lambda x, y, z: x**2 + y**2 + z**2

    # Extract the 16-isosurface
    vertices, triangles = mcubes.marching_cubes_func((-10, -10, -10), (10, 10, 10),
                                                     100, 100, 100, f, 16)

    # Export the result to sphere2.dae
    mcubes.export_mesh(vertices, triangles, "sphere2.dae", "MySphere")
    def plotFromVoxels(voxels, title=''):
        #        print('plotfromvoxel')
        voxel2obj(title + 'voxels.obj', voxels)
        if len(voxels.shape) > 3:
            x_d = voxels.shape[0]
            y_d = voxels.shape[1]
            z_d = voxels.shape[2]
            v = voxels[:, :, :, 0]

            v = np.reshape(v, (x_d, y_d, z_d))
        else:
            v = voxels

        print(
            "voxels_plot", v.shape
        )  ###############################################  (32, 32, 32)  ##################################

        u = voxels
        vertices, triangles = mcubes.marching_cubes(u, 0)
        mcubes.export_mesh(vertices, triangles, title + "recon.dae",
                           "MySphere")
        export_obj(vertices, triangles, title + 'recon.obj')

        x, y, z = v.nonzero()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(x, y, z, zdir='z', c='red')
        ax.set_xlabel('X')
        ax.set_ylabel('y')
        ax.set_zlabel('z')
        ax.set_aspect('equal')
        ax.view_init(-90, 90)

        max_range = np.array(
            [x.max() - x.min(),
             y.max() - y.min(),
             z.max() - z.min()]).max()
        print("max_range", max_range)

        Xb = 0.5 * max_range * np.mgrid[
            -1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (x.max() + x.min())
        Yb = 0.5 * max_range * np.mgrid[
            -1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (y.max() + y.min())
        Zb = 0.5 * max_range * np.mgrid[
            -1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (z.max() + z.min())
        # Comment or uncomment following both lines to test the fake bounding box:
        for xb, yb, zb in zip(Xb, Yb, Zb):
            ax.plot([xb], [yb], [zb], 'w')
        plt.grid()

        plt.show()
        plt.title(title)
        from matplotlib.pyplot import show
        show(block=False)
Code example #11
File: Script.py Project: emscape2/GitCasEmiel
def evaluate(dimX,dimY,dimZ):
    vertices, triangles = mcubes.marching_cubes_func(
            (boundingArea['minX'],boundingArea['minY'],boundingArea['minZ']), 
            (boundingArea['maxX'], boundingArea['maxY'], boundingArea['maxZ']),  # Bounds
            dimX, dimY, dimZ,                                                    # Number of samples in each dimension
            implicit,                                                            # Implicit function
            0)                                                                   # Isosurface value                                                                

    # Export the result to result.dae
    mcubes.export_mesh(vertices, triangles, "/Users/Emscape/Documents/Blender Projects/result.dae", "MLS result")
    print("Done. Result saved in 'result.dae'.")
Code example #12
File: meshexport.py Project: StephenLujan/meshtoy
def export(vertices, triangles, object_name="MyShape", file_name="my_shape"):
    t = time.time()
    logging.info("exporting dae...")
    # get untaken file_name
    full_path = file_name + ".dae"
    iterator = 2
    while (os.path.isfile(full_path)):
        full_path = file_name + str(iterator) + ".dae"
        iterator += 1

    mcubes.export_mesh(vertices, triangles, full_path, object_name)
    logging.info("Done in %f seconds. Result saved in '%s'" % (time.time() - t, full_path))
Code example #13
    def test(self, config):
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
            return

        dima = self.test_size
        dim = self.real_size
        multiplier = int(dim / dima)
        multiplier2 = multiplier * multiplier

        for t in range(self.data_voxels.shape[0]):
            model_float = np.zeros(
                [self.real_size + 2, self.real_size + 2, self.real_size + 2],
                np.float32)
            for i in range(multiplier):
                for j in range(multiplier):
                    for k in range(multiplier):
                        minib = i * multiplier2 + j * multiplier + k
                        # batch_voxels = self.data_voxels[t:t+1]
                        batch_z_vector = self.z_vectors[t:t + 1]
                        model_out = self.sess.run(self.zG,
                                                  feed_dict={
                                                      self.z_vector:
                                                      batch_z_vector,
                                                      self.point_coord:
                                                      self.coords[minib],
                                                  })
                        model_float[self.aux_x + i + 1, self.aux_y + j + 1,
                                    self.aux_z + k + 1] = np.reshape(
                                        model_out, [
                                            self.test_size, self.test_size,
                                            self.test_size
                                        ])
            img1 = np.clip(np.amax(model_float, axis=0) * 256, 0,
                           255).astype(np.uint8)
            img2 = np.clip(np.amax(model_float, axis=1) * 256, 0,
                           255).astype(np.uint8)
            img3 = np.clip(np.amax(model_float, axis=2) * 256, 0,
                           255).astype(np.uint8)
            cv2.imwrite(config.sample_dir + "/" + str(t) + "_1t.png", img1)
            cv2.imwrite(config.sample_dir + "/" + str(t) + "_2t.png", img2)
            cv2.imwrite(config.sample_dir + "/" + str(t) + "_3t.png", img3)

            thres = 0.5
            vertices, triangles = mcubes.marching_cubes(model_float, thres)
            mcubes.export_mesh(
                vertices, triangles,
                config.sample_dir + "/" + "out" + str(t) + ".dae", str(t))

            print("[sample]")
Code example #14
def ros_mesh_msg_to_daefile(mesh, dae_filepath):
    import mcubes

    vs = []
    for vert in mesh.vertices:
        vs.append((vert.x, vert.y, vert.z))
    vertices = np.array(vs)

    ts = []
    for tri in mesh.triangles:
        ts.append(tri.vertex_indices)
    triangles = np.array(ts)

    mcubes.export_mesh(vertices, triangles, dae_filepath, "model")
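Note that export_mesh simply serializes a vertex array and a triangle-index array to COLLADA, so, as this function shows, the mesh does not have to come from marching cubes at all. A minimal sketch with a single hand-built triangle:

import numpy as np
import mcubes

vertices = np.array([[0.0, 0.0, 0.0],
                     [1.0, 0.0, 0.0],
                     [0.0, 1.0, 0.0]])
triangles = np.array([[0, 1, 2]])  # one face, indexing the three vertices
mcubes.export_mesh(vertices, triangles, "triangle.dae", "model")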
Code example #15
File: test01.py Project: jonathanlurie/pythonStuff
def torus():

    size = 100
    X, Y, Z = np.mgrid[:size, :size, :size]

    r = size / 8
    R = r * 2
    u = ( (X-size/2)**2 + (Y-size/2)**2 + (Z-size/2)**2 + R**2 -r**2)**2 - 4*(R**2)*((X-size/2)**2 + (Y-size/2)**2)


    # Extract the 0-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0)

    # Export the result to torus1.dae
    mcubes.export_mesh(vertices, triangles, "torus1.dae", "MyTorus")
Code example #16
 def __init__(self, segmentation, mtype="DAE"):
     self.segmentation = segmentation
     self.image = segmentation.dicomimage
     threshold = str(segmentation.getThreshold())
     if mtype.lower() == "collada" or mtype.lower() == "dae":
         self.extension = ".dae"
         self.name = segmentation.name + "_" + mtype + "_Thresh-" + threshold
         self.vertices, self.triangles = mcubes.marching_cubes(self.segmentation.image_Threshold, 0)
         mcubes.export_mesh(self.vertices, self.triangles, GlobalData.ModelPath + "/" + self.name + self.extension, self.name)
     if mtype.lower() == "stl":
         # Note: mcubes.export_mesh writes COLLADA data even when the file
         # is named with an ".stl" extension.
         self.extension = ".stl"
         self.name = segmentation.name + "_" + mtype + "_Thresh-" + threshold
         self.vertices, self.triangles = mcubes.marching_cubes(self.segmentation.image_Threshold, 0)
         mcubes.export_mesh(self.vertices, self.triangles, GlobalData.ModelPath + "/" + self.name + self.extension, self.name)
     daepath = GlobalData.ModelPath + "/" + self.name
     self.name = daepath
Code example #17
def marching_cube(resolution, bounding_box, number_fo_anim):
    u = np.load('leabels.npy')
    print(u.shape)

    # Extract the 0-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0.1)
    print(np.max(vertices[:, 2]))

    print(bounding_box, resolution)
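    # Map voxel-grid coordinates back to world units inside the bounding box.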
    for i in range(vertices.shape[0]):
        for j in range(3):
            vertices[i, j] = vertices[i, j] / resolution * (bounding_box[
                2 * j + 1] - bounding_box[2 * j]) + bounding_box[2 * j]
    print(np.max(vertices[:, 1]))
    mcubes.export_mesh(vertices, triangles,
                       "sphere_" + str(number_fo_anim) + ".dae", "MySphere")
    return vertices
Code example #18
    def execute_cb(self, goal):
        rospy.loginfo("received new goal")
        points = []

        gen = pc2.read_points(goal.partial_cloud,
                              skip_nans=True,
                              field_names=("x", "y", "z"))
        for p in gen:
            points.append(p)

        pc = np.array(points)
        patch_size = 120
        vox_resolution = get_voxel_resolution(pc, patch_size)

        center = get_center(pc)

        pc_center_in_voxel_grid = (patch_size * PERCENT_X,
                                   patch_size * PERCENT_Y,
                                   patch_size * PERCENT_Z)

        voxel_grid = create_voxel_grid_around_point_scaled(
            pc, center, vox_resolution, patch_size, pc_center_in_voxel_grid)

        rospy.loginfo("about to run mcubes")
        v, t = mcubes.marching_cubes(voxel_grid[:, :, :, 0], 0.5)
        v = rescale_mesh(v, center, vox_resolution, pc_center_in_voxel_grid)

        unsmoothed_handle, unsmoothed_filename = tempfile.mkstemp(
            suffix=".dae")
        smoothed_handle, smoothed_filename = tempfile.mkstemp(suffix=".ply")
        mcubes.export_mesh(v, t, unsmoothed_filename, "model")
        cmd_str = "meshlabserver -i " + unsmoothed_filename + " -o " + smoothed_filename + " -s " + str(
            self.mlx_script_filepath)
        subprocess.call(cmd_str.split())

        mesh = mesh_conversions.read_mesh_msg_from_ply_filepath(
            smoothed_filename)

        if os.path.exists(unsmoothed_filename):
            os.remove(unsmoothed_filename)
        if os.path.exists(smoothed_filename):
            os.remove(smoothed_filename)

        self._result.mesh = mesh
        rospy.loginfo('Succeeded')
        self._as.set_succeeded(self._result)
Code example #19
File: mesh.py Project: jhuapl-boss/ndio
def export_dae(filename, cutout, level=0):
    """
    Converts a dense annotation to a DAE, using Marching Cubes (PyMCubes).

    Arguments:
        filename (str): The filename to write out to
        cutout (numpy.ndarray): The dense annotation
        level (int): The level at which to run mcubes

    Returns:
        boolean success
    """
    if ".dae" not in filename:
        filename = filename + ".dae"

    vs, fs = mcubes.marching_cubes(cutout, level)
    mcubes.export_mesh(vs, fs, filename, "ndioexport")
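A hypothetical call, assuming cutout is a dense annotation volume: export_dae("annotation", cutout) writes annotation.dae with the surface extracted at level 0.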
Code example #20
File: test01.py Project: jonathanlurie/pythonStuff
def plan():
    # Create a data volume (50 x 50 x 50)
    X, Y, Z = np.mgrid[:50, :50, :50]
    u = (3*X) + (2*Y) + (1*Z)

    # Extract the 0-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0)

    print(vertices.shape)
    print(triangles.shape)
    #for t in vertices:
    #    print(t)

    # Export the result to plan.dae
    mcubes.export_mesh(vertices, triangles, "plan.dae", "MyPlane")
Code example #21
File: test01.py Project: jonathanlurie/pythonStuff
def sphere1():
    # Create a data volume (50 x 50 x 50)
    X, Y, Z = np.mgrid[:50, :50, :50]
    u = (X-25)**2 + (Y-25)**2 + (Z-25)**2 - 20**2

    # Extract the 0-isosurface
    vertices, triangles = mcubes.marching_cubes(u, 0)

    print(vertices.shape)
    print(triangles.shape)
    #for t in vertices:
    #    print(t)

    # Export the result to sphere3.dae
    mcubes.export_mesh(vertices, triangles, "sphere3.dae", "MySphere")
Code example #22
def remesh_binvox_model(model_dir, model_name, results_dir):
    with open(model_dir + model_name + '.binvox', 'rb') as f:
        model = binvox_rw.read_as_3d_array(f)

    points = model.data
    binvox_scale = model.scale
    binvox_offset = model.translate
    dims = model.dims

    binvox_offset = np.array(binvox_offset).reshape(1, 3)
    num_voxels_per_dim = max(dims)

    voxel_grid = np.zeros((num_voxels_per_dim + 2, num_voxels_per_dim + 2,
                           num_voxels_per_dim + 2))

    voxel_grid[1:num_voxels_per_dim + 1, 1:num_voxels_per_dim + 1,
               1:num_voxels_per_dim + 1] = points

    v, t = mcubes.marching_cubes(voxel_grid, 0.5)
    v = v * binvox_scale / num_voxels_per_dim + binvox_offset
    mcubes.export_mesh(v, t, results_dir + model_name + '.dae', model_name)
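The one-voxel zero border placed around points is what makes the exported mesh watertight: marching cubes only emits faces where the field crosses the isovalue, so occupied voxels touching the array boundary would otherwise leave the surface open there. A minimal sketch of the same idea using np.pad, which defaults to zero padding:

import numpy as np
import mcubes

points = np.ones((8, 8, 8))      # stand-in for the binvox occupancy data
voxel_grid = np.pad(points, 1)   # one-voxel zero border on every side
v, t = mcubes.marching_cubes(voxel_grid, 0.5)
mcubes.export_mesh(v, t, "padded.dae", "padded")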
Code example #23
def test(model, dataset, weights_filepath=BEST_WEIGHT_FILE):

    abs_weights_filepath = '/home/jvarley/3d_conv/keras/examples/' + weights_filepath

    model.load_weights(abs_weights_filepath)

    train_iterator = dataset.iterator(batch_size=batch_size,
                                      num_batches=nb_test_batches,
                                      flatten_y=False)

    batch_x, batch_y = train_iterator.next()

    results_dir = DATA_DIR + 'results'
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)

    pred = model._predict(batch_x)
    pred = pred.reshape(batch_size, patch_size, 1, patch_size, patch_size)

    pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

    for i in range(batch_size):
        v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5)
        mcubes.export_mesh(v, t, results_dir + '/toilet_' + str(i) + '.dae', 'drill')
        viz.visualize_batch_x(pred, i, str(i), results_dir + "/pred_" + str(i))
        viz.visualize_batch_x(batch_x, i, str(i), results_dir + "/input_" + str(i))
        viz.visualize_batch_x(batch_y, i, str(i), results_dir + "/expected_" + str(i))

    # for i in range(batch_size):
    #     viz.visualize_batch_x_y_overlay(batch_x, batch_y, pred, i=i,  title=str(i))
    #     viz.visualize_batch_x(pred, i, 'pred_' + str(i), )
    #     viz.visualize_batch_x(batch_x, i,'batch_x_' + str(i), )
    #     viz.visualize_batch_x(batch_y, i, 'batch_y_' + str(i), )


    import IPython
    IPython.embed()
Code example #24
File: test_meshing.py Project: towardthesea/PointSDF
#!/usr/bin/env python

import numpy as np
import mcubes

from generate_voxels import voxelize_mesh

# Shows simple marching cubes using PyMCubes

mesh_file = '/home/markvandermerwe/YCB/ycb_meshes/002_master_chef_can.stl'

voxels = voxelize_mesh(mesh_file, np.eye(4), 0.1, True)

vertices, triangles = mcubes.marching_cubes(voxels, 0)
mcubes.export_mesh(vertices, triangles, 'test.dae', 'test')
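Running marching cubes directly on a binary voxelization, as above, yields a blocky staircase surface. PyMCubes also provides mcubes.smooth, which converts a binary array into a smooth scalar field whose 0-isosurface stays within roughly half a voxel of the binary boundary; a minimal sketch:

import numpy as np
import mcubes

binary = np.zeros((20, 20, 20), dtype=bool)
binary[4:-4, 4:-4, 4:-4] = True

smoothed = mcubes.smooth(binary)
vertices, triangles = mcubes.marching_cubes(smoothed, 0)
mcubes.export_mesh(vertices, triangles, "smoothed.dae", "smoothed")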
Code example #25
File: CreateVolumetricGrid.py Project: Jaswir/3DM
def main(context):

#1. Dimension of bounding box of object (USED TO SAMPLE 3D GRID)  #
##################################################    #


    #1. HERE THE BOUNDARIES OF THE OBJECT ARE COMPUTED ---
    name = context.active_object.data.name


    xDimensionsHalve = bpy.data.objects[name].dimensions.x/2  
    xMin = 0 - xDimensionsHalve
    xMax = 0 + xDimensionsHalve

    yDimensionsHalve = bpy.data.objects[name].dimensions.y/2
    yMin = 0 - yDimensionsHalve
    yMax = 0 + yDimensionsHalve

    zDimensionsHalve = bpy.data.objects[name].dimensions.z/2
    zMin = 0 - zDimensionsHalve
    zMax = 0 + zDimensionsHalve



#2. COMPUTE MLS FUNCTION ##########################   #
##################################################    #

    #0. Define Help functions:
    #
    #Wendland weight function
    def Wendland ( r , h ):
         return (1 - r/h)**4*(4*r/h+1)

    #b^T for degree 0, 1 or 2.
    def ChooseB (degree, point):
        x = point[0]
        y = point[1]
        z = point[2]
        if degree == 0:
             A = np.matrix([[1]])        
        elif degree == 1:
             A = np.matrix([[1],  [x], [y], [z], [x*y], [x*z], [y*z], [x*y*z]])
        elif degree == 2:
             A = np.matrix([[1], [x], [y], [z], [x**2], [y**2], [z**2], [x*y], [x*z], [y*z], [x**2*y], [x**2*z], [x*y**2], [x*z**2], [y**2*z], [y*z**2], [x*y*z], [x**2*y*z], [x*y**2*z], [x*y*z**2], [x**2*y**2], [x**2*z**2], [y**2*z**2], [x**2*y**2*z], [x**2*y*z**2], [x*y**2*z**2], [x**2*y**2*z**2]])
        return np.transpose(A)

    #Checks whether a point P is within a range of the given point C
    #which in 3D means is within the sphere with C as its center and the specified
    #, range as its radius
    def insideSphere(C, R, P):
        return ( P[0]- C[0] ) ** 2 + (P[1]- C[1]) ** 2 + (P[2]-C[2]) ** 2 < R**2 

    #Gets the appropriate length that matches to a degree k
    def matchingLengthofDegreeK(k):
        if k == 0:
             l = 1
        elif k == 1:
             l = 8
        elif k == 2:
             l = 27
        return l  

    #Can be used for visualization purposes
    def makeMaterial(name, diffuse, specular, alpha):
        mat = bpy.data.materials.new(name)
        mat.diffuse_color = diffuse
        mat.diffuse_shader = 'LAMBERT'
        mat.diffuse_intensity = 1.0
        return mat

    def setMaterial(ob, mat):
        me = ob.data
        me.materials.append(mat)

    #1. SETUP EPSILON, POINTCLOUD POINTS (FIRST n POINTS), POINTCLOUD POINT NORMALS and N (NECESSARY VARIABLES FOR COMPUTING CONSTRAINT POINTS), POINTCLOUD CENTER --- 
    #
    #Computes N (amount of points in pointCloud)
    PointCloud_points = []
    PointCloud_points  = context.active_object.data.vertices
    N = len(PointCloud_points )

    #Computes Normals of point cloud points
    PointCloud_point_normals = []
    for n in context.active_object['vertex_normal_list']:
        PointCloud_point_normals.append(np.array([n[0], n[1], n[2]]))


    #Fixes an ε value, for instance ε = 0.01 times the diagonal of the bounding box to the object. EXPERIMENTABLE PARAMETER
    v1 =  np.array((xMin, yMin,zMax))
    v2 =  np.array((xMax, yMax,zMin))
    diagonalOfBoundingBox = np.linalg.norm(v1-v2)
    epsilon = 0.01 * diagonalOfBoundingBox

    #2. IMPLEMENT SPATIAL INDEX: KD-TREE ---
    #
    #Computes a Spatial Index: KD-TREE from the PointCloud_points
    #(for faster nearest neighborhood calculations)
    kd = mathutils.kdtree.KDTree(N)
    for index, point in enumerate(PointCloud_points):
        kd.insert(point.co, index)
        
    #Must have been called before using any of the kd.find methods.
    kd.balance()


    #Compute the origin of the axis aligned bounding box
    origin = mathutils.Vector((0,0,0))
    for index, point in enumerate(PointCloud_points):
         origin += point.co

    origin /= N 
    polyDegree = 0
    def MLS(x , y ,z):
                 P = np.array([x ,  y,  z])
                 #Get closest points Pi within wendland radius from P. EXPERIMENTABLE PARAMETER
                 wendlandRadius = diagonalOfBoundingBox/20
                 pointsWithinRange = kd.find_range(P, wendlandRadius)
                 numpyPointsWithinRange = []
                 redPoints = []
                 redPointsEpsilonValues = []
                 greenPoints = []
                 greenPointsEpsilonValues = []
                 for (co,index, dist) in pointsWithinRange:
                     Ni = PointCloud_point_normals[index]
                     pos = co
                     PiNumpy = np.array((pos[0], pos[1], pos[2]))
                     epsilonBackup = epsilon
                     result = PiNumpy + epsilonBackup*Ni  
                     #Range to look for closest points from Pi+N. EXPERIMENTABLE PARAMETER              
                     Range = diagonalOfBoundingBox/20
                     Pi_Not_Closest = True
                     #Change datatype of Pi, to make computations easier
                     Pi = pos 
                     #Check whether Pi is the closest point to Pi+N
                     while Pi_Not_Closest :
                         #Calculates distances between Pi+N = result and the points that are within a range distance from Pi+N
                         dist, closestPos = min([(np.linalg.norm(result - co), co) for (co,index, dist) in kd.find_range(result, Range)])
  
                         #If Pi is not the closest point to Pi+N, 
                         # divide ε by 2 and recompute pi+N until this is the case
                         if closestPos != Pi:
                             epsilonBackup /= 2
                             result = Pi + epsilonBackup * Ni
                         else:
                             Pi_Not_Closest = False
                             break    

                     #check whether the green and red point are within wendlandRadius of P  
                     if insideSphere(C = P, R = wendlandRadius, P = result - 2*(epsilonBackup * Ni)):
                         greenPoints.append(result - 2*(epsilonBackup * Ni))
                         greenPointsEpsilonValues.append(- epsilonBackup)

                     if insideSphere(C = P, R = wendlandRadius ,P = result):                      
                         redPoints.append((result))
                         redPointsEpsilonValues.append(epsilonBackup)
                     numpyPointsWithinRange.append(PiNumpy)

                 #Create lists of the Pi's and Di's belonging to P
                 PointsPi = numpyPointsWithinRange + redPoints + greenPoints
                 PointsDi = [0]*len(pointsWithinRange) + redPointsEpsilonValues + greenPointsEpsilonValues

                 #sqrt(ti) = sqrt(theta(|P-Pi|)) (see MLS reference sheet: http://www.cs.uu.nl/docs/vakken/ddm/MLS%20Reference%20Sheet.pdf)
                 sqrtTiValues = [] 
                 #smoothing value for wendland weight function. EXPERIMENTABLE PARAMETER
                 h = 1
                 for Pi in PointsPi: 
                     value = np.sqrt(Wendland(np.linalg.norm(P-Pi), h))        
                     sqrtTiValues.append(value)
                 # A is a one-column matrix filled by sqrtTi multiplied with b^T
                 A = np.empty((0,matchingLengthofDegreeK(polyDegree)))
                 for index, sqrtTi in enumerate(sqrtTiValues):
                     #Here we choose B for a degree 0, 1 or 2. EXPERIMENTABLE PARAMETER
                     bT = ChooseB(polyDegree, PointsPi[index])
                     value = sqrtTi*bT
                     A = np.insert(A, index, value, 0)

                    
                 # r = sqrtTi_x_Di
                 r = np.empty((0, len(sqrtTiValues)))
                 for index, sqrtTi in enumerate(sqrtTiValues):
                     r = np.insert(r, index, sqrtTi * PointsDi[index])
                 

                 if(len(A) != 0):
                     A_T = np.transpose(A)
                     A_T_x_A = np.dot(A_T, A)        
                     A_T_x_r = np.dot(A_T, r)
                     #a = (A^T*A)^-1 * A^T*r
                     a = np.dot(np.linalg.inv(A_T_x_A), A_T_x_r)
                     #Finally add a to the samples    
                     return np.dot(ChooseB(polyDegree, [x,y,z]), a)

                 else:
                     return 10000

    def f(x, y, z):
        return MLS(x, y, z)


    #4. INPUTS SAMPLED GRID TO MARCHING CUBES PLUGIN ---
    #
    lowerLeft = origin - mathutils.Vector((diagonalOfBoundingBox/2*0.9, diagonalOfBoundingBox/2*0.9, diagonalOfBoundingBox/2*0.9))
    upperRight = origin + mathutils.Vector((diagonalOfBoundingBox/2*1.1, diagonalOfBoundingBox/2*1.1, diagonalOfBoundingBox/2*1.1))

    vertices, triangles = mcubes.marching_cubes_func((lowerLeft[0], lowerLeft[1], lowerLeft[2]),(upperRight[0], upperRight[1], upperRight[2]), 100, 100, 100, f, 0)

    # Export the result
    mcubes.export_mesh(vertices, triangles, "C:\\Users\\jaswir\\Documents\\GameTechnology\\3DM\\3DM_Practical1\\DAE_Files\\Bunnyk1.dae", "Bunny_k1")
Code example #26
File: spheres.py Project: gphani123/PyMCubes
import numpy as np
import mcubes

print("Example 1: Isosurface in NumPy volume...")

# Create a data volume (100 x 100 x 100)
X, Y, Z = np.mgrid[:100, :100, :100]
u = (X-50)**2 + (Y-50)**2 + (Z-50)**2 - 25**2

# Extract the 0-isosurface
vertices1, triangles1 = mcubes.marching_cubes(u, 0)

# Export the result to sphere.dae
mcubes.export_mesh(vertices1, triangles1, "sphere1.dae", "MySphere")

print("Done. Result saved in 'sphere1.dae'.")

print("Example 2: Isosurface in Python function...")
print("(this might take a while...)")

# Create the volume
def f(x, y, z):
    return x**2 + y**2 + z**2

# Extract the 16-isosurface
vertices2, triangles2 = mcubes.marching_cubes_func(
        (-10,-10,-10), (10,10,10),  # Bounds
        100, 100, 100,              # Number of samples in each dimension
        f,                          # Implicit function
        16)                         # Isosurface value

# Export the result to sphere2.dae
mcubes.export_mesh(vertices2, triangles2, "sphere2.dae", "MySphere")

print("Done. Result saved in 'sphere2.dae'.")
Code example #27
File: model.py Project: zebrajack/implicit-decoder
	def test_z_pc(self, config, batch_z, dim):
		could_load, checkpoint_counter = self.load(self.checkpoint_dir)
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			return
		
		dima = self.test_size
		multiplier = int(dim/dima)
		multiplier2 = multiplier*multiplier
		multiplier3 = multiplier*multiplier*multiplier
		
		#get coords 256
		aux_x = np.zeros([dima,dima,dima],np.int32)
		aux_y = np.zeros([dima,dima,dima],np.int32)
		aux_z = np.zeros([dima,dima,dima],np.int32)
		for i in range(dima):
			for j in range(dima):
				for k in range(dima):
					aux_x[i,j,k] = i*multiplier
					aux_y[i,j,k] = j*multiplier
					aux_z[i,j,k] = k*multiplier
		coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
		for i in range(multiplier):
			for j in range(multiplier):
				for k in range(multiplier):
					coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
					coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
					coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
		coords = (coords+0.5)/dim*2.0-1.0
		coords = np.reshape(coords,[multiplier3,self.batch_size,3])
		
		n_pc_points = 2048
		thres = 0.5
		hdf5_file = h5py.File(self.dataset_name + "_im_gan_sample.hdf5", 'w')
		hdf5_file.create_dataset("points", [batch_z.shape[0],n_pc_points,3], np.float32)
		
		for t in range(batch_z.shape[0]):
			print(t)
			model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
			for i in range(multiplier):
				for j in range(multiplier):
					for k in range(multiplier):
						minib = i*multiplier2+j*multiplier+k
						model_out = self.sess.run(self.zG,
							feed_dict={
								self.z_vector: batch_z[t:t+1],
								self.point_coord: coords[minib],
							})
						model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])

			vertices, triangles = mcubes.marching_cubes(model_float, thres)
			mcubes.export_mesh(vertices, triangles, config.sample_dir+"/"+"out"+str(t)+".dae", str(t))
			np.random.shuffle(vertices)
			vertices = (vertices - dim/2 - 0.5)/dim
			
			vertices_out = np.zeros([n_pc_points,3], np.float32)
			vertices_len = vertices.shape[0]
			for i in range(n_pc_points):
				vertices_out[i] = vertices[i%vertices_len]
			
			hdf5_file["points"][t,:,:] = vertices_out
			
		hdf5_file.close()
Code example #28
def test(model, dataset, epoch=-1):

    train_iterator = dataset.train_iterator(batch_size=args.BATCH_SIZE,
                                            flatten_y=False)

    holdout_view_iterator = dataset.holdout_view_iterator(batch_size=args.BATCH_SIZE,
                                                          flatten_y=False)

    holdout_model_iterator = dataset.holdout_model_iterator(batch_size=args.BATCH_SIZE,
                                                            flatten_y=False)

    if epoch == -1:
        base_dir = 'final/'
    else:
        base_dir = 'epoch_' + str(epoch) + '/'

    sub_dir = base_dir + 'trained_views/'
    os.makedirs(args.TEST_OUTPUT_DIR + sub_dir)

    batch_x, batch_y = train_iterator.next()

    pred = model._predict(batch_x)
    # Prediction comes in format [batch number, z-axis, patch number, x-axis,
    #                             y-axis].
    pred = pred.reshape(args.BATCH_SIZE, args.PATCH_SIZE, 1, args.PATCH_SIZE, args.PATCH_SIZE)
    # Convert prediction to format [batch number, x-axis, y-axis, z-axis,
    #                               patch number].
    pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

    for i in range(args.BATCH_SIZE):

        v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5)
        # Save predicted object mesh.
        mcubes.export_mesh(v, t, args.TEST_OUTPUT_DIR + sub_dir + 'model_' + str(i) + '.dae',
                           'model')

        # Save visualizations of the predicted, input, and expected occupancy
        # grids.
        viz.visualize_batch_x(pred, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "pred_" + str(i))
        viz.visualize_batch_x(batch_x, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "input_" + str(i))
        viz.visualize_batch_x(batch_y, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "expected_" + str(i))

    sub_dir = base_dir + 'holdout_views/'
    os.makedirs(args.TEST_OUTPUT_DIR + sub_dir)

    batch_x, batch_y = holdout_view_iterator.next()
    pred = model._predict(batch_x)
    pred = pred.reshape(args.BATCH_SIZE, args.PATCH_SIZE, 1, args.PATCH_SIZE, args.PATCH_SIZE)
    pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

    for i in range(args.BATCH_SIZE):
        v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5)
        mcubes.export_mesh(v, t, args.TEST_OUTPUT_DIR + sub_dir + 'model_' + str(i) + '.dae',
                           'model')
        # Save visualizations of the predicted, input, and expected occupancy
        # grids.
        viz.visualize_batch_x(pred, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "pred_" + str(i))
        viz.visualize_batch_x(batch_x, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "input_" + str(i))
        viz.visualize_batch_x(batch_y, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "expected_" + str(i))

    sub_dir = base_dir + 'holdout_models/'
    os.makedirs(args.TEST_OUTPUT_DIR + sub_dir)

    batch_x, batch_y = holdout_model_iterator.next()

    pred = model._predict(batch_x)
    pred = pred.reshape(args.BATCH_SIZE, args.PATCH_SIZE, 1, args.PATCH_SIZE, args.PATCH_SIZE)
    pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

    for i in range(args.BATCH_SIZE):
        v, t = mcubes.marching_cubes(pred_as_b012c[i, :, :, :, 0], 0.5)
        mcubes.export_mesh(v, t, args.TEST_OUTPUT_DIR + sub_dir + 'model_' + str(i) + '.dae',
                           'model')
        # Save visualizations of the predicted, input, and expected occupancy
        # grids.
        viz.visualize_batch_x(pred, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "pred_" + str(i))
        viz.visualize_batch_x(batch_x, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "input_" + str(i))
        viz.visualize_batch_x(batch_y, i, str(i),
                              args.TEST_OUTPUT_DIR + sub_dir + "expected_" + str(i))
Code example #29
def main():

# Use NumPy to create a 2D array of complex numbers on [-2,2]x[-2,2]

    Y, X = np.mgrid[-1.3:1.3:0.005, -2:1:0.005]
    print('Y shape: ', Y.shape)
    print('X shape: ', X.shape)

    Z = X+1j*Y

    xs = tf.constant(Z.astype("complex64"))
    zs = tf.Variable(xs)
    ns = tf.Variable(tf.zeros_like(xs, "float32"))
    not_diverged = tf.Variable(np.ones(Z.shape, dtype=bool))
    Z_mod_at_div = tf.Variable(2 * tf.ones_like(xs, "float32"))

    for i in range(MAX_ITERS):
        # Compute the new values of z: z^2 + x
        zs_ = zs*zs + xs
        # Have we diverged with this new value?
        cur_mod = tf.abs(zs_)
        not_diverged_ = cur_mod < 4
        # Operation to update the zs and the iteration count.

        # Note: We keep computing zs after they diverge! This
        #       is very wasteful! There are better, if a little
        #       less simple, ways to do this.
        ns_ = ns + tf.cast(not_diverged_, "float32")
        diverged_this_step = tf.logical_and(tf.logical_not(not_diverged_), not_diverged)
        Z_mod_at_div = tf.where(diverged_this_step, cur_mod, Z_mod_at_div)

        zs = zs_

        ns = ns_
        not_diverged = not_diverged_
    mus = tf.where(not_diverged, ns, ns + 1 - tf.log(tf.log(Z_mod_at_div)) / np.log(2))

    with tf.Session() as sess:
        tf.global_variables_initializer().run()


        print('running!')
        ns_evaled, Z_mod_at_div_evaled, mus_evaled = sess.run([ns, Z_mod_at_div, mus])
        print('done running!')
        print(Z_mod_at_div_evaled)
        non_zeros_z_mod = np.where(np.abs(Z_mod_at_div_evaled) > 0.01)
        print(non_zeros_z_mod)
        print('max mod: %f, min mod: %f' % (np.max(Z_mod_at_div_evaled), np.min(Z_mod_at_div_evaled[non_zeros_z_mod])))

        print('diff between mus and ns: ')
        diff = mus_evaled - ns_evaled
        print(diff)
        print('max: %f, min: %f' % (np.max(diff), np.min(diff)))

    DisplayFractal(mus_evaled, 'mandelbrot.png')
    DisplayFractal(ns_evaled, 'mandelbrot_notfrac.png')

    img_int_ext = interior_exterior_map(ns_evaled)
    print "img_int_ext.max, %f, img_int_ext.min: %f" % \
        (img_int_ext.max(), img_int_ext.min())

    location = (X.shape[0] // 2, (4 * X.shape[1]) // 5)
    radius = (X.shape[0] // 10)
    ext_radius = (X.shape[0] // 70)
    img_int_ext_pendant = add_pendant(img_int_ext, location, radius, ext_radius)

    DisplayFractal(img_int_ext_pendant, "mandelbrot_int_ext_pendant.png")

    img_int_ext_pendant_noend = np.copy(img_int_ext_pendant)
    for i in range(X.shape[1] // 5):
        for j in range(X.shape[0]):
            img_int_ext_pendant_noend[j, i] = 1

    DisplayFractal(img_int_ext_pendant_noend, "mandelbrot_int_ext_pendant_noend.png")

    img_int_ext_pendant_noend_bigmiddle = np.copy(img_int_ext_pendant_noend)
    location_bigmiddle = np.array((X.shape[0] // 2, int(X.shape[1] / 2.5)))
    radius_bigmiddle = X.shape[1] // 25
    for i in range(X.shape[1]):
        for j in range(X.shape[0]):
            dist = norm(np.array((j, i)) - location_bigmiddle)
            if dist < radius_bigmiddle:
                img_int_ext_pendant_noend_bigmiddle[j, i] = -1

    DisplayFractal(img_int_ext_pendant_noend_bigmiddle, "mandelbrot_int_ext_pendant_noend_bigmiddle.png")

    
    print "img_int_ext: "
    print img_int_ext
    DisplayFractal(img_int_ext, "mandelbrot_int_ext.png")
    contours = measure.find_contours(img_int_ext_pendant_noend_bigmiddle, 0.0)
    len_contours = [len(contour_i) for contour_i in contours]
    print(len_contours)
    contours_sorted_by_size = sorted(contours, key=len)
    len_contours = [len(contour_i) for contour_i in contours]
    print(len_contours)
    bigcontour = contours_sorted_by_size[0]
    #embed()

    #bigcontour_int_ext = contour_to_int_ext_map(bigcontour, X, Y)

    #embed()

    #fig, ax = plt.subplots()
    #ax.imshow(img_int_ext, interpolation='nearest', cmap=plt.cm.gray)
    #for n, contour in enumerate(contours):
        #ax.plot(contour[:, 1], contour[:, 0], linewidth=2)
    #ax.plot(bigcontour[:, 1], bigcontour[:, 0], linewidth=2)

    #ax.axis('image')
    #ax.set_xticks([])
    #ax.set_yticks([])


    dwg = svgwrite.Drawing('mandelbrot.svg', profile='tiny')
    for j in [-1, -2]:
        contour = contours_sorted_by_size[j]
        for i in range(contour.shape[0] - 1):
            #print tuple(bigcontour[i])
            dwg.add(dwg.line(tuple(contour[i]), tuple(contour[i + 1]), \
                    stroke=svgwrite.rgb(10, 10, 16, '%')))
    #dwg.add(dwg.text('Test', insert=(0, 0.2), fill='red'))
    dwg.save()
    #plt.show()

    #error = 100 * np.abs(mus_evaled - ns_evaled)
    #print error.shape
    #error = error.reshape(list(error.shape)+[1])
    #print error.shape
    #error_img = np.concatenate([error, error, error], 2)
    #error_img = np.uint8(np.clip(error_img, 0, 255))
    #scipy.misc.imsave('mandelbrot_errors.png', error_img)

    # 3d mandelbrot!

    max_dist = 10
    #tsdf = gen_tsdf(img_int_ext_pendant_noend_bigmiddle, max_dist)
    tsdf = np.load("tsdf_10.npy")
    #np.save("tsdf_10", tsdf)
    #plt.imshow(tsdf)
    #plt.show()

    # a = -3 / 20 
    # b = 53 / 20 
    # c = -1
    # for 10.5 at 10, 8.5 at 5, and 1.5 at 1
    # with a * x**2 + b * x + c formula for height

    int_ext_3d_map = gen_int_ext_3d_map_from_tsdf(tsdf, max_dist)
    #embed()

    vertices, triangles = mcubes.marching_cubes(int_ext_3d_map, 0)
    mcubes.export_mesh(vertices, triangles, "mandelbrot_smoothed.dae", "Mandelbrot_pendant")
    #embed()

    from mayavi import mlab
    mlab.triangular_mesh(
        vertices[:, 0], vertices[:, 1], vertices[:, 2],
        triangles)
    mlab.show()
Code example #30
File: final_code.py Project: Tanya05/RSTstacker
import glob

import cv2
import numpy as np
import mcubes
# for i in range(1501, 1735):
#   image = Image.open('./hola/1/'+str(i)+'.png').convert('RGBA')
#   pixeldata = list(image.getdata())
#   for j,pixel in enumerate(pixeldata):
#       if pixel[:3] == (255,255,255):
#           pixeldata[j] = (255,255,255,0)
#   image.putdata(pixeldata)
#   image.save('./hola/1/'+str(i)+'.png')
# PATIENTS_FOLDER = './hola/1/'
# patients = os.listdir(PATIENTS_FOLDER) #listing all directories and files within
# patients.sort()

#------------------------Running marching cubes on the images with ROI---------------------

X_data = []
files = glob.glob ("./Head/roi_images/*.png")
for myFile in files:
    #print(myFile)
    image = cv2.imread (myFile)
    X_data.append (image)

print('X_data shape:', np.array(X_data).shape)

patient_scans = np.array(X_data)[:,:,:,0]
print(patient_scans.shape)
print(type(patient_scans))
# verts, faces, normals, values = measure.marching_cubes_lewiner(patient_scans, 1)
vertices, triangles = mcubes.marching_cubes(patient_scans, 0)
mcubes.export_mesh(vertices, triangles, "final_image.dae", "MyROI")
Code example #31
	def test_interp(self, config):
		self.saver = tf.train.Saver()
		could_load, checkpoint_counter = self.load(self.checkpoint_dir)
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			return
		
		interp_size = 8
		
		idx1 = 0
		idx2 = 1
		
		thres = 0.6
		
		add_out = "./out/"
		
		dim = 128
		dima = self.test_size
		multiplier = int(dim/dima)
		multiplier2 = multiplier*multiplier
		multiplier3 = multiplier*multiplier*multiplier
		
		#get coords 64
		aux_x = np.zeros([dima,dima,dima],np.int32)
		aux_y = np.zeros([dima,dima,dima],np.int32)
		aux_z = np.zeros([dima,dima,dima],np.int32)
		for i in range(dima):
			for j in range(dima):
				for k in range(dima):
					aux_x[i,j,k] = i*multiplier
					aux_y[i,j,k] = j*multiplier
					aux_z[i,j,k] = k*multiplier
		coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
		for i in range(multiplier):
			for j in range(multiplier):
				for k in range(multiplier):
					coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
					coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
					coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
		coords = (coords+0.5)/dim*2.0-1.0
		coords = np.reshape(coords,[multiplier3,self.batch_size,3])
		
		offset_x = int(self.crop_edge/2)
		offset_y = int(self.crop_edge/2)
		batch_view1 = self.data_pixel[idx1,0]
		batch_view1 = batch_view1[offset_y:offset_y+self.crop_size, offset_x:offset_x+self.crop_size]
		batch_view1 = np.reshape(batch_view1/255.0, [1,self.crop_size,self.crop_size,1])
		batch_view2 = self.data_pixel[idx2,0]
		batch_view2 = batch_view2[offset_y:offset_y+self.crop_size, offset_x:offset_x+self.crop_size]
		batch_view2 = np.reshape(batch_view2/255.0, [1,self.crop_size,self.crop_size,1])
		
		model_z1 = self.sess.run(self.sE,
			feed_dict={
				self.view_test: batch_view1,
			})
		model_z2 = self.sess.run(self.sE,
			feed_dict={
				self.view_test: batch_view2,
			})
		
		batch_z = np.zeros([interp_size,self.z_dim], np.float32)
		for i in range(interp_size):
			batch_z[i] = model_z2*i/(interp_size-1) + model_z1*(interp_size-1-i)/(interp_size-1)
		
		for t in range(interp_size):
			model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
			for i in range(multiplier):
				for j in range(multiplier):
					for k in range(multiplier):
						minib = i*multiplier2+j*multiplier+k
						model_out = self.sess.run(self.zG,
							feed_dict={
								self.z_vector_test: batch_z[t],
								self.point_coord: coords[minib],
							})
						model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])
			
			vertices, triangles = mcubes.marching_cubes(model_float, thres)
			mcubes.export_mesh(vertices, triangles, add_out+str(t)+".dae", str(t))
			print("[sample]")
Code example #32
import h5py
import cv2
import numpy as np
import mcubes
data_dict = h5py.File('02691156_vox.hdf5', 'r')
i = 0
shape = 'airplane'
vox = data_dict['voxels'][:]

batch_vox = vox[i:i+1]
batch_vox = np.reshape(batch_vox,[64,64,64])
img1 = np.clip(np.amax(batch_vox, axis=0)*256, 0,255).astype(np.uint8)
cv2.imwrite(shape + '_' + str(i)+"_vox_1.png",img1)
img2 = np.clip(np.amax(batch_vox, axis=1)*256, 0,255).astype(np.uint8)
cv2.imwrite(shape + '_' + str(i)+"_vox_2.png",img2)
img3 = np.clip(np.amax(batch_vox, axis=2)*256, 0,255).astype(np.uint8)
cv2.imwrite(shape + '_' + str(i)+"_vox_3.png",img3)
vertices, triangles = mcubes.marching_cubes(batch_vox, 0.5)
mcubes.export_mesh(vertices, triangles, shape + '_' + str(i)+"_vox.dae", str(i))


points16 = data_dict['points_16'][:]
data_values16 = data_dict['values_16'][:]

batch_points = points16[i,:]
batch_values = data_values16[i,:]
real_model = np.zeros([16,16,16],np.uint8)
real_model[batch_points[:,0],batch_points[:,1],batch_points[:,2]] = np.reshape(batch_values, [-1])
img1 = np.clip(np.amax(real_model, axis=0)*256, 0,255).astype(np.uint8)
cv2.imwrite(shape + '_' + str(i)+"_16_1.png",img1)
img2 = np.clip(np.amax(real_model, axis=1)*256, 0,255).astype(np.uint8)
cv2.imwrite(shape + '_' + str(i)+ "_16_2.png",img2)
img3 = np.clip(np.amax(real_model, axis=2)*256, 0,255).astype(np.uint8)
cv2.imwrite(shape + '_' + str(i)+"_16_3.png",img3)
Code example #33
	def test(self, config):
		self.saver = tf.train.Saver()
		could_load, checkpoint_counter = self.load(self.checkpoint_dir)
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			return
		
		thres = 0.6
		
		add_out = "./out/"
		add_image = "./image/"
		
		dim = 128
		dima = self.test_size
		multiplier = int(dim/dima)
		multiplier2 = multiplier*multiplier
		multiplier3 = multiplier*multiplier*multiplier
		
		#get coords 64
		aux_x = np.zeros([dima,dima,dima],np.int32)
		aux_y = np.zeros([dima,dima,dima],np.int32)
		aux_z = np.zeros([dima,dima,dima],np.int32)
		for i in range(dima):
			for j in range(dima):
				for k in range(dima):
					aux_x[i,j,k] = i*multiplier
					aux_y[i,j,k] = j*multiplier
					aux_z[i,j,k] = k*multiplier
		coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
		for i in range(multiplier):
			for j in range(multiplier):
				for k in range(multiplier):
					coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
					coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
					coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
		coords = (coords+0.5)/dim*2.0-1.0
		coords = np.reshape(coords,[multiplier3,self.batch_size,3])
		
		offset_x = int(self.crop_edge/2)
		offset_y = int(self.crop_edge/2)
		
		#test_num = self.data_pixel.shape[0]
		test_num = 16
		for t in range(test_num):
			print(t,test_num)
			
			batch_view = self.data_pixel[t,0]
			batch_view = batch_view[offset_y:offset_y+self.crop_size, offset_x:offset_x+self.crop_size]
			batch_view = np.reshape(batch_view/255.0, [1,self.crop_size,self.crop_size,1])
			
			model_z = self.sess.run(self.sE,
				feed_dict={
					self.view_test: batch_view,
				})
			
			model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
			for i in range(multiplier):
				for j in range(multiplier):
					for k in range(multiplier):
						minib = i*multiplier2+j*multiplier+k
						model_out = self.sess.run(self.zG,
							feed_dict={
								self.z_vector_test: model_z,
								self.point_coord: coords[minib],
							})
						model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])
			'''
			img1 = np.clip(np.amax(model_float, axis=0)*256, 0,255).astype(np.uint8)
			img2 = np.clip(np.amax(model_float, axis=1)*256, 0,255).astype(np.uint8)
			img3 = np.clip(np.amax(model_float, axis=2)*256, 0,255).astype(np.uint8)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_1t.png",img1)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_2t.png",img2)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_3t.png",img3)
			img1 = (np.reshape(batch_view, [self.crop_size,self.crop_size])*255).astype(np.uint8)
			cv2.imwrite(config.sample_dir+"/"+str(t)+"_v.png",img1)
			'''
			vertices, triangles = mcubes.marching_cubes(model_float, thres)
			mcubes.export_mesh(vertices, triangles, add_out+str(t)+".dae", str(t))
			
			cv2.imwrite(add_image+str(t)+".png", self.data_pixel[t,0])
			
			print("[sample]")
Code example #34
	def test_image(self, config):
		self.saver = tf.train.Saver()
		could_load, checkpoint_counter = self.load(self.checkpoint_dir)
		if could_load:
			print(" [*] Load SUCCESS")
		else:
			print(" [!] Load failed...")
			return
		
		thres = 0.6
		
		add_out = "./out/"
		add_image = "./image/"
		
		dim = 128
		dima = self.test_size
		multiplier = int(dim/dima)
		multiplier2 = multiplier*multiplier
		multiplier3 = multiplier*multiplier*multiplier
		
		#get coords 64
		aux_x = np.zeros([dima,dima,dima],np.int32)
		aux_y = np.zeros([dima,dima,dima],np.int32)
		aux_z = np.zeros([dima,dima,dima],np.int32)
		for i in range(dima):
			for j in range(dima):
				for k in range(dima):
					aux_x[i,j,k] = i*multiplier
					aux_y[i,j,k] = j*multiplier
					aux_z[i,j,k] = k*multiplier
		coords = np.zeros([multiplier3,dima,dima,dima,3],np.float32)
		for i in range(multiplier):
			for j in range(multiplier):
				for k in range(multiplier):
					coords[i*multiplier2+j*multiplier+k,:,:,:,0] = aux_x+i
					coords[i*multiplier2+j*multiplier+k,:,:,:,1] = aux_y+j
					coords[i*multiplier2+j*multiplier+k,:,:,:,2] = aux_z+k
		coords = (coords+0.5)/dim*2.0-1.0
		coords = np.reshape(coords,[multiplier3,self.batch_size,3])
		
		offset_x = int(self.crop_edge/2)
		offset_y = int(self.crop_edge/2)
		
		for t in range(16):
			img_add = add_image+str(t)+".png"
			print(img_add)
			imgo_ = cv2.imread(img_add, cv2.IMREAD_GRAYSCALE)

			img = cv2.resize(imgo_, (self.view_size,self.view_size))
			batch_view = np.reshape(img,(1,self.view_size,self.view_size,1))
			batch_view = batch_view[:, offset_y:offset_y+self.crop_size, offset_x:offset_x+self.crop_size, :]
			batch_view = batch_view/255.0
			
			model_z = self.sess.run(self.sE,
				feed_dict={
					self.view_test: batch_view,
				})
			
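			# Decode block-by-block into the padded occupancy grid, as above.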
			model_float = np.zeros([dim+2,dim+2,dim+2],np.float32)
			for i in range(multiplier):
				for j in range(multiplier):
					for k in range(multiplier):
						minib = i*multiplier2+j*multiplier+k
						model_out = self.sess.run(self.zG,
							feed_dict={
								self.z_vector_test: model_z,
								self.point_coord: coords[minib],
							})
						model_float[aux_x+i+1,aux_y+j+1,aux_z+k+1] = np.reshape(model_out, [dima,dima,dima])
			
			vertices, triangles = mcubes.marching_cubes(model_float, thres)
			mcubes.export_mesh(vertices, triangles, add_out+str(t)+".dae", str(t))
			
			print("[sample]")
Code example #35
File: run_sdf_model.py  Project: jtpils/PointSDF
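Evaluates a trained SDF network on a 32^3 lattice spanning [-0.5, 0.5]^3 for each validation point cloud, marks voxels whose predicted SDF falls in the thin band [-0.05, 0] just inside the surface, and optionally meshes the voxelization with marching cubes.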
def extract_voxel(get_model, model_path, loss_function, train_path,
                  validation_path, mesh):

    # Collect validation .tfrecord files (train_path is not used here).
    validation_files = [
        os.path.join(validation_path, filename)
        for filename in os.listdir(validation_path) if ".tfrecord" in filename
    ]

    sdf_count_ = 2048
    voxel_resolution = 32

    # Fetch the data.
    validation_dataset = get_sdf_dataset(validation_files,
                                         batch_size=1,
                                         sdf_count=sdf_count_)

    # Setup iterators.
    val_iterator = validation_dataset.make_initializable_iterator()
    val_next_point_cloud, val_next_xyz, val_next_label = val_iterator.get_next()

    # Setup model operations.
    points = tf.placeholder(tf.float32)
    xyz_in = tf.placeholder(tf.float32)
    sdf_labels = tf.placeholder(tf.float32)
    is_training = tf.placeholder(tf.bool)

    sdf_prediction, loss, _ = get_model(points,
                                        xyz_in,
                                        sdf_labels,
                                        is_training,
                                        None,
                                        batch_size=1,
                                        alpha=0.5,
                                        loss_function=loss_function,
                                        sdf_count=sdf_count_)

    # Generate points to sample.
    pts = []
    for x in range(voxel_resolution):
        for y in range(voxel_resolution):
            for z in range(voxel_resolution):
                x_ = -0.5 + ((1.0 / float(voxel_resolution - 1)) * x)
                y_ = -0.5 + ((1.0 / float(voxel_resolution - 1)) * y)
                z_ = -0.5 + ((1.0 / float(voxel_resolution - 1)) * z)
                pts.append([x_, y_, z_])
    pts = np.array(pts)
    pt_splits = np.split(pts, pts.shape[0] // sdf_count_)
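    # The loops above sample a uniform lattice on [-0.5, 0.5]^3; an equivalent
    # vectorized construction (a sketch, not in the original) would be:
    #   lin = np.linspace(-0.5, 0.5, voxel_resolution)
    #   pts = np.stack(np.meshgrid(lin, lin, lin, indexing='ij'), -1).reshape(-1, 3)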

    # Save/Restore model.
    saver = tf.train.Saver()

    with tf.Session() as sess:
        saver.restore(sess, os.path.join(model_path, 'model.ckpt'))

        # Setup function that predicts SDF for (x,y,z) given a point cloud.
        def get_sdf(point_cloud, pts):

            # Only the prediction op is fetched, so no SDF labels need to be fed.
            prediction = sess.run(sdf_prediction,
                                  feed_dict={
                                      points: point_cloud,
                                      xyz_in: pts,
                                      is_training: False,
                                  })

            # print(xyz)
            # print(prediction)

            return prediction

        sess.run(val_iterator.initializer)
        for i in range(20):
            point_clouds_, xyzs_, labels_ = sess.run(
                (val_next_point_cloud, val_next_xyz, val_next_label))

            # Setup a voxelization based on the SDF.
            voxelized = np.zeros(
                (voxel_resolution, voxel_resolution, voxel_resolution),
                dtype=np.float32)

            filled_pts = []

            # For all points sample SDF given the point cloud and include points inside the object to a point cloud.
            for pts_ in pt_splits:
                sdf_ = get_sdf(point_clouds_,
                               np.reshape(pts_, (1, sdf_count_, 3)))

                for pt_, sdf in zip(np.reshape(pts_, (sdf_count_, 3)),
                                    np.reshape(sdf_, (sdf_count_, ))):
                    if -0.05 <= sdf <= 0.0:
                        filled_pts.append(pt_)
                        x_ = int(
                            round(
                                (pt_[0] + 0.5) * float(voxel_resolution - 1)))
                        y_ = int(
                            round(
                                (pt_[1] + 0.5) * float(voxel_resolution - 1)))
                        z_ = int(
                            round(
                                (pt_[2] + 0.5) * float(voxel_resolution - 1)))
                        voxelized[x_, y_, z_] = 1.0

            # Plot.
            plot_3d_points(point_clouds_[0])
            plot_3d_points(np.reshape(filled_pts, (-1, 3)))
            if mesh:
                # Mesh w/ mcubes.
                #plot_voxel(convert_to_sparse_voxel_grid(voxelized), voxel_res=(voxel_resolution, voxel_resolution, voxel_resolution))
                vertices, triangles = mcubes.marching_cubes(voxelized, 0)
                mcubes.export_mesh(vertices, triangles, 'test.dae', 'test')

                meshed_object = trimesh.load('test.dae')
                meshed_object.show()
Code example #36
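A ROS action-server callback for shape completion: voxelizes the partial-mesh vertices, predicts the occluded region with a 3D CNN, upsamples the completed region onto a 4x-resolution grid, extracts a marching-cubes mesh, optionally Poisson-remeshes it through meshlabserver, and returns the mesh rescaled to world coordinates.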
    def execute_cb(self, goal):

        start_time = time.time()
        self._feedback = graspit_shape_completion.msg.CompleteMeshFeedback()
        self._result = graspit_shape_completion.msg.CompleteMeshResult()

        rospy.loginfo('Received Msg')
        single_view_pointcloud_filepath = '/srv/data/shape_completion_data/test_1/pcd_8_310_143723.pcd'

        point_array = np.asarray(goal.partial_mesh.vertices)
        pc = np.zeros((len(point_array), 3), np.float32)
        for i in range(len(point_array)):
            pc[i][0] = point_array[i].x
            pc[i][1] = point_array[i].y
            pc[i][2] = point_array[i].z

        batch_x = np.zeros((1, self.patch_size, self.patch_size, self.patch_size, 1), dtype=np.float32)

        batch_x[0, :, :, :, :], voxel_resolution, offset = reconstruction_utils.build_test_from_pc_scaled(pc, self.patch_size)

        #make batch B2C01 rather than B012C
        batch_x = batch_x.transpose(0, 3, 4, 1, 2)

        pred = self.model._predict(batch_x)
        pred = pred.reshape(1, self.patch_size, 1, self.patch_size, self.patch_size)

        pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

        batch_x = batch_x.transpose(0, 3, 4, 1, 2)
        mask = reconstruction_utils.get_occluded_voxel_grid(batch_x[0, :, :, :, 0])
        completed_region = pred_as_b012c[0, :, :, :, 0] * mask

        scale = 4
        high_res_voxel_grid, voxel_resolution, offset = reconstruction_utils.build_high_res_voxel_grid(pc, scale, self.patch_size)
        indices = np.mgrid[0:scale*self.patch_size:1, 0:scale*self.patch_size:1, 0:scale*self.patch_size:1]
        scaled_completed_region = map_coordinates(completed_region, indices/scale, order = 0, mode = 'constant', cval=0.0)

        output = high_res_voxel_grid[:, :, :, 0] + scaled_completed_region
        #output = batch_x[0, :, :, :, 0] + completed_region

        v, t = mcubes.marching_cubes(output, 0.25)
        if self.smooth_mesh:
            coarse_mesh = '/srv/data/temp/' + 'coarse.dae'
            smooth_mesh = '/srv/data/temp/' + 'smooth.off'
            script_file = '/srv/data/temp/' + 'poisson_remesh.mlx'
            mcubes.export_mesh(v, t, coarse_mesh, 'model')
            cmd_string = 'meshlabserver -i ' + coarse_mesh
            cmd_string = cmd_string + ' -o ' + smooth_mesh
            cmd_string = cmd_string + ' -s ' + script_file
            process = subprocess.call(cmd_string, shell=True)
            off = off_handler.OffHandler()
            off.read(smooth_mesh)
            v = off.vertices
            t = off.faces

        #viz.visualize_batch_x(pred, 0, str(1), 'results/' + "pred_" + str(1))
        #viz.visualize_batch_x(batch_x, 0, str(1), 'results/' + "input_" + str(1))

        v *= voxel_resolution
        v += offset

        for i in range(len(v)):
            self._result.completed_mesh.vertices.append(geometry_msgs.msg.Point(v[i, 0], v[i, 1], v[i, 2]))
        for i in range(len(t)):
            self._result.completed_mesh.triangles.append(shape_msgs.msg.MeshTriangle((t[i, 0], t[i, 1], t[i, 2])))

        end_time = time.time()
        self._result.completion_time = int(1000*(end_time - start_time))
        self._as.set_succeeded(self._result)
Code example #37
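Loads an .xyz point cloud with PyntCloud, rasterizes it into a 32^3 binary voxel grid, and meshes the occupied cells directly.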
import numpy as np
import pandas as pd
from pyntcloud import PyntCloud
import mcubes

cloud = PyntCloud.from_file("../data/points.xyz", sep=" ")
# k_neighbors = cloud.get_neighbors(k=10)
# cloud.add_scalar_field("normals", k_neighbors=k_neighbors)

voxelgrid_id = cloud.add_structure("voxelgrid", n_x=32, n_y=32, n_z=32)

voxelgrid = cloud.structures[voxelgrid_id]

x_cords = voxelgrid.voxel_x
y_cords = voxelgrid.voxel_y
z_cords = voxelgrid.voxel_z

voxel = np.zeros((32, 32, 32))

for x, y, z in zip(x_cords, y_cords, z_cords):
    voxel[x][y][z] = 1

# smooth = mcubes.smooth(voxel)
vertices, triangles = mcubes.marching_cubes(voxel, 0)

mcubes.export_mesh(vertices, triangles, "../outputs/scene.dae", "MyScene")
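Meshing the raw binary grid gives blocky, axis-aligned facets. PyMCubes ships a smooth() function for exactly this case, which the commented-out line above hints at; a minimal sketch of the smoothed variant (output filename assumed):

# Smooth the binary grid, then extract the 0-isosurface of the smoothed field.
smoothed = mcubes.smooth(voxel)
vertices, triangles = mcubes.marching_cubes(smoothed, 0)
mcubes.export_mesh(vertices, triangles, "../outputs/scene_smooth.dae", "MySceneSmooth")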
Code example #38
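The same shape-completion callback as example #36, taken from a differently formatted copy of the source.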
    def execute_cb(self, goal):

        start_time = time.time()
        self._feedback = graspit_shape_completion.msg.CompleteMeshFeedback()
        self._result = graspit_shape_completion.msg.CompleteMeshResult()

        rospy.loginfo('Received Msg')
        single_view_pointcloud_filepath = '/srv/data/shape_completion_data/test_1/pcd_8_310_143723.pcd'

        point_array = np.asarray(goal.partial_mesh.vertices)
        pc = np.zeros((len(point_array), 3), np.float32)
        for i in range(len(point_array)):
            pc[i][0] = point_array[i].x
            pc[i][1] = point_array[i].y
            pc[i][2] = point_array[i].z

        batch_x = np.zeros(
            (1, self.patch_size, self.patch_size, self.patch_size, 1),
            dtype=np.float32)

        batch_x[
            0, :, :, :, :], voxel_resolution, offset = reconstruction_utils.build_test_from_pc_scaled(
                pc, self.patch_size)

        #make batch B2C01 rather than B012C
        batch_x = batch_x.transpose(0, 3, 4, 1, 2)

        pred = self.model._predict(batch_x)
        pred = pred.reshape(1, self.patch_size, 1, self.patch_size,
                            self.patch_size)

        pred_as_b012c = pred.transpose(0, 3, 4, 1, 2)

        batch_x = batch_x.transpose(0, 3, 4, 1, 2)
        mask = reconstruction_utils.get_occluded_voxel_grid(batch_x[0, :, :, :,
                                                                    0])
        completed_region = pred_as_b012c[0, :, :, :, 0] * mask

        scale = 4
        high_res_voxel_grid, voxel_resolution, offset = reconstruction_utils.build_high_res_voxel_grid(
            pc, scale, self.patch_size)
        indices = np.mgrid[0:scale * self.patch_size:1,
                           0:scale * self.patch_size:1,
                           0:scale * self.patch_size:1]
        scaled_completed_region = map_coordinates(completed_region,
                                                  indices / scale,
                                                  order=0,
                                                  mode='constant',
                                                  cval=0.0)

        output = high_res_voxel_grid[:, :, :, 0] + scaled_completed_region
        #output = batch_x[0, :, :, :, 0] + completed_region

        v, t = mcubes.marching_cubes(output, 0.25)
        if self.smooth_mesh:
            coarse_mesh = '/srv/data/temp/' + 'coarse.dae'
            smooth_mesh = '/srv/data/temp/' + 'smooth.off'
            script_file = '/srv/data/temp/' + 'poisson_remesh.mlx'
            mcubes.export_mesh(v, t, coarse_mesh, 'model')
            cmd_string = 'meshlabserver -i ' + coarse_mesh
            cmd_string = cmd_string + ' -o ' + smooth_mesh
            cmd_string = cmd_string + ' -s ' + script_file
            process = subprocess.call(cmd_string, shell=True)
            off = off_handler.OffHandler()
            off.read(smooth_mesh)
            v = off.vertices
            t = off.faces

        #viz.visualize_batch_x(pred, 0, str(1), 'results/' + "pred_" + str(1))
        #viz.visualize_batch_x(batch_x, 0, str(1), 'results/' + "input_" + str(1))

        v *= voxel_resolution
        v += offset

        for i in range(len(v)):
            self._result.completed_mesh.vertices.append(
                geometry_msgs.msg.Point(v[i, 0], v[i, 1], v[i, 2]))
        for i in range(len(t)):
            self._result.completed_mesh.triangles.append(
                shape_msgs.msg.MeshTriangle((t[i, 0], t[i, 1], t[i, 2])))

        end_time = time.time()
        self._result.completion_time = int(1000 * (end_time - start_time))
        self._as.set_succeeded(self._result)
Code example #39
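The PyMCubes reference demo: extracts a sphere isosurface first from a NumPy volume, then from an implicit Python function sampled over a bounding box.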
import numpy as np
import mcubes

print("Example 1: Isosurface in NumPy volume...")

# Create a data volume (100 x 100 x 100)
X, Y, Z = np.mgrid[:100, :100, :100]
u = (X - 50)**2 + (Y - 50)**2 + (Z - 50)**2 - 25**2

# Extract the 0-isosurface
vertices1, triangles1 = mcubes.marching_cubes(u, 0)
print(vertices1.shape, triangles1.shape)

# Export the result to sphere.dae
mcubes.export_mesh(vertices1, triangles1, "sphere1.dae", "MySphere")

print("Done. Result saved in 'sphere1.dae'.")

print("Example 2: Isosurface in Python function...")
print("(this might take a while...)")

# Create the volume


def f(x, y, z):
    return x**2 + y**2 + z**2


# Extract the 16-isosurface
# (Call completed per the standard PyMCubes demo: sample f on [-10, 10]^3
# at 100 samples per axis and extract the 16-isosurface.)
vertices2, triangles2 = mcubes.marching_cubes_func(
    (-10, -10, -10), (10, 10, 10), 100, 100, 100, f, 16)

# Export the result to sphere2.dae
mcubes.export_mesh(vertices2, triangles2, "sphere2.dae", "MySphere")
print("Done. Result saved in 'sphere2.dae'.")
Code example #40
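A quick inspection of IM-NET style training data: writes max-intensity projections of the 64^3 voxel grid and of the sparse 16^3 point samples, and exports marching-cubes meshes for both.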
import numpy as np
import cv2
import mcubes

# `data_dict` is assumed to be an already-open HDF5 file (e.g. via h5py)
# holding IM-NET style sampled points, values, and voxel grids.
data_points16 = data_dict['points_16'][:]
data_values16 = data_dict['values_16'][:]
data_points64 = data_dict['points_64'][:]
data_values64 = data_dict['values_64'][:]
data_voxels = data_dict['voxels'][:]

dxb = 0

batch_voxels = data_voxels[dxb:dxb + 1]
batch_voxels = np.reshape(batch_voxels, [64, 64, 64])
img1 = np.clip(np.amax(batch_voxels, axis=0) * 256, 0, 255).astype(np.uint8)
img2 = np.clip(np.amax(batch_voxels, axis=1) * 256, 0, 255).astype(np.uint8)
img3 = np.clip(np.amax(batch_voxels, axis=2) * 256, 0, 255).astype(np.uint8)
cv2.imwrite(str(dxb) + "_vox_1.png", img1)
cv2.imwrite(str(dxb) + "_vox_2.png", img2)
cv2.imwrite(str(dxb) + "_vox_3.png", img3)
vertices, triangles = mcubes.marching_cubes(batch_voxels, 0.5)
mcubes.export_mesh(vertices, triangles, str(dxb) + "_vox.dae", str(dxb))

batch_points_int = data_points16[dxb, :]
batch_values = data_values16[dxb, :]
real_model = np.zeros([16, 16, 16], np.uint8)
real_model[batch_points_int[:, 0], batch_points_int[:, 1],
           batch_points_int[:, 2]] = np.reshape(batch_values, [-1])
img1 = np.clip(np.amax(real_model, axis=0) * 256, 0, 255).astype(np.uint8)
img2 = np.clip(np.amax(real_model, axis=1) * 256, 0, 255).astype(np.uint8)
img3 = np.clip(np.amax(real_model, axis=2) * 256, 0, 255).astype(np.uint8)
cv2.imwrite(str(dxb) + "_16_1.png", img1)
cv2.imwrite(str(dxb) + "_16_2.png", img2)
cv2.imwrite(str(dxb) + "_16_3.png", img3)
vertices, triangles = mcubes.marching_cubes(real_model, 0.5)  # mesh the 16^3 model, not the 64^3 voxels
mcubes.export_mesh(vertices, triangles, str(dxb) + "_16.dae", str(dxb))
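The clip/project/imwrite triple above repeats once per resolution; a small helper (a sketch, not part of the original script) keeps the pattern in one place:

def save_projections(volume, prefix):
    # Write max-intensity projections of `volume` along each axis as 8-bit PNGs.
    for axis in range(3):
        img = np.clip(np.amax(volume, axis=axis) * 256, 0, 255).astype(np.uint8)
        cv2.imwrite("%s_%d.png" % (prefix, axis + 1), img)

# e.g. save_projections(batch_voxels, str(dxb) + "_vox")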