Example #1
 def read_ply(self, file_name):
     num_samples = self.num_samples // len(self.files_list)
     if self.file_index == len(self.files_list) - 1:
         num_samples = num_samples + (self.num_samples - (num_samples * len(self.files_list)))
     
     root, ext = os.path.splitext(file_name)
     if not os.path.isfile(root + ".npy"):
         ply = PlyData.read(file_name)
         vertex = ply['vertex']
         (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
         points = np.asarray(list(zip(x.ravel(), y.ravel(), z.ravel())))  # materialize: Python 3 zip is lazy
         np.save(root + ".npy", points)
     else:
         points = np.load(root + ".npy")
     
     #load normals
     if os.path.isfile(root + "_normals" + ".ply"):
         if not os.path.isfile(root + "_normals" + ".npy"):
             ply1 = PlyData.read(root + "_normals" + ".ply")
             vertex = ply1['vertex']
             (nx, ny, nz) = (vertex[t] for t in ('nx', 'ny', 'nz'))
             self.normals = np.asarray(list(zip(nx.ravel(), ny.ravel(), nz.ravel())))
             np.save(root + "_normals" + ".npy", self.normals)
         else:
             self.normals = np.load(root + "_normals" + ".npy")
     
     if self.add_noise:
         self.data = utils.add_noise_normal(points, std=self.nois_std)
     else:
         self.data = np.asarray(points)
     
     self.pc_diameter = utils.get_pc_diameter(self.data)
     self.l = self.relL*self.pc_diameter
     
     rot = utils.angle_axis_to_rotation(self.rotation_angle, self.rotation_axis)
     self.data = utils.transform_pc(self.data, rot)
     
     #plotutils.show_pc(self.data)
     #mlab.show()
             
     #TODO: better sampling
     print "sampling file: ", file_name
     self.samples, self.sample_indices = Sampler.sample(self.data, -1, min_num_point=-1, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     #self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples, file_name=file_name, sampling_algorithm=self.sampling_algorithm)
     #self.samples = self.samples[0:num_samples]
     #self.sample_indices = self.sample_indices[0:num_samples]
     
     self.tree = spatial.KDTree(self.data)
     return self.data
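The parse-once-then-cache pattern above (PLY parsed once, reloaded from .npy afterwards) is reusable on its own. A minimal sketch, assuming only x/y/z vertex properties:

import os
import numpy as np
from plyfile import PlyData

def load_points_cached(file_name):
    # parse the PLY once; later calls reload the cached .npy
    root, _ = os.path.splitext(file_name)
    cache = root + ".npy"
    if os.path.isfile(cache):
        return np.load(cache)
    vertex = PlyData.read(file_name)['vertex']
    points = np.column_stack((vertex['x'], vertex['y'], vertex['z']))
    np.save(cache, points)
    return points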
Example #2
def readMesh_PLY(filename, output="soup"):

    if output != "soup":
        raise Exception("Mesh types other than soup not yet supported")

    # Read the actual file
    # TODO This takes a long time, maybe try to replace with something faster of my own?
    plydata = PlyData.read(filename)

    # Read vertices
    # If the mesh has more than three columns of vertex data, ignore the later columns
    # (for instance, Stanford Mesh Repo meshes store intensity and confidence here)
    nVerts = plydata["vertex"].count
    verts = np.zeros((nVerts, 3))
    verts[:, 0] = np.array(plydata["vertex"].data["x"])
    verts[:, 1] = np.array(plydata["vertex"].data["y"])
    verts[:, 2] = np.array(plydata["vertex"].data["z"])

    # Read faces
    faces = make2d(plydata["face"].data["vertex_indices"])

    # Build a mesh from these vertices and faces
    mesh = TriSoupMesh(verts, faces)

    return mesh
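`make2d` is not defined in this snippet. A minimal sketch, assuming it packs the ragged `vertex_indices` object array into an (n, 3) array; it relies on the same `np.fromiter` trick used in Example #4:

def make2d(array, cols=3):
    # pack an object array of equal-length index rows into a 2-D array
    if len(array) == 0:
        return np.zeros((0, cols), dtype=np.int64)
    dtype = array[0].dtype
    return np.fromiter(array, [('data', dtype, (cols,))], count=len(array))['data']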
Example #3
def main():
    res_folder = opt.result_dir
    ply_folder = opt.scan_path
    output_dir = opt.output_dir
    os.makedirs(output_dir, exist_ok=True)

    reader_ins = Benchmark_reader(res_folder)
    for folder in os.listdir(res_folder):
        if os.path.isdir(os.path.join(res_folder, folder)):
            continue
        print(folder)
        # ply reader
        ply_file = os.path.join(ply_folder, folder.split('.')[0], folder.split('.')[0]+'_vh_clean_2.ply')
        ply_data = PlyData.read(ply_file)
        points = []
        for point in ply_data.elements[0].data:
            points.append([point[0], point[1], point[2]])
        points = np.array(points)
        colors = np.zeros_like(points)

        # instance reader
        instances = reader_ins[folder]
        for instance_idx, instance_key in enumerate(instances.keys()):
            r, g, b = create_color_palette()[int((instance_idx + 1)%41)]
            colors[instances[instance_key]['points'].nonzero()[0].astype(np.int32)] = [r,g,b]

        output_file = os.path.join(output_dir, folder.split('.')[0] + '.ply')
        write_ply(points, colors, None, output_file)
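`write_ply` and `create_color_palette` belong to the surrounding project and are not shown. A sketch of a `write_ply` with the same call signature, assuming the unused third argument is optional per-vertex normals:

import numpy as np
from plyfile import PlyData, PlyElement

def write_ply(points, colors, normals, output_file):
    # pack xyz + rgb into one structured vertex element and write a binary PLY
    # (normals accepted only for signature compatibility; ignored here)
    dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
             ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')]
    data = np.empty(len(points), dtype=dtype)
    data['x'], data['y'], data['z'] = points.T
    data['red'], data['green'], data['blue'] = colors.astype(np.uint8).T
    PlyData([PlyElement.describe(data, 'vertex')]).write(output_file)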
Example #4
def ply_plot(ply_file, opacity = 1, color = (1,1,1)):
    '''
    Plot vertices and triangles from a PlyData instance. Assumptions:
        `ply' has a 'vertex' element with 'x', 'y', and 'z'
            properties;
        `ply' has a 'face' element with an integral list property
            'vertex_indices', all of whose elements have length 3.
    '''
    ply = PlyData.read(ply_file)

    vertex = ply['vertex'].data

    (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))

    # mlab.points3d(x, y, z, color=(1, 1, 1), mode='point')

    tri_idx = ply['face'].data['vertex_indices']
    idx_dtype = tri_idx[0].dtype

    triangles = numpy.fromiter(tri_idx, [('data', idx_dtype, (3,))],
                               count=len(tri_idx))['data']

    mesh = mlab.triangular_mesh(x, y, z, triangles,
                                color=color,
                                opacity = opacity)
    return mesh
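A hypothetical invocation (file name assumed):

mesh = ply_plot("bunny.ply", opacity=0.8, color=(0.6, 0.6, 0.9))
mlab.show()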
Example #5
def main():
    parser = ArgumentParser()
    parser.add_argument("ply_filename")

    args = parser.parse_args()

    plot(PlyData.read(args.ply_filename))
Example #6
    def read_ply(self, file_name, num_samples=1000, sample_class_start=0, add_noise=False,
                 noise_prob=0.3, noise_factor=0.02, noise_std=0.1, sampling_algorithm=SampleAlgorithm.Uniform,
                 rotation_axis=[0, 0, 1], rotation_angle=0):
         
        root, ext = os.path.splitext(file_name)
        if not os.path.isfile(root + ".npy"):
            ply = PlyData.read(file_name)
            vertex = ply['vertex']
            (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
            points = np.asarray(list(zip(x.ravel(), y.ravel(), z.ravel())))  # materialize: Python 3 zip is lazy
            np.save(root + ".npy", points)
        else:
            points = np.load(root + ".npy")
        
        #load normals
        if os.path.isfile(root + "_normals" + ".ply"):
            if not os.path.isfile(root + "_normals" + ".npy"):
                ply1 = PlyData.read(root + "_normals" + ".ply")
                vertex = ply1['vertex']
                (nx, ny, nz) = (vertex[t] for t in ('nx', 'ny', 'nz'))
                self.normals = np.asarray(list(zip(nx.ravel(), ny.ravel(), nz.ravel())))
                np.save(root + "_normals" + ".npy", self.normals)
            else:
                self.normals = np.load(root + "_normals" + ".npy")
        
        if add_noise:
            print "adding noise to model.."
            mr = utils.model_resolution(np.array(points))
            #mr = 0.404
            print "model resolution: ", mr
            self.data = utils.add_noise_normal(np.array(points), mr, noise_std)
        else:
            self.data = np.asarray(points)
        rot = utils.angle_axis_to_rotation(rotation_angle, rotation_axis)
        self.data = utils.transform_pc(self.data, rot)
        #plotutils.show_pc(self.data)
        #mlab.show()
        #TODO: better sampling
        self.samples, self.sample_indices = Sampler.sample(self.data, -1, num_samples-1, file_name=file_name, pose=rot, sampling_algorithm=sampling_algorithm)
        self.tree = spatial.KDTree(self.data) 
        self.sample_class_start = sample_class_start
        self.sample_class_current = sample_class_start
        self.num_samples = self.samples.shape[0]
        print "num samples: ", self.num_samples
        logging.basicConfig(filename='example.log',level=logging.DEBUG)
        return self.data
Example #7
def main():
    parser = ArgumentParser()
    parser.add_argument('ply_filename')

    args = parser.parse_args()

    plot(PlyData.read(args.ply_filename))
    mlab.show()
Example #8
def write_read(ply, tmpdir, name="test.ply"):
    """
    Utility: serialize/deserialize a PlyData instance through a
    temporary file.

     """
    filename = tmpdir.join(name)
    ply.write(str(filename))
    return PlyData.read(str(filename))
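For context, a hypothetical pytest test built on this helper (the `tet_ply_txt` fixture is assumed, as in Example #9):

def test_round_trip(tet_ply_txt, tmpdir):
    ply1 = write_read(tet_ply_txt, tmpdir)
    assert ply1.elements[0].name == "vertex"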
Example #9
def test_write_stream(tmpdir, tet_ply_txt):
    ply0 = tet_ply_txt
    test_file = tmpdir.join("test.ply")

    with test_file.open("wb") as f:
        tet_ply_txt.write(f)

    ply1 = PlyData.read(str(test_file))
    verify(ply0, ply1)
Example #10
def read_str(string, tmpdir, name="test.ply"):
    """
    Utility: create a PlyData instance from a string.

    """
    filename = tmpdir.join(name)
    with filename.open("wb") as f:
        f.write(string)
    return PlyData.read(str(filename))
Example #11
def read_ply_cloud(filename):
    ply_data = PlyData.read(filename)
    points = ply_data['vertex'].data.copy()
    cloud = np.empty([len(points), 3])  # size to the actual vertex count instead of assuming 2048
    for i in range(len(points)):
        point = points[i]
        p = np.array([point[0], point[1], point[2]])
        cloud[i] = p
    return np.array(cloud)
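The per-point loop can be replaced by a vectorized read; a sketch assuming the usual x/y/z vertex properties:

import numpy as np
from plyfile import PlyData

def read_ply_cloud_vectorized(filename):
    v = PlyData.read(filename)['vertex']
    return np.column_stack((v['x'], v['y'], v['z']))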
Example #12
def read_mesh_vertices(filename):
    assert os.path.isfile(filename)
    with open(filename, 'rb') as f:
        plydata = PlyData.read(f)
        num_verts = plydata['vertex'].count
        vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32)
        vertices[:,0] = plydata['vertex'].data['x']
        vertices[:,1] = plydata['vertex'].data['y']
        vertices[:,2] = plydata['vertex'].data['z']
    return vertices
Example #13
 def load_ply(self, path):
     plyData = PlyData.read(path)
     data = plyData['vertex']
     vertex_number = len(data[:])
     vertex_data = np.zeros((vertex_number, 3), dtype='int64')  # note: int64 truncates float coordinates
     for i in range(0, vertex_number):
         vertex_data[i][0] = data[i][0]
         vertex_data[i][1] = data[i][1]
         vertex_data[i][2] = data[i][2]
     #print(vertex_data)
     return vertex_data
Example #14
def main():
    parser = ArgumentParser()
    parser.add_argument('ply_filename')

    args = parser.parse_args()
    # file1 = open(args.ply_filename,'r')

    
    # plot(PlyData.read())
    # plot(file1)
    plot(PlyData.read(args.ply_filename))
Example #15
def test_obj_info(tmpdir):
    ply0 = PlyData([], text=True, obj_info=["test obj_info"])
    test_file = tmpdir.join("test.ply")
    ply0.write(str(test_file))

    ply0_str = test_file.read("rb").decode("ascii")
    assert ply0_str.startswith("ply\r\nformat ascii 1.0\r\nobj_info test obj_info\r\n")

    ply1 = PlyData.read(str(test_file))
    assert len(ply1.obj_info) == 1
    assert ply1.obj_info[0] == "test obj_info"
Example #16
def convert(inputfp, outputfp):
    print('converting %s to %s' % (inputfp, outputfp))
    plydata = PlyData.read(inputfp)
    vertex = np.array([each for each in plydata.elements[0].data.tolist()], dtype=np.float32)
    faces = plydata.elements[1].data.tolist()
    faces = np.array([each[0] for each in faces], dtype=np.int32)
    gv = gifti.GiftiDataArray.from_array(vertex, intent=1008)
    gf = gifti.GiftiDataArray.from_array(faces, intent=1009)
    g = gifti.GiftiImage()
    g.add_gifti_data_array(gv)
    g.add_gifti_data_array(gf)
    gio.write(g, outputfp)
Example #17
def ply_to_patch(ply_file_path, connection_string, pcid, writing_query, additional_offset, grouping_rules):
    """Read a ply file, group its points into 1 m^3 patches, and convert the patches."""
    from plyfile import PlyData
    import datetime
    
    print('\t working on ply file : ', ply_file_path)
    print('\t importing ply file ', datetime.datetime.now())
    plydata = PlyData.read(ply_file_path)
    print('\t grouping points', datetime.datetime.now())
    numpy_spec_patch = grouping_ply_data(plydata, grouping_rules)
    
    #to order the patch
    #sorted_points = np.sort(patch[1], axis=0, kind='quicksort', order=('GPS_time'))

    #send patch to database*
    print('\t sending patch to database ply file ', datetime.datetime.now())
    return making_pgpatch( numpy_spec_patch, connection_string, pcid, writing_query, ply_file_path, additional_offset)
Example #18
    def load_from_ply(self, filename, rotationFlag):

        plydata = PlyData.read(filename)
        self.plydata = plydata

        self.f = np.vstack(plydata['face'].data['vertex_indices'])
        if rotationFlag is not None:
            x = -plydata['vertex'].data['z']
            y = plydata['vertex'].data['x']
            z = -plydata['vertex'].data['y']
        else:
            x = plydata['vertex'].data['x']
            y = plydata['vertex'].data['y']
            z = plydata['vertex'].data['z']
        self.v = np.zeros([x.size, 3])
        self.v[:,0] = x
        self.v[:,1] = y
        self.v[:,2] = z
Example #19
def readPLY(fileName):
    plydata = PlyData.read(fileName)
    verts = []
    faces = []
    for ele in plydata.elements:
        if ele.name == 'vertex':
            for v in plydata['vertex']:
                if len(v) == 3:
                    verts.append(np.asarray([v[0], v[1], v[2]]))
                else:
                    verts.append(np.asarray([v[0][0], v[0][1], v[0][2]]))
        if ele.name == 'face':
            for f in plydata['face']:
                if len(f) == 3:
                    faces.append(np.asarray([f[0], f[1], f[2]]))
                else:
                    faces.append(np.asarray([f[0][0], f[0][1], f[0][2]]))
    normals = []
    return verts, faces, normals
Example #20
def readPlyFile(filename):
    """
    Usese plyfile python pacakage to read a ply file.
    Gets around issues with pcl having a bad ply writer for pointclouds
    :param filename:
    :type filename: str
    :return: vtkPolyData
    :rtype:
    """

    from plyfile import PlyData

    plydata = PlyData.read(filename)
    vertex_data = plydata['vertex'].data # numpy array with fields ['x', 'y', 'z']
    pts = np.zeros([vertex_data.size, 3])
    pts[:, 0] = vertex_data['x']
    pts[:, 1] = vertex_data['y']
    pts[:, 2] = vertex_data['z']

    return vnp.numpyToPolyData(pts)
Example #21
def run_point_cloud(self, voxel_world_id, threshold):
    import voxel_globe.tools

    import voxel_globe.meta.models as models

    import boxm2_adaptor
    import boxm2_mesh_adaptor

    from plyfile import PlyData

    voxel_world = models.VoxelWorld.objects.get(id=voxel_world_id)

    with voxel_globe.tools.task_dir("voxel_viewer") as processing_dir:
        scene_path = os.path.join(voxel_world.directory, "scene.xml")
        scene, cache = boxm2_adaptor.load_cpp(scene_path)
        ply_filename = os.path.join(processing_dir, "model.ply")
        boxm2_mesh_adaptor.gen_color_point_cloud(scene, cache, ply_filename, 0.5, "")

        ply = PlyData.read(str(ply_filename))

        return ply.elements[0].data
Example #22
 def sample_ISS(file_name, min_num_point, pose):
     root, ext = os.path.splitext(file_name)
     in_file = root + ".ply"
     out_file = root + "_iss.ply"
     if (not os.path.isfile(out_file)):
         print "file doen't exits.................."
         args = ["./iss_detect", in_file, out_file]
         popen = subprocess.Popen(args, stdout=subprocess.PIPE)
         popen.wait()
         output = popen.stdout.read()
         print(output)
     pc = np.load(root + '.npy')
     tree = spatial.KDTree(pc)
     ply = PlyData.read(out_file)
     vertex = ply['vertex']
     (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
     pc_iss = np.asarray(list(zip(x.ravel(), y.ravel(), z.ravel())))
     indices = np.zeros((pc_iss.shape[0],))
     for pt_i, samplept in enumerate(pc_iss):
         _, index = tree.query(samplept, k=1)
         indices[pt_i] = index    
     pc_iss = utils.transform_pc(pc_iss, pose)
     
     #min_num_point = min(int(pc_iss.shape[0] / 10), 200)
     if min_num_point < 0:
         #min_num_point = min(int(pc_iss.shape[0] / 10), 200)
         #min_num_point = min(int(pc_iss.shape[0] / 1), 300)
         #min_num_point = min(int(pc_iss.shape[0] / 1), 500)
         min_num_point = int(pc_iss.shape[0] / 1) 
         
     if min_num_point >= pc_iss.shape[0]:
         return pc_iss, indices
     
     sample_step = int(pc_iss.shape[0] / min_num_point)
     pc_iss_samples, _ = Sampler.sample_uniform(pc_iss, sample_step)
     indices_samples, _ = Sampler.sample_uniform(indices, sample_step)
     assert(pc_iss_samples.shape[0] == indices_samples.shape[0])
     print ",,,,,,,,,,,,,,,,,,,,,,,,,,,,,pc_iss shape:", pc_iss_samples.shape
     return pc_iss_samples, indices_samples
Example #23
def Slab_SVM(file_name, v, sigma):
    plydata = PlyData.read(open(file_name))
    vertexes = plydata['vertex'][:]

    x = np.zeros((len(vertexes),3))
    for i in range(len(vertexes)):
        x[i] = np.array([vertexes[i][0],vertexes[i][1],vertexes[i][2]])

    file_name_points = len(x)

    x_0 = plydata['vertex']['x']
    x_1 = plydata['vertex']['y']
    x_2 = plydata['vertex']['z']
    #createPlot(x_0,x_1,x_2,file_name)

    y = np.zeros(len(x))
    x, ignore_x, y, ignore_y = train_test_split(x, y, train_size=0.01, random_state=8)
    len_x = len(x)

    risk = createHyperPlane(x,v,sigma)

    print(file_name, 'points:', file_name_points, 'subsample:', len_x, 'v', v, 'sigma', sigma, 'risk', risk)
Example #24
def ply2obj(ply_path: str, obj_path: str):
    ply = PlyData.read(ply_path)

    with open(obj_path, "w") as f:
        f.write("# OBJ file\n")

        verteces = ply["vertex"]

        for v in verteces:
            p = [v["x"], v["y"], v["z"]]
            if "red" in v and "green" in v and "blue" in v:
                c = [v["red"] / 256, v["green"] / 256, v["blue"] / 256]
            else:
                c = [0, 0, 0]
            a = p + c
            f.write("v %.6f %.6f %.6f %.6f %.6f %.6f \n" % tuple(a))

        for v in vertices:
            if "nx" in names and "ny" in names and "nz" in names:
                n = (v["nx"], v["ny"], v["nz"])
                f.write("vn %.6f %.6f %.6f\n" % n)

        for v in vertices:
            if "s" in names and "t" in names:
                t = (v["s"], v["t"])
                f.write("vt %.6f %.6f\n" % t)

        if "face" in ply:
            for i in ply["face"]["vertex_index"]:
                f.write("f")
                for j in range(i.size):
                    # ii = [ i[j]+1 ]
                    ii = [i[j] + 1, i[j] + 1, i[j] + 1]
                    # f.write(" %d" % tuple(ii) )
                    f.write(" %d/%d/%d" % tuple(ii))
                f.write("\n")
Example #25
    def get_offset(self, fp):
        try:
            plydata = PlyData.read(fp)
            if plydata['vertex'].count == 0:
                return

            cor = np.vstack((plydata['vertex']['x'], plydata['vertex']['y'],
                             plydata['vertex']['z'])).transpose()

            if self.x_offset is None:
                self.x_offset = min(cor[:, 0])
                self.y_offset = min(cor[:, 1])
                self.z_offset = min(cor[:, 2])
            else:
                self.x_offset = min(self.x_offset, min(cor[:, 0]))
                self.y_offset = min(self.y_offset, min(cor[:, 1]))
                self.z_offset = min(self.z_offset, min(cor[:, 2]))

        except:  # noqa: E722
            cor, f = ply_parser(fp)

            for i in range(0, len(f)):
                for j in range(0, len(f[i])):
                    f[i][j] = int(f[i][j])
                del f[i][0]

            for face_index in f:
                face_cor = cor[face_index]
                if self.x_offset is None:
                    self.x_offset = min(face_cor[:, 0])
                    self.y_offset = min(face_cor[:, 1])
                    self.z_offset = min(face_cor[:, 2])
                else:
                    self.x_offset = min(self.x_offset, min(face_cor[:, 0]))
                    self.y_offset = min(self.y_offset, min(face_cor[:, 1]))
                    self.z_offset = min(self.z_offset, min(face_cor[:, 2]))
Example #26
def readPlyCloud(fileName, device=None):
    plydata = PlyData.read(fileName)
    verts = plydata["vertex"]
    if verts.count == 0:
        print("Warning: empty file!")
    zeros = [0] * verts.count
    ones = [1] * verts.count
    minusones = [-1] * verts.count
    x = verts["x"]
    y = verts["y"]
    z = verts["z"]
    try:
        nx = verts["nx"]
        ny = verts["ny"]
        nz = verts["nz"]
    except (ValueError, KeyError):  # numpy's missing-field error type varies by version
        nx = zeros
        ny = zeros
        nz = ones
    try:
        r = verts["red"] / 255.0
        g = verts["green"] / 255.0
        b = verts["blue"] / 255.0
    except (ValueError, KeyError):
        try:
            r = verts["r"] / 255.0
            g = verts["g"] / 255.0
            b = verts["b"] / 255.0
        except (ValueError, KeyError):
            r = ones
            g = ones
            b = ones
    points = torch.tensor([x,y,z,nx,ny,nz,r,g,b], dtype=torch.float, device=device)
    points = points.transpose(0,1)
    points[:,3:6] = normalize(points[:, 3:6], 1)
    return points
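`normalize` is not shown; a minimal sketch, assuming it L2-normalizes the normal columns of the torch tensor row-wise:

def normalize(t, dim):
    # divide each normal vector by its Euclidean length (clamp avoids /0)
    return t / t.norm(dim=dim, keepdim=True).clamp(min=1e-12)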
Example #27
def ply_to_obj(ply_path, obj_path, texture_size=(1024, 1024)):
    ply_path = Path(ply_path)
    obj_path = Path(obj_path)
    ply_copied_path = obj_path.parent / ply_path.name
    is_same = ply_copied_path == ply_path
    if not is_same:
        shutil.copy(ply_path, ply_copied_path)

    ply = PlyData.read(ply_path)
    ply_texture = None
    for c in ply.comments:
        if 'TextureFile' in c:
            ply_texture = c.split(' ')[-1]

    if ply_texture is None:
        template = _get_template('template_vertexcolor_to_texture.mlx')
        out_texture_path = obj_path.with_suffix('').name + '_texture.png'
        script = template.format(out_texture_path=out_texture_path)
        run_meshlab_script(ply_copied_path,
                           obj_path,
                           script,
                           cd_dir=obj_path.parent)
    else:
        template = _get_template('template_ply_texture_to_obj.mlx')
        script = template
        ply_texture_name = ply_texture.split('.')[0]
        out_texture_path = obj_path.parent / (ply_texture_name +
                                              '_texture.png')
        shutil.copy(ply_path.parent / ply_texture, out_texture_path)
        Image.open(out_texture_path).resize(
            texture_size, resample=PIL.Image.BILINEAR).save(out_texture_path)
        run_meshlab_script(ply_path, obj_path, template)
        add_texture_to_mtl(obj_path)
    if not is_same:
        ply_copied_path.unlink()
    return
Example #28
    def process_deprecated(self):
        LOG.info("Processing dataset...")

        for p in range(len(self.raw_paths)):
            path_cloud = self.raw_paths[p]
            path_joints = (path_cloud[:len(path_cloud) - 3] + "json").replace(
                "cloud", "joints")
            LOG.info("Processing cloud {0} out of {1}".format(
                p, len(self.raw_paths)))
            LOG.info(path_cloud)

            LOG.info(path_joints)
            hands_ = self.read_joints_json(path_joints)
            labels = hands_["left_hand"] + hands_["right_hand"]
            #print(labels)

            with open(self.raw_paths[p], 'rb') as f:
                print(self.raw_paths[p])
                cloud_ = PlyData.read(f)
                graph_ = self._create_graph(cloud_, self.k, labels)
                torch.save(
                    graph_,
                    os.path.join(self.processed_dir,
                                 "unrealhands_k{0}_{1}.pt".format(self.k, p)))
Example #29
    def prune(self):
        call(["docker", "run", "-it", "-v", "./data:/data", "deeplab"])
        plydata = PlyData.read("data/depthmaps/merged.ply")

        filename = "data/reconstruction.json"
        file = open(filename, "r")
        data = json.loads(file.read())

        for f in data.files:
            fdata = json.loads(open(f.name, "r").read())
            pco = pyclipper.PyclipperOffset()
            pco.AddPath(fdata.path)

            elements = []
            for element in plydata.elements:
            dist = element.x * f.gps.nx + element.y * f.gps.ny + element.z * f.gps.nz  # assumption: the last term was meant to use element.z
                xm = element.x + (f.gps.x - element.x) / dist
                ym = element.y + (f.gps.y - element.y) / dist
                s = pco.Execute([xm, ym])
                if len(s) > 0:
                    elements.append(element)
            plydata.elements = elements

        PlyData(plydata).write("pure.ply")
Example #30
def map_fn(param):
    save_folder = param[0]
    prediction_path = param[1]
    name = prediction_path.split('/')[-1][:-4]
    object = name[:-5]
    gt_path = '../data/ModelNet10_normalize/' + object + '/train/' + name + '.off'
    gt_path = '../data/perfect_models_test/mesh_off_norm/'+ name + '.off'
    save_ply_path = save_folder + '/' + name + ".ply"
    hd_cmd = 'meshlabserver -i %s -i %s -o %s -s ../third_party/calculate_HD.mlx -om vq' % (
    gt_path, prediction_path, save_ply_path)
    if os.system(hd_cmd + "> /dev/null 2>&1"):
        print "cannot calculate HD for file: %s" % (prediction_path)
        return None

    with open(save_ply_path, 'rb') as f:
        plydata = PlyData.read(f)
    dist = np.asarray(plydata['vertex']['quality'])
    dist = dist[:19244]
    print(len(dist))
    # if len(dist)!=30000:
    #     print name
    #     return None

    return dist
Example #31
def generate_annotation_single(path,obj_id,diameter):    
    obj = objs[obj_id]    
    file_name = str(obj_id)
    while len(file_name)<2:
        file_name = '0'+file_name
    processed = {'class':obj_id,'diam':diameter,'obj_name':obj,'img_path':os.path.join(path,'data',file_name,'rgb')}
    processed['depth_path']=os.path.join(path,'data',file_name,'depth')
    processed['mask_path']=os.path.join(path,'data',file_name,'mask')
    gts = yaml.load(open(os.path.join(path,'data',file_name,'gt.yml')),Loader=yaml.FullLoader) 
    obj_mesh = PlyData.read(os.path.join(path,'models',f'obj_{file_name}.ply'))
    xyz = np.stack((obj_mesh['vertex']['x'],obj_mesh['vertex']['y'],obj_mesh['vertex']['z']))
    corners = get_3D_corners(xyz)
    kps = np.concatenate((xyz.mean(1).reshape(3,1),corners),1)  # object center of the preprocessed mesh is not at zero
    bar = Bar('Processing',max = len(gts))
    processed['trainlist']=os.path.join(path,'data',file_name,'train.txt')
    processed['testlist']=os.path.join(path,'data',file_name,'test.txt')
    processed['gt_num']=len(gts)
    for idx in gts:
        annos = gts[idx]
        keep = []
        anno ={}
        for item in annos:
            if item['obj_id'] != obj_id:
                continue
            projs = calculate_projections(kps,np.array(item['cam_R_m2c']),np.array(item['cam_t_m2c']))
            item['kps'] = list(projs.reshape(-1))
            keep.append(item)
        img_name = str(idx)
        img_name = '0'*(4-len(img_name))+img_name
        assert os.path.exists(os.path.join(path,'data',file_name,'rgb',f'{img_name}.png'))
        anno['img_name'] = img_name
        anno['gt'] = keep
        processed[idx] = anno
        bar.next()
    bar.finish()
    json.dump(processed,open(f'{obj}.json','w'))
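`get_3D_corners` and `calculate_projections` come from the surrounding project. A sketch of `get_3D_corners`, assuming it returns the eight axis-aligned bounding-box corners of the (3, N) vertex array as a (3, 8) array:

import itertools
import numpy as np

def get_3D_corners(xyz):
    mins, maxs = xyz.min(axis=1), xyz.max(axis=1)
    # all 8 (min|max) combinations per axis, transposed to (3, 8)
    return np.array(list(itertools.product(*zip(mins, maxs)))).T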
Example #32
def read_mesh_from_zip(zip_fname, fname_in_zip):
    """
    Read ply file from zip. Currently does not support normals!

    Parameters
    ----------
    zip_fname : str
    fname_in_zip : str

    Returns
    -------
    np.array, np.array, np.array
    """
    with zipfile.ZipFile(zip_fname, allowZip64=True) as z:
        txt = z.open(fname_in_zip)
        plydata = PlyData.read(txt)
        vert = plydata['vertex'].data
        vert = vert.view((np.float32, len(vert.dtype.names))).flatten()
        ind = np.array(
            plydata['face'].data['vertex_indices'].tolist()).flatten()
        # TODO: support normals
        # norm = plydata['normals'].data
        # norm = vert.view((np.float32, len(vert.dtype.names))).flatten()
    return [ind, vert, None]
Example #33
def read_ply(filename):
    """convert from a ply file. include the label and the object number"""
    #---read the ply file--------
    plydata = PlyData.read(filename)
    xyz = np.stack([plydata['vertex'][n] for n in ['x', 'y', 'z']], axis=1)
    try:
        rgb = np.stack(
            [plydata['vertex'][n] for n in ['red', 'green', 'blue']],
            axis=1).astype(np.uint8)
    except ValueError:
        rgb = np.stack([plydata['vertex'][n] for n in ['r', 'g', 'b']],
                       axis=1).astype(np.float32)
    if np.max(rgb) > 1:
        rgb = rgb / 255.0  # assumption: 8-bit colors were meant to be scaled to [0, 1]; the original `rgb = rgb` was a no-op
    try:
        object_indices = plydata['vertex']['object_index']
        labels = plydata['vertex']['label']
        return xyz, rgb, labels, object_indices
    except ValueError:
        try:
            labels = plydata['vertex']['label']
            return xyz, rgb, labels
        except ValueError:
            return xyz, rgb
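Because the return arity varies with the available properties, a hypothetical caller has to branch on it:

result = read_ply("scan.ply")
if len(result) == 4:
    xyz, rgb, labels, object_indices = result
elif len(result) == 3:
    xyz, rgb, labels = result
else:
    xyz, rgb = result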
Example #34
def rdp_fun(basic_path, eps):
    for i in range(0, 16):
        file_name = basic_path + '/debug/cut-' + str(i) + '.ply'
        print('read cut cloud file name = ', file_name)
        plydata = PlyData.read(file_name)

        vertex = plydata['vertex']

        (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))

        x = np.reshape(x, (x.size, 1))
        y = np.reshape(y, (y.size, 1))
        z = np.reshape(z, (z.size, 1))

        data = np.concatenate((x, y, z), axis=1)

        rdp_result = rdp(data, epsilon=eps)

        result_file_name = basic_path + '/debug/rdp_result' + str(i) + '.txt'
        np.savetxt(result_file_name, rdp_result, fmt='%.7f', delimiter=' ')

        #dbscan.dbscan(rdp_result.t, 0.05, 500)
        print(rdp_result.shape)
        print(rdp_result)
Example #35
#size_block = 1 * sampling_frequency
#window = np.ones(size_block)
#window /= sum(window)
#
#reference_velocity.x = np.convolve(reference_velocity.x, window, mode='same')
#reference_velocity.y = np.convolve(reference_velocity.y, window, mode='same')
#reference_velocity.z = np.convolve(reference_velocity.z, window, mode='same')
#
#odometry_velocity.x = np.convolve(odometry_velocity.x, window, mode='same')
#odometry_velocity.y = np.convolve(odometry_velocity.y, window, mode='same')
#odometry_velocity.z = np.convolve(odometry_velocity.z, window, mode='same')

########################
# Load Terrain DEM
########################
plydata = PlyData.read(open(esa_arl_dem_file))

vertex = plydata['vertex'].data

[px, py, pz] = (vertex[t] for t in ('x', 'y', 'z'))

# define grid.
npts=100
xi = np.linspace(min(px), max(px), npts)
yi = np.linspace(min(py), max(py), npts)

# grid the data.
zi = griddata(px, py, pz, xi, yi, interp='linear')

#################
## RE-SAMPLE   ##
Example #36
    def read_pcd(self, file_path):
        """ Read in a point cloud from to-be-specified file format into numpy array """
        file_name, file_extension = os.path.splitext(file_path)

        if file_extension == ".yml" or file_extension == ".yaml":
            stream = io.open(file_path, "r")
            data_loaded = yaml.safe_load(stream)
            data_numpy = numpy.zeros((len(data_loaded), 3))
            for index in range(len(data_loaded)):
                data_numpy[index] = (data_loaded[index]["X"],
                                     data_loaded[index]["Y"],
                                     data_loaded[index]["Z"])
            self.points = data_numpy

        elif file_extension == ".mat":
            # QUICK HACK
            #~ self.points = scipy.io.loadmat(file_path)["pc"] # inner_sheet.mat
            self.points = scipy.io.loadmat(file_path)["pci"]
            if self.points.shape[1] > 3:
                raise Exception(
                    "Currently only point information can be loaded within this format! Switch to .xyz or .ply format!"
                )

        elif file_extension == ".xyz" or file_extension == ".txt":
            self.points = numpy.loadtxt(file_path)
            # We have xyz files with additional normals and maybe curvature as columns
            if self.points.shape[1] > 3 and self.points.shape[1] < 7:
                self.normals = self.points[:, 3:6]
                self.points = numpy.delete(self.points, [3, 4, 5], axis=1)
            else:
                pass
            if self.points.shape[1] > 6:
                self.normals = self.points[:, 3:6]
                self.curvature = self.points[:, 6]
                self.curvature = self.curvature.reshape(
                    (self.curvature.shape[0], 1))
                self.points = numpy.delete(self.points, [3, 4, 5, 6], axis=1)
            else:
                pass

        elif file_extension == ".ply":
            with open(file_path, "rb") as f:
                plydata = PlyData.read(f)

                properties = plydata.elements[0].data.dtype.names
                self.points = numpy.zeros((plydata.elements[0].data.size, 3))
                self.points.T[0], self.points.T[1], self.points.T[
                    2] = plydata.elements[0].data["x"][:], plydata.elements[
                        0].data["y"][:], plydata.elements[0].data["z"][:]
                # We may have more than just point information
                if len(properties) > 3:
                    self.normals = numpy.zeros(
                        (plydata.elements[0].data.size, 3))
                    self.normals.T[0], self.normals.T[1], self.normals.T[
                        2] = plydata.elements[0].data[
                            "nx"][:], plydata.elements[0].data[
                                "ny"][:], plydata.elements[0].data["nz"][:]
                else:
                    pass
                # We may have additional curvature information. Meshlab saves this under "quality"
                if len(properties) > 6:
                    self.curvature = plydata.elements[0].data["quality"]
                    self.curvature = self.curvature.reshape(
                        (self.curvature.shape[0], 1))
                else:
                    pass

        elif file_extension == ".asc" or ".csv":
            with open(file_path) as f:
                data = csv.reader(f, delimiter=" ")
                point_list = []
                normals_list = []
                curvature_list = []
                for row in data:
                    point_list.append(row[0:3])
                    if len(row) > 3:
                        normals_list.append(row[3:6])
                    else:
                        pass
                    if len(row) > 6:
                        curvature_list.append(row[6])
                    else:
                        pass
                self.points = numpy.array(point_list, dtype=numpy.float64)
                if normals_list:
                    self.normals = numpy.array(normals_list,
                                               dtype=numpy.float64)
                else:
                    pass
                if curvature_list:
                    self.curvature = numpy.array(curvature_list,
                                                 dtype=numpy.float64)
                else:
                    pass

        elif file_extension == ".stl":  # This extension might get cancelled and we use meshlab for big data
            model_mesh = mesh.Mesh.from_file(file_path)
            model_mesh.vectors  # This will give a triplet of vectors for each vertex
            self.normals = model_mesh.normals
            # TODO Clear this process! Which format do we get from this? We need to delete duplicate points, because we don't care about triangle information!
            self.points = numpy.vstack(
                (model_mesh.v0, model_mesh.v1, model_mesh.v2))

        else:
            raise Exception(
                "File format not supported. Only input .xyz or .ply point clouds!"
            )

        if self.points is None:
            raise Exception("Loaded file was empty")

        return
Example #37
f1 = h5py.File("/home/misumi/Desktop/Write_hdf5File/hdf5_data/data_testtest1.h5", 'w')
#f2 = h5py.File("/home/misumi/Desktop/Write_hdf5File/hdf5_data/data_test2.h5", 'w')
#f3 = h5py.File("/home/misumi/Desktop/Write_hdf5File/hdf5_data/data_test3.h5", 'w')
#f4 = h5py.File("/home/misumi/Desktop/Write_hdf5File/hdf5_data/data_test4.h5", 'w')
#f5 = h5py.File("/home/misumi/Desktop/Write_hdf5File/hdf5_data/data_test5.h5", 'w')
#f = h5py.File("/home/misumi/Desktop/pointnet-master_misumi/misuminet5_ply_hdf5_2048/data_testing0.h5", 'w')

a_data1 = np.zeros((len(filename1), 2048, 3))
#a_data2 = np.zeros((len(filename2), 2048, 3))
#a_data3 = np.zeros((len(filename3), 2048, 3))
#a_data4 = np.zeros((len(filename4), 2048, 3))
#a_data5 = np.zeros((len(filename5), 2048, 3))
#a_pid = np.zeros((len(filenames), 2048), dtype = np.uint8)	

for i in range(0, len(filename1)):
	plydata = PlyData.read("/home/misumi/Desktop/Write_hdf5File/data/" + filename1[i])
	#piddata = [line.rstrip() for line in open("./points_label/" + filenames[i] + ".seg", 'r')]
	for j in range(0, 2048):
		a_data1[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
		#a_pid[i,j] = piddata[j]

data = f1.create_dataset("data1", data = a_data1)

#for i in range(0, len(filename2)):
#	plydata = PlyData.read("/home/misumi/Desktop/Write_hdf5File/data/" + filename2[i])
#	#piddata = [line.rstrip() for line in open("./points_label/" + filenames[i] + ".seg", 'r')]
#	for j in range(0, 2048):
#		a_data2[i, j] = [plydata['vertex']['x'][j], plydata['vertex']['y'][j], plydata['vertex']['z'][j]]
#		#a_pid[i,j] = piddata[j]
#
#data = f2.create_dataset("data2", data = a_data2)
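A hypothetical follow-up that closes the file and reads the dataset back:

f1.close()  # flush the dataset to disk

with h5py.File("/home/misumi/Desktop/Write_hdf5File/hdf5_data/data_testtest1.h5", 'r') as f:
    clouds = f["data1"][:]  # shape (len(filename1), 2048, 3)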
Example #38
    def __init__(self, dataset, kpnum, kptype, is_train, resume=True):
        # Prepare
        self.root = opj('/home/penggao/data/sixd', dataset)
        self.kpnum = kpnum
        self.kptype = kptype
        self.is_train = is_train

        self.pklpath = opj(self.root, 'libs/benchmark.%s-%d-%s.pkl' %
            ('train' if is_train else 'test', self.kpnum, self.kptype))
        self.seq_num = 15

        self.cam = np.zeros((3, 3))
        self.models = dict()
        self.models_info = dict()
        self.kps = dict()
        self.frames = dict()

        # Try to load from disk
        if resume:
            try:
                self._load_from_disk()
                print("[LOG] Load SIXD from pkl file success")
                return
            except Exception as e:
                print("[ERROR]", str(e))
                print("[ERROR] Load from pkl file failed. Load all anew")
        else:
            print("[LOG] Load SXID all anew")

        # Load camera matrix
        print("[LOG] Load camera matrix")
        with open(os.path.join(self.root, 'camera.yml')) as f:
            content = yaml.load(f)
            self.cam = np.array([[content['fx'], 0, content['cx']],
                                 [0, content['fy'], content['cy']],
                                 [0, 0, 1]])

        # Load models and keypoints
        print("[LOG] Load models and keypoints")
        MODEL_ROOT = os.path.join(self.root, 'models')
        KP_ROOT = os.path.join(
            self.root, 'kps', str(self.kpnum), self.kptype)
        with open(os.path.join(MODEL_ROOT, 'models_info.yml')) as f:
            content = yaml.load(f)
            for key, val in tqdm(content.items()):
                name = '%02d' % int(key)  # e.g. '01'
                self.models_info[name] = val

                ply_path = os.path.join(MODEL_ROOT, 'obj_%s.ply' % name)
                data = PlyData.read(ply_path)
                self.models[name] = np.stack((np.array(data['vertex']['x']),
                                              np.array(data['vertex']['y']),
                                              np.array(data['vertex']['z'])), axis=1)

                kp_path = os.path.join(KP_ROOT, 'obj_%s.ply' % name)
                data = PlyData.read(kp_path)
                self.kps[name] = np.stack((np.array(data['vertex']['x']),
                                           np.array(data['vertex']['y']),
                                           np.array(data['vertex']['z'])), axis=1)

        # Load annotations
        print("[LOG] Load annotations")
        for seq in tqdm(['%02d' % i for i in range(1, self.seq_num+1)]):
            frames = list()
            seq_root = opj(
                self.root, 'train' if self.is_train else 'test', str(seq))
            imgdir = opj(seq_root, 'rgb')
            with open(opj(seq_root, 'gt.yml')) as f:
                content = yaml.load(f)
                for key, val in content.items():
                    frame = dict()
                    frame['path'] = opj(imgdir, '%04d.png' % int(key))
                    frame['annots'] = list()
                    obj_ids = []
                    for v in val:
                        annot = dict()
                        rot = np.array(v['cam_R_m2c']).reshape(3, 3)
                        tran = np.array(v['cam_t_m2c']).reshape(3, 1)
                        annot['pose'] = np.concatenate((rot, tran), axis=1)
                        # x1 y1 w h => x1 y1 x2 y2
                        bbox = np.array(v['obj_bb'])
                        bbox[2] += bbox[0]
                        bbox[3] += bbox[1]
                        annot['bbox'] = bbox
                        annot['obj_id'] = v['obj_id']
                        annot['kps'] = self.project_vertices(
                            self.kps['%02d' % v['obj_id']], annot['pose'])
                        frame['annots'].append(annot)
                        obj_ids.append(v['obj_id'])
                    frame['obj_ids'] = obj_ids
                    frames.append(frame)
            self.frames[seq] = frames

        # Save to pickle path
        try:
            self._save_to_disk()
            print("[LOG] Save benchmark to disk")
        except Exception as e:
            print("[ERROR]", str(e))
            print("[ERROR] Save to disk failed")
Example #39
def load_ply(fname_in):
    try:
        plydata = PlyData.read(fname_in)
    except:
        raise ValueError('Could not read file %s' % fname_in)
    return plydata
Example #40
parser = argparse.ArgumentParser(description='Modifies ply file to be used for rendering.\n Example: ./plyScale.py -f input.ply -o out.ply -r 210 -cutX 10.0')
parser.add_argument('-f','--inputFile', help='Input file name', required=True)
parser.add_argument('-o','--outputFile', help='Output file name', required=True)
parser.add_argument('--lx', help='Desired size of bounding box (X axis)', required=False, default="0")
parser.add_argument('--ly', help='Desired size of bounding box (Y axis)', required=False, default="0")
parser.add_argument('--lz', help='Desired size of bounding box (Z axis)', required=False, default="0")
parser.add_argument('-r','--order', help='Reorder axis. By default 012, to swap x and z use 210', required=False, default="012")
helpStringForCut = 'Remove all the faces which are above specified value for %s. The origin is in the center of mass. If axis reordering was applied, axis are in new coordinates.'
parser.add_argument('--cutX', help=helpStringForCut%('X'), required=False, default="none")
parser.add_argument('--cutY', help=helpStringForCut%('Y'), required=False, default="none")
parser.add_argument('--cutZ', help=helpStringForCut%('Z'), required=False, default="none")
args = vars(parser.parse_args())

desiredBox = [float(args['lx']), float(args['ly']), float(args['lz'])]

plydata = PlyData.read(args['inputFile'])
vertices = plydata['vertex'].data

# swap coords
order = args['order']
if (order != "012"):
    idx =  [int(order[i]) for i in range(0, len(order))]
    assert(len(idx) == 3)
    print "Swapping axis!"
    for i in range(0, len(vertices)):
        v = copy.deepcopy(vertices[i])
        for dim in range(0, 3):
            vertices[i][dim] = v[ idx[dim] ]


# Current box
Example #41
def load_ply_normal(filename, point_num):
    plydata = PlyData.read(filename)
    pc = plydata['normal'].data[:point_num]
    pc_array = np.array([[x, y, z] for x, y, z in pc])
    return pc_array
Example #42
    def __init__(self, config):
        self.config = config
        self.pp = pprint.PrettyPrinter(indent=4)
        self.pp.pprint(config)

        rgb_image_sub = message_filters.Subscriber(
            "/camera/rgb/image_color",
            ImageSensor_msg,
            queue_size=10000
        )
        rgb_info_sub = message_filters.Subscriber(
            "/camera/rgb/camera_info",
            CameraInfo
        )
        depth_info_sub = message_filters.Subscriber(
            "/camera/depth/camera_info",
            CameraInfo
        )
        depth_image_sub = message_filters.Subscriber(
            "/camera/depth/image",
            ImageSensor_msg,
            queue_size=10000
        )
        self.depth_scale = 100.0
        
        self.depth_image_cache = message_filters.Cache(depth_image_sub, 10000, allow_headerless=False)
        self.cv_bridge = CvBridge()
        self.output_path = config['label_gen']['output_path']
        model_path = get_model_path(config['label_gen']['model_dir'], config['label_gen']['object_name'], model_type="upright")
        self.initial_rotation = get_initial_rotation(config['label_gen']['object_label'])

        # model_path = "/media/aditya/A69AFABA9AFA85D9/Datasets/YCB_Video_Dataset/models/004_sugar_box/textured.ply"
        # self.output_path = "./bag_output/sugar_1"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, -math.pi/2) #sugar_1
        # self.output_path = "./bag_output/sugar_2"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, -2.0 * math.pi/3) #sugar_2
        # self.output_path = "./bag_output/sugar_3"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, 2.0 * math.pi/3) #sugar_3


        # model_path = "/media/aditya/A69AFABA9AFA85D9/Datasets/YCB_Video_Dataset/models/035_power_drill/textured_upright.ply"
        # self.output_path = "./bag_output/drill_1"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, math.pi) #drill_1
        # self.output_path = "./bag_output/drill_2"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, 2.5/3 * math.pi) #drill_2
        # self.output_path = "./bag_output/drill_3"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, -2.5/3 * math.pi) #drill_3

        # model_path = "/media/aditya/A69AFABA9AFA85D9/Datasets/YCB_Video_Dataset/models/005_tomato_soup_can/textured.ply"
        # self.output_path = "./bag_output/soup_1"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, 0) #soup_1

        # model_path = "/media/aditya/A69AFABA9AFA85D9/Datasets/YCB_Video_Dataset/models/006_mustard_bottle/textured.ply"
        # self.output_path = "./bag_output/mustard_1"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, 0) #mustard_1
        # self.output_path = "./bag_output/mustard_2"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, 2.5/3 * math.pi) #mustard_2
        # self.output_path = "./bag_output/sugar_3"
        # self.initial_rotation = tf.transformations.quaternion_from_euler(0, 0, 2.0 * math.pi/3) #sugar_3

        mkdir_if_missing(self.output_path)
        self.world_frame =  "/base_footprint"
        self.camera_frame =  "/camera_rgb_optical_frame"

        self.tf_listener = tf.TransformListener()
        self.camera_pose = None
        self.rgb_camera_instrinc_matrix = None
        self.depth_camera_instrinc_matrix = None
        self.counter = 0
        self.MAX_COUNTER = 400
        self.pub_filtered_cloud = \
            rospy.Publisher(
                "image_node/filtered_cloud",
                PointCloud2,
                queue_size=10
            )
        self.pub_pose_cloud = \
            rospy.Publisher(
                "image_node/pose_cloud",
                PointCloud2,
                queue_size=10
            )
        
        # Read Model
        cloud = PlyData.read(model_path).elements[0].data
        cloud = np.transpose(np.vstack((cloud['x'], cloud['y'], cloud['z'])))
        
        cloud_pose = pcl.PointCloud()
        cloud_pose.from_array(cloud)
        sor = cloud_pose.make_voxel_grid_filter()
        sor.set_leaf_size(0.015, 0.015, 0.015)
        cloud_pose = sor.filter()
        self.mesh_cloud = np.asarray(cloud_pose)
        # Z point up
        self.object_height = np.max(self.mesh_cloud[:,2]) - np.min(self.mesh_cloud[:,2])
        self.cloud_filter_params = {
            "xmin" : 0.1,
            "xmax" : 0.6,
            "ymin" : 0.0,
            "ymax" : 1.7,
            "zmin" : 0.75, #drill, sugar, mustard
            # "zmin" : 0.76, #soup can
            "object_height" : self.object_height,
            "downsampling_leaf_size" : 0.015
        }
        print(self.cloud_filter_params)

        print ("Num points after downsample and filter : {}".format(
            self.mesh_cloud.shape[0]))

        rgb_image_sub.registerCallback(self.image_callback)
        rgb_info_sub.registerCallback(self.rgb_info_callback)
        depth_info_sub.registerCallback(self.depth_info_callback)
Example #43
def read_ply(path):
    ply_data = PlyData.read(path).elements[0][0:]
    return ply_data
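The function returns raw structured rows; a hypothetical follow-up converts them to a plain (N, 3) float array:

import numpy as np

data = read_ply("model.ply")
xyz = np.column_stack((data['x'], data['y'], data['z']))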
Example #44
import sys
import os
sys.path.insert(0, './src/core')
from numpy.random import uniform, seed
from matplotlib.mlab import griddata
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
from matplotlib import cm
from pylab import *
import numpy as np
from plyfile import PlyData, PlyElement
import scipy

matplotlib.style.use('classic') #in matplotlib >= 1.5.1

plydata = PlyData.read(open(os.path.expanduser(decos_dem_file)))

vertex = plydata['vertex'].data

[px, py, pz] = (vertex[t] for t in ('x', 'y', 'z'))

# define grid.
npts=500
xi = np.linspace(min(px), max(px), npts)
yi = np.linspace(min(py), max(py), npts)

# grid the data.
zi = griddata(px, py, pz, xi, yi, interp='linear')

############
### PLOT ###
Example #45
    def __init__(self, args, voxel_path=None, bbox_path=None, shared_values=None):
        super().__init__(args)
        # read initial voxels or learned sparse voxels
        self.voxel_path = voxel_path if voxel_path is not None else args.voxel_path
        self.bbox_path = bbox_path if bbox_path is not None else getattr(args, "initial_boundingbox", None)
        assert (self.bbox_path is not None) or (self.voxel_path is not None), \
            "at least initial bounding box or pretrained voxel files are required."
        self.voxel_index = None
        self.scene_scale = getattr(args, "scene_scale", 1.0)
 
        if self.voxel_path is not None:
            # read voxel file
            assert os.path.exists(self.voxel_path), "voxel file must exist"
            
            if Path(self.voxel_path).suffix == '.ply':
                from plyfile import PlyData, PlyElement
                plyvoxel = PlyData.read(self.voxel_path)
                elements = [x.name for x in plyvoxel.elements]
                assert 'vertex' in elements
                plydata = plyvoxel['vertex']
                fine_points = torch.from_numpy(
                    np.stack([plydata['x'], plydata['y'], plydata['z']]).astype('float32').T)

                if 'face' in elements:
                    # read voxel meshes... automatically detect voxel size
                    faces = plyvoxel['face']['vertex_indices']
                    t = fine_points[faces[0].astype('int64')]
                    voxel_size = torch.abs(t[0] - t[1]).max()

                    # indexing voxel vertices
                    fine_points = torch.unique(fine_points, dim=0)

                    # vertex_ids, _ = discretize_points(fine_points, voxel_size)
                    # vertex_ids_offset = vertex_ids + 1
                    
                    # # simple hashing
                    # vertex_ids = vertex_ids[:, 0] * 1000000 + vertex_ids[:, 1] * 1000 + vertex_ids[:, 2]
                    # vertex_ids_offset = vertex_ids_offset[:, 0] * 1000000 + vertex_ids_offset[:, 1] * 1000 + vertex_ids_offset[:, 2]

                    # vertex_ids = {k: True for k in vertex_ids.tolist()}
                    # vertex_inside = [v in vertex_ids for v in vertex_ids_offset.tolist()]
                    
                    # # get voxel centers
                    # fine_points = fine_points[torch.tensor(vertex_inside)] + voxel_size * .5
                    # fine_points = fine_points + voxel_size * .5   --> use all corners as centers
                
                else:
                    # voxel size must be provided
                    assert getattr(args, "voxel_size", None) is not None, "final voxel size is essential."
                    voxel_size = args.voxel_size

                if 'quality' in elements:
                    self.voxel_index = torch.from_numpy(plydata['quality']).long()
               
            else:
                # supporting the old style .txt voxel points
                fine_points = torch.from_numpy(np.loadtxt(self.voxel_path)[:, 3:].astype('float32'))
        else:
            # read bounding-box file
            bbox = np.loadtxt(self.bbox_path)
            voxel_size = bbox[-1] if getattr(args, "voxel_size", None) is None else args.voxel_size
            fine_points = torch.from_numpy(bbox2voxels(bbox[:6], voxel_size))
        
        half_voxel = voxel_size * .5
        
        # transform from voxel centers to voxel corners (key/values)
        fine_coords, _ = discretize_points(fine_points, half_voxel)
        fine_keys0 = offset_points(fine_coords, 1.0).reshape(-1, 3)
        fine_keys, fine_feats = torch.unique(fine_keys0, dim=0, sorted=True, return_inverse=True)
        fine_feats = fine_feats.reshape(-1, 8)
        num_keys = torch.scalar_tensor(fine_keys.size(0)).long()
        
        # ray-marching step size
        if getattr(args, "raymarching_stepsize_ratio", 0) > 0:
            step_size = args.raymarching_stepsize_ratio * voxel_size
        else:
            step_size = args.raymarching_stepsize
        
        # register parameters (will be saved to checkpoints)
        self.register_buffer("points", fine_points)          # voxel centers
        self.register_buffer("keys", fine_keys.long())       # id used to find voxel corners/embeddings
        self.register_buffer("feats", fine_feats.long())     # for each voxel, 8 voxel corner ids
        self.register_buffer("num_keys", num_keys)
        #self.register_buffer("points_labels", fine_points)          # voxel centers
        self.register_buffer("keep", fine_feats.new_ones(fine_feats.size(0)).long())  # whether the voxel will be pruned

        self.register_buffer("voxel_size", torch.scalar_tensor(voxel_size))
        self.register_buffer("step_size", torch.scalar_tensor(step_size))
        self.register_buffer("max_hits", torch.scalar_tensor(args.max_hits))

        logger.info("loaded {} voxel centers, {} voxel corners".format(fine_points.size(0), num_keys))

        # set-up other hyperparameters and initialize running time caches
        self.embed_dim = getattr(args, "voxel_embed_dim", None)
        self.deterministic_step = getattr(args, "deterministic_step", False)
        self.use_octree = getattr(args, "use_octree", False)
        self.track_max_probs = getattr(args, "track_max_probs", False)    
        self._runtime_caches = {
            "flatten_centers": None,
            "flatten_children": None,
            "max_voxel_probs": None
        }

        # sparse voxel embeddings     
        if shared_values is None and self.embed_dim > 0:
            self.values = Embedding(num_keys, self.embed_dim, None)
        else:
            self.values = shared_values
Example #46
def create_angle_data(ply_path, json_path, out_dir, convt):

    print("Start processing " + os.path.basename(os.path.basename(ply_path)))

    windowSize = 4
    xyScale = 3

    # fetch input files from dirs
    jsonf, plyf, gImf, pImf = find_files(ply_path, json_path)
    if jsonf == [] or plyf == [] or gImf == [] or pImf == []:
        return

    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    # if json file in dist dir, it's been processed, we skip it.
    json_basename = os.path.basename(jsonf)
    json_dst = os.path.join(out_dir, json_basename)
    if os.path.exists(json_dst):
        return

    # obtain scan position and offsets from metadata
    metadata = terra_common.lower_keys(terra_common.load_json(jsonf))
    center_position = get_position(metadata)
    yOffset = get_offset_from_metadata(metadata)

    plyFile = PlyData.read(plyf)
    plyData = plyFile.elements[0]

    # get relationship between ply files and png files, that means each point in the ply file
    # should have a corresponding pixel in png files, both depth png and gray png
    pImg = cv2.imread(pImf, -1)
    gImg = cv2.imread(gImf, -1)
    pHei, pWid = pImg.shape[:2]
    gHei, gWid = gImg.shape[:2]
    if pWid == gWid:
        gPix = np.array(gImg).ravel()
        gIndex = (np.where(gPix > 32))
        tInd = gIndex[0]
    else:
        pPix = np.array(pImg)
        pPix = pPix[:, 2:].ravel()
        pIndex = (np.where(pPix != 0))

        gPix = np.array(gImg).ravel()
        gIndex = (np.where(gPix > 33))
        tInd = np.intersect1d(gIndex[0], pIndex[0])

    nonZeroSize = tInd.size

    pointSize = plyData.count

    # if point counts do not match, return
    if nonZeroSize != pointSize:
        return

    # Initialize data structures
    gIndexImage = np.zeros(gWid * gHei)

    gIndexImage[tInd] = np.arange(1, pointSize + 1)

    gIndexImage_ = np.reshape(gIndexImage, (-1, gWid))

    angle_data = []
    for i in range(0, PLOT_COL_NUM):
        angle_data.append(np.zeros((1, 6)))

    # move ROI in a window size to do the meta analysis
    for i in np.arange(0 + windowSize * xyScale, gWid - windowSize * xyScale,
                       windowSize * xyScale * 2):
        for j in np.arange(0 + windowSize, gHei - windowSize, windowSize * 2):
            # fetch points in the ROI
            plyIndices = gIndexImage_[j - windowSize:j + windowSize + 1,
                                      i - windowSize * xyScale:i +
                                      windowSize * xyScale + 1]
            plyIndices = plyIndices.ravel()
            plyIndices_ = np.where(plyIndices > 0)

            localIndex = plyIndices[plyIndices_[0]].astype('int64')
            if plyIndices_[0].size < 50:
                continue
            localP = plyData.data[localIndex - 1]
            xCoord = np.mean(localP["x"])
            yCoord = np.mean(localP["y"])
            # get ROI column number (32 columns in total)
            area_ind = get_area_index(xCoord, yCoord, yOffset,
                                      center_position[0], convt) - 1
            if area_ind < 0:
                continue
            # calculate angle data and store point position in localNormal
            localNormal = calcAreaNormalSurface(localP)
            if len(localNormal) > 0:
                angle_data[area_ind] = np.append(angle_data[area_ind],
                                                 [localNormal],
                                                 axis=0)

    file_ind = 0
    # save output data in npy files
    for save_data in angle_data:
        file_ind = file_ind + 1
        out_basename = str(file_ind) + '.npy'
        out_file = os.path.join(out_dir, out_basename)
        np.save(out_file, save_data)

    shutil.copyfile(jsonf, json_dst)

    return
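The gIndexImage trick above is what ties the 2D images to the ply rows: every gray pixel above the threshold gets a consecutive 1-based id, the id image is reshaped to the png width, and any ROI slice then yields the ply row numbers of the points falling inside it. A tiny self-contained numpy illustration of the same mapping (toy 3x3 image, threshold as in the code):

import numpy as np

gImg = np.array([[0, 40, 0],
                 [50, 0, 60],
                 [0, 0, 0]], dtype=np.uint16)
tInd = np.where(gImg.ravel() > 32)[0]            # pixels that carry a 3D point
gIndexImage = np.zeros(gImg.size)
gIndexImage[tInd] = np.arange(1, tInd.size + 1)  # 1-based point ids, 0 = no point
gIndexImage_ = gIndexImage.reshape(-1, gImg.shape[1])
roi = gIndexImage_[0:2, 0:2].ravel()             # any window of the id image
point_rows = roi[roi > 0].astype('int64') - 1    # back to 0-based ply rows
print(point_rows)                                # [0 1]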
def main(argv):
    if len(argv) < 2:
        print('filename to npy/ply was not passed as an argument; terminating.')
        return

    pathToFile = argv[1]

    folder = os.path.dirname(pathToFile)
    filename, file_extension = os.path.splitext(os.path.basename(pathToFile))

    # for the moment supports npy and ply
    if (file_extension == '.npy'):
        pclTime = np.load(pathToFile)
        pclTimeSize = np.shape(pclTime)
    elif (file_extension == '.npz'):
        pclTime = np.load(pathToFile)
        pclTime = pclTime['pred']
        pclTimeSize = np.shape(pclTime)
    elif (file_extension == '.ply'):
        ply = PlyData.read(pathToFile)
        vertex = ply['vertex']
        (x, y, z) = (vertex[t] for t in ('x', 'y', 'z'))
        pclTime = np.column_stack((x, y, z))
    else:
        print('unsupported file format.')
        return

    if (len(np.shape(pclTime)) < 3):
        pclTimeSize = [1, np.shape(pclTime)[0], np.shape(pclTime)[1]]
        pclTime.resize(pclTimeSize)

    for pcli in range(0, pclTimeSize[0]):
        pcl = pclTime[pcli, :, :]

        pcl = standardize_bbox(pcl, 2048)
        pcl = pcl[:, [2, 0, 1]]
        pcl[:, 0] *= -1
        pcl[:, 2] += 0.0125

        xml_segments = [xml_head]
        for i in range(pcl.shape[0]):
            color = colormap(pcl[i, 0] + 0.5, pcl[i, 1] + 0.5,
                             pcl[i, 2] + 0.5 - 0.0125)
            xml_segments.append(
                xml_ball_segment.format(pcl[i, 0], pcl[i, 1], pcl[i, 2],
                                        *color))
        xml_segments.append(xml_tail)

        xml_content = str.join('', xml_segments)

        xmlFile = ("%s/%s_%02d.xml" % (folder, filename, pcli))

        with open(xmlFile, 'w') as f:
            f.write(xml_content)

        exrFile = ("%s/%s_%02d.exr" % (folder, filename, pcli))
        if not os.path.exists(exrFile):
            print('Running Mitsuba, writing to:', xmlFile)
            subprocess.run([PATH_TO_MITSUBA2, xmlFile])
        else:
            print('skipping rendering because the EXR file already exists')

        jpgFile = ("%s/%s_%02d.jpg" % (folder, filename, pcli))

        print('Converting EXR to JPG...')
        ConvertEXRToJPG(exrFile, jpgFile)
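The loop above leans on a standardize_bbox helper that this excerpt does not show. A minimal sketch consistent with how it is called here (subsample to a fixed point count, then scale into a unit cube centered at the origin); treat the details as an assumption rather than the original implementation:

import numpy as np

def standardize_bbox(pcl, points_per_object):
    # randomly subsample, then normalize so the cloud fits roughly in [-0.5, 0.5]^3
    pt_indices = np.random.choice(pcl.shape[0], points_per_object, replace=False)
    pcl = pcl[pt_indices]
    mins = np.amin(pcl, axis=0)
    maxs = np.amax(pcl, axis=0)
    center = (mins + maxs) / 2.0
    scale = np.amax(maxs - mins)
    return ((pcl - center) / scale).astype(np.float32)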
import matplotlib
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from plyfile import PlyData, PlyElement


# Every 10 data samples, we save 1. If things run too
# slow, try increasing this number. If things run too fast,
# try decreasing it... =)
reduce_factor = 10


# Look pretty...
matplotlib.style.use('ggplot')


# Load up the scanned armadillo
plyfile = PlyData.read('Datasets/stanford_armadillo.ply')
armadillo = pd.DataFrame({
  'x':plyfile['vertex']['z'][::reduce_factor],
  'y':plyfile['vertex']['x'][::reduce_factor],
  'z':plyfile['vertex']['y'][::reduce_factor]
})



def do_PCA(armadillo):
  #
  # TODO: Write code to import the libraries required for PCA.
  # Then, train your PCA on the armadillo dataframe. Finally,
  # drop one dimension (reduce it down to 2D) and project the
  # armadillo down to the 2D principal component feature space.
  #
  # One possible solution (a sketch; the exercise leaves this blank):
  from sklearn.decomposition import PCA
  pca = PCA(n_components=2)
  return pca.fit_transform(armadillo)
Example #49
def read_ply(filename):
    """ read XYZ point cloud from filename PLY file """
    plydata = PlyData.read(filename)
    pc = plydata['vertex'].data
    pc_array = np.array([[x, y, z] for x, y, z in pc])
    return pc_array
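The per-point list comprehension above walks the structured array row by row, which gets slow for large clouds. A vectorized variant (a sketch producing the same (N, 3) result via column-wise access):

import numpy as np
from plyfile import PlyData

def read_ply_fast(filename):
    """ read XYZ point cloud from filename PLY file, without a Python-level loop """
    v = PlyData.read(filename)['vertex']
    return np.column_stack((v['x'], v['y'], v['z']))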
def color_code_depth_img(ply_path, json_path, out_dir, convt):

    windowSize = 4
    xyScale = 3

    jsonf, plyf, gImf, pImf = find_files(ply_path, json_path)
    if jsonf == [] or plyf == [] or gImf == [] or pImf == []:
        return

    if not os.path.isdir(out_dir):
        os.mkdir(out_dir)

    metadata = terra_common.lower_keys(terra_common.load_json(jsonf))
    yOffset = get_offset_from_metadata(metadata)

    gIm = Image.open(gImf)

    [gWid, gHei] = gIm.size

    # color code gray image
    codeImg = Image.new("RGB", gIm.size, "black")

    pix = np.array(gIm).ravel()

    gIndex = (np.where(pix > 32))
    nonZeroSize = gIndex[0].size

    plydata = PlyData.read(plyf)

    pointSize = plydata.elements[0].count

    if nonZeroSize != pointSize:
        print('Point counts do not match.')
        return

    gIndexImage = np.zeros(gWid * gHei)

    gIndexImage[gIndex[0]] = np.arange(1, pointSize + 1)

    gIndexImage_ = np.reshape(gIndexImage, (-1, gWid))

    angle_data = []
    for i in range(0, 32):
        angle_data.append(np.zeros((1, 6)))

    icount = 0
    # get top angle
    for i in np.arange(0 + windowSize * xyScale, gWid - windowSize * xyScale,
                       windowSize * xyScale * 2):
        icount = icount + 1
        jcount = 0
        for j in np.arange(0 + windowSize, gHei - windowSize, windowSize * 2):
            jcount = jcount + 1
            plyIndices = gIndexImage_[j - windowSize:j + windowSize + 1,
                                      i - windowSize * xyScale:i +
                                      windowSize * xyScale + 1]
            plyIndices = plyIndices.ravel()
            plyIndices_ = np.where(plyIndices > 0)

            localIndex = plyIndices[plyIndices_[0]].astype('int64')
            if plyIndices_[0].size < 100:
                continue
            localP = plydata.elements[0].data[localIndex - 1]
            yCoord = np.mean(localP["y"])
            area_ind = get_area_index(yCoord, yOffset, convt) - 1
            localNormal = calcAreaNormalSurface(localP)
            if len(localNormal) > 0:
                angle_data[area_ind] = np.append(angle_data[area_ind],
                                                 [localNormal],
                                                 axis=0)

    hist_data = np.zeros((32, 90))
    pix_height = np.zeros(32)
    disp_window = np.zeros(32)
    min_z = np.zeros(32)
    # enumerate keeps ind aligned with the plot column even when a column is skipped
    for ind, meta_angle in enumerate(angle_data):
        if meta_angle.size < 10:
            continue

        pix_height[ind] = get_scanned_height(meta_angle)
        leaf_angle = remove_soil_points(meta_angle)
        hist_data[ind] = gen_angle_hist_from_raw(meta_angle)
        disp_window[ind] = np.argmax(hist_data[ind])
        min_z[ind] = np.amin(meta_angle[1:, 5]) + 55

    # color code
    for i in np.arange(0 + windowSize * xyScale, gWid - windowSize * xyScale,
                       windowSize * xyScale * 2):
        icount = icount + 1
        jcount = 0
        for j in np.arange(0 + windowSize, gHei - windowSize, windowSize * 2):
            jcount = jcount + 1
            plyIndices = gIndexImage_[j - windowSize:j + windowSize + 1,
                                      i - windowSize * xyScale:i +
                                      windowSize * xyScale + 1]
            plyIndices = plyIndices.ravel()
            plyIndices_ = np.where(plyIndices > 0)

            localIndex = plyIndices[plyIndices_[0]].astype('int64')
            if plyIndices_[0].size < 100:
                continue
            localP = plydata.elements[0].data[localIndex - 1]
            localNormal = calcAreaNormalSurface(localP)

            yCoord = np.mean(localP["y"])
            area_ind = get_area_index(yCoord, yOffset, convt) - 1
            if len(localNormal) == 0:
                continue
            #if localNormal[5] < min_z[area_ind]:
            #    continue
            #if angle_in_range(disp_window[area_ind], localNormal):
            rgb = normals_to_rgb_2(localNormal)
            codeImg.paste(rgb,
                          (i - windowSize * xyScale, j - windowSize,
                           i + windowSize * xyScale + 1, j + windowSize + 1))

            #save_points(localP, '/Users/nijiang/Desktop/normal_plot.png', 4)

    img1 = Image.open(gImf)
    img1 = img1.convert('RGB')

    img3 = Image.blend(img1, codeImg, 0.5)
    save_png_file = os.path.join(out_dir, os.path.basename(gImf))
    img3.save(save_png_file)

    file_ind = 0
    for save_data in angle_data:
        file_ind = file_ind + 1
        out_basename = str(file_ind) + '.npy'
        out_file = os.path.join(out_dir, out_basename)
        np.save(out_file, save_data)

    json_basename = os.path.basename(jsonf)
    json_dst = os.path.join(out_dir, json_basename)
    shutil.copyfile(jsonf, json_dst)
    return
Example #51
def load_ply(path):
    ply = PlyData.read(path)
    data = ply.elements[0].data
    x, y, z = data['x'], data['y'], data['z']
    model = np.stack([x, y, z], axis=-1)
    return model
def format_converter(current_path, model_name):
    
    model_file = os.path.join(current_path, model_name)
    
    print("Converting file format for 3D point cloud model {}...\n".format(model_name))
    
    model_name_base = os.path.splitext(model_file)[0]
    
    
    # load the model file
    try:
        with open(model_file, 'rb') as f:
            plydata = PlyData.read(f)
            num_vertex = plydata.elements[0].count
            
            print("Ply data structure: \n")
            print(plydata)
            print("\n")
            print("Number of 3D points in current model: {0} \n".format(num_vertex))
        
    except Exception:
        print("Model file does not exist!")
        sys.exit(0)
        
    
    # Parse the ply file and extract the data
    Data_array_ori = np.zeros((num_vertex, 3))
    
    Data_array_ori[:,0] = plydata['vertex'].data['x']
    Data_array_ori[:,1] = plydata['vertex'].data['y']
    Data_array_ori[:,2] = plydata['vertex'].data['z']
    
    # sort point cloud data based on Z values
    Data_array = np.asarray(sorted(Data_array_ori, key=itemgetter(2), reverse=False))

    '''
    #accquire data range
    min_x = Data_array[:, 0].min()
    max_x = Data_array[:, 0].max()
    min_y = Data_array[:, 1].min()
    max_y = Data_array[:, 1].max()
    min_z = Data_array[:, 2].min()
    max_z = Data_array[:, 2].max()
    
    range_data_x = max_x - min_x
    range_data_y = max_y - min_y
    range_data_z = max_z - min_z
    
    print (range_data_x, range_data_y, range_data_z)
    
    print(min_x,max_x)
    print(min_y,max_y)
    print(min_z,max_z)
    '''
    
    
    #Normalize data
    #min_max_scaler = preprocessing.MinMaxScaler(feature_range = (0,1000000))
    
    min_max_scaler = preprocessing.MinMaxScaler(feature_range = (0,10000))

    point_normalized = min_max_scaler.fit_transform(Data_array)
    
    #point_normalized_scale = [i * 1 for i in point_normalized]
   
    # Pass xyz to Open3D.o3d.geometry.PointCloud 
    pcd = o3d.geometry.PointCloud()
    
    pcd.points = o3d.utility.Vector3dVector(point_normalized)
    
    o3d.visualization.draw_geometries([pcd])
    
    # Save model file in ASCII xyz format
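The example breaks off before the save step; a minimal sketch of one way to finish it under that assumed intent (save_xyz is a hypothetical helper, not part of the original):

import numpy as np

def save_xyz(points, out_path):
    # points: (N, 3) array; writes one "x y z" line per point in plain ASCII
    np.savetxt(out_path, points, fmt='%.6f')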
def load_ply_data(filename):
    plydata = PlyData.read(filename)
    pc = plydata['vertex'].data
    pc_array = np.array([[x, y, z]
                         for x, y, z, _nx, _ny, _nz, _r, _g, _b, _a in pc])
    return pc_array
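load_ply_data above hard-codes a 10-property vertex layout (xyz + normals + rgba) and breaks on any other ply file. A field-name based variant (a sketch) that reads only x/y/z and therefore tolerates extra properties:

import numpy as np
from plyfile import PlyData

def load_ply_xyz(filename):
    pc = PlyData.read(filename)['vertex'].data
    return np.stack([pc['x'], pc['y'], pc['z']], axis=-1)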
Example #54
def get_point_cloud(point_cloud_id, number_points=None, history=None):
  from voxel_globe.meta import models
  from vpgl_adaptor import convert_local_to_global_coordinates_array, create_lvcs
  import os
  import numpy as np
  from plyfile import PlyData

  point_cloud = models.PointCloud.objects.get(id=point_cloud_id).history(history)

  lvcs = create_lvcs(point_cloud.origin[1], point_cloud.origin[0], point_cloud.origin[2], 'wgs84')

  ply = PlyData.read(str(os.path.join(point_cloud.directory, 'error.ply')))
  data = ply.elements[0].data

  if number_points:
    try:
      import heapq
      data = np.array(heapq.nlargest(number_points, ply.elements[0].data, 
                                     key=lambda x:x['prob']))
    except IndexError:  # not a correctly formatted ply file. HACK A CODE!
      #This is a hack-a-code for Tom's ply file
      data = ply.elements[0].data.astype([('x', '<f4'), ('y', '<f4'), 
          ('z', '<f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1'), 
          ('prob', '<f4')])
      import copy
      blah = copy.deepcopy(data['y'])
      data['y'] = data['z']
      data['z'] = -blah
      blah = copy.deepcopy(data['blue'])
      data['blue'] = data['green']
      data['green'] = blah

      data['prob'] = abs(data['x'] - 10 - sum(data['x'])/len(data['x'])) \
                   + abs(data['y'] + 30 - sum(data['y'])/len(data['y'])) \
                   + abs(data['z'] - sum(data['z'])/len(data['z']))
      data['prob'] = max(data['prob']) - data['prob']

      data = np.array(heapq.nlargest(number_points, data, 
                                     key=lambda x:x['prob']))
      print(data['prob'])

  lla = convert_local_to_global_coordinates_array(lvcs, data['x'].tolist(), data['y'].tolist(), data['z'].tolist())

  latitude = np.array(lla[0])
  longitude = np.array(lla[1])
  altitude = np.array(lla[2])
  # note: the original lambda's argument order emits '#' + red + blue + green (green/blue swapped)
  color = ['#%02x%02x%02x' % (r, g, b)
           for r, g, b in zip(data['red'], data['blue'], data['green'])]

  return_data = {"latitude": latitude, "longitude": longitude,
                 "altitude": altitude, "color": color}

  try:
    return_data['le'] = data['le']
  except ValueError:
    return_data['le'] = (-np.ones(len(latitude))).tolist()
  try:
    return_data['ce'] = data['ce']
  except ValueError:
    return_data['ce'] = (-np.ones(len(latitude))).tolist()

  return return_data
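The number_points branch above uses heapq.nlargest to take the N most probable points without sorting the whole array. A tiny self-contained demo of the same call on a structured array:

import heapq
import numpy as np

pts = np.array([(0.1,), (0.9,), (0.5,)], dtype=[('prob', '<f4')])
top2 = np.array(heapq.nlargest(2, pts, key=lambda p: p['prob']))
print(top2['prob'])   # [0.9 0.5]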
Example #55
import matplotlib
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from plyfile import PlyData, PlyElement

# RandomizedPCA was removed from newer sklearn releases; PCA(svd_solver='randomized') replaces it
from sklearn.decomposition import PCA

# Every 100 data samples, we save 1. If things run too
# slow, try increasing this number. If things run too fast,
# try decreasing it... =)
reduce_factor = 100

# Look pretty...
matplotlib.style.use('ggplot')

# Load up the scanned armadillo
plyfile = PlyData.read('Datasets/stanford_armadillo.ply')
armadillo = pd.DataFrame({
    'x': plyfile['vertex']['z'][::reduce_factor],
    'y': plyfile['vertex']['x'][::reduce_factor],
    'z': plyfile['vertex']['y'][::reduce_factor]
})


def do_PCA(armadillo):
    #
    # TODO: Write code to import the libraries required for PCA.
    # Then, train your PCA on the armadillo dataframe. Finally,
    # drop one dimension (reduce it down to 2D) and project the
    # armadillo down to the 2D principal component feature space.
    #
    # NOTE: Be sure to RETURN your projected armadillo!
    #
    # One possible solution (a sketch; the exercise leaves this blank):
    pca = PCA(n_components=2)
    return pca.fit_transform(armadillo)
Example #56
import os
import glob
import numpy as np
from mayavi import mlab
from plyfile import PlyData, PlyElement

folder_path = 'objects/tools/ply_wrench'
saving_folder_path = 'objects/tools/pts_wrench'
saving_ext = '.pts'

if not os.path.exists(saving_folder_path):
    os.makedirs(saving_folder_path)

file_paths = glob.glob(folder_path + '/' + '*.ply')

for (i, file_path) in enumerate(file_paths):
    with open(file_path, 'rb') as f:
        file_name = os.path.basename(file_path)
        name = os.path.splitext(file_name)[0]

        saving_file_path = saving_folder_path + '/' + name + saving_ext

        plydata = PlyData.read(f)
        xyz = (plydata['vertex']['x'], plydata['vertex']['y'],
               plydata['vertex']['z'])
        xyz = np.array(xyz).transpose()
        np.savetxt(fname=saving_file_path, X=xyz, fmt='%f')
        print(file_name + ' Done')
Example #57
    parser.add_argument('--no_line',
                        '-n',
                        action='store_true',
                        default=False,
                        help='Do not plot lines between vertices and normals')

    parser.add_argument('input',
                        metavar='INPUT',
                        type=str,
                        nargs=1,
                        help='input file')

    args = parser.parse_args()

    plydata = PlyData.read(args.input[0])

    normals = get_vertex_normals(plydata)
    vertices = get_vertices(plydata)

    if args.sample < 1:
        sample_indices = np.random.choice(vertices.shape[0],
                                          int(vertices.shape[0] * args.sample),
                                          replace=False)

        vertices_sampled = vertices[sample_indices, :]

        increased = args.length * normals[sample_indices, :] + vertices_sampled
    else:
        vertices_sampled = vertices
        increased = args.length * normals + vertices
    # plt.draw()
    # fig = plt.figure()
    # ax = fig.gca(projection='3d')
    # ax.plot_trisurf(x, y, np.array(v).flatten(), cmap=cm.jet, linewidth=0.2)
    # ax.set_title('Actual')
    # plt.draw()
    # plt.show()

    return a, D, indices_chosen


def main(x, y, z, ftype):
    if (ftype == 'quad'):
        a = QuadSurfFit(x, y, z)

    elif (ftype == 'lin'):
        a = LinSurfFit(x, y, z)

    return a


if __name__ == "__main__":
    plydata = PlyData.read('example-1.ply')
    x = (plydata['vertex']['x'])
    y = (plydata['vertex']['y'])
    z = (plydata['vertex']['z'])

    a = main(x, y, z, 'quad')
    print('final a =')
    print(a)
Example #59
    f.seek(0)
    head = "ply\nformat ascii 1.0\ncomment VCGLIB generated\nelement vertex " + '2048' + "\nproperty float x\nproperty float y\nproperty float z\nelement face " + str(
        0) + "\nproperty list uchar int vertex_indices\nend_header\n"
    f.write(head)
    for line in lines[:]:
        f.write(line + "\n")
    f.close()


if __name__ == '__main__':
    import os
    import time

    import numpy as np
    import tensorflow as tf
    from plyfile import PlyData

    for file in os.listdir('../../data'):
        file = os.path.join('../../data', file)
        plydata = PlyData.read(file)
        a_data = []
        for i in range(plydata['vertex'].count):
            line = [
                plydata['vertex']['x'][i], plydata['vertex']['y'][i],
                plydata['vertex']['z'][i]
            ]
            a_data.append(line)
        pc = np.array(a_data)
    coords = pc[:, :3]
    pts = tf.expand_dims(tf.constant(pc), axis=0)
    # pts = tf.transpose(pts,perm=[0, 2, 1])

    # pc_input = tf.placeholder(tf.float32, shape=(2, 2048, 3))
    #
    pts = np.tile(pts, reps=(1, 1, 1))
    return all_neighbors


def get_neighbors(root_id, plydata):
    neighbors = set()
    for face in plydata.elements[1]:
        if root_id in face[0]:
            for neighbor in face[0]:
                if root_id != neighbor:
                    neighbors.add(neighbor)
    return neighbors


def get_faces(root_id, plydata):
    faces = []
    for face in plydata.elements[1]:
        if root_id in face[0]:
            faces.append(face)
    return faces


def get_all_vertexes(plydata):
    vertexes = set()
    for face in plydata.elements[1]:
        vertexes.add(face[0][0])
    return vertexes

if __name__ == '__main__':
    plydata = PlyData.read('big.ply')
    print(get_neighbors_second_level(0, plydata))
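get_neighbors rescans every face for each queried vertex, which costs O(faces) per call. A one-pass alternative (a sketch) that builds the whole vertex-adjacency map up front and answers later queries from a dict:

from collections import defaultdict

def build_adjacency(plydata):
    # one pass over the face element; adj[v] is the set of vertices sharing a face with v
    adj = defaultdict(set)
    for face in plydata.elements[1]:
        ids = face[0]
        for a in ids:
            for b in ids:
                if a != b:
                    adj[a].add(b)
    return adj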