Example #1
import numpy as np
import libply_c  # compiled C extension from the superpoint_graph repository (ply_c)


def process_chunk(vertex_chunk, label_chunk, has_labels, xyz, rgb, labels):
    # voxel_width and n_class are expected to be defined at module level
    xyz_full = np.ascontiguousarray(
        np.array(vertex_chunk.values[:, 0:3], dtype='float32'))
    rgb_full = np.ascontiguousarray(
        np.array(vertex_chunk.values[:, 4:7], dtype='uint8'))
    if has_labels:
        labels_full = label_chunk.values.squeeze()
    else:
        labels_full = None
    if voxel_width > 0:
        if has_labels:
            xyz_sub, rgb_sub, labels_sub, objects_sub = libply_c.prune(
                xyz_full, voxel_width, rgb_full, labels_full,
                np.zeros(1, dtype='uint8'), n_class, 0)
            labels = np.vstack((labels, labels_sub))
            del labels_full
        else:
            xyz_sub, rgb_sub, l, o = libply_c.prune(
                xyz_full, voxel_width, rgb_full,
                np.zeros(1, dtype='uint8'),
                np.zeros(1, dtype='uint8'), 0, 0)
        xyz = np.vstack((xyz, xyz_sub))
        rgb = np.vstack((rgb, rgb_sub))
    else:
        xyz = xyz_full
        rgb = rgb_full
        labels = labels_full
    return xyz, rgb, labels
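
A minimal driving loop for this chunked reader might look like the following sketch. It assumes `voxel_width` and `n_class` are module-level globals (as `process_chunk` reads them from the enclosing scope), that the Semantic3D point file stores `x y z intensity r g b` per line, and that a matching `.labels` file exists; the file names and chunk size are hypothetical.

import numpy as np
import pandas as pd

voxel_width, n_class = 0.05, 8  # assumed globals read by process_chunk

xyz = np.zeros((0, 3), dtype='float32')
rgb = np.zeros((0, 3), dtype='uint8')
labels = np.zeros((0, n_class + 1), dtype='uint32')

# hypothetical file names; chunked reading keeps memory bounded on huge clouds
vertex_iter = pd.read_csv('pointcloud.txt', sep=' ', header=None, chunksize=5000000)
label_iter = pd.read_csv('pointcloud.labels', sep=' ', header=None, chunksize=5000000)

for vertex_chunk, label_chunk in zip(vertex_iter, label_iter):
    xyz, rgb, labels = process_chunk(vertex_chunk, label_chunk, True, xyz, rgb, labels)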
Example #2
import numpy as np
import pandas as pd
import libply_c  # compiled C extension from the superpoint_graph repository (ply_c)


def read_semantic3d_format2(data_file, n_class, file_label_path, voxel_width,
                            ver_batch):
    """Read the semantic3d format.
    ver_batch : if ver_batch > 0, load the file ver_batch lines at a time;
                useful for huge files (> 5 million lines)
    voxel_width : if voxel_width > 0, voxelize the data on a regular grid
    n_class : the number of classes; if 0, do not look for labels (test set)
    Implements batch loading for huge files, and pruning."""

    xyz = np.zeros((0, 3), dtype='float32')
    rgb = np.zeros((0, 3), dtype='uint8')
    labels = np.zeros((0, n_class + 1), dtype='uint32')
    #---the clouds can potentially be too big to parse directly---
    #---they are cut in batches in the order they are stored---
    i_rows = 0
    while True:
        try:
            if ver_batch > 0:
                print("Reading lines %d to %d" % (i_rows, i_rows + ver_batch))
                vertices = np.genfromtxt(data_file,
                                         delimiter=' ',
                                         max_rows=ver_batch,
                                         skip_header=i_rows)
            else:
                vertices = pd.read_csv(data_file, sep=' ',
                                       header=None).values
        except (StopIteration, pd.errors.ParserError):
            # end of file
            break
        if len(vertices) == 0:
            break
        xyz_full = np.ascontiguousarray(
            np.array(vertices[:, 0:3], dtype='float32'))
        rgb_full = np.ascontiguousarray(
            np.array(vertices[:, 4:7], dtype='uint8'))
        del vertices
        if n_class > 0:
            labels_full = np.genfromtxt(file_label_path,
                                        dtype="u1",
                                        delimiter=' ',
                                        max_rows=ver_batch if ver_batch > 0 else None,
                                        skip_header=i_rows)

        if voxel_width > 0:
            if n_class > 0:
                xyz_sub, rgb_sub, labels_sub, objects_sub = libply_c.prune(
                    xyz_full, voxel_width, rgb_full, labels_full,
                    np.zeros(1, dtype='uint8'), n_class, 0)
                labels = np.vstack((labels, labels_sub))
            else:
                xyz_sub, rgb_sub, l, o = libply_c.prune(
                    xyz_full, voxel_width, rgb_full,
                    np.zeros(1, dtype='uint8'),
                    np.zeros(1, dtype='uint8'), 0, 0)
            del xyz_full, rgb_full
            xyz = np.vstack((xyz, xyz_sub))
            rgb = np.vstack((rgb, rgb_sub))
        else:
            # no voxelization: accumulate the raw points
            xyz = np.vstack((xyz, xyz_full))
            rgb = np.vstack((rgb, rgb_full))
        if ver_batch <= 0:
            # the whole file was read in a single pass
            break
        i_rows = i_rows + ver_batch
    print("Reading done")
    if n_class > 0:
        return xyz, rgb, labels
    else:
        return xyz, rgb
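
For readers who do not have the compiled `libply_c` extension, the following pure-NumPy sketch illustrates the idea behind `libply_c.prune`: snap points to a regular grid of cell size `voxel_width` and average the coordinates and colors inside each occupied voxel. It is an illustrative approximation only; the real extension also accumulates per-voxel label and object histograms.

import numpy as np

def voxel_prune_sketch(xyz, rgb, voxel_width):
    # assign each point to a voxel by flooring its coordinates on the grid
    keys = np.floor(xyz / voxel_width).astype('int64')
    # unique voxel ids; inverse maps every point to its voxel index
    _, inverse, counts = np.unique(keys, axis=0,
                                   return_inverse=True, return_counts=True)
    n_vox = counts.shape[0]
    xyz_sub = np.zeros((n_vox, 3), dtype='float64')
    rgb_sub = np.zeros((n_vox, 3), dtype='float64')
    # sum coordinates and colors per voxel, then divide by the point count
    np.add.at(xyz_sub, inverse, xyz)
    np.add.at(rgb_sub, inverse, rgb)
    xyz_sub /= counts[:, None]
    rgb_sub /= counts[:, None]
    return xyz_sub.astype('float32'), rgb_sub.astype('uint8')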
Example #3
import argparse
import glob
import os

import numpy as np
from sklearn.linear_model import RANSACRegressor

# project-local dependencies from the superpoint_graph repository; the exact
# module paths may differ depending on the repository layout
import libcp
import libply_c
from graphs import *    # compute_graph_nn_2
from provider import *  # read_s3dis_format, read_semantic3d_format, read_vkitti_format, write_structure


def main():
    parser = argparse.ArgumentParser(description='Large-scale Point Cloud Semantic Segmentation with Superpoint Graphs')
    
    parser.add_argument('--ROOT_PATH', default='datasets/s3dis')
    parser.add_argument('--dataset', default='s3dis')
    #parameters
    parser.add_argument('--compute_geof', default=1, type=int, help='compute hand-crafted features of the local geometry')
    parser.add_argument('--k_nn_local', default=20, type=int, help='number of neighbors to describe the local geometry')
    parser.add_argument('--k_nn_adj', default=5, type=int, help='number of neighbors for the adjacency graph')
    parser.add_argument('--voxel_width', default=0.03, type=float, help='voxel size when subsampling (in m)')
    parser.add_argument('--plane_model', default=1, type=int, help='uses a simple plane model to derive elevation')
    parser.add_argument('--use_voronoi', default=0.0, type=float, help='uses the Voronoi graph in combination with knn to build the adjacency graph, useful for sparse acquisitions. If 0., do not use Voronoi; if > 0, gives the upper length limit for an edge to be kept')
    parser.add_argument('--ver_batch', default=5000000, type=int, help='batch size for reading large files')
    args = parser.parse_args()
    
    #path to data
    if args.ROOT_PATH[-1]=='/':
        root = args.ROOT_PATH
    else:
        root = args.ROOT_PATH+'/'
        
    if not os.path.exists(root + 'features_supervision'):
        os.makedirs(root + 'features_supervision')
    
    #list of subfolders to be processed
    if args.dataset == 's3dis':
        folders = ["Area_1/", "Area_2/", "Area_3/", "Area_4/", "Area_5/", "Area_6/"]
        n_labels = 13
    elif args.dataset == 'sema3d':
        folders = ["train/", "test_reduced/", "test_full/"]
        n_labels = 8
    elif args.dataset == 'vkitti':
        folders = ["01/", "02/","03/", "04/","05/", "06/"]
        n_labels = 13 #number of classes
    elif args.dataset == 'custom_dataset':
        folders = ["train/", "test/"]
        n_labels = 10 #number of classes
    else:
        raise ValueError('%s is an unknown data set' % args.dataset)

    pruning = args.voxel_width > 0
    #------------------------------------------------------------------------------
    for folder in folders:
        print("=================\n   "+folder+"\n=================")
        data_folder = root + "data/"              + folder
        str_folder  = root + "features_supervision/"  + folder
        
        if not os.path.isdir(data_folder):
            raise ValueError("%s does not exist" % data_folder)
        if not os.path.isdir(str_folder):
            os.mkdir(str_folder)
            
        files = []
        if args.dataset == 's3dis':
            files = [os.path.join(data_folder, o) for o in os.listdir(data_folder)
                     if os.path.isdir(os.path.join(data_folder, o))]
        elif args.dataset == 'sema3d':
            files = glob.glob(data_folder + "*.txt")
        elif args.dataset == 'vkitti':
            files = glob.glob(data_folder + "*.npy")

        if len(files) == 0:
            continue
        n_files = len(files)
        i_file = 0
        for file in files:
            file_name = os.path.splitext(os.path.basename(file))[0]
            if args.dataset=='s3dis':
                data_file   = data_folder + file_name + '/' + file_name + ".txt"
                str_file    = str_folder       + file_name + '.h5'
            elif args.dataset=='sema3d':
                file_name_short = '_'.join(file_name.split('_')[:2])
                data_file  = data_folder + file_name + ".txt"
                label_file = data_folder + file_name + ".labels"
                str_file    = str_folder + file_name_short + '.h5'
            elif args.dataset=='vkitti':
                data_file   = data_folder + file_name + ".npy"
                str_file    = str_folder  + file_name + '.h5'
            i_file = i_file + 1
            print(str(i_file) + " / " + str(n_files) + "---> "+file_name)
            if os.path.isfile(str_file):
                print("    graph structure already computed - delete for update...")
            else:
                #--- build the geometric feature file h5 file ---
                print("    computing graph structure...")
                #--- read the data files and compute the labels---
                if args.dataset == 's3dis':
                    xyz, rgb, labels, objects = read_s3dis_format(data_file)
                    if pruning:
                        n_objects = int(objects.max() + 1)
                        xyz, rgb, labels, objects = libply_c.prune(xyz, args.voxel_width, rgb, labels, objects, n_labels, n_objects)
                        # keep the dominant object of each voxel (skipping column 0)
                        objects = objects[:, 1:].argmax(axis=1) + 1
                elif args.dataset=='sema3d':
                    has_labels = os.path.isfile(label_file)
                    if has_labels:
                        xyz, rgb, labels = read_semantic3d_format(data_file, n_labels, label_file, args.voxel_width, args.ver_batch)
                    else:
                        xyz, rgb = read_semantic3d_format(data_file, 0, '', args.voxel_width, args.ver_batch)
                        labels = np.array([0])
                        objects = np.array([0])
                        is_transition = np.array(False)
                elif args.dataset == 'vkitti':
                    xyz, rgb, labels = read_vkitti_format(data_file)
                    if pruning:
                        xyz, rgb, labels, o = libply_c.prune(xyz.astype('f4'), args.voxel_width, rgb.astype('uint8'), labels.astype('uint8'), np.zeros(1, dtype='uint8'), n_labels, 0)
                #---compute nn graph-------
                n_ver = xyz.shape[0]
                print("computing NN structure")
                graph_nn, local_neighbors = compute_graph_nn_2(xyz, args.k_nn_adj, args.k_nn_local, voronoi = args.use_voronoi)
                
                if args.dataset=='s3dis':
                    is_transition = objects[graph_nn["source"]]!=objects[graph_nn["target"]]
                elif args.dataset=='sema3d' and has_labels:
                    #sema has no object, we make them ourselves with label inpainting
                    hard_labels = np.argmax(labels[:,1:], 1)+1
                    no_labels = (labels[:,1:].sum(1)==0).nonzero()
                    hard_labels[no_labels] = 0
                    is_transition = (hard_labels[graph_nn["source"]] != hard_labels[graph_nn["target"]]) \
                        * (hard_labels[graph_nn["source"]] != 0) \
                        * (hard_labels[graph_nn["target"]] != 0)
                   
                    edg_source = graph_nn["source"][(is_transition==0).nonzero()].astype('uint32')
                    edg_target = graph_nn["target"][(is_transition==0).nonzero()].astype('uint32')
                    edge_weight = np.ones_like(edg_source).astype('f4')
                    node_weight = np.ones((n_ver,),dtype='f4')
                    node_weight[no_labels] = 0
                    print("Inpainting labels")
                    dump, objects = libcp.cutpursuit2(np.array(hard_labels).reshape((n_ver,1)).astype('f4'), edg_source, edg_target, edge_weight, node_weight, 0.01)
                    is_transition = objects[graph_nn["source"]]!=objects[graph_nn["target"]]
                elif args.dataset=='vkitti':
                    #we define the objects as the constant connected components of the labels
                    hard_labels = np.argmax(labels, 1)
                    is_transition = hard_labels[graph_nn["source"]]!=hard_labels[graph_nn["target"]]
                    
                    dump, objects = libply_c.connected_comp(n_ver \
                       , graph_nn["source"].astype('uint32'), graph_nn["target"].astype('uint32') \
                       , (is_transition==0).astype('uint8'), 0)
                    
                if args.compute_geof:
                    geof = libply_c.compute_geof(xyz, local_neighbors, args.k_nn_local).astype('float32')
                    geof[:, 3] = 2. * geof[:, 3]  # double the fourth geometric feature
                else:
                    geof = 0
                
                if args.plane_model: #use a simple plane model to compute the elevation
                    low_points = ((xyz[:,2]-xyz[:,2].min() < 0.5)).nonzero()[0]
                    reg = RANSACRegressor(random_state=0).fit(xyz[low_points,:2], xyz[low_points,2])
                    elevation = xyz[:,2]-reg.predict(xyz[:,:2])
                else:
                    elevation = xyz[:,2] - xyz[:,2].min()
                
                #compute the xy normalized position
                ma, mi = np.max(xyz[:,:2],axis=0,keepdims=True), np.min(xyz[:,:2],axis=0,keepdims=True)
                xyn = (xyz[:,:2] - mi) / (ma - mi + 1e-8) #global position
                    
                write_structure(str_file, xyz, rgb, graph_nn, local_neighbors.reshape([n_ver, args.k_nn_local]), \
                    is_transition, labels, objects, geof, elevation, xyn)
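
The plane-model branch above derives the elevation by fitting a RANSAC regressor to the points lying within 0.5 m of the lowest point, then subtracting the predicted ground height. A self-contained sketch of the same idea on synthetic data (all array names and constants below are illustrative):

import numpy as np
from sklearn.linear_model import RANSACRegressor

# synthetic cloud: a gently tilted ground plane plus some elevated points
rng = np.random.default_rng(0)
xy = rng.uniform(0, 10, (1000, 2)).astype('float32')
z = 0.05 * xy[:, 0] + rng.normal(0, 0.02, 1000)
z[:100] += rng.uniform(1, 5, 100)  # objects above the ground
xyz = np.column_stack((xy, z)).astype('float32')

# fit the ground plane only on points within 0.5 m of the lowest point
low_points = (xyz[:, 2] - xyz[:, 2].min() < 0.5).nonzero()[0]
reg = RANSACRegressor(random_state=0).fit(xyz[low_points, :2], xyz[low_points, 2])
elevation = xyz[:, 2] - reg.predict(xyz[:, :2])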