Example No. 1
def sample_usage_with_texture_gpu():
    from data_loaders.load_3d_models import load_obj_file
    from utilities.geometry_calculations import rotate_point_cloud
    from lidar import Lidar
    import numpy as np
    import os

    obj_file = os.path.expanduser("~/Downloads/3d models/Porsche_911_GT2.obj")
    vertices, polygons, uv_coordinates, uv_coordinate_indices = load_obj_file(
        obj_file, texture=True)
    vertices = rotate_point_cloud(vertices, -.5)
    point_cloud, ray_hit_uv = \
        Lidar(delta_azimuth=2 * np.pi / 3000,
              delta_elevation=np.pi / 800,
              position=(0, -10, 1)).sample_3d_model_with_texture_gpu(vertices,
                                                                     polygons,
                                                                     uv_coordinates,
                                                                     uv_coordinate_indices)
    # Count the rays that returned a hit (all-zero rows are misses).
    print(len(ray_hit_uv[np.any(ray_hit_uv != 0, axis=1)]))
    print(len(point_cloud[np.any(point_cloud != 0, axis=1)]))

    import pptk
    v = pptk.viewer(point_cloud[np.any(point_cloud != 0, axis=1)])
    v.set(point_size=.003)
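A note on the np.any(... != 0, axis=1) masks used above: rows that are all zeros (presumably rays that hit nothing) are dropped before counting and viewing. A minimal illustration:

import numpy as np
pc = np.array([[0., 0., 0.], [1., 2., 3.], [0., 0., 0.]])
hits = pc[np.any(pc != 0, axis=1)]  # keeps only [[1., 2., 3.]]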
Example No. 2
    def test_get_point_cloud(self):
        panoId_2019 = "BM1Qt23drK3-yMWxYfOfVg"
        panoId_2019 = "AZK1jDGIZC1zmuooSZCzEg"  # Walker Ave to Franlin Elementary

        # lat, lon = 40.7093514, -74.2453146  # road bridge ramp tilt, z = 37 m

        # lat, lon = 40.780667, -73.9610365
        pano1 = GSV_pano(panoId=panoId_2019, crs_local=6526)
        #     pano1 = GSV_pano(request_lon=lon, request_lat=lat, crs_local=6526, saved_path=os.getcwd())
        dm = pano1.get_depthmap(zoom=0, saved_path=os.getcwd())
        # point_cloud = pano1.get_point_cloud(distance_threshole=200, zoom=0)['point_cloud']
        point_cloud = pano1.get_ground_points(color=True, zoom=4)
        # point_cloud = pano1.get_DOM_points()

        print(point_cloud.shape)

        P = point_cloud
        v = pptk.viewer(P[:, :3])
        v.set(point_size=0.001, show_axis=False, show_grid=False)
        # v.attributes(P[:, 4:7] / 255.0, P[:, 3], P[:, 8:11]/255.0, P[:, 7])
        # v.attributes(P[:, 3:6] / 255.0)  # for DOM points
        v.attributes(P[:, 4:7] / 255.0)
Example No. 3
def show_sdf(sdf: kaolin.rep.SDF,
             mode='mesh',
             bbox_center: float = 0.,
             bbox_dim: float = 1.,
             num_points: int = 100000,
             colors=[.7, .2, .2]):
    r""" Visualizer for voxel array

    Args:
        sdf (kaolin.rep.SDF): sdf class object.
        mode (str): visualization mode, can render as a mesh, a pointcloud,
                or a colourful sdf pointcloud.
        colors (list): RGB colour values for rendered array.
    """
    assert mode in ['mesh', 'pointcloud', 'sdf']

    if mode == 'mesh':
        verts, faces = kal.conversions.sdf_to_trianglemesh(
            sdf, bbox_center, bbox_dim)
        mesh = trimesh.Trimesh(vertices=verts.data.cpu().numpy(),
                               faces=faces.data.cpu().numpy())
        mesh.visual.vertex_colors = colors
        mesh.show()

    elif mode == 'pointcloud':
        points = torch.rand(num_points, 3)
        points = bbox_dim * (points + (bbox_center - .5))
        distances = sdf(points)
        points = points[distances <= 0]
        kal.visualize.show_point(points)

    elif mode == 'sdf':
        points = torch.rand(num_points, 3)
        points = bbox_dim * (points + (bbox_center - .5))
        distances = sdf(points)
        v = pptk.viewer(points.data.cpu().numpy())
        v.attributes(distances.data.cpu().numpy())
        input()
        v.close()
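For readers without kaolin installed, here is a minimal, self-contained sketch of the 'sdf' branch above; the sphere_sdf helper is hypothetical, standing in for a kaolin.rep.SDF callable that maps (N, 3) points to (N,) signed distances:

import numpy as np
import pptk

def sphere_sdf(points, radius=0.4):
    # signed distance to a sphere centered at the origin (assumption)
    return np.linalg.norm(points, axis=1) - radius

points = np.random.rand(100000, 3) - 0.5  # uniform samples in the unit box
distances = sphere_sdf(points)
v = pptk.viewer(points)
v.attributes(distances)                   # color-code points by signed distance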
Example No. 4
    def rgbd_to_pcl(self, rgb_image, depth_image, visualize_cloud=False):
        """ Converts RGB-D image to pointcloud. Adapted from
            https://github.com/ethz-asl/scenenet_ros_tools/blob/master/nodes/scenenet_to_rosbag.py
        """
        center_x = self.cx
        center_y = self.cy

        constant_x = 1 / self.fx
        constant_y = 1 / self.fy

        vs = np.array([(v - center_x) * constant_x
                       for v in range(0, depth_image.shape[1])])
        us = np.array([(u - center_y) * constant_y
                       for u in range(0, depth_image.shape[0])])

        # Find z coordinate of each pixel given the depth in ray length.
        z = euclidean_ray_length_to_z_coordinate(depth_image, self)
        # For datasets where the depth data is stored as the z coordinate, change the above line to:
        # z = depth_image

        # Convert the z coordinate from mm to m.
        z = z / 1000.0

        x = np.multiply(z, vs)
        y = z * us[:, np.newaxis]

        stacked = np.ma.dstack((x, y, z, rgb_image))
        compressed = stacked.compressed()
        pointcloud = compressed.reshape((int(compressed.shape[0] / 6), 6))

        # Optional: visualize point cloud.
        if visualize_cloud:
            xyz = pointcloud[:, :3]
            rgb = pointcloud[:, 3:6] / 255
            import pptk
            v = pptk.viewer(xyz, rgb)

        return pointcloud
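A vectorized sketch of the same pinhole back-projection on synthetic data; the intrinsics fx, fy, cx, cy are illustrative values, and the depth map is assumed to already hold the z coordinate in meters:

import numpy as np

h, w = 480, 640
fx = fy = 525.0                # made-up focal lengths in pixels
cx, cy = w / 2.0, h / 2.0      # principal point at the image center
depth = np.ones((h, w))        # flat synthetic depth map, 1 m everywhere

cols, rows = np.meshgrid(np.arange(w), np.arange(h))
x = (cols - cx) / fx * depth
y = (rows - cy) / fy * depth
xyz = np.dstack((x, y, depth)).reshape(-1, 3)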
Example No. 5
def plotDvsSpaceTime(inDicts, **kwargs):
    if isinstance(inDicts, list):
        for inDict in inDicts:
            plotDvsSpaceTime(inDict, **kwargs)
        return
    else:
        inDict = inDicts
    if not isinstance(inDict, dict):
        return
    if 'ts' not in inDict:
        #title = kwargs.pop('title', '')
        if 'info' in inDict:  # inDict is already known to be a dict here
            fileName = inDict['info'].get('filePathOrName')
            if fileName is not None:
                print('plotDvsSpaceTime was called for file ' + fileName)
                #title = (title + ' ' + fileName).lstrip()
        for key in inDict.keys():
            #    kwargs['title'] = (title + ' ' + key).lstrip()
            plotDvsSpaceTime(inDict[key], **kwargs)
        return
    # From this point onwards, it's a data-type container
    if 'pol' not in inDict:
        return
    # From this point onwards, it's a dvs container

    inDict = cropSpaceTime(inDict, **kwargs)

    # scale x and y to match time range
    timeRange = inDict['ts'][-1] - inDict['ts'][0]
    spatialDim = max(max(inDict['x']), max(inDict['y']))
    scalingFactor = timeRange / spatialDim
    events = np.concatenate(
        (inDict['x'][:, np.newaxis] * scalingFactor,
         inDict['y'][:, np.newaxis] * scalingFactor,
         inDict['ts'][:, np.newaxis]),
        axis=1)
    pptkViewer = pptk.viewer(events)
    return pptkViewer
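A self-contained sketch of the same space-time rendering on synthetic events; the sensor resolution and timestamps are invented for illustration:

import numpy as np
import pptk

n = 10000
x = np.random.randint(0, 346, n).astype(float)  # assumed sensor width
y = np.random.randint(0, 260, n).astype(float)  # assumed sensor height
ts = np.sort(np.random.rand(n))                 # timestamps in seconds
scalingFactor = (ts[-1] - ts[0]) / max(x.max(), y.max())
pptk.viewer(np.column_stack((x * scalingFactor, y * scalingFactor, ts)))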
Example No. 6
def visualize(xs, ys, zs, genOption):

	scale = 1.0
	if genOption == "geo":
		scale = 111111.0  # rough degrees-to-meters conversion
	elif genOption == "xyz":
		scale = 1.0
	else:
		print("Invalid option")
		return
	# Stack the scaled coordinates into an (N, 3) point array.
	xyz = np.column_stack((np.asarray(xs, dtype=float) * scale,
		np.asarray(ys, dtype=float) * scale,
		np.asarray(zs, dtype=float)))
	colors = np.random.rand(len(xyz), 3)  # random per-point colors

	v = pptk.viewer(xyz)
	v.attributes(colors)
	v.set(point_size=0.01)
	v.set(lookat=xyz[-1])
Example No. 7
def loadGUI(filename):
    #Create instruction window
    instructionWindow = {
    }  #We'll save the interactive Buttons and Labels here.
    instructionWindow["filename"] = filename  #Save the filename
    root = Tk()  #Create the window
    instructionWindow["window"] = root  #Save a pointer to the window
    root.title("Human segmentation manual annotation")
    #What file did we load:
    Label(root, text="Loaded file: " + filename).pack(side=TOP, pady=10)
    #Instruction text:
    instructionTxt = Label(root, text="Click Next to get started.", fg="red")
    instructionWindow["instructionTxt"] = instructionTxt
    instructionTxt.pack(side=TOP, pady=10)
    #Action button:
    actionBtn = Button(root,
                       text='Next',
                       fg="darkgreen",
                       command=actionBtnCallback)
    instructionWindow["actionBtn"] = actionBtn
    actionBtn.pack(side=LEFT, padx=10)
    #Back button:
    backBtn = Button(root,
                     text='Go Back',
                     fg="darkgreen",
                     command=backBtnCallback,
                     state="disabled")
    instructionWindow["backBtn"] = backBtn
    backBtn.pack(side=LEFT, padx=10)
    #Quit button:
    Button(root, text='Quit', command=callQuit).pack(side=RIGHT, padx=10)
    root.protocol("WM_DELETE_WINDOW", callQuit)  #If you x out, call callQuit()
    #Create pptk point cloud viewer:
    plyViewerWindow = pptk.viewer([0, 0, 0], debug=True)
    plyViewerWindow.set(point_size=0.001, show_grid=True)
    plyViewerWindow.color_map('jet')
    return instructionWindow, plyViewerWindow
Example No. 8
def test_extract_low():
    import pickle, pptk
    close = False
    filename = "/home/henry/Documents/projects/pylidarmot/src/road_estimation/data/pcd.pkl"
    with open(filename, 'rb') as fp:
        pcd = pickle.load(fp)
    # data = np.array([
    #     [0, 1, 2, 3, 4, 5, 6],
    #     [2, 3, 4, 3, 4, 5, 3],
    #     [0, 1, 0, 1, 0, 1, 0]
    # ]).T
    # data = np.random.rand(1000, 3)
    # data = data * np.array([[10, 9, 1]]) - np.array([[0, 3, 0]])
    # pcd = PointCloud(data.T)
    pcd, _ = pcd.clip_by_x([-10, 60])
    vec1 = np.array([1,0,0])
    vec2 = np.array([0,0,1])
    pt1 = np.array([0,0,0])
    threshold = [-3.0, 6.0]
    plane_from_vec = Plane3D.create_plane_from_vectors_and_point(vec1, vec2, pt1)
    pcd_close = clip_pcd_by_distance_plane(pcd, plane_from_vec, threshold, in_only=True)
    
    pcd_low = extract_low(pcd_close.data.T)
    # pcd_low = np.zeros(pcd_low.shape)
    vis_data = np.vstack([pcd_close.data.T, pcd_low])

    attribute = np.ones(vis_data.shape).astype(float)
    attribute[-pcd_low.shape[0]:, 1:] = 0  # zero G and B so the low points show red
    v = pptk.viewer(vis_data)
    v.attributes(attribute, vis_data[:,2]/ np.max(vis_data[:,2]))
    v.set(point_size=0.01)
    # pose = [20,10,-10,1,-np.pi/2,10]
    # pose = [40,15,-40,-np.pi/6,-np.pi/3,5]
    # v.play(pose, ts = [0], tlim=[0, 1], repeat=False)
    if close:
        time.sleep(1)
        v.close()
Example No. 9
def sample_usage():
    from lidar import Lidar
    from data_loaders.load_3d_models import load_Porsche911
    from data_loaders.create_test_data import create_box, create_rectangle
    import pptk
    import os

    scene = Scene(spawn_area=Polygon(((-10, -10), (-10, 10),
                                      (10, 10), (10, -10))))
    scene.add_model_to_shelf(*load_Porsche911(), "car")
    scene.add_model_to_shelf(*create_box(position=(0, 0, 2), size=(4, 6, 4)),
                             "box")
    scene.add_model_to_shelf(
        *create_rectangle(position=(0, 0, 0), size=(25, 25)), "ground")

    scene.place_object_randomly("car")
    scene.place_object_randomly("car")
    scene.place_object_randomly("car")
    scene.place_object_randomly("box")
    scene.place_object_randomly("box")
    scene.place_object("ground", position=(0, 0, 0), angle=0)

    scene_vertices, scene_polygons = scene.build_scene()
    path = os.path.join(os.path.dirname(__file__), "tmp", "scene_sample.png")
    scene.visualize(scene_vertices, path=path)

    # point_cloud = Lidar(delta_azimuth=2 * np.pi / 4000,
    #                     delta_elevation=np.pi / 500,
    #                     position=(0, -40, 1)).sample_3d_model(scene_vertices,
    #                                                           scene_polygons,
    #                                                           rays_per_cycle=400)
    point_cloud = Lidar(delta_azimuth=2 * np.pi / 4000,
                        delta_elevation=np.pi / 1000,
                        position=(0, -40, 1)).sample_3d_model_gpu(
                            scene_vertices, scene_polygons)
    v = pptk.viewer(point_cloud)
    v.set(point_size=.01)
Example No. 10
def main(shot):
    depth, depth_pretty, bgr = open_shot(shot)  # open RGB and D data of the shot
    cv2.imshow('Depth', depth_pretty)  # display depth
    cv2.imshow('RGB', bgr)  # display RGB

    # depth map in meters
    m_depth = raw_depth_to_meters(depth)

    # show in 3D point cloud
    xyz = xyz_from_depth_meters(m_depth)
    # no-colored version
    v = pptk.viewer(xyz)  # show 3D point cloud
    v.set(point_size=0.001)

    #add color information
    rgb_colors = color_of_xyz(xyz, bgr)
    v.attributes(rgb_colors / 255.)

    while True:
        if cv2.waitKey(10) == 27:
            cv2.destroyAllWindows()
            v.close()
            break
Example No. 11
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 11 13:15:57 2020

@author: ken
"""

import pptk
from utils import LivoxFileRead

# Read a previously written-out Livox .lvx file
data = LivoxFileRead("2020-06-11_17-38-30.lvx")
# Visualize with pptk, using the fourth column (presumably intensity) as an attribute
pptk.viewer(data[:, 0:3], data[:, 3])
Example No. 12
args = parser.parse_args()

filepath = os.path.join(args.dataset, args.file)
z = cv2.imread(filepath, cv2.IMREAD_ANYCOLOR | cv2.IMREAD_ANYDEPTH)

z = np.asarray(z).T
h, w = z.shape

x = np.arange(0, w)
y = np.arange(0, h)
xx, yy = np.meshgrid(x, y)
p = np.dstack([xx, yy, -z]).reshape(-1, 3)

import pptk
v = pptk.viewer(p)
v.set(point_size=0.33)

#import trimesh
#v = trimesh.points.PointCloud(p)
#v.show()

#from mpl_toolkits.mplot3d import Axes3D
#import matplotlib.pyplot as plt
#import matplotlib.tri as mtri

#fig = plt.figure()
#ax = fig.gca(projection='3d')
# ax.scatter3D(xx, yy, z, c=z, cmap='Greens') #super slow
# ax.plot_surface(xx, yy, z, rstride=1, cstride=1, cmap='viridis', edgecolor='none') #memory error
#plt.show()
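The same depth-image-to-height-field idea on synthetic data, so it runs without an input file; the sinusoidal surface is invented for illustration:

import numpy as np
import pptk

h, w = 200, 200
xx, yy = np.meshgrid(np.arange(w), np.arange(h))
zz = 20.0 * np.sin(xx / 20.0) * np.cos(yy / 20.0)  # made-up height field
v = pptk.viewer(np.dstack([xx, yy, -zz]).reshape(-1, 3))
v.set(point_size=0.33)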
Example No. 13
def showNeighbor_pointClouds(panoId="", lat=40.7303117, lon=-74.1815408, color=True, saved_path=''):
    # Scratch list of test locations; only the last lat/lon assignment takes effect.
    lat, lon = 40.7301114, -74.1806459
    lat, lon =40.7083166,-74.2559286
    lat, lon = 40.6677323,-74.1797492
    lat, lon = 40.6580514,-74.2094375   # large error
    lat, lon = 40.6594416,-74.2086
    lat, lon = 40.679933,-74.277889
    lat, lon = 40.6749695,-74.2957822
    lat, lon = 40.6740651,-74.2958849
    lat, lon = 40.7068861,-74.2569793
    lat, lon = 40.7064717,-74.2576984
    lat, lon = 40.7092409,-74.2527648
    lat, lon =40.7092581,-74.2452077
    lat, lon =40.672149,-74.1862878
    lat, lon =40.6650419,-74.1821079  # mosaic precision is very bad on bridge.
    lat, lon = 40.6645449,-74.1825446
    lat, lon = 40.6511743,-74.2034643
    lat, lon = 40.6506152,-74.1853549


    lat, lon = 40.7065675,-74.2822872  # black hole in middle
    lat, lon = 40.7059479,-74.2829309

    lon, lat = -77.0685390, 38.9265898  # Washington, DC.
    lat, lon = 40.7093514, -74.2453146  # road bridge tilt
    lat, lon = 40.6490476, -74.1921442

    lat, lon = 40.7068861, -74.2569793  # Franklin elem.
    lat, lon = 40.6693928, -74.2276427  # Elizabeth, NJ, typical area
    lat, lon = 40.780667, -73.9610365  # Good example, single side, school marking.
    lat, lon = 40.7303117, -74.1815408  # NJIT kinney street
    lat, lon = 40.7093514, -74.2453146  # road bridge ramp tilt, z = 37 m
    lat, lon = 40.7115964, -74.2443227 # under ramp, z =28.3m
    lat, lon = 40.7088847,-74.2451582 # under ramp, z =24.7m
    lat, lon = 40.7096628,-74.244780 # middle ramp, z =32.1m
    lat, lon =40.7065092,-74.2565972  # Near Franklin elem. school, NJ
    lat, lon = 40.7045795, -74.2571879

    lat, lon = 40.7303117, -74.1815408  # NJIT kinney street
    lat, lon = 40.7092976,-74.2531686  # Millrun Manor Dr.


    jdata = gpano.getPanoJsonfrmLonat(lon, lat)

    panoId = jdata['Location']['panoId']

    # lat, lon = 40.7093514, -74.2453146  # road bridge ramp tilt, z = 37 m
    # panoId = "eCxNsAw4zyWgE94PA2t8EA"  # under ramp, z =31.9m


    P = getPointCloud_from_PanoId(panoId = panoId)

    elevation_egm96_m = jdata['Location']['elevation_egm96_m']
    elevation_egm96_m = float(elevation_egm96_m)
    P[:, 2:3] += elevation_egm96_m


    # crs_4326 = CRS.from_epsg(4326)
    crs_local =  CRS.from_proj4(f"+proj=tmerc +lat_0={lat} +lon_0={lon} +datum=WGS84 +units=m +no_defs")
    print(crs_local)

    transformer = Transformer.from_crs(4326, crs_local)
    new_center = transformer.transform(lat, lon)
    print("new_center: ", new_center)

    links = jdata.get("Links", [])

    show_neighbor = True  # set to False to show only the center panorama
    if show_neighbor:
        for link in links:
            p = link["panoId"]
            pts = getPointCloud_from_PanoId(panoId = p)

            jdata1 = gpano.getJsonfrmPanoID(p)
            lat1 = jdata1['Location']['lat']
            lon1 = jdata1['Location']['lng']
            elevation_egm96_m = jdata1['Location']['elevation_egm96_m']
            elevation_egm96_m = float(elevation_egm96_m)

            new_center = transformer.transform(lat1, lon1)
            pts[:, 0:1] += new_center[1]
            pts[:, 1:2] += new_center[0]
            pts[:, 2:3] += elevation_egm96_m
            print("new_center: ", new_center)
            P = np.concatenate([P, pts], axis=0)

    # print(P[:, :3])
    # print(P[:, 3:6])
    print(P.shape)

    v = pptk.viewer(P[:, :3])
    # transparent = np.concatenate([P[:, 3:6] / 255.0, 1-(P[:, -2:-1])/255.0], axis=1)
    # v.attributes(P[:, 3:6] / 255.0, P[:, 6], P[:, 7:10]/255.0)
    v.attributes(P[:, 3:6] / 255.0)
    # P = np.concatenate([P, colors, planes, normalvectors, normalvectors[:, 2]], axis=1)

    v.set(point_size=0.001, show_axis=True, show_grid=True)

    img, worldfile = gsv.pointCloud_to_image(P, 0.02)

    saved_path = r'K:\Research\street_view_depthmap'  # overrides the saved_path argument
    new_name = os.path.join(saved_path, "pointcloud_img.jpg")
    img_pil = Image.fromarray(img)
    img_pil.save(new_name)

    new_name = os.path.join(saved_path, "pointcloud_img.jgw")
    # worldfile_name = new_file_name.replace(".png", '.pgw')
    print("worldfile:", worldfile)
    with open(new_name, 'w') as wf:
        for line in worldfile:
            print(line)
            wf.write(str(line) + '\n')
Example No. 14
        vis_voxel_size = [0.1, 0.1, 0.1]
        vis_point_range = [-50, -30, -3, 50, 30, 1]
        bev_map = simplevis.point_to_vis_bev(points, vis_voxel_size,
                                             vis_point_range)
        bev_map = simplevis.draw_box_in_bev(bev_map, vis_point_range,
                                            boxes_lidar, [0, 255, 0], 2)
        plt.imshow(bev_map)
        plt.show()


if __name__ == "__main__":
    FLAGS = argparser()

    detector = SecondDetector(FLAGS.config_path, FLAGS.ckpt_path)
    # pptk viewer starts here
    v = pptk.viewer(np.random.rand(100, 3))
    v.set(point_size=0.01, lookat=(0.0, 0.0, 0.0))

    # https://www.nuscenes.org/data-collection
    # v_path = "/media/zzhou/data/data-lyft-3D-objects/lidar/host-a007_lidar1_1230485630301986856.bin"
    # points_lyft = np.fromfile(v_path, dtype=np.float32, count=-1).reshape([-1, 5])
    # points = np.zeros([points_lyft.shape[0], 4], dtype=np.float32)
    # points[:, 0] = points_lyft[:, 1]
    # points[:, 1] = points_lyft[:, 0] * (-1)
    # points[:, 2] = points_lyft[:, 2]
    # points[:, 3] = points_lyft[:, 3]
    #
    v_path = "/media/zzhou/data/data-KITTI/object/testing/velodyne/000050.bin"
    points = np.fromfile(v_path, dtype=np.float32, count=-1).reshape([-1, 4])

    # v_path = "/media/zzhou/data/data-lyft-3D-objects/train_kitti/testing/velodyne/004993.bin"
Example No. 15
    args = parser.parse_args()

    pc = np.load(args.point_cloud)
    prediction = np.load(args.prediction)

    # Check that the point cloud has at least 3 columns (x, y, z)
    assert pc.shape[1] >= 3

    # Extract pure x, y, z coordinates
    pc_coord = pc[:, :3]

    labels = append_labels(pc_coord, prediction)

    if args.vis:
        v = pptk.viewer(pc_coord, labels)

    # TODO: add saving functionality

    if args.metrics:
        # Calculate accuracy
        cm = confusion_matrix(pc[:, 3].T, labels)
        print(cm)

        acc = accuracy_score(pc[:, 3].T, labels)
        print(acc)

        cr = classification_report(pc[:, 3].T, labels)
        print(cr)

    # TODO: add occupancy_grid functionality
Example No. 16
X = (X / X[3, :]).T

# getting color values using the mask

color = []
visPts = []
for i in range(len(mask)):
    if mask[i] == 255:
        color.append(c[i])
        visPts.append(X[i])

color = np.array(color) / 255
visPts = np.array(visPts)

points = pptk.points(visPts[:, :3])

v = pptk.viewer(points)
v.attributes(pptk.points(color))
v.set(show_grid=False)
v.set(point_size=0.2)

#img1 = cv2.imread("Data/1.jpg")
#img2 = cv2.imread("Data/2.jpg")
#cv2.imshow("Img1", img1)
#cv2.imshow("Img2", img2)

# Non-linear triangulation
I = np.eye(3)
P1 = K @ I @ np.hstack((I, np.zeros((3, 1))))
P2 = K @ R @ np.hstack((I, t))
Example No. 17
def show_point_cloud(pc_data):
    # pc_data: point cloud data
    pptk.viewer(pc_data[:, :3])
Example No. 18
    # distances, indexes = feature_matching(discriptors)
    # print("--- %s seconds ---" % (time.time() - start_time))
    # sys.stdout.flush()
    # store_query(indexes)
    indexes = read_query()
    candidates = select_matching_candidates(name_idx, indexes)
    image_pair = []
    F_matrix = []
    for i, data in enumerate(name_idx):
        img_name, start, end = data
        print('finding match for {}'.format(img_name))
        matched_img, fundamental_matrix, inlier_number, inlier_set = \
            image_matching(i, candidates[i], name_idx, indexes, features)
        image_pair.append((matched_img, inlier_set))
        F_matrix.append(fundamental_matrix)
        print("--- %s seconds ---" % (time.time() - start_time))
    components, linked = component_track(image_pair)
    print('number of points: {}'.format(len(components)))
    print("--- %s seconds ---" % (time.time() - start_time))
    focal_length = read_focal_length(len(name_idx))
    camera_parameter = cam_parameter(linked, focal_length, F_matrix)
    points = []
    for comp in components:
        point = point_cloud(comp[:-1], linked, camera_parameter)
        points.append(point)
    # print(points)
    np.save('points_front', points)
    print("--- %s seconds ---" % (time.time() - start_time))
    v = pptk.viewer(np.array(points))
    v.set(point_size=0.01)
Example No. 19
def view_pc(ply_points):
    v = pptk.viewer(ply_points, ply_points[:, 1])
    v.color_map('hsv')
Example No. 20
def read_points(f, columns):
    col_dtype = {'x': np.float32, 'y': np.float32, 'z': np.float32}
    return pd.read_csv(f, header=0, sep=",", names=columns, dtype=col_dtype)


def convert_visualizable(pc):
    filtered = pc.drop(
        columns=['time', 'intensity', 'id', 'azimuth', 'range', 'pid'])
    return filtered


# user inputs
columns = ['time', 'intensity', 'id', 'x', 'y', 'z', 'azimuth', 'range', 'pid']
file_1 = sys.argv[1]
file_2 = sys.argv[2]

# make them visualizable
pc_vis_1 = convert_visualizable(read_points(file_1, columns))
pc_vis_2 = convert_visualizable(read_points(file_2, columns))

# combine the two point clouds
combined = pd.concat([pc_vis_1, pc_vis_2])
v_c = pptk.viewer(combined)
v_c.set(point_size=0.01)

# add colors
color_1 = [1, 1, 1]
color_2 = [0.11982556, 0.80928971, 0.082647]
colors = [color_1] * (len(pc_vis_1.index)) + [color_2] * (len(pc_vis_2.index))
v_c.attributes(colors)
Example No. 21
def show_3d_cloud(points_cloud):
    import pptk
    pptk.viewer(points_cloud)
Example No. 22
    def display_point_cloud(self, msg):
        self._logger.debug('@{}: {} received message'.format(
            msg.timestamp, self._name))
        pptk.viewer(msg.point_cloud.points)
Example No. 23
import numpy as np
import pandas as pd
import pptk

# This data contains only the trees
tree_data = pd.read_csv('tree_data.csv')

# select along the z axis
tree_data = tree_data[tree_data['z'] > 1]

# select along the y axis
tree_data = tree_data[tree_data['y'] <= 4182508]

# filter by color
tree_data = tree_data[(tree_data['g'] > 0.20) & (tree_data['b'] < 0.20)]

#tree_data.to_csv('tree_data.csv',index=False,encoding='utf8')
v = pptk.viewer(tree_data[['x', 'y', 'z']].values,
                tree_data[['r', 'g', 'b']].values)

build_data = pd.read_csv('build_data.csv')
build_data = build_data[(build_data['z'] > 8) & (build_data['z'] < 12)]
build_data = build_data[(build_data['y'] >= 4182499)
                        & (build_data['y'] <= 4182513)]
#build_data.to_csv('build_data.csv',index=False,encoding='utf8')

v = pptk.viewer(build_data[['x', 'y', 'z']].values,
                build_data[['r', 'g', 'b']].values)

# start from x_min
y = build_data['x'].min()
graph = []

# now sweep along the x axis
Example No. 24
three = np.delete(datanp, 3, 1)  # drop column 3, keeping x, y, z

datapdtwo = pd.read_csv('filetwo.csv')  #CSV to Pandas
datanptwo = datapdtwo.to_numpy()  #Pandas to NumPy
threetwo = np.delete(datanptwo, 6, 1)  # drop the last column
threetwo = np.delete(threetwo, 5, 1)   # drop the normals
threetwo = np.delete(threetwo, 4, 1)
threetwo = np.delete(threetwo, 3, 1)
print(threetwo)

datapdthree = pd.read_csv('filethree.csv')  #CSV to Pandas
datanpthree = datapdthree.to_numpy()  #Pandas to NumPy
threethree = np.delete(datanpthree, 6, 1)  # drop the last column
threethree = np.delete(threethree, 5, 1)   # drop the normals
threethree = np.delete(threethree, 4, 1)
threethree = np.delete(threethree, 3, 1)

xyz = np.concatenate((three, threetwo, threethree), axis=0)  #xyz

first_np = np.full((three.shape[0], 3), [255., 0, 0])
second_np = np.full((threetwo.shape[0], 3), [0, 255., 0])
third_np = np.full((threethree.shape[0], 3), [0, 0, 255.])
rgb = np.concatenate((first_np, second_np, third_np), axis=0)  #rgb

print(three.shape[0])
v = pptk.viewer(xyz)
v.set(point_size=10)
v.attributes(rgb / 255.)
Example No. 25
import numpy as np
import os
import pptk

# p1 = "/media/yuqiong/DATA/city/nyc/nyc_tri_pcds_sample_2048"
p2 = "/media/yuqiong/DATA/city/nyc/nyc_tri_pcds_4096_sample"

# allf1 = os.listdir(p1)
allf2 = os.listdir(p2)
# allf = list(set(allf1) & set(allf2))

for f in allf2:
    print(f)
    path = os.path.join(p2, f)
    dat = np.load(path)
    v = pptk.viewer(dat)
    input("Press Enter to continue...")
    v.close()

Example No. 26
    def extract_points_3D(self, pointcloud_msg, now, proc_results, calibrator_instance):
        # print("lidar_points:", len(lidar_points), ":", [x.shape for x in lidar_points])
        # Log PID
        rospy.loginfo('3D Picker PID: [%d]' % os.getpid())

        # Extract points data
        points = ros_numpy.point_cloud2.pointcloud2_to_array(pointcloud_msg)
        points = np.asarray(points.tolist())
        if len(points.shape) > 2:
            points = points.reshape(-1, points.shape[-1])
        points = points[~np.isnan(points).any(axis=1), :]
        points2pcd(points, os.path.join(PKG_PATH, CALIB_PATH, self.camera_name+"_pcd", str(self.lidar_points_num)+".pcd"))
        if self.args.transform_pointcloud:
            points = (self.lidar_rotation_matrix.dot(points[:,:3].T) + self.lidar_translation_vector.reshape(-1,1)).T

        # Select points within chessboard range
        inrange = np.where((np.abs(points[:, 0]) < 7) &
                           (points[:, 1] < 25) &
                           (points[:, 1] > 0) &
                           # (points[:, 2] < 4) &
                           (points[:, 2] > 0.2))
        points = points[inrange[0]]
        if points.shape[0] < 10:
            rospy.logwarn('Too few PCL points available in range')
            return

        # pptk viewer
        viewer = pptk.viewer(points[:, :3])
        viewer.set(lookat=(0,0,4))
        viewer.set(phi=-1.57)
        viewer.set(theta=0.4)
        viewer.set(r=4)
        viewer.set(floor_level=0)
        viewer.set(point_size=0.02)
        rospy.loginfo("Press [Ctrl + LeftMouseClick] to select a chessboard point. Press [Enter] on viewer to finish selection.")
        pcl_pointcloud = pcl.PointCloud(points[:,:3].astype(np.float32))
        region_growing = pcl_pointcloud.make_RegionGrowing(searchRadius=self.chessboard_diagonal/5.0)
        viewer.wait()
        indices = viewer.get('selected')
        if len(indices) < 1:
            rospy.logwarn("No point selected!")
            # self.terminate_all()
            viewer.close()
            return
        rg_indices = region_growing.get_SegmentFromPoint(indices[0])
        points_color = np.zeros(len(points))
        points_color[rg_indices] = 1
        viewer.attributes(points_color)
        # viewer.wait()
        chessboard_pointcloud = pcl_pointcloud.extract(rg_indices)
        plane_model = pcl.SampleConsensusModelPlane(chessboard_pointcloud)
        pcl_RANSAC = pcl.RandomSampleConsensus(plane_model)
        pcl_RANSAC.set_DistanceThreshold(0.01)
        pcl_RANSAC.computeModel()
        inliers = pcl_RANSAC.get_Inliers()
        # random select a fixed number of lidar points to reduce computation time
        inliers = np.random.choice(inliers, self.args.lidar_points)
        chessboard_points = np.asarray(chessboard_pointcloud)
        proc_results.put([np.asarray(chessboard_points[inliers, :3])])
        points_color = np.zeros(len(chessboard_points))
        points_color[inliers] = 1
        viewer.load(chessboard_points[:,:3])
        viewer.set(lookat=(0,0,4))
        viewer.set(phi=-1.57)
        viewer.set(theta=0.4)
        viewer.set(r=2)
        viewer.set(floor_level=0)
        viewer.set(point_size=0.02)
        viewer.attributes(points_color)
        rospy.loginfo("Check the chessboard segmentation in the viewer.")
        viewer.wait()
        viewer.close()
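The wait/get('selected') calls above are pptk's interactive selection loop. A minimal sketch of just that loop, independent of ROS and PCL:

import numpy as np
import pptk

pts = np.random.rand(1000, 3)
viewer = pptk.viewer(pts)
print("Ctrl + LeftMouseClick to select points, then press Enter in the viewer.")
viewer.wait()                      # blocks until Enter is pressed in the viewer
selected = viewer.get('selected')  # indices of the selected points
print(len(selected), "points selected")
viewer.close()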
Example No. 27
        os.makedirs(FLAGS.npy_dir, exist_ok=True)

    dataset_file = FLAGS.dataset_dir + '/folds/fold{}.txt'.format(FLAGS.fold)

    with open(dataset_file) as f:
        save_dir = FLAGS.npy_dir + "/" + FLAGS.type
        data_dir = FLAGS.dataset_dir + "/objects/"
        file_list = f.readlines()
        for file in file_list:
            file_name = file.split('\n')[0]
            file_path = data_dir + file_name
            print('processing {}'.format(file_name))

            cloud = helper.load_points_from_bin(file_path)
            print(cloud.shape)
            v = pptk.viewer(cloud)
            # 12 augmentation steps performed better than 18
            aug_steps = 12
            # VoxelNet-style data augmentation makes VoxNet perform better (accuracy 0.6762148 vs 0.6769073)
            cloud_list = helper.aug_data(cloud, aug_steps)
            # cloud_list = helper.aug_data(cloud, aug_steps, uniform_rotate_only=True)

            # save pre-process pointcloud voxel
            idx = 0
            if not os.path.exists(save_dir):
                os.makedirs(save_dir, exist_ok=True)

            for points in cloud_list:
                voxels, inside_points = \
                    helper.voxelize(points, voxel_size=(24,24,24), padding_size=(32,32,32), resolution=0.1)
                print(voxels.shape)
Example No. 28
x = np.linspace(0,len(x),len(x))
xy = np.vstack((x,y)).T

rotor = Rotor()
rotor.fit_rotate(xy)
elbow_idx = rotor.get_elbow_index()
rotor.plot_elbow()
eps = distances[elbow_idx]/2
del x,y,xy

clustering = DBSCAN(algorithm='kd_tree', eps=eps, min_samples=5).fit(xyz_nn)  # min_samples based on D + 1 = 4 for 3-D data
labels = clustering.labels_

colors = [int(i % 23) for i in labels]  # fold the 554 cluster labels onto 23 distinct colors

v = pptk.viewer(data, colors)
v.set(point_size=0.01)

# matplotlib
core_samples_mask = np.zeros_like(clustering.labels_, dtype=bool)
core_samples_mask[clustering.core_sample_indices_] = True
labels = clustering.labels_

# Number of clusters in labels, ignoring noise if present.
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)

# Plot result

fig = plt.figure()
ax = Axes3D(fig)
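A self-contained sketch of the DBSCAN-plus-pptk pattern above on synthetic blobs; the eps and min_samples values here are illustrative, not tuned:

import numpy as np
import pptk
from sklearn.cluster import DBSCAN
from sklearn.datasets import make_blobs

pts, _ = make_blobs(n_samples=5000, n_features=3, centers=5, random_state=0)
labels = DBSCAN(eps=0.8, min_samples=5).fit(pts).labels_
v = pptk.viewer(pts, labels.astype(float))  # color points by cluster id
v.color_map('jet')
v.set(point_size=0.05)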
Example No. 29
def displayPoints(data, pointSize):
    v = pptk.viewer(data)
    v.set(point_size=pointSize)
Example No. 30
        'z': np.float32,
        'i': np.int32,
        'r': np.uint8,
        'g': np.uint8,
        'b': np.uint8
    }
    return pd.read_csv(f,
                       names=col_names,
                       dtype=col_dtype,
                       delim_whitespace=True)


def read_labels(f):
    # reads Semantic3D .labels file f into a pandas dataframe
    return pd.read_csv(f, header=None)[0].values


if __name__ == '__main__':
    points = read_points('bildstein_station1_xyz_intensity_rgb.txt')
    labels = read_labels('bildstein_station1_xyz_intensity_rgb.labels')

    # v = pptk.viewer(points[['x', 'y', 'z']])
    # v.attributes(points[['r', 'g', 'b']] / 255., points['i'])
    # v.set(point_size=0.001)

    mask = labels != 0  # label 0 means unlabeled in Semantic3D
    P = points[mask]
    L = labels[mask]
    v = pptk.viewer(P[['x', 'y', 'z']])
    v.attributes(P[['r', 'g', 'b']] / 255., P['i'], L)
    v.set(point_size=0.001)
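The same mask-then-view pattern on synthetic data, runnable without the Semantic3D files; the labels here are invented:

import numpy as np
import pptk

P = np.random.rand(10000, 3)
L = np.random.randint(0, 9, len(P))  # fake per-point labels, 0 = unlabeled
mask = L != 0
v = pptk.viewer(P[mask], L[mask])    # color the labeled points by class
v.set(point_size=0.001)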