    def __getitem__(self, index):
        pcd = self.data.iloc[index, 0]
        pcd_origin = self.data.iloc[index, 1]
        point = pcl.PointCloud_PointXYZRGBA()
        point_origin = pcl.PointCloud_PointXYZRGBA()
        point.from_file(pcd)
        point_origin.from_file(pcd_origin)
        point_np = np.zeros((point.size, 4), np.float32)
        point_origin_np = np.zeros((point.size, 6), np.float32)
        for i in range(point.size):
            point_np[i][0] = point[i][0]
            point_np[i][1] = point[i][1]
            point_np[i][2] = point[i][2]
            point_np[i][3] = int(point[i][3]) >> 16 & 0x000ff

        if point_np.shape[0] < self.num_point:
            row_idx = np.random.choice(point_np.shape[0],
                                       self.num_point,
                                       replace=True)
        else:
            row_idx = np.random.choice(point_np.shape[0],
                                       self.num_point,
                                       replace=False)
        # row_idx = np.sort(row_idx)
        point_out = torch.from_numpy(point_np[row_idx, :3])  ## need to revise
        label = torch.from_numpy(point_np[row_idx, 3])  ## need to revise

        origin_list = []
        for i in range(point.size):
            point_origin_np[i][0] = point_origin[i][0]
            point_origin_np[i][1] = point_origin[i][1]
            point_origin_np[i][2] = point_origin[i][2]
            point_origin_np[i][3] = int(point_origin[i][3]) >> 0 & 0x000ff
            point_origin_np[i][4] = int(point_origin[i][3]) >> 8 & 0x000ff
            point_origin_np[i][5] = int(point_origin[i][3]) >> 16 & 0x000ff

        #print point_origin_np[0] , point_origin_np[1]

        point_origin_np = point_origin_np[row_idx]
        # print origin_list[0] , origin_list[1]

        point_out = point_out.transpose(0, 1)  # (num_point, 3) -> (3, num_point), kept as a torch tensor

        target = torch.zeros((self.num_point, 2))
        #target = torch.zeros((self.num_point), dtype = torch.long)
        for i in range(self.num_point):
            #print label[i]
            #target[i] = int(label[i])
            target[i][int(label[i])] = 1
        out = {
            'x': point_out,
            'y': target,
            'origin': point_origin_np,
            'path': pcd_origin
        }
        return out
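The dataset above relies on PCL's packed XYZRGBA layout, where the fourth field is a single value holding the blue, green and red channels in its lowest three bytes. A minimal sketch of that unpacking, independent of pcl and torch (the helper name and sample value are illustrative, not part of the original code):

import numpy as np

def unpack_rgba(packed):
    """Split a packed 0x00RRGGBB value into its channels, as __getitem__ does."""
    packed = int(packed)
    blue = packed >> 0 & 0xff
    green = packed >> 8 & 0xff
    red = packed >> 16 & 0xff
    return red, green, blue

# Example: a point whose colour is (200, 100, 50)
packed = 200 << 16 | 100 << 8 | 50
assert unpack_rgba(packed) == (200, 100, 50)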
Example #2
def prediction(dataiter, network):

    batch = next(dataiter)
    inputs = Variable(batch['x'].cuda())
    origin = Variable(batch['origin'])[0]
    labels = Variable(batch['y'].cuda())[0]
    output = network(inputs)[0][0]
    point = pcl.PointCloud_PointXYZRGBA()
    point_origin = pcl.PointCloud_PointXYZRGBA()
    print(batch['path'])
    _point_list = []
    _origin_list = []
    in_point = np.transpose(inputs[0].cpu().numpy(), (1, 0))
    print(labels, output)
    for i in range(len(in_point)):
        if labels[i].argmax() == 1:
            print(output[i], labels[i])
        # Channels as stored by the dataset above: [3] = blue, [4] = green, [5] = red
        blue = int(origin[i][3])
        green = int(origin[i][4])
        red = int(origin[i][5])
        rgb = float(red << 16 | green << 8 | blue)
        #if output[i].argmax() == labels[i] and output[i].argmax() == 1:
        if output[i].argmax() == labels[i].argmax() and output[i].argmax() == 1:
            # if output[i][0] < -0.1:

            # # red = np.right_shift(red, 8).astype(np.uint8)
            # # green = np.right_shift(green, 8).astype(np.uint8)
            # # blue = np.right_shift(blue, 8).astype(np.uint8)

            # red = np.asarray(red).astype(np.uint32)
            # green = np.asarray(green).astype(np.uint32)
            # blue = np.asarray(blue).astype(np.uint32)
            # rgb = np.left_shift(red, 16) + np.left_shift(green, 8) + np.left_shift(blue, 0)
            # Use the CPU copy of the points so the lists convert cleanly to numpy
            _point_list.append(
                [in_point[i][0], in_point[i][1], in_point[i][2], rgb])
        else:
            _origin_list.append(
                [in_point[i][0], in_point[i][1], in_point[i][2], rgb])
        # rgb = np.left_shift(red, 16) + np.left_shift(green, 8) + np.left_shift(blue, 0)
        # ptcloud = np.vstack((in_point[i][0], in_point[i][1], in_point[i][2], rgb)).transpose()

    point.from_array(np.asarray(_point_list, dtype=np.float32))
    point.to_file("./out.pcd")
    point_origin.from_array(np.asarray(_origin_list, dtype=np.float32))
    point_origin.to_file("./origin.pcd")
Example #3
def prediction(dataiter, network):

    batch = next(dataiter)
    inputs = Variable(batch['x'].cuda())
    origin = Variable(batch['origin'])[0]
    labels = Variable(batch['y'].cuda())[0]
    output = network(inputs)[0][0]
    point_origin = pcl.PointCloud_PointXYZRGBA()
    _origin_list = []
    in_point = np.transpose(inputs[0].cpu().numpy(), (1, 0))
    print(batch['id'])
    print(output.argmax())
    print(output)
    if output.argmax() == labels:
        print("right")
    else:
        print("wrong")
    for i in range(len(in_point)):
        # Channels as stored by the dataset: [3] = blue, [4] = green, [5] = red
        blue = int(origin[i][3])
        green = int(origin[i][4])
        red = int(origin[i][5])
        rgb = float(red << 16 | green << 8 | blue)
        _origin_list.append(
            [in_point[i][0], in_point[i][1], in_point[i][2], rgb])

    point_origin.from_array(np.asarray(_origin_list, dtype=np.float32))
    point_origin.to_file("./origin.pcd")
Example #4
def frame_to_world(opt):
    """
    project pixel frame to world using matrix operation, for pointCloud generation
    input depth, color, intrinsic, camera pose
    :param opt: opt has depth color intrinsic camera pose path
    :return: pointCloud save in .ply
    """
    # input depth, color
    depth = misc.imread(os.path.join(opt.depth_path, opt.frame + '.png'))
    height, width = depth.shape
    color = misc.imread(os.path.join(opt.color_path, opt.frame + '.jpg'))
    color = misc.imresize(color, (height, width)).astype(np.int64)
    # input intrinsic
    intrinsic = []
    with open(opt.intrinsic, 'r') as f:
        line = f.readline()
        while line:
            intrinsic.append(line.split())
            line = f.readline()
    intrinsic = np.array(intrinsic, dtype=np.float32)
    intrinsic = adjust_intrinsic(intrinsic, [640, 480], [height, width])
    intrinsic_inv = np.linalg.inv(intrinsic)
    # camera pose = extrinsic_inv
    pose = []
    with open(os.path.join(opt.pose_path, opt.frame + '.txt'), 'r') as f:
        line = f.readline()
        while line:
            pose.append(line.split())
            line = f.readline()
    extrinsic_inv = np.array(pose, dtype=np.float32)
    # extrinsic = np.linalg.inv(extrinsic_inv)
    # reshape depth color to [height * width,]
    depth = depth.reshape(-1)
    color = color.reshape(-1, 3)
    # row -> i col -> j
    row = torch.arange(height).view(height, 1).repeat(
        1, width).view(-1).float().cpu().numpy()
    col = torch.arange(width).view(1, width).repeat(
        1, height).view(-1).float().cpu().numpy()
    # pixel frame [depth * j, depth * i, depth, 1]
    pixel_frame = np.zeros((4, depth.shape[0]))
    pixel_frame[0, :][depth != 0] = depth[depth != 0] * col[depth != 0]
    pixel_frame[1, :][depth != 0] = depth[depth != 0] * row[depth != 0]
    pixel_frame[2, :][depth != 0] = depth[depth != 0]
    pixel_frame[3, :][depth != 0] = 1
    # camera coordinate [Xc, Yc, Zc, 1]
    camera_coor = np.dot(intrinsic_inv, pixel_frame)
    # world coordinate [X, Y, Z, 1]
    world_coor = np.dot(extrinsic_inv, camera_coor)
    # RGBA for pointCloud
    RGBA = np.zeros((1, height * width)).astype(np.float64)
    RGBA[0, :][depth != 0] = (0xff & color[:, 0][depth != 0]) << 16 | (
        0xff & color[:, 1][depth != 0]) << 8 | (0xff & color[:, 2][depth != 0])
    # generate points [X, Y, Z, RGBA]
    points = np.concatenate((world_coor[0:3], RGBA), axis=0).T
    points = np.array(points, dtype=np.float32)
    # PCL Cloud
    cloud = pcl.PointCloud_PointXYZRGBA()
    cloud.from_array(points)
    pcl.save(cloud, opt.frame + '.ply', format='ply')
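A minimal sketch of an opt object that satisfies the attributes frame_to_world reads; all paths and the frame name are placeholders:

# The attribute names mirror those used inside frame_to_world; the values are placeholders.
from argparse import Namespace

opt = Namespace(
    frame='000000',
    depth_path='scene/depth',      # expects <frame>.png depth images
    color_path='scene/color',      # expects <frame>.jpg color images
    intrinsic='scene/intrinsic_depth.txt',
    pose_path='scene/pose',        # expects <frame>.txt 4x4 camera poses
)
frame_to_world(opt)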
Example #5
def main():
    # RGB : NG
    # f = file.File('28XXX10000075-18.las', mode='r')
    f = file.File('28W0608011101-1.las', mode='r')
    # f = file.File('28XXX00020001-1.las', mode='r')
    # f = file.File('simple1_4.las', mode='r')

    # Check the LAS point data format: formats 2, 3 and 5 store RGB
    if f._header.data_format_id in (2, 3, 5):
        red = (f.red)
        green = (f.green)
        blue = (f.blue)
        # Convert the 16-bit color channels to 8-bit (when the color is stored in the upper 8 bits)
        red = np.right_shift(red, 8).astype(np.uint8)
        green = np.right_shift(green, 8).astype(np.uint8)
        blue = np.right_shift(blue, 8).astype(np.uint8)
        # (use the lines below instead when the color is stored in the lower 8 bits)
        # red = red.astype(np.uint8)
        # green = green.astype(np.uint8)
        # blue = blue.astype(np.uint8)
        red = red.astype(np.uint32)
        green = green.astype(np.uint32)
        blue = blue.astype(np.uint32)
        rgb = np.left_shift(red, 16) + np.left_shift(green, 8) + np.left_shift(
            blue, 0)
        ptcloud = np.vstack((f.x, f.y, f.z, rgb)).transpose()
        cloud = pcl.PointCloud_PointXYZRGBA()
        # set raw points
        # cloud.from_array(np.array(ptcloud, dtype=np.float32))
        # set point centered
        mean_param = np.concatenate([np.mean(ptcloud, 0)[0:3], np.zeros(1)])
        ptcloud_centred = ptcloud - mean_param
        # print(ptcloud_centred)
        cloud.from_array(np.array(ptcloud_centred, dtype=np.float32))

        # Visualization
        visual = pcl.pcl_visualization.CloudViewing()
        visual.ShowColorACloud(cloud, b'cloud')

    else:
        ptcloud = np.vstack((f.x, f.y, f.z)).transpose()
        mean_param = np.mean(ptcloud, 0)
        cloud = pcl.PointCloud()
        # set raw points
        # cloud.from_array(np.array(ptcloud, dtype=np.float32))
        # set point centered
        # mean_param = np.concatenate([np.mean(ptcloud, 0)[0:3], np.zeros(1)])
        ptcloud_centred = ptcloud - mean_param
        # print(ptcloud_centred)
        cloud.from_array(np.array(ptcloud_centred, dtype=np.float32))

        # Visualization
        visual = pcl.pcl_visualization.CloudViewing()
        visual.ShowMonochromeCloud(cloud, b'cloud')

    v = True
    while v:
        v = not (visual.WasStopped())
Example #6
    def setUp(self):
        rng = np.random.RandomState(42)
        # Define two dense clusters of points of sizes 30 and 70, resp.
        a = rng.randn(100, 4).astype(np.float32)
        a[:30] -= 42

        self.pc = pcl.PointCloud_PointXYZRGBA(a)
        self.kd = pcl.KdTreeFLANN_PointXYZRGBA(self.pc)
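For context, a hedged sketch of querying the kd-tree built in setUp; it assumes python-pcl's nearest_k_search_for_cloud API, which returns index and squared-distance arrays of shape (n_points, k):

    def test_nearest_neighbours(self):
        # Illustrative query: the 5 nearest neighbours of every point in the cloud.
        indices, sqr_distances = self.kd.nearest_k_search_for_cloud(self.pc, 5)
        # Each point's nearest neighbour is itself, at squared distance 0.
        self.assertEqual(indices.shape, (self.pc.size, 5))
        self.assertTrue((sqr_distances[:, 0] == 0).all())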
Example #7
def view_cloud(pointcloud):
    cloud = pcl.PointCloud_PointXYZRGBA()
    cloud.from_array(pointcloud)

    try:
        visual = pcl.pcl_visualization.CloudViewing()
        visual.ShowColorACloud(cloud)
        v = True
        while v:
            v = not (visual.WasStopped())
    except:
        pass
Example #8
    def setUp(self):
        self.p = pcl.load("tutorials/table_scene_lms400.pcd")
        self.fil = self.p.make_ApproximateVoxelGrid()

        self.p2 = pcl.PointCloud_PointXYZI()
        self.fil2 = self.p2.make_ApproximateVoxelGrid()

        self.p3 = pcl.PointCloud_PointXYZRGB()
        self.fil3 = self.p3.make_ApproximateVoxelGrid()

        self.p4 = pcl.PointCloud_PointXYZRGBA()
        self.fil4 = self.p4.make_ApproximateVoxelGrid()
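A short hedged sketch of how such a filter is typically applied; set_leaf_size and filter are, to the best of my knowledge, the calls python-pcl exposes for its voxel-grid filters:

    def test_downsample(self):
        # Downsample with a 1 cm voxel grid (the leaf size is an illustrative choice).
        self.fil.set_leaf_size(0.01, 0.01, 0.01)
        downsampled = self.fil.filter()
        # The approximate voxel grid should never produce more points than the input.
        self.assertLessEqual(downsampled.size, self.p.size)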
Example #9
    def createPointCloud(self, images, poses, cameraMatrix):
        ## Fill cloud structure
        # pcl_points = pcl.PointCloud()

        print("\nCreating resulting point cloud...")
        print("This may take a few minutes...")

        ## Per camera
        pcl_p = pcl.PointCloud_PointXYZRGBA()
        all_points = np.zeros(shape=(0, 4), dtype=np.float32)  # start empty so no spurious origin point is kept

        bad_depths = []
        for c in range(len(poses)):
            image = images[c]
            R = poses[c].R
            t = poses[c].t

            if R.size == 0:
                continue

            for i in range(image.dep.shape[0]):
                rgbs = image.rgb[i]
                deps = image.dep[i]

                for j in range(image.dep.shape[1]):
                    rgb = rgbs[j]
                    dep = deps[j]

                    if dep < self.low_thresh or dep > self.high_thresh:
                        bad_depths.append(dep)
                        continue

                    point = util.backproject3D(j, i, dep, cameraMatrix)
                    gPoint = np.transpose(R) @ point - t
                    point = gPoint

                    rgb_temp = rgb[2] << 16 | rgb[1] << 8 | rgb[0]
                    point_ext = np.array(
                        [[point[0][0], point[1][0], point[2][0], rgb_temp]],
                        dtype=np.float32)
                    single_point = point_ext

                    all_points = np.concatenate((all_points, single_point),
                                                axis=0)

        pcl_p.from_array(all_points)
        # pcl_p_reduced = self.reduceCloud(pcl_p)

        print("Generated {} points.".format(len(pcl_p.to_list())))
        return pcl_p
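One remark on createPointCloud: growing all_points with np.concatenate inside the inner loop copies the whole array for every accepted pixel, so runtime grows quadratically with the number of points. A common pattern, sketched below with placeholder row values, is to gather rows in a Python list and convert once at the end:

import numpy as np

rows = []
for k in range(5):
    rows.append([float(k), 0.0, 0.0, 0.0])       # placeholder for the real [x, y, z, rgb] row
all_points = np.asarray(rows, dtype=np.float32)  # one conversion instead of repeated concatenation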
Example #10
def DisplayNumpyColorCloudPCL(cloud):
    visual = pcl.pcl_visualization.CloudViewing()
    if type(cloud).__module__ == np.__name__:
        #         ptcloud_centred = pcl.PointCloud()
        #         ptcloud_centred = pcl.PointCloud_PointXYZI()
        ptcloud_centred = pcl.PointCloud_PointXYZRGBA()
        ptcloud_centred.from_array(np.array(cloud, dtype=np.float32))
        #         visual.ShowGrayCloud(ptcloud_centred, b'cloud')
        visual.ShowColorACloud(ptcloud_centred, b'cloud')
    #         visual.ShowMonochromeCloud(ptcloud_centred, b'cloud')
    else:
        visual.ShowMonochromeCloud(cloud, b'cloud')
    v = True
    while v:
        v = not (visual.WasStopped())
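A small hedged usage sketch for DisplayNumpyColorCloudPCL; the random cloud and the packed white colour are illustrative, and the call blocks until the viewer window is closed:

# Illustrative call: 1000 random points, all coloured white (0xffffff).
import numpy as np

xyz = np.random.rand(1000, 3).astype(np.float32)
rgb = np.full((1000, 1), float(0xffffff), dtype=np.float32)
DisplayNumpyColorCloudPCL(np.hstack((xyz, rgb)))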
Example #11
def viewPointClouds(folderDirectory, full):
    """
    View the point clouds saved from a capture session.

    Parameters
    ----------
    folderDirectory : str
        Directory containing .pickle files of saved frames from the capture

    full : int
        0 to view every 10th frame, 1 to view every frame
    """
    # Create a multiprocessing pool to build the point clouds for each camera in parallel
    p = multiprocessing.Pool(6)
    # Handle for the PCL point cloud viewer
    visual = pcl.pcl_visualization.CloudViewing()
    signal.signal(signal.SIGINT, signalHandler)
    i = 0
    while True:
        if not full:
            i += 10

        print(i)
        fname = folderDirectory + '\\' + str(format(i, '05d')) + '.pickle'
        file = open(fname, 'rb')
        frame = pickle.load(file)
        file.close()
        #i+=1
        cloud = pcl.PointCloud_PointXYZRGBA()
        listP = [data for serial, data in frame.items()]
        try:
            # Build the point cloud for each camera in a separate worker process
            cameraPoints = p.map(depthFrametoPC, listP)
            allPoints = np.empty((0, 4))
            for camera in cameraPoints:
                allPoints = np.append(allPoints, camera, axis=0)
            cloud.from_array(allPoints.astype('float32'))
            visual.ShowColorACloud(cloud)
            i += 1
        except KeyboardInterrupt:
            p.terminate()
Example #12
    def setUp(self):
        self.a = np.array(np.mat(SEGDATA, dtype=np.float32))
        self.p = pcl.PointCloud_PointXYZRGBA()
        self.p.from_array(self.a)
Example #13
    def test_asarray(self):
        p = pcl.PointCloud_PointXYZRGBA(self.p)  # copy
        # old0 = p[0]
        a = np.asarray(p)  # view
        a[:] += 6
        assert_array_almost_equal(p[0], a[0])
Example #14
    # Transformation from camera coordinates to world coordinates
    pose = poses[i]
    R, t = get_transform(pose)

    # Compute the coordinates in the camera frame
    row_mat = np.array([list(range(size[0]))] * size[1]).transpose()
    column_mat = np.array([list(range(size[1]))] * size[0])
    z_mat = depth_img / depthScale
    x_mat = np.multiply((column_mat - cx) / fx, z_mat)
    y_mat = np.multiply((row_mat - cy) / fy, z_mat)

    # Remove points with zero depth
    Pc_mat = np.stack((x_mat, y_mat, z_mat), axis=-1).reshape((-1, 3))
    rgb_mat = np.expand_dims(
        (color_img[:, :, 2] * 16**4 + color_img[:, :, 1] * 16**2 +
         color_img[:, :, 0]).flatten(),
        axis=-1)
    zero_columns = np.where(Pc_mat[:, 2] == 0)[0]
    Pc_mat = np.delete(Pc_mat, zero_columns, 0)
    rgb_mat = np.delete(rgb_mat, zero_columns, 0)

    # Compute the coordinates in the world frame
    Pw_mat = np.dot(R, Pc_mat.transpose()) + np.expand_dims(t, axis=-1)

    points.append(np.concatenate((Pw_mat.transpose(), rgb_mat), axis=-1))

points = np.concatenate(points)
p = pcl.PointCloud_PointXYZRGBA([[point[0], point[1], point[2],
                                  int(point[3])] for point in points])
pcl.save_XYZRGBA(p, 'map.pcd')
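The fragment above back-projects each pixel with the pinhole model. A minimal single-pixel sketch of the same relations, with illustrative intrinsics and depth scale:

# Pinhole back-projection for one pixel (u = column, v = row); values are illustrative.
fx, fy, cx, cy, depthScale = 525.0, 525.0, 319.5, 239.5, 1000.0
u, v, raw_depth = 400, 300, 1500          # raw depth in sensor units
z = raw_depth / depthScale                # metres
x = (u - cx) / fx * z
y = (v - cy) / fy * z
# (x, y, z) is the point in the camera frame; the world point is R @ [x, y, z] + t.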
Example #15
    def setUp(self):
        self.p = pcl.PointCloud_PointXYZRGBA(
            np.arange(12, dtype=np.float32).reshape(3, 4))
Example #16
    def setUp(self):
        self.p = pcl.PointCloud_PointXYZRGBA(_data)
Example #17
def test_copy():
    a = np.random.randn(100, 4).astype(np.float32)
    p1 = pcl.PointCloud_PointXYZRGBA(a)
    p2 = pcl.PointCloud_PointXYZRGBA(p1)
    assert_array_equal(p2.to_array(), a)
Example #18
def main():
    # | fx  0   cx |
    # | 0   fy  cy |
    # | 0   0   1  |
    # vals = np.array(
    #     [525., 0.  , 319.5,
    #      0.  , 525., 239.5,
    #      0.  , 0.  , 1.])
    # cameraMatrix = vals.reshape((3, 3))
    # grabber_sequences/pclzf/*.xml
    cameraMatrix = np.array([[525., 0., 320.0], [0., 525., 240.0],
                             [0., 0., 1.]])

    # color0 = cv2.imread('rgb/0.png')
    color0 = cv2.imread(
        'grabber_sequences/tiff/frame_20121214T142255.814212_rgb.tiff')
    # 16 bit Image
    # https://github.com/eiichiromomma/CVMLAB/wiki/OpenCV-16bitImage
    # depth0 = cv2.imread('depth/0.png', -1)
    # depth0 = cv2.imread('depth/0.png', cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
    depth0 = cv2.imread(
        'grabber_sequences/tiff/frame_20121214T142255.814212_depth.tiff',
        cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)
    print("Color: ", color0.dtype)
    print("Depth: ", depth0.dtype)

    # colorImage1 = cv2.imread('rgb/1.png')
    # depth1 = cv2.imread('depth/1.png', -1)

    # if (color0.empty() || depth0.empty() || colorImage1.empty() || depth1.empty()):
    #     cout << "Data (rgb or depth images) is empty.";
    #     return -1;

    # gray0 = cv2.cvtColor(color0, cv2.COLOR_BGR2GRAY)
    # grayImage1 = cv2.cvtColor(colorImage1, cv2.COLOR_BGR2GRAY)
    # depthFlt0 = depth0.convertTo(cv2.CV_32FC1, 1. / 1000.0)
    # depthFlt1 = depth1.convertTo(cv2.CV_32FC1, 1. / 1000.0)
    depthFlt0 = np.float32(depth0) / 1000.0
    # depthFlt0 = depth0 / 1000.0
    # depthFlt1 = np.float32(depth1) / 1000.0

    import pcl
    # points0 = cvtDepth2Cloud(depthFlt0, cameraMatrix)
    # cloud0 = pcl.PointCloud()
    points0 = cvtDepthColor2Cloud(depthFlt0, color0, cameraMatrix)
    cloud0 = pcl.PointCloud_PointXYZRGBA()
    cloud0.from_array(points0)
    print(cloud0)

    # points1 = cvtDepth2Cloud(depthFlt1, cameraMatrix)
    # cloud1 = pcl.PointCloud()
    # cloud1.from_array(points1)

    # wait
    try:
        import pcl.pcl_visualization
        visual = pcl.pcl_visualization.CloudViewing()
        # xyz only
        # visual.ShowMonochromeCloud(cloud0)
        # visual.ShowMonochromeCloud(cloud1)
        # color(rgba)
        visual.ShowColorACloud(cloud0)
        v = True
        while v:
            v = not (visual.WasStopped())
    except:
        pass
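main relies on a cvtDepthColor2Cloud helper that is not shown here. A hedged guess at what such a helper could look like, producing the (N, 4) float32 array that PointCloud_PointXYZRGBA.from_array expects; this is a reconstruction, not the original implementation:

import numpy as np

def cvtDepthColor2Cloud(depth, color, cameraMatrix):
    # Reconstruction sketch: back-project every pixel and pack BGR into one float.
    fx, fy = cameraMatrix[0, 0], cameraMatrix[1, 1]
    cx, cy = cameraMatrix[0, 2], cameraMatrix[1, 2]
    rows, cols = depth.shape
    v, u = np.mgrid[0:rows, 0:cols]
    z = depth.astype(np.float32)
    x = (u - cx) / fx * z
    y = (v - cy) / fy * z
    bgr = color.astype(np.uint32)
    rgb = (bgr[:, :, 2] << 16 | bgr[:, :, 1] << 8 | bgr[:, :, 0]).astype(np.float32)
    points = np.stack((x, y, z, rgb), axis=-1).reshape(-1, 4)
    return points[points[:, 2] > 0].astype(np.float32)  # drop zero-depth pixels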
Example #19
def main(cfg):

    intrinsic = []
    with open(os.path.join(cfg.intrinsic_seg), 'r') as f:
        while True:
            line = f.readline()
            if not line:
                break
            eles = line.split()
            for ele in eles:
                intrinsic.append(ele)
    intrinsic = np.array(intrinsic, dtype=np.float32).reshape([4, 4])
    intrinsic_inv = np.linalg.inv(intrinsic)

    depth_files = os.listdir(cfg.inputdepthdir)
    depth_files.sort()
    seg_files = os.listdir(cfg.inputsegdir)
    seg_files.sort()

    for depth_file, seg_file in zip(depth_files, seg_files):

        trajectory = []
        with open(os.path.join(cfg.trajectorydir, depth_file[:-4]+'.txt'), 'r') as ft:
            while True:
                line = ft.readline()
                if not line:
                    break
                eles = line.split()
                for ele in eles:
                    trajectory.append(ele)
        trajectory = np.array(trajectory, dtype=np.float32).reshape([4, 4])

        depth = cv2.imread(os.path.join(cfg.inputdepthdir, depth_file), -1)
        seg = cv2.imread(os.path.join(cfg.inputsegdir, seg_file))
        cloud = pcl.PointCloud_PointXYZRGBA()
        rows = seg.shape[0]
        cols = seg.shape[1]
        depth = cv2.resize(depth, (cols, rows))

        points = np.zeros([rows*cols, 4])
        camera = np.zeros([4, 1])

        for i in range(rows):
            for j in range(cols):
                df = depth[i, j]
                if df == 0:
                    continue

                camera[0, 0] = df*j
                camera[1, 0] = df*i
                camera[2, 0] = df
                camera[3, 0] = 1

                camera_cor = np.dot(intrinsic_inv, camera)
                world = np.dot(trajectory, camera_cor)

                x = int(world[0, 0])
                y = int(world[1, 0])
                z = int(world[2, 0])
                # Cast to int before shifting so the 8-bit channels do not overflow
                r = (0xff & int(seg[i, j, 2])) << 16
                g = (0xff & int(seg[i, j, 1])) << 8
                b = (0xff & int(seg[i, j, 0]))
                rgb = r | g | b
                # print(rgb)
                points[i*cols+j, :] = [x, y, z, rgb]

        points = np.array(points, dtype=np.float32)
        cloud.from_array(points)
        pcl.save(cloud, os.path.join(cfg.saveplydir, depth_file[:-4]+'.ply'), format='ply')
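The per-pixel Python loop in Example #19 is slow for full frames; the same pixel-to-world projection can be written as one matrix product, much like Example #4. A hedged vectorized sketch of the core step (variable names follow the loop above; it is not a drop-in replacement for the whole function):

import numpy as np

def project_depth(depth, intrinsic_inv, trajectory):
    rows, cols = depth.shape
    i, j = np.mgrid[0:rows, 0:cols]
    d = depth.reshape(-1).astype(np.float64)
    pixel = np.stack((d * j.reshape(-1), d * i.reshape(-1), d, np.ones_like(d)))  # 4 x N pixel frame
    world = trajectory @ (intrinsic_inv @ pixel)                                  # 4 x N world coordinates
    # Zero-depth pixels project to the camera origin here; filter them as needed.
    return world[:3].T                                                            # N x 3 world points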