Example #1
    def __getitem__(self, idx):
        file = self.files[idx]
        name = None
        if self.is_chunks:
            name = os.path.splitext(os.path.basename(file))[0]
            
            inputs, targets, dims, world2grid, target_known, target_hierarchy = data_util.load_train_file(file)
        else:
            input_file = file[0]
            target_file = file[1]
            name = os.path.splitext(os.path.basename(input_file))[0]
            
            inputs, dims, world2grid = data_util.load_scene(input_file)
            targets, dims, world2grid = data_util.load_scene(target_file)
            target_known = data_util.load_scene_known(os.path.splitext(target_file)[0] + '.knw')
            targets = data_util.sparse_to_dense_np(targets[0], targets[1], dims[2], dims[1], dims[0], -float('inf'))
            target_hierarchy = None
        
        orig_dims = torch.LongTensor(targets.shape)
        if not self.is_chunks: 
            # add padding
            hierarchy_factor = pow(2, self.num_hierarchy_levels-1)
            max_input_dim = np.array(targets.shape)
            if self.max_input_height > 0 and max_input_dim[self.UP_AXIS] > self.max_input_height:
                max_input_dim[self.UP_AXIS] = self.max_input_height
                mask_input = inputs[0][:, self.UP_AXIS] < self.max_input_height
                inputs[0] = inputs[0][mask_input]
                inputs[1] = inputs[1][mask_input]
            max_input_dim = ((max_input_dim + (hierarchy_factor*4) - 1) // (hierarchy_factor*4)) * (hierarchy_factor*4)
            # pad target to max_input_dim
            padded = np.zeros((max_input_dim[0], max_input_dim[1], max_input_dim[2]), dtype=np.float32)
            padded.fill(-float('inf'))
            padded[:min(self.max_input_height, targets.shape[0]), :targets.shape[1], :targets.shape[2]] = targets[:self.max_input_height, :, :]
            targets = padded
            if target_known is not None:
                known_pad = np.ones((max_input_dim[0], max_input_dim[1], max_input_dim[2]), dtype=np.uint8) * 255
                known_pad[:min(self.max_input_height,target_known.shape[0]), :target_known.shape[1], :target_known.shape[2]] = target_known[:self.max_input_height, :, :]
                target_known = known_pad
        else:
            if self.num_hierarchy_levels < 4:
                target_hierarchy = target_hierarchy[4-self.num_hierarchy_levels:]

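        # keep only input voxels whose TSDF magnitude is within the truncation band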
        mask = np.abs(inputs[1]) < self.truncation
        input_locs = inputs[0][mask]
        input_vals = inputs[1][mask]
        inputs = [torch.from_numpy(input_locs).long(), torch.from_numpy(input_vals[:,np.newaxis]).float()]

        targets = targets[np.newaxis,:]
        targets = torch.from_numpy(targets)
        if target_hierarchy is not None:
            for h in range(len(target_hierarchy)):
                target_hierarchy[h] = torch.from_numpy(target_hierarchy[h][np.newaxis,:])
        world2grid = torch.from_numpy(world2grid)
        target_known = target_known[np.newaxis,:]
        target_known = torch.from_numpy(target_known)
        sample = {'name': name, 'input': inputs, 'sdf': targets, 'world2grid': world2grid, 'known': target_known, 'hierarchy': target_hierarchy, 'orig_dims': orig_dims}
        return sample
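
A minimal usage sketch for this loader; the class name SceneDataset and its constructor arguments are assumptions, not part of the example. The sample is indexed directly because the dense grids differ in size per scene; batching would need a custom collate_fn, since the sparse inputs vary in length and 'hierarchy' may be None:

    # Hypothetical usage; SceneDataset and its argument names are assumptions.
    dataset = SceneDataset(files, is_chunks=False, num_hierarchy_levels=4,
                           max_input_height=128, truncation=3.0)
    sample = dataset[0]
    locs, vals = sample['input']   # (N, 3) long voxel coords, (N, 1) float TSDF values
    print(sample['name'], sample['sdf'].shape, sample['orig_dims'])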
Example #2
    def tsdf_callback(self, data):

        time1 = time.time()
        gen = point_cloud2.read_points(data.points, field_names=("x", "y", "z", "intensity"), skip_nans=True)
        center = data.center

        points = np.array(list(gen)).reshape([-1, 4])
        print(points.shape, points[:, -1].max())

        offset_x = center[0] - args.voxel_size * args.dimx / 2
        offset_y = center[1] - args.voxel_size * args.dimx / 2
        offset_z = -2.7  # points[:, 2].min() - 0.1

        points[:, 0] -= offset_x
        points[:, 1] -= offset_y
        points[:, 2] -= offset_z
        # print("minz: ", offset_z)

        points = points / args.voxel_size
        locs = np.floor(points[:, :3]).astype(np.int32)
        feats = points[:, 3]

        # print("feats = ",feats)
        mask = (locs[:, 0] < args.dimx)  & (locs[:, 1] < args.dimx) & (locs[:, 2] < args.dimz) & (locs[:, 0] >= 0) & (locs[:, 1] >= 0) & (locs[:, 2] >= 0)
        locs = locs[mask]
        feats = feats[mask]
        input = data_util.sparse_to_dense_np(locs, feats, args.dimx, args.dimx, args.dimz, -float('inf'))
        input = input.astype(np.float32)
        # print("shape after s to d", input.shape)

        input[input > args.truncation] = args.truncation
        input[input < -args.truncation] = -args.truncation

        input = input.reshape([1, args.dimx, args.dimx, args.dimz])

        # input = data_util.tsdf_to_bool(input, trunc=2.0)

        # voxels whose TSDF value is > -2 are considered already known
        known_mask = (input > -2).squeeze()  # (dimx, dimx, dimz)
        original_occ = (np.abs(input) < args.occ_thresord).squeeze()
        original_free = (input > args.occ_thresord).squeeze()
        original_unkown = (input < - 2).squeeze()
        # print("INPUT: %f  occ, %f  free, %f  unkwn"%(original_occ.float().sum()/SUM, original_free.float().sum()/SUM, original_unkown.float().sum()/SUM))

        time2 = time.time()
        print("prepocess time: %fs"%(time2 - time1))
    
        output_occ = self.trt_runner.inference(input)

        time3 = time.time()
        print("model time: %fs"%(time3 - time2))

        output_occ = np.abs(output_occ) < args.occ_thresord
        # fix conflicts with the input in known voxels
        output_occ[known_mask] = original_occ[known_mask]
        # voxels newly predicted occupied (present in the output but not the input)
        added_occ = output_occ > original_occ

        if output_occ is not None:

            # TODO: compare numpy vs. torch+CUDA implementations of this step
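            # POINTER: assumed global (dimx, dimx, dimz, 3) grid of voxel indices
            # (see the sketch after Example #4)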
            occ_coords_1 = POINTER[added_occ].reshape([-1, 3])
            num_points = occ_coords_1.shape[0]
            # added_color_1 = torch.ones(num_points).unsqueeze(1) * 10
            added_color_1 = np.ones([num_points, 1], dtype=np.float32) * 10

            # occ_coords_1 = torch.cat((occ_coords_1, added_color_1), 1)
            occ_coords_1 = np.concatenate((occ_coords_1, added_color_1), 1)

            # occ_coords = torch.cat((occ_coords, occ_coords_1), 0) 
            occ_coords = occ_coords_1
            print("added size: ",occ_coords.shape, added_occ.shape)    

            if occ_coords.shape[0] > 0:
                occ_coords *= args.voxel_size
                occ_coords[:, 0] += offset_x
                occ_coords[:, 1] += offset_y
                occ_coords[:, 2] += offset_z
                # occ_coords = occ_coords.cpu().numpy()

                msg = data.points
                msg.height = 1
                msg.width = occ_coords.shape[0]
                msg.fields = [
                    PointField('x', 0, PointField.FLOAT32, 1),
                    PointField('y', 4, PointField.FLOAT32, 1),
                    PointField('z', 8, PointField.FLOAT32, 1),
                    PointField('intensity', 12, PointField.FLOAT32, 1)]
                msg.is_bigendian = False
                msg.point_step = 16  # 4 float32 fields (x, y, z, intensity)
                msg.row_step = 16 * occ_coords.shape[0]
                msg.is_dense = int(np.isfinite(occ_coords).all())
                msg.data = np.ascontiguousarray(occ_coords, dtype=np.float32).tobytes()
                print("msg: ", occ_coords.shape, len(msg.data), msg.row_step)
                # print("postpocess time: %fs"%(time.time() - time3))
                self.added_occ_pub.publish(msg)
            
            # publish the originally-known occupied voxels
            occ_coords = POINTER[original_occ].reshape([-1, 3])
            print("known occ size: ",occ_coords.shape)

            if occ_coords.shape[0] > 0:
                occ_coords *= args.voxel_size
                occ_coords[:, 0] += offset_x
                occ_coords[:, 1] += offset_y
                occ_coords[:, 2] += offset_z
                # occ_coords = occ_coords.cpu().numpy()

                msg2 = data.points
                msg2.height = 1
                msg2.width = occ_coords.shape[0]
                msg2.fields = [
                    PointField('x', 0, PointField.FLOAT32, 1),
                    PointField('y', 4, PointField.FLOAT32, 1),
                    PointField('z', 8, PointField.FLOAT32, 1),]
                msg2.is_bigendian = False
                msg2.point_step = 12  # 3 float32 fields (x, y, z)
                msg2.row_step = 12 * occ_coords.shape[0]
                msg2.is_dense = int(np.isfinite(occ_coords).all())
                msg2.data = np.ascontiguousarray(occ_coords, dtype=np.float32).tobytes()
                print("msg2: ", occ_coords.shape, len(msg2.data), msg2.row_step)

                self.occ_pub.publish(msg2)        

        time4 = time.time()
        print("post pocess time: %fs"%(time4 - time3))    

        return
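
All four examples rely on data_util.sparse_to_dense_np to scatter sparse (location, value) pairs into a dense grid pre-filled with a default value (here -inf). A minimal sketch consistent with the call sites above; the (z, y, x) ordering of locs is an assumption:

    import numpy as np

    def sparse_to_dense_np(locs, values, dimx, dimy, dimz, default_val):
        # locs: (N, 3) integer voxel coordinates, assumed ordered (z, y, x)
        # values: (N,) or (N, C) per-voxel values; unfilled voxels keep default_val
        nf = 1 if values.ndim == 1 else values.shape[1]
        dense = np.full([dimz, dimy, dimx, nf], default_val, dtype=values.dtype)
        dense[locs[:, 0], locs[:, 1], locs[:, 2], :] = values.reshape(-1, nf)
        return dense.reshape([dimz, dimy, dimx]) if nf == 1 else dense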
Example #3
    def __getitem__(self, idx):
        inputsdf_file = self.files[idx][0]
        sdf_file = self.files[idx][1]
        name = os.path.splitext(os.path.basename(inputsdf_file))[0]

        color_file = None if self.is_chunks else os.path.splitext(
            sdf_file)[0] + '.colors'
        sdf, world2grid, known, colors = data_util.load_sdf(
            sdf_file,
            load_sparse=self.load_tgt_sparse,
            load_known=self.load_known and self.is_chunks,
            load_colors=True,
            color_file=color_file)
        if sdf is None:
            return {'name': None}
        if self.load_known and not self.is_chunks:
            file_info = os.path.split(sdf_file)
            prefix = file_info[0] + '-complete' if 'color' in file_info[
                0] else file_info[0] + '_scanned'
            pad_known = (3, 6, 6) if 'color' in file_info[0] else (3, 3, 3)
            known_file = os.path.join(
                prefix,
                os.path.splitext(file_info[1])[0] + '.knw')
            known_file = known_file.replace('_trunc32-complete', '-complete')
            known = data_util.load_known(known_file,
                                         pad_known=pad_known,
                                         scale_to_dims=sdf.shape)
        input_color_file = None if self.is_chunks else os.path.splitext(
            inputsdf_file)[0] + '.colors'
        input, _, _, _, input_colors = data_util.load_sdf(
            inputsdf_file,
            load_sparse=True,
            load_known=False,
            load_colors=True,
            color_file=input_color_file)
        if input is None:
            return {'name': None}
        if self.color_truncation > 0:
            locs = input[0][np.abs(input[1]) >
                            self.color_truncation]  # to mask out
            input_colors[locs[:, 0], locs[:, 1], locs[:, 2], :] = 0
        max_input_dim = np.max(input[0], 0)
        if max_input_dim[0] >= sdf.shape[0] or max_input_dim[1] >= sdf.shape[
                1] or max_input_dim[2] >= sdf.shape[2]:
            mask = np.logical_and(
                input[0][:, 0] < sdf.shape[0],
                np.logical_and(input[0][:, 1] < sdf.shape[1],
                               input[0][:, 2] < sdf.shape[2]))
            input[0] = input[0][mask]
            input[1] = input[1][mask]

        image_depth = None
        image_color = None
        image_pose = None
        image_intrinsic = None
        if self.frame_info_path and self.frame_path:
            image_depth, image_color, image_pose, image_intrinsic, image_frameids = data_util.load_frames(
                [name], [world2grid],
                self.frame_info_path,
                self.frame_path,
                randomize_frames=self.randomize_frames,
                depth_image_dims=self.image_dims,
                color_image_dims=self.image_dims,
                color_normalization=None,
                load_depth=self.load_depth,
                load_color=self.load_color)

            if image_color is not None:
                if self.load_depth:
                    image_depth = image_depth.squeeze(1)
                if image_color is not None:
                    if len(image_color.shape) == 3:
                        image_color = image_color.squeeze(1)
                        image_pose = image_pose.squeeze(1)
                        image_intrinsic = image_intrinsic.squeeze(1)
                    else:
                        image_color = image_color.squeeze(0)
                        image_pose = image_pose.squeeze(0)
                        image_intrinsic = image_intrinsic.squeeze(0)
                if self.subsamp2d_factor > 1:
                    sz = image_color.shape[2:]
                    image_color = torch.nn.functional.interpolate(
                        image_color,
                        scale_factor=1.0 / self.subsamp2d_factor,
                        mode='bilinear',
                        align_corners=False)
                    image_color = torch.nn.functional.interpolate(
                        image_color,
                        size=sz,
                        mode='bilinear',
                        align_corners=False)

        input_dense = data_util.sparse_to_dense_np(input[0],
                                                   input[1][:, np.newaxis],
                                                   sdf.shape[2], sdf.shape[1],
                                                   sdf.shape[0], -float('inf'))
        if (self.is_chunks and
            (self.input_dim[0] != 96 and self.input_dim[0] != 128
             and self.input_dim[0] != 160)) or self.scene_subsample_factor > 1:
            scale_factor = float(
                self.input_dim[0]) / 128 if self.is_chunks else 1.0 / float(
                    self.scene_subsample_factor)
            input_dense = torch.nn.functional.interpolate(
                torch.from_numpy(input_dense).unsqueeze(0).unsqueeze(0),
                scale_factor=scale_factor) * scale_factor
            input_dense = input_dense[0, 0].numpy()
            input_colors = torch.nn.functional.interpolate(
                torch.from_numpy(input_colors).permute(
                    3, 0, 1, 2).contiguous().unsqueeze(0).float(),
                scale_factor=scale_factor)
            input_colors = input_colors[0].permute(
                1, 2, 3, 0).contiguous().numpy().astype(np.uint8)
            sdf = torch.nn.functional.interpolate(
                torch.from_numpy(sdf).unsqueeze(0).unsqueeze(0),
                scale_factor=scale_factor) * scale_factor
            sdf = sdf[0, 0].numpy()
            colors = torch.nn.functional.interpolate(
                torch.from_numpy(colors).permute(
                    3, 0, 1, 2).contiguous().unsqueeze(0).float(),
                scale_factor=scale_factor)
            colors = colors[0].permute(1, 2, 3,
                                       0).contiguous().numpy().astype(np.uint8)
            if known is not None:
                known = torch.nn.functional.interpolate(
                    torch.from_numpy(known).float().unsqueeze(0).unsqueeze(0),
                    scale_factor=scale_factor).byte()
                known = known[0, 0].numpy()
            world2grid = np.matmul(
                data_util.make_scale_transform(scale_factor),
                world2grid).astype(np.float32)

        if self.augment_rgb_scaling:
            scale = np.random.rand(1) * (
                self.aug_scale_range[1] -
                self.aug_scale_range[0]) + self.aug_scale_range[0]

            input_colors = data_util.convert_rgbgrid_to_hsvgrid(
                input_colors.astype(np.float32) / 255.0)
            colors = data_util.convert_rgbgrid_to_hsvgrid(
                colors.astype(np.float32) / 255.0)
            # hue augmentation
            scaled = input_colors[:, :, :, 0] * scale[0]
            cmask = scaled >= 360
            if np.sum(cmask) > 0:
                scaled[cmask] = scaled[cmask] % 360
            input_colors[:, :, :, 0] = scaled
            input_colors = np.clip(
                data_util.convert_hsvgrid_to_rgbgrid(input_colors) * 255, 0,
                255).astype(np.uint8)
            scaled = colors[:, :, :, 0] * scale[0]
            cmask = scaled >= 360
            if np.sum(cmask) > 0:
                scaled[cmask] = scaled[cmask] % 360
            colors[:, :, :, 0] = scaled
            colors = np.clip(
                data_util.convert_hsvgrid_to_rgbgrid(colors) * 255, 0,
                255).astype(np.uint8)
            if image_color is not None:
                image_color = data_util.convert_rgbgrid_to_hsvgrid(
                    image_color.permute(0, 2, 3, 1).numpy())
                scaled = image_color[:, :, :, 0] * scale[0]
                cmask = scaled >= 360
                if np.sum(cmask) > 0:
                    scaled[cmask] = scaled[cmask] % 360
                image_color[:, :, :, 0] = scaled
                image_color = data_util.convert_hsvgrid_to_rgbgrid(image_color)
                image_color = torch.from_numpy(image_color).permute(
                    0, 3, 1, 2).contiguous()

        if self.color_space == 'lab':
            colors = skcolor.rgb2lab(colors.astype(np.float32) / 255).astype(
                np.float32)
            input_colors = skcolor.rgb2lab(
                input_colors.astype(np.float32) / 255).astype(np.float32)
            # normalize tgt to 255 (expected in loss)
            colors[:, :, :, 0] = (colors[:, :, :, 0] / 100.0) * 255.0
            colors[:, :, :, 1:] = (colors[:, :, :, 1:] + 100.0) / 200.0 * 255.0
            # normalize input to 0/255
            input_colors[:, :, :, 0] = input_colors[:, :, :, 0] / 100.0
            input_colors[:, :, :,
                         1:] = (input_colors[:, :, :, 1:] + 100.0) / 200.0
            input_colors *= 255.0
            # images
            if image_color is not None:
                image_color = image_color.permute(0, 2, 3,
                                                  1).contiguous().view(
                                                      1, -1, 3).numpy()
                image_color = skcolor.rgb2lab(image_color).astype(np.float32)
                image_color = np.transpose(image_color, [0, 2, 1]).reshape(
                    1, 3, self.image_dims[1], self.image_dims[0])
                # normalize to 0/1
                image_color[:, 0] = image_color[:, 0] / 100.0
                image_color[:, 1:] = (image_color[:, 1:] + 100.0) / 200.0
                image_color = torch.from_numpy(image_color.astype(np.float32))

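        # empty: voxels outside the truncation band (no reliable surface signal);
        # mask: in-band voxels at or behind the surface (sdf <= -1)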
        empty = np.abs(input_dense) > self.truncation
        mask = np.zeros(input_dense.shape, dtype=np.float32)
        mask[input_dense <= -1] = 1
        mask[empty] = 0
        input_dense = data_util.preprocess_sdf_pt(input_dense, self.truncation)
        input_colors = input_colors.astype(np.float32) / 255.0
        input_colors[empty] = 0
        colors = torch.from_numpy(colors)
        input_dense = torch.from_numpy(input_dense).unsqueeze(0)
        input_colors = torch.from_numpy(input_colors).permute(3, 0, 1,
                                                              2).contiguous()
        input = torch.cat([input_dense, input_colors], 0)
        mask = torch.from_numpy(mask).unsqueeze(0)

        sdf = sdf[np.newaxis, :]
        sdf = torch.from_numpy(sdf)
        world2grid = torch.from_numpy(world2grid)
        if self.load_known:
            known = known[np.newaxis, :]
            known = torch.from_numpy(known)

        sample = {
            'name': name,
            'input': input,
            'sdf': sdf,
            'world2grid': world2grid,
            'known': known,
            'colors': colors,
            'image_depth': image_depth,
            'image_color': image_color,
            'image_pose': image_pose,
            'image_intrinsic': image_intrinsic,
            'mask': mask
        }
        return sample
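
The hue augmentation above repeats the same RGB -> HSV -> scale hue -> RGB round trip three times (input colors, target colors, image colors). A sketch of the pattern factored into one helper, reusing the data_util conversions; hue being in degrees [0, 360) is inferred from the modulo in the original code:

    import numpy as np

    def augment_hue(rgb_uint8, scale, to_hsv, to_rgb):
        # to_hsv / to_rgb: e.g. data_util.convert_rgbgrid_to_hsvgrid and its inverse
        hsv = to_hsv(rgb_uint8.astype(np.float32) / 255.0)
        hsv[..., 0] = (hsv[..., 0] * scale) % 360.0  # scale hue, wrap into [0, 360)
        return np.clip(to_rgb(hsv) * 255.0, 0, 255).astype(np.uint8)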
Example #4
def tsdf_callback(data):
    time1 = time.time()
    gen = point_cloud2.read_points(data.points,
                                   field_names=("x", "y", "z", "intensity"),
                                   skip_nans=True)
    center = data.center

    points = np.array(list(gen)).reshape([-1, 4])
    print(points.shape, points[:, -1].max())

    offset_x = center[0] - args.voxel_size * args.dimx / 2
    offset_y = center[1] - args.voxel_size * args.dimx / 2
    offset_z = -2.7  # points[:, 2].min() -0.1

    points[:, 0] -= offset_x
    points[:, 1] -= offset_y
    points[:, 2] -= offset_z
    # print("minz: ", offset_z)

    points = points / args.voxel_size
    locs = np.floor(points[:, :3]).astype(np.int32)
    feats = points[:, 3]

    # print("feats = ",feats)
    mask = (locs[:, 0] < args.dimx) & (locs[:, 1] < args.dimx) & (
        locs[:, 2] < args.dimz) & (locs[:, 0] >= 0) & (locs[:, 1] >=
                                                       0) & (locs[:, 2] >= 0)
    locs = locs[mask]
    feats = feats[mask]
    input = data_util.sparse_to_dense_np(locs, feats, args.dimx, args.dimx,
                                         args.dimz, -float('inf'))
    input = torch.from_numpy(input)  #.unsqueeze(0)
    # print("shape after s to d", input.shape)

    original_input = input
    print("original_input: ", original_input.shape)
    # voxels whose TSDF value is > -2 are considered already known
    known_mask = (original_input > -2)

    original_occ = (original_input.abs() < args.occ_thresord)
    original_free = original_input > args.occ_thresord
    original_unkown = original_input < -2
    # print("INPUT: %f  occ, %f  free, %f  unkwn"%(original_occ.float().sum()/SUM, original_free.float().sum()/SUM, original_unkown.float().sum()/SUM))

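    # "flipped" TSDF encoding: zero everything outside the truncation band, then
    # remap |d| -> truncation - |d| so magnitudes peak at the surface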
    if args.flipped:
        input[input.abs() > args.truncation] = 0
        input = torch.sign(input) * (args.truncation - input.abs())
    else:
        input[input > args.truncation] = args.truncation
        input[input < -args.truncation] = -args.truncation

    # print("input mean:",input.abs().mean(), input.mean(), "  max=",input.max())
    # print("known: %f %%"%(known_mask.float().sum() * 100 /(64*64*48)))

    time2 = time.time()
    print("prepocess time: %fs" % (time2 - time1))

    with torch.no_grad():
        # print("input_dim: ", np.array(locs.shape))
        input = input.unsqueeze(0)
        if not args.cpu:
            input = input.cuda()

        output_occs = None

        output_sdf, output_occs = model(input, WEIGHTS)

        # fix conflicts with input in known grids
        known_mask = known_mask.unsqueeze(0).unsqueeze(0)
        output_sdf[known_mask] = input.unsqueeze(0)[known_mask]  # match the mask's rank

        if not args.flipped:
            # occupied where |sdf| < threshold (a smaller threshold is stricter)
            sdf_to_occ = output_sdf.abs() < args.occ_thresord
        else:
            # flipped encoding: occupied where |value| > threshold (a larger threshold is stricter)
            sdf_to_occ = output_sdf.abs() > args.occ_thresord

        sdf_to_occ = sdf_to_occ.cpu()

        # original_occ: occupancy from the input; sdf_to_occ: occupancy from the output; added_occ: newly added
        sdf_to_occ = sdf_to_occ.squeeze(0).squeeze(0)
        # sdf_to_occ = output_occs[-1][0].cpu().squeeze(0)
        # sdf_to_occ = torch.sigmoid(sdf_to_occ) > 0.5
        added_occ = sdf_to_occ.float() > original_occ.float()

        time3 = time.time()
        print("model time: %fs" % (time3 - time2))

        if sdf_to_occ is not None:

            added_occ = added_occ.squeeze()
            occ_coords_1 = POINTER[added_occ].reshape([-1, 3])
            num_points = occ_coords_1.shape[0]
            added_color_1 = torch.ones(num_points).unsqueeze(1).cuda() * 10
            occ_coords_1 = torch.cat((occ_coords_1, added_color_1), 1)
            # print("added size: ",occ_coords_1.shape)

            # occ_coords = torch.cat((occ_coords, occ_coords_1), 0)
            occ_coords = occ_coords_1

            if occ_coords.shape[0] > 0:
                occ_coords *= args.voxel_size
                occ_coords[:, 0] += offset_x
                occ_coords[:, 1] += offset_y
                occ_coords[:, 2] += offset_z
                occ_coords = occ_coords.cpu().numpy()

                msg = data.points
                msg.height = 1
                msg.width = occ_coords.shape[0]
                msg.fields = [
                    PointField('x', 0, PointField.FLOAT32, 1),
                    PointField('y', 4, PointField.FLOAT32, 1),
                    PointField('z', 8, PointField.FLOAT32, 1),
                    PointField('intensity', 12, PointField.FLOAT32, 1)
                ]
                msg.is_bigendian = False
                msg.point_step = 16  # 4 float32 fields (x, y, z, intensity)
                msg.row_step = 16 * occ_coords.shape[0]
                msg.is_dense = int(np.isfinite(occ_coords).all())
                msg.data = np.ascontiguousarray(occ_coords, dtype=np.float32).tobytes()
                # print(occ_coords.shape, len(msg.data), msg.row_step)
                # print("postpocess time: %fs"%(time.time() - time3))
                publish_pcl(msg)

            # publish the originally-known occupied voxels
            occ_coords = POINTER[original_occ].reshape([-1, 3])
            # print("known occ size: ",occ_coords.shape)

            if occ_coords.shape[0] > 0:
                occ_coords *= args.voxel_size
                occ_coords[:, 0] += offset_x
                occ_coords[:, 1] += offset_y
                occ_coords[:, 2] += offset_z
                occ_coords = occ_coords.cpu().numpy()

                msg = data.points
                msg.height = 1
                msg.width = occ_coords.shape[0]
                msg.fields = [
                    PointField('x', 0, PointField.FLOAT32, 1),
                    PointField('y', 4, PointField.FLOAT32, 1),
                    PointField('z', 8, PointField.FLOAT32, 1),
                ]
                msg.is_bigendian = False
                msg.point_step = 12  # 3 float32 fields (x, y, z)
                msg.row_step = 12 * occ_coords.shape[0]
                msg.is_dense = int(np.isfinite(occ_coords).all())
                msg.data = np.ascontiguousarray(occ_coords, dtype=np.float32).tobytes()
                publish_added_pcl(msg)
    return
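
Examples #2 and #4 index a global POINTER with a boolean occupancy mask to recover (N, 3) voxel coordinates, which requires POINTER to be a (dimx, dimx, dimz, 3) grid of voxel indices. A sketch of how it could be built; this is an assumption, as the real definition is outside these snippets:

    import numpy as np

    # one (x, y, z) index triple per voxel -> shape (dimx, dimx, dimz, 3)
    POINTER = np.stack(np.meshgrid(np.arange(args.dimx),
                                   np.arange(args.dimx),
                                   np.arange(args.dimz),
                                   indexing='ij'), axis=-1).astype(np.float32)
    # Example #4 concatenates the result with CUDA tensors, so there the torch
    # equivalent would be used: POINTER = torch.from_numpy(POINTER).cuda()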