Example #1
0
def main():
    import numpy as np
    from core.kitti import KITTI, ALL_OBJECTS, CARS_ONLY
    # `bev` and `imshow` below are assumed to be project-level visualization helpers.
    DS_DIR = '/home/salam/datasets/KITTI/training'
    reader = KITTI(DS_DIR, CARS_ONLY)
    ids = reader.get_ids('train')
    for t in [ids[4]]:
        img = reader.get_image(t)
        # LiDAR points and reflectance, without the camera field-of-view filter
        pts, ref = reader.get_velo(t, use_fov_filter=False)
        boxes_2D = reader.get_boxes_2D(t)
        boxes_3D = reader.get_boxes_3D(t)
        P2 = reader.get_calib(t)[2]  # P2: 3x4 projection matrix of the left color camera

        # Attach a random "confidence" string to each GT box, just for the BEV overlay.
        for b in boxes_3D:
            b.text = f"{np.random.uniform():0.2f}"

        imshow(bev(pts, pred_boxes=boxes_3D, title="GT"))
                    return fmap

                rv2bev_2x = process_fmap(rv2bev_2x)
                bev_2x = process_fmap(bev_2x)
                rv2bev_8x = process_fmap(rv2bev_8x)
                bev_8x = process_fmap(bev_8x)
                bevout = process_fmap(bevout)
                rv_high = process_fmap(rv_high)
                rv_low = process_fmap(rv_low)
                f2 = process_fmap(f2)
                f4 = process_fmap(f4)

                outmap = np.squeeze(np.concatenate((obj3d, geo), axis=-1))
                decoded_boxes = target_encoder.decode(outmap, 0.2)
                filtered_boxes = nms(decoded_boxes)
                pts, _ = kitti.get_velo(ids[0], use_fov_filter=True)
                boxes = kitti.get_boxes_3D(ids[0])
                cnvs = bev(pts=pts.T,
                           gt_boxes=boxes,
                           pred_boxes=filtered_boxes)
                cnvs = np.rot90(cnvs)
                plt.imshow(cnvs)
                plt.axis('off')
                buf = io.BytesIO()
                plt.savefig(buf,
                            format='png',
                            bbox_inches='tight',
                            pad_inches=0)
                buf.seek(0)
                plot = tf.image.decode_png(buf.getvalue(), channels=4)
                plot = tf.expand_dims(plot, 0)
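                # Hypothetical note (not from the original code): a decoded plot tensor
                # shaped (1, H, W, 4) like `plot` above is typically written to
                # TensorBoard under the active summary writer, e.g.
                #   tf.summary.image("bev_prediction", plot, step=train_steps)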
                                               P2,
                                               parts=4)
    print('world_pts.shape', world_pts.shape)
    print('nearest.shape  ', nearest.shape)
    # Offset from each BEV grid location to its nearest LiDAR point
    geo_feature = nearest - world_pts
    # Project the nearest points into the image and scale to the feature-map resolution
    nearest_projected = project(P2, nearest).astype(
        np.int32).T // image_downsampling_factor
    return {
        'mapping': nearest_projected.reshape((bev_length, bev_width, 2)),
        'geo_feature': geo_feature.reshape((bev_length, bev_width, 3))
    }
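
# Hypothetical sketch (not part of the original code): one way the returned
# 'mapping' and 'geo_feature' could be consumed is to gather image-branch
# features into the BEV grid, continuous-fusion style. The argument names and
# the (u, v) ordering assumed for `mapping` are illustrative only.
def gather_image_features_into_bev(image_fmap, mapping, geo_feature):
    import numpy as np
    # image_fmap:  (H, W, C) image feature map at the matching downsampling factor
    # mapping:     (bev_length, bev_width, 2) per-cell pixel coordinates, assumed (u, v)
    # geo_feature: (bev_length, bev_width, 3) offset from each BEV cell to its nearest LiDAR point
    cols = np.clip(mapping[..., 0], 0, image_fmap.shape[1] - 1)
    rows = np.clip(mapping[..., 1], 0, image_fmap.shape[0] - 1)
    gathered = image_fmap[rows, cols]  # (bev_length, bev_width, C)
    return np.concatenate([gathered, geo_feature], axis=-1)  # (bev_length, bev_width, C + 3)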


for i, id in enumerate(train_ids + val_ids):
    pc, _ = kitti.get_velo(id,
                           workspace_lim=((-40, 40), (-1, 3), (0, 70)),
                           use_fov_filter=True)
    _, _, P2 = kitti.get_calib(id)
    #     img = kitti.get_image(id)
    #     img = cv2.resize(img, (311, 94))
    #     print('img.shape', img.shape)
    bev_shape = (448, 512)
    world_pts2x, nearest2x = get_nearest_neighbour(pc, bev_shape[1] // 2,
                                                   bev_shape[0] // 2, 2, P2)
    # mapping2x[:,:,(0,1)] = mapping2x[:,:,(1,0)]

    world_pts4x, nearest4x = get_nearest_neighbour(pc, bev_shape[1] // 4,
                                                   bev_shape[0] // 4, 4, P2)
    # mapping4x[:,:,(0,1)] = mapping4x[:,:,(1,0)]

    world_pts8x, nearest8x = get_nearest_neighbour(pc, bev_shape[1] // 8,
Example #4
0
from shapely.geometry import Polygon  # assumed import; matches the .intersection(...).area usage below


def is_box_valid(limit, box_bev_pts):
    # A box is valid only if its BEV footprint does not overlap the triangle
    # spanned by the sensor origin (0, 0) and the two `limit` endpoints.
    (x1, y1), (x2, y2) = limit
    limit_p = Polygon([(0, 0), (x1, y1), (x2, y2)])
    box_p = Polygon([(box_bev_pts[0][0], box_bev_pts[0][1]),
                     (box_bev_pts[1][0], box_bev_pts[1][1]),
                     (box_bev_pts[2][0], box_bev_pts[2][1]),
                     (box_bev_pts[3][0], box_bev_pts[3][1])])
    return limit_p.intersection(box_p).area == 0
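
# Hypothetical usage sketch (not in the original code); `frustum` and the corner
# lists below are made-up values for illustration.
frustum = ((70, 40), (70, -40))  # far BEV border, same points as border_1 below
print(is_box_valid(frustum, [(10, 20), (10, 22), (14, 22), (14, 20)]))  # True: no overlap
print(is_box_valid(frustum, [(10, 2), (10, 4), (14, 4), (14, 2)]))      # False: overlaps the triangle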


t = '000013'  #ids[4]
boxes = reader.get_boxes_3D(t)
pts, ref = reader.get_velo(t, use_fov_filter=False)

# open3d(pts, boxes)
# imshow(bev(pts, pred_boxes=boxes, title="GT"))

lidar_src = (0, 0)
border_1 = (70, 40), (70, -40)
border_2 = np.array([40, 0]), np.array([40, 70])
border_3 = np.array([-40, 0]), np.array([-40, 70])

circles = []
d1_clr, d2_clr = (255, 0, 0), (0, 255, 0)
box_limits = []
all_limits = []
rand_num = 30
random_cars_dir = 'data_utils/aug_utils/annotations/cars/'
        cur_metrics = compute_metric(model, pc, (obj, geo), True)

        for metric_name, metric_val in cur_metrics.items():
            with file_writer.as_default():
                tf.summary.scalar(metric_name,
                                  data=metric_val,
                                  step=train_steps)

        if train_steps % configs['vis_every'] == 0 and train_steps != 0:
            for var in model.trainable_variables:
                with hist_writer.as_default():
                    tf.summary.histogram(var.name, var, step=train_steps)

        if train_steps % configs['vis_every'] == 0 and train_steps != 0:
            for chosen_id in configs['chosen_ids']:
                pts, _ = kitti.get_velo(chosen_id, use_fov_filter=False)
                boxes = kitti.get_boxes_3D(chosen_id)

                if pts.shape[1] != 3:
                    pts = pts.T

                pc = np.expand_dims(pc_encoder.encode(pts), axis=0)
                tar = target_encoder.encode(boxes)
                obj_map, obj_mask = tar[0][..., 0], tar[0][..., 1]
                # Add batch and channel dimensions: (H, W) -> (1, H, W, 1)
                obj_map = np.squeeze(obj_map)[np.newaxis, ..., np.newaxis]
                obj_mask = np.squeeze(obj_mask)[np.newaxis, ..., np.newaxis]
                with img_writer.as_default():

import deepdish as dd

from core.kitti import KITTI, ALL_OBJECTS, CARS_ONLY
from data_utils.augmentation import PointCloudAugmenter

DS_DIR = '/home/salam/datasets/KITTI/training'
reader = KITTI(DS_DIR, CARS_ONLY)

ids = reader.get_ids('train')

# For each training frame, collect the LiDAR points that fall inside every
# ground-truth box (presumably to build a per-object database for ground-truth
# augmentation).
for id in ids:
    boxes = reader.get_boxes_3D(id)
    pts, _ = reader.get_velo(id, use_fov_filter=False)
    
    for i, box in enumerate(boxes):
        pts_inside_ids = PointCloudAugmenter.find_containing_points(box, pts.T)
        if len(pts_inside_ids) > 5:
            pts_inside = pts[:, pts_inside_ids].T
            
            data = {
                'frame_id': id,
                'num_points': len(pts_inside_ids),
                'pts': pts_inside,
                'box_x': box.x,
                'box_y': box.y,
                'box_z': box.z,
                'box_w': box.w,
                'box_l': box.l,
                'box_h': box.h,