Code example #1: look up a KITTI calibration file by frame index
def get_calib(self, idx):
    # Calibration files are named after the zero-padded frame index, e.g. 000042.txt
    calib_file = os.path.join(self.calib_dir, '{:06d}.txt'.format(idx))
    assert os.path.isfile(calib_file), 'calibration file not found: {}'.format(calib_file)
    return Calibration(calib_file)
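The examples below read calib.V2C, calib.R0, and calib.P2 from the returned object: the Velodyne-to-camera extrinsics, the rectification matrix, and the left color camera's projection matrix, in the usual KITTI devkit convention. A minimal usage sketch, assuming a dataset instance that exposes get_calib as defined above; the frame index is illustrative:

calib = dataset.get_calib(42)  # reads <calib_dir>/000042.txt
# Attributes the later examples rely on:
#   calib.V2C  # 3x4 Velodyne-to-camera transform
#   calib.R0   # 3x3 rectification matrix
#   calib.P2   # 3x4 projection matrix of the left color camera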
Code example #2: demo inference loop that draws detections on the BEV map and the RGB image, then writes the merged frames to a video
    out_cap = None  # the video writer is created lazily, once the first frame size is known
    with torch.no_grad():
        for sample_idx in range(len(demo_dataset)):
            metadatas, bev_map, img_rgb = demo_dataset.load_bevmap_front(sample_idx)
            detections, bev_map, fps = do_detect(configs, model, bev_map, is_front=True)

            # Draw prediction in the image
            bev_map = (bev_map.permute(1, 2, 0).numpy() * 255).astype(np.uint8)
            bev_map = cv2.resize(bev_map, (cnf.BEV_WIDTH, cnf.BEV_HEIGHT))
            bev_map = draw_predictions(bev_map, detections, configs.num_classes)

            # Rotate the bev_map
            bev_map = cv2.rotate(bev_map, cv2.ROTATE_180)

            img_path = metadatas['img_path'][0]
            img_bgr = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
            calib = Calibration(configs.calib_path)
            kitti_dets = convert_det_to_real_values(detections)
            if len(kitti_dets) > 0:
                kitti_dets[:, 1:] = lidar_to_camera_box(kitti_dets[:, 1:], calib.V2C, calib.R0, calib.P2)
                img_bgr = show_rgb_image_with_boxes(img_bgr, kitti_dets, calib)

            out_img = merge_rgb_to_bev(img_bgr, bev_map, output_width=configs.output_width)
            write_credit(out_img, (80, 210), text_author='Cre: github.com/maudzung', org_fps=(80, 250), fps=fps)
            if out_cap is None:
                out_cap_h, out_cap_w = out_img.shape[:2]
                fourcc = cv2.VideoWriter_fourcc(*'MJPG')
                out_path = os.path.join(configs.results_dir, '{}_front.avi'.format(configs.foldername))
                print('Create video writer at {}'.format(out_path))
                out_cap = cv2.VideoWriter(out_path, fourcc, 30, (out_cap_w, out_cap_h))

            out_cap.write(out_img)
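This excerpt never releases the writer. A minimal teardown sketch that would follow the loop, assuming out_cap was initialized to None as above:

    if out_cap is not None:
        out_cap.release()  # flush buffered frames and close the .avi file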
Code example #3: dataset visualization that draws ground-truth boxes on the BEV map
    # lidar_aug = OneOf([
    #     Random_Rotation(limit_angle=np.pi / 4, p=1.),
    #     Random_Scaling(scaling_range=(0.95, 1.05), p=1.),
    # ], p=1.)
    lidar_aug = None  # augmentation disabled for visualization; restore the OneOf block above to enable it

    dataset = KittiDataset(configs,
                           mode='val',
                           lidar_aug=lidar_aug,
                           hflip_prob=0.,
                           num_samples=configs.num_samples)

    print('\n\nPress n to see the next sample >>> Press Esc to quit...')
    for idx in range(len(dataset)):
        bev_map, labels, img_rgb, img_path = dataset.draw_img_with_label(idx)
        calib = Calibration(
            img_path.replace(".png", ".txt").replace("image_2", "calib"))
        bev_map = (bev_map.transpose(1, 2, 0) * 255).astype(np.uint8)
        bev_map = cv2.resize(bev_map, (cnf.BEV_WIDTH, cnf.BEV_HEIGHT))  # cv2.resize takes dsize as (width, height)

        for box_idx, (cls_id, x, y, z, h, w, l, yaw) in enumerate(labels):
            # Draw the rotated box: metric lidar coordinates are discretized into
            # BEV pixels (x maps to rows, y to columns, hence the swapped names)
            yaw = -yaw
            y1 = int((x - cnf.boundary['minX']) / cnf.DISCRETIZATION)
            x1 = int((y - cnf.boundary['minY']) / cnf.DISCRETIZATION)
            w1 = int(w / cnf.DISCRETIZATION)
            l1 = int(l / cnf.DISCRETIZATION)

            drawRotatedBox(bev_map, x1, y1, w1, l1, yaw, cnf.colors[int(cls_id)])
        # Rotate the bev_map
        bev_map = cv2.rotate(bev_map, cv2.ROTATE_180)
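The prompt above advertises the n and Esc keys, but the excerpt ends before the display call. A minimal sketch of the key handling the prompt implies (the window name is illustrative):

        cv2.imshow('bev_map', bev_map)
        if cv2.waitKey(0) & 0xFF == 27:  # Esc quits; any other key, e.g. n, shows the next sample
            break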
Code example #4: mAP evaluation loop that runs the model and dumps detections as KITTI-format label files
def evaluate_mAP(val_loader, model, configs, logger):
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')

    progress = ProgressMeter(len(val_loader), [batch_time, data_time],
                             prefix="Evaluation phase...")
    labels = []
    sample_metrics = []  # List of tuples (TP, confs, pred)
    # switch to evaluate mode
    model.eval()

    class_id = {0: 'Car', 1: 'Pedestrian', 2: 'Cyclist'}

    with torch.no_grad():
        start_time = time.time()
        for batch_idx, batch_data in enumerate(tqdm(val_loader)):
            metadatas, targets = batch_data

            batch_size = len(metadatas['img_path'])

            voxelinput = metadatas['voxels']
            coorinput = metadatas['coors']
            numinput = metadatas['num_points']

            voxelinputr = torch.tensor(voxelinput, dtype=torch.float32, device=configs.device)
            coorinputr = torch.tensor(coorinput, dtype=torch.int32, device=configs.device)
            numinputr = torch.tensor(numinput, dtype=torch.int32, device=configs.device)

            t1 = time_synchronized()
            outputs = model(voxelinputr, coorinputr, numinputr)
            outputs = outputs._asdict()

            outputs['hm_cen'] = _sigmoid(outputs['hm_cen'])
            outputs['cen_offset'] = _sigmoid(outputs['cen_offset'])
            # detections size (batch_size, K, 10)

            detections = decode(outputs['hm_cen'], outputs['cen_offset'], outputs['direction'], outputs['z_coor'],
                                outputs['dim'], K=configs.K)
            detections = detections.cpu().numpy().astype(np.float32)
            detections = post_processing(detections, configs.num_classes, configs.down_ratio, configs.peak_thresh)

            for i in range(batch_size):  # use the actual batch size; the last batch may be smaller
                detections[i] = convert_det_to_real_values(detections[i])
                img_path = metadatas['img_path'][i]
                # Use the calibration belonging to this sample, not just the batch's first one
                calib = Calibration(img_path.replace(".png", ".txt").replace("image_2", "calib"))
                filename = os.path.splitext(os.path.basename(img_path))[0]
                # Only needed by the commented-out 3D visualization below
                lidar_path = img_path.replace("image_2", "velodyne").replace(".png", ".bin")
                # show3dlidar(lidar_path, detections[i], calib.V2C, calib.R0, calib.P2)
                dets = detections[i]
                with open('../result/' + filename + '.txt', 'w') as file_write_obj:
                    if len(dets) > 0:
                        dets[:, 1:] = lidar_to_camera_box(dets[:, 1:], calib.V2C, calib.R0, calib.P2)
                        for box_idx, label in enumerate(dets):
                            location, dim, ry = label[1:4], label[4:7], label[7]
                            # Wrap rotation_y into [-pi, pi]
                            if ry < -np.pi:
                                ry = 2 * np.pi + ry
                            if ry > np.pi:
                                ry = -2 * np.pi + ry
                            corners_3d = compute_box_3d(dim, location, ry)
                            corners_2d = project_to_image(corners_3d, calib.P2)
                            minxy = np.min(corners_2d, axis=0)
                            maxxy = np.max(corners_2d, axis=0)
                            bbox = np.concatenate([minxy, maxxy], axis=0)
                            # Crude filter against boxes projecting outside the image
                            if bbox[0] < 0 or bbox[2] < 0:
                                continue
                            if bbox[1] > 1272 or bbox[3] > 375:
                                continue
                            file_write_obj.write(
                                'Car 0.0 0 -10 '
                                '{:.2f} {:.2f} {:.2f} {:.2f} '
                                '{:.2f} {:.2f} {:.2f} '
                                '{:.2f} {:.2f} {:.2f} {:.2f}\n'.format(
                                    bbox[0], bbox[1], bbox[2], bbox[3],
                                    dim[0], dim[1], dim[2],
                                    location[0], location[1], location[2], ry))

            '''for sample_i in range(len(detections)):
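For reference, each line written above follows the 15-column KITTI label format: type, truncation, occlusion, alpha (written here as the sentinel -10 for unknown), 2D bbox (left, top, right, bottom), 3D dimensions (h, w, l), 3D location (x, y, z in camera coordinates), and rotation_y. A hypothetical output line:

Car 0.0 0 -10 387.63 181.54 425.03 203.12 1.53 1.63 3.88 1.84 1.47 8.41 -1.56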