        dataloader = create_val_dataloader(configs)
        print('len val dataloader: {}'.format(len(dataloader)))

    print('\n\nPress n to see the next sample >>> Press Esc to quit...')

    for batch_i, (img_files, imgs, targets) in enumerate(dataloader):
        if not (configs.mosaic and configs.show_train_data):
            img_file = img_files[0]
            img_rgb = cv2.imread(img_file)
            calib = kitti_data_utils.Calibration(
                img_file.replace(".png", ".txt").replace("image_2", "calib"))
            objects_pred = invert_target(targets[:, 1:],
                                         calib,
                                         img_rgb.shape,
                                         RGB_Map=None)
            img_rgb = show_image_with_boxes(img_rgb, objects_pred, calib,
                                            False)

        # Rescale targets from normalized [0, 1] coordinates to BEV pixel coordinates
        targets[:, 2:6] *= configs.img_size
        # Recover the yaw angle from its (im, re) encoding
        targets[:, 6] = torch.atan2(targets[:, 6], targets[:, 7])

        # Convert the BEV tensor (C, H, W) in [0, 1] to an (H, W, C) uint8 image
        img_bev = imgs.squeeze() * 255
        img_bev = img_bev.permute(1, 2, 0).numpy().astype(np.uint8)
        img_bev = cv2.resize(img_bev, (configs.img_size, configs.img_size))

        for c, x, y, w, l, yaw in targets[:, 1:7].numpy():
            # Draw rotated box
            bev_utils.drawRotatedBox(img_bev, x, y, w, l, yaw,
                                     cnf.colors[int(c)])
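        # The listing is cut off here; presumably the loop finishes by displaying the
        # BEV map and waiting for a keypress. A minimal sketch (window name assumed):
        cv2.imshow('bev_img', img_bev)
        if cv2.waitKey(0) & 0xFF == 27:  # Esc quits, any other key shows the next sample
            break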
Example #2
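            # Fragment starts inside the per-batch detection loop: `detections` is assumed
            # to hold the post-NMS output for the current sample, and `img_detections` an
            # empty list initialized earlier.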
            img_detections.extend(detections)
            bev_maps = torch.squeeze(bev_maps).numpy()
            RGB_Map = np.zeros((cnf.BEV_WIDTH, cnf.BEV_WIDTH, 3))
            RGB_Map[:, :, 2] = bev_maps[0, :, :]  # r_map
            RGB_Map[:, :, 1] = bev_maps[1, :, :]  # g_map
            RGB_Map[:, :, 0] = bev_maps[2, :, :]  # b_map

            RGB_Map = (255 * RGB_Map).astype(np.uint8)
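            # Draw each rescaled detection onto the BEV map as a rotated box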
            for detections in img_detections:
                if detections is None:
                    continue
                # Rescale boxes to original image
                detections = np.array(detections)
                print('detections shape: {}'.format(detections.shape))
                detections = rescale_boxes(detections, configs.img_size, RGB_Map.shape[:2])
                for x, y, w, l, im, re, cls_conf, cls_pred in detections:
                    yaw = np.arctan2(im, re)
                    # Draw rotated box
                    kitti_bev_utils.drawRotatedBox(RGB_Map, x, y, w, l, yaw, cnf.colors[int(cls_pred)])

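            # Project the detections into KITTI camera coordinates and overlay them on the 2D camera image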
            img2d = cv2.imread(img_paths[0])
            calib = kitti_data_utils.Calibration(img_paths[0].replace(".png", ".txt").replace("image_2", "calib"))
            objects_pred = predictions_to_kitti_format(img_detections, calib, img2d.shape, configs.img_size)
            img2d = show_image_with_boxes(img2d, objects_pred, calib, False)

            cv2.imshow("bev img", RGB_Map)
            cv2.imshow("img2d", img2d)

            if cv2.waitKey(0) & 0xFF == 27:
                break