# --- Visualize one KITTI sample: draw ground-truth boxes on the BEV map and the RGB image ---
# NOTE(review): this fragment runs inside a dataset-iteration loop — the trailing
# `break` needs an enclosing loop whose header is outside this view.

# Load the calibration file for this frame (KITTI layout: image_2/xxx.png <-> calib/xxx.txt).
calib = Calibration( img_path.replace(".png", ".txt").replace("image_2", "calib"))
# CHW float map in [0, 1] -> HWC uint8 image so OpenCV can draw on it.
bev_map = (bev_map.transpose(1, 2, 0) * 255).astype(np.uint8)
# NOTE(review): cv2.resize takes dsize as (width, height); passing
# (BEV_HEIGHT, BEV_WIDTH) is only correct if the two constants are equal — confirm in cnf.
bev_map = cv2.resize(bev_map, (cnf.BEV_HEIGHT, cnf.BEV_WIDTH))
for box_idx, (cls_id, x, y, z, h, w, l, yaw) in enumerate(labels):
    # Draw rotated box
    yaw = -yaw
    # Metric lidar coordinates -> BEV pixel coordinates
    # (x maps through the minX boundary, y through minY; both scaled by DISCRETIZATION).
    y1 = int((x - cnf.boundary['minX']) / cnf.DISCRETIZATION)
    x1 = int((y - cnf.boundary['minY']) / cnf.DISCRETIZATION)
    w1 = int(w / cnf.DISCRETIZATION)
    l1 = int(l / cnf.DISCRETIZATION)
    drawRotatedBox(bev_map, x1, y1, w1, l1, yaw, cnf.colors[int(cls_id)])
# Rotate the bev_map
bev_map = cv2.rotate(bev_map, cv2.ROTATE_180)
# Project the 3D boxes from lidar coordinates to camera coordinates for RGB drawing.
labels[:, 1:] = lidar_to_camera_box(labels[:, 1:], calib.V2C, calib.R0, calib.P2)
# OpenCV drawing/display expects BGR channel order.
img_rgb = cv2.cvtColor(img_rgb, cv2.COLOR_RGB2BGR)
img_rgb = show_rgb_image_with_boxes(img_rgb, labels, calib)
# Stack the annotated RGB image and BEV map into a single output canvas.
out_img = merge_rgb_to_bev(img_rgb, bev_map, output_width=configs.output_width)
cv2.imshow('bev_map', out_img)
# ESC (key code 27) exits the visualization loop.
if cv2.waitKey(0) & 0xff == 27:
    break
# --- Visualize one training sample: draw targets on the BEV map, optionally merged with RGB ---
# NOTE(review): fragment — `RGB_Map=None)` below is the tail of a call whose opening
# is outside this view, and the trailing `break` needs an enclosing loop not shown here.
RGB_Map=None)
img_rgb = show_image_with_boxes(img_rgb, objects_pred, calib, False)
# Rescale target
# Targets are normalized to [0, 1]; scale box center/size columns to pixel units.
targets[:, 2:6] *= configs.img_size
# Get yaw angle
# Yaw is encoded as two components (columns 6 and 7); atan2 recovers the angle.
targets[:, 6] = torch.atan2(targets[:, 6], targets[:, 7])
# CHW float tensor in [0, 1] -> HWC uint8 image for OpenCV drawing.
img_bev = imgs.squeeze() * 255
img_bev = img_bev.permute(1, 2, 0).numpy().astype(np.uint8)
img_bev = cv2.resize(img_bev, (configs.img_size, configs.img_size))
for c, x, y, w, l, yaw in targets[:, 1:7].numpy():
    # Draw rotated box
    bev_utils.drawRotatedBox(img_bev, x, y, w, l, yaw, cnf.colors[int(c)])
# Match the display orientation used elsewhere in this file.
img_bev = cv2.rotate(img_bev, cv2.ROTATE_180)
if configs.mosaic and configs.show_train_data:
    # Mosaic-augmented samples are shown as BEV only.
    cv2.imshow('mosaic_sample', img_bev)
else:
    # Otherwise merge the RGB view and BEV map into one canvas.
    out_img = merge_rgb_to_bev(img_rgb, img_bev, output_width=configs.output_width)
    cv2.imshow('single_sample', out_img)
# ESC (key code 27) exits the visualization loop.
if cv2.waitKey(0) & 0xff == 27:
    break
# Draw rotated box kitti_bev_utils.drawRotatedBox(img_bev, x, y, w, l, yaw, cnf.colors[int(cls_pred)]) img_rgb = cv2.imread(img_paths[0]) calib = kitti_data_utils.Calibration(img_paths[0].replace( ".png", ".txt").replace("image_2", "calib")) objects_pred = predictions_to_kitti_format(img_detections, calib, img_rgb.shape, configs.img_size) img_rgb = show_image_with_boxes(img_rgb, objects_pred, calib, False) img_bev = cv2.flip(cv2.flip(img_bev, 0), 1) out_img = merge_rgb_to_bev(img_rgb, img_bev, output_width=608) print( '\tDone testing the {}th sample, time: {:.1f}ms, speed {:.2f}FPS' .format(batch_idx, (t2 - t1) * 1000, 1 / (t2 - t1))) if configs.save_test_output: if configs.output_format == 'image': img_fn = os.path.basename(img_paths[0])[:-4] cv2.imwrite( os.path.join(configs.results_dir, '{}.jpg'.format(img_fn)), out_img) elif configs.output_format == 'video': if out_cap is None: out_cap_h, out_cap_w = out_img.shape[:2] fourcc = cv2.VideoWriter_fourcc(*'MJPG')