# Module aliases assumed from context across these excerpts (imports not
# shown in the source): cc = camera converter module, mb = multibag,
# ns = numpystream, sd = squeezedet; cv2, numpy as np, and os are also required.

def try_undistort(desired_count):
    undist = cc.CameraConverter()
    bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_at_distance'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    multi = mb.MultiBagStream(bt, ns.generate_numpystream)
    generator = multi.generate(infinite=False)
    count = 0
    output_count = 0
    for numpydata in generator:
        im = numpydata.image
        frame_idx, obs = numpydata.obs
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        undistorted = undist.undistort_image(im)
        if count % 25 == 0:
            cv2.imwrite('/data/dev/camera/orig_{}.png'.format(count), im)
            # Draw the projected obstacle center.
            img_point = undist.project_point(obs.position)
            cv2.circle(undistorted, (int(img_point[0]), int(img_point[1])),
                       radius=5, color=(255, 0, 0), thickness=2)
            # Draw the projected bbox corners.
            img_points = undist.project_points(obs.get_bbox().transpose())
            for img_point in img_points:
                cv2.circle(undistorted, (int(img_point[0]), int(img_point[1])),
                           radius=5, color=(0, 255, 0), thickness=2)
            cv2.imwrite('/data/dev/camera/undist_{}.png'.format(count), undistorted)
            output_count += 1
        count += 1
        if desired_count is not None and output_count == desired_count:
            return
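# Usage sketch (argument value hypothetical): try_undistort(4) samples one
# frame in every 25, writing the raw image plus the undistorted image
# annotated with the projected obstacle center and bbox corners under
# /data/dev/camera/.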
def generate_camera_images(bag_file, mc, tracklet_file):
    camera_converter = cc.CameraConverter()
    generator = ns.generate_numpystream(bag_file, tracklet_file)
    for numpydata in generator:
        im = numpydata.image
        obs = numpydata.obs
        if im is not None:
            im = sd.undistort_and_crop(im, camera_converter, mc)
        # Must yield an item for every frame in the generator.
        yield im, obs
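# Usage sketch, assuming a bag/tracklet pair and a SqueezeDet model config
# `mc` as in the other excerpts (variable names hypothetical):
#
#   for im, obs in generate_camera_images(bag_file, mc, tracklet_file):
#       if im is not None:
#           pass  # im is undistorted and cropped for the detector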
def __init__(self, demo_net):
    assert demo_net in ('squeezeDet', 'squeezeDet+', 'didi'), \
        'Selected neural net architecture not supported: {}'.format(demo_net)
    self.demo_net = demo_net
    self.mc = None
    self.model = None
    self.sess = None
    self._prepare_graph()
    self.camera_converter = cc.CameraConverter()
def try_augmenting_camera_boxes():
    camera_converter = cc.CameraConverter()
    bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_front_left'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    generator = generate_training_data_multi(bt)
    count = 0
    for bbox, label, im in generator:
        new_bbox, new_label = augment_example(bbox, label, camera_converter)
        print('bbox', bbox)
        print('new_bbox', new_bbox)
        print('label', label)
        print('new_label', new_label)
        count += 1
        if count == 10:
            return
def generate_camera_boxes_dir(train_dir, index_file, augment, infinite=True):
    camera_converter = cc.CameraConverter()
    # Make at least one full pass over the index; repeat only when infinite.
    while True:
        ids = []
        with open(os.path.join(train_dir, index_file), 'r') as f:
            for line in f:
                ids.append(int(line))
        bboxdir, labeldir, imagedir = get_bbox_label_dirs(train_dir)
        for example_id in ids:
            bbox_path = get_bbox_path(bboxdir, example_id)
            bbox = np.loadtxt(bbox_path)
            label_path = get_label_path(labeldir, example_id)
            label = np.loadtxt(label_path)
            if augment:
                bbox, label = augment_example(bbox, label, camera_converter)
            yield bbox, label
        if not infinite:
            break
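# Usage sketch (directory and index names hypothetical): one unaugmented
# pass over the index, e.g. for evaluation:
#
#   gen = generate_camera_boxes_dir('/data/train', 'index.txt',
#                                   augment=False, infinite=False)
#   bbox, label = next(gen)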
def generate_training_data(bag_file, tracklet_file):
    mc = sd.get_model_config(demo_net='squeezeDet')
    camera_converter = cc.CameraConverter()
    generator = generate_top_boxes(bag_file, tracklet_file)
    for top_car, top_ped, (frame_idx, obs), im in generator:
        # Pick the top detection matching the obstacle's class.
        top_obs = None
        if obs.object_type == 'Car' and top_car is not None:
            top_obs = top_car
        elif obs.object_type == 'Pedestrian' and top_ped is not None:
            top_obs = top_ped
        if top_obs is not None:
            (box, prob, class_idx) = top_obs
            if box is not None and camera_converter.obstacle_is_in_view(obs):
                yield (np.array([box[0], box[1], box[2], box[3], prob, class_idx]),
                       np.array([obs.position[0], obs.position[1], obs.position[2], obs.yaw]),
                       im)
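# Each yielded example pairs the detector output (four box coordinates,
# probability, class index) with the ground-truth pose (x, y, z, yaw) and
# the source image:
#
#   for bbox, pose, im in generate_training_data(bag_file, tracklet_file):
#       ...  # training pairs for the box-to-pose predictor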
def __init__(self):
    self.squeezedet = sd.SqueezeDetector(demo_net='squeezeDet')
    self.box_to_pose_predictor = get_latest_predictor()
    self.camera_converter = cc.CameraConverter()
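# This constructor (enclosing class not shown in this excerpt) composes the
# full pipeline: the SqueezeDet detector, the most recently trained
# box-to-pose predictor, and the camera model used to relate image boxes to
# world positions.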