Example #1
0
    def detect(self, lidar):
        """Detect one car and one pedestrian in a lidar frame.

        Builds a birdseye image from the lidar cloud, runs squeezedet on
        it, and converts the highest-probability detections back to
        global coordinates.

        Returns a tuple (car_pose, ped_pose); each entry is a numpy
        array [x, y, z, yaw] or None when that object type was not found.
        """
        birdseye = ld.lidar_to_birdseye(lidar, ld.slice_config())

        mc = self.squeezedet.mc
        cropped = ci.crop_image(birdseye,
                                (mc.IMAGE_WIDTH + 1, mc.IMAGE_HEIGHT + 1, 3),
                                (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT, 3))

        car_pose = None
        ped_pose = None
        boxes, probs, classes = self.squeezedet.detect(cropped)
        # Detections are assumed to arrive in decreasing order of
        # probability, so the first match per class wins.
        for box, prob, class_idx in zip(boxes, probs, classes):
            global_box = ld.birdseye_to_global(box, ld.slice_config())

            # Yaw is estimated from a crop centered on the uncorrected
            # detection position.
            car_box = rd.get_birdseye_box(lidar,
                                          (global_box[0], global_box[1]))
            predicted_yaw = self.rotation_detector.detect_rotation(car_box)

            sd.correct_global(global_box, class_idx)

            # z = -0.9 is a fixed height estimate.
            pose = np.array(
                [global_box[0], global_box[1], -0.9, predicted_yaw])

            if class_idx == sd.CAR_CLASS and car_pose is None:
                car_pose = pose
            if class_idx == sd.PED_CLASS and ped_pose is None:
                ped_pose = pose
            if car_pose is not None and ped_pose is not None:
                break
        return (car_pose, ped_pose)
Example #2
0
def get_birdseye_box(lidar, obs_position):
    """Crop a small birdseye image centered on an obstacle position.

    Restricts the birdseye view to a 5m x 5m window around
    (obs_position[0], obs_position[1]) and crops the result from
    (51, 51, 3) down to INPUT_SHAPE.
    """
    cfg = ld.slice_config()
    # Narrow the view to +-2.5m in both directions around the obstacle.
    cfg.SIDE_RANGE = (-2.5, 2.5)
    cfg.FWD_RANGE = (-2.5, 2.5)

    center = (obs_position[0], obs_position[1])
    birdseye = ld.lidar_to_birdseye(lidar,
                                    cfg,
                                    return_points=False,
                                    center=center)
    return ci.crop_image(birdseye, (51, 51, 3), INPUT_SHAPE)
Example #3
0
def generate_lidar_birdseye(bag_file, mc):
  """Yield (birdseye_image, lidar) for every frame of a bag.

  Frames without lidar yield the most recently computed image (or None
  before the first lidar frame) so output stays frame-aligned.
  """
  im = None
  for numpydata in ns.generate_numpystream(bag_file, tracklet_file = None):
    lidar = numpydata.lidar
    if lidar is not None:
      birdseye = ld.lidar_to_birdseye(lidar, ld.slice_config())
      im = ci.crop_image(birdseye,
                         (mc.IMAGE_WIDTH + 1, mc.IMAGE_HEIGHT + 1, 3),
                         (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT, 3))
    # Must yield an item for each frame in the generator.
    yield im, lidar
Example #4
0
    def next(self):
        """Pull the next message from the stream and wrap it as TrainData.

        The panorama and slice fields are None for frames without lidar.
        """
        msg = self.msgstream.next()
        if msg.lidar is not None:
            points = np.array(list(pc2.read_points(msg.lidar)))
            lidar_panorama = lidar.lidar_to_panorama(points)
            lidar_slices = lidar.lidar_to_slices(points, lidar.slice_config())
        else:
            lidar_panorama = None
            lidar_slices = None

        imagemsg = image.ImageMsg(msg.image)
        return TrainData(msg.pose,
                         imagemsg.bgr,
                         lidar_panorama,
                         lidar_slices)
Example #5
0
def generate_traindata(bag_file, tracklet_file):
    """Yield a TrainData per frame of the given bag/tracklet pair.

    Parameters:
        bag_file: path to the ROS bag to read.
        tracklet_file: path to the matching tracklet annotations.

    Yields:
        TrainData(pose, bgr_image, lidar_panorama, lidar_slices); the two
        lidar-derived fields are None for frames without lidar data.
    """
    data_generator = framestream.generate_trainmsgs(bag_file, tracklet_file)

    # BUGFIX: the original used `while True: frame = next(data_generator)`.
    # Under PEP 479 (default since Python 3.7), a StopIteration escaping a
    # generator body is converted to RuntimeError instead of ending the
    # iteration cleanly, so this function would crash at end of stream.
    # A plain for-loop terminates correctly.
    for frame in data_generator:
        if frame.lidar is None:
            lidar_panorama = None
            lidar_slices = None
        else:
            points = pc2.read_points(frame.lidar)
            points = np.array(list(points))
            lidar_panorama = lidar.lidar_to_panorama(points)
            lidar_slices = lidar.lidar_to_slices(points, lidar.slice_config())

        imagemsg = image.ImageMsg(frame.image)

        yield TrainData(frame.pose, imagemsg.bgr, lidar_panorama, lidar_slices)
Example #6
0
                # Interior of a sample-export loop (function header is
                # outside this chunk): write one training image + label.
                crop = ci.crop_image(birdseye, expected_shape, new_shape)
                image_file = os.path.join(imagedir, '{:06d}.png'.format(id))
                imlib.save_np_image(crop, image_file, bbox_tuple)

                # KITTI-format annotation sits next to the image, keyed
                # by the same zero-padded sample id.
                label_path = os.path.join(labeldir, '{:06d}.txt'.format(id))
                write_kitti_annotation(obs, birdseye_bbox, label_path)

                # NOTE(review): 'id' shadows the builtin; it is the
                # sequential sample counter here.
                id += 1
                if id % 1000 == 0:
                    # Periodic progress report; stopwatch is restarted so
                    # each report shows time since the previous one.
                    print('Working... Processed {} samples.'.format(id))
                    stopwatch.stop()
                    print('Elapsed time: {}'.format(
                        stopwatch.format_duration()))
                    stopwatch.start()
    # Final summary once all bags/frames are exhausted.
    print('DONE. Processed {} samples.'.format(id))
    stopwatch.stop()
    print('Elapsed time: {}'.format(stopwatch.format_duration()))


if __name__ == '__main__':
    # Export KITTI-format training data for every bag found under bagdir.
    # Swap in the commented path to process a single training bag instead.
    bagdir = '/data/bags/'
    # bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_at_distance'
    bag_tracklets = multibag.find_bag_tracklets(bagdir, '/data/tracklets')
    generate_kitti(
        bag_tracklets,
        '/home/eljefec/repo/squeezeDet/data/KITTI/training_64x64/image_2',
        '/home/eljefec/repo/squeezeDet/data/KITTI/training_64x64/label_2',
        output_bbox=False,
        slice_config=ld.slice_config())
Example #7
0
  def gen_tracklet(self, bag_file, include_car, include_ped):
    """Run detection over a bag and write a tracklet XML file.

    Appends at most one car pose and one pedestrian pose per frame;
    frames without a detection repeat the previous pose so every frame
    has an entry. Output goes to FLAGS.out_dir/<bag name>.xml.
    """

    def make_pose(x, y, rz):
      # Estimate tz from histogram.
      return {'tx': x,
              'ty': y,
              'tz': -0.9,
              'rx': 0,
              'ry': 0,
              'rz': rz}

    # Fallback poses, reused for frames where nothing is detected.
    prev_car_pose = make_pose(0, 0, 0)
    prev_ped_pose = make_pose(0, 0, 0)
    # Carried across frames when lidar is missing (see loop below).
    predicted_yaw = 0

    # l, w, h from histogram
    car_tracklet = generate_tracklet.Tracklet(object_type='Car', l=4.3, w=1.7, h=1.7, first_frame=0)
    ped_tracklet = generate_tracklet.Tracklet(object_type='Pedestrian', l=0.8, w=0.8, h=1.708, first_frame=0)

    generator = generate_detections(bag_file, demo_net = 'didi', skip_null = False)
    for im, boxes, probs, classes, lidar in generator:
      car_found = False
      ped_found = False
      if (im is not None and boxes is not None
          and probs is not None and classes is not None):
        # Assume decreasing order of probability
        for box, prob, class_idx in zip(boxes, probs, classes):
          global_box = ld.birdseye_to_global(box, ld.slice_config())

          if lidar is not None:
            # Yaw is estimated from the *uncorrected* box position;
            # without lidar the previous frame's yaw is reused.
            car_box = rd.get_birdseye_box(lidar, (global_box[0], global_box[1]))
            predicted_yaw = self.rotation_detector.detect_rotation(car_box)

          # Mutates global_box in place; must run after the yaw estimate.
          sd.correct_global(global_box, class_idx)

          pose = make_pose(global_box[0], global_box[1], predicted_yaw)

          if not car_found and class_idx == sd.CAR_CLASS:
            # box is in center form (cx, cy, w, h)
            car_tracklet.poses.append(pose)
            prev_car_pose = pose
            car_found = True
          if not ped_found and class_idx == sd.PED_CLASS:
            ped_tracklet.poses.append(pose)
            prev_ped_pose = pose
            ped_found = True

          # Stop once both object types have a pose for this frame.
          if car_found and ped_found:
            break
      # Keep one pose per frame: repeat the last known pose if needed.
      if not car_found:
        car_tracklet.poses.append(prev_car_pose)
      if not ped_found:
        ped_tracklet.poses.append(prev_ped_pose)

    tracklet_collection = generate_tracklet.TrackletCollection()
    if include_car:
      tracklet_collection.tracklets.append(car_tracklet)
    if include_ped:
      tracklet_collection.tracklets.append(ped_tracklet)

    tracklet_file = os.path.join(FLAGS.out_dir, get_filename(bag_file) + '.xml')

    tracklet_collection.write_xml(tracklet_file)