def try_undistort(desired_count):
    undist = cc.CameraConverter()
    bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_at_distance'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    multi = mb.MultiBagStream(bt, ns.generate_numpystream)
    generator = multi.generate(infinite=False)
    count = 0
    output_count = 0
    for numpydata in generator:
        im = numpydata.image
        frame_idx, obs = numpydata.obs
        im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
        undistorted = undist.undistort_image(im)
        if count % 25 == 0:
            cv2.imwrite('/data/dev/camera/orig_{}.png'.format(count), im)

            # Draw the obstacle center.
            img_point = undist.project_point(obs.position)
            cv2.circle(undistorted, (int(img_point[0]), int(img_point[1])),
                       radius=5, color=(255, 0, 0), thickness=2)

            # Draw the bounding box corners.
            img_points = undist.project_points(obs.get_bbox().transpose())
            for img_point in img_points:
                cv2.circle(undistorted, (int(img_point[0]), int(img_point[1])),
                           radius=5, color=(0, 255, 0), thickness=2)

            cv2.imwrite('/data/dev/camera/undist_{}.png'.format(count), undistorted)
            output_count += 1
        count += 1
        if desired_count is not None and output_count == desired_count:
            return
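
# cc.CameraConverter is defined elsewhere in this repo. As a rough sketch of
# what project_point/project_points likely do, here is a minimal pinhole
# projection using cv2.projectPoints. The function name, the zero rvec/tvec
# (points assumed already in the camera frame), and the caller-supplied
# camera_matrix/dist_coeffs are assumptions, not the repo's actual calibration.
def project_points_sketch(points_3d, camera_matrix, dist_coeffs):
    import numpy as np
    # Identity extrinsics: assume 3D points are already in camera coordinates.
    rvec = np.zeros(3)
    tvec = np.zeros(3)
    img_points, _ = cv2.projectPoints(
        np.asarray(points_3d, dtype=np.float64).reshape(-1, 1, 3),
        rvec, tvec, camera_matrix, dist_coeffs)
    return img_points.reshape(-1, 2)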
def try_detector():
    detector = get_latest_detector()
    bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_front_left'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    multi = mb.MultiBagStream(bt, numpystream.generate_numpystream)
    generator = generate_birdseye_boxes_single(multi, infinite=False)
    for birdseye_box, yaw in generator:
        prediction = detector.detect_rotation(birdseye_box)
        print('gt_yaw: [{}], predicted_yaw: [{}]'.format(yaw, prediction))
def train_model(model):
    validation_batch_size = 128
    train_batch_size = 128

    bag_tracklets = multibag.find_bag_tracklets(
        '/data/didi/didi-round1/Didi-Release-2/Data/', '/old_data/output/tracklet/')

    # Good shuffle seeds: (7, 0.15)
    shuffleseed = 7
    multibag.shuffle(bag_tracklets, shuffleseed)
    split = multibag.train_validation_split(bag_tracklets, 0.15)

    validation_stream = multibag.MultiBagStream(split.validation_bags)
    validation_generator = TrainDataGenerator(validation_stream, include_ground_truth=True)

    training_stream = multibag.MultiBagStream(split.train_bags)
    training_generator = TrainDataGenerator(training_stream, include_ground_truth=True)

    print('train: ', training_generator.get_count(),
          ', validation: ', validation_generator.get_count())

    checkpoint_path = get_model_filename(
        CHECKPOINT_DIR, suffix='e{epoch:02d}-vl{val_loss:.2f}')

    # Set up callbacks. Stop early if the model does not improve. Save model checkpoints.
    # Source: http://stackoverflow.com/questions/37293642/how-to-tell-keras-stop-training-based-on-loss-value
    callbacks = [
        EarlyStopping(monitor='val_loss', patience=2, verbose=0),
        ModelCheckpoint(checkpoint_path, monitor='val_loss', save_best_only=False, verbose=0),
    ]

    hist = model.fit_generator(
        training_generator.generate(train_batch_size),
        # Integer division: Keras expects whole step counts.
        steps_per_epoch=(training_generator.get_count() // train_batch_size),
        epochs=100,
        # Values for quick testing:
        # steps_per_epoch=(128 // train_batch_size),
        # epochs=2,
        validation_data=validation_generator.generate(validation_batch_size),
        validation_steps=(validation_generator.get_count() // validation_batch_size),
        callbacks=callbacks)

    model.save(get_model_filename(MODEL_DIR))

    with open(get_model_filename(HISTORY_DIR, '', 'p'), 'wb') as f:
        pickle.dump(hist.history, f)
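
# get_model_filename is defined elsewhere in this module. From the three call
# sites above, a plausible sketch follows; the timestamp format, the 'model_'
# prefix, and the default 'h5' extension are assumptions, not the repo's
# actual implementation.
def get_model_filename_sketch(directory, suffix='', ext='h5'):
    import os
    from datetime import datetime
    timestamp = datetime.now().strftime('%Y%m%d-%H%M%S')
    if suffix:
        suffix = '-' + suffix
    # Note: a Keras placeholder suffix like 'e{epoch:02d}' passes through
    # untouched here, so ModelCheckpoint can fill it in later.
    return os.path.join(directory, 'model_{}{}.{}'.format(timestamp, suffix, ext))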
def try_augmenting_camera_boxes():
    camera_converter = cc.CameraConverter()
    bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_front_left'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    generator = generate_training_data_multi(bt)
    count = 0
    for bbox, label, im in generator:
        new_bbox, new_label = augment_example(bbox, label, camera_converter)
        print('bbox', bbox)
        print('new_bbox', new_bbox)
        print('label', label)
        print('new_label', new_label)
        count += 1
        if count == 10:
            return
def try_draw_panoramas():
    import cv2
    import matplotlib.pyplot as plt

    bagdir = '/data/bags/'
    # bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_front_left'
    # bagdir = '/data/bags/didi-round2/release/pedestrian/'
    # bag_file = '/data/bags/didi-round2/release/car/testing/ford02.bag'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    multi = mb.MultiBagStream(bt, ns.generate_numpystream)
    # numpystream = ns.generate_numpystream(bag_file, tracklet)
    generator = generate_panoramas_multi(multi)
    for im, bbox, obs in generator:
        cv2.rectangle(im, tuple(bbox[0]), tuple(bbox[1]), color=(255, 0, 0))
        # Stretch vertically so the thin panorama strip is easier to inspect.
        im = cv2.resize(im, (0, 0), fx=1.0, fy=8.0)
        plt.imshow(im)
        plt.show()
def try_rotating_images(train_dir):
    bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_front_left'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    multi = mb.MultiBagStream(bt, numpystream.generate_numpystream)
    generator = generate_birdseye_boxes_single(multi, infinite=False)
    count = 0
    frames_since_last_conversion = 0
    for birdseye_box, yaw in generator:
        if (yaw > (math.pi / 4) or yaw < (-math.pi / 4)) and frames_since_last_conversion > 10:
            # Try to undo rotation with negative yaw.
            rotated = rotate_image(birdseye_box, -yaw)
            # Expect car to have zero rotation in image.
            cv2.imwrite('rotate_test_{}.png'.format(count), rotated)
            print('count: {}, orig_yaw: {}'.format(count, yaw))
            count += 1
            frames_since_last_conversion = 0
            if count % 10 == 0:
                return
        else:
            frames_since_last_conversion += 1
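
# rotate_image is defined elsewhere in this repo. A minimal OpenCV sketch of
# the same idea follows: rotate the image about its center by an angle given
# in radians. The sign convention and default border handling are assumptions.
def rotate_image_sketch(image, angle_rad):
    height, width = image.shape[:2]
    center = (width / 2.0, height / 2.0)
    # cv2.getRotationMatrix2D expects degrees (counter-clockwise positive).
    matrix = cv2.getRotationMatrix2D(center, math.degrees(angle_rad), 1.0)
    return cv2.warpAffine(image, matrix, (width, height))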
def try_write():
    bagdir = '/data/bags/'
    # bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_front_left'
    bt = mb.find_bag_tracklets(bagdir, '/data/tracklets')
    multi = mb.MultiBagStream(bt, ns.generate_numpystream)
    write_train_data(multi, '/home/eljefec/repo/squeezeDet/data/KITTI/panorama912x48')
        crop = ci.crop_image(birdseye, expected_shape, new_shape)

        image_file = os.path.join(imagedir, '{:06d}.png'.format(id))
        imlib.save_np_image(crop, image_file, bbox_tuple)

        label_path = os.path.join(labeldir, '{:06d}.txt'.format(id))
        write_kitti_annotation(obs, birdseye_bbox, label_path)

        id += 1
        if id % 1000 == 0:
            print('Working... Processed {} samples.'.format(id))
            stopwatch.stop()
            print('Elapsed time: {}'.format(stopwatch.format_duration()))
            stopwatch.start()

    print('DONE. Processed {} samples.'.format(id))
    stopwatch.stop()
    print('Elapsed time: {}'.format(stopwatch.format_duration()))

if __name__ == '__main__':
    bagdir = '/data/bags/'
    # bagdir = '/data/bags/didi-round2/release/car/training/suburu_leading_at_distance'
    bag_tracklets = multibag.find_bag_tracklets(bagdir, '/data/tracklets')
    slice_config = ld.slice_config()
    generate_kitti(bag_tracklets,
                   '/home/eljefec/repo/squeezeDet/data/KITTI/training_64x64/image_2',
                   '/home/eljefec/repo/squeezeDet/data/KITTI/training_64x64/label_2',
                   output_bbox=False,
                   slice_config=slice_config)
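
# write_kitti_annotation, used in generate_kitti above, is defined elsewhere.
# For reference, a minimal sketch that writes one KITTI-format label line is
# below. The field mapping (zeroed truncation/occlusion/alpha and 3D fields)
# and the bbox layout are assumptions about this repo, not its actual code.
def write_kitti_annotation_sketch(object_type, bbox, label_path):
    # bbox is assumed to be ((xmin, ymin), (xmax, ymax)) in image pixels.
    (xmin, ymin), (xmax, ymax) = bbox
    fields = [object_type,              # type, e.g. 'Car'
              0.0, 0,                   # truncated, occluded
              0.0,                      # alpha (observation angle)
              xmin, ymin, xmax, ymax,   # 2D bounding box
              0.0, 0.0, 0.0,            # 3D dimensions: height, width, length
              0.0, 0.0, 0.0,            # 3D location: x, y, z
              0.0]                      # rotation_y
    with open(label_path, 'w') as f:
        f.write(' '.join(str(field) for field in fields) + '\n')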