def __init__(self, data_type, from_rgb_detection, use_multisweep, use_detected_2d=False):
    if data_type == "train":
        self.lyftd = load_train_data()
    elif data_type == "test":
        self.lyftd = load_test_data()
    else:
        raise ValueError("invalid data type. Valid dataset names are train or test")

    self.from_rgb_detection = from_rgb_detection
    self.data_type = data_type

    if self.from_rgb_detection:
        if use_detected_2d:
            from object_classifier import LoaderClassifier
            self.object_classifier = LoaderClassifier()
        else:
            from object_classifier import TLClassifier
            self.object_classifier = TLClassifier()
    else:
        self.object_classifier = None

    if self.from_rgb_detection:
        self.file_type = "rgb"
    else:
        self.file_type = "gt"

    self.use_multisweep = use_multisweep
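# Usage sketch (not from the original code): the enclosing class is not shown
# above, so "FrustumDataMaker" below is a hypothetical stand-in for it. The
# constructor picks the dataset split, the source of 2D boxes, and the sweep mode.
maker = FrustumDataMaker(
    data_type="test",          # "train" -> load_train_data(), "test" -> load_test_data()
    from_rgb_detection=True,   # use a 2D object classifier instead of ground-truth boxes
    use_multisweep=False,      # single lidar sweep
    use_detected_2d=False,     # True -> LoaderClassifier, False -> TLClassifier
)
assert maker.file_type == "rgb"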
def main(argv):
    lyftd = load_test_data()
    data_path, artifacts_path, _ = get_paths()
    det_path = os.path.join(artifacts_path, "detection")

    scenes_to_process = range(0, 218, 1)

    sp = SceneImagePathSaver(det_path, lyftd)

    from multiprocessing import Pool
    with Pool(processes=3) as p:
        p.map(sp.find_and_save_image_in_scene, scenes_to_process)
def plot_prediction_data():
    lyftd = load_test_data()
    pv = PredViewer(pred_file="prediction.csv", lyftd=lyftd)
    # test_token = lyftd.sample[2]['token']
    test_token = pv.pred_pd.index[1]

    pv.render_3d_lidar_points_to_camera_coordinates(test_token, prob_threshold=0.4)

    from object_classifier import TLClassifier
    tl = TLClassifier()
    draw_preprocess_results(test_sample_token=test_token, lyftd=lyftd, object_classifier=tl)
def plot_prediction_data():
    lyftd = load_test_data()
    pv = PredViewer(pred_file="test_pred.csv", lyftd=lyftd)
    # test_token = lyftd.sample[2]['token']
    test_token = pv.pred_pd.index[100]

    fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 5))
    pv.render_camera_image(ax[0], sample_token=test_token, prob_threshold=0.1)
    pv.render_lidar_points(ax[1], sample_token=test_token, prob_threshold=0.1)
    fig.savefig("./artifact/camera_top_view.png", dpi=600)

    pv.render_3d_lidar_points_to_camera_coordinates(test_token, prob_threshold=0.1)
def main(argv):
    inference_pickle_file = FLAGS.inference_file
    pred_csv_file = FLAGS.pred_file
    FROM_RGB_DETECTION = FLAGS.from_rgb_detection
    data_name = FLAGS.data_name

    if data_name == 'train':
        data = load_train_data()
    elif data_name == 'test':
        data = load_test_data()
    else:
        # Guard against an unset or invalid flag; otherwise `data` is undefined below.
        raise ValueError("data_name should be either train or test")

    pred_boxes = []
    sample_token_list = []
    for box, sample_token in read_frustum_pointnet_output_v2(data, inference_pickle_file):
        pred_boxes.append(box)
        sample_token_list.append(sample_token)

    write_output_csv(pred_boxes, sample_token_list, pred_csv_file)
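# Sketch of how the FLAGS read above could be defined; absl-py is assumed here
# from the main(argv)/FLAGS pattern, and the defaults are illustrative, not the
# project's actual values.
from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string("inference_file", None, "Pickle file written by frustum-pointnet inference")
flags.DEFINE_string("pred_file", "test_pred.csv", "Output CSV of predicted boxes")
flags.DEFINE_boolean("from_rgb_detection", True, "Whether inference ran on 2D RGB detections")
flags.DEFINE_string("data_name", "test", "Dataset split: 'train' or 'test'")

if __name__ == "__main__":
    app.run(main)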
def main(argv):
    logging.set_verbosity(logging.INFO)
    tlc = FastClassifer()

    if FLAGS.data_type == "test":
        lyftd = load_test_data()
        file_pat = "test_scene_{}_images.pickle"
    elif FLAGS.data_type == "train":
        lyftd = load_train_data()
        file_pat = "train_scene_{}_images.pickle"
    else:
        raise ValueError("data_type should be either test or train")

    scene_num = map(int, FLAGS.scenes)
    for s in scene_num:
        detect_image_in_scene(s, lyftd, tlc, file_pat)
def conv_net_model_test(save_dir, test_dir, output_dir0, output_dir1):
    """
    The feed-forward convolutional neural network model.

    Hyperparameters include the learning rate and the number of convolutional
    and fully connected layers (currently TBD).
    """
    # Reset graph
    tf.reset_default_graph()

    # Create placeholder for the input image batch
    x = tf.placeholder(dtype=tf.float32,
                       shape=[None, INPUT_IMAGE_DIMENSION, INPUT_IMAGE_DIMENSION, INPUT_IMAGE_CHANNELS],
                       name="x")

    # Weights and biases for the two convolutional layers and the fully connected layer
    weight1 = tf.Variable(tf.truncated_normal([4, 4, 3, 16], stddev=0.1), dtype=tf.float32, name="W1")
    bias1 = tf.Variable(tf.constant(0.1, shape=[16]), dtype=tf.float32, name="B1")
    weight2 = tf.Variable(tf.truncated_normal([4, 4, 16, 32], stddev=0.1), dtype=tf.float32, name="W2")
    bias2 = tf.Variable(tf.constant(0.1, shape=[32]), dtype=tf.float32, name="B2")
    weight3 = tf.Variable(tf.truncated_normal([4608, 2], stddev=0.1), dtype=tf.float32, name="W3")
    bias3 = tf.Variable(tf.constant(0.1, shape=[2]), dtype=tf.float32, name="B3")

    # First convolutional layer
    conv1 = ly.conv_layer(x, weight1, bias1, False)
    # First pooling
    pool1 = ly.pool_layer(conv1)
    # Second convolutional layer
    conv2 = ly.conv_layer(pool1, weight2, bias2, True)
    # Second pooling
    pool2 = ly.pool_layer(conv2)
    # Flatten (12 * 12 * 32 = 4608 features, matching W3)
    flattened = tf.reshape(pool2, shape=[-1, 12 * 12 * 32])
    # Fully connected layer producing the two-class logits
    logits = ly.fully_connected_layer(flattened, weight3, bias3)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Restore trained weights
        saver.restore(sess, save_dir)

        # Load the test images
        test_images = tdl.load_test_data(test_dir)
        coord = tf.train.Coordinator()

        # Run the model and take the predicted class for each image
        l = sess.run(tf.argmax(logits, 1), feed_dict={x: test_images})
        od.output(output_dir0, output_dir1, test_images, l)
        coord.request_stop()
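# Example invocation (a sketch; the paths below are placeholders, not paths used
# by the original project). save_dir must point at a checkpoint saved from the
# same graph defined above.
if __name__ == "__main__":
    conv_net_model_test(save_dir="./checkpoints/conv_net.ckpt",
                        test_dir="./test_images",
                        output_dir0="./output/class_0",
                        output_dir1="./output/class_1")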
def setUp(self) -> None:
    self.level5testdata = load_test_data()
    self.object_classifier = TLClassifier()
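# Illustrative surrounding test case (a sketch, not part of the original suite):
# the setUp above belongs to a unittest.TestCase, restated here so the example is
# self-contained; the test method only checks that the fixtures were constructed.
import unittest

class ObjectClassifierTest(unittest.TestCase):
    def setUp(self) -> None:
        self.level5testdata = load_test_data()
        self.object_classifier = TLClassifier()

    def test_fixtures_are_created(self):
        self.assertIsNotNone(self.level5testdata)
        self.assertIsNotNone(self.object_classifier)

if __name__ == "__main__":
    unittest.main()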