Example #1
def main(argv):
    from multiprocessing import Pool
    logging.set_verbosity(logging.INFO)

    sp = SceneProcessor(FLAGS.data_type,
                        from_rgb_detection=FLAGS.from_rgb,
                        use_multisweep=FLAGS.use_multisweep,
                        use_detected_2d=FLAGS.use_detected_2d)

    if "all" in FLAGS.scenes:
        scenes_to_process = range(218)  # all scene indices in the dataset
    elif "rest" in FLAGS.scenes:
        _, artifact_path, _ = get_paths()
        processed_scenes = sp.find_processed_scenes(artifact_path)
        scenes_to_process = [i for i in range(218) if i not in processed_scenes]
    else:
        scenes_to_process = [int(s) for s in FLAGS.scenes]

    if FLAGS.from_rgb:  # the object detector does not seem to support parallel processes
        if FLAGS.use_detected_2d:
            logging.info("parallel processing RGB data:")
            with Pool(processes=2) as p:
                p.map(sp.process_one_scene, scenes_to_process)
        else:
            for s in scenes_to_process:
                sp.process_one_scene(s)
    else:
        logging.info("parallel processing GT data:")
        with Pool(processes=7) as p:
            p.map(sp.process_one_scene, scenes_to_process)
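Example #1 reads its settings from absl flags. Below is a minimal sketch of how those flags might be declared; the flag names mirror the FLAGS attributes used above, while the defaults and help strings are assumptions rather than the repository's actual definitions.

from absl import app, flags, logging

FLAGS = flags.FLAGS
# Flag names match the attributes used in main(); defaults are assumed.
flags.DEFINE_string("data_type", "train", "Dataset split to process.")
flags.DEFINE_boolean("from_rgb", False, "Build frustums from 2D RGB detections.")
flags.DEFINE_boolean("use_multisweep", False, "Aggregate multiple lidar sweeps.")
flags.DEFINE_boolean("use_detected_2d", False, "Reuse pre-computed 2D detection results.")
flags.DEFINE_list("scenes", ["all"], "Scene indices to process, or 'all' / 'rest'.")

if __name__ == "__main__":
    app.run(main)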
Example #2

def main(argv):
    lyftd = load_test_data()
    data_path, artifacts_path, _ = get_paths()

    det_path = os.path.join(artifacts_path, "detection")

    scenes_to_process = range(218)
    sp = SceneImagePathSaver(det_path, lyftd)

    from multiprocessing import Pool
    with Pool(processes=3) as p:
        p.map(sp.find_and_save_image_in_scene, scenes_to_process)
Example #3
def list_all_files(data_dir=None, pat=r"scene_\d+_train\.tfrec"):
    if data_dir is None:
        _, artifact_path, _ = get_paths()
        data_dir = artifact_path

    files = []
    for file in os.listdir(data_dir):
        match = re.match(pat, file)
        if match:
            files.append(os.path.join(data_dir, file))

    return files
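A quick usage sketch for list_all_files, assuming the default pattern and the artifact directory configured in config_tool:

# Collect every per-scene training record under the artifact directory.
train_files = list_all_files()
print("found {} record files".format(len(train_files)))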
Example #4
    def process_one_scene(self, scene_num):
        _, artifact_path, _ = get_paths()
        with tf.io.TFRecordWriter(
                os.path.join(
                    artifact_path,
                    "scene_{0}_{1}_{2}.tfrec".format(scene_num, self.data_type,
                                                     self.file_type))) as tfrw:
            for fp in get_all_boxes_in_single_scene(
                    scene_number=scene_num,
                    from_rgb_detection=self.from_rgb_detection,
                    ldf=self.lyftd,
                    use_multisweep=self.use_multisweep,
                    object_classifier=self.object_classifier):
                tfexample = fp.to_train_example()
                tfrw.write(tfexample.SerializeToString())
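The records written by process_one_scene can be inspected with tf.data; this is a minimal sketch, using list_all_files from Example #3 to locate the record files. The feature spec of each serialized example depends on to_train_example, which is not shown in this excerpt.

import tensorflow as tf

# Read back the first few serialized examples from one record file.
record_files = list_all_files()  # from Example #3
dataset = tf.data.TFRecordDataset(record_files[:1])
for raw_example in dataset.take(3):
    example = tf.train.Example()
    example.ParseFromString(raw_example.numpy())
    print(list(example.features.feature.keys()))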
Example #5
def load_train_data():
    from config_tool import get_paths
    DATA_PATH, ARTIFACT_PATH, _ = get_paths()
    level5data_snapshot_file = "level5data.pickle"

    if os.path.exists(os.path.join(DATA_PATH, level5data_snapshot_file)):
        with open(os.path.join(DATA_PATH, level5data_snapshot_file), 'rb') as fp:
            level5data = pickle.load(fp)
    else:
        # First run: build the dataset object and cache it for later runs.
        level5data = LyftDataset(data_path=DATA_PATH,
                                 json_path=os.path.join(DATA_PATH, 'data/'),
                                 verbose=True)
        with open(os.path.join(DATA_PATH, level5data_snapshot_file), 'wb') as fp:
            pickle.dump(level5data, fp)

    return level5data
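load_train_data builds the LyftDataset once and caches it as a pickle, so subsequent runs skip the slow JSON parsing. A short usage sketch (the .scene table is part of the lyft_dataset_sdk devkit API):

level5data = load_train_data()
print("scenes in dataset:", len(level5data.scene))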
Example #6
def detect_image_in_scene(scene_num, lyftd, tlc, file_pat):
    data_path, artifacts_path, _ = get_paths()

    image_dir = os.path.join(data_path, "images")

    det_path = os.path.join(artifacts_path, "detection")
    if not os.path.exists(det_path):
        os.makedirs(det_path)

    logging.info("Run 2D detector on scene number :{}".format(scene_num))

    pre_processed_scene_image_file = os.path.join(det_path, file_pat.format(scene_num))
    if os.path.exists(pre_processed_scene_image_file):
        logging.info("use proprocessed paths")
        all_images = load_image_paths_in_scene(scene_num, det_path, file_pat)
    else:
        all_images = list(get_all_image_paths_in_single_scene(scene_number=scene_num, ldf=lyftd))

    for file in tqdm(all_images):
        head, tail = os.path.split(file)
        root, ext = os.path.splitext(tail)
        save_file = os.path.join(det_path, root + ".pickle")

        tlc.detect_and_save(image_path=os.path.join(image_dir, file), save_file=save_file)
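A hypothetical driver for Example #6: the file pattern below is a placeholder, not taken from the repository, and would have to match whatever SceneImagePathSaver from Example #2 writes.

lyftd = load_train_data()
tlc = TLClassifier()
for scene_num in range(218):
    # "scene_{}_images.txt" is a placeholder pattern, not from the repo.
    detect_image_in_scene(scene_num, lyftd, tlc, file_pat="scene_{}_images.txt")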
Example #7
    def test_load_pickle_file(self):
        data_path, artifacts_path, _ = get_paths()
        det_path = os.path.join(artifacts_path, "detection")
        file = "host-a101_cam4_1243095627316053006.pickle"
        root, ext = os.path.splitext(file)
        image_file_path = os.path.join(data_path, 'images', root + ".jpeg")
        import pickle
        with open(os.path.join(det_path, file), 'rb') as fp:
            det = pickle.load(fp)

        image_np = imread(image_file_path)

        sel_id = det['scores'] > 0.5  # keep detections above the confidence threshold
        sel_classes = det['classes'][sel_id]

        strings = [[
            g_type_object_of_interest[map_2d_detector[int(sel_classes[i])]]
        ] for i in range(sel_classes.shape[0])]

        draw_bounding_boxes_on_image_array(image_np,
                                           det['boxes'][sel_id],
                                           display_str_list_list=strings)

        imsave("./artifact/test_read_pickle.png", image_np)
Example #8
    return cropped_image


def classify_all_boxes_in_image(image_np, boxes):
    result_index_array = np.zeros(boxes.shape[0], dtype=int)  # np.int was removed in NumPy 1.24
    for i, box in enumerate(boxes):
        cropped_image = crop_roi_image(image_np, box)
        result_color_index, _ = classify_color_cropped_image(cropped_image)
        result_index_array[i] = result_color_index

    return result_index_array
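A usage sketch for classify_all_boxes_in_image with dummy inputs; the box layout (normalized [ymin, xmin, ymax, xmax], the TensorFlow Object Detection convention) is an assumption based on how the detections are produced.

import numpy as np

image_np = np.zeros((600, 800, 3), dtype=np.uint8)  # dummy RGB image
boxes = np.array([[0.1, 0.1, 0.4, 0.3],             # two dummy normalized boxes
                  [0.5, 0.5, 0.9, 0.8]])
color_indices = classify_all_boxes_in_image(image_np, boxes)
print(color_indices)  # one color class index per box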


from config_tool import get_paths

data_path, artifacts_path, _ = get_paths()

detection_path = os.path.join(artifacts_path, "detection")

import pickle


def load_detection_boxes(detection_data_path,
                         image_path,
                         score_threshold=[0.4] * 9,
                         target_classes=list(range(1, 10)),
                         rearrange_to_pointnet_convention=True,
                         output_target_class=False):
    assert len(score_threshold) == len(target_classes)

Example #9
from run_prepare_lyft_data import SceneProcessor

from config_tool import get_paths

_, artifact_path, _ = get_paths()

sp = SceneProcessor("train", from_rgb_detection=False)

numbers = sp.find_processed_scenes(artifact_path)

print(numbers)
Example #10
import pandas as pd

from test_making_inference import ScoreCalculator
from config_tool import get_paths
import os
import numpy as np

data_path, _, _ = get_paths()

gt_file = os.path.join(data_path, "train.csv")

pred_file = "train_val_pred.csv"

sc = ScoreCalculator(pred_csv_file=pred_file, gt_csv_file=gt_file)

scores = sc.calculate_single_entry(1)

mean_score = sc.calculate_mean_ious()

print(scores)
print(mean_score)

iou_levels = [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]

for il in iou_levels:
    avg_precision = sc.calculate_average_precision(il)
    print("iou level:{}, avg precision: {}".format(il, np.mean(avg_precision)))
Example #11
def inference_new(sess, ops):
    feed_dict = {ops['is_training_pl']: False}

    ep = ops['end_points']
    from config_tool import get_paths
    _, artifact_path, _ = get_paths()
    with tf.io.TFRecordWriter(
            os.path.join(artifact_path, "inference_results.tfrec")) as tfrw:
        for count_num in itertools.count():
            try:
                batch_logits, batch_centers, \
                batch_heading_scores, batch_heading_residuals, \
                batch_size_scores, batch_size_residuals, \
                camera_token_bytes_string, sample_token_bytes_string, \
                point_clouds, seg_labels, frustum_angle, type_name_bytes_string = \
                    sess.run([ops['logits'], ops['center'],
                              ep['heading_scores'], ep['heading_residuals'],
                              ep['size_scores'], ep['size_residuals'],
                              ops['camera_token'], ops['sample_token'],
                              ops['pointclouds_pl'], ops['labels_pl'],
                              ops['frustum_angle'], ops['type_name']],
                             feed_dict=feed_dict)

                # Compute scores
                batch_seg_prob = softmax(batch_logits)[:, :, 1]  # BxN
                batch_seg_mask = np.argmax(batch_logits, 2)  # BxN
                mask_mean_prob = np.sum(batch_seg_prob * batch_seg_mask,
                                        1)  # B,
                mask_mean_prob = mask_mean_prob / np.sum(batch_seg_mask,
                                                         1)  # B,
                heading_prob = np.max(softmax(batch_heading_scores), 1)  # B
                size_prob = np.max(softmax(batch_size_scores), 1)  # B,

                # the batch score combines the segmentation mask, heading, and size scores
                batch_scores = np.log(mask_mean_prob) + np.log(
                    heading_prob) + np.log(size_prob)
                batch_prob = (mask_mean_prob + heading_prob + size_prob) / 3

                heading_cls = np.argmax(softmax(batch_heading_scores), 1)  # B
                size_cls = np.argmax(softmax(batch_size_scores), 1)  # B

                current_batch_size = batch_logits.shape[0]
                for batch_index in range(current_batch_size):
                    heading_res = batch_heading_residuals[
                        batch_index, heading_cls[batch_index]]
                    size_res = batch_size_residuals[batch_index,
                                                    size_cls[batch_index], :]

                    example_msg = get_inference_results_tfexample(
                        point_cloud=point_clouds[batch_index, ...],
                        seg_label=seg_labels[batch_index, ...],
                        seg_label_logits=batch_logits[batch_index, ...],
                        box_center=batch_centers[batch_index, ...],
                        heading_angle_class=heading_cls[batch_index, ...],
                        heading_angle_residual=heading_res,
                        size_class=size_cls[batch_index, ...],
                        size_residual=size_res,
                        frustum_angle=frustum_angle[batch_index, ...],
                        score=batch_prob[batch_index, ...],
                        camera_token=camera_token_bytes_string[batch_index].decode('utf8'),
                        sample_token=sample_token_bytes_string[batch_index].decode('utf8'),
                        type_name=type_name_bytes_string[batch_index].decode('utf8'))

                    tfrw.write(example_msg.SerializeToString())

                # Finished computing scores
            except tf.errors.OutOfRangeError:
                break
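inference_new calls a softmax helper that is not part of this excerpt. Below is a numerically stable sketch consistent with how it is used above (softmax over the last axis); the original repository's definition may differ.

import numpy as np

def softmax(x):
    # Subtract the max along the last axis for numerical stability.
    shifted = x - np.max(x, axis=-1, keepdims=True)
    exp = np.exp(shifted)
    return exp / np.sum(exp, axis=-1, keepdims=True)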
Example #12
import warnings

try:
    tlc = TLClassifier()
except Exception:
    print("Error occurred when importing object classification models")

# Load the dataset
# Adjust the dataroot parameter below to point to your local dataset path.
# The correct dataset path contains at least the following four folders (or similar): images, lidar, maps, v1.0.1-train

from prepare_lyft_data_v2 import load_train_data

level5data = load_train_data()

DATA_PATH, ARTIFACT_PATH, _ = get_paths()

default_train_file = os.path.join(DATA_PATH, "train.csv")


def parse_train_csv(data_file=default_train_file, with_score=False):
    train = pd.read_csv(data_file)

    object_columns = ['sample_id', 'object_id', 'center_x', 'center_y', 'center_z',
                      'width', 'length', 'height', 'yaw', 'class_name']
    objects = []
    col_num = 8
    if with_score:
        col_num = 9
    for sample_id, ps in tqdm(train.values):
        if not isinstance(ps, str):  # skip rows whose prediction string is missing (NaN)