Example #1
def get_all_detections(idx, persp_id, results, filter_area=False):
    all_perspect_detections = []

    # Load predictions from persp_id vehicle
    persp_dir = get_folder(persp_id)
    perspect_detections = get_detections(persp_dir, persp_dir, idx, persp_id,
                                         persp_id, results, filter_area)
    if perspect_detections is not None and len(perspect_detections) > 0:
        all_perspect_detections.append(perspect_detections)

    # Load detections from cfg.DATASET_DIR if ego_vehicle is not the persp_id
    if persp_id != const.ego_id():
        perspect_detections = get_detections(persp_dir, cfg.DATASET_DIR, idx,
                                             persp_id, const.ego_id(), results,
                                             filter_area)
        if perspect_detections is not None and len(perspect_detections) > 0:
            all_perspect_detections.append(perspect_detections)

    # Load detections from remaining perspectives
    for entity_str in const.valid_perspectives():
        other_persp_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        if os.path.isdir(other_persp_dir):
            # Skip own detections since they're loaded first
            if int(entity_str) != persp_id:
                perspect_detections = get_detections(persp_dir,
                                                     other_persp_dir,
                                                     idx, persp_id,
                                                     int(entity_str), results,
                                                     filter_area)
                if perspect_detections is not None and len(perspect_detections) > 0:
                    all_perspect_detections.append(perspect_detections)

    return all_perspect_detections
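A usage sketch (illustrative, not from the original code): gather every perspective's detections for sample index 0 as seen by the ego vehicle, then flatten the nested per-perspective lists with trust_utils.strip_objs_lists, the helper referenced in Example #14.

all_dets = get_all_detections(0, const.ego_id(), results=False,
                              filter_area=True)
flat_dets = trust_utils.strip_objs_lists(all_dets)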
Example #2
def get_folder(persp_id):
    if persp_id == const.ego_id():
        return cfg.DATASET_DIR
    else:
        persp_dir = cfg.DATASET_DIR + '/alt_perspective' + '/{:07d}/'.format(
            persp_id)
        return persp_dir
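# e.g. get_folder(37) returns cfg.DATASET_DIR + '/alt_perspective/0000037/'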
Example #3
def get_own_vehicle_object(persp_dir, idx, persp_id):
    ego_dir = persp_dir + '/ego_object/'
    ego_detection = obj_utils.read_labels(ego_dir, idx)
    ego_detection[0].score = 1.0
    ego_detection[0].id = persp_id

    if const.ego_id() == persp_id:
        # These weren't set in this version of synthetic data (TODO)
        ego_detection[0].t = (0, ego_detection[0].h, 0)
        ego_detection[0].ry = -math.pi / 2
    else:
        # Need to convert to perspective coordinates if perspective is not ego vehicle
        # All ego_object objects are in ego-vehicle coordinates
        to_world(ego_detection, get_folder(const.ego_id()), idx)
        to_perspective(ego_detection, persp_dir, idx)

    return ego_detection
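# Coordinate flow for non-ego perspectives: ego_object labels are stored in
# the ego vehicle's frame, so they are first lifted to world coordinates and
# then projected into the requesting perspective's frame:
#   ego frame --to_world--> world frame --to_perspective--> perspective frame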
Example #4
def main():
    velo_dir = cfg.DATASET_DIR + '/velodyne'

    # Do this for every sample index
    velo_files = os.listdir(velo_dir)

    for file in velo_files:
        idx = int(os.path.splitext(file)[0])

        if idx < cfg.MIN_IDX or idx > cfg.MAX_IDX:
            continue

        ego_dir = p_utils.get_folder(const.ego_id())
        gt_objects = p_utils.get_detections(ego_dir, ego_dir, idx, const.ego_id(), const.ego_id(), results=False, filter_area=True)

        pc = points_3d.get_nan_point_cloud(ego_dir, idx)

        obj_invalid = []
        obj_valid = []

        for obj in gt_objects:
            if not (obj.obj.type == 'Car' or obj.obj.type == 'Pedestrian'):
                continue

            obj_pos = np.asanyarray(obj.obj.t)
            obj_dist = np.sqrt(np.dot(obj_pos, obj_pos.T))
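            # Euclidean distance of the object position from the coordinate
            # origin; equivalent to np.linalg.norm(obj_pos)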
            num_points = points_3d.numPointsIn3DBox(obj.obj, pc, ego_dir, idx)
            if not plausibility_checker.is_plausible(obj.obj, idx, const.ego_id(), obj.det_idx):
                print("Not plausible at dist: ", obj_dist, num_points)
                obj_invalid.append(obj.obj)
            else:
                # print("Plausible at dist: ", obj_dist, num_points)
                obj_valid.append(obj.obj)

        if len(obj_invalid) > 0:
            vis_objects.visualize_objects(obj_invalid, idx, False, False, -1, compare_with_gt=False, show_image=False)
Example #5
def aggregate_msgs(matching_objs, trust_dict, idx):
    final_dets = []

    msg_evals_dict = msg_evals.load_agg_msg_evals(idx)

    if cfg.VISUALIZE_AGG_EVALS:
        for match_list in matching_objs:
            for trust_obj in match_list:
                if trust_obj.detector_id in msg_evals_dict:
                    evals = msg_evals_dict[trust_obj.detector_id]
                    if trust_obj.det_idx in evals:
                        trust_obj.obj.score = evals[trust_obj.det_idx]
                        print("Setting trust_obj score to: ",
                              trust_obj.obj.score)
        # print(matching_objs[0][0].obj.score)
        vis_matches.visualize_matches(matching_objs,
                                      idx,
                                      cfg.USE_RESULTS,
                                      False,
                                      -1,
                                      vis_eval_scores=True)

    for match_list in matching_objs:
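        # Each match_list holds one trust object per perspective that detected
        # the same physical object; match_list[0] is the detection kept for
        # output and its score is overwritten with the aggregated value below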
        # Do not add self to the list of detections
        if (match_list[0].detector_id == const.ego_id()
                and match_list[0].det_idx == 0):
            logging.debug("Skipping self detection")
            continue

        match_list[0].obj.score = aggregate_score(match_list, trust_dict, idx,
                                                  msg_evals_dict)
        final_dets.append(match_list[0].obj)
        logging.debug("Adding multi object: {}".format(match_list[0].obj.t))

    if cfg.VISUALIZE_FINAL_DETS:
        vis_objects.visualize_objects(final_dets,
                                      idx,
                                      cfg.USE_RESULTS,
                                      False,
                                      -1,
                                      vis_scores=True)

    return final_dets
Example #6
def save_false_dets():

    print("Beginning save of false detections")

    # First for the ego vehicle
    save_false_dets_persp(cfg.DATASET_DIR, const.ego_id())

    # Then for all the alternate perspectives
    persp_count = len(os.listdir(cfg.ALT_PERSP_DIR))
    persp_idx = 0
    for entity_str in const.valid_perspectives():
        persp_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        save_false_dets_persp(persp_dir, int(entity_str))

        persp_idx += 1
        sys.stdout.write(
            '\rFinished saving detections for perspective {}: {} / {}'.format(
                int(entity_str), persp_idx, persp_count))
        sys.stdout.flush()
Example #7
def save_points_in_3d_boxes(trust_objs, idx, perspect_dir, persp_id):
    if trust_objs is None:
        logging.debug("trust_objs is none")
        return

    det_count = 0
    for obj_list in trust_objs:
        det_count += len(obj_list)

    logging.debug(
        "Saving points_in_3d_boxes for id: {} at idx: {}".format(
            persp_id, idx))
    # Save to text file
    file_path = p_utils.get_folder(persp_id) + '/{}/{:06d}.txt'.format(
        cfg.POINTS_IN_3D_BOXES_DIR, idx)
    std_utils.make_dir(file_path)
    logging.debug("Writing points_in_3d_boxes to file: %s", file_path)

    with open(file_path, 'a+') as f:
        pc = get_nan_point_cloud(perspect_dir, idx)
        for obj_list in trust_objs:
            for trust_obj in obj_list:
                num_points = numPointsIn3DBox(trust_obj.obj, pc, perspect_dir,
                                              idx)

                # For testing saving/loading
                if cfg.VISUALIZE_POINTS_IN_3D_BOXES:
                    trust_obj.evaluator_3d_points = num_points

                # Fill the array to write
                output = np.zeros([1, 3])
                output[0, 0] = trust_obj.detector_id
                output[0, 1] = trust_obj.det_idx
                output[0, 2] = num_points

                np.savetxt(f, output, newline='\r\n', fmt='%i %i %i')

        # Visualize evaluations by setting config option to True
        if cfg.VISUALIZE_POINTS_IN_3D_BOXES:
            alt_persp = persp_id != const.ego_id()
            vis_matches.visualize_matches(trust_objs, idx, cfg.USE_RESULTS,
                                          alt_persp, persp_id,
                                          vis_eval_scores=True)
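The rows written above can be read back into the detector_id -> det_idx -> point-count mapping used by the visualization code. A minimal loading sketch (hypothetical helper; the codebase's own loader is points_in_3d_boxes.load_points_in_3d_boxes, used in Example #14):

import numpy as np

def load_points_in_3d_boxes_sketch(file_path):
    # Each row is "detector_id det_idx num_points", as written by np.savetxt above
    points_dict = {}
    data = np.loadtxt(file_path, dtype=int, ndmin=2)
    for det_id, det_idx, num_points in data:
        points_dict.setdefault(det_id, {})[det_idx] = num_points
    return points_dict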
Example #8
def compute_points_in_3d_boxes():

    print("Beginning calculation of points_in_3d_boxes")

    std_utils.delete_all_subdirs(cfg.POINTS_IN_3D_BOXES_DIR)

    # First for the ego vehicle
    compute_perspect_points_in_3d_boxes(cfg.DATASET_DIR, const.ego_id())

    # Then for all the alternate perspectives
    persp_count = len(os.listdir(cfg.ALT_PERSP_DIR))
    persp_idx = 0
    for entity_str in const.valid_perspectives():
        perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
        compute_perspect_points_in_3d_boxes(perspect_dir, int(entity_str))

        persp_idx += 1
        sys.stdout.write(
            '\rFinished point count for perspective {}: {} / {}'.format(
                int(entity_str), persp_idx, persp_count))
        sys.stdout.flush()
Example #9
def compute_final_detections():
    print("Aggregate method: ", cfg.AGGREGATE_METHOD)
    std_utils.delete_subdir(cfg.FINAL_DETS_SUBDIR)
    std_utils.delete_subdir(cfg.FINAL_DETS_SUBDIR_AF)

    velo_dir = cfg.DATASET_DIR + '/velodyne'

    # Do this for every sample index
    velo_files = os.listdir(velo_dir)

    for file in velo_files:
        idx = int(os.path.splitext(file)[0])

        if idx < cfg.MIN_IDX or idx > cfg.MAX_IDX:
            continue

        trust_dict = v_trust.load_vehicle_trust_objs(idx)

        perspect_trust_objs = p_utils.get_all_detections(
            idx, const.ego_id(), results=cfg.USE_RESULTS, filter_area=False)

        # Find matching pairs
        # Returns a list of lists of objects which have been matched
        matching_objs = matching_utils.match_iou3ds(perspect_trust_objs,
                                                    only_ego_matches=False)

        logging.debug("Matching objects!!!!!!!!!!!!!!!!!!!!!!!!!")
        logging.debug(matching_objs)
        # Aggregate messages into final detections
        final_dets = aggregate_msgs(matching_objs, trust_dict, idx)
        logging.debug("Final detections!!!!!!!!!!!!!!!!!!!!!")
        logging.debug(final_dets)

        output_final_dets(final_dets, idx)

    print("Finished computing final detections")
Example #10
def calculate_vehicle_trusts():

    # Before calculating, first delete all previous vehicle trust values
    std_utils.delete_subdir(cfg.V_TRUST_SUBDIR)

    # Initialize dictionary for vehicle trust values
    # Entity ID/VehicleTrust object pairs
    trust_dict = {}

    velo_dir = cfg.DATASET_DIR + '/velodyne'
    for idx in range(cfg.MIN_IDX, cfg.MAX_IDX + 1):
        filepath = velo_dir + '/{:06d}.bin'.format(idx)

        if not os.path.isfile(filepath):
            logging.debug("Could not find file: %s", filepath)
            logging.debug("Stopping at idx: %d", idx)
            break

        # Load stale trust dict if we need it (past msg fresh period)
        stale_trust_dict = {}
        if (idx - cfg.STALE_EVALS_TIME) >= 0:
            stale_trust_dict = load_vehicle_trust_objs(idx -
                                                       cfg.STALE_EVALS_TIME)

        # First for the ego vehicle
        compute_vehicle_trust(cfg.DATASET_DIR, const.ego_id(), idx, trust_dict,
                              stale_trust_dict)

        # Then for all the alternate perspectives
        for entity_str in const.valid_perspectives():
            perspect_dir = os.path.join(cfg.ALT_PERSP_DIR, entity_str)
            compute_vehicle_trust(perspect_dir, int(entity_str), idx,
                                  trust_dict, stale_trust_dict)

        write_trust_vals(trust_dict, idx)

    print("Finished calculating vehicle trusts")
Example #11
def aggregate_score(match_list, trust_dict, idx, msg_evals_dict):

    final_score = 0.0

    # TODO potentially add local threshold to simply believe local
    # detections with a high score
    # if cfg.LOCAL_THRESHOLD <= 1 and \
    #         match_list[0].detector_id == const.ego_id() and \
    #         match_list[0].obj.score >= cfg.LOCAL_THRESHOLD:
    #     final_score += match_list[0].obj.score

    # Aggregate based on weighted average of scores
    if cfg.AGGREGATE_METHOD == 0:
        count = 0
        num = 0
        den = 0
        for trust_obj in match_list:
            weight = trust_obj.detector_certainty * v_trust.vehicle_trust_value(
                trust_dict, trust_obj.detector_id)
            num += trust_obj.obj.score * weight
            den += weight
            count += 1

        if den == 0:
            final_score = 0
        else:
            # Note: dividing by count * den (not just den) additionally damps
            # the weighted average by the number of corroborating detections
            final_score = num / (count * den)

    # Aggregate additively on weighted scores
    elif cfg.AGGREGATE_METHOD == 1:
        for trust_obj in match_list:
            weight = trust_obj.detector_certainty * v_trust.vehicle_trust_value(
                trust_dict, trust_obj.detector_id)
            final_score += trust_obj.obj.score * weight

    # TruPercept 1
    # Aggregate based on overall message evaluations
    elif cfg.AGGREGATE_METHOD == 2:
        den = 0
        num = 0.0
        for trust_obj in match_list:
            found = False
            if trust_obj.detector_id in msg_evals_dict:
                if trust_obj.det_idx in msg_evals_dict[trust_obj.detector_id]:
                    num += msg_evals_dict[trust_obj.detector_id][
                        trust_obj.det_idx]
                    found = True

            if not found and trust_obj.detector_id == const.ego_id():
                num += trust_obj.obj.score

            den += 1

        if den == 0:
            final_score = 0
        else:
            final_score = num / den

        # No need to plausibility check ego-vehicle detections or null detections
        if match_list[0].detector_id != const.ego_id() and final_score > 0:
            if not plausibility_checker.is_plausible(match_list[0].obj, idx,
                                                     match_list[0].detector_id,
                                                     match_list[0].det_idx):
                final_score = 0.0

    # BA 1
    elif cfg.AGGREGATE_METHOD == 3:
        final_score = 1.0
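    # (AGGREGATE_METHOD 3 is a baseline that fully trusts every matched
    # detection; methods 4-6 tighten the conditions, requiring corroboration
    # by a second perspective and/or an ego-vehicle detection)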

    # BA 2
    elif cfg.AGGREGATE_METHOD == 4:
        if len(match_list) > 1:
            final_score = 1.0
        else:
            final_score = match_list[0].obj.score

    # BA 3
    elif cfg.AGGREGATE_METHOD == 5:
        if len(match_list) > 1:
            final_score = 1.0
        elif match_list[0].detector_id == const.ego_id():
            final_score = match_list[0].obj.score

    # BA 4 - This one seems to work the best
    elif cfg.AGGREGATE_METHOD == 6:
        if match_list[0].detector_id == const.ego_id():
            if len(match_list) > 1:
                final_score = 1.0
            else:
                final_score = match_list[0].obj.score

    elif cfg.AGGREGATE_METHOD == 7:
        if match_list[0].detector_id == const.ego_id():
            final_score = match_list[0].obj.score

        # Sanity check: the ego vehicle should never match with its own detections
        for obj in match_list[1:]:
            if obj.detector_id == const.ego_id():
                print("Warning: ego-vehicle detections matched with each other")

    # Aggregate additively
    # Ego vehicle gets weight of 1
    # Other vehicles weighted at 0.5
    elif cfg.AGGREGATE_METHOD == 8:
        if match_list[0].detector_id == const.ego_id():
            final_score = match_list[0].obj.score

        for trust_obj in match_list:
            final_score += trust_obj.obj.score

        final_score = final_score / 2

    # Aggregate based on overall message evaluations
    # Same as 2 but average msg evals with ego vehicle confidence
    elif cfg.AGGREGATE_METHOD == 9:
        den = 0.0
        num = 0.0
        for trust_obj in match_list:
            found = False
            if trust_obj.detector_id in msg_evals_dict:
                if trust_obj.det_idx in msg_evals_dict[trust_obj.detector_id]:
                    num += msg_evals_dict[trust_obj.detector_id][
                        trust_obj.det_idx]
                    found = True

            if not found and trust_obj.detector_id == const.ego_id():
                num += trust_obj.obj.score

            den += 1.0

        if den == 0:
            final_score = 0
        else:
            final_score = num / den

        # Bias the detection towards the local detection score
        if match_list[0].detector_id == const.ego_id():
            final_score += match_list[0].obj.score
            final_score /= 2

        # No need to plausibility check ego-vehicle detections or null detections
        if match_list[0].detector_id != const.ego_id() and final_score > 0:
            if not plausibility_checker.is_plausible(match_list[0].obj, idx,
                                                     match_list[0].detector_id,
                                                     match_list[0].det_idx):
                final_score = 0.0

    # TruPercept 2
    elif cfg.AGGREGATE_METHOD == 10:
        den = 0.0
        num = 0.0
        for trust_obj in match_list:
            found = False
            if trust_obj.detector_id in msg_evals_dict:
                if trust_obj.det_idx in msg_evals_dict[trust_obj.detector_id]:
                    num += msg_evals_dict[trust_obj.detector_id][
                        trust_obj.det_idx]
                    found = True

            if not found:
                num += trust_obj.obj.score

            den += 1.0

        if den == 0:
            final_score = 0.0
        else:
            final_score = num / den

        # No need to plausibility check ego-vehicle detections or null detections
        if match_list[0].detector_id != const.ego_id() and final_score > 0:
            if not plausibility_checker.is_plausible(match_list[0].obj, idx,
                                                     match_list[0].detector_id,
                                                     match_list[0].det_idx):
                final_score = 0.0

    # Only use ego vehicle detections but adjust position
    elif cfg.AGGREGATE_METHOD == 11:
        # No need to plausibility check ego-vehicle detections or null detections
        if match_list[0].detector_id == const.ego_id():
            final_score = match_list[0].obj.score
            min_dist = sys.float_info.max
            for trust_obj in match_list:
                if trust_obj.detector_dist < min_dist:
                    min_dist = trust_obj.detector_dist
                    # Use the position/orientation reported by the closest detector
                    match_list[0].obj.t = trust_obj.obj.t
                    match_list[0].obj.ry = trust_obj.obj.ry

    # TruPercept 3
    # Ego-vehicle if visible in range, otherwise use trupercept
    elif cfg.AGGREGATE_METHOD == 12:
        trust_obj = match_list[0]
        # No need to plausibility check ego-vehicle detections or null detections
        if trust_obj.detector_id == const.ego_id():
            final_score = trust_obj.obj.score
        else:
            # Check whether the object is within lidar range
            obj_pos = np.asanyarray(trust_obj.obj.t)
            obj_dist = np.sqrt(np.dot(obj_pos, obj_pos.T))
            if obj_dist < cfg.MAX_LIDAR_DIST:
                # exclude if >= 10 points in box (Should detect if visible)
                if trust_obj.evaluator_3d_points < 10:
                    # if not many points in box then add if it is plausible
                    if plausibility_checker.is_plausible(
                            match_list[0].obj, idx, match_list[0].detector_id,
                            match_list[0].det_idx):
                        final_score = trust_obj.obj.score
            else:
                if trust_obj.detector_id in msg_evals_dict:
                    if trust_obj.det_idx in msg_evals_dict[
                            trust_obj.detector_id]:
                        final_score = msg_evals_dict[trust_obj.detector_id][
                            trust_obj.det_idx]

    # Ego-vehicle if visible in range, otherwise use trupercept. With position corrections.
    elif cfg.AGGREGATE_METHOD == 13:
        trust_obj = match_list[0]
        # No need to plausibility check ego-vehicle detections or null detections
        if trust_obj.detector_id == const.ego_id():
            final_score = trust_obj.obj.score
        else:
            # Check whether the object is within lidar range
            obj_pos = np.asanyarray(trust_obj.obj.t)
            obj_dist = np.sqrt(np.dot(obj_pos, obj_pos.T))
            if obj_dist < cfg.MAX_LIDAR_DIST:
                # exclude if >= 10 points in box (Should detect if visible)
                if trust_obj.evaluator_3d_points < 10:
                    # if not many points in box then add if it is plausible
                    if plausibility_checker.is_plausible(
                            match_list[0].obj, idx, match_list[0].detector_id,
                            match_list[0].det_idx):
                        final_score = trust_obj.obj.score
            else:
                if trust_obj.detector_id in msg_evals_dict:
                    if trust_obj.det_idx in msg_evals_dict[
                            trust_obj.detector_id]:
                        final_score = msg_evals_dict[trust_obj.detector_id][
                            trust_obj.det_idx]

        if final_score > 0.0:
            min_dist = sys.float_info.max
            for obj in match_list:
                if obj.detector_dist < min_dist:
                    min_dist = obj.detector_dist
                    # Use the position/orientation reported by the closest detector
                    match_list[0].obj.t = obj.obj.t
                    match_list[0].obj.ry = obj.obj.ry

    else:
        print("Error: Aggregation method is not properly set!!!")

    # Ensure final_score is within proper range
    final_score = min(final_score, 1.0)
    final_score = max(final_score, 0.0)

    logging.debug("Final detection aggregation. Idx: {}  pos: {}".format(
        idx, match_list[0].obj.t))
    for trust_obj in match_list:
        eval_dict_score = 0.0
        if trust_obj.detector_id in msg_evals_dict:
            if trust_obj.det_idx in msg_evals_dict[trust_obj.detector_id]:
                eval_dict_score = msg_evals_dict[trust_obj.detector_id][
                    trust_obj.det_idx]

        logging.debug(
            "Rec detection det_id: {}, det_idx: {} score: {}, certainty: {}, msg_eval_dict: {}"
            .format(trust_obj.detector_id, trust_obj.det_idx,
                    trust_obj.obj.score, trust_obj.detector_certainty,
                    eval_dict_score))

    logging.debug("Final score: {}".format(final_score))
    return final_score
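For intuition, a worked instance of AGGREGATE_METHOD == 0 with illustrative values:

# Two matched detections: scores 0.9 and 0.6, detector certainties 1.0 and
# 0.5, vehicle trust values 1.0 and 0.8:
#   weights: 1.0 * 1.0 = 1.0 and 0.5 * 0.8 = 0.4
#   num = 0.9 * 1.0 + 0.6 * 0.4 = 1.14
#   den = 1.0 + 0.4 = 1.4, count = 2
#   final_score = num / (count * den) = 1.14 / 2.8 ≈ 0.41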
Example #12
def visualize_objects(objects,
                      img_idx,
                      show_results,
                      alt_persp,
                      perspID,
                      fulcrum_of_points=True,
                      use_intensity=False,
                      view_received_detections=True,
                      receive_from_perspective=-1,
                      only_receive_dets=False,
                      compare_pcs=False,
                      show_3d_point_count=False,
                      show_orientation=cfg.VISUALIZE_ORIENTATION,
                      final_results=False,
                      show_score=False,
                      compare_with_gt=True,
                      show_image=True,
                      vis_scores=False):

    if cfg.VISUALIZE_AGG_EVALS:
        show_image = False

    # Setting Paths
    cam = 2
    dataset_dir = cfg.DATASET_DIR
    print("dataset_dir: ", cfg.DATASET_DIR)

    if img_idx == -1:
        print(
            "Please set the TEST_IDX in the config.py file to see a specific index."
        )
        img_idx = random.randint(0, 101)
        print("Using random index: ", img_idx)

    perspStr = '%07d' % perspID
    altPerspect_dir = os.path.join(dataset_dir, 'alt_perspective')
    if alt_persp:
        dataset_dir = dataset_dir + '/alt_perspective/' + perspStr
    else:
        perspID = const.ego_id()

    if show_results:
        label_dir = os.path.join(dataset_dir, 'predictions')
    else:
        label_dir = os.path.join(dataset_dir, 'label_2')

    COLOUR_SCHEME = {
        "Car": (0, 0, 255),  # Blue
        "Pedestrian": (255, 0, 0),  # Red
        "Bus": (0, 0, 255),  # Blue
        "Cyclist": (150, 50, 100),  # Purple
        "Van": (255, 150, 150),  # Peach
        "Person_sitting": (150, 200, 255),  # Sky Blue
        "Truck": (0, 0, 255),  # Blue
        "Tram": (150, 150, 150),  # Grey
        "Misc": (100, 100, 100),  # Dark Grey
        "DontCare": (255, 255, 255),  # White
        "Received": (255, 150, 150),  # Peach
        "OwnObject": (51, 255, 255),  # Cyan
        "GroundTruth": (0, 255, 0),  # Green
    }

    # Set up text labels for per-object scores if requested
    if vis_scores:
        text_positions = []
        text_labels = []
    else:
        text_positions = None
        text_labels = None

    if vis_scores and objects is not None:
        for obj in objects:
            text_positions.append(obj.t)
            text_labels.append('{}'.format(obj.score))

    if compare_with_gt:
        label_dir = os.path.join(dataset_dir, cfg.LABEL_DIR)
        real_gt_data = obj_utils.read_labels(label_dir, img_idx, results=False)
        if real_gt_data is not None:
            for obj in real_gt_data:
                obj.type = "GroundTruth"
            objects = objects + real_gt_data

    vis_utils.visualize_objects_in_pointcloud(
        objects, COLOUR_SCHEME, dataset_dir, img_idx, fulcrum_of_points,
        use_intensity, receive_from_perspective, compare_pcs,
        show_3d_point_count, show_orientation, final_results, show_score,
        compare_with_gt, show_image, text_positions, text_labels)
Example #13
def visualize_matches(matched_objs,
                      img_idx,
                      show_results,
                      alt_persp,
                      perspID,
                      fulcrum_of_points=True,
                      use_intensity=False,
                      view_received_detections=True,
                      receive_from_perspective=-1,
                      only_receive_dets=False,
                      compare_pcs=False,
                      show_3d_point_count=False,
                      show_orientation=cfg.VISUALIZE_ORIENTATION,
                      final_results=False,
                      show_score=False,
                      compare_with_gt=False,
                      show_image=True,
                      vis_eval_scores=False):
    # Setting Paths
    cam = 2
    dataset_dir = cfg.DATASET_DIR
    print("dataset_dir: ", cfg.DATASET_DIR)

    if img_idx == -1:
        print(
            "Please set the TEST_IDX in the config.py file to see a specific index."
        )
        img_idx = random.randint(0, 101)
        print("Using random index: ", img_idx)

    perspStr = '%07d' % perspID
    altPerspect_dir = os.path.join(dataset_dir, 'alt_perspective')
    if alt_persp:
        dataset_dir = dataset_dir + '/alt_perspective/' + perspStr
    else:
        perspID = const.ego_id()

    if show_results:
        label_dir = os.path.join(dataset_dir, 'predictions')
    else:
        label_dir = os.path.join(dataset_dir, 'label_2')

    COLOUR_SCHEME = {
        "Car": (0, 0, 255),  # Blue
        "Pedestrian": (255, 0, 0),  # Red
        "Bus": (0, 0, 255),  # Blue
        "Cyclist": (150, 50, 100),  # Purple
        "Van": (255, 150, 150),  # Peach
        "Person_sitting": (150, 200, 255),  # Sky Blue
        "Truck": (0, 0, 255),  # Blue
        "Tram": (150, 150, 150),  # Grey
        "Misc": (100, 100, 100),  # Dark Grey
        "DontCare": (255, 255, 255),  # White
        "Received": (255, 150, 150),  # Peach
        "OwnObject": (51, 255, 255),  # Cyan
        "GroundTruth": (0, 255, 0),  # Green
    }

    # Set up text labels with per-detection evaluation data if requested
    if vis_eval_scores:
        text_positions = []
        text_labels = []
    else:
        text_positions = None
        text_labels = None

    objects = []

    match_idx = 0
    for obj_list in matched_objs:
        obj_list[0].obj.type = "OwnObject"

        color_str = "Match{:07d}".format(match_idx)
        prime_val = match_idx * 809
        entity_colour = (prime_val + 13 % 255, (prime_val / 255) % 255,
                         prime_val % 255)
        COLOUR_SCHEME[color_str] = entity_colour
        first_obj = True
        for obj in obj_list:
            obj.obj.type = color_str
            objects.append(obj.obj)

            if vis_eval_scores:
                text_positions.append(obj.obj.t)
                txt = '{} - {} - {} - {} - {} - {}'.format(
                    obj.detector_id, obj.det_idx, obj.evaluator_3d_points,
                    obj.evaluator_certainty, obj.evaluator_score,
                    obj.obj.score)
                text_labels.append(txt)

        match_idx += 1

    vis_utils.visualize_objects_in_pointcloud(
        objects, COLOUR_SCHEME, dataset_dir, img_idx, fulcrum_of_points,
        use_intensity, receive_from_perspective, compare_pcs,
        show_3d_point_count, show_orientation, final_results, show_score,
        compare_with_gt, show_image, text_positions, text_labels)
Example #14
def visualize(img_idx, show_results, alt_persp, perspID, fulcrum_of_points,
              use_intensity, view_received_detections,
              receive_from_perspective, receive_det_id, only_receive_dets,
              change_rec_colour, compare_pcs, alt_colour_peach=False,
              show_3d_point_count=False, show_orientation=cfg.VISUALIZE_ORIENTATION,
              final_results=False, show_score=False,
              compare_with_gt=False, show_image=True,
              filter_area=cfg.VISUALIZE_AREA_FILTER):
    # Setting Paths
    cam = 2
    dataset_dir = cfg.DATASET_DIR
    print("dataset_dir: ", cfg.DATASET_DIR)

    if img_idx == -1:
        print("Please set the TEST_IDX in the config.py file to see a specific index.")
        img_idx = random.randint(0, 101)
        print("Using random index: ", img_idx)

    global text_labels
    global text_positions
    global COLOUR_SCHEME
    if show_3d_point_count or show_score:
        text_labels = []
        text_positions = []

    perspStr = '%07d' % perspID
    altPerspect_dir = os.path.join(dataset_dir, 'alt_perspective')
    if alt_persp:
        dataset_dir = dataset_dir + '/alt_perspective/' + perspStr
    else:
        perspID = const.ego_id()

    if show_results:
        label_dir = os.path.join(dataset_dir, 'predictions')
    else:
        label_dir = os.path.join(dataset_dir, 'label_2')

    # Load points_in_3d_boxes for each object
    points_dict = points_in_3d_boxes.load_points_in_3d_boxes(img_idx, perspID)

    gt_detections = []
    # Get bounding boxes
    if final_results:
        if filter_area:
            label_dir = os.path.join(dataset_dir, cfg.FINAL_DETS_SUBDIR_AF)
        else:
            label_dir = os.path.join(dataset_dir, cfg.FINAL_DETS_SUBDIR)
        gt_detections = obj_utils.read_labels(label_dir, img_idx, results=show_results)
        if compare_with_gt and not show_results:
            for obj in gt_detections:
                obj.type = "GroundTruth"
        addScoreText(gt_detections, show_3d_point_count, show_score)
    else:
        if (not view_received_detections or receive_from_perspective != -1) and not only_receive_dets:
            gt_detections = perspective_utils.get_detections(dataset_dir, dataset_dir, img_idx, perspID,
                                    perspID, results=show_results, filter_area=filter_area)

            setPointsText(gt_detections, points_dict, show_3d_point_count)
            addScoreTextTrustObjs(gt_detections, show_3d_point_count, show_score)
            gt_detections = trust_utils.strip_objs(gt_detections)
            if len(gt_detections) > 0:
                gt_detections[0].type = "OwnObject"

        if view_received_detections:
            stripped_detections = []
            if receive_from_perspective == -1:
                perspect_detections = perspective_utils.get_all_detections(img_idx, perspID, show_results, filter_area)
                if change_rec_colour:
                    for obj_list in perspect_detections:
                        obj_list[0].obj.type = "OwnObject"
                        if obj_list[0].detector_id == perspID:
                            if compare_with_gt:
                                if obj_list is not None:
                                    for obj in obj_list:
                                        obj.obj.type = "GroundTruth"
                            continue
                        color_str = "Received{:07d}".format(obj_list[0].detector_id)
                        prime_val = obj_list[0].detector_id * 47
                        entity_colour = (prime_val + 13 % 255, (prime_val / 255) % 255, prime_val % 255)
                        COLOUR_SCHEME[color_str] = entity_colour
                        first_obj = True
                        for obj in obj_list:
                            if first_obj:
                                first_obj = False
                                continue
                            obj.obj.type = color_str

                for obj_list in perspect_detections:
                    setPointsText(obj_list, points_dict, show_3d_point_count)
                    addScoreTextTrustObjs(obj_list, show_3d_point_count, show_score)

                stripped_detections = trust_utils.strip_objs_lists(perspect_detections)
            else:
                receive_entity_str = '{:07d}'.format(receive_from_perspective)
                receive_dir = os.path.join(altPerspect_dir, receive_entity_str)
                if os.path.isdir(receive_dir):
                    print("Using detections from: ", receive_dir)
                    # Argument order follows Example #1: the ids of the viewing
                    # perspective and of the detection source, both as ints
                    perspect_detections = perspective_utils.get_detections(dataset_dir, receive_dir, img_idx, perspID,
                                                                            receive_from_perspective, results=show_results, filter_area=filter_area)
                    if perspect_detections is not None:
                        color_str = "Received{:07d}".format(receive_from_perspective)
                        prime_val = receive_from_perspective * 47
                        entity_colour = (prime_val + 13 % 255, (prime_val / 255) % 255, prime_val % 255)
                        COLOUR_SCHEME[color_str] = entity_colour
                        first_obj = True
                        for obj in perspect_detections:
                            if first_obj:
                                first_obj = False
                                continue
                            obj.obj.type = color_str
                        setPointsText(perspect_detections, points_dict, show_3d_point_count)
                        addScoreTextTrustObjs(perspect_detections, show_3d_point_count, show_score)
                        stripped_detections = trust_utils.strip_objs(perspect_detections)
                else:
                    print("Could not find directory: ", receive_dir)

            if receive_det_id != -1 and len(stripped_detections) > 0:
                single_det = []
                single_det.append(stripped_detections[receive_det_id])
                stripped_detections = single_det

            if change_rec_colour and alt_colour_peach:
                for obj in stripped_detections:
                    obj.type = "Received"

            if len(stripped_detections) > 0:
                stripped_detections[0].type = "OwnObject"

            if only_receive_dets:
                gt_detections = stripped_detections
                print("Not using main perspective detections")
            else:
                gt_detections = gt_detections + stripped_detections

    if compare_with_gt and show_results:
        label_dir = os.path.join(dataset_dir, cfg.LABEL_DIR)
        real_gt_data = obj_utils.read_labels(label_dir, img_idx, results=False)
        if real_gt_data is not None:
            for obj in real_gt_data:
                if obj.type != "DontCare":
                    obj.type = "GroundTruth"
            gt_detections = gt_detections + real_gt_data

    visualize_objects_in_pointcloud(gt_detections, COLOUR_SCHEME, dataset_dir,
              img_idx, fulcrum_of_points, use_intensity,
              receive_from_perspective, compare_pcs,
              show_3d_point_count, show_orientation,
              final_results, show_score,
              compare_with_gt, show_image,
              _text_positions=text_positions, _text_labels=text_labels)