def main():
    """Visualize detections for one random KITTI frame as covariance
    ellipses at the box corners, followed by a spatial uncertainty heatmap.

    Reads predicted box means, covariance matrices and categorical
    parameters from disk. Only 'testing' results work, since those are the
    only ones with covariance matrices saved alongside them.
    """
    #########################################################
    # Specify Source Folders and Parameters For Frame Reader
    #########################################################
    data_split_dir = 'training'

    # Only testing works, since it requires covariance matrices
    results_dir = 'testing'

    dataset_dir = os.path.expanduser(
        '~/Datasets/Kitti/object/'
    )  # Change this to corresponding dataset directory
    image_dir = os.path.join(dataset_dir, data_split_dir) + '/image_2'
    label_dir = os.path.join(dataset_dir, data_split_dir) + '/label_2'

    checkpoint_name = 'retinanet_bdd'
    checkpoint_number = '101'

    uncertainty_method = 'bayes_od_none'

    # All result subdirectories share the same root; build it once instead
    # of repeating the full join four times.
    output_root = os.path.join(core.data_dir(), 'outputs', checkpoint_name,
                               'predictions', results_dir, 'kitti',
                               checkpoint_number, uncertainty_method)

    prediction_dir = os.path.join(output_root, 'data')
    mean_dir = os.path.join(output_root, 'mean')
    cov_dir = os.path.join(output_root, 'cov')
    cat_param_dir = os.path.join(output_root, 'cat_param')

    frames_list = os.listdir(prediction_dir)
    # BUG FIX: random.randint(0, len(frames_list)) is inclusive on both
    # ends, so it could return len(frames_list) and raise IndexError.
    # randrange(n) samples 0..n-1.
    index = random.randrange(len(frames_list))
    frame_id = int(frames_list[index][0:6])  # file names start with a 6-digit id

    print('Showing Frame: %d' % frame_id)

    #############
    # Read Frame
    #############
    im_path = image_dir + '/{:06d}.png'.format(frame_id)
    image = cv2.imread(im_path)

    label_path = label_dir + '/{:06d}.txt'.format(frame_id)
    gt_classes_hard, gt_boxes_hard = read_labels(label_path)

    prediction_path = prediction_dir + '/{:06d}.txt'.format(frame_id)
    prediction_classes, prediction_boxes, prediction_scores = read_predictions(
        prediction_path)

    prediction_boxes_mean = np.load(mean_dir + '/{:06d}.npy'.format(frame_id))
    prediction_boxes_cov = np.load(cov_dir + '/{:06d}.npy'.format(frame_id))
    prediction_boxes_cat_params = np.load(cat_param_dir +
                                          '/{:06d}.npy'.format(frame_id))

    # Transform covariances from center/size (v, u, h, w) space to corner
    # space via Cov' = J Cov J^T, matching the mean transform done by
    # vuhw_to_vuvu_np below.
    transformation_mat = np.array([[1, 0, -0.5, 0], [0, 1, 0, -0.5],
                                   [1, 0, 0.5, 0], [0, 1, 0, 0.5]])

    prediction_boxes_cov = np.matmul(
        np.matmul(transformation_mat, prediction_boxes_cov),
        transformation_mat.T)

    prediction_boxes = vuhw_to_vuvu_np(prediction_boxes_mean)
    # One-hot encode the most likely category for each detection.
    category_pred = np.zeros(prediction_boxes_cat_params.shape)
    cat_ind = np.argmax(prediction_boxes_cat_params, axis=1)
    category_pred[np.arange(category_pred.shape[0]), cat_ind] = 1

    #########################################################
    # Draw GT and Prediction Boxes
    #########################################################
    # Draw GT boxes on a copy, then overlay per-corner uncertainty ellipses.
    image_out = draw_box_2d(np.copy(image),
                            gt_boxes_hard,
                            gt_classes_hard,
                            line_width=2,
                            dataset='kitti',
                            is_gt=True)

    image_out = draw_ellipse_2d_corners(image_out,
                                        prediction_boxes,
                                        prediction_boxes_cov,
                                        prediction_classes,
                                        dataset='kitti',
                                        line_width=3)

    cv2.imshow('Detections from ' + uncertainty_method, image_out)

    cv2.waitKey()

    # Accumulate per-detection heatmaps; later boxes overwrite earlier ones
    # only where their own heatmap is non-zero.
    heatmap_new = np.zeros(image.shape[0:2])

    for prediction_box, prediction_box_cov in zip(prediction_boxes,
                                                  prediction_boxes_cov):
        heatmap = eval_utils.calc_heatmap(prediction_box, prediction_box_cov,
                                          image_out.shape[0:2]) * 255
        heatmap_new = np.where(heatmap != 0, heatmap, heatmap_new)

    im_color = cv2.applyColorMap(heatmap_new.astype(np.uint8),
                                 cv2.COLORMAP_JET)
    overlayed_im = cv2.addWeighted(image, 0.4, im_color, 0.6, 0)
    cv2.imshow('Spatial Heatmap Image from ' + uncertainty_method,
               overlayed_im)

    cv2.waitKey()
def main():
    """Visualize GT and predicted boxes for a single KITTI frame, annotating
    each prediction with its maximum IoU against the ground truth.

    A random frame is selected, but a hard-coded debug frame id (below)
    currently overrides the random choice.
    """
    #########################################################
    # Specify Source Folders and Parameters For Frame Reader
    #########################################################
    data_split_dir = 'training'

    # Specify whether the validation or inference results need to be
    # visualized.
    #results_dir = 'validation'
    results_dir = 'testing'

    # sample_free, anchor_redundancy, black_box,  bayes_od_none,
    # bayes_od_ci_fast. bayes_od_ci,or bayes_od_ici
    uncertainty_method = 'bayes_od'

    dataset_dir = os.path.expanduser('~/Datasets/Kitti/object/')
    image_dir = os.path.join(dataset_dir, data_split_dir) + '/image_2'
    label_dir = os.path.join(dataset_dir, data_split_dir) + '/label_2'

    checkpoint_name = 'retinanet_bdd'
    checkpoint_number = '101'

    # Testing results are nested one level deeper (per uncertainty method)
    # than validation results.
    if results_dir == 'testing':
        prediction_dir = os.path.join(core.data_dir(), 'outputs',
                                      checkpoint_name, 'predictions',
                                      results_dir, 'kitti', checkpoint_number,
                                      uncertainty_method, 'data')
    else:
        prediction_dir = os.path.join(core.data_dir(), 'outputs',
                                      checkpoint_name, 'predictions',
                                      results_dir, checkpoint_number, 'data')

    frames_list = os.listdir(prediction_dir)
    # Inclusive upper bound, hence len - 1. File names start with a 6-digit id.
    index = random.randint(0, len(frames_list) - 1)
    frame_id = int(frames_list[index][0:6])

    # NOTE(review): debug override — the random selection above is discarded
    # in favor of a fixed frame. Remove/comment the line below to restore
    # random frame selection.
    # frame_id = 27  # Out of distribution example
    frame_id = 4079
    # frame_id = 169   # Many Cars, Hard
    # frame_id = 2290  # Many Cars, Occlusions
    # frame_id = 1941  # Many Cars, Horizontal Direction
    # frame_id = 4032  # Orientation
    # frame_id = 104   # Weird Orientation
    # frame_id = 7047  # Weird Orientation
    # frame_id = 6632 # Very hard orientations

    # frame_id = 195  # Single Pedestrian
    # frame_id = 1574  # Single Pedestrian
    # frame_id = 332  # Multiple Hard Pedestrians
    # frame_id = 1193 # Multiple Hard Pedestrians

    # frame_id = 1274 # Multiple Cyclists

    print('Showing Frame: %d' % frame_id)

    #############
    # Read Frame
    #############
    im_path = image_dir + '/{:06d}.png'.format(frame_id)
    image = cv2.imread(im_path)

    label_path = label_dir + '/{:06d}.txt'.format(frame_id)
    gt_classes_hard, gt_boxes_hard = read_labels(label_path)

    prediction_path = prediction_dir + '/{:06d}.txt'.format(frame_id)
    prediction_classes, prediction_boxes, prediction_scores = read_predictions(
        prediction_path)

    # For each prediction, record the best 2D IoU against any GT box; these
    # values are drawn next to the prediction boxes below.
    max_ious = np.zeros(prediction_boxes.shape[0])
    # Compute IOU between each prediction and the ground truth boxes
    if gt_boxes_hard.size > 0 and prediction_boxes.size > 0:
        for obj_idx in range(prediction_boxes.shape[0]):
            obj_iou_fmt = prediction_boxes[obj_idx]

            ious_2d = two_d_iou(obj_iou_fmt, gt_boxes_hard)

            max_iou = np.amax(ious_2d)
            max_ious[obj_idx] = max_iou
    #########################################################
    # Draw GT and Prediction Boxes
    #########################################################
    # Transform Predictions to left and right images
    image_out = draw_box_2d(image,
                            gt_boxes_hard,
                            gt_classes_hard,
                            line_width=2,
                            dataset='kitti',
                            is_gt=True)

    image_out = draw_box_2d(image_out,
                            prediction_boxes,
                            prediction_classes,
                            line_width=2,
                            is_gt=False,
                            dataset='kitti',
                            text_to_plot=max_ious,
                            plot_text=True)
    if results_dir == 'testing':
        cv2.imshow('Detections from ' + uncertainty_method, image_out)
    else:
        cv2.imshow('Validation Set Detections', image_out)

    cv2.waitKey()
# Exemplo n.º 3 — scraped-site artifact, commented out so the file parses
# 0
def main():
    """Evaluate the minimum uncertainty error (MUE) of saved detections.

    Detections are ranked by an entropy score — Gaussian entropy of the box
    covariance, or categorical entropy of the class distribution — matched
    against ground truth at IoU 0.5, and the resulting uncertainty error is
    printed as a table.
    """
    #########################################################
    # Specify Source Folders and Parameters For Frame Reader
    #########################################################
    data_split_dir = 'training'
    difficulty = 'all'
    categories = ['car', 'pedestrian']

    # Specify whether the validation or inference results need to be evaluated.
    # results_dir = 'validation'  # Or testing
    results_dir = 'testing'

    uncertainty_method = 'bayes_od_none'

    entropy_method = 'categorical'  # 'gaussian' or 'categorical'

    # All or per category. Note that if per category is used, out of
    # distribution detections are ignored. Results in overestimation of
    # performance. Any value other than 'All' enables per-category names.
    compute_method = 'category'

    checkpoint_name = 'retinanet_bdd'
    checkpoint_number = '101'

    dataset_dir = os.path.expanduser('~/Datasets/Kitti/object/')
    label_dir = os.path.join(dataset_dir, data_split_dir) + '/label_2'

    # All result subdirectories share the same root; build it once.
    output_root = os.path.join(core.data_dir(), 'outputs', checkpoint_name,
                               'predictions', results_dir, 'kitti',
                               checkpoint_number, uncertainty_method)

    prediction_dir = os.path.join(output_root, 'data')
    cov_dir = os.path.join(output_root, 'cov')
    cat_param_dir = os.path.join(output_root, 'cat_param')

    frames_list = os.listdir(prediction_dir)

    # Running frame counter, also used as the per-frame 'name' key in the
    # result dicts. Renamed from `id` to avoid shadowing the builtin.
    frame_count = 0
    gt_dict_list = []
    prediction_dict_list = []
    for frame in frames_list:
        frame_id = int(frame[0:6])
        #############
        # Read Frame
        #############
        label_path = label_dir + '/{:06d}.txt'.format(frame_id)
        gt_classes, gt_boxes = read_labels(label_path,
                                           difficulty=difficulty,
                                           categories=categories)

        prediction_path = prediction_dir + '/{:06d}.txt'.format(frame_id)
        prediction_classes, prediction_boxes, prediction_scores = read_predictions(
            prediction_path, categories=categories)

        # Only frames with both GT and predictions contribute to the metric.
        if gt_boxes.size > 0 and prediction_boxes.size > 0:
            prediction_box_cat_params = np.load(
                os.path.join(cat_param_dir, '{:06d}.npy'.format(frame_id)))
            prediction_box_covs = np.load(
                os.path.join(cov_dir, '{:06d}.npy'.format(frame_id)))

            if entropy_method == 'gaussian':
                ranking_entropies = [
                    compute_gaussian_entropy_np(cov)
                    for cov in prediction_box_covs
                ]
            elif entropy_method == 'categorical':
                ranking_entropies = [
                    compute_categorical_entropy_np(cat_vect)
                    for cat_vect in prediction_box_cat_params
                ]
            else:
                # BUG FIX: an unrecognized entropy_method previously left
                # ranking_entropies undefined, crashing with a NameError in
                # the prediction loop below. Fail fast and clearly instead.
                raise ValueError('Unknown entropy_method: ' + entropy_method)

            for gt_class, gt_box in zip(gt_classes, gt_boxes):

                ind = np.argmax(gt_class)
                # Re-order box coordinates (swap the two axes of each
                # corner) into the layout the evaluator expects.
                gt_box_list = [gt_box[1], gt_box[0], gt_box[3], gt_box[2]]
                if compute_method == 'All':
                    category_name = 'All'
                else:
                    category_name = categories[ind]
                gt_dict = {
                    'name': str(frame_count),
                    'category': category_name,
                    'bbox': gt_box_list,
                    'score': 1
                }
                gt_dict_list.append(gt_dict)

            for pred_class, pred_box, ranking_entropy in zip(
                    prediction_classes, prediction_boxes, ranking_entropies):
                ind = np.argmax(pred_class)
                # Skip out-of-distribution detections whose argmax falls
                # outside the evaluated category list.
                if ind >= len(categories):
                    continue
                pred_box_list = [
                    pred_box[1], pred_box[0], pred_box[3], pred_box[2]
                ]
                if compute_method == 'All':
                    category_name = 'All'
                else:
                    category_name = categories[ind]
                pred_dict = {
                    'name': str(frame_count),
                    'category': category_name,
                    'bbox': pred_box_list,
                    'entropy_score': ranking_entropy
                }

                prediction_dict_list.append(pred_dict)
        frame_count += 1
        print('Computed {} / {} frames.'.format(frame_count,
                                                len(frames_list)))

    mean_u_error_list, mean_u_error, cat_list, scores_at_min_u_error = evaluate_u_error(
        gt_dict_list, prediction_dict_list, iou_thresholds=[0.5])

    table = PrettyTable(cat_list)
    table.add_row(mean_u_error_list)
    print("Average " + entropy_method + " MUE: " + str(mean_u_error))
    print("Average " + entropy_method + " Score: " +
          str(np.mean(scores_at_min_u_error)))

    print(table)
# Exemplo n.º 4 — scraped-site artifact, commented out so the file parses
# 0
def main():
    """Evaluate saved detections with the PDQ (Probability-based Detection
    Quality) metric and write the result table to 'pdq_res.txt'.

    For every frame, ground-truth boxes are rasterized into boolean
    segmentation masks and predictions are converted into probabilistic
    box instances (mean corners + per-corner 2x2 covariance blocks).
    """
    #########################################################
    # Specify Source Folders and Parameters For Frame Reader
    #########################################################
    data_split_dir = 'training'
    difficulty = 'all'
    categories = ['car', 'pedestrian']

    # Specify whether the validation or inference results need to be evaluated.
    # results_dir = 'validation'  # Or testing
    results_dir = 'testing'

    # sample_free, anchor_redundancy, black_box, naive_aleatoric_epistemic,
    # bayes_od_none, bayes_od_ci_fast, bayes_od_ci, or bayes_od_ici
    uncertainty_method = 'bayes_od_none'

    checkpoint_name = 'retinanet_bdd'
    checkpoint_number = '101'

    dataset_dir = os.path.expanduser('~/Datasets/Kitti/object/')
    label_dir = os.path.join(dataset_dir, data_split_dir) + '/label_2'

    # All result subdirectories share the same root; build it once.
    output_root = os.path.join(core.data_dir(), 'outputs', checkpoint_name,
                               'predictions', results_dir, 'kitti',
                               checkpoint_number, uncertainty_method)

    mean_dir = os.path.join(output_root, 'mean')
    prediction_dir = os.path.join(output_root, 'data')
    cov_dir = os.path.join(output_root, 'cov')
    cat_param_dir = os.path.join(output_root, 'cat_param')

    frames_list = os.listdir(prediction_dir)

    # Running frame counter; renamed from `id` to avoid shadowing the
    # builtin. (Unused gt/prediction dict lists from a copy-paste of the
    # other evaluators were removed.)
    frame_count = 0
    print("PDQ evaluation starting:")
    match_list = []
    for frame in frames_list:
        frame_id = int(frame[0:6])
        #############
        # Read Frame
        #############
        label_path = label_dir + '/{:06d}.txt'.format(frame_id)
        gt_classes, gt_boxes = read_labels(label_path,
                                           difficulty=difficulty,
                                           categories=categories)

        # Create GT list
        gt_instance_list = []
        if gt_boxes.size > 0:
            for cat_gt, box_2d_gt in zip(gt_classes, gt_boxes):
                # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed
                # in 1.24; the builtin `bool` is the documented replacement.
                seg_mask = np.zeros([375, 1300], dtype=bool)
                box_inds = box_2d_gt.astype(np.int32).tolist()
                # Swap the two axes of each corner into the mask's
                # column/row slicing order used below.
                box_inds = np.array(
                    [box_inds[1], box_inds[0], box_inds[3], box_inds[2]])

                # NOTE(review): all four coordinates are clipped to
                # [0, 1300], but the mask is only 375 rows tall — row
                # coordinates above 375 are silently truncated by the slice
                # below. Presumably intentional for KITTI image sizes;
                # TODO confirm.
                box_inds = np.clip(box_inds, a_min=0.0,
                                   a_max=1300).astype(np.int32)

                seg_mask[box_inds[1]:box_inds[3],
                         box_inds[0]:box_inds[2]] = True
                gt_index = np.argmax(cat_gt)
                gt_instance = pdq_data_holders.GroundTruthInstance(
                    seg_mask, gt_index, 0, 0, bounding_box=box_inds)
                gt_instance_list.append(gt_instance)

        prediction_boxes_mean = np.load(mean_dir +
                                        '/{:06d}.npy'.format(frame_id))
        # NOTE(review): the covariance is scaled by 70 here — presumably an
        # empirical calibration factor for PDQ; confirm against the source
        # publication before changing.
        prediction_boxes_cov = np.load(cov_dir +
                                       '/{:06d}.npy'.format(frame_id)) * 70
        prediction_boxes_cat_params = np.load(cat_param_dir +
                                              '/{:06d}.npy'.format(frame_id))

        det_instance_list = []
        if prediction_boxes_cov.size:
            # Keep only the probability columns for the evaluated
            # categories (columns 0 and 3 of the full categorical vector).
            prediction_boxes_cat_params = np.stack([
                prediction_boxes_cat_params[:, 0],
                prediction_boxes_cat_params[:, 3]
            ],
                                                   axis=1)
            # Linear map J taking center/size covariances to corner
            # covariances: Cov' = J Cov J^T, matching vuhw_to_vuvu_np on
            # the means (with the additional axis swap baked in).
            transformation_mat = np.array([[0, 1, 0, -0.5], [1, 0, -0.5, 0],
                                           [0, 1, 0, 0.5], [1, 0, 0.5, 0]])
            prediction_boxes_cov = np.matmul(
                np.matmul(transformation_mat, prediction_boxes_cov),
                transformation_mat.T)
            prediction_boxes_mean = vuhw_to_vuvu_np(prediction_boxes_mean)
            for cat_det, box_mean, cov_det in zip(prediction_boxes_cat_params,
                                                  prediction_boxes_mean,
                                                  prediction_boxes_cov):
                # Keep only confident detections.
                if np.max(cat_det) >= 0.5:
                    box_processed = np.array(
                        [box_mean[1], box_mean[0], box_mean[3],
                         box_mean[2]]).astype(np.int32)
                    # Per-corner 2x2 covariance blocks for PBoxDetInst.
                    cov_processed = [cov_det[0:2, 0:2], cov_det[2:4, 2:4]]
                    det_instance = pdq_data_holders.PBoxDetInst(
                        cat_det, box_processed, cov_processed)
                    det_instance_list.append(det_instance)
        match_list.append((gt_instance_list, det_instance_list))

        frame_count += 1
        print('Computed {} / {} frames.'.format(frame_count,
                                                len(frames_list)))

    print("PDQ Ended")
    evaluator = pdq.PDQ()
    score = evaluator.score(match_list) * 100
    TP, FP, FN = evaluator.get_assignment_counts()
    avg_spatial_quality = evaluator.get_avg_spatial_score()
    avg_label_quality = evaluator.get_avg_label_score()
    avg_overall_quality = evaluator.get_avg_overall_quality_score()

    table = PrettyTable([
        'score', 'True Positives', 'False Positives', 'False Negatives',
        'Average Spatial Quality', 'Average Label Quality',
        'Average Overall Quality'
    ])

    table.add_row([
        score, TP, FP, FN, avg_spatial_quality, avg_label_quality,
        avg_overall_quality
    ])

    print(table)

    text_file_name = os.path.join(output_root, 'pdq_res.txt')

    with open(text_file_name, "w") as text_file:
        print(table, file=text_file)
# Exemplo n.º 5 — scraped-site artifact, commented out so the file parses
# 0
def main():
    """Compute mean AP at IoU 0.5 for saved KITTI detections.

    Also reports optimal score thresholds, maximum F-scores, and the ratio
    of out-of-distribution predictions (predicted categories absent from
    the evaluated category list).
    """
    #########################################################
    # Specify Source Folders and Parameters For Frame Reader
    #########################################################
    data_split_dir = 'training'
    difficulty = 'all'
    categories = ['car', 'pedestrian']

    # Evaluate either the validation split or the inference (testing) split.
    # results_dir = 'validation'  # Or testing
    results_dir = 'testing'

    # sample_free, anchor_redundancy, black_box, naive_aleatoric_epistemic,
    # bayes_od_none, bayes_od_ci_fast, bayes_od_ci, or bayes_od_ici
    uncertainty_method = 'bayes_od_none'

    checkpoint_name = 'retinanet_bdd_covar'
    checkpoint_number = '101'

    dataset_dir = os.path.expanduser('~/Datasets/Kitti/object/')
    label_dir = os.path.join(dataset_dir, data_split_dir) + '/label_2'

    # Testing results are nested one level deeper (per uncertainty method)
    # than validation results.
    if results_dir == 'testing':
        prediction_dir = os.path.join(core.data_dir(), 'outputs',
                                      checkpoint_name, 'predictions',
                                      results_dir, 'kitti', checkpoint_number,
                                      uncertainty_method, 'data')
    else:
        prediction_dir = os.path.join(core.data_dir(), 'outputs',
                                      checkpoint_name, 'predictions',
                                      results_dir, checkpoint_number, 'data')

    frames_list = os.listdir(prediction_dir)

    gt_dict_list = []
    prediction_dict_list = []
    for frame_counter, frame in enumerate(frames_list):
        frame_id = int(frame[0:6])
        #############
        # Read Frame
        #############
        label_path = label_dir + '/{:06d}.txt'.format(frame_id)
        gt_classes, gt_boxes = read_labels(label_path,
                                           difficulty=difficulty,
                                           categories=categories)

        prediction_path = prediction_dir + '/{:06d}.txt'.format(frame_id)
        prediction_classes, prediction_boxes, prediction_scores = read_predictions(
            prediction_path, categories=categories)

        # Only frames with both GT and predictions contribute to the metric.
        if gt_boxes.size > 0 and prediction_boxes.size > 0:
            frame_name = str(frame_counter)

            # Ground-truth entries; each box's corner axes are swapped into
            # the layout expected by the evaluator.
            gt_dict_list.extend({
                'name': frame_name,
                'category': categories[np.argmax(gt_class)],
                'bbox': [gt_box[1], gt_box[0], gt_box[3], gt_box[2]],
                'score': 1
            } for gt_class, gt_box in zip(gt_classes, gt_boxes))

            # Prediction entries, with the same axis re-ordering.
            prediction_dict_list.extend({
                'name': frame_name,
                'category': categories[np.argmax(pred_class)],
                'bbox': [pred_box[1], pred_box[0], pred_box[3], pred_box[2]],
                'score': pred_score
            } for pred_class, pred_box, pred_score in zip(
                prediction_classes, prediction_boxes, prediction_scores))

        print('Computed {} / {} frames.'.format(frame_counter + 1,
                                                len(frames_list)))

    mean, breakdown, cat_list, optimal_score_thresholds, maximum_f_scores = evaluate_detection(
        gt_dict_list, prediction_dict_list, iou_thresholds=[0.5])

    table = PrettyTable(cat_list)
    table.add_row(breakdown)
    table.add_row(optimal_score_thresholds)

    print('Mean AP: ' + str(mean) + '\n')
    print('Mean Optimal Score Threshold: ' +
          str(np.mean(np.array(optimal_score_thresholds))) + '\n')
    print('Mean Maximum F-score: ' + str(np.mean(np.array(maximum_f_scores))) +
          '\n')

    # Count out-of-distribution predictions: detections whose category does
    # not appear in the evaluated category list.
    num_od = np.array([
        int(prediction['category'] not in cat_list)
        for prediction in prediction_dict_list
    ])

    print('Ratio of Out of Distribution Predictions: ' +
          str(np.sum(num_od) / len(prediction_dict_list)) + '\n')
    print(table)