Example #1
def compute_AP_COCO(annotation_records,
                    gt_classes_records,
                    pred_classes_records,
                    class_names,
                    show_result=True):
    '''
    Compute MSCOCO AP, averaged over the IoU threshold range 0.5:0.05:0.95
    '''
    # use linspace to include both endpoints; np.arange(0.50, 0.95, 0.05)
    # excludes the stop value and would silently drop the 0.95 threshold
    iou_threshold_list = np.linspace(0.50, 0.95, 10)
    APs = {}
    for iou_threshold in iou_threshold_list:
        iou_threshold = round(iou_threshold, 2)
        mAP = compute_mAP_PascalVOC(annotation_records,
                                    gt_classes_records,
                                    pred_classes_records,
                                    class_names,
                                    iou_threshold,
                                    show_result=False)
        APs[iou_threshold] = round(mAP, 6)

    #get overall AP percentage value
    AP = np.mean(list(APs.values()))

    if show_result:
        '''
         Draw MS COCO AP plot
        '''
        touchdir('result')
        window_title = "MSCOCO AP on different IOU"
        plot_title = "COCO AP = {0:.2f}%".format(AP)
        x_label = "Average Precision"
        output_path = os.path.join('result', 'COCO_AP.jpg')
        draw_plot_func(APs,
                       len(APs),
                       window_title,
                       plot_title,
                       x_label,
                       output_path,
                       to_show=False,
                       plot_color='royalblue',
                       true_p_bar='')

        print('\nMS COCO AP evaluation')
        for (iou_threshold, AP_value) in APs.items():
            print('IOU %.2f: AP %f' % (iou_threshold, AP_value))
        print('total AP: %f' % (AP))

    #return AP percentage value
    return AP
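Every example on this page calls touchdir() before writing results, but the helper itself is not shown in these excerpts. A minimal sketch of what it presumably does (an assumption, not the repo's actual code): create the directory tree, ignoring the case where it already exists.

import os

def touchdir(path):
    # assumed behavior: create the directory (and parents) if missing
    os.makedirs(path, exist_ok=True)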
Example #2
def draw_rec_prec(rec, prec, mrec, mprec, class_name, ap):
    """
     Draw plot
    """
    plt.plot(rec, prec, '-o')
    # add a new penultimate point (mrec[-2], 0.0) to the list,
    # since the last line segment (and its area) does not affect the AP value
    area_under_curve_x = mrec[:-1] + [mrec[-2]] + [mrec[-1]]
    area_under_curve_y = mprec[:-1] + [0.0] + [mprec[-1]]
    plt.fill_between(area_under_curve_x,
                     0,
                     area_under_curve_y,
                     alpha=0.2,
                     edgecolor='r')
    # set window title
    fig = plt.gcf()  # gcf - get current figure
    # fig.canvas.set_window_title() is deprecated since matplotlib 3.4
    fig.canvas.manager.set_window_title('AP ' + class_name)
    # set plot title
    plt.title('class: ' + class_name + ' AP = {:.2f}%'.format(ap * 100))
    #plt.suptitle('This is a somewhat long figure title', fontsize=16)
    # set axis titles
    plt.xlabel('Recall')
    plt.ylabel('Precision')
    # optional - set axes
    axes = plt.gca()  # gca - get current axes
    axes.set_xlim([0.0, 1.0])
    axes.set_ylim([0.0, 1.05])  # .05 to give some extra space
    # Alternative option -> wait for button to be pressed
    #while not plt.waitforbuttonpress(): pass # wait for key display
    # Alternative option -> normal display
    #plt.show()
    # save the plot
    rec_prec_plot_path = os.path.join('result', 'classes')
    touchdir(rec_prec_plot_path)
    fig.savefig(os.path.join(rec_prec_plot_path, class_name + ".jpg"))
    plt.cla()  # clear axes for next plot
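A toy call for draw_rec_prec, with made-up recall/precision values; it assumes draw_rec_prec, touchdir and matplotlib are in scope as above. mrec/mprec here follow the usual VOC convention of wrapping the raw points in sentinel endpoints, which is the shape the slicing above expects.

# toy inputs (illustrative values only)
rec = [0.1, 0.4, 0.7]
prec = [1.0, 0.8, 0.6]
mrec = [0.0] + rec + [1.0]    # sentinel endpoints, VOC style
mprec = [0.0] + prec + [0.0]

draw_rec_prec(rec, prec, mrec, mprec, class_name='car', ap=0.73)  # ap is illustrative
# writes result/classes/car.jpg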
Example #3
    def dump_saved_model(self, saved_model_path):
        model = self.prenms_model
        touchdir(saved_model_path)

        # note: tf.keras.experimental.export_saved_model only exists around
        # TF 1.x/2.0 and was removed in later TF 2.x releases; see the sketch
        # after this example for a replacement
        tf.keras.experimental.export_saved_model(model, saved_model_path)
        print('export inference model to %s' % str(saved_model_path))
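On current TF 2.x the equivalent export goes through the stable tf.saved_model.save API. A minimal sketch (dump_saved_model_v2 is a hypothetical name for illustration, not part of the repo):

import tensorflow as tf

def dump_saved_model_v2(model, saved_model_path):
    # tf.saved_model.save creates the output directory itself,
    # so no touchdir() call is needed here
    tf.saved_model.save(model, saved_model_path)
    print('export inference model to %s' % saved_model_path)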
Example #4
def compute_AP_COCO_Scale(annotation_records, scale_gt_classes_records,
                          pred_classes_records, class_names):
    '''
    Compute MSCOCO AP on different scale object: small, medium, large
    '''
    scale_APs = {}
    for scale_key in ['small', 'medium', 'large']:
        gt_classes_records = scale_gt_classes_records[scale_key]
        scale_AP = compute_AP_COCO(annotation_records,
                                   gt_classes_records,
                                   pred_classes_records,
                                   class_names,
                                   show_result=False)
        scale_APs[scale_key] = round(scale_AP, 4)

    #get overall AP percentage value
    scale_mAP = np.mean(list(scale_APs.values()))
    '''
     Draw Scale AP plot
    '''
    touchdir('result')
    window_title = "MSCOCO AP on different scale"
    plot_title = "scale mAP = {0:.2f}%".format(scale_mAP)
    x_label = "Average Precision"
    output_path = os.path.join('result', 'COCO_scale_AP.jpg')
    draw_plot_func(scale_APs,
                   len(scale_APs),
                   window_title,
                   plot_title,
                   x_label,
                   output_path,
                   to_show=False,
                   plot_color='royalblue',
                   true_p_bar='')
    '''
     Draw Scale Object Sum plot
    '''
    for scale_key in ['small', 'medium', 'large']:
        gt_classes_records = scale_gt_classes_records[scale_key]
        gt_classes_sum = {}

        for class_name in class_names:
            # count the gt objects of every class at this scale
            gt_classes_sum[class_name] = len(
                gt_classes_records[class_name]
            ) if class_name in gt_classes_records else 0

        total_sum = np.sum(list(gt_classes_sum.values()))

        window_title = "{} object number".format(scale_key)
        plot_title = "total {} object number = {}".format(scale_key, total_sum)
        x_label = "Object Number"
        output_path = os.path.join('result',
                                   '{}_object_number.jpg'.format(scale_key))
        draw_plot_func(gt_classes_sum,
                       len(gt_classes_sum),
                       window_title,
                       plot_title,
                       x_label,
                       output_path,
                       to_show=False,
                       plot_color='royalblue',
                       true_p_bar='')

    print('\nMS COCO AP evaluation on different scale')
    for (scale, AP_value) in scale_APs.items():
        print('%s scale: AP %f' % (scale, AP_value))
    print('total AP: %f' % (scale_mAP))
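compute_AP_COCO_Scale expects scale_gt_classes_records keyed first by scale, then by class name; the per-record layout is opaque to this function. A hypothetical input showing only that nesting (all names and values below are placeholders):

# placeholder gt record; the real record layout is not shown in these excerpts
gt_record = ['00001.jpg', '94,115,203,232']

scale_gt_classes_records = {
    'small':  {'car': [gt_record]},       # one small 'car' ground truth
    'medium': {'car': [gt_record] * 2},   # two medium instances
    'large':  {},                         # no large objects annotated
}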
Example #5
def get_prediction_class_records(model_path, annotation_records, anchors,
                                 class_names, model_image_size, conf_threshold,
                                 save_result):
    '''
    Run the YOLO model on the annotation images to build the predicted class dict.

    The dict maps each class name to a list of [image_name, coordinates, score]
    records, sorted by score in descending order:
    pred_classes_records = {
        'car': [
                ['00001.jpg','94,115,203,232',0.98],
                ['00002.jpg','82,64,154,128',0.93],
                ...
               ],
        ...
    }
    '''

    # support of tflite model
    if model_path.endswith('.tflite'):
        from tensorflow.lite.python import interpreter as interpreter_wrapper
        interpreter = interpreter_wrapper.Interpreter(model_path=model_path)
        interpreter.allocate_tensors()
    # support of MNN model
    elif model_path.endswith('.mnn'):
        interpreter = MNN.Interpreter(model_path)
        session = interpreter.createSession()
    # normal keras h5 model
    else:
        model = load_model(model_path, compile=False)

    pred_classes_records = {}
    for (image_name, gt_records) in annotation_records.items():
        image = Image.open(image_name)
        image_array = np.array(image, dtype='uint8')
        image_data = preprocess_image(image, model_image_size)
        image_shape = image.size

        if model_path.endswith('.tflite'):
            pred_boxes, pred_classes, pred_scores = yolo_predict_tflite(
                interpreter, image, anchors, len(class_names), conf_threshold)
        elif model_path.endswith('.mnn'):
            pred_boxes, pred_classes, pred_scores = yolo_predict_mnn(
                interpreter, session, image, anchors, len(class_names),
                conf_threshold)
        else:
            pred_boxes, pred_classes, pred_scores = yolo3_postprocess_np(
                model.predict([image_data]),
                image_shape,
                anchors,
                len(class_names),
                model_image_size,
                max_boxes=100,
                confidence=conf_threshold)

        print('Found {} boxes for {}'.format(len(pred_boxes), image_name))

        if save_result:

            gt_boxes, gt_classes, gt_scores = transform_gt_record(
                gt_records, class_names)

            result_dir = os.path.join('result', 'detection')
            touchdir(result_dir)
            colors = get_colors(class_names)
            image_array = draw_boxes(image_array,
                                     gt_boxes,
                                     gt_classes,
                                     gt_scores,
                                     class_names,
                                     colors=None,
                                     show_score=False)
            image_array = draw_boxes(image_array, pred_boxes, pred_classes,
                                     pred_scores, class_names, colors)
            image = Image.fromarray(image_array)
            # drop the alpha channel if the image is RGBA
            if len(image.split()) == 4:
                image = image.convert("RGB")
            image.save(os.path.join(result_dir, os.path.basename(image_name)))

        # Nothing detected
        if pred_boxes is None or len(pred_boxes) == 0:
            continue

        for box, cls, score in zip(pred_boxes, pred_classes, pred_scores):
            pred_class_name = class_names[cls]
            xmin, ymin, xmax, ymax = box
            coordinate = "{},{},{},{}".format(xmin, ymin, xmax, ymax)

            # append to the existing class record, or create a new one
            pred_classes_records.setdefault(pred_class_name, []).append(
                [image_name, coordinate, score])

    # sort pred_classes_records for each class according to score
    for pred_class_list in pred_classes_records.values():
        pred_class_list.sort(key=lambda ele: ele[2], reverse=True)

    return pred_classes_records
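A hedged usage sketch for get_prediction_class_records; the model path, anchors, class names and threshold below are placeholders, and the output shape is taken from the docstring above:

# hypothetical call; every argument value here is a placeholder
pred_classes_records = get_prediction_class_records(
    model_path='model.h5',
    annotation_records=annotation_records,  # image path -> gt records
    anchors=anchors,                        # YOLO anchor array
    class_names=['car', 'person'],
    model_image_size=(416, 416),
    conf_threshold=0.1,
    save_result=False)

# per the docstring, each class list comes back sorted by score, e.g.:
# {'car': [['00001.jpg', '94,115,203,232', 0.98],
#          ['00002.jpg', '82,64,154,128', 0.93]]}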