Example #1
def bboxes_eval(org_image, image_shape, bbox_img, cls_pred_logits, bboxes_pred,
                glabels_raw, gbboxes_raw, isdifficult, num_classes):
    # Run the post-processing on the CPU: it is loop-intensive and usually more efficient there.
    cls_pred_prob = tf.nn.softmax(
        tf.reshape(cls_pred_logits, [-1, num_classes]))
    bboxes_pred = tf.reshape(bboxes_pred, [-1, 4])
    glabels_raw = tf.reshape(glabels_raw, [-1])
    gbboxes_raw = tf.reshape(gbboxes_raw, [-1, 4])
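    # Keep only ground-truth boxes with positive labels (drop background/padding entries).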
    gbboxes_raw = tf.boolean_mask(gbboxes_raw, glabels_raw > 0)
    glabels_raw = tf.boolean_mask(glabels_raw, glabels_raw > 0)
    isdifficult = tf.reshape(isdifficult, [-1])

    with tf.device('/device:CPU:0'):
        selected_scores, selected_bboxes = eval_helper.tf_bboxes_select(
            [cls_pred_prob], [bboxes_pred],
            FLAGS.select_threshold,
            num_classes,
            scope='xdet_v2_select')

        selected_bboxes = eval_helper.bboxes_clip(bbox_img, selected_bboxes)
        selected_scores, selected_bboxes = eval_helper.filter_boxes(
            selected_scores,
            selected_bboxes,
            0.03,
            image_shape, [FLAGS.train_image_size] * 2,
            keep_top_k=FLAGS.nms_topk * 2)

        # Resize bboxes to original image shape.
        selected_bboxes = eval_helper.bboxes_resize(bbox_img, selected_bboxes)

        selected_scores, selected_bboxes = eval_helper.bboxes_sort(
            selected_scores, selected_bboxes, top_k=FLAGS.nms_topk * 2)

        # Apply NMS algorithm.
        selected_scores, selected_bboxes = eval_helper.bboxes_nms_batch(
            selected_scores,
            selected_bboxes,
            nms_threshold=FLAGS.nms_threshold,
            keep_top_k=FLAGS.nms_topk)

        # label_scores, pred_labels, bboxes_pred = eval_helper.xdet_predict(bbox_img, cls_pred_prob, bboxes_pred, image_shape, FLAGS.train_image_size, FLAGS.nms_threshold, FLAGS.select_threshold, FLAGS.nms_topk, num_classes, nms_mode='union')

        dict_metrics = {}
        # Compute TP and FP statistics.
        num_gbboxes, tp, fp = eval_helper.bboxes_matching_batch(
            selected_scores.keys(), selected_scores, selected_bboxes,
            glabels_raw, gbboxes_raw, isdifficult)

        # FP and TP metrics.
        tp_fp_metric = metrics.streaming_tp_fp_arrays(num_gbboxes, tp, fp,
                                                      selected_scores)
        metrics_name = ('nobjects', 'ndetections', 'tp', 'fp', 'scores')
        for c in tp_fp_metric[0].keys():
            for i in range(len(tp_fp_metric[0][c])):
                dict_metrics['tp_fp_%s_%s' %
                             (label2name_table[c], metrics_name[i])] = (
                                 tp_fp_metric[0][c][i], tp_fp_metric[1][c][i])

        # Add to summaries precision/recall values.
        aps_voc07 = {}
        aps_voc12 = {}
        for c in tp_fp_metric[0].keys():
            # Precision and recall values.
            prec, rec = metrics.precision_recall(*tp_fp_metric[0][c])

            # Average precision VOC07.
            v = metrics.average_precision_voc07(prec, rec)
            summary_name = 'AP_VOC07/%s' % c
            op = tf.summary.scalar(summary_name, v)
            # op = tf.Print(op, [v], summary_name)
            #tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
            aps_voc07[c] = v

            # Average precision VOC12.
            v = metrics.average_precision_voc12(prec, rec)
            summary_name = 'AP_VOC12/%s' % c
            op = tf.summary.scalar(summary_name, v)
            # op = tf.Print(op, [v], summary_name)
            #tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
            aps_voc12[c] = v

        # Mean average precision VOC07.
        summary_name = 'AP_VOC07/mAP'
        mAP = tf.add_n(list(aps_voc07.values())) / len(aps_voc07)
        mAP = tf.Print(mAP, [mAP], summary_name)
        op = tf.summary.scalar(summary_name, mAP)
        #tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

        # Mean average precision VOC12.
        summary_name = 'AP_VOC12/mAP'
        mAP = tf.add_n(list(aps_voc12.values())) / len(aps_voc12)
        mAP = tf.Print(mAP, [mAP], summary_name)
        op = tf.summary.scalar(summary_name, mAP)
        #tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)

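        # Build a flat label tensor per class so labels, scores and boxes can be
        # concatenated and drawn on the image by save_image_with_bbox below.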
        labels_list = []
        for k, v in selected_scores.items():
            labels_list.append(tf.ones_like(v, tf.int32) * k)
        save_image_op = tf.py_func(
            save_image_with_bbox,
            [
                org_image,
                tf.concat(labels_list, axis=0),
                #tf.convert_to_tensor(list(selected_scores.keys()), dtype=tf.int64),
                tf.concat(list(selected_scores.values()), axis=0),
                tf.concat(list(selected_bboxes.values()), axis=0)
            ],
            tf.int64,
            stateful=True)

        #dict_metrics['save_image_with_bboxes'] = save_image_count#tf.tuple([save_image_count, save_image_count_update_op])
    # for i, v in enumerate(l_precisions):
    #     summary_name = 'eval/precision_at_recall_%.2f' % LIST_RECALLS[i]
    #     op = tf.summary.scalar(summary_name, v, collections=[])
    #     op = tf.Print(op, [v], summary_name)
    #     tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
    # print(dict_metrics)
    # metric_names = dict_metrics.keys()
    # value_ops, update_ops = zip(*dict_metrics.values())
    # return dict(zip(metric_names, update_ops)), save_image_op

    return dict_metrics, save_image_op
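
The dict returned above maps metric names to (value, update) pairs from the streaming TP/FP metrics, and save_image_op dumps the selected detections drawn on the image. Below is a minimal sketch of how the function might be driven in a TF1 evaluation loop; the tensors passed in and num_eval_batches are assumptions standing in for whatever the real input pipeline and detection network provide:

# Hypothetical driver: every tensor handed to bboxes_eval and num_eval_batches
# are assumed to come from the surrounding input pipeline / network.
dict_metrics, save_image_op = bboxes_eval(
    org_image, image_shape, bbox_img, cls_pred_logits, bboxes_pred,
    glabels_raw, gbboxes_raw, isdifficult, num_classes)

value_ops = {name: pair[0] for name, pair in dict_metrics.items()}
update_ops = [pair[1] for pair in dict_metrics.values()]

with tf.Session() as sess:
    # Streaming metrics keep their accumulators in local variables.
    sess.run(tf.local_variables_initializer())
    for _ in range(num_eval_batches):
        sess.run([update_ops, save_image_op])
    final_values = sess.run(value_ops)
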
Example #2
def main(_):
    with tf.Graph().as_default():
        image_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
        shape_input = tf.placeholder(tf.int32, shape=(2, ))

        features = common_preprocessing.light_head_preprocess_for_test(
            image_input, [FLAGS.train_image_size] * 2,
            data_format=('NCHW'
                         if FLAGS.data_format == 'channels_first' else 'NHWC'))

        features = tf.expand_dims(features, axis=0)

        anchor_creator = anchor_manipulator.AnchorCreator(
            [FLAGS.train_image_size] * 2,
            layers_shapes=[(30, 30)],
            anchor_scales=[[0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]],
            extra_anchor_scales=[[0.1]],
            anchor_ratios=[[1., 2., .5]],
            layer_steps=[16])

        all_anchors, num_anchors_list = anchor_creator.get_all_anchors()

        anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(
            all_anchors,
            num_classes=FLAGS.num_classes,
            allowed_borders=None,
            positive_threshold=None,
            ignore_threshold=None,
            prior_scaling=[1., 1., 1., 1.])

        with tf.variable_scope(FLAGS.model_scope,
                               default_name=None,
                               values=[features],
                               reuse=tf.AUTO_REUSE):
            rpn_feat_map, backbone_feat = xception_body.XceptionBody(
                features,
                FLAGS.num_classes,
                is_training=False,
                data_format=FLAGS.data_format)
            #rpn_feat_map = tf.Print(rpn_feat_map,[tf.shape(rpn_feat_map), rpn_feat_map,backbone_feat])
            rpn_cls_score, rpn_bbox_pred = xception_body.get_rpn(
                rpn_feat_map, num_anchors_list[0], False, FLAGS.data_format,
                'rpn_head')

            large_sep_feature = xception_body.large_sep_kernel(
                backbone_feat, 256, 10 * 7 * 7, False, FLAGS.data_format,
                'large_sep_feature')

            if FLAGS.data_format == 'channels_first':
                rpn_cls_score = tf.transpose(rpn_cls_score, [0, 2, 3, 1])
                rpn_bbox_pred = tf.transpose(rpn_bbox_pred, [0, 2, 3, 1])

            rpn_cls_score = tf.reshape(rpn_cls_score, [-1, 2])
            rpn_object_score = tf.nn.softmax(rpn_cls_score)[:, -1]

            rpn_object_score = tf.reshape(rpn_object_score, [1, -1])
            rpn_location_pred = tf.reshape(rpn_bbox_pred, [1, -1, 4])

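            # Decode the RPN location predictions against the anchors into absolute box coordinates.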
            rpn_bboxes_pred = anchor_encoder_decoder.decode_all_anchors(
                [rpn_location_pred], squeeze_inner=True)[0]

            proposals_bboxes = xception_body.get_proposals(
                rpn_object_score, rpn_bboxes_pred, None,
                FLAGS.rpn_pre_nms_top_n, FLAGS.rpn_post_nms_top_n,
                FLAGS.rpn_nms_thres, FLAGS.rpn_min_size, False,
                FLAGS.data_format)

            cls_score, bboxes_reg = xception_body.get_head(
                large_sep_feature,
                lambda input_, bboxes_, grid_width_, grid_height_: ps_roi_align(
                    input_, bboxes_, grid_width_, grid_height_, pool_method),
                7, 7, None, proposals_bboxes, FLAGS.num_classes, False, False,
                0, FLAGS.data_format, 'final_head')

            head_bboxes_pred = anchor_encoder_decoder.ext_decode_rois(
                proposals_bboxes,
                bboxes_reg,
                head_prior_scaling=[1., 1., 1., 1.])

            head_cls_score = tf.reshape(cls_score, [-1, FLAGS.num_classes])
            head_cls_score = tf.nn.softmax(head_cls_score)
            head_bboxes_pred = tf.reshape(head_bboxes_pred, [-1, 4])

            with tf.device('/device:CPU:0'):
                selected_scores, selected_bboxes = eval_helper.tf_bboxes_select(
                    [head_cls_score], [head_bboxes_pred],
                    FLAGS.select_threshold,
                    FLAGS.num_classes,
                    scope='xdet_v2_select')

                selected_bboxes = eval_helper.bboxes_clip(
                    tf.constant([0., 0., 1., 1.]), selected_bboxes)
                selected_scores, selected_bboxes = eval_helper.filter_boxes(
                    selected_scores,
                    selected_bboxes,
                    0.03,
                    shape_input, [FLAGS.train_image_size] * 2,
                    keep_top_k=FLAGS.nms_topk * 2)

                # Resize bboxes to original image shape.
                selected_bboxes = eval_helper.bboxes_resize(
                    tf.constant([0., 0., 1., 1.]), selected_bboxes)

                selected_scores, selected_bboxes = eval_helper.bboxes_sort(
                    selected_scores, selected_bboxes, top_k=FLAGS.nms_topk * 2)

                # Apply NMS algorithm.
                selected_scores, selected_bboxes = eval_helper.bboxes_nms_batch(
                    selected_scores,
                    selected_bboxes,
                    nms_threshold=FLAGS.nms_threshold,
                    keep_top_k=FLAGS.nms_topk)

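                # Flatten the per-class score/bbox dicts into single tensors for drawing.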
                labels_list = []
                for k, v in selected_scores.items():
                    labels_list.append(tf.ones_like(v, tf.int32) * k)
                all_labels = tf.concat(labels_list, axis=0)
                all_scores = tf.concat(list(selected_scores.values()), axis=0)
                all_bboxes = tf.concat(list(selected_bboxes.values()), axis=0)

        saver = tf.train.Saver()
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)

            saver.restore(sess, FLAGS.checkpoint_path)

            np_image = imread('./demo/test.jpg')
            labels_, scores_, bboxes_ = sess.run(
                [all_labels, all_scores, all_bboxes],
                feed_dict={
                    image_input: np_image,
                    shape_input: np_image.shape[:-1]
                })

            img_to_draw = draw_toolbox.bboxes_draw_on_img(np_image,
                                                          labels_,
                                                          scores_,
                                                          bboxes_,
                                                          thickness=2)
            imsave(os.path.join(FLAGS.debug_dir, 'test_out.jpg'), img_to_draw)