Code Example #1
def inference():
    with tf.Graph().as_default():

        img_plac = tf.placeholder(shape=[None, None, 3], dtype=tf.uint8)

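        # cast to float and subtract the per-channel ImageNet means
        # (BGR order, matching cv2.imread) before the in-graph resize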
        img_tensor = tf.cast(img_plac, tf.float32) - tf.constant(
            [103.939, 116.779, 123.68])
        img_batch = image_preprocess.short_side_resize_for_inference_data(
            img_tensor,
            target_shortside_len=cfgs.SHORT_SIDE_LEN,
            is_resize=True)

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
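        # NOTE: the backbone is built with is_training=True even though this is
        # inference; with TF-slim that means batch norm uses batch statistics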
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)
        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether to remove anchors outside the image
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            img_batch=img_batch,
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            gtboxes_and_label=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,  # show detections with score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            use_dropout=False,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

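            # get_imgs() (project helper) loads the test images and their file names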
            imgs, img_names = get_imgs()
            for i, img in enumerate(imgs):

                start = time.time()

                _img_batch, _fast_rcnn_decode_boxes, _fast_rcnn_score, _detection_category = \
                    sess.run([img_batch, fast_rcnn_decode_boxes, fast_rcnn_score, detection_category],
                             feed_dict={img_plac: img})
                end = time.time()

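                # drop the batch dimension; the image is still mean-subtracted here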
                img_np = np.squeeze(_img_batch, axis=0)

                img_np = draw_box_cv(img_np,
                                     boxes=_fast_rcnn_decode_boxes,
                                     labels=_detection_category,
                                     scores=_fast_rcnn_score)
                mkdir(cfgs.INFERENCE_SAVE_PATH)
                cv2.imwrite(
                    cfgs.INFERENCE_SAVE_PATH +
                    '/{}_fpn.jpg'.format(img_names[i]), img_np)
                view_bar('{} cost {}s'.format(img_names[i], (end - start)),
                         i + 1, len(imgs))

            coord.request_stop()
            coord.join(threads)
Code Example #2
def test(args):
    with tf.Graph().as_default():

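        # unlike Example #1, preprocessing happens outside the graph: the
        # placeholder takes an already prepared [1, H, W, 3] float batch
        # (see the no_tensor_pic_prepross call in the session loop below)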
        img_batch = tf.placeholder(shape=[1, None, None, 3], dtype=tf.float32)

        # img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
        #     next_batch(dataset_name=cfgs.DATASET_NAME,
        #                batch_size=cfgs.BATCH_SIZE,
        #                shortside_len=cfgs.SHORT_SIDE_LEN,
        #                is_training=False)

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)

        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether to remove anchors outside the image
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            img_batch=img_batch,
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            gtboxes_and_label=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,  # show detections with score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            use_dropout=False,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()

        # train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer(
            checkpoint_path=args.weights)

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            # coord = tf.train.Coordinator()
            # threads = tf.train.start_queue_runners(sess, coord)
            test_filenames = os.listdir(args.test_dir)
            for i in tqdm(range(args.img_num)):

                # _img_name_batch, _img_batch, _gtboxes_and_label_batch, _fast_rcnn_decode_boxes, \
                #     _fast_rcnn_score, _detection_category \
                #     = sess.run([img_name_batch, img_batch, gtboxes_and_label_batch, fast_rcnn_decode_boxes,
                #                 fast_rcnn_score, detection_category])
                img_test = cv2.imread(args.test_dir + '/' + test_filenames[i])
                img_test, img_test_4dim = no_tensor_pic_prepross(
                    img_test, cfgs.SHORT_SIDE_LEN)

                _fast_rcnn_decode_boxes, \
                    _fast_rcnn_score, _detection_category \
                    = sess.run([fast_rcnn_decode_boxes,
                                fast_rcnn_score, detection_category], feed_dict={img_batch: img_test_4dim})

                # _img_batch = np.squeeze(_img_batch, axis=0)
                # print(_detection_category)
                _img_batch_fpn = help_utils.draw_box_cv(
                    img_test,
                    boxes=_fast_rcnn_decode_boxes,
                    labels=_detection_category,
                    scores=_fast_rcnn_score)
                mkdir(cfgs.TEST_SAVE_PATH)
                cv2.imwrite(
                    cfgs.TEST_SAVE_PATH +
                    '/{}_fpn.jpg'.format(str(test_filenames[i])),
                    _img_batch_fpn)
Code Example #3
def test(img_num):
    with tf.Graph().as_default():

        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
            next_batch(dataset_name=cfgs.DATASET_NAME,
                       batch_size=cfgs.BATCH_SIZE,
                       shortside_len=cfgs.SHORT_SIDE_LEN,
                       is_training=False)

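        # convert the ground-truth quadrilaterals to rotated boxes (6 values per
        # box, hence the [-1, 6] reshape) and derive each box's head quadrant
        # via numpy py_funcs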
        gtboxes_and_label, head = get_head(
            tf.squeeze(gtboxes_and_label_batch, 0))
        gtboxes_and_label = tf.py_func(back_forward_convert,
                                       inp=[gtboxes_and_label],
                                       Tout=tf.float32)
        gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])
        head_quadrant = tf.py_func(get_head_quadrant,
                                   inp=[head, gtboxes_and_label],
                                   Tout=tf.float32)
        head_quadrant = tf.reshape(head_quadrant, [-1, 1])

        gtboxes_and_label_minAreaRectangle = get_horizen_minAreaRectangle(
            gtboxes_and_label)

        gtboxes_and_label_minAreaRectangle = tf.reshape(
            gtboxes_and_label_minAreaRectangle, [-1, 5])

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)

        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether to remove anchors outside the image
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            img_batch=img_batch,
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=None,
            gtboxes_and_label_minAreaRectangle=gtboxes_and_label_minAreaRectangle,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,  # show detections with score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,  # iou > 0.5 is positive, iou < 0.5 is negative
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL,
            head_quadrant=head_quadrant)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category, \
        fast_rcnn_decode_boxes_rotate, fast_rcnn_score_rotate, fast_rcnn_head_quadrant, \
        num_of_objects_rotate, detection_category_rotate = fast_rcnn.fast_rcnn_predict()

        # train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            for i in range(img_num):

                start = time.time()

                _img_name_batch, _img_batch, _gtboxes_and_label, _gtboxes_and_label_minAreaRectangle, _head_quadrant,  \
                _fast_rcnn_decode_boxes, _fast_rcnn_score, _detection_category, _fast_rcnn_decode_boxes_rotate, \
                _fast_rcnn_score_rotate, _fast_rcnn_head_quadrant, _detection_category_rotate \
                    = sess.run([img_name_batch, img_batch, gtboxes_and_label, gtboxes_and_label_minAreaRectangle, head_quadrant,
                                fast_rcnn_decode_boxes, fast_rcnn_score, detection_category, fast_rcnn_decode_boxes_rotate,
                                fast_rcnn_score_rotate, fast_rcnn_head_quadrant, detection_category_rotate])
                end = time.time()

                _img_batch = np.squeeze(_img_batch, axis=0)

                _img_batch_fpn_horizontal = help_utils.draw_box_cv(
                    _img_batch,
                    boxes=_fast_rcnn_decode_boxes,
                    labels=_detection_category,
                    scores=_fast_rcnn_score)

                _img_batch_fpn_rotate = help_utils.draw_rotate_box_cv(
                    _img_batch,
                    boxes=_fast_rcnn_decode_boxes_rotate,
                    labels=_detection_category_rotate,
                    scores=_fast_rcnn_score_rotate,
                    head=np.argmax(_fast_rcnn_head_quadrant, axis=1))
                mkdir(cfgs.TEST_SAVE_PATH)
                cv2.imwrite(
                    cfgs.TEST_SAVE_PATH +
                    '/{}_horizontal_fpn.jpg'.format(str(_img_name_batch[0])),
                    _img_batch_fpn_horizontal)
                cv2.imwrite(
                    cfgs.TEST_SAVE_PATH +
                    '/{}_rotate_fpn.jpg'.format(str(_img_name_batch[0])),
                    _img_batch_fpn_rotate)

                temp_label_horizontal = np.reshape(
                    _gtboxes_and_label[:, -1:], [-1]).astype(np.int64)
                temp_label_rotate = np.reshape(
                    _gtboxes_and_label[:, -1:], [-1]).astype(np.int64)

                _img_batch_gt_horizontal = help_utils.draw_box_cv(
                    _img_batch,
                    boxes=_gtboxes_and_label_minAreaRectangle[:, :-1],
                    labels=temp_label_horizontal,
                    scores=None)

                _img_batch_gt_rotate = help_utils.draw_rotate_box_cv(
                    _img_batch,
                    boxes=_gtboxes_and_label[:, :-1],
                    labels=temp_label_rotate,
                    scores=None,
                    head=np.reshape(_head_quadrant, [-1]))

                cv2.imwrite(
                    cfgs.TEST_SAVE_PATH +
                    '/{}_horizontal_gt.jpg'.format(str(_img_name_batch[0])),
                    _img_batch_gt_horizontal)
                cv2.imwrite(
                    cfgs.TEST_SAVE_PATH +
                    '/{}_rotate_gt.jpg'.format(str(_img_name_batch[0])),
                    _img_batch_gt_rotate)

                view_bar(
                    '{} image cost {}s'.format(str(_img_name_batch[0]),
                                               (end - start)), i + 1, img_num)

            coord.request_stop()
            coord.join(threads)
Code Example #4
def train():
    with tf.Graph().as_default():
        with tf.name_scope('get_batch'):
            img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
                next_batch(dataset_name=cfgs.DATASET_NAME,
                           batch_size=cfgs.BATCH_SIZE,
                           shortside_len=cfgs.SHORT_SIDE_LEN,
                           is_training=True)
            gtboxes_and_label = tf.py_func(
                back_forward_convert,
                inp=[tf.squeeze(gtboxes_and_label_batch, 0)],
                Tout=tf.float32)
            gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])

            gtboxes_and_label_minAreaRectangle = get_horizen_minAreaRectangle(
                gtboxes_and_label)

            gtboxes_and_label_minAreaRectangle = tf.reshape(
                gtboxes_and_label_minAreaRectangle, [-1, 5])

        with tf.name_scope('draw_gtboxes'):
            gtboxes_in_img = draw_box_with_color(
                img_batch,
                tf.reshape(gtboxes_and_label_minAreaRectangle,
                           [-1, 5])[:, :-1],
                text=tf.shape(gtboxes_and_label_minAreaRectangle)[0])

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)

        # ***********************************************************************************************
        # *                                            rpn                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=gtboxes_and_label_minAreaRectangle,
            is_training=True,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,  # iou >= 0.7 is positive, iou < 0.3 is negative
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether to remove anchors outside the image
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        rpn_location_loss, rpn_classification_loss = rpn.rpn_losses()
        rpn_total_loss = rpn_classification_loss + rpn_location_loss

        with tf.name_scope('draw_proposals'):
            # score > 0.5 is object
            rpn_object_boxes_indices = tf.reshape(
                tf.where(tf.greater(rpn_proposals_scores, 0.5)), [-1])
            rpn_object_boxes = tf.gather(rpn_proposals_boxes,
                                         rpn_object_boxes_indices)

            rpn_proposals_object_boxes_in_img = draw_box_with_color(
                img_batch,
                rpn_object_boxes,
                text=tf.shape(rpn_object_boxes)[0])
            rpn_proposals_boxes_in_img = draw_box_with_color(
                img_batch,
                rpn_proposals_boxes,
                text=tf.shape(rpn_proposals_boxes)[0])
        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************

        fast_rcnn = build_fast_rcnn.FastRCNN(
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=gtboxes_and_label,
            gtboxes_and_label_minAreaRectangle=gtboxes_and_label_minAreaRectangle,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,  # show detections with score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,  # iou > 0.5 is positive, iou < 0.5 is negative
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=True,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()
        fast_rcnn_location_loss, fast_rcnn_classification_loss = fast_rcnn.fast_rcnn_loss()
        fast_rcnn_total_loss = fast_rcnn_location_loss + fast_rcnn_classification_loss

        with tf.name_scope('draw_boxes_with_categories'):
            fast_rcnn_predict_boxes_in_imgs = draw_boxes_with_categories(
                img_batch=img_batch,
                boxes=fast_rcnn_decode_boxes,
                labels=detection_category,
                scores=fast_rcnn_score)

        # train
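        # get_total_loss() sums every loss in the tf.GraphKeys.LOSSES collection
        # (RPN + Fast R-CNN) plus the weight-decay regularization losses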
        total_loss = slim.losses.get_total_loss()

        global_step = slim.get_or_create_global_step()

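        # step decay: the learning rate is divided by 10 at 20k steps and again at 40k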
        lr = tf.train.piecewise_constant(
            global_step,
            boundaries=[np.int64(20000), np.int64(40000)],
            values=[cfgs.LR, cfgs.LR / 10, cfgs.LR / 100])
        tf.summary.scalar('lr', lr)
        optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)

        train_op = slim.learning.create_train_op(total_loss, optimizer, global_step)
        # train_op = optimizer.minimize(second_classification_loss, global_step)

        # ***********************************************************************************************
        # *                                          Summary                                            *
        # ***********************************************************************************************
        # ground truth and predict
        tf.summary.image('img/gtboxes', gtboxes_in_img)
        tf.summary.image('img/faster_rcnn_predict',
                         fast_rcnn_predict_boxes_in_imgs)
        # rpn loss and image
        tf.summary.scalar('rpn/rpn_location_loss', rpn_location_loss)
        tf.summary.scalar('rpn/rpn_classification_loss',
                          rpn_classification_loss)
        tf.summary.scalar('rpn/rpn_total_loss', rpn_total_loss)

        tf.summary.scalar('fast_rcnn/fast_rcnn_location_loss',
                          fast_rcnn_location_loss)
        tf.summary.scalar('fast_rcnn/fast_rcnn_classification_loss',
                          fast_rcnn_classification_loss)
        tf.summary.scalar('fast_rcnn/fast_rcnn_total_loss',
                          fast_rcnn_total_loss)

        tf.summary.scalar('loss/total_loss', total_loss)

        tf.summary.image('rpn/rpn_all_boxes', rpn_proposals_boxes_in_img)
        tf.summary.image('rpn/rpn_object_boxes',
                         rpn_proposals_object_boxes_in_img)
        # learning_rate
        tf.summary.scalar('learning_rate', lr)

        summary_op = tf.summary.merge_all()
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()
        saver = tf.train.Saver(max_to_keep=10)

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            summary_path = os.path.join(FLAGS.summary_path, cfgs.VERSION)
            mkdir(summary_path)
            summary_writer = tf.summary.FileWriter(summary_path,
                                                   graph=sess.graph)

            for step in range(cfgs.MAX_ITERATION):
                training_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                              time.localtime(time.time()))
                start = time.time()

                _global_step, _img_name_batch, _rpn_location_loss, _rpn_classification_loss, \
                _rpn_total_loss, _fast_rcnn_location_loss, _fast_rcnn_classification_loss, \
                _fast_rcnn_total_loss, _total_loss, _ = \
                    sess.run([global_step, img_name_batch, rpn_location_loss, rpn_classification_loss,
                              rpn_total_loss, fast_rcnn_location_loss, fast_rcnn_classification_loss,
                              fast_rcnn_total_loss, total_loss, train_op])
                end = time.time()

                if step % 10 == 0:
                    print(""" {}: step{}    image_name:{} |\t
                                rpn_loc_loss:{} |\t rpn_cla_loss:{} |\t rpn_total_loss:{} |
                                fast_rcnn_loc_loss:{} |\t fast_rcnn_cla_loss:{} |\t fast_rcnn_total_loss:{} |
                                total_loss:{} |\t pre_cost_time:{}s""" \
                          .format(training_time, _global_step, str(_img_name_batch[0]), _rpn_location_loss,
                                  _rpn_classification_loss, _rpn_total_loss, _fast_rcnn_location_loss,
                                  _fast_rcnn_classification_loss, _fast_rcnn_total_loss, _total_loss,
                                  (end - start)))

                if step % 50 == 0:
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, _global_step)
                    summary_writer.flush()

                if (step > 0 and step % 1000 == 0) or (step == cfgs.MAX_ITERATION - 1):
                    save_dir = os.path.join(FLAGS.trained_checkpoint,
                                            cfgs.VERSION)
                    mkdir(save_dir)

                    save_ckpt = os.path.join(
                        save_dir, 'voc_' + str(_global_step) + 'model.ckpt')
                    saver.save(sess, save_ckpt)
                    print('weights have been saved')

            coord.request_stop()
            coord.join(threads)
Code Example #5
def detect_img(file_paths, des_folder, det_th, h_len, w_len, show_res=False):
    with tf.Graph().as_default():

        img_plac = tf.placeholder(shape=[None, None, 3], dtype=tf.uint8)

        img_tensor = tf.cast(img_plac, tf.float32) - tf.constant(
            [103.939, 116.779, 123.68])
        img_batch = image_preprocess.short_side_resize_for_inference_data(
            img_tensor,
            target_shortside_len=cfgs.SHORT_SIDE_LEN,
            is_resize=False)

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)
        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether to remove anchors outside the image
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=None,
            gtboxes_and_label_minAreaRectangle=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=det_th,  # show detections with score >= det_th
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,  # iou > 0.5 is positive, iou < 0.5 is negative
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            for img_path in file_paths:
                start = timer()
                # gdal.AllRegister()
                # ds = gdal.Open(img_path, gdalconst.GA_ReadOnly)
                # if ds is None:
                #     print("Image %s open failed!" % img_path)
                #     sys.exit()
                img = cv2.imread(img_path)

                box_res = []
                label_res = []
                score_res = []
                # imgH = ds.RasterYSize
                # imgW = ds.RasterXSize
                imgH = img.shape[0]
                imgW = img.shape[1]
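                # slide a non-overlapping h_len x w_len window over the large
                # image and run detection on each crop (crops < 10 px are skipped)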
                for hh in range(0, imgH, h_len):
                    h_size = min(h_len, imgH - hh)
                    if h_size < 10:
                        break
                    for ww in range(0, imgW, w_len):
                        w_size = min(w_len, imgW - ww)
                        if w_size < 10:
                            break

                        # src_img = ds.ReadAsArray(ww, hh, w_size, h_size)
                        src_img = img[hh:(hh + h_size), ww:(ww + w_size), :]
                        # if len(src_img.shape) == 2:
                        #     src_img = cv2.cvtColor(src_img, cv2.COLOR_GRAY2RGB)
                        # else:
                        #     src_img = chw2hwc(src_img)

                        boxes, labels, scores = sess.run(
                            [
                                fast_rcnn_decode_boxes, detection_category,
                                fast_rcnn_score
                            ],
                            feed_dict={img_plac: src_img})

                        if show_res:
                            visualize_detection(src_img, boxes, scores)
                        if len(boxes) > 0:
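                            # boxes are (ymin, xmin, ymax, xmax) relative to the
                            # crop; shift them back into full-image coordinates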
                            for ii in range(len(boxes)):
                                box = boxes[ii]
                                box[0] = box[0] + hh
                                box[1] = box[1] + ww
                                box[2] = box[2] + hh
                                box[3] = box[3] + ww
                                box_res.append(box)
                                label_res.append(labels[ii])
                                score_res.append(scores[ii])
                # ds = None
                time_elapsed = timer() - start
                print("{} detection time : {:.4f} sec".format(
                    img_path.split('/')[-1].split('.')[0], time_elapsed))

                # if target_name == 'aircraft':
                # img = cv2.imread(img_path)
                # if len(img.shape) == 2:
                #     img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
                # elif len(img.shape) == 3:
                #     img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
                #     img[:, :, 0] = img[:, :, 1] = img[:, :, 2] = img_gray
                mkdir(des_folder)
                # subtract the means from the input image (as in the commented-out
                # call in Example #7), not from the drawn result
                img_np = draw_box_cv(np.array(img, np.float32) -
                                     np.array([103.939, 116.779, 123.68]),
                                     boxes=np.array(box_res),
                                     labels=np.array(label_res),
                                     scores=np.array(score_res))
                cv2.imwrite(
                    des_folder + '/{}_fpn.jpg'.format(
                        img_path.split('/')[-1].split('.')[0]), img_np)
                # clip_obj_imgs(src_img, box_res, label_res, score_res, des_folder)
                # print(img_path)
                # det_xml_path =img_path.replace(".tif", ".det.xml")
                # obj_to_det_xml(img_path, box_res, label_res, score_res, det_xml_path)

            coord.request_stop()
            coord.join(threads)
Code Example #6
def eval_dict_convert(img_num):
    with tf.Graph().as_default():

        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
            next_batch(dataset_name=cfgs.DATASET_NAME,
                       batch_size=cfgs.BATCH_SIZE,
                       shortside_len=cfgs.SHORT_SIDE_LEN,
                       is_training=False)

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)

        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=True,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether to remove anchors outside the image
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            img_batch=img_batch,
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            gtboxes_and_label=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,  # show detections with score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            use_dropout=False,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()

        # train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

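            # build two dicts keyed by image name (ground-truth boxes and
            # per-class predictions) that are pickled below for offline evaluation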
            gtbox_dict = {}
            predict_dict = {}

            for i in range(img_num):

                start = time.time()

                _img_name_batch, _img_batch, _gtboxes_and_label_batch, _fast_rcnn_decode_boxes, \
                _fast_rcnn_score, _detection_category \
                    = sess.run([img_name_batch, img_batch, gtboxes_and_label_batch, fast_rcnn_decode_boxes,
                                fast_rcnn_score, detection_category])
                end = time.time()

                # gtboxes convert dict
                gtbox_dict[str(_img_name_batch[0])] = []
                predict_dict[str(_img_name_batch[0])] = []

                for box in _gtboxes_and_label_batch[0]:
                    bbox_dict = {}
                    bbox_dict['bbox'] = np.array(box[:-1], np.float64)
                    bbox_dict['name'] = LABEl_NAME_MAP[int(box[-1])]
                    gtbox_dict[str(_img_name_batch[0])].append(bbox_dict)

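                # group this image's predictions by class, skipping the
                # background class; each entry stores the boxes with the score
                # appended as the last column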
                for label in NAME_LABEL_MAP.keys():
                    if label == 'back_ground':
                        continue
                    temp_dict = {}
                    temp_dict['name'] = label

                    ind = np.where(
                        _detection_category == NAME_LABEL_MAP[label])[0]
                    temp_boxes = _fast_rcnn_decode_boxes[ind]
                    temp_score = np.reshape(_fast_rcnn_score[ind], [-1, 1])
                    temp_dict['bbox'] = np.array(
                        np.concatenate([temp_boxes, temp_score], axis=1),
                        np.float64)
                    predict_dict[str(_img_name_batch[0])].append(temp_dict)

                view_bar(
                    '{} image cost {}s'.format(str(_img_name_batch[0]),
                                               (end - start)), i + 1, img_num)

            # pickle requires binary mode in Python 3
            fw1 = open('gtboxes_dict.pkl', 'wb')
            fw2 = open('predict_dict.pkl', 'wb')
            pickle.dump(gtbox_dict, fw1)
            pickle.dump(predict_dict, fw2)
            fw1.close()
            fw2.close()
            coord.request_stop()
            coord.join(threads)
Code Example #7
def detect_img(file_paths,
               des_folder,
               paramPath,
               bakpath,
               det_th,
               h_len,
               w_len,
               h_overlap,
               w_overlap,
               file_ext,
               show_res=False):
    with tf.Graph().as_default():

        img_plac = tf.placeholder(shape=[None, None, 3], dtype=tf.uint8)

        img_tensor = tf.cast(img_plac, tf.float32) - tf.constant(
            [103.939, 116.779, 123.68])
        img_batch = image_preprocess.short_side_resize_for_inference_data(
            img_tensor,
            target_shortside_len=cfgs.SHORT_SIDE_LEN,
            is_resize=False)

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)
        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether to remove anchors outside the image
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=None,
            gtboxes_and_label_minAreaRectangle=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=det_th,  # show detections with score >= det_th
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,  # iou > 0.5 is positive, iou < 0.5 is negative
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL,
            head_quadrant=None)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category, \
        fast_rcnn_decode_boxes_rotate, fast_rcnn_score_rotate, fast_rcnn_head_quadrant, \
        num_of_objects_rotate, detection_category_rotate = fast_rcnn.fast_rcnn_predict()

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            obstacle_points, obstacle_labels = read_vif('./sp_whole_reg.vif')
            while True:

                alldemo = os.listdir(file_paths)
                file_names = []
                for singledemo in alldemo:
                    singlepath = os.path.join(file_paths, singledemo)
                    file_names.append(singlepath)

                for img_path in file_names:
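                    # skip .jpg files that still have a .lock sidecar file
                    # (presumably still being written by another process)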
                    if img_path.endswith('.jpg'):  # and f not in fs_found:
                        l_f = img_path + '.lock'
                        if os.path.exists(l_f):
                            time.sleep(0.01)
                            continue
                    # try:
                    start = timer()
                    img = cv2.imread(img_path)

                    box_res = []
                    label_res = []
                    score_res = []
                    box_res_rotate = []
                    label_res_rotate = []
                    score_res_rotate = []
                    head_rotate = []
                    imgH = img.shape[0]
                    imgW = img.shape[1]
                    for hh in range(0, imgH, h_len - h_overlap):
                        h_size = min(h_len, imgH - hh)
                        if h_size < 10:
                            break
                        for ww in range(0, imgW, w_len - w_overlap):
                            w_size = min(w_len, imgW - ww)
                            if w_size < 10:
                                break

                            src_img = img[hh:(hh + h_size),
                                          ww:(ww + w_size), :]

                            # boxes, labels, scores = sess.run([fast_rcnn_decode_boxes, detection_category, fast_rcnn_score],
                            #                                  feed_dict={img_plac: src_img})

                            boxes_rotate, labels_rotate, scores_rotate, _fast_rcnn_head_quadrant = \
                                sess.run([fast_rcnn_decode_boxes_rotate, detection_category_rotate,
                                          fast_rcnn_score_rotate,
                                          fast_rcnn_head_quadrant],
                                         feed_dict={img_plac: src_img})

                            # if len(boxes) > 0:
                            #     for ii in range(len(boxes)):
                            #         box = boxes[ii]
                            #         box[0] = box[0] + hh
                            #         box[1] = box[1] + ww
                            #         box[2] = box[2] + hh
                            #         box[3] = box[3] + ww
                            #         box_res.append(box)
                            #         label_res.append(labels[ii])
                            #         score_res.append(scores[ii])
                            if len(boxes_rotate) > 0:
                                for ii in range(len(boxes_rotate)):
                                    box_rotate = boxes_rotate[ii]
                                    box_rotate[0] = box_rotate[0] + hh
                                    box_rotate[1] = box_rotate[1] + ww
                                    box_res_rotate.append(box_rotate)
                                    label_res_rotate.append(labels_rotate[ii])
                                    score_res_rotate.append(scores_rotate[ii])
                                    head_rotate.append(
                                        _fast_rcnn_head_quadrant[ii])

                    time_elapsed = timer() - start
                    print("{} detection time : {:.4f} sec".format(
                        img_path.split('/')[-1].split('.')[0], time_elapsed))

                    mkdir(des_folder)

                    if len(head_rotate) != 0:
                        # img_np = draw_box_cv(np.array(img, np.float32) - np.array([103.939, 116.779, 123.68]),
                        #                      boxes=np.array(box_res),
                        #                      labels=np.array(label_res),
                        #                      scores=np.array(score_res))
                        img_np_rotate = draw_rotate_box_cv(
                            np.array(img, np.float32) -
                            np.array([103.939, 116.779, 123.68]),
                            boxes=np.array(box_res_rotate),
                            labels=np.array(label_res_rotate),
                            scores=np.array(score_res_rotate),
                            head=np.argmax(head_rotate, axis=1))

                        geo_points = get_points(box_res_rotate,
                                                np.argmax(head_rotate, axis=1))
                        image_name = img_path.split('/')[-1]
                        xml_path_1 = os.path.join(des_folder,
                                                  '1_' + image_name).replace(
                                                      file_ext, ".xml")
                        param_path = os.path.join(paramPath,
                                                  'UAV_' + image_name).replace(
                                                      file_ext, ".param")
                        x_tr, y_tr = get_param(param_path)
                        obstacle_left, obstacle_labels = filter_obstacle(
                            obstacle_points, imgH, imgW, x_tr, y_tr)

                        ######################################################
                        # obstacle_left = []
                        # temp = np.array([[2233, 1013], [2196, 980], [2215, 959], [2252, 993]])
                        # for coord in temp:
                        #     coord_convet = convert_coordinate(coord, imgH, imgW, x_tr, y_tr)
                        #     obstacle_left.extend(coord_convet)
                        # geo_points, obstacle_labels = filter_obstacle(np.array(geo_points)[:, :8], imgH, imgW, x_tr, y_tr)
                        ######################################################

                        detect_res, label_res = get_detect_res(
                            obstacle_left, obstacle_labels, geo_points,
                            label_res_rotate, imgH, imgW, x_tr, y_tr)

                        # writer_XML(xml_name, geo_points, label_res, imgW, imgH)
                        writer_XML2(xml_path_1, detect_res, label_res)
                        shutil.move(img_path,
                                    os.path.join(bakpath, image_name))
                        # cv2.imwrite(des_folder + '/{}_horizontal_fpn.jpg'.format(img_path.split('/')[-1].split('.')[0]), img_np)
                        cv2.imwrite(
                            des_folder + '/{}_rotate_fpn.jpg'.format(
                                img_path.split('/')[-1].split('.')[0]),
                            img_np_rotate)

                        final_points = []
                        final_labels = []
                        for cls_id in range(3):  # 'cls_id' avoids shadowing the built-in 'type'
                            indx = np.where(np.equal(label_res_rotate, cls_id))[0]
                            if len(indx) != 0:
                                box_res_rotate_ = np.array(
                                    box_res_rotate)[indx]
                                label_res_rotate_ = np.array(
                                    label_res_rotate)[indx]
                                head_rotate_ = np.array(
                                    np.argmax(head_rotate, axis=1))[indx]
                                angles_ = get_angles(box_res_rotate_[:, 4],
                                                     head_rotate_)
                                convex_points_, center_point_, angle_ = get_convex_points(
                                    box_res_rotate_[:, :2], angles_)
                                head_ = get_head(center_point_, angle_)
                                all_points = []
                                for ii in box_res_rotate_:
                                    all_points.extend(
                                        convert_coordinate(
                                            ii, imgH, imgW, x_tr, y_tr))
                                all_points.extend(
                                    convert_coordinate(center_point_, imgH,
                                                       imgW, x_tr, y_tr))
                                all_points.extend(
                                    convert_coordinate(head_, imgH, imgW, x_tr,
                                                       y_tr))

                                final_points.append(all_points)
                                final_labels.append(cls_id)
                        xml_path_2 = os.path.join(des_folder,
                                                  '2_' + image_name).replace(
                                                      file_ext, ".xml")
                        writer_XML2(xml_path_2, final_points, final_labels)

                    # except:
                    #     print("Get an error, filename: {}".format(img_path))
            coord.request_stop()
            coord.join(threads)
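
The tiling arithmetic in the loop above is worth isolating: windows of h_len x w_len slide with a stride of window length minus overlap, and each tile's rotated boxes are shifted back by the tile origin before merging. Below is a minimal NumPy sketch of that logic, independent of the TensorFlow graph; iterate_tiles and shift_rotate_boxes are hypothetical helper names, not part of the repository:

import numpy as np

def iterate_tiles(imgH, imgW, h_len, w_len, h_overlap, w_overlap, min_size=10):
    # Yield (hh, ww, h_size, w_size), mirroring the double loop above: the
    # stride is the window length minus the overlap, and edge slivers thinner
    # than min_size pixels are dropped.
    for hh in range(0, imgH, h_len - h_overlap):
        h_size = min(h_len, imgH - hh)
        if h_size < min_size:
            break
        for ww in range(0, imgW, w_len - w_overlap):
            w_size = min(w_len, imgW - ww)
            if w_size < min_size:
                break
            yield hh, ww, h_size, w_size

def shift_rotate_boxes(boxes, hh, ww):
    # Rotated boxes are [y_c, x_c, h, w, theta]; only the center depends on
    # the tile origin, so offset the first two columns as the loop above does.
    boxes = np.asarray(boxes, np.float32).copy()
    if boxes.size:
        boxes[:, 0] += hh
        boxes[:, 1] += ww
    return boxes

for hh, ww, h_size, w_size in iterate_tiles(1200, 1600, 600, 600, 150, 150):
    pass  # crop img[hh:hh + h_size, ww:ww + w_size, :] and run the detector
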
Code Example #8
def eval_ship(img_num):
    with tf.Graph().as_default():

        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
            next_batch(dataset_name=cfgs.DATASET_NAME,
                       batch_size=cfgs.BATCH_SIZE,
                       shortside_len=cfgs.SHORT_SIDE_LEN,
                       is_training=True)

        gtboxes_and_label, head = get_head(
            tf.squeeze(gtboxes_and_label_batch, 0))
        gtboxes_and_label = tf.py_func(back_forward_convert,
                                       inp=[gtboxes_and_label],
                                       Tout=tf.float32)
        gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])
        head_quadrant = tf.py_func(get_head_quadrant,
                                   inp=[head, gtboxes_and_label],
                                   Tout=tf.float32)
        head_quadrant = tf.reshape(head_quadrant, [-1, 1])

        gtboxes_and_label_minAreaRectangle = get_horizen_minAreaRectangle(
            gtboxes_and_label)

        gtboxes_and_label_minAreaRectangle = tf.reshape(
            gtboxes_and_label_minAreaRectangle, [-1, 5])

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)

        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.
            BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether remove anchors outside
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            img_batch=img_batch,
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=None,
            gtboxes_and_label_minAreaRectangle=
            gtboxes_and_label_minAreaRectangle,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.
            FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,
            # show detections which score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.
            FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            # iou>0.5 is positive, iou<0.5 is negative
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL,
            head_quadrant=head_quadrant)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category, \
        fast_rcnn_decode_boxes_rotate, fast_rcnn_score_rotate, fast_rcnn_head_quadrant, \
        num_of_objects_rotate, detection_category_rotate = fast_rcnn.fast_rcnn_predict()

        # train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            gtboxes_horizontal_dict = {}
            gtboxes_rotate_dict = {}

            all_boxes_h = []
            all_boxes_r = []
            all_img_names = []

            for i in range(img_num):

                start = time.time()

                _img_name_batch, _img_batch, _gtboxes_and_label, _gtboxes_and_label_minAreaRectangle, \
                _fast_rcnn_decode_boxes, _fast_rcnn_score, _detection_category, _fast_rcnn_decode_boxes_rotate, \
                _fast_rcnn_score_rotate, _detection_category_rotate \
                    = sess.run([img_name_batch, img_batch, gtboxes_and_label, gtboxes_and_label_minAreaRectangle,
                                fast_rcnn_decode_boxes, fast_rcnn_score, detection_category, fast_rcnn_decode_boxes_rotate,
                                fast_rcnn_score_rotate, detection_category_rotate])
                end = time.time()

                # gtboxes convert dict
                gtboxes_horizontal_dict[str(_img_name_batch[0])] = []
                gtboxes_rotate_dict[str(_img_name_batch[0])] = []

                gtbox_horizontal_list, gtbox_rotate_list = make_dict_packle(
                    _gtboxes_and_label, _gtboxes_and_label_minAreaRectangle)

                xmin, ymin, xmax, ymax = _fast_rcnn_decode_boxes[:, 1], _fast_rcnn_decode_boxes[:, 0], \
                                         _fast_rcnn_decode_boxes[:, 3], _fast_rcnn_decode_boxes[:, 2]
                x_c, y_c, w, h, theta = _fast_rcnn_decode_boxes_rotate[:, 1], _fast_rcnn_decode_boxes_rotate[:, 0], _fast_rcnn_decode_boxes_rotate[:, 3], \
                                        _fast_rcnn_decode_boxes_rotate[:, 2], _fast_rcnn_decode_boxes_rotate[:, 4]
                boxes_h = np.transpose(np.stack([xmin, ymin, xmax, ymax]))
                boxes_r = np.transpose(np.stack([x_c, y_c, w, h, theta]))
                dets_h = np.hstack((_detection_category.reshape(-1, 1),
                                    _fast_rcnn_score.reshape(-1, 1), boxes_h))
                dets_r = np.hstack(
                    (_detection_category_rotate.reshape(-1, 1),
                     _fast_rcnn_score_rotate.reshape(-1, 1), boxes_r))
                all_boxes_h.append(dets_h)
                all_boxes_r.append(dets_r)
                all_img_names.append(str(_img_name_batch[0]))

                gtboxes_horizontal_dict[str(
                    _img_name_batch[0])].extend(gtbox_horizontal_list)
                gtboxes_rotate_dict[str(
                    _img_name_batch[0])].extend(gtbox_rotate_list)

                print(str(_img_name_batch[0]))

                view_bar(
                    '{} image cost {}s'.format(str(_img_name_batch[0]),
                                               (end - start)), i + 1, img_num)

            write_voc_results_file(all_boxes_h, all_img_names,
                                   cfgs.EVALUATE_R_DIR, 0)
            write_voc_results_file(all_boxes_r, all_img_names,
                                   cfgs.EVALUATE_R_DIR, 1)

            fw1 = open('gtboxes_horizontal_dict.pkl', 'wb')
            fw2 = open('gtboxes_rotate_dict.pkl', 'wb')
            pickle.dump(gtboxes_horizontal_dict, fw1)
            pickle.dump(gtboxes_rotate_dict, fw2)
            fw1.close()
            fw2.close()
            coord.request_stop()
            coord.join(threads)
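
Assuming the two pickle files written above, the ground-truth dictionaries can be reloaded for offline evaluation. The exact per-box structure depends on make_dict_packle, which is not shown here, so this is only a loading sketch:

import pickle

with open('gtboxes_horizontal_dict.pkl', 'rb') as f1, \
        open('gtboxes_rotate_dict.pkl', 'rb') as f2:
    gtboxes_horizontal_dict = pickle.load(f1)
    gtboxes_rotate_dict = pickle.load(f2)

# Each key is str(img_name); each value is the list extended per image above.
for img_name in list(gtboxes_horizontal_dict)[:3]:
    print(img_name,
          len(gtboxes_horizontal_dict[img_name]),
          len(gtboxes_rotate_dict[img_name]))
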
Code Example #9
File: demo.py  Project: ouc-ocean-group/PMID2019
def detect_img(file_paths,
               des_folder,
               det_th,
               h_len,
               w_len,
               h_overlap,
               w_overlap,
               show_res=False):
    with tf.Graph().as_default():

        img_plac = tf.placeholder(shape=[None, None, 3], dtype=tf.uint8)

        img_tensor = tf.cast(img_plac, tf.float32) - tf.constant(
            [103.939, 116.779, 123.68])
        img_batch = image_preprocess.short_side_resize_for_inference_data(
            img_tensor,
            target_shortside_len=cfgs.SHORT_SIDE_LEN,
            is_resize=False)

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************

        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)
        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.
            BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether remove anchors outside
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            img_batch=img_batch,
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            gtboxes_and_label=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.
            FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=det_th,
            # show detections which score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.
            FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            use_dropout=False,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            for img_path in file_paths:
                start = timer()
                img = cv2.imread(img_path)

                box_res = []
                label_res = []
                score_res = []

                imgH = img.shape[0]
                imgW = img.shape[1]
                for hh in range(0, imgH, h_len - h_overlap):
                    if imgH - hh - 1 < h_len:
                        hh_ = imgH - h_len
                    else:
                        hh_ = hh
                    for ww in range(0, imgW, w_len - w_overlap):
                        if imgW - ww - 1 < w_len:
                            ww_ = imgW - w_len
                        else:
                            ww_ = ww

                        src_img = img[hh_:(hh_ + h_len), ww_:(ww_ + w_len), :]

                        boxes, labels, scores = sess.run(
                            [
                                fast_rcnn_decode_boxes, detection_category,
                                fast_rcnn_score
                            ],
                            feed_dict={img_plac: src_img})

                        if show_res:
                            visualize_detection(src_img, boxes, scores)
                        if len(boxes) > 0:
                            for ii in range(len(boxes)):
                                box = boxes[ii]
                                box[0] = box[0] + hh_
                                box[1] = box[1] + ww_
                                box[2] = box[2] + hh_
                                box[3] = box[3] + ww_
                                box_res.append(box)
                                label_res.append(labels[ii])
                                score_res.append(scores[ii])

                box_res = np.array(box_res)
                label_res = np.array(label_res)
                score_res = np.array(score_res)

                box_res_, label_res_, score_res_ = [], [], []

                for sub_class in range(1, cfgs.CLASS_NUM + 1):
                    index = np.where(label_res == sub_class)[0]
                    if len(index) == 0:
                        continue
                    tmp_boxes_h = box_res[index]
                    tmp_label_h = label_res[index]
                    tmp_score_h = score_res[index]

                    tmp_boxes_h = np.array(tmp_boxes_h)
                    tmp = np.zeros(
                        [tmp_boxes_h.shape[0], tmp_boxes_h.shape[1] + 1])
                    tmp[:, 0:-1] = tmp_boxes_h
                    tmp[:, -1] = np.array(tmp_score_h)

                    inx = nms.py_cpu_nms(dets=np.array(tmp, np.float32),
                                         thresh=0.7,
                                         max_output_size=500)

                    box_res_.extend(np.array(tmp_boxes_h)[inx])
                    score_res_.extend(np.array(tmp_score_h)[inx])
                    label_res_.extend(np.array(tmp_label_h)[inx])

                time_elapsed = timer() - start
                print("{} detection time : {:.4f} sec".format(
                    img_path.split('/')[-1].split('.')[0], time_elapsed))

                mkdir(des_folder)
                img_np = draw_box_cv(np.array(img, np.float32) -
                                     np.array([103.939, 116.779, 123.68]),
                                     boxes=np.array(box_res_),
                                     labels=np.array(label_res_),
                                     scores=np.array(score_res_))
                cv2.imwrite(
                    des_folder + '/{}_fpn.jpg'.format(
                        img_path.split('/')[-1].split('.')[0]), img_np)

            coord.request_stop()
            coord.join(threads)
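
nms.py_cpu_nms is an external helper, so its exact behavior is not shown here. Below is a minimal greedy IoU-based sketch that is consistent with how it is called above, assuming dets rows are [ymin, xmin, ymax, xmax, score] as elsewhere in this codebase:

import numpy as np

def py_cpu_nms_sketch(dets, thresh, max_output_size):
    # dets: [N, 5] rows of [ymin, xmin, ymax, xmax, score]; returns kept indices.
    y1, x1, y2, x2 = dets[:, 0], dets[:, 1], dets[:, 2], dets[:, 3]
    scores = dets[:, 4]
    areas = (y2 - y1) * (x2 - x1)
    order = scores.argsort()[::-1]          # highest score first
    keep = []
    while order.size > 0 and len(keep) < max_output_size:
        i = order[0]
        keep.append(int(i))
        # intersection of the current box with every remaining box
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        inter = np.maximum(0.0, yy2 - yy1) * np.maximum(0.0, xx2 - xx1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        order = order[1:][iou <= thresh]    # drop heavily overlapping boxes
    return keep
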
Code Example #10
def eval_dict_convert(img_num, mode):
    with tf.Graph().as_default():

        # img = tf.placeholder(shape=[None, None, 3], dtype=tf.uint8)

        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
            next_batch(dataset_name=cfgs.DATASET_NAME,
                       batch_size=cfgs.BATCH_SIZE,
                       shortside_len=cfgs.SHORT_SIDE_LEN,
                       is_training=False)

        gtboxes_and_label = tf.py_func(
            back_forward_convert,
            inp=[tf.squeeze(gtboxes_and_label_batch, 0)],
            Tout=tf.float32)
        gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])

        gtboxes_and_label_minAreaRectangle = get_horizon_minAreaRectangle(
            gtboxes_and_label)

        gtboxes_and_label_minAreaRectangle = tf.reshape(
            gtboxes_and_label_minAreaRectangle, [-1, 5])

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)
        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=gtboxes_and_label,
            is_training=False,
            share_head=False,
            share_net=share_net,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            anchor_angles=cfgs.ANCHOR_ANGLES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.
            BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            anchor_stride=cfgs.ANCHOR_STRIDE,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            kernel_size=cfgs.KERNEL_SIZE,
            use_angles_condition=False,
            anchor_angle_threshold=cfgs.RPN_ANCHOR_ANGLES_THRESHOLD,
            nms_angle_threshold=cfgs.RPN_NMS_ANGLES_THRESHOLD,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            # iou>=0.7 is positive box, iou< 0.3 is negative
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=cfgs.
            IS_FILTER_OUTSIDE_BOXES,  # whether remove anchors outside
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            scope='')

        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]
        _, _, rpn_predict_boxes, rpn_predict_scores = rpn.rpn_losses()

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            img_batch=img_batch,
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            stop_gradient_for_proposals=False,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            top_k_nms=cfgs.FAST_RCNN_TOP_K_NMS,
            nms_angle_threshold=cfgs.FAST_RCNN_NMS_ANGLES_THRESHOLD,
            use_angle_condition=False,
            level=cfgs.LEVEL,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.
            FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,
            # show detections which score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.
            FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            boxes_angle_threshold=cfgs.FAST_RCNN_BOXES_ANGLES_THRESHOLD,
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()

        ##############################################################################################
        if cfgs.NEED_AUXILIARY:
            predict_boxes = tf.concat(
                [fast_rcnn_decode_boxes, rpn_predict_boxes], axis=0)
            predict_scores = tf.concat(
                [fast_rcnn_score, rpn_predict_scores - 0.2], axis=0)
            rpn_predict_label = tf.ones([
                tf.shape(rpn_predict_scores)[0],
            ], tf.int64)
            labels = tf.concat([detection_category, rpn_predict_label], axis=0)

            # valid_indices = nms_rotate.nms_rotate(decode_boxes=predict_boxes,
            #                                       scores=predict_scores,
            #                                       iou_threshold=0.15,
            #                                       max_output_size=30,
            #                                       use_angle_condition=False,
            #                                       angle_threshold=15,
            #                                       use_gpu=True)
            valid_indices = tf.py_func(nms_rotate.nms_rotate_cpu,
                                       inp=[
                                           predict_boxes, predict_scores,
                                           tf.constant(0.15, tf.float32),
                                           tf.constant(30, tf.float32)
                                       ],
                                       Tout=tf.int64)

            fast_rcnn_decode_boxes = tf.gather(predict_boxes, valid_indices)
            fast_rcnn_score = tf.gather(predict_scores, valid_indices)
            detection_category = tf.gather(labels, valid_indices)

        ##############################################################################################
        if mode == 0:
            fast_rcnn_decode_boxes = get_horizon_minAreaRectangle(
                fast_rcnn_decode_boxes, False)

        # train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('restore model')

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            gtboxes_dict = {}
            predict_dict = {}

            for i in range(img_num):

                start = time.time()

                _img_name_batch, _img_batch, _gtboxes_and_label, _fast_rcnn_decode_boxes, \
                _gtboxes_and_label_minAreaRectangle, _fast_rcnn_score, _detection_category \
                    = sess.run([img_name_batch, img_batch, gtboxes_and_label, fast_rcnn_decode_boxes,
                                gtboxes_and_label_minAreaRectangle, fast_rcnn_score, detection_category])
                end = time.time()

                # gtboxes convert dict
                gtboxes_dict[str(_img_name_batch[0])] = []
                predict_dict[str(_img_name_batch[0])] = []

                # for j, box in enumerate(_gtboxes_and_label):
                #     bbox_dict = {}
                #     bbox_dict['bbox'] = np.array(_gtboxes_and_label[j, :-1], np.float64)
                #     bbox_dict['name'] = LABEl_NAME_MAP[int(_gtboxes_and_label[j, -1])]
                #     gtbox_dict[str(_img_name_batch[0])].append(bbox_dict)
                #
                # for label in NAME_LABEL_MAP.keys():
                #     if label == 'back_ground':
                #         continue
                #     else:
                #         temp_dict = {}
                #         temp_dict['name'] = label
                #
                #         ind = np.where(_detection_category == NAME_LABEL_MAP[label])[0]
                #         temp_boxes = _fast_rcnn_decode_boxes[ind]
                #         temp_score = np.reshape(_fast_rcnn_score[ind], [-1, 1])
                #         temp_dict['bbox'] = np.array(np.concatenate([temp_boxes, temp_score], axis=1), np.float64)
                #         predict_dict[str(_img_name_batch[0])].append(temp_dict)

                if mode == 0:
                    gtboxes_list, predict_list = \
                        make_dict_packle(_gtboxes_and_label_minAreaRectangle, _fast_rcnn_decode_boxes,
                                         _fast_rcnn_score, _detection_category)
                else:
                    gtboxes_list, predict_list = \
                        make_dict_packle(_gtboxes_and_label, _fast_rcnn_decode_boxes,
                                         _fast_rcnn_score, _detection_category)

                gtboxes_dict[str(_img_name_batch[0])].extend(gtboxes_list)
                predict_dict[str(_img_name_batch[0])].extend(predict_list)

                view_bar(
                    '{} image cost {}s'.format(str(_img_name_batch[0]),
                                               (end - start)), i + 1, img_num)

            fw1 = open('gtboxes_dict.pkl', 'wb')  # pickle requires binary mode
            fw2 = open('predict_dict.pkl', 'wb')
            pickle.dump(gtboxes_dict, fw1)
            pickle.dump(predict_dict, fw2)
            fw1.close()
            fw2.close()
            coord.request_stop()
            coord.join(threads)
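
The NEED_AUXILIARY branch above merges RPN proposals into the Fast-RCNN detections, penalizing their scores by 0.2 and labelling them as class 1 before a joint rotated NMS. A NumPy sketch of just that merge step, with hypothetical argument names:

import numpy as np

def merge_with_auxiliary(frc_boxes, frc_scores, frc_labels,
                         rpn_boxes, rpn_scores, penalty=0.2):
    # Concatenate Fast-RCNN detections with RPN proposals whose scores are
    # lowered by `penalty`; every RPN box gets label 1, matching the graph above.
    boxes = np.concatenate([frc_boxes, rpn_boxes], axis=0)
    scores = np.concatenate([frc_scores, rpn_scores - penalty], axis=0)
    labels = np.concatenate(
        [frc_labels, np.ones(len(rpn_scores), np.int64)], axis=0)
    return boxes, scores, labels  # feed these to the rotated NMS
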
Code Example #11
def train():
    with tf.Graph().as_default():
        tf.set_random_seed(1234)
        with tf.name_scope('get_batch'):
            img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
                next_batch(dataset_name=cfgs.DATASET_NAME,
                           batch_size=cfgs.BATCH_SIZE,
                           shortside_len=cfgs.SHORT_SIDE_LEN,
                           is_training=True,
                           is_val=False)

        with tf.name_scope('draw_gtboxes'):
            gtboxes_in_img = draw_box_with_color(
                img_batch,
                tf.reshape(gtboxes_and_label_batch, [-1, 5])[:, :-1],
                text=tf.shape(gtboxes_and_label_batch)[1])

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)

        # ***********************************************************************************************
        # *                                            rpn                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=tf.squeeze(gtboxes_and_label_batch, 0),
            is_training=True,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.
            BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.
            RPN_IOU_NEGATIVE_THRESHOLD,  # iou>=0.7 is positive box, iou< 0.3 is negative
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether remove anchors outside
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

        rpn_location_loss, rpn_classification_loss = rpn.rpn_losses()
        rpn_total_loss = rpn_classification_loss + rpn_location_loss

        with tf.name_scope('draw_proposals'):
            # score > 0.5 is object
            rpn_object_boxes_indices = tf.reshape(
                tf.where(tf.greater(rpn_proposals_scores, 0.5)), [-1])
            rpn_object_boxes = tf.gather(rpn_proposals_boxes,
                                         rpn_object_boxes_indices)

            rpn_proposals_objcet_boxes_in_img = draw_box_with_color(
                img_batch,
                rpn_object_boxes,
                text=tf.shape(rpn_object_boxes)[0])
            rpn_proposals_boxes_in_img = draw_box_with_color(
                img_batch,
                rpn_proposals_boxes,
                text=tf.shape(rpn_proposals_boxes)[0])
        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************

        fast_rcnn = build_fast_rcnn.FastRCNN(
            img_batch=img_batch,
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=tf.squeeze(gtboxes_and_label_batch, 0),
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.
            FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.
            FINAL_SCORE_THRESHOLD,  # show detections which score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.
            FAST_RCNN_IOU_POSITIVE_THRESHOLD,  # iou>0.5 is positive, iou<0.5 is negative
            use_dropout=False,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=True,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()
        fast_rcnn_location_loss, fast_rcnn_classification_loss = fast_rcnn.fast_rcnn_loss()
        fast_rcnn_total_loss = fast_rcnn_location_loss + fast_rcnn_classification_loss

        with tf.name_scope('draw_boxes_with_categories'):
            fast_rcnn_predict_boxes_in_imgs = draw_boxes_with_categories(
                img_batch=img_batch,
                boxes=fast_rcnn_decode_boxes,
                labels=detection_category,
                scores=fast_rcnn_score)

        # train
        total_loss = slim.losses.get_total_loss()

        global_step = slim.get_or_create_global_step()  # return the global step tensor, creating it if needed
        #
        # lr = tf.train.piecewise_constant(global_step,
        #                                  boundaries=[np.int64(10000), np.int64(20000)],
        #                                  values=[cfgs.LR, cfgs.LR / 10, cfgs.LR / 100])
        lr = tf.train.exponential_decay(cfgs.LR,
                                        global_step,
                                        decay_steps=5000,
                                        decay_rate=1 / 2.,
                                        staircase=True)
        # lr = tf.train.piecewise_constant(global_step,
        #                                  boundaries=[np.int64(30000), np.int64(40000)],
        #                                  values=[lr, cfgs.LR/100, cfgs.LR/1000])
        tf.summary.scalar('learning_rate', lr)
        # optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
        optimizer = tf.train.AdamOptimizer(lr,
                                           beta1=cfgs.MOMENTUM,
                                           beta2=0.999,
                                           epsilon=1e-8,
                                           use_locking=False,
                                           name='Adam')
        # optimizer = tf.train.RMSPropOptimizer(lr, decay=0.9, epsilon=1e-6, name='RMSProp')
        # create an Operation that computes the gradients and returns the loss
        train_op = slim.learning.create_train_op(
            total_loss, optimizer, global_step)  # rpn_total_loss,
        # train_op = optimizer.minimize(second_classification_loss, global_step)

        # ***********************************************************************************************
        # *                                          Summary                                            *
        # ***********************************************************************************************
        # ground truth and predict
        tf.summary.image('img/gtboxes', gtboxes_in_img)
        tf.summary.image('img/faster_rcnn_predict',
                         fast_rcnn_predict_boxes_in_imgs)
        # rpn loss and image
        tf.summary.scalar('rpn/rpn_location_loss', rpn_location_loss)
        tf.summary.scalar('rpn/rpn_classification_loss',
                          rpn_classification_loss)
        tf.summary.scalar('rpn/rpn_total_loss', rpn_total_loss)

        tf.summary.scalar('fast_rcnn/fast_rcnn_location_loss',
                          fast_rcnn_location_loss)
        tf.summary.scalar('fast_rcnn/fast_rcnn_classification_loss',
                          fast_rcnn_classification_loss)
        tf.summary.scalar('fast_rcnn/fast_rcnn_total_loss',
                          fast_rcnn_total_loss)

        tf.summary.scalar('loss/total_loss', total_loss)
        # #
        # tf.summary.image('C2', _concact_features(share_net['resnet_v1_50/block1/unit_2/bottleneck_v1'][:, :, :, 0:16]), 1)
        # tf.summary.image('C3', _concact_features(share_net['resnet_v1_50/block2/unit_3/bottleneck_v1'][:, :, :, 0:16]), 1)
        # tf.summary.image('C4', _concact_features(share_net['resnet_v1_50/block3/unit_5/bottleneck_v1'][:, :, :, 0:16]), 1)
        # tf.summary.image('C5', _concact_features(share_net['resnet_v1_50/block4'][:, :, :, 0:16]), 1)
        # tf.summary.image('P2', _concact_features(rpn.feature_pyramid['P2'][:, :, :, 0:16]),1)
        # tf.summary.image('P3', _concact_features(rpn.feature_pyramid['P3'][:, :, :, 0:16]),1)
        # tf.summary.image('P4', _concact_features(rpn.feature_pyramid['P4'][:, :, :, 0:16]),1)
        # tf.summary.image('P5', _concact_features(rpn.feature_pyramid['P5'][:, :, :, 0:16]), 1)
        # tf.summary.image('rpn/rpn_all_boxes', rpn_proposals_boxes_in_img)
        # tf.summary.image('rpn/rpn_object_boxes', rpn_proposals_objcet_boxes_in_img)
        # learning_rate
        # tf.summary.scalar('learning_rate', lr)

        summary_op = tf.summary.merge_all()
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        saver = tf.train.Saver(max_to_keep=16)

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:

            if cfgs.NET_NAME == 'pvanet':
                sess.run(init_op)
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess, coord)
                start = 0
                if restorer is not None:
                    restorer.restore(sess, restore_ckpt)
                    print('restore model')
                    start = int("".join(
                        list(restore_ckpt.split('/')[-1])[4:8])) + 1
                else:
                    # read_npy.load_initial_weights(sess)
                    read_npy.load_ckpt_weights(sess)
            else:

                sess.run(init_op)
                # print(sess.run('resnet_v1_50/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance'))
                # print(sess.run('vgg_16/block4/unit_3/bottleneck_v1/conv3/BatchNorm/moving_variance'))
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess, coord)
                start = 0
                if restorer is not None:
                    restorer.restore(sess, restore_ckpt)
                    print('restore model')
                    # start = int("".join(list(restore_ckpt.split('/')[-1])[4:8]))+1

            summary_path = os.path.join(FLAGS.summary_path, cfgs.VERSION)
            mkdir(summary_path)
            summary_writer = tf.summary.FileWriter(summary_path,
                                                   graph=sess.graph)
            df = pd.DataFrame(
                [],
                columns=['Recall', 'Precision', 'mAP', 'F1_score'],
                index=[])

            for step in range(0, cfgs.MAX_ITERATION):
                # print(img_name_batch.eval())
                training_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                              time.localtime(time.time()))
                start = time.time()

                _global_step, _img_name_batch, _rpn_location_loss, _rpn_classification_loss, \
                _rpn_total_loss, _fast_rcnn_location_loss, _fast_rcnn_classification_loss, \
                _fast_rcnn_total_loss, _total_loss, _ = \
                    sess.run([global_step, img_name_batch, rpn_location_loss, rpn_classification_loss,
                              rpn_total_loss, fast_rcnn_location_loss, fast_rcnn_classification_loss,
                              fast_rcnn_total_loss, total_loss, train_op])

                end = time.time()
                # if step == 100:
                #     save_dir = os.path.join(FLAGS.trained_checkpoint, cfgs.VERSION)
                #     mkdir(save_dir)
                #
                #     save_ckpt = os.path.join(save_dir, 'voc_' + str(_global_step) + 'model.ckpt')
                #     saver.save(sess, save_ckpt)
                #     print(' weights had been saved')
                # if step == 500:
                #     save_dir = os.path.join(FLAGS.trained_checkpoint, cfgs.VERSION)
                #     mkdir(save_dir)
                #
                #     save_ckpt = os.path.join(save_dir, 'voc_' + str(_global_step) + 'model.ckpt')
                #     saver.save(sess, save_ckpt)
                #     print(' weights had been saved')
                if step % 50 == 0:
                    print(""" {}: step{}    image_name:{} |\t
                                rpn_loc_loss:{} |\t rpn_cla_loss:{} |\t rpn_total_loss:{} |
                                fast_rcnn_loc_loss:{} |\t fast_rcnn_cla_loss:{} |\t fast_rcnn_total_loss:{} |
                                total_loss:{} |\t pre_cost_time:{}s""" \
                          .format(training_time, _global_step, str(_img_name_batch[0]), _rpn_location_loss,
                                  _rpn_classification_loss, _rpn_total_loss, _fast_rcnn_location_loss,
                                  _fast_rcnn_classification_loss, _fast_rcnn_total_loss, _total_loss,
                                  (end - start)))
                    # print(""" {}: step{}    image_name:{} |\t
                    #             rpn_loc_loss:{} |\t
                    #             fast_rcnn_loc_loss:{} |\t fast_rcnn_cla_loss:{} |\t fast_rcnn_total_loss:{} |
                    #             total_loss:{} |\t pre_cost_time:{}s""" \
                    #       .format(training_time, _global_step, str(_img_name_batch[0]), _rpn_location_loss,
                    #                _fast_rcnn_location_loss,
                    #               _fast_rcnn_classification_loss, _fast_rcnn_total_loss, _total_loss,
                    #               (end - start)))

                if step % 250 == 0:
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, _global_step)
                    summary_writer.flush()

                if (step > 0 and step % 2000 == 0) or step == 1000 \
                        or step == cfgs.MAX_ITERATION - 1:
                    save_dir = os.path.join(FLAGS.trained_checkpoint,
                                            cfgs.VERSION)
                    mkdir(save_dir)

                    save_ckpt = os.path.join(
                        save_dir, 'voc_' + str(_global_step) + 'model.ckpt')
                    saver.save(sess, save_ckpt)
                    print(' weights had been saved')
                # save validation-set metrics
                if (step > 0 and step % 2000 == 0) or step == cfgs.MAX_ITERATION - 1:
                    save_excel = os.path.abspath('../') + r'/Loss/' + cfgs.NET_NAME + r'_' + cfgs.VERSION
                    mkdir(save_excel)

                    new_index = np.append(df.index, [str(step)])
                    df2 = pd.DataFrame(
                        [valval.val(is_val=True)],
                        columns=['Recall', 'Precision', 'mAP', 'F1_score'])
                    df = df.append(df2)
                    df.index = new_index

                    df.to_excel(save_excel + r'/validation.xls')
                    print('validation result had been saved')

            coord.request_stop()
            coord.join(threads)
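
For reference, the staircase exponential decay configured above halves the learning rate every 5000 steps. A plain-Python sketch of the same arithmetic; cfgs.LR = 1e-3 is only an assumed example value:

def staircase_lr(base_lr, global_step, decay_steps=5000, decay_rate=0.5):
    # Same formula as tf.train.exponential_decay(..., staircase=True).
    return base_lr * decay_rate ** (global_step // decay_steps)

for step in (0, 4999, 5000, 10000, 20000):
    print(step, staircase_lr(1e-3, step))
# 0 and 4999 -> 1e-3, 5000 -> 5e-4, 10000 -> 2.5e-4, 20000 -> 6.25e-05
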
Code Example #12
File: train.py  Project: Bobwang100/fp_apply
def train():
  with tf.Graph().as_default():
    with tf.name_scope('get_batch'):
      img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
          next_batch(dataset_name=cfgs.DATASET_NAME,
                     batch_size=cfgs.BATCH_SIZE,
                     shortside_len=cfgs.SHORT_SIDE_LEN,
                     is_training=True)

    with tf.name_scope('draw_gtboxes'):
      gtboxes_in_img = draw_box_with_color(img_batch, tf.reshape(gtboxes_and_label_batch, [-1, 5])[:, :-1],
                                           text=tf.shape(gtboxes_and_label_batch)[1])

    # ***********************************************************************************************
    # *                                         share net                                           *
    # ***********************************************************************************************
    _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                      inputs=img_batch,
                                      num_classes=None,
                                      is_training=True,
                                      output_stride=None,
                                      global_pool=False,
                                      spatial_squeeze=False)

    # ***********************************************************************************************
    # *                                            rpn                                              *
    # ***********************************************************************************************
    rpn = build_rpn.RPN(net_name=cfgs.NET_NAME,
                        inputs=img_batch,
                        gtboxes_and_label=tf.squeeze(gtboxes_and_label_batch, 0),
                        is_training=True,
                        share_head=cfgs.SHARE_HEAD,
                        share_net=share_net,
                        stride=cfgs.STRIDE,
                        anchor_ratios=cfgs.ANCHOR_RATIOS,
                        anchor_scales=cfgs.ANCHOR_SCALES,
                        scale_factors=cfgs.SCALE_FACTORS,
                        base_anchor_size_list=cfgs.BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
                        level=cfgs.LEVEL,
                        top_k_nms=cfgs.RPN_TOP_K_NMS,
                        rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
                        max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
                        rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
                        # IoU >= 0.7 is a positive anchor, IoU < 0.3 is negative
                        rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
                        rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
                        rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
                        remove_outside_anchors=False,  # whether remove anchors outside
                        rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

    rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals()  # rpn_score shape: [300, ]

    rpn_location_loss, rpn_classification_loss = rpn.rpn_losses()
    rpn_total_loss = 10 * rpn_classification_loss + rpn_location_loss
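    # note: the classification term is up-weighted 10x relative to the
    # location term (the Fast R-CNN total below is weighted the same way)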

    with tf.name_scope('draw_proposals'):
      # score > 0.5 is object
      rpn_object_boxes_indices = tf.reshape(tf.where(tf.greater(rpn_proposals_scores, 0.5)), [-1])
      rpn_object_boxes = tf.gather(rpn_proposals_boxes, rpn_object_boxes_indices)

      rpn_proposals_object_boxes_in_img = draw_box_with_color(img_batch, rpn_object_boxes,
                                                              text=tf.shape(rpn_object_boxes)[0])
      rpn_proposals_boxes_in_img = draw_box_with_color(img_batch, rpn_proposals_boxes,
                                                       text=tf.shape(rpn_proposals_boxes)[0])
    # ***********************************************************************************************
    # *                                         Fast RCNN                                           *
    # ***********************************************************************************************

    fast_rcnn = build_fast_rcnn.FastRCNN(img_batch=img_batch,
                                         feature_pyramid=rpn.feature_pyramid,
                                         rpn_proposals_boxes=rpn_proposals_boxes,
                                         rpn_proposals_scores=rpn_proposals_scores,
                                         img_shape=tf.shape(img_batch),
                                         roi_size=cfgs.ROI_SIZE,
                                         roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
                                         scale_factors=cfgs.SCALE_FACTORS,
                                         gtboxes_and_label=tf.squeeze(gtboxes_and_label_batch, 0),
                                         fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
                                         fast_rcnn_maximum_boxes_per_img=100,
                                         fast_rcnn_nms_max_boxes_per_class=cfgs.FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
                                         show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,  # show detections whose score >= 0.6
                                         num_classes=cfgs.CLASS_NUM,
                                         fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
                                         fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
                                         # IoU > 0.5 is positive, IoU < 0.5 is negative
                                         fast_rcnn_positives_iou_threshold=cfgs.FAST_RCNN_IOU_POSITIVE_THRESHOLD,
                                         use_dropout=False,
                                         weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
                                         is_training=True,
                                         level=cfgs.LEVEL)

    fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
        fast_rcnn.fast_rcnn_predict()
    fast_rcnn_location_l, fast_rcnn_classification_loss = fast_rcnn.fast_rcnn_loss()
    fast_rcnn_total_loss = fast_rcnn_location_l + 10 * fast_rcnn_classification_loss

    with tf.name_scope('draw_boxes_with_categories'):
      fast_rcnn_predict_boxes_in_imgs = draw_boxes_with_categories(img_batch=img_batch,
                                                                   boxes=fast_rcnn_decode_boxes,
                                                                   labels=detection_category,
                                                                   scores=fast_rcnn_score)

    # train
    added_loss = rpn_total_loss + fast_rcnn_total_loss
    total_loss = tf.losses.get_total_loss()
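    # added_loss is the raw task loss (RPN + Fast R-CNN) and is what the
    # train op below actually minimizes; tf.losses.get_total_loss() also
    # folds in losses registered with the graph (e.g. weight regularization)
    # and is logged here for comparison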

    global_step = tf.train.get_or_create_global_step()

    lr = tf.train.piecewise_constant(global_step,
                                     boundaries=[np.int64(20000), np.int64(40000)],
                                     values=[cfgs.LR, cfgs.LR / 10, cfgs.LR / 100])
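    # piecewise-constant schedule: cfgs.LR for steps [0, 20000), LR/10 for
    # [20000, 40000), and LR/100 afterwards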
    tf.summary.scalar('lr', lr)
    optimizer = tf.train.MomentumOptimizer(lr, momentum=cfgs.MOMENTUM)
    # optimizer = tf.train.AdamOptimizer(lr)

    # train_op = slim.learning.create_train_op(total_loss, optimizer, global_step)  # rpn_total_loss,
    train_op = slim.learning.create_train_op(added_loss, optimizer, global_step)  # rpn_total_loss,
    # train_op = optimizer.minimize(second_classification_loss, global_step)

    # ***********************************************************************************************
    # *                                          Summary                                            *
    # ***********************************************************************************************
    # ground truth and predict
    tf.summary.image('img/gtboxes', gtboxes_in_img)
    tf.summary.image('img/faster_rcnn_predict', fast_rcnn_predict_boxes_in_imgs)
    # rpn loss and image
    tf.summary.scalar('rpn/rpn_location_loss', rpn_location_loss)
    tf.summary.scalar('rpn/rpn_classification_loss', rpn_classification_loss)
    tf.summary.scalar('rpn/rpn_total_loss', rpn_total_loss)

    tf.summary.scalar('fast_rcnn/fast_rcnn_location_loss', fast_rcnn_location_l)
    tf.summary.scalar('fast_rcnn/fast_rcnn_classification_loss', fast_rcnn_classification_loss)
    tf.summary.scalar('fast_rcnn/fast_rcnn_total_loss', fast_rcnn_total_loss)

    tf.summary.scalar('loss/added_loss', added_loss)
    tf.summary.scalar('loss/total_loss', total_loss)

    tf.summary.image('rpn/rpn_all_boxes', rpn_proposals_boxes_in_img)
    tf.summary.image('rpn/rpn_object_boxes', rpn_proposals_object_boxes_in_img)
    # learning_rate
    tf.summary.scalar('learning_rate', lr)

    summary_op = tf.summary.merge_all()
    init_op = tf.group(
        tf.global_variables_initializer(),
        tf.local_variables_initializer()
    )

    restorer, restore_ckpt = restore_model.get_restorer(test=False)
    saver = tf.train.Saver(max_to_keep=5)

    config = tf.ConfigProto()
    # config.gpu_options.per_process_gpu_memory_fraction = 0.5
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
      sess.run(init_op)
      if restorer is not None:
        restorer.restore(sess, restore_ckpt)
        print('Restored model from', restore_ckpt)
      coord = tf.train.Coordinator()
      threads = tf.train.start_queue_runners(sess, coord)

      summary_path = os.path.join('output/{}'.format(cfgs.DATASET_NAME),
                                  FLAGS.summary_path, cfgs.VERSION)
      mkdir(summary_path)
      summary_writer = tf.summary.FileWriter(summary_path, graph=sess.graph)

      for step in range(cfgs.MAX_ITERATION):
        training_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
        start = time.time()
        # gtboxes = tf.cast(
        #     tf.reshape(fast_rcnn.gtboxes_and_label[:, :-1], [-1, 4]), tf.float32)
        # ious = iou.iou_calculate(fast_rcnn.fast_rcnn_all_level_proposals, gtboxes)

        # _global_step, _img_name_batch, _rpn_location_loss, _rpn_classification_loss, \
        #     _rpn_total_loss, _fast_rcnn_location_l, _fast_rcnn_classification_loss, \
        #     _fast_rcnn_total_loss, _added_loss, _total_loss, rcnn_minibatch, find_samples,\
        # gtbox1, iou1, _ = \
        #     sess.run([global_step, img_name_batch, rpn_location_loss, rpn_classification_loss,
        #               rpn_total_loss, fast_rcnn_location_l, fast_rcnn_classification_loss,
        #               fast_rcnn_total_loss, added_loss, total_loss,
        #               fast_rcnn.fast_rcnn_minibatch(fast_rcnn.fast_rcnn_all_level_proposals),
        #               fast_rcnn.fast_rcnn_find_positive_negative_samples(fast_rcnn.fast_rcnn_all_level_proposals),
        #               gtboxes, ious,
        #               train_op])

        _global_step, _img_name_batch, _rpn_location_loss, _rpn_classification_loss, \
            _rpn_total_loss, _fast_rcnn_location_l, _fast_rcnn_classification_loss, \
            _fast_rcnn_total_loss, _added_loss, _total_loss, \
            _ = \
            sess.run([global_step, img_name_batch, rpn_location_loss, rpn_classification_loss,
                      rpn_total_loss, fast_rcnn_location_l, fast_rcnn_classification_loss,
                      fast_rcnn_total_loss, added_loss, total_loss,
                      train_op])
        # _global_step, _img_name_batch, _added_loss, _total_loss, \
        #     _ = \
        #     sess.run([global_step, img_name_batch, added_loss, total_loss,
        #               train_op])


        end = time.time()
        # a, b, c ,d = rcnn_minibatch
        # aa, bb, cc = find_samples
        # q = []
        # for i in cc:
        #     if i not in q:
        #         q.append(i)
        # qq = []
        # for i in c:
        #     if i not in qq:
        #         qq.append(i)

        # minibatch_encode_boxes = tf.gather(fast_rcnn.fast_rcnn_encode_boxes,a)
        # minibatch_reference_boxes = tf.gather(fast_rcnn.fast_rcnn_all_level_proposals, a)
        # minibatch_encode_gtboxes = \
        #     encode_and_decode.encode_boxes(
        #         unencode_boxes=b,
        #         reference_boxes=minibatch_reference_boxes,
        #         scale_factors=fast_rcnn.scale_factors
        #     )
        # # [minibatch_size, num_classes*4]
        # minibatch_encode_gtboxes = tf.tile(minibatch_encode_gtboxes, [1, fast_rcnn.num_classes])
        #
        # class_weights_list = []
        # category_list = tf.unstack(d, axis=1)
        # for i in range(1, fast_rcnn.num_classes + 1):
        #     tmp_class_weights = tf.ones(
        #         shape=[tf.shape(minibatch_encode_boxes)[0], 4], dtype=tf.float32)
        #     tmp_class_weights = tmp_class_weights * tf.expand_dims(category_list[i], axis=1)
        #     class_weights_list.append(tmp_class_weights)
        # class_weights = tf.concat(class_weights_list, axis=1)  # [minibatch_size, num_classes*4]
        #
        # loc_loss1 = losses.l1_smooth_losses(predict_boxes=minibatch_encode_boxes,
        #                                                    gtboxes=minibatch_encode_gtboxes,
        #                                                    object_weights=c,
        #                                                    classes_weights=class_weights)
        # print('loction-loss:', sess.run(loc_loss1))
        if step % 50 == 0:
          print("""{}: step{} image_name:{}
                     rpn_loc_loss:{:.4f} | rpn_cla_loss:{:.4f} | rpn_total_loss:{:.4f}
                     fast_rcnn_loc_loss:{:.10f} | fast_rcnn_cla_loss:{:.10f} | fast_rcnn_total_loss:{:.4f}
                     added_loss:{:.4f} | total_loss:{:.4f} | pre_cost_time:{:.4f}s"""
                .format(training_time, _global_step, str(_img_name_batch[0]), _rpn_location_loss,
                        _rpn_classification_loss, _rpn_total_loss, _fast_rcnn_location_l,
                        _fast_rcnn_classification_loss, _fast_rcnn_total_loss, _added_loss, _total_loss,
                        (end - start)))
        # if step % 100 == 0:
        #   print("""{}: step{} image_name:{}
        #              added_loss:{:.4f} | total_loss:{:.4f} | pre_cost_time:{:.4f}s"""
        #         .format(training_time, _global_step, str(_img_name_batch[0]),  _added_loss, _total_loss, (end - start)
        #                 ))
        # print(q, '\n', qq)
        # print(gtbox1)
        # print(iou1)

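        # note: with a 500000-step interval this effectively writes summaries
        # only at step 0; presumably a leftover from debugging (the earlier
        # example uses an interval of 250)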
        if step % 500000 == 0:
          summary_str = sess.run(summary_op)
          summary_writer.add_summary(summary_str, _global_step)
          summary_writer.flush()

        if (step > 0 and step % 10000 == 0) or (step == cfgs.MAX_ITERATION - 1):
          save_dir = os.path.join('output/{}'.format(cfgs.DATASET_NAME),
                                  FLAGS.trained_checkpoint, cfgs.VERSION)
          mkdir(save_dir)

          save_ckpt = os.path.join(save_dir, '{}_'.format(
              cfgs.DATASET_NAME)+str(_global_step)+'model.ckpt')
          saver.save(sess, save_ckpt)
          print('Weights have been saved to {}.'.format(save_ckpt))

      print('Training done.')

      coord.request_stop()
      coord.join(threads)
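
Both train() loops in these examples share the same TF1 session lifecycle: initialize variables, optionally restore a checkpoint, start the input queue runners under a Coordinator, iterate on the train op, then stop and join the queue threads. A minimal, self-contained sketch of that skeleton, assuming TensorFlow 1.x, with a dummy counter standing in for the real train op:

import tensorflow as tf  # assumes TensorFlow 1.x, as in the examples above

with tf.Graph().as_default():
    counter = tf.Variable(0, dtype=tf.int64)
    train_op = tf.assign_add(counter, 1)  # dummy stand-in for the real train op

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    with tf.Session() as sess:
        sess.run(init_op)
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        try:
            for step in range(10):
                sess.run(train_op)
        finally:
            # always shut the queue threads down, even if a step raises
            coord.request_stop()
            coord.join(threads)

Wrapping the loop in try/finally guarantees the queue threads are joined even when training raises, which the examples above do not do.
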
Code Example #13
def test(img_num):
    with tf.Graph().as_default():

        # img = tf.placeholder(shape=[None, None, 3], dtype=tf.uint8)

        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
            next_batch(dataset_name=cfgs.DATASET_NAME,
                       batch_size=cfgs.BATCH_SIZE,
                       shortside_len=cfgs.SHORT_SIDE_LEN,
                       is_training=False)

        gtboxes_and_label = tf.py_func(
            back_forward_convert,
            inp=[tf.squeeze(gtboxes_and_label_batch, 0)],
            Tout=tf.float32)
        gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])
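        # each row now carries 6 values per ground-truth box; given the
        # rotated boxes used in this example, presumably
        # [x_c, y_c, w, h, theta, label]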

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)
        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=gtboxes_and_label,
            is_training=False,
            share_head=cfgs.SHARE_HEAD,
            share_net=share_net,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            anchor_angles=cfgs.ANCHOR_ANGLES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.
            BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            anchor_stride=cfgs.ANCHOR_STRIDE,
            pool_stride=cfgs.POOL_STRIDE,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            kernel_size=cfgs.KERNEL_SIZE,
            use_angles_condition=True,
            anchor_angle_threshold=cfgs.RPN_ANCHOR_ANGLES_THRESHOLD,
            nms_angle_threshold=cfgs.RPN_NMS_ANGLES_THRESHOLD,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether remove anchors outside
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            scope='')

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals(
        )  # rpn_score shape: [300, ]
        _, _, rpn_top_k_boxes, rpn_top_k_scores = rpn.rpn_losses()

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            img_batch=img_batch,
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            stop_gradient_for_proposals=False,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=None,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            top_k_nms=cfgs.FAST_RCNN_TOP_K_NMS,
            nms_angle_threshold=cfgs.FAST_RCNN_NMS_ANGLES_THRESHOLD,
            use_angle_condition=False,
            level=cfgs.LEVEL,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.
            FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,
            # show detections whose score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.
            FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            boxes_angle_threshold=cfgs.FAST_RCNN_BOXES_ANGLES_THRESHOLD,
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = \
            fast_rcnn.fast_rcnn_predict()

        ##############################################################################################
        if cfgs.NEED_AUXILIARY:
            predict_boxes = tf.concat(
                [fast_rcnn_decode_boxes, rpn_top_k_boxes], axis=0)
            predict_scores = tf.concat(
                [fast_rcnn_score, rpn_top_k_scores - 0.2], axis=0)
            rpn_top_k_label = tf.ones([
                tf.shape(rpn_top_k_scores)[0],
            ], tf.int64)
            labels = tf.concat([detection_category, rpn_top_k_label], axis=0)

            # valid_indices = nms_rotate.nms_rotate(decode_boxes=predict_boxes,
            #                                       scores=predict_scores,
            #                                       iou_threshold=0.15,
            #                                       max_output_size=30,
            #                                       use_angle_condition=False,
            #                                       angle_threshold=15,
            #                                       use_gpu=True)
            valid_indices = tf.py_func(nms_rotate.nms_rotate_cpu,
                                       inp=[
                                           predict_boxes, predict_scores,
                                           tf.constant(0.15, tf.float32),
                                           tf.constant(30, tf.float32)
                                       ],
                                       Tout=tf.int64)

            fast_rcnn_decode_boxes = tf.gather(predict_boxes, valid_indices)
            fast_rcnn_score = tf.gather(predict_scores, valid_indices)
            detection_category = tf.gather(labels, valid_indices)

            ##############################################################################################

        # train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()

        config = tf.ConfigProto()
        # config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.allow_growth = True
        with tf.Session(config=config) as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('Restored model from', restore_ckpt)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            for i in range(img_num):

                start = time.time()

                _img_name_batch, _img_batch, _gtboxes_and_label, _fast_rcnn_decode_boxes, \
                _fast_rcnn_score, _detection_category \
                    = sess.run([img_name_batch, img_batch, gtboxes_and_label, fast_rcnn_decode_boxes,
                                fast_rcnn_score, detection_category])
                end = time.time()

                _img_batch = np.squeeze(_img_batch, axis=0)

                _img_batch_fpn = help_utils.draw_box_cv(
                    _img_batch,
                    boxes=_fast_rcnn_decode_boxes,
                    labels=_detection_category,
                    scores=_fast_rcnn_score)
                mkdir(cfgs.TEST_SAVE_PATH)
                cv2.imwrite(
                    cfgs.TEST_SAVE_PATH + '/{}_fpn.jpg'.format(
                        str(_img_name_batch[0]).split('.tif')[0]),
                    _img_batch_fpn)

                # _gtboxes_and_label_batch = np.squeeze(_gtboxes_and_label_batch, axis=0)

                temp_label = np.reshape(_gtboxes_and_label[:, -1:], [
                    -1,
                ]).astype(np.int64)
                _img_batch_gt = help_utils.draw_box_cv(
                    _img_batch,
                    boxes=_gtboxes_and_label[:, :-1],
                    labels=temp_label,
                    scores=None)

                cv2.imwrite(
                    cfgs.TEST_SAVE_PATH + '/{}_gt.jpg'.format(
                        str(_img_name_batch[0]).split('.tif')[0]),
                    _img_batch_gt)

                # view_bar('{} image cost {}s'.format(str(_img_name_batch[0]), (end - start)), i + 1, img_num)
                print('{} image cost {}s ({}/{})'.format(
                    str(_img_name_batch[0]), end - start, i + 1, img_num))

            coord.request_stop()
            coord.join(threads)
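
When cfgs.NEED_AUXILIARY is set, test() above concatenates the RPN top-k boxes (scores discounted by 0.2) onto the Fast R-CNN detections and re-filters the union with a CPU rotated NMS wrapped in tf.py_func. The wrapping pattern is worth isolating; below is a toy version in which a hypothetical score-threshold filter stands in for nms_rotate.nms_rotate_cpu (project code not shown here):

import numpy as np
import tensorflow as tf  # TF1-style API, matching the examples above

def keep_indices_cpu(boxes, scores, score_threshold):
    # toy stand-in for nms_rotate.nms_rotate_cpu: keep the indices whose
    # score clears the threshold, ordered by descending score
    order = np.argsort(-scores)
    keep = [i for i in order if scores[i] >= score_threshold]
    return np.array(keep, dtype=np.int64)

with tf.Graph().as_default():
    boxes = tf.constant([[0., 0., 10., 10., 0.]] * 3)  # [x_c, y_c, w, h, theta]
    scores = tf.constant([0.9, 0.1, 0.6])
    valid_indices = tf.py_func(keep_indices_cpu,
                               inp=[boxes, scores,
                                    tf.constant(0.5, tf.float32)],
                               Tout=tf.int64)
    kept_boxes = tf.gather(boxes, valid_indices)

    with tf.Session() as sess:
        print(sess.run(valid_indices))  # -> [0 2]

tf.py_func runs the Python function on CPU inside the graph, which is why the real code can fall back to nms_rotate_cpu when the GPU rotated-NMS kernel (the commented-out nms_rotate.nms_rotate call) is unavailable.
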
Code Example #14
def eval_ship(img_num):
    with tf.Graph().as_default():

        img_name_batch, img_batch, gtboxes_and_label_batch, num_objects_batch = \
            next_batch(dataset_name=cfgs.DATASET_NAME,
                       batch_size=cfgs.BATCH_SIZE,
                       shortside_len=cfgs.SHORT_SIDE_LEN,
                       is_training=False)

        gtboxes_and_label = tf.py_func(
            back_forward_convert,
            inp=[tf.squeeze(gtboxes_and_label_batch, 0)],
            Tout=tf.float32)
        gtboxes_and_label = tf.reshape(gtboxes_and_label, [-1, 6])

        gtboxes_and_label_minAreaRectangle = get_horizen_minAreaRectangle(
            gtboxes_and_label)

        gtboxes_and_label_minAreaRectangle = tf.reshape(
            gtboxes_and_label_minAreaRectangle, [-1, 5])
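        # 5 values per box: the axis-aligned minimum-area rectangle plus the
        # class label (exact coordinate order depends on
        # get_horizen_minAreaRectangle)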

        # ***********************************************************************************************
        # *                                         share net                                           *
        # ***********************************************************************************************
        _, share_net = get_network_byname(net_name=cfgs.NET_NAME,
                                          inputs=img_batch,
                                          num_classes=None,
                                          is_training=True,
                                          output_stride=None,
                                          global_pool=False,
                                          spatial_squeeze=False)

        # ***********************************************************************************************
        # *                                            RPN                                              *
        # ***********************************************************************************************
        rpn = build_rpn.RPN(
            net_name=cfgs.NET_NAME,
            inputs=img_batch,
            gtboxes_and_label=None,
            is_training=False,
            share_head=False,
            share_net=share_net,
            stride=cfgs.STRIDE,
            anchor_ratios=cfgs.ANCHOR_RATIOS,
            anchor_scales=cfgs.ANCHOR_SCALES,
            scale_factors=cfgs.SCALE_FACTORS,
            base_anchor_size_list=cfgs.
            BASE_ANCHOR_SIZE_LIST,  # P2, P3, P4, P5, P6
            level=cfgs.LEVEL,
            top_k_nms=cfgs.RPN_TOP_K_NMS,
            rpn_nms_iou_threshold=cfgs.RPN_NMS_IOU_THRESHOLD,
            max_proposals_num=cfgs.MAX_PROPOSAL_NUM,
            rpn_iou_positive_threshold=cfgs.RPN_IOU_POSITIVE_THRESHOLD,
            rpn_iou_negative_threshold=cfgs.RPN_IOU_NEGATIVE_THRESHOLD,
            rpn_mini_batch_size=cfgs.RPN_MINIBATCH_SIZE,
            rpn_positives_ratio=cfgs.RPN_POSITIVE_RATE,
            remove_outside_anchors=False,  # whether remove anchors outside
            rpn_weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME])

        # rpn predict proposals
        rpn_proposals_boxes, rpn_proposals_scores = rpn.rpn_proposals(
        )  # rpn_score shape: [300, ]

        # ***********************************************************************************************
        # *                                         Fast RCNN                                           *
        # ***********************************************************************************************
        fast_rcnn = build_fast_rcnn.FastRCNN(
            feature_pyramid=rpn.feature_pyramid,
            rpn_proposals_boxes=rpn_proposals_boxes,
            rpn_proposals_scores=rpn_proposals_scores,
            img_shape=tf.shape(img_batch),
            roi_size=cfgs.ROI_SIZE,
            roi_pool_kernel_size=cfgs.ROI_POOL_KERNEL_SIZE,
            scale_factors=cfgs.SCALE_FACTORS,
            gtboxes_and_label=None,
            gtboxes_and_label_minAreaRectangle=
            gtboxes_and_label_minAreaRectangle,
            fast_rcnn_nms_iou_threshold=cfgs.FAST_RCNN_NMS_IOU_THRESHOLD,
            fast_rcnn_maximum_boxes_per_img=100,
            fast_rcnn_nms_max_boxes_per_class=cfgs.
            FAST_RCNN_NMS_MAX_BOXES_PER_CLASS,
            show_detections_score_threshold=cfgs.FINAL_SCORE_THRESHOLD,
            # show detections whose score >= 0.6
            num_classes=cfgs.CLASS_NUM,
            fast_rcnn_minibatch_size=cfgs.FAST_RCNN_MINIBATCH_SIZE,
            fast_rcnn_positives_ratio=cfgs.FAST_RCNN_POSITIVE_RATE,
            fast_rcnn_positives_iou_threshold=cfgs.
            FAST_RCNN_IOU_POSITIVE_THRESHOLD,
            # IoU > 0.5 is positive, IoU < 0.5 is negative
            use_dropout=cfgs.USE_DROPOUT,
            weight_decay=cfgs.WEIGHT_DECAY[cfgs.NET_NAME],
            is_training=False,
            level=cfgs.LEVEL)

        fast_rcnn_decode_boxes, fast_rcnn_score, num_of_objects, detection_category = fast_rcnn.fast_rcnn_predict(
        )

        # train
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        restorer, restore_ckpt = restore_model.get_restorer()
        with tf.Session() as sess:
            sess.run(init_op)
            if restorer is not None:
                restorer.restore(sess, restore_ckpt)
                print('Restored model from', restore_ckpt)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess, coord)

            gtboxes_horizontal_dict = {}
            predict_horizontal_dict = {}

            for i in range(img_num):

                start = time.time()

                _img_name_batch, _img_batch, _gtboxes_and_label, _gtboxes_and_label_minAreaRectangle, \
                _fast_rcnn_decode_boxes, _fast_rcnn_score, _detection_category \
                    = sess.run([img_name_batch, img_batch, gtboxes_and_label, gtboxes_and_label_minAreaRectangle,
                                fast_rcnn_decode_boxes, fast_rcnn_score, detection_category])
                end = time.time()

                # gtboxes convert dict
                gtboxes_horizontal_dict[str(_img_name_batch[0])] = []
                predict_horizontal_dict[str(_img_name_batch[0])] = []

                gtbox_horizontal_list, predict_horizontal_list = \
                    make_dict_packle(_gtboxes_and_label_minAreaRectangle, _fast_rcnn_decode_boxes,
                                     _fast_rcnn_score, _detection_category)

                gtboxes_horizontal_dict[str(
                    _img_name_batch[0])].extend(gtbox_horizontal_list)
                predict_horizontal_dict[str(
                    _img_name_batch[0])].extend(predict_horizontal_list)

                view_bar(
                    '{} image cost {}s'.format(str(_img_name_batch[0]),
                                               (end - start)), i + 1, img_num)

            # pickle requires binary mode under Python 3
            fw1 = open('gtboxes_horizontal_dict.pkl', 'wb')
            fw2 = open('predict_horizontal_dict.pkl', 'wb')
            pickle.dump(gtboxes_horizontal_dict, fw1)
            pickle.dump(predict_horizontal_dict, fw2)
            fw1.close()
            fw2.close()
            coord.request_stop()
            coord.join(threads)
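
The two pickles written at the end (note the 'wb' fix above: pickle needs binary file objects under Python 3) map each image name to its ground-truth and predicted horizontal boxes. Reading them back for offline evaluation is the mirror image; a minimal sketch:

import pickle

with open('gtboxes_horizontal_dict.pkl', 'rb') as f1, \
        open('predict_horizontal_dict.pkl', 'rb') as f2:
    gtboxes_horizontal_dict = pickle.load(f1)
    predict_horizontal_dict = pickle.load(f2)

for img_name, gt_boxes in gtboxes_horizontal_dict.items():
    predictions = predict_horizontal_dict.get(img_name, [])
    print(img_name, len(gt_boxes), 'gt boxes,', len(predictions), 'detections')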