Code example #1
import os

import numpy as np
from scipy.misc import imsave  # deprecated; imageio.imwrite in newer stacks

import draw_toolbox  # project-local drawing helpers


def save_image_with_bbox(image, labels_, scores_, bboxes_):
    if not hasattr(save_image_with_bbox, "counter"):
        save_image_with_bbox.counter = 0  # it doesn't exist yet, so initialize it
    save_image_with_bbox.counter += 1

    img_to_draw = np.copy(image)
    img_to_draw = draw_toolbox.bboxes_draw_on_img(img_to_draw, labels_, scores_, bboxes_, thickness=2)
    imsave(os.path.join('./Debug', '{}.jpg'.format(save_image_with_bbox.counter)), img_to_draw)
    return save_image_with_bbox.counter
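Because the counter lives on the function object itself, save_image_with_bbox keeps state across calls without a global variable. A minimal usage sketch (the detection arrays are assumed to come from any detector; the original assumes ./Debug exists, so it is created first here):

os.makedirs('./Debug', exist_ok=True)  # hypothetical setup step
idx = save_image_with_bbox(image, labels_, scores_, bboxes_)  # writes ./Debug/1.jpg, then 2.jpg, ...
print('saved debug image #{}'.format(idx))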
Code example #2
def big_detect():
    # NOTE: im_path, save_path, readTif, PSP and param are module-level
    # names in the original project and are used here unchanged.

    labels_total = []
    scores_total = []
    bboxes_total = []

    im = readTif(im_path)
    H = im.shape[0]
    W = im.shape[1]
    sub_img, site = PSP.splitimage(im_path, shape=[416, 416], strided=300)

    num = 0
    for image in sub_img:  # each sub-image tile
        image_name = './data/samples/' + str(num) + '.png'
        cv2.imwrite(image_name, image)
        labels_, scores_, bboxes_ = param(True, Source=image_name)  # detections for one tile
        #os.remove(image_name)
        #num_bbox = len(labels_)
        labels_total.append(labels_)
        scores_total.append(scores_)
        bboxes_total.append(bboxes_)
        num += 1
        print('num=', num)

    print('num of sub-image results:', len(bboxes_total))

    labels_merge, scores_merge, bboxes_merge = PSP.merge_label(
        labels_total, scores_total, bboxes_total, site, [416, 416], im.shape)

    result_img = draw_toolbox.bboxes_draw_on_img(im, labels_merge,
                                                 scores_merge, bboxes_merge)


    print('--------------- finish detect -----------------------')
    cv2.imwrite(save_path, result_img)
    #np.savetxt(im_path[:-8]+save_name+'_result.txt', bboxes_merge, fmt='%0.3f')
    #write_xml_cvat(xml_path,img_name,H,W,bboxes_merge)
    print('im_path:', im_path)
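PSP.splitimage is project-local, but its call sites pin down the interface: given an image (or image path), a tile shape, and a stride, it returns the tiles and their offsets in the source image. A minimal sketch of that contract, assuming `site` holds (y, x) top-left corners, `im` is a NumPy array, and border strips smaller than a tile are simply skipped:

def split_image(im, shape=(416, 416), strided=300):
    # Hypothetical re-implementation of PSP.splitimage's apparent contract:
    # slide a shape-sized window over `im` with the given stride and return
    # the tiles together with their top-left (y, x) offsets.
    tiles, offsets = [], []
    h, w = im.shape[:2]
    th, tw = shape
    for y in range(0, max(h - th, 0) + 1, strided):
        for x in range(0, max(w - tw, 0) + 1, strided):
            tiles.append(im[y:y + th, x:x + tw])
            offsets.append((y, x))
    return tiles, offsets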
Code example #3
def call_Detection(input, thred=0.5):
    if thred is None:
        thred = 0.5
    image_path = input
    NMS_thred = thred

    basename = 'Detection_result_Thred_' + str(thred) + '_' + osp.basename(
        image_path)
    dirname = osp.dirname(image_path)

    if not osp.exists(dirname):
        os.mkdir(dirname)

    savedir = osp.join(dirname, 'out_result/')
    if not osp.exists(savedir):
        os.mkdir(savedir)

    savename = osp.join(savedir, basename)

    args = parse_args()
    # model path
    demonet = args.demo_net
    dataset = args.dataset

    tfmodel = os.path.join('./Crop_Detection/trained_models',
                           'default_0.0001_DataAug2', NETS[demonet][0])

    if not os.path.isfile(tfmodel + '.meta'):
        print('tfmodel: ', tfmodel)
        raise IOError(
            ('{:s} not found.\nDid you download the proper networks from '
             'our server and place them properly?').format(tfmodel + '.meta'))

    # set config
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    tfconfig.gpu_options.per_process_gpu_memory_fraction = 0.6

    # init session
    sess = tf.Session(config=tfconfig)
    # load network
    if demonet == 'vgg16':
        net = vgg16(batch_size=1)

    else:
        raise NotImplementedError

    n_classes = len(CLASSES)
    # create the structure of the net having a certain shape (which depends on the number of classes)
    net.create_architecture(sess,
                            "TEST",
                            n_classes,
                            tag='default',
                            anchor_scales=[8, 16, 32])
    saver = tf.train.Saver()
    saver.restore(sess, tfmodel)

    print('Loaded network {:s}'.format(tfmodel))
    paths = [image_path]
    for im_name in paths:  # full-size input image
        print('---------------------------------------------------------')
        print('Demo for data/demo/{}'.format(im_name))
        labels_total = []
        scores_total = []
        bboxes_total = []

        import time
        start_time = time.time()
        im = cv2.imread(image_path)  # added: `im` is used below but was never read in the original
        sub_img, site = PSP.splitimage(image_path,
                                       shape=[416, 416],
                                       strided=300)

        for image in sub_img:  # each sub-image tile

            labels_, scores_, bboxes_ = mulite_demo(
                sess, net, image, NMS_thred=NMS_thred)  # detections for one tile
            num_bbox = len(labels_)
            labels_total.append(labels_)
            scores_total.append(scores_)
            bboxes_total.append(bboxes_)

        print('num of sub-image results:', len(bboxes_total))

        labels_merge, scores_merge, bboxes_merge = PSP.merge_label(
            labels_total, scores_total, bboxes_total, site, [416, 416],
            im.shape)  # tile shape must match the [416, 416] passed to splitimage

        result_img, shapes = draw_toolbox.bboxes_draw_on_img(
            im, labels_merge, scores_merge, bboxes_merge)

        label_id_dict = draw_toolbox.gain_translate_table()
        # labels_merge = labels_merge.tolist()
        result_num = {}
        for label in labels_merge:
            result_num[label] = result_num.get(label, 0) + 1
        class_num = ''  # per-class count summary, e.g. for output file naming
        for key, value in result_num.items():
            if key in label_id_dict:
                class_num = class_num + label_id_dict[key] + "_" + str(value)
        end_time = time.time()

        time_image = end_time - start_time
        jsonname = 'Thred_' + str(thred) + osp.basename(image_path).split(
            '.')[0] + '.json'
        filename = osp.join(savedir, jsonname)

        from save_json import Save_Json
        json_result = Save_Json()
        json_result.save(filename, shapes, im, NMS_thred)
        cv2.imwrite(savename, result_img)

    print('--------------- finish -----------------------')
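PSP.merge_label is likewise project-local. From both call sites it takes the per-tile results plus the tile offsets and shapes, and returns detections in full-image coordinates. A minimal sketch of the core idea, assuming pixel-coordinate (x1, y1, x2, y2) boxes and (y, x) offsets in `site`; a real implementation would also run NMS, since overlapping tiles (stride < tile size) detect the same object more than once:

import numpy as np

def merge_tile_detections(labels_total, scores_total, bboxes_total, site):
    # Hypothetical sketch: shift each tile's boxes by the tile's (y, x)
    # offset so they live in full-image coordinates, then concatenate.
    labels, scores, bboxes = [], [], []
    for tile_labels, tile_scores, tile_boxes, (oy, ox) in zip(
            labels_total, scores_total, bboxes_total, site):
        for lab, sc, (x1, y1, x2, y2) in zip(tile_labels, tile_scores, tile_boxes):
            labels.append(lab)
            scores.append(sc)
            bboxes.append((x1 + ox, y1 + oy, x2 + ox, y2 + oy))
    return np.array(labels), np.array(scores), np.array(bboxes)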


Code example #4
File: ron_eval.py  Project: zhjpqq/RON_Tensorflow
def main(_):
    if not FLAGS.dataset_dir:
        raise ValueError(
            'You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        # Get the RON network and its anchors.
        ron_class = nets_factory.get_network(FLAGS.model_name)
        ron_params = ron_class.default_params._replace(
            num_classes=FLAGS.num_classes)
        ron_net = ron_class(ron_params)
        ron_shape = ron_net.params.img_shape
        ron_anchors = ron_net.anchors(ron_shape)
        # Inputs for the RON network: image, labels, bboxes.
        # (ymin, xmin, ymax, xmax) for gbboxes

        image_input = tf.placeholder(tf.int32, shape=(None, None, 3))
        shape_input = tf.placeholder(tf.int32, shape=(2, ))
        glabels_input = tf.placeholder(tf.int32, shape=(None, ))
        gbboxes_input = tf.placeholder(tf.float32, shape=(None, 4))

        # Select the preprocessing function.
        preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name, is_training=False)

        # Pre-processing image, labels and bboxes.
        image, glabels, gbboxes, bbox_img = image_preprocessing_fn(
            image_input,
            glabels_input,
            gbboxes_input,
            out_shape=ron_shape,
            data_format=DATA_FORMAT)

        #### DEBUG ####
        #image = tf.Print(image, [shape, glabels, gbboxes], message='after preprocess: ', summarize=20)

        # Construct RON network.
        arg_scope = ron_net.arg_scope(is_training=False,
                                      data_format=DATA_FORMAT)
        with slim.arg_scope(arg_scope):
            predictions, _, objness_pred, _, localisations, _ = ron_net.net(
                tf.expand_dims(image, axis=0), is_training=False)
            bboxes = ron_net.bboxes_decode(localisations, ron_anchors)

            flaten_scores, flaten_labels, flaten_bboxes = flaten_predict(
                predictions, objness_pred, bboxes)
            #objness_pred = tf.reduce_max(tf.cast(tf.greater(objness_pred[-1], FLAGS.objectness_thres), tf.float32))

        flaten_bboxes = tfe.bboxes.bboxes_clip(bbox_img, flaten_bboxes)
        flaten_scores, flaten_labels, flaten_bboxes = filter_boxes(
            flaten_scores, flaten_labels, flaten_bboxes, 0.03, shape_input,
            [320., 320.])

        #flaten_scores, flaten_labels, flaten_bboxes = tf_bboxes_nms_by_class(flaten_scores, flaten_labels, flaten_bboxes, nms_threshold=FLAGS.nms_threshold, keep_top_k=FLAGS.nms_topk_percls, mode = 'union')
        flaten_scores, flaten_labels, flaten_bboxes = tf_bboxes_nms(
            flaten_scores,
            flaten_labels,
            flaten_bboxes,
            nms_threshold=FLAGS.nms_threshold,
            keep_top_k=FLAGS.nms_topk,
            mode='union')

        # Resize bboxes to original image shape.
        flaten_bboxes = tfe.bboxes.bboxes_resize(bbox_img, flaten_bboxes)

        # configure model restore
        if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
            checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
        else:
            checkpoint_path = FLAGS.checkpoint_path

        tf.logging.info('Restoring model from %s. Ignoring missing vars: %s' %
                        (checkpoint_path, FLAGS.ignore_missing_vars))

        if FLAGS.moving_average_decay:
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay)
            variables_to_restore = variable_averages.variables_to_restore()
        else:
            variables_to_restore = slim.get_variables_to_restore()

        init_fn = slim.assign_from_checkpoint_fn(
            checkpoint_path,
            variables_to_restore,
            ignore_missing_vars=FLAGS.ignore_missing_vars)

        def wrapper_debug(sess):
            sess = tf_debug.LocalCLIDebugWrapperSession(
                sess, thread_name_filter="MainThread$")
            sess.add_tensor_filter("has_inf_or_nan", tf_debug.has_inf_or_nan)
            return sess

        # No need to specify local_variables_initializer or tables_initializer; the Supervisor runs them via its default local_init_op.
        init_op = tf.group(tf.global_variables_initializer())
        # Pass the init function to the supervisor:
        # - the init function is called _after_ the variables have been initialized by running the init_op
        # - manage summaries in this process ourselves to save memory
        # - no need to specify global_step; the Supervisor finds it automatically
        # - initialization order: checkpoint -> local_init_op -> init_op -> init_fn
        sv = tf.train.Supervisor(logdir=FLAGS.test_dir,
                                 init_fn=init_fn,
                                 init_op=init_op,
                                 summary_op=None,
                                 save_model_secs=0)

        gpu_options = tf.GPUOptions(
            per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)
        config = tf.ConfigProto(
            log_device_placement=False,
            allow_soft_placement=True,
            intra_op_parallelism_threads=FLAGS.num_cpu_threads,
            inter_op_parallelism_threads=FLAGS.num_cpu_threads,
            gpu_options=gpu_options)

        cur_step = 0
        tf.logging.info(
            datetime.now().strftime('Evaluation Start: %Y-%m-%d %H:%M:%S'))

        detector_eval = voc_eval.DetectorEvalPascal('../PASCAL/VOC2007TEST/',
                                                    './eval_logs/',
                                                    set_type='test')
        num_images = pascalvoc_2007.SPLITS_TO_SIZES['test']

        # all detections are collected into:
        #    all_boxes[cls][image] = N x 5 array of detections in
        #    (x1, y1, x2, y2, score)
        all_boxes = [[[] for _ in range(num_images)]
                     for _ in range(len(pascalvoc_common.VOC_CLASSES) + 1)]
        output_dir = detector_eval.output_dir
        det_file = os.path.join(output_dir, 'detections.pkl')

        with sv.managed_session(config=config) as sess:
            while True:
                if sv.should_stop():
                    tf.logging.info('Supervisor emitted a stop signal!')
                    break
                if cur_step >= len(detector_eval.image_ids):
                    break
                start_time = time.time()

                input_datas = _process_image(
                    detector_eval.image_ids[cur_step][0],
                    detector_eval.image_ids[cur_step][1])
                with tf.device('/gpu:0'):
                    image_, shape_, _, _, scores_, labels_, bboxes_ = sess.run(
                        [
                            image, shape_input, glabels, gbboxes,
                            flaten_scores, flaten_labels, flaten_bboxes
                        ],
                        feed_dict={
                            image_input: input_datas[0],
                            shape_input: input_datas[1],
                            glabels_input: input_datas[2],
                            gbboxes_input: input_datas[3]
                        })
                    print(scores_)
                    print(labels_)
                    print(bboxes_)
                    img_to_draw = np.copy(
                        preprocessing_factory.ssd_vgg_preprocessing.
                        np_image_unwhitened(image_))
                    img_to_draw = draw_toolbox.bboxes_draw_on_img(img_to_draw,
                                                                  labels_,
                                                                  scores_,
                                                                  bboxes_,
                                                                  thickness=2)
                    imsave('./Debug/{}.jpg'.format(cur_step), img_to_draw)

                unique_labels = []
                for l in labels_:
                    if l not in unique_labels:
                        unique_labels.append(l)
                print('unique_labels:', unique_labels)
                # j = 0 would be the background class; detector outputs are expected to exclude it
                for j in unique_labels:
                    mask = labels_ == j
                    boxes = bboxes_[mask]
                    # convert normalized (ymin, xmin, ymax, xmax) boxes to
                    # pixel coordinates, then reorder to (x1, y1, x2, y2)
                    boxes[:, 0] *= shape_[0]
                    boxes[:, 2] *= shape_[0]
                    boxes[:, 1] *= shape_[1]
                    boxes[:, 3] *= shape_[1]

                    boxes[:, [0, 1]] = boxes[:, [1, 0]]
                    boxes[:, [2, 3]] = boxes[:, [3, 2]]
                    scores = scores_[mask]
                    scores = scores_[mask]

                    cls_dets = np.hstack(
                        (boxes, scores[:, np.newaxis])).astype(np.float32,
                                                               copy=False)
                    print(cls_dets)
                    all_boxes[j][cur_step] = cls_dets

                time_elapsed = time.time() - start_time
                if cur_step % FLAGS.log_every_n_steps == 0:
                    tf.logging.info(
                        'Eval Speed: {:5.3f}sec/image, {}/{}'.format(
                            time_elapsed, cur_step,
                            len(detector_eval.image_ids)))

                cur_step += 1

        with open(det_file, 'wb') as f:
            pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

        detector_eval.evaluate_detections(all_boxes)

        tf.logging.info(
            datetime.now().strftime('Evaluation Finished: %Y-%m-%d %H:%M:%S'))
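For reference, all_boxes follows the standard Pascal VOC evaluation layout: all_boxes[cls][image] is an N x 5 float32 array of (x1, y1, x2, y2, score). A small sketch of reading the pickled detections back (the path is hypothetical and must match `det_file` above):

import pickle

with open('detections.pkl', 'rb') as f:
    all_boxes = pickle.load(f)

for cls_idx, per_image in enumerate(all_boxes):
    n_dets = sum(len(dets) for dets in per_image)
    print('class {}: {} detections over {} images'.format(cls_idx, n_dets, len(per_image)))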