def evaluate_one_model(mm, data, config):
    """Evaluate one model over `data` and return confusion counts, metrics and mIoU.

    For each (image, label, bbox_gt) example: predict a bounding box, refine it
    with a CRF (using `config['gt_prob']`), accumulate the confusion counters
    against `label`, and record the IoU between predicted and ground-truth boxes.

    Args:
        mm: model exposing `inference(image_batch, ['bbox'])`.
        data: iterable of (image, label, bbox_gt) tuples.
        config: dict; only 'gt_prob' is read here.

    Returns:
        dict with summed 'TP'/'TN'/'FP'/'FN', the derived metrics from
        `my_utils.metric_many_from_counter`, and 'mIoU'.
    """
    gt_prob = config['gt_prob']
    counters = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}
    logger.info('Evaluating %d examples in total...' % len(data))

    ious = []
    for image, label, bbox_gt in data:
        # image[None] adds a batch dimension; take the first (only) prediction.
        bbox_pred = mm.inference(image[None], ['bbox'])[0][0]
        refined = crf.crf_from_bbox(image, bbox_pred, gt_prob)
        for key, value in my_utils.count_many(refined, label).items():
            counters[key] += value
        ious.append(my_utils.calc_bbox_iou(bbox_pred, bbox_gt))

    counters.update(my_utils.metric_many_from_counter(counters))
    counters['mIoU'] = np.mean(ious)
    return counters
def aggregate_result(results):
    """Sum per-fold confusion counters, log the combined metrics, and return them.

    Args:
        results: iterable of dicts, each containing at least the keys
            'TP', 'TN', 'FP', 'FN'. Extra (metric) keys are ignored.

    Returns:
        The aggregated dict: summed counts plus the derived metrics from
        `my_utils.metric_many_from_counter`. (The original built this dict
        but returned None; returning it is backward-compatible.)
    """
    final_result = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}

    def update_dict(target, to_update):
        # Iterate `target`'s keys so that metric keys present in the
        # per-fold results (e.g. from metric_many_from_counter) are skipped.
        for key in target:
            target[key] += to_update[key]

    for result in results:
        update_dict(final_result, result)

    final_result.update(my_utils.metric_many_from_counter(final_result))
    logger.info('Final result:\n%s' % json.dumps(final_result, indent=2))
    return final_result
def eval_one_fold(fold, ckpt_path, out_path, ignore_iou=None):
    """Evaluate the restored model of one cross-validation fold on its test split.

    Loads the fold's test listing from the 'dermquest' dataset, runs CRF-refined
    inference on each example in a CPU-only TF session, accumulates confusion
    counters, and dumps the final result dict to `out_path`.

    Args:
        fold: index of the cross-validation fold.
        ckpt_path: checkpoint path handed to `RestoredModel`.
        out_path: destination for `my_utils.dump_obj`.
        ignore_iou: optional float threshold; examples whose bbox IoU falls
            below it are skipped and counted as ignored.
    """
    # `is not None` (not truthiness) so a legitimate threshold of 0.0 is not
    # silently treated as "disabled".
    if ignore_iou is not None:
        logger.warning('Will ignore images with IoU smaller than %.3f' % ignore_iou)
    config = my_utils.load_config()
    net = RestoredModel(ckpt_path)
    dermquest = inputs.load_raw_data('dermquest', config)
    test_data = inputs.get_kth_fold(dermquest, fold, config['n_folds'],
                                    seed=config['split_seed'], type_='test')
    with net.graph.as_default() as g:
        result = {
            'TP': 0,
            'TN': 0,
            'FP': 0,
            'FN': 0
        }

        def update_dict(target, to_update):
            for key in to_update:
                target[key] += to_update[key]

        # device_count={'GPU': 0} forces a CPU-only session.
        with tf.Session(graph=g, config=tf.ConfigProto(device_count={'GPU': 0})):
            counter = 0  # number of examples skipped by the IoU threshold
            for i, base in enumerate(test_data.listing):
                image, label, bbox_gt = inputs.load_one_example(base, highest_to=800)
                result_i, stats = evaluation.inference_with_restored_model(net, image, label,
                                                                           bbox_gt=bbox_gt,
                                                                           verbose=False,
                                                                           times=3,
                                                                           gt_prob=0.51)
                if ignore_iou is not None and stats['IoU'] < ignore_iou:
                    counter += 1
                    print(i, base, '---->')
                    continue
                update_dict(result, result_i)
                # NOTE: the original also recomputed per-example metrics into
                # result_i here, but the value was never read — removed as dead code.
            result.update(my_utils.metric_many_from_counter(result))
            logger.warning('%d of the images are ignored' % counter)
            logger.info(result)
    my_utils.dump_obj(out_path, result)
    logger.info('Result saved at %s' % out_path)
def test_one_model(model, listing, config, eval_one_func):
    """Evaluate `model` on every example in `listing` via `eval_one_func`.

    Args:
        model: model object forwarded to `eval_one_func`.
        listing: iterable of example identifiers accepted by
            `inputs.load_one_example`.
        config: dict; only 'input_size' is read here.
        eval_one_func: callable (model, image, label) -> dict of confusion
            counts ('TP'/'TN'/'FP'/'FN').

    Returns:
        dict with the summed confusion counts plus the derived metrics.
        (The original reassigned `result` to the metrics dict only, dropping
        the raw counts — inconsistent with `evaluate_one_model`; keeping the
        counts is a backward-compatible superset of the old return value.)
    """
    result = {'TP': 0, 'TN': 0, 'FP': 0, 'FN': 0}

    def update_dict(target, to_update):
        for key in to_update:
            target[key] += to_update[key]

    for base in listing:
        # bbox is loaded but not needed by eval_one_func.
        image, label, _bbox = inputs.load_one_example(base,
                                                      size=config['input_size'])
        update_dict(result, eval_one_func(model, image, label))
    # Merge metrics into the counters instead of replacing them,
    # matching the convention used by the other evaluation functions.
    result.update(my_utils.metric_many_from_counter(result))
    return result