Example 1
def get_result(result_path, img_id_file_path):
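    """Compute COCO mAP from per-image .bin outputs written by an offline inference step."""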
    anno_json = os.path.join(config.coco_root,
                             config.instances_set.format(config.val_data_type))
    files = os.listdir(img_id_file_path)
    pred_data = []

    for file in files:
        img_ids_name = file.split('.')[0]
        img_id = int(np.squeeze(img_ids_name))
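        # get_imgSize is assumed to return (width, height); the (height, width)
        # order below is what the metrics step expects.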
        img_size = get_imgSize(os.path.join(img_id_file_path, file))
        image_shape = np.array([img_size[1], img_size[0]])

        result_path_0 = os.path.join(result_path, img_ids_name + "_0.bin")
        result_path_1 = os.path.join(result_path, img_ids_name + "_1.bin")

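        # 51150 is the number of default (anchor) boxes this SSD variant outputs;
        # 81 = 80 COCO classes plus background.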
        boxes = np.fromfile(result_path_0, dtype=np.float32).reshape(51150, 4)
        box_scores = np.fromfile(result_path_1,
                                 dtype=np.float32).reshape(51150, 81)

        pred_data.append({
            "boxes": boxes,
            "box_scores": box_scores,
            "img_id": img_id,
            "image_shape": image_shape
        })
    mAP = metrics(pred_data, anno_json)
    print(f"mAP: {mAP}")
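
A minimal invocation sketch for this example. The paths below are placeholders, and the surrounding module is assumed to provide os, numpy as np, config, metrics, and get_imgSize; each image <id>.jpg in img_id_file_path must have matching <id>_0.bin (boxes) and <id>_1.bin (scores) in result_path.

if __name__ == "__main__":
    # Placeholder paths; point these at the offline-inference output
    # directory and the directory of validation images.
    get_result("./results", "./val_images")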
Example 2
def get_result(result_path, img_id_file_path):
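    """Compute COCO mAP from per-image .bin outputs; with args.drop, skip images
    whose ground truth is crowd or falls outside the training classes."""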
    anno_json = os.path.join(config.coco_root,
                             config.instances_set.format(config.val_data_type))

    if args.drop:
        from pycocotools.coco import COCO
        train_cls = config.classes
        train_cls_dict = {}
        for i, cls in enumerate(train_cls):
            train_cls_dict[cls] = i
        coco = COCO(anno_json)
        classes_dict = {}
        cat_ids = coco.loadCats(coco.getCatIds())
        for cat in cat_ids:
            classes_dict[cat["id"]] = cat["name"]

    files = os.listdir(img_id_file_path)
    pred_data = []

    for file in files:
        img_ids_name = file.split('.')[0]
        img_id = int(np.squeeze(img_ids_name))
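        # With args.drop, consult the COCO ground truth and skip images that
        # contain crowd annotations or no boxes from the training classes.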
        if args.drop:
            anno_ids = coco.getAnnIds(imgIds=img_id, iscrowd=None)
            anno = coco.loadAnns(anno_ids)
            annos = []
            iscrowd = False
            for label in anno:
                bbox = label["bbox"]
                class_name = classes_dict[label["category_id"]]
                iscrowd = iscrowd or label["iscrowd"]
                if class_name in train_cls:
                    x_min, x_max = bbox[0], bbox[0] + bbox[2]
                    y_min, y_max = bbox[1], bbox[1] + bbox[3]
                    annos.append(
                        list(map(round, [y_min, x_min, y_max, x_max])) +
                        [train_cls_dict[class_name]])
            if iscrowd or (not annos):
                continue

        img_size = get_imgSize(os.path.join(img_id_file_path, file))
        image_shape = np.array([img_size[1], img_size[0]])
        result_path_0 = os.path.join(result_path, img_ids_name + "_0.bin")
        result_path_1 = os.path.join(result_path, img_ids_name + "_1.bin")

        boxes = np.fromfile(result_path_0, dtype=np.float32).reshape(51150, 4)
        box_scores = np.fromfile(result_path_1,
                                 dtype=np.float32).reshape(51150, 81)

        pred_data.append({
            "boxes": boxes,
            "box_scores": box_scores,
            "img_id": img_id,
            "image_shape": image_shape
        })
    mAP = metrics(pred_data, anno_json)
    print(f"mAP: {mAP}")
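
This variant additionally reads a module-level args object. A hedged sketch of an argparse entry point that could drive it (flag names beyond --drop are assumptions):

import argparse

parser = argparse.ArgumentParser(description="SSD post-processing for offline inference results")
parser.add_argument("--result_path", type=str, required=True, help="directory with <id>_0.bin / <id>_1.bin files")
parser.add_argument("--img_id_file_path", type=str, required=True, help="directory with validation images")
parser.add_argument("--drop", action="store_true", help="skip images with crowd or out-of-class annotations")
args = parser.parse_args()

get_result(args.result_path, args.img_id_file_path)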
Example 3
def ssd_eval(dataset_path, ckpt_path, anno_json):
    """SSD evaluation."""
    batch_size = 1
    ds = create_ssd_dataset(dataset_path,
                            batch_size=batch_size,
                            repeat_num=1,
                            is_training=False,
                            use_multiprocessing=False)
    if config.model == "ssd300":
        net = SSD300(ssd_mobilenet_v2(), config, is_training=False)
    elif config.model == "ssd_vgg16":
        net = ssd_vgg16(config=config)
    elif config.model == "ssd_mobilenet_v1_fpn":
        net = ssd_mobilenet_v1_fpn(config=config)
    elif config.model == "ssd_resnet50_fpn":
        net = ssd_resnet50_fpn(config=config)
    else:
        raise ValueError(f'config.model: {config.model} is not supported')
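    # Wrap the network so raw predictions are decoded against the default (anchor) boxes.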
    net = SsdInferWithDecoder(net, Tensor(default_boxes), config)

    print("Load Checkpoint!")
    param_dict = load_checkpoint(ckpt_path)
    net.init_parameters_data()
    load_param_into_net(net, param_dict)

    net.set_train(False)
    i = batch_size
    total = ds.get_dataset_size() * batch_size
    start = time.time()
    pred_data = []
    print("\n========================================\n")
    print("total images num: ", total)
    print("Processing, please wait a moment.")
    for data in ds.create_dict_iterator(output_numpy=True, num_epochs=1):
        img_id = data['img_id']
        img_np = data['image']
        image_shape = data['image_shape']

        output = net(Tensor(img_np))
        for batch_idx in range(img_np.shape[0]):
            pred_data.append({
                "boxes": output[0].asnumpy()[batch_idx],
                "box_scores": output[1].asnumpy()[batch_idx],
                "img_id": int(np.squeeze(img_id[batch_idx])),
                "image_shape": image_shape[batch_idx]
            })
        percent = round(i / total * 100., 2)

        print(f'    {percent}% [{i}/{total}]', end='\r')
        i += batch_size
    cost_time = int((time.time() - start) * 1000)
    print(f'    100% [{total}/{total}] cost {cost_time} ms')
    mAP = metrics(pred_data, anno_json)
    print("\n========================================\n")
    print(f"mAP: {mAP}")
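
A hedged entry-point sketch for this example. The dataset and checkpoint paths are placeholders, and the anno_json construction mirrors the config fields already used in Examples 1 and 2 (coco_root, instances_set, val_data_type):

if __name__ == "__main__":
    anno_json = os.path.join(config.coco_root,
                             config.instances_set.format(config.val_data_type))
    # Placeholder paths; substitute the real MindRecord file and checkpoint.
    ssd_eval("./data/ssd_eval.mindrecord", "./ckpt/ssd.ckpt", anno_json)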