Example #1
import os

from detectron2 import model_zoo
from detectron2.config import get_cfg
from detectron2.data import build_detection_test_loader
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator, inference_on_dataset


def inference(config_file, coco_to_kitti_dict):
    # Build the config from the model zoo and point it at the KITTI-MOTS
    # splits (assumed to be registered in DatasetCatalog elsewhere).
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
    cfg.DATASETS.TRAIN = ("kitti_mots_train",)
    cfg.DATASETS.TEST = ("kitti_mots_test",)
    cfg.SOLVER.IMS_PER_BATCH = 8
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    # Run inference over the test split, accumulating COCO-format predictions.
    evaluator = COCOEvaluator("kitti_mots_test",
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "kitti_mots_test")
    inference_on_dataset(trainer.model, val_loader, evaluator)

    # Remap/filter the raw predictions before scoring. Note that _predictions
    # is a private attribute of COCOEvaluator, so this relies on detectron2
    # internals (filter_preds is sketched below).
    preds = evaluator._predictions
    filtered_preds = filter_preds(preds, coco_to_kitti_dict)
    evaluator._predictions = filtered_preds

    evaluator.evaluate()
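
Examples #1 and #5 call a filter_preds helper that is never shown. A minimal sketch, assuming COCOEvaluator._predictions holds one dict per image of the form {"image_id": ..., "instances": [...]} with COCO-style instance dicts, and that the mapping keys are the COCO class ids worth keeping:

def filter_preds(preds, class_id_mapping):
    # Hypothetical helper: drop instances whose class is not in the mapping
    # and translate the surviving COCO ids to the target dataset's ids.
    filtered = []
    for pred in preds:
        kept = [
            dict(inst, category_id=class_id_mapping[inst["category_id"]])
            for inst in pred.get("instances", [])
            if inst["category_id"] in class_id_mapping
        ]
        filtered.append({**pred, "instances": kept})
    return filtered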
Example #2
# A unittest.TestCase method (note the self argument): a prediction whose
# class id is unknown to the dataset must make COCOEvaluator.evaluate() fail.
import torch

from detectron2.data import DatasetCatalog
from detectron2.evaluation import COCOEvaluator
from detectron2.structures import Boxes, Instances


def test_unknown_category(self):
    dataset = "coco_2017_val_100"
    evaluator = COCOEvaluator(dataset)
    evaluator.reset()
    inputs = DatasetCatalog.get(dataset)[:2]
    pred = Instances((100, 100))
    pred.pred_boxes = Boxes(torch.rand(2, 4))
    pred.scores = torch.rand(2)
    # Class id 80 is out of range for COCO's 80 classes (valid ids: 0-79).
    pred.pred_classes = torch.tensor([10, 80])
    output = {"instances": pred}
    evaluator.process(inputs, [output, output])
    with self.assertRaises(AssertionError):
        evaluator.evaluate()
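
The AssertionError is the point of this test: pred_classes contains the id 80, but coco_2017_val_100 covers COCO's 80 classes with contiguous ids 0-79, so the evaluator's check that every predicted class maps back to a known category fails inside evaluate().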
Example #3
# Requires: os, pickle, cv2, detectron2 (model_zoo, get_cfg, DefaultPredictor,
# COCOEvaluator, DatasetCatalog, MetadataCatalog), plus the project-local
# SAVE_PATH, KITTI_CATEGORIES and KITTIMOTS_Dataloader.
def KITTIMOTS_evaluation_task(model_name, model_file):
    path = os.path.join(SAVE_PATH, 'eval_inf_task', model_name)
    os.makedirs(path, exist_ok=True)

    # Load Data
    print('Loading Data.')
    dataloader = KITTIMOTS_Dataloader()

    def kitti_test():
        return dataloader.get_dicts(train_flag=False)

    DatasetCatalog.register("KITTIMOTS_test", kitti_test)
    MetadataCatalog.get("KITTIMOTS_test").set(
        thing_classes=list(KITTI_CATEGORIES.keys()))

    # Load MODEL and Configuration
    print('Loading Model.')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    predictor = DefaultPredictor(cfg)

    # Cache predictions on disk so repeated runs can skip inference.
    predictions_path = os.path.join(SAVE_PATH, "predictions.pkl")
    if os.path.exists(predictions_path):
        with open(predictions_path, "rb") as f:
            predictions = pickle.load(f)
    else:
        print('Using Model to predict on input')
        predictions = []
        for input_test in kitti_test():
            img = cv2.imread(input_test['file_name'])
            predictions.append(predictor(img))
        with open(predictions_path, "wb") as f:
            pickle.dump(predictions, f)

    print('Predictions length ' + str(len(predictions)))
    print('Inputs length ' + str(len(kitti_test())))

    # Evaluation: feed the cached predictions to the evaluator in one call.
    # COCOEvaluator.process() pairs inputs with outputs element-wise, so the
    # dataset dicts and the prediction list must be aligned and equally long.
    print('Evaluating...')
    evaluator = COCOEvaluator('KITTIMOTS_test',
                              cfg,
                              False,
                              output_dir="./output/")
    evaluator.reset()
    evaluator.process(kitti_test(), predictions)
    evaluator.evaluate()
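
Example #3 also relies on a project-local KITTI_CATEGORIES mapping that is not part of the snippet. A hypothetical minimal version, assuming the two thing classes annotated in KITTI-MOTS (the real project's names and ids may differ):

KITTI_CATEGORIES = {
    'Car': 1,
    'Pedestrian': 2,
}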
Example #4
def coco_evaluation(cfg, dataset_name):
    predictor = DefaultPredictor(cfg)

    # get_name_with_prefix, load_all_image_in_dataset and get_dataset are
    # project-local helpers (sketched after this example).
    dataset_name_with_prefix = get_name_with_prefix(dataset_name, cfg.DATASETS.USE_DIRECTION_CLASSES)
    evaluator = COCOEvaluator(dataset_name_with_prefix, cfg, distributed=False, output_dir=cfg.OUTPUT_DIR)

    # Run the predictor image by image; order must match the dataset dicts
    # passed to process() below.
    predictions = [predictor(img) for img in load_all_image_in_dataset(dataset_name, cfg)]

    dataset = get_dataset(dataset_name, cfg)

    evaluator.reset()
    evaluator.process(dataset, predictions)

    return evaluator.evaluate()
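
Example #4 depends on three helpers it never defines. A sketch under the assumption that the dataset is registered in DatasetCatalog and each record carries a file_name; get_name_with_prefix is project-specific, so a pass-through is assumed here:

import cv2
from detectron2.data import DatasetCatalog

def get_name_with_prefix(dataset_name, use_direction_classes):
    # Assumed pass-through; the real helper presumably adds a prefix
    # when direction classes are enabled.
    return dataset_name

def get_dataset(dataset_name, cfg):
    # Resolve the registered dataset dicts by name.
    return DatasetCatalog.get(dataset_name)

def load_all_image_in_dataset(dataset_name, cfg):
    # Yield BGR images in dataset order so evaluator.process() sees
    # prediction/ground-truth pairs that line up.
    for record in get_dataset(dataset_name, cfg):
        yield cv2.imread(record["file_name"])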
Example #5
def inference(config_file, correspondences):

    # test_set = 'kitti_mots_test'
    test_set = 'mots_challenge_train'

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.DATASETS.TRAIN = (test_set,)
    cfg.DATASETS.TEST = (test_set,)
    cfg.SOLVER.IMS_PER_BATCH = 16
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    cfg.OUTPUT_DIR = "../week4/output/r50_fpn_cityscapes/"
    # The model-zoo checkpoint is superseded by the fine-tuned weights below:
    # cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
    cfg.MODEL.WEIGHTS = "../week4/output/r50_fpn_cityscapes/model_final.pth"

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=True)

    evaluator = COCOEvaluator(test_set, cfg, False, output_dir="./output/")
    print(evaluator._metadata.get("thing_classes"))
    val_loader = build_detection_test_loader(cfg, test_set)
    inference_on_dataset(trainer.model, val_loader, evaluator)

    # As in Example #1, remap/filter the private _predictions list before
    # computing the final metrics.
    preds = evaluator._predictions
    filtered_preds = filter_preds(preds, correspondences)
    evaluator._predictions = filtered_preds

    evaluator.evaluate()

    # Qualitative check: visualize predictions on the same split.
    predictor = DefaultPredictor(cfg)
    motschallenge = DatasetCatalog.get(test_set)
    show_results(cfg, motschallenge, predictor)
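
show_results in Example #5 is likewise project-local. A minimal sketch, assuming it simply draws predictions with detectron2's Visualizer and writes a few images to cfg.OUTPUT_DIR:

import os
import cv2
from detectron2.data import MetadataCatalog
from detectron2.utils.visualizer import Visualizer

def show_results(cfg, dataset_dicts, predictor, max_images=10):
    # Hypothetical visualization helper; the original is not shown.
    metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
    for i, record in enumerate(dataset_dicts[:max_images]):
        img = cv2.imread(record["file_name"])
        outputs = predictor(img)
        vis = Visualizer(img[:, :, ::-1], metadata=metadata)
        drawn = vis.draw_instance_predictions(outputs["instances"].to("cpu"))
        cv2.imwrite(os.path.join(cfg.OUTPUT_DIR, f"pred_{i}.png"),
                    drawn.get_image()[:, :, ::-1])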
Example #6
# Evaluating the prediction:

# Setting up the COCO evaluation:
coco_evaluator = COCOEvaluator(dataset_name, cfg, distributed=True, output_dir=output_folder_path)

# Setting up the PASCAL VOC evaluation (dataset and my_dataset come from
# earlier in the script, outside this snippet):
my_dataset.dirname = os.path.join(output_folder_path, "PascalVOCAnnotations")
my_dataset.split = 'test'
my_dataset.year = 2012
dataset.to_pascal(my_dataset.dirname)  # Converting the dataset to PASCAL VOC
pascal_evaluator = PascalVOCDetectionEvaluator(dataset_name)

# COCO evaluating:
coco_evaluator.reset()
coco_evaluator.process(images, outputs_list)
coco_results = coco_evaluator.evaluate()

# Dumping the COCO evaluation results:
coco_results_file_path = os.path.join(output_folder_path, "coco_eval_results.json")
with open(coco_results_file_path, "w") as coco_results_file:
    json.dump(coco_results, coco_results_file, indent=2)

print("COCO EVALUATION FINISHED")

# PASCAL VOC evaluating:
pascal_evaluator.reset()
pascal_evaluator.process(images, outputs_list)
pascal_results = pascal_evaluator.evaluate()

# Dumping the PASCAL VOC evaluation results, mirroring the COCO dump above:
pascal_results_file_path = os.path.join(output_folder_path, "pascal_eval_results.json")
with open(pascal_results_file_path, "w") as pascal_results_file:
    json.dump(pascal_results, pascal_results_file, indent=2)
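
A note on the PASCAL VOC half of Example #6: PascalVOCDetectionEvaluator loads its ground truth from the dirname, split and year fields of the dataset's registered metadata rather than from the inputs passed to process(), which is presumably why the snippet converts the annotations to the PASCAL VOC directory layout and sets those three fields before constructing the evaluator.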
Example #7
    data_loader = build_detection_test_loader(cfg, "my_dataset")

    # Define evaluator (bbox task only; the positional False disables
    # distributed gathering)
    evaluator = COCOEvaluator("my_dataset", ("bbox",),
                              False,
                              output_dir=args.output)

    # Evaluation
    with torch.no_grad():

        def get_all_inputs_outputs():
            for data in data_loader:
                # NOTE: this assumes `model` is a predictor-style callable
                # that accepts a raw BGR array; a plain detectron2 model
                # expects the standard list-of-dicts batch format instead.
                img = read_image(data[0]["file_name"], format="BGR")
                pred = model(img)
                yield data, [pred]

        evaluator.reset()
        for inputs, outputs in get_all_inputs_outputs():
            evaluator.process(inputs, outputs)
        # Compare predictions to annotations
        eval_results = evaluator.evaluate()
        print(eval_results)
        results_file = os.path.join(args.output, 'eval_results.txt')
        with open(results_file, 'a', encoding='utf-8') as f:
            f.write(str(eval_results))
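
The reset/process/evaluate loop in Example #7 is the manual way to drive a DatasetEvaluator; inference_on_dataset, used in Examples #1 and #5, wraps the same loop in a single call.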