# Example #1
def testing():
    """Register the SYNTHIA test split (COCO format) and run COCO evaluation.

    Relies on module-level helpers visible elsewhere in this file:
    ``assign_global`` (apparently sets the global ``_name``), ``make_cfg``
    and ``build_evaluator``.  All paths are machine-specific.
    """
    name = "SYNTHIA_test"  # "HoliCity_train" "HoliCity_valid"
    assign_global(name)
    # Plain string literal: the previous f-string had no placeholders.
    cache_pth = "/home/dxl/Code/PlanarReconstruction/data/SYNTHIA_test_coco_format.json"
    # NOTE(review): registration uses the global ``_name`` while the local
    # ``name`` was just computed — presumably assign_global(name) sets
    # ``_name = name``; confirm.
    register_coco_instances(name=_name,
                            metadata={'thing_classes': ["P"]},
                            json_file=cache_pth,
                            image_root="/home/dxl/Data/PlaneRecover")

    cfg__ = make_cfg()

    # begin train
    trainer = DefaultTrainer(cfg__)
    # trainer.resume_or_load(resume=True)
    # trainer.train()

    # predict and visualization
    # extract_roi_feature(cfg__)
    # build_predictor_vis(cfg__)
    # evaluator_metric(cfg__)

    # evaluating validation data set
    trainer.resume_or_load(resume=True)
    evaluator, val_loader = build_evaluator(cfg__)
    inference_on_dataset(trainer.model, val_loader, evaluator)
# Example #2
def inference(config_file, coco_to_kitti_dict):
    """Evaluate a model-zoo checkpoint on KITTI-MOTS with COCO metrics.

    :param config_file: model-zoo relative config path; also used to fetch
        the matching pretrained checkpoint.
    :param coco_to_kitti_dict: mapping consumed by ``filter_preds`` to keep
        only predictions whose COCO class exists in KITTI-MOTS.
    """
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(config_file)
    cfg.DATASETS.TRAIN = ("kitti_mots_train", )
    cfg.DATASETS.TEST = ("kitti_mots_test", )
    cfg.SOLVER.IMS_PER_BATCH = 8
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)  # always load zoo weights, no resume

    evaluator = COCOEvaluator("kitti_mots_test",
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "kitti_mots_test")
    inference_on_dataset(trainer.model, val_loader, evaluator)

    # HACK: reach into the evaluator's private prediction buffer so the raw
    # predictions can be class-filtered before the metric computation.
    preds = evaluator._predictions

    filtered_preds = filter_preds(preds, coco_to_kitti_dict)
    evaluator._predictions = filtered_preds

    evaluator.evaluate()
# Example #3
def validation(
    name,
    metadata,
    json_file,
    image_root,
    output_dir,
    ckpt,
    score_thresh_test,
    output_meta_dirs,
    meta_dir_mode,
):
    """Register a COCO-format dataset and evaluate checkpoint ``ckpt`` on it.

    :param name: dataset name to register and evaluate.
    :param metadata: metadata dict forwarded to ``register_coco_instances``.
    :param json_file: path to the COCO-format annotation file.
    :param image_root: root directory containing the images.
    :param output_dir: directory for config output and evaluation results.
    :param ckpt: checkpoint path loaded into MODEL.WEIGHTS.
    :param score_thresh_test: ROI-head score threshold at test time.
    :param output_meta_dirs: unused in this function — TODO confirm needed.
    :param meta_dir_mode: unused in this function — TODO confirm needed.
    """

    register_coco_instances(
        name=name,
        metadata=metadata,
        json_file=json_file,
        image_root=image_root,
    )

    # ``make_cfg`` is a module-level helper that builds the base config.
    cfg__ = make_cfg()
    cfg__.OUTPUT_DIR = output_dir
    os.makedirs(cfg__.OUTPUT_DIR, exist_ok=True)

    # evaluating validation data set
    cfg__.MODEL.WEIGHTS = ckpt
    cfg__.MODEL.ROI_HEADS.SCORE_THRESH_TEST = score_thresh_test  # set the testing threshold for this model
    cfg__.DATASETS.TEST = (name, )
    predictor = DefaultPredictor(cfg__)

    evaluator = COCOEvaluator(name, cfg__, False, output_dir=output_dir)
    val_loader = build_detection_test_loader(cfg__, name)
    inference_on_dataset(predictor.model, val_loader, evaluator)
# Example #4
 def evaluate(self, test_set, output_folder):
     """Run COCO evaluation of ``self.trainer.model`` on ``test_set``.

     :param test_set: name of a registered dataset to evaluate on.
     :param output_folder: directory receiving the COCO evaluation output.
     """
     self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8
     # NOTE(review): predictor is constructed but never used (evaluation
     # runs on self.trainer.model) — confirm it is needed.
     predictor = DefaultPredictor(self.cfg)
     evaluator = COCOEvaluator(
         test_set, self.cfg, False, output_dir=output_folder)
     val_loader = build_detection_test_loader(self.cfg, test_set)
     inference_on_dataset(self.trainer.model, val_loader, evaluator)
def main():
    """Register a validation set, restore a trained model and evaluate it."""
    # log
    setup_logger(output='./output/log.txt')

    # train args
    args = parse()

    # model configurations
    cfg = get_cfg()

    cfg.merge_from_file(args.config)

    register_coco_instances("val_data", {}, args.val_json_path,
                            args.val_img_dir)
    # NOTE(review): TRAIN also points at the validation split — presumably
    # fine because only evaluation runs here; confirm this is intentional.
    cfg.DATASETS.TRAIN = ("val_data", )
    cfg.DATASETS.TEST = ("val_data", )

    cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class
    cfg.MODEL.WEIGHTS = args.pretrain

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # train
    trainer = DefaultTrainer(cfg)
    # Resume from the last checkpoint in OUTPUT_DIR if present, otherwise
    # load cfg.MODEL.WEIGHTS (args.pretrain).
    trainer.resume_or_load(resume=True)

    # validation
    evaluator = COCOEvaluator("val_data",
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)
    val_loader = build_detection_test_loader(cfg, "val_data")
    inference_on_dataset(trainer.model, val_loader, evaluator)
def validation(cfg: DictConfig) -> None:
    """
    Transfer learning using pretrained models from detectron2 model zoo.

    :param cfg: the configuration dictionary of dataset_model.
    :type cfg: omegaconf.dictconfig.DictConfig.
    :return: None
    """
    log.info('--- Start Validation ---')
    # Register the '<name>_val' split and optionally preview a few samples.
    val_dataset_dicts, val_dataset_metadata = register_custom_coco_dataset(
        cfg=cfg, process='val')
    visualizing_coco_dataset(dataset_dicts=val_dataset_dicts,
                             dataset_metadata=val_dataset_metadata,
                             num_ims=cfg.validation.show_images)
    model_cfg: CfgNode = get_model_configs(cfg=cfg, process='val')
    evaluator = COCOEvaluator(dataset_name=cfg.name + '_val',
                              cfg=model_cfg,
                              distributed=False,
                              output_dir=os.getcwd())
    val_loader = build_detection_test_loader(cfg=model_cfg,
                                             dataset_name=cfg.name + '_val')
    # Restore the latest checkpoint (or configured weights) before inference.
    trainer: DefaultTrainer = DefaultTrainer(model_cfg)
    trainer.resume_or_load(resume=True)
    inference_on_dataset(model=trainer.model,
                         data_loader=val_loader,
                         evaluator=evaluator)
    log.info('--- Validation Done ---')
def test(exp, selected_dataset, theshold):
    """Evaluate a finished experiment's final checkpoint on a dataset.

    :param exp: experiment dict with keys 'net', 'lr', 'train', 'test' and
        optionally 'from' (a path whose parent dir tags the output folder).
    :param selected_dataset: registered dataset name to evaluate on.
    :param theshold: (sic) unused in this function — TODO confirm and clean
        up at the call sites before renaming.
    """
    print(f'Testing experiment: \n{exp}\n')
    cfg = get_cfg()
    cfg.merge_from_file(networks[exp['net']]['cfg'])
    cfg.DATASETS.TEST = exp['test']
    cfg.INPUT.MAX_SIZE_TEST = 2500
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.DATALOADER.NUM_WORKERS = 2

    # The output directory encodes net, LR, train sets and (optionally) the
    # experiment this one was fine-tuned FROM.
    if 'from' in exp:
        trainFromStr = exp['from'].split('/')[-2]

        cfg.OUTPUT_DIR = f"{EXPERIMENTS_OUTPUT_PATH}/{exp['net']}[LR={exp['lr']}][{str(exp['train'])}]FROM[{trainFromStr}]"
    else:
        cfg.OUTPUT_DIR = f"{EXPERIMENTS_OUTPUT_PATH}/{exp['net']}[LR={exp['lr']}][{str(exp['train'])}]"

    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')

    # Only evaluate when every training set is a known real/synthetic one.
    if all([ds in DATASETS_REAL or ds in DATASETS_SYTH
            for ds in exp['train']]):
        output_dir = f"{cfg.OUTPUT_DIR}/{selected_dataset}"
        model = build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=False)
        evaluator = COCOEvaluator(selected_dataset,
                                  cfg,
                                  False,
                                  output_dir=output_dir)
        val_loader = build_detection_test_loader(cfg, selected_dataset)
        inference_on_dataset(model, val_loader, evaluator)
def mAPtest(yaml, weight):
    """Compute COCO mAP of checkpoint ``weight`` on the "Acheck_test" set.

    :param yaml: path to a detectron2 config file to merge from.
    :param weight: path to the model checkpoint to evaluate.
    """
    from detectron2.engine import DefaultTrainer
    from detectron2.config import get_cfg
    cfg = get_cfg()
    cfg.merge_from_file(yaml)
    cfg.DATALOADER.NUM_WORKERS = 2
    # cfg.MODEL.WEIGHTS = "detectron2://COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/137849600/model_final_f10217.pkl"  # initialize from model zoo
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3  # 3 classes (Kong, lee, Huh)

    from detectron2.data.datasets import register_coco_instances
    register_coco_instances("Acheck_test", {}, "./Acheck_hair_test.json",
                            "./img_hair_test")

    from detectron2.data import MetadataCatalog
    MetadataCatalog.get("Acheck_test").thing_classes = ["Kong", "Lee", "Huh"]
    # NOTE(review): Acheck_metadata and dataset_dicts are built but never
    # used — presumably left over from visualization code; confirm.
    Acheck_metadata = MetadataCatalog.get("Acheck_test")
    from detectron2.data import DatasetCatalog
    dataset_dicts = DatasetCatalog.get("Acheck_test")
    cfg.DATASETS.TRAIN = ("Acheck_test", )

    from detectron2.engine import DefaultPredictor

    cfg.MODEL.WEIGHTS = weight
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8  # set the testing threshold for this model
    cfg.DATASETS.TEST = ("Acheck_test", )
    # NOTE(review): predictor is unused; evaluation runs on trainer.model
    # below — confirm the predictor construction is needed.
    predictor = DefaultPredictor(cfg)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    from detectron2.evaluation import COCOEvaluator, inference_on_dataset
    from detectron2.data import build_detection_test_loader
    evaluator = COCOEvaluator("Acheck_test", cfg, False, "./output/")
    val_loader = build_detection_test_loader(cfg, "Acheck_test")
    inference_on_dataset(trainer.model, val_loader, evaluator)
# Example #9
def main(args):
    """Evaluate the configured model on ``args.dataset`` with COCO metrics."""
    cfg = setup(args)
    dataset = args.dataset

    # DefaultPredictor builds the model with cfg.MODEL.WEIGHTS loaded.
    predictor = DefaultPredictor(cfg)

    coco_eval = COCOEvaluator(dataset, cfg, False, cfg.OUTPUT_DIR)
    loader = build_detection_test_loader(cfg, dataset)
    inference_on_dataset(predictor.model, loader, coco_eval)
def test(cfg, dataset_name):
    """Run FLIR evaluation of a DefaultPredictor model on ``dataset_name``.

    NOTE(review): ``out_folder`` is not defined in this function — it is
    presumably a module-level global; confirm.
    """
    cfg.DATASETS.TEST = (dataset_name, )
    predictor = DefaultPredictor(cfg)
    evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=out_folder, out_pr_name='pr_val.png')
    #DefaultTrainer.test(cfg, trainer.model, evaluators=evaluator_FLIR)
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(predictor.model, val_loader, evaluator_FLIR)
def main():
    """Register train/test datasets, load ``args.weight`` and run COCO eval."""
    args = parser.parse_args()
    register_coco_instances(args.dataset, {}, args.label, args.file)  # training dataset
    register_coco_instances(args.test_dataset, {}, args.test_label, args.test_file)  # testing dataset

    ### set metadata
    MetadataCatalog.get(args.test_dataset).evaluator_type = "coco"
    DatasetCatalog.get(args.test_dataset)

    ### cfg setting
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(args.model))
    cfg.DATASETS.TRAIN = (args.dataset,)
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = args.num_class  # excavator, dump_truck, cement_truck
    cfg.MODEL.WEIGHTS = args.weight
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7   # set the testing threshold for this model
    cfg.DATASETS.TEST = (args.test_dataset,)

    ### trainner setting
    trainer = DefaultTrainer(cfg)
    # BUG FIX: resume_or_load takes a boolean ``resume`` flag, not a path.
    # Passing the (truthy) weights path meant resume=True, which prefers a
    # stale last_checkpoint in OUTPUT_DIR over args.weight; resume=False
    # always loads cfg.MODEL.WEIGHTS.
    trainer.resume_or_load(resume=False)

    ### evaluation setting
    evaluator = COCOEvaluator(args.test_dataset, cfg, False, output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, args.test_dataset)
    inference_on_dataset(trainer.model, val_loader, evaluator)
# Example #12
def HolicityV2():
    """Export/visualize predictions of a ScanNet-pretrained model on
    HoliCity V2.

    Registers the validation split in COCO format, points the config at a
    fixed ScanNet checkpoint and calls ``save_npz`` for visualization.  The
    evaluation code after ``exit()`` is currently unreachable (kept for
    reference / manual re-enabling).
    """
    name = "HoliCity_valid_scannet_pretrained"  # "HoliCity_train" "HoliCity_valid"
    assign_global(name)

    # configuration
    # -------- change with your path ----------
    gt_image = "/home/dxl/Data/LondonCity/V2/train-images"  # train-images, valid-test-images
    # gt_label = "/home/dxl/Data/LondonCity/V2/valid-test-GT"
    # name = "HoliCityV2"
    out = "data"
    # ------------------------------------------

    # filelist = V2_filelist(gt_image)
    # img_dirs = [osp.join(gt_image, path) for path in filelist]
    # gt_seglist = [osp.join(gt_label, path.replace("_imag.jpg", "_plan.png")) for path in filelist]

    cache_pth = f"{out}/HoliCityV2_valid_coco_format.json"
    # if not os.path.isfile(cache_pth):
    #     cache_pth = COCO_format(img_dirs, gt_seglist, out, f"{_name}")
    # exit()

    # NOTE(review): uses the global ``_name`` — presumably set by
    # assign_global(name) above; confirm.
    register_coco_instances(name=_name,
                            metadata={'thing_classes': ["P"]},
                            json_file=cache_pth,
                            image_root=gt_image)

    cfg__ = make_cfg()
    cfg__.OUTPUT_DIR = f"./output/{_name}_output"

    # begin train
    # trainer = DefaultTrainer(cfg__)
    # trainer.resume_or_load(resume=True)
    # trainer.train()

    # predict and visualization
    # extract_roi_feature(cfg__)
    # build_predictor_vis(cfg__)
    # evaluator_metric(cfg__)

    # evaluating validation data set
    # trainer.resume_or_load(resume=True)
    cfg__.MODEL.WEIGHTS = os.path.join("./output/ScanNet_train_output",
                                       "model_0099999.pth")
    cfg__.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set the testing threshold for this model
    cfg__.DATASETS.TEST = (_name, )
    metadata = MetadataCatalog.get(_name)
    dataset_dicts = coco_wrapper(
        gt_image, "/home/dxl/Data/LondonCity/V2/test-hd-sub.txt")
    # dataset_dicts = DatasetCatalog.get(_name)

    # Visualization only (isnpz=False): write rendered predictions.
    save_npz(cfg__,
             dataset_dicts=dataset_dicts,
             metadata=metadata,
             isnpz=False,
             isviz=True)
    # Everything below exit() is intentionally dead code.
    exit()

    predictor = DefaultPredictor(cfg__)
    evaluator, val_loader = build_evaluator(cfg__)
    inference_on_dataset(predictor.model, val_loader, evaluator)
# Example #13
def main(args):
    """Evaluate a Faster R-CNN checkpoint on a COCO-format "test" dataset.

    :param args: needs ``anno_path``, ``data_dir``, ``model_path`` and
        ``output_path``.
    """
    register_coco_instances("test", {}, args.anno_path, args.data_dir)
    cfg = get_cfg()
    cfg.merge_from_file(
        model_zoo.get_config_file(
            "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"))
    cfg.DATASETS.TEST = ("test", )
    cfg.MODEL.WEIGHTS = args.model_path
    # Identical per-channel mean — presumably grayscale-like imagery; confirm.
    cfg.MODEL.PIXEL_MEAN = [84.6518, 84.6518, 84.6518]
    # Smaller anchors than the default, for small objects.
    cfg.MODEL.ANCHOR_GENERATOR.SIZES = [[16], [32], [64], [128], [256]]
    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.MAX_ITER = 10000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

    # INPUT settings are very important !!
    cfg.INPUT.CROP.ENABLED = True
    cfg.INPUT.CROP.TYPE = "relative"
    cfg.INPUT.CROP.SIZE = [0.4, 0.4]
    cfg.INPUT.MIN_SIZE_TRAIN = (960, )  # 2400 * 0.4
    cfg.INPUT.MAX_SIZE_TRAIN = 1440
    cfg.INPUT.MIN_SIZE_TEST = 2400
    cfg.INPUT.MAX_SIZE_TEST = 3600

    predictor = DefaultPredictor(cfg)

    evaluator = COCOEvaluator("test", cfg, False, output_dir=args.output_path)
    val_loader = build_detection_test_loader(cfg, "test")
    inference_on_dataset(predictor.model, val_loader, evaluator)
def evaluate(args, mode, _appcfg):
    """Load the "hmd" dataset subset and run COCO evaluation on it.

    :param args: CLI args; ``args.subset`` may override the default "val".
    :param mode: unused here — kept for interface compatibility.
    :param _appcfg: application config forwarded to the dataset loader.
    """
    name = "hmd"
    subset = "val"

    if args.subset:
        subset = args.subset

    metadata = load_and_register_dataset(name, subset, _appcfg)
    dataset_name = get_dataset_name(name, subset)
    dataset_dicts = DatasetCatalog.get(dataset_name)

    conf = Config(args, config)
    cfg = conf.merge(conf.arch, conf.cfg)
    # BUG FIX: ``(dataset_name)`` is just a parenthesized string, not a
    # tuple — detectron2 expects a sequence of dataset names here.
    cfg.DATASETS.TEST = (dataset_name, )

    cfg.OUTPUT_DIR = "/codehub/apps/detectron2/release"
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")

    _loader = build_detection_test_loader(cfg, dataset_name)
    evaluator = COCOEvaluator(dataset_name,
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)

    # Build a bare model and load the final checkpoint directly.
    file_path = cfg.MODEL.WEIGHTS
    model = build_model(cfg)
    DetectionCheckpointer(model).load(file_path)

    inference_on_dataset(model, _loader, evaluator)
def test(cfg, dataset_name, file_name='FLIR_thermal_only_result.out'):
    """Run FLIR evaluation on ``dataset_name`` and save results to a file.

    NOTE(review): ``out_folder`` is not defined in this function — it is
    presumably a module-level global; confirm.
    """
    cfg.DATASETS.TEST = (dataset_name, )
    predictor = DefaultPredictor(cfg)
    out_name = out_folder + file_name
    evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=out_folder, save_eval=True, out_eval_path=out_name)
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(predictor.model, val_loader, evaluator_FLIR)
def test_during_train(trainer, dataset_name):
    """Evaluate ``trainer.model`` on ``dataset_name`` mid-training.

    NOTE(review): ``cfg`` and ``out_folder`` are not parameters — they are
    presumably module-level globals; confirm before reusing this helper.
    """
    cfg.DATASETS.TEST = (dataset_name, )
    evaluator_FLIR = FLIREvaluator(dataset_name,
                                   cfg,
                                   False,
                                   output_dir=out_folder,
                                   out_pr_name='pr_val.png')
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(trainer.model, val_loader, evaluator_FLIR)
def evaluation(output_path,
               configuation,
               eval_dataset="val_kitti-mots",
               eval_concept=("segm", )):
    """Run COCO evaluation of the global ``predictor`` model.

    :param output_path: directory where COCO results are written.
    :param configuation: detectron2 CfgNode used to build the test loader
        (parameter name kept, though misspelled, for caller compatibility).
    :param eval_dataset: registered dataset name to evaluate on.
    :param eval_concept: evaluation tasks, e.g. ("bbox",) or ("segm",).
    """
    #EVALUATION
    evaluator = COCOEvaluator(eval_dataset,
                              eval_concept,
                              False,
                              output_dir=output_path)
    # BUG FIX: the body referenced the undefined name ``configuration`` and
    # hard-coded the dataset; use the actual parameters instead.
    val_loader = build_detection_test_loader(configuation, eval_dataset)
    # NOTE(review): ``predictor`` is not defined in this function — it is
    # presumably a module-level global; confirm.
    inference_on_dataset(predictor.model, val_loader, evaluator)
# Example #18
def evaluate(args, mode, _appcfg):
    """Load the "hmd" test subset and run COCO evaluation with the released
    final checkpoint.

    :param args: CLI args (unused beyond interface compatibility here).
    :param mode: unused here — kept for interface compatibility.
    :param _appcfg: application config forwarded to the dataset loader.
    """
    name = "hmd"

    #uncomment if using trainer.model
    # for subset in ["train", "val"]:
    #     metadata = load_and_register_dataset(name, subset, _appcfg)

    subset = "test"
    # subset = "val"
    metadata = load_and_register_dataset(name, subset, _appcfg)

    dataset_name = get_dataset_name(name, subset)

    dataset_dicts = DatasetCatalog.get(dataset_name)

    cfg = config.get_cfg()
    cfg.merge_from_file(
        "/codehub/external/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = ("hmd_train", "hmd_val")
    # BUG FIX: ``(dataset_name)`` is just a parenthesized string, not a
    # tuple — detectron2 expects a sequence of dataset names here.
    cfg.DATASETS.TEST = (dataset_name, )

    cfg.OUTPUT_DIR = "/codehub/apps/detectron2/release"
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")

    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025

    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7

    # mapper = DatasetMapper(cfg, False)
    _loader = build_detection_test_loader(cfg, dataset_name)
    evaluator = COCOEvaluator(dataset_name,
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)

    # trainer = DefaultTrainer(cfg)
    # trainer.resume_or_load(resume=True)
    # model = trainer.model

    # predictor = DefaultPredictor(cfg)
    # model = predictor.model

    # Build a bare model and load the final checkpoint directly.
    file_path = cfg.MODEL.WEIGHTS
    model = build_model(cfg)
    DetectionCheckpointer(model).load(file_path)

    inference_on_dataset(model, _loader, evaluator)
# Example #19
def main(argv):
    """Iteratively train and evaluate a Faster R-CNN X-101 detector.

    Runs 16 rounds; each round extends MAX_ITER by 500, resumes training,
    then evaluates on the test set.  Pass ``-f True`` to fine-tune from a
    local checkpoint instead of the model-zoo weights.
    """

    setup_logger()

    finetune = False
    trainPath = "./data/train/images/"
    trainjsonPath = "./data/train/train.json"
    testPath = "./data/val/images/"
    testjsonPath = "./data/val/val.json"

    opts, args = getopt.getopt(argv, "hf:", ["finetune="])

    # Only the short '-f True' form actually toggles finetune; the long
    # '--finetune' option is declared but not handled below.
    for opt, arg in opts:
        if opt == '-f':
            if arg == 'True':
                finetune = True

    register_coco_instances("trainSet", {}, trainjsonPath, trainPath)
    register_coco_instances("testSet", {}, testjsonPath, testPath)

    cfg = get_cfg()

    # cfg.MODEL.DEVICE = 'cpu'

    cfg.OUTPUT_DIR = './output'
    cfg.merge_from_file("./detectron2/configs/COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml")
    cfg.DATASETS.TRAIN = ("trainSet",)
    cfg.DATASETS.TEST = ("testSet",)
    cfg.DATALOADER.NUM_WORKERS = 6
    cfg.SOLVER.CHECKPOINT_PERIOD = 500
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 1e-3
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3

    if finetune:
        cfg.SOLVER.MAX_ITER = 190500
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_0005499.pth")
    else:
        cfg.SOLVER.MAX_ITER = 500
        cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x/139173657/model_final_68b088.pkl"

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # Train in 500-iteration increments, evaluating after each increment;
    # resume=True picks up the checkpoint written by the previous round.
    for i in range(16):
        trainer = DefaultTrainer(cfg)
        trainer.resume_or_load(resume=True)
        trainer.train()
        evaluator = COCOEvaluator("testSet", cfg, False, output_dir = cfg.OUTPUT_DIR)
        val_loader = build_detection_test_loader(cfg, "testSet")
        inference_on_dataset(trainer.model, val_loader, evaluator)

        cfg.SOLVER.MAX_ITER += 500
# Example #20
def train(config_file, image_path, annot_file, out_filename="results.txt"):
    """Train a zoo model on the AICity 'first' split, evaluate it, then run
    a final inference pass on the test images.

    :param config_file: model-zoo relative config path (also used by the
        final ``inference`` call).
    :param image_path: root directory of the AICity images.
    :param annot_file: annotation file consumed by ``get_AICity_dataset``.
    :param out_filename: where the final inference results are written.
    :return: detections (``det_bb``) from the final ``inference`` call.
    """
    # PEP 8 (E731): named nested functions instead of lambda assignments;
    # DatasetCatalog.register needs zero-argument callables.
    def train_split():
        return get_AICity_dataset(
            image_path, annot_file, mode='first', is_train=True)

    def test_split():
        return get_AICity_dataset(image_path, annot_file, mode='first')

    DatasetCatalog.clear()
    DatasetCatalog.register("ai_city_train", train_split)
    MetadataCatalog.get('ai_city_train').set(
        thing_classes=[k for k in thing_classes.keys()])

    DatasetCatalog.register("ai_city_test", test_split)
    MetadataCatalog.get('ai_city_test').set(
        thing_classes=[k for k in thing_classes.keys()])

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(config_file))
    cfg.DATASETS.TRAIN = ("ai_city_train", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 8
    cfg.SOLVER.BASE_LR = 0.0001
    cfg.SOLVER.MAX_ITER = 100
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
    cfg.MODEL.RETINANET.NUM_CLASSES = 1

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    evaluator = COCOEvaluator("ai_city_test",
                              cfg,
                              False,
                              output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "ai_city_test")
    inference_on_dataset(trainer.model, val_loader, evaluator)

    # Final standalone inference with the freshly trained weights.
    ims_from_test = [annot['file_name'] for annot in test_split()]
    det_bb = inference(config_file,
                       ims_from_test,
                       weight="./output/model_final.pth",
                       save_results=True,
                       out_filename=out_filename)

    return det_bb
# Example #21
def test_during_train(cfg, dataset_name, save_eval_name, save_folder):
    """Evaluate a freshly-built DefaultTrainer model on ``dataset_name``,
    writing FLIR evaluation output under ``save_folder``.

    NOTE(review): the trainer is constructed but ``resume_or_load`` is never
    called, so the model weights come only from config initialization —
    confirm this is intended.
    """
    cfg.DATASETS.TEST = (dataset_name, )
    trainer = DefaultTrainer(cfg)
    #predictor = DefaultPredictor(cfg)
    #evaluator_FLIR = FLIREvaluator(dataset_name, cfg, False, output_dir=out_folder, out_pr_name='pr_val.png')
    evaluator_FLIR = FLIREvaluator(dataset_name,
                                   cfg,
                                   False,
                                   output_dir=save_folder,
                                   save_eval=True,
                                   out_eval_path=(save_folder +
                                                  save_eval_name))
    #DefaultTrainer.test(cfg, trainer.model, evaluators=evaluator_FLIR)
    val_loader = build_detection_test_loader(cfg, dataset_name)
    inference_on_dataset(trainer.model, val_loader, evaluator_FLIR)
def model_configuration(model_url, learning_rate, max_iter):
    """Train a model-zoo instance-segmentation model on "kitti-mots" and
    return its COCO evaluation results.

    :param model_url: config filename under "COCO-InstanceSegmentation/".
    :param learning_rate: value for SOLVER.BASE_LR.
    :param max_iter: value for SOLVER.MAX_ITER.
    :return: metrics dict from ``inference_on_dataset``.
    """
    model_a = os.path.join("COCO-InstanceSegmentation", model_url)
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_a))
    cfg.DATASETS.TRAIN = ("kitti-mots", )
    cfg.DATASETS.TEST = ()
    cfg.DATALOADER.NUM_WORKERS = 2
    # BUG FIX: fetch the checkpoint matching the merged config. Previously
    # the X_101 checkpoint was hard-coded regardless of ``model_url``, so any
    # other architecture would load mismatched weights.
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        model_a)  # Let training initialize from model zoo
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = learning_rate
    cfg.SOLVER.MAX_ITER = max_iter
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    # cfg.INPUT.MASK_FORMAT = 'rle'
    # cfg.INPUT.MASK_FORMAT='bitmask'

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    print('Training...')
    trainer.train()

    # EVALUATION
    print('Evaluating...')
    evaluator = COCOEvaluator("kitti-mots", cfg, False, output_dir="./Search/")
    val_loader = build_detection_test_loader(cfg, "kitti-mots")
    results = inference_on_dataset(trainer.model, val_loader, evaluator)
    print(results)

    return results
# Example #23
def do_test(cfg, model):
    """Evaluate ``model`` on every dataset listed in cfg.DATASETS.TEST.

    Picks LVIS or COCO evaluation from each dataset's registered
    ``evaluator_type``.  Returns an OrderedDict of per-dataset results,
    unwrapped to the bare result when there is exactly one dataset.
    """
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        # Use a custom-augmentation mapper unless the default test input
        # type is configured.
        mapper = None if cfg.INPUT.TEST_INPUT_TYPE == 'default' else \
            DatasetMapper(
                cfg, False, augmentations=build_custom_augmentation(cfg, False))
        data_loader = build_detection_test_loader(cfg,
                                                  dataset_name,
                                                  mapper=mapper)
        output_folder = os.path.join(cfg.OUTPUT_DIR,
                                     "inference_{}".format(dataset_name))
        evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type

        if evaluator_type == "lvis":
            evaluator = LVISEvaluator(dataset_name, cfg, True, output_folder)
        elif evaluator_type == 'coco':
            evaluator = COCOEvaluator(dataset_name, cfg, True, output_folder)
        else:
            # Unsupported evaluator type: fail loudly (note: stripped under -O).
            assert 0, evaluator_type

        results[dataset_name] = inference_on_dataset(model, data_loader,
                                                     evaluator)
        # Only the main process logs in a distributed run.
        if comm.is_main_process():
            logger.info("Evaluation results for {} in csv format:".format(
                dataset_name))
            print_csv_format(results[dataset_name])
    if len(results) == 1:
        results = list(results.values())[0]
    return results
def Predict():
    """Visualize and COCO-evaluate a trained Mask R-CNN on the MIDV-500 set.

    Writes one prediction image per test sample to a hard-coded output
    directory, then prints the COCO evaluation metrics.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    # register test dataset
    register_coco_instances("custom", {}, "datasets/testdata/midv500_coco.json", "datasets/testdata/")
    custom_metadata = MetadataCatalog.get("custom")
    dataset_dicts = DatasetCatalog.get("custom")

    # set cfg
    cfg = get_cfg()
    cfg.merge_from_file("configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml")
    cfg.DATASETS.TEST = ("custom", )
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = (512)  
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1 
    predictor = DefaultPredictor(cfg)

    # save prediction image results
    cnt=0
    for d in dataset_dicts:
        img = cv2.imread(d["file_name"])
        outputs = predictor(img)
        # IMAGE_BW dims the unsegmented background in the visualization.
        v = Visualizer(img[:, :, ::-1], metadata=custom_metadata, scale=1, instance_mode=ColorMode.IMAGE_BW)
        v = v.draw_instance_predictions(outputs["instances"].to("cpu"))
        # BGR<->RGB flip via [:, :, ::-1] on both read and write.
        cv2.imwrite('D:/eagletmp/detectron2-maskrcnn/outputimg/'+str(cnt)+'.png',v.get_image()[:, :, ::-1])
        cnt+=1

    # model evaulation
    evaluator = COCOEvaluator("custom", cfg, False, output_dir="./output/")
    val_loader = build_detection_test_loader(cfg, "custom")
    print(inference_on_dataset(predictor.model, val_loader, evaluator))
def do_test(cfg, model):
    """COCO-evaluate ``model`` on the first test dataset in ``cfg``."""
    name = cfg.DATASETS.TEST[0]
    coco_eval = COCOEvaluator(name, cfg, True, cfg.OUTPUT_DIR)
    wrapped = DatasetEvaluators([coco_eval])
    loader = build_detection_test_loader(cfg, name)
    return inference_on_dataset(model, loader, wrapped)
def do_test(cfg, model):
    """Run inference with the lazy-config evaluator, when one is configured.

    Returns the metrics dict, or None if cfg.dataloader has no "evaluator".
    """
    if "evaluator" not in cfg.dataloader:
        return None
    loader = instantiate(cfg.dataloader.test)
    evaluator = instantiate(cfg.dataloader.evaluator)
    metrics = inference_on_dataset(model, loader, evaluator)
    print_csv_format(metrics)
    return metrics
def best_inference(best_model_name, config_dir, model_dir, test_path_images, \
    test_annotations, confidence_threshold, result_metrics_dir, \
    video_img_dir, video, framerate):
    """Evaluate the best checkpoint on the test set, dump metrics to JSON and
    render a prediction video.

    :param best_model_name: checkpoint name passed to ``setup_config``.
    :param config_dir: directory with the model config.
    :param model_dir: directory with checkpoints; also receives eval output.
    :param test_path_images: test image directory registered by setup_data.
    :param test_annotations: test annotation file registered by setup_data.
    :param confidence_threshold: score threshold applied in the config.
    :param result_metrics_dir: where test_results.json is written.
    :param video_img_dir: frames directory for the rendered video.
    :param video: output video handle/path for ``create_video``.
    :param framerate: frame rate of the rendered video.
    """
    #Storing test data
    test_dataset_metadata, test_dataset_dicts = setup_data(test_path_images, \
        test_annotations)
    #Creating config file
    cfg = setup_config(config_dir, model_dir, best_model_name, \
        confidence_threshold)
    #Creating predictor
    predictor = DefaultPredictor(cfg)
    #Creating evaluator (keyword form: no cfg argument needed)
    evaluator = COCOEvaluator("test_detector", \
        distributed = False, output_dir=os.path.join(model_dir, "final_test"))
    #Building test loader
    test_loader = build_detection_test_loader(cfg, "test_detector")
    #Loading in train
    trainer = COCOFormatTrainer(cfg)
    #Getting inference results on trainer
    test_results = inference_on_dataset(trainer.model, test_loader, evaluator)
    #Dumping into json file
    with open(os.path.join(result_metrics_dir, 'test_results.json'), 'w') as \
        outfile:
        json.dump(dict(test_results), outfile)
    #Generating video from predictions
    create_video(video_img_dir, video, test_loader, \
        test_dataset_metadata, test_dataset_dicts, predictor, framerate)
# Example #28
def do_test(cfg, model):
    """Evaluate ``model`` on all test datasets, then run per-dataset and
    combined mAP / wilderness-impact (WIC) analyses.

    Per dataset: inference + BasicEvalOperations, then WIC.only_mAP_analysis
    on its predictions.  Afterwards (main process only): predictions from all
    datasets are concatenated and analysed together, and a WIC curve is
    plotted.  Returns None.
    """
    results = OrderedDict()
    for dataset_name in cfg.DATASETS.TEST:
        data_loader = build_detection_test_loader(cfg, dataset_name)
        evaluator = BasicEvalOperations(dataset_name, cfg, True, os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name))
        results[dataset_name] = inference_on_dataset(model, data_loader, evaluator)
        if comm.is_main_process():
            logger.info(f"Image level evaluation complete for {dataset_name}")
            logger.info(f"Results for {dataset_name}")
            WIC.only_mAP_analysis(results[dataset_name]['predictions']['correct'],
                                  results[dataset_name]['predictions']['scores'],
                                  results[dataset_name]['predictions']['pred_classes'],
                                  results[dataset_name]['category_counts'],
                                  evaluator._coco_api.cats)
    if comm.is_main_process():
        logger.info(f"Combined results for datasets {', '.join(cfg.DATASETS.TEST)}")
        eval_info={}
        # NOTE(review): category_counts are taken from the first dataset only
        # — presumably all datasets share categories; confirm.
        eval_info['category_counts'] = results[list(results.keys())[0]]['category_counts']
        eval_info['predictions']={}
        # Concatenate each prediction field across all datasets.
        for dataset_name in results:
            for k in results[dataset_name]['predictions'].keys():
                if k not in eval_info['predictions']:
                    eval_info['predictions'][k]=[]
                eval_info['predictions'][k].extend(results[dataset_name]['predictions'][k])
        WIC.only_mAP_analysis(eval_info['predictions']['correct'],
                              eval_info['predictions']['scores'],
                              eval_info['predictions']['pred_classes'],
                              eval_info['category_counts'])
        # WIC curve over wilderness ratios 0.0 .. 4.9 at selected recalls.
        Recalls_to_process = (0.1, 0.3, 0.5)
        wilderness = torch.arange(0, 5, 0.1).tolist()
        WIC_values,wilderness_processed = WIC.WIC_analysis(eval_info,Recalls_to_process=Recalls_to_process,wilderness=wilderness)
        plot_WIC(WIC_values, Recalls_to_process, wilderness_processed, line_style='--', label='Faster RCNN')
    return
# Example #29
def test_model(path,
               model,
               weights,
               dataset,
               action_type='test',
               mode="full",
               visualize=False):
    """Register a bottle dataset, restore a trained model and evaluate it.

    :param path: directory of the dataset to test; its basename becomes the
        registered test-dataset name.
    :param model: model identifier forwarded to the cfg generators.
    :param weights: base weights forwarded to ``gen_cfg_train``.
    :param dataset: training dataset name (also used for loader/metadata).
    :param action_type: registration split tag for the test set.
    :param mode: dataset registration mode.
    :param visualize: when True, also render the test images.
    :return: metrics dict from ``inference_on_dataset``.
    """
    dataset_name = os.path.basename(path)
    test = bottle_loader.register_dataset(path, dataset_name, action_type,
                                          mode)
    bottle_loader.register_dataset(path, dataset, 'train', mode)
    cfg_test = gen_cfg_test(dataset, model, dataset_name)
    cfg = gen_cfg_train(model, weights, dataset)
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.9
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    # NOTE(review): the evaluator is named after (dataset_name, action_type)
    # but the loader reads (dataset, 'train') — confirm this mismatch is
    # intentional before relying on the reported metrics.
    evaluator = COCOEvaluator("%s_%s" % (dataset_name, action_type),
                              cfg_test,
                              False,
                              output_dir="./output_%s/" % (dataset))
    val_loader = build_detection_test_loader(cfg_test,
                                             "%s_%s" % (dataset, 'train'))
    result = inference_on_dataset(trainer.model, val_loader, evaluator)

    #Visualize the test
    if visualize:
        visualize_images_dict(
            dataset_name, test,
            MetadataCatalog.get('%s_%s' % (dataset, 'train')), cfg,
            dataset_name)
    return result
def lslm_evaluation(cfg, output_dir=None):
    """Build a model from ``cfg`` and run COCO evaluation on its first test
    dataset.

    :param cfg: detectron2 config; cfg.DATASETS.TEST[0] is evaluated.
    :param output_dir: evaluation output directory; defaults to
        ``<cfg.OUTPUT_DIR>/inference``.
    :return: metrics dict from ``inference_on_dataset``.
    """
    # Build the model.
    model = build_model(cfg)
    # Build the test data loader.
    print(cfg.DATASETS.TEST[0])
    # NOTE(review): evaluator_type is computed but never used — confirm.
    evaluator_type = MetadataCatalog.get(cfg.DATASETS.TEST[0]).evaluator_type
    lslm_evaluation_data_loader = build_detection_test_loader(
        cfg, cfg.DATASETS.TEST[0])
    # for idx, inputs in enumerate(lslm_evaluation_data_loader):
    #     model.eval()
    #     outputs = model(inputs)
    # print("outputs: ", outputs)
    # Build the evaluator.
    if output_dir is None:
        print(cfg.OUTPUT_DIR)
        output_dir = os.path.join(cfg.OUTPUT_DIR, "inference")
    lslm_coco_evaluator = COCOEvaluator(cfg.DATASETS.TEST[0],
                                        cfg,
                                        True,
                                        output_dir=output_dir)
    # Run the evaluation.
    evaluation_result = inference_on_dataset(model,
                                             lslm_evaluation_data_loader,
                                             lslm_coco_evaluator)
    # The DatasetEvaluators-wrapped variant below behaves identically.
    # evaluation_result = inference_on_dataset(model, lslm_evaluation_data_loader,
    #                                          DatasetEvaluators([lslm_coco_evaluator]))

    return evaluation_result