Пример #1
0
def task_b_MOTS_training(model_name, model_file):
    """Fine-tune a model-zoo detector on MOTS, evaluate on KITTIMOTS_val,
    and save qualitative predictions.

    Args:
        model_name: Human-readable name used for the results folder and
            the inference image filenames.
        model_file: Detectron2 model-zoo config path (yaml).
    """
    print('Running task B for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_b', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('MOTS_train',)
    cfg.DATASETS.TEST = ('KITTIMOTS_val',)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 200
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # Swap the last two hooks so the validation-loss hook runs before the
    # final (writer) hook — presumably so its metrics get logged; TODO confirm.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation with the weights captured by the validation-loss hook.
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    draw_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize predictions on a fixed slice of val images.
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = kitti_val()
    inputs = inputs[220:233] + inputs[1995:2100]
    for i, sample in enumerate(inputs):  # renamed from `input` (shadowed builtin)
        file_name = sample['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(
            img[:, :, ::-1],  # BGR -> RGB for the visualizer
            metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            scale=0.8,
            instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
Пример #2
0
def task_b(model_name, model_file):
    """Fine-tune ``model_file`` and evaluate it on the kitti-mots-val split."""
    out_dir = Path("output/task_b") / model_name
    os.makedirs(out_dir, exist_ok=True)

    # Base config plus the hyper-parameters for this experiment.
    cfg = base_cfg(model_file, out_dir)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Train with a validation-loss hook; the last two hooks are swapped so
    # the validation hook runs ahead of the final hook.
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    head, tail = trainer._hooks[:-2], trainer._hooks[-2:]
    trainer._hooks = head + tail[::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluate with the weights captured by the validation-loss hook.
    evaluator = COCOEvaluator("kitti-mots-val", cfg, False, output_dir=out_dir)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, out_dir)

    get_qualitative_results(cfg, out_dir)
Пример #3
0
def task_a(model_name, model_file, checkpoint=None, evaluate=True, visualize=True):
    """Register the MOTS splits and evaluate/visualize a model without training.

    Args:
        model_name: Name used for the results folder and output filenames.
        model_file: Detectron2 model-zoo config path (yaml).
        checkpoint: Optional path to fine-tuned weights; when given the ROI
            head is configured for 3 classes.
        evaluate: Run COCO evaluation on 'MOTS_val'.
        visualize: Save predictions for the first and last 20 val images.
    """
    print('Running task A for model', model_name)
    if checkpoint:
        SAVE_PATH = os.path.join('./results_week_5_task_a', model_name + '_wCheckpoint')
    else:
        SAVE_PATH = os.path.join('./results_week_5_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Loading data. NOTE(review): DatasetCatalog.register raises if a name is
    # already registered, so this function can only run once per process.
    print('Loading data')
    dataloader = MOTS_Dataloader(dataset='motschallenge')
    def mots_train(): return dataloader.get_dicts(train_flag=True)
    def mots_val(): return dataloader.get_dicts(train_flag=False)
    DatasetCatalog.register('MOTS_train', mots_train)
    MetadataCatalog.get('MOTS_train').set(thing_classes=list(MOTS_CATEGORIES.keys()))
    DatasetCatalog.register('MOTS_val', mots_val)
    MetadataCatalog.get('MOTS_val').set(thing_classes=list(MOTS_CATEGORIES.keys()))

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    # Keep the metadata of the dataset the zoo model was trained on, so the
    # visualizer labels match the pretrained head's classes.
    model_training_metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
    cfg.DATASETS.TRAIN = ('MOTS_train', )
    cfg.DATASETS.TEST = ('MOTS_val', )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    if checkpoint:
        print('Using Checkpoint')
        cfg.MODEL.WEIGHTS = checkpoint
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    else:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)

    if evaluate:
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Evaluation
        print('Evaluating')
        evaluator = COCOEvaluator('MOTS_val', cfg, False, output_dir=SAVE_PATH)
        trainer = DefaultTrainer(cfg)
        trainer.test(cfg, model, evaluators=[evaluator])

    if visualize:
        # Qualitative results: visualize some results
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        inputs = mots_val()
        inputs = inputs[:20] + inputs[-20:]
        for i, sample in enumerate(inputs):  # renamed from `input` (shadowed builtin)
            img = cv2.imread(sample['file_name'])
            outputs = predictor(img)
            v = Visualizer(
                img[:, :, ::-1],
                metadata=model_training_metadata,
                scale=0.8,
                instance_mode=ColorMode.IMAGE)
            v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
Пример #4
0
def task_a_no_KITTI_training(model_name,
                             model_file,
                             evaluate=True,
                             visualize=True):
    """Evaluate a model-zoo detector on MOTS without any fine-tuning.

    Args:
        model_name: Name used for the results folder and output filenames.
        model_file: Detectron2 model-zoo config path (yaml).
        evaluate: If True, run COCO evaluation on 'MOTS_train'.
        visualize: If True, save predictions for a sample of images.
    """
    print('Running task A for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_a', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    model_training_metadata = MetadataCatalog.get(
        cfg.DATASETS.TRAIN[0])  # Store current model training metadata
    cfg.DATASETS.TRAIN = ('KITTIMOTS_train', )
    cfg.DATASETS.TEST = ('MOTS_train', )
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    # Pretrained weights straight from the model zoo — no training happens here.
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)

    if evaluate:
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Evaluation
        print('Evaluating')
        evaluator = COCOEvaluator('MOTS_train',
                                  cfg,
                                  False,
                                  output_dir=SAVE_PATH)
        trainer = DefaultTrainer(cfg)
        trainer.test(cfg, model, evaluators=[evaluator])

    if visualize:
        # Qualitative results: visualize some results
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        # NOTE(review): `mots_train` is not defined in this function nor
        # visible at module level here — presumably defined elsewhere in the
        # project; verify this does not raise NameError at runtime.
        inputs = mots_train()
        inputs = inputs[:20] + inputs[-20:]  # first and last 20 samples
        for i, input in enumerate(inputs):  # NOTE(review): `input` shadows the builtin
            img = cv2.imread(input['file_name'])
            outputs = predictor(img)
            v = Visualizer(img[:, :, ::-1],
                           metadata=model_training_metadata,
                           scale=0.8,
                           instance_mode=ColorMode.IMAGE)
            v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(
                os.path.join(
                    SAVE_PATH,
                    'Inference_' + model_name + '_inf_' + str(i) + '.png'),
                v.get_image()[:, :, ::-1])
Пример #5
0
def train(output, iou=None, nms=None, rpn=None):
    """Fine-tune a Cityscapes Mask R-CNN on KITTI-MOTS and evaluate it.

    ``iou``, ``nms`` and ``rpn`` optionally override the RPN IoU thresholds,
    the RPN NMS threshold and the pre-NMS top-k (train, test) respectively.
    """
    ims_per_batch = 2

    # Re-register the KITTI-MOTS splits from a clean catalog.
    DatasetCatalog.clear()
    register_kitti_mots_dataset("datasets/KITTI-MOTS/training/image_02",
                                "datasets/KITTI-MOTS/instances_txt",
                                ("kitti_mots_train", "kitti_mots_test"),
                                image_extension="png")

    base_config = "Cityscapes/mask_rcnn_R_50_FPN.yaml"

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(base_config))
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(base_config)
    cfg.SEED = 42

    cfg.DATASETS.TRAIN = ("kitti_mots_train",)
    cfg.DATASETS.TEST = ("kitti_mots_test",)
    cfg.DATALOADER.NUM_WORKERS = 4

    cfg.SOLVER.IMS_PER_BATCH = ims_per_batch
    cfg.SOLVER.BASE_LR = 0.0002 * ims_per_batch / 16  # pick a good LR
    cfg.SOLVER.MAX_ITER = 7500
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = output

    # Optional RPN overrides.
    if iou is not None:
        cfg.MODEL.RPN.IOU_THRESHOLDS = [iou[0], iou[1]]
    if nms is not None:
        cfg.MODEL.RPN.NMS_THRESH = nms
    if rpn is not None:
        cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN = rpn[0]
        cfg.MODEL.RPN.PRE_NMS_TOPK_TEST = rpn[1]

    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # Train with a validation-loss hook; the final two hooks are swapped so
    # the validation hook precedes the last one.
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    front, back = trainer._hooks[:-2], trainer._hooks[-2:]
    trainer._hooks = front + back[::-1]
    trainer.resume_or_load(resume=True)
    trainer.train()

    # COCO evaluation on the test split.
    evaluator = COCOEvaluator("kitti_mots_test", cfg, False, output_dir=output)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_losses(cfg)
Пример #6
0
def task_a(model_name, model_file):
    """Evaluate a pretrained detector on kitti-mots-val and save qualitative
    results under output/task_a/<model_name>.

    Args:
        model_name: Name of the run; used as the results sub-directory.
        model_file: Detectron2 model-zoo config path (yaml).
    """
    save_path = Path("output/task_a") / model_name
    os.makedirs(save_path, exist_ok=True)
    cfg = base_cfg(model_file, save_path)

    # Build the model and load the weights referenced by the config.
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

    # Write evaluation artifacts next to this run's other results; the
    # original hard-coded "./output", which mixed artifacts from different
    # models into one directory (sibling task_b already uses save_path).
    evaluator = COCOEvaluator("kitti-mots-val",
                              cfg,
                              False,
                              output_dir=save_path)
    trainer = DefaultTrainer(cfg)
    trainer.test(cfg, model, evaluators=[evaluator])

    get_qualitative_results(cfg, save_path)
Пример #7
0
def main(args):
    """Register the chefCap datasets, then train or (with eval_only) test.

    Returns the evaluation-results dict in eval-only mode, otherwise the
    return value of ``trainer.train()``.
    """
    # if args.unitest:
    #     return unitest()
    cfg = setup(args)

    for d in ["train", 'val']:
        # train for 6998images , val for 1199 images
        # NOTE(review): the lambda binds d as a default but never passes it
        # on, so both splits load the same dicts from
        # get_chefcap_image_dicts() — confirm this is intentional.
        DatasetCatalog.register("chefCap_" + d,
                                lambda d=d: get_chefcap_image_dicts())
        MetadataCatalog.get("chefCap_" + d).set(
            thing_classes=list(things_class_dict.keys()))
        if d == 'val':
            # Pascal-VOC-style evaluation metadata for the validation split.
            MetadataCatalog.get("chefCap_val").evaluator_type = "pascal_voc"
            MetadataCatalog.get("chefCap_val").year = 2012
            MetadataCatalog.get(
                "chefCap_val"
            ).dirname = "/opt/work/chefCap/detectron2_fasterrcnn/data"

    # for d in ["/opt/work/chefCap/data/ziped/Making-PascalVOC-export/"]:
    #     DatasetCatalog.register("chefCap_val",
    #                             lambda d=d: get_chefcap_image_dicts(d))
    # MetadataCatalog.get("chefCap_val").set(
    #     thing_classes=['face-head', 'mask-head', 'face-cap', 'mask-cap'])
    # MetadataCatalog.get("chefCap_val").evaluator_type = "pascal_voc"
    # MetadataCatalog.get("chefCap_val").dirname = "/opt/work/chefCap/data/ziped/Making-PascalVOC-export/"
    # MetadataCatalog.get("chefCap_val").year = 2012
    if args.eval_only:
        # Eval-only path: build the model, load weights, test (optionally
        # with test-time augmentation), and verify results on the main process.
        model = DefaultTrainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = DefaultTrainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(DefaultTrainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    # Training path.
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    # if cfg.TEST.AUG.ENABLED:
    #     trainer.register_hooks(
    #         [hooks.EvalHook(0, lambda: trainer.test_with_TTA(cfg, trainer.model))]
    #     )
    return trainer.train()
Пример #8
0
def main(args):
    """Train or (with eval_only) evaluate a detector on the OpenLogo dataset.

    Returns the evaluation-results dict in eval-only mode, otherwise the
    return value of ``trainer.train()``.
    """
    cfg = setup(args)
    show = True  # save a few visualizations in eval-only mode

    # Register the train/val splits under the names the config refers to.
    register_openlogo(cfg.DATASETS.TRAIN[0], "datasets/data/openlogo",
                      "trainval", "supervised_imageset")
    register_openlogo(cfg.DATASETS.TEST[0], "datasets/data/openlogo", "test",
                      "supervised_imageset")
    trainer = DefaultTrainer(cfg)

    evaluator = OpenLogoDetectionEvaluator(cfg.DATASETS.TEST[0])

    if args.eval_only:
        model = trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)

        if show:
            visualize(cfg, amount=20)

        res = trainer.test(cfg, model, evaluators=[evaluator])

        if comm.is_main_process():
            verify_results(cfg, res)
        if cfg.TEST.AUG.ENABLED:
            res.update(trainer.test_with_TTA(cfg, model))

        return res

    # Reuse the trainer built above; the original constructed a second
    # DefaultTrainer(cfg) here, redundantly building the model twice.
    trainer.resume_or_load(resume=args.resume)

    if cfg.TEST.AUG.ENABLED:
        trainer.register_hooks([
            hooks.EvalHook(0,
                           lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])

    return trainer.train()
Пример #9
0
def evaluate_on_dataset(
        config_file="../../configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml",
        override_cfg=(),
        test_datasets=(),
):
    """Load the model described by ``config_file`` (with optional list-style
    overrides) and run COCO evaluation on each dataset in ``test_datasets``.

    Returns the metrics dict produced by ``DefaultTrainer.test``.
    """
    if override_cfg is None:
        override_cfg = []

    cfg = get_cfg()
    cfg.merge_from_file(config_file)
    cfg.merge_from_list(override_cfg)
    cfg.DATASETS.TEST = test_datasets

    # Build the model and load the weights referenced by the config.
    model = build_model(cfg)
    Checkpointer(model).load(cfg.MODEL.WEIGHTS)

    # One COCO evaluator per test dataset.
    evaluators = [COCOEvaluator(name, cfg, False) for name in test_datasets]
    return DefaultTrainer.test(cfg, model, evaluators)
Пример #10
0
def KITTIMOTS_training_and_evaluation_task(model_name, model_file):
    """Register KITTI-MOTS, fine-tune a model-zoo detector for roughly one
    pass over the training set, evaluate it, and save qualitative results.

    Args:
        model_name: Name used for the results folder and image filenames.
        model_file: Detectron2 model-zoo config path (yaml).
    """
    path = os.path.join(SAVE_PATH, 'train_task', model_name)
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(path): os.makedirs(path)` pattern.
    os.makedirs(path, exist_ok=True)

    # Load Data
    print('Loading Data.')
    dataloader = KITTIMOTS_Dataloader()

    def kittimots_train():
        return dataloader.get_dicts(train_flag=True)

    def kittimots_test():
        return dataloader.get_dicts(train_flag=False)

    DatasetCatalog.register("KITTIMOTS_train", kittimots_train)
    MetadataCatalog.get("KITTIMOTS_train").set(
        thing_classes=list(KITTI_CATEGORIES.keys()))
    DatasetCatalog.register("KITTIMOTS_test", kittimots_test)
    MetadataCatalog.get("KITTIMOTS_test").set(
        thing_classes=list(KITTI_CATEGORIES.keys()))

    NUM_IMGS = len(kittimots_train())
    print(NUM_IMGS)

    # PARAMETERS
    print('Loading Model.')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('KITTIMOTS_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_test', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    # Roughly one epoch: ceil(NUM_IMGS / IMS_PER_BATCH).
    cfg.SOLVER.MAX_ITER = NUM_IMGS // cfg.SOLVER.IMS_PER_BATCH + 1
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2

    # Training
    print('Training....')
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # Swap the last two hooks so the validation-loss hook runs before the
    # final (writer) hook — presumably so its metrics get logged; TODO confirm.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # EVALUATION
    print('Evaluating....')
    # NOTE(review): evaluation artifacts go to "./output/" while the
    # inference images below go to `path` — confirm this split is intentional.
    evaluator = COCOEvaluator("KITTIMOTS_test",
                              cfg,
                              False,
                              output_dir="./output/")
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    plot_validation_loss(cfg)

    # Qualitative results
    print('Inference on trained model')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    dataloader = Inference_Dataloader()
    dataset = dataloader.load_data()
    print('Getting Qualitative Results...')
    for i, img_path in enumerate(dataset['test'][:20]):
        img = cv2.imread(img_path)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(
                path,
                'Inference_' + model_name + '_trained_' + str(i) + '.png'),
            v.get_image()[:, :, ::-1])
Пример #11
0
def experiment_1(exp_name, model_file):
    """Task B experiment: train on the full KITTI-MOTS train split, validate
    during training, evaluate on the test split, and save predictions.

    Args:
        exp_name: Experiment name used for the results folder and filenames.
        model_file: Detectron2 model-zoo config path (yaml).
    """
    print('Running Task B experiment', exp_name)
    SAVE_PATH = os.path.join('./results_week_6_task_b', exp_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Loading data
    print('Loading data')
    kittiloader = KittiMots()

    def rkitti_train():
        return kittiloader.get_dicts(flag='train',
                                     method='complete',
                                     percentage=1.0)

    def rkitti_val():
        return kittiloader.get_dicts(flag='val')

    def rkitti_test():
        return kittiloader.get_dicts(flag='test')

    DatasetCatalog.register('KITTI_train', rkitti_train)
    MetadataCatalog.get('KITTI_train').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))
    DatasetCatalog.register('KITTI_val', rkitti_val)
    MetadataCatalog.get('KITTI_val').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))
    DatasetCatalog.register('KITTI_test', rkitti_test)
    MetadataCatalog.get('KITTI_test').set(
        thing_classes=list(KITTI_CATEGORIES.keys()))

    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('KITTI_train', )
    cfg.DATASETS.TEST = ('KITTI_val', )  # val split while training; test later
    cfg.DATALOADER.NUM_WORKERS = 4
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.MAX_ITER = 4000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # Swap the last two hooks so the validation-loss hook runs before the
    # final (writer) hook — presumably so its metrics get logged; TODO confirm.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation on the held-out test split with the validation-hook weights.
    print('Evaluating')
    cfg.DATASETS.TEST = ('KITTI_test', )
    evaluator = COCOEvaluator('KITTI_test', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, exp_name, SAVE_PATH,
                         'validation_loss.png')

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = rkitti_test()
    inputs = [inputs[i] for i in TEST_INFERENCE_VALUES]
    for i, sample in enumerate(inputs):  # renamed from `input` (shadowed builtin)
        file_name = sample['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH,
                         'Inference_' + exp_name + '_inf_' + str(i) + '.png'),
            v.get_image()[:, :, ::-1])
Пример #12
0
# Script chunk: configure, train, then evaluate on 'my_dataset_val'.
# `cfg` (a detectron2 config) is created earlier, outside this chunk.
cfg.DATASETS.TEST = ()
cfg.DATALOADER.NUM_WORKERS = 4
# Let training initialize from model zoo
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
    'COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml')
cfg.SOLVER.IMS_PER_BATCH = 4
cfg.SOLVER.BASE_LR = 0.00025  # pick a good LR
cfg.SOLVER.MAX_ITER = 1000
# 300 iterations seems good enough for this toy dataset; you will need to train longer for a practical dataset
cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset (default: 512)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 20

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()

# Inference should use the config with parameters that are used in training
# cfg now already contains everything we've set previously. We changed it a little bit for inference:
cfg.MODEL.WEIGHTS = os.path.join(
    cfg.OUTPUT_DIR, 'model_final.pth')  # path to the model we just trained
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set a custom testing threshold
predictor = DefaultPredictor(cfg)

# Quantitative evaluation: segmentation AP on the validation split.
evaluator = COCOEvaluator('my_dataset_val', ('segm', ),
                          False,
                          output_dir='./output/')
val_loader = build_detection_test_loader(cfg, 'my_dataset_val')
print(inference_on_dataset(trainer.model, val_loader, evaluator))
# NOTE(review): build_model(cfg) constructs a fresh model here without
# loading any checkpoint, so this second evaluation presumably scores an
# uninitialized model — confirm intent.
print(trainer.test(cfg, build_model(cfg)))
Пример #13
0
def main(args):
    """Train and/or evaluate a wheat detector.

    Without ``args.model_dir``: build a fresh config from the model zoo and
    (optionally) train from COCO weights. With ``args.model_dir``: reload a
    previously saved config and its final weights.

    Writes evaluation metrics to <OUTPUT_DIR>/eval_results.json when
    ``args.eval`` is set.
    """
    # Register datasets
    print("Registering wheat_detection_train")
    DatasetCatalog.register(
        "wheat_detection_train",
        lambda path=args.train_annot_fp: get_detectron_dicts(path))
    MetadataCatalog.get("wheat_detection_train").set(thing_classes=["Wheat"])

    print("Registering wheat_detection_val")
    DatasetCatalog.register(
        "wheat_detection_val",
        lambda path=args.val_annot_fp: get_detectron_dicts(path))
    MetadataCatalog.get("wheat_detection_val").set(thing_classes=["Wheat"])

    # Set up configurations
    cfg = get_cfg()
    if not args.model_dir:
        cfg.merge_from_file(
            model_zoo.get_config_file(f"COCO-Detection/{args.model}.yaml"))
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            f"COCO-Detection/{args.model}.yaml")
        cfg.DATASETS.TRAIN = ("wheat_detection_train", )
        cfg.DATASETS.TEST = ("wheat_detection_val", )

        cfg.SOLVER.IMS_PER_BATCH = args.ims_per_batch
        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
        cfg.SOLVER.BASE_LR = args.lr
        cfg.SOLVER.MAX_ITER = args.max_iter
        cfg.SOLVER.WARMUP_ITERS = args.warmup_iters
        cfg.SOLVER.GAMMA = args.gamma
        cfg.SOLVER.STEPS = args.lr_decay_steps

        cfg.DATALOADER.NUM_WORKERS = 6
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1
        cfg.MODEL.RETINANET.NUM_CLASSES = 1
        cfg.OUTPUT_DIR = f"{args.model}__iter-{args.max_iter}__lr-{args.lr}"
        # Start from a clean output directory. (The original put the rmtree on
        # the same line as the `if` — PEP 8 violation.)
        if os.path.exists(cfg.OUTPUT_DIR):
            shutil.rmtree(cfg.OUTPUT_DIR)
        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

        # Save config so the run can later be reloaded via --model_dir.
        with open(os.path.join(cfg.OUTPUT_DIR, "config.yaml"), "w") as f:
            f.write(cfg.dump())
    else:
        print("Loading model from ", args.model_dir)
        cfg.merge_from_file(os.path.join(args.model_dir, "config.yaml"))
        cfg.MODEL.WEIGHTS = os.path.join(args.model_dir, "model_final.pth")
        cfg.OUTPUT_DIR = args.model_dir

    # Train
    setup_logger(output=os.path.join(cfg.OUTPUT_DIR, "terminal_output.log"))
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    if args.train:
        trainer.train()

    # Evaluate
    if args.eval:
        evaluator = COCOEvaluator("wheat_detection_val",
                                  cfg,
                                  False,
                                  output_dir=cfg.OUTPUT_DIR)
        eval_results = trainer.test(cfg=cfg,
                                    model=trainer.model,
                                    evaluators=evaluator)
        with open(os.path.join(cfg.OUTPUT_DIR, "eval_results.json"), "w") as f:
            json.dump(eval_results, f)
    def train(self, training_frames, train_method, network, gtruth_config):
        """Fine-tune a detector ('faster_rcnn' or 'retinanet') on generated data.

        Generates datasets from ``training_frames`` frames, loads a model-zoo
        config compatible with the installed detectron2 version, and — unless a
        final checkpoint already exists in the output directory — trains and
        runs the default test pass.

        Raises:
            ValueError: if ``training_frames`` is not positive.
        """
        if (training_frames <= 0):
            raise ValueError(
                "The number of input frames must be bigger than 0")

        self.cfg.OUTPUT_DIR = (
            f'../datasets/detectron2/{network}_{train_method}')

        self.generate_datasets(training_frames, train_method, gtruth_config)

        retinanet_path = "COCO-Detection/retinanet_R_101_FPN_3x.yaml"
        faster_rcnn_path = "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
        # detectron2 0.1 is handled through pkg_resources + ModelZooUrls;
        # newer versions use model_zoo.get_config_file / get_checkpoint_url.
        if (detectron2.__version__ == "0.1"):
            if network == 'faster_rcnn':
                self.cfg.merge_from_file(
                    pkg_resources.resource_filename(
                        "detectron2.model_zoo",
                        os.path.join("configs", faster_rcnn_path)))
                self.cfg.MODEL.WEIGHTS = model_zoo.ModelZooUrls.get(
                    faster_rcnn_path)
            if network == 'retinanet':
                self.cfg.merge_from_file(
                    pkg_resources.resource_filename(
                        "detectron2.model_zoo",
                        os.path.join("configs", retinanet_path)))
                self.cfg.MODEL.WEIGHTS = model_zoo.ModelZooUrls.get(
                    retinanet_path)
        else:
            if network == 'faster_rcnn':
                self.cfg.merge_from_file(
                    model_zoo.get_config_file(faster_rcnn_path))
                self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                    faster_rcnn_path)
            if network == 'retinanet':
                self.cfg.merge_from_file(
                    model_zoo.get_config_file(retinanet_path))
                self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
                    retinanet_path)

        # Training hyper-parameters; 'train_set'/'val_set' are the dataset
        # names presumably registered by generate_datasets — TODO confirm.
        self.cfg.DATASETS.TRAIN = ('train_set', )
        self.cfg.DATASETS.TEST = ('val_set', )
        self.cfg.DATALOADER.NUM_WORKERS = 1
        self.cfg.SOLVER.IMS_PER_BATCH = 1
        self.cfg.SOLVER.BASE_LR = 0.001
        self.cfg.SOLVER.MAX_ITER = 1000
        self.cfg.SOLVER.STEPS = (500, 1000)
        self.cfg.MODEL.ROI_HEADS.NUM_CLASSES = 1

        # Skip training entirely if a finished checkpoint is already present.
        if not os.path.isfile(
                os.path.join(self.cfg.OUTPUT_DIR, 'model_final.pth')):

            os.makedirs(self.cfg.OUTPUT_DIR, exist_ok=True)

            trainer = DefaultTrainer(self.cfg)
            trainer.resume_or_load(resume=False)
            trainer.train()

            # evaluator = COCOEvaluator("val_set", self.cfg, False, output_dir=self.cfg.OUTPUT_DIR)
            # trainer.test(self.cfg, trainer.model, evaluators=[evaluator])
            trainer.test(self.cfg, trainer.model)
Пример #15
0
def training_loop(SAVE_PATH, model_name, model_file, hyperparams, dataloader, checkpoint=None, visualize=True):
    """Train on MOTS with the given hyperparameters, evaluate on
    KITTIMOTS_val, and optionally save qualitative predictions.

    Args:
        SAVE_PATH: Output directory for weights, metrics and images.
        model_name: Name used in the loss plot and image filenames.
        model_file: Detectron2 model-zoo config path (yaml).
        hyperparams: Dict with keys 'batch', 'lr', 'scheduler', 'iou' and
            'top_k_train'.
        dataloader: Object exposing get_dicts(train_flag=...) for inference.
        checkpoint: Optional .pth checkpoint to start the weights from.
        visualize: If True, run inference on a sample of validation images.
    """
    # Load model and configuration
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('MOTS_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_val', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    if checkpoint:
        # Rewrite the stored iteration to -1 before using the checkpoint —
        # presumably so training starts fresh instead of resuming at the
        # checkpoint's iteration; TODO confirm.
        last_checkpoint = torch.load(checkpoint)
        # os.path.splitext strips only the extension; the original
        # checkpoint.split('.')[0] broke on paths containing other dots
        # (e.g. './ckpts/model.pth' -> '').
        new_path = os.path.splitext(checkpoint)[0] + '_modified.pth'
        last_checkpoint['iteration'] = -1
        torch.save(last_checkpoint, new_path)
        cfg.MODEL.WEIGHTS = new_path
    else:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = hyperparams['batch']
    cfg.SOLVER.BASE_LR = hyperparams['lr']
    cfg.SOLVER.LR_SCHEDULER_NAME = hyperparams['scheduler']
    cfg.MODEL.RPN.IOU_THRESHOLDS = hyperparams['iou']
    cfg.MODEL.RPN.PRE_NMS_TOPK_TRAIN = hyperparams['top_k_train']
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5

    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # Swap the last two hooks so the validation-loss hook runs before the
    # final (writer) hook — presumably so its metrics get logged; TODO confirm.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation with the weights captured by the validation-loss hook.
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val', cfg, False, output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    if visualize:
        # Qualitative results: visualize some results
        print('Getting qualitative results')
        predictor = DefaultPredictor(cfg)
        predictor.model.load_state_dict(trainer.model.state_dict())
        def kitti_val(): return dataloader.get_dicts(train_flag=False)
        inputs = kitti_val()
        inputs = inputs[:20] + inputs[-20:]
        for i, sample in enumerate(inputs):  # renamed from `input` (shadowed builtin)
            file_name = sample['file_name']
            print('Prediction on image ' + file_name)
            img = cv2.imread(file_name)
            outputs = predictor(img)
            v = Visualizer(
                img[:, :, ::-1],
                metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                scale=0.8,
                instance_mode=ColorMode.IMAGE)
            v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
            cv2.imwrite(os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
def main():
    """Train or evaluate (depending on the TRAIN global) a detector on
    KITTI-MOTS, then run COCO evaluation on the validation split.

    Relies on module-level globals: MODEL, MODEL_NAME, LR, MAX_ITER,
    THRESHOLD, TRAIN and REGISTERED.
    """
    print("Starting! \n")
    print("Model: ", MODEL, "\n")
    print("LR: ", LR, "\n")
    print("MAX_ITER: ", MAX_ITER, "\n")
    print("THRESHOLD: ", THRESHOLD, "\n")

    # Separate output folder per mode so test runs don't overwrite training.
    if TRAIN:
        PATH = './output_kittimots_' + MODEL_NAME
    else:
        PATH = './output_kittimots_' + MODEL_NAME + '/TESTING'

    cfg = get_cfg()
    cfg.OUTPUT_DIR = PATH
    cfg.merge_from_file(model_zoo.get_config_file(MODEL))

    if not TRAIN:
        # In eval mode, reuse the class names of the dataset the zoo model
        # was trained on (read before DATASETS.TRAIN is overwritten below).
        metadata = MetadataCatalog.get(cfg.DATASETS.TRAIN[0])
        thing_classes = metadata.thing_classes
        print(thing_classes)
        del metadata  # We don't need it anymore

    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(MODEL)
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = LR
    cfg.SOLVER.STEPS = (1000, MAX_ITER)
    # Fixed input size 1242x375 — presumably the KITTI image resolution;
    # TODO confirm.
    cfg.INPUT.MAX_SIZE_TRAIN = 1242
    cfg.INPUT.MAX_SIZE_TEST = 1242
    cfg.INPUT.MIN_SIZE_TRAIN = (375, )
    cfg.INPUT.MIN_SIZE_TEST = 375
    cfg.SOLVER.MAX_ITER = MAX_ITER
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

    # Finish configuration depending on the procedure we are performing
    if TRAIN:
        datasets = [
            'kitti-mots-dataset-train', 'kitti-mots-dataset-validation'
        ]
        cfg.DATASETS.TRAIN = ('kitti-mots-dataset-train', )
    else:
        # NOTE(review): TRAIN is pointed at the validation split in eval
        # mode — confirm this is intended (no training happens in this mode).
        datasets = ['kitti-mots-dataset-validation']
        cfg.DATASETS.TRAIN = ('kitti-mots-dataset-validation', )
    cfg.DATASETS.TEST = ("kitti-mots-dataset-validation", )

    # Register the datasets
    if not REGISTERED:
        for d in datasets:
            DatasetCatalog.register(d, lambda d=d: kitti_mots_dataset(d))
            if TRAIN:
                MetadataCatalog.get(d).set(thing_classes=["Car", "Pedestrian"])
                cfg.MODEL.ROI_HEADS.NUM_CLASSES = 2
            else:
                MetadataCatalog.get(d).set(thing_classes=thing_classes)
                cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(thing_classes)

    # If training is done, get the TRAINing dataset metadata.
    """if TRAIN:
        kitti_mots_metadata_train = MetadataCatalog.get("kitti-mots-dataset_train")
    kitti_mots_metadata_validation = MetadataCatalog.get("kitti-mots-dataset-validation")"""

    # Set-up trainer
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    # Train if wanted
    if TRAIN:
        print('Start training')
        trainer.train()

    # Set the dataset to test and the threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = THRESHOLD
    evaluator = COCOEvaluator("kitti-mots-dataset-validation",
                              cfg,
                              False,
                              output_dir=PATH)

    # Update weights if the model has been trained
    if TRAIN:
        cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")
        # Start evaluation
        trainer.test(cfg, trainer.model, evaluators=[evaluator])
    else:
        val_loader = build_detection_test_loader(
            cfg, "kitti-mots-dataset-validation")
        inference_on_dataset(trainer.model, val_loader, evaluator)

    print("Finishing! \n\n\n\n")
Пример #17
0
from detectron2.engine import DefaultTrainer
from detectron2.evaluation import COCOEvaluator
from detectron2 import model_zoo

from utils import get_my_cfg
from visdrone import register_one_set

import os

if __name__ == '__main__':
    # Make both VisDrone splits known to detectron2's dataset catalog.
    train_split = "VisDrone2019-DET-train"
    val_split = "VisDrone2019-DET-val"
    for split in (train_split, val_split):
        register_one_set(split)

    # Base config, initialized from COCO-pretrained Faster R-CNN R50-FPN.
    cfg = get_my_cfg()
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
        "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml")
    cfg.DATASETS.TRAIN = (train_split, )
    cfg.DATASETS.TEST = (val_split, )

    # Fine-tune from the pretrained checkpoint (fresh run, no resume).
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Box-AP evaluation on the validation split using the COCO protocol.
    evaluator = COCOEvaluator(val_split, ("bbox", ),
                              False,
                              output_dir=os.path.join("output", "evaluate"))
    trainer.test(cfg, trainer.model, evaluator)
# Example #18 (score: 0)
def train_task(model_name, model_file):
    """Fine-tune a model-zoo detector on KITTI and report results.

    Registers the KITTI train/test splits, trains the model for roughly one
    epoch, runs COCO evaluation, and writes qualitative predictions for a few
    KITTI test images and for MIT_split 'inside_city' images.

    Args:
        model_name: Human-readable identifier used in output filenames.
        model_file: Model-zoo config path (.yaml) of the architecture to train.
    """
    path = os.path.join(SAVE_PATH, 'train_task', model_name)
    # exist_ok avoids the check-then-create race of the original code.
    os.makedirs(path, exist_ok=True)

    # Load Data
    print('Loading Data.')
    dataloader = KITTI_Dataloader()
    def kitti_train(): return dataloader.get_dicts(train_flag=True)
    def kitti_test(): return dataloader.get_dicts(train_flag=False)
    DatasetCatalog.register("KITTI_train", kitti_train)
    MetadataCatalog.get("KITTI_train").set(thing_classes=list(CATEGORIES.keys()))
    DatasetCatalog.register("KITTI_test", kitti_test)
    MetadataCatalog.get("KITTI_test").set(thing_classes=list(CATEGORIES.keys()))

    # Load model and configure train hyperparameters
    print('Loading Model.')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('KITTI_train',)
    cfg.DATASETS.TEST = ('KITTI_test',)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    # Approximately one pass over NUM_IMGS images at the configured batch size.
    cfg.SOLVER.MAX_ITER = NUM_IMGS // cfg.SOLVER.IMS_PER_BATCH + 1
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 9

    # TRAIN!!
    print('Training.......')
    os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
    print('Training Done.')

    # EVAL — reload the freshly trained weights for inference.
    # (A bare no-op expression `cfg.TEST.KEYPOINT_OKS_SIGMAS` was removed here.)
    print('Evaluating......')
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, 'model_final.pth')
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    predictor = DefaultPredictor(cfg)
    dataset_dicts = kitti_test()
    # Visualize predictions on a few random test images; cap the sample size
    # so random.sample does not raise on tiny datasets.
    for i, d in enumerate(random.sample(dataset_dicts, min(5, len(dataset_dicts)))):
        im = cv2.imread(d['file_name'])
        outputs = predictor(im)
        v = Visualizer(im[:, :, ::-1],
                   metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                   scale=0.8,
                   instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(os.path.join(path, 'Evaluation_' + model_name + '_trained_' + str(i) + '.png'), v.get_image()[:, :, ::-1])
    print('COCO EVALUATOR....')
    evaluator = COCOEvaluator('KITTI_test', cfg, False, output_dir="./output/")
    trainer.test(cfg, trainer.model, evaluators=[evaluator])

    # Loading training and test examples
    inference_dataloader = Inference_Dataloader(MIT_DATA_DIR)
    inference_dataset = inference_dataloader.load_data()

    # Qualitative results: visualize some prediction results on MIT_split dataset
    for i, img_path in enumerate([i for i in inference_dataset['test'] if 'inside_city' in i][:20]):
        img = cv2.imread(img_path)
        outputs = predictor(img)
        v = Visualizer(
            img[:, :, ::-1],
            metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            scale=0.8,
            instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(os.path.join(path, 'Inference_' + model_name + '_trained_' + str(i) + '.png'), v.get_image()[:, :, ::-1])

    print('DONE!!')
# Example #19 (score: 0)
def task_b_MOTS_and_KITTI_training(model_name, model_file):
    """Train on the combined MOTS+KITTI training set and evaluate on KITTI-MOTS.

    Trains `model_file` for 1000 iterations with a cosine LR schedule, records
    validation loss via a custom hook, evaluates with COCO metrics, plots the
    losses, and saves qualitative predictions for a slice of validation images.

    Args:
        model_name: Identifier used for the results directory and filenames.
        model_file: Model-zoo config path (.yaml) of the architecture to train.
    """
    print('Running task B for model', model_name)

    SAVE_PATH = os.path.join('./results_week_5_task_c', model_name)
    os.makedirs(SAVE_PATH, exist_ok=True)

    # Load model and configuration
    # (dead commented-out hyperparameter experiments removed from here)
    print('Loading Model')
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(model_file))
    cfg.DATASETS.TRAIN = ('MOTS_KITTI_train', )
    cfg.DATASETS.TEST = ('KITTIMOTS_val', )
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5
    cfg.OUTPUT_DIR = SAVE_PATH
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(model_file)
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.00025
    cfg.SOLVER.LR_SCHEDULER_NAME = "WarmupCosineLR"
    cfg.SOLVER.MAX_ITER = 1000
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 256
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.TEST.SCORE_THRESH = 0.5
    print(cfg)
    # Training
    print('Training')
    trainer = DefaultTrainer(cfg)
    val_loss = ValidationLoss(cfg)
    trainer.register_hooks([val_loss])
    # Swap the last two hooks so the validation-loss hook runs before the
    # final hook — presumably so its values reach the writers. TODO confirm.
    trainer._hooks = trainer._hooks[:-2] + trainer._hooks[-2:][::-1]
    trainer.resume_or_load(resume=False)
    trainer.train()

    # Evaluation — load the weights captured by the validation-loss hook.
    print('Evaluating')
    evaluator = COCOEvaluator('KITTIMOTS_val',
                              cfg,
                              False,
                              output_dir=SAVE_PATH)
    trainer.model.load_state_dict(val_loss.weights)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])
    print('Plotting losses')
    plot_validation_loss(cfg, cfg.SOLVER.MAX_ITER, model_name, SAVE_PATH)

    # Qualitative results: visualize some results
    print('Getting qualitative results')
    predictor = DefaultPredictor(cfg)
    predictor.model.load_state_dict(trainer.model.state_dict())
    inputs = kitti_val()
    # Hand-picked slices of the validation sequence for visualization.
    inputs = inputs[220:233] + inputs[1995:2100]
    for i, input in enumerate(inputs):
        file_name = input['file_name']
        print('Prediction on image ' + file_name)
        img = cv2.imread(file_name)
        outputs = predictor(img)
        v = Visualizer(img[:, :, ::-1],
                       metadata=MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
                       scale=0.8,
                       instance_mode=ColorMode.IMAGE)
        v = v.draw_instance_predictions(outputs['instances'].to('cpu'))
        cv2.imwrite(
            os.path.join(SAVE_PATH, 'Inference_' + model_name + '_inf_' +
                         str(i) + '.png'),
            v.get_image()[:, :, ::-1])
# Dataset wiring: train and test both point at the KITTI-MOTS training split.
cfg.DATASETS.TRAIN = ("kitti-mots-dataset-train",)
cfg.DATASETS.TEST = ("kitti-mots-dataset-train",)
cfg.OUTPUT_DIR = './output_kittimots_all'
cfg.DATALOADER.NUM_WORKERS = 1

# Choose the COCO-pretrained checkpoint matching the selected architecture.
if model == "retinanet":
  weights_config = "COCO-Detection/retinanet_R_101_FPN_3x.yaml"
else:
  weights_config = "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(weights_config)

# Solver schedule and ROI-head settings.
cfg.SOLVER.IMS_PER_BATCH = 1
cfg.SOLVER.BASE_LR = 0.005
cfg.SOLVER.WARMUP_ITERS = 1000
cfg.SOLVER.MAX_ITER = 3000
cfg.SOLVER.STEPS = (1000, 3000)
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.50

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)

# Fine-tune from the pretrained checkpoint.
trainer = DefaultTrainer(cfg)
trainer.resume_or_load(resume=False)
trainer.train()

# COCO-style evaluation.
# NOTE(review): "kitti-mots-dataset-val" differs from the
# "kitti-mots-dataset-validation" name used elsewhere in this file —
# confirm this name is actually registered before running.
evaluator = COCOEvaluator("kitti-mots-dataset-val", cfg, False, output_dir='./output_kittimots_all')
trainer.test(cfg, trainer.model, evaluators=[evaluator])
def main():
    """Evaluate a COCO-pretrained detector on the KITTI-MOTS validation split.

    Loads pre-serialized dataset dicts from pickle files, registers both
    splits with detectron2's catalogs, builds either RetinaNet or Faster
    R-CNN from the model zoo, and runs COCO evaluation without fine-tuning.
    """
    # Context managers guarantee the pickle files are closed (the original
    # opened them with open() and never closed them).
    with open('../KITTI-MOTS/train_KITTI-MOTS_dataset_local.pkl',
              'rb') as pkl_file_train:
        dataset_dicts_train = pickle.load(pkl_file_train,
                                          fix_imports=True,
                                          encoding='ASCII',
                                          errors='strict')
    with open('../KITTI-MOTS/validation_KITTI-MOTS_dataset_local.pkl',
              'rb') as pkl_file_val:
        dataset_dicts_validation = pickle.load(pkl_file_val,
                                               fix_imports=True,
                                               encoding='ASCII',
                                               errors='strict')

    def kitti_mots_dataset(d):
        # Map a registered dataset name to its list of dataset dicts.
        if d == "kitti-mots-dataset-train":
            return dataset_dicts_train
        else:
            return dataset_dicts_validation

    for d in ["kitti-mots-dataset-train", "kitti-mots-dataset-validation"]:
        # d=d binds the loop variable per-lambda (avoids late-binding bug).
        DatasetCatalog.register(d, lambda d=d: kitti_mots_dataset(d))
        MetadataCatalog.get(d).set(thing_classes=["none", "Car", "Pedestrian"])

    # BUGFIX: the original queried "kitti-mots-dataset_train" (underscore),
    # which silently created a fresh empty Metadata entry instead of
    # fetching the registered "kitti-mots-dataset-train".
    kitti_mots_metadata_train = MetadataCatalog.get("kitti-mots-dataset-train")
    kitti_mots_metadata_validation = MetadataCatalog.get(
        "kitti-mots-dataset-validation")

    retinanet = False  # flip to True to evaluate RetinaNet instead

    # Load MODEL and Configuration
    PATH = './output'
    cfg = get_cfg()

    if retinanet:
        cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-Detection/retinanet_R_101_FPN_3x.yaml"))
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            "COCO-Detection/retinanet_R_101_FPN_3x.yaml")
    else:
        cfg.merge_from_file(
            model_zoo.get_config_file(
                "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml"))
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            "COCO-Detection/faster_rcnn_X_101_32x8d_FPN_3x.yaml")

    cfg.DATASETS.TRAIN = ("kitti-mots-dataset-train", )
    cfg.DATASETS.TEST = ("kitti-mots-dataset-validation", )
    cfg.DATALOADER.NUM_WORKERS = 1
    cfg.OUTPUT_DIR = PATH

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=False)

    # Evaluation with COCO metrics on the validation split.
    evaluator = COCOEvaluator("kitti-mots-dataset-validation",
                              cfg,
                              False,
                              output_dir=PATH)
    trainer.test(cfg, trainer.model, evaluators=[evaluator])