Ejemplo n.º 1
0
def main(args):
    """Entry point: build the model and run test-time-augmentation inference.

    Only evaluation is supported; any attempt to train raises.
    """
    cfg = setup(args)

    if not args.eval_only:
        # Training is deliberately unsupported in this script.
        raise Exception("Only evaluation supported")

    model = Trainer.build_model(cfg)
    checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
    Trainer.inference_with_TTA(cfg, model, args)
Ejemplo n.º 2
0
def main(args):
    """Evaluate a checkpoint when --eval-only is set, otherwise train."""
    cfg = setup(args)

    if args.eval_only:
        model = TrainerNoMeta.build_model(cfg)
        ckpt = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        ckpt.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        return TrainerNoMeta.test(cfg, model)

    trainer = TrainerNoMeta(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Ejemplo n.º 3
0
    def __init__(self, cfg):
        """
        Build all training state (model, optimizer, loader, inner trainer,
        scheduler, checkpointer, hooks) from ``cfg``.

        Args:
            cfg (CfgNode): full detectron2 config.
        """
        super().__init__()
        logger = logging.getLogger("detectron2")
        if not logger.isEnabledFor(
                logging.INFO):  # setup_logger is not called for d2
            setup_logger()
        # Rescale batch size / iteration schedule to the actual world size.
        cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())

        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        optimizer = self.build_optimizer(cfg, model)
        data_loader = self.build_train_loader(cfg)

        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(model,
                                            device_ids=[comm.get_local_rank()],
                                            broadcast_buffers=False)
        # Mixed-precision trainer when AMP is enabled, plain loop otherwise.
        self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else
                         SimpleTrainer)(model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg

        self.register_hooks(self.build_hooks())
Ejemplo n.º 4
0
def evaluate(args, mode, _appcfg):
    """Evaluate a trained Mask R-CNN checkpoint on the "hmd" test split
    with COCO metrics.

    Args:
        args: CLI arguments (unused here, kept for interface compatibility).
        mode: run mode (unused here, kept for interface compatibility).
        _appcfg: application config forwarded to dataset registration.
    """
    name = "hmd"

    #uncomment if using trainer.model
    # for subset in ["train", "val"]:
    #     metadata = load_and_register_dataset(name, subset, _appcfg)

    subset = "test"
    # subset = "val"
    metadata = load_and_register_dataset(name, subset, _appcfg)

    dataset_name = get_dataset_name(name, subset)

    # Forces the dataset to load so registration problems surface early.
    dataset_dicts = DatasetCatalog.get(dataset_name)

    cfg = config.get_cfg()
    cfg.merge_from_file(
        "/codehub/external/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
    )
    cfg.DATASETS.TRAIN = ("hmd_train", "hmd_val")
    # Bug fix: DATASETS.TEST must be a tuple of dataset names. The original
    # ``(dataset_name)`` is just a parenthesized string; the trailing comma
    # makes it a proper 1-tuple.
    cfg.DATASETS.TEST = (dataset_name,)

    cfg.OUTPUT_DIR = "/codehub/apps/detectron2/release"
    cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "model_final.pth")

    cfg.DATALOADER.NUM_WORKERS = 2
    cfg.SOLVER.IMS_PER_BATCH = 2
    cfg.SOLVER.BASE_LR = 0.00025

    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128  # faster, and good enough for this toy dataset
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7

    # mapper = DatasetMapper(cfg, False)
    _loader = build_detection_test_loader(cfg, dataset_name)
    evaluator = COCOEvaluator(dataset_name,
                              cfg,
                              False,
                              output_dir=cfg.OUTPUT_DIR)

    # Load the trained weights directly into a freshly built model.
    file_path = cfg.MODEL.WEIGHTS
    model = build_model(cfg)
    DetectionCheckpointer(model).load(file_path)

    inference_on_dataset(model, _loader, evaluator)
Ejemplo n.º 5
0
def load_model(config_file, model_weights, model_device):
    """Build a detectron2 model from a yaml config and load its weights.

    The weights path and device are injected through the standard argument
    parser as config overrides.
    """
    print('Loading cfg')
    cli = default_argument_parser().parse_args([
        '--config-file', config_file,
        'MODEL.WEIGHTS', model_weights,
        'MODEL.DEVICE', model_device,
    ])
    cfg = get_cfg()
    cfg.merge_from_file(cli.config_file)
    cfg.merge_from_list(cli.opts)
    cfg.freeze()
    default_setup(cfg, cli)

    print('Loading model')
    model = build_model(cfg)
    DetectionCheckpointer(model, cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=True)
    return model
Ejemplo n.º 6
0
class Tester:
    """Evaluates a series of checkpoints with ``Trainer.test`` and keeps
    track of the one with the best bbox AP, persisting all results as JSON
    under ``<OUTPUT_DIR>/inference/all_res.json``.
    """

    def __init__(self, cfg):
        self.cfg = cfg
        self.model = Trainer.build_model(cfg)
        self.check_pointer = DetectionCheckpointer(
            self.model, save_dir=cfg.OUTPUT_DIR
        )

        # Best result seen so far and the checkpoint that produced it.
        self.best_res = None
        self.best_file = None
        # Maps checkpoint path -> its result dict (plus best_* summary keys).
        self.all_res = {}

    def test(self, ckpt):
        """Evaluate one checkpoint file and update the best-result tracking.

        Args:
            ckpt: path to the checkpoint to load and evaluate.
        """
        # NOTE: uses the checkpointer's private loading API so an arbitrary
        # file can be loaded without touching "last checkpoint" bookkeeping.
        self.check_pointer._load_model(self.check_pointer._load_file(ckpt))
        print("evaluating checkpoint {}".format(ckpt))
        res = Trainer.test(self.cfg, self.model)

        if comm.is_main_process():
            verify_results(self.cfg, res)
            print(res)
            # Simplified from the original redundant form
            # ``(best is None) or (best is not None and ...)``.
            if self.best_res is None or self.best_res["bbox"]["AP"] < res["bbox"]["AP"]:
                self.best_res = res
                self.best_file = ckpt
            print("best results from checkpoint {}".format(self.best_file))
            print(self.best_res)
            self.all_res["best_file"] = self.best_file
            self.all_res["best_res"] = self.best_res
            self.all_res[ckpt] = res
            os.makedirs(
                os.path.join(self.cfg.OUTPUT_DIR, "inference"), exist_ok=True
            )
            with open(
                os.path.join(self.cfg.OUTPUT_DIR, "inference", "all_res.json"),
                "w",
            ) as fp:
                json.dump(self.all_res, fp)
Ejemplo n.º 7
0
 def build_train_loader(cls, cfg: CfgNode):
     """Build the training loader, optionally mixed with inference-based
     (bootstrap) loaders whose samples come from a frozen helper model.
     """
     data_loader = build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
     if not has_inference_based_loaders(cfg):
         return data_loader
     # A separate bootstrap model produces the inference-based samples.
     model = cls.build_model(cfg)
     model.to(cfg.BOOTSTRAP_MODEL.DEVICE)
     DetectionCheckpointer(model).resume_or_load(cfg.BOOTSTRAP_MODEL.WEIGHTS, resume=False)
     inference_based_loaders, ratios = build_inference_based_loaders(cfg, model)
     # The plain loader always participates with weight 1.0.
     loaders = [data_loader] + inference_based_loaders
     ratios = [1.0] + ratios
     combined_data_loader = build_combined_loader(cfg, loaders, ratios)
     sample_counting_loader = SampleCountingLoader(combined_data_loader)
     return sample_counting_loader
Ejemplo n.º 8
0
Archivo: PFPN_d2.py Proyecto: w-hc/pcv
    def __init__(self,
                 num_classes,
                 num_votes,
                 fix_bn=True,
                 freeze_at=2,
                 norm='GN',
                 fpn_norm='',
                 conv_dims=128,
                 **kwargs):  # FPN_C, backbone='resnet50'):
        """Panoptic-FPN backbone with two semantic-segmentation heads.

        Builds a detectron2 Panoptic-FPN R50 backbone, then two sem-seg
        heads from the same config: one sized ``num_classes`` (kept as
        ``self.sem_seg_head`` with its predictor split off into
        ``self.sem_classifier``) and a throwaway one sized ``num_votes``
        whose predictor becomes ``self.vote_classifier``.
        """
        super().__init__()
        d2_cfg = detectron2.config.get_cfg()
        d2_cfg.merge_from_file(
            '/home-nfs/whc/glab/detectron2/configs/COCO-PanopticSegmentation/panoptic_fpn_R_50_1x.yaml'
        )
        if fix_bn:
            d2_cfg.MODEL.RESNETS.NORM = "FrozenBN"
        else:
            d2_cfg.MODEL.RESNETS.NORM = "BN"
        d2_cfg.MODEL.BACKBONE.FREEZE_AT = freeze_at

        d2_cfg.MODEL.FPN.NORM = 'BN'
        self.backbone = build_backbone(d2_cfg)

        d2_cfg.MODEL.SEM_SEG_HEAD.NORM = norm
        d2_cfg.MODEL.SEM_SEG_HEAD.CONVS_DIM = conv_dims

        # First head: class predictions; its final predictor is detached so
        # the head outputs features only.
        d2_cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = num_classes
        self.sem_seg_head = build_sem_seg_head(d2_cfg,
                                               self.backbone.output_shape())
        self.sem_classifier = self.sem_seg_head.predictor
        del self.sem_seg_head.predictor

        # Second head is built only to harvest a predictor for the votes.
        d2_cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES = num_votes
        tmp = build_sem_seg_head(d2_cfg, self.backbone.output_shape())
        self.vote_classifier = tmp.predictor
        # NOTE(review): ``cfg`` is not defined in this method — presumably a
        # module-level config object; confirm it exists at import time.
        assert cfg.data.dataset.params.caffe_mode == True

        checkpointer = DetectionCheckpointer(self)
        checkpointer.load(d2_cfg.MODEL.WEIGHTS)
def run_train():
    """Fine-tune a COCO Faster R-CNN R101-FPN model on the grini datasets.

    Saves the zoo-initialized weights before training, then runs the
    standard CocoTrainer loop from scratch (resume=False).
    """
    torch.multiprocessing.freeze_support()

    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file('COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml'))
    # cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.75  # Threshold
    cfg.MODEL.WEIGHTS = "detectron2://COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl"
    # Fix: the original assigned MODEL.DEVICE = "cpu" twice; once suffices.
    cfg.MODEL.DEVICE = "cpu"  # cpu or cuda

    register_datasets()
    cfg.DATASETS.TRAIN = ('grini_nc_merged_bbox_only_train',)
    cfg.DATASETS.TEST = ('grini_nc_merged_bbox_only_val',)

    # cfg.MODEL.WEIGHTS = get_checkpoint_url('COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml')

    # todo find out how rescale images and annotations first...
    # Parameters fixed
    cfg.SOLVER.IMS_PER_BATCH = 4
    cfg.SOLVER.BASE_LR = 0.001
    cfg.SOLVER.WARMUP_ITERS = 1000
    cfg.SOLVER.MAX_ITER = 1500  # adjust up if val mAP is still rising, adjust down if overfit
    cfg.SOLVER.STEPS = (1000, 1500)
    cfg.SOLVER.GAMMA = 0.05
    cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 12
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = 3

    cfg.TEST.EVAL_PERIOD = 500

    # makedirs(cfg.OUTPUT_DIR, exist_ok=True)
    setup_logger(output=cfg.OUTPUT_DIR, distributed_rank=comm.get_rank(), name="my_model")

    # Snapshot the untrained (zoo-initialized) weights before training.
    checkpointer = DetectionCheckpointer(build_model(cfg), save_dir=cfg.OUTPUT_DIR)
    checkpointer.save("model_faster_rcnn_unscaled")

    trainer = CocoTrainer(cfg)
    trainer.resume_or_load(resume=False)
    trainer.train()
Ejemplo n.º 10
0
def do_activation(cfg):
    """Count operator activations over ``args.num_inputs`` samples and log
    per-operator and total statistics (in millions).
    """
    if isinstance(cfg, CfgNode):
        # Yaml-config path.
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    else:
        # LazyConfig path.
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
        data_loader = instantiate(cfg.dataloader.test)
    model.eval()

    counts = Counter()
    total_activations = []
    for idx, inputs in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        per_input = activation_count_operators(model, inputs)
        counts += per_input
        total_activations.append(sum(per_input.values()))
    logger.info("(Million) Activations for Each Type of Operators:\n" +
                str([(k, v / idx) for k, v in counts.items()]))
    logger.info("Total (Million) Activations: {}±{}".format(
        np.mean(total_activations), np.std(total_activations)))
def do_activation(cfg):
    """Count operator activations over ``args.num_inputs`` inputs and log
    the per-operator totals (in millions).
    """
    model = build_model(cfg)
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()
    data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])

    counts = Counter()
    for idx, inputs in zip(tqdm.trange(args.num_inputs), data_loader):  # noqa
        counts += activation_count_operators(model, inputs)
    logger.info(
        "(Million) Activations for Each Type of Operators:\n"
        + str([(k, v / idx) for k, v in counts.items()])
    )
Ejemplo n.º 12
0
def main(args):
    """Build the model, then either evaluate only or train and evaluate."""
    cfg = setup(args)

    model = build_model(cfg)
    logger.info("Model:\n{}".format(model))

    if not args.eval_only:
        do_train(cfg, model, resume=args.resume)
        return do_test(cfg, model)

    checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
    checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
    return do_test(cfg, model)
Ejemplo n.º 13
0
    def __init__(self, cfg_file, model_path, classes, confidence_thresh=0.5):
        """Load an eval-mode detector with custom class names.

        Args:
            cfg_file: path to a detectron2 yaml config.
            model_path: directory containing ``model_final.pth``.
            classes: class names to register as ``thing_classes`` for the
                first TEST dataset.
            confidence_thresh: ROI score threshold applied at test time.
        """
        self.cpu_device = torch.device("cpu")

        self.cfg = get_cfg()
        self.cfg.merge_from_file(cfg_file)
        self.cfg.MODEL.WEIGHTS = os.path.join(model_path, "model_final.pth")
        self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = confidence_thresh
        self.model = build_model(self.cfg)
        self.model.eval()

        # Override the dataset metadata so visualizations use these names.
        MetadataCatalog.get(self.cfg.DATASETS.TEST[0]).thing_classes = classes
        self.metadata = MetadataCatalog.get(self.cfg.DATASETS.TEST[0])

        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(self.cfg.MODEL.WEIGHTS)

        # Same resize policy DefaultPredictor uses: shortest edge to
        # MIN_SIZE_TEST, capped at MAX_SIZE_TEST.
        self.transform_gen = T.ResizeShortestEdge(
            [self.cfg.INPUT.MIN_SIZE_TEST, self.cfg.INPUT.MIN_SIZE_TEST],
            self.cfg.INPUT.MAX_SIZE_TEST)

        self.input_format = self.cfg.INPUT.FORMAT
        assert self.input_format in ["RGB", "BGR"], self.input_format
Ejemplo n.º 14
0
def main(args):
    """Run evaluation or training from a LazyConfig file."""
    cfg = LazyConfig.load(args.config_file)
    cfg = LazyConfig.apply_overrides(cfg, args.opts)
    default_setup(cfg, args)

    if not args.eval_only:
        do_train(args, cfg)
        return

    model = instantiate(cfg.model)
    model.to(cfg.train.device)
    model = create_ddp_model(model)
    DetectionCheckpointer(model).load(cfg.train.init_checkpoint)
    print(do_test(cfg, model))
Ejemplo n.º 15
0
def load_model(config_file, model_path):
    """Build a CenterX model, patch in its custom forward, and load
    weights for CUDA evaluation.
    """
    cfg = get_cfg()
    add_centernet_config(cfg)
    cfg.merge_from_file(config_file)

    model = build_model(cfg)
    # Instance-level monkey-patch: bind the centerX forward to this model.
    model.forward = MethodType(centerX_forward, model)
    DetectionCheckpointer(model).load(model_path)
    model.eval()
    model.cuda()
    return model
Ejemplo n.º 16
0
def benchmark_eval(args):
    """Benchmark single-input inference latency of a model.

    Supports both yaml (CfgNode) and LazyConfig configs. Caches 100 inputs
    from the test loader, runs 5 warmup passes, then times 300 forwards.
    """
    cfg = setup(args)
    if args.config_file.endswith(".yaml"):
        model = build_model(cfg)
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

        # Disable workers so data loading does not perturb the timing.
        cfg.defrost()
        cfg.DATALOADER.NUM_WORKERS = 0
        data_loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0])
    else:
        # LazyConfig path.
        model = instantiate(cfg.model)
        model.to(cfg.train.device)
        DetectionCheckpointer(model).load(cfg.train.init_checkpoint)

        cfg.dataloader.num_workers = 0
        data_loader = instantiate(cfg.dataloader.test)

    model.eval()
    logger.info("Model:\n{}".format(model))
    # Materialize 100 inputs once so the timed loop never touches I/O.
    dummy_data = DatasetFromList(list(itertools.islice(data_loader, 100)),
                                 copy=False)

    def f():
        # Endlessly cycle through the cached inputs.
        while True:
            yield from dummy_data

    for k in range(5):  # warmup
        model(dummy_data[k])

    max_iter = 300
    timer = Timer()
    with tqdm.tqdm(total=max_iter) as pbar:
        for idx, d in enumerate(f()):
            if idx == max_iter:
                break
            model(d)
            pbar.update()
    logger.info("{} iters in {} seconds.".format(max_iter, timer.seconds()))
    def __init__(self, cfg):
        """Build inference-time model(s) from the probabilistic-inference
        settings in ``cfg``.

        Depending on ``PROBABILISTIC_INFERENCE.INFERENCE_MODE``, either a
        single model's last checkpoint is loaded, or one model per ensemble
        random seed is loaded into ``self.model_list``.
        """
        # Create common attributes.
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        # Populated only in ensemble mode.
        self.model_list = []

        # Parse config
        self.inference_mode = self.cfg.PROBABILISTIC_INFERENCE.INFERENCE_MODE
        self.mc_dropout_enabled = self.cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.ENABLE
        self.num_mc_dropout_runs = self.cfg.PROBABILISTIC_INFERENCE.MC_DROPOUT.NUM_RUNS

        # Set model to train for MC-Dropout runs (train mode keeps dropout
        # layers active during inference).
        if self.mc_dropout_enabled:
            self.model.train()
        else:
            self.model.eval()

        # Create ensemble if applicable.
        if self.inference_mode == 'ensembles':
            ensemble_random_seeds = self.cfg.PROBABILISTIC_INFERENCE.ENSEMBLES.RANDOM_SEED_NUMS

            for i, random_seed in enumerate(ensemble_random_seeds):
                model = build_model(self.cfg)
                model.eval()

                # Each ensemble member lives in a sibling directory named
                # after its training seed.
                checkpoint_dir = os.path.join(
                    os.path.split(self.cfg.OUTPUT_DIR)[0],
                    'random_seed_' + str(random_seed))
                # Load last checkpoint.
                DetectionCheckpointer(model,
                                      save_dir=checkpoint_dir).resume_or_load(
                                          cfg.MODEL.WEIGHTS, resume=True)
                self.model_list.append(model)
        else:
            # Or Load single model last checkpoint.
            DetectionCheckpointer(self.model,
                                  save_dir=cfg.OUTPUT_DIR).resume_or_load(
                                      cfg.MODEL.WEIGHTS, resume=True)
Ejemplo n.º 18
0
    def __init__(self, cf, cf_add_d2, dataset, out):
        """Build an eval-mode R-CNN that scores precomputed proposals.

        Sets up a detectron2 config for the DALY object-action test set,
        loads the model weights, and monkey-patches the model's forward so
        it returns ROI scores instead of full detections.
        """
        num_classes = len(dataset.action_names)
        TEST_DATASET_NAME = 'daly_objaction_test'

        # / Define d2 conf
        d2_output_dir = str(small.mkdir(out / 'd2_output'))
        d_cfg = set_detectron_cfg_base(d2_output_dir, num_classes, cf['seed'])
        d_cfg = set_detectron_cfg_test(d_cfg,
                                       TEST_DATASET_NAME,
                                       cf['d2_rcnn.model'],
                                       cf['d2_rcnn.conf_thresh'],
                                       cf_add_d2,
                                       freeze=False)
        # Proposals are supplied externally rather than generated by an RPN.
        d_cfg.MODEL.PROPOSAL_GENERATOR.NAME = "PrecomputedProposals"
        d_cfg.freeze()

        # / Start d2
        simple_d2_setup(d_cfg)

        # Predictor without proposal generator
        model = build_model(d_cfg)
        model.eval()
        checkpointer = DetectionCheckpointer(model)

        checkpointer.load(d_cfg.MODEL.WEIGHTS)
        MIN_SIZE_TEST = d_cfg.INPUT.MIN_SIZE_TEST
        MAX_SIZE_TEST = d_cfg.INPUT.MAX_SIZE_TEST
        transform_gen = d2_transforms.ResizeShortestEdge(
            [MIN_SIZE_TEST, MIN_SIZE_TEST], MAX_SIZE_TEST)

        # Instance monkeypatching
        # https://stackoverflow.com/questions/50599045/python-replacing-a-function-within-a-class-of-a-module/50600307#50600307
        model.forward = MethodType(genrcnn_rcnn_roiscores_forward, model)

        self.d_cfg = d_cfg
        self.rcnn_roiscores_model = model
        self.cpu_device = torch.device("cpu")
        self.transform_gen = transform_gen
Ejemplo n.º 19
0
    def __init__(self, cfg):
        """
        Build trainer state (model, optimizer, loader, scheduler,
        checkpointer, hooks) from ``cfg``.

        Args:
            cfg (CfgNode): full detectron2 config.
        """
        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        optimizer = self.build_optimizer(cfg, model)
        data_loader = self.build_train_loader(cfg)

        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(model,
                                            device_ids=[comm.get_local_rank()],
                                            broadcast_buffers=False,
                                            find_unused_parameters=True)
        super().__init__(model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        # Checkpoints go to a dedicated "checkpoints" subdirectory.
        checkpoint_dir = osp.join(cfg.OUTPUT_DIR, "checkpoints")
        if not osp.exists(checkpoint_dir):
            PathManager.mkdirs(checkpoint_dir)

        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            checkpoint_dir,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )

        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg

        self.register_hooks(self.build_hooks())
def main(args):
    """Register the LOFAR datasets, then evaluate or train."""
    cfg = setup(args)
    register_lofar_datasets(cfg)

    if args.eval_only:
        model = LOFARTrainer.build_model(cfg)
        ckpt = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        ckpt.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        return LOFARTrainer.test(cfg, model)

    trainer = LOFARTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Ejemplo n.º 21
0
    def __init__(self, context: PyTorchTrialContext):
        """Trial constructor: build model/optimizer/scheduler and wrap each
        with the trial context.

        # NOTE(review): ``PyTorchTrialContext`` looks like Determined AI's
        # trial API — confirm against the project's imports.
        """
        self.context = context

        self.cfg = self.setup_cfg()
        model = build_model(self.cfg)

        checkpointer = DetectionCheckpointer(
            model, self.cfg.OUTPUT_DIR
        )
        checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=False)
        self.model = self.context.wrap_model(checkpointer.model)

        optimizer = build_optimizer(self.cfg, self.model)
        self.optimizer = self.context.wrap_optimizer(optimizer)

        # Scheduler steps once per batch.
        self.scheduler = build_lr_scheduler(self.cfg, self.optimizer)
        self.scheduler = self.context.wrap_lr_scheduler(self.scheduler,LRScheduler.StepMode.STEP_EVERY_BATCH)

        # Evaluation runs against the first TEST dataset; results are folded
        # through a custom reducer so they aggregate across slots.
        self.dataset_name = self.cfg.DATASETS.TEST[0]
        self.evaluators = get_evaluator(self.cfg, self.dataset_name, self.context.get_hparam("output_dir"), self.context.get_hparam('fake_data'))
        self.val_reducer = self.context.wrap_reducer(EvaluatorReducer(self.evaluators), for_training=False)

        self.context.experimental.disable_dataset_reproducibility_checks()
Ejemplo n.º 22
0
def main(args):
    """Extract features for a slice [args.lower, args.higher) of the
    dataset's images.

    Loads the model weights from cfg.MODEL.WEIGHTS, reads the dataset JSON,
    and forwards the selected image ids to ``do_feature_extraction``.
    """
    cfg = setup(args)
    model = build_model(cfg)
    DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
        cfg.MODEL.WEIGHTS, resume=False)

    dataset_path = ''  # TODO: point this at the dataset JSON file
    with open(dataset_path, 'r') as f:
        dataset = json.load(f)

    image_path = './images/'
    # Bug fix: the original indexed an undefined name ``vqamb``; the loaded
    # JSON object is bound to ``dataset``.
    image_list = list(map(str, list(dataset.keys())))
    do_feature_extraction(cfg, model, image_path,
                          image_list[args.lower:args.higher])
def main(args):
    """Evaluate when --eval-only is set, otherwise run the training loop."""
    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        checkpointer = DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR)
        checkpointer.resume_or_load(cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if comm.is_main_process():
            verify_results(cfg, res)
        return res

    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Ejemplo n.º 24
0
def create_model():
    """Create an eval-mode Mask R-CNN R50-FPN 3x model from the model zoo.

    Returns:
        (model, cfg): the loaded ``torch.nn.Module`` and its config.
    """
    from detectron2 import model_zoo
    from detectron2.checkpoint import DetectionCheckpointer
    from detectron2.config import get_cfg
    from detectron2.modeling import build_model

    zoo_yaml = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"

    # Create a config
    cfg = get_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(zoo_yaml))
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model
    cfg.TEST.DETECTIONS_PER_IMAGE = 5
    cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(zoo_yaml)

    model = build_model(cfg)
    model.eval()
    DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)

    return model, cfg
Ejemplo n.º 25
0
    def __init__(self, cfg):
        """
        Build trainer state; optionally freezes all weights except the
        viewpoint sub-network.

        Args:
            cfg (CfgNode): full config; ``cfg.FREEZE_ALL`` controls freezing.
        """
        # Assume these objects must be constructed in this order.
        model = self.build_model(cfg)
        if cfg.FREEZE_ALL:
            # Freeze model parameters except for viewpoint
            for name,param in model.named_parameters():
                if param.requires_grad and 'viewpoint' not in name:
                    param.requires_grad = False
        optimizer = self.build_optimizer(cfg, model)
        data_loader = self.build_train_loader(cfg)

        # For training, wrap with DDP. But don't need this for inference.
        if comm.get_world_size() > 1:
            model = DistributedDataParallel(
                model, device_ids=[comm.get_local_rank()], broadcast_buffers=False
            )
        super().__init__(model, data_loader, optimizer)

        self.scheduler = self.build_lr_scheduler(cfg, optimizer)
        # Assume no other objects need to be checkpointed.
        # We can later make it checkpoint the stateful hooks
        self.checkpointer = DetectionCheckpointer(
            # Assume you want to save checkpoints together with logs/statistics
            model,
            cfg.OUTPUT_DIR,
            optimizer=optimizer,
            scheduler=self.scheduler,
        )
        self.start_iter = 0
        self.max_iter = cfg.SOLVER.MAX_ITER
        self.cfg = cfg

        self.register_hooks(self.build_hooks())
Ejemplo n.º 26
0
def get_teacher(args, cfg, logger):
    """Build a frozen teacher detector wrapped for single-image calls.

    The returned wrapper takes one image tensor (assumed HWC layout — the
    wrapper permutes it to CHW; confirm with callers) and returns the first
    output dict, with the output size fixed at 720x1280. When ``args.time``
    is set, each call is additionally timed.
    """
    class ModelWrapper:
        # Thin adapter: single image tensor in, single output dict out.
        def __init__(self, model):
            self.model = model

        @torch.no_grad()
        def __call__(self, img: torch.Tensor) -> dict:
            img = img.permute(2, 0, 1).type(torch.float32)
            input = {'image': img, 'height': 720, 'width': 1280}
            return self.model([input])[0]

    class ModelTimerWrapper:
        # Same adapter, but measures average inference time per call.
        def __init__(self, model):
            self.model = model
            self.inference_timer = TimeAverageMeter('MRCNN inference',
                                                    'SERVER', True)

        @torch.no_grad()
        def __call__(self, img: torch.Tensor) -> dict:
            self.inference_timer.tic()
            img = img.permute(2, 0, 1).type(torch.float32)
            input = {'image': img, 'height': 720, 'width': 1280}
            output = self.model([input])[0]
            self.inference_timer.toc()
            return output

    model = build_model(cfg)
    checkpointer = DetectionCheckpointer(model)
    checkpointer.load(cfg.MODEL.WEIGHTS)
    model.eval()

    if args.time:
        model = ModelTimerWrapper(model)
    else:
        model = ModelWrapper(model)
    logger.critical('Loaded teacher from model zoo')
    return model
Ejemplo n.º 27
0
def main(args):
    """Register the asparagus COCO datasets, then evaluate (with optional
    TTA) when --eval-only is set, otherwise train with an optional TTA
    evaluation hook.
    """
    # register_coco_instances('asparagus_train', {'_background_': 0, 'clump': 1, 'stalk': 2, 'spear': 3, 'bar': 4} , "./datasets/coco/annotations/train/annotations.json", "./datasets/coco/annotations/train")
    # register_coco_instances('asparagus_val', {'_background_': 0, 'clump': 1, 'stalk': 2, 'spear': 3, 'bar': 4} , "./datasets/coco/annotations/test_458/annotations.json", "./datasets/coco/annotations/test_458")
    register_coco_instances(
        'asparagus_train', {
            '_background_': 0,
            'clump': 1,
            'stalk': 2,
            'spear': 3,
            'bar': 4,
            'straw': 5
        }, "./datasets/coco/annotations/straw/train/annotations.json",
        "./datasets/coco/annotations/straw/train")
    register_coco_instances(
        'asparagus_val', {
            '_background_': 0,
            'clump': 1,
            'stalk': 2,
            'spear': 3,
            'bar': 4,
            'straw': 5
        }, "./datasets/coco/annotations/val_straw/val/annotations.json",
        "./datasets/coco/annotations/val_straw/val")

    cfg = setup(args)

    if args.eval_only:
        model = Trainer.build_model(cfg)
        DetectionCheckpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = Trainer.test(cfg, model)
        if cfg.TEST.AUG.ENABLED:
            res.update(Trainer.test_with_TTA(cfg, model))
        if comm.is_main_process():
            verify_results(cfg, res)
        return res
    """
    If you'd like to do anything fancier than the standard training logic,
    consider writing your own training loop (see plain_train_net.py) or
    subclassing the trainer.
    """
    trainer = Trainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    if cfg.TEST.AUG.ENABLED:
        # Evaluate with test-time augmentation at the end of training.
        trainer.register_hooks([
            hooks.EvalHook(0,
                           lambda: trainer.test_with_TTA(cfg, trainer.model))
        ])
    return trainer.train()
def get_detector(config_file,
                 nms_thresh,
                 data_type,
                 weight_dir=None,
                 dataset_name=None):
    """Build an object detector from a detectron2 model-zoo config.

    Args:
        config_file: model-zoo-relative config path.
        nms_thresh: minimum confidence threshold for ROIs to survive NMS.
        data_type: ``'model'`` for a raw eval-mode ``torch.nn.Module``,
            ``'predictor'`` for a ``DefaultPredictor`` wrapper.
        weight_dir: directory containing ``model_final.pth``; when ``None``
            the pretrained zoo weights are used.
        dataset_name: registered dataset name; required with ``weight_dir``
            to derive the number of classes from its metadata.

    Returns:
        (model, cfg) tuple.

    Raises:
        ValueError: for an unknown ``data_type`` (the original code fell
            through and crashed with ``UnboundLocalError`` at the return).
    """
    cfg = get_cfg()

    # add project-specific config (e.g., TensorMask) here if you're not running a model in detectron2's core library
    cfg.merge_from_file(model_zoo.get_config_file(config_file))

    # set threshold for this model
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = nms_thresh

    # Find a model from detectron2's model zoo. You can use the https://dl.fbaipublicfiles... url as well
    if weight_dir is None:
        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
            config_file)  # use the pretrained weights
    else:
        cfg.MODEL.WEIGHTS = os.path.join(
            weight_dir, 'model_final.pth')  # use the custom trained weights
        cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(
            MetadataCatalog.get(
                dataset_name).thing_classes)  # number of classes

    if data_type == 'model':
        model = build_model(cfg)  # returns a torch.nn.Module
        DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS)
        device = torch.device(
            'cuda') if torch.cuda.is_available() else torch.device('cpu')
        model = model.to(device)
        model.eval()
    elif data_type == 'predictor':
        model = DefaultPredictor(cfg)
    else:
        raise ValueError(
            "data_type must be 'model' or 'predictor', got {!r}".format(
                data_type))

    return model, cfg
Ejemplo n.º 29
0
  def __init__(self, sess, model_saved_path, labels_path, fix_ratio, config_path):
    """Load class labels and an eval-mode detectron2 model.

    NOTE(review): ``sess`` is accepted but never used here — presumably
    kept for interface compatibility with a TF-style caller; confirm.
    NOTE(review): when ``fix_ratio`` is true, ``self.transform_gen`` is
    never created — verify downstream code does not read it in that mode.
    """
    with open(labels_path) as f:
      self.labels = f.read().splitlines()
    self.image_shape = [600, 600, 3]  # TODO
    # Config is frozen before model construction so it cannot drift.
    self.cfg = cfg = get_cfg()
    cfg.merge_from_file(config_path)
    cfg.freeze()
    self.model = build_model(cfg)
    self.model.eval()
    DetectionCheckpointer(self.model).load(model_saved_path)
    self.fix_ratio = fix_ratio
    if not fix_ratio:
      # Same resize policy as DefaultPredictor: shortest edge, capped max.
      self.transform_gen = T.ResizeShortestEdge(
          [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST)
Ejemplo n.º 30
0
def _get_model(args):
    """Build an inference-mode detector from ``args.config``.

    Args:
        args: namespace with ``config`` (yaml path) and optional ``weights``
            (checkpoint path; falls back to ``cfg.MODEL.WEIGHTS``).

    Returns:
        The model as a ``torch.nn.Module`` with training disabled.
    """
    cfg = get_cfg()
    cfg.merge_from_file(args.config)
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7  # set threshold for this model

    model = build_model(cfg)  # returns a torch.nn.Module
    # Idiom fix: compare against None with ``is not`` rather than ``!=``.
    weights = args.weights if args.weights is not None else cfg.MODEL.WEIGHTS

    DetectionCheckpointer(model).load(
        weights
    )  # must load weights this way, can't use cfg.MODEL.WEIGHTS = "..."
    model.train(False)  # inference mode

    return model