Example #1
def main(args):
    cfg = setup(args)

    if args.eval_only:
        model = DefaultTrainer.build_model(cfg)
        Checkpointer(model, save_dir=cfg.OUTPUT_DIR).load(cfg.MODEL.WEIGHTS)
        res = DefaultTrainer.test(cfg, model)
        return res

    if args.kd:
        trainer = KDTrainer(cfg)
    else:
        trainer = DefaultTrainer(cfg)

    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
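Each of these `main` functions relies on a `setup(args)` helper; below is a minimal sketch in the style of fastreid's tools/train_net.py (exact option handling may differ per project, so treat this as an assumption rather than the code behind the example above).

from fastreid.config import get_cfg
from fastreid.engine import default_setup

def setup(args):
    # Build a config node, apply the config file plus command-line overrides,
    # freeze it, and run common setup (logging, output dir, seeding).
    cfg = get_cfg()
    cfg.merge_from_file(args.config_file)
    cfg.merge_from_list(args.opts)
    cfg.freeze()
    default_setup(cfg, args)
    return cfg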
Example #2
File: trainer.py Project: xhuljl/fast-reid
    def auto_scale_hyperparams(cfg, num_classes):
        cfg = DefaultTrainer.auto_scale_hyperparams(cfg, num_classes)

        # Save index to class dictionary
        output_dir = cfg.OUTPUT_DIR
        if comm.is_main_process() and output_dir:
            path = os.path.join(output_dir, "idx2class.json")
            with PathManager.open(path, "w") as f:
                json.dump(ClasTrainer.idx2class, f)

        return cfg
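The saved mapping can later be read back to turn predicted indices into class names; a minimal sketch, where `output_dir` and `pred_idx` are hypothetical names standing in for the run's output directory and a model prediction.

import json
import os

# Read the index-to-class mapping written by auto_scale_hyperparams above.
with open(os.path.join(output_dir, "idx2class.json")) as f:  # output_dir = cfg.OUTPUT_DIR
    idx2class = json.load(f)

# JSON object keys are strings, so convert the predicted index accordingly.
class_name = idx2class[str(pred_idx)]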
Example #3
def main(args):
    cfg = setup(args)

    if args.eval_only:
        cfg.defrost()
        cfg.MODEL.BACKBONE.PRETRAIN = False
        model = DefaultTrainer.build_model(cfg)

        Checkpointer(model).load(cfg.MODEL.WEIGHTS)  # load trained model

        res = DefaultTrainer.test(cfg, model)
        return res
    if "CenterLoss" in cfg.MODEL.LOSSES.NAME:
        trainer = CenterTrainer(cfg)
    else:
        trainer = DefaultTrainer(cfg)

    if args.finetune:
        Checkpointer(trainer.model).load(cfg.MODEL.WEIGHTS)  # load trained model to finetune

    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #4
    @classmethod
    def build_model(cls, cfg):
        """
        Returns:
            torch.nn.Module:

        It now calls :func:`fastreid.modeling.build_model`.
        Overwrite it if you'd like a different model.
        """
        model = DefaultTrainer.build_model(cfg)
        if cfg.MODEL.LOSSES.BCE.WEIGHT_ENABLED and \
                AttrTrainer.sample_weights is not None:
            setattr(model, "sample_weights",
                    AttrTrainer.sample_weights.to(model.device))
        else:
            setattr(model, "sample_weights", None)
        return model
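The `sample_weights` attached above are presumably consumed by the binary-cross-entropy attribute loss; here is an illustrative sketch of that idea (the helper name `weighted_bce_loss` and the wiring are assumptions, not code from fastreid).

import torch.nn.functional as F

def weighted_bce_loss(logits, targets, sample_weights=None):
    # targets: float multi-hot attribute labels with the same shape as logits.
    # If per-attribute weights were attached to the model (see build_model above),
    # pass them to BCE so common and rare attributes are balanced.
    return F.binary_cross_entropy_with_logits(logits, targets, weight=sample_weights)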
Example #5
def main(args):
    cfg = setup(args)

    if args.eval_only:
        cfg.defrost()
        cfg.MODEL.BACKBONE.PRETRAIN = False
        model = DefaultTrainer.build_model(cfg)

        Checkpointer(model).load(cfg.MODEL.WEIGHTS)  # load trained model

        res = DefaultTrainer.test(cfg, model)
        return res

    trainer = DefaultTrainer(cfg)
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
Example #6
    def __init__(self, model_config, model_path, use_cuda=True):
        cfg = get_cfg()
        cfg.merge_from_file(model_config)
        cfg.MODEL.BACKBONE.PRETRAIN = False
        self.net = DefaultTrainer.build_model(cfg)
        self.device = "cuda" if torch.cuda.is_available(
        ) and use_cuda else "cpu"

        Checkpointer(self.net).load(model_path)
        logger = logging.getLogger("root.tracker")
        logger.info("Loading weights from {}... Done!".format(model_path))
        self.net.to(self.device)
        self.net.eval()
        height, width = cfg.INPUT.SIZE_TEST
        self.size = (width, height)
        self.norm = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
        ])
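An `__init__` like this is usually paired with a feature-extraction call; a minimal sketch follows, assuming crops arrive as H x W x 3 uint8 numpy arrays and that `torch` and `PIL.Image` are imported at module level (the method name and resizing step are assumptions, not part of the project above).

    def __call__(self, im_crops):
        # Resize each crop to the network's test size, normalize, and batch them.
        batch = torch.stack(
            [self.norm(Image.fromarray(im).resize(self.size)) for im in im_crops],
            dim=0).to(self.device)
        with torch.no_grad():
            features = self.net(batch)
        return features.cpu().numpy()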
Example #7
def main(args):
    cfg = setup(args)

    if args.eval_only:
        model = DefaultTrainer.build_model(cfg)
        Checkpointer(model, save_dir=cfg.OUTPUT_DIR).resume_or_load(
            cfg.MODEL.WEIGHTS, resume=args.resume)
        res = DefaultTrainer.test(cfg, model)
        return res

    trainer = DefaultTrainer(cfg)
    # moco pretrain
    # import torch
    # state_dict = torch.load('logs/model_0109999.pth')['model_ema']
    # ret = trainer.model.module.load_state_dict(state_dict, strict=False)
    #
    trainer.resume_or_load(resume=args.resume)
    return trainer.train()
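These scripts are typically started through fastreid's distributed launcher; a minimal sketch of the entry point, assuming the argument parser and `launch` exported by fastreid.engine (mirroring detectron2's defaults).

from fastreid.engine import default_argument_parser, launch

if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    launch(
        main,
        args.num_gpus,
        num_machines=args.num_machines,
        machine_rank=args.machine_rank,
        dist_url=args.dist_url,
        args=(args,),
    )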