Example #1
    def __init__(self, cfg):
        super().__init__(cfg)

        # Get teacher model config
        model_ts = []
        for i in range(len(cfg.KD.MODEL_CONFIG)):
            cfg_t = get_cfg()
            cfg_t.merge_from_file(cfg.KD.MODEL_CONFIG[i])
            cfg_t.defrost()
            cfg_t.MODEL.META_ARCHITECTURE = "Baseline"
            # Change syncBN to BN because the teacher is not wrapped in DDP
            if cfg_t.MODEL.BACKBONE.NORM == "syncBN":
                cfg_t.MODEL.BACKBONE.NORM = "BN"
            if cfg_t.MODEL.HEADS.NORM == "syncBN":
                cfg_t.MODEL.HEADS.NORM = "BN"

            model_t = build_model(cfg_t)

            # No gradients for teacher model
            for param in model_t.parameters():
                param.requires_grad_(False)

            logger.info("Loading teacher model weights ...")
            Checkpointer(model_t).load(cfg.KD.MODEL_WEIGHTS[i])

            model_ts.append(model_t)

        self.ema_enabled = cfg.KD.EMA.ENABLED
        self.ema_momentum = cfg.KD.EMA.MOMENTUM
        if self.ema_enabled:
            cfg_self = cfg.clone()
            cfg_self.defrost()
            cfg_self.MODEL.META_ARCHITECTURE = "Baseline"
            if cfg_self.MODEL.BACKBONE.NORM == "syncBN":
                cfg_self.MODEL.BACKBONE.NORM = "BN"
            if cfg_self.MODEL.HEADS.NORM == "syncBN":
                cfg_self.MODEL.HEADS.NORM = "BN"
            model_self = build_model(cfg_self)
            # No gradients for self model
            for param in model_self.parameters():
                param.requires_grad_(False)

            if cfg_self.MODEL.WEIGHTS != '':
                logger.info("Loading self distillation model weights ...")
                Checkpointer(model_self).load(cfg_self.MODEL.WEIGHTS)
            else:
                # Make sure the initial states are the same
                for param_q, param_k in zip(self.parameters(), model_self.parameters()):
                    param_k.data.copy_(param_q.data)

            model_ts.insert(0, model_self)

        # Do not register the teacher models as `nn.Module`s, so that
        # their weights are not saved in checkpoints
        self.model_ts = model_ts
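
The snippet stores `ema_momentum` but the EMA update itself happens outside this excerpt. A minimal sketch of the usual EMA update this implies, assuming `torch` is imported in the module and that `model_ts[0]` holds the self-distillation copy (the helper name `_update_ema` and its call site are assumptions, not shown above):

    @torch.no_grad()
    def _update_ema(self):
        # Hypothetical helper: exponential moving average of the student weights
        # into the frozen self-distillation copy stored at model_ts[0].
        m = self.ema_momentum
        for param_q, param_k in zip(self.parameters(), self.model_ts[0].parameters()):
            param_k.data.mul_(m).add_(param_q.data, alpha=1.0 - m)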
Example #2
    def __init__(self, cfg_):
        self.cfg = cfg_.clone()  # cfg can be modified by model
        self.cfg.defrost()
        self.cfg.MODEL.BACKBONE.PRETRAIN = False
        self.model = build_model(self.cfg)
        self.model.eval()

        Checkpointer(self.model).load(cfg_.MODEL.WEIGHTS)
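
A hedged usage sketch for a wrapper like the one above, assuming fastreid's config API; the class name `FeatureExtractor`, the config file, and the weight path are placeholders, not taken from the example:

from fastreid.config import get_cfg

cfg = get_cfg()
cfg.merge_from_file("configs/Market1501/bagtricks_R50.yml")          # placeholder config
cfg.MODEL.WEIGHTS = "logs/market1501/bagtricks_R50/model_final.pth"  # placeholder weights
extractor = FeatureExtractor(cfg)  # hypothetical name for the class defined above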
Example #3
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.model = build_model(self.cfg)
        self.model.eval()
        # self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])

        checkpointer = Checkpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)
Example #4
    def build_model(cls, cfg):
        """
        Returns:
            torch.nn.Module:
        It now calls :func:`detectron2.modeling.build_model`.
        Overwrite it if you'd like a different model.
        """
        model = build_model(cfg)
        logger = logging.getLogger(__name__)
        logger.info("Model:\n{}".format(model))
        return model
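
In detectron2 this classmethod is provided by `DefaultTrainer`, so a typical call site looks like the sketch below; the config file path is a placeholder:

from detectron2.config import get_cfg
from detectron2.engine import DefaultTrainer

cfg = get_cfg()
cfg.merge_from_file("configs/my_experiment.yaml")  # placeholder config file
model = DefaultTrainer.build_model(cfg)  # logs the model structure and returns an nn.Module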
Example #5
    def __init__(self, cfg, device='cpu'):
        self.cfg = cfg.clone()  # cfg can be modified by model
        self.cfg.defrost()
        self.cfg.MODEL.BACKBONE.PRETRAIN = False
        self.device = device
        self.model = build_model(self.cfg)
        self.model.to(device)
        self.model.eval()

        checkpointer = Checkpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)
Example #6
    def build_model(cls, cfg, show_model=True):
        """
        Returns:
            torch.nn.Module:
        It now calls :func:`fastreid.modeling.build_model`.
        Overwrite it if you'd like a different model.
        """
        model = build_model(cfg)

        if show_model:
            logger = logging.getLogger('fastreid')
            logger.info("Model:\n{}".format(model))

        return model
Example #7
    def __init__(self, cfg):
        self.cfg = cfg.clone()  # cfg can be modified by model
        model = build_model(self.cfg)
        self.model = DataParallel(model)
        self.model.cuda()
        self.model.eval()

        checkpointer = Checkpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        num_channels = len(cfg.MODEL.PIXEL_MEAN)
        self.mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).view(
            1, num_channels, 1, 1)
        self.std = torch.tensor(cfg.MODEL.PIXEL_STD).view(
            1, num_channels, 1, 1)
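
The (1, num_channels, 1, 1) shape lets the stored statistics broadcast over an NCHW batch. A minimal sketch of how such a wrapper would typically normalize its input before the forward pass (the method name and its use are assumptions, not shown above):

    def preprocess(self, batched_images):
        # Hypothetical helper: per-channel normalization of an NCHW float tensor,
        # broadcasting the stored (1, C, 1, 1) mean/std over batch and spatial dims.
        return (batched_images - self.mean) / self.std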
Example #8
    def build_model(cls,
                    cfg,
                    load_model=True,
                    show_model=True,
                    use_dsbn=False):
        cfg = cfg.clone()  # cfg can be modified by model
        cfg.defrost()
        cfg.MODEL.DEVICE = "cpu"

        model = build_model(cfg)
        logger = logging.getLogger('fastreid')

        if load_model:
            pretrain_path = cfg.MODEL.PRETRAIN_PATH
            try:
                state_dict = torch.load(
                    pretrain_path, map_location=torch.device("cpu"))['model']
                for layer in cfg.MODEL.IGNORE_LAYERS:
                    if layer in state_dict.keys():
                        del state_dict[layer]
                logger.info(f"Loading pretrained model from {pretrain_path}")
            except FileNotFoundError as e:
                logger.info(
                    f"{pretrain_path} is not found! Please check this path.")
                raise e
            except KeyError as e:
                logger.info(
                    "State dict keys error! Please check the state dict.")
                raise e

            incompatible = model.load_state_dict(state_dict, strict=False)
            if incompatible.missing_keys:
                logger.info(
                    get_missing_parameters_message(incompatible.missing_keys))
            if incompatible.unexpected_keys:
                logger.info(
                    get_unexpected_parameters_message(
                        incompatible.unexpected_keys))

        if use_dsbn:
            logger.info("==> Convert BN to Domain Specific BN")
            convert_dsbn(model)

        if show_model:
            logger.info("Model:\n{}".format(model))

        model.to(torch.device("cuda"))
        return model
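
This loader reads `MODEL.PRETRAIN_PATH` and `MODEL.IGNORE_LAYERS`, which are not standard fastreid keys and presumably come from a project-specific config extension. A hedged sketch of what a caller is expected to set before invoking it; the trainer class name, paths, and layer names are placeholders:

cfg = cfg.clone()
cfg.defrost()
cfg.MODEL.PRETRAIN_PATH = "logs/source_pretrain/model_best.pth"   # placeholder path
cfg.MODEL.IGNORE_LAYERS = ["heads.classifier.weight"]             # placeholder layer names
model = Trainer.build_model(cfg, load_model=True, use_dsbn=True)  # class name assumed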
Example #9
    def __init__(self, cfg):
        super(Distiller, self).__init__(cfg)

        # Get teacher model config
        cfg_t = get_cfg()
        cfg_t.merge_from_file(cfg.KD.MODEL_CONFIG)

        model_t = build_model(cfg_t)
        logger.info("Teacher model:\n{}".format(model_t))

        # No gradients for teacher model
        for param in model_t.parameters():
            param.requires_grad_(False)

        logger.info("Loading teacher model weights ...")
        Checkpointer(model_t).load(cfg.KD.MODEL_WEIGHTS)

        # Do not register the teacher model as an `nn.Module`, so that
        # its weights are not saved in checkpoints
        self.model_t = [model_t.backbone, model_t.heads]
Example #10
    def __init__(self, cfg):
        super().__init__(cfg)

        # Get teacher model config
        model_ts = []
        for i in range(len(cfg.KD.MODEL_CONFIG)):
            cfg_t = get_cfg()
            cfg_t.merge_from_file(cfg.KD.MODEL_CONFIG[i])

            model_t = build_model(cfg_t)

            # No gradients for teacher model
            for param in model_t.parameters():
                param.requires_grad_(False)

            logger.info("Loading teacher model weights ...")
            Checkpointer(model_t).load(cfg.KD.MODEL_WEIGHTS[i])

            model_ts.append(model_t)

        # Do not register the teacher models as `nn.Module`s, so that
        # their weights are not saved in checkpoints
        self.model_ts = model_ts
Example #11
        default=[],
        nargs=argparse.REMAINDER,
    )
    return parser


if __name__ == '__main__':
    args = get_parser().parse_args()
    cfg = setup_cfg(args)

    cfg.defrost()
    cfg.MODEL.BACKBONE.PRETRAIN = False
    if cfg.MODEL.HEADS.POOL_LAYER == 'FastGlobalAvgPool':
        cfg.MODEL.HEADS.POOL_LAYER = 'GlobalAvgPool'
    cfg.MODEL.BACKBONE.WITH_NL = False

    model = build_model(cfg)
    Checkpointer(model).load(cfg.MODEL.WEIGHTS)
    model.eval()
    logger.info(model)

    inputs = torch.randn(1, 3, cfg.INPUT.SIZE_TEST[0],
                         cfg.INPUT.SIZE_TEST[1]).to(
                             torch.device(cfg.MODEL.DEVICE))
    PathManager.mkdirs(args.output)
    pytorch_to_caffe.trans_net(model, inputs, args.name)
    pytorch_to_caffe.save_prototxt(f"{args.output}/{args.name}.prototxt")
    pytorch_to_caffe.save_caffemodel(f"{args.output}/{args.name}.caffemodel")

    logger.info(f"Export caffe model in {args.output} sucessfully!")
Example #12
    def build_model_teacher(cls, cfg) -> nn.Module:
        cfg_t = update_model_teacher_config(cfg)
        model_t = build_model(cfg_t)
        return model_t
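
`update_model_teacher_config` is not shown in this example. A hedged reconstruction of what it plausibly does, mirroring the config adjustments Example #1 applies to its teacher configs (the function body is an assumption, not the source):

def update_model_teacher_config(cfg):
    # Hypothetical reconstruction: clone the student config and switch the
    # meta-architecture back to the plain model, as Example #1 does for its teachers.
    cfg_t = cfg.clone()
    cfg_t.defrost()
    cfg_t.MODEL.META_ARCHITECTURE = "Baseline"
    return cfg_t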