Example no. 1
    def __init__(
        self,
        cfg: CfgNode,
        n_iter: int = 150,
        gamma: float = 0.5,
        nms_thresh: float = 0.9,
        mapper: Callable = DatasetMapper,
        dataset_name: str = "benign_train",
    ):
        """Implements the DAG algorithm.

        Parameters
        ----------
        cfg : CfgNode
            Config object used to train the model
        n_iter : int, optional
            Number of iterations to run the algorithm on each image, by default 150
        gamma : float, optional
            Perturbation weight, by default 0.5
        nms_thresh : float, optional
            NMS threshold of RPN; higher it is, more dense set of proposals, by default 0.9
        mapper : Callable, optional
            Can specify own DatasetMapper logic, by default DatasetMapper
        dataset_name : str, optional
            Name of the registered dataset to iterate over; previously
            hard-coded, by default "benign_train"
        """
        self.n_iter = n_iter
        self.gamma = gamma

        # Work on a copy: the config can be modified by the model/loader builders.
        self.cfg = cfg.clone()
        # Loosen RPN NMS and raise the proposal cap to generate denser proposals.
        self.cfg.MODEL.RPN.NMS_THRESH = nms_thresh
        self.cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 3000

        # Build the model in eval mode and restore the trained weights.
        self.model = build_model(self.cfg)
        self.model.eval()
        checkpointer = DetectionCheckpointer(self.model)
        checkpointer.load(cfg.MODEL.WEIGHTS)

        # Same test-time resizing scheme as detectron2's DefaultPredictor.
        self.aug = T.ResizeShortestEdge(
            [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST],
            cfg.INPUT.MAX_SIZE_TEST)
        self.input_format = cfg.INPUT.FORMAT
        if self.input_format not in ("RGB", "BGR"):
            # Raise instead of assert: asserts are stripped under `python -O`.
            raise ValueError(
                f"Unsupported cfg.INPUT.FORMAT: {self.input_format!r}")

        # Test-style loader (no shuffling) over the dataset to attack.
        dataset_mapper = mapper(cfg, is_train=False)
        self.data_loader = build_detection_test_loader(cfg,
                                                       dataset_name,
                                                       mapper=dataset_mapper)

        self.device = self.model.device
        self.n_classes = self.cfg.MODEL.ROI_HEADS.NUM_CLASSES
        self.metadata = MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0])
        # HACK Only specific for this dataset — note this mutates the
        # globally registered metadata entry, not just a local copy.
        self.metadata.thing_classes = ["box", "logo"]
Example no. 2
 def __init__(self, cfg: CfgNode, val_augmentation: Sequence[Augmentation],
              period: int):
     """Set up an infinite iterator over the test split, served through the
     training-loader machinery, to be consumed every *period* steps."""
     super().__init__()
     # Clone so the dataset swap below does not leak into the caller's config.
     self.cfg = cfg.clone()
     # Point TRAIN at the TEST split: the train loader then streams
     # validation data with the supplied augmentations.
     self.cfg.DATASETS.TRAIN = cfg.DATASETS.TEST
     val_mapper = DatasetMapper(self.cfg,
                                is_train=True,
                                augmentations=val_augmentation)
     loader = build_detection_train_loader(self.cfg, mapper=val_mapper)
     self._loader = iter(loader)
     self._period = period
     self.num_steps = 0
Example no. 3
def _create_gradient_clipper(cfg: CfgNode) -> _GradientClipper:
    """
    Creates gradient clipping closure to clip by value or by norm,
    according to the provided config.
    """
    # Clone so the closures capture a config the caller cannot mutate later.
    cfg = cfg.clone()

    def _clip_by_norm(params: _GradientClipperInput):
        # Clip the total gradient norm to CLIP_VALUE using NORM_TYPE.
        torch.nn.utils.clip_grad_norm_(params, cfg.CLIP_VALUE, cfg.NORM_TYPE)

    def _clip_by_value(params: _GradientClipperInput):
        # Clamp each gradient element into [-CLIP_VALUE, CLIP_VALUE].
        torch.nn.utils.clip_grad_value_(params, cfg.CLIP_VALUE)

    dispatch = {
        GradientClipType.VALUE: _clip_by_value,
        GradientClipType.NORM: _clip_by_norm,
    }
    return dispatch[GradientClipType(cfg.CLIP_TYPE)]