Example #1
        def efail_fn(slf, device, *args, **kwargs):
            # Only check for the non-deterministic alert when the test runs on
            # the targeted device type (or on every device type if none was set).
            if self.device_type is None or self.device_type == slf.device_type:
                # Remember the caller's setting so it can be restored afterwards.
                deterministic_restore = torch.is_deterministic()
                torch.set_deterministic(True)
                try:
                    if self.fn_has_device_arg:
                        fn(slf, device, *args, **kwargs)
                    else:
                        fn(slf, *args, **kwargs)
                except RuntimeError as e:
                    torch.set_deterministic(deterministic_restore)
                    # The raised error must mention the expected message.
                    if self.error_message not in str(e):
                        slf.fail(
                            'expected non-deterministic error message to contain "'
                            + self.error_message +
                            '" but got this instead: "' + str(e) + '"')
                    return
                else:
                    # No error was raised even though deterministic mode was on.
                    torch.set_deterministic(deterministic_restore)
                    slf.fail(
                        'expected a non-deterministic error, but it was not raised'
                    )

            # If the alert check was skipped (different device type), just run
            # the test as usual with the current deterministic setting.
            if self.fn_has_device_arg:
                return fn(slf, device, *args, **kwargs)
            else:
                return fn(slf, *args, **kwargs)
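
This wrapper comes from a larger decorator that is not shown here (`fn`, `self.error_message`, and `self.fn_has_device_arg` belong to that surrounding object). As a rough, self-contained sketch of the same pattern, the hypothetical decorator below forces deterministic mode, expects the wrapped test to raise a RuntimeError containing a given message, and restores the previous setting. It uses the same `torch.is_deterministic` / `torch.set_deterministic` calls as the example; in newer PyTorch releases these are deprecated in favor of `torch.are_deterministic_algorithms_enabled` / `torch.use_deterministic_algorithms`.

import unittest

import torch


def expect_nondeterministic_alert(error_message):
    """Hypothetical decorator: the wrapped test must raise a RuntimeError
    mentioning `error_message` while deterministic mode is forced on."""
    def decorator(fn):
        def wrapper(self, *args, **kwargs):
            prev = torch.is_deterministic()   # remember the caller's setting
            torch.set_deterministic(True)
            try:
                fn(self, *args, **kwargs)
            except RuntimeError as e:
                if error_message not in str(e):
                    self.fail('unexpected error message: ' + str(e))
                return
            finally:
                torch.set_deterministic(prev)  # always restore
            self.fail('expected a non-deterministic error, but it was not raised')
        return wrapper
    return decorator


class DeterminismAlertTest(unittest.TestCase):
    @expect_nondeterministic_alert('does not have a deterministic implementation')
    def test_alerting_op(self):
        ...  # call an op that is known to lack a deterministic implementation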
Example #2
    def __init__(self, config):
        super().__init__()
        # Build a default detectron2 config, extend it with the LayoutLMv2
        # options, instantiate the registered meta-architecture, and keep only
        # its FPN backbone.
        self.cfg = detectron2.config.get_cfg()
        add_layoutlmv2_config(self.cfg)
        meta_arch = self.cfg.MODEL.META_ARCHITECTURE
        model = META_ARCH_REGISTRY.get(meta_arch)(self.cfg)
        assert isinstance(model.backbone, detectron2.modeling.backbone.FPN)
        self.backbone = model.backbone
        # Optionally convert the backbone's BatchNorm layers to SyncBatchNorm,
        # with one process group per node, when distributed training is active.
        if (
            config.convert_sync_batchnorm
            and torch.distributed.is_available()
            and torch.distributed.is_initialized()
            and torch.distributed.get_rank() > -1
        ):
            self_rank = torch.distributed.get_rank()
            node_size = torch.cuda.device_count()
            world_size = torch.distributed.get_world_size()
            assert world_size % node_size == 0

            # Partition the global ranks into consecutive per-node groups of
            # `node_size` ranks each.
            node_global_ranks = [
                list(range(i * node_size, (i + 1) * node_size)) for i in range(world_size // node_size)
            ]
            sync_bn_groups = [
                torch.distributed.new_group(ranks=node_global_ranks[i]) for i in range(world_size // node_size)
            ]
            node_rank = self_rank // node_size
            assert self_rank in node_global_ranks[node_rank]

            self.backbone = my_convert_sync_batchnorm(self.backbone, process_group=sync_bn_groups[node_rank])

        # Per-channel normalization statistics, stored as (C, 1, 1) buffers.
        assert len(self.cfg.MODEL.PIXEL_MEAN) == len(self.cfg.MODEL.PIXEL_STD)
        num_channels = len(self.cfg.MODEL.PIXEL_MEAN)
        self.register_buffer(
            "pixel_mean",
            torch.Tensor(self.cfg.MODEL.PIXEL_MEAN).view(num_channels, 1, 1),
        )
        self.register_buffer("pixel_std", torch.Tensor(self.cfg.MODEL.PIXEL_STD).view(num_channels, 1, 1))
        self.out_feature_key = "p2"
        if torch.is_deterministic():
            # AdaptiveAvgPool2d has no deterministic CUDA backward, so fall back
            # to a fixed-kernel AvgPool2d sized for a 224x224 input.
            logger.warning("using `AvgPool2d` instead of `AdaptiveAvgPool2d`")
            input_shape = (224, 224)
            backbone_stride = self.backbone.output_shape()[self.out_feature_key].stride
            self.pool = nn.AvgPool2d(
                (
                    math.ceil(math.ceil(input_shape[0] / backbone_stride) / config.image_feature_pool_shape[0]),
                    math.ceil(math.ceil(input_shape[1] / backbone_stride) / config.image_feature_pool_shape[1]),
                )
            )
        else:
            self.pool = nn.AdaptiveAvgPool2d(config.image_feature_pool_shape[:2])
        if len(config.image_feature_pool_shape) == 2:
            config.image_feature_pool_shape.append(self.backbone.output_shape()[self.out_feature_key].channels)
        assert self.backbone.output_shape()[self.out_feature_key].channels == config.image_feature_pool_shape[2]
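
The fixed-kernel fallback only behaves like AdaptiveAvgPool2d because the input size is pinned to 224x224. A small sketch of the arithmetic, using an assumed FPN "p2" stride of 4 and an assumed image_feature_pool_shape of [7, 7, 256] (illustrative values, not read from a real config):

import math

import torch
from torch import nn

backbone_stride = 4          # assumed stride of the FPN "p2" feature map
pool_shape = [7, 7, 256]     # assumed image_feature_pool_shape
input_shape = (224, 224)

feat = math.ceil(input_shape[0] / backbone_stride)   # 224 / 4 -> 56x56 features
kernel = math.ceil(feat / pool_shape[0])             # 56 / 7  -> 8x8 kernel

fixed = nn.AvgPool2d((kernel, kernel))
adaptive = nn.AdaptiveAvgPool2d(pool_shape[:2])

x = torch.randn(1, pool_shape[2], feat, feat)
# Both poolings yield a 7x7 grid for this fixed input size; the difference is
# that AvgPool2d has a deterministic CUDA backward while AdaptiveAvgPool2d
# does not, which is why the deterministic branch above prefers it.
assert fixed(x).shape == adaptive(x).shape == (1, pool_shape[2], 7, 7)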
Example #3
    def __init__(self, no_side_effect: bool = True):
        self._no_side_effect = no_side_effect
        if no_side_effect:
            # Snapshot the current global setting so it can be restored later.
            self.prev_state = torch.is_deterministic()
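
Only the constructor is shown above; storing the current flag in `prev_state` suggests the object later restores it. A minimal, self-contained sketch of that pattern as a context manager follows, where the class name, the `__enter__`/`__exit__` behavior, and the forced `True` setting are assumptions for illustration rather than the original class:

import torch


class DeterministicGuard:
    """Hypothetical guard: force deterministic mode inside a `with` block and
    restore the previous setting on exit (unless side effects are allowed)."""

    def __init__(self, no_side_effect: bool = True):
        self._no_side_effect = no_side_effect
        if no_side_effect:
            self.prev_state = torch.is_deterministic()

    def __enter__(self):
        torch.set_deterministic(True)
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self._no_side_effect:
            torch.set_deterministic(self.prev_state)
        return False  # never swallow exceptions


# Everything inside the block runs with deterministic algorithms enforced.
with DeterministicGuard():
    y = torch.arange(6.0).reshape(2, 3).sum()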