Example #1
    def __new__(cls, name: str, bases: Tuple, attrs: Dict):
        for test_config in config:
            (
                algorithms,
                model,
                args,
                layer,
                noise_tunnel,
                baseline_distr,
            ) = parse_test_config(test_config)
            for algorithm in algorithms:
                if not should_create_generated_test(algorithm):
                    continue
                if algorithm in JIT_SUPPORTED:
                    for mode in JITCompareMode:
                        # Create a test case for each (algorithm,
                        # JITCompareMode) pair.
                        test_method = cls.make_single_jit_test(
                            algorithm,
                            model,
                            args,
                            noise_tunnel,
                            baseline_distr,
                            mode,
                        )
                        test_name = gen_test_name(
                            "test_jit_" + mode.name,
                            cast(str, test_config["name"]),
                            algorithm,
                            noise_tunnel,
                        )
                        if test_name in attrs:
                            raise AssertionError(
                                "Trying to overwrite existing test with name: %r"
                                % test_name)
                        attrs[test_name] = test_method

        return super(JITMeta, cls).__new__(cls, name, bases, attrs)
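
All four examples share one mechanism: a metaclass __new__ that injects generated test_* methods into attrs before the class object is created, so a stock unittest runner discovers them as ordinary methods. The self-contained toy below illustrates that mechanism only; GeneratedDoubleMeta and its generated tests are hypothetical names, and none of the helpers above (config, parse_test_config, gen_test_name, ...) are reproduced.

import unittest
from typing import Dict, Tuple


class GeneratedDoubleMeta(type):
    """Toy stand-in for JITMeta: attach tests at class-creation time."""

    def __new__(cls, name: str, bases: Tuple, attrs: Dict):
        for value in (0, 1, 2):
            # Bind `value` via a default argument so each generated test
            # keeps its own copy instead of closing over the loop variable.
            def test_method(self, value=value):
                self.assertEqual(value * 2, value + value)

            test_name = f"test_double_{value}"
            if test_name in attrs:
                raise AssertionError(
                    "Trying to overwrite existing test with name: %r" % test_name
                )
            attrs[test_name] = test_method
        return super().__new__(cls, name, bases, attrs)


class GeneratedDoubleTest(unittest.TestCase, metaclass=GeneratedDoubleMeta):
    pass  # gains test_double_0 .. test_double_2 before the class exists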
Example #2
    def __new__(cls, name: str, bases: Tuple, attrs: Dict):
        for test_config in config:
            (
                algorithms,
                model,
                args,
                layer,
                noise_tunnel,
                baseline_distr,
            ) = parse_test_config(test_config)
            target_delta = test_config.get("target_delta", 0.0001)

            if "target" not in args or not isinstance(args["target"],
                                                      (list, Tensor)):
                continue

            for algorithm in algorithms:
                # FeaturePermutation requires a batch of inputs, so its
                # generated target tests are skipped.
                if issubclass(
                    algorithm, FeaturePermutation
                ) or not should_create_generated_test(algorithm):
                    continue
                test_method = cls.make_single_target_test(
                    algorithm,
                    model,
                    layer,
                    args,
                    target_delta,
                    noise_tunnel,
                    baseline_distr,
                )
                test_name = gen_test_name(
                    "test_target",
                    cast(str, test_config["name"]),
                    algorithm,
                    noise_tunnel,
                )

                if test_name in attrs:
                    raise AssertionError(
                        "Trying to overwrite existing test with name: %r"
                        % test_name)
                attrs[test_name] = test_method
        return super(TargetsMeta, cls).__new__(cls, name, bases, attrs)
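
Note that the test body always comes from a factory call (cls.make_single_target_test(...) here) rather than a def inlined in the loop. The sketch below, with hypothetical names that do not match Captum's actual signatures, shows why that matters: the factory freezes the loop variable in a fresh closure per test.

import unittest
from typing import Callable


def make_single_target_test(target: int) -> Callable:
    # Each call returns a new function with `target` frozen in its
    # closure; an inline `def` in the loop would close over the loop
    # variable itself and see only its final value at run time.
    def test(self) -> None:
        self.assertEqual(sum(range(target)), target * (target - 1) // 2)

    return test


class ToyTargetsTest(unittest.TestCase):
    pass


# Same effect as the metaclass route, applied after class creation.
for target in (1, 2, 5):
    setattr(ToyTargetsTest, f"test_target_{target}", make_single_target_test(target))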
Example #3
    def __new__(cls, name: str, bases: Tuple, attrs: Dict):
        created_tests: Dict[
            Tuple[Type[Attribution], HookRemovalMode], bool
        ] = {}
        for test_config in config:
            (
                algorithms,
                model,
                args,
                layer,
                noise_tunnel,
                _,
            ) = parse_test_config(test_config)

            for algorithm in algorithms:
                if not should_create_generated_test(algorithm):
                    continue
                for mode in HookRemovalMode:
                    if mode is HookRemovalMode.invalid_module and layer is None:
                        continue
                    # Only one test per algorithm and mode is necessary
                    if (algorithm, mode) in created_tests:
                        continue

                    test_method = cls.make_single_hook_removal_test(
                        algorithm,
                        model,
                        layer,
                        args,
                        noise_tunnel,
                        mode,
                    )
                    test_name = gen_test_name(
                        "test_hook_removal_" + mode.name,
                        cast(str, test_config["name"]),
                        algorithm,
                        noise_tunnel,
                    )

                    if test_name in attrs:
                        raise AssertionError(
                            "Trying to overwrite existing test with name: %r"
                            % test_name)
                    attrs[test_name] = test_method
                    created_tests[(algorithm, mode)] = True
        return super(HookRemovalMeta, cls).__new__(cls, name, bases, attrs)
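
The created_tests dict above is used purely for membership: its values are written but never read, so it behaves as a set keyed by (algorithm, mode) that deduplicates tests across configs sharing an algorithm. A minimal sketch of that bookkeeping with stand-in names (not Captum's enum or algorithm classes):

from enum import Enum
from typing import Set, Tuple


class ToyHookRemovalMode(Enum):
    normal = 1
    invalid_module = 2


created: Set[Tuple[str, ToyHookRemovalMode]] = set()
# Two configs mention "Saliency"; only the first generates its tests.
for algorithm in ("Saliency", "InputXGradient", "Saliency"):
    for mode in ToyHookRemovalMode:
        if (algorithm, mode) in created:
            continue  # one test per (algorithm, mode) pair is enough
        created.add((algorithm, mode))

assert len(created) == 4  # 2 algorithms x 2 modes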
Example #4
    def __new__(cls, name: str, bases: Tuple, attrs: Dict):
        for test_config in config:
            (
                algorithms,
                model,
                args,
                layer,
                noise_tunnel,
                baseline_distr,
            ) = parse_test_config(test_config)
            dp_delta = test_config.get("dp_delta", 0.0001)

            for algorithm in algorithms:
                if not should_create_generated_test(algorithm):
                    continue
                for mode in DataParallelCompareMode:
                    # Create a test case for each (algorithm,
                    # DataParallelCompareMode) pair.
                    test_method = cls.make_single_dp_test(
                        algorithm,
                        model,
                        layer,
                        args,
                        dp_delta,
                        noise_tunnel,
                        baseline_distr,
                        mode,
                    )
                    test_name = gen_test_name(
                        "test_dp_" + mode.name,
                        cast(str, test_config["name"]),
                        algorithm,
                        noise_tunnel,
                    )
                    if test_name in attrs:
                        raise AssertionError(
                            "Trying to overwrite existing test with name: %r"
                            % test_name)
                    attrs[test_name] = test_method

        return super(DataParallelMeta, cls).__new__(cls, name, bases, attrs)
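
Finally, each metaclass derives the generated method's name from the Enum member ("test_dp_" + mode.name here) combined with the config name and algorithm via gen_test_name. The sketch below mimics only the mode-name part with a stand-in enum; DataParallelCompareMode's real members and gen_test_name's signature are not shown in these examples.

from enum import Enum


class ToyDataParallelCompareMode(Enum):
    cpu_cuda = 1
    cuda_cuda_dp = 2


# Mirrors the naming scheme above: one generated test per algorithm x mode.
names = [
    "test_dp_" + mode.name + "_" + algorithm
    for algorithm in ("Saliency", "InputXGradient")
    for mode in ToyDataParallelCompareMode
]
assert names[0] == "test_dp_cpu_cuda_Saliency"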