Example #1
    def _check_data_types(self, key, obtained_column, expected_column):
        """
        Check if data type of obtained and expected columns are the same. Fail if not.
        Helper method used in _check_fn method.
        """
        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Numpy"))

        __tracebackhide__ = True
        obtained_data_type = obtained_column.values.dtype
        expected_data_type = expected_column.values.dtype
        if obtained_data_type != expected_data_type:
            # Check if both data types are comparable as numbers (float, int, short, bytes, etc...)
            if np.issubdtype(obtained_data_type, np.number) and np.issubdtype(
                expected_data_type, np.number
            ):
                return

            # In case they are not, assume they are not comparable
            error_msg = (
                "Data type for data %s of obtained and expected are not the same.\n"
                "Obtained: %s\n"
                "Expected: %s\n" % (key, obtained_data_type, expected_data_type)
            )
            raise AssertionError(error_msg)
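A minimal sketch of the dtype-compatibility rule used above: two columns pass the check as long as both dtypes are subtypes of np.number, even when they differ (the dtypes below are chosen for illustration):

    import numpy as np

    # int32 vs. float64: both are numeric kinds, so the check above returns early.
    assert np.issubdtype(np.dtype("int32"), np.number)
    assert np.issubdtype(np.dtype("float64"), np.number)
    # A unicode dtype is not numeric, so pairing it with int32 would raise AssertionError.
    assert not np.issubdtype(np.dtype("<U5"), np.number)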
Example #2
    def _check_data_types(self, key, obtained_array, expected_array):
        """
        Check if data type of obtained and expected arrays are the same. Fail if not.
        Helper method used in _check_fn method.
        """
        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("NumPy"))

        __tracebackhide__ = True

        if obtained_array.dtype != expected_array.dtype:
            # Check if both data types are comparable as numbers (float, int, short, bytes, etc...)
            if np.issubdtype(obtained_array.dtype, np.number) and np.issubdtype(
                expected_array.dtype, np.number
            ):
                return
            # Check if both data types are comparable as strings
            if np.issubdtype(obtained_array.dtype, str) and np.issubdtype(
                expected_array.dtype, str
            ):
                return

            # In case they are not, assume they are not comparable
            error_msg = (
                "Data types are not the same.\n"
                f"  key: {key}\n"
                f"  Obtained: {obtained_array.dtype}\n"
                f"  Expected: {expected_array.dtype}\n"
            )
            raise AssertionError(error_msg)
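This variant additionally treats two string dtypes as comparable. A minimal sketch of that extra branch (dtypes chosen for illustration):

    import numpy as np

    # Fixed-width unicode dtypes of different lengths are both subtypes of str,
    # so the second branch above returns early instead of failing.
    assert np.issubdtype(np.dtype("<U5"), str)
    assert np.issubdtype(np.dtype("<U20"), str)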
Example #3
    def _check_images_manhattan_distance(self, obtained_file, expected_file,
                                         expect_equal, diff_threshold):
        """
        Compare two image by computing the differences spatially, pixel by pixel.

        The Manhattan Distance is used to compute how much two images differ.

        :param str obtained_file:
            The image with the obtained image

        :param str expected_files:
            The image with the expected image

        :param bool expected_equal:
            If True, the images are expected to be equal, otherwise, they're expected to be
            different.

        :param float diff_threshold:
            The maximum percentage of difference accepted.
            A value between 0.0 and 100.0

        :raises AssertionError:
            raised if they are actually different and expect_equal is False or
            if they are equal and expect_equal is True.
        """
        try:
            from PIL import ImageChops
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Pillow"))

        __tracebackhide__ = True

        obtained_img = self._load_image(obtained_file)
        expected_img = self._load_image(expected_file)

        def check_result(equal, manhattan_distance):
            if equal != expect_equal:
                params = manhattan_distance, expected_file, obtained_file
                if expect_equal:
                    assert 0, (
                        "Difference between images too high: %.2f %%\n%s\n%s" %
                        params)
                else:
                    assert 0, (
                        "Difference between images too small: %.2f %%\n%s\n%s"
                        % params)

        # 1st check: identical
        diff_img = ImageChops.difference(obtained_img, expected_img)

        if diff_img.getbbox() is None:  # Equal; pass 0.0 so check_result can format it as a float.
            check_result(True, 0.0)
            return

        manhattan_distance = self._compute_manhattan_distance(diff_img)
        equal = manhattan_distance <= diff_threshold
        check_result(equal, manhattan_distance)
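A minimal, self-contained sketch of the first check above: for identical images, ImageChops.difference produces an all-black image whose getbbox() is None (the image size here is arbitrary):

    from PIL import Image, ImageChops

    img_a = Image.new("RGB", (4, 4), (255, 0, 0))
    img_b = Image.new("RGB", (4, 4), (255, 0, 0))
    diff = ImageChops.difference(img_a, img_b)
    # No non-zero region in the difference image means the images are identical.
    assert diff.getbbox() is None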
Example #4
    def _dump_fn(self, data_object, filename):
        """
        Dump dict contents to the given filename.

        :param Dict[str, np.ndarray] data_object:
        :param str filename:
        """
        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("NumPy"))

        np.savez_compressed(str(filename), **data_object)
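A minimal round-trip sketch of what _dump_fn produces (the file name is hypothetical); np.load reads the compressed archive back as a mapping of key to array:

    import numpy as np

    data = {"values": np.arange(5), "weights": np.ones(5)}
    np.savez_compressed("example.npz", **data)  # hypothetical file name
    loaded = dict(np.load("example.npz"))
    assert set(loaded) == {"values", "weights"}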
Example #5
    def check(self,
              image_data,
              diff_threshold=0.1,
              expect_equal=True,
              basename=None):
        """
        Checks that the given image contents are comparable with the ones stored in the data directory.

        :param bytes image_data: image data
        :param str|None basename: basename to store the information in the data directory. If none, use the name
            of the test function.
        :param bool expect_equal: if the image should considered equal below of the given threshold. If False, the
            image should be considered different at least above the threshold.
        :param float diff_threshold:
            Tolerage as a percentage (1 to 100) on how the images are allowed to differ.

        """
        __tracebackhide__ = True

        try:
            from PIL import Image
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Pillow"))

        def dump_fn(target):
            image = Image.open(io.BytesIO(image_data))
            image.save(str(target), "PNG")

        perform_regression_check(
            datadir=self.datadir,
            original_datadir=self.original_datadir,
            request=self.request,
            check_fn=partial(
                self._check_images_manhattan_distance,
                diff_threshold=diff_threshold,
                expect_equal=expect_equal,
            ),
            dump_fn=dump_fn,
            extension=".png",
            basename=basename,
            force_regen=self.force_regen,
            with_test_class_names=self.with_test_class_names,
        )
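A hedged usage sketch for this public check method; generate_plot_png is a hypothetical helper that returns PNG contents as bytes:

    def test_plot(image_regression):
        image_bytes = generate_plot_png()  # hypothetical: any PNG as bytes works
        image_regression.check(image_bytes, diff_threshold=1.0)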
Example #6
    def _load_image(self, filename):
        """
        Reads the image from the given file and converts it to RGB if necessary.
        This is necessary for use with the ImageChops module operations.
        At this time, in this module, channel operations are only implemented
        for 8-bit images (e.g. "L" and "RGB").

        :param Path filename:
            The name of the file
        """
        try:
            from PIL import Image
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Pillow"))

        img = Image.open(str(filename), "r")
        if img.mode not in ("L", "RGB"):
            return img.convert("RGB")
        else:
            return img
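A minimal sketch of the conversion rule above, using a palette-mode image (mode "P") that must be converted before ImageChops channel operations can be used:

    from PIL import Image

    img = Image.new("P", (2, 2))  # palette mode, neither "L" nor "RGB"
    if img.mode not in ("L", "RGB"):
        img = img.convert("RGB")
    assert img.mode == "RGB"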
Example #7
    def _compute_manhattan_distance(self, diff_image):
        """
        Computes the percentage of difference (0.0 to 100.0) represented by the given difference image.

        :param PIL.Image diff_image:
            An image in RGB mode computed from ImageChops.difference
        """
        try:
            import numpy
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Numpy"))

        number_of_pixels = diff_image.size[0] * diff_image.size[1]
        return (
            # To obtain a number in 0.0 -> 100.0
            100.0 * (
                # Compute the sum of differences
                numpy.sum(diff_image) /
                # Divide by the number of channel differences RGB * Pixels
                float(3 * number_of_pixels))
            # Normalize between 0.0 -> 1.0
            / 255.0)
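A worked sketch of the formula above: a 2x2 RGB image has 3 * 4 = 12 channel values, so if exactly one channel differs by the full 255, the distance is 100 * (255 / 12) / 255 ≈ 8.33%:

    import numpy as np
    from PIL import Image, ImageChops

    a = Image.new("RGB", (2, 2), (0, 0, 0))
    b = Image.new("RGB", (2, 2), (0, 0, 0))
    b.putpixel((0, 0), (255, 0, 0))  # one channel of one pixel fully differs
    diff = ImageChops.difference(a, b)
    pct = 100.0 * (np.sum(diff) / float(3 * 4)) / 255.0
    assert round(pct, 2) == 8.33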
Example #8
    def _load_fn(self, filename):
        """
        Load dict contents from the given filename.

        :param str filename"
        """
        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("NumPy"))

        try:
            # Open the file with a context manager manually, because np.load does not
            # follow such good practices internally, causing avoidable error messages
            # in the unit tests.
            with open(filename, "rb") as f:
                result = dict(np.load(f))
        except (zipfile.BadZipFile, ValueError) as e:
            raise IOError(
                f"NPZ file {filename} could not be loaded. Corrupt file?"
            ) from e
        return result
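A minimal sketch of the failure path above: feeding np.load something that is not a valid NPZ archive raises one of the exceptions caught here (the file name and contents are hypothetical):

    import zipfile
    import numpy as np

    with open("broken.npz", "wb") as f:  # hypothetical corrupt file
        f.write(b"not a zip archive")
    try:
        with open("broken.npz", "rb") as f:
            dict(np.load(f))
    except (zipfile.BadZipFile, ValueError):
        print("corrupt NPZ detected")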
Example #9
    def _check_fn(self, obtained_filename, expected_filename):
        """
        Check if dict contents dumped to a file match the contents in expected file.

        :param str obtained_filename:
        :param str expected_filename:
        """
        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Numpy"))
        try:
            import pandas as pd
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Pandas"))

        __tracebackhide__ = True

        obtained_data = pd.read_csv(str(obtained_filename))
        expected_data = pd.read_csv(str(expected_filename))

        comparison_tables_dict = {}
        for k in obtained_data.keys():
            obtained_column = obtained_data[k]
            expected_column = expected_data.get(k)

            if expected_column is None:
                error_msg = f"Could not find key '{k}' in the expected results.\n"
                error_msg += "Keys in the obtained data table: ["
                for k in obtained_data.keys():
                    error_msg += f"'{k}', "
                error_msg += "]\n"
                error_msg += "Keys in the expected data table: ["
                for k in expected_data.keys():
                    error_msg += f"'{k}', "
                error_msg += "]\n"
                error_msg += "To update values, use --force-regen option.\n\n"
                raise AssertionError(error_msg)

            tolerance_args = self._tolerances_dict.get(k, self._default_tolerance)

            self._check_data_types(k, obtained_column, expected_column)
            self._check_data_shapes(obtained_column, expected_column)

            data_type = obtained_column.values.dtype
            if data_type in [float, np.float16, np.float32, np.float64]:
                not_close_mask = ~np.isclose(
                    obtained_column.values,
                    expected_column.values,
                    equal_nan=True,
                    **tolerance_args,
                )
            else:
                not_close_mask = obtained_column.values != expected_column.values

            if np.any(not_close_mask):
                diff_ids = np.where(not_close_mask)[0]
                diff_obtained_data = obtained_column[diff_ids]
                diff_expected_data = expected_column[diff_ids]
                if data_type == bool:
                    diffs = np.logical_xor(obtained_column, expected_column)[diff_ids]
                else:
                    diffs = np.abs(obtained_column - expected_column)[diff_ids]

                comparison_table = pd.concat(
                    [diff_obtained_data, diff_expected_data, diffs], axis=1
                )
                comparison_table.columns = [f"obtained_{k}", f"expected_{k}", "diff"]
                comparison_tables_dict[k] = comparison_table

        if len(comparison_tables_dict) > 0:
            error_msg = "Values are not sufficiently close.\n"
            error_msg += "To update values, use --force-regen option.\n\n"
            for k, comparison_table in comparison_tables_dict.items():
                error_msg += f"{k}:\n{comparison_table}\n\n"
            raise AssertionError(error_msg)
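A minimal sketch of the masking step above: np.isclose with per-key tolerance arguments yields a boolean mask, and the inverted mask selects the differing positions (values chosen for illustration):

    import numpy as np

    obtained = np.array([1.0, 2.0, 3.0])
    expected = np.array([1.0, 2.0, 3.1])
    not_close_mask = ~np.isclose(obtained, expected, equal_nan=True, atol=0.05)
    assert list(np.where(not_close_mask)[0]) == [2]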
Example #10
    def check(
        self,
        data_frame,
        basename=None,
        fullpath=None,
        tolerances=None,
        default_tolerance=None,
    ):
        """
        Checks the given pandas dataframe against a previously recorded version, or generate a new file.

        Example::

            data_frame = pandas.DataFrame.from_dict({
                'U_gas': U[0][positions],
                'U_liquid': U[1][positions],
                'gas_vol_frac [-]': vol_frac[0][positions],
                'liquid_vol_frac [-]': vol_frac[1][positions],
                'P': Pa_to_bar(P)[positions],
            })
            dataframe_regression.check(data_frame)

        :param pandas.DataFrame data_frame: pandas DataFrame containing data for regression check.

        :param str basename: basename of the file to test/record. If not given the name
            of the test is used.

        :param str fullpath: complete path to use as a reference file. This option
            will ignore embed_data completely, being useful if a reference file is located
            in the session data dir for example.

        :param dict tolerances: dict mapping keys from the data_dict to tolerance settings for the
            given data. Example::

                tolerances={'U': Tolerance(atol=1e-2)}

        :param dict default_tolerance: dict mapping the default tolerance for the current check
            call. Example::

                default_tolerance=dict(atol=1e-7, rtol=1e-18)

            If not provided, will use defaults from numpy's ``isclose`` function.

        ``basename`` and ``fullpath`` are exclusive.
        """
        try:
            import pandas as pd
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Pandas"))

        import functools

        __tracebackhide__ = True

        assert type(data_frame) is pd.DataFrame, (
            "Only pandas DataFrames are supported on on dataframe_regression fixture.\n"
            "Object with type '%s' was given." % (str(type(data_frame)),)
        )

        for column in data_frame.columns:
            array = data_frame[column]
            # Skip assertion if an array of strings
            if (array.dtype == "O") and (type(array[0]) is str):
                continue
            # Rejected: timedelta, datetime, objects, zero-terminated bytes, unicode strings and raw data
            assert array.dtype not in ["m", "M", "O", "S", "a", "U", "V"], (
                "Only numeric data is supported on dataframe_regression fixture.\n"
                "Array with type '%s' was given.\n" % (str(array.dtype),)
            )

        if tolerances is None:
            tolerances = {}
        self._tolerances_dict = tolerances

        if default_tolerance is None:
            default_tolerance = {}
        self._default_tolerance = default_tolerance

        dump_fn = functools.partial(self._dump_fn, data_frame)

        with pd.option_context(*self._pandas_display_options):
            perform_regression_check(
                datadir=self.datadir,
                original_datadir=self.original_datadir,
                request=self.request,
                check_fn=self._check_fn,
                dump_fn=dump_fn,
                extension=".csv",
                basename=basename,
                fullpath=fullpath,
                force_regen=self._force_regen,
            )
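A hedged usage sketch for this fixture method; the column names and values are hypothetical:

    import pandas as pd

    def test_frame(dataframe_regression):
        frame = pd.DataFrame({"pressure": [1.0, 2.5], "velocity": [0.1, 0.2]})
        dataframe_regression.check(frame, default_tolerance=dict(atol=1e-8))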
Example #11
    def check(
        self,
        data_dict,
        basename=None,
        fullpath=None,
        tolerances=None,
        default_tolerance=None,
        data_index=None,
        fill_different_shape_with_nan=True,
    ):
        """
        Checks the given dict against a previously recorded version, or generate a new file.
        The dict must map from user-defined keys to 1d numpy arrays or array-like values.

        Example::

            num_regression.check({
                'U_gas': U[0][positions],
                'U_liquid': U[1][positions],
                'gas_vol_frac [-]': vol_frac[0][positions],
                'liquid_vol_frac [-]': vol_frac[1][positions],
                'P': Pa_to_bar(P)[positions],
            })

        :param dict data_dict: dict mapping keys to numpy arrays, or objects that can be
            coerced to 1d numpy arrays with a numeric dtype (e.g. list, tuple, etc).

        :param str basename: basename of the file to test/record. If not given the name
            of the test is used.

        :param str fullpath: complete path to use as a reference file. This option
            will ignore embed_data completely, being useful if a reference file is located
            in the session data dir for example.

        :param dict tolerances: dict mapping keys from the data_dict to tolerance settings for the
            given data. Example::

                tolerances={'U': Tolerance(atol=1e-2)}

        :param dict default_tolerance: dict mapping the default tolerance for the current check
            call. Example::

                default_tolerance=dict(atol=1e-7, rtol=1e-18)

            If not provided, will use defaults from numpy's ``isclose`` function.

        :param list data_index: If set, will override the indexes shown in the outputs. Default
            is pandas' default, which is ``range(0, len(data))``.

        :param bool fill_different_shape_with_nan: If set, any array in data_dict that is
            shorter than the longest one will be padded with ``np.NaN``, so that the data can
            still be saved to a single CSV file.

        ``basename`` and ``fullpath`` are exclusive.
        """

        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Numpy"))
        try:
            import pandas as pd
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("Pandas"))

        __tracebackhide__ = True

        for k, obj in data_dict.items():
            if not isinstance(obj, np.ndarray):
                arr = np.atleast_1d(np.asarray(obj))
                if np.issubdtype(arr.dtype, np.number):
                    data_dict[k] = arr

        data_shapes = []
        for obj in data_dict.values():
            assert isinstance(obj, np.ndarray), (
                "Only objects that can be coerced to numpy arrays are valid for "
                "the num_regression fixture.\n"
            )
            shape = obj.shape

            assert len(shape) == 1, (
                "Only 1D arrays are supported on the num_regression fixture.\n"
                "Array with shape %s was given.\n" % (shape,)
            )
            data_shapes.append(shape[0])

        data_shapes = np.array(data_shapes)
        if not np.all(data_shapes == data_shapes[0]):
            if not fill_different_shape_with_nan:
                raise AssertionError(
                    "Data dict with different array lengths will not be accepted. "
                    "Try setting fill_different_shape_with_nan=True."
                )
            elif len(data_dict) > 1 and not all(
                    np.issubdtype(a.dtype, np.floating)
                    for a in data_dict.values()):
                raise TypeError(
                    "Checking multiple arrays with different shapes are not supported for non-float arrays"
                )
            else:
                max_size = max(data_shapes)
                for k, obj in data_dict.items():
                    new_data = np.empty(shape=(max_size, ), dtype=obj.dtype)
                    new_data[:len(obj)] = obj
                    new_data[len(obj):] = np.nan
                    data_dict[k] = new_data

        data_frame = pd.DataFrame(data_dict, index=data_index)

        DataFrameRegressionFixture.check(self, data_frame, basename, fullpath,
                                         tolerances, default_tolerance)
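A hedged usage sketch of the NaN-padding behavior above: float arrays of unequal length are accepted, and the shorter one is padded so the data fits a single CSV (key names are hypothetical):

    import numpy as np

    def test_uneven_arrays(num_regression):
        num_regression.check({
            "a": np.array([1.0, 2.0, 3.0]),
            "b": np.array([4.0, 5.0]),  # padded to length 3 with NaN
        })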
Example #12
    def _check_fn(self, obtained_filename, expected_filename):
        """
        Check if dict contents dumped to a file match the contents in expected file.

        :param str obtained_filename:
        :param str expected_filename:
        """
        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("NumPy"))

        __tracebackhide__ = True

        # Turn result of np.load into a dictionary, such that the files are closed immediately.
        expected_data = self._load_fn(expected_filename)
        obtained_data = self._load_fn(obtained_filename)

        # Check mismatches in the keys.
        if set(obtained_data) != set(expected_data):
            error_msg = (
                "They keys in the obtained results differ from the expected results.\n"
            )
            error_msg += "  Matching keys: "
            error_msg += str(list(set(obtained_data) & set(expected_data)))
            error_msg += "\n"
            error_msg += "  New in obtained: "
            error_msg += str(list(set(obtained_data) - set(expected_data)))
            error_msg += "\n"
            error_msg += "  Missing from obtained: "
            error_msg += str(list(set(expected_data) - set(obtained_data)))
            error_msg += "\n"
            error_msg += "To update values, use --force-regen option.\n\n"
            raise AssertionError(error_msg)

        # Compare the contents of the arrays.
        comparison_tables_dict = {}
        for k, obtained_array in obtained_data.items():
            expected_array = expected_data.get(k)
            tolerance_args = self._tolerances_dict.get(k, self._default_tolerance)

            self._check_data_types(k, obtained_array, expected_array)
            self._check_data_shapes(k, obtained_array, expected_array)

            if np.issubdtype(obtained_array.dtype, np.inexact):
                not_close_mask = ~np.isclose(
                    obtained_array,
                    expected_array,
                    equal_nan=True,
                    **tolerance_args,
                )
            else:
                not_close_mask = obtained_array != expected_array

            if np.any(not_close_mask):
                if not_close_mask.ndim == 0:
                    diff_ids = [()]
                else:
                    diff_ids = np.array(np.nonzero(not_close_mask)).T
                comparison_tables_dict[k] = (
                    expected_array.size,
                    expected_array.shape,
                    diff_ids,
                    obtained_array[not_close_mask],
                    expected_array[not_close_mask],
                )

        if len(comparison_tables_dict) > 0:
            error_msg = "Values are not sufficiently close.\n"
            error_msg += "To update values, use --force-regen option.\n\n"
            for k, (
                size,
                shape,
                diff_ids,
                obtained_array,
                expected_array,
            ) in comparison_tables_dict.items():
                # Summary
                error_msg += f"{k}:\n  Shape: {shape}\n"
                pct = 100 * len(diff_ids) / size
                error_msg += (
                    f"  Number of differences: {len(diff_ids)} / {size} ({pct:.1f}%)\n"
                )
                if np.issubdtype(obtained_array.dtype, np.number) and len(diff_ids) > 1:
                    error_msg += (
                        "  Statistics are computed for differing elements only.\n"
                    )

                    abs_errors = abs(obtained_array - expected_array)
                    error_msg += "  Stats for abs(obtained - expected):\n"
                    error_msg += f"    Max:     {abs_errors.max()}\n"
                    error_msg += f"    Mean:    {abs_errors.mean()}\n"
                    error_msg += f"    Median:  {np.median(abs_errors)}\n"

                    expected_nonzero = np.array(np.nonzero(expected_array)).T
                    rel_errors = abs(
                        (
                            obtained_array[expected_nonzero]
                            - expected_array[expected_nonzero]
                        )
                        / expected_array[expected_nonzero]
                    )
                    if len(rel_errors) == 0:
                        error_msg += "  Relative errors are not reported because all expected values are zero.\n"
                    else:
                        error_msg += (
                            f"  Stats for abs(obtained - expected) / abs(expected):\n"
                        )
                        if len(rel_errors) != len(abs_errors):
                            pct = 100 * len(rel_errors) / len(abs_errors)
                            error_msg += f"    Number of (differing) non-zero expected results: {len(rel_errors)} / {len(abs_errors)} ({pct:.1f}%)\n"
                            error_msg += f"    Relative errors are computed for the non-zero expected results.\n"
                        else:
                            rel_errors = abs(
                                (obtained_array - expected_array) / expected_array
                            )
                        error_msg += f"    Max:     {rel_errors.max()}\n"
                        error_msg += f"    Mean:    {rel_errors.mean()}\n"
                        error_msg += f"    Median:  {np.median(rel_errors)}\n"

                # Details results
                error_msg += "  Individual errors:\n"
                if len(diff_ids) > self.THRESHOLD:
                    error_msg += (
                        f"    Only showing first {self.THRESHOLD} mismatches.\n"
                    )
                    diff_ids = diff_ids[: self.THRESHOLD]
                    obtained_array = obtained_array[: self.THRESHOLD]
                    expected_array = expected_array[: self.THRESHOLD]
                error_msg += self.ROWFORMAT.format(
                    "Index",
                    "Obtained",
                    "Expected",
                    "Difference",
                )
                for diff_id, obtained, expected in zip(
                    diff_ids, obtained_array, expected_array
                ):
                    diff_id_str = ", ".join(str(i) for i in diff_id)
                    if len(diff_id) != 1:
                        diff_id_str = f"({diff_id_str})"
                    error_msg += self.ROWFORMAT.format(
                        diff_id_str,
                        str(obtained),
                        str(expected),
                        str(obtained - expected)
                        if isinstance(obtained, np.number)
                        else "",
                    )
                error_msg += "\n"

            raise AssertionError(error_msg)
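A minimal sketch of the index bookkeeping above: for a multi-dimensional array, transposing np.nonzero gives one row of coordinates per differing element (values chosen for illustration):

    import numpy as np

    obtained = np.array([[1.0, 2.0], [3.0, 4.0]])
    expected = np.array([[1.0, 2.5], [3.0, 4.0]])
    not_close_mask = ~np.isclose(obtained, expected)
    diff_ids = np.array(np.nonzero(not_close_mask)).T
    assert diff_ids.tolist() == [[0, 1]]  # row 0, column 1 differs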
Example #13
    def check(
        self,
        data_dict,
        basename=None,
        fullpath=None,
        tolerances=None,
        default_tolerance=None,
    ):
        """
        Checks a dictionary of NumPy ndarrays, containing only numeric data, against a previously recorded version, or generate a new file.

        Example::

            def test_some_data(ndarrays_regression):
                points, values = some_function()
                ndarrays_regression.check(
                    {
                        'points': points,  # array with shape (100, 3)
                        'values': values,  # array with shape (100,)
                    },
                    default_tolerance=dict(atol=1e-8, rtol=1e-8)
                )

        :param Dict[str, numpy.ndarray] data_dict: dictionary of NumPy ndarrays containing
            data for regression check. The arrays can have any shape.

        :param str basename: basename of the file to test/record. If not given the name
            of the test is used.

        :param str fullpath: complete path to use as a reference file. This option
            will ignore embed_data completely, being useful if a reference file is located
            in the session data dir for example.

        :param dict tolerances: dict mapping keys from the data_dict to tolerance settings
            for the given data. Example::

                tolerances={'U': Tolerance(atol=1e-2)}

        :param dict default_tolerance: dict mapping the default tolerance for the current
            check call. Example::

                default_tolerance=dict(atol=1e-7, rtol=1e-18)

            If not provided, will use defaults from numpy's ``isclose`` function.

        ``basename`` and ``fullpath`` are exclusive.
        """
        try:
            import numpy as np
        except ModuleNotFoundError:
            raise ModuleNotFoundError(import_error_message("NumPy"))

        import functools

        __tracebackhide__ = True

        if not isinstance(data_dict, dict):
            raise TypeError(
                "Only dictionaries with NumPy arrays or array-like objects are "
                "supported on ndarray_regression fixture.\n"
                "Object with type '{}' was given.".format(str(type(data_dict)))
            )
        for key, array in data_dict.items():
            assert isinstance(key, str), (
                "The dictionary keys must be strings. "
                "Found key with type '{}'".format(str(type(key)))
            )
            data_dict[key] = np.asarray(array)

        for key, array in data_dict.items():
            # Accepted:
            #  - b: boolean
            #  - i: signed integer
            #  - u: unsigned integer
            #  - f: floating-point number
            #  - c: complex floating-point number
            #  - U: unicode string
            # Rejected:
            #  - m: timedelta
            #  - M: datetime
            #  - O: objects
            #  - S: zero-terminated bytes
            #  - V: void (raw data, structured arrays)
            if array.dtype.kind not in ["b", "i", "u", "f", "c", "U"]:
                raise TypeError(
                    "Only numeric or unicode data is supported on ndarrays_regression "
                    f"fixture.\nArray '{key}' with type '{array.dtype}' was given."
                )

        if tolerances is None:
            tolerances = {}
        self._tolerances_dict = tolerances

        if default_tolerance is None:
            default_tolerance = {}
        self._default_tolerance = default_tolerance

        dump_fn = functools.partial(self._dump_fn, data_dict)

        perform_regression_check(
            datadir=self.datadir,
            original_datadir=self.original_datadir,
            request=self.request,
            check_fn=self._check_fn,
            dump_fn=dump_fn,
            extension=".npz",
            basename=basename,
            fullpath=fullpath,
            force_regen=self._force_regen,
            with_test_class_names=self._with_test_class_names,
        )
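A hedged usage sketch for this fixture method, mixing a numeric array and a unicode array (both accepted by the dtype-kind filter above); the keys and shapes are hypothetical:

    import numpy as np

    def test_ndarrays(ndarrays_regression):
        ndarrays_regression.check(
            {"grid": np.zeros((4, 4)), "labels": np.array(["a", "b"])},
            default_tolerance=dict(atol=1e-8, rtol=1e-8),
        )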