Example #1
    def check_performance(self):
        """The performance checking phase of the regression test pipeline.

        :raises reframe.core.exceptions.PerformanceError: If the
            performance check fails.
        :raises reframe.core.exceptions.SanityError: If a performance
            tag cannot be resolved in the references.
        """
        if self.perf_patterns is None:
            return

        with os_ext.change_dir(self._stagedir):
            # We first evaluate and log all performance values and then we
            # check them against the reference. This way we always log them
            # even if they don't meet the reference.
            for tag, expr in self.perf_patterns.items():
                value = evaluate(expr)
                key = '%s:%s' % (self._current_partition.fullname, tag)
                if key not in self.reference:
                    raise SanityError(
                        "tag `%s' not resolved in references for `%s'" %
                        (tag, self._current_partition.fullname))

                self._perfvalues[key] = (value, *self.reference[key])
                self._perf_logger.log_performance(logging.INFO, tag, value,
                                                  *self.reference[key])

            for val, *reference in self._perfvalues.values():
                ref, low_thres, high_thres, *_ = reference
                try:
                    evaluate(assert_reference(val, ref, low_thres, high_thres))
                except SanityError as e:
                    raise PerformanceError(e)
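
For context, here is a minimal sketch of a test that drives this phase. The tag (copy_bw), the partition name (sys:gpu) and the output file are hypothetical; perf_patterns maps tags to deferred sanity expressions, and reference maps <system:partition> scopes to (value, lower_thres, upper_thres) tuples.

import reframe.utility.sanity as sn

class StreamCheck:  # stands in for an rfm.RegressionTest subclass
    def __init__(self):
        # Deferred expression: extract the copy bandwidth from the output
        self.perf_patterns = {
            'copy_bw': sn.extractsingle(r'Copy:\s+(\S+)', 'stdout.txt',
                                        1, float)
        }
        # scope -> tag -> (reference value, lower thres., upper thres.)
        self.reference = {
            'sys:gpu': {
                'copy_bw': (25000.0, -0.05, 0.05)
            }
        }

With this reference, assert_reference() accepts copy_bw values in [23750.0, 26250.0], i.e. from 25000 * (1 - 0.05) up to 25000 * (1 + 0.05).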
Example #2
    def check_performance(self):
        # Stub variant: unconditionally fail the performance stage.
        raise PerformanceError('performance failure')
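
This variant fails unconditionally, e.g. for exercising the failure path of the pipeline. A minimal sketch of how a caller would observe it (the class name is hypothetical; PerformanceError lives in reframe.core.exceptions):

from reframe.core.exceptions import PerformanceError

class FailingCheck:
    def check_performance(self):
        raise PerformanceError('performance failure')

try:
    FailingCheck().check_performance()
except PerformanceError as err:
    print('performance stage failed:', err)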
Example #3
    def check_performance(self):
        """The performance checking phase of the regression test pipeline.

        :raises reframe.core.exceptions.PerformanceError: If the
            performance check fails.
        :raises reframe.core.exceptions.SanityError: If a performance
            tag cannot be resolved in the references.
        """
        if self.perf_patterns is None:
            return

        with os_ext.change_dir(self._stagedir):
            # Check if default reference perf values are provided and
            # store all the variables tested in the performance check
            has_default = False
            variables = set()
            for key, ref in self.reference.items():
                keyparts = key.split(self.reference.scope_separator)
                system = keyparts[0]
                varname = keyparts[-1]
                try:
                    unit = ref[3]
                except IndexError:
                    unit = None

                variables.add((varname, unit))
                if system == '*':
                    has_default = True
                    break

            if not has_default:
                if not variables:
                    # If empty, it means that self.reference was empty, so
                    # try to infer the variable names from perf_patterns
                    variables = {(name, None)
                                 for name in self.perf_patterns.keys()}

                for var in variables:
                    name, unit = var
                    ref_tuple = (0, None, None)
                    if unit:
                        ref_tuple += (unit, )

                    self.reference.update({'*': {name: ref_tuple}})

            # We first evaluate and log all performance values and then we
            # check them against the reference. This way we always log them
            # even if they don't meet the reference.
            for tag, expr in self.perf_patterns.items():
                value = evaluate(expr)
                key = '%s:%s' % (self._current_partition.fullname, tag)
                if key not in self.reference:
                    raise SanityError(
                        "tag `%s' not resolved in references for `%s'" %
                        (tag, self._current_partition.fullname))

                self._perfvalues[key] = (value, *self.reference[key])
                self._perf_logger.log_performance(logging.INFO, tag, value,
                                                  *self.reference[key])

            for key, values in self._perfvalues.items():
                val, ref, low_thres, high_thres, *_ = values
                tag = key.split(':')[-1]
                try:
                    # The '%' interpolates the tag now; the '{n}'
                    # placeholders are filled in by the sanity framework
                    # with the assertion's arguments if it fails.
                    evaluate(
                        assert_reference(
                            val, ref, low_thres, high_thres,
                            msg=('failed to meet reference: %s={0}, '
                                 'expected {1} (l={2}, u={3})' % tag)))
                except SanityError as e:
                    raise PerformanceError(e)
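
The extra logic at the top handles tests that define perf_patterns but no matching reference entries. A minimal sketch of that fallback path, with hypothetical names and assuming reference behaves as ReFrame's scoped dictionary:

import reframe.utility.sanity as sn

class NoReferenceCheck:  # stands in for an rfm.RegressionTest subclass
    def __init__(self):
        self.perf_patterns = {
            'latency': sn.extractsingle(r'latency:\s+(\S+)', 'stdout.txt',
                                        1, float)
        }
        self.reference = {}  # empty: triggers the default-reference path

After check_performance() runs, reference effectively contains {'*': {'latency': (0, None, None)}}, so '<partition>:latency' resolves for any partition; with None thresholds the assertion passes trivially, but the measured value is still logged.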