Example #1
0
    def _get_results(self, subspace, instruction_index=None):
        """Read every key-value pair stored under ``subspace``.

        Pages through the database in batches of 1000 pairs so a very large
        result set never requires one huge range read.

        Args:
            subspace: Subspace whose entire key range is scanned.
            instruction_index: Unused here; accepted for interface
                compatibility with callers that supply it.

        Returns:
            A list of Result objects, one per key-value pair, in key order.
        """
        util.get_logger().info('Reading results from \'%s\'...' %
                               repr(util.subspace_to_tuple(subspace)))

        results = []
        # subspace.range() is loop-invariant; compute it once instead of
        # rebuilding the range object on every batch.
        subspace_range = subspace.range()
        next_key = subspace_range.start
        while True:
            next_results = self.db.get_range(next_key, subspace_range.stop,
                                             1000)
            # An empty batch means we have exhausted the range.
            if len(next_results) == 0:
                break

            results += [
                Result(subspace, kv.key, (kv.value, )) for kv in next_results
            ]
            # Resume the next batch immediately after the last key seen.
            next_key = fdb.KeySelector.first_greater_than(next_results[-1].key)

        return results
Example #2
0
    def check_for_errors(self):
        """Cross-check the results produced by each tester against the others.

        Walks all testers' result lists in lock-step, aligning comparable
        results either by sequence number or by key, and logs any group of
        results that disagree.

        Returns:
            A ``(num_errors, has_filtered_error)`` tuple: the count of
            mismatched result groups, and whether any result matched the
            global error filter (and was therefore ignored for correctness
            purposes).
        """
        # With a single tester there is nothing to compare against.
        if len(self.tester_results) == 1:
            return (0, False)

        util.get_logger().info(
            'Comparing results from \'%s\'...' %
            repr(util.subspace_to_tuple(self.specification.subspace)))

        num_errors = 0
        has_filtered_error = False

        # Tracks the current result being evaluated for each tester
        indices = [0 for i in range(len(self.tester_results))]

        # Longest tester name, used to align the log output columns below.
        name_length = max([len(name) for name in self.tester_results.keys()])

        while True:
            # Gets the next result for each tester
            results = {
                i: r[indices[i]]
                for i, r in enumerate(self.tester_results.values())
                if len(r) > indices[i]
            }
            # Every tester's list is exhausted; the comparison is complete.
            if len(results) == 0:
                break

            # Attempt to 'align' the results. If two results have matching sequence numbers, then they should be compared.
            # Only those testers which have a result matching the minimum current sequence number will be included. All
            # others are considered to have not produced a result and will be evaluated in a future iteration.
            sequence_nums = [
                r.sequence_num(self.specification) for r in results.values()
            ]
            # NOTE(review): if only some results carry a sequence number,
            # min() is taken over a mix of None and numbers — this relies on
            # Python 2 ordering semantics (None < any int); it would raise
            # a TypeError on Python 3. Confirm which interpreter runs this.
            if any([s is not None for s in sequence_nums]):
                results = {
                    i: r
                    for i, r in results.items()
                    if r.sequence_num(self.specification) == min(sequence_nums)
                }

            # If these results aren't using sequence numbers, then we match two results based on whether they share the same key
            else:
                min_key = reduce(
                    ResultSet._min_tuple,
                    [r.key(self.specification) for r in results.values()])
                results = {
                    i: r
                    for i, r in results.items()
                    if Result.tuples_match(r.key(self.specification), min_key)
                }

            # Increment the indices for those testers which produced a result in this iteration
            for i in results.keys():
                indices[i] += 1

            # Fill in 'None' values for testers that didn't produce a result and generate an output string describing the results
            all_results = {
                i: results[i] if i in results else None
                for i in range(len(self.tester_results))
            }
            result_keys = list(self.tester_results.keys())
            # One padded line per tester: "  <name> - <result or None>".
            result_str = '\n'.join([
                '  %-*s - %s' % (name_length, result_keys[i], r)
                for i, r in all_results.items()
            ])

            result_list = list(results.values())

            # If any of our results matches the global error filter, we ignore the result
            if any(
                    r.matches_global_error_filter(self.specification)
                    for r in result_list):
                has_filtered_error = True

            # The result is considered correct if every tester produced a value and all the values meet the matching criteria
            if len(results) < len(all_results) or not all(
                    result_list[0].matches(r, self.specification)
                    for r in result_list):
                util.get_logger().error('\nIncorrect result: \n%s' %
                                        result_str)
                num_errors += 1
            else:
                util.get_logger().debug('\nCorrect result: \n%s' % result_str)

        # Emit a trailing blank line at error level if anything mismatched,
        # otherwise at debug level, to visually terminate the comparison log.
        if num_errors > 0:
            util.get_logger().error('')
        else:
            util.get_logger().debug('')

        return (num_errors, has_filtered_error)