def test_parse_problem_file(self):
    """
    Tests the parse_problem_file method
    """
    filename = os.path.join(os.path.dirname(__file__), 'nist', 'basic.dat')
    fitting_problem = parse_problem_file(filename, OPTIONS)
    self.assertEqual(fitting_problem.name, 'basic')
Example #2
    def setUp(self):
        """
        Setting up FitBenchmarking results object
        """
        self.options = Options()
        mock_problems_dir = os.path.dirname(inspect.getfile(mock_problems))
        problem_dir = os.path.join(mock_problems_dir, "cubic.dat")
        self.problem = parse_problem_file(problem_dir, self.options)
        self.problem.correct_data()

        self.chi_sq = 10
        self.minimizer = "test_minimizer"
        self.runtime = 0.01
        self.params = np.array([1, 3, 4, 4])
        self.initial_params = np.array([0, 0, 0, 0])
        self.cost_func = NLLSCostFunc(self.problem)
        self.jac = Scipy(self.cost_func)
        self.jac.method = "2-point"
        self.result = FittingResult(options=self.options,
                                    cost_func=self.cost_func,
                                    jac=self.jac,
                                    chi_sq=self.chi_sq,
                                    runtime=self.runtime,
                                    minimizer=self.minimizer,
                                    initial_params=self.initial_params,
                                    params=self.params,
                                    error_flag=0)

        self.min_chi_sq = 0.1
        self.result.min_chi_sq = self.min_chi_sq
        self.min_runtime = 1
        self.result.min_runtime = self.min_runtime
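    # A hedged sketch of a test that could follow this setUp; it only checks
    # that the values supplied to the constructor above are kept on the
    # result. Treating chi_sq and runtime as plain attributes of
    # FittingResult is an assumption inferred from this snippet, not
    # confirmed API.
    def test_fitting_result_stores_inputs(self):
        """
        Checks chi_sq and runtime passed to FittingResult are stored.
        """
        self.assertEqual(self.result.chi_sq, self.chi_sq)
        self.assertEqual(self.result.runtime, self.runtime)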
def make_fitting_problem():
    """
    Helper function that returns a simple fitting problem
    """

    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    fname = os.path.join(bench_prob_dir, 'cubic.dat')

    fitting_problem = parse_problem_file(fname)
    fitting_problem.correct_data(True)
    return fitting_problem
Example #4
def make_fitting_problem():
    """
    Helper function that returns a simple fitting problem
    """

    bench_prob_dir = os.path.dirname(inspect.getfile(benchmark_problems))
    fname = os.path.join(bench_prob_dir, 'simple_tests',
                         'cubic.dat')

    fitting_problem = parse_problem_file(fname)
    return fitting_problem
def make_cost_func(file_name='cubic.dat'):
    """
    Helper function that returns a cost function for a simple fitting problem
    """
    options = Options()

    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    fname = os.path.join(bench_prob_dir, file_name)

    fitting_problem = parse_problem_file(fname, options)
    fitting_problem.correct_data()
    cost_func = WeightedNLLSCostFunc(fitting_problem)
    return cost_func
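# A minimal usage sketch for the helper above, assuming it is exercised from
# a unittest.TestCase. The attribute cost_func.problem and the problem name
# 'cubic' are assumptions inferred from the other snippets in this listing,
# not confirmed FitBenchmarking API.
import unittest


class TestMakeCostFunc(unittest.TestCase):

    def test_returns_weighted_nlls_cost_func(self):
        cost_func = make_cost_func('cubic.dat')
        # The helper wraps the parsed problem in a WeightedNLLSCostFunc
        self.assertIsInstance(cost_func, WeightedNLLSCostFunc)
        self.assertEqual(cost_func.problem.name, 'cubic')


if __name__ == '__main__':
    unittest.main()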
Example #6
def fitbenchmark_group(group_name, options, data_dir):
    """
    Gather the user input and list of paths. Call benchmarking on these.

    :param group_name: is the name (label) for a group. E.g. the name for
                       the group of problems in "NIST/low_difficulty" may be
                       picked to be NIST_low_difficulty
    :type group_name: str
    :param options: options object containing the software used in fitting
                    the problem, the list of minimizers and the location
                    of the JSON file containing the minimizers
    :type options: fitbenchmarking.utils.options.Options
    :param data_dir: full path of a directory that holds a group of problem
                     definition files
    :type data_dir: str

    :return: prob_results, the array of fitting results for the problem group
    :rtype: list
    """
    grabbed_output = output_grabber.OutputGrabber()

    # Extract problem definitions
    problem_group = misc.get_problem_files(data_dir)

    results = []
    template_prob_name = " Running data from: {}"
    for i, p in enumerate(problem_group):
        with grabbed_output:
            parsed_problem = parse_problem_file(p)
            parsed_problem.correct_data(options.use_errors)

        decorator = '#' * (len(template_prob_name) + len(parsed_problem.name) +
                           4)
        tmp_prob_name = template_prob_name.format(parsed_problem.name)
        print("\n{0}\n{1} {2}/{3}\n{0}\n".format(decorator, tmp_prob_name,
                                                 i + 1, len(problem_group)))

        problem_results = fitbm_one_prob(problem=parsed_problem,
                                         options=options)

        # Convert from list of dict to list of list and store
        for r in problem_results:
            tmp_result = []
            for s in options.software:
                tmp_result.extend(r[s])
            results.append(tmp_result)

    return results
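# A hedged sketch of driving fitbenchmark_group directly. The Options import
# follows the :type: hint in the docstring above; the data directory and the
# group label are illustrative assumptions, not paths shipped with this code.
from fitbenchmarking.utils.options import Options

options = Options()
data_dir = "examples/benchmark_problems/NIST/low_difficulty"  # assumed path
results = fitbenchmark_group(group_name="NIST_low_difficulty",
                             options=options,
                             data_dir=data_dir)
print("collected {} result rows".format(len(results)))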
Example #7
def fitbenchmark_group(group_name, options, data_dir):
    """
    Gather the user input and list of paths. Call benchmarking on these.

    :param group_name: is the name (label) for a group. E.g. the name for
                       the group of problems in "NIST/low_difficulty" may be
                       picked to be NIST_low_difficulty
    :type group_name: str
    :param options: options object containing the software used in fitting
                    the problem, the list of minimizers and the location
                    of the JSON file containing the minimizers
    :type options: fitbenchmarking.utils.options.Options
    :param data_dir: full path of a directory that holds a group of problem
                     definition files
    :type data_dir: str

    :returns: prob_results, the array of fitting results for the problem group
    :rtype: list
    """

    # Create results directory
    results_dir = create_dirs.results(options.results_dir)
    group_results_dir = create_dirs.group_results(results_dir, group_name)

    # Extract problem definitions
    problem_group = misc.get_problem_files(data_dir)

    results = []
    for p in problem_group:
        parsed_problem = parse_problem_file(p)
        problem_results = fitbm_one_prob(problem=parsed_problem,
                                         options=options,
                                         directory=group_results_dir)

        # Convert from list of dict to list of list and store
        for r in problem_results:
            tmp_result = []
            for s in options.software:
                tmp_result.extend(r[s])
            results.append(tmp_result)

    return results
def loop_over_benchmark_problems(problem_group, options):
    """
    Loops over benchmark problems

    :param problem_group: locations of the benchmark problem files
    :type problem_group: list
    :param options: FitBenchmarking options for current run
    :type options: fitbenchmarking.utils.options.Options

    :return: prob_results array of fitting results for the problem group,
             list of failed problems, dictionary of unselected minimizers,
             dictionary of minimizers, and the rst description of the cost
             function taken from its docstring
    :rtype: tuple(list, list, dict, dict, str)
    """
    grabbed_output = output_grabber.OutputGrabber(options)
    results = []
    failed_problems = []
    for i, p in enumerate(problem_group):
        problem_passed = True
        info_str = " Running data from: {} {}/{}".format(
            os.path.basename(p), i + 1, len(problem_group))
        LOGGER.info('\n' + '#' * (len(info_str) + 1))
        LOGGER.info(info_str)
        LOGGER.info('#' * (len(info_str) + 1))
        try:
            with grabbed_output:
                parsed_problem = parse_problem_file(p, options)
                parsed_problem.correct_data()
        except Exception as e:
            info_str = " Could not run data from: {} {}/{}".format(
                p, i + 1, len(problem_group))
            # Log which problem failed as well as the exception itself
            LOGGER.warning(info_str)
            LOGGER.warning(e)
            problem_passed = False

        if problem_passed:
            ##############################
            # Loops over starting values #
            ##############################
            cost_func_cls = create_cost_func(options.cost_func_type)
            cost_func = cost_func_cls(parsed_problem)
            cost_func_description = cost_func.__doc__
            problem_results, problem_fails, \
                unselected_minimzers, minimizer_dict = \
                loop_over_starting_values(
                    cost_func, options, grabbed_output)
            results.extend(problem_results)
            failed_problems.extend(problem_fails)

    # If the results are an empty list then every minimizer raised an
    # exception, and the tables would produce errors if they were generated.
    if results == []:
        message = "The user chosen options and/or problem setup resulted in " \
                  "all minimizers and/or parsers raising an exception. " \
                  "This is likely due to the way `algorithm_type` was set " \
                  "in the options or the selected problem set requires " \
                  "additional software to be installed. Please review your " \
                  "options setup and/or problem set then re-run " \
                  "FitBenmarking."
        raise NoResultsError(message)

    return results, failed_problems, unselected_minimzers, \
        minimizer_dict, cost_func_description
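# A minimal sketch of calling loop_over_benchmark_problems, assuming the same
# misc.get_problem_files helper used in the earlier examples gathers the
# problem-definition files; the data directory is an assumed example path.
from fitbenchmarking.utils.options import Options

options = Options()
problem_group = misc.get_problem_files(
    "examples/benchmark_problems/NIST/low_difficulty")
(results, failed_problems, unselected_minimzers,
 minimizer_dict, cost_func_description) = loop_over_benchmark_problems(
    problem_group, options)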