def setUp(self):
    """
    Setting up a fitting result and plot for the plotting tests
    """
    self.opts = Options()
    self.opts.use_errors = True

    self.prob = FittingProblem(self.opts)
    self.prob.data_x = np.array([1, 2, 4, 3, 5])
    self.prob.sorted_index = np.array([0, 1, 3, 2, 4])
    self.prob.data_y = np.array([4, 3, 5, 2, 1])
    self.prob.data_e = np.array([0.5, 0.2, 0.3, 0.1, 0.4])
    self.prob.starting_values = [{'x': 1, 'y': 2}]
    self.prob.name = 'full name'
    self.prob.eval_model = lambda y, x: y[0] * x + y[1]

    cost_func = NLLSCostFunc(self.prob)

    jac = Scipy(cost_func)
    jac.method = "2-point"

    self.fr = FittingResult(options=self.opts,
                            cost_func=cost_func,
                            jac=jac,
                            chi_sq=1.0,
                            initial_params=[1.8],
                            params=[1.2],
                            runtime=2.0,
                            minimizer='fit',
                            error_flag=1)

    self.opts = Options()
    self.opts.use_errors = True

    self.dir = TemporaryDirectory()
    self.plot = plots.Plot(best_result=self.fr,
                           options=self.opts,
                           figures_dir=self.dir.name)

def generate_mock_results():
    """
    Generates results to test against

    :return: list of results grouped by problem, the options used, and the
             expected minimum chi_sq and runtime for each problem
    :rtype: tuple(list of list of FittingResult, Options object,
                  list of float, list of float)
    """
    software = 'scipy_ls'
    options = Options()
    options.software = [software]
    num_min = len(options.minimizers[options.software[0]])
    data_x = np.array([[1, 4, 5], [2, 1, 5]])
    data_y = np.array([[1, 2, 1], [2, 2, 2]])
    data_e = np.array([[1, 1, 1], [1, 2, 1]])
    func = [fitting_function_1, fitting_function_2]
    problems = [FittingProblem(options), FittingProblem(options)]

    params_in = [[[.3, .11], [.04, 2], [3, 1], [5, 0]],
                 [[4, 2], [3, .006], [.3, 10], [9, 0]]]
    starting_values = [{"a": .3, "b": .11}, {"a": 0, "b": 0}]
    error_in = [[1, 0, 2, 0],
                [0, 1, 3, 1]]
    link_in = [['link1', 'link2', 'link3', 'link4'],
               ['link5', 'link6', 'link7', 'link8']]
    min_chi_sq = [1, 1]
    acc_in = [[1, 5, 2, 1.54],
              [7, 3, 5, 1]]
    min_runtime = [4.2e-5, 5.0e-14]
    runtime_in = [[1e-2, 2.2e-3, 4.2e-5, 9.8e-1],
                  [3.0e-10, 5.0e-14, 1e-7, 4.3e-12]]

    results_out = []
    for i, p in enumerate(problems):
        p.data_x = data_x[i]
        p.data_y = data_y[i]
        p.data_e = data_e[i]
        p.function = func[i]
        p.name = "prob_{}".format(i)
        results = []
        for j in range(num_min):
            p.starting_values = starting_values
            cost_func = NLLSCostFunc(p)
            jac = Scipy(cost_func)
            jac.method = '2-point'
            r = FittingResult(options=options,
                              cost_func=cost_func,
                              jac=jac,
                              initial_params=starting_values,
                              params=params_in[i][j])
            r.chi_sq = acc_in[i][j]
            r.runtime = runtime_in[i][j]
            r.error_flag = error_in[i][j]
            r.support_page_link = link_in[i][j]
            r.minimizer = options.minimizers[software][j]
            results.append(r)
        results_out.append(results)
    return results_out, options, min_chi_sq, min_runtime

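# A minimal sketch (hypothetical, not part of the original suite) of how the
# helper above might be consumed. It assumes the default 'scipy_ls' minimizer
# list has four entries, matching the four values per problem in acc_in and
# runtime_in.
def test_generate_mock_results_sketch():
    results, options, min_chi_sq, min_runtime = generate_mock_results()
    for prob_results, chi_sq, runtime in zip(results, min_chi_sq,
                                             min_runtime):
        # The expected minima should match the smallest values recorded
        # across the minimizers for each problem.
        assert min(r.chi_sq for r in prob_results) == chi_sq
        assert min(r.runtime for r in prob_results) == runtime
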
def setUp(self):
    """
    Setting up tests
    """
    options = Options()
    options.cost_func_type = "nlls"
    self.fitting_problem = FittingProblem(options)
    self.cost_func = NLLSCostFunc(self.fitting_problem)
    self.jacobian = Jacobian(self.cost_func)

def test_create_config(self):
    """
    Test that the created config object contains all valid sections.
    """
    options = Options(file_name=self.options_file)
    config = options._create_config()
    for section in options.VALID_SECTIONS:
        self.assertTrue(config.has_section(section))

def test_write(self):
    """
    Test that the options writer works.
    """
    options = Options(file_name=self.options_file)
    new_file_name = 'copy_of_{}'.format(self.options_file)

    options.write(new_file_name)
    new_options = Options(new_file_name)

    os.remove(new_file_name)

    self.assertDictEqual(options.__dict__, new_options.__dict__)

def setUp(self):
    """
    Setting up tests
    """
    options = Options()
    options.cost_func_type = "nlls"
    self.fitting_problem = FittingProblem(options)
    self.fitting_problem.function = f
    self.fitting_problem.jacobian = J
    self.fitting_problem.data_x = np.array([1, 2, 3, 4, 5])
    self.fitting_problem.data_y = np.array([1, 2, 4, 8, 16])
    self.cost_func = NLLSCostFunc(self.fitting_problem)
    self.params = [6, 0.1]
    self.actual = J(x=self.fitting_problem.data_x, p=self.params)

def make_cost_function(file_name='cubic.dat', minimizers=None):
    """
    Helper function that returns a cost function wrapping a simple
    fitting problem
    """
    options = Options()
    if minimizers:
        options.minimizers = minimizers

    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    fname = os.path.join(bench_prob_dir, file_name)
    fitting_problem = parse_problem_file(fname, options)
    fitting_problem.correct_data()
    cost_func = NLLSCostFunc(fitting_problem)
    return cost_func

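# A brief usage sketch (hypothetical, not part of the original suite): the
# helper builds an NLLSCostFunc around the parsed problem, so the problem is
# reachable through the cost function's `problem` attribute.
def test_make_cost_function_sketch():
    cost_func = make_cost_function('cubic.dat')
    assert isinstance(cost_func, NLLSCostFunc)
    # correct_data() has run, so the parsed data should be populated
    assert cost_func.problem.data_x is not None
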
def setUp(self):
    """
    Setting up FitBenchmarking results object
    """
    self.options = Options()
    mock_problems_dir = os.path.dirname(inspect.getfile(mock_problems))
    problem_dir = os.path.join(mock_problems_dir, "cubic.dat")
    self.problem = parse_problem_file(problem_dir, self.options)
    self.problem.correct_data()

    self.chi_sq = 10
    self.minimizer = "test_minimizer"
    self.runtime = 0.01
    self.params = np.array([1, 3, 4, 4])
    self.initial_params = np.array([0, 0, 0, 0])
    self.cost_func = NLLSCostFunc(self.problem)
    self.jac = Scipy(self.cost_func)
    self.jac.method = "2-point"
    self.result = FittingResult(options=self.options,
                                cost_func=self.cost_func,
                                jac=self.jac,
                                chi_sq=self.chi_sq,
                                runtime=self.runtime,
                                minimizer=self.minimizer,
                                initial_params=self.initial_params,
                                params=self.params,
                                error_flag=0)

    self.min_chi_sq = 0.1
    self.result.min_chi_sq = self.min_chi_sq
    self.min_runtime = 1
    self.result.min_runtime = self.min_runtime

def setUp(self):
    """
    Setting up paths and results folders
    """
    self.options = Options()
    test_path = os.path.dirname(os.path.realpath(__file__))
    self.dirname = os.path.join(test_path, 'fitbenchmarking_results')
    os.mkdir(self.dirname)

def setUp(self):
    """
    Initializes options class with defaults
    """
    self.options = Options()
    software = ['bumps', 'dfo', 'gsl', 'mantid', 'minuit',
                'ralfit', 'scipy', 'scipy_ls']
    self.options.software = software

def setUp(self):
    """
    Setting up nonlinear least squares cost function tests
    """
    self.options = Options()
    fitting_problem = FittingProblem(self.options)
    self.cost_function = NLLSCostFunc(fitting_problem)
    fitting_problem.function = lambda x, p1: x + p1
    self.x_val = np.array([1, 8, 11])
    self.y_val = np.array([6, 10, 20])

def test_get_js(self):
    options = Options()
    test_dir = os.path.join(options.results_dir, "foo")

    expected_js_dir = os.path.join("..", "js")
    expected_mathjax_js = os.path.join(expected_js_dir, "tex-mml-chtml.js")

    js = get_js(options, test_dir)

    self.assertEqual(js['mathjax'], expected_mathjax_js)

def shared_invalid(self, options_set, software):
    """
    Shared test to check that the minimizer option set is invalid

    :param options_set: option set to be tested
    :type options_set: list
    :param software: software to be tested
    :type software: str
    """
    opts_file = self.generate_user_ini_file(options_set, software)
    with self.assertRaises(exceptions.OptionsError):
        Options(opts_file)

def shared_invalid(self, opt_name, config_str):
    """
    Shared test to check that the plotting option set is invalid

    :param opt_name: name of option to be set
    :type opt_name: str
    :param config_str: section of an ini file which sets the option
    :type config_str: str
    """
    opts_file = self.generate_user_ini_file(opt_name, config_str)
    with self.assertRaises(exceptions.OptionsError):
        Options(opts_file)

def setUp(self):
    """
    Setting up problem for tests
    """
    self.cost_func = make_cost_function()
    self.problem = self.cost_func.problem
    self.options = Options()
    self.options.software = ["scipy"]
    self.scipy_len = len(self.options.minimizers["scipy"])
    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    self.default_parsers_dir = os.path.join(bench_prob_dir,
                                            "default_parsers")
    self.all_minimzers = copy.copy(self.options.minimizers)

def test_user_section_valid(self):
    """
    Tests that a config file with valid user-defined sections is accepted.
    """
    config_str = """
[MINIMIZERS]

[FITTING]
"""
    opts_file = 'test_options_tests_valid.ini'
    with open(opts_file, 'w') as f:
        f.write(config_str)
    Options(opts_file)
    os.remove(opts_file)

def shared_valid(self, opt_name, options_set, config_str):
    """
    Shared test to check that the plotting option set is valid

    :param opt_name: name of option to be set
    :type opt_name: str
    :param options_set: option set to be tested
    :type options_set: list
    :param config_str: section of an ini file which sets the option
    :type config_str: str
    """
    opts_file = self.generate_user_ini_file(opt_name, config_str)
    options = Options(opts_file)
    actual = getattr(options, opt_name)
    self.assertEqual(options_set, actual)

def shared_valid(self, options_set, software):
    """
    Shared test to check that the minimizer option set is valid

    :param options_set: option set to be tested
    :type options_set: list
    :param software: software to be tested
    :type software: str
    """
    opts_file = self.generate_user_ini_file(options_set, software)
    options = Options(opts_file)
    if software not in options.software:
        options.software.append(software)
    actual = options.minimizers[software]
    self.assertEqual(options_set, actual)

def test_user_section_invalid(self):
    """
    Tests that an invalid user-defined section name raises an OptionsError.
    """
    config_str = """
[MINIMIZERS SECTION]

[FITTING]
"""
    opts_file = 'test_options_tests_invalid.ini'
    with open(opts_file, 'w') as f:
        f.write(config_str)
    with self.assertRaises(exceptions.OptionsError):
        Options(opts_file)
    os.remove(opts_file)

def generate_mock_results(self):
    """
    Generates results to test against

    :return: A list of results objects along with expected values for
             normalised accuracy and runtimes
    :rtype: tuple(list of list of FittingResult,
                  list of list of float,
                  list of list of float)
    """
    self.num_problems = 4
    self.num_minimizers = 2
    results = []
    options = Options()
    problem = FittingProblem(options)
    problem.starting_values = [{'a': 1, 'b': 2, 'c': 3}]

    acc_in = [[1, 5],
              [7, 3],
              [10, 8],
              [2, 3]]

    runtime_in = [[float('Inf'), 2.2e-3],
                  [3.0e-10, 5.0e-14],
                  [6.9e-7, 4.3e-5],
                  [1.6e-13, 1.8e-13]]

    acc_expected = []
    runtime_expected = []
    for i in range(self.num_problems):
        acc_results = acc_in[i][:]
        acc_expected.append(np.array(acc_results) / np.min(acc_results))

        runtime_results = runtime_in[i][:]
        runtime_expected.append(
            np.array(runtime_results) / np.min(runtime_results))

        prob_results = []
        cost_func = NLLSCostFunc(problem)
        jac = Scipy(cost_func)
        jac.method = "2-point"
        for j in range(self.num_minimizers):
            minimizer = 'min_{}'.format(j)
            prob_results.append(FittingResult(options=options,
                                              cost_func=cost_func,
                                              jac=jac,
                                              initial_params=[1, 2, 3],
                                              params=[1, 2, 3],
                                              chi_sq=acc_results[j],
                                              runtime=runtime_results[j],
                                              minimizer=minimizer))
        results.append(prob_results)
    return results, acc_expected, runtime_expected

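# A worked check of the normalisation above (hypothetical, not part of the
# original suite; assumes it sits in the same test class as
# generate_mock_results): with acc_in[1] == [7, 3], dividing by the row
# minimum gives normalised accuracies of [7/3, 1].
def test_normalised_accuracy_sketch(self):
    _, acc_expected, _ = self.generate_mock_results()
    np.testing.assert_allclose(acc_expected[1], [7 / 3, 1.0])
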
def test_get_css(self):
    options = Options()
    test_dir = os.path.join(options.results_dir, "foo")

    expected_css_dir = os.path.join("..", "css")
    expected_main_css = os.path.join(expected_css_dir, "main_style.css")
    expected_table_css = os.path.join(expected_css_dir, "table_style.css")
    expected_custom_css = os.path.join(expected_css_dir, "custom_style.css")

    css = get_css(options, test_dir)

    self.assertEqual(css['main'], expected_main_css)
    self.assertEqual(css['table'], expected_table_css)
    self.assertEqual(css['custom'], expected_custom_css)

def test_options_reset(self):
    """
    Tests options reset
    """
    options = Options()
    options_save = copy.copy(options)
    options.minimizers = {}
    options.software = ['updated_software1', 'updated_software2']

    options.reset()
    self.assertDictEqual(options.__dict__, options_save.__dict__)

def test_from_file(self):
    options = Options(file_name=self.options_file)

    for key in self.options['MINIMIZERS']:
        self.assertEqual(self.options['MINIMIZERS'][key],
                         options.minimizers[key])

    fitting_opts = self.options['FITTING']
    self.assertEqual(fitting_opts['software'], options.software)

    plotting_opts = self.options['PLOTTING']
    self.assertEqual(plotting_opts['colour_scale'], options.colour_scale)
    self.assertEqual(plotting_opts['comparison_mode'],
                     options.comparison_mode)
    self.assertEqual(plotting_opts['table_type'], options.table_type)
    # Use endswith as options creates an absolute path rather than a
    # relative one.
    self.assertTrue(
        options.results_dir.endswith(plotting_opts['results_dir']))

def test_imports(self):
    """
    Test that the factory returns the correct class for inputs
    """
    self.options = Options()

    valid = ['scipy', 'analytic']
    invalid = ['numpy', 'random_jac']

    for jac_method in valid:
        jac = create_jacobian(jac_method)
        self.assertTrue(jac.__name__.lower().startswith(jac_method))

    for jac_method in invalid:
        self.assertRaises(exceptions.NoJacobianError,
                          create_jacobian,
                          jac_method)

def test_imports(self):
    """
    Test that the factory returns the correct class for inputs
    """
    self.options = Options()

    valid = ['weighted_nlls', 'nlls', 'hellinger_nlls']
    invalid = ['normal']

    for cost_func_type in valid:
        cost_func = create_cost_func(cost_func_type)
        self.assertTrue(
            cost_func.__name__.lower().startswith(
                cost_func_type.replace("_", "")))

    for cost_func_type in invalid:
        self.assertRaises(exceptions.CostFuncError,
                          create_cost_func,
                          cost_func_type)

def test_write(self):
    """
    Test that the options writer works.
    """
    options = Options(file_name=self.options_file)
    new_file_name = 'copy_of_{}'.format(self.options_file)

    options.write(new_file_name)
    new_options = Options(new_file_name)

    os.remove(new_file_name)

    assert options.stored_file_name == self.options_file
    assert new_options.stored_file_name == new_file_name

    # Overwrite file names
    options.stored_file_name = ""
    new_options.stored_file_name = ""

    self.assertDictEqual(options.__dict__, new_options.__dict__)

def setUp(self):
    """
    Setting up paths and results folders
    """
    self.options = Options()
    test_path = os.path.dirname(os.path.realpath(__file__))
    self.group_dir = os.path.join(test_path, 'fitbenchmarking_results')
    os.mkdir(self.group_dir)

    self.table_names = {"compare": "compare_table_name.",
                        "runtime": "runtime_table_name."}
    self.table_descriptions = {"compare": "compare table descriptions",
                               "runtime": "runtime table descriptions",
                               "both": "both table descriptions"}
    self.group_name = "random_name"
    self.cost_func_description = "rst_description"

def test_write_to_stream(self):
    """
    Test that the stream options writer works.
    """
    options = Options(file_name=self.options_file)
    new_file_name = 'copy_of_{}'.format(self.options_file)

    # Open stream, write to it and close it
    f = open(new_file_name, 'w')
    options.write_to_stream(f)
    f.close()

    new_options = Options(new_file_name)

    assert options.stored_file_name == self.options_file
    assert new_options.stored_file_name == new_file_name

    # Overwrite file names
    options.stored_file_name = ""
    new_options.stored_file_name = ""

    os.remove(new_file_name)

    self.assertDictEqual(options.__dict__, new_options.__dict__)

def setUpClass(cls):
    """
    Create an options file, run it, and get the results.
    """
    # Get defaults which should have minimizers for every software
    opts = Options()
    # Use only the first minimizer for each software
    opts.minimizers = {k: [v[0]] for k, v in opts.minimizers.items()}
    # Get a list of all softwares
    # (sorted to ensure it is the same order as expected)
    opts.software = sorted(opts.minimizers.keys())
    opts.results_dir = os.path.join(os.path.dirname(__file__), 'results')

    opt_file = tempfile.NamedTemporaryFile(suffix='.ini')
    opts.write(opt_file.name)

    problem = os.path.abspath(
        os.path.join(os.path.dirname(__file__),
                     os.pardir,
                     'mock_problems',
                     'all_parsers_set'))

    run([problem], options_file=opt_file.name)

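# A hypothetical companion check (not part of the original suite), assuming
# run() creates the results directory configured in setUpClass above.
def test_results_dir_created_sketch(self):
    results_dir = os.path.join(os.path.dirname(__file__), 'results')
    self.assertTrue(os.path.isdir(results_dir))
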
def setUp(self):
    """
    Setting up problem for tests
    """
    self.cost_func = make_cost_function()
    self.problem = self.cost_func.problem
    self.options = Options()
    self.options.software = ["scipy"]
    self.scipy_len = len(self.options.minimizers["scipy"])
    bench_prob_dir = os.path.dirname(inspect.getfile(mock_problems))
    self.default_parsers_dir = os.path.join(bench_prob_dir,
                                            "default_parsers")
    self.count = 0
    self.result_args = {'options': self.options,
                        'cost_func': self.cost_func,
                        'jac': 'jac',
                        'initial_params': self.problem.starting_values[0],
                        'params': [],
                        'chi_sq': 1}
    self.list_results = [fitbm_result.FittingResult(**self.result_args)
                         for _ in range(self.scipy_len)]
    self.individual_problem_results = [self.list_results,
                                       self.list_results]