    def test_unknown_parser(self):
        """
        Tests that the parser factory raises a NoParserError when a parser
        is requested for an unrecognised file type.
        """
        filename = os.path.join(os.getcwd(), 'this_is_a_fake_parser.txt')
        with open(filename, 'w') as f:
            f.write('this_is_a_fake_parser')

        factory = ParserFactory()
        with self.assertRaises(exceptions.NoParserError):
            _ = factory.create_parser(filename)

        os.remove(filename)
    def test_factory(self, file_format, test_file):
        """
        Tests that the factory selects the correct parser.

        :param file_format: The name of the file format
        :type file_format: string
        :param test_file: The path to the test file
        :type test_file: string
        """
        parser = ParserFactory.create_parser(test_file)
        assert parser.__name__.lower().startswith(file_format.lower()), \
            'Factory failed to get associated parser for {}'.format(test_file)
    def test_jacobian_evaluation(self, file_format, evaluations_file):
        """
        Tests that the Jacobian evaluation is consistent with what would be
        expected by comparing to some precomputed values with fixed params
        and x values.

        :param file_format: The name of the file format
        :type file_format: string
        :param evaluations_file: Path to a json file containing tests and
                                 results in the following format:
                                 {"test_file1": [[x1, params1, results1],
                                                 [x2, params2, results2],
                                                 ...],
                                  "test_file2": ...}
        :type evaluations_file: string
        """
        # Note that this test is optional, so it will only run if the
        # file_format is added to the JACOBIAN_ENABLED_PARSERS list.
        if file_format in JACOBIAN_ENABLED_PARSERS:
            message = 'No function evaluations provided to test ' \
                      'against for {}'.format(file_format)
            assert evaluations_file is not None, message

            with open(evaluations_file, 'r') as ef:
                results = load(ef)

            format_dir = os.path.dirname(evaluations_file)
            for f, tests in results.items():
                f = os.path.join(format_dir, f)

                parser = ParserFactory.create_parser(f)
                with parser(f, OPTIONS) as p:
                    fitting_problem = p.parse()

                for r in tests:
                    x = np.array(r[0])
                    actual = fitting_problem.jacobian(x, r[1])
                    assert np.isclose(actual, r[2]).all()
    def test_function_evaluation(self, file_format, evaluations_file):
        """
        Tests that the function evaluation is consistent with what would be
        expected by comparing to some precomputed values with fixed params
        and x values.

        :param file_format: The name of the file format
        :type file_format: string
        :param evaluations_file: Path to a json file containing tests and
                                 results in the following format:
                                 {"test_file1": [[x1, params1, results1],
                                                 [x2, params2, results2],
                                                 ...],
                                  "test_file2": ...}
        :type evaluations_file: string
        """
        assert evaluations_file is not None, \
            'No function evaluations provided to test against for ' \
            '{}'.format(file_format)

        with open(evaluations_file, 'r') as ef:
            results = load(ef)

        format_dir = os.path.dirname(evaluations_file)
        for f, tests in results.items():
            f = os.path.join(format_dir, f)

            parser = ParserFactory.create_parser(f)
            with parser(f) as p:
                fitting_problem = p.parse()

            for r in tests:
                for i in range(len(fitting_problem.functions)):
                    actual = fitting_problem.eval_f(x=np.array(r[0]),
                                                    params=r[1],
                                                    function_id=i)
                    assert np.isclose(actual, r[2]).all()
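    # A minimal sketch of the layout expected in an evaluations json file,
    # as described in the two docstrings above. The file names and numbers
    # below are hypothetical, not taken from the real test data:
    #
    #     {"basic.dat": [[[1.0, 2.0, 3.0], [0.5, 1.5], [0.1, 0.2, 0.3]],
    #                    [[1.0, 2.0, 3.0], [1.0, 1.0], [0.4, 0.5, 0.6]]],
    #      "other.dat": [[[0.0, 1.0], [2.0], [1.1, 1.2]]]}
    #
    # Each key is a problem file resolved relative to the directory of the
    # evaluations file, and each entry is [x values, parameter values,
    # expected evaluation results].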