def test_get_data(self):
    # without renaming, data should remain as is
    experiment = basico.get_data_from_experiment(0, rename_headers=False)
    self.assertEqual(
        ['# Time',
         'Values[F16BP_obs]',
         'Values[Glu_obs]',
         'Values[Pyr_obs]',
         '[Glc(ext)]_0',
         'Unnamed: 5'],
        list(experiment.columns))
    self.assertEqual((101, 6), experiment.shape)

    # data should be renamed as expected, and unassigned columns dropped
    experiment = basico.get_data_from_experiment(0, rename_headers=True)
    self.assertEqual(
        ['Time',
         '[Fru1,6-P2]',
         '[Glc(int)]',
         '[pyruvate]',
         '[Glc(ext)]_0'],
        list(experiment.columns))
    self.assertEqual((101, 5), experiment.shape)
    self.assertEqual(0, experiment.iloc[0, 0])
    self.assertEqual(2, experiment.iloc[100, 0])

    # now simulate
    exp, sim = basico.get_simulation_results()
    self.assertTrue(len(exp) == len(sim) == 2)

    # ensure that the two simulations are different
    self.assertGreater(
        float(((sim[0][['Fru1,6-P2']] - sim[1][['Fru1,6-P2']]) ** 2).sum()),
        10)
def simulate_without_noise(self):
    """Simulate the PEtab problem.

    This is an abstract method that should be implemented with a
    simulation package. Examples of this are referenced in the class
    docstring.

    :return: Simulated data, as a PEtab measurements table, which should
        be equivalent to replacing all values in the
        `petab.C.MEASUREMENT` column of the measurements table (of the
        PEtab problem supplied to the `__init__` method) with simulated
        values.
    :rtype: pandas.DataFrame
    """
    simulation_results = basico.get_simulation_results(
        values_only=True, solution=self.evaluate())
    return create_simulation_df(
        self.petab_problem.measurement_df, simulation_results)
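# A minimal usage sketch, assuming this method lives on a subclass of
# petab.simulate.Simulator (here called `BasicoSimulator`, a hypothetical
# name) and that a PEtab problem definition exists as 'problem.yaml'
# (an illustrative path, not part of basico):
#
#     import petab
#
#     petab_problem = petab.Problem.from_yaml('problem.yaml')
#     simulator = BasicoSimulator(petab_problem)
#     # returns a copy of the measurement table with the MEASUREMENT
#     # column replaced by simulated values
#     simulated_df = simulator.simulate_without_noise()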
def evaluate_model(test_model, evaluation=default_evaluation, temp_dir=None,
                   delete_temp_files=True, sim_dfs=None, sol_dfs=None,
                   temp_files=None):
    """Evaluates the given test model and updates it with the calculated
    metrics and estimated parameters.

    :param test_model: the model to test
    :type test_model: petab_select.Model
    :param evaluation: optional function to evaluate the test model with,
        defaults to :func:`.default_evaluation`
    :type evaluation: () -> pandas.DataFrame
    :param temp_dir: optional temp directory to store the files in
        (otherwise the os temp dir will be used)
    :type temp_dir: str or None
    :param delete_temp_files: boolean indicating whether temp files should
        be deleted
    :type delete_temp_files: bool
    :param sim_dfs: optional list, to which simulation data frames will be
        appended
    :type sim_dfs: list or None
    :param sol_dfs: optional list, to which found parameters will be
        appended
    :type sol_dfs: list or None
    :param temp_files: optional list, to which the filenames of temp files
        created during the run will be appended
    :type temp_files: list or None
    :return: COPASI objective value of the evaluation
    :rtype: float
    """
    # create petab problem
    pp = test_model.to_petab()['petab_problem']

    created_temp_dir = False
    if temp_dir is None:
        temp_dir = tempfile.mkdtemp()
        created_temp_dir = True

    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir, exist_ok=True)

    model_id = test_model.model_id
    files = core.write_problem_to(pp, temp_dir, model_id)

    # load into basico
    out_name = 'cps_{0}'.format(model_id)
    cps_file = os.path.join(temp_dir, out_name + '.cps')
    core.load_petab(files['problem'], temp_dir, out_name)

    files = list(files.values())
    files.append(cps_file)
    files = files + basico.get_experiment_filenames()

    # run parameter estimation
    sol = evaluation()
    if sol_dfs is not None:
        sol_dfs.append(sol)

    simulation_results = basico.get_simulation_results(values_only=True,
                                                       solution=sol)
    basico.prune_simulation_results(simulation_results)

    sim_df = basico.petab.create_simulation_df(
        pp.measurement_df, simulation_results)
    if sim_dfs is not None:
        sim_dfs.append(sim_df)

    # compute metrics
    llh = basico.petab.petab_llh(pp, sim_df)
    test_model.set_criterion(Criterion.LLH, llh)

    task = basico.get_current_model().getTask("Parameter Estimation")
    prob = task.getProblem()
    obj = prob.getSolutionValue()

    test_model.compute_criterion(Criterion.AIC)
    test_model.compute_criterion(Criterion.AICC)
    test_model.compute_criterion(Criterion.BIC)

    # update estimated parameters
    for param_id in test_model.parameters:
        value = test_model.parameters[param_id]
        if str(value) != 'estimate':
            # we only want to include what we estimated
            continue

        name = 'Values[{0}]'.format(param_id)
        if name in sol.index:
            test_model.estimated_parameters[param_id] = sol.loc[name].sol

    # write result for testing
    result_file = os.path.join(temp_dir, 'result_{0}.yaml'.format(model_id))
    files.append(result_file)
    test_model.to_yaml(result_file)

    # delete temp files if needed
    if delete_temp_files:
        for file in files:
            os.remove(file)
        if created_temp_dir:
            # since we created the temp dir, get rid of it as well
            os.rmdir(temp_dir)
    elif temp_files is not None:
        # extend the caller's list in place, so the filenames remain
        # visible after this function returns
        temp_files.extend(files)

    return obj
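# A minimal usage sketch (names are illustrative, not part of basico):
# evaluate a single petab_select.Model, obtained from a petab_select
# candidate search, and collect the simulation and solution frames.
#
#     sim_dfs, sol_dfs = [], []
#     obj = evaluate_model(test_model,
#                          temp_dir='results',       # keep files in ./results
#                          delete_temp_files=False,  # inspect them afterwards
#                          sim_dfs=sim_dfs, sol_dfs=sol_dfs)
#     # obj is the COPASI objective value; test_model now carries the
#     # LLH, AIC, AICC and BIC criteria and its estimated parameters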
def test_get_plotting_data(self):
    exp, sim = basico.get_simulation_results()
    self.assertTrue(len(exp) == len(sim))
    exp, sim = basico.get_simulation_results(values_only=True)
    self.assertListEqual(exp[0].Time.to_list(),
                         sim[0].reset_index().Time.to_list())
def test_current_solution(self):
    result = basico.get_simulation_results()
    self.assertTrue(len(result) == 2)
    # experiment and simulation lists should have the same number of entries
    self.assertTrue(len(result[0]) == len(result[1]))
def test_get_plotting_data(self):
    exp, sim = basico.get_simulation_results()
    self.assertTrue(len(exp) == len(sim))
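# A minimal sketch of the setup these tests exercise, assuming a COPASI
# file with experiments mapped to a parameter estimation task (the
# filename is illustrative):
#
#     import basico
#
#     basico.load_model('model_with_experiments.cps')
#     basico.run_parameter_estimation()  # optional: fit before comparing
#     # exp holds one data frame per experiment, sim the matching
#     # simulated trajectories
#     exp, sim = basico.get_simulation_results()
#     for exp_df, sim_df in zip(exp, sim):
#         print(exp_df.shape, sim_df.shape)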