def test_likelihoods(model_test_likelihoods):
    """Test the custom noise distributions used to define cost functions."""
    model = model_test_likelihoods.getModel()
    model.setTimepoints(np.linspace(0, 60, 60))
    solver = model.getSolver()
    solver.setSensitivityOrder(amici.SensitivityOrder.first)

    # run model once to create an edata
    rdata = amici.runAmiciSimulation(model, solver)
    sigmas = rdata['y'].max(axis=0) * 0.05
    edata = amici.ExpData(rdata, sigmas, [])

    # just make all observables positive since some are logarithmic
    while min(edata.getObservedData()) < 0:
        edata = amici.ExpData(rdata, sigmas, [])

    # and now run for real and also compute likelihood values
    rdata = amici.runAmiciSimulations(model, solver, [edata])[0]

    # check if the values make overall sense
    assert np.isfinite(rdata['llh'])
    assert np.all(np.isfinite(rdata['sllh']))
    assert np.any(rdata['sllh'])

    rdata_df = amici.getSimulationObservablesAsDataFrame(
        model, edata, rdata, by_id=True)
    edata_df = amici.getDataObservablesAsDataFrame(
        model, edata, by_id=True)

    # check correct likelihood value
    llh_exp = -sum([
        normal_nllh(edata_df['o1'], rdata_df['o1'], sigmas[0]),
        log_normal_nllh(edata_df['o2'], rdata_df['o2'], sigmas[1]),
        log10_normal_nllh(edata_df['o3'], rdata_df['o3'], sigmas[2]),
        laplace_nllh(edata_df['o4'], rdata_df['o4'], sigmas[3]),
        log_laplace_nllh(edata_df['o5'], rdata_df['o5'], sigmas[4]),
        log10_laplace_nllh(edata_df['o6'], rdata_df['o6'], sigmas[5]),
        custom_nllh(edata_df['o7'], rdata_df['o7'], sigmas[6]),
    ])
    assert np.isclose(rdata['llh'], llh_exp)

    # check gradient
    for sensi_method in [
        amici.SensitivityMethod.forward,
        amici.SensitivityMethod.adjoint,
    ]:
        solver = model.getSolver()
        solver.setSensitivityMethod(sensi_method)
        solver.setSensitivityOrder(amici.SensitivityOrder.first)
        solver.setRelativeTolerance(1e-12)
        solver.setAbsoluteTolerance(1e-12)
        check_derivatives(model, solver, edata, assert_fun,
                          atol=1e-2, rtol=1e-2, epsilon=1e-5,
                          check_least_squares=False)

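The `*_nllh` helpers used to compute `llh_exp` are not shown in this section. Below is a minimal sketch of the assumed definitions for the Gaussian, log-normal, and Laplace cases (`m`: measurements, `y`: simulated observables, `sigma`: noise parameter); the log10 and custom variants are assumed to follow analogously. These are illustrative assumptions, not necessarily the test suite's exact implementations.

import numpy as np


def normal_nllh(m, y, sigma):
    # Gaussian noise model: 0.5 * sum(log(2*pi*sigma^2) + ((m - y)/sigma)^2)
    return np.sum(0.5 * (np.log(2 * np.pi * sigma**2)
                         + ((m - y) / sigma)**2))


def log_normal_nllh(m, y, sigma):
    # log-normal noise model: Gaussian in log-space,
    # including the log(m) Jacobian term
    return np.sum(0.5 * (np.log(2 * np.pi * sigma**2 * m**2)
                         + ((np.log(m) - np.log(y)) / sigma)**2))


def laplace_nllh(m, y, sigma):
    # Laplace noise model: sum(log(2*sigma) + |m - y| / sigma)
    return np.sum(np.log(2 * sigma) + np.abs(m - y) / sigma)
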
def _check_parameter_mapping_ok(mapping_par_opt_to_par_sim, par_sim_ids,
                                model, edatas):
    """
    Check whether there are suspicious parameter mappings and/or data points.

    Currently checks whether nan values in the parameter mapping table
    correspond to all-nan columns in the edatas, i.e. missing data points.
    """
    # regular expression for noise and observable parameters
    pattern = "(noise|observable)Parameter[0-9]+_"
    rex = re.compile(pattern)

    # prepare output
    msg_data_notnan = ""
    msg_data_nan = ""

    # iterate over conditions
    for i_condition, (mapping_for_condition, edata_for_condition) in \
            enumerate(zip(mapping_par_opt_to_par_sim, edatas)):
        # turn amici.ExpData into pd.DataFrame
        df = amici.getDataObservablesAsDataFrame(
            model, edata_for_condition, by_id=True)
        # iterate over simulation parameter indices and the mapped
        # optimization parameters
        for i_sim_id, par_sim_id in enumerate(par_sim_ids):
            # only continue if sim par is a noise or observable parameter
            if not rex.match(par_sim_id):
                continue
            # extract observable id
            obs_id = re.sub(pattern, "", par_sim_id)
            # extract mapped optimization parameter
            mapped_par = mapping_for_condition[i_sim_id]

            # check if opt par is nan, but not all corresponding data points
            if not isinstance(mapped_par, str) and np.isnan(mapped_par) \
                    and not df["observable_" + obs_id].isnull().all():
                msg_data_notnan += f"({i_condition, par_sim_id, obs_id})\n"

            # check if opt par is a string, but all corresponding data points
            # are nan
            if isinstance(mapped_par, str) \
                    and df["observable_" + obs_id].isnull().all():
                msg_data_nan += f"({i_condition, par_sim_id, obs_id})\n"

    if not len(msg_data_notnan) + len(msg_data_nan):
        return

    logger.warning(
        "There are suspicious combinations of parameters and data points:\n"
        "For the following combinations of "
        "(condition_ix, par_sim_id, obs_id), there are real-valued data "
        "points, but unmapped scaling or noise parameters:\n"
        + msg_data_notnan + "\n"
        "For the following combinations, scaling or noise parameters have "
        "been mapped, but all corresponding data points are nan:\n"
        + msg_data_nan + "\n")

def tests_presimulation(self):
    self.model.getFixedParameterNames()

    combos = itertools.product([(10, 5), (5, 10), ()], repeat=3)
    cases = dict()
    for icombo, combo in enumerate(combos):
        cases[f'{icombo}'] = {
            'fixedParameters': combo[0],
            'fixedParametersPreequilibration': combo[1],
            'fixedParametersPresimulation': combo[2],
        }

    for case in cases:
        with self.subTest(**cases[case]):
            for fp in cases[case]:
                setattr(self.edata[0], fp, cases[case][fp])

            df_edata = amici.getDataObservablesAsDataFrame(
                self.model, self.edata)
            edata_reconstructed = amici.getEdataFromDataFrame(
                self.model, df_edata)

            for fp in [
                'fixedParameters',
                'fixedParametersPreequilibration',
                'fixedParametersPresimulation',
            ]:
                if fp != 'fixedParameters' or cases[case][fp] != ():
                    self.assertTupleEqual(
                        getattr(self.edata[0], fp),
                        getattr(edata_reconstructed[0], fp),
                    )
                    self.assertTupleEqual(
                        cases[case][fp],
                        getattr(edata_reconstructed[0], fp),
                    )
                    self.assertTupleEqual(
                        getattr(self.edata[0], fp),
                        cases[case][fp],
                    )
                else:
                    self.assertTupleEqual(
                        self.model.getFixedParameters(),
                        getattr(edata_reconstructed[0], fp),
                    )
                    self.assertTupleEqual(
                        getattr(self.edata[0], fp),
                        cases[case][fp],
                    )

def test_steadystate_simulation(model_steadystate_module):
    model = model_steadystate_module.getModel()
    model.setTimepoints(np.linspace(0, 60, 60))
    solver = model.getSolver()
    solver.setSensitivityOrder(amici.SensitivityOrder.first)
    rdata = amici.runAmiciSimulation(model, solver)
    edata = [amici.ExpData(rdata, 1, 0)]
    rdata = amici.runAmiciSimulations(model, solver, edata)

    # check roundtripping of DataFrame conversion
    df_edata = amici.getDataObservablesAsDataFrame(model, edata)
    edata_reconstructed = amici.getEdataFromDataFrame(model, df_edata)

    assert np.isclose(
        amici.ExpDataView(edata[0])['observedData'],
        amici.ExpDataView(edata_reconstructed[0])['observedData']
    ).all()

    assert np.isclose(
        amici.ExpDataView(edata[0])['observedDataStdDev'],
        amici.ExpDataView(edata_reconstructed[0])['observedDataStdDev']
    ).all()

    if len(edata[0].fixedParameters):
        assert list(edata[0].fixedParameters) \
            == list(edata_reconstructed[0].fixedParameters)
    else:
        assert list(model.getFixedParameters()) \
            == list(edata_reconstructed[0].fixedParameters)

    assert list(edata[0].fixedParametersPreequilibration) == \
        list(edata_reconstructed[0].fixedParametersPreequilibration)

    df_state = amici.getSimulationStatesAsDataFrame(model, edata, rdata)
    assert np.isclose(
        rdata[0]['x'],
        df_state[list(model.getStateIds())].values
    ).all()

    df_obs = amici.getSimulationObservablesAsDataFrame(model, edata, rdata)
    assert np.isclose(
        rdata[0]['y'],
        df_obs[list(model.getObservableIds())].values
    ).all()

    amici.getResidualsAsDataFrame(model, edata, rdata)

    solver.setRelativeTolerance(1e-12)
    solver.setAbsoluteTolerance(1e-12)
    check_derivatives(model, solver, edata[0], assert_fun,
                      atol=1e-3, rtol=1e-3, epsilon=1e-4)

    # Run some additional tests which need a working Model,
    # but don't need precomputed expectations.
    _test_set_parameters_by_dict(model_steadystate_module)

def test_pandas_import_export(sbml_example_presimulation_module, case):
    """Test amici.ExpData/DataFrame import and export using pandas."""
    # setup
    model = sbml_example_presimulation_module.getModel()
    model.setTimepoints(np.linspace(0, 60, 61))
    solver = model.getSolver()
    rdata = amici.runAmiciSimulation(model, solver)
    edata = [amici.ExpData(rdata, 0.01, 0)]

    # test copy constructor
    _ = amici.ExpData(edata[0])

    for fp in case:
        setattr(edata[0], fp, case[fp])

    df_edata = amici.getDataObservablesAsDataFrame(model, edata)
    edata_reconstructed = amici.getEdataFromDataFrame(model, df_edata)

    for fp in [
        'fixedParameters',
        'fixedParametersPreequilibration',
        'fixedParametersPresimulation',
    ]:
        if fp != 'fixedParameters' or case[fp] != ():
            assert getattr(edata[0], fp) \
                == getattr(edata_reconstructed[0], fp)
            assert case[fp] == getattr(edata_reconstructed[0], fp)
        else:
            assert model.getFixedParameters() \
                == getattr(edata_reconstructed[0], fp)
            assert getattr(edata[0], fp) == case[fp]

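The `case` argument consumed by test_pandas_import_export is not shown in this section. A hypothetical pytest parametrization, mirroring the `combos` construction in tests_presimulation above, could look like the following sketch (fixture name and values are assumptions):

import itertools
import pytest

# Hypothetical parametrization: all combinations of empty and non-empty
# fixed-parameter tuples for the three fixed-parameter fields.
_combos = itertools.product([(10, 5), (5, 10), ()], repeat=3)
_cases = [
    {
        'fixedParameters': combo[0],
        'fixedParametersPreequilibration': combo[1],
        'fixedParametersPresimulation': combo[2],
    }
    for combo in _combos
]


@pytest.fixture(params=_cases)
def case(request):
    return request.param
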
def test_steadystate_scaled(self):
    """
    Test SBML import and simulation from AMICI python interface
    """

    def assert_fun(x):
        return self.assertTrue(x)

    sbmlFile = os.path.join(os.path.dirname(__file__), '..', 'python',
                            'examples', 'example_steadystate',
                            'model_steadystate_scaled.xml')
    sbmlImporter = amici.SbmlImporter(sbmlFile)

    observables = amici.assignmentRules2observables(
        sbmlImporter.sbml,
        filter_function=lambda variable:
        variable.getId().startswith('observable_')
        and not variable.getId().endswith('_sigma')
    )

    outdir = 'test_model_steadystate_scaled'
    sbmlImporter.sbml2amici(
        'test_model_steadystate_scaled',
        outdir,
        observables=observables,
        constantParameters=['k0'],
        sigmas={'observable_x1withsigma': 'observable_x1withsigma_sigma'})

    sys.path.insert(0, outdir)
    import test_model_steadystate_scaled as modelModule

    model = modelModule.getModel()
    model.setTimepoints(np.linspace(0, 60, 60))
    solver = model.getSolver()
    solver.setSensitivityOrder(amici.SensitivityOrder_first)
    rdata = amici.runAmiciSimulation(model, solver)
    edata = [amici.ExpData(rdata, 1, 0)]
    rdata = amici.runAmiciSimulations(model, solver, edata)

    # check roundtripping of DataFrame conversion
    df_edata = amici.getDataObservablesAsDataFrame(model, edata)
    edata_reconstructed = amici.getEdataFromDataFrame(model, df_edata)

    self.assertTrue(
        np.isclose(
            amici.ExpDataView(edata[0])['observedData'],
            amici.ExpDataView(edata_reconstructed[0])['observedData'],
        ).all()
    )

    self.assertTrue(
        np.isclose(
            amici.ExpDataView(edata[0])['observedDataStdDev'],
            amici.ExpDataView(edata_reconstructed[0])['observedDataStdDev'],
        ).all()
    )

    if len(edata[0].fixedParameters):
        self.assertListEqual(
            list(edata[0].fixedParameters),
            list(edata_reconstructed[0].fixedParameters),
        )
    else:
        self.assertListEqual(
            list(model.getFixedParameters()),
            list(edata_reconstructed[0].fixedParameters),
        )

    self.assertListEqual(
        list(edata[0].fixedParametersPreequilibration),
        list(edata_reconstructed[0].fixedParametersPreequilibration),
    )

    df_state = amici.getSimulationStatesAsDataFrame(model, edata, rdata)
    self.assertTrue(
        np.isclose(
            rdata[0]['x'],
            df_state[list(model.getStateIds())].values
        ).all()
    )

    df_obs = amici.getSimulationObservablesAsDataFrame(model, edata, rdata)
    self.assertTrue(
        np.isclose(
            rdata[0]['y'],
            df_obs[list(model.getObservableIds())].values
        ).all()
    )

    amici.getResidualsAsDataFrame(model, edata, rdata)

    solver.setRelativeTolerance(1e-12)
    solver.setAbsoluteTolerance(1e-12)
    check_derivatives(model, solver, edata[0], assert_fun,
                      atol=1e-3, rtol=1e-3, epsilon=1e-4)

def test_special_likelihoods(model_special_likelihoods):
    """Test special likelihood functions."""
    model = model_special_likelihoods.getModel()
    model.setTimepoints(np.linspace(0, 60, 10))
    solver = model.getSolver()
    solver.setSensitivityOrder(amici.SensitivityOrder.first)

    # Test in region with positive density

    # run model once to create an edata
    rdata = amici.runAmiciSimulation(model, solver)
    edata = amici.ExpData(rdata, 0.001, 0)

    # make sure measurements are smaller for non-degenerate probability
    y = edata.getObservedData()
    y = tuple([val * np.random.uniform(0, 1) for val in y])
    edata.setObservedData(y)

    # set sigmas
    sigma = 0.2
    sigmas = sigma * np.ones(len(y))
    edata.setObservedDataStdDev(sigmas)

    # and now run for real and also compute likelihood values
    rdata = amici.runAmiciSimulations(model, solver, [edata])[0]

    # check if the values make overall sense
    assert np.isfinite(rdata['llh'])
    assert np.all(np.isfinite(rdata['sllh']))
    assert np.any(rdata['sllh'])

    rdata_df = amici.getSimulationObservablesAsDataFrame(
        model, edata, rdata, by_id=True)
    edata_df = amici.getDataObservablesAsDataFrame(
        model, edata, by_id=True)

    # check correct likelihood value
    llh_exp = -sum([
        binomial_nllh(edata_df['o1'], rdata_df['o1'], sigma),
        negative_binomial_nllh(edata_df['o2'], rdata_df['o2'], sigma),
    ])
    assert np.isclose(rdata['llh'], llh_exp)

    # check gradient
    for sensi_method in [
        amici.SensitivityMethod.forward,
        amici.SensitivityMethod.adjoint,
    ]:
        solver = model.getSolver()
        solver.setSensitivityMethod(sensi_method)
        solver.setSensitivityOrder(amici.SensitivityOrder.first)
        check_derivatives(model, solver, edata, assert_fun,
                          atol=1e-1, rtol=1e-1,
                          check_least_squares=False)

    # Test for m > y, i.e. in region with 0 density

    rdata = amici.runAmiciSimulation(model, solver)
    edata = amici.ExpData(rdata, 0.001, 0)

    # scale measurements up so that some exceed the simulations
    y = edata.getObservedData()
    y = tuple([val * np.random.uniform(0.5, 3) for val in y])
    edata.setObservedData(y)
    edata.setObservedDataStdDev(sigmas)

    # and now run for real and also compute likelihood values
    rdata = amici.runAmiciSimulations(model, solver, [edata])[0]

    # m > y -> outside binomial domain -> 0 density
    assert rdata['llh'] == -np.inf
    # check for non-informative gradient
    assert all(np.isnan(rdata['sllh']))

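The binomial_nllh and negative_binomial_nllh helpers used above are not defined in this section. The sketch below shows plausible definitions under stated assumptions (binomial with the simulation as number of trials and the measurement as number of successes; negative binomial parameterized so that the simulation is the mean); the exact parameterization in the test suite may differ.

import numpy as np
from scipy.special import gammaln


def binomial_nllh(m, y, p):
    # Assumed binomial noise model: y "trials" (simulation), m "successes"
    # (measurement), success probability p; zero density if m > y.
    if np.any(m > y):
        return np.inf
    return np.sum(-(gammaln(y + 1) - gammaln(m + 1) - gammaln(y - m + 1)
                    + m * np.log(p) + (y - m) * np.log(1 - p)))


def negative_binomial_nllh(m, y, p):
    # Assumed negative binomial noise model with success probability p and
    # dispersion r chosen such that the mean equals the simulation y.
    r = y * (1 - p) / p
    return np.sum(-(gammaln(m + r) - gammaln(m + 1) - gammaln(r)
                    + r * np.log(1 - p) + m * np.log(p)))
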
def test_steadystate_scaled(self):
    '''
    Test SBML import and simulation from AMICI python interface
    '''
    sbmlFile = os.path.join(os.path.dirname(__file__), '..', 'python',
                            'examples', 'example_steadystate',
                            'model_steadystate_scaled.xml')
    sbmlImporter = amici.SbmlImporter(sbmlFile)

    observables = amici.assignmentRules2observables(
        sbmlImporter.sbml,
        filter_function=lambda variable: variable.getId().startswith(
            'observable_') and not variable.getId().endswith('_sigma'))

    outdir = 'test_model_steadystate_scaled'
    sbmlImporter.sbml2amici(
        'test_model_steadystate_scaled',
        outdir,
        observables=observables,
        constantParameters=['k0'],
        sigmas={'observable_x1withsigma': 'observable_x1withsigma_sigma'})

    sys.path.insert(0, outdir)
    import test_model_steadystate_scaled as modelModule

    model = modelModule.getModel()
    model.setTimepoints(amici.DoubleVector(np.linspace(0, 60, 60)))
    solver = model.getSolver()
    rdata = amici.runAmiciSimulation(model, solver)
    edata = [amici.ExpData(rdata, 0.01, 0)]
    rdata = amici.runAmiciSimulations(model, solver, edata)

    # check roundtripping of DataFrame conversion
    df_edata = amici.getDataObservablesAsDataFrame(model, edata)
    edata_reconstructed = amici.getEdataFromDataFrame(model, df_edata)

    self.assertTrue(
        np.isclose(
            amici.edataToNumPyArrays(edata[0])['observedData'],
            amici.edataToNumPyArrays(
                edata_reconstructed[0])['observedData'],
        ).all())

    self.assertTrue(
        np.isclose(
            amici.edataToNumPyArrays(edata[0])['observedDataStdDev'],
            amici.edataToNumPyArrays(
                edata_reconstructed[0])['observedDataStdDev'],
        ).all())

    if edata[0].fixedParameters.size():
        self.assertListEqual(
            list(edata[0].fixedParameters),
            list(edata_reconstructed[0].fixedParameters),
        )
    else:
        self.assertListEqual(
            list(model.getFixedParameters()),
            list(edata_reconstructed[0].fixedParameters),
        )

    self.assertListEqual(
        list(edata[0].fixedParametersPreequilibration),
        list(edata_reconstructed[0].fixedParametersPreequilibration),
    )

    df_state = amici.getSimulationStatesAsDataFrame(model, edata, rdata)
    self.assertTrue(
        np.isclose(rdata[0]['x'],
                   df_state[list(model.getStateIds())].values).all())

    df_obs = amici.getSimulationObservablesAsDataFrame(model, edata, rdata)
    self.assertTrue(
        np.isclose(rdata[0]['y'],
                   df_obs[list(model.getObservableIds())].values).all())

    amici.getResidualsAsDataFrame(model, edata, rdata)

def test_replicates(self):
    """
    Use a model that has replicates and check that all data points are
    inserted.
    """
    # import a model with replicates at some time points and observables
    importer = pypesto.PetabImporter.from_folder(
        folder_base + "Schwen_PONE2014")

    # create amici.ExpData list
    edatas = importer.create_edatas()

    # convert to dataframe
    amici_df = amici.getDataObservablesAsDataFrame(
        importer.create_model(), edatas)

    # extract original measurement df
    meas_df = importer.petab_problem.measurement_df

    # find time points
    amici_times = sorted(amici_df.time.unique().tolist())
    meas_times = sorted(meas_df.time.unique().tolist())

    # assert same time points
    for amici_time, meas_time in zip(amici_times, meas_times):
        self.assertTrue(np.isclose(amici_time, meas_time))

    # extract needed columns from the amici df
    amici_df = amici_df[[
        col for col in amici_df.columns
        if col == 'time'
        or col.startswith('observable_') and not col.endswith("_std")
    ]]

    # find observable ids
    amici_obs_ids = [col for col in amici_df.columns if col != 'time']
    amici_obs_ids = [
        val.replace("observable_", "") for val in amici_obs_ids
    ]
    amici_obs_ids = sorted(amici_obs_ids)
    meas_obs_ids = sorted(meas_df.observableId.unique().tolist())
    for amici_obs_id, meas_obs_id in zip(amici_obs_ids, meas_obs_ids):
        self.assertEqual(amici_obs_id, meas_obs_id)

    # iterate over time points
    for time in meas_times:
        amici_df_for_time = amici_df[amici_df.time == time]
        amici_df_for_time = amici_df_for_time[[
            col for col in amici_df.columns if col != 'time'
        ]]
        meas_df_for_time = meas_df[meas_df.time == time]

        # iterate over observables
        for obs_id in meas_obs_ids:
            amici_df_for_obs = amici_df_for_time["observable_" + obs_id]
            meas_df_for_obs = meas_df_for_time[
                meas_df_for_time.observableId == obs_id]

            # extract non-nans and sort
            amici_vals = amici_df_for_obs.values.flatten().tolist()
            amici_vals = sorted(
                [val for val in amici_vals if np.isfinite(val)])
            meas_vals = meas_df_for_obs.measurement \
                .values.flatten().tolist()
            meas_vals = sorted(
                [val for val in meas_vals if np.isfinite(val)])

            # test that the measurement data coincide for the given time
            # point
            for amici_val, meas_val in zip(amici_vals, meas_vals):
                self.assertTrue(np.isclose(amici_val, meas_val))