Example #1
    def simulate_without_noise(self, **kwargs) -> pd.DataFrame:
        """
        See :py:func:`petab.simulate.Simulator.simulate()` docstring.

        Additional keyword arguments can be supplied to specify arguments
        for the AMICI PEtab import and simulation methods. See the
        docstrings of the respective methods for argument options:
        - :py:func:`amici.petab_import.import_petab_problem`, and
        - :py:func:`amici.petab_objective.simulate_petab`.

        Note that some arguments are expected to have already been specified
        in the Simulator constructor (including the PEtab problem).
        """
        if AMICI_MODEL in {*kwargs, *dir(self)} and any(
                k in kwargs
                for k in inspect.signature(import_petab_problem).parameters
        ):
            print('Arguments related to the PEtab import are unused if '
                  f'`{AMICI_MODEL}` is specified, or the '
                  '`PetabSimulator.simulate()` method was previously called.')

        kwargs[PETAB_PROBLEM] = self.petab_problem

        # The AMICI model instance for the PEtab problem is saved in the state,
        # such that it need not be supplied with each request for simulated
        # data. Any user-supplied AMICI model will overwrite the model saved
        # in the state.
        if AMICI_MODEL not in kwargs:
            if self.amici_model is None:
                if MODEL_NAME not in kwargs:
                    kwargs[MODEL_NAME] = AMICI_MODEL
                    # If the model name is the name of a module that is already
                    # cached, it can cause issues during import.
                    while kwargs[MODEL_NAME] in sys.modules:
                        kwargs[MODEL_NAME] += str(self.rng.integers(10))
                if MODEL_OUTPUT_DIR not in kwargs:
                    kwargs[MODEL_OUTPUT_DIR] = self.working_dir
                self.amici_model = subset_call(import_petab_problem, kwargs)
            kwargs[AMICI_MODEL] = self.amici_model
        self.amici_model = kwargs[AMICI_MODEL]

        if AMICI_SOLVER not in kwargs:
            kwargs[AMICI_SOLVER] = self.amici_model.getSolver()
            kwargs[AMICI_SOLVER].setSensitivityMethod(SensitivityMethod_none)

        result = subset_call(simulate_petab, kwargs)
        return rdatas_to_measurement_df(result[RDATAS], self.amici_model,
                                        self.petab_problem.measurement_df)
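
For context, a minimal usage sketch, assuming the method above belongs to AMICI's PetabSimulator (a subclass of petab.simulate.Simulator, whose simulate() method accepts a noise flag):

import petab
from amici.petab_simulate import PetabSimulator

# Load the PEtab problem and request noise-free simulated data.
petab_problem = petab.Problem.from_yaml('petab_problem.yaml')  # hypothetical path
simulator = PetabSimulator(petab_problem)
simulation_df = simulator.simulate(noise=False)
print(simulation_df.head())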
Example #2
def _test_case(case):
    """Run a single PEtab test suite case"""
    case = petabtests.test_id_str(case)
    logger.debug(f"Case {case}")

    # locate the test case files
    case_dir = os.path.join(petabtests.CASES_DIR, case)

    # import petab problem
    yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))
    problem = petab.Problem.from_yaml(yaml_file)

    # compile amici model
    model_output_dir = f'amici_models/model_{case}'
    model = import_petab_problem(
        problem, model_output_dir=model_output_dir)

    # simulate
    ret = simulate_petab(problem, model, log_level=logging.DEBUG)

    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = ret['llh']
    simulation_df = rdatas_to_measurement_df(rdatas, model,
                                             problem.measurement_df)
    petab.check_measurement_df(simulation_df, problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)
    solution = petabtests.load_solution(case)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]

    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations(
        [simulation_df], gt_simulation_dfs, tol_simulations)

    logger.log(logging.DEBUG if chi2s_match else logging.ERROR,
               f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
               f" match = {chi2s_match}")
    logger.log(logging.DEBUG if llhs_match else logging.ERROR,
               f"LLH: simulated: {llh}, expected: {gt_llh}, "
               f"match = {llhs_match}")
    logger.log(logging.DEBUG if simulations_match else logging.ERROR,
               f"Simulations: match = {simulations_match}")

    # FIXME cases 0007 and 0016 fail due to #963
    if case not in ['0007', '0016']:
        check_derivatives(problem, model)

    # FIXME chi2 mismatches for cases 0007 and 0016 are ignored until fixed
    #  in AMICI (#963)
    if not all([llhs_match, simulations_match]) \
            or (not chi2s_match and case not in ['0007', '0016']):
        logger.error(f"Case {case} failed.")
        raise AssertionError(f"Case {case}: Test results do not match "
                             "expectations")

    logger.info(f"Case {case} passed.")
Example #3
    # flatten timepoint-specific overrides where required, then compile the
    # AMICI model
    if case.startswith('0006') and model_type != "pysb":
        petab.flatten_timepoint_specific_output_overrides(problem)
    model_output_dir = f'amici_models/model_{case}'
    model = import_petab_problem(problem,
                                 model_output_dir=model_output_dir,
                                 force_compile=True)

    # simulate
    ret = simulate_petab(problem, model, log_level=logging.DEBUG)

    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = ret['llh']
    simulation_df = rdatas_to_measurement_df(rdatas, model,
                                             problem.measurement_df)
    petab.check_measurement_df(simulation_df, problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)
    solution = petabtests.load_solution(case, model_type)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    if case.startswith('0006'):
        # account for flattening
        gt_simulation_dfs[0].loc[:, petab.OBSERVABLE_ID] = ('obs_a__10__c0',
                                                            'obs_a__15__c0')
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]
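
The flattening step above rewrites observables that carry timepoint-specific overrides into separate, uniquely named observables; the __10__c0-style suffixes encode the override value and the simulation condition. A minimal sketch of the effect, with hypothetical file name and observable IDs:

import petab

problem = petab.Problem.from_yaml('case_0006.yaml')  # hypothetical path
print(list(problem.observable_df.index))             # e.g. ['obs_a']

petab.flatten_timepoint_specific_output_overrides(problem)
print(list(problem.observable_df.index))  # e.g. ['obs_a__10__c0', 'obs_a__15__c0']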
Example #4
def main():
    """Simulate the model specified on the command line"""

    args = parse_cli_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    logger.info(f"Simulating '{args.model_name}' "
                f"({args.model_directory}) using PEtab data from "
                f"{args.yaml_file_name}")

    # load PEtab files
    problem = petab.Problem.from_yaml(args.yaml_file_name)
    petab.flatten_timepoint_specific_output_overrides(problem)

    # load model
    if args.model_directory:
        sys.path.insert(0, args.model_directory)
    model_module = importlib.import_module(args.model_name)
    amici_model = model_module.getModel()

    res = simulate_petab(petab_problem=problem,
                         amici_model=amici_model,
                         log_level=logging.DEBUG)
    rdatas = res[RDATAS]
    llh = res[LLH]

    # create simulation PEtab table
    sim_df = rdatas_to_measurement_df(rdatas=rdatas,
                                      model=amici_model,
                                      measurement_df=problem.measurement_df)
    sim_df.rename(columns={petab.MEASUREMENT: petab.SIMULATION}, inplace=True)

    if args.simulation_file:
        sim_df.to_csv(args.simulation_file, index=False, sep="\t")

    if args.plot:
        try:
            # visualize fit
            axs = plot_petab_problem(petab_problem=problem, sim_data=sim_df)

            # save figure
            for plot_id, ax in axs.items():
                fig_path = os.path.join(
                    args.model_directory,
                    args.model_name + "_" + plot_id + "_vis.png")
                logger.info(f"Saving figure to {fig_path}")
                ax.get_figure().savefig(fig_path, dpi=150)

        except NotImplementedError:
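            # silently skip plotting if the visualization routines do not
            # support this problem setup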
            pass

    if args.check:
        references_yaml = os.path.join(os.path.dirname(__file__),
                                       "benchmark_models.yaml")
        with open(references_yaml) as f:
            refs = yaml.full_load(f)

        try:
            ref_llh = refs[args.model_name]["llh"]
            logger.info(f"Reference llh: {ref_llh}")

            if abs(ref_llh - llh) < 1e-3:
                logger.info(f"Computed llh {llh} matches reference "
                            f"{ref_llh}. Absolute difference is "
                            f"{abs(ref_llh - llh)}.")
            else:
                logger.error(f"Computed llh {llh} does not match reference "
                             f"{ref_llh}. Absolute difference is "
                             f"{abs(ref_llh - llh)}."
                             f" Ratio is {llh / ref_llh}.")
                sys.exit(1)
        except KeyError:
            logger.error("No reference likelihood found for "
                         f"{args.model_name} in {references_yaml}")