Example 1
        def model(par: Union[Sequence, Mapping]) -> Mapping:
            """The model function."""
            # copy since we add fixed parameters
            par = copy.deepcopy(par)

            # convenience to allow calling model not only with dicts
            if not isinstance(par, Mapping):
                par = {key: val for key, val in zip(x_ids, par)}

            # add fixed parameters
            for key, val in zip(x_fixed_ids, x_fixed_vals):
                par[key] = val

            # simulate model
            sim = simulate_petab(petab_problem=petab_problem,
                                 amici_model=amici_model,
                                 solver=amici_solver,
                                 problem_parameters=par,
                                 scaled_parameters=True)

            # return values of interest
            ret = {'llh': sim[LLH]}
            if return_simulations:
                for i_rdata, rdata in enumerate(sim[RDATAS]):
                    ret[f'y_{i_rdata}'] = rdata['y']
            if return_rdatas:
                ret[RDATAS] = sim[RDATAS]

            return ret
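A minimal usage sketch (hypothetical parameter IDs and values; assumes the enclosing scope defines `x_ids`, `x_fixed_ids`, `x_fixed_vals`, and the AMICI/PEtab objects used above):

# Hypothetical usage; parameter IDs and values are illustrative only.
par_dict = {'k1': 0.1, 'k2': 1.0}  # call with a mapping ...
res = model(par_dict)

par_seq = [0.1, 1.0]               # ... or with a plain sequence, matched
res = model(par_seq)               # to x_ids by position

print(res['llh'])                  # log-likelihood computed by AMICI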
Example 2
    def __call__(self, par: Union[Sequence, Mapping]) -> Mapping:
        """The model function.

        Note: The parameters are assumed to be passed on prior scale.
        """
        # copy since we add fixed parameters
        par = copy.deepcopy(par)

        # convenience to allow calling model not only with dicts
        if not isinstance(par, Mapping):
            par = {key: val for key, val in zip(self.x_free_ids, par)}

        # add fixed parameters
        for key, val in zip(self.x_fixed_ids, self.x_fixed_vals):
            par[key] = val

        # rescale parameters from their prior scale to the scale used for
        # simulation
        for key in self.prior_scales:
            par[key] = rescale(
                val=par[key],
                origin_scale=self.prior_scales[key],
                target_scale=self.scaled_scales[key],
            )

        # simulate model
        sim = simulate_petab(
            petab_problem=self.petab_problem,
            amici_model=self.amici_model,
            solver=self.amici_solver,
            problem_parameters=par,
            scaled_parameters=True,
        )

        # return values of interest
        ret = {'llh': sim[LLH]}
        if self.return_simulations:
            for i_rdata, rdata in enumerate(sim[RDATAS]):
                ret[f'y_{i_rdata}'] = rdata['y']
        if self.return_rdatas:
            ret[RDATAS] = sim[RDATAS]

        return ret
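The `rescale` helper is not shown here; a minimal sketch of what it might do, assuming parameter scales are limited to 'lin' and 'log10' (the scale names and behavior are assumptions):

import numpy as np

def rescale(val: float, origin_scale: str, target_scale: str) -> float:
    # Sketch only: convert a parameter value between scales, assuming
    # the only scales are 'lin' and 'log10'.
    if origin_scale == target_scale:
        return val
    # map to linear scale first, then to the target scale
    lin = 10 ** val if origin_scale == 'log10' else val
    return np.log10(lin) if target_scale == 'log10' else lin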
Example 3
def simulate_timecourse(
    parent_petab_problem: petab.Problem,
    timecourse_id: str,
    solver_settings: Dict[str, Any],
    problem_parameters: Optional[Dict[str, float]] = None,
    parameter_mapping=None,
    sensi_orders: Tuple[int, ...] = (0, 1),
    # FIXME Dict typehint
    initial_states: Optional[Union[Dict, Tuple[float, ...]]] = None,
    model_settings: Optional[Dict[str, Any]] = None,
):
    """
    Solver settings are a required attribute to ensure these are not
    forgotten...

    To use a default solver, supply an empty dictionary (`solver_settings={}`).

    initial_states:
        FIXME
        Can supply a dictionary or a tuple.
    """
    timecourse = get_timecourse(
        petab_problem=parent_petab_problem,
        timecourse_id=timecourse_id,
    )
    petab_problems = subset_petab_problem(
        petab_problem=parent_petab_problem,
        timecourse_id=timecourse_id,
    )
    results = []
    for index, petab_problem in enumerate(petab_problems):
        if petab_problem.measurement_df.empty:
            # FIXME resolve this properly. Is it due to timecourse pieces
            #       that are defined to occur after the last measured time
            #       point?
            break

        # Replace a controlled parameter with the ID of its timepoint-specific
        # control parameter (hack to ensure sensitivities are computed
        # correctly)
        replace_timecourse_parameters = {
            k: v
            for k, v in petab_problem.condition_df.loc[timecourse_id].items()
            if isinstance(v, str) and k != CONDITION_NAME
        }
        petab_problem.condition_df.drop(
            columns=list(replace_timecourse_parameters),
            inplace=True,
        )
        for old_parameter_id, new_parameter_id in \
                replace_timecourse_parameters.items():
            if old_parameter_id in problem_parameters:
                raise ValueError(
                    'A timecourse parameter was assigned a value via '
                    '`problem_parameters`, but should receive its value '
                    'from the timecourse information.')
            if new_parameter_id not in problem_parameters:
                raise ValueError(
                    'Please supply a value for estimated timecourse '
                    'replacement parameters (parameter IDs in the rows) '
                    'of the condition table of a PEtab Timecourse problem.')
            problem_parameters[old_parameter_id] = \
                problem_parameters[new_parameter_id]
            # Duplicate control parameter in parameter_df, new index value is the
            # controlled parameter, such that sensitivities are output for the
            # controlled parameter (and can be interpreted as the sensitivities for
            # the control parameter)
            petab_problem.parameter_df.loc[old_parameter_id] = \
                petab_problem.parameter_df.loc[new_parameter_id]

        amici_model = import_petab_problem(petab_problem)
        amici_model.setT0(float(timecourse[index][0]))

        amici_edatas = create_edatas(amici_model, petab_problem)
        if problem_parameters is not None:
            # FIXME temp fix to add into parameters that AMICI automatically
            #       sets to be estimated from the SBML model (that weren't
            #       fixed like parameters in a condition table)
            model_parameters = dict(
                zip(amici_model.getParameterIds(),
                    amici_model.getParameters()))

            if parameter_mapping is None:
                parameter_mapping = \
                    create_identity_parameter_mapping(amici_model, 1)
            # Remove parameters from problem parameters if they are already
            # specified by the timecourse.
            # May break if a problem parameter is named `'conditionName'`.
            subset_problem_parameters = {
                parameter_id: parameter_value
                for parameter_id, parameter_value in {
                    **model_parameters,
                    **problem_parameters
                }.items()
                if parameter_id not in petab_problem.condition_df.columns
            }
            removed_problem_parameters = \
                set(problem_parameters).difference(subset_problem_parameters)
            if removed_problem_parameters:
                warnings.warn(
                    'The following parameters were removed from the supplied '
                    '`problem_parameters`, as they are already specified by '
                    f'the timecourse: {removed_problem_parameters}')

            amici.parameter_mapping.fill_in_parameters(
                edatas=amici_edatas,
                problem_parameters=subset_problem_parameters,
                scaled_parameters=True,
                parameter_mapping=parameter_mapping,
                amici_model=amici_model,
            )
        else:
            subset_problem_parameters = None

        if results:
            one(amici_edatas).x0 = one(results[-1]['rdatas']).x[-1].flatten()
            one(amici_edatas).sx0 = one(results[-1]['rdatas']).sx[-1].flatten()
        elif initial_states is not None:
            # TODO untested
            if isinstance(initial_states, dict):
                indexed_initial_states = [
                    initial_states[state_id]
                    for state_id in amici_model.getStateIds()
                ]
            else:
                indexed_initial_states = initial_states
            one(amici_edatas).x0 = indexed_initial_states

        amici_solver = amici_model.getSolver()
        # apply user-specified model and solver settings
        if model_settings is not None:
            for setter, value in model_settings.items():
                getattr(amici_model, setter)(value)
        if solver_settings is not None:
            for setter, value in solver_settings.items():
                getattr(amici_solver, setter)(value)

        results.append(
            simulate_petab(
                petab_problem=petab_problem,
                amici_model=amici_model,
                solver=amici_solver,
                edatas=amici_edatas,
                problem_parameters=subset_problem_parameters,
            ))

        for old_parameter_id, new_parameter_id in \
                replace_timecourse_parameters.items():
            results[-1]['sllh'][new_parameter_id] = \
                results[-1]['sllh'][old_parameter_id]
            # was artificially added above, so remove it now
            del problem_parameters[old_parameter_id]
    return results
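A usage sketch for `simulate_timecourse` (the YAML path, timecourse ID, and parameter values are placeholders, not from a real problem):

parent_problem = petab.Problem.from_yaml('timecourse_problem.yaml')
results = simulate_timecourse(
    parent_petab_problem=parent_problem,
    timecourse_id='timecourse1',
    solver_settings={},  # empty dict selects default solver settings
    problem_parameters={'k1': 0.1},
)
# one result per simulated timecourse piece
total_llh = sum(result['llh'] for result in results)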
Example 4
        yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))
        problem = PysbPetabProblem.from_yaml(yaml_file,
                                             flatten=case.startswith('0006'))
    else:
        raise ValueError(f"Unsupported model_type: {model_type}")

    # compile amici model
    if case.startswith('0006') and model_type != "pysb":
        petab.flatten_timepoint_specific_output_overrides(problem)
    model_output_dir = f'amici_models/model_{case}'
    model = import_petab_problem(problem,
                                 model_output_dir=model_output_dir,
                                 force_compile=True)

    # simulate
    ret = simulate_petab(problem, model, log_level=logging.DEBUG)

    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = ret['llh']
    simulation_df = rdatas_to_measurement_df(rdatas, model,
                                             problem.measurement_df)
    petab.check_measurement_df(simulation_df, problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)
    solution = petabtests.load_solution(case, model_type)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    if case.startswith('0006'):
Example 5
def _test_case(case):
    """Run a single PEtab test suite case"""
    case = petabtests.test_id_str(case)
    logger.debug(f"Case {case}")

    # load
    case_dir = os.path.join(petabtests.CASES_DIR, case)

    # import petab problem
    yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))
    problem = petab.Problem.from_yaml(yaml_file)

    # compile amici model
    model_output_dir = f'amici_models/model_{case}'
    model = import_petab_problem(
        problem, model_output_dir=model_output_dir)

    # simulate
    ret = simulate_petab(problem, model, log_level=logging.DEBUG)

    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = ret['llh']
    simulation_df = rdatas_to_measurement_df(rdatas, model,
                                             problem.measurement_df)
    petab.check_measurement_df(simulation_df, problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)
    solution = petabtests.load_solution(case)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]

    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations(
        [simulation_df], gt_simulation_dfs, tol_simulations)

    logger.log(logging.DEBUG if chi2s_match else logging.ERROR,
               f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
               f" match = {chi2s_match}")
    logger.log(logging.DEBUG if llhs_match else logging.ERROR,
               f"LLH: simulated: {llh}, expected: {gt_llh}, "
               f"match = {llhs_match}")
    logger.log(logging.DEBUG if simulations_match else logging.ERROR,
               f"Simulations: match = {simulations_match}")

    # FIXME case 7 fails due to #963
    if case not in ['0007', '0016']:
        check_derivatives(problem, model)

    # FIXME case 7 fails due to #963
    if not all([llhs_match, simulations_match]) \
            or (not chi2s_match and case not in ['0007', '0016']):
        # chi2s_match ignored until fixed in amici
        logger.error(f"Case {case} failed.")
        raise AssertionError(f"Case {case}: Test results do not match "
                             "expectations")

    logger.info(f"Case {case} passed.")
Example 6
def main():
    """Simulate the model specified on the command line"""

    args = parse_cli_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    logger.info(f"Simulating '{args.model_name}' "
                f"({args.model_directory}) using PEtab data from "
                f"{args.yaml_file_name}")

    # load PEtab files
    problem = petab.Problem.from_yaml(args.yaml_file_name)
    petab.flatten_timepoint_specific_output_overrides(problem)

    # load model
    if args.model_directory:
        sys.path.insert(0, args.model_directory)
    model_module = importlib.import_module(args.model_name)
    amici_model = model_module.getModel()

    res = simulate_petab(petab_problem=problem,
                         amici_model=amici_model,
                         log_level=logging.DEBUG)
    rdatas = res[RDATAS]
    llh = res[LLH]

    # create simulation PEtab table
    sim_df = rdatas_to_measurement_df(rdatas=rdatas,
                                      model=amici_model,
                                      measurement_df=problem.measurement_df)
    sim_df.rename(columns={petab.MEASUREMENT: petab.SIMULATION}, inplace=True)

    if args.simulation_file:
        sim_df.to_csv(args.simulation_file, index=False, sep="\t")

    if args.plot:
        try:
            # visualize fit
            axs = plot_petab_problem(petab_problem=problem, sim_data=sim_df)

            # save figure
            for plot_id, ax in axs.items():
                fig_path = os.path.join(
                    args.model_directory,
                    args.model_name + "_" + plot_id + "_vis.png")
                logger.info(f"Saving figure to {fig_path}")
                ax.get_figure().savefig(fig_path, dpi=150)

        except NotImplementedError:
            pass

    if args.check:
        references_yaml = os.path.join(os.path.dirname(__file__),
                                       "benchmark_models.yaml")
        with open(references_yaml) as f:
            refs = yaml.full_load(f)

        try:
            ref_llh = refs[args.model_name]["llh"]
            logger.info(f"Reference llh: {ref_llh}")

            if abs(ref_llh - llh) < 1e-3:
                logger.info(f"Computed llh {llh} matches reference "
                            f"{ref_llh}. Absolute difference is "
                            f"{abs(ref_llh - llh)}.")
            else:
                logger.error(f"Computed llh {llh} does not match reference "
                             f"{ref_llh}. Absolute difference is "
                             f"{abs(ref_llh - llh)}."
                             f" Ratio is {llh / ref_llh}.")
                sys.exit(1)
                sys.exit(1)
        except KeyError:
            logger.error("No reference likelihood found for "
                         f"{args.model_name} in {references_yaml}")