Example #1
def main():
    """
    Command line interface to import a model in the PEtab
    (https://github.com/PEtab-dev/PEtab/) format into AMICI.
    """
    args = parse_cli_args()

    if args.yaml_file_name:
        pp = petab.Problem.from_yaml(args.yaml_file_name)
    else:
        pp = petab.Problem.from_files(
            sbml_file=args.sbml_file_name,
            condition_file=args.condition_file_name,
            measurement_file=args.measurement_file_name,
            parameter_file=args.parameter_file_name,
            observable_files=args.observable_file_name)

    # First check for valid PEtab
    petab.lint_problem(pp)

    if args.flatten:
        petab.flatten_timepoint_specific_output_overrides(pp)

    import_model(model_name=args.model_name,
                 sbml_model=pp.sbml_model,
                 condition_table=pp.condition_df,
                 observable_table=pp.observable_df,
                 measurement_table=pp.measurement_df,
                 model_output_dir=args.model_output_dir,
                 compile=args.compile,
                 verbose=args.verbose)
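
For orientation, here is a minimal sketch of the flattening step shared by the examples on this page, using only functions that already appear in the snippets; the YAML path is a placeholder.

import petab

# Load a PEtab problem from its YAML description (placeholder path).
problem = petab.Problem.from_yaml("path/to/problem.yaml")

# Timepoint-specific observable/noise parameter overrides cannot be handled
# directly by some downstream tools, so they are flattened into separate
# observables before model import.
if petab.lint.measurement_table_has_timepoint_specific_mappings(
        problem.measurement_df):
    petab.flatten_timepoint_specific_output_overrides(problem)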
Example #2
    def __init__(self, pysb_model: 'pysb.Model' = None, *args, **kwargs):
        """
        Constructor

        :param pysb_model: PySB model instance for this PEtab problem
        :param args: See :meth:`petab.Problem.__init__`
        :param kwargs: See :meth:`petab.Problem.__init__`
        """
        flatten = kwargs.pop('flatten', False)
        super().__init__(*args, **kwargs)
        if flatten:
            petab.flatten_timepoint_specific_output_overrides(self)

        self.pysb_model: 'pysb.Model' = pysb_model
        self._add_observation_model()

        if self.pysb_model is not None:
            self.sbml_document, self.sbml_model = \
                create_dummy_sbml(
                    self.pysb_model,
                    observable_ids=self.observable_df.index.values
                    if self.observable_df is not None else None
                )
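
A hypothetical usage sketch for the constructor above; `my_model`, `measurement_df`, and `observable_df` are placeholders for a PySB model instance and PEtab tables built elsewhere.

# All objects below are placeholders; `flatten=True` is popped by __init__
# and applies petab.flatten_timepoint_specific_output_overrides(self).
problem = PysbPetabProblem(
    pysb_model=my_model,
    measurement_df=measurement_df,
    observable_df=observable_df,
    flatten=True,
)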
Example #3
def test_flatten_timepoint_specific_output_overrides_special_cases():
    """Test flatten_timepoint_specific_output_overrides
    for special cases:
    * no preequilibration
    * no observable parameters
    """
    observable_df = pd.DataFrame(data={
        OBSERVABLE_ID: ['obs1'],
        OBSERVABLE_FORMULA: ['species1'],
        NOISE_FORMULA: ['noiseParameter1_obs1']
    })
    observable_df.set_index(OBSERVABLE_ID, inplace=True)

    observable_df_expected = pd.DataFrame(data={
        OBSERVABLE_ID: ['obs1__noiseParOverride1__condition1',
                        'obs1__noiseParOverride2__condition1'],
        OBSERVABLE_FORMULA: [
            'species1',
            'species1'],
        NOISE_FORMULA: ['noiseParameter1_obs1__noiseParOverride1__condition1',
                        'noiseParameter1_obs1__noiseParOverride2__condition1']
    })
    observable_df_expected.set_index(OBSERVABLE_ID, inplace=True)

    # Measurement table with timepoint-specific overrides
    measurement_df = pd.DataFrame(data={
        OBSERVABLE_ID:
            ['obs1', 'obs1', 'obs1', 'obs1'],
        SIMULATION_CONDITION_ID:
            ['condition1', 'condition1', 'condition1', 'condition1'],
        TIME:
            [1.0, 1.0, 2.0, 2.0],
        MEASUREMENT:
            [.1] * 4,
        NOISE_PARAMETERS:
            ['noiseParOverride1', 'noiseParOverride1',
             'noiseParOverride2', 'noiseParOverride2'],
    })

    measurement_df_expected = pd.DataFrame(data={
        OBSERVABLE_ID:
            ['obs1__noiseParOverride1__condition1',
             'obs1__noiseParOverride1__condition1',
             'obs1__noiseParOverride2__condition1',
             'obs1__noiseParOverride2__condition1'],
        SIMULATION_CONDITION_ID:
            ['condition1', 'condition1', 'condition1', 'condition1'],
        TIME:
            [1.0, 1.0, 2.0, 2.0],
        MEASUREMENT:
            [.1] * 4,
        NOISE_PARAMETERS:
            ['noiseParOverride1', 'noiseParOverride1',
             'noiseParOverride2', 'noiseParOverride2'],
    })

    problem = petab.Problem(measurement_df=measurement_df,
                            observable_df=observable_df)

    assert petab.lint_problem(problem) is False

    # Ensure having timepoint-specific overrides
    assert petab.lint.measurement_table_has_timepoint_specific_mappings(
        measurement_df) is True

    petab.flatten_timepoint_specific_output_overrides(problem)

    # Timepoint-specific overrides should be gone now
    assert petab.lint.measurement_table_has_timepoint_specific_mappings(
        problem.measurement_df) is False

    assert problem.observable_df.equals(observable_df_expected) is True
    assert problem.measurement_df.equals(measurement_df_expected) is True

    assert petab.lint_problem(problem) is False
Example #4
        problem = petab.Problem.from_yaml(yaml_file)
    elif model_type == "pysb":
        import pysb
        pysb.SelfExporter.cleanup()
        pysb.SelfExporter.do_export = True
        case_dir = os.path.join(petabtests.PYSB_DIR, case)
        # import petab problem
        yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))
        problem = PysbPetabProblem.from_yaml(yaml_file,
                                             flatten=case.startswith('0006'))
    else:
        raise ValueError(f"Unsupported model_type: {model_type}")

    # compile amici model
    if case.startswith('0006') and model_type != "pysb":
        petab.flatten_timepoint_specific_output_overrides(problem)
    model_output_dir = f'amici_models/model_{case}'
    model = import_petab_problem(problem,
                                 model_output_dir=model_output_dir,
                                 force_compile=True)

    # simulate
    ret = simulate_petab(problem, model, log_level=logging.DEBUG)

    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = ret['llh']
    simulation_df = rdatas_to_measurement_df(rdatas, model,
                                             problem.measurement_df)
    petab.check_measurement_df(simulation_df, problem.observable_df)
    # rename the measurement column to hold the simulated values
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
Example #5
def test_flatten_timepoint_specific_output_overrides():
    """Test flatten_timepoint_specific_output_overrides"""
    observable_df = pd.DataFrame(data={
        OBSERVABLE_ID: ['obs1'],
        OBSERVABLE_FORMULA: [
            'observableParameter1_obs1 + observableParameter2_obs1'],
        NOISE_FORMULA: ['noiseParameter1_obs1']
    })
    observable_df.set_index(OBSERVABLE_ID, inplace=True)

    observable_df_expected = pd.DataFrame(data={
        OBSERVABLE_ID: ['obs1_1', 'obs1_2', 'obs1_3'],
        OBSERVABLE_FORMULA: [
            'observableParameter1_obs1_1 + observableParameter2_obs1_1',
            'observableParameter1_obs1_2 + observableParameter2_obs1_2',
            'observableParameter1_obs1_3 + observableParameter2_obs1_3'],
        NOISE_FORMULA: ['noiseParameter1_obs1_1',
                        'noiseParameter1_obs1_2',
                        'noiseParameter1_obs1_3']
    })
    observable_df_expected.set_index(OBSERVABLE_ID, inplace=True)

    # Measurement table with timepoint-specific overrides
    measurement_df = pd.DataFrame(data={
        OBSERVABLE_ID:
            ['obs1', 'obs1', 'obs1', 'obs1'],
        SIMULATION_CONDITION_ID:
            ['condition1', 'condition1', 'condition1', 'condition1'],
        PREEQUILIBRATION_CONDITION_ID:
            ['', '', '', ''],
        TIME:
            [1.0, 1.0, 2.0, 2.0],
        MEASUREMENT:
            [np.nan] * 4,
        OBSERVABLE_PARAMETERS:
            ['obsParOverride1;1.0', 'obsParOverride2;1.0',
             'obsParOverride2;1.0', 'obsParOverride2;1.0'],
        NOISE_PARAMETERS:
            ['noiseParOverride1', 'noiseParOverride1',
             'noiseParOverride2', 'noiseParOverride2']
    })

    measurement_df_expected = pd.DataFrame(data={
        OBSERVABLE_ID:
            ['obs1_1', 'obs1_2', 'obs1_3', 'obs1_3'],
        SIMULATION_CONDITION_ID:
            ['condition1', 'condition1', 'condition1', 'condition1'],
        PREEQUILIBRATION_CONDITION_ID:
            ['', '', '', ''],
        TIME:
            [1.0, 1.0, 2.0, 2.0],
        MEASUREMENT:
            [np.nan] * 4,
        OBSERVABLE_PARAMETERS:
            ['obsParOverride1;1.0', 'obsParOverride2;1.0',
             'obsParOverride2;1.0', 'obsParOverride2;1.0'],
        NOISE_PARAMETERS:
            ['noiseParOverride1', 'noiseParOverride1',
             'noiseParOverride2', 'noiseParOverride2']
    })

    problem = petab.Problem(measurement_df=measurement_df,
                            observable_df=observable_df)

    assert petab.lint_problem(problem) is False

    # Ensure having timepoint-specific overrides
    assert petab.lint.measurement_table_has_timepoint_specific_mappings(
        measurement_df) is True

    petab.flatten_timepoint_specific_output_overrides(problem)

    # Timepoint-specific overrides should be gone now
    assert petab.lint.measurement_table_has_timepoint_specific_mappings(
        problem.measurement_df) is False

    assert problem.observable_df.equals(observable_df_expected) is True
    assert problem.measurement_df.equals(measurement_df_expected) is True

    assert petab.lint_problem(problem) is False
Example #6
def test_flatten_timepoint_specific_output_overrides(minimal_sbml_model):
    document, model = minimal_sbml_model
    petab.sbml.add_global_parameter(
        sbml_model=model, parameter_id='observableParameter1_obs1')
    petab.sbml.add_model_output_with_sigma(
        sbml_model=model, observable_id='obs1',
        observable_formula='observableParameter1_obs1')

    # Measurement table with timepoint-specific overrides
    measurement_df = pd.DataFrame(data={
        'observableId':
            ['obs1', 'obs1', 'obs1', 'obs1'],
        'simulationConditionId':
            ['condition1', 'condition1', 'condition1', 'condition1'],
        'preequilibrationConditionId':
            ['', '', '', ''],
        'time':
            [1.0, 1.0, 2.0, 2.0],
        'measurement':
            [np.nan] * 4,
        'observableParameters':
            ['obsParOverride1', 'obsParOverride2',
             'obsParOverride2', 'obsParOverride2'],
        'noiseParameters':
            ['noiseParOverride1', 'noiseParOverride1',
             'noiseParOverride2', 'noiseParOverride2']
    })

    measurement_df_expected = pd.DataFrame(data={
        'observableId':
            ['obs1_1', 'obs1_2', 'obs1_3', 'obs1_3'],
        'simulationConditionId':
            ['condition1', 'condition1', 'condition1', 'condition1'],
        'preequilibrationConditionId':
            ['', '', '', ''],
        'time':
            [1.0, 1.0, 2.0, 2.0],
        'measurement':
            [np.nan] * 4,
        'observableParameters':
            ['obsParOverride1', 'obsParOverride2',
             'obsParOverride2', 'obsParOverride2'],
        'noiseParameters':
            ['noiseParOverride1', 'noiseParOverride1',
             'noiseParOverride2', 'noiseParOverride2']
    })

    problem = petab.Problem(sbml_model=model,
                            measurement_df=measurement_df)

    assert petab.lint_problem(problem) is False

    # Ensure having timepoint-specific overrides
    assert petab.lint.measurement_table_has_timepoint_specific_mappings(
        measurement_df) is True

    petab.flatten_timepoint_specific_output_overrides(problem)

    # Timepoint-specific overrides should be gone now
    assert petab.lint.measurement_table_has_timepoint_specific_mappings(
        problem.measurement_df) is False

    assert problem.measurement_df.equals(measurement_df_expected) is True

    assert petab.lint_problem(problem) is False
Example #7
def main():
    """Simulate the model specified on the command line"""

    args = parse_cli_args()

    if args.verbose:
        logger.setLevel(logging.DEBUG)

    logger.info(f"Simulating '{args.model_name}' "
                f"({args.model_directory}) using PEtab data from "
                f"{args.yaml_file_name}")

    # load PEtab files
    problem = petab.Problem.from_yaml(args.yaml_file_name)
    petab.flatten_timepoint_specific_output_overrides(problem)

    # load model
    if args.model_directory:
        sys.path.insert(0, args.model_directory)
    model_module = importlib.import_module(args.model_name)
    amici_model = model_module.getModel()

    res = simulate_petab(petab_problem=problem,
                         amici_model=amici_model,
                         log_level=logging.DEBUG)
    rdatas = res[RDATAS]
    llh = res[LLH]

    # create simulation PEtab table
    sim_df = rdatas_to_measurement_df(rdatas=rdatas,
                                      model=amici_model,
                                      measurement_df=problem.measurement_df)
    sim_df.rename(columns={petab.MEASUREMENT: petab.SIMULATION}, inplace=True)

    if args.simulation_file:
        # write the simulation table to the requested output file
        sim_df.to_csv(args.simulation_file, index=False, sep="\t")

    if args.plot:
        try:
            # visualize fit
            axs = plot_petab_problem(petab_problem=problem, sim_data=sim_df)

            # save figure
            for plot_id, ax in axs.items():
                fig_path = os.path.join(
                    args.model_directory,
                    args.model_name + "_" + plot_id + "_vis.png")
                logger.info(f"Saving figure to {fig_path}")
                ax.get_figure().savefig(fig_path, dpi=150)

        except NotImplementedError:
            pass

    if args.check:
        references_yaml = os.path.join(os.path.dirname(__file__),
                                       "benchmark_models.yaml")
        with open(references_yaml) as f:
            refs = yaml.full_load(f)

        try:
            ref_llh = refs[args.model_name]["llh"]
            logger.info(f"Reference llh: {ref_llh}")

            if abs(ref_llh - llh) < 1e-3:
                logger.info(f"Computed llh {llh} matches reference "
                            f"{ref_llh}. Absolute difference is "
                            f"{ref_llh - llh}.")
            else:
                logger.error(f"Computed llh {llh} does not match reference "
                             f"{ref_llh}. Absolute difference is "
                             f"{ref_llh - llh}."
                             f" Relative difference is {llh / ref_llh}")
                sys.exit(1)
        except KeyError:
            logger.error("No reference likelihood found for "
                         f"{args.model_name} in {references_yaml}")