# Imports assumed from how these snippets use the respective symbols; in the
# original test modules they would appear at the top of the file.
import os

import libsbml
import numpy as np
import pandas as pd
import pytest

import petab
from petab import lint, sbml
from petab.C import *  # OBSERVABLE_ID, SIMULATION_CONDITION_ID, LIN, LOG, ...
from petab.sbml import add_global_parameter


def test_all_override(condition_df_2_conditions, minimal_sbml_model):
        # Condition-specific parameters overriding original parameters
        condition_df = condition_df_2_conditions

        _, sbml_model = minimal_sbml_model
        add_global_parameter(sbml_model, 'dynamicParameter1')
        add_global_parameter(sbml_model, 'dynamicParameter2')
        add_global_parameter(sbml_model, 'observableParameter1_obs1')
        add_global_parameter(sbml_model, 'observableParameter2_obs1')
        add_global_parameter(sbml_model, 'observableParameter1_obs2')

        measurement_df = pd.DataFrame(
            data={
                'observableId': ['obs1', 'obs2', 'obs1', 'obs2'],
                'simulationConditionId':
                ['condition1', 'condition1', 'condition2', 'condition2'],
                'preequilibrationConditionId': ['', '', '', ''],
                'observableParameters': [
                    'obs1par1override;obs1par2cond1override',
                    'obs2par1cond1override',
                    'obs1par1override;obs1par2cond2override',
                    'obs2par1cond2override'
                ],
                'noiseParameters': ['', '', '', '']
            })

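        # One tuple per simulation condition; here apparently
        # (preequilibration mapping, simulation mapping), with the first dict
        # empty because no preequilibration condition is used.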
        expected = [({}, {
            'dynamicParameter1': 'dynamicParameter1',
            'dynamicParameter2': 'dynamicParameter2',
            'observableParameter1_obs1': 'obs1par1override',
            'observableParameter2_obs1': 'obs1par2cond1override',
            'observableParameter1_obs2': 'obs2par1cond1override',
        }),
                    ({}, {
                        'dynamicParameter1': 'dynamicParameter1',
                        'dynamicParameter2': 'dynamicParameter2',
                        'observableParameter1_obs1': 'obs1par1override',
                        'observableParameter2_obs1': 'obs1par2cond2override',
                        'observableParameter1_obs2': 'obs2par1cond2override'
                    })]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            measurement_df=measurement_df,
            condition_df=condition_df,
            sbml_model=sbml_model)

        assert actual == expected
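

# Note: 'condition_df_2_conditions' and 'minimal_sbml_model' used throughout
# these examples are pytest fixtures that are not part of this excerpt. A
# minimal sketch of what they might look like, inferred from how the tests use
# them (the actual fixture definitions may differ):

@pytest.fixture
def condition_df_2_conditions():
    # two simulation conditions that differ only in 'fixedParameter1'
    condition_df = pd.DataFrame(data={
        'conditionId': ['condition1', 'condition2'],
        'fixedParameter1': [1.0, 2.0],
    })
    condition_df.set_index('conditionId', inplace=True)
    return condition_df


@pytest.fixture
def minimal_sbml_model():
    # bare SBML document and model, returned as (document, model)
    document = libsbml.SBMLDocument(3, 1)
    model = document.createModel()
    return document, model
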
def test_assert_model_parameters_in_condition_or_parameter_table():
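    # A model parameter must not appear in both the condition table and the
    # parameter table at the same time.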
    document = libsbml.SBMLDocument(3, 1)
    model = document.createModel()
    model.setTimeUnits("second")
    model.setExtentUnits("mole")
    model.setSubstanceUnits('mole')
    sbml.add_global_parameter(model, 'parameter1')
    sbml.add_global_parameter(model, 'noiseParameter1_')
    sbml.add_global_parameter(model, 'observableParameter1_')

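    # appearing only in the condition table is fine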
    lint.assert_model_parameters_in_condition_or_parameter_table(
            model, pd.DataFrame(columns=['parameter1']), pd.DataFrame()
    )

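    # appearing only in the parameter table is fine, too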
    lint.assert_model_parameters_in_condition_or_parameter_table(
            model, pd.DataFrame(), pd.DataFrame(index=['parameter1']))

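    # appearing in both tables at once must raise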
    with pytest.raises(AssertionError):
        lint.assert_model_parameters_in_condition_or_parameter_table(
            model,
            pd.DataFrame(columns=['parameter1']),
            pd.DataFrame(index=['parameter1']))

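    # placeholders and parameters absent from both tables are accepted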
    lint.assert_model_parameters_in_condition_or_parameter_table(
            model, pd.DataFrame(), pd.DataFrame())

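    # a parameter that is the target of an assignment rule also passes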
    sbml.create_assigment_rule(model, assignee_id='parameter1',
                               formula='parameter2')
    lint.assert_model_parameters_in_condition_or_parameter_table(
        model, pd.DataFrame(), pd.DataFrame())


    def test_no_condition_specific(condition_df_2_conditions,
                                   minimal_sbml_model):
        # Trivial case - no condition-specific parameters

        condition_df = condition_df_2_conditions

        measurement_df = pd.DataFrame(
            data={
                'observableId': ['obs1', 'obs2'],
                'simulationConditionId': ['condition1', 'condition2'],
                'preequilibrationConditionId': ['', ''],
                'observableParameters': ['', ''],
                'noiseParameters': ['', '']
            })

        _, sbml_model = minimal_sbml_model
        add_global_parameter(sbml_model, 'dynamicParameter1')
        add_global_parameter(sbml_model, 'dynamicParameter2')
        add_global_parameter(sbml_model, 'dynamicParameter3')

        expected = [({}, {
            'dynamicParameter1': 'dynamicParameter1',
            'dynamicParameter2': 'dynamicParameter2',
            'dynamicParameter3': 'dynamicParameter3'
        }),
                    ({}, {
                        'dynamicParameter1': 'dynamicParameter1',
                        'dynamicParameter2': 'dynamicParameter2',
                        'dynamicParameter3': 'dynamicParameter3'
                    })]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            sbml_model=sbml_model,
            measurement_df=measurement_df,
            condition_df=condition_df,
        )

        assert actual == expected
    def test_partial_override(condition_df_2_conditions,
                              minimal_sbml_model):
        # Condition-specific parameters, keeping original parameters
        condition_df = condition_df_2_conditions

        _, sbml_model = minimal_sbml_model
        add_global_parameter(sbml_model, 'dynamicParameter1')
        add_global_parameter(sbml_model, 'observableParameter1_obs1')
        add_global_parameter(sbml_model, 'observableParameter2_obs1')
        add_global_parameter(sbml_model, 'observableParameter1_obs2')

        measurement_df = pd.DataFrame(data={
            OBSERVABLE_ID: ['obs1', 'obs2', 'obs1', 'obs2'],
            SIMULATION_CONDITION_ID: ['condition1', 'condition1',
                                      'condition2', 'condition2'],
            PREEQUILIBRATION_CONDITION_ID: ['', '', '', ''],
            OBSERVABLE_PARAMETERS: ['obs1par1override;obs1par2cond1override',
                                    '',
                                    'obs1par1override;obs1par2cond2override',
                                    'obs2par1cond2override'],
            NOISE_PARAMETERS: ['', '', '', '']
        })

        parameter_df = pd.DataFrame(data={
            PARAMETER_ID: [
                'dynamicParameter1', 'obs1par1override',
                'obs1par2cond1override', 'obs1par2cond2override',
                'obs2par1cond2override'],
            ESTIMATE: [1, 1, 1, 1, 1],
        })
        parameter_df.set_index(PARAMETER_ID, inplace=True)

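        # One tuple per simulation condition; in this variant apparently
        # (preeq. parameter mapping, simulation parameter mapping,
        # preeq. scale mapping, simulation scale mapping).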
        expected = [({},
                     {'fixedParameter1': 1.0,
                      'dynamicParameter1': 'dynamicParameter1',
                      'observableParameter1_obs1': 'obs1par1override',
                      'observableParameter2_obs1': 'obs1par2cond1override',
                      'observableParameter1_obs2': np.nan,
                      },
                     {},
                     {'fixedParameter1': LIN,
                      'dynamicParameter1': LIN,
                      'observableParameter1_obs1': LIN,
                      'observableParameter2_obs1': LIN,
                      'observableParameter1_obs2': LIN}),
                    ({},
                     {'fixedParameter1': 2.0,
                      'dynamicParameter1': 'dynamicParameter1',
                      'observableParameter1_obs1': 'obs1par1override',
                      'observableParameter2_obs1': 'obs1par2cond2override',
                      'observableParameter1_obs2': 'obs2par1cond2override'
                      },
                     {},
                     {'fixedParameter1': LIN,
                      'dynamicParameter1': LIN,
                      'observableParameter1_obs1': LIN,
                      'observableParameter2_obs1': LIN,
                      'observableParameter1_obs2': LIN}),
                    ]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            measurement_df=measurement_df,
            condition_df=condition_df,
            sbml_model=sbml_model, parameter_df=parameter_df
        )

        # Comparison with NaN-containing expected results fails after pickling!
        # Check for the correct NaNs first, then compare the rest.
        assert np.isnan(expected[0][1]['observableParameter1_obs2'])
        assert np.isnan(actual[0][1]['observableParameter1_obs2'])
        expected[0][1]['observableParameter1_obs2'] = 0.0
        actual[0][1]['observableParameter1_obs2'] = 0.0

        assert actual == expected
    def test_all_override(condition_df_2_conditions,
                          minimal_sbml_model):
        # Condition-specific parameters overriding original parameters
        condition_df = condition_df_2_conditions

        _, sbml_model = minimal_sbml_model
        add_global_parameter(sbml_model, 'dynamicParameter1')
        add_global_parameter(sbml_model, 'dynamicParameter2')

        measurement_df = pd.DataFrame(data={
            OBSERVABLE_ID: ['obs1', 'obs2', 'obs1', 'obs2'],
            SIMULATION_CONDITION_ID: ['condition1', 'condition1',
                                      'condition2', 'condition2'],
            PREEQUILIBRATION_CONDITION_ID: ['', '', '', ''],
            OBSERVABLE_PARAMETERS: ['obs1par1override;obs1par2cond1override',
                                    'obs2par1cond1override',
                                    'obs1par1override;obs1par2cond2override',
                                    'obs2par1cond2override'],
            NOISE_PARAMETERS: ['', '', '', '']
        })

        parameter_df = pd.DataFrame(data={
            PARAMETER_ID: [
                'dynamicParameter1', 'dynamicParameter2', 'obs1par1override',
                'obs1par2cond1override', 'obs1par2cond2override',
                'obs2par1cond1override', 'obs2par1cond2override'
            ],
            ESTIMATE: [1] * 7
        })
        parameter_df.set_index(PARAMETER_ID, inplace=True)

        expected = [
            (
                {},
                {'fixedParameter1': 1.0,
                 'dynamicParameter1': 'dynamicParameter1',
                 'dynamicParameter2': 'dynamicParameter2',
                 'observableParameter1_obs1': 'obs1par1override',
                 'observableParameter2_obs1': 'obs1par2cond1override',
                 'observableParameter1_obs2': 'obs2par1cond1override',
                 },
                {},
                {'fixedParameter1': LIN,
                 'dynamicParameter1': LIN,
                 'dynamicParameter2': LIN,
                 'observableParameter1_obs1': LIN,
                 'observableParameter2_obs1': LIN,
                 'observableParameter1_obs2': LIN
                 }
            ),
            (
                {},
                {'fixedParameter1': 2.0,
                 'dynamicParameter1': 'dynamicParameter1',
                 'dynamicParameter2': 'dynamicParameter2',
                 'observableParameter1_obs1': 'obs1par1override',
                 'observableParameter2_obs1': 'obs1par2cond2override',
                 'observableParameter1_obs2': 'obs2par1cond2override'
                 },
                {},
                {'fixedParameter1': LIN,
                 'dynamicParameter1': LIN,
                 'dynamicParameter2': LIN,
                 'observableParameter1_obs1': LIN,
                 'observableParameter2_obs1': LIN,
                 'observableParameter1_obs2': LIN
                 }
            )
        ]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            measurement_df=measurement_df,
            condition_df=condition_df,
            sbml_model=sbml_model, parameter_df=parameter_df)
        assert actual == expected

        # For one case we test parallel execution, which must yield the same
        # result
        os.environ[petab.ENV_NUM_THREADS] = "4"
        actual = petab.get_optimization_to_simulation_parameter_mapping(
            measurement_df=measurement_df,
            condition_df=condition_df,
            sbml_model=sbml_model, parameter_df=parameter_df)
        assert actual == expected
    def test_no_condition_specific(condition_df_2_conditions,
                                   minimal_sbml_model):
        # Trivial case - no condition-specific parameters

        condition_df = condition_df_2_conditions

        measurement_df = pd.DataFrame(data={
            OBSERVABLE_ID: ['obs1', 'obs2'],
            SIMULATION_CONDITION_ID: ['condition1', 'condition2'],
            PREEQUILIBRATION_CONDITION_ID: ['', ''],
            OBSERVABLE_PARAMETERS: ['', ''],
            NOISE_PARAMETERS: ['', '']
        })

        _, sbml_model = minimal_sbml_model
        add_global_parameter(sbml_model, 'dynamicParameter1').setValue(1.0)
        add_global_parameter(sbml_model, 'dynamicParameter2').setValue(2.0)
        add_global_parameter(sbml_model, 'dynamicParameter3').setValue(3.0)
        # add a species which will have an initial concentration in the
        # condition table but which should not show up in the mapping
        s = sbml_model.createSpecies()
        s.setId("someSpecies")
        condition_df["someSpecies"] = [0.0, 0.0]

        # Test without parameter table
        expected = [({},
                     {'dynamicParameter1': 1.0,
                      'dynamicParameter2': 2.0,
                      'dynamicParameter3': 3.0,
                      'fixedParameter1': 1.0},
                     {},
                     {'dynamicParameter1': LIN,
                      'dynamicParameter2': LIN,
                      'dynamicParameter3': LIN,
                      'fixedParameter1': LIN}),
                    ({},
                     {'dynamicParameter1': 1.0,
                      'dynamicParameter2': 2.0,
                      'dynamicParameter3': 3.0,
                      'fixedParameter1': 2.0},
                     {},
                     {'dynamicParameter1': LIN,
                      'dynamicParameter2': LIN,
                      'dynamicParameter3': LIN,
                      'fixedParameter1': LIN}
                     )]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            sbml_model=sbml_model,
            measurement_df=measurement_df,
            condition_df=condition_df,
        )
        assert actual == expected

        # Test with parameter table
        parameter_df = pd.DataFrame(data={
            PARAMETER_ID: ['dynamicParameter1', 'dynamicParameter2',
                           'dynamicParameter3'],
            ESTIMATE: [0, 1, 1],
            NOMINAL_VALUE: [11.0, 12.0, None],
            PARAMETER_SCALE: [LOG, LOG10, LIN],
        })
        parameter_df.set_index(PARAMETER_ID, inplace=True)

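        # dynamicParameter1 has ESTIMATE == 0, so it maps to its nominal value
        # (11.0); the estimated parameters map to their own identifiers.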
        expected = [({},
                     {'dynamicParameter1': 11.0,
                      'dynamicParameter2': 'dynamicParameter2',
                      'dynamicParameter3': 'dynamicParameter3',
                      'fixedParameter1': 1.0},
                     {},
                     {'dynamicParameter1': LIN,
                      'dynamicParameter2': LOG10,
                      'dynamicParameter3': LIN,
                      'fixedParameter1': LIN}),
                    ({},
                     {'dynamicParameter1': 11.0,
                      'dynamicParameter2': 'dynamicParameter2',
                      'dynamicParameter3': 'dynamicParameter3',
                      'fixedParameter1': 2.0},
                     {},
                     {'dynamicParameter1': LIN,
                      'dynamicParameter2': LOG10,
                      'dynamicParameter3': LIN,
                      'fixedParameter1': LIN})
                    ]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            sbml_model=sbml_model,
            measurement_df=measurement_df,
            condition_df=condition_df,
            parameter_df=parameter_df
        )

        assert actual == expected

        # Test with applied scaling: with scaled_parameters=True, fixed values
        # are expected on the parameter scale (hence log(11.0) for the
        # LOG-scaled dynamicParameter1)

        expected = [
            ({},
             {'dynamicParameter1': np.log(11.0),
              'dynamicParameter2': 'dynamicParameter2',
              'dynamicParameter3': 'dynamicParameter3',
              'fixedParameter1': 1.0},
             {},
             {'dynamicParameter1': LOG,
              'dynamicParameter2': LOG10,
              'dynamicParameter3': LIN,
              'fixedParameter1': LIN}),
            ({},
             {'dynamicParameter1': np.log(11.0),
              'dynamicParameter2': 'dynamicParameter2',
              'dynamicParameter3': 'dynamicParameter3',
              'fixedParameter1': 2.0},
             {},
             {'dynamicParameter1': LOG,
              'dynamicParameter2': LOG10,
              'dynamicParameter3': LIN,
              'fixedParameter1': LIN}),
        ]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            sbml_model=sbml_model,
            measurement_df=measurement_df,
            condition_df=condition_df,
            parameter_df=parameter_df,
            scaled_parameters=True
        )

        assert actual == expected

        # Test without fixed overrides: with fill_fixed_parameters=False,
        # non-estimated parameters keep mapping to their IDs instead of being
        # replaced by their nominal values

        expected = [
            ({},
             {'dynamicParameter1': 'dynamicParameter1',
              'dynamicParameter2': 'dynamicParameter2',
              'dynamicParameter3': 'dynamicParameter3',
              'fixedParameter1': 1.0},
             {},
             {'dynamicParameter1': LOG,
              'dynamicParameter2': LOG10,
              'dynamicParameter3': LIN,
              'fixedParameter1': LIN}),
            ({},
             {'dynamicParameter1': 'dynamicParameter1',
              'dynamicParameter2': 'dynamicParameter2',
              'dynamicParameter3': 'dynamicParameter3',
              'fixedParameter1': 2.0},
             {},
             {'dynamicParameter1': LOG,
              'dynamicParameter2': LOG10,
              'dynamicParameter3': LIN,
              'fixedParameter1': LIN}),
        ]

        actual = petab.get_optimization_to_simulation_parameter_mapping(
            sbml_model=sbml_model,
            measurement_df=measurement_df,
            condition_df=condition_df,
            parameter_df=parameter_df,
            fill_fixed_parameters=False
        )

        assert actual == expected