Example #1
def test_to_files(petab_problem):  # pylint: disable=W0621
    """Test problem.to_files."""
    with tempfile.TemporaryDirectory() as outdir:
        # create target files
        sbml_file = Path(outdir, "model.xml")
        condition_file = Path(outdir, "conditions.tsv")
        measurement_file = Path(outdir, "measurements.tsv")
        parameter_file = Path(outdir, "parameters.tsv")
        observable_file = Path(outdir, "observables.tsv")

        # write contents to files
        petab_problem.to_files(
            sbml_file=sbml_file,
            condition_file=condition_file,
            measurement_file=measurement_file,
            parameter_file=parameter_file,
            visualization_file=None,
            observable_file=observable_file,
            yaml_file=None)

        # exemplarily load some
        parameter_df = petab.get_parameter_df(parameter_file)
        # treat cells that are NaN in both frames as equal
        same_nans = parameter_df.isna() & petab_problem.parameter_df.isna()
        assert ((parameter_df == petab_problem.parameter_df) | same_nans) \
            .all().all()
Example #2
def test_to_files(petab_problem):  # pylint: disable=W0621
    """Test problem.to_files."""
    with tempfile.TemporaryDirectory() as folder:
        # create target files
        sbml_file = tempfile.mkstemp(dir=folder)[1]
        condition_file = tempfile.mkstemp(dir=folder)[1]
        measurement_file = tempfile.mkstemp(dir=folder)[1]
        parameter_file = tempfile.mkstemp(dir=folder)[1]
        observable_file = tempfile.mkstemp(dir=folder)[1]

        # write contents to files
        petab_problem.to_files(
            sbml_file=sbml_file,
            condition_file=condition_file,
            measurement_file=measurement_file,
            parameter_file=parameter_file,
            visualization_file=None,
            observable_file=observable_file,
            yaml_file=None)

        # exemplarily load some
        parameter_df = petab.get_parameter_df(parameter_file)
        # treat cells that are NaN in both frames as equal
        same_nans = parameter_df.isna() & petab_problem.parameter_df.isna()
        assert ((parameter_df == petab_problem.parameter_df) | same_nans) \
            .all().all()
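
Both tests above assume a petab_problem pytest fixture that is not shown here. A minimal sketch of such a fixture, assuming the problem is loaded from a PEtab YAML file (the path below is hypothetical):

import pytest
import petab


@pytest.fixture
def petab_problem():
    """Sketch of the fixture assumed above; the YAML path is hypothetical."""
    return petab.Problem.from_yaml("path/to/petab_problem.yaml")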
Example #3
def test_write_parameter_df():
    """Test parameters.write_parameter_df."""
    parameter_df = pd.DataFrame(data={
        PARAMETER_ID: ['par1', 'par2'],
        PARAMETER_NAME: ['parname1', 'parname2'],
    }).set_index(PARAMETER_ID)

    with tempfile.NamedTemporaryFile(mode='w', delete=True) as fh:
        file_name = fh.name
        petab.write_parameter_df(parameter_df, file_name)
        re_df = petab.get_parameter_df(file_name)
        assert (parameter_df == re_df).all().all()
Example #4
def test_write_parameter_df():
    """Test parameters.write_parameter_df."""
    parameter_df = pd.DataFrame(data={
        PARAMETER_ID: ['par1', 'par2'],
        # Test utf8 characters
        PARAMETER_NAME: ['ɑ', 'β'],
    }).set_index(PARAMETER_ID)

    with tempfile.TemporaryDirectory() as temp_dir:
        file_name = Path(temp_dir) / "parameters.tsv"
        petab.write_parameter_df(parameter_df, file_name)
        re_df = petab.get_parameter_df(file_name)
        assert (parameter_df == re_df).all().all()
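
The NaN-tolerant comparison used in Examples #1 and #2 can be factored into a small helper; this is a sketch, not part of the petab API:

import pandas as pd


def frames_equal_ignoring_nans(expected: pd.DataFrame, actual: pd.DataFrame) -> bool:
    """Sketch: True if every cell is equal, or NaN in both frames."""
    same_nans = expected.isna() & actual.isna()
    return bool(((expected == actual) | same_nans).all().all())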
Example #5
def create_test_data(measurement_file_name, parameter_file_name, yaml_config,
                     yaml_file_name_test, model_output_dir, model_name,
                     hdf5_file_name):
    """Create some synthetic data to emulate a test set"""

    test_measurement_file_name = \
        "-testset".join(os.path.splitext(measurement_file_name))
    test_parameter_file_name = \
        "-testset".join(os.path.splitext(parameter_file_name))

    # measurements
    df = petab.get_measurement_df(measurement_file_name)
    df.loc[df.observableParameters == 'scaling_x1_common', 'measurement'] = \
        df.loc[df.observableParameters == 'scaling_x1_common', 'measurement'] \
        * 2.0
    df.loc[~df.observableParameters.isnull(), 'observableParameters'] = \
        df.loc[~df.observableParameters.isnull(), 'observableParameters'] \
        + "_test"
    petab.write_measurement_df(df, test_measurement_file_name)

    # parameters
    df = petab.get_parameter_df(parameter_file_name)
    df.rename(index={
        'scaling_x1_common': 'scaling_x1_common_test',
        'offset_x2_batch_0': 'offset_x2_batch_0_test',
        'offset_x2_batch_1': 'offset_x2_batch_1_test'
    },
              inplace=True)
    petab.write_parameter_df(df, test_parameter_file_name)

    # yaml
    yaml_config[ptc.PARAMETER_FILE] = test_parameter_file_name
    yaml_config[ptc.PROBLEMS][0][ptc.MEASUREMENT_FILES][0] = \
        test_measurement_file_name
    with open(yaml_file_name_test, 'w') as outfile:
        yaml.dump(yaml_config, outfile, default_flow_style=False)

    generate_hdf5_file(yaml_file=yaml_file_name_test,
                       model_output_dir=model_output_dir,
                       hdf5_file_name="-testset".join(
                           os.path.splitext(hdf5_file_name)),
                       model_name=model_name)
Example #6
            PARAMETER_NAME: parameter_dict0['name'],
            PARAMETER_SCALE: LIN,
            NOMINAL_VALUE: 1,
            ESTIMATE: 0,
        }
    else:
        raise NotImplementedError(parameter_dict0['id'])
    parameter_dicts.append(parameter_dict)
## Noise
parameter_dicts.append({
    PARAMETER_ID: noise,
    PARAMETER_NAME: noise,
    PARAMETER_SCALE: LOG10,
    LOWER_BOUND: '1e-12',
    UPPER_BOUND: '1e3',
    NOMINAL_VALUE: 0.1,
    ESTIMATE: 1,
})

condition_df = petab.get_condition_df(
    pd.DataFrame({CONDITION_ID: [condition_id]}))
observable_df = petab.get_observable_df(pd.DataFrame(observable_dicts))
measurement_df = petab.get_measurement_df(pd.DataFrame(measurement_dicts))
parameter_df = petab.get_parameter_df(pd.DataFrame(parameter_dicts))

petab.write_condition_df(condition_df, 'output/petab/conditions.tsv')
petab.write_observable_df(observable_df, 'output/petab/observables.tsv')
petab.write_measurement_df(measurement_df, 'output/petab/measurements.tsv')
petab.write_parameter_df(parameter_df, 'output/petab/parameters.tsv')
shutil.copy('input/petab_problem.yaml', 'output/petab/petab_problem.yaml')
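
Before writing the tables, the assembled problem could also be linted in memory. A sketch that reuses the dataframes built above and petab.lint_problem (no SBML model is attached here, which the linter may flag):

# Sketch: lint the in-memory tables before writing them out.
problem = petab.Problem(
    condition_df=condition_df,
    observable_df=observable_df,
    measurement_df=measurement_df,
    parameter_df=parameter_df,
)
petab.lint_problem(problem)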
Example #7
def main():
    args = parse_cli_args()

    script_path = os.path.split(os.path.abspath(__file__))[0]
    model_name = 'model_steadystate_scaled'

    print(f'{__file__} running in {os.getcwd()}')
    print(f'Processing model {args.sbml_file_name}')

    # Create sbml model from scratch
    cmd = f'bash -c "{script_path}/createSteadystateExampleSBML.py > {args.sbml_file_name}"'
    print(cmd)
    out = subprocess.check_output(cmd, shell=True)
    print(out.decode('utf-8'))
    print()

    print_model_info(args.sbml_file_name)
    print()

    fixed_parameters, observables = create_module(args.sbml_file_name,
                                                  model_name,
                                                  args.model_output_dir)

    print('Observables:', observables)
    print('Fixed parameters:', fixed_parameters)
    print()

    # load model
    sys.path.insert(0, args.model_output_dir)
    model_module = importlib.import_module(model_name)

    print()
    print("--- Creating data ---")
    true_parameters, expected_llh = create_data_tables(
        model=model_module.getModel(),
        measurement_file=args.measurement_file_name,
        fixed_parameter_file=args.condition_file_name,
        fixed_parameters=fixed_parameters)

    # check for valid PEtab
    pp = petab.Problem.from_files(sbml_file=args.sbml_file_name,
                                  condition_file=args.condition_file_name,
                                  measurement_file=args.measurement_file_name)

    create_parameter_table(problem=pp,
                           parameter_file=args.parameter_file_name,
                           nominal_parameters=true_parameters)

    petab.lint_problem(pp)

    # create training data
    generate_hdf5_file(sbml_file_name=args.sbml_file_name,
                       model_output_dir=args.model_output_dir,
                       measurement_file_name=args.measurement_file_name,
                       condition_file_name=args.condition_file_name,
                       hdf5_file_name=args.hdf5_file_name,
                       parameter_file_name=args.parameter_file_name,
                       model_name=model_name)

    # create test data
    args.test_measurement_file_name = \
        "-testset".join(os.path.splitext(args.measurement_file_name))
    args.test_parameter_file_name = \
        "-testset".join(os.path.splitext(args.parameter_file_name))
    df = petab.get_measurement_df(args.measurement_file_name)
    df.loc[df.observableParameters == 'scaling_x1_common', 'measurement'] = \
        df.loc[df.observableParameters == 'scaling_x1_common', 'measurement'] \
        * 2.0
    df.loc[~df.observableParameters.isnull(), 'observableParameters'] = \
        df.loc[~df.observableParameters.isnull(), 'observableParameters'] \
        + "_test"
    df.to_csv(args.test_measurement_file_name, sep='\t', index=False)
    df = petab.get_parameter_df(args.parameter_file_name)
    df.rename(index={
        'scaling_x1_common': 'scaling_x1_common_test',
        'offset_x2_batch-0': 'offset_x2_batch-0_test',
        'offset_x2_batch-1': 'offset_x2_batch-1_test'
    },
              inplace=True)
    df.to_csv(args.test_parameter_file_name, sep='\t')
    generate_hdf5_file(sbml_file_name=args.sbml_file_name,
                       model_output_dir=args.model_output_dir,
                       measurement_file_name=args.test_measurement_file_name,
                       condition_file_name=args.condition_file_name,
                       hdf5_file_name="-testset".join(
                           os.path.splitext(args.hdf5_file_name)),
                       parameter_file_name=args.test_parameter_file_name,
                       model_name=model_name)

    save_expected_results(args.hdf5_file_name, true_parameters, expected_llh)

    write_starting_points(args.hdf5_file_name, true_parameters)
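
The raw DataFrame.to_csv calls above could also go through petab's writers, which handle the tab separator and index conventions. A minimal sketch with hypothetical file-name arguments:

import petab


def write_test_set_tables(measurement_df, parameter_df,
                          measurement_file, parameter_file):
    """Sketch: write the modified test-set tables via petab's writers."""
    petab.write_measurement_df(measurement_df, measurement_file)
    petab.write_parameter_df(parameter_df, parameter_file)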
Example #8
def test_get_parameter_df():
    """Test parameters.get_parameter_df."""
    # parameter df missing ids
    parameter_df = pd.DataFrame(data={
        PARAMETER_NAME: ['parname1', 'parname2'],
    })
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
        file_name = fh.name
        parameter_df.to_csv(fh, sep='\t', index=False)

    with pytest.raises(KeyError):
        petab.get_parameter_df(file_name)

    # with ids
    parameter_df = pd.DataFrame(data={
        PARAMETER_ID: ['par1', 'par2'],
        PARAMETER_NAME: ['parname1', 'parname2'],
    })
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as fh:
        file_name = fh.name
        parameter_df.to_csv(fh, sep='\t', index=False)

    df = petab.get_parameter_df(file_name)
    assert (df == parameter_df.set_index(PARAMETER_ID)).all().all()

    # Test parameter subset files
    with tempfile.TemporaryDirectory() as directory:
        parameter_dfs, parameter_files = ({}, {})
        parameter_dfs['complete'] = pd.DataFrame(data={
            PARAMETER_ID: ['id1', 'id2', 'id3'],
            PARAMETER_NAME: ['name1', 'name2', 'name3']
        })
        parameter_dfs['subset1'] = pd.DataFrame(data={
            PARAMETER_ID: ['id1', 'id2'],
            PARAMETER_NAME: ['name1', 'name2']
        })
        parameter_dfs['subset2_strict'] = pd.DataFrame(data={
            PARAMETER_ID: ['id3'],
            PARAMETER_NAME: ['name3']
        })
        parameter_dfs['subset2_overlap'] = pd.DataFrame(data={
            PARAMETER_ID: ['id2', 'id3'],
            PARAMETER_NAME: ['name2', 'name3']
        })
        parameter_dfs['subset2_error'] = pd.DataFrame(data={
            PARAMETER_ID: ['id2', 'id3'],
            PARAMETER_NAME: ['different_name2', 'name3']
        })
        for name, df in parameter_dfs.items():
            with tempfile.NamedTemporaryFile(
                    mode='w', delete=False, dir=directory) as fh:
                parameter_files[name] = fh.name
                df.to_csv(fh, sep='\t', index=False)
        # Check that subset files are correctly combined
        assert petab.get_parameter_df(parameter_files['complete']).equals(
            petab.get_parameter_df([parameter_files['subset1'],
                                    parameter_files['subset2_strict']]))
        # Check that identical parameter definitions are correctly combined
        assert petab.get_parameter_df(parameter_files['complete']).equals(
            petab.get_parameter_df([parameter_files['subset1'],
                                    parameter_files['subset2_overlap']]))
        # Ensure an error is raised if there exist parameterId duplicates
        # with non-identical parameter definitions
        with pytest.raises(ValueError):
            petab.get_parameter_df([parameter_files['subset1'],
                                    parameter_files['subset2_error']])
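
A hedged alternative to the delete=False temporary files used above is pytest's built-in tmp_path fixture, which cleans up after the test. A sketch of the missing-ID check rewritten that way (the test name is hypothetical):

def test_get_parameter_df_missing_ids(tmp_path):
    """Sketch: same missing-ID check, using pytest's tmp_path fixture."""
    parameter_df = pd.DataFrame(data={
        PARAMETER_NAME: ['parname1', 'parname2'],
    })
    file_name = tmp_path / "parameters.tsv"
    parameter_df.to_csv(file_name, sep='\t', index=False)

    with pytest.raises(KeyError):
        petab.get_parameter_df(file_name)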