from petabtests import evaluate_chi2


def test_evaluate_chi2():
    assert evaluate_chi2(0.5, 0.5001)
    assert not evaluate_chi2(0.5, 0.501)
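The assertions above imply an absolute-tolerance comparison with a default tolerance of roughly 1e-3. A minimal sketch of such a check (an illustration only; the actual petabtests implementation may differ):

def evaluate_chi2_sketch(chi2: float, gt_chi2: float, tol: float = 1e-3) -> bool:
    """Return True if the simulated chi2 lies within tol of the ground truth."""
    # 0.5 vs 0.5001 passes (diff 1e-4 < 1e-3); 0.5 vs 0.501 fails (diff 1e-3).
    return abs(chi2 - gt_chi2) < tol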
Example #2
import logging
import os

import amici.petab_objective
import petab
import petabtests
import pypesto.petab

logger = logging.getLogger(__name__)


def _execute_case(case):
    """Run a single PEtab test suite case"""
    case = petabtests.test_id_str(case)
    logger.info(f"Case {case}")

    # case folder
    case_dir = os.path.join(petabtests.CASES_DIR, case)

    # load solution
    solution = petabtests.load_solution(case, 'sbml')
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]

    # import petab problem
    yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))

    # unique folder for compiled amici model
    output_folder = f'amici_models/model_{case}'

    # import and create objective function
    importer = pypesto.petab.PetabImporter.from_yaml(
        yaml_file, output_folder=output_folder)
    model = importer.create_model(generate_sensitivity_code=False)
    obj = importer.create_objective(model=model)

    # the scaled parameters
    problem_parameters = importer.petab_problem.x_nominal_scaled

    # simulate
    ret = obj(problem_parameters, sensi_orders=(0, ), return_dict=True)

    # extract results
    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = -ret['fval']
    simulation_df = amici.petab_objective.rdatas_to_measurement_df(
        rdatas, model, importer.petab_problem.measurement_df)
    petab.check_measurement_df(simulation_df,
                               importer.petab_problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)

    # check if matches
    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations([simulation_df],
                                                        gt_simulation_dfs,
                                                        tol_simulations)

    # log matches
    logger.log(
        logging.INFO if chi2s_match else logging.ERROR,
        f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
        f" match = {chi2s_match}",
    )
    logger.log(
        logging.INFO if llhs_match else logging.ERROR,
        f"LLH: simulated: {llh}, expected: {gt_llh}, "
        f"match = {llhs_match}",
    )
    logger.log(
        logging.INFO if simulations_match else logging.ERROR,
        f"Simulations: match = {simulations_match}",
    )

    if not all([llhs_match, chi2s_match, simulations_match]):
        logger.error(f"Case {case} failed.")
        raise AssertionError(f"Case {case}: Test results do not match "
                             "expectations")

    logger.info(f"Case {case} passed.")
Example #3
import logging
import os

import amici.petab_import
import amici.petab_objective
import petab
import petabtests
import pyabc.petab

logger = logging.getLogger(__name__)


def _execute_case(case):
    """Run a single PEtab test suite case"""
    case = petabtests.test_id_str(case)
    logger.info(f"Case {case}")

    # case folder
    case_dir = os.path.join(petabtests.CASES_DIR, case)

    # load solution
    solution = petabtests.load_solution(case)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]

    # unique folder for compiled amici model
    output_folder = f'amici_models/model_{case}'

    # import petab problem
    yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))

    # create problem
    petab_problem = petab.Problem.from_yaml(yaml_file)

    # compile amici
    amici_model = amici.petab_import.import_petab_problem(
        petab_problem=petab_problem, model_output_dir=output_folder)
    solver = amici_model.getSolver()

    # import to pyabc
    importer = pyabc.petab.AmiciPetabImporter(petab_problem, amici_model,
                                              solver)
    model = importer.create_model(return_rdatas=True)

    # simulate
    problem_parameters = petab_problem.x_nominal_free_scaled
    ret = model(problem_parameters)

    llh = ret['llh']

    # extract results
    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    simulation_df = amici.petab_objective.rdatas_to_measurement_df(
        rdatas, amici_model, importer.petab_problem.measurement_df)
    petab.check_measurement_df(simulation_df,
                               importer.petab_problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)

    # check if matches
    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations([simulation_df],
                                                        gt_simulation_dfs,
                                                        tol_simulations)

    # log matches
    logger.log(
        logging.INFO if chi2s_match else logging.ERROR,
        f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
        f" match = {chi2s_match}")
    logger.log(
        logging.INFO if llhs_match else logging.ERROR,
        f"LLH: simulated: {llh}, expected: {gt_llh}, "
        f"match = {llhs_match}")
    logger.log(logging.INFO if simulations_match else logging.ERROR,
               f"Simulations: match = {simulations_match}")

    # FIXME cases 7 and 16 fail due to amici/#963
    if not all([llhs_match, simulations_match]) \
            or (not chi2s_match and case not in ['0007', '0016']):
        # chi2s_match ignored until fixed in amici
        logger.error(f"Case {case} failed.")
        raise AssertionError(f"Case {case}: Test results do not match "
                             "expectations")

    logger.info(f"Case {case} passed.")
Example #4
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)
    solution = petabtests.load_solution(case, model_type)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    if case.startswith('0006'):
        # account for flattening
        gt_simulation_dfs[0].loc[:, petab.OBSERVABLE_ID] = ('obs_a__10__c0',
                                                            'obs_a__15__c0')
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]

    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations([simulation_df],
                                                        gt_simulation_dfs,
                                                        tol_simulations)

    logger.log(
        logging.DEBUG if chi2s_match else logging.ERROR,
        f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
        f" match = {chi2s_match}")
    logger.log(
        logging.DEBUG if llhs_match else logging.ERROR,
        f"LLH: simulated: {llh}, expected: {gt_llh}, "
        f"match = {llhs_match}")
    logger.log(logging.DEBUG if simulations_match else logging.ERROR,
               f"Simulations: match = {simulations_match}")
Example #5
import logging
import os

import petab
import petabtests
from amici.petab_import import import_petab_problem
from amici.petab_objective import rdatas_to_measurement_df, simulate_petab

# check_derivatives: gradient-check helper from the original test module (not included in this excerpt)
logger = logging.getLogger(__name__)


def _test_case(case):
    """Run a single PEtab test suite case"""
    case = petabtests.test_id_str(case)
    logger.debug(f"Case {case}")

    # load
    case_dir = os.path.join(petabtests.CASES_DIR, case)

    # import petab problem
    yaml_file = os.path.join(case_dir, petabtests.problem_yaml_name(case))
    problem = petab.Problem.from_yaml(yaml_file)

    # compile amici model
    model_output_dir = f'amici_models/model_{case}'
    model = import_petab_problem(
        problem, model_output_dir=model_output_dir)

    # simulate
    ret = simulate_petab(problem, model, log_level=logging.DEBUG)

    rdatas = ret['rdatas']
    chi2 = sum(rdata['chi2'] for rdata in rdatas)
    llh = ret['llh']
    simulation_df = rdatas_to_measurement_df(rdatas, model,
                                             problem.measurement_df)
    petab.check_measurement_df(simulation_df, problem.observable_df)
    simulation_df = simulation_df.rename(
        columns={petab.MEASUREMENT: petab.SIMULATION})
    simulation_df[petab.TIME] = simulation_df[petab.TIME].astype(int)
    solution = petabtests.load_solution(case)
    gt_chi2 = solution[petabtests.CHI2]
    gt_llh = solution[petabtests.LLH]
    gt_simulation_dfs = solution[petabtests.SIMULATION_DFS]
    tol_chi2 = solution[petabtests.TOL_CHI2]
    tol_llh = solution[petabtests.TOL_LLH]
    tol_simulations = solution[petabtests.TOL_SIMULATIONS]

    chi2s_match = petabtests.evaluate_chi2(chi2, gt_chi2, tol_chi2)
    llhs_match = petabtests.evaluate_llh(llh, gt_llh, tol_llh)
    simulations_match = petabtests.evaluate_simulations(
        [simulation_df], gt_simulation_dfs, tol_simulations)

    logger.log(logging.DEBUG if chi2s_match else logging.ERROR,
               f"CHI2: simulated: {chi2}, expected: {gt_chi2},"
               f" match = {chi2s_match}")
    logger.log(logging.DEBUG if llhs_match else logging.ERROR,
               f"LLH: simulated: {llh}, expected: {gt_llh}, "
               f"match = {llhs_match}")
    logger.log(logging.DEBUG if simulations_match else logging.ERROR,
               f"Simulations: match = {simulations_match}")

    # FIXME cases 7 and 16 fail due to #963
    if case not in ['0007', '0016']:
        check_derivatives(problem, model)

    # FIXME cases 7 and 16 fail due to #963
    if not all([llhs_match, simulations_match]) \
            or (not chi2s_match and case not in ['0007', '0016']):
        # chi2s_match ignored until fixed in amici
        logger.error(f"Case {case} failed.")
        raise AssertionError(f"Case {case}: Test results do not match "
                             "expectations")

    logger.info(f"Case {case} passed.")