# assemble the mechanistic autoencoder from the PEtab measurement, condition
# and observable tables for the selected data set and pathway model
mae = MechanisticAutoEncoder(N_HIDDEN, (
    os.path.join('data', f'{DATA}__{MODEL}__measurements.tsv'),
    os.path.join('data', f'{DATA}__{MODEL}__conditions.tsv'),
    os.path.join('data', f'{DATA}__{MODEL}__observables.tsv'),
), MODEL)

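# build one unconstrained pretraining problem (PEtab importer) per sample;
# each is optimized independently below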
pretraining_problems = generate_per_sample_pretraining_problems(mae)

pretrained_samples = []
pretraindir = 'pretraining'
prefix = f'{mae.pathway_name}__{mae.data_name}'
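# pretrain every sample individually: 100 uniformly sampled starts per
# sample; results are stored/plotted and the result files collected for
# later aggregation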
for sample, importer in pretraining_problems.items():
    problem = importer.create_problem()
    model = importer.create_model()
    result = pretrain(problem, pypesto.startpoint.uniform, 100)
    output_prefix = f'{prefix}__{sample}'

    store_and_plot_pretraining(result, pretraindir, output_prefix)
    pretrained_samples.append(output_prefix + '.csv')

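    # simulate the PEtab problem at the best parameter vector of this
    # pretraining run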
    simulation = amici.petab_objective.simulate_petab(
        importer.petab_problem,
        model,
        problem_parameters=dict(
            zip(
                problem.x_names,
                result.optimize_result.list[0]['x'],
            )),
        scaled_parameters=True,
        edatas=problem.objective._objectives[0].edatas)


def startpoints(**kwargs):
    """Startpoints: population parameters start at the mean over the
    per-sample pretraining results (``means``); sample-specific parameters at
    the deviation of the respective sample (``par_combo``) from that mean.
    Assembly of ``means``/``par_combo`` from the stored results is omitted."""
    n_starts = kwargs['n_starts']
    xs = np.empty((n_starts, len(problem.x_free_indices)))
    for istart in range(n_starts):
        for ix, xname in enumerate(
                problem.get_reduced_vector(np.asarray(problem.x_names),
                                           problem.x_free_indices)
        ):
            if xname.startswith(MODEL_FEATURE_PREFIX):
                match = re.match(fr'{MODEL_FEATURE_PREFIX}([\w_]+)__'
                                 r'([\w0-9]+)', xname)
                par = match.group(1)
                sample = match.group(2)
                xs[istart, ix] = par_combo.loc[sample, par] - means[par]
            else:
                xs[istart, ix] = means[xname]

    return xs


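# pretrain with the custom startpoint routine defined above (10 starts);
# fides is restricted to a two-dimensional trust-region subspace with at
# most 100 iterations per start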
result = pretrain(problem, startpoints, 10,
                  subspace=fides.SubSpaceDim.TWO, maxiter=int(1e2))

store_and_plot_pretraining(result, pretraindir, output_prefix)

N_STARTS = 5

# plot residuals and PCA of inputs for debugging purposes
data_dicts = []
fig_pca, axes_pca = plt.subplots(1, N_STARTS, figsize=(18.5, 10.5))

for ir, r in enumerate(result.optimize_result.list[:N_STARTS]):

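    # re-evaluate the objective at this start's parameters to obtain the
    # AMICI simulation results (rdatas)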
    rdatas = problem.objective._objectives[0](r['x'], return_dict=True)[
        pypesto.objective.constants.RDATAS
    ]
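
    # NOTE: the assembly of `inputs` (long-format DataFrame of pretrained
    # input parameter values with columns 'par' and 'sample'), as well as
    # `pattern`, `pars`, `i_input` and the `results` list, is omitted here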
    inputs.rename(columns={0: 'value'}, inplace=True)
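    # locate the block of input parameters (names prefixed with 'INPUT_') in
    # the subproblem parameter vector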
    input_start = next(
        ix for ix, name in enumerate(mae.pypesto_subproblem.x_names)
        if name.startswith('INPUT_'))
    pretrained_inputs = pd.pivot(
        inputs, index='par', columns='sample').reindex([
            re.match(pattern, name).group(1) for name in
            mae.pypesto_subproblem.x_names[input_start:input_start +
                                           mae.n_model_inputs]
        ])
    # drop the MultiIndex level introduced by the pivot, keeping the sample
    pretrained_inputs.columns = [col[1] for col in pretrained_inputs.columns]

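    # set up the encoder/inflate pretraining problem from the pretrained
    # input values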
    problem = generate_encoder_inflate_pretraining_problem(
        mae, pretrained_inputs, pars)

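    # pretrain with 10 uniformly sampled starts and a relaxed convergence
    # tolerance; tag each optimizer run with the input index so runs remain
    # distinguishable after merging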
    result = pretrain(problem, pypesto.startpoint.uniform, 10, fatol=1e-4)
    for r in result.optimize_result.list:
        r['id'] += f'_{i_input}'

    results.append(result)

# merge the pretraining results of all inputs into a single result, sorted by
# objective value
result = pypesto.Result(problem)
result.optimize_result.list = [
    r for result in results for r in result.optimize_result.list
]
result.optimize_result.sort()

store_and_plot_pretraining(result, pretraindir, output_prefix)

# compute and plot residuals