def test_des(testdata):
    """A hand-written ADVAN13 $DES model converts to a compartmental system.

    Builds a NONMEM control stream with four $MODEL compartments and explicit
    $DES equations, borrows the dataset from the pheno test model, and checks
    that ``to_compartmental_system`` recovers the expected structure.
    """
    # NONMEM control stream: 4 compartments (DOSE, CENTRAL, PERIPH, LIVER)
    # with first-order absorption, peripheral distribution and a
    # Michaelis-Menten elimination feeding the LIVER compartment.
    code = """$PROBLEM
$INPUT ID TIME DV AMT
$DATA data.csv IGNORE=@
$SUBROUTINES ADVAN13 TOL=6
$MODEL COMP=(DOSE) COMP=(CENTRAL) COMP=(PERIPH) COMP=(LIVER)
$PK
MAT = THETA(1) * EXP(ETA(1))
CL = THETA(2) * EXP(ETA(2))
VC = THETA(3) * EXP(ETA(3))
Q = THETA(4) * EXP(ETA(4))
VP = THETA(5) * EXP(ETA(5))
VM = THETA(6) * EXP(ETA(6))
KM = THETA(7) * EXP(ETA(7))
KLO = THETA(8) * EXP(ETA(8))
VL = THETA(9) * EXP(ETA(9))
KA = 1/MAT
$DES
DADT(1) = -(KA * A(1))
DADT(2) = KA * A(1) + Q/VP * A(3) - (Q/VC * A(2) + A(2)/VC * VM/(KM + A(2)/VC))
DADT(3) = Q/VC * A(2) - Q/VP * A(3)
DADT(4) = A(2)/VC * VM/(KM + A(2)/VC) - KLO * A(4)
$ERROR
CONC = A(2)/VC
Y = CONC + EPS(1)
$ESTIMATION METHOD=COND INTER
$COVARIANCE PRINT=E
$THETA (0, 1, Inf) ; POP_MAT
$THETA (0, 1, Inf) ; POP_CL
$THETA (0, 1, Inf) ; POP_VC
$THETA (0, 1, Inf) ; POP_Q
$THETA (0, 1, Inf) ; POP_VP
$THETA (0, 1, Inf) ; POP_VM
$THETA (0, 1, Inf) ; POP_KM
$THETA (0, 1, Inf) ; POP_KLO
$THETA (0, 1, Inf) ; POP_VL
$OMEGA 0.1; IIV_MAT
$OMEGA 0.1; IIV_CL
$OMEGA 0.1; IIV_VC
$OMEGA 0.1; IIV_Q
$OMEGA 0.1; IIV_VP
$OMEGA 0.1; IIV_VM
$OMEGA 0.1; IIV_KM
$OMEGA 0.1; IIV_KLO
$OMEGA 0.1; IIV_VL
$SIGMA 0.1; RUV_ADD
"""
    # Reuse the pheno dataset so the in-memory model has data attached.
    pheno = Model(testdata / 'nonmem' / 'pheno.mod')
    model = Model(StringIO(code))
    model.dataset = pheno.dataset
    cs = model.statements.ode_system.to_compartmental_system()
    # 5 = the 4 declared compartments plus one more — presumably the implicit
    # output compartment; TODO confirm against CompartmentalSystem.__len__.
    assert len(cs) == 5
def test_get_params(testdata):
    """get_params maps etas of a FREM model to parameter names.

    Three scenarios against the pheno FREM model_4:
    1. One eta per parameter -> plain names ['CL', 'V'].
    2. Two etas on the same parameter (V) -> disambiguated 'V(1)', 'V(2)'.
    3. Eta assigned to an intermediate symbol before use -> still resolved.
    """
    model_frem = Model(testdata / 'nonmem' / 'frem' / 'pheno' / 'model_4.mod')
    rvs, _ = model_frem.random_variables.distributions(
        level=VariabilityLevel.IIV)[-1]
    npars = 2
    param_names = get_params(model_frem, rvs, npars)
    assert param_names == ['CL', 'V']

    # Attach a second eta to V so one parameter is driven by two etas.
    model_multiple_etas = re.sub(
        r'(V=TVV\*EXP\(ETA\(2\)\))',
        r'\1*EXP(ETA(3))',
        str(model_frem),
    )
    model = Model(StringIO(model_multiple_etas))
    model.dataset = model_frem.dataset
    rvs, _ = model.random_variables.distributions(
        level=VariabilityLevel.IIV)[-1]
    npars = 3
    param_names = get_params(model, rvs, npars)
    assert param_names == ['CL', 'V(1)', 'V(2)']

    # Declare the eta on its own line first, then use the alias in V.
    model_separate_declare = re.sub(
        r'(V=TVV\*EXP\(ETA\(2\)\))',
        'ETA2=ETA(2)\n V=TVV*EXP(ETA2)',
        str(model_frem),
    )
    model = Model(StringIO(model_separate_declare))
    model.dataset = model_frem.dataset
    rvs, _ = model.random_variables.distributions(
        level=VariabilityLevel.IIV)[-1]
    npars = 2
    param_names = get_params(model, rvs, npars)
    # Removed leftover debug print(param_names) before the assertion.
    assert param_names == ['CL', 'V']
def test_add_covariate_effect_nan(pheno_path):
    """NaN covariate values must not leak into the model code.

    Adds a covariate column whose first rows are NaN and checks that the
    generated control stream encodes missingness as -99 instead of 'NaN'.
    """
    model = Model(pheno_path)
    df = model.dataset
    n_missing = 10
    df['new_col'] = [np.nan] * n_missing + [1.0] * (len(df.index) - n_missing)
    model.dataset = df

    add_covariate_effect(model, 'CL', 'new_col', 'cat')
    model.update_source(nofiles=True)

    rendered = str(model)
    # No literal 'NaN' anywhere in the generated code ...
    assert re.search('NaN', rendered) is None
    # ... and the missing category is tested against the -99 sentinel.
    assert re.search(r'NEW_COL\.EQ\.-99', rendered) is not None
def test_check_covariates_warns(testdata):
    """check_covariates keeps usable covariates and warns on duplicates.

    Renamed from ``test_check_covariates``: the module defines a second test
    with that exact name further down, which shadowed this one at pytest
    collection time so it never ran.
    """
    model = Model(testdata / 'nonmem' / 'pheno_real.mod')
    newcov = check_covariates(model, ['WGT', 'APGR'])
    assert newcov == ['WGT', 'APGR']
    # Input order is preserved.
    newcov = check_covariates(model, ['APGR', 'WGT'])
    assert newcov == ['APGR', 'WGT']

    # Duplicate a column: the redundant covariate is dropped with a warning,
    # keeping whichever of the identical pair comes first in the input.
    data = model.dataset
    data['NEW'] = data['WGT']
    model.dataset = data
    with pytest.warns(UserWarning):
        newcov = check_covariates(model, ['APGR', 'WGT', 'NEW'])
    assert newcov == ['APGR', 'WGT']
    with pytest.warns(UserWarning):
        newcov = check_covariates(model, ['NEW', 'APGR', 'WGT'])
    assert newcov == ['NEW', 'APGR']
def test_check_covariates(testdata):
    """check_covariates filters out unusable and redundant covariates."""
    model = Model(testdata / 'nonmem' / 'pheno_real.mod')

    # Covariates with no usable information are removed entirely.
    assert check_covariates(model, ['FA1', 'FA2']) == []

    # Valid covariates pass through, in the order they were given.
    assert check_covariates(model, ['WGT', 'APGR']) == ['WGT', 'APGR']
    assert check_covariates(model, ['APGR', 'WGT']) == ['APGR', 'WGT']

    # A column identical to WGT is redundant; the later duplicate is dropped.
    df = model.dataset
    df['NEW'] = df['WGT']
    model.dataset = df
    assert check_covariates(model, ['APGR', 'WGT', 'NEW']) == ['APGR', 'WGT']
    assert check_covariates(model, ['NEW', 'APGR', 'WGT']) == ['NEW', 'APGR']
def test_power_on_ruv(testdata, epsilons, err_ref, theta_ref):
    """power_on_ruv rewrites the error model and adds power thetas.

    Runs inside a fake filesystem (pyfakefs Patcher) so the model files can
    be read/written as local paths.  ``epsilons``, ``err_ref`` and
    ``theta_ref`` are presumably pytest parametrization arguments — the
    expected $ERROR body and $THETA records vary per case.
    """
    with Patcher(additional_skip_names=['pkgutil']) as patcher:
        fs = patcher.fs
        fs.add_real_file(testdata / 'nonmem/pheno_real.mod', target_path='run1.mod')
        fs.add_real_file(testdata / 'nonmem/pheno_real.phi', target_path='run1.phi')
        fs.add_real_file(testdata / 'nonmem/pheno_real.ext', target_path='run1.ext')
        fs.add_real_file(testdata / 'nonmem/pheno.dta', target_path='pheno.dta')
        model_pheno = Model('run1.mod')

        # Append two extra $SIGMA records after the IVV omega value.
        # Fix: the original replacement never referenced the capture group,
        # so the matched IVV line itself was deleted instead of kept.
        model_more_eps = re.sub(
            r'( 0.031128 ; IVV\n)',
            '\\1$SIGMA 0.1\n$SIGMA 0.1',
            str(model_pheno),
        )
        # Wire the new epsilons into the $ERROR block.
        model_more_eps = re.sub(
            r'IPRED=F\nIRES=DV-IPRED',
            r'IPRED=F+EPS(2)\nIRES=DV-IPRED+EPS(3)',
            model_more_eps,
        )
        model = Model(StringIO(model_more_eps))
        model.dataset = model_pheno.dataset

        power_on_ruv(model, epsilons)
        model.update_source()

        # Adjacent f-string literals concatenate into one expected string.
        rec_err = str(model.control_stream.get_records('ERROR')[0])
        assert rec_err == f'$ERROR\n' f'W=F\n' f'{err_ref}\n' f'IWRES=IRES/W\n\n'

        rec_theta = ''.join(
            str(rec) for rec in model.control_stream.get_records('THETA'))
        assert (rec_theta == f'$THETA (0,0.00469307) ; PTVCL\n'
                f'$THETA (0,1.00916) ; PTVV\n'
                f'$THETA (-.99,.1)\n'
                f'{theta_ref}\n')
def psn_frem_results(path, force_posdef_covmatrix=False, force_posdef_samples=500, method=None):
    """Create frem results from a PsN FREM run

    :param path: Path to PsN frem run directory
    :param force_posdef_covmatrix: Passed through to ``calculate_results``
    :param force_posdef_samples: Passed through to ``calculate_results``
    :param method: Result method; ``'cov_sampling'`` enables the model_4b
        covariance fallback
    :return: A :class:`FREMResults` object
    :raises IOError: If final_models/model_4.mod is missing
    :raises ValueError: If model 4 has no modelfit results
    """
    path = Path(path)
    model_4_path = path / 'final_models' / 'model_4.mod'
    if not model_4_path.is_file():
        raise IOError(f'Could not find FREM model 4: {str(model_4_path)}')
    model_4 = Model(model_4_path)
    if model_4.modelfit_results is None:
        raise ValueError('Model 4 has no results')

    # For cov_sampling: if model 4 has no usable covariance matrix, fall back
    # to model_4b as the covariance model (silently skipped when absent).
    cov_model = None
    if method == 'cov_sampling':
        try:
            # Accessing the attribute is the probe; the value is discarded.
            model_4.modelfit_results.covariance_matrix
        except Exception:
            model_4b_path = path / 'final_models' / 'model_4b.mod'
            try:
                model_4b = Model(model_4b_path)
            except FileNotFoundError:
                pass
            else:
                cov_model = model_4b

    # Second line of covariates_summary.csv holds the covariate names;
    # [1:] skips the leading character (presumably a row-label separator —
    # TODO confirm against PsN's file format).
    with open(path / 'covariates_summary.csv') as covsum:
        covsum.readline()
        raw_cov_list = covsum.readline()
    all_covariates = raw_cov_list[1:].rstrip().split(',')

    # FIXME: Not introducing yaml parser in pharmpy just yet. Options should be collected
    # differently. Perhaps using json
    # NOTE(review): if meta.yaml has no 'rescale:' line, ``rescale`` is never
    # bound and the call below raises UnboundLocalError — verify whether PsN
    # always writes this key.
    logtransformed_covariates = []
    with open(path / 'meta.yaml') as meta:
        for row in meta:
            row = row.strip()
            if row.startswith('rescale: 1'):
                rescale = True
            elif row.startswith('rescale: 0'):
                rescale = False
            if row.startswith("log: ''"):
                logtransformed_covariates = []
            elif row.startswith('log: '):
                logtransformed_covariates = row[5:].split(',')

    # add log transformed columns for the -log option. Should be done when creating dataset
    df = model_4.dataset
    if logtransformed_covariates:
        for lncov in logtransformed_covariates:
            df[f'LN{lncov}'] = np.log(df[lncov])
        model_4.dataset = df

    # Split covariates by baseline cardinality: exactly 2 unique baseline
    # values is treated as categorical, anything else as continuous.
    nunique = model_4.dataset.pharmpy.baselines[all_covariates].nunique()
    continuous = list(nunique.index[nunique != 2])
    categorical = list(nunique.index[nunique == 2])

    # Collect whichever intermediate FREM models exist in m1/.
    intmod_names = ['model_1.mod', 'model_2.mod', 'model_3.mod', 'model_3b.mod']
    intmods = []
    for m in intmod_names:
        intmod_path = path / 'm1' / m
        if intmod_path.is_file():
            intmod = Model(intmod_path)
            intmods.append(intmod)

    res = calculate_results(
        model_4,
        continuous,
        categorical,
        method=method,
        force_posdef_covmatrix=force_posdef_covmatrix,
        force_posdef_samples=force_posdef_samples,
        cov_model=cov_model,
        rescale=rescale,
        intermediate_models=intmods,
    )
    return res