def onedmodel():
    """One dimensional model with normal prior."""

    mu = -2
    sd = 3
    x = SampledParam(norm, loc=mu, scale=sd)
    like = simple_likelihood

    return [x], like
def multidmodel():
    """Multidimensional model with normal prior."""

    mu = np.array([-6.6, 3, 1.0, -.12])
    sd = np.array([.13, 5, .9, 1.0])
    x = SampledParam(norm, loc=mu, scale=sd)
    like = simple_likelihood

    return [x], like
def multidmodel_uniform():
    """Multidimensional model with uniform priors."""

    lower = np.array([-5, -9, 5, 3])
    upper = np.array([10, 2, 7, 8])
    width = upper - lower  # scipy's uniform spans [loc, loc + scale]
    x = SampledParam(uniform, loc=lower, scale=width)
    like = simple_likelihood

    return [x], like
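# `simple_likelihood` is defined elsewhere in this module; a hypothetical
# stand-in with the expected signature (a parameter vector in, a scalar log
# probability out) could look like this:
import numpy as np
from scipy.stats import norm

def simple_likelihood(params):
    # Toy standard-normal log likelihood; illustrative only.
    return np.sum(norm(loc=0, scale=1).logpdf(params))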
    'kr_AG_allo1', 'kr_AG_allo2'
]
kfs_to_change = [
    'kf_AA_cat2', 'kf_AA_cat3', 'kf_AG_cat2', 'kf_AG_cat3', 'kf_AA_allo1',
    'kf_AA_allo2', 'kf_AA_allo3', 'kf_AG_allo1', 'kf_AG_allo2'
]
kf_idxs = [
    i for i, param in enumerate(cox2_model.parameters)
    if param.name in kfs_to_change
]

# Add PySB rate parameters to be sampled as unobserved random variables to
# DREAM with normal priors.
kd_AA_cat2 = SampledParam(
    norm,
    loc=np.log10(cox2_model.parameters['kr_AA_cat2'].value /
                 cox2_model.parameters['kf_AA_cat2'].value),
    scale=1.5)
kcat_AA2 = SampledParam(norm,
                        loc=np.log10(cox2_model.parameters['kcat_AA2'].value),
                        scale=.66)
kd_AA_cat3 = SampledParam(
    norm,
    loc=np.log10(cox2_model.parameters['kr_AA_cat3'].value /
                 cox2_model.parameters['kf_AA_cat3'].value),
    scale=1.5)
kcat_AA3 = SampledParam(norm,
                        loc=np.log10(cox2_model.parameters['kcat_AA1'].value),
                        scale=.66)
kd_AG_cat2 = SampledParam(
    norm,
rates_of_interest_mask = [
    i in idx_pars_calibrate for i, par in enumerate(model.parameters)
]

# Index of the Arrestin initial condition
arrestin_idx = [44]
jnk3_initial_value = 0.6  # total jnk3
jnk3_initial_idxs = [47, 48, 49]
kcat_idx = [36, 37]

param_values = np.array([p.value for p in model.parameters])

sampled_parameter_names = [
    SampledParam(uniform,
                 loc=np.log10(5E-8),
                 scale=np.log10(1.9E3) - np.log10(5E-8))
    for pa in param_values[rates_of_interest_mask]
]
# sampled_parameter_names = [SampledParam(norm, loc=np.log10(par), scale=2)
#                            for par in param_values[rates_of_interest_mask]]

# We calibrate the pMKK4 - Arrestin-3 reverse reaction rate. We have
# experimental data for this interaction and know that the k_r varies from
# 160 to 1068 (standard deviation).
sampled_parameter_names[0] = SampledParam(uniform,
                                          loc=np.log10(120),
                                          scale=np.log10(1200) - np.log10(120))
sampled_parameter_names[6] = SampledParam(uniform,
                                          loc=np.log10(28),
                                          scale=np.log10(280) - np.log10(28))

nchains = 5
niterations = 500000
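# A minimal sketch (separate from the script above) of the scipy.stats
# convention these priors rely on: uniform(loc, scale) is uniform on
# [loc, loc + scale], so loc=np.log10(120) with
# scale=np.log10(1200) - np.log10(120) spans 120 to 1200 in linear space.
import numpy as np
from scipy.stats import uniform

prior = uniform(loc=np.log10(120), scale=np.log10(1200) - np.log10(120))
print(10 ** prior.ppf(0.0), 10 ** prior.ppf(1.0))  # ~120 and ~1200
print(10 ** prior.rvs())  # a single draw, mapped back to linear space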
    # If simulation failed due to integrator errors, return a log probability
    # of -inf.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf

    return logp_ctotal

# Add vector of rate parameters to be sampled as unobserved random variables
# in DREAM with uniform priors.
original_params = np.log10([.04, 3.0e7, 1.0e4])

# Set upper and lower limits for uniform prior to be 3 orders of magnitude
# above and below original parameter values.
lower_limits = original_params - 3
parameters_to_sample = SampledParam(uniform, loc=lower_limits, scale=6)

# The run_dream function expects a list rather than a single variable.
sampled_parameter_names = [parameters_to_sample]

niterations = 10
converged = False
total_iterations = niterations
nchains = 5

if __name__ == '__main__':

    # Run DREAM sampling. Documentation of DREAM options is in Dream.py.
    sampled_params, log_ps = run_dream(
        sampled_parameter_names,
        likelihood,
    # Calculate log probability contribution given simulated experimental
    # values.
    logp_ctotal = np.sum(like_FAR.logpdf(res_arr))

    # If simulation failed due to integrator errors, return a log probability
    # of -inf.
    if np.isnan(logp_ctotal):
        logp_ctotal = -np.inf

    return logp_ctotal

# Add vector of rate parameters to be sampled as unobserved random variables
# in DREAM with uniform priors.
original_params = np.ones((18 * 2)) * 0.2

# Set lower limits for the uniform prior; prior widths alternate between 5
# (even indices) and 2 (odd indices) across the 36 parameters.
lower_limits = np.ones((18 * 2)) * 0.001
parameters_to_sample = SampledParam(
    uniform,
    loc=lower_limits,
    scale=[2 if x % 2 else 5 for x in range(18 * 2)])

# The run_dream function expects a list rather than a single variable.
sampled_parameter_names = [parameters_to_sample]

niterations = 100
converged = False
total_iterations = niterations
nchains = 5

if __name__ == '__main__':

    # Run DREAM sampling. Documentation of DREAM options is in Dream.py.
    sampled_params, log_ps = run_dream(sampled_parameter_names,
                                       likelihood,
data_4states = data[['ML', 'MLH', 'NEH', 'NE']].values
# Add NEH values to MLH to have only three states and keep the sum to 1
data_4states[:, 1] = data_4states[:, 1] + data_4states[:, 2]
data_3states = data_4states[:, [0, 1, 3]]
# Add a small number because the Dirichlet logpdf is undefined when a sample
# contains zeros
data_3states += 1e-10

dirichlet_likelihood = dirichlet([0.4, 5, 15])

# Standardized bounds for a truncated normal on [0, 10]
# (scipy.stats.truncnorm convention)
myclip_a = 0
myclip_b = 10
my_mean = 0.5
my_std = 0.3
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std

a0 = SampledParam(uniform, loc=0, scale=60)
a1 = SampledParam(uniform, loc=0, scale=60)
a2 = SampledParam(uniform, loc=0, scale=60)
# a3 = SampledParam(uniform, loc=0, scale=60)
sampled_params_list = [a0, a1, a2]


def likelihood(position):
    pars = np.copy(position)
    # pars = 10 ** pars
    print(pars)
    try:
        cost = np.sum([dirichlet.logpdf(sample, pars)
                       for sample in data_3states])
        print('cost', cost)
    except ValueError as e:
        print(e)
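# A minimal sketch (separate from the example above) of why the 1e-10 jitter
# and the ValueError guard exist: scipy.stats.dirichlet.logpdf requires each
# sample to lie on the simplex (non-negative entries summing to 1, within a
# tight tolerance), and a zero component makes the density undefined when the
# corresponding concentration parameter is below 1.
import numpy as np
from scipy.stats import dirichlet

alpha = [0.4, 5, 15]
on_simplex = np.array([0.1, 0.3, 0.6])
print(dirichlet.logpdf(on_simplex, alpha))  # finite log density

off_simplex = np.array([0.2, 0.3, 0.4])     # sums to 0.9, off the simplex
try:
    dirichlet.logpdf(off_simplex, alpha)
except ValueError as e:
    print('rejected:', e)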
paths = generate_enums()

vars = [
    "decisionMakingModel", "timing CCS", "offshore wind growth",
    "BOILER paths", "FURNACE paths", "COGEN paths", "leadtime_factor"
]
bounds = np.asarray([[0, 4], [2022, 2031], [0, 1], [0, 5], [0, 5], [0, 4],
                     [0.6, 1.4]])
lower_bounds = bounds[:, 0]
widths = bounds[:, 1] - bounds[:, 0]  # scipy's uniform spans [loc, loc + scale]
parameters_to_sample = SampledParam(uniform, loc=lower_bounds, scale=widths)

# Parameters for run_dream
n_iterations = 500
nchains = 3

if __name__ == '__main__':
    total_iterations = n_iterations
    converged = False
    sampled_params, log_ps = core.run_dream(parameters_to_sample,
                                            calculate_likelihood_dream_wm,
                                            niterations=n_iterations,
                                            nchains=nchains,
                                            random_start=True,
                                            start=None,
# Parameters to fit:
# -----------------------------------------------------------------------------
pysb_sampled_parameter_names = [
    'kpa', 'kSOCSon', 'R1*', 'R2*', 'kint_a', 'kint_b', 'krec_a1', 'krec_a2'
]
original_params = []
priors_list = []
priors_dict = {}
for key in pysb_sampled_parameter_names:
    # Build prior
    if key in ['kd4', 'k_d4', 'R1', 'R2']:
        original_params.append(Mixed_Model.parameters[key])
        mu = np.log10(Mixed_Model.parameters[key])
        std = 0.2
        priors_list.append(SampledParam(norm, loc=mu, scale=std))
        priors_dict.update({key: (mu, std)})
    elif key in ['R1*', 'R2*']:
        # Set mean prior
        original_params.append(Mixed_Model.parameters[key[:-1]])
        mu = np.log10(Mixed_Model.parameters[key[:-1]])
        std = 0.2
        priors_list.append(SampledParam(norm, loc=mu, scale=std))
        priors_dict.update({key[:-1] + '_mu*': (mu, std)})
        # Set std prior
        original_params.append(0.2)
        mu = np.log10(0.2)
        std = 0.1
        priors_list.append(SampledParam(norm, loc=mu, scale=std))
        priors_dict.update({key[:-1] + '_std*': (mu, std)})
    else:
like_ctot = norm(
    loc=[el[0][0] for el in Moraga_data.get_responses()['T_Epo']],
    scale=[el[0][1] for el in Moraga_data.get_responses()['T_Epo']])

# Create lists of sampled PySB parameter names to use for subbing in
# parameter values in the likelihood function.
pysb_sampled_parameter_names = ['kpa', 'k_1_epo', 'k_2_epo']
pysb_sampled_parameter_log10_values = np.log10(
    np.array([model.parameters[key] for key in pysb_sampled_parameter_names]))

priors_list = []
for idx, key in enumerate(pysb_sampled_parameter_names):
    priors_list.append(
        SampledParam(norm,
                     loc=pysb_sampled_parameter_log10_values[idx],
                     scale=2.0))

# Create likelihood function
likelihood = make_likelihood(times, Epo_doses, like_ctot, 'Epo_model',
                             pysb_sampled_parameter_names, response_species,
                             dose_species)

# Run DREAM sampling. Documentation of DREAM options is in Dream.py.
total_iterations = niterations
converged = False
sampled_params, log_ps = run_dream(
    priors_list,
    likelihood,
    start=pysb_sampled_parameter_log10_values,
    '$n_{AHL}$', '$n_{TETR}$'
]

# lower_limits = np.array([2.55, 2.35, 2.0, 2.0, 0.0, 1.0, -2.0, -1.0, -1,
#                          0.0, 0.0, 0.0])
# scale_limits = np.array([0.1, 0.1, 1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0,
#                          0.7, 0.7, 0.7])

# Limits of the parameter search, in the order given by parnames
lower_limits = np.array(
    [2.0, 2.0, 1.5, 1.5, -0.5, 1.5, -2.5, -1.5, -2.5, 0.0, 0.0, 0.0])
scale_limits = np.array(
    [1.0, 1.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 2.0, 0.7, 0.7, 0.7])
# lower_limits + np.random.random(len(scale_limits)) * scale_limits

parameters_to_sample = [
    SampledParam(uniform, loc=lower_limits, scale=scale_limits)
]

niterations = 40000
converged = False
total_iterations = niterations
nchains = 3  # number of MCMC chains
GRlim = 1.2  # GR (Gelman-Rubin) convergence limit

# Initial point can be random or fixed close to a known solution
startchains = []
for ch in range(nchains):
    startchains.append(lower_limits +
                       np.random.random(len(scale_limits)) * scale_limits)
# startchains.append(np.array([2.65928628, 2.65969698, 2.56514661, 2.72445355,
#                              0.08293827, 2.17268301, -1.50328667, -0.24599039,
#                              -0.5103528, 0.39328967, 0.32894559, 0.32731401]))
    'NonNE_obs': 0.031684600453998
}

TKO_ne_nev2_nonne_mean = [
    0.670866848446898, 0.294235217878072, 0.033886762908045
]
TKO_ne_nev2_nonne_std = [
    0.152576600276884, 0.147927688661517, 0.031684600453998
]
like_pct_data = norm(loc=TKO_ne_nev2_nonne_mean, scale=TKO_ne_nev2_nonne_std)
like_steady_state = norm(0, 10)

# PRIOR
sp_k_NE_div_0 = SampledParam(norm, loc=np.log10(.428), scale=.25)
sampled_params_list.append(sp_k_NE_div_0)
sp_k_NE_div_x = SampledParam(norm, loc=np.log10(1.05), scale=1)
sampled_params_list.append(sp_k_NE_div_x)
sp_KD_Kx_NE_div = SampledParam(norm, loc=np.log10(1000), scale=1)
sampled_params_list.append(sp_KD_Kx_NE_div)
sp_k_NE_die_0 = SampledParam(norm, loc=np.log10(0.365), scale=.5)
sampled_params_list.append(sp_k_NE_die_0)
sp_k_NE_die_x = SampledParam(norm, loc=np.log10(0.95), scale=1)
sampled_params_list.append(sp_k_NE_die_x)
sp_KD_Kx_NE_die = SampledParam(norm, loc=np.log10(1000), scale=1)
sampled_params_list.append(sp_KD_Kx_NE_die)
sp_k_NEv2_div_0 = SampledParam(norm, loc=np.log10(.428), scale=.25)
sampled_params_list.append(sp_k_NEv2_div_0)
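# A quick sketch (separate from the script above) of what these log10-space
# normal priors imply: for SampledParam(norm, loc=np.log10(1000), scale=1),
# roughly 68% of the prior mass lies within one order of magnitude of 1000.
import numpy as np
from scipy.stats import norm

prior = norm(loc=np.log10(1000), scale=1)
lo, hi = prior.interval(0.68)  # central 68% interval, in log10 units
print(10 ** lo, 10 ** hi)      # roughly 100 and 10000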
def fit_with_DREAM(sim_name, parameter_dict, likelihood):
    original_params = [parameter_dict[k] for k in parameter_dict.keys()]

    priors_list = []
    for p in original_params:
        priors_list.append(SampledParam(norm, loc=np.log(p), scale=1.0))

    # Set simulation parameters
    niterations = 10000
    converged = False
    total_iterations = niterations
    nchains = 5

    # Make save directory
    today = datetime.now()
    save_dir = "PyDREAM_" + today.strftime('%d-%m-%Y') + "_" + str(niterations)
    os.makedirs(os.path.join(os.getcwd(), save_dir), exist_ok=True)

    # Run DREAM sampling. Documentation of DREAM options is in Dream.py.
    sampled_params, log_ps = run_dream(priors_list, likelihood,
                                       start=np.log(original_params),
                                       niterations=niterations,
                                       nchains=nchains, multitry=False,
                                       gamma_levels=4, adapt_gamma=True,
                                       history_thin=1, model_name=sim_name,
                                       verbose=True)

    # Save sampling output (sampled parameter values and their corresponding
    # logps); samples and logps go to separate files.
    for chain in range(len(sampled_params)):
        np.save(os.path.join(save_dir, sim_name + '_sampled_params_chain' +
                             str(chain) + '_' + str(total_iterations)),
                sampled_params[chain])
        np.save(os.path.join(save_dir, sim_name + '_logps_chain' +
                             str(chain) + '_' + str(total_iterations)),
                log_ps[chain])

    # Check convergence and continue sampling if not converged
    GR = Gelman_Rubin(sampled_params)
    print('At iteration: ', total_iterations, ' GR = ', GR)
    np.savetxt(os.path.join(save_dir,
                            sim_name + str(total_iterations) + '.txt'), GR)

    old_samples = sampled_params
    if np.any(GR > 1.2):
        starts = [sampled_params[chain][-1, :] for chain in range(nchains)]
        while not converged:
            total_iterations += niterations
            sampled_params, log_ps = run_dream(priors_list, likelihood,
                                               start=starts,
                                               niterations=niterations,
                                               nchains=nchains,
                                               multitry=False, gamma_levels=4,
                                               adapt_gamma=True,
                                               history_thin=1,
                                               model_name=sim_name,
                                               verbose=True, restart=True)

            for chain in range(len(sampled_params)):
                np.save(os.path.join(save_dir, sim_name +
                                     '_sampled_params_chain' + str(chain) +
                                     '_' + str(total_iterations)),
                        sampled_params[chain])
                np.save(os.path.join(save_dir, sim_name + '_logps_chain' +
                                     str(chain) + '_' + str(total_iterations)),
                        log_ps[chain])

            old_samples = [np.concatenate((old_samples[chain],
                                           sampled_params[chain]))
                           for chain in range(nchains)]
            GR = Gelman_Rubin(old_samples)
            print('At iteration: ', total_iterations, ' GR = ', GR)
            np.savetxt(os.path.join(save_dir, sim_name + '_' +
                                    str(total_iterations) + '.txt'), GR)

            if np.all(GR < 1.2):
                converged = True

    try:
        # Plot output
        total_iterations = len(old_samples[0])
        burnin = int(total_iterations / 2)
        samples = np.concatenate(
            list((old_samples[i][burnin:, :] for i in range(len(old_samples)))))
        np.save(os.path.join(save_dir, sim_name + '_samples'), samples)

        ndims = len(old_samples[0][0])
        colors = sns.color_palette(n_colors=ndims)
        for dim in range(ndims):
            fig = plt.figure()
            sns.distplot(samples[:, dim], color=colors[dim])
            fig.savefig(os.path.join(save_dir, sim_name + '_dimension_' +
                                     str(dim) + '_' +
                                     list(parameter_dict.keys())[dim] + '.pdf'))

        # Convert to dataframe
        df = pd.DataFrame(samples, columns=parameter_dict.keys())
        g = sns.pairplot(df)
        for i, j in zip(*np.triu_indices_from(g.axes, 1)):
            g.axes[i, j].set_visible(False)
        g.savefig(os.path.join(save_dir, 'corner_plot.pdf'))

        # Basic statistics
        mean_parameters = np.mean(samples, axis=0)
        median_parameters = np.median(samples, axis=0)
        np.save(os.path.join(save_dir, 'mean_parameters'), mean_parameters)
        np.save(os.path.join(save_dir, 'median_parameters'), median_parameters)
        df.describe().to_csv(os.path.join(save_dir,
                                          'descriptive_statistics.csv'))
    except ImportError:
        pass

    return 0
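# A hypothetical usage sketch for the fit_with_DREAM function above. The
# parameter names, values, and toy likelihood are illustrative only; a real
# call would pass the model's rate constants and a likelihood built from
# experimental data.
import numpy as np
from scipy.stats import norm

def example_likelihood(position):
    # fit_with_DREAM passes positions in natural-log space (it uses np.log
    # for both the priors and the start point), so map back with np.exp.
    params = np.exp(position)
    return np.sum(norm(loc=[1e-3, 1e-1], scale=1.0).logpdf(params))

if __name__ == '__main__':
    example_parameter_dict = {'k_on': 1e-3, 'k_off': 1e-1}  # hypothetical rates
    fit_with_DREAM('example_sim', example_parameter_dict, example_likelihood)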
pysb_sampled_parameter_names = [
    'kpa', 'kSOCSon', 'R1', 'R2', 'kd4', 'k_d4', 'kint_a', 'kint_b',
    'krec_a2', 'krec_b2'
]

# Parameters to be sampled as unobserved random variables in DREAM:
original_params = np.log10(
    [Mixed_Model.parameters[param] for param in pysb_sampled_parameter_names])

priors_list = []
priors_dict = {}
for key in pysb_sampled_parameter_names:
    if key in ['ka1', 'ka2', 'k_a1', 'k_a2', 'R1', 'R2']:
        priors_list.append(
            SampledParam(norm,
                         loc=np.log10(Mixed_Model.parameters[key]),
                         scale=np.log10(2)))
        priors_dict.update(
            {key: (np.log10(Mixed_Model.parameters[key]), np.log10(2))})
    else:
        priors_list.append(
            SampledParam(norm,
                         loc=np.log10(Mixed_Model.parameters[key]),
                         scale=2.0))
        priors_dict.update({key: (np.log10(Mixed_Model.parameters[key]), 2.0)})

# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# Preparing experimental data
# -----------------------------------------------------------------------------
mean_data = IfnData("MacParland_Extended")
        logp_ctotal = -np.inf

    return logp_ctotal

# Add vector of PySB rate parameters to be sampled as unobserved random
# variables to DREAM with log-normal priors.
original_params = np.log10([
    Mixed_Model.get_parameters()[param]
    for param in pysb_sampled_parameter_names
])

priors_list = []
for key in pysb_sampled_parameter_names:
    priors_list.append(
        SampledParam(norm,
                     loc=np.log10(Mixed_Model.get_parameters()[key]),
                     scale=1.0))

# Set simulation parameters
niterations = 10000
converged = False
total_iterations = niterations
nchains = 5
sim_name = 'mixed_IFN'

if __name__ == '__main__':
    # Make save directory
    today = datetime.now()
    save_dir = "PyDREAM_" + today.strftime('%d-%m-%Y') + "_" + str(niterations)
    os.makedirs(os.path.join(os.getcwd(), save_dir), exist_ok=True)
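# Side sketch: a normal prior on log10(k), as built above, is equivalent to a
# log-normal prior on k itself, which is what "log-normal priors" refers to.
# The loc below is illustrative.
import numpy as np
from scipy.stats import norm

log10_prior = norm(loc=np.log10(0.001), scale=1.0)
k_draws = 10 ** log10_prior.rvs(size=100000)  # draws of k in linear space
print(np.median(k_draws))  # ~0.001, with a heavy right tail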
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_data = np.array([9810.0, 180.0, model.parameters['Smac_0'].value])
momp_var = np.array([7245000.0, 3600.0, 1e4])

like_mbid = norm(loc=exp_data['norm_IC-RP'], scale=exp_data['nrm_var_IC-RP'])
like_momp = norm(loc=momp_data, scale=momp_var)

sampled_parameter_names = [
    SampledParam(norm, loc=np.log10(pa), scale=2)
    for pa in param_values[rate_mask]
]


def likelihood(position):
    Y = np.copy(position)
    param_values[rate_mask] = 10 ** Y
    sim = solver.run(param_values=param_values)

    logp_mbid = np.sum(
        like_mbid.logpdf(sim.observables['mBid'] /
                         model.parameters['Bid_0'].value))
    momp_traj = sim.observables['cSmac']
param_values = np.array([p.value for p in model.parameters])

# USER must add commands to import/load any experimental data for use in the
# likelihood function!
experiments_avg = np.load()  # placeholder, e.g. np.load('experiments_avg.npy')
experiments_sd = np.load()   # placeholder, e.g. np.load('experiments_sd.npy')
like_data = norm(loc=experiments_avg, scale=experiments_sd)


# USER must define a likelihood function!
def likelihood(position):
    Y = np.copy(position)
    param_values[rates_mask] = 10 ** Y
    sim = solver.run(param_values=param_values).all
    logp_data = np.sum(like_data.logpdf(sim['observable']))
    return logp_data


sampled_params_list = list()
sp_k_1 = SampledParam(norm, loc=np.log10(0.002), scale=2.0)
sampled_params_list.append(sp_k_1)
sp_k_2 = SampledParam(norm, loc=np.log10(0.001), scale=2.0)
sampled_params_list.append(sp_k_2)
sp_k_4 = SampledParam(norm, loc=np.log10(0.004), scale=2.0)
sampled_params_list.append(sp_k_4)
sp_k_5 = SampledParam(uniform, loc=np.log10(0.001) - 1.0, scale=2.0)
sampled_params_list.append(sp_k_5)
sp_k_3 = SampledParam(norm, loc=np.log10(0.001), scale=2.0)
sampled_params_list.append(sp_k_3)

converged = False
sampled_params, log_ps = run_dream(parameters=sampled_params_list,
                                   likelihood=likelihood,
                                   niterations=niterations,
                                   nchains=nchains,
                                   multitry=False,