import os
import datetime
import numpy as np
import matplotlib.pyplot as plt
# Assumed imports, not shown in this excerpt: the EARM model and the
# fitted-parameter loader used below, as in the later examples on this page.
from earm.lopez_embedded import model
from pysb.util import load_params

obj_names = ['emBid', 'ecPARP', 'e2']

outdir = '/Users/lopezlab/temp/EARM/'
outfiles = {}
for name in obj_names:
    outfiles[name + '_sens'] = os.path.join(outdir, name + "_sens_scipy.txt")
    outfiles[name + '_sens_t'] = os.path.join(outdir, name + "_sens_t_scipy.txt")
    outfiles[name + '_sens_2'] = os.path.join(outdir, name + "_sens_2_scipy.txt")
OUTPUT = {key: open(fname, 'a') for key, fname in outfiles.items()}
for outfile in OUTPUT.values():
    outfile.write("-----" + str(datetime.datetime.now()) + "-----\n")
    outfile.write("n")
    for i in range(len(model.parameters_rules())):
        outfile.write("\t" + "p_" + str(i))
    outfile.write("\n")
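# Each output file now starts with a timestamped separator followed by a
# header row of the form "n\tp_0\tp_1...\tp_N", naming one column per rate
# parameter.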

# Best fit parameters
param_dict = load_params("/Users/lopezlab/git/earm/EARM_2_0_M1a_fitted_params.txt")
for param in model.parameters:
	if param.name in param_dict:
		param.value = param_dict[param.name]

# Load experimental data file
exp_data = np.genfromtxt('/Users/lopezlab/git/earm/xpdata/forfits/EC-RP_IMS-RP_IC-RP_data_for_models.csv', delimiter=',', names=True)

# Time points (same as for experiments)
tspan = exp_data['Time']
# sens
d7500 = np.loadtxt('7500_sens.txt')
d10000 = np.loadtxt('10000sens.txt')
d15000 = np.loadtxt('15000_sens.txt')
# The plotting loop below also needs d1000-d5000; assuming those files
# follow the same naming pattern (they are not shown in this excerpt):
d1000 = np.loadtxt('1000_sens.txt')
d1500 = np.loadtxt('1500_sens.txt')
d2500 = np.loadtxt('2500_sens.txt')
d5000 = np.loadtxt('5000_sens.txt')

# sens_t
t1000 = np.loadtxt('1000_senst.txt')
t1500 = np.loadtxt('1500_senst.txt')
t2500 = np.loadtxt('2500_senst.txt')
t5000 = np.loadtxt('5000_senst.txt')
t7500 = np.loadtxt('7500_senst.txt')
t10000 = np.loadtxt('10000_sens_t.txt')
t15000 = np.loadtxt('15000_sens_t.txt')
name_dict = {}
namess = np.loadtxt('names.txt', dtype=str)
k_ids = [i for i, p in enumerate(model.parameters) if p in model.parameters_rules()]
for counter, name in enumerate(namess[k_ids]):
    name_dict[counter] = name
    
    
#obj_names = ['emBid', 'ecPARP', 'e2']
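# 'Plotter' is not defined in this excerpt; a minimal stand-in consistent
# with how it is called below (one call per sensitivity array, with a legend
# added afterwards) might look like this. This is an assumption, not the
# original implementation:
def Plotter(data):
    # Draw the per-parameter sensitivity values as one curve.
    plt.plot(data)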

for i in (d1000,d1500,d2500,d5000,d7500,d10000,d15000):
    Plotter(i)
plt.legend(('1000','1500','2500','5000','7500','10000','15000'))
plt.show()

for i in (t1000,t1500,t2500,t5000,t7500,t10000,t15000):
    Plotter(i)
plt.legend(('1000','1500','2500','5000','7500','10000','15000'))
Example #3
momp_var = np.array([7245000.0, 3600.0, 1e4])

# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 10
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes-1) * tmul + 1)
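# Optional sanity check: if the experimental timepoints are evenly spaced
# (which the slice trick above requires), every tmul-th integrator timepoint
# recovers them exactly.
assert np.allclose(tspan[::tmul], exp_data['Time'])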
# Initialize solver object
solver = pysb.integrate.Solver(model, tspan, rtol=1e-5, atol=1e-5)

# Get parameters for rates only
rate_params = model.parameters_rules()
# Build a boolean mask for those params against the entire param list
rate_mask = np.array([p in rate_params for p in model.parameters])
# Build vector of nominal parameter values from the model
nominal_values = np.array([p.value for p in model.parameters])
# Set the radius of a hypercube bounding the search space
bounds_radius = 2


def objective_func(x, rate_mask, lb, ub):

    # Print out an indication of how the annealing is progressing
    print_anneal_status()

    # Apply hard bounds
    if np.any((x < lb) | (x > ub)):
        # Assumed completion (the snippet is cut off here): out-of-bounds
        # positions get an infinite objective so the annealer rejects them.
        return np.inf
import pickle
from earm.lopez_embedded import model 
import pylab as plt
import numpy as np
import scipy.interpolate
cm = plt.cm.get_cmap('RdBu')


traces = pickle.load(open('test_traces2.p', 'rb'))

params = np.asarray(traces['params'])
# Collapse the (nchains, nsamples, nparams) trace array into a single
# (nchains * nsamples, nparams) matrix of posterior samples.
params = params.reshape(-1, params.shape[2])
#params = params[:100,:]
k_names = [p.name for p in model.parameters_rules()]
#fig = plt.figure(figsize=(10,10))
#for i in range(25):
#    ax =fig.add_subplot(5,5,i)
#    weights = np.ones_like(params[:,i])/float(len(params[:,i]))
#    ax.hist(params[:,i],100,weights=weights)
    #plt.title(k_names[i])
#    ax.set_xticklabels([])
#    ax.set_yticklabels([])
#fig.tight_layout()
#plt.show()
n_params = len(k_names)
# Save a scatter plot for every pairwise combination of rate parameters.
for i in range(n_params):
    for j in range(i + 1, n_params):
        plt.plot(params[:, i], params[:, j], '.')
        plt.savefig('p%s_vs_p%s.png' % (i, j))
        plt.close()
momp_data = np.array([9810.0, 180.0, momp_obs_total])
momp_var = np.array([7245000.0, 3600.0, 1e4])

# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 100
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes-1) * tmul + 1)
# Initialize solver object
solver = pysb.integrate.Solver(earm, tspan, integrator='vode', rtol=1e-7, atol=1e-7, nsteps=10000)

cov = np.identity(len(earm.parameters_rules()))
mu = np.array([np.log10(param.value) for param in earm.parameters_rules()])
m = np.random.multivariate_normal(mu, cov, size=10*len(earm.parameters_rules()))
np.save('earm_embedded_seed.npy', m)
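# m has shape (10 * n_rules, n_rules): each row is one log10-space starting
# position drawn around the nominal rate values, saved for reuse across runs.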

import theano
import theano.tensor as t

@theano.compile.ops.as_op(itypes=[t.dvector], otypes=[t.dscalar, t.dscalar, t.dscalar])  # to use the GPU, use t.fvector types for all inputs/outputs
def likelihood(param_vector):
    # Sub in parameter values for current location in parameter space and simulate
    for i in range(len(param_vector)):
        earm.parameters_rules()[name_dict[i]].value = 10**param_vector[i]

    solver.run()
    
    e1 = {}
    for obs_name, data_name, var_name, obs_total in \
            zip(obs_names, data_names, var_names, obs_totals):
Example #6
exp_data = np.genfromtxt(data_path, delimiter=',', names=True)

# Model observable corresponding to the IMS-RP reporter (MOMP timing)
momp_obs = 'aSmac'
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_obs_total = EARM.parameters['Smac_0'].value
momp_data = np.array([9810.0, 180.0, momp_obs_total])
momp_var = np.array([7245000.0, 3600.0, 1e4])

ntimes = len(exp_data['Time'])
tmul = 10
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes-1) * tmul + 1)

rate_params = EARM.parameters_rules()
param_values = np.array([p.value for p in EARM.parameters])
rate_mask = np.array([p in rate_params for p in EARM.parameters])
k_ids = [p.value for p in EARM.parameters_rules()]
nominal_values = np.array([p.value for p in EARM.parameters])
xnominal = np.log10(nominal_values[rate_mask])
bounds_radius = 2
solver = pysb.integrate.Solver(EARM, tspan, integrator='vode', rtol=1e-6, atol=1e-6,)

def display(position):

    exp_obs_norm = exp_data[data_names].view(float).reshape(len(exp_data), -1).T
    var_norm = exp_data[var_names].view(float).reshape(len(exp_data), -1).T
    std_norm = var_norm ** 0.5
    Y=np.copy(position)
    param_values[rate_mask] = 10 ** Y
Example #7
exp_data = np.genfromtxt(data_path, delimiter=',', names=True)

# Model observable corresponding to the IMS-RP reporter (MOMP timing)
momp_obs = 'aSmac'
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_obs_total = model.parameters['Smac_0'].value
momp_data = np.array([9810.0, 180.0, momp_obs_total])
momp_var = np.array([7245000.0, 3600.0, 1e4])

ntimes = len(exp_data['Time'])
tmul = 10
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes - 1) * tmul + 1)

rate_params = model.parameters_rules()
param_values = np.array([p.value for p in model.parameters])
rate_mask = np.array([p in rate_params for p in model.parameters])
k_ids = [p.value for p in model.parameters_rules()]
nominal_values = np.array([p.value for p in model.parameters])
xnominal = np.log10(nominal_values[rate_mask])
bounds_radius = 2
solver = Solver(
    model,
    tspan,
    integrator='vode',
    rtol=1e-6,
    atol=1e-6,
)

        e2 = -np.inf
        momp_sim.fill(-np.inf)
    #error = e1_mBid + e1_cPARP + e2
    #print 'subbed values: ',[np.log10(param.value) for param in earm.parameters_rules()]

    sim_momp = momp_sim
    
    return sim_mBid, sim_cPARP, sim_momp, np.array(e1_mBid), np.array(e1_cPARP), np.array(e2) #to use gpu add .astype('float32') to end of first two arrays
    #return np.array(error)

#Setting up PyMC model
with model:
    #Add PySB rate parameters as unobserved random variables to PyMC model
    # To use gpu pass dtype='float32' and add .astype('float32') to end of np.log10(param.value)
    #params = pm.Normal('params', mu=[np.log10(param.value) for param in earm.parameters_rules()], sd=np.array([1.0]*len(earm.parameters_rules())), shape=(len(earm.parameters_rules())))
    lower_limits = np.zeros(len(earm.parameters_rules()))
    upper_limits = np.zeros(len(earm.parameters_rules()))
    starting_vals = np.zeros(len(earm.parameters_rules()))
    kf_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kf' in param.name]
    kr_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kr' in param.name]
    kc_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kc' in param.name]
    lower_limits[kf_idx] = -16    
    upper_limits[kf_idx] = -1
    # The kr entries are sampled as KD values, which are combined with the sampled kf to recover kr
    lower_limits[kr_idx] = -3
    upper_limits[kr_idx] = 15
    lower_limits[kc_idx] = -6
    upper_limits[kc_idx] = 3
    starting_vals[kf_idx] = np.log10([param.value for param in earm.parameters_rules() if 'kf' in param.name])
    #print 'starting kf vals: ',10**starting_vals[kf_idx]
    starting_vals[kr_idx] = np.log10([param.value for param in earm.parameters_rules() if 'kr' in param.name]) - starting_vals[kf_idx]
    starting_vals[kc_idx] = np.log10([param.value for param in earm.parameters_rules() if 'kc' in param.name])
params = params.reshape(np.shape(params)[0]*np.shape(params)[1],np.shape(params)[2])

def gelman_rubin_trace_dict(params):
    # Gelman-Rubin style pooled variance estimate, Vhat, computed across
    # chains: a weighted combination of the mean within-chain variance and
    # the variance of the per-chain means.
    means, Vars = [], []
    n = len(params)
    for chain in params:
        means.append(np.mean(chain, axis=0))
    vari = np.var(np.asarray(means), axis=0, ddof=1)
    for chain in params:
        Vars.append(np.var(chain, axis=0, ddof=1))
    Mean = np.mean(np.asarray(Vars), axis=0)
    Vhat = Mean * (n - 1) / n + vari / n
    return Vhat
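# Minimal usage sketch on synthetic chains (hypothetical data, not from the
# EARM run): three chains of 1000 draws over 5 parameters yield a length-5
# vector of pooled variance estimates.
_demo_chains = [np.random.randn(1000, 5) for _ in range(3)]
print(gelman_rubin_trace_dict(_demo_chains))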

param_vec_dict = convert_param_vec_dict_to_param_dict(traces, model.parameters_rules())
#print_convergence_summary(param_vec_dict)

data = params
above_avg = np.where(scores > 0.)
copy_data = data[above_avg]
print(len(copy_data))
copy_scores = scores[above_avg]


b = np.ascontiguousarray(copy_data).view(np.dtype((np.void, copy_data.dtype.itemsize * copy_data.shape[1])))

_, idx = np.unique(b, return_index=True)

copy_data = copy_data[idx]
copy_scores = copy_scores[idx]
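# The void-dtype view above packs each row's bytes into one opaque scalar so
# np.unique can deduplicate whole rows at once. A small illustration on a
# throwaway array (hypothetical values):
_demo = np.ascontiguousarray(np.array([[1., 2.], [1., 2.], [3., 4.]]))
_b = _demo.view(np.dtype((np.void, _demo.dtype.itemsize * _demo.shape[1])))
_, _idx = np.unique(_b, return_index=True)
assert len(_idx) == 2  # the duplicated [1., 2.] row collapses to one entry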
output_file_prefix = options.output_file
if output_file_prefix is None:
    output_file_prefix = ''
walk_length = options.walk_length
if walk_length is None:
    walk_length = 50000

##Start at random location in parameter space drawn from prior
#for param in model.parameters_rules():
#    new_value = np.random.normal(np.log10(param.value))
#    param.value = 10**new_value

#Start at end of last chain.
old_trace = pickle.load(open('earm_embedded_bayessb_normal_randstart.p', 'rb'))
last_position = old_trace['params'][randomseed-1][-1]
for param, new_param in zip(model.parameters_rules(), last_position):
    param.value = 10**new_param

# List of model observables and corresponding data file columns for
# point-by-point fitting
obs_names = ['mBid', 'cPARP']
data_names = ['norm_ICRP', 'norm_ECRP']
var_names = ['nrm_var_ICRP', 'nrm_var_ECRP']
# Total starting amounts of proteins in obs_names, for normalizing simulations
obs_totals = [model.parameters['Bid_0'].value,
              model.parameters['PARP_0'].value]

# Load experimental data file
if socket.gethostname() == 'Erins-MacBook-Pro.local':
    earm_path = '/Users/Erin/git/earm'
if socket.gethostname() == 'puma':
# extracted with a slice expression instead of requiring interpolation.
#tspan = np.linspace(0.0, exp_data['Time'][-1], (ntimes-1) * tmul + 1)
#tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],    (ntimes-1) * tmul + 1)

tspan = exp_data['Time']


# Initialize solver object
#solver = Solver(model, tspan, integrator='lsoda', rtol=1e-6, atol=1e-6, nsteps=20000)
solver = Solver(model, tspan, integrator='vode', with_jacobian=True, atol=1e-5, rtol=1e-5, nsteps=20000)


# Determine IDs for rate parameters, original values for all parameters to overlay, and reference values for scaling.


k_ids = [i for i,p in enumerate(model.parameters) if p in model.parameters_rules()]
k_ids_names = [p for i,p in enumerate(model.parameters) if p in model.parameters_rules()]
par_vals = np.array([p.value for p in model.parameters])

ref = np.array([p.value for p in model.parameters_rules()])

# Sample takes the following parameters: k (number of parameters), n (number of samples), scaling, verbose, and loadFile
# =====
# Run to generate samples
# sample = Sample(len(model.parameters_rules()), 7500, lambda x: scale.magnitude(x, reference=ref, orders=1.0), verbose=True)
# n_samples = 100 #7500
# sample = Sample(len(model.parameters_rules()), n_samples, lambda x: scale.linear(x, lower_bound=0.1*ref, upper_bound=10*ref), verbose=True)

# List of model observables and corresponding data file columns for point-by-point fitting
obs_names = ['mBid', 'cPARP']
data_names = ['norm_ICRP', 'norm_ECRP']
data_path = os.path.join(earm_path, "xpdata", "forfits", "EC-RP_IMS-RP_IC-RP_data_for_models.csv")
exp_data = np.genfromtxt(data_path, delimiter=",", names=True)

# Model observable corresponding to the IMS-RP reporter (MOMP timing)
momp_obs = "aSmac"
# Mean and variance of Td (delay time) and Ts (switching time) of MOMP, and
# yfinal (the last value of the IMS-RP trajectory)
momp_obs_total = model.parameters["Smac_0"].value
momp_data = np.array([9810.0, 180.0, momp_obs_total])
momp_var = np.array([7245000.0, 3600.0, 1e4])

ntimes = len(exp_data["Time"])
tmul = 10
tspan = np.linspace(exp_data["Time"][0], exp_data["Time"][-1], (ntimes - 1) * tmul + 1)

rate_params = model.parameters_rules()
param_values = np.array([p.value for p in model.parameters])
rate_mask = np.array([p in rate_params for p in model.parameters])
k_ids = [p.value for p in model.parameters_rules()]
nominal_values = np.array([p.value for p in model.parameters])
xnominal = np.log10(nominal_values[rate_mask])
bounds_radius = 2
solver = pysb.integrate.Solver(model, tspan, integrator="vode", rtol=1e-6, atol=1e-6)


def display(pars):

    exp_obs_norm = exp_data[data_names].view(float).reshape(len(exp_data), -1).T
    var_norm = exp_data[var_names].view(float).reshape(len(exp_data), -1).T
    std_norm = var_norm ** 0.5
    solver.run(pars)
    #print 'sim mBid shape: ',sim_mBid.shape
    #print 'sim cPARP shape: ',sim_cPARP.shape
    #print 'sim MOMP shape: ',momp_sim.shape
    # norm here is scipy.stats.norm; the total log-probability sums the
    # pointwise Gaussian log-densities of all three observables.
    logp = (np.sum(norm.logpdf(sim_mBid, exp_data['norm_ICRP'], exp_data['nrm_var_ICRP'])) +
            np.sum(norm.logpdf(sim_cPARP, exp_data['norm_ECRP'], exp_data['nrm_var_ECRP'])) +
            np.sum(norm.logpdf(momp_sim, momp_data, momp_var)))

    return logp

uni_param_dict = {}    
norm_param_dict = {}
uni_n = 0
norm_n = 0
uni_idx = []
norm_idx = []
kr_idx = []
name_dict = {}
for i, param in enumerate(earm.parameters_rules()):
    name_dict[i] = param.name
    if earm_rates[param.name]['type'] == 'uniform':
        uni_idx.append(i)
        uni_param_dict[param.name] = (i, uni_n)
        uni_n += 1
    elif earm_rates[param.name]['type'] == 'normal':
        norm_idx.append(i)
        norm_param_dict[param.name] = (i, norm_n)
        norm_n += 1
    if 'kr' in param.name:
        kr_idx.append(i)
uni_idx = np.array(uni_idx)
norm_idx = np.array(norm_idx)
n_uni_params = uni_n
n_norm_params = norm_n
obj_names = ['emBid', 'ecPARP', 'e2']

outdir = '/Users/lopezlab/temp/EARM/'
outfiles = {}
for name in obj_names:
    outfiles[name + '_sens'] = os.path.join(outdir, name + "_sens_scipy.txt")
    outfiles[name + '_sens_t'] = os.path.join(outdir,
                                              name + "_sens_t_scipy.txt")
    outfiles[name + '_sens_2'] = os.path.join(outdir,
                                              name + "_sens_2_scipy.txt")
OUTPUT = {key: open(fname, 'a') for key, fname in outfiles.items()}
for outfile in OUTPUT.values():
    outfile.write("-----" + str(datetime.datetime.now()) + "-----\n")
    outfile.write("n")
    for i in range(len(model.parameters_rules())):
        outfile.write("\t" + "p_" + str(i))
    outfile.write("\n")

# Best fit parameters
param_dict = load_params(
    "/Users/lopezlab/git/earm/EARM_2_0_M1a_fitted_params.txt")
for param in model.parameters:
    if param.name in param_dict:
        param.value = param_dict[param.name]

# Load experimental data file
exp_data = np.genfromtxt(
    '/Users/lopezlab/git/earm/xpdata/forfits/EC-RP_IMS-RP_IC-RP_data_for_models.csv',
    delimiter=',',
    names=True)
momp_data = np.array([9810.0, 180.0, momp_obs_total])
momp_var = np.array([7245000.0, 3600.0, 1e4])

# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 100
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes-1) * tmul + 1)
# Initialize solver object
solver = Solver(earm, tspan, integrator='vode', rtol=1e-7, atol=1e-7, nsteps=10000)

kf_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kf' in param.name]
kr_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kr' in param.name]
kc_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kc' in param.name]

earm_results = np.load('/data/lola/shockle/dream_results/dream_results_earm_embedded_normal_prior.npy')

trace_arr = earm_results
ncols = trace_arr.shape[1]

dtype = trace_arr.dtype.descr * ncols

struct = trace_arr.view(dtype)

uniq = np.unique(struct)

earm_unique_vecs = uniq.view(trace_arr.dtype).reshape(-1, ncols)
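# On NumPy >= 1.13 the void-view dance above can be replaced by a direct
# row-wise unique, which is equivalent here:
#     earm_unique_vecs = np.unique(trace_arr, axis=0)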
momp_data = np.array([9810.0, 180.0, momp_obs_total])
momp_var = np.array([7245000.0, 3600.0, 1e4])

# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 100
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes-1) * tmul + 1)
# Initialize solver object
solver = pysb.integrate.Solver(earm, tspan, integrator='vode', rtol=1e-7, atol=1e-7, nsteps=10000)

kf_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kf' in param.name]
kr_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kr' in param.name]
kc_idx = [idx for idx, param in enumerate(earm.parameters_rules()) if 'kc' in param.name]
lower_limits = np.zeros(len(earm.parameters_rules()))
upper_limits = np.zeros(len(earm.parameters_rules()))
starting_vals = np.zeros(len(earm.parameters_rules()))
lower_limits[kf_idx] = -16
upper_limits[kf_idx] = -1
# The kr entries are sampled as KD values, which are combined with the
# sampled kf to recover kr (see the note after this block).
lower_limits[kr_idx] = -3
upper_limits[kr_idx] = 15
lower_limits[kc_idx] = -6
upper_limits[kc_idx] = 3
starting_vals[kf_idx] = np.log10([param.value for param in earm.parameters_rules() if 'kf' in param.name])
starting_vals[kr_idx] = np.log10([param.value for param in earm.parameters_rules() if 'kr' in param.name]) - starting_vals[kf_idx]
starting_vals[kc_idx] = np.log10([param.value for param in earm.parameters_rules() if 'kc' in param.name])
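# Note on the kr entries: since KD = kr/kf, log10(kr) = log10(KD) + log10(kf).
# The sampled vector stores log10(KD) at each kr position, and the likelihood
# below reconstitutes kr as 10**(KD_log + kf_log) using the kf entry that
# immediately precedes it in the parameter vector.
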
def likelihood(param_vector):
    # Sub in parameter values for current location in parameter space and simulate
    for i in range(len(param_vector)):
        if i in kr_idx:
            #Sampled value is a KD value that is then used with the kf to choose a kr
            earm.parameters_rules()[name_dict[i]].value = 10**(param_vector[i]+param_vector[i-1])
        #    earm.parameters_rules()[name_dict[i]].value = 10**param_vector[i]
            #print 'KD value = ',10**param_vector[i]
            #print 'set parameter: ',earm.parameters_rules()[name_dict[i]].name,' to ',10**(param_vector[i]+param_vector[i-1])
        else:
            earm.parameters_rules()[name_dict[i]].value = 10**param_vector[i]
            #print 'set parameter: ',earm.parameters_rules()[name_dict[i]].name,' to ',10**param_vector[i]
        #earm.parameters_rules()[name_dict[i]].value = 10**param_vector[i]
    #print 'subbed kf vals: ',10**param_vector[kf_idx]
    #print 'subbed kr vals: ',10**param_vector[kr_idx]
    #print 'subbed kc vals: ',10**param_vector[kc_idx]
    solver.run()
    
    e1 = {}
    sims = {}
    for obs_name, data_name, var_name, obs_total in \
            zip(obs_names, data_names, var_names, obs_totals):
        # Get model observable trajectory (this is the slice expression
        # mentioned above in the comment for tspan)
        ysim = solver.yobs[obs_name][::tmul]
        # Normalize it to 0-1
        ysim_norm = ysim / obs_total
        # Get experimental measurement and variance
        ydata = exp_data[data_name]
        yvar = exp_data[var_name]
        # Compute error between simulation and experiment (chi-squared)
        e1[obs_name] = np.sum((ydata - ysim_norm) ** 2 / (2 * yvar)) / len(ydata)    
        sims[obs_name] = ysim_norm
    
    e1_mBid = e1['mBid'] 
    e1_mBid = -np.log10(e1_mBid)
    sim_mBid = sims['mBid']
    if np.isnan(e1_mBid):
        e1_mBid = -np.inf
        sim_mBid.fill(-np.inf)      
    e1_cPARP = e1['cPARP']
    e1_cPARP = -np.log10(e1_cPARP)
    sim_cPARP = sims['cPARP']
    if np.isnan(e1_cPARP):
        e1_cPARP = -np.inf
        sim_cPARP.fill(-np.inf)

    # Calculate Td, Ts, and final value for IMS-RP reporter
    # =====
    # Normalize trajectory
    ysim_momp = solver.yobs[momp_obs]
    if np.nanmax(ysim_momp) == 0:
        ysim_momp_norm = ysim_momp
        t10 = 0
        t90 = 0
    
    else:  
        ysim_momp_norm = ysim_momp / np.nanmax(ysim_momp)
        # Build a spline to interpolate it
        st, sc, sk = scipy.interpolate.splrep(solver.tspan, ysim_momp_norm)
        try: 
            # Use root-finding to find the point where trajectory reaches 10% and 90%
            t10 = scipy.interpolate.sproot((st, sc-0.10, sk))[0]
            t90 = scipy.interpolate.sproot((st, sc-0.90, sk))[0]
        #If integration has failed and nans are present in trajectory, 
        # interpolation will fail and an IndexError will occur
        except IndexError:
            t10 = 0
            t90 = 0
    # Calculate Td as the mean of these times
    td = (t10 + t90) / 2
    # Calculate Ts as their difference
    ts = t90 - t10
    # Get yfinal, the last element from the trajectory
    yfinal = ysim_momp[-1]
    # Build a vector of the 3 variables to fit
    momp_sim = np.array([td, ts, yfinal])  # to use the GPU, pass dtype='float32'
    
    # Perform chi-squared calculation against mean and variance vectors
    e2 = np.sum((momp_data - momp_sim) ** 2 / (2 * momp_var)) / 3
    e2 = -np.log10(e2)
    if np.isnan(e2):
        e2 = -np.inf
        momp_sim.fill(-np.inf)
    #error = e1_mBid + e1_cPARP + e2
    #print 'subbed values: ',[np.log10(param.value) for param in earm.parameters_rules()]
    #print 'mBid error: ',e1_mBid
    #print 'e1_cPARP: ',e1_cPARP
    #print 'e2: ',e2
    
    return sim_mBid, sim_cPARP, momp_sim #to use gpu add .astype('float32') to end of first two arrays
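# In these scripts the likelihood above is wrapped as a Theano op (as in the
# earlier excerpt on this page) so PyMC can evaluate it during sampling; the
# three returned arrays become the op's outputs.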
momp_obs_total = model.parameters['Smac_0'].value
momp_data = np.array([9810.0, 180.0, momp_obs_total])
momp_var = np.array([7245000.0, 3600.0, 1e4])

# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 500
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes-1) * tmul + 1)

# Get parameters for rates only
rate_params = model.parameters_rules()
# Build a boolean mask for those params against the entire param list
rate_mask = np.array([p in rate_params for p in model.parameters])
# Build vector of nominal parameter values from the model
nominal_values = np.array([p.value for p in model.parameters])

#The likelihood function is the objective function from earm.estimate_m1a.py, with small changes
def likelihood(mcmc, position):

    # Simulate model at current position in parameter space
    yobs = mcmc.simulate(position, observables=True)
    # Calculate error for point-by-point trajectory comparisons
    e1 = 0
    for obs_name, data_name, var_name, obs_total in \
            zip(obs_names, data_names, var_names, obs_totals):
        # Get model observable trajectory (this is the slice expression
Example #19
momp_var = np.array([7245000.0, 3600.0, 1e4])

# Build time points for the integrator, using the same time scale as the
# experimental data but with greater resolution to help the integrator converge.
ntimes = len(exp_data['Time'])
# Factor by which to increase time resolution
tmul = 10
# Do the sampling such that the original experimental timepoints can be
# extracted with a slice expression instead of requiring interpolation.
tspan = np.linspace(exp_data['Time'][0], exp_data['Time'][-1],
                    (ntimes - 1) * tmul + 1)
# Initialize solver object
solver = pysb.integrate.Solver(model, tspan, rtol=1e-5, atol=1e-5)

# Get parameters for rates only
rate_params = model.parameters_rules()
# Build a boolean mask for those params against the entire param list
rate_mask = np.array([p in rate_params for p in model.parameters])
# Build vector of nominal parameter values from the model
nominal_values = np.array([p.value for p in model.parameters])
# Set the radius of a hypercube bounding the search space
bounds_radius = 2


def objective_func(x, rate_mask, lb, ub):

    # Print out an indication of how the annealing is progressing
    print_anneal_status()

    # Apply hard bounds
    if np.any((x < lb) | (x > ub)):
        # Assumed completion (the snippet is cut off here): out-of-bounds
        # positions get an infinite objective so the annealer rejects them.
        return np.inf
Example #20
import os
import copy
import pickle
import numpy as np
import scipy.interpolate
import matplotlib.pyplot as plt
import datetime

##### User-defined variables
outdir = '/Users/lopezlab/temp/EARM/'
set_cupSODA_path("/Users/lopezlab/cupSODA")
GPU = 0
max_samples_per_batch = 352
#####

par_names = [p.name for p in model.parameters_rules()]
par_dict = {name: index for index, name in enumerate(par_names)}

obj_names = ['emBid', 'ecPARP', 'e2']

outfiles = {}
for name in obj_names:
    outfiles[name + '_sens'] = os.path.join(outdir, name + "_sens_cupSODA.txt")
    outfiles[name + '_sens_t'] = os.path.join(outdir, name + "_sens_t_cupSODA.txt")
    outfiles[name + '_sens_2'] = os.path.join(outdir, name + "_sens_2_cupSODA.txt")
OUTPUT = {key: open(fname, 'a') for key, fname in outfiles.items()}
for outfile in OUTPUT.values():
    outfile.write("-----" + str(datetime.datetime.now()) + "-----\n")
    outfile.write("n")
    for i in range(len(model.parameters_rules())):
        outfile.write("\t" + "p_" + str(i))
    outfile.write("\n")