def norm_readout(pname, guess_range, time, d, model, norm):
    """
    normalize parameter pname so that the simulated response size matches norm
    pname : str, name of the parameter to be normalized
    guess_range : tuple (pmin, pmax) bracketing the search interval
    time : time array passed to run_exp
    d : dict with model parameters
    model : model function passed to run_exp
    norm : value of the response area to normalize against
    """
    sim_name = "dummy"
    model_name = "dummy"

    pmin, pmax = guess_range
    d = dict(d)
    d1 = dict(d)
    d2 = dict(d)
    d1[pname] = pmin
    d2[pname] = pmax

    # simulate at both ends of the guess range to check that norm is bracketed
    df1 = run_exp(time, d1, model, sim_name, model_name)
    df2 = run_exp(time, d2, model, sim_name, model_name)
    area1 = get_area(df1.time, df1.cells)
    area2 = get_area(df2.time, df2.cells)

    if not (area1 < norm < area2):
        print("param not in guess range or nan")
        return np.nan

    # bisection: halve the interval until the simulated area is close enough to norm
    guess = (pmin + pmax) / 2
    crit = False
    counter = 0
    while not crit:
        counter = counter + 1
        if counter > 50:
            print("counter reached, stopping normalization")
            break

        d[pname] = guess
        df = run_exp(time, d, model, sim_name, model_name)
        area = get_area(df.time, df.cells)

        if area < norm:
            pmin = guess
            guess = (guess + pmax) / 2
        else:
            pmax = guess
            guess = (guess + pmin) / 2

        if np.abs(area - norm) < 0.005:
            crit = True

    return guess
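

# Usage sketch (assumption: not part of the original module). Shows how
# norm_readout could calibrate a single parameter so that a model reproduces a
# common target response size; the parameter name "r_diff" and the guess range
# (0.1, 10.0) are placeholders.
def _example_norm_readout(d, model, area_norm):
    time = np.arange(0, 20, 0.01)
    return norm_readout("r_diff", (0.1, 10.0), time, d, model, area_norm)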
def get_readouts(self):
    """
    get readouts from state array
    """
    state = self.state
    peak = readouts.get_peak_height(state.time, state.cells)
    area = readouts.get_area(state.time, state.cells)
    tau = readouts.get_peaktime(state.time, state.cells)
    decay = readouts.get_duration(state.time, state.cells)

    reads = [peak, area, tau, decay]
    read_names = ["Peak Height", "Response Size", "Peak Time", "Decay"]
    data = {"readout": read_names, "read_val": reads}
    reads_df = pd.DataFrame(data=data)
    reads_df["name"] = self.name

    # deprecated, use only to compare menten vs threshold models
    if "menten" in self.mode.__name__:
        modelname = "menten"
    else:
        modelname = "thres"
    # reads_df["model_name"] = modelname

    return reads_df
def get_readouts_from_df(self, state):
    """
    get readouts from a provided simulation output

    Parameters
    ----------
    state : pd.DataFrame
        simulation output with "time" and "cells" columns.

    Returns
    -------
    reads_df : pd.DataFrame
        one-row dataframe with peak, area, tau and decay readouts.
    """
    # get readouts
    peak = readouts.get_peak_height(state.time, state.cells)
    area = readouts.get_area(state.time, state.cells)
    tau = readouts.get_peaktime(state.time, state.cells)
    decay = readouts.get_decay(state.time, state.cells)

    # convert readouts to dataframe
    reads = [peak, area, tau, decay]
    read_names = ["peak", "area", "tau", "decay"]
    reads_df = pd.DataFrame(data=np.array([reads]), columns=read_names)

    return reads_df
def norm(self, val, pname, norm):
    """
    optimization objective: difference between simulated and target response size

    val : float, parameter value to test
    pname : str, parameter name
    norm : float, target response size

    returns float, absolute difference between target and simulated response size
    """
    self.parameters[pname] = float(val)
    # note: reading self.state here assumes it reflects the updated parameter
    state = self.state
    area = readouts.get_area(state.time, state.cells)
    return np.abs(area - norm)
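

# Usage sketch (assumption: not part of the original code). The norm method is a
# scalar objective, so it can be minimized with scipy's bounded scalar optimizer.
# The simulation instance `sim`, the parameter name "beta", the bounds and the
# target response size 50.0 are placeholders.
def _example_normalize_parameter(sim):
    from scipy.optimize import minimize_scalar
    res = minimize_scalar(sim.norm, bounds=(0.1, 10.0), args=("beta", 50.0),
                          method="bounded")
    return res.x  # parameter value whose response size is closest to the target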
def get_readouts(time, cells):
    """
    get readouts from state array
    """
    peak = readouts.get_peak_height(time, cells)
    area = readouts.get_area(time, cells)
    tau = readouts.get_peaktime(time, cells)
    decay = readouts.get_duration(time, cells)

    reads = [peak, area, tau, decay]
    read_names = ["Peak Height", "Response Size", "Peak Time", "Decay"]
    data = {"readout": read_names, "read_val": reads}
    reads_df = pd.DataFrame(data=data)
    return reads_df
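

# Usage sketch (assumption: not part of the original code). get_readouts only
# needs a time grid and a matching response curve, so a synthetic pulse is
# enough to illustrate the output format; the pulse below is a placeholder,
# not model output.
def _example_get_readouts():
    time = np.arange(0, 20, 0.01)
    cells = time * np.exp(-time)  # simple pulse-shaped response
    return get_readouts(time, cells)  # dataframe with one row per readout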
def get_readouts(self):
    """
    get readouts from state array
    """
    state = self.state
    peak = readouts.get_peak(state.time, state.cells)
    area = readouts.get_area(state.time, state.cells)
    tau = readouts.get_peaktime2(state.time, state.cells)
    decay = readouts.get_duration(state.time, state.cells)

    reads = [peak, area, tau, decay]
    read_names = ["Peak", "Area", "Peaktime", "Decay"]
    data = {"readout": read_names, "read_val": reads}
    reads_df = pd.DataFrame(data=data)
    reads_df["name"] = self.name

    if "menten" in self.mode.__name__:
        modelname = "menten"
    else:
        modelname = "thres"
    reads_df["model_name"] = modelname

    return reads_df
import numpy as np
import matplotlib.pyplot as plt
import itertools

# =============================================================================
# run experiment to get normalization conditions
# =============================================================================
time = np.arange(0, 20, 0.01)
sim_name = "dummy"
model_name = "dummy"

# readouts should be normalized for default param conditions between models
model = C_model
df = run_exp(time, d, model, sim_name, model_name)
area_norm = get_area(df.time, df.cells)

# =============================================================================
# heatmap conditions
# =============================================================================
arr1 = np.logspace(-1, 1, 30)
arr2 = arr1
name1 = "n_div"
name2 = "r_diff"
name3 = "gamma"
pnames = [name1, name2, name3]
model_list = [il2_model, timer_model, C_model]
# =============================================================================
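
# Sketch (assumption: not part of the original script). The grids above could be
# expanded into explicit (model, parameter pair, value1, value2) conditions with
# itertools, e.g. for a 2D parameter scan per model; how the original analysis
# iterates these conditions is not shown in this section.
example_conditions = [
    (m, (p1, p2), v1, v2)
    for m in model_list
    for p1, p2 in itertools.combinations(pnames, 2)
    for v1, v2 in itertools.product(arr1, arr2)
]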
# systematic feedback analysis
fb_arr = np.geomspace(1.0, 10, 50)

fb_1 = []
fb_2 = []
fb_3 = []
fb_list = [fb_1, fb_2, fb_3]

# for all delay types vary feedback strength
for d, label, l in zip(dic, labels, fb_list):
    # for each fb value get response size
    for fb in fb_arr:
        d["fb_strength"] = fb
        state = run_model(time, d)
        cells = get_cells(state, time, d)
        cells = cells[["time", "eff"]]
        area = get_area(time, cells.eff)
        l.append(area)

df = pd.DataFrame({
    "no_delay": fb_list[0],
    "small_delay": fb_list[1],
    "high_delay": fb_list[2],
    "fb_fc": fb_arr,
})
df = df.melt(id_vars="fb_fc", value_name="population response", var_name="delay")
df["mode"] = mode
df2_list.append(df)
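
# Plotting sketch (assumption: not part of the original analysis). Draws one
# response-size curve per delay type against feedback strength for a quick
# visual check of the sweep above.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
for label, values in zip(["no_delay", "small_delay", "high_delay"], fb_list):
    ax.plot(fb_arr, values, label=label)
ax.set_xscale("log")
ax.set_xlabel("feedback fold change")
ax.set_ylabel("population response")
ax.legend()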