def _compute_jacobian(self, model_configuration):
    """Return the Jacobian used for linear stability analysis (LSA).

    For the "2D" LSA method the full 2-variable Jacobian of the reduced
    Epileptor is computed via calc_jac(); otherwise a square-Taylor
    approximation of the z-flow Jacobian is used, after nudging any
    supercritical equilibria to just below the bifurcation.

    :param model_configuration: object carrying equilibria (x1eq, zeq) and
        model parameters (yc, Iext1, x0, K, model_connectivity, a, b, d,
        slope, tau1, tau0). NOTE: its x1eq/zeq arrays may be corrected
        IN PLACE by this method.
    :return: the Jacobian array (fz_jacobian).
    :raises: value error (via raise_value_error) if the Jacobian contains
        nan or inf entries.
    """
    # Check if any of the equilibria are in the supercritical regime (beyond the
    # separatrix) and set it right before the bifurcation.
    # zeq aliases model_configuration.zeq (numpy array), so the in-place
    # update below also modifies the configuration's own array.
    zeq = model_configuration.zeq
    if self.lsa_method == "2D":
        # Full 2-variable (x1, z) Jacobian with linear z-mode.
        fz_jacobian = calc_jac(model_configuration.x1eq, model_configuration.zeq, model_configuration.yc,
                               model_configuration.Iext1, model_configuration.x0, model_configuration.K,
                               model_configuration.model_connectivity, model_vars=2, zmode="lin",
                               a=model_configuration.a, b=model_configuration.b, d=model_configuration.d,
                               tau1=model_configuration.tau1, tau0=model_configuration.tau0)
    else:
        # Equilibria within 1e-3 of the critical x1 value are (super)critical:
        # clamp them just below the bifurcation so the Taylor expansion is valid.
        temp = model_configuration.x1eq > X1EQ_CR_DEF - 10 ** (-3)
        if temp.any():
            correction_value = X1EQ_CR_DEF - 10 ** (-3)
            self.logger.warning("Equilibria x1eq[" + str(numpy.where(temp)[0]) + "] = "
                                + str(model_configuration.x1eq[temp]) +
                                "\nwere corrected for LSA to value: X1EQ_CR_DEF - 10 ** (-3) = "
                                + str(correction_value) + " to be sub-critical!")
            model_configuration.x1eq[temp] = correction_value
            # Recompute the z equilibria of the corrected regions only
            # (i_temp broadcasts the scalar parameters to the corrected subset).
            i_temp = numpy.ones(model_configuration.x1eq.shape)
            zeq[temp] = calc_eq_z(model_configuration.x1eq[temp], model_configuration.yc * i_temp[temp],
                                  model_configuration.Iext1 * i_temp[temp], "2d", 0.0,
                                  model_configuration.slope * i_temp[temp],
                                  model_configuration.a * i_temp[temp], model_configuration.b * i_temp[temp],
                                  model_configuration.d * i_temp[temp])
        # Square-Taylor approximation of the z-flow Jacobian around the
        # (possibly corrected) equilibria.
        fz_jacobian = calc_fz_jac_square_taylor(model_configuration.zeq, model_configuration.yc,
                                                model_configuration.Iext1, model_configuration.K,
                                                model_configuration.model_connectivity,
                                                model_configuration.a, model_configuration.b,
                                                model_configuration.d)
    # Guard against numerically invalid Jacobians before they reach the LSA solver.
    if numpy.any([numpy.any(numpy.isnan(fz_jacobian.flatten())),
                  numpy.any(numpy.isinf(fz_jacobian.flatten()))]):
        raise_value_error("nan or inf values in dfz")
    return fz_jacobian
def _compute_z_equilibrium(self, x1EQ):
    """Return the z equilibria of the 2d reduced Epileptor for the given x1 equilibria,
    using this instance's excitability parameters (yc, Iext1, slope, a, b, d)."""
    # Collect the instance's model parameters and delegate to the analytic 2d formula.
    model_params = dict(slope=self.slope, a=self.a, b=self.b, d=self.d)
    return calc_eq_z(x1EQ, self.yc, self.Iext1, "2d", **model_params)
def _compute_jacobian(self, model_configuration):
    """Return the square-Taylor z-flow Jacobian used for linear stability analysis.

    Supercritical equilibria are first clamped to just below the bifurcation.

    :param model_configuration: object carrying equilibria (x1EQ, zEQ) and
        model parameters (yc, Iext1, K, connectivity_matrix, a, b, d, slope).
        NOTE: its x1EQ/zEQ arrays may be corrected IN PLACE by this method.
    :return: the Jacobian array (fz_jacobian).
    :raises: value error (via raise_value_error) on nan/inf entries.
    """
    # Check if any of the equilibria are in the supercritical regime (beyond the
    # separatrix) and set it right before the bifurcation.
    # zEQ aliases model_configuration.zEQ, so the in-place update below also
    # modifies the configuration's own array.
    zEQ = model_configuration.zEQ
    temp = model_configuration.x1EQ > X1_EQ_CR_DEF - 10**(-3)
    if temp.any():
        correction_value = X1_EQ_CR_DEF - 10**(-3)
        # NOTE(review): "Equibria" typo lives in the runtime log string; left as-is here.
        warning("Equibria x1EQ[" + str(numpy.where(temp)[0]) + "] = "
                + str(model_configuration.x1EQ[temp]) +
                "\nwere corrected for LSA to value: X1_EQ_CR_DEF - 10 ** (-3) = "
                + str(correction_value) + " to be sub-critical!")
        model_configuration.x1EQ[temp] = correction_value
        # Recompute the z equilibria of the corrected regions only
        # (i_temp broadcasts scalar parameters to the corrected subset).
        i_temp = numpy.ones(model_configuration.x1EQ.shape)
        zEQ[temp] = calc_eq_z(model_configuration.x1EQ[temp], model_configuration.yc * i_temp[temp],
                              model_configuration.Iext1 * i_temp[temp], "2d", 0.0,
                              model_configuration.slope * i_temp[temp],
                              model_configuration.a * i_temp[temp], model_configuration.b * i_temp[temp],
                              model_configuration.d * i_temp[temp])
    # Square-Taylor approximation around the (possibly corrected) equilibria.
    fz_jacobian = calc_fz_jac_square_taylor(model_configuration.zEQ, model_configuration.yc,
                                            model_configuration.Iext1, model_configuration.K,
                                            model_configuration.connectivity_matrix,
                                            model_configuration.a, model_configuration.b,
                                            model_configuration.d)
    # Guard against numerically invalid Jacobians before they reach the LSA solver.
    if numpy.any([numpy.any(numpy.isnan(fz_jacobian.flatten())),
                  numpy.any(numpy.isinf(fz_jacobian.flatten()))]):
        raise_value_error("nan or inf values in dfz")
    return fz_jacobian
def prepare_data_for_fitting(model_configuration, hypothesis, fs, ts, dynamic_model=None, noise_intensity=None,
                             active_regions=None, active_regions_th=0.1, observation_model=3,
                             channel_inds=None, mixing=None, **kwargs):
    """Assemble the data dictionary fed to the (Stan-style) statistical fitting model.

    :param model_configuration: model configuration object (x1EQ, zEQ, x0, K, e_values,
        connectivity_matrix, and scalar-per-region parameters a, b, d, yc, Iext1, slope).
    :param hypothesis: hypothesis object (number_of_regions, propagation_strengths).
    :param fs: sampling frequency in Hz (used to set the time step dt in ms).
    :param ts: dict of time series; either a precomputed "signals" entry or raw "x1"/"z".
    :param dynamic_model: optional simulator model instance used to pick tau1/tau0 defaults.
    :param noise_intensity: optional noise level; defaults to the EpileptorDP2D entry of
        model_noise_intensity_dict.
    :param active_regions: optional indices of active regions; auto-selected from
        normalized propagation strengths when None.
    :param active_regions_th: threshold for automatic active-region selection.
    :param observation_model: observation model code (2 -> random mixing; else identity).
    :param channel_inds: optional channel indices selecting rows of a provided mixing
        matrix. (Fixed: default was a mutable list literal.)
    :param mixing: optional full mixing (gain) matrix.
    :param kwargs: per-field overrides for the priors/bounds in the data dictionary.
    :return: (data dict, tau0_def, tau1_def).
    """
    # Avoid the mutable-default-argument pitfall while keeping behavior identical.
    channel_inds = [] if channel_inds is None else channel_inds
    logger.info("Constructing data dictionary...")
    active_regions_flag = np.zeros((hypothesis.number_of_regions, ), dtype="i")
    if active_regions is None:
        # Auto-select regions whose normalized propagation strength exceeds the threshold.
        if len(hypothesis.propagation_strengths) > 0:
            active_regions = np.where(hypothesis.propagation_strengths /
                                      np.max(hypothesis.propagation_strengths) > active_regions_th)[0]
        else:
            raise_not_implemented_error("There is no other way of automatic selection of "
                                        + "active regions implemented yet!")
    if len(active_regions) < 6:
        # Too few active regions: supplement with regions of high epileptogenicity.
        active_regions = np.unique(active_regions.tolist() +
                                   (np.where(model_configuration.e_values > active_regions_th)[0]).tolist())
    active_regions_flag[active_regions] = 1
    n_active_regions = len(active_regions)
    # Default time scales taken from the simulator model, if one is given.
    if isinstance(dynamic_model, (Epileptor, EpileptorModel)):
        tau1_def = np.mean(1.0 / dynamic_model.r)
        tau0_def = np.mean(dynamic_model.tt)
    elif isinstance(dynamic_model, (EpileptorDP, EpileptorDP2D, EpileptorDPrealistic)):
        tau1_def = np.mean(dynamic_model.tau1)
        tau0_def = np.mean(dynamic_model.tau0)
    else:
        tau1_def = 0.2
        tau0_def = 40000
    # Gamma distributions' parameters
    # visualize gamma distributions here: http://homepage.divms.uiowa.edu/~mbognar/applets/gamma.html
    tau1_mu = tau1_def
    tau1 = gamma_from_mu_std(kwargs.get("tau1_mu", tau1_mu), kwargs.get("tau1_std", 3 * tau1_mu))
    tau0_mu = tau0_def
    tau0 = gamma_from_mu_std(kwargs.get("tau0_mu", tau0_mu), kwargs.get("tau0_std", 3 * 10000.0))
    K_def = np.mean(model_configuration.K)
    K = gamma_from_mu_std(kwargs.get("K_mu", K_def), kwargs.get("K_std", 10 * K_def))
    # zero effective connectivity:
    conn0 = gamma_from_mu_std(kwargs.get("conn0_mu", 0.001), kwargs.get("conn0_std", 0.001))
    if noise_intensity is None:
        sig_mu = np.mean(model_noise_intensity_dict["EpileptorDP2D"])
    else:
        sig_mu = noise_intensity
    sig = gamma_from_mu_std(kwargs.get("sig_mu", sig_mu), kwargs.get("sig_std", 3 * sig_mu))
    sig_eq_mu = (X1_EQ_CR_DEF - X1_DEF) / 3.0
    sig_eq_std = 3 * sig_eq_mu
    sig_eq = gamma_from_mu_std(kwargs.get("sig_eq_mu", sig_eq_mu), kwargs.get("sig_eq_std", sig_eq_std))
    sig_init_mu = sig_eq_mu
    sig_init_std = sig_init_mu
    sig_init = gamma_from_mu_std(kwargs.get("sig_init_mu", sig_init_mu),
                                 kwargs.get("sig_init_std", sig_init_std))
    # Build or subset the mixing (gain) matrix.
    if mixing is None or len(channel_inds) < 1:
        if observation_model == 2:
            mixing = np.random.rand(n_active_regions, n_active_regions)
            # BUGFIX: was range(len(n_active_regions)) -- n_active_regions is an int,
            # so len() raised TypeError before any row could be normalized.
            for ii in range(n_active_regions):
                mixing[ii, :] = mixing[ii, :] / np.sum(mixing[ii, :])
        else:
            observation_model = 3
            mixing = np.eye(n_active_regions)
    else:
        # Restrict the provided mixing to the requested channels and active regions,
        # then row-normalize.
        mixing = mixing[channel_inds][:, active_regions]
        for ii in range(len(channel_inds)):
            mixing[ii, :] = mixing[ii, :] / np.sum(mixing[ii, :])
    signals = ts.get("signals", None)
    if signals is None:
        # Source signals: deviation of x1 and z from their equilibria, rescaled.
        signals = (ts["x1"][:, active_regions].T - np.expand_dims(model_configuration.x1EQ[active_regions], 1)).T + \
                  (ts["z"][:, active_regions].T - np.expand_dims(model_configuration.zEQ[active_regions], 1)).T
        signals = signals / 2.75
        # Project source signals to observation channels. NOTE(review): assumed a
        # precomputed ts["signals"] is already in observation space -- confirm.
        signals = (np.dot(mixing, signals.T)).T
    data = {"n_regions": hypothesis.number_of_regions,
            "n_active_regions": n_active_regions,
            "n_nonactive_regions": hypothesis.number_of_regions - n_active_regions,
            "active_regions_flag": active_regions_flag,
            "n_time": signals.shape[0],
            "n_signals": signals.shape[1],
            "x0_nonactive": model_configuration.x0[~active_regions_flag.astype("bool")],
            "x1eq0": model_configuration.x1EQ,
            "zeq0": model_configuration.zEQ,
            "x1eq_lo": kwargs.get("x1eq_lo", -2.0),
            "x1eq_hi": kwargs.get("x1eq_hi", X1_EQ_CR_DEF),
            "x1init_lo": kwargs.get("x1init_lo", -2.0),
            "x1init_hi": kwargs.get("x1init_hi", -1.0),
            "x1_lo": kwargs.get("x1_lo", -2.5),
            "x1_hi": kwargs.get("x1_hi", 1.5),
            "z_lo": kwargs.get("z_lo", 2.0),
            "z_hi": kwargs.get("z_hi", 5.0),
            "tau1_lo": kwargs.get("tau1_lo", tau1_mu / 2),
            "tau1_hi": kwargs.get("tau1_hi", np.min([3 * tau1_mu / 2, 1.0])),
            "tau0_lo": kwargs.get("tau0_lo", np.min([tau0_mu / 2, 10])),
            "tau0_hi": kwargs.get("tau0_hi", np.max([3 * tau1_mu / 2, 30.0])),
            "tau1_a": kwargs.get("tau1_a", tau1["alpha"]),
            "tau1_b": kwargs.get("tau1_b", tau1["beta"]),
            "tau0_a": kwargs.get("tau0_a", tau0["alpha"]),
            "tau0_b": kwargs.get("tau0_b", tau0["beta"]),
            "SC": model_configuration.connectivity_matrix,
            "SC_sig": kwargs.get("SC_sig", 0.1),
            "K_lo": kwargs.get("K_lo", K_def / 10.0),
            "K_hi": kwargs.get("K_hi", 30.0 * K_def),
            "K_a": kwargs.get("K_a", K["alpha"]),
            "K_b": kwargs.get("K_b", K["beta"]),
            "gamma0": kwargs.get("gamma0", np.array([conn0["alpha"], conn0["beta"]])),
            "dt": 1000.0 / fs,
            "sig_hi": kwargs.get("sig_hi", 3 * sig_mu),
            "sig_a": kwargs.get("sig_a", sig["alpha"]),
            "sig_b": kwargs.get("sig_b", sig["beta"]),
            "sig_eq_hi": kwargs.get("sig_eq_hi", sig_eq_std),
            "sig_eq_a": kwargs.get("sig_eq_a", sig_eq["alpha"]),
            "sig_eq_b": kwargs.get("sig_eq_b", sig_eq["beta"]),
            "sig_init_mu": kwargs.get("sig_init_mu", sig_init_mu),
            "sig_init_hi": kwargs.get("sig_init_hi", sig_init_std),
            "sig_init_a": kwargs.get("sig_init_a", sig_init["alpha"]),
            "sig_init_b": kwargs.get("sig_init_b", sig_init["beta"]),
            "observation_model": observation_model,
            "signals": signals,
            "mixing": mixing,
            # BUGFIX: precedence -- intent is 1% of the signal range; the original
            # computed max - (min / 100).
            "eps_hi": kwargs.get("eps_hi",
                                 (np.max(signals.flatten()) - np.min(signals.flatten())) / 100.0),
            "eps_x0": kwargs.get("eps_x0", 0.1)}
    # Per-region model parameters must be homogeneous to be passed as scalars.
    for p in ["a", "b", "d", "yc", "Iext1", "slope"]:
        temp = getattr(model_configuration, p)
        if isinstance(temp, (np.ndarray, list)):
            # BUGFIX: was np.all(temp[0], np.array(temp)) -- the second positional
            # argument of np.all is `axis`, so this never tested homogeneity.
            if np.all(np.array(temp) == temp[0]):
                temp = temp[0]
            else:
                raise_not_implemented_error("Statistical models where not all regions have the same value "
                                            + " for parameter " + p + " are not implemented yet!")
        data.update({p: temp})
    # z-equilibrium bounds: z decreases with x1, hence the lo/hi swap.
    zeq_lo = calc_eq_z(data["x1eq_hi"], data["yc"], data["Iext1"], "2d", x2=0.0, slope=data["slope"],
                       a=data["a"], b=data["b"], d=data["d"])
    zeq_hi = calc_eq_z(data["x1eq_lo"], data["yc"], data["Iext1"], "2d", x2=0.0, slope=data["slope"],
                       a=data["a"], b=data["b"], d=data["d"])
    data.update({"zeq_lo": kwargs.get("zeq_lo", zeq_lo), "zeq_hi": kwargs.get("zeq_hi", zeq_hi)})
    data.update({"zinit_lo": kwargs.get("zinit_lo", zeq_lo - sig_init_std),
                 "zinit_hi": kwargs.get("zinit_hi", zeq_hi + sig_init_std)})
    x0cr, rx0 = calc_x0cr_r(data["yc"], data["Iext1"], data["a"], data["b"], data["d"],
                            zmode=np.array("lin"), x1_rest=X1_DEF, x1_cr=X1_EQ_CR_DEF,
                            x0def=X0_DEF, x0cr_def=X0_CR_DEF, test=False, shape=None,
                            calc_mode="non_symbol")
    data.update({"x0cr": x0cr, "rx0": rx0})
    logger.info("data dictionary completed with " + str(len(data)) + " fields:\n" + str(data.keys()))
    return data, tau0_def, tau1_def
def test_computations(self):
    """Cross-check the analytic equation helpers against their symbolic (sympy) versions.

    Builds a toy 3-region configuration, computes equilibria, dfun and Jacobians
    for the selected model flavor, then compares several analytic computations
    (x0cr/r, coupling, fx1z, Taylor expansions, fz Jacobian) with symbolically
    derived counterparts.
    """
    logger = initialize_logger(__name__, self.config.out.FOLDER_LOGS)
    # --------------------------- Toy 3-region configuration ---------------------------
    x1 = numpy.array([-4.1 / 3, -4.9 / 3, -5.0 / 3], dtype="float32")
    w = numpy.array([[0, 0.1, 0.9], [0.1, 0, 0.0], [0.9, 0.0, 0]])
    n = x1.size
    # Coupling is deliberately switched off (0.0 * K_DEF) for these checks.
    K = 0.0 * K_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
    yc = YC_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
    Iext1 = I_EXT1_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
    slope = SLOPE_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
    Iext2 = I_EXT2_DEF * numpy.ones(x1.shape, dtype=x1.dtype)
    a = A_DEF
    b = B_DEF
    d = D_DEF
    s = S_DEF
    gamma = GAMMA_DEF
    tau1 = TAU1_DEF
    tau2 = TAU2_DEF
    tau0 = TAU0_DEF
    x1, K = assert_arrays([x1, K])
    w = assert_arrays([w])  # , (x1.size, x1.size)
    zmode = numpy.array("lin")
    pmode = numpy.array("const")
    model = "EpileptorDPrealistic"
    # Equilibria of the 2d reduction and the corresponding excitabilities.
    x1eq = x1
    z = calc_eq_z(x1, yc, Iext1, "2d", x2=0.0, slope=slope, a=a, b=b, d=d, x1_neg=True)
    zeq = z
    x0cr, r = calc_x0cr_r(yc, Iext1, zmode=zmode, x1_rest=X1_DEF, x1_cr=X1EQ_CR_DEF, x0def=X0_DEF,
                          x0cr_def=X0_CR_DEF)
    x0 = calc_x0(x1, z, K, w, zmode=zmode, z_pos=True)
    calc_model_x0_to_x0_val(x0, yc, Iext1, a, b, d, zmode=numpy.array("lin"))
    # dfun and Jacobian for the selected model flavor (2d / 11d "realistic" / >=6d).
    if model == "EpileptorDP2D":
        eq = numpy.c_[x1eq, zeq].T.astype('float32')
        model_vars = 2
        dfun = calc_dfun(eq[0].T, eq[1].T, yc, Iext1, x0, K, w, model_vars, zmode=zmode, pmode=pmode,
                         x0_var=x0, slope_var=slope, Iext1_var=Iext1, Iext2_var=Iext2, K_var=K,
                         slope=slope, a=a, b=b, d=d, s=s, Iext2=Iext2, gamma=gamma,
                         tau1=tau1, tau0=tau0, tau2=tau2, output_mode="array")
        jac = calc_jac(eq[0].T, eq[1].T, yc, Iext1, x0, K, w, model_vars, zmode=zmode, pmode=pmode,
                       x1_neg=True, z_pos=True, x2_neg=False,
                       x0_var=x0, slope_var=slope, Iext1_var=Iext1, Iext2_var=Iext2, K_var=K,
                       slope=slope, a=a, b=b, d=d, s=s, Iext2=Iext2, gamma=gamma,
                       tau1=tau1, tau0=tau0, tau2=tau2)
    else:
        if model == "EpileptorDPrealistic":
            # the 11D "realistic" simulations model
            eq, slope_eq, Iext2_eq = calc_eq_11d(x0, K, w, yc, Iext1, Iext2, slope,
                                                 EpileptorDPrealistic.fun_slope_Iext2, x1,
                                                 a=a, b=b, d=d, zmode=zmode, pmode=pmode)
            model_vars = 11
            dfun = calc_dfun(eq[0].T, eq[2].T, yc, Iext1, x0, K, w, model_vars, zmode, pmode,
                             y1=eq[1].T, x2=eq[3].T, y2=eq[4].T, g=eq[5].T,
                             x0_var=eq[6].T, slope_var=eq[7].T, Iext1_var=eq[8].T, Iext2_var=eq[9].T,
                             K_var=eq[10].T, slope=slope, a=a, b=b, d=d, s=s, Iext2=Iext2, gamma=gamma,
                             tau1=tau1, tau0=tau0, tau2=tau2, output_mode="array")
            jac = calc_jac(eq[0].T, eq[2].T, yc, Iext1, x0, K, w, model_vars, zmode, pmode,
                           x1_neg=True, z_pos=True, x2_neg=False,
                           y1=eq[1].T, x2=eq[3].T, y2=eq[4].T, g=eq[5].T,
                           x0_var=eq[6].T, slope_var=eq[7].T, Iext1_var=eq[8].T, Iext2_var=eq[9].T,
                           K_var=eq[10].T, slope=slope, a=a, b=b, d=d, s=s, Iext2=Iext2, gamma=gamma,
                           tau1=tau1, tau0=tau0, tau2=tau2)
        else:
            # all >=6D models
            eq = calc_eq_6d(x0, K, w, yc, Iext1, Iext2, x1, a=a, b=b, d=d, zmode=zmode)
            model_vars = 6
            dfun = calc_dfun(eq[0].T, eq[2].T, yc, Iext1, x0, K, w, model_vars, zmode,
                             y1=eq[1].T, x2=eq[3].T, y2=eq[4].T, g=eq[5].T,
                             slope=slope, a=a, b=b, d=d, s=s, Iext2=Iext2, gamma=gamma,
                             tau1=tau1, tau0=tau0, tau2=tau2, output_mode="array")
            # NOTE(review): `r` is passed where the other branches pass `x0`; this looks
            # like a bug (x0 was probably intended). Branch is dead for the current
            # model selection ("EpileptorDPrealistic") -- confirm before relying on it.
            jac = calc_jac(eq[0].T, eq[2].T, yc, Iext1, r, K, w, model_vars, zmode,
                           x1_neg=True, z_pos=True, x2_neg=False,
                           y1=eq[1].T, x2=eq[3].T, y2=eq[4].T, g=eq[5].T,
                           slope=slope, a=a, b=b, d=d, s=s, Iext2=Iext2, gamma=gamma,
                           tau1=tau1, tau0=tau0, tau2=tau2)
    model = str(model_vars) + "d"
    # Symbolic variables for all model quantities (shape (3,) vectors plus the 2d w matrix).
    sx1, sy1, sz, sx2, sy2, sg, sx0, sx0_val, sK, syc, sIext1, sIext2, sslope, sa, sb, sd, stau1, stau0, stau2, v = \
        symbol_vars(n, ["x1", "y1", "z", "x2", "y2", "g", "x0", "x0_val", "K", "yc", "Iext1", "Iext2",
                        "slope", "a", "b", "d", "tau1", "tau0", "tau2"], shape=(3,))
    sw, vw = symbol_vars(n, ["w"], dims=2, output_flag="numpy_array")
    v.update(vw)
    del vw
    numpy.fill_diagonal(sw, 0.0)
    sw = numpy.array(sw)
    # From here on, the numeric parameters are re-bound to simple test vectors.
    a = numpy.ones((n, ))
    b = 3.0 * a
    d = 5.0 * a
    s = 6.0 * a
    tau1 = a
    tau0 = a
    tau2 = a
    x1sq = -4.0 / 3 * a
    if model == "2d":
        y1 = yc
    else:
        y1 = eq[1].T
        x2 = eq[3].T
        y2 = eq[4].T
        g = eq[5].T
        if model == "11d":
            x0_var = eq[6].T
            slope_var = eq[7].T
            Iext1_var = eq[8].T
            Iext2_var = eq[9].T
            K_var = eq[10].T
    # -------------------------------------------- Test symbolic x0cr, r calculation ----------------------------------
    logger.info("\n\nTest symbolic x0cr, r calculation...")
    x0cr2, r2 = calc_x0cr_r(syc, sIext1, zmode=zmode, x1_rest=X1_DEF, x1_cr=X1EQ_CR_DEF, x0def=X0_DEF,
                            x0cr_def=X0_CR_DEF)  # test=True
    lx0cr_r, sx0cr_r, v = symbol_eqtn_x0cr_r(n, zmode=zmode,
                                             shape=(n, ))  # symbol_calc_x0cr_r(n, zmode=zmode, shape=(3, ))
    sx0cr_r = list(sx0cr_r)
    for ii in range(2):
        sx0cr_r[ii] = Matrix(sx0cr_r[ii])
        for iv in range(n):
            # Substitute concrete parameter values into the symbolic expressions.
            sx0cr_r[ii][iv] = sx0cr_r[ii][iv].subs([(v["a"][iv], a[iv]), (v["b"][iv], b[iv]),
                                                    (v["d"][iv], d[iv]), (v["x1_rest"][iv], X1_DEF),
                                                    (v["x0_rest"][iv], X0_DEF), (v["x1_cr"][iv], X1EQ_CR_DEF),
                                                    (v["x0_cr"][iv], X0_CR_DEF)])
    assert list(x0cr2) == list(sx0cr_r[0])
    assert list(r2) == list(sx0cr_r[1])
    # -------------------------------------------- Test coupling ------------------------------------------------------
    coupling = calc_coupling(sx1, sK, sw)
    scoupling = symbol_eqtn_coupling(n, shape=(n, ))[:2]
    assert list(coupling) == list(scoupling[1])
    assert list(calc_coupling(x1, K, w)) == list(scoupling[0](x1, K, w))
    assert coupling.shape == scoupling[1].shape
    # ---------------------------------------- Test coupling derivative to x1 -----------------------------------------
    coupling_diff = calc_coupling_diff(sK, sw)
    scoupling_diff = symbol_calc_coupling_diff(n, ix=None, jx=None, K="K")[:2]
    assert coupling_diff.shape == scoupling_diff[1].shape
    # ------------------------------------- Test the fz with substitution of z via fx1 --------------------------------
    fx1z = calc_fx1z(sx1, sx0, sK, sw, syc, sIext1, sa, sb, sd, stau1, stau0, zmode=zmode)
    sfx1z = symbol_eqtn_fx1z(n, model, zmode, shape=(n, ))[:2]
    # if model == "2d":
    #     fx1z = calc_fx1z(x1, x0, K, w, yc, Iext1, a=a, b=b, d=d, tau1=tau1, tau0=tau0, model=model, zmode=zmode)
    #     s_fx1z = sfx1z[0](x1, x0, K, w, yc, Iext1, a, b, d, tau1, tau0)
    #     assert list(fx1z) == list(s_fx1z)
    # else:
    #     fx1z = calc_fx1z(x1, x0, K, w, yc, Iext1, a=a, b=b, d=d, tau1=tau1, tau0=tau0, model=model, zmode=zmode)
    #     s_fx1z = sfx1z[0](x1, x0, K, w, yc, Iext1, a, b, d, tau1, tau0)
    #     assert list(fx1z) == list(s_fx1z)
    # ------------------------------- Test the derivative to x1 of fz with substitution of z via fx1 ------------------
    fx1z_diff = calc_fx1z_diff(sx1, sK, sw, sa, sb, sd, stau1, stau0, model=model, zmode=zmode)
    sfx1z_diff = symbol_eqtn_fx1z_diff(n, model, zmode)[:2]
    # for ii in range(n):
    #     assert list(fx1z_diff[ii]) == list(sfx1z_diff[1][ii, :])
    # -------------------------------- Test symbolic fx2 with substitution of y2 via fy2 ------------------------------
    if model != "2d":
        sfx2y2 = symbol_eqtn_fx2y2(n, x2_neg=False, shape=(n, ))[:2]
    # ----------------------------------------------- Test calc_fx1_2d_taylor -----------------------------------------
    x_taylor = symbol_vars(n, ["x1lin"], shape=(n, ))[0]  # x_taylor = -4.5/3 (=x1lin)
    fx1lin = calc_fx1_2d_taylor(sx1, x_taylor, sz, syc, sIext1, sslope, sa, sb, stau1,
                                x1_neg=True, order=2, shape=(n, ))
    sfx1lin = symbol_calc_2d_taylor(n, "x1lin", order=2, x1_neg=True, slope="slope", Iext1="Iext1",
                                    shape=(n, ))[:2]
    # for ii in range(3):
    #     assert numpy.array(fx1lin[ii].expand(sx1[ii]).collect(sx1[ii])) == numpy.array(
    #         sfx1lin[1][ii].expand(sx1[ii]).collect(sx1[ii]))
    calc_fx1_2d_taylor(x1, -1.5, z, yc, Iext1, slope, a=a, b=b, d=d, tau1=tau1,
                       x1_neg=True, order=2, shape=(n, ))
    # ----------------------------------------- Test calc_fx1y1_6d_diff_x1 --------------------------------------------
    fx1y1_6d_diff_x1 = calc_fx1y1_6d_diff_x1(sx1, syc, sIext1, sa, sb, sd, stau1, stau0)
    sfx1y1_6d_diff_x1 = symbol_calc_fx1y1_6d_diff_x1(n, shape=(n, ))[:2]
    # for ii in range(n):
    #     assert fx1y1_6d_diff_x1[ii].expand(sx1[ii]).collect(sx1[ii]) == \
    #         sfx1y1_6d_diff_x1[1][ii].expand(sx1[ii]).collect(sx1[ii])
    # ------------------------- Test eq_x1_hypo_x0_optimize_fun & eq_x1_hypo_x0_optimize_jac --------------------------
    ix0 = numpy.array([1, 2])
    iE = numpy.array([0])
    # Mixed optimization vector: symbolic x1 at the x0-hypothesis regions,
    # symbolic x0 at the epileptogenicity regions.
    x = numpy.empty_like(sx1).flatten()
    x[ix0] = sx1[ix0]
    x[iE] = sx0[iE]
    eq_x1_hypo_x0_optimize(ix0, iE, x1eq, zeq, x0[ix0], K, w, yc, Iext1, a=A_DEF, b=B_DEF, d=D_DEF,
                           slope=SLOPE_DEF)
    eq_x1_hypo_x0_optimize_fun(x, ix0, iE, sx1, numpy.array(sz), sx0[ix0], sK, sw, syc, sIext1)
    eq_x1_hypo_x0_optimize_jac(x, ix0, iE, sx1, numpy.array(sz), sx0[ix0], sK, sw, sy1, sIext1)
    eq_x1_hypo_x0_optimize(ix0, iE, x1eq, zeq, x0[ix0], K, w, yc, Iext1)
    eq_x1_hypo_x0_linTaylor(ix0, iE, x1eq, zeq, x0[ix0], K, w, yc, Iext1)
    # ------------------------------------------ Test calc_fz_jac_square_taylor ---------------------------------------
    calc_fz_jac_square_taylor(numpy.array(sz), syc, sIext1, sK, sw, tau1=tau1, tau0=tau0)
    lfz_jac_square_taylor, sfz_jac_square_taylor, v = symbol_calc_fz_jac_square_taylor(n)
    sfz_jac_square_taylor = Matrix(sfz_jac_square_taylor).reshape(n, n)
    for iv in range(n):
        for jv in range(n):
            # NOTE(review): tau2 is substituted for the tau0 symbol below; currently
            # harmless because tau0 == tau1 == tau2 == ones here, but verify intent.
            sfz_jac_square_taylor[iv, jv] = sfz_jac_square_taylor[iv, jv].subs(
                [(v["x_taylor"][jv], x1sq[jv]), (v["a"][jv], a[jv]), (v["b"][jv], b[jv]),
                 (v["d"][jv], d[jv]), (v["tau1"][iv], tau1[iv]), (v["tau0"][iv], tau2[iv])])
    assert list(calc_fz_jac_square_taylor(z, yc, Iext1, K, w, tau1=tau1, tau0=tau0)[0]) == \
        list(lfz_jac_square_taylor(zeq, yc, Iext1, K, w, a, b, d, tau1, tau0, x1sq)[0])
def generate_parameters(self):
    """Generate the SDE model's parameters: the dynamic-noise amplitude (sigma)
    plus one normal prior per requested autoregressive time-series variable."""
    parameters = super(SDEProbabilisticModelBuilder, self).generate_parameters()
    self.logger.info("Generating model parameters by " + self.__class__.__name__ + "...")
    # Dynamic-noise amplitude prior (lognormal), only when requested.
    if "sigma" in self.parameters:
        self.logger.info("...sigma...")
        parameters.update({"sigma": generate_lognormal_parameter("sigma", self.sigma, 0.0, SIGMA_MAX,
                                                                 sigma=None, sigma_scale=SIGMA_SCALE,
                                                                 p_shape=(), use="scipy")})
    # Collect (name, min, max, mean) specs for the time-series parameters.
    specs = []
    if self.sde_mode == SDE_MODES.CENTERED.value:
        self.logger.info("...autoregression centered time series parameters...")
        if "x1" in self.parameters:
            specs.append(("x1", X1_MIN, X1_MAX, X1_REST))
        if "z" in self.parameters:
            # Center z on its equilibrium at the resting x1 value.
            z_rest = calc_eq_z(X1_REST, self.model_config.yc, self.model_config.Iext1, "2d", x2=0.0,
                               slope=self.model_config.slope, a=self.model_config.a,
                               b=self.model_config.b, d=self.model_config.d, x1_neg=True)
            specs.append(("z", Z_MIN, Z_MAX, z_rest))
    else:
        self.logger.info("...autoregression noncentered time series parameters...")
        # Noncentered increments are zero-mean within [-1, 1].
        for name in set(["dX1t", "dZt"]) & set(self.parameters):
            specs.append((name, -1.0, 1.0, 0.0))
    # One normal prior per selected variable over the full (time, regions) grid.
    for name, lo, hi, mu in specs:
        self.logger.info("..." + name + "...")
        parameters.update({name: generate_probabilistic_parameter(
            name, lo, hi, p_shape=(self.time_length, self.number_of_regions),
            probability_distribution=ProbabilityDistributionTypes.NORMAL,
            optimize_pdf=False, use="scipy", **{"mu": mu, "sigma": self.sigma})})
    return parameters
def generate_parameters(self):
    """Generate the ODE model's parameters: initial-condition priors, time-scale
    priors (tau1, tau0, sigma_init), and observation-model priors
    (epsilon, scale, offset)."""
    parameters = super(ODEProbabilisticModelBuilder, self).generate_parameters()
    self.logger.info("Generating model parameters by " + self.__class__.__name__ + "...")
    self.logger.info("...initial conditions' parameters...")
    if self.priors_mode == PriorsModes.INFORMATIVE.value:
        # Informative priors: center the initial state on the configured equilibria.
        x1_init = self.model_config.x1eq
        z_init = self.model_config.zeq
    else:
        # Noninformative priors: every region starts at the healthy rest state.
        x1_init = X1_REST * np.ones((self.number_of_regions, ))
        z_init = calc_eq_z(x1_init, self.model_config.yc, self.model_config.Iext1, "2d", x2=0.0,
                           slope=self.model_config.slope, a=self.model_config.a,
                           b=self.model_config.b, d=self.model_config.d, x1_neg=True)

    def normal_param(name, lo, hi, mu, sigma, shape):
        # Helper: truncated-normal prior via the scipy backend, pdf not optimized.
        return generate_probabilistic_parameter(
            name, lo, hi, p_shape=shape,
            probability_distribution=ProbabilityDistributionTypes.NORMAL,
            optimize_pdf=False, use="scipy", **{"mu": mu, "sigma": sigma})

    self.logger.info("...x1_init...")
    parameters.update({"x1_init": normal_param("x1_init", X1_MIN, X1_MAX, x1_init, self.sigma_init,
                                               (self.number_of_regions, ))})
    self.logger.info("...z_init...")
    # z gets a tighter prior (half the x1 spread).
    parameters.update({"z_init": normal_param("z_init", Z_MIN, Z_MAX, z_init, self.sigma_init / 2,
                                              (self.number_of_regions, ))})
    # Time scales
    if "tau1" in self.parameters:
        self.logger.info("...tau1...")
        parameters.update({"tau1": generate_lognormal_parameter("tau1", self.tau1, TAU1_MIN, TAU1_MAX,
                                                                sigma=None, sigma_scale=TAU1_SCALE,
                                                                p_shape=(), use="scipy")})
    if "tau0" in self.parameters:
        self.logger.info("...tau0...")
        parameters.update({"tau0": generate_lognormal_parameter("tau0", self.tau0, TAU0_MIN, TAU0_MAX,
                                                                sigma=None, sigma_scale=TAU0_SCALE,
                                                                p_shape=(), use="scipy")})
    if "sigma_init" in self.parameters:
        self.logger.info("...sigma_init...")
        parameters.update({"sigma_init": generate_lognormal_parameter("sigma_init", self.sigma_init,
                                                                      0.0, 10 * self.sigma_init,
                                                                      sigma=self.sigma_init,
                                                                      p_shape=(), use="scipy")})
    self.logger.info("...observation's model parameters...")
    if "epsilon" in self.parameters:
        self.logger.info("...epsilon...")
        parameters.update({"epsilon": generate_lognormal_parameter("epsilon", self.epsilon,
                                                                   0.0, 10 * self.epsilon,
                                                                   sigma=self.epsilon,
                                                                   p_shape=(), use="scipy")})
    if "scale" in self.parameters:
        self.logger.info("...scale...")
        parameters.update({"scale": generate_lognormal_parameter("scale", self.scale,
                                                                 0.1, 10 * self.scale,
                                                                 sigma=self.scale,
                                                                 p_shape=(), use="scipy")})
    if "offset" in self.parameters:
        self.logger.info("...offset...")
        parameters.update({"offset": normal_param("offset", self.offset - 10.0, self.offset + 10.0,
                                                  self.offset, 1.0, ())})
    return parameters