def eval_fitness(self, target: list, **kwargs):
    """Evaluate the fitness of every candidate in the current population.

    Each row of ``self.pop`` (minus the bookkeeping columns) is one
    candidate parametrization. All candidates are simulated in a single
    ``grid_search`` call; fitness is the inverse of the distance between
    the simulated output and ``target`` as measured by
    ``self.fitness_measure``. Results are written back into the
    ``fitness`` column of ``self.pop`` in place.
    """
    # Gene columns are everything except the GA bookkeeping columns.
    genes = self.pop.drop(['fitness', 'sigma'], axis=1)
    cfg = self.gs_config
    results = grid_search(circuit_template=cfg['circuit_template'],
                          param_grid=genes,
                          param_map=cfg['param_map'],
                          simulation_time=cfg['simulation_time'],
                          dt=cfg['dt'],
                          sampling_step_size=cfg['sampling_step_size'],
                          permute_grid=False,
                          inputs=cfg['inputs'],
                          outputs=cfg['outputs'].copy())
    # Loop-invariant: reshape the target once so it broadcasts against
    # each candidate's transposed (vars, time) output.
    target_arr = np.array(target)[None, :]
    for row, gene_values in enumerate(genes.values):
        simulated = results.loc[:, tuple(gene_values)].values.T
        distance = self.fitness_measure(simulated, target_arr)
        # Smaller distance -> larger fitness (raises if distance == 0).
        self.pop.at[row, 'fitness'] = float(1 / distance)
for key, key_tmp, power in param_scalings: param_grid[key] = param_grid[key] * param_grid[key_tmp]**power #for key, val in param_grid.items(): # if len(val) == 1: # param_grid[key] = np.asarray(list(val)*len(etas)) results, result_map = grid_search( circuit_template="config/stn_gpe/gpe_syns_pop", param_grid=param_grid, param_map=param_map, simulation_time=T, step_size=dt, permute_grid=True, sampling_step_size=dts, inputs={}, outputs={'r_i': 'gpe/gpe_proto_syns_op/R_i'}, init_kwargs={ 'backend': 'numpy', 'solver': 'scipy', 'step_size': dt }, method='RK45') results = results * 1e3 # post-processing ################# inputs, outputs = [], [] cutoff = 100.0 for i in range(len(etas)):
# Sampling step size for the recorded output (coarser than `dt`).
dts = 1e-2

# Random uniform extrinsic drive to the PC population, one value per
# simulation step. Assumes `T` and `dt` are defined above — TODO confirm.
ext_input = np.random.uniform(3., 5., (int(T / dt), 1))

# Sweep the Jansen-Rit circuit over EIN->PC and IIN->PC coupling weights.
results = grid_search(jrc_template,
                      param_grid={
                          'w_ep': w_ein_pc,
                          'w_ip': w_iin_pc
                      },
                      param_map={
                          'w_ep': {
                              'var': [(None, 'weight')],
                              'edges': [('EINs.0', 'PCs.0', 0)]
                          },
                          'w_ip': {
                              'var': [(None, 'weight')],
                              'edges': [('IINs.0', 'PCs.0', 0)]
                          }
                      },
                      simulation_time=T,
                      dt=dt,
                      sampling_step_size=dts,
                      inputs={('PCs.0', 'Op_exc_syn.0', 'I_ext'): ext_input},
                      outputs={'r': ('PCs.0', 'Op_exc_syn.0', 'r')},
                      init_kwargs={
                          'vectorization': 'nodes',
                          'build_in_place': False,
                          'backend': 'tensorflow'
                      },
                      permute_grid=True)

from pyrates.utility.visualization import plot_psd
# example gallery sections for model compilation and simulation. # %% # Performing the parameter sweep # ------------------------------ # # To perform the parameter sweep, execute the following call to the :code:`grid_search()` function: results, results_map = grid_search( circuit_template="model_templates.jansen_rit.simple_jansenrit.JRC_simple", param_grid=param_grid, param_map=param_map, simulation_time=10.0, step_size=1e-4, sampling_step_size=1e-3, inputs={}, outputs={ 'V_pce': 'JRC/JRC_op/PSP_pc_e', 'V_pci': 'JRC/JRC_op/PSP_pc_i' }, init_kwargs={ 'backend': 'numpy', 'solver': 'scipy' }) # %% # After performing the parameter sweep, :code:`grid_search()` returns a tuple with 2 entries: # - a 2D :code:`pandas.DataFrame` that contains the simulated timeseries (1. dimension) for each output variable for # each model parametrization (2. dimension) # - a 2D :code:`pandas.DataFrame` that contains a mapping between the column names of the timeseries in the first # tuple entry (1. dimension) and the parameter values of the parameters that were defined in the paramter grid # (2. dimension)
# NOTE(review): this chunk starts mid-loop — the enclosing for-header(s)
# that define `C`, `r1`, `r2` and initialise `n` are above this excerpt.
k_l3l5_e[n] = C * r1 * r2
k_l3l5_i[n] = C * r2
n += 1

# Map each coupling parameter onto the weight of one inter-layer edge.
params = {'k_l3l5_e': k_l3l5_e, 'k_l3l5_i': k_l3l5_i, 'k_l5l3_i': k_l5l3_i}
param_map = {'k_l3l5_e': {'var': [(None, 'weight')],
                          'edges': [('L3/PC.0', 'L5/PC.0', 0)]},
             'k_l3l5_i': {'var': [(None, 'weight')],
                          'edges': [('L3/PC.0', 'L5/IIN.0', 0)]},
             'k_l5l3_i': {'var': [(None, 'weight')],
                          'edges': [('L5/PC.0', 'L3/IIN.0', 0)]},
             }

# perform simulation
results = grid_search(circuit_template="EI_circuit.CMC",
                      param_grid=params,
                      param_map=param_map,
                      inputs={("L3/PC.0", "Op_e.0", "i_in"): inp},
                      outputs={"r": ("L3/PC.0", "Op_e.0", "r")},
                      dt=dt,
                      simulation_time=T,
                      permute_grid=False,
                      sampling_step_size=1e-3)

# plotting: for every parameter combination, extract the dominant PSD
# peak and store its frequency/power indexed by the two sweep ratios.
cut_off = 1.
max_freq = np.zeros((len(ei_ratio), len(l3l5_ratio)))
freq_pow = np.zeros_like(max_freq)
for k1, k2, k3 in zip(params['k_l3l5_e'], params['k_l3l5_i'], params['k_l5l3_i']):
    if not results[k1][k2][k3].isnull().any().any():
        # plot_psd draws the PSD; the line data is read back from the axes.
        _ = plot_psd(results[k1][k2][k3], tmin=cut_off, show=False)
        # NOTE(review): `pow` shadows the builtin of the same name.
        pow = plt.gca().get_lines()[-1].get_ydata()
        freqs = plt.gca().get_lines()[-1].get_xdata()
        # Nearest grid cell for this parameter combination's ratios.
        r, c = np.argmin(np.abs(ei_ratio - k1/k2)), np.argmin(np.abs(l3l5_ratio - k2/k3))
        max_freq[r, c] = freqs[np.argmax(pow)]
        freq_pow[r, c] = np.max(pow)
        plt.close(plt.gcf())
# NOTE(review): chunk starts inside an if/elif over `c_dict` keys — the
# opening `for`/`if` lines are above this excerpt.
        c_dict[key] = np.asarray(param_grid[key]) * c_dict[key]
    elif key in param_grid:
        c_dict[key] = np.asarray(param_grid[key])

# Build the effective parameter grid from the (scaled) coefficient dict.
param_grid_tmp = pd.DataFrame.from_dict(c_dict)

# Sweep the two-population GPe model; external inputs are disabled.
results, result_map = grid_search(
    circuit_template="config_files/gpe/gpe_2pop",
    param_grid=param_grid_tmp,
    param_map=param_map,
    simulation_time=T,
    step_size=dt,
    permute=True,
    sampling_step_size=dts,
    inputs={
        #'gpe_p/gpe_proto_syns_op/I_ext': ctx,
        #'gpe_a/gpe_arky_syns_op/I_ext': ctx
    },
    outputs={
        'r_i': 'gpe_p/gpe_proto_op/R_i',
        'r_a': 'gpe_a/gpe_arky_op/R_a',
    },
    init_kwargs={
        'backend': 'numpy',
        'solver': 'scipy',
        'step_size': dt
    },
    method='RK45',
)

fig2, ax = plt.subplots(figsize=(6, 2.0))
# Scale rates (presumably 1/ms -> Hz — TODO confirm units) and plot.
results = results * 1e3
plot_timeseries(results, ax=ax)
def main(_):
    """Worker entry point for a cluster grid search.

    Loads the sweep configuration (JSON) and a parameter subgrid (HDF5)
    named on the command line via ``FLAGS``, runs ``grid_search`` over the
    subgrid, post-processes the results and writes them to a local HDF5
    result file.
    """
    # tf.config.set_soft_device_placement(True)
    config.THREADING_LAYER = 'omp'

    # Disable general warnings
    warnings.filterwarnings("ignore")

    # disable TF-gpu warnings
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    t_total = time.time()

    # Load command line arguments and create logfile
    ################################################
    print("")
    print("***LOADING COMMAND LINE ARGUMENTS***")
    t0 = time.time()

    config_file = FLAGS.config_file
    subgrid = FLAGS.subgrid
    local_res_file = FLAGS.local_res_file
    build_dir = FLAGS.build_dir
    print(f'Elapsed time: {time.time() - t0:.3f} seconds')

    # Load global config file
    #########################
    print("")
    print("***LOADING GLOBAL CONFIG FILE***")
    t0 = time.time()
    with open(config_file) as g_conf:
        global_config_dict = json.load(g_conf)
    circuit_template = global_config_dict['circuit_template']
    param_map = global_config_dict['param_map']
    dt = global_config_dict['dt']
    simulation_time = global_config_dict['simulation_time']

    # Optional parameters
    #####################
    try:
        sampling_step_size = global_config_dict['sampling_step_size']
    except KeyError:
        # Fall back to recording every simulation step.
        sampling_step_size = dt
    try:
        inputs = global_config_dict['inputs']
    except KeyError:
        inputs = {}
    try:
        outputs = global_config_dict['outputs']
    except KeyError:
        outputs = {}
    try:
        init_kwargs = global_config_dict['init_kwargs']
    except KeyError:
        init_kwargs = {}
    print(f'Elapsed time: {time.time() - t0:.3f} seconds')

    # LOAD PARAMETER GRID
    #####################
    print("")
    print("***PREPARING PARAMETER GRID***")
    t0 = time.time()

    # Load subgrid into DataFrame
    param_grid = pd.read_hdf(subgrid, key="subgrid")

    # Drop all columns that don't contain a parameter map value
    # (e.g. status, chunk_idx, err_count) since grid_search() can't
    # handle additional columns
    param_grid = param_grid[list(param_map.keys())]
    print(f'Elapsed time: {time.time() - t0:.3f} seconds')

    # COMPUTE PARAMETER GRID
    ########################
    print("")
    print("***COMPUTING PARAMETER GRID***")
    t0 = time.time()
    # With profile='t' the call returns a third value (elapsed time).
    results, result_map, t_ = grid_search(
        circuit_template=circuit_template,
        param_grid=param_grid,
        param_map=param_map,
        simulation_time=simulation_time,
        dt=dt,
        sampling_step_size=sampling_step_size,
        permute_grid=False,
        inputs=inputs,
        outputs=outputs.copy(),
        init_kwargs=init_kwargs,
        profile='t',
        build_dir=build_dir,
        njit=True,
        parallel=False)
    print(
        f'Total parameter grid computation time: {time.time()-t0:.3f} seconds')

    # Post process results and write data to local result file
    ##########################################################
    print("")
    print("***POSTPROCESSING AND CREATING RESULT FILES***")
    t0 = time.time()
    processed_results = pd.DataFrame(data=None,
                                     columns=results.columns,
                                     index=results.index)
    for idx, circuit in enumerate(result_map.iterrows()):
        # Columns of `results` belonging to this parametrization.
        circ_idx = result_map.loc[(
            result_map == tuple(circuit[1].values)).all(1), :].index
        processed_results[circ_idx] = cgs_postprocessing(
            results[circ_idx].to_numpy())

    with pd.HDFStore(local_res_file, "w") as store:
        # NOTE(review): the raw `results` are stored although
        # `processed_results` was just computed — looks like
        # `value=processed_results` may have been intended; confirm.
        store.put(key='results', value=results)
        store.put(key='result_map', value=result_map)

    # TODO: Copy local result file back to master if needed

    print(f'Result files created. Elapsed time: {time.time()-t0:.3f} seconds')
    print("")
    print(f'Total elapsed time: {time.time()-t_total:.3f} seconds')
# 'outputs': {'psp': 'muscle/muscle_op/I_acc'}, # 'verbose': False}, # loss_func=loss, # loss_kwargs={}, # strategy='best2exp', mutation=(0.5, 1.9), recombination=0.8, atol=1e-5, tol=1e-3, # polish=False, disp=True, verbose=False, workers=-1) # grid search results, result_map = grid_search( circuit_template=path, param_grid=params, param_map=param_map, simulation_time=T, step_size=dt, permute_grid=True, sampling_step_size=dts, inputs={'m1/m1_dummy/m_in': inp}, outputs={'psp': 'muscle/muscle_op/I_acc'}, init_kwargs={'backend': 'numpy', 'solver': 'scipy', 'step_size': dt}, method='RK45', ) results = results.loc[:, 'psp'] results.plot() plt.show() data = np.zeros((len(params[v1]), len(params[v2]))) Interactive2DParamPlot(data, results, x_values=params[v1], y_values=params[v2], param_map=result_map, tmin=cutoff, x_key=v1, y_key=v2) plt.show()
# NOTE(review): the two closing braces terminate a param_map dict whose
# opening lines are above this excerpt.
    }
}

# simulations
#############

results, result_map = grid_search(
    circuit_template="config/stn_gpe/stn_gpe_2pop",
    param_grid=param_grid,
    param_map=param_map,
    simulation_time=T,
    step_size=dt,
    permute=True,
    sampling_step_size=dts,
    inputs={
        #'stn/stn_op/ctx': ctx,
        #'str/str_dummy_op/I': stria
    },
    outputs={
        'r_e': 'stn/stn_syns_op/R_e',
        'r_p': 'gpe_p/gpe_proto_syns_op/R_i'
    },
    init_kwargs={
        'backend': 'numpy',
        'solver': 'scipy',
        'step_size': dt
    },
    method='RK45')

# Discard the transient before `cutoff`, rescale rates and re-index time
# (presumably ms -> s and 1/ms -> Hz — TODO confirm units).
results = results.loc[cutoff:, :] * 1e3
results.index = results.index * 1e-3
results.plot()
#plt.xlim(0, 100)
# NOTE(review): chunk starts mid-dict — the opening of `param_map` (and
# its 'd' entry) lies above this excerpt.
        'vars': ['delay'],
        'edges': [('n1/biexp_rate/r2', 'n1/biexp_rate/r_in')]
    },
    's': {
        'vars': ['spread'],
        'edges': [('n1/biexp_rate/r2', 'n1/biexp_rate/r_in')]
    }
}

out_var = 'n1/biexp_rate/r'

# Sweep delay ('d') and spread ('s') of the bi-exponential rate kernel.
r2, r_map = grid_search("config/stn_gpe/biexp_gamma",
                        param_grid,
                        param_map,
                        step_size=dt,
                        simulation_time=T,
                        sampling_step_size=dts,
                        permute_grid=True,
                        backend='numpy',
                        solver='euler',
                        outputs={'r': out_var},
                        inputs={'n1/biexp_rate/I_ext': inp},
                        clear=False)

# calculate difference between target and approximation
n = len(param_grid['d'])
m = len(param_grid['s'])
alpha = 0.95
error = np.zeros((n, m))
indices = [['_' for j in range(m)] for i in range(n)]
# Map each simulated parametrization back onto its (d, s) grid cell.
# NOTE(review): chunk is truncated — the rest of this loop body lies
# beyond this excerpt.
for idx in r_map.index:
    idx_r = np.argmin(np.abs(param_grid['d'] - r_map.at[idx, 'd']))
    idx_c = np.argmin(np.abs(param_grid['s'] - r_map.at[idx, 's']))
# NOTE(review): closing brace of a dict whose opening lies above this
# excerpt.
}

# grid searches
###############

# numpy backend grid-search
# NOTE(review): the returned result map is assigned to `param_map`,
# overwriting the input parameter map — confirm this is intentional.
results, param_map, _ = grid_search(
    circuit_template=
    "model_templates.jansen_rit.simple_jansenrit.JRC_delaycoupled",
    param_grid=params,
    param_map=param_map,
    inputs={
        #"all/JRC_op/u": np.asarray(inp, dtype=np.float32)
    },
    outputs={"v": "all/JRC_op/PSP_ein"},
    dt=dt,
    simulation_time=T,
    permute_grid=True,
    sampling_step_size=1e-3,
    init_kwargs={
        'backend': 'numpy',
        'matrix_sparseness': 0.9,
        'step_size': dt,
        'solver': 'scipy'
    },
    profile=True)

# tensorflow backend grid-search
# results, param_map, _ = grid_search(circuit_template="model_templates.jansen_rit.simple_jansenrit.JRC_delaycoupled",
#                                     param_grid=params, param_map=param_map,
#                                     inputs={"JRC1/PC/RPO_e_pc/u": np.asarray(inp1, dtype=np.float32),
#                                             "JRC2/PC/RPO_e_pc/u": np.asarray(inp2, dtype=np.float32)},
def main(_):
    """Worker entry point for a cluster grid search.

    Loads the sweep configuration (JSON) and a parameter subgrid (HDF5)
    named on the command line via ``FLAGS``, runs ``grid_search`` over
    the subgrid and writes the per-output-variable results, re-ordered to
    match the subgrid rows, to a local HDF5 result file.
    """
    # disable TF-gpu warnings
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    t_total = time.time()

    # Load command line arguments and create logfile
    ################################################
    print("")
    print("***LOADING COMMAND LINE ARGUMENTS***")
    t0 = time.time()

    config_file = FLAGS.config_file
    subgrid = FLAGS.subgrid
    local_res_file = FLAGS.local_res_file
    print(f'Elapsed time: {time.time()-t0:.3f} seconds')

    # Load grid search configuration parameters from config file
    ############################################################
    print("")
    print("***LOADING GLOBAL CONFIG FILE***")
    t0 = time.time()
    with open(config_file) as g_conf:
        global_config_dict = json.load(g_conf)
    circuit_template = global_config_dict['circuit_template']
    param_map = global_config_dict['param_map']
    sampling_step_size = global_config_dict['sampling_step_size']
    dt = global_config_dict['dt']
    simulation_time = global_config_dict['simulation_time']

    # 'inputs' and 'outputs' are optional parameters
    try:
        inputs_temp = global_config_dict['inputs']
        if inputs_temp:
            inputs = {}
            # JSON keys are strings; literal_eval restores the original
            # tuple keys expected by grid_search.
            for key, value in inputs_temp.items():
                inputs[ast.literal_eval(key)] = list(value)
        else:
            inputs = {}
    except KeyError:
        inputs = {}
    try:
        outputs_temp = global_config_dict['outputs']
        if outputs_temp:
            outputs = {}
            for key, value in outputs_temp.items():
                outputs[str(key)] = tuple(value)
        else:
            outputs = {}
    except KeyError:
        outputs = {}
    print(f'Elapsed time: {time.time()-t0:.3f} seconds')

    # Load parameter subgrid from subgrid file
    ##########################################
    print("")
    print("***PREPARING PARAMETER GRID***")
    t0 = time.time()
    param_grid = pd.read_hdf(subgrid, key="subgrid")

    # grid_search() can't handle additional columns in the parameter grid
    param_grid = param_grid.drop(['status', 'chunk_idx', 'err_count'], axis=1)
    print(f'Elapsed time: {time.time()-t0:.3f} seconds')

    # Compute parameter subgrid using grid_search
    #############################################
    print("")
    print("***COMPUTING PARAMETER GRID***")
    t0 = time.time()
    results = grid_search(circuit_template=circuit_template,
                          param_grid=param_grid,
                          param_map=param_map,
                          inputs=inputs,
                          outputs=outputs,
                          sampling_step_size=sampling_step_size,
                          dt=dt,
                          simulation_time=simulation_time)

    # Last level of the column MultiIndex holds the output variable names.
    out_vars = results.columns.levels[-1]

    print(f'Total parameter grid computation time: {time.time()-t0:.3f} seconds')

    # Post process results and write data to local result file
    ##########################################################
    print("")
    print("***POSTPROCESSING AND CREATING RESULT FILES***")
    t0 = time.time()
    with pd.HDFStore(local_res_file, "w") as store:
        for out_var in out_vars:
            # HDF keys must not contain dots.
            key = out_var.replace(".", "")
            res_lst = []
            # Order results according to rows in parameter grid
            ###################################################
            for i, idx in enumerate(param_grid.index):
                idx_list = param_grid.iloc[i].values.tolist()
                idx_list.append(out_var)
                result = results.loc[:, tuple(idx_list)].to_frame()
                result.columns.names = results.columns.names
                res_lst.append(result)
            result_ordered = pd.concat(res_lst, axis=1)

            # Postprocess ordered results (optional)
            ########################################

            # Write DataFrames to local result file
            ######################################
            store.put(key=key, value=result_ordered)

    # TODO: Copy local result file back to master if needed

    print(f'Result files created. Elapsed time: {time.time()-t0:.3f} seconds')
    print("")
    print(f'Total elapsed time: {time.time()-t_total:.3f} seconds')
# model parameters n_samples = 200 etas = np.linspace(-6.5, -4.5, num=n_samples) params = {'eta': etas} param_map = {'eta': {'vars': ['Op_sd_exp/eta'], 'nodes': ['p']}} # simulation ############ results, result_map = grid_search( circuit_template="model_templates.montbrio.simple_montbrio.QIF_sd_exp", param_grid=params, param_map=param_map, inputs={}, outputs={ "r": "p/Op_sd_exp/r", "v": "p/Op_sd_exp/v" }, step_size=dt, simulation_time=T, permute_grid=True, sampling_step_size=dts, method='RK45') # calculation of kuramoto order parameter ######################################### eta_vals = np.zeros_like(etas) sync_mean = np.zeros_like(etas) sync_var = np.zeros_like(etas) cutoff = 200.0 for i, key in enumerate(result_map.index):
# NOTE(review): chunk starts mid-dict — the opening of `param_map` (and
# its 'd' entry) lies above this excerpt.
        'vars': ['delay'],
        'edges': [(source, neuron)]
    },
    's': {
        'vars': ['spread'],
        'edges': [(source, neuron)]
    }
}

# Sweep delay ('d') and spread ('s') on the source->neuron edge.
r2, r_map = grid_search(path,
                        param_grid,
                        param_map,
                        step_size=dt,
                        simulation_time=T,
                        sampling_step_size=dts,
                        permute_grid=True,
                        init_kwargs={
                            'backend': 'numpy',
                            'step_size': dt,
                            'solver': 'scipy'
                        },
                        outputs={neuron: f'{neuron}/{neuron}_op/{target_var}'},
                        inputs={'m1/m1_dummy/m_in': inp})

# calculate difference between target and approximation
n = len(param_grid['d'])
m = len(param_grid['s'])
alpha = 0.95  # controls trade-off between accuracy and complexity of gamma-kernel convolution. alpha = 1.0 for max accuracy.
error = np.zeros((n, m))
indices = [['_' for j in range(m)] for i in range(n)]
# NOTE(review): chunk is truncated — the rest of this loop body lies
# beyond this excerpt.
for idx in r_map.index:
    idx_r = np.argmin(np.abs(param_grid['d'] - r_map.at[idx, 'd']))
# NOTE(review): chunk starts inside an if/elif over `c_dict` keys — the
# opening `for`/`if` lines are above this excerpt.
            c_dict[key] = np.asarray([c_dict[key]])
        else:
            c_dict[key] = np.asarray(param_grid[key]) * c_dict[key]
    elif key in param_grid:
        c_dict[key] = np.asarray(param_grid[key])

# Build the effective parameter grid from the (scaled) coefficient dict.
param_grid_tmp = pd.DataFrame.from_dict(c_dict)

# Sweep the striatum model; external inputs are disabled.
results, result_map = grid_search(
    circuit_template="config/stn_gpe_str/str",
    param_grid=param_grid_tmp,
    param_map=param_map,
    simulation_time=T,
    step_size=dt,
    permute=True,
    sampling_step_size=dts,
    inputs={
        #'gpe_p/gpe_proto_syns_op/I_ext': ctx,
        #'gpe_a/gpe_arky_syns_op/I_ext': ctx
    },
    outputs=outputs.copy(),
    init_kwargs={
        'backend': 'numpy',
        'solver': 'scipy',
        'step_size': dt
    },
    method='RK45',
    clear=True)

fig2, ax = plt.subplots(figsize=(6, 2.0), dpi=dpi)
# Scale rates (presumably 1/ms -> Hz — TODO confirm units) and plot one
# trace per requested output variable.
results = results * 1e3
for key in outputs:
    ax.plot(results.loc[:, key])
plt.legend(list(outputs.keys()))
# NOTE(review): chunk starts mid-dict — the opening of `param_map` lies
# above this excerpt.
    'J': {
        'vars': ['Op_sfa_syns_noise/J'],
        'nodes': ['qif']
    }
}

# simulation
############
results, result_map = grid_search(circuit_template="qifs/QIF_sfa_syns_noise",
                                  param_grid=params,
                                  param_map=param_map,
                                  inputs={},
                                  outputs={
                                      "r": "qif/Op_sfa_syns_noise/r",
                                      "v": "qif/Op_sfa_syns_noise/v"
                                  },
                                  step_size=dt,
                                  simulation_time=T,
                                  permute_grid=True,
                                  sampling_step_size=dts,
                                  method='RK45')

# visualization
###############
sigma_mean = np.zeros((len(Js), len(etas)))
sigma_range = np.zeros_like(sigma_mean)
sigmas = {}
# NOTE(review): chunk is truncated — the inner loop body lies beyond
# this excerpt.
for i, J in enumerate(Js):
    for j, eta in enumerate(etas):
# NOTE(review): chunk starts inside an if/elif over `c_dict` keys — the
# opening `for`/`if` lines are above this excerpt.
            c_dict[key] = np.asarray([c_dict[key]])
        else:
            c_dict[key] = np.asarray(param_grid[key]) * c_dict[key]
    elif key in param_grid:
        c_dict[key] = np.asarray(param_grid[key])

# Build the effective parameter grid from the (scaled) coefficient dict.
param_grid_tmp = pd.DataFrame.from_dict(c_dict)

# Sweep the STN-GPe model; external inputs are disabled. Solver options
# are passed directly rather than via init_kwargs here.
results, result_map = grid_search(
    circuit_template="config/stn_gpe_str/stn_gpe",
    param_grid=param_grid_tmp,
    param_map=param_map,
    simulation_time=T,
    step_size=dt,
    permute=True,
    sampling_step_size=dts,
    inputs={
        #'gpe_p/gpe_proto_syns_op/I_ext': ctx,
        #'gpe_a/gpe_arky_syns_op/I_ext': ctx
    },
    outputs=outputs.copy(),
    backend='numpy',
    solver='scipy',
    method='RK45',
    clear=True,
    atol=1e-8,
    rtol=1e-7)

fig2, ax = plt.subplots(figsize=(6, 2.0), dpi=dpi)
# Scale rates (presumably 1/ms -> Hz — TODO confirm units) and plot one
# trace per requested output variable.
results = results * 1e3
for key in outputs:
    ax.plot(results[key])
plt.legend(list(outputs.keys()))
# NOTE(review): chunk starts mid-dict — the opening of `param_map` lies
# above this excerpt.
    'a0': {
        'var': [('Op_e_adapt.0', 'a')],
        'nodes': ['E.0']
    }
}

# simulation
############
results = grid_search(circuit_template="../config/cmc_templates.E_adapt",
                      param_grid=params,
                      param_map=param_map,
                      inputs={},
                      outputs={
                          "r": ("E.0", "Op_e_adapt.0", "r"),
                          "v": ("E.0", "Op_e_adapt.0", "v"),
                          "e": ("E.0", "Op_e_adapt.0", "e")
                      },
                      dt=dt,
                      simulation_time=T,
                      permute_grid=True,
                      sampling_step_size=dts)

# visualization
###############

# color maps
#cm1 = create_cmap('pyrates_red', as_cmap=True, n_colors=16)
#cm2 = create_cmap('pyrates_green', as_cmap=True, n_colors=16)
#cm3 = create_cmap('pyrates_blue/pyrates_yellow', as_cmap=True, n_colors=16, pyrates_blue={'reverse': True},
#                  pyrates_yellow={'reverse': True})