def setup_mock_data():  # pragma: no cover
    """Build and return a mock un.Data object for testing."""
    data = un.Data()
    data_types = ["evaluations", "time", "mean", "variance",
                  "percentile_5", "percentile_95",
                  "sobol_first", "sobol_first_average",
                  "sobol_total", "sobol_total_average"]

    data.add_features(["feature1d", "TestingModel1d"])

    for data_type in data_types:
        data["feature1d"][data_type] = np.array([1., 2.])
        data["TestingModel1d"][data_type] = np.array([3., 4.])

    data["feature1d"]["labels"] = ["xlabel", "ylabel"]
    data["TestingModel1d"]["labels"] = ["xlabel", "ylabel"]

    data.uncertain_parameters = ["a", "b"]
    data.model_name = "TestingModel1d"
    data.method = "mock"
    data.seed = 10
    data.incomplete = ["a", "b"]
    data.error = ["feature1d"]

    return data
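# Hypothetical usage sketch (not part of the original tests): round-trip the
# mock data through save/load and check an attribute survives.
#
#     data = setup_mock_data()
#     data.save(os.path.join(test_data_dir, "test_setup_mock"))
#     loaded = un.Data(os.path.join(test_data_dir, "test_setup_mock"))
#     assert loaded.model_name == "TestingModel1d"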
def generate_data_data_irregular():  # pragma: no cover
    data = un.Data()
    data_types = ["evaluations", "time", "mean", "variance",
                  "percentile_5", "percentile_95",
                  "sobol_first", "sobol_first_average",
                  "sobol_total", "sobol_total_average"]

    data.add_features(["feature1d", "TestingModel1d"])

    for data_type in data_types:
        data["feature1d"][data_type] = [1., 2.]
        data["TestingModel1d"][data_type] = [3., 4.]

    # Deliberately irregular evaluations/time: nested lists of varying length,
    # bare scalars, NaNs, and empty lists.
    data["TestingModel1d"].evaluations = [[1, 2], [np.nan], [1, [2, 3], 3], [1], 3,
                                          [3, 4, 5], [1, 2], [], [3, 4, 5], [],
                                          [3, 4, 5]]
    data["TestingModel1d"].time = [[1, 2], [np.nan], [1, [2, 3], 3], [1], 3,
                                   [3, 4, 5], [1, 2], [], [3, 4, 5], [],
                                   [3, 4, 5]]

    data["feature1d"]["labels"] = ["xlabel", "ylabel"]
    data["TestingModel1d"]["labels"] = ["xlabel", "ylabel"]

    data.uncertain_parameters = ["a", "b"]
    data.model_name = "TestingModel1d"
    data.method = "mock"
    data.seed = 10
    data.incomplete = ["a", "b"]
    data.error = ["feature1d"]
    data.model_ignore = True

    data.save(os.path.join(test_data_dir, "test_save_mock_irregular"))
def save_analysis_data(ucdata_path, **kwargs):
    """Flatten the averaged first-order Sobol indices from an uncertainpy
    data file into a tidy DataFrame (one row per parameter/feature pair)
    and, if `filepath` is given, also write it out as a CSV."""
    uc_data = un.Data(ucdata_path)
    model_name = uc_data.model_name
    features = list(uc_data.keys())
    features.remove(model_name)

    filepath = kwargs.pop('filepath', None)
    uc_params = uc_data.uncertain_parameters

    sens_datalist = []
    for i, param in enumerate(uc_params):
        for feature in features:
            sens_datadict = {}
            sens_datadict['feature'] = feature
            sens_datadict['sobol_index'] = uc_data[feature].sobol_first_average[i]
            sens_datadict['param_name'] = param
            # Any remaining keyword arguments become extra columns.
            sens_datadict.update(kwargs)
            sens_datalist.append(sens_datadict)

    sens_datadf = pd.DataFrame(sens_datalist)
    if filepath:
        utility.create_filepath(filepath)
        sens_datadf.to_csv(filepath, index=False)
    return sens_datadf
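# Hypothetical usage sketch (path and extra column are assumptions, not from
# the original pipeline): export the Sobol indices of one run to CSV while
# tagging every row with the cell id.
#
#     sens_df = save_analysis_data(
#         "sensitivity_data/sa_allactive_12345.h5",
#         filepath="sensitivity_data/sa_allactive_12345.csv",
#         cell_id="12345",
#     )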
def generate_data_data():  # pragma: no cover
    data = un.Data()
    data_types = ["evaluations", "time", "mean", "variance",
                  "percentile_5", "percentile_95",
                  "sobol_first", "sobol_first_sum",
                  "sobol_total", "sobol_total_sum"]

    data.add_features(["feature1d", "TestingModel1d"])

    for data_type in data_types:
        data["feature1d"][data_type] = [1., 2.]
        data["TestingModel1d"][data_type] = [3., 4.]

    data["feature1d"]["labels"] = ["xlabel", "ylabel"]
    data["TestingModel1d"]["labels"] = ["xlabel", "ylabel"]

    data.uncertain_parameters = ["a", "b"]
    data.model_name = "TestingModel1d"
    data.method = "mock"
    data.seed = 10
    data.incomplete = ["a", "b"]

    data.save(os.path.join(test_data_dir, "test_save_mock"))
# end='1-1-2024', freq='D', name="When")
# df_out = pd.DataFrame(columns=locations, index=index)

fig, ax = plt.subplots(len(locations), 1, sharex='col')

for i, location in enumerate(locations):
    SITE, FOLDER = config(location)
    icestupa = Icestupa(location)
    icestupa.read_output()
    icestupa.self_attributes()

    variance = []
    mean = []
    evaluations = []
    data = un.Data()

    # filename1 = FOLDER['sim'] + "SE_full.h5"
    filename1 = FOLDER['sim'] + "full.h5"
    filename2 = FOLDER['sim'] + "fountain.h5"
    # filename1 = FOLDER['sim'] + "efficiency.h5"
    # print(data)

    # Shift the older campaigns onto a common year so they share one time axis.
    if location == 'schwarzsee19':
        SITE["start_date"] += pd.offsets.DateOffset(year=2023)
        SITE["end_date"] += pd.offsets.DateOffset(year=2023)
    elif location == 'guttannen20':
        SITE["start_date"] += pd.offsets.DateOffset(year=2023)
        SITE["end_date"] += pd.offsets.DateOffset(year=2023)
    elif location == 'guttannen21':
        SITE["start_date"] += pd.offsets.DateOffset(year=2022)
        SITE["end_date"] += pd.offsets.DateOffset(year=2023)
def main():
    # Read sensitivity analysis config file
    sens_config_file = sys.argv[-1]
    sens_config_dict = utility.load_json(sens_config_file)

    cell_id = sens_config_dict['Cell_id']
    cpu_count = sens_config_dict.get('cpu_count', mp.cpu_count())
    perisomatic_sa = sens_config_dict.get('run_peri_analysis', False)

    # Parameters to vary (All-active)
    select_aa_param_path = sens_config_dict['select_aa_param_path']  # knobs
    # Parameters to vary (Perisomatic)
    if perisomatic_sa:
        select_peri_param_path = sens_config_dict['select_peri_param_path']  # knobs
    select_feature_path = sens_config_dict['select_feature_path']  # knobs
    param_mod_range = sens_config_dict.get('param_mod_range', 0.1)  # knobs
    mechanism_path = sens_config_dict['mechanism']

    lr = lims.LimsReader()
    morph_path = lr.get_swc_path_from_lims(int(cell_id))

    # Config files with all the paths for the Bluepyopt simulation
    model_base_path = '/allen/aibs/mat/ateam_shared/' \
        'Mouse_Model_Fit_Metrics/{}'.format(cell_id)
    opt_config_file = os.path.join(model_base_path, 'config_file.json')
    if not os.path.exists(opt_config_file):
        opt_config = {
            "morphology": "",
            "parameters": "config/{}/parameters.json".format(cell_id),
            "mechanism": "config/{}/mechanism.json".format(cell_id),
            "protocols": "config/{}/protocols.json".format(cell_id),
            "all_protocols": "config/{}/all_protocols.json".format(cell_id),
            "features": "config/{}/features.json".format(cell_id),
            "peri_parameters": "config/{}/peri_parameters.json".format(cell_id),
            "peri_mechanism": "config/{}/peri_mechanism.json".format(cell_id)
        }
        opt_config_file = os.path.join(os.getcwd(), 'config_file.json')
        utility.save_json(opt_config_file, opt_config)

    # Optimized parameters around which select parameters are varied
    optim_param_path_aa = '/allen/aibs/mat/ateam_shared/Mouse_Model_Fit_Metrics/' \
        '{cell_id}/fitted_params/optim_param_unformatted_{cell_id}.json'.format(
            cell_id=cell_id)
    if not os.path.exists(optim_param_path_aa):
        optim_param_path_aa = '/allen/aibs/mat/ateam_shared/Mouse_Model_Fit_Metrics/' \
            '{cell_id}/fitted_params/optim_param_{cell_id}_bpopt.json'.format(
                cell_id=cell_id)

    SA_obj_aa = SA_helper(optim_param_path_aa, select_aa_param_path,
                          param_mod_range, opt_config_file)
    _, protocol_path, mech_path, feature_path, \
        param_bound_path = SA_obj_aa.load_config(model_base_path)

    # Make sure to get the parameter bounds big enough for BluePyOpt sim
    sens_param_bound_write_path_aa = "param_sensitivity_aa.json"
    optim_param_aa = SA_obj_aa.create_sa_bound(param_bound_path,
                                               sens_param_bound_write_path_aa)
    param_dict_uc_aa = SA_obj_aa.create_sens_param_dict()
    parameters_aa = {key: optim_param_aa[val]
                     for key, val in param_dict_uc_aa.items()}

    eval_handler_aa = Bpopt_Evaluator(protocol_path, feature_path,
                                      morph_path, sens_param_bound_write_path_aa,
                                      mech_path, ephys_dir=None,
                                      timed_evaluation=False)
    evaluator_aa = eval_handler_aa.create_evaluator()
    opt_aa = bpopt.optimisations.DEAPOptimisation(evaluator=evaluator_aa)

    stim_protocols = utility.load_json(protocol_path)
    stim_protocols = {key: val for key, val in stim_protocols.items()
                      if 'LongDC' in key}
    stim_dict = {key: val['stimuli'][0]['amp']
                 for key, val in stim_protocols.items()}
    sorted_stim_tuple = sorted(stim_dict.items(), key=operator.itemgetter(1))
    stim_name = sorted_stim_tuple[-1][0]  # knobs (the max amp)

    # Copy compiled modfiles
    if not os.path.isdir('x86_64'):
        raise Exception('Compiled modfiles do not exist')

    efel_features = utility.load_json(select_feature_path)
    un_features = un.EfelFeatures(features_to_run=efel_features)

    un_parameters_aa = un.Parameters(parameters_aa)
    un_parameters_aa.set_all_distributions(un.uniform(param_mod_range))
    un_model_aa = un.Model(run=nrnsim_bpopt, interpolate=True,
                           labels=["Time (ms)", "Membrane potential (mV)"],
                           opt=opt_aa, stim_protocols=stim_protocols,
                           param_dict_uc=param_dict_uc_aa,
                           stim_name=stim_name,
                           optim_param=optim_param_aa)

    # Perform the uncertainty quantification
    UQ_aa = un.UncertaintyQuantification(un_model_aa,
                                         parameters=un_parameters_aa,
                                         features=un_features)
    data_folder = 'sensitivity_data'
    sa_filename_aa = 'sa_allactive_%s.h5' % cell_id
    sa_filename_aa_csv = 'sa_allactive_%s.csv' % cell_id
    sa_data_path_aa = os.path.join(data_folder, sa_filename_aa)
    sa_aa_csv_path = os.path.join(data_folder, sa_filename_aa_csv)
    UQ_aa.quantify(seed=0, CPUs=cpu_count, data_folder=data_folder,
                   filename=sa_filename_aa)
    _ = SA_obj_aa.save_analysis_data(sa_data_path_aa,
                                     filepath=sa_aa_csv_path)
    cell_data_aa = un.Data(sa_data_path_aa)
    SA_obj_aa.plot_sobol_analysis(cell_data_aa,
                                  analysis_path='figures/sa_analysis_aa_%s.pdf' % cell_id,
                                  palette='Set1')

    # Perisomatic model
    if perisomatic_sa:
        try:
            optim_param_path_peri = None
            SA_obj_peri = SA_helper(optim_param_path_peri, select_peri_param_path,
                                    param_mod_range, opt_config_file)
            _, _, mech_path_peri, _, \
                param_bound_path_peri = SA_obj_peri.load_config(model_base_path,
                                                                perisomatic=True)
            sens_param_bound_write_path_peri = "param_sensitivity_peri.json"
            optim_param_peri = SA_obj_peri.create_sa_bound_peri(
                param_bound_path_peri, sens_param_bound_write_path_peri)
            param_dict_uc_peri = SA_obj_peri.create_sens_param_dict()
            parameters_peri = {key: optim_param_peri[val]
                               for key, val in param_dict_uc_peri.items()}
            eval_handler_peri = Bpopt_Evaluator(protocol_path, feature_path,
                                                morph_path,
                                                sens_param_bound_write_path_peri,
                                                mech_path_peri, ephys_dir=None,
                                                timed_evaluation=False)
            evaluator_peri = eval_handler_peri.create_evaluator()
            opt_peri = bpopt.optimisations.DEAPOptimisation(evaluator=evaluator_peri)

            un_parameters_peri = un.Parameters(parameters_peri)
            un_parameters_peri.set_all_distributions(un.uniform(param_mod_range))
            un_model_peri = un.Model(run=nrnsim_bpopt, interpolate=True,
                                     labels=["Time (ms)", "Membrane potential (mV)"],
                                     opt=opt_peri, stim_protocols=stim_protocols,
                                     param_dict_uc=param_dict_uc_peri,
                                     stim_name=stim_name,
                                     optim_param=optim_param_peri)
            UQ_peri = un.UncertaintyQuantification(un_model_peri,
                                                   parameters=un_parameters_peri,
                                                   features=un_features)
            sa_filename_peri = 'sa_perisomatic_%s.h5' % cell_id
            sa_filename_peri_csv = 'sa_perisomatic_%s.csv' % cell_id
            sa_data_path_peri = os.path.join(data_folder, sa_filename_peri)
            sa_peri_csv_path = os.path.join(data_folder, sa_filename_peri_csv)
            UQ_peri.quantify(seed=0, CPUs=cpu_count, data_folder=data_folder,
                             filename=sa_filename_peri)
            _ = SA_obj_peri.save_analysis_data(sa_data_path_peri,
                                               filepath=sa_peri_csv_path)
            cell_data_peri = un.Data(sa_data_path_peri)
            SA_obj_peri.plot_sobol_analysis(cell_data_peri,
                                            analysis_path='figures/sa_analysis_peri_%s.pdf' % cell_id,
                                            palette='Set2')
        except Exception as e:
            print(e)
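if __name__ == '__main__':
    # Assumed entry point (a minimal sketch): the original excerpt defines
    # main() but does not show how it is invoked.
    main()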
def generate_data_empty():  # pragma: no cover
    data = un.Data()
    data.save(os.path.join(test_data_dir, "test_save_empty"))
def calculate_error(glob_pattern, exact_data, base="data/"):
    """Compute time-averaged relative errors of the mean, variance, and
    first-order Sobol indices against an exact reference, grouped and
    averaged by the number of model evaluations."""
    files = glob.glob(base + glob_pattern)

    exact_mean = exact_data["valderrama"].mean
    exact_variance = exact_data["valderrama"].variance
    exact_sobol = exact_data["valderrama"].sobol_first

    mean_errors = {}
    variance_errors = {}
    sobol_errors = {}
    for file in tqdm(files):
        data = un.Data(file)

        mean = data["valderrama"].mean
        variance = data["valderrama"].variance
        sobol = data["valderrama"].sobol_first

        dt = data["valderrama"].time[1] - data["valderrama"].time[0]
        T = data["valderrama"].time[-1] - data["valderrama"].time[0]

        # Evaluation counts are stored in the first two entries of `evaluations`.
        nr_evaluations = data["valderrama"].evaluations[0]
        sobol_evaluations = data["valderrama"].evaluations[1]

        mean_error = dt * np.sum(np.abs((exact_mean - mean) / exact_mean)) / T
        variance_error = dt * np.sum(
            np.abs((exact_variance - variance) / exact_variance)) / T
        sobol_error = dt * np.sum(np.abs((exact_sobol - sobol) / exact_sobol),
                                  axis=1) / T
        sobol_error = np.mean(sobol_error)

        mean_errors.setdefault(nr_evaluations, []).append(mean_error)
        variance_errors.setdefault(nr_evaluations, []).append(variance_error)
        sobol_errors.setdefault(sobol_evaluations, []).append(sobol_error)

        del data

    sorted_nr_evaluations = []
    average_mean_errors = []
    average_variance_errors = []
    for evaluation in sorted(mean_errors.keys()):
        sorted_nr_evaluations.append(evaluation)
        average_mean_errors.append(np.mean(mean_errors[evaluation]))
        average_variance_errors.append(np.mean(variance_errors[evaluation]))

    sorted_sobol_evaluations = []
    average_sobol_errors = []
    for evaluation in sorted(sobol_errors.keys()):
        sorted_sobol_evaluations.append(evaluation)
        average_sobol_errors.append(np.mean(sobol_errors[evaluation]))

    return (sorted_nr_evaluations, average_mean_errors, average_variance_errors,
            sorted_sobol_evaluations, average_sobol_errors)
# 3 uncertain parameters
exact_data_3 = un.Data("data/parameters_3/exact.h5")

pc_evaluations_3, pc_mean_errors_3, pc_variance_errors_3, \
    pc_sobol_evaluations_3, pc_sobol_errors_3 = calculate_error(
        "parameters_3/pc_*", exact_data_3)

mc_evaluations_3, mc_mean_errors_3, mc_variance_errors_3, \
    mc_sobol_evaluations_3, mc_sobol_errors_3 = calculate_error(
        "parameters_3/mc_*", exact_data_3)

# 11 uncertain parameters
exact_data_11 = un.Data("data/parameters_11/exact.h5")

pc_evaluations_11, pc_mean_errors_11, pc_variance_errors_11, \
    pc_sobol_evaluations_11, pc_sobol_errors_11 = calculate_error(
        "parameters_11/pc_*", exact_data_11)

mc_evaluations_11, mc_mean_errors_11, mc_variance_errors_11, \
    mc_sobol_evaluations_11, mc_sobol_errors_11 = calculate_error(
        "parameters_11/mc_*", exact_data_11)

with h5py.File("pc_mc.h5", "w") as f:
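    # The original file is truncated at the `with` statement above. A minimal
    # sketch of the remainder, assuming each error curve is written to its own
    # dataset named after the variable (dataset names are assumptions):
    for name, values in [
        ("pc_evaluations_3", pc_evaluations_3),
        ("pc_mean_errors_3", pc_mean_errors_3),
        ("pc_variance_errors_3", pc_variance_errors_3),
        ("pc_sobol_evaluations_3", pc_sobol_evaluations_3),
        ("pc_sobol_errors_3", pc_sobol_errors_3),
        ("mc_evaluations_3", mc_evaluations_3),
        ("mc_mean_errors_3", mc_mean_errors_3),
        ("mc_variance_errors_3", mc_variance_errors_3),
        ("mc_sobol_evaluations_3", mc_sobol_evaluations_3),
        ("mc_sobol_errors_3", mc_sobol_errors_3),
        ("pc_evaluations_11", pc_evaluations_11),
        ("pc_mean_errors_11", pc_mean_errors_11),
        ("pc_variance_errors_11", pc_variance_errors_11),
        ("pc_sobol_evaluations_11", pc_sobol_evaluations_11),
        ("pc_sobol_errors_11", pc_sobol_errors_11),
        ("mc_evaluations_11", mc_evaluations_11),
        ("mc_mean_errors_11", mc_mean_errors_11),
        ("mc_variance_errors_11", mc_variance_errors_11),
        ("mc_sobol_evaluations_11", mc_sobol_evaluations_11),
        ("mc_sobol_errors_11", mc_sobol_errors_11),
    ]:
        f.create_dataset(name, data=values)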
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import h5py
import chaospy as cp
import uncertainpy as un  # needed for un.Data below; missing from the original imports

from HodgkinHuxley import HodgkinHuxley
from prettyplot import prettyPlot, set_xlabel, set_ylabel, get_colormap
from prettyplot import fontsize, labelsize, titlesize, spines_color, set_style
from prettyplot import prettyBar, get_colormap_tableu20

labelsize = 16
ticksize = 14

data = un.Data("valderrama.h5")

time = data["valderrama"].time
mean = data["valderrama"].mean
variance = data["valderrama"].variance
percentile_95 = data["valderrama"].percentile_95
percentile_5 = data["valderrama"].percentile_5
sobol = data["valderrama"].sobol_first
V = data["valderrama"].evaluations

colors = [(0.898, 0, 0), (0.976, 0.729, 0.196),
          (0.259, 0.431, 0.525), (0.4375, 0.13671875, 0.4375)]
style = "seaborn-white"
linewidth = 3

###############################
# Single result
###############################
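# A minimal sketch of the "Single result" figure (an assumption; the original
# plotting code is not included here). It shows the mean with the 5th-95th
# percentile band, using plain matplotlib in place of the prettyplot helpers.
fig_single, ax_single = plt.subplots(figsize=(8, 5))
ax_single.plot(time, mean, color=colors[0], linewidth=linewidth, label="Mean")
ax_single.fill_between(time, percentile_5, percentile_95,
                       color=colors[2], alpha=0.4,
                       label="90% prediction interval")
ax_single.set_xlabel("Time (ms)", fontsize=labelsize)
ax_single.set_ylabel("Membrane potential (mV)", fontsize=labelsize)
ax_single.legend(fontsize=ticksize)
fig_single.savefig("valderrama_single_result.png", dpi=300)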