def sensitivity_analysis_pse_from_hypothesis(hypothesis, connectivity_matrix, region_labels, n_samples,
                                             method="sobol", half_range=0.1, global_coupling=[],
                                             healthy_regions_parameters=[], save_services=False, logger=None,
                                             **kwargs):
    if logger is None:
        logger = initialize_logger(__name__)
    # Compute LSA for this hypothesis before the sensitivity analysis:
    logger.info("Running hypothesis: " + hypothesis.name)
    model_configuration_service, model_configuration, lsa_service, lsa_hypothesis = \
        start_lsa_run(hypothesis, connectivity_matrix, logger)
    results, pse_results = sensitivity_analysis_pse_from_lsa_hypothesis(
        lsa_hypothesis, connectivity_matrix, region_labels, n_samples, method, half_range, global_coupling,
        healthy_regions_parameters, model_configuration_service, lsa_service, save_services, logger, **kwargs)
    return model_configuration_service, model_configuration, lsa_service, lsa_hypothesis, results, pse_results
def start_lsa_run(hypothesis, connectivity_matrix, logger=None):
    if logger is None:
        logger = initialize_logger(__name__)
    logger.info("creating model configuration...")
    model_configuration_service = ModelConfigurationService(hypothesis.number_of_regions)
    model_configuration = \
        model_configuration_service.configure_model_from_hypothesis(hypothesis, connectivity_matrix)
    logger.info("running LSA...")
    lsa_service = LSAService(eigen_vectors_number_selection=EIGENVECTORS_NUMBER_SELECTION,
                             eigen_vectors_number=None,
                             weighted_eigenvector_sum=WEIGHTED_EIGENVECTOR_SUM,
                             normalize_propagation_strength=False)
    lsa_hypothesis = lsa_service.run_lsa(hypothesis, model_configuration)
    return model_configuration_service, model_configuration, lsa_service, lsa_hypothesis
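
# A minimal usage sketch of the two helpers above, assuming a DiseaseHypothesis instance `hyp`,
# its normalized connectivity weights and region labels already exist; parameter values are
# illustrative only:
#
#     mcs, mc, lsa_service, lsa_hyp, sa_results, pse_results = \
#         sensitivity_analysis_pse_from_hypothesis(hyp, normalized_weights, region_labels,
#                                                  n_samples=100, method="sobol", half_range=0.1)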
SHOW_FLAG_SIM = True
SAVE_FLAG = True

# Modify data folders for this example:
DATA_TRECHH = '/Users/dionperd/Dropbox/Work/VBtech/DenisVEP/Results/TRECHH'
# CON_DATA = 'connectivity_2_hypo.zip'
CONNECT_DATA = 'connectivity_hypo.zip'

# Set a special scaling for HH regions, for this example:
# Set Khyp >= 1.0
Khyp = 5.0

if __name__ == "__main__":

    # -------------------------------Reading data-----------------------------------
    logger = initialize_logger(__name__)

    # if DATA_MODE == 'ep':
    #     logger.info("Reading from EPISENSE")
    #     data_folder = os.path.join(DATA_EPISENSE, 'Head_TREC')  # Head_TREC 'Head_JUNCH'
    #     from tvb_epilepsy.custom.readers_episense import EpisenseReader
    #     reader = EpisenseReader()
    # else:
    #     logger.info("Reading from TVB")
    #     data_folder = DATA_TVB
    #     reader = TVBReader()
    data_folder = os.path.join(DATA_TRECHH, 'Head_TREC')  # Head_TREC 'Head_JUNCH'
    reader = EpisenseReader()
import os
import ntpath
import warnings

import h5py
import numpy

from tvb_epilepsy.base.utils import ensure_unique_file, change_filename_or_overwrite, \
    read_object_from_h5_file, print_metadata, write_metadata
# TODO: solve problems with setting up a logger
from tvb_epilepsy.base.utils import initialize_logger
from tvb_epilepsy.base.epileptor_model_factory import model_build_dict
from tvb_epilepsy.base.simulators import SimulationSettings

PATIENT_VIRTUAL_HEAD = "/WORK/episense/episense-root/trunk/demo-data/Head_TREC"

logger = initialize_logger("log_" + ntpath.split(PATIENT_VIRTUAL_HEAD)[1])

KEY_TYPE = "EPI_Type"
KEY_VERSION = "EPI_Version"
KEY_DATE = "EPI_Last_update"
KEY_NODES = "Number_of_nodes"
KEY_SENSORS = "Number_of_sensors"
KEY_MAX = "Max_value"
KEY_MIN = "Min_value"
KEY_CHANNELS = "Number_of_channels"
KEY_SV = "Number_of_state_variables"
KEY_STEPS = "Number_of_steps"
KEY_SAMPLING = "Sampling_period"
KEY_START = "Start_time"

# Attributes to be read or written for hypothesis object and files:
class CustomReader(ABCReader):

    logger = initialize_logger(__name__)

    def read_connectivity(self, h5_path):
        """
        :param h5_path: Path towards a custom Connectivity H5 file
        :return: Weights, Tracts, Region centers
        """
        self.logger.info("Reading a Connectivity from: " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        self.logger.debug("Structures: " + str(h5_file["/"].keys()))
        self.logger.debug("Weights shape: " + str(h5_file['/weights'].shape))
        weights = h5_file['/weights'][()]
        tract_lengths = h5_file['/tract_lengths'][()]
        # TODO: should change to the English "centers" rather than the French "centres"!
        region_centers = h5_file['/centres'][()]
        region_labels = h5_file['/region_labels'][()]
        orientations = h5_file['/orientations'][()]
        hemispheres = h5_file['/hemispheres'][()]
        h5_file.close()
        return Connectivity(h5_path, weights, tract_lengths, region_labels, region_centers,
                            hemispheres, orientations)

    def read_cortical_surface(self, h5_path):
        if os.path.isfile(h5_path):
            self.logger.info("Reading Surface from " + h5_path)
            h5_file = h5py.File(h5_path, 'r', libver='latest')
            vertices = h5_file['/vertices'][()]
            triangles = h5_file['/triangles'][()]
            vertex_normals = h5_file['/vertex_normals'][()]
            h5_file.close()
            return Surface(vertices, triangles, vertex_normals)
        else:
            warning("\nNo Cortical Surface file found at path " + h5_path + "!")
            return []

    def _read_data_field(self, h5_path):
        self.logger.info("Reading 'data' from H5 " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        data = h5_file['/data'][()]
        h5_file.close()
        return data

    def read_region_mapping(self, h5_path):
        if os.path.isfile(h5_path):
            return self._read_data_field(h5_path)
        else:
            warning("\nNo Region Mapping file found at path " + h5_path + "!")
            return []

    def read_volume_mapping(self, h5_path):
        if os.path.isfile(h5_path):
            return self._read_data_field(h5_path)
        else:
            warning("\nNo Volume Mapping file found at path " + h5_path + "!")
            return []

    def read_t1(self, h5_path):
        if os.path.isfile(h5_path):
            return self._read_data_field(h5_path)
        else:
            warning("\nNo Structural MRI file found at path " + h5_path + "!")
            return []

    def read_sensors(self, h5_path, s_type):
        if os.path.isfile(h5_path):
            self.logger.info("Reading Sensors from: " + h5_path)
            h5_file = h5py.File(h5_path, 'r', libver='latest')
            labels = h5_file['/labels'][()]
            locations = h5_file['/locations'][()]
            h5_file.close()
            return Sensors(labels, locations, s_type=s_type)
        else:
            warning("\nNo Sensor file found at path " + h5_path + "!")
            return None

    def read_projection(self, path, s_type):
        warning("Custom projection matrix reading not implemented yet!")
        return []
        # raise_not_implemented_error()

    def read_sensors_projections(self, root_folder, conn, sensor_files, s_type):
        sensors_dict = {}
        for sensor_file in ensure_list(sensor_files):
            sensor = self.read_sensors(os.path.join(root_folder, sensor_file[0]), s_type)
            if isinstance(sensor, Sensors):
                projection = []
                if len(sensor_file) > 1:
                    projection_file = os.path.join(root_folder, sensor_file[1])
                    if os.path.isfile(projection_file):
                        projection = self.read_projection(os.path.join(root_folder, sensor_file[1]), s_type)
                if projection == []:
                    warning("Calculating projection matrix based solely on Euclidean distance!")
                    projection = sensor.calculate_projection(conn)
                sensors_dict[sensor] = projection
        return sensors_dict

    def read_head(self, root_folder, name='',
                  connectivity_file="Connectivity.h5",
                  surface_file="CorticalSurface.h5",
                  region_mapping_file="RegionMapping.h5",
                  volume_mapping_file="VolumeMapping.h5",
                  structural_mri_file="StructuralMRI.h5",
                  seeg_sensors_files=[("SensorsSEEG_114.h5", ""), ("SensorsSEEG_125.h5", "")],
                  eeg_sensors_files=[("eeg_brainstorm_65.txt", "projection_eeg_65_surface_16k.npy")],
                  meg_sensors_files=[("meg_brainstorm_276.txt", "projection_meg_276_surface_16k.npy")]):
        conn = self.read_connectivity(os.path.join(root_folder, "Connectivity.h5"))
        srf = self.read_cortical_surface(os.path.join(root_folder, "CorticalSurface.h5"))
        rm = self.read_region_mapping(os.path.join(root_folder, "RegionMapping.h5"))
        vm = self.read_volume_mapping(os.path.join(root_folder, "VolumeMapping.h5"))
        t1 = self.read_volume_mapping(os.path.join(root_folder, "StructuralMRI.h5"))
        seeg_sensors_dict = self.read_sensors_projections(root_folder, conn, seeg_sensors_files, Sensors.TYPE_SEEG)
        eeg_sensors_dict = self.read_sensors_projections(root_folder, conn, eeg_sensors_files, Sensors.TYPE_EEG)
        meg_sensors_dict = self.read_sensors_projections(root_folder, conn, meg_sensors_files, Sensors.TYPE_MEG)
        return Head(conn, srf, rm, vm, t1, name,
                    eeg_sensors_dict, meg_sensors_dict, seeg_sensors_dict)

    def read_epileptogenicity(self, root_folder, name="ep"):
        """
        :param root_folder: Path towards a valid custom Epileptogenicity H5 file
        :param name: the name of the hypothesis
        :return: Epileptogenicity values in a numpy array
        """
        path = os.path.join(root_folder, name, name + ".h5")
        self.logger.info("Reading Epileptogenicity from:\n" + str(path))
        h5_file = h5py.File(path, 'r', libver='latest')
        self.logger.info("Structures:\n" + str(h5_file["/"].keys()))
        self.logger.info("Values expected shape: " + str(h5_file['/values'].shape))
        values = h5_file['/values'][()]
        self.logger.info("Actual values shape: " + str(values.shape))
        h5_file.close()
        return values
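
# A minimal usage sketch of the reader above (the folder layout is assumed to match the default
# file names in read_head; the path below is illustrative only):
#
#     reader = CustomReader()
#     head = reader.read_head("/path/to/Head")
#     ep_values = reader.read_epileptogenicity("/path/to/Head", name="ep")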
def sensitivity_analysis_pse_from_lsa_hypothesis(lsa_hypothesis, connectivity_matrix, region_labels, n_samples,
                                                 method="sobol", half_range=0.1, global_coupling=[],
                                                 healthy_regions_parameters=[], model_configuration_service=None,
                                                 lsa_service=None, save_services=False, logger=None, **kwargs):
    if logger is None:
        logger = initialize_logger(__name__)
    method = method.lower()
    if np.in1d(method, METHODS):
        if np.in1d(method, ["delta", "dgsm"]):
            sampler = "latin"
        elif method == "sobol":
            sampler = "saltelli"
        elif method == "fast":
            sampler = "fast_sampler"
        else:
            sampler = method
    else:
        raise_value_error("Method " + str(method) + " is not one of the available methods " + str(METHODS) + " !")
    all_regions_indices = range(lsa_hypothesis.number_of_regions)
    disease_indices = lsa_hypothesis.get_regions_disease_indices()
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    pse_params = {"path": [], "indices": [], "name": [], "bounds": []}
    n_inputs = 0
    # First, build the input parameters of the sensitivity analysis from the hypothesis.
    # These can either originate from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration).
    for ii in range(len(lsa_hypothesis.x0_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(str(region_labels[lsa_hypothesis.x0_indices[ii]]) + " Excitability")
        pse_params["bounds"].append([lsa_hypothesis.x0_values[ii] - half_range,
                                     np.min([MAX_DISEASE_VALUE, lsa_hypothesis.x0_values[ii] + half_range])])
    for ii in range(len(lsa_hypothesis.e_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(str(region_labels[lsa_hypothesis.e_indices[ii]]) + " Epileptogenicity")
        pse_params["bounds"].append([lsa_hypothesis.e_values[ii] - half_range,
                                     np.min([MAX_DISEASE_VALUE, lsa_hypothesis.e_values[ii] + half_range])])
    for ii in range(len(lsa_hypothesis.w_values)):
        n_inputs += 1
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(lsa_hypothesis.w_indices[ii], connectivity_matrix.shape)
        if len(inds) == 1:
            pse_params["name"].append(str(region_labels[inds[0][0]]) + "-" +
                                      str(region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        pse_params["bounds"].append([np.max([lsa_hypothesis.w_values[ii] - half_range, 0.0]),
                                     lsa_hypothesis.w_values[ii] + half_range])
    for val in global_coupling:
        n_inputs += 1
        pse_params["path"].append("model.configuration.service.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)
        pse_params["bounds"].append(val["bounds"])
    # Now generate samples suitable for sensitivity analysis
    sampler = StochasticSamplingService(n_samples=n_samples, n_outputs=n_inputs, sampler=sampler,
                                        trunc_limits={}, sampling_module="salib",
                                        random_seed=kwargs.get("random_seed", None),
                                        bounds=pse_params["bounds"])
    input_samples = sampler.generate_samples(**kwargs)
    n_samples = input_samples.shape[1]
    pse_params.update({"samples": [np.array(value) for value in input_samples.tolist()]})
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions, if required...:
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0_values")
        n_params = len(inds)
        sampler = StochasticSamplingService(n_samples=n_samples, n_outputs=n_params, sampler="uniform",
                                            trunc_limits={"low": 0.0}, sampling_module="scipy",
                                            random_seed=kwargs.get("random_seed", None),
                                            loc=kwargs.get("loc", 0.0),
                                            scale=kwargs.get("scale", 2 * half_range))
        samples = sampler.generate_samples(**kwargs)
        for ii in range(n_params):
            pse_params_list.append({"path": "model_configuration_service." + name,
                                    "samples": samples[ii],
                                    "indices": [inds[ii]],
                                    "name": name})
    # Now run the PSE service to generate output samples:
    pse = PSEService("LSA", hypothesis=lsa_hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(connectivity_matrix, grid_mode=False,
                                                lsa_service_input=lsa_service,
                                                model_configuration_service_input=model_configuration_service)
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    # Now prepare inputs and outputs and run the sensitivity analysis:
    # NOTE!: without the jittered healthy regions, which we don't want to include in the sensitivity analysis!
    inputs = dicts_of_lists_to_lists_of_dicts(pse_params)
    outputs = [{"names": ["LSA Propagation Strength"],
                "values": pse_results["propagation_strengths"]}]
    sensitivity_analysis_service = SensitivityAnalysisService(inputs, outputs, method=method,
                                                              calc_second_order=kwargs.get("calc_second_order", True),
                                                              conf_level=kwargs.get("conf_level", 0.95))
    results = sensitivity_analysis_service.run(**kwargs)
    if save_services:
        logger.info(pse.__repr__())
        pse.write_to_h5(FOLDER_RES, method + "_test_pse_service.h5")
        logger.info(sensitivity_analysis_service.__repr__())
        sensitivity_analysis_service.write_to_h5(FOLDER_RES, method + "_test_sa_service.h5")
    return results, pse_results
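
# A minimal call sketch (argument values mirror how this function is invoked from the example
# scripts further below; `lsa_hyp`, `weights`, `labels` and `healthy_indices` are assumed to exist):
#
#     sa_results, pse_results = sensitivity_analysis_pse_from_lsa_hypothesis(
#         lsa_hyp, weights, labels, n_samples=100, method="sobol", half_range=0.1,
#         global_coupling=[{"indices": range(lsa_hyp.number_of_regions), "bounds": [0.0, 10.0]}],
#         healthy_regions_parameters=[{"name": "x0_values", "indices": healthy_indices}])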
def pse_from_hypothesis(hypothesis, n_samples, half_range=0.1, global_coupling=[],
                        healthy_regions_parameters=[], model_configuration_service=None, lsa_service=None,
                        save_services=False, **kwargs):
    from tvb_epilepsy.base.constants import MAX_DISEASE_VALUE, K_DEF, FOLDER_RES
    from tvb_epilepsy.base.utils import initialize_logger, linear_index_to_coordinate_tuples, \
        dicts_of_lists_to_lists_of_dicts, list_of_dicts_to_dicts_of_ndarrays
    from tvb_epilepsy.base.sampling_service import StochasticSamplingService
    from tvb_epilepsy.base.pse_service import PSEService

    logger = initialize_logger(__name__)

    all_regions_indices = range(hypothesis.get_number_of_regions())
    disease_indices = hypothesis.get_regions_disease_indices()
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()

    pse_params = {"path": [], "indices": [], "name": [], "samples": []}

    # First, build the input parameters of the parameter search exploration from the hypothesis.
    # These can either originate from excitability, epileptogenicity or connectivity hypotheses,
    # or they can relate to the global coupling scaling (parameter K of the model configuration).
    for ii in range(len(hypothesis.x0_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.x0_values")
        pse_params["name"].append(str(hypothesis.connectivity.region_labels[hypothesis.x0_indices[ii]])
                                  + " Excitability")
        # Now generate samples using a truncated uniform distribution
        sampler = StochasticSamplingService(n_samples=n_samples, n_outputs=1, sampling_module="scipy",
                                            random_seed=kwargs.get("random_seed", None),
                                            trunc_limits={"high": MAX_DISEASE_VALUE}, sampler="uniform",
                                            loc=hypothesis.x0_values[ii] - half_range, scale=2 * half_range)
        pse_params["samples"].append(sampler.generate_samples(**kwargs))
    for ii in range(len(hypothesis.e_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.e_values")
        pse_params["name"].append(str(hypothesis.connectivity.region_labels[hypothesis.e_indices[ii]])
                                  + " Epileptogenicity")
        # Now generate samples using a truncated uniform distribution
        sampler = StochasticSamplingService(n_samples=n_samples, n_outputs=1, sampling_module="scipy",
                                            random_seed=kwargs.get("random_seed", None),
                                            trunc_limits={"high": MAX_DISEASE_VALUE}, sampler="uniform",
                                            loc=hypothesis.e_values[ii] - half_range, scale=2 * half_range)
        pse_params["samples"].append(sampler.generate_samples(**kwargs))
    for ii in range(len(hypothesis.w_values)):
        pse_params["indices"].append([ii])
        pse_params["path"].append("hypothesis.w_values")
        inds = linear_index_to_coordinate_tuples(hypothesis.w_indices[ii], hypothesis.connectivity.weights.shape)
        if len(inds) == 1:
            pse_params["name"].append(str(hypothesis.connectivity.region_labels[inds[0][0]]) + "-" +
                                      str(hypothesis.connectivity.region_labels[inds[0][1]]) + " Connectivity")
        else:
            pse_params["name"].append("Connectivity[" + str(inds) + "]")
        # Now generate samples using a truncated normal distribution
        sampler = StochasticSamplingService(n_samples=n_samples, n_outputs=1, sampling_module="scipy",
                                            random_seed=kwargs.get("random_seed", None),
                                            trunc_limits={"high": MAX_DISEASE_VALUE}, sampler="norm",
                                            loc=hypothesis.w_values[ii], scale=half_range)
        pse_params["samples"].append(sampler.generate_samples(**kwargs))
    if model_configuration_service is None:
        kloc = K_DEF
    else:
        kloc = model_configuration_service.K_unscaled[0]
    for val in global_coupling:
        pse_params["path"].append("model.configuration.service.K_unscaled")
        inds = val.get("indices", all_regions_indices)
        if np.all(inds == all_regions_indices):
            pse_params["name"].append("Global coupling")
        else:
            pse_params["name"].append("Afferent coupling[" + str(inds) + "]")
        pse_params["indices"].append(inds)
        # Now generate samples using a truncated normal distribution
        sampler = StochasticSamplingService(n_samples=n_samples, n_outputs=1, sampling_module="scipy",
                                            random_seed=kwargs.get("random_seed", None),
                                            trunc_limits={"low": 0.0}, sampler="norm",
                                            loc=kloc, scale=30 * half_range)
        pse_params["samples"].append(sampler.generate_samples(**kwargs))
    pse_params_list = dicts_of_lists_to_lists_of_dicts(pse_params)
    # Add a random jitter to the healthy regions, if required...:
    for val in healthy_regions_parameters:
        inds = val.get("indices", healthy_indices)
        name = val.get("name", "x0")
        n_params = len(inds)
        sampler = StochasticSamplingService(n_samples=n_samples, n_outputs=n_params, sampler="uniform",
                                            trunc_limits={"low": 0.0}, sampling_module="scipy",
                                            random_seed=kwargs.get("random_seed", None),
                                            loc=kwargs.get("loc", 0.0), scale=kwargs.get("scale", 2 * half_range))
        samples = sampler.generate_samples(**kwargs)
        for ii in range(n_params):
            pse_params_list.append({"path": "model_configuration_service." + name,
                                    "samples": samples[ii],
                                    "indices": [inds[ii]],
                                    "name": name})
    # Now run the PSE service to generate output samples:
    pse = PSEService("LSA", hypothesis=hypothesis, params_pse=pse_params_list)
    pse_results, execution_status = pse.run_pse(grid_mode=False, lsa_service_input=lsa_service,
                                                model_configuration_service_input=model_configuration_service)
    pse_results = list_of_dicts_to_dicts_of_ndarrays(pse_results)
    if save_services:
        logger.info(pse.__repr__())
        pse.write_to_h5(FOLDER_RES, "test_pse_service.h5")
    return pse_results, pse_params_list
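
# A minimal call sketch, mirroring how this function is used from the example scripts below
# (`hyp`, `all_regions_indices`, `healthy_indices` and the two services are assumed to exist):
#
#     pse_results, pse_params_list = pse_from_hypothesis(
#         hyp, n_samples=100, half_range=0.1,
#         global_coupling=[{"indices": all_regions_indices}],
#         healthy_regions_parameters=[{"name": "x0", "indices": healthy_indices}],
#         model_configuration_service=model_configuration_service, lsa_service=lsa_service)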
def main_vep(test_write_read=False):
    logger = initialize_logger(__name__)
    # -------------------------------Reading data-----------------------------------
    data_folder = os.path.join(DATA_CUSTOM, 'Head')
    reader = Reader()
    logger.info("Reading from: " + data_folder)
    head = reader.read_head(data_folder)
    # --------------------------Hypothesis definition-----------------------------------
    n_samples = 100
    # # Manual definition of hypothesis...:
    # x0_indices = [20]
    # x0_values = [0.9]
    # e_indices = [70]
    # e_values = [0.9]
    # disease_values = x0_values + e_values
    # disease_indices = x0_indices + e_indices
    # ...or reading a custom file:
    ep_name = "ep_test1"
    # FOLDER_RES = os.path.join(data_folder, ep_name)
    from tvb_epilepsy.custom.readers_custom import CustomReader
    if not isinstance(reader, CustomReader):
        reader = CustomReader()
    disease_values = reader.read_epileptogenicity(data_folder, name=ep_name)
    disease_indices, = np.where(disease_values > np.min([X0_DEF, E_DEF]))
    disease_values = disease_values[disease_indices]
    if disease_values.size > 1:
        inds_split = np.ceil(disease_values.size * 1.0 / 2).astype("int")
        x0_indices = disease_indices[:inds_split].tolist()
        e_indices = disease_indices[inds_split:].tolist()
        x0_values = disease_values[:inds_split].tolist()
        e_values = disease_values[inds_split:].tolist()
    else:
        x0_indices = disease_indices.tolist()
        x0_values = disease_values.tolist()
        e_indices = []
        e_values = []
    disease_indices = list(disease_indices)
    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    n_disease = len(disease_indices)
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)
    # This is an example of an Excitability Hypothesis:
    hyp_x0 = DiseaseHypothesis(head.connectivity,
                               excitability_hypothesis={tuple(disease_indices): disease_values},
                               epileptogenicity_hypothesis={}, connectivity_hypothesis={})
    # This is an example of an Epileptogenicity Hypothesis:
    hyp_E = DiseaseHypothesis(head.connectivity,
                              excitability_hypothesis={},
                              epileptogenicity_hypothesis={tuple(disease_indices): disease_values},
                              connectivity_hypothesis={})
    if len(e_indices) > 0:
        # This is an example of a mixed Excitability (x0) and Epileptogenicity Hypothesis:
        hyp_x0_E = DiseaseHypothesis(head.connectivity,
                                     excitability_hypothesis={tuple(x0_indices): x0_values},
                                     epileptogenicity_hypothesis={tuple(e_indices): e_values},
                                     connectivity_hypothesis={})
        hypotheses = (hyp_x0, hyp_E, hyp_x0_E)
    else:
        hypotheses = (hyp_x0, hyp_E)
    # --------------------------Projections computations-----------------------------------
    sensorsSEEG = []
    projections = []
    for sensors, projection in head.sensorsSEEG.iteritems():
        if projection is None:
            continue
        else:
            projection = calculate_projection(sensors, head.connectivity)
            head.sensorsSEEG[sensors] = projection
            sensorsSEEG.append(sensors)
            projections.append(projection)
    # --------------------------Simulation preparations-----------------------------------
    # TODO: maybe use a custom Monitor class
    fs = 2 * 4096.0
    scale_time = 2.0
    time_length = 10000.0
    scale_fsavg = 2.0
    report_every_n_monitor_steps = 10.0
    (dt, fsAVG, sim_length, monitor_period, n_report_blocks) = \
        set_time_scales(fs=fs, dt=None, time_length=time_length, scale_time=scale_time,
                        scale_fsavg=scale_fsavg, report_every_n_monitor_steps=report_every_n_monitor_steps)
    model_name = "EpileptorDP"
    # We don't want any time delays for the moment
    head.connectivity.tract_lengths *= 0.0
    hpf_flag = False
    hpf_low = max(16.0, 1000.0 / time_length)  # msec
    hpf_high = min(250.0, fsAVG)
    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("\n\nRunning hypothesis: " + hyp.name)
        # hyp.write_to_h5(FOLDER_RES, hyp.name + ".h5")
        logger.info("\n\nCreating model configuration...")
        model_configuration_service = ModelConfigurationService(hyp.get_number_of_regions())
        model_configuration_service.write_to_h5(FOLDER_RES, hyp.name + "_model_config_service.h5")
        if hyp.type == "Epileptogenicity":
            model_configuration = model_configuration_service.configure_model_from_E_hypothesis(hyp)
        else:
            model_configuration = model_configuration_service.configure_model_from_hypothesis(hyp)
        model_configuration.write_to_h5(FOLDER_RES, hyp.name + "_ModelConfig.h5")
        # # Plot nullclines and equilibria of model configuration
        # model_configuration.plot_nullclines_eq(head.connectivity.region_labels,
        #                                        special_idx=lsa_hypothesis.propagation_indices,
        #                                        model=str(model.nvar) + "d", zmode=model.zmode,
        #                                        figure_name=lsa_hypothesis.name + "_Nullclines and equilibria",
        #                                        save_flag=SAVE_FLAG, show_flag=SHOW_FLAG,
        #                                        figure_dir=FOLDER_FIGURES)
        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=None, weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)
        lsa_hypothesis.write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
        lsa_service.write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_LSAConfig.h5")
        lsa_hypothesis.plot_lsa(model_configuration,
                                weighted_eigenvector_sum=lsa_service.weighted_eigenvector_sum,
                                n_eig=lsa_service.eigen_vectors_number,
                                figure_name=lsa_hypothesis.name + "_LSA.h5")
        # --------------Parameter Search Exploration (PSE)-------------------------------
        logger.info("\n\nRunning PSE LSA...")
        pse_results = pse_from_hypothesis(lsa_hypothesis, n_samples, half_range=0.1,
                                          global_coupling=[{"indices": all_regions_indices}],
                                          healthy_regions_parameters=[{"name": "x0",
                                                                       "indices": healthy_indices}],
                                          model_configuration=model_configuration,
                                          model_configuration_service=model_configuration_service,
                                          lsa_service=lsa_service)[0]
        lsa_hypothesis.plot_lsa_pse(pse_results, model_configuration,
                                    weighted_eigenvector_sum=lsa_service.weighted_eigenvector_sum,
                                    n_eig=lsa_service.eigen_vectors_number)  # , show_flag=True, save_flag=False
        convert_to_h5_model(pse_results).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_PSE_LSA_results.h5")
        # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------
        logger.info("\n\nRunning sensitivity analysis PSE LSA...")
        sa_results, pse_sa_results = \
            sensitivity_analysis_pse_from_hypothesis(lsa_hypothesis, n_samples, method="sobol", half_range=0.1,
                                                     global_coupling=[{"indices": all_regions_indices,
                                                                       "bounds": [0.0, 2 * model_configuration_service.K_unscaled[0]]}],
                                                     healthy_regions_parameters=[{"name": "x0",
                                                                                  "indices": healthy_indices}],
                                                     model_configuration=model_configuration,
                                                     model_configuration_service=model_configuration_service,
                                                     lsa_service=lsa_service)
        lsa_hypothesis.plot_lsa_pse(pse_sa_results, model_configuration,
                                    weighted_eigenvector_sum=lsa_service.weighted_eigenvector_sum,
                                    n_eig=lsa_service.eigen_vectors_number,
                                    figure_name="SA PSE LSA overview " + lsa_hypothesis.name)
        # , show_flag=True, save_flag=False
        convert_to_h5_model(pse_sa_results).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
        convert_to_h5_model(sa_results).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_SA_LSA_results.h5")
        # ------------------------------Simulation--------------------------------------
        logger.info("\n\nSimulating...")
        sim = setup_simulation_from_model_configuration(model_configuration, head.connectivity, dt, sim_length,
                                                        monitor_period, model_name, scale_time=scale_time,
                                                        noise_intensity=10 ** -8)
        sim.config_simulation()
        ttavg, tavg_data, status = sim.launch_simulation(n_report_blocks)
        convert_to_h5_model(sim.simulation_settings).write_to_h5(FOLDER_RES,
                                                                 lsa_hypothesis.name + "_sim_settings.h5")
        if not status:
            warnings.warn("\nSimulation failed!")
        else:
            tavg_data = tavg_data[:, :, :, 0]
            vois = VOIS[model_name]
            model = sim.model
            logger.info("\n\nSimulated signal return shape: %s", tavg_data.shape)
            logger.info("Time: %s - %s", scale_time * ttavg[0], scale_time * ttavg[-1])
            logger.info("Values: %s - %s", tavg_data.min(), tavg_data.max())
            time = scale_time * np.array(ttavg, dtype='float32')
            sampling_time = np.min(np.diff(time))
            vois_ts_dict = prepare_vois_ts_dict(vois, tavg_data)
            prepare_ts_and_seeg_h5_file(FOLDER_RES, lsa_hypothesis.name + "_ts.h5", model, projections,
                                        vois_ts_dict, hpf_flag, hpf_low, hpf_high, fsAVG, sampling_time)
            vois_ts_dict['time'] = time
            # Plot results
            plot_sim_results(model, lsa_hypothesis.propagation_indices, lsa_hypothesis.name, head,
                             vois_ts_dict, sensorsSEEG, hpf_flag)
            # Save results
            vois_ts_dict['time_units'] = 'msec'
            # savemat(os.path.join(FOLDER_RES, hypothesis.name + "_ts.mat"), vois_ts_dict)
        if test_write_read:
            hypothesis_template = DiseaseHypothesis(Connectivity("", np.array([]), np.array([])))
            logger.info("Written and read model configuration services are identical?: " +
                        str(assert_equal_objects(model_configuration_service,
                                                 read_h5_model(os.path.join(FOLDER_RES, hyp.name + "_model_config_service.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(model_configuration_service)))))
            logger.info("Written and read model configurations are identical?: " +
                        str(assert_equal_objects(model_configuration,
                                                 read_h5_model(os.path.join(FOLDER_RES, hyp.name + "_ModelConfig.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(model_configuration)))))
            logger.info("Written and read LSA services are identical?: " +
                        str(assert_equal_objects(lsa_service,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_LSAConfig.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(lsa_service)))))
            logger.info("Written and read LSA hypotheses are identical (input object check)?: " +
                        str(assert_equal_objects(lsa_hypothesis,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(lsa_hypothesis)))))
            logger.info("Written and read LSA hypotheses are identical (input template check)?: " +
                        str(assert_equal_objects(lsa_hypothesis,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
                                                               ).convert_from_h5_model(children_dict=hypothesis_template))))
            logger.info("Written and read PSE LSA results are identical?: " +
                        str(assert_equal_objects(pse_results,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_PSE_LSA_results.h5")
                                                               ).convert_from_h5_model())))
            logger.info("Written and read SA PSE LSA results are identical?: " +
                        str(assert_equal_objects(pse_sa_results,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
                                                               ).convert_from_h5_model())))
            logger.info("Written and read SA LSA results are identical?: " +
                        str(assert_equal_objects(sa_results,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_SA_LSA_results.h5")
                                                               ).convert_from_h5_model())))
            logger.info("Written and read simulation settings are identical?: " +
                        str(assert_equal_objects(sim.simulation_settings,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_sim_settings.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(sim.simulation_settings)))))
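
# Typical invocation of this example pipeline (a sketch; the flag value is illustrative and
# enables the write/read verification checks above):
#
#     if __name__ == "__main__":
#         main_vep(test_write_read=True)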
class CustomReader(ABCReader):

    LOG = initialize_logger(__name__)

    def read_connectivity(self, h5_path):
        """
        :param h5_path: Path towards a custom Connectivity H5 file
        :return: Weights, Tracts, Region centers
        """
        self.LOG.info("Reading a Connectivity from: " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        self.LOG.debug("Structures: " + str(h5_file["/"].keys()))
        self.LOG.debug("Weights shape: " + str(h5_file['/weights'].shape))
        weights = h5_file['/weights'][()]
        tract_lengths = h5_file['/tract_lengths'][()]
        # TODO: should change to the English "centers" rather than the French "centres"!
        region_centers = h5_file['/centres'][()]
        region_labels = h5_file['/region_labels'][()]
        orientations = h5_file['/orientations'][()]
        hemispheres = h5_file['/hemispheres'][()]
        h5_file.close()
        return Connectivity(weights, tract_lengths, region_labels, region_centers, hemispheres, orientations)

    def read_cortical_surface(self, h5_path):
        self.LOG.info("Reading Surface from " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        vertices = h5_file['/vertices'][()]
        triangles = h5_file['/triangles'][()]
        vertex_normals = h5_file['/vertex_normals'][()]
        h5_file.close()
        return Surface(vertices, triangles, vertex_normals)

    def _read_data_field(self, h5_path):
        self.LOG.info("Reading 'data' from H5 " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        data = h5_file['/data'][()]
        h5_file.close()
        return data

    def read_region_mapping(self, h5_path):
        return self._read_data_field(h5_path)

    def read_volume_mapping(self, h5_path):
        return self._read_data_field(h5_path)

    def read_t1(self, h5_path):
        return self._read_data_field(h5_path)

    def read_sensors(self, h5_path, s_type):
        self.LOG.info("Reading Sensors from: " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        labels = h5_file['/labels'][()]
        locations = h5_file['/locations'][()]
        h5_file.close()
        return Sensors(labels, locations, s_type=s_type)

    def read_projection(self, path, s_type):
        raise NotImplementedError()

    def read_head(self, root_folder, name=''):
        conn = self.read_connectivity(os.path.join(root_folder, "Connectivity.h5"))
        srf = self.read_cortical_surface(os.path.join(root_folder, "CorticalSurface.h5"))
        rm = self.read_region_mapping(os.path.join(root_folder, "RegionMapping.h5"))
        vm = self.read_volume_mapping(os.path.join(root_folder, "VolumeMapping.h5"))
        t1 = self.read_volume_mapping(os.path.join(root_folder, "StructuralMRI.h5"))
        s_114 = self.read_sensors(os.path.join(root_folder, "SensorsSEEG_114.h5"), Sensors.TYPE_SEEG)
        s_125 = self.read_sensors(os.path.join(root_folder, "SensorsSEEG_125.h5"), Sensors.TYPE_SEEG)
        seeg_sensors_dict = {s_114: calculate_projection(s_114, conn),
                             s_125: calculate_projection(s_125, conn)}
        eeg_sensors_dict = {}
        meg_sensors_dict = {}
        return Head(conn, srf, rm, vm, t1, name, eeg_sensors_dict, meg_sensors_dict, seeg_sensors_dict)

    def read_epileptogenicity(self, root_folder, name="ep"):
        """
        :param root_folder: Path towards a valid custom Epileptogenicity H5 file
        :param name: the name of the hypothesis
        :return: Epileptogenicity values in a numpy array
        """
        path = os.path.join(root_folder, name, name + ".h5")
        self.LOG.info("Reading Epileptogenicity from: " + path)
        h5_file = h5py.File(path, 'r', libver='latest')
        self.LOG.info("Structures: " + str(h5_file["/"].keys()))
        self.LOG.info("Values expected shape: " + str(h5_file['/values'].shape))
        values = h5_file['/values'][()]
        self.LOG.info("Actual values shape: " + str(values.shape))
        h5_file.close()
        return values
def main_vep(test_write_read=False, pse_flag=PSE_FLAG, sa_pse_flag=SA_PSE_FLAG, sim_flag=SIM_FLAG):
    logger = initialize_logger(__name__)
    # -------------------------------Reading data-----------------------------------
    data_folder = os.path.join(DATA_CUSTOM, 'Head')
    reader = Reader()
    logger.info("Reading from: " + data_folder)
    head = reader.read_head(data_folder)
    head.plot()
    # --------------------------Hypothesis definition-----------------------------------
    n_samples = 100
    # # Manual definition of hypothesis...:
    # x0_indices = [20]
    # x0_values = [0.9]
    # e_indices = [70]
    # e_values = [0.9]
    # disease_values = x0_values + e_values
    # disease_indices = x0_indices + e_indices
    # ...or reading a custom file:
    ep_name = "ep_test1"
    # FOLDER_RES = os.path.join(data_folder, ep_name)
    from tvb_epilepsy.custom.readers_custom import CustomReader
    if not isinstance(reader, CustomReader):
        reader = CustomReader()
    disease_values = reader.read_epileptogenicity(data_folder, name=ep_name)
    disease_indices, = np.where(disease_values > np.min([X0_DEF, E_DEF]))
    disease_values = disease_values[disease_indices]
    if disease_values.size > 1:
        inds_split = np.ceil(disease_values.size * 1.0 / 2).astype("int")
        x0_indices = disease_indices[:inds_split].tolist()
        e_indices = disease_indices[inds_split:].tolist()
        x0_values = disease_values[:inds_split].tolist()
        e_values = disease_values[inds_split:].tolist()
    else:
        x0_indices = disease_indices.tolist()
        x0_values = disease_values.tolist()
        e_indices = []
        e_values = []
    disease_indices = list(disease_indices)
    n_x0 = len(x0_indices)
    n_e = len(e_indices)
    n_disease = len(disease_indices)
    all_regions_indices = np.array(range(head.number_of_regions))
    healthy_indices = np.delete(all_regions_indices, disease_indices).tolist()
    n_healthy = len(healthy_indices)
    # This is an example of an Excitability Hypothesis:
    hyp_x0 = DiseaseHypothesis(head.connectivity.number_of_regions,
                               excitability_hypothesis={tuple(disease_indices): disease_values},
                               epileptogenicity_hypothesis={}, connectivity_hypothesis={})
    # This is an example of an Epileptogenicity Hypothesis:
    hyp_E = DiseaseHypothesis(head.connectivity.number_of_regions,
                              excitability_hypothesis={},
                              epileptogenicity_hypothesis={tuple(disease_indices): disease_values},
                              connectivity_hypothesis={})
    if len(e_indices) > 0:
        # This is an example of a mixed Excitability (x0_values) and Epileptogenicity Hypothesis:
        hyp_x0_E = DiseaseHypothesis(head.connectivity.number_of_regions,
                                     excitability_hypothesis={tuple(x0_indices): x0_values},
                                     epileptogenicity_hypothesis={tuple(e_indices): e_values},
                                     connectivity_hypothesis={})
        hypotheses = (hyp_x0, hyp_E, hyp_x0_E)
    else:
        hypotheses = (hyp_x0, hyp_E)
    # --------------------------Simulation preparations-----------------------------------
    # TODO: maybe use a custom Monitor class
    fs = 2048.0  # this is the simulation sampling rate that is necessary for the simulation to be stable
    time_length = 10000.0  # =100 secs, the final output nominal time length of the simulation
    report_every_n_monitor_steps = 100.0
    (dt, fsAVG, sim_length, monitor_period, n_report_blocks) = \
        set_time_scales(fs=fs, time_length=time_length, scale_fsavg=None,
                        report_every_n_monitor_steps=report_every_n_monitor_steps)
    # Choose model
    # Available models beyond the TVB Epileptor (they all encompass optional variations from the different papers):
    # EpileptorDP: similar to the TVB Epileptor + optional variations,
    # EpileptorDP2D: reduced 2D model, following Proix et al. 2014 + optional variations,
    # EpileptorDPrealistic: starting from the TVB Epileptor + optional variations, but:
    #       - x0, Iext1, Iext2, slope and K become noisy state variables,
    #       - Iext2 and slope are coupled to z, g, or z*g in order for spikes to appear before seizure,
    #       - multiplicative correlated noise is also used
    # Optional variations:
    zmode = "lin"  # by default, or "sig" for the sigmoidal expression for the slow z variable in Proix et al. 2014
    pmode = "z"  # by default, "g" or "z*g" for the feedback coupling to Iext2 and slope for EpileptorDPrealistic

    model_name = "EpileptorDPrealistic"
    if model_name == "EpileptorDP2D":
        spectral_raster_plot = False
        trajectories_plot = True
    else:
        spectral_raster_plot = "lfp"
        trajectories_plot = False
    # We don't want any time delays for the moment
    # head.connectivity.tract_lengths *= TIME_DELAYS_FLAG

    # --------------------------Hypothesis and LSA-----------------------------------
    for hyp in hypotheses:
        logger.info("\n\nRunning hypothesis: " + hyp.name)
        # hyp.write_to_h5(FOLDER_RES, hyp.name + ".h5")
        logger.info("\n\nCreating model configuration...")
        model_configuration_service = ModelConfigurationService(hyp.number_of_regions)
        model_configuration_service.write_to_h5(FOLDER_RES, hyp.name + "_model_config_service.h5")
        if test_write_read:
            logger.info("Written and read model configuration services are identical?: " +
                        str(assert_equal_objects(model_configuration_service,
                                                 read_h5_model(os.path.join(FOLDER_RES, hyp.name + "_model_config_service.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(model_configuration_service)),
                                                 logger=logger)))
        if hyp.type == "Epileptogenicity":
            model_configuration = model_configuration_service.\
                configure_model_from_E_hypothesis(hyp, head.connectivity.normalized_weights)
        else:
            model_configuration = model_configuration_service.\
                configure_model_from_hypothesis(hyp, head.connectivity.normalized_weights)
        model_configuration.write_to_h5(FOLDER_RES, hyp.name + "_ModelConfig.h5")
        if test_write_read:
            logger.info("Written and read model configurations are identical?: " +
                        str(assert_equal_objects(model_configuration,
                                                 read_h5_model(os.path.join(FOLDER_RES, hyp.name + "_ModelConfig.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(model_configuration)),
                                                 logger=logger)))
        # Plot nullclines and equilibria of model configuration
        model_configuration_service.plot_nullclines_eq(model_configuration, head.connectivity.region_labels,
                                                       special_idx=disease_indices, model="6d", zmode="lin",
                                                       figure_name=hyp.name + "_Nullclines and equilibria")
        logger.info("\n\nRunning LSA...")
        lsa_service = LSAService(eigen_vectors_number=None, weighted_eigenvector_sum=True)
        lsa_hypothesis = lsa_service.run_lsa(hyp, model_configuration)
        lsa_hypothesis.write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
        lsa_service.write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_LSAConfig.h5")
        if test_write_read:
            hypothesis_template = DiseaseHypothesis(hyp.number_of_regions)
            logger.info("Written and read LSA services are identical?: " +
                        str(assert_equal_objects(lsa_service,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_LSAConfig.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(lsa_service)),
                                                 logger=logger)))
            logger.info("Written and read LSA hypotheses are identical (input object check)?: " +
                        str(assert_equal_objects(lsa_hypothesis,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
                                                               ).convert_from_h5_model(obj=deepcopy(lsa_hypothesis),
                                                                                       hypothesis=True),
                                                 logger=logger)))
            logger.info("Written and read LSA hypotheses are identical (input template check)?: " +
                        str(assert_equal_objects(lsa_hypothesis,
                                                 read_h5_model(os.path.join(FOLDER_RES, lsa_hypothesis.name + "_LSA.h5")
                                                               ).convert_from_h5_model(obj=hypothesis_template,
                                                                                       hypothesis=True),
                                                 logger=logger)))
        lsa_service.plot_lsa(lsa_hypothesis, model_configuration, head.connectivity.region_labels, None)
        if pse_flag:
            # --------------Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nRunning PSE LSA...")
            pse_results = pse_from_lsa_hypothesis(lsa_hypothesis,
                                                  head.connectivity.normalized_weights,
                                                  head.connectivity.region_labels,
                                                  n_samples, half_range=0.1,
                                                  global_coupling=[{"indices": all_regions_indices}],
                                                  healthy_regions_parameters=[{"name": "x0_values",
                                                                               "indices": healthy_indices}],
                                                  model_configuration_service=model_configuration_service,
                                                  lsa_service=lsa_service, logger=logger)[0]
            lsa_service.plot_lsa(lsa_hypothesis, model_configuration, head.connectivity.region_labels, pse_results)
            # , show_flag=True, save_flag=False
            convert_to_h5_model(pse_results).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_PSE_LSA_results.h5")
            if test_write_read:
                logger.info("Written and read PSE LSA results are identical?: " +
                            str(assert_equal_objects(pse_results,
                                                     read_h5_model(os.path.join(FOLDER_RES,
                                                                                lsa_hypothesis.name + "_PSE_LSA_results.h5")
                                                                   ).convert_from_h5_model(),
                                                     logger=logger)))
        if sa_pse_flag:
            # --------------Sensitivity Analysis Parameter Search Exploration (PSE)-------------------------------
            logger.info("\n\nRunning sensitivity analysis PSE LSA...")
            sa_results, pse_sa_results = \
                sensitivity_analysis_pse_from_lsa_hypothesis(lsa_hypothesis,
                                                             head.connectivity.normalized_weights,
                                                             head.connectivity.region_labels,
                                                             n_samples, method="sobol", half_range=0.1,
                                                             global_coupling=[{"indices": all_regions_indices,
                                                                               "bounds": [0.0, 2 * model_configuration_service.K_unscaled[0]]}],
                                                             healthy_regions_parameters=[{"name": "x0_values",
                                                                                          "indices": healthy_indices}],
                                                             model_configuration_service=model_configuration_service,
                                                             lsa_service=lsa_service, logger=logger)
            lsa_service.plot_lsa(lsa_hypothesis, model_configuration, head.connectivity.region_labels,
                                 pse_sa_results, title="SA PSE Hypothesis Overview")
            # , show_flag=True, save_flag=False
            convert_to_h5_model(pse_sa_results).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
            convert_to_h5_model(sa_results).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_SA_LSA_results.h5")
            if test_write_read:
                logger.info("Written and read sensitivity analysis results are identical?: " +
                            str(assert_equal_objects(sa_results,
                                                     read_h5_model(os.path.join(FOLDER_RES,
                                                                                lsa_hypothesis.name + "_SA_LSA_results.h5")
                                                                   ).convert_from_h5_model(),
                                                     logger=logger)))
                logger.info("Written and read sensitivity analysis PSE results are identical?: " +
                            str(assert_equal_objects(pse_sa_results,
                                                     read_h5_model(os.path.join(FOLDER_RES,
                                                                                lsa_hypothesis.name + "_SA_PSE_LSA_results.h5")
                                                                   ).convert_from_h5_model(),
                                                     logger=logger)))
        if sim_flag:
            # ------------------------------Simulation--------------------------------------
            logger.info("\n\nConfiguring simulation...")
            sim = setup_simulation_from_model_configuration(model_configuration, head.connectivity, dt, sim_length,
                                                            monitor_period, model_name, zmode=np.array(zmode),
                                                            pmode=np.array(pmode), noise_instance=None,
                                                            noise_intensity=None, monitor_expressions=None)
            # Integrator and initial conditions initialization.
            # By default, the initial condition is set right on the equilibrium point.
            sim.config_simulation(initial_conditions=None)
            convert_to_h5_model(sim.model).write_to_h5(FOLDER_RES, lsa_hypothesis.name + "_sim_model.h5")
            logger.info("\n\nSimulating...")
            ttavg, tavg_data, status = sim.launch_simulation(n_report_blocks)
            convert_to_h5_model(sim.simulation_settings).write_to_h5(FOLDER_RES,
                                                                     lsa_hypothesis.name + "_sim_settings.h5")
            if test_write_read:
                logger.info("Written and read simulation settings are identical?: " +
                            str(assert_equal_objects(sim.simulation_settings,
                                                     read_h5_model(os.path.join(FOLDER_RES,
                                                                                lsa_hypothesis.name + "_sim_settings.h5")
                                                                   ).convert_from_h5_model(
                                                         obj=deepcopy(sim.simulation_settings)),
                                                     logger=logger)))
            if not status:
                warning("\nSimulation failed!")
            else:
                time = np.array(ttavg, dtype='float32')
                output_sampling_time = np.mean(np.diff(time))
                tavg_data = tavg_data[:, :, :, 0]
                logger.info("\n\nSimulated signal return shape: %s", tavg_data.shape)
                logger.info("Time: %s - %s", time[0], time[-1])
                logger.info("Values: %s - %s", tavg_data.min(), tavg_data.max())
                # Variables of interest in a dictionary:
                vois_ts_dict = prepare_vois_ts_dict(VOIS[model_name], tavg_data)
                vois_ts_dict['time'] = time
                vois_ts_dict['time_units'] = 'msec'
                vois_ts_dict = compute_seeg_and_write_ts_h5_file(FOLDER_RES, lsa_hypothesis.name + "_ts.h5",
                                                                 sim.model, vois_ts_dict, output_sampling_time,
                                                                 time_length, hpf_flag=True, hpf_low=10.0,
                                                                 hpf_high=512.0,
                                                                 sensor_dicts_list=[head.sensorsSEEG])
                # Plot results
                plot_sim_results(sim.model, lsa_hypothesis.propagation_indices, lsa_hypothesis.name, head,
                                 vois_ts_dict, head.sensorsSEEG.keys(), hpf_flag=True,
                                 trajectories_plot=trajectories_plot,
                                 spectral_raster_plot=spectral_raster_plot, log_scale=True)
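
# Typical invocation of this example pipeline (a sketch; flag values are illustrative, with the
# defaults PSE_FLAG, SA_PSE_FLAG and SIM_FLAG coming from the package constants):
#
#     if __name__ == "__main__":
#         main_vep(test_write_read=False, pse_flag=True, sa_pse_flag=False, sim_flag=True)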
class EpisenseReader(ABCReader):

    LOG = initialize_logger(__name__)

    def read_connectivity(self, h5_path):
        """
        :param h5_path: Path towards an Episense Connectivity H5 file
        :return: Weights, Tracts, Region centers
        """
        self.LOG.info("Reading a Connectivity from: " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        self.LOG.debug("Structures: " + str(h5_file["/"].keys()))
        self.LOG.debug("Weights shape: " + str(h5_file['/weights'].shape))
        weights = h5_file['/weights'][()]
        tract_lengths = h5_file['/tract_lengths'][()]
        region_centers = h5_file['/centres'][()]  # should change to centers!
        region_labels = h5_file['/region_labels'][()]
        orientations = h5_file['/orientations'][()]
        hemispheres = h5_file['/hemispheres'][()]
        h5_file.close()
        return Connectivity(weights, tract_lengths, region_labels, region_centers, hemispheres, orientations)

    def read_cortical_surface(self, h5_path):
        self.LOG.info("Reading Surface from " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        vertices = h5_file['/vertices'][()]
        triangles = h5_file['/triangles'][()]
        vertex_normals = h5_file['/vertex_normals'][()]
        h5_file.close()
        return Surface(vertices, triangles, vertex_normals)

    def _read_data_field(self, h5_path):
        self.LOG.info("Reading 'data' from H5 " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        data = h5_file['/data'][()]
        h5_file.close()
        return data

    def read_region_mapping(self, h5_path):
        return self._read_data_field(h5_path)

    def read_volume_mapping(self, h5_path):
        return self._read_data_field(h5_path)

    def read_t1(self, h5_path):
        return self._read_data_field(h5_path)

    def read_sensors(self, h5_path, s_type):
        self.LOG.info("Reading Sensors from: " + h5_path)
        h5_file = h5py.File(h5_path, 'r', libver='latest')
        labels = h5_file['/labels'][()]
        locations = h5_file['/locations'][()]
        h5_file.close()
        return Sensors(labels, locations, s_type=s_type)

    def read_projection(self, path, s_type):
        raise NotImplementedError()

    def read_head(self, root_folder, name=''):
        conn = self.read_connectivity(os.path.join(root_folder, "Connectivity.h5"))
        srf = self.read_cortical_surface(os.path.join(root_folder, "CorticalSurface.h5"))
        rm = self.read_region_mapping(os.path.join(root_folder, "RegionMapping.h5"))
        vm = self.read_volume_mapping(os.path.join(root_folder, "VolumeMapping.h5"))
        t1 = self.read_volume_mapping(os.path.join(root_folder, "StructuralMRI.h5"))
        s_114 = self.read_sensors(os.path.join(root_folder, "SensorsSEEG_114.h5"), Sensors.TYPE_SEEG)
        s_125 = self.read_sensors(os.path.join(root_folder, "SensorsSEEG_125.h5"), Sensors.TYPE_SEEG)
        seeg_sensors_dict = {s_114: calculate_projection(s_114, conn),
                             s_125: calculate_projection(s_125, conn)}
        eeg_sensors_dict = {}
        meg_sensors_dict = {}
        return Head(conn, srf, rm, vm, t1, name, eeg_sensors_dict, meg_sensors_dict, seeg_sensors_dict)
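
# A minimal usage sketch (the head folder path is illustrative): read the connectivity and one
# SEEG sensor set of a head folder and compute their projection (gain) matrix.
#
#     reader = EpisenseReader()
#     conn = reader.read_connectivity(os.path.join("/path/to/Head_TREC", "Connectivity.h5"))
#     seeg = reader.read_sensors(os.path.join("/path/to/Head_TREC", "SensorsSEEG_114.h5"), Sensors.TYPE_SEEG)
#     gain = calculate_projection(seeg, conn)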