def test_region_boundaries(self):
    """The neighbour map should contain one entry per connectivity region."""
    surface = surfaces.Cortex()
    conn = connectivity.Connectivity()
    conn.configure()
    boundaries = region_boundaries.RegionBoundaries(surface)
    # len() of the dict equals len() of its keys view.
    self.assertEqual(len(boundaries.region_neighbours), conn.number_of_regions)
def test_assign_complex_attr(self):
    """ Test scientific methods are executed """
    coupling_strength_value = 0.0121
    local_conn = surfaces.LocalConnectivity(cutoff=10.0)
    cortex = surfaces.Cortex(coupling_strength=coupling_strength_value)
    #self.assertTrue(cortex.local_connectivity is None)
    cortex.local_connectivity = local_conn
    #cortex.region_average = cortex.region_mapping
    cortex.compute_local_connectivity()
    self.assertTrue(cortex.local_connectivity is not None)
def test_cortexdata(self):
    """Check summary statistics and array shapes of a reader-initialised Cortex."""
    cortex = surfaces.Cortex()
    ## Initialize Local Connectivity, to avoid long computation time.
    file_reader = readers.File(folder_path="surfaces/cortex_reg13")
    cortex.local_connectivity = surfaces.LocalConnectivity()
    cortex.local_connectivity.matrix = file_reader.read_data("nearest_neighbour.mat", "LocalCoupling")
    cortex.configure()
    info = cortex.summary_info
    expected_areas = {
        'Region area, maximum (mm:math:`^2`)': 9119.4540365252615,
        'Region area, mean (mm:math:`^2`)': 3366.2542250541251,
        'Region area, minimum (mm:math:`^2`)': 366.48271886512993,
    }
    for key, expected in expected_areas.items():
        self.assertTrue(abs(info[key] - expected) < 0.00000001)
    for field, shape in (('vertices', (16384, 3)),
                         ('vertex_normals', (16384, 3)),
                         ('triangles', (32760, 3))):
        self.assertEqual(cortex.get_data_shape(field), shape)
def test_cortexdata(self):
    """Check the default-loaded Cortex: type, region mapping, summary stats, shapes."""
    cortex = surfaces.Cortex(load_default=True)
    self.assertTrue(isinstance(cortex, surfaces.Cortex))
    self.assertTrue(cortex.region_mapping is not None)
    ## Initialize Local Connectivity, to avoid long computation time.
    cortex.local_connectivity = surfaces.LocalConnectivity(load_default=True)
    cortex.configure()
    info = cortex.summary_info
    tolerance = 0.00000001
    self.assertTrue(abs(info['Region area, maximum (mm:math:`^2`)'] - 9119.4540365252615) < tolerance)
    self.assertTrue(abs(info['Region area, mean (mm:math:`^2`)'] - 3366.2542250541251) < tolerance)
    self.assertTrue(abs(info['Region area, minimum (mm:math:`^2`)'] - 366.48271886512993) < tolerance)
    self.assertEqual(cortex.get_data_shape('vertices'), (16384, 3))
    self.assertEqual(cortex.get_data_shape('vertex_normals'), (16384, 3))
    self.assertEqual(cortex.get_data_shape('triangles'), (32760, 3))
""" .. moduleauthor:: Stuart A. Knock <*****@*****.**> """ import numpy import tvb.datatypes.surfaces as surfaces_module from tvb.simulator.region_boundaries import RegionBoundaries from tvb.simulator.region_colours import RegionColours from tvb.simulator.plot.tools import * CORTEX = surfaces_module.Cortex() CORTEX_BOUNDARIES = RegionBoundaries(CORTEX) region_colours = RegionColours(CORTEX_BOUNDARIES.region_neighbours) colouring = region_colours.back_track() #Make the hemispheres symetric #TODO: should prob. et colouring for one hemisphere then just stack two copies... number_of_regions = len(CORTEX_BOUNDARIES.region_neighbours) for k in range(int(number_of_regions / 2)): colouring[k + int(number_of_regions / 2)] = colouring[k] mapping_colours = list("rgbcmyRGBCMY") colour_rgb = { "r": numpy.array([255, 0, 0], dtype=numpy.uint8), "g": numpy.array([0, 255, 0], dtype=numpy.uint8), "b": numpy.array([0, 0, 255], dtype=numpy.uint8),
class ProjectionMatrix(core.Type):
    """
    Provides the mechanisms necessary to access OpenMEEG for the calculation
    of EEG and MEG projection matrices, ie matrices that map source activity
    to sensor activity. It is initialised with datatypes of TVB and ultimately
    returns the projection matrix as a Numpy ndarray.
    """

    brain_skull = surfaces_module.BrainSkull(
        label="Boundary between skull and skin domains",
        default=None,
        required=True,
        doc="""A ... surface on which ... including ...""")

    skull_skin = surfaces_module.SkullSkin(
        label="surface and auxillary for surface sim",
        default=None,
        required=True,
        doc="""A ... surface on which ... including ...""")

    skin_air = surfaces_module.SkinAir(
        label="surface and auxillary for surface sim",
        default=None,
        required=True,
        doc="""A ... surface on which ... including ...""")

    conductances = basic.Dict(
        label="Domain conductances",
        default={'air': 0.0, 'skin': 1.0, 'skull': 0.01, 'brain': 1.0},
        required=True,
        doc="""A dictionary representing the conductances of ...""")

    sources = surfaces_module.Cortex(
        label="surface and auxillary for surface sim",
        default=None,
        required=True,
        doc="""A cortical surface on which ... including ...""")

    sensors = sensors_module.Sensors(
        label="surface and auxillary for surface sim",
        default=None,
        required=False,
        doc="""A cortical surface on which ... including ... If left as None
            then EEG is assumed and skin_air is expected to already has
            sensors associated""")

    def __init__(self, **kwargs):
        """
        Initialise traited attributes and the attributes that will hold
        OpenMEEG objects.
        """
        super(ProjectionMatrix, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        # OpenMEEG attributes, populated by configure().
        self.om_head = None
        self.om_sources = None
        self.om_sensors = None
        self.om_head2sensor = None

        self.om_inverse_head = None
        self.om_source_matrix = None
        self.om_source2sensor = None  #For MEG, not used for EEG

    def configure(self):
        """
        Converts TVB objects into a form accessible to OpenMEEG, then uses the
        OpenMEEG library to calculate the intermediate matrices needed in
        obtaining the final projection matrix.
        """
        super(ProjectionMatrix, self).configure()
        if self.sensors is None:
            # EEG is assumed; the skin surface must already carry sensors.
            self.sensors = self.skin_air.sensors

        if isinstance(self.sensors, sensors_module.SensorsEEG):
            self.skin_air.sensors = self.sensors
            self.skin_air.sensor_locations = self.sensors.sensors_to_surface(self.skin_air)

        # Create OpenMEEG objects from TVB objects.
        self.om_head = self.create_om_head()
        self.om_sources = self.create_om_sources()
        self.om_sensors = self.create_om_sensors()

        # Calculate based on type of sources
        if isinstance(self.sources, surfaces_module.Cortex):
            self.om_source_matrix = self.surface_source()  #NOTE: ~1 hr
        elif isinstance(self.sources, connectivity_module.Connectivity):
            self.om_source_matrix = self.dipole_source()

        # Calculate based on type of sensors
        if isinstance(self.sensors, sensors_module.SensorsEEG):
            self.om_head2sensor = self.head2eeg()
        elif isinstance(self.sensors, sensors_module.SensorsMEG):
            self.om_head2sensor = self.head2meg()
            # MEG additionally needs a direct source-to-sensor mapping.
            if isinstance(self.sources, surfaces_module.Cortex):
                self.om_source2sensor = self.surf2meg()
            elif isinstance(self.sources, connectivity_module.Connectivity):
                self.om_source2sensor = self.dip2meg()  #NOTE: ~1 hr

        self.om_inverse_head = self.inverse_head(inv_head_mat_file="hminv_uid")

    def __call__(self):
        """
        Having configured the ProjectionMatrix instance, that is having run
        the configure() method or otherwise provided the intermediate OpenMEEG
        (om_*) attributes, the object can be called as a function -- returning
        a projection matrix as a Numpy array.
        """
        # NOTE: returned projection_matrix is a numpy.ndarray
        if isinstance(self.sensors, sensors_module.SensorsEEG):
            projection_matrix = self.eeg_gain()
        elif isinstance(self.sensors, sensors_module.SensorsMEG):
            projection_matrix = self.meg_gain()
        else:
            # Fix: previously fell through and raised UnboundLocalError.
            raise TypeError("sensors should be either SensorsEEG or SensorsMEG, "
                            "got %r" % type(self.sensors))
        return projection_matrix

    ##------------------------------------------------------------------------##
    ##--------------- Methods for creating openmeeg objects ------------------##
    ##------------------------------------------------------------------------##

    def create_om_head(self):  #TODO: Prob. need to make file names specifiable
        """
        Generates 5 files::

            skull_skin.tri
            skin_air.tri
            brain_skull.tri
            head_model.geom
            head_model.cond

        Containing the specification of a head in a form that can be read by
        OpenMEEG, then creates and returns an OpenMEEG Geometry object
        containing this information.
        """
        surface_files = []
        surface_files.append(self._tvb_surface_to_tri("skull_skin.tri"))
        surface_files.append(self._tvb_surface_to_tri("brain_skull.tri"))
        surface_files.append(self._tvb_surface_to_tri("skin_air.tri"))

        geometry_file = self._write_head_geometry(surface_files, "head_model.geom")
        conductances_file = self._write_conductances("head_model.cond")

        LOG.info("Creating OpenMEEG Geometry object for the head...")
        om_head = om.Geometry()
        om_head.read(geometry_file, conductances_file)
        #om_head.selfCheck() #Didn't catch bad order...
        LOG.info("OpenMEEG Geometry object for the head successfully created.")
        return om_head

    def create_om_sources(self):  #TODO: Prob. should make file names specifiable
        """
        Take a TVB Connectivity or Cortex object and return an OpenMEEG object
        that specifies sources, a Matrix object for region level sources or a
        Mesh object for a cortical surface source.

        Raises TypeError for any other source type (previously the error was
        only logged and a NameError followed).
        """
        if isinstance(self.sources, connectivity_module.Connectivity):
            sources_file = self._tvb_connectivity_to_txt("sources.txt")
            om_sources = om.Matrix()
        elif isinstance(self.sources, surfaces_module.Cortex):
            sources_file = self._tvb_surface_to_tri("sources.tri")
            om_sources = om.Mesh()
        else:
            LOG.error("sources must be either a Connectivity or Cortex.")
            raise TypeError("sources must be either a Connectivity or Cortex, "
                            "got %r" % type(self.sources))

        om_sources.load(sources_file)
        return om_sources

    def create_om_sensors(self, file_name=None):
        """
        Take a TVB Sensors object and return an OpenMEEG Sensors object.

        Raises TypeError for unsupported sensor types (previously the error
        was only logged and a NameError followed).
        """
        if isinstance(self.sensors, sensors_module.SensorsEEG):
            file_name = file_name or "eeg_sensors.txt"
            sensors_file = self._tvb_eeg_sensors_to_txt(file_name)
        elif isinstance(self.sensors, sensors_module.SensorsMEG):
            file_name = file_name or "meg_sensors.squid"
            sensors_file = self._tvb_meg_sensors_to_squid(file_name)
        else:
            LOG.error("sensors should be either SensorsEEG or SensorsMEG")
            raise TypeError("sensors should be either SensorsEEG or SensorsMEG, "
                            "got %r" % type(self.sensors))

        LOG.info("Wrote sensors to temporary file: %s" % str(file_name))

        om_sensors = om.Sensors()
        om_sensors.load(sensors_file)
        return om_sensors

    ##------------------------------------------------------------------------##
    ##--------- Methods for calling openmeeg methods, with logging. ----------##
    ##------------------------------------------------------------------------##

    def surf2meg(self):
        """
        Create a matrix that can be used to map an OpenMEEG surface source to
        an OpenMEEG MEG Sensors object.
        NOTE: This source to sensor mapping is not required for EEG.
        """
        # Fix: log message previously said "DipSource2MEGMat" (copy-paste).
        LOG.info("Computing SurfSource2MEGMat...")
        surf2meg_mat = om.SurfSource2MEGMat(self.om_sources, self.om_sensors)
        LOG.info("surf2meg: %d x %d" % (surf2meg_mat.nlin(), surf2meg_mat.ncol()))
        return surf2meg_mat

    def dip2meg(self):
        """
        Create an OpenMEEG Matrix that can be used to map OpenMEEG dipole
        sources to an OpenMEEG MEG Sensors object.
        NOTE: This source to sensor mapping is not required for EEG.
        """
        LOG.info("Computing DipSource2MEGMat...")
        dip2meg_mat = om.DipSource2MEGMat(self.om_sources, self.om_sensors)
        LOG.info("dip2meg: %d x %d" % (dip2meg_mat.nlin(), dip2meg_mat.ncol()))
        return dip2meg_mat

    def head2eeg(self):
        """
        Call OpenMEEG's Head2EEGMat method to calculate the head to EEG sensor
        matrix.
        """
        LOG.info("Computing Head2EEGMat...")
        h2s_mat = om.Head2EEGMat(self.om_head, self.om_sensors)
        LOG.info("head2eeg: %d x %d" % (h2s_mat.nlin(), h2s_mat.ncol()))
        return h2s_mat

    def head2meg(self):
        """
        Call OpenMEEG's Head2MEGMat method to calculate the head to MEG sensor
        matrix.
        """
        LOG.info("Computing Head2MEGMat...")
        h2s_mat = om.Head2MEGMat(self.om_head, self.om_sensors)
        LOG.info("head2meg: %d x %d" % (h2s_mat.nlin(), h2s_mat.ncol()))
        return h2s_mat

    def surface_source(self, gauss_order=3, surf_source_file=None):
        """
        Call OpenMEEG's SurfSourceMat method to calculate a surface source
        matrix. Optionally saving the matrix for later use.
        """
        LOG.info("Computing SurfSourceMat...")
        ssm = om.SurfSourceMat(self.om_head, self.om_sources, gauss_order)
        LOG.info("surface_source_mat: %d x %d" % (ssm.nlin(), ssm.ncol()))
        if surf_source_file is not None:
            LOG.info("Saving surface_source matrix as %s..." % surf_source_file)
            ssm.save(os.path.join(OM_STORAGE_DIR,
                                  surf_source_file + OM_SAVE_SUFFIX))  #~3GB
        return ssm

    def dipole_source(self, gauss_order=3, use_adaptive_integration=True,
                      dip_source_file=None):
        """
        Call OpenMEEG's DipSourceMat method to calculate a dipole source
        matrix. Optionally saving the matrix for later use.
        """
        LOG.info("Computing DipSourceMat...")
        dsm = om.DipSourceMat(self.om_head, self.om_sources, gauss_order,
                              use_adaptive_integration)
        LOG.info("dipole_source_mat: %d x %d" % (dsm.nlin(), dsm.ncol()))
        if dip_source_file is not None:
            LOG.info("Saving dipole_source matrix as %s..." % dip_source_file)
            dsm.save(os.path.join(OM_STORAGE_DIR,
                                  dip_source_file + OM_SAVE_SUFFIX))
        return dsm

    def inverse_head(self, gauss_order=3, inv_head_mat_file=None):
        """
        Call OpenMEEG's HeadMat method to calculate a head matrix. The inverse
        method of the head matrix is subsequently called to invert the matrix.
        Optionally saving the inverted matrix for later use.

        Runtime ~8 hours, mostly in matrix inverse as I just use a stock ATLAS
        install which doesn't appear to be multithreaded (custom building
        ATLAS should sort this)... Under Windows it should use MKL, not sure
        for Mac.

        For reg13+potato surfaces, saved file size: hminv ~ 5GB, ssm ~ 3GB.
        """
        LOG.info("Computing HeadMat...")
        head_matrix = om.HeadMat(self.om_head, gauss_order)
        LOG.info("head_matrix: %d x %d" % (head_matrix.nlin(), head_matrix.ncol()))

        LOG.info("Inverting HeadMat...")
        hminv = head_matrix.inverse()
        LOG.info("inverse head_matrix: %d x %d" % (hminv.nlin(), hminv.ncol()))

        if inv_head_mat_file is not None:
            LOG.info("Saving inverse_head matrix as %s..." % inv_head_mat_file)
            hminv.save(os.path.join(OM_STORAGE_DIR,
                                    inv_head_mat_file + OM_SAVE_SUFFIX))  #~5GB
        return hminv

    def eeg_gain(self, eeg_file=None):
        """
        Call OpenMEEG's GainEEG method to calculate the final projection
        matrix. Optionally saving the matrix for later use. The OpenMEEG
        matrix is converted to a Numpy array before return.
        """
        LOG.info("Computing GainEEG...")
        eeg_gain = om.GainEEG(self.om_inverse_head, self.om_source_matrix,
                              self.om_head2sensor)
        LOG.info("eeg_gain: %d x %d" % (eeg_gain.nlin(), eeg_gain.ncol()))
        if eeg_file is not None:
            LOG.info("Saving eeg_gain as %s..." % eeg_file)
            eeg_gain.save(os.path.join(OM_STORAGE_DIR,
                                       eeg_file + OM_SAVE_SUFFIX))
        return om.asarray(eeg_gain)

    def meg_gain(self, meg_file=None):
        """
        Call OpenMEEG's GainMEG method to calculate the final projection
        matrix. Optionally saving the matrix for later use. The OpenMEEG
        matrix is converted to a Numpy array before return.
        """
        LOG.info("Computing GainMEG...")
        meg_gain = om.GainMEG(self.om_inverse_head, self.om_source_matrix,
                              self.om_head2sensor, self.om_source2sensor)
        LOG.info("meg_gain: %d x %d" % (meg_gain.nlin(), meg_gain.ncol()))
        if meg_file is not None:
            LOG.info("Saving meg_gain as %s..." % meg_file)
            meg_gain.save(os.path.join(OM_STORAGE_DIR,
                                       meg_file + OM_SAVE_SUFFIX))
        return om.asarray(meg_gain)

    ##------------------------------------------------------------------------##
    ##------- Methods for writting temporary files loaded by openmeeg --------##
    ##------------------------------------------------------------------------##

    def _tvb_meg_sensors_to_squid(self, sensors_file_name):
        """
        Write a tvb meg_sensor datatype to a .squid file, so that OpenMEEG can
        read it and compute the projection matrix for MEG...
        """
        sensors_file_path = os.path.join(OM_STORAGE_DIR, sensors_file_name)
        # A .squid row is sensor location followed by its orientation.
        meg_sensors = numpy.hstack((self.sensors.locations,
                                    self.sensors.orientations))
        numpy.savetxt(sensors_file_path, meg_sensors)
        return sensors_file_path

    def _tvb_connectivity_to_txt(self, dipoles_file_name):
        """
        Write position and orientation information from a TVB connectivity
        object to a text file that can be read as source dipoles by OpenMEEG.

        NOTE: Region level simulations lack sufficient detail of source
        orientation, etc, to provide anything but superficial relevance.
        It's probably better to do a mapping of region level simulations to a
        surface and then perform the EEG projection from the mapped data...
        """
        # Fix: the bare `NotImplementedError` expression here did nothing;
        # it must be raised to signal the missing implementation.
        raise NotImplementedError("Region-level dipole export is not implemented.")

    def _tvb_surface_to_tri(self, surface_file_name):
        """
        Write a tvb surface datatype to .tri format, so that OpenMEEG can read
        it and compute projection matrices for EEG/MEG/...
        """
        surface_file_path = os.path.join(OM_STORAGE_DIR, surface_file_name)

        #TODO: check file doesn't already exist
        LOG.info("Writing TVB surface to .tri file: %s" % surface_file_path)
        # Fix: the Python-2-only `file()` builtin replaced with `open()`.
        file_handle = open(surface_file_path, "a")

        file_handle.write("- %d \n" % self.sources.number_of_vertices)
        verts_norms = numpy.hstack((self.sources.vertices,
                                    self.sources.vertex_normals))
        numpy.savetxt(file_handle, verts_norms)

        tri_str = "- " + (3 * (str(self.sources.number_of_triangles) + " ")) + "\n"
        file_handle.write(tri_str)
        numpy.savetxt(file_handle, self.sources.triangles, fmt="%d")

        file_handle.close()
        LOG.info("%s written successfully." % surface_file_name)
        return surface_file_path

    def _tvb_eeg_sensors_to_txt(self, sensors_file_name):
        """
        Write a tvb eeg_sensor datatype (after mapping to the head surface to
        be used) to a .txt file, so that OpenMEEG can read it and compute
        leadfield/projection/forward_solution matrices for EEG...
        """
        sensors_file_path = os.path.join(OM_STORAGE_DIR, sensors_file_name)
        LOG.info("Writing TVB sensors to .txt file: %s" % sensors_file_path)
        numpy.savetxt(sensors_file_path, self.skin_air.sensor_locations)
        LOG.info("%s written successfully." % sensors_file_name)
        return sensors_file_path

    #TODO: enable specifying ?or determining? domain surface relationships...
    def _write_head_geometry(self, boundary_file_names, geom_file_name):
        """
        Write a geometry file that is read in by OpenMEEG, this file specifies
        the files containing the boundary surfaces and their relationship to
        the domains that comprise the head.

        NOTE: Currently the list of files is expected to be in a specific
        order, namely::

            skull_skin
            brain_skull
            skin_air

        which is reflected in the static setting of domains. Should be
        generalised.
        """
        geom_file_path = os.path.join(OM_STORAGE_DIR, geom_file_name)

        #TODO: Check that the file doesn't already exist.
        LOG.info("Writing head geometry file: %s" % geom_file_path)
        # Fix: the Python-2-only `file()` builtin replaced with `open()`.
        file_handle = open(geom_file_path, "a")

        file_handle.write("# Domain Description 1.0\n\n")
        file_handle.write("Interfaces %d Mesh\n\n" % len(boundary_file_names))
        for file_name in boundary_file_names:
            file_handle.write("%s\n" % file_name)

        file_handle.write("\nDomains %d\n\n" % (len(boundary_file_names) + 1))
        # NOTE(review): these hard-coded domain/interface indices depend on
        # the fixed surface-file order documented above — verify against the
        # OpenMEEG geometry format before generalising.
        file_handle.write("Domain Scalp %s %s\n" % (1, -3))
        file_handle.write("Domain Brain %s %s\n" % ("-2", "shared"))
        file_handle.write("Domain Air %s\n" % 3)
        file_handle.write("Domain Skull %s %s\n" % (2, -1))

        file_handle.close()
        LOG.info("%s written successfully." % geom_file_path)
        return geom_file_path

    def _write_conductances(self, cond_file_name):
        """
        Write a conductance file that is read in by OpenMEEG, this file
        specifies the conductance of each of the domains making up the head.

        NOTE: Values are restricted to have 2 decimal places, ie #.##, setting
        values of the form 0.00# will result in 0.01 or 0.00, for numbers
        greater or less than ~0.00499999999999999967, respectively...
        """
        cond_file_path = os.path.join(OM_STORAGE_DIR, cond_file_name)

        #TODO: Check that the file doesn't already exist.
        LOG.info("Writing head conductance file: %s" % cond_file_path)
        # Fix: the Python-2-only `file()` builtin replaced with `open()`.
        file_handle = open(cond_file_path, "a")

        file_handle.write("# Properties Description 1.0 (Conductivities)\n\n")
        file_handle.write("Air %4.2f\n" % self.conductances["air"])
        file_handle.write("Scalp %4.2f\n" % self.conductances["skin"])
        file_handle.write("Brain %4.2f\n" % self.conductances["brain"])
        file_handle.write("Skull %4.2f\n" % self.conductances["skull"])

        file_handle.close()
        LOG.info("%s written successfully." % cond_file_path)
        return cond_file_path

    #TODO: Either make these utility functions or have them load directly into
    #      the appropriate attribute...
    ##------------------------------------------------------------------------##
    ##---- Methods for loading precomputed matrices into openmeeg objects ----##
    ##------------------------------------------------------------------------##

    def _load_om_inverse_head_mat(self, file_name):
        """
        Load a previously stored inverse head matrix into an OpenMEEG
        SymMatrix object.
        """
        inverse_head_martix = om.SymMatrix()
        inverse_head_martix.load(file_name)
        return inverse_head_martix

    def _load_om_source_mat(self, file_name):
        """
        Load a previously stored source matrix into an OpenMEEG Matrix object.
        """
        source_matrix = om.Matrix()
        source_matrix.load(file_name)
        return source_matrix
class Simulator(core.Type):
    """
    The Simulator class coordinates classes from all other modules in the
    simulator package in order to perform simulations.

    In general, it is necessary to initialiaze a simulator with the desired
    components and then call the simulator in a loop to obtain simulation
    data:

    >>> sim = Simulator(...)
    >>> for output in sim(simulation_length=1000):
            ...

    Please refer to the user guide and the demos for more detail.

    .. #Currently there seems to be a clash betwen traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Simulator.__init__
    .. automethod:: Simulator.configure
    .. automethod:: Simulator.__call__
    .. automethod:: Simulator.configure_history
    .. automethod:: Simulator.configure_integrator_noise
    .. automethod:: Simulator.memory_requirement
    .. automethod:: Simulator.runtime
    .. automethod:: Simulator.storage_requirement

    """

    connectivity = connectivity_dtype.Connectivity(
        label="Long-range connectivity",
        default=None,
        order=1,
        required=True,
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="region_mapping_data",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""A tvb.datatypes.Connectivity object which contains the
        structural long-range connectivity data (i.e., white-matter tracts).
        In combination with the ``Long-range coupling function`` it defines
        the inter-regional connections. These couplings undergo a time delay
        via signal propagation with a propagation speed of
        ``Conduction Speed``""")

    conduction_speed = basic.Float(
        label="Conduction Speed",
        default=3.0,
        order=2,
        required=False,
        range=basic.Range(lo=0.01, hi=100.0, step=1.0),
        doc="""Conduction speed for ``Long-range connectivity`` (mm/ms)""")

    coupling = coupling_module.Coupling(
        label="Long-range coupling function",
        default=coupling_module.Linear(),
        required=True,
        order=2,
        doc="""The coupling function is applied to the activity propagated
        between regions by the ``Long-range connectivity`` before it enters
        the local dynamic equations of the Model. Its primary purpose is to
        'rescale' the incoming activity to a level appropriate to Model.""")

    surface = surfaces_dtype.Cortex(
        label="Cortical surface",
        default=None,
        order=3,
        required=False,
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="local_connectivity",
                     linked_elem_field=FilterChain.datatype + "._surface",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""By default, a tvb.datatypes.Cortex object which represents the
        cortical surface defined by points in the 3D physical space and their
        neighborhood relationship. In the current TVB version, when setting up
        a surface-based simulation, the option to configure the spatial spread
        of the ``Local Connectivity`` is available.""")

    stimulus = patterns_dtype.SpatioTemporalPattern(
        label="Spatiotemporal stimulus",
        default=None,
        order=4,
        required=False,
        doc="""A ``Spatiotemporal stimulus`` can be defined at the region or
        surface level. It's composed of spatial and temporal components. For
        region defined stimuli the spatial component is just the strength with
        which the temporal component is applied to each region. For surface
        defined stimuli, a (spatial) function, with finite-support, is used to
        define the strength of the stimuli on the surface centred around one
        or more focal points. In the current version of TVB, stimuli are
        applied to the first state variable of the ``Local dynamic model``.""")

    model = models_module.Model(
        label="Local dynamic model",
        default=models_module.Generic2dOscillator,
        required=True,
        order=5,
        doc="""A tvb.simulator.Model object which describe the local dynamic
        equations, their parameters, and, to some extent, where connectivity
        (local and long-range) enters and which state-variables the Monitors
        monitor. By default the 'Generic2dOscillator' model is used. Read the
        Scientific documentation to learn more about this model.""")

    integrator = integrators_module.Integrator(
        label="Integration scheme",
        default=integrators_module.HeunDeterministic,
        required=True,
        order=6,
        doc="""A tvb.simulator.Integrator object which is an integration
        scheme with supporting attributes such as integration step size and
        noise specification for stochastic methods. It is used to compute the
        time courses of the model state variables.""")

    initial_conditions = arrays_dtype.FloatArray(
        label="Initial Conditions",
        default=None,
        order=-1,  #Hidden until UI support exists.
        required=False,
        doc="""Initial conditions from which the simulation will begin. By
        default, random initial conditions are provided. Needs to be the same
        shape as simulator 'history', ie, initial history function which
        defines the minimal initial state of the network with time delays
        before time t=0. If the number of time points in the provided array is
        insufficient the array will be padded with random values based on the
        'state_variables_range' attribute.""")

    monitors = monitors_module.Monitor(
        label="Monitor(s)",
        default=monitors_module.TemporalAverage,
        required=True,
        order=8,
        select_multiple=True,
        doc="""A tvb.simulator.Monitor or a list of tvb.simulator.Monitor
        objects that 'know' how to record relevant data from the simulation.
        Two main types exist: 1) simple, spatial and temporal, reductions
        (subsets or averages); 2) physiological measurements, such as EEG, MEG
        and fMRI. By default the Model's specified variables_of_interest are
        returned, temporally downsampled from the raw integration rate to a
        sample rate of 1024Hz.""")

    simulation_length = basic.Float(
        label="Simulation Length (ms)",
        default=1000.0,  #ie 1 second
        required=True,
        order=9,
        doc="""The length of a simulation in milliseconds (ms).""")

    def __init__(self, **kwargs):
        """
        Use the base class' mechanisms to initialise the traited attributes
        declared above, overriding defaults with any provided keywords. Then
        declare any non-traited attributes.
        """
        super(Simulator, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        # Bookkeeping set up lazily by configure()/__call__().
        self.calls = 0                    # number of times this Simulator was called
        self.current_step = 0             # current integration step index
        self.number_of_nodes = None       # regions, or vertices + non-cortical regions
        self.horizon = None               # delay horizon, in integration steps
        self.good_history_shape = None    # expected shape of the history buffer
        self.history = None               # delayed-state workspace buffer
        self._memory_requirement_guess = None
        self._memory_requirement_census = None
        self._storage_requirement = None
        self._runtime = None

    def __str__(self):
        return "Simulator(**kwargs)"

    def configure(self):
        """
        THe first step of configuration is to run the configure methods of all
        the Simulator's components, ie its traited attributes.

        Configuration of a Simulator primarily consists of calculating the
        attributes, etc, which depend on the combinations of the Simulator's
        traited attributes (keyword args).

        Converts delays from physical time units into integration steps and
        updates attributes that depend on combinations of the 6 inputs.
        """
        ##--- Perform independent configure of traited attribute components --##
        #TODO: Temporary hack, until actual speed attribute is properly accessible from UI.
        if self.conduction_speed not in (0.0, 3.0, None):
            LOG.warning("Setting connectivity.speed with conduction_speed provided to simulator.")
            self.connectivity.speed = numpy.array([self.conduction_speed])

        self.connectivity.configure()

        if self.surface:
            self.surface.configure()

        if self.stimulus:
            self.stimulus.configure()

        self.model.configure()
        self.integrator.configure()

        # monitors needs to be a list or tuple, even if there is only one...
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]

        # Configure monitors
        for monitor in self.monitors:
            monitor.configure()

        ##------------- Now the the interdependant configuration -------------##

        #"Nodes" refers to either regions or vertices + non-cortical regions.
        if self.surface is None:
            self.number_of_nodes = self.connectivity.number_of_regions
        else:
            #try:
            self.number_of_nodes = self.surface.region_mapping.shape[0]
            #except AttributeError:
            #    msg = "%s: Surface needs region mapping defined... "
            #    LOG.error(msg % (repr(self)))

        #Make sure spatialised model parameters have the right shape (number_of_nodes, 1)
        excluded_checks = ("state_variable_range", "variables_of_interest",
                           "noise", "psi_table", "nerf_table")
        params = self.model.trait.keys()
        for param in excluded_checks:
            if param in params:
                params.remove(param)

        for param in params:
            #If it's a surface sim and model parameters were provided at the region level
            #NOTE(review): nesting below reconstructed from collapsed source —
            #the node-level reshape check is taken to be per-param, outside the
            #surface-only branch; confirm against project history.
            if self.surface is not None:
                #TODO: Once traits are working properly again, the evals and execs here shouldn't be necessary...
                if eval("self.model." + param + ".size") == self.connectivity.number_of_regions:
                    exec("self.model." + param + " = self.model." + param
                         + "[self.surface.region_mapping].reshape((-1, 1))")
            if eval("self.model." + param + ".size") == self.number_of_nodes:
                exec("self.model." + param + " = self.model." + param
                     + ".reshape((-1, 1))")

        # Estimate of memory usage
        self._guesstimate_memory_requirement()

        #Configure spatial component of any stimuli
        self.configure_stimuli()

        #Set delays, provided in physical units, in integration steps.
        self.connectivity.set_idelays(self.integrator.dt)
        self.horizon = numpy.max(self.connectivity.idelays) + 1
        LOG.info("horizon is %d steps" % self.horizon)

        # workspace -- minimal state of network with delays
        self.good_history_shape = (self.horizon, self.model.nvar,
                                   self.number_of_nodes,
                                   self.model.number_of_modes)
        msg = "%s: History shape will be: %s"
        LOG.debug(msg % (repr(self), str(self.good_history_shape)))

        #Reshape integrator.noise.nsig, if neccessary.
        if isinstance(self.integrator, integrators_module.IntegratorStochastic):
            self.configure_integrator_noise()

        self.configure_history(self.initial_conditions)

        #Configure Monitors to work with selected Model, etc...
        self.configure_monitors()

        #Estimate of memory usage.
        self._census_memory_requirement()

    def __call__(self, simulation_length=None, random_state=None):
        """
        When a Simulator is called it returns an iterator.

        kwargs:

        ``simulation_length``: total time of simulation

        ``random_state``: a state for the NumPy random number generator, saved
        from a previous call to permit consistent continuation of a
        simulation.
        """
        #The number of times this Simulator has been called.
        self.calls += 1

        #Update the simulator objects simulation_length attribute,
        if simulation_length is None:
            simulation_length = self.simulation_length
        else:
            self.simulation_length = simulation_length

        #Estimate run time and storage requirements, with logging.
        self._guesstimate_runtime()
        self._calculate_storage_requirement()

        if random_state is not None:
            if isinstance(self.integrator, integrators_module.IntegratorStochastic):
                self.integrator.noise.random_stream.set_state(random_state)
                #msg = "%s: random_state supplied. Seed is: %s"
                #LOG.info(msg % str(self, self.integrator.noise.random_stream.get_state()[1][0]))
            else:
                msg = "%s: random_state supplied for non-stochastic integration"
                LOG.warn(msg % str(self))

        #Determine the number of integration steps required to produce
        #data of simulation_length
        int_steps = int(simulation_length / self.integrator.dt)
        LOG.info("%s: gonna do %d integration steps" % (str(self), int_steps))

        # locals for cleaner code.
        horizon = self.horizon
        history = self.history
        dfun = self.model.dfun
        coupling = self.coupling
        scheme = self.integrator.scheme
        npsum = numpy.sum
        npdot = numpy.dot
        ncvar = len(self.model.cvar)
        number_of_regions = self.connectivity.number_of_regions
        # Broadcast shape used for the delayed-state gather below.
        nsn = (number_of_regions, 1, number_of_regions)

        #import pdb; pdb.set_trace()
        #Create cvar index array of shape (regions, ncvar, regions).
        cvar = numpy.tile(numpy.ones(nsn, dtype=numpy.int32), (1, ncvar, 1))
        for k in range(0, ncvar):
            cvar[:, k, :] = self.model.cvar[k] * cvar[:, k, :]
        LOG.debug("%s: cvar shape is: %s" % (str(self), str(cvar.shape)))
        LOG.debug("%s: cvars are : %s" % (str(self), str(numpy.unique(cvar))))

        #reshaped connectivity.idelays to broadcast over coupling variables.
        idelays = self.connectivity.idelays.reshape(nsn)
        idelays = numpy.tile(idelays, (1, ncvar, 1))
        #print idelays
        LOG.debug("%s: idelays shape is: %s" % (str(self), str(idelays.shape)))

        #reshaped connectivity.weights to broadcast over cvars and modes.
        weights = self.connectivity.weights.reshape(nsn + (1,))
        weights = numpy.tile(weights, (1, ncvar, 1, self.model.number_of_modes))
        LOG.debug("%s: weights shape is: %s" % (str(self), str(weights.shape)))

        #Create node index array matching the delayed-state gather shape.
        node_ids = numpy.tile(numpy.arange(number_of_regions)[:, numpy.newaxis],
                              (1, number_of_regions)).reshape(nsn)
        node_ids = numpy.tile(node_ids, (1, ncvar, 1))
        LOG.debug("%s: node_ids shape is: %s" % (str(self), str(node_ids.shape)))
        #import pdb; pdb.set_trace()

        if self.surface is None:
            local_coupling = 0.0
        else:
            # Surface simulation: keep a region-averaged copy of history for
            # the long-range coupling, and build the local coupling operator.
            region_average = self.surface.region_average
            region_history = npdot(region_average, history)
            region_history = region_history.transpose((1, 2, 0, 3))

            if self.surface.coupling_strength.size == 1:
                # Homogeneous local coupling strength.
                local_coupling = (self.surface.coupling_strength[0] *
                                  self.surface.local_connectivity.matrix)
            elif self.surface.coupling_strength.size == self.surface.number_of_vertices:
                # Per-vertex coupling strength, applied as a sparse diagonal scale.
                ind = numpy.arange(self.number_of_nodes, dtype=int)
                vec_cs = numpy.zeros((self.number_of_nodes,))
                vec_cs[:self.surface.number_of_vertices] = self.surface.coupling_strength
                sp_cs = sparse.csc_matrix((vec_cs, (ind, ind)),
                                          shape=(self.number_of_nodes, self.number_of_nodes))
                local_coupling = sp_cs * self.surface.local_connectivity.matrix
                #local_coupling = local_coupling.tocsr()

        if self.stimulus is None:
            stimulus = 0.0
        else:
            #TODO: Consider changing to absolute time...
            time = numpy.arange(0, simulation_length, self.integrator.dt)
            time = time[numpy.newaxis, :]
            self.stimulus.configure_time(time)
            stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
            LOG.debug("%s: stimulus shape is: %s" % (str(self), str(stimulus.shape)))

        # initial state, history[timepoint[0], state_variables, nodes, modes]
        state = history[self.current_step % horizon, :]
        LOG.debug("%s: state shape is: %s" % (str(self), str(state.shape)))
        #print state[0, ]

        # record initial state
        #output = [monitor.record(horizon - self.current_step - 1 , state) for monitor in self.monitors]
        #if any(outputi is not None for outputi in output):
        #    yield output

        for step in range(self.current_step + 1,
                          self.current_step + int_steps + 1):
            if self.surface is None:
                # Region simulation: gather delayed states directly from history.
                delayed_state = history[(step - 1 - idelays) % horizon, cvar, node_ids, :]
                #coupling._set_pattern(npsum(delayed_state * weights, axis=0))
                #node_coupling = coupling.pattern
                node_coupling = coupling(weights, state[self.model.cvar], delayed_state)
            else:
                # Surface simulation: couple at region level, then map back to
                # vertices through the vertex_mapping.
                delayed_state = region_history[(step - 1 - idelays) % horizon, cvar, node_ids, :]
                #coupling._set_pattern(npsum(delayed_state * weights, axis=0))
                #region_coupling = coupling.pattern
                region_coupling = coupling(weights,
                                           region_history[(step - 1) % horizon, self.model.cvar],
                                           delayed_state)
                node_coupling = npdot(self.surface.vertex_mapping, region_coupling)
                node_coupling = node_coupling.transpose((1, 0, 2))
                #import pdb; pdb.set_trace()

            if self.stimulus is not None:
                # Stimulus is applied to the coupling variables only.
                stimulus[self.model.cvar, :, :] = numpy.reshape(
                    self.stimulus(step - (self.current_step + 1)), (1, -1, 1))
                #import pdb; pdb.set_trace()

            #import pdb; pdb.set_trace()
            state = scheme(state, dfun, node_coupling, local_coupling, stimulus)
            history[step % horizon, :] = state

            if self.surface is not None:
                # Keep the region-averaged history in sync with the new state.
                region_history[step % horizon, :] = npdot(
                    region_average, state).transpose((1, 0, 2))

            # monitor.things e.g. raw, average, eeg, meg, fmri...
output = [monitor.record(step, state) for monitor in self.monitors] if any(outputi is not None for outputi in output): yield output #TODO: Need to be able to pause and resume a running simulation. #import pdb; pdb.set_trace() #Update to support continuation self.current_step = self.current_step + int_steps - 1 #TODO: Don't think this -1 should be here, check... self.history = history # def configure_history(self, initial_conditions=None): """ Set initial conditions for the simulation using either the provided initial_conditions or, if none are provided, the model's initial() method. This method is called durin the Simulator's __init__(). Any initial_conditions that are provided as an argument are expected to have dimensions 1, 2, and 3 with shapse corresponding to the number of state_variables, nodes and modes, respectively. If the provided inital_conditions are shorter in time (dim=0) than the required history the model's initial() method is called to make up the difference. """ history = self.history if initial_conditions is None: msg = "%s: Setting default history using model's initial() method." LOG.info(msg % str(self)) history = self.model.initial(self.integrator.dt, self.good_history_shape) else: # history should be [timepoints, state_variables, nodes, modes] LOG.info("%s: Received initial conditions as arg." % str(self)) ic_shape = initial_conditions.shape if ic_shape[1:] != self.good_history_shape[1:]: msg = "%s: bad initial_conditions[1:] shape %s, should be %s" LOG.error(msg % (str(self), str( ic_shape[1:]), str(self.good_history_shape[1:]))) else: if ic_shape[0] >= self.horizon: msg = "%s: Using last %s time-steps for history." LOG.info(msg % (str(self), self.horizon)) history = initial_conditions[ -self.horizon:, :, :, :].copy() else: msg = "%s: initial_conditions shorter than required." LOG.info(msg % str(self)) msg = "%s: Using model's initial() method for difference." 
LOG.info(msg % str(self)) history = self.model.initial(self.integrator.dt, self.good_history_shape) csmh = self.current_step % self.horizon history = numpy.roll(history, -csmh, axis=0) history[:ic_shape[0], :, :, :] = initial_conditions history = numpy.roll(history, csmh, axis=0) self.current_step += ic_shape[0] - 1 msg = "%s: history shape is: %s" LOG.debug(msg % (str(self), str(history.shape))) self.history = history def configure_integrator_noise(self): """ This enables having noise to be state variable specific and/or to enter only via specific brain structures, for example it we only want to consider noise as an external input entering the brain via appropriate thalamic nuclei. Support 3 possible shapes: 1) number_of_nodes; 2) number_of_state_variables; and 3) (number_of_state_variables, number_of_nodes). """ noise = self.integrator.noise if self.integrator.noise.ntau > 0.0: self.integrator.noise.configure_coloured( self.integrator.dt, self.good_history_shape[1:]) else: self.integrator.noise.configure_white(self.integrator.dt, self.good_history_shape[1:]) if self.surface is not None: if self.integrator.noise.nsig.size == self.connectivity.number_of_regions: self.integrator.noise.nsig = self.integrator.noise.nsig[ self.surface.region_mapping] elif self.integrator.noise.nsig.size == self.model.nvar * self.connectivity.number_of_regions: self.integrator.noise.nsig = self.integrator.noise.nsig[:, self. surface . 
region_mapping] good_nsig_shape = (self.model.nvar, self.number_of_nodes, self.model.number_of_modes) nsig = self.integrator.noise.nsig LOG.debug("Simulator.integrator.noise.nsig shape: %s" % str(nsig.shape)) if nsig.shape in (good_nsig_shape, (1, )): return elif nsig.shape == (self.model.nvar, ): nsig = nsig.reshape((self.model.nvar, 1, 1)) elif nsig.shape == (self.number_of_nodes, ): nsig = nsig.reshape((1, self.number_of_nodes, 1)) elif nsig.shape == (self.model.nvar, self.number_of_nodes): nsig = nsig.reshape((self.model.nvar, self.number_of_nodes, 1)) else: msg = "Bad Simulator.integrator.noise.nsig shape: %s" LOG.error(msg % str(nsig.shape)) LOG.debug("Simulator.integrator.noise.nsig shape: %s" % str(nsig.shape)) self.integrator.noise.nsig = nsig #LOG.debug("Simulator.integrator.noise.random_stream seed is: %s" % str(self.integrator.noise.random_stream.trait.value.get_state()[1][0])) def configure_monitors(self): """ Configure the requested Monitors for this Simulator """ if not isinstance(self.monitors, (list, tuple)): self.monitors = [self.monitors] # Configure monitors for monitor in self.monitors: monitor.config_for_sim(self) def configure_stimuli(self): """ Configure the defined Stimuli for this Simulator """ #Configure spatial component of any stimuli if self.stimulus is not None: if self.surface: self.stimulus.configure_space(self.surface.region_mapping) else: self.stimulus.configure_space() #TODO: The below was moved to the specific Stimuli datatypes, should be removed from here once we're sure all is right in the world... #NOTE: All is not right in the world. In moving this out of the simulator, to # work around an issue with the framework, the use of number_of_nodes for # surface simulations was replaced with number_of_vertices, these are not # the same thing and doing so has broken the ability to apply stimuli surface # simulations that include non-cortical regions in the connectivity # matrix. 
One possible solution would be to create a Structure datatype # that merges Cortex and Connectivity before they enter the Simulator, # so that a correct number_of_nodes can be accesible before entry to the simulator... # if isinstance(self.stimulus, patterns_dtype.StimuliSurface): # dis_shp = (self.number_of_nodes, # numpy.size(self.stimulus.focal_points_surface)) # distance = numpy.zeros(dis_shp) # k = -1 # for focal_point in self.stimulus.focal_points_surface: # k += 1 # foci = numpy.array([focal_point], dtype=numpy.int32) # distance[:, k] = self.surface.geodesic_distance(foci) # # elif isinstance(self.stimulus, patterns_dtype.StimuliRegion): # if (self.surface is not None): # #TODO: smooth at surface region boundaries # #import pdb; pdb.set_trace() # distance = self.stimulus.weight_array[self.surface.region_mapping, :] # else: # distance = self.stimulus.weight_array # # LOG.debug("%s: distance shape is: %s" % (str(self), str(distance.shape))) # # #Generate spatial pattern using "distance" of all nodes # self.stimulus.configure_space(distance) def memory_requirement(self): """ Return an estimated of the memory requirements (Bytes) for this simulator's current configuration. """ self._guesstimate_memory_requirement() return self._memory_requirement_guess def runtime(self, simulation_length): """ Return an estimated run time (seconds) for the simulator's current configuration and a specified simulation length. """ self.simulation_length = simulation_length self._guesstimate_runtime() return self._runtime def storage_requirement(self, simulation_length): """ Return an estimated storage requirement (Bytes) for the simulator's current configuration and a specified simulation length. """ self.simulation_length = simulation_length self._calculate_storage_requirement() return self._storage_requirement def _guesstimate_memory_requirement(self): """ Guestimate the memroy required for this simulator. 
Guesstimate is based on the shape of the dominant arrays, and as such can operate before configuration. NOTE: Assumes returned/yeilded data is in some sense "taken care of" in the world outside the simulator, and so doesn't consider it, making the simulator's history, and surface if present, the dominant memory pigs... """ if self.surface: number_of_nodes = self.surface.number_of_vertices else: number_of_nodes = self.connectivity.number_of_regions number_of_regions = self.connectivity.number_of_regions magic_number = 2.42 # Current guesstimate is low by about a factor of 2, seems safer to over estimate... bits_64 = 8.0 # Bytes bits_32 = 4.0 # Bytes #NOTE: The speed hack for getting the first element of hist shape should # partially resolves calling of this method with a non-configured # connectivity, there remains the less common issue if no tract_lengths... hist_shape = ( self.connectivity.tract_lengths.max() / (self.conduction_speed or self.connectivity.speed or 3.0) / self.integrator.dt, #self.connectivity.delays.max() self.model.nvar, number_of_nodes, self.model.number_of_modes) memreq = numpy.prod(hist_shape) * bits_64 if self.surface: memreq += self.surface.number_of_triangles * 3 * bits_32 * 2 # normals memreq += self.surface.number_of_vertices * 3 * bits_64 * 2 # normals memreq += number_of_nodes * number_of_regions * bits_64 * 4 #vertex_mapping, region_average, region_sum #???memreq += self.surface.local_connectivity.matrix.nnz * 8 if not isinstance(self.monitors, (list, tuple)): monitors = [self.monitors] else: monitors = self.monitors for monitor in monitors: if not isinstance(monitor, monitors_module.Bold): stock_shape = (monitor.period / self.integrator.dt, self.model.variables_of_interest.shape[0], number_of_nodes, self.model.number_of_modes) memreq += numpy.prod(stock_shape) * bits_64 if hasattr(monitor, "sensors"): try: memreq += number_of_nodes * monitor.sensors.number_of_sensors * bits_64 #projection_matrix except AttributeError: LOG.debug( "No 
sensors specified, guessing memory based on default EEG." ) memreq += number_of_nodes * 62.0 * bits_64 else: stock_shape = (monitor.hrf_length * monitor._stock_sample_rate, self.model.variables_of_interest.shape[0], number_of_nodes, self.model.number_of_modes) interim_stock_shape = ( 1.0 / (2.0**-2 * self.integrator.dt), self.model.variables_of_interest.shape[0], number_of_nodes, self.model.number_of_modes) memreq += numpy.prod(stock_shape) * bits_64 memreq += numpy.prod(interim_stock_shape) * bits_64 #available_memory = 25769803776 #TODO: (this is for my machine) if psutil and memreq > psutil.virtual_memory().total: LOG.error("This is gonna get ugly...") self._memory_requirement_guess = magic_number * memreq #import pdb; pdb.set_trace() msg = "Memory requirement guesstimate: simulation will need about %.1f MB" LOG.info(msg % (self._memory_requirement_guess / 1048576.0)) def _census_memory_requirement(self): """ Guesstimate the memory required for this simulator. Guesstimate is based on a census of the dominant arrays after the simulator has been configured. NOTE: Assumes returned/yeilded data is in some sense "taken care of" in the world outside the simulator, and so doesn't consider it, making the simulator's history, and surface if present, the dominant memory pigs... """ magic_number = 2.42 # Current guesstimate is low by about a factor of 2, seems safer to over estimate... 
#magic_number = 8.0 # Bytes memreq = self.history.nbytes #LOG.info("Memory required by this simulatin will be approximately %s Bytes" % (memreq)) try: memreq += self.surface.triangles.nbytes * 2 # normals memreq += self.surface.vertices.nbytes * 2 # normals memreq += self.surface.vertex_mapping.nbytes * 4 #vertex_mapping, region_average, region_sum memreq += self.surface.eeg_projection.nbytes memreq += self.surface.local_connectivity.matrix.nnz * 8 except AttributeError: pass for monitor in self.monitors: memreq += monitor._stock.nbytes if isinstance(monitor, monitors_module.Bold): memreq += monitor._interim_stock.nbytes if psutil and memreq > psutil.virtual_memory().total: LOG.error("This is gonna get ugly...") self._memory_requirement_census = magic_number * memreq #import pdb; pdb.set_trace() msg = "Memory requirement census: simulation will need about %.1f MB" LOG.info(msg % (self._memory_requirement_census / 1048576.0)) def _guesstimate_runtime(self): """ Estimate the runtime for this simulator. Spread in parallel executions of larger arrays means this will be an over-estimation, or rather a single threaded estimation... Different choice of integrators and monitors has an additional effect, on the magic number though relatively minor """ magic_number = 6.57e-06 # seconds self._runtime = (magic_number * self.number_of_nodes * self.model.nvar * self.model.number_of_modes * self.simulation_length / self.integrator.dt) msg = "Simulation single-threaded runtime should be about %s seconds!" LOG.info(msg % str(int(self._runtime))) def _calculate_storage_requirement(self): """ Calculate the storage requirement for the simulator, configured with models, monitors, etc being run for a particular simulation length. While this is only approximate, it is far more reliable/accurate than the memory and runtime guesstimates. 
""" LOG.info("Calculating storage requirement for ...") strgreq = 0 for monitor in self.monitors: strgreq += (TVBSettings.MAGIC_NUMBER * self.simulation_length * self.number_of_nodes * self.model.nvar * self.model.number_of_modes / monitor.period) self._storage_requirement = int(strgreq)
# Demo fragment: monitors, surface with local connectivity, and a surface
# stimulus for a TVB surface simulation.
mon_tavg = monitors.TemporalAverage(period=2**-2)
mon_savg = monitors.SpatialAverage(period=2**-2)
mon_eeg = monitors.EEG(period=2**-2)
#Bundle them
what_to_watch = (mon_tavg, mon_savg, mon_eeg)

#Initialise a surface
local_coupling_strength = numpy.array([0.0121])

# Gaussian local connectivity kernel, truncated at 60 mm.
# NOTE(review): amp is set to 0.0, so this local connectivity contributes
# nothing — presumably deliberate for this demo; confirm.
grey_matter = surfaces.LocalConnectivity(equation=equations.Gaussian(),
                                         cutoff=60.0)
grey_matter.equation.parameters['sigma'] = 10.0
grey_matter.equation.parameters['amp'] = 0.0

default_cortex = surfaces.Cortex(local_connectivity=grey_matter,
                                 coupling_strength=local_coupling_strength)

#Define the stimulus
# Temporal profile: Gaussian in time centred at t=8.0 (amp 0.0 here).
eqn_t = equations.Gaussian()
eqn_t.parameters["amp"] = 0.0
eqn_t.parameters["midpoint"] = 8.0
# Spatial profile: inhibitory (negative amp) Gaussian over the surface.
eqn_x = equations.Gaussian()
eqn_x.parameters["amp"] = -0.0625
eqn_x.parameters["sigma"] = 28.0

stimulus = patterns.StimuliSurface(
    surface=default_cortex,  #TODO: This is required because UI requires the surface associated with the Stimuli
    temporal=eqn_t,
    spatial=eqn_x,
# Demo fragment: PCA analysis of a surface time-series dataset.
import tvb.analyzers.pca as pca
from tvb.simulator.plot import timeseries_interactive as timeseries_interactive
from tvb.simulator.plot.tools import *

#Load the demo surface timeseries dataset
try:
    data = numpy.load("demo_data_surface_8s_2048Hz.npy")
except IOError:
    LOG.error("Can't load demo data. Run demos/generate_region_demo_data.py")
    raise

period = 0.00048828125  #s, i.e. 1/2048 — matches the 2048 Hz dataset name

#Initialise a default surface
default_cortex = surfaces_datatypes.Cortex()

#Put the data into a TimeSeriesSurface datatype
tsr = TimeSeriesSurface(surface = default_cortex,
                        data = data,
                        sample_period = period)
tsr.configure()

#Create and run the analyser
pca_analyser = pca.PCA(time_series = tsr)
pca_data = pca_analyser.evaluate()

#Generate derived data, such as, component time series, etc.
pca_data.configure()

#Put the data into a TimeSeriesSurface datatype