Example #1
class ComplexCoherenceSpectrumData(arrays.MappedArray):
    """
    Result of a NodeComplexCoherence Analysis.
    """

    cross_spectrum = arrays.ComplexArray(
        label="The cross spectrum",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc=""" A complex ndarray that contains the nodes x nodes cross
            spectrum for every frequency frequency and for every segment.""")

    array_data = arrays.ComplexArray(
        label="Complex Coherence",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc="""The complex coherence coefficients calculated from the cross
            spectrum. The imaginary values of this complex ndarray represent the 
            imaginary coherence.""")

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the node_coherence is
            applied.""")

    epoch_length = basic.Float(
        label="Epoch length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segment length determines the frequency resolution of the
            resulting spectra.""")

    segment_length = basic.Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segment length determines the frequency resolution of the
            resulting spectra.""")

    #    frequency = arrays.FloatArray(
    #        label = "Frequency",
    #        doc = """DOC ME""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
            application of the FFT.""")

    #    number_of_epochs = basic.Integer(
    #        label = "Number of epochs",
    #        doc = """DOC ME""")
    #
    #    number_of_segments = basic.Integer(
    #        label = "Number of segments",
    #        doc = """DOC ME""")

    __generate_table__ = True
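
For orientation, a minimal NumPy sketch (assumed conventions, not the NodeComplexCoherence analyzer's own code) of how the attributes above relate: each complex coherence coefficient is the cross spectrum normalised by the two auto-spectra, and its imaginary part is the imaginary coherence.

import numpy

rng = numpy.random.RandomState(42)
n_seg, n_nodes, n_freq = 8, 4, 16

# Toy cross spectrum: average the outer products of per-segment complex
# spectra, giving a Hermitian (nodes x nodes) matrix for every frequency.
spectra = rng.randn(n_seg, n_nodes, n_freq) + 1j * rng.randn(n_seg, n_nodes, n_freq)
cross_spectrum = numpy.einsum('sif,sjf->ijf', spectra, spectra.conj()) / n_seg

# Complex coherence: normalise by the auto-spectra of both nodes.
auto = numpy.real(numpy.einsum('iif->if', cross_spectrum))          # (nodes, freq)
coherence = cross_spectrum / numpy.sqrt(auto[:, None, :] * auto[None, :, :])

imaginary_coherence = coherence.imag    # the "imaginary coherence" from array_data's doc
print(imaginary_coherence.shape)        # (4, 4, 16)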
Example #2
class Fcd(arrays.MappedArray):

    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_DEFAULT)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which FCD is calculated.")

    sw = basic.Float(
        label="Sliding window length (ms)",
        default=120000,
        doc="""Length of the time window used to divided the time series.
                FCD matrix is calculated in the following way: the time series is divided in time window of fixed length and with an overlapping of fixed length.
                The datapoints within each window, centered at time ti, are used to calculate FC(ti) as Pearson correlation.
                The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj) arranged in a vector."""
    )

    sp = basic.Float(
        label="Spanning between two consecutive sliding window (ms)",
        default=2000,
        doc=
        """Spanning= (time windows length)-(overlapping between two consecutive time window).
                FCD matrix is calculated in the following way: the time series is divided in time window of fixed length and with an overlapping of fixed length.
                The datapoints within each window, centered at time ti, are used to calculate FC(ti) as Pearson correlation.
                The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj) arranged in a vector"""
    )

    labels_ordering = basic.List(
        label="Dimension Names",
        default=["Time", "Time", "State Variable", "Mode"],
        doc="""List of strings representing names of each data dimension""")

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1),
                    int(self.read_data_shape()[i]))

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "FCD type": self.__class__.__name__,
            "Source": self.source.title,
            "Dimensions": self.labels_ordering
        }

        summary.update(self.get_info_about_array('array_data'))
        return summary
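
The sliding-window recipe described in the sw and sp docs can be restated as a stand-alone sketch. This is not the Fcd datatype's own code; in particular, vectorising the upper triangle of each FC matrix is one common reading of "arranged in a vector".

import numpy

def fcd_matrix(ts, sw, sp, dt):
    """FCD of a (tpts x nodes) series: slide a window of length sw (ms) in
    steps of sp (ms), compute FC(ti) per window, correlate vectorised FCs."""
    win, step = int(sw / dt), int(sp / dt)
    iu = numpy.triu_indices(ts.shape[1], k=1)        # off-diagonal FC entries
    fcs = numpy.array([numpy.corrcoef(ts[s:s + win].T)[iu]
                       for s in range(0, ts.shape[0] - win + 1, step)])
    return numpy.corrcoef(fcs)                       # (n_windows, n_windows)

ts = numpy.random.randn(5000, 10)                    # 10 nodes sampled at dt = 1 ms
print(fcd_matrix(ts, sw=1000.0, sp=200.0, dt=1.0).shape)    # (21, 21)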
Example #3
class TimeSeriesData(MappedType):
    """
    Base time-series dataType.
    """

    title = basic.String

    data = arrays.FloatArray(
        label="Time-series data",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc=
        """An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions"""
    )

    nr_dimensions = basic.Integer(label="Number of dimensions in timeseries",
                                  default=4)

    length_1d, length_2d, length_3d, length_4d = [basic.Integer] * 4

    labels_ordering = basic.List(
        default=["Time", "State Variable", "Space", "Mode"],
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = basic.Dict(
        default={},
        label=
        "Specific labels for each dimension for the data stored in this timeseries.",
        doc=
        """ A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension] }"""
    )
    ## TODO (for Stuart) : remove TimeLine and make sure the correct Period/start time is returned by different monitors in the simulator

    time = arrays.FloatArray(
        file_storage=core.FILE_STORAGE_EXPAND,
        label="Time-series time",
        required=False,
        doc=
        """An array of time values for the time-series, with a shape of [tpts,].
        This is 'time' as returned by the simulator's monitors.""")

    start_time = basic.Float(label="Start Time:")

    sample_period = basic.Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g sec, msec, usec, ...)
    sample_period_unit = basic.String(label="Sample Period Measure Unit",
                                      default="ms")

    sample_rate = basic.Float(label="Sample rate",
                              doc="""The sample rate of the timeseries""")

    has_surface_mapping = basic.Bool(default=True)
    has_volume_mapping = basic.Bool(default=False)
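
A quick plain-NumPy illustration of the conventions declared above, with assumed toy sizes: data is 4D in [Time, State Variable, Space, Mode] order, and sample_rate is the inverse of sample_period.

import numpy

tpts, n_svar, n_node, n_mode = 1000, 1, 76, 1       # assumed toy dimensions
data = numpy.zeros((tpts, n_svar, n_node, n_mode))  # [Time, State Variable, Space, Mode]

sample_period = 1.0                                 # in sample_period_unit, "ms" by default
sample_rate = 1.0 / sample_period                   # samples per ms here
start_time = 0.0
time = start_time + numpy.arange(tpts) * sample_period  # matches the optional time array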
Example #4
class Raw(Monitor):
    """
    A monitor that records the output raw data from a tvb simulation:
    It collects:

        - all state variables and modes from class :Model:
        - all nodes of a region- or surface-based simulation
        - all the integration time steps

    """
    _ui_name = "Raw recording"

    period = basic.Float(
        label = "Sampling period is ignored for Raw Monitor",
        order = -1)

    variables_of_interest = arrays.IntegerArray(
        label = "Raw Monitor sees all!!! Resistance is futile...",
        order = -1)

    def config_for_sim(self, simulator):
        if self.period != simulator.integrator.dt:
            LOG.debug('Raw period not equal to integration time step, overriding')
        self.period = simulator.integrator.dt
        super(Raw, self).config_for_sim(simulator)
        self.istep = 1
        self.voi = numpy.arange(len(simulator.model.variables_of_interest))

    def sample(self, step, state):
        time = step * self.dt
        return [time, state]
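
A minimal check of the sampling contract, assuming the class above can be instantiated directly; config_for_sim would normally copy dt from the integrator, so it is set by hand here.

import numpy

raw = Raw()                              # assumes direct instantiation works
raw.dt = 0.1                             # normally set via config_for_sim(simulator)
state = numpy.zeros((2, 76, 1))          # (state variables, nodes, modes)
t, s = raw.sample(step=5, state=state)
assert t == 0.5 and s is state           # one [time, state] record per integration step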
Example #5
class iEEG(Projection):
    "Forward solution for intracranial EEG (not ECoG!)."

    _ui_name = "Intracerebral / Stereo EEG"

    projection = ProjectionSurfaceSEEG(
        default=None,
        label='Projection matrix',
        doc='Projection matrix to apply to sources.')

    sigma = basic.Float(label="conductivity", default=1.0)

    sensors = sensors_module.SensorsInternal(
        label="Internal brain sensors",
        default=None,
        required=True,
        doc=
        "The set of SEEG sensors for which the forward solution will be calculated."
    )

    @classmethod
    def from_file(cls,
                  sensors_fname='seeg_588.txt',
                  projection_fname='projection_seeg_588_surface_16k.npy',
                  **kwargs):
        return Projection.from_file.im_func(cls, sensors_fname,
                                            projection_fname, **kwargs)

    def analytic(self, loc, ori):
        """Compute the projection matrix -- simple distance weight for now.
        Equation 12 from sarvas1987basic (point dipole in homogeneous space):
          V(r) = 1/(4*pi*\sigma)*Q*(r-r_0)/|r-r_0|^3
        """
        r_0, Q = loc, ori
        V_r = numpy.zeros((self.sensors.locations.shape[0], r_0.shape[0]))
        for sensor_k in numpy.arange(self.sensors.locations.shape[0]):
            a = self.sensors.locations[sensor_k, :] - r_0
            na = numpy.sqrt(numpy.sum(a**2, axis=1))[:, numpy.newaxis]
            V_r[sensor_k, :] = numpy.sum(
                Q * (a / na**3), axis=1) / (4.0 * numpy.pi * self.sigma)
        return V_r

    def create_time_series(self,
                           storage_path,
                           connectivity=None,
                           surface=None,
                           region_map=None,
                           region_volume_map=None):
        return TimeSeriesSEEG(storage_path=storage_path,
                              sensors=self.sensors,
                              sample_period=self.period,
                              title=' ' + self.__class__.__name__,
                              **self._transform_user_tags())
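
The per-sensor loop in analytic can be written in fully vectorised form; a sketch under the same point-dipole assumption, with made-up coordinates:

import numpy

def seeg_gain(sensor_locs, source_locs, source_oris, sigma=1.0):
    """Vectorised restatement of analytic():
    V = Q . (r - r_0) / (4 * pi * sigma * |r - r_0|^3)."""
    a = sensor_locs[:, None, :] - source_locs[None, :, :]   # (n_sensors, n_sources, 3)
    na = numpy.sqrt((a ** 2).sum(axis=-1, keepdims=True))   # |r - r_0|
    return (source_oris * a / na ** 3).sum(axis=-1) / (4.0 * numpy.pi * sigma)

sensor_locs = numpy.random.randn(5, 3)
source_locs = numpy.random.randn(7, 3)
source_oris = numpy.random.randn(7, 3)
print(seeg_gain(sensor_locs, source_locs, source_oris).shape)   # (5, 7), like V_r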
Example #6
class WaveletCoefficientsData(arrays.MappedArray):
    """
    This class bundles all the elements of a Wavelet Analysis into a single 
    object, including the input TimeSeries datatype and the output results as 
    arrays (FloatArray)
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray()

    source = time_series.TimeSeries(label="Source time-series")

    mother = basic.String(
        label="Mother wavelet",
        default="morlet",
        doc="""A string specifying the type of mother wavelet to use,
            default is 'morlet'.""")  # default to 'morlet'

    sample_period = basic.Float(label="Sample period")
    #sample_rate = basic.Integer(label = "")  inversely related

    frequencies = arrays.FloatArray(
        label="Frequencies", doc="A vector that maps scales to frequencies.")

    normalisation = basic.String(label="Normalisation type")
    # 'unit energy' | 'gabor'

    q_ratio = basic.Float(label="Q-ratio", default=5.0)

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True
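
The three expanded FloatArrays relate to the complex coefficients in the usual way; a short sketch of those assumed conventions (not read from the class itself):

import numpy

coeffs = numpy.random.randn(16, 100) + 1j * numpy.random.randn(16, 100)  # (scales, tpts)

amplitude = numpy.abs(coeffs)     # magnitude of each coefficient
phase = numpy.angle(coeffs)       # argument, in radians
power = amplitude ** 2            # squared magnitude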
Example #7
class Scaling(Coupling):
    r"""
    Provides a simple scaling of the connectivity of the form

    .. math::
        a x

    """

    a = basic.Float(label="Scaling factor",
                    default=0.00390625,
                    range=basic.Range(lo=0.0, hi=1.0, step=0.01),
                    doc="Rescales the connection strength while maintaining "
                    "the ratio between different values.")

    def post(self, gx):
        return self.a * gx
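
A tiny numeric check of post: the default a = 0.00390625 is 1/256, so the summed coupling input is rescaled uniformly and the ratios between nodes are preserved.

import numpy

a = 0.00390625                              # the default scaling factor, i.e. 1/256
gx = numpy.array([256.0, 512.0, 1024.0])    # made-up summed coupling input
print(a * gx)                               # [1. 2. 4.] -- same ratios, smaller magnitude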
Example #8
class BaseTimeseriesMetricAlgorithm(core.Type):
    """
    This is a base class for all metrics on timeSeries dataTypes.
    Metric means an algorithm computing a single value for an entire TimeSeries.

    """
    ### Make sure this "abstract" class does not get listed in UI.
    _base_classes = ['BaseTimeseriesMetricAlgorithm']

    accept_filter = None

    time_series = time_series_module.TimeSeries(
        label="Time Series",
        required=True,
        order=1,
        doc="The TimeSeries for which the metric(s) will be computed.")

    start_point = basic.Float(
        label="Start point (ms)",
        default=500.0,
        required=False,
        order=2,
        doc=""" The start point determines how many points of the TimeSeries will
        be discarded before computing the metric. By default it drops the
        first 500 ms.""")

    segment = basic.Integer(
        label="Segmentation factor",
        default=4,
        required=False,
        order=3,
        doc=
        """ Divide the input time-series into discrete equally sized sequences and
        use the last segment to compute the metric. It is only used when
        the start point is larger than the time-series length.""")

    def evaluate(self):
        """
        This method needs to be implemented in each subclass.
        Will describe current algorithm.

        :return: single numeric value or a dictionary (displayLabel: numeric value) to be persisted.
        """
        raise Exception(
            "Every metric algorithm should implement an 'evaluate' method that returns the metric result."
        )
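
A hedged sketch of the subclassing contract: evaluate returns a single number (or a {label: value} dict). Variance is a made-up example metric, not one shipped with the library.

import numpy

class Variance(BaseTimeseriesMetricAlgorithm):
    def evaluate(self):
        data = self.time_series.data            # [tpts, svar, nodes, modes]
        point = int(self.start_point / self.time_series.sample_period)
        if point >= data.shape[0]:              # fall back to the last segment
            point = (self.segment - 1) * data.shape[0] // self.segment
        return float(numpy.var(data[point:]))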
Example #9
class Scaling(Coupling):
    """
    Scaling Coupling function.

    .. #Currently there seems to be a clash between traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Scaling.__init__
    .. automethod:: Scaling.__call__

    """

    a = basic.Float(
        label="Scaling factor",
        default = 0.00390625,
        range = basic.Range(lo = 0.0, hi = 0.2, step = 0.01),
        doc = """Rescales the connection strength while maintaining the ratio
        between different values.""")


    def __call__(self, g_ij, x_i, x_j):
        """
        Evaluate the Scaling function for the arg ``x``. The equation being
        evaluated has the following form:

            .. math::
                a x 


        """
        coupled_input = (g_ij * x_j).sum(axis=0)
        return self.a * coupled_input

    device_info = coupling_device_info(
        pars = ['a'],
        kernel = """

        // parameters
        float a = P(0);

        I = 0.0;
        for (int j_node=0; j_node<n_node; j_node++, idel++, conn++)
            I += a*GIJ*XJ;

        """
        )
Example #10
class FourierSpectrumData(arrays.MappedArray):
    """
    Result of a Fourier Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the FFT is applied.")

    segment_length = basic.Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segment length determines the frequency resolution of the
            resulting spectra.""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
            application of the FFT.""")

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    average_power = arrays.FloatArray(label="Average Power",
                                      file_storage=core.FILE_STORAGE_EXPAND)

    normalised_average_power = arrays.FloatArray(
        label="Normalised Power", file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True
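
How the expanded FloatArrays can be derived from the complex spectra, under the usual FFT conventions (a sketch, not the analyzer's own code; the final normalisation is one plausible choice):

import numpy

segments = numpy.random.randn(4, 128)          # 4 equally sized segments of a signal
spectra = numpy.fft.fft(segments, axis=1)      # complex coefficients, like array_data

amplitude = numpy.abs(spectra)
phase = numpy.angle(spectra)
power = amplitude ** 2
average_power = power.mean(axis=0)             # averaged across segments
normalised_average_power = average_power / average_power.sum()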
Example #11
class Ornstein_Ulhenbeck_process(Additive):
    tau_OU = basic.Float(label="time scale of decay",
                         required=True,
                         default=1.0,
                         doc="""The noise time scale """)
    mu = arrays.FloatArray(label=":math:`mu`",
                           required=True,
                           default=numpy.array([1.0]),
                           doc="""Mean of the noise""")
    weights = arrays.FloatArray(label=":math:`weights`",
                                required=True,
                                default=numpy.array([0.0]),
                                doc="""Weights applied to the noise""")
    _noise = None

    def configure_white(self, dt, shape):
        """
        Run base classes configure to setup traited attributes, then ensure that
        the ``random_stream`` attribute is properly configured.

        """
        self.dt = dt
        self._noise = 0.0
        self.mu = numpy.reshape(self.mu, (7, 1, 1))

    def generate(self, shape, lo=-1.0, hi=1.0):
        self._noise = self._noise - self.dt / self.tau_OU * self._noise + numpy.sqrt(
            self.dt) * self.random_stream.normal(size=shape)
        noise = self.mu + self.nsig * self._noise
        return noise

    def gfun(self, state_variables):
        """
            Drop noise in order to avoid negative frequency

        """
        # drop value for negative noise
        return self.weights * 1e-3
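
The update in generate is an Euler-Maruyama step of an Ornstein-Uhlenbeck process; the same recurrence as a stand-alone scalar sketch with assumed toy parameters:

import numpy

dt, tau_OU, nsig, mu = 0.1, 1.0, 0.05, 1.0
rng = numpy.random.RandomState(0)

x, trace = 0.0, []
for _ in range(1000):
    # x <- x - dt/tau * x + sqrt(dt) * N(0, 1), as in generate() above
    x = x - dt / tau_OU * x + numpy.sqrt(dt) * rng.normal()
    trace.append(mu + nsig * x)                # same affine transform as generate()
print(numpy.mean(trace))                       # hovers near mu for small nsig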
Example #12
class LocalConnectivity(types_mapped.MappedType):
    """
    A sparse matrix for representing the local connectivity within the Cortex.
    """
    _ui_name = "Local connectivity"

    surface = surfaces.CorticalSurface(label="Surface", order=1)

    matrix = types_mapped.SparseMatrix(order=-1)

    equation = equations.FiniteSupportEquation(label="Spatial",
                                               required=False,
                                               default=equations.Gaussian,
                                               order=2)

    cutoff = basic.Float(
        label="Cutoff distance (mm)",
        default=40.0,
        doc="Distance at which to truncate the evaluation in mm.",
        order=3)

    def compute(self):
        """
        Compute current Matrix.
        """
        LOG.info("Mapping geodesic distance through the LocalConnectivity.")

        #Start with data being geodesic_distance_matrix, then map it through equation
        self.equation.pattern = self.matrix_gdist.data

        #Then replace original data with result...
        self.matrix_gdist.data = self.equation.pattern

        #Homogenise spatial discretisation effects across the surface
        nv = self.matrix_gdist.shape[0]
        ind = numpy.arange(nv, dtype=int)
        pos_mask = self.matrix_gdist.data > 0.0
        neg_mask = self.matrix_gdist.data < 0.0
        pos_con = self.matrix_gdist.copy()
        neg_con = self.matrix_gdist.copy()
        pos_con.data[neg_mask] = 0.0
        neg_con.data[pos_mask] = 0.0
        pos_contrib = pos_con.sum(axis=1)
        pos_contrib = numpy.array(pos_contrib).squeeze()
        neg_contrib = neg_con.sum(axis=1)
        neg_contrib = numpy.array(neg_contrib).squeeze()
        pos_mean = pos_contrib.mean()
        neg_mean = neg_contrib.mean()
        if ((pos_mean != 0.0 and any(pos_contrib == 0.0))
                or (neg_mean != 0.0 and any(neg_contrib == 0.0))):
            msg = "Cortical mesh is too coarse for requested LocalConnectivity."
            LOG.warning(msg)
            bad_verts = ()
            if pos_mean != 0.0:
                bad_verts = bad_verts + numpy.nonzero(pos_contrib == 0.0)
            if neg_mean != 0.0:
                bad_verts = bad_verts + numpy.nonzero(neg_contrib == 0.0)
            LOG.debug("Problem vertices are: %s" % str(bad_verts))
        pos_hf = numpy.zeros(shape=pos_contrib.shape)
        pos_hf[pos_contrib != 0] = pos_mean / pos_contrib[pos_contrib != 0]
        neg_hf = numpy.zeros(shape=neg_contrib.shape)
        neg_hf[neg_contrib != 0] = neg_mean / neg_contrib[neg_contrib != 0]
        pos_hf_diag = scipy.sparse.csc_matrix((pos_hf, (ind, ind)),
                                              shape=(nv, nv))
        neg_hf_diag = scipy.sparse.csc_matrix((neg_hf, (ind, ind)),
                                              shape=(nv, nv))
        homogenious_conn = (pos_hf_diag * pos_con) + (neg_hf_diag * neg_con)

        #Then replace unhomogenised result with the spatially homogeneous one...
        if not homogenious_conn.has_sorted_indices:
            homogenious_conn.sort_indices()

        self.matrix = homogenious_conn

    def _validate_before_store(self):
        """
        Overrides MappedType._validate_before_store to use a custom error for missing matrix.
        """
        # Sparse Matrix is required so we should check if there is any data stored for it
        if self.matrix is None:
            msg = ("LocalConnectivity can not be stored because it "
                   "has no SparseMatrix attached.")
            raise exceptions.ValidationException(msg)

        super(LocalConnectivity, self)._validate_before_store()

    @staticmethod
    def from_file(source_file="local_connectivity_16384.mat", instance=None):

        if instance is None:
            result = LocalConnectivity()
        else:
            result = instance

        source_full_path = try_get_absolute_path("tvb_data.local_connectivity",
                                                 source_file)
        reader = FileReader(source_full_path)

        result.matrix = reader.read_array(matlab_data_name="LocalCoupling")
        return result

    def get_min_max_values(self):
        """
        Retrieve the minimum and maximum values from the metadata.
        :returns: (minimum_value, maximum_value)
        """
        metadata = self.get_metadata('matrix')
        return metadata[self.METADATA_ARRAY_MIN], metadata[
            self.METADATA_ARRAY_MAX]

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        return self.get_info_about_array('matrix', [
            self.METADATA_ARRAY_MAX, self.METADATA_ARRAY_MIN,
            self.METADATA_ARRAY_MEAN, self.METADATA_ARRAY_SHAPE
        ])

    def compute_sparse_matrix(self):
        """
        NOTE: Before calling this method, the surface field
        should already be set on the local connectivity.

        Computes the sparse matrix for this local connectivity.
        """
        if self.surface is None:
            raise AttributeError(
                'Require surface to compute local connectivity.')

        self.matrix_gdist = surfaces.gdist.local_gdist_matrix(
            self.surface.vertices.astype(numpy.float64),
            self.surface.triangles.astype(numpy.int32),
            max_distance=self.cutoff)

        self.compute()
        # Avoid having a large data-set in memory.
        self.matrix_gdist = None
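
Two hedged usage sketches: loading the bundled default matrix (requires a tvb_data distribution), or computing one from a surface, which calls gdist and can be slow on dense meshes.

# Load a precomputed sparse matrix instead of recomputing geodesic distances:
local_conn = LocalConnectivity.from_file("local_connectivity_16384.mat")
print(local_conn.matrix.shape)

# Or, given a configured CorticalSurface `ctx`, compute it directly:
# local_conn = LocalConnectivity(surface=ctx, cutoff=40.0)
# local_conn.compute_sparse_matrix()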
Example #13
class Simulator(core.Type):
    "A Simulator assembles components required to perform simulations."

    connectivity = connectivity.Connectivity(
        label="Long-range connectivity",
        default=None,
        order=1,
        required=True,
        filters_ui=[
            UIFilter(linked_elem_name="region_mapping_data",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None),
            UIFilter(linked_elem_name="region_mapping",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="region_mapping",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="MEG"),
            UIFilter(linked_elem_name="region_mapping",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="iEEG")
        ],
        doc="""A tvb.datatypes.Connectivity object which contains the
        structural long-range connectivity data (i.e., white-matter tracts). In
        combination with the ``Long-range coupling function`` it defines the inter-regional
        connections. These couplings undergo a time delay via signal propagation 
        with a propagation speed of ``Conduction Speed``""")

    conduction_speed = basic.Float(
        label="Conduction Speed",
        default=3.0,
        order=2,
        required=False,
        range=basic.Range(lo=0.01, hi=100.0, step=1.0),
        doc="""Conduction speed for ``Long-range connectivity`` (mm/ms)""")

    coupling = coupling.Coupling(
        label="Long-range coupling function",
        default=coupling.Linear(),
        required=True,
        order=2,
        doc="""The coupling function is applied to the activity propagated
        between regions by the ``Long-range connectivity`` before it enters the local
        dynamic equations of the Model. Its primary purpose is to 'rescale' the
        incoming activity to a level appropriate to Model.""")

    surface = cortex.Cortex(
        label="Cortical surface",
        default=None,
        order=3,
        required=False,
        filters_backend=FilterChain(
            fields=[FilterChain.datatype + '._valid_for_simulations'],
            operations=["=="],
            values=[True]),
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="local_connectivity",
                     linked_elem_field=FilterChain.datatype + "._surface",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""By default, a Cortex object which represents the
        cortical surface defined by points in the 3D physical space and their 
        neighborhood relationship. In the current TVB version, when setting up a 
        surface-based simulation, the option to configure the spatial spread of 
        the ``Local Connectivity`` is available.""")

    stimulus = patterns.SpatioTemporalPattern(
        label="Spatiotemporal stimulus",
        default=None,
        order=4,
        required=False,
        doc=
        """A ``Spatiotemporal stimulus`` can be defined at the region or surface level.
        It's composed of spatial and temporal components. For region defined stimuli
        the spatial component is just the strength with which the temporal
        component is applied to each region. For surface defined stimuli,  a
        (spatial) function, with finite-support, is used to define the strength 
        of the stimuli on the surface centred around one or more focal points. 
        In the current version of TVB, stimuli are applied to the first state 
        variable of the ``Local dynamic model``.""")

    model = models.Model(
        label="Local dynamic model",
        default=models.Generic2dOscillator,
        required=True,
        order=5,
        doc="""A tvb.simulator.Model object which describe the local dynamic
        equations, their parameters, and, to some extent, where connectivity
        (local and long-range) enters and which state-variables the Monitors
        monitor. By default the 'Generic2dOscillator' model is used. Read the 
        Scientific documentation to learn more about this model.""")

    integrator = integrators.Integrator(
        label="Integration scheme",
        default=integrators.HeunDeterministic,
        required=True,
        order=6,
        doc="""A tvb.simulator.Integrator object which is
            an integration scheme with supporting attributes such as 
            integration step size and noise specification for stochastic 
            methods. It is used to compute the time courses of the model state 
            variables.""")

    initial_conditions = arrays.FloatArray(
        label="Initial Conditions",
        default=None,
        order=-1,
        required=False,
        doc="""Initial conditions from which the simulation will begin. By
        default, random initial conditions are provided. Needs to be the same shape
        as simulator 'history', ie, initial history function which defines the 
        minimal initial state of the network with time delays before time t=0. 
        If the number of time points in the provided array is insufficient the 
        array will be padded with random values based on the 'state_variables_range'
        attribute.""")

    monitors = monitors.Monitor(
        label="Monitor(s)",
        default=monitors.TemporalAverage,
        required=True,
        order=8,
        select_multiple=True,
        doc="""A tvb.simulator.Monitor or a list of tvb.simulator.Monitor
        objects that 'know' how to record relevant data from the simulation. Two
        main types exist: 1) simple, spatial and temporal, reductions (subsets
        or averages); 2) physiological measurements, such as EEG, MEG and fMRI.
        By default the Model's specified variables_of_interest are returned,
        temporally downsampled from the raw integration rate to a sample rate of
        1024Hz.""")

    simulation_length = basic.Float(
        label="Simulation Length (ms, s, m, h)",
        default=1000.0,  # ie 1 second
        required=True,
        order=9,
        doc="""The length of a simulation (default in milliseconds).""")

    history = None  # type: SparseHistory

    @property
    def good_history_shape(self):
        "Returns expected history shape."
        n_reg = self.connectivity.number_of_regions
        shape = self.horizon, len(
            self.model.state_variables), n_reg, self.model.number_of_modes
        return shape

    calls = 0
    current_step = 0
    number_of_nodes = None
    _memory_requirement_guess = None
    _memory_requirement_census = None
    _storage_requirement = None
    _runtime = None

    # methods consist of
    # 1) generic configure
    # 2) component specific configure
    # 3) loop preparation
    # 4) loop step
    # 5) estimations

    def preconfigure(self):
        "Configure just the basic fields, so that memory can be estimated."
        self.connectivity.configure()
        if self.surface:
            self.surface.configure()
        if self.stimulus:
            self.stimulus.configure()
        self.coupling.configure()
        self.model.configure()
        self.integrator.configure()
        # monitors needs to be a list or tuple, even if there is only one...
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.configure()
        # "Nodes" refers to either regions or vertices + non-cortical regions.
        if self.surface is None:
            self.number_of_nodes = self.connectivity.number_of_regions
            LOG.info('Region simulation with %d ROI nodes',
                     self.number_of_nodes)
        else:
            rm = self.surface.region_mapping
            unmapped = self.connectivity.unmapped_indices(rm)
            self._regmap = numpy.r_[rm, unmapped]
            self.number_of_nodes = self._regmap.shape[0]
            LOG.info(
                'Surface simulation with %d vertices + %d non-cortical, %d total nodes',
                rm.size, unmapped.size, self.number_of_nodes)
        self._guesstimate_memory_requirement()

    def configure(self, full_configure=True):
        """Configure simulator and its components.

        The first step of configuration is to run the configure methods of all
        the Simulator's components, ie its traited attributes.

        Configuration of a Simulator primarily consists of calculating the
        attributes, etc, which depend on the combinations of the Simulator's
        traited attributes (keyword args).

        Converts delays from physical time units into integration steps
        and updates attributes that depend on combinations of the 6 inputs.

        Returns
        -------
        sim: Simulator
            The configured Simulator instance.

        """
        if full_configure:
            # When run from GUI, preconfigure is run separately, and we want to avoid running that part twice
            self.preconfigure()
        # Make sure spatialised model parameters have the right shape (number_of_nodes, 1)
        excluded_params = ("state_variable_range", "variables_of_interest",
                           "noise", "psi_table", "nerf_table")
        spatial_reshape = self.model.spatial_param_reshape
        for param in self.model.trait.keys():
            if param in excluded_params:
                continue
            # If it's a surface sim and model parameters were provided at the region level
            region_parameters = getattr(self.model, param)
            if self.surface is not None:
                if region_parameters.size == self.connectivity.number_of_regions:
                    new_parameters = region_parameters[
                        self.surface.region_mapping].reshape(spatial_reshape)
                    setattr(self.model, param, new_parameters)
            region_parameters = getattr(self.model, param)
            if region_parameters.size == self.number_of_nodes:
                new_parameters = region_parameters.reshape(spatial_reshape)
                setattr(self.model, param, new_parameters)
        # Configure spatial component of any stimuli
        self._configure_stimuli()
        # Set delays, provided in physical units, in integration steps.
        self.connectivity.set_idelays(self.integrator.dt)
        self.horizon = self.connectivity.idelays.max() + 1
        # Reshape integrator.noise.nsig, if necessary.
        if isinstance(self.integrator, integrators.IntegratorStochastic):
            self._configure_integrator_noise()
        # Setup history
        self._configure_history(self.initial_conditions)
        # Configure Monitors to work with selected Model, etc...
        self._configure_monitors()
        # Estimate of memory usage.
        self._census_memory_requirement()
        # Allow user to chain configure to another call or assignment.
        return self

    def _handle_random_state(self, random_state):
        if random_state is not None:
            if isinstance(self.integrator, integrators.IntegratorStochastic):
                self.integrator.noise.random_stream.set_state(random_state)
                msg = "random_state supplied with seed %s"
                LOG.info(msg,
                         self.integrator.noise.random_stream.get_state()[1][0])
            else:
                LOG.warn(
                    "random_state supplied for non-stochastic integration")

    def _prepare_local_coupling(self):
        if self.surface is None:
            local_coupling = 0.0
        else:
            if self.surface.coupling_strength.size == 1:
                local_coupling = (self.surface.coupling_strength[0] *
                                  self.surface.local_connectivity.matrix)
            elif self.surface.coupling_strength.size == self.surface.number_of_vertices:
                ind = numpy.arange(self.number_of_nodes, dtype=numpy.intc)
                vec_cs = numpy.zeros((self.number_of_nodes, ))
                vec_cs[:self.surface.
                       number_of_vertices] = self.surface.coupling_strength
                sp_cs = scipy.sparse.csc_matrix(
                    (vec_cs, (ind, ind)),
                    shape=(self.number_of_nodes, self.number_of_nodes))
                local_coupling = sp_cs * self.surface.local_connectivity.matrix
            if local_coupling.shape[1] < self.number_of_nodes:
                # must match unmapped indices handling in preconfigure
                from scipy.sparse import csr_matrix, vstack, hstack
                nn = self.number_of_nodes
                npad = nn - local_coupling.shape[0]
                rpad = csr_matrix((local_coupling.shape[0], npad))
                bpad = csr_matrix((npad, nn))
                local_coupling = vstack([hstack([local_coupling, rpad]), bpad])
        return local_coupling

    def _prepare_stimulus(self):
        if self.stimulus is None:
            stimulus = 0.0
        else:
            time = numpy.r_[0.0:self.simulation_length:self.integrator.dt]
            self.stimulus.configure_time(time.reshape((1, -1)))
            stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
            LOG.debug("stimulus shape is: %s", stimulus.shape)
        return stimulus

    def _loop_compute_node_coupling(self, step):
        "Compute delayed node coupling values."
        coupling = self.coupling(step, self.history)
        if self.surface is not None:
            coupling = coupling[:, self._regmap]
        return coupling

    def _loop_update_stimulus(self, step, stimulus):
        "Update stimulus values for current time step."
        if self.stimulus is not None:
            # TODO stim_step != current step
            stim_step = step - (self.current_step + 1)
            stimulus[self.model.cvar, :, :] = self.stimulus(stim_step).reshape(
                (1, -1, 1))

    def _loop_update_history(self, step, n_reg, state):
        "Update history."
        if self.surface is not None and state.shape[
                1] > self.connectivity.number_of_regions:
            region_state = numpy.zeros(
                (n_reg, state.shape[0],
                 state.shape[2]))  # temp (node, cvar, mode)
            numpy_add_at(region_state, self._regmap, state.transpose(
                (1, 0, 2)))  # sum within region
            region_state /= numpy.bincount(self._regmap).reshape(
                (-1, 1, 1))  # div by n node in region
            state = region_state.transpose((1, 0, 2))  # (cvar, node, mode)
        self.history.update(step, state)

    def _loop_monitor_output(self, step, state):
        observed = self.model.observe(state)
        output = [monitor.record(step, observed) for monitor in self.monitors]
        if any(outputi is not None for outputi in output):
            return output

    def __call__(self, simulation_length=None, random_state=None):
        """
        Return an iterator which steps through simulation time, generating monitor outputs.

        See the run method for a convenient way to collect all output in one call.

        :param simulation_length: Length of the simulation to perform in ms.
        :param random_state:  State of NumPy RNG to use for stochastic integration.
        :return: Iterator over monitor outputs.
        """

        self.calls += 1
        if simulation_length is not None:
            self.simulation_length = simulation_length

        # intialization
        self._guesstimate_runtime()
        self._calculate_storage_requirement()
        self._handle_random_state(random_state)
        n_reg = self.connectivity.number_of_regions
        local_coupling = self._prepare_local_coupling()
        stimulus = self._prepare_stimulus()
        state = self.current_state

        # integration loop
        n_steps = int(math.ceil(self.simulation_length / self.integrator.dt))
        for step in range(self.current_step + 1,
                          self.current_step + n_steps + 1):
            # needs implementing by history + coupling?
            node_coupling = self._loop_compute_node_coupling(step)
            self._loop_update_stimulus(step, stimulus)
            state = self.integrator.scheme(state, self.model.dfun,
                                           node_coupling, local_coupling,
                                           stimulus)
            self._loop_update_history(step, n_reg, state)
            output = self._loop_monitor_output(step, state)
            if output is not None:
                yield output

        self.current_state = state
        self.current_step = self.current_step + n_steps - 1  # -1 : don't repeat last point

    def _configure_history(self, initial_conditions):
        """
        Set initial conditions for the simulation using either the provided
        initial_conditions or, if none are provided, the model's initial()
        method. This method is called durin the Simulator's __init__().

        Any initial_conditions that are provided as an argument are expected
        to have dimensions 1, 2, and 3 with shapse corresponding to the number
        of state_variables, nodes and modes, respectively. If the provided
        inital_conditions are shorter in time (dim=0) than the required history
        the model's initial() method is called to make up the difference.

        """
        rng = numpy.random
        if hasattr(self.integrator, 'noise'):
            rng = self.integrator.noise.random_stream
        # Default initial conditions
        if initial_conditions is None:
            n_time, n_svar, n_node, n_mode = self.good_history_shape
            LOG.info(
                'Preparing initial history of shape %r using model.initial()',
                self.good_history_shape)
            if self.surface is not None:
                n_node = self.number_of_nodes
            history = self.model.initial(self.integrator.dt,
                                         (n_time, n_svar, n_node, n_mode), rng)
        # ICs provided
        else:
            # history should be [timepoints, state_variables, nodes, modes]
            LOG.info('Using provided initial history of shape %r',
                     initial_conditions.shape)
            n_time, n_svar, n_node, n_mode = ic_shape = initial_conditions.shape
            nr = self.connectivity.number_of_regions
            if self.surface is not None and n_node == nr:
                initial_conditions = initial_conditions[:, :, self._regmap]
                return self._configure_history(initial_conditions)
            elif ic_shape[1:] != self.good_history_shape[1:]:
                raise ValueError(
                    "Incorrect history sample shape %s, expected %s" %
                    (ic_shape[1:], self.good_history_shape[1:]))
            else:
                if ic_shape[0] >= self.horizon:
                    LOG.debug("Using last %d time-steps for history.",
                              self.horizon)
                    history = initial_conditions[
                        -self.horizon:, :, :, :].copy()
                else:
                    LOG.debug('Padding initial conditions with model.initial')
                    history = self.model.initial(self.integrator.dt,
                                                 self.good_history_shape, rng)
                    shift = self.current_step % self.horizon
                    history = numpy.roll(history, -shift, axis=0)
                    history[:ic_shape[0], :, :, :] = initial_conditions
                    history = numpy.roll(history, shift, axis=0)
                self.current_step += ic_shape[0] - 1
        LOG.info('Final initial history shape is %r', history.shape)
        # create initial state from history
        self.current_state = history[self.current_step % self.horizon].copy()
        LOG.debug('initial state has shape %r' % (self.current_state.shape, ))
        if self.surface is not None and history.shape[
                2] > self.connectivity.number_of_regions:
            n_reg = self.connectivity.number_of_regions
            (nt, ns, _, nm), ax = history.shape, (2, 0, 1, 3)
            region_history = numpy.zeros((nt, ns, n_reg, nm))
            numpy_add_at(region_history.transpose(ax), self._regmap,
                         history.transpose(ax))
            region_history /= numpy.bincount(self._regmap).reshape((-1, 1))
            history = region_history
        # create history query implementation
        self.history = SparseHistory(self.connectivity.weights,
                                     self.connectivity.idelays,
                                     self.model.cvar,
                                     self.model.number_of_modes)
        # initialize its buffer
        self.history.initialize(history)

    def _configure_integrator_noise(self):
        """
        This enables noise to be state-variable specific and/or to enter only
        via specific brain structures, for example if we only want to
        consider noise as an external input entering the brain via appropriate
        thalamic nuclei.

        Support 3 possible shapes:
            1) number_of_nodes;

            2) number_of_state_variables; and 

            3) (number_of_state_variables, number_of_nodes).

        """

        noise = self.integrator.noise

        if self.integrator.noise.ntau > 0.0:
            self.integrator.noise.configure_coloured(
                self.integrator.dt, self.good_history_shape[1:])
        else:
            self.integrator.noise.configure_white(self.integrator.dt,
                                                  self.good_history_shape[1:])

        if self.surface is not None:
            if self.integrator.noise.nsig.size == self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[
                    self.surface.region_mapping]
            elif self.integrator.noise.nsig.size == self.model.nvar * self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[:,
                                                                        self.
                                                                        surface
                                                                        .
                                                                        region_mapping]

        good_nsig_shape = (self.model.nvar, self.number_of_nodes,
                           self.model.number_of_modes)
        nsig = self.integrator.noise.nsig
        LOG.debug("Given noise shape is %s", nsig.shape)
        if nsig.shape in (good_nsig_shape, (1, )):
            return
        elif nsig.shape == (self.model.nvar, ):
            nsig = nsig.reshape((self.model.nvar, 1, 1))
        elif nsig.shape == (self.number_of_nodes, ):
            nsig = nsig.reshape((1, self.number_of_nodes, 1))
        elif nsig.shape == (self.model.nvar, self.number_of_nodes):
            nsig = nsig.reshape((self.model.nvar, self.number_of_nodes, 1))
        else:
            msg = "Bad Simulator.integrator.noise.nsig shape: %s"
            LOG.error(msg % str(nsig.shape))

        LOG.debug("Corrected noise shape is %s", nsig.shape)
        self.integrator.noise.nsig = nsig

    def _configure_monitors(self):
        """ Configure the requested Monitors for this Simulator """
        # Coerce to list if required
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.config_for_sim(self)

    def _configure_stimuli(self):
        """ Configure the defined Stimuli for this Simulator """
        if self.stimulus is not None:
            if self.surface:
                self.stimulus.configure_space(self.surface.region_mapping)
            else:
                self.stimulus.configure_space()

    # used by simulator adaptor
    def memory_requirement(self):
        """
        Return an estimated of the memory requirements (Bytes) for this
        simulator's current configuration.
        """
        self._guesstimate_memory_requirement()
        return self._memory_requirement_guess

    # appears to be unused
    def runtime(self, simulation_length):
        """
        Return an estimated run time (seconds) for the simulator's current 
        configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._guesstimate_runtime()
        return self._runtime

    # used by simulator adaptor
    def storage_requirement(self, simulation_length):
        """
        Return an estimated storage requirement (Bytes) for the simulator's
        current configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._calculate_storage_requirement()
        return self._storage_requirement

    def _guesstimate_memory_requirement(self):
        """
        guesstimate the memory required for this simulator.

        Guesstimate is based on the shape of the dominant arrays, and as such 
        can operate before configuration.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant 
            memory pigs...

        """
        if self.surface:
            number_of_nodes = self.surface.number_of_vertices
        else:
            number_of_nodes = self.connectivity.number_of_regions

        number_of_regions = self.connectivity.number_of_regions

        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        bits_64 = 8.0  # Bytes
        bits_32 = 4.0  # Bytes
        #NOTE: The speed hack for getting the first element of hist shape should
        #      partially resolve calling of this method with a non-configured
        #      connectivity; there remains the less common issue if no tract_lengths...
        hist_shape = (
            self.connectivity.tract_lengths.max() /
            (self.conduction_speed or self.connectivity.speed or 3.0) /
            self.integrator.dt, self.model.nvar, number_of_nodes,
            self.model.number_of_modes)
        LOG.debug("Estimated history shape is %r", hist_shape)

        memreq = numpy.prod(hist_shape) * bits_64
        if self.surface:
            memreq += self.surface.number_of_triangles * 3 * bits_32 * 2  # normals
            memreq += self.surface.number_of_vertices * 3 * bits_64 * 2  # normals
            memreq += number_of_nodes * number_of_regions * bits_64 * 4  # region_mapping, region_average, region_sum
            #???memreq += self.surface.local_connectivity.matrix.nnz * 8

        if not hasattr(self.monitors, '__len__'):
            self.monitors = [self.monitors]

        for monitor in self.monitors:
            if not isinstance(monitor, monitors.Bold):
                stock_shape = (monitor.period / self.integrator.dt,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                if hasattr(monitor, "sensors"):
                    try:
                        memreq += number_of_nodes * monitor.sensors.number_of_sensors * bits_64  # projection_matrix
                    except AttributeError:
                        LOG.debug(
                            "No sensors specified, guessing memory based on default EEG."
                        )
                        memreq += number_of_nodes * 62.0 * bits_64

            else:
                stock_shape = (monitor.hrf_length * monitor._stock_sample_rate,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                interim_stock_shape = (
                    1.0 / (2.0**-2 * self.integrator.dt),
                    self.model.variables_of_interest.shape[0], number_of_nodes,
                    self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                memreq += numpy.prod(interim_stock_shape) * bits_64

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.warning(
                "There may be insufficient memory for this simulation.")

        self._memory_requirement_guess = magic_number * memreq
        msg = "Memory requirement estimate: simulation will need about %.1f MB"
        LOG.info(msg, self._memory_requirement_guess / 2**20)

    def _census_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator. 

        Guesstimate is based on a census of the dominant arrays after the
        simulator has been configured.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant 
            memory pigs...

        """
        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        memreq = self.history.nbytes
        try:
            memreq += self.surface.triangles.nbytes * 2
            memreq += self.surface.vertices.nbytes * 2
            memreq += self.surface.region_mapping.nbytes * self.number_of_nodes * 8. * 4  # region_average, region_sum
            memreq += self.surface.eeg_projection.nbytes
            memreq += self.surface.local_connectivity.matrix.nnz * 8
        except AttributeError:
            pass

        for monitor in self.monitors:
            memreq += monitor._stock.nbytes
            if isinstance(monitor, monitors.Bold):
                memreq += monitor._interim_stock.nbytes

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.warning("Memory estimate exceeds total available RAM.")

        self._memory_requirement_census = magic_number * memreq
        #import pdb; pdb.set_trace()
        msg = "Memory requirement census: simulation will need about %.1f MB"
        LOG.info(msg % (self._memory_requirement_census / 1048576.0))

    def _guesstimate_runtime(self):
        """
        Estimate the runtime for this simulator.

        Because larger arrays are spread over parallel executions, this will be
        an over-estimate, or rather a single-threaded estimate...
        Different choices of integrator and monitors have an additional, though
        relatively minor, effect on the magic number.

        """
        magic_number = 6.57e-06  # seconds
        self._runtime = (magic_number * self.number_of_nodes *
                         self.model.nvar * self.model.number_of_modes *
                         self.simulation_length / self.integrator.dt)
        msg = "Simulation runtime should be about %0.3f seconds"
        LOG.info(msg, self._runtime)

    def _calculate_storage_requirement(self):
        """
        Calculate the storage requirement for the simulator, configured with
        models, monitors, etc being run for a particular simulation length. 
        While this is only approximate, it is far more reliable/accurate than
        the memory and runtime guesstimates.
        """
        LOG.info("Calculating storage requirement for ...")
        strgreq = 0
        for monitor in self.monitors:
            # Avoid division by zero for monitor not yet configured
            # (in framework this is executed, when only preconfigure has been called):
            current_period = monitor.period or self.integrator.dt
            strgreq += (TvbProfile.current.MAGIC_NUMBER *
                        self.simulation_length * self.number_of_nodes *
                        self.model.nvar * self.model.number_of_modes /
                        current_period)
        LOG.info("Calculated storage requirement for simulation: %d " %
                 int(strgreq))
        self._storage_requirement = int(strgreq)

    def run(self, **kwds):
        "Convenience method to call the simulator with **kwds and collect output data."
        ts, xs = [], []
        for _ in self.monitors:
            ts.append([])
            xs.append([])
        wall_time_start = time.time()
        for data in self(**kwds):
            for tl, xl, t_x in zip(ts, xs, data):
                if t_x is not None:
                    t, x = t_x
                    tl.append(t)
                    xl.append(x)
        elapsed_wall_time = time.time() - wall_time_start
        LOG.info("%.3f s elapsed, %.3fx real time", elapsed_wall_time,
                 elapsed_wall_time * 1e3 / self.simulation_length)
        for i in range(len(ts)):
            ts[i] = numpy.array(ts[i])
            xs[i] = numpy.array(xs[i])
        return list(zip(ts, xs))
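
# --- Hedged usage sketch (not part of the original source; ``demo_run`` and
# --- its setup are illustrative assumptions). With a single monitor
# --- configured, ``run`` returns a one-element list of (times, data) pairs:
def demo_run(sim, length=100.0):
    """Run an already-configured Simulator ``sim`` for ``length`` ms."""
    (t, y), = sim.run(simulation_length=length)  # one pair per monitor
    LOG.info("collected %d samples, data shape %s", len(t), str(y.shape))
    return t, y
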
class CorrelationCoefficient(core.Type):
    """
    Compute the node-pairwise Pearson correlation coefficient of the
    given input 4D TimeSeries datatype.

    Returns a CorrelationCoefficients datatype, whose values are between
    -1 and 1, inclusive.
    
    See: http://docs.scipy.org/doc/numpy/reference/generated/numpy.corrcoef.html
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The time-series for which the cross correlation matrices are
        calculated.""")

    t_start = basic.Float(
        label=":math:`t_{start}`",
        default=0.9765625,
        required=True,
        doc=
        """Time start point (ms). By default it uses the default Monitor sample period.
        The starting time point of a time series is not zero, but the monitor's sample period. """
    )

    t_end = basic.Float(label=":math:`t_{end}`",
                        default=1000.,
                        required=True,
                        doc=""" End time point (ms) """)

    def evaluate(self):
        """
        Compute the correlation coefficients of a 2D array (tpts x nodes).
        Returns an array of size nodes x nodes x state-variables x modes.

        The time interval over which the correlation coefficients are computed 
        is defined by t_start, t_end

        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        # result shape: (nodes, nodes, state-variables, modes)
        input_shape = self.time_series.read_data_shape()
        result_shape = self.result_shape(input_shape)
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        t_lo = int((1. / self.time_series.sample_period) *
                   (self.t_start - self.time_series.sample_period))
        t_hi = int((1. / self.time_series.sample_period) *
                   (self.t_end - self.time_series.sample_period))
        t_lo = max(t_lo, 0)
        t_hi = min(t_hi, input_shape[0])

        #One correlation coeff matrix, for each state-var & mode.
        for mode in range(result_shape[3]):
            for var in range(result_shape[2]):
                current_slice = tuple([
                    slice(t_lo, t_hi + 1),
                    slice(var, var + 1),
                    slice(input_shape[2]),
                    slice(mode, mode + 1)
                ])
                data = self.time_series.read_data_slice(
                    current_slice).squeeze()
                result[:, :, var, mode] = numpy.corrcoef(data.T)

        util.log_debug_array(LOG, result, "result")

        corr_coeff = graph.CorrelationCoefficients(source=self.time_series,
                                                   array_data=result,
                                                   use_storage=False)
        return corr_coeff

    def result_shape(self, input_shape):
        """Returns the shape of the main result of ...."""
        result_shape = (input_shape[2], input_shape[2], input_shape[1],
                        input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of the analysis.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the
        analysis. That is, it includes storage of any evaluated derived
        attributes.
        """
        extend_size = self.result_size(input_shape)  # Currently no derived attributes.
        return extend_size
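
# --- Hedged usage sketch (illustrative; ``demo_correlation`` is an assumption,
# --- and ``ts`` stands for an already-stored 4D TimeSeries):
def demo_correlation(ts):
    """Node-pairwise correlations over the first second of ``ts``."""
    analyser = CorrelationCoefficient(time_series=ts, t_end=1000.0)
    corr = analyser.evaluate()      # a graph.CorrelationCoefficients datatype
    return corr.array_data          # shape: (nodes, nodes, svars, modes)
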
Beispiel #15
0
class TimeSeries(types_mapped.MappedType):
    """
    Base time-series dataType.
    """

    title = basic.String

    data = arrays.FloatArray(
        label="Time-series data",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc="""An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions""")

    nr_dimensions = basic.Integer(
        label="Number of dimension in timeseries",
        default=4)

    length_1d, length_2d, length_3d, length_4d = [basic.Integer] * 4

    labels_ordering = basic.List(
        default=["Time", "State Variable", "Space", "Mode"],
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = basic.Dict(
        default={},
        label="Specific labels for each dimension for the data stored in this timeseries.",
        doc=""" A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension] }""")

    time = arrays.FloatArray(
        file_storage=core.FILE_STORAGE_EXPAND,
        label="Time-series time",
        required=False,
        doc="""An array of time values for the time-series, with a shape of [tpts,].
        This is 'time' as returned by the simulator's monitors.""")

    start_time = basic.Float(label="Start Time:")

    sample_period = basic.Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g sec, msec, usec, ...)
    sample_period_unit = basic.String(
        label="Sample Period Measure Unit",
        default="ms")

    sample_rate = basic.Float(
        label="Sample rate",
        doc="""The sample rate of the timeseries""")

    has_surface_mapping = basic.Bool(default=True)
    has_volume_mapping = basic.Bool(default=False)

    def configure(self):
        """
        After populating a few fields, compute the rest of the fields.
        """
        super(TimeSeries, self).configure()
        data_shape = self.read_data_shape()
        self.nr_dimensions = len(data_shape)
        self.sample_rate = 1.0 / self.sample_period

        for i in range(min(self.nr_dimensions, 4)):
            setattr(self, 'length_%dd' % (i + 1), int(data_shape[i]))

    def read_data_shape(self):
        """
        Expose shape read on field data.
        """
        try:
            return self.get_data_shape('data')
        except exceptions.TVBException:
            self.logger.exception("Could not read data shape for TS!")
            raise exceptions.TVBException("Invalid empty TimeSeries!")

    def read_data_slice(self, data_slice):
        """
        Expose chunked-data access.
        """
        return self.get_data('data', data_slice)

    def read_time_page(self, current_page, page_size, max_size=None):
        """
        Compute time for current page.
        :param current_page: Starting from 0
        """
        current_page = int(current_page)
        page_size = int(page_size)

        if max_size is None:
            max_size = page_size
        else:
            max_size = int(max_size)

        page_real_size = page_size * self.sample_period
        start_time = self.start_time + current_page * page_real_size
        end_time = start_time + min(page_real_size, max_size * self.sample_period)

        return numpy.arange(start_time, end_time, self.sample_period)

    def read_channels_page(self, from_idx, to_idx, step=None, specific_slices=None, channels_list=None):
        """
        Read and return only the data page for the specified channels list.

        :param from_idx: the starting time idx from which to read data
        :param to_idx: the end time idx up until to which you read data
        :param step: increments in which to read the data. Optional, defaults to 1.
        :param specific_slices: optional parameter. If specified, slices the data accordingly.
        :param channels_list: the list of channels for which we want data
        """
        if channels_list:
            channels_list = json.loads(channels_list)
            for i in range(len(channels_list)):
                channels_list[i] = int(channels_list[i])

        if channels_list:
            channel_slice = tuple(channels_list)
        else:
            channel_slice = slice(None)

        data_page = self.read_data_page(from_idx, to_idx, step, specific_slices)
        # This is just a 1D array like in the case of Global Average monitor.
        # No need for the channels list
        if len(data_page.shape) == 1:
            return data_page.reshape(data_page.shape[0], 1)
        else:
            return data_page[:, channel_slice]

    def read_data_page(self, from_idx, to_idx, step=None, specific_slices=None):
        """
        Retrieve one page of data (paging done based on time).
        """
        from_idx, to_idx = int(from_idx), int(to_idx)

        if isinstance(specific_slices, basestring):
            specific_slices = json.loads(specific_slices)
        if step is None:
            step = 1
        else:
            step = int(step)

        slices = []
        overall_shape = self.read_data_shape()
        for i in range(len(overall_shape)):
            if i == 0:
                # Time slice
                slices.append(
                    slice(from_idx, min(to_idx, overall_shape[0]), step))
                continue
            if i == 2:
                # Read the full main dimension (space, for the simulator)
                slices.append(slice(overall_shape[i]))
                continue
            if specific_slices is None:
                slices.append(slice(0, 1))
            else:
                slices.append(slice(specific_slices[i], min(specific_slices[i] + 1, overall_shape[i]), 1))

        data = self.read_data_slice(tuple(slices))
        if len(data) == 1:
            # Do not allow the time dimension to get squeezed; a 2D result
            # needs to come out of this method.
            data = data.squeeze()
            data = data.reshape((1, len(data)))
        else:
            data = data.squeeze()

        return data

    def read_data_page_split(self, from_idx, to_idx, step=None, specific_slices=None):
        """
        No split needed in the case of a basic TS (sensor and region level).
        """
        return self.read_data_page(from_idx, to_idx, step, specific_slices)


    def write_time_slice(self, partial_result):
        """
        Append a new value to the ``time`` attribute.
        """
        self.store_data_chunk("time", partial_result, grow_dimension=0, close_file=False)

    def write_data_slice(self, partial_result, grow_dimension=0):
        """
        Append a chunk of time-series data to the ``data`` attribute.
        """
        self.store_data_chunk("data", partial_result, grow_dimension=grow_dimension, close_file=False)

    def get_min_max_values(self):
        """
        Retrieve the minimum and maximum values from the metadata.
        :returns: (minimum_value, maximum_value)
        """
        metadata = self.get_metadata('data')
        return metadata[self.METADATA_ARRAY_MIN], metadata[self.METADATA_ARRAY_MAX]

    def get_space_labels(self):
        """
        It assumes that we want to select in the 3rd dimension,
        and generates labels for each point in that dimension.
        Subclasses are more specific.
        :return: An array of strings.
        """
        if self.nr_dimensions > 2:
            return ['signal-%d' % i for i in range(self._length_3d)]
        else:
            return []

    def get_grouped_space_labels(self):
        """
        :return: A list of label groups. A label group is a tuple (name, [(label_idx, label)...]).
                 By default, all labels are in a single group named ''.
        """
        return [('', list(enumerate(self.get_space_labels())))]

    def get_default_selection(self):
        """
        :return: The measure point indices that have to be shown by default. By default show all.
        """
        return range(len(self.get_space_labels()))

    def get_measure_points_selection_gid(self):
        """
        :return: a datatype gid with which to obtain all valid measure point selections for this time series.
                 We have to decide if the default should be all selections or none.
        """
        return ''

    @staticmethod
    def accepted_filters():
        filters = types_mapped.MappedType.accepted_filters()
        filters.update({'datatype_class._nr_dimensions': {'type': 'int', 'display': 'No of Dimensions',
                                                          'operations': ['==', '<', '>']},
                        'datatype_class._sample_period': {'type': 'float', 'display': 'Sample Period',
                                                          'operations': ['==', '<', '>']},
                        'datatype_class._sample_rate': {'type': 'float', 'display': 'Sample Rate',
                                                        'operations': ['==', '<', '>']},
                        'datatype_class._title': {'type': 'string', 'display': 'Title',
                                                  'operations': ['==', '!=', 'like']}})
        return filters

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {"Time-series type": self.__class__.__name__,
                   "Time-series name": self.title,
                   "Dimensions": self.labels_ordering,
                   "Time units": self.sample_period_unit,
                   "Sample period": self.sample_period,
                   "Length": self.sample_period * self.get_data_shape('data')[0]}
        summary.update(self.get_info_about_array('data'))
        return summary
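
# --- Hedged sketch of the paging arithmetic in ``read_time_page`` (standalone
# --- numpy; the parameter values are made up for the example):
def demo_time_page(start_time=0.0, sample_period=0.5, current_page=2, page_size=4):
    """Page 2 of size 4 at 0.5 ms covers [4.0, 6.0) -> [4.0, 4.5, 5.0, 5.5]."""
    page_real_size = page_size * sample_period
    t0 = start_time + current_page * page_real_size
    return numpy.arange(t0, t0 + page_real_size, sample_period)
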
Beispiel #16
0
class Noise(core.Type):
    """
    Defines a base class for noise. Specific noises are derived from this class
    for use in stochastic integrations.

    .. [KloedenPlaten_1995] Kloeden and Platen, Springer 1995, *Numerical
        solution of stochastic differential equations.*

    .. [ManellaPalleschi_1989] Manella, R. and Palleschi V., *Fast and precise
        algorithm for computer simulation of stochastic differential equations*,
        Physical Review A, Vol. 40, Number 6, 1989. [3381-3385]

    .. [Mannella_2002] Mannella, R.,  *Integration of Stochastic Differential
        Equations on a Computer*, Int J. of Modern Physics C 13(9): 1177--1194,
        2002.

    .. [FoxVemuri_1988] Fox, R., Gatland, I., Roy, R. and Vemuri, G., *Fast,
        accurate algorithm for simulation of exponentially correlated colored
        noise*, Physical Review A, Vol. 38, Number 11, 1988. [5938-5940]


    .. #Currently there seems to be a clash between traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Noise.__init__
    .. automethod:: Noise.configure_white
    .. automethod:: Noise.generate
    .. automethod:: Noise.white
    .. automethod:: Noise.coloured

    """
    _base_classes = ['Noise', 'MultiplicativeSimple']

    #NOTE: nsig is not declared here because we use this class directly as the
    #      initial conditions noise source, and in that use the job of nsig is
    #      filled by the state_variable_range attribute of the Model.

    ntau = basic.Float(label=r":math:`\tau`",
                       required=True,
                       default=0.0,
                       range=basic.Range(lo=0.0, hi=20.0, step=1.0),
                       doc="""The noise correlation time""")

    random_stream = RandomStream(
        label="Random Stream",
        required=True,
        doc="""An instance of numpy's RandomState associated with this
        specific Noise object.""")

    dt = None
    # For use if coloured
    _E = None
    _sqrt_1_E2 = None
    _eta = None
    _h = None

    def configure(self):
        """
        Run base classes configure to setup traited attributes, then ensure that
        the ``random_stream`` attribute is properly configured.

        """
        super(Noise, self).configure()
        self.random_stream.configure()

    def __str__(self):
        return simple_gen_astr(self, 'dt ntau')

    def configure_white(self, dt, shape=None):
        """Set the time step (dt) of noise or integration time"""
        self.dt = dt
        LOG.info('White noise configured with dt=%g', self.dt)

    def configure_coloured(self, dt, shape):
        r"""
        One of the simplest forms for coloured noise is exponentially correlated
        Gaussian noise [KloedenPlaten_1995]_.

        We give the initial conditions for coloured noise using the integral
        algorithm for simulating exponentially correlated noise proposed in
        [FoxVemuri_1988]_.

        To start the simulation, an initial value for :math:`\eta` is needed.
        It is obtained in accord with Eqs.[13-15]:

            .. math::
                m &= \text{random number}\\
                n &= \text{random number}\\
                \eta &= \sqrt{-2D\lambda\ln(m)}\,\cos(2\pi\,n)

        where :math:`D` is the standard deviation of the noise amplitude and
        :math:`\lambda = \frac{1}{\tau_n}` is the inverse of the noise
        correlation time. Then we set :math:`E = \exp(-\lambda\,\delta t)`,
        where :math:`\delta t` is the integration time step.

        After that the exponentially correlated, coloured noise, is obtained:

            .. math::
                a &= \text{random number}\\
                b &= \text{random number}\\
                h &= \sqrt{-2D\lambda\,(1 - E^2)\,\ln{a}}\,\cos(2\pi\,b)\\
                \eta_{t+\delta\,t} &= \eta_{t}E + h

        """
        #TODO: Probably best to change the docstring to be consistent with the
        #      below, ie, factoring out the explicit Box-Muller.
        #NOTE: The actual implementation factors out the explicit Box-Muller,
        #      using numpy's normal() instead.
        self.dt = dt
        self._E = numpy.exp(-self.dt / self.ntau)
        self._sqrt_1_E2 = numpy.sqrt((1.0 - self._E**2))
        self._eta = self.random_stream.normal(size=shape)
        self._dt_sqrt_lambda = self.dt * numpy.sqrt(1.0 / self.ntau)
        LOG.info(
            'Colored noise configured with dt=%g E=%g sqrt_1_E2=%g eta=%g & dt_sqrt_lambda=%g',
            self.dt, self._E, self._sqrt_1_E2, self._eta, self._dt_sqrt_lambda)

    def generate(self, shape, lo=-1.0, hi=1.0):
        "Generate noise realization."
        if self.ntau > 0.0:
            noise = self.coloured(shape)
        else:
            noise = self.white(shape)
        return noise

    def coloured(self, shape):
        "Generate colored noise. [FoxVemuri_1988]_"
        self._h = self._sqrt_1_E2 * self.random_stream.normal(size=shape)
        self._eta = self._eta * self._E + self._h
        return self._dt_sqrt_lambda * self._eta

    def white(self, shape):
        "Generate white noise."
        noise = numpy.sqrt(self.dt) * self.random_stream.normal(size=shape)
        return noise
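
# --- Hedged standalone sketch of the coloured-noise update (illustrative):
# --- a plain-numpy version of configure_coloured()/coloured() for a scalar
# --- stream, following [FoxVemuri_1988]_ with the explicit Box-Muller step
# --- replaced by numpy's normal(), exactly as the class above does.
def demo_coloured_noise(n_steps, dt=0.1, ntau=2.0, rng=None):
    rng = rng or numpy.random.RandomState(42)
    E = numpy.exp(-dt / ntau)                     # E = exp(-lambda * dt)
    sqrt_1_E2 = numpy.sqrt(1.0 - E ** 2)
    dt_sqrt_lambda = dt * numpy.sqrt(1.0 / ntau)
    eta = rng.normal()                            # initial condition
    out = numpy.empty(n_steps)
    for i in range(n_steps):
        eta = eta * E + sqrt_1_E2 * rng.normal()  # exponentially correlated
        out[i] = dt_sqrt_lambda * eta
    return out
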
Beispiel #17
0
class FourierSpectrum(arrays.MappedArray):
    """
    Result of a Fourier Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the FFT is applied.")

    segment_length = basic.Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segement length determines the frequency resolution of the
            resulting spectra.""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
            application of the FFT.""")

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    average_power = arrays.FloatArray(label="Average Power",
                                      file_storage=core.FILE_STORAGE_EXPAND)

    normalised_average_power = arrays.FloatArray(
        label="Normalised Power", file_storage=core.FILE_STORAGE_EXPAND)

    _frequency = None
    _freq_step = None
    _max_freq = None

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1),
                    int(self.read_data_shape()[i]))

        if self.trait.use_storage is False and sum(
                self.get_data_shape('array_data')) != 0:
            if self.amplitude.size == 0:
                self.compute_amplitude()
            if self.phase.size == 0:
                self.compute_phase()
            if self.power.size == 0:
                self.compute_power()
            if self.average_power.size == 0:
                self.compute_average_power()
            if self.normalised_average_power.size == 0:
                self.compute_normalised_average_power()

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """

        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_amplitude()
        self.store_data_chunk('amplitude',
                              partial_result.amplitude,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_phase()
        self.store_data_chunk('phase',
                              partial_result.phase,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_power()
        self.store_data_chunk('power',
                              partial_result.power,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_average_power()
        self.store_data_chunk('average_power',
                              partial_result.average_power,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_normalised_average_power()
        self.store_data_chunk('normalised_average_power',
                              partial_result.normalised_average_power,
                              grow_dimension=2,
                              close_file=False)

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Segment length": self.segment_length,
            "Windowing function": self.windowing_function,
            "Frequency step": self.freq_step,
            "Maximum frequency": self.max_freq
        }
        return summary

    @property
    def freq_step(self):
        """ Frequency step size of the complex Fourier spectrum."""
        if self._freq_step is None:
            self._freq_step = 1.0 / self.segment_length
            msg = "%s: Frequency step size is %s"
            LOG.debug(msg % (str(self), str(self._freq_step)))
        return self._freq_step

    @property
    def max_freq(self):
        """ Amplitude of the complex Fourier spectrum."""
        if self._max_freq is None:
            self._max_freq = 0.5 / self.source.sample_period
            msg = "%s: Max frequency is %s"
            LOG.debug(msg % (str(self), str(self._max_freq)))
        return self._max_freq

    @property
    def frequency(self):
        """ Frequencies represented the complex Fourier spectrum."""
        if self._frequency is None:
            self._frequency = numpy.arange(self.freq_step,
                                           self.max_freq + self.freq_step,
                                           self.freq_step)
            util.log_debug_array(LOG, self._frequency, "frequency")
        return self._frequency

    def compute_amplitude(self):
        """ Amplitude of the complex Fourier spectrum."""
        self.amplitude = numpy.abs(self.array_data)
        self.trait["amplitude"].log_debug(owner=self.__class__.__name__)

    def compute_phase(self):
        """ Phase of the Fourier spectrum."""
        self.phase = numpy.angle(self.array_data)
        self.trait["phase"].log_debug(owner=self.__class__.__name__)

    def compute_power(self):
        """ Power of the complex Fourier spectrum."""
        self.power = numpy.abs(self.array_data)**2
        self.trait["power"].log_debug(owner=self.__class__.__name__)

    def compute_average_power(self):
        """ Average-power of the complex Fourier spectrum."""
        self.average_power = numpy.mean(numpy.abs(self.array_data)**2, axis=-1)
        self.trait["average_power"].log_debug(owner=self.__class__.__name__)

    def compute_normalised_average_power(self):
        """ Normalised-average-power of the complex Fourier spectrum."""
        self.normalised_average_power = (self.average_power /
                                         numpy.sum(self.average_power, axis=0))
        self.trait["normalised_average_power"].log_debug(
            owner=self.__class__.__name__)
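
# --- Hedged standalone sketch (illustrative; mirrors the compute_* methods
# --- above for an in-memory complex spectrum of shape (freqs, nodes, segments)):
def demo_spectrum_measures(spectrum):
    amplitude = numpy.abs(spectrum)
    phase = numpy.angle(spectrum)
    power = amplitude ** 2
    average_power = power.mean(axis=-1)                     # over segments
    normalised = average_power / average_power.sum(axis=0)  # over frequency
    return amplitude, phase, power, average_power, normalised
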
Beispiel #18
0
class Integrator(core.Type):
    """
    The Integrator class is a base class for the integration methods...

    .. [1] Kloeden and Platen, Springer 1995, *Numerical solution of stochastic
        differential equations.*

    .. [2] Riccardo Mannella, *Integration of Stochastic Differential Equations
        on a Computer*, Int J. of Modern Physics C 13(9): 1177--1194, 2002.

    .. [3] R. Mannella and V. Palleschi, *Fast and precise algorithm for 
        computer simulation of stochastic differential equations*, Phys. Rev. A
        40: 3381, 1989.

    .. #Currently there seems to be a clash between traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Integrator.__init__
    .. automethod:: Integrator.scheme

    """
    _base_classes = ['Integrator', 'IntegratorStochastic', 'RungeKutta4thOrderDeterministic']

    dt = basic.Float(
        label="Integration-step size (ms)",
        default=0.01220703125,  # 0.015625
        #range=basic.Range(lo=0.0048828125, hi=0.244140625, step=0.1, base=2.)
        required=True,
        doc="""The step size used by the integration routine in ms. This
        should be chosen to be small enough for the integration to be
        numerically stable. It is also necessary to consider the desired sample
        period of the Monitors, as they are restricted to being integral
        multiples of this value. The default value is set such that all built-in
        models are numerically stable with their default parameters and because
        it is consistent with Monitors using sample periods corresponding to
        powers of 2, from 128 to 4096 Hz.""")


    def __init__(self, **kwargs):
        """Integrators are intialized using their integration step, dt."""
        super(Integrator, self).__init__(**kwargs) 
        LOG.debug(str(kwargs))


    def __repr__(self):
        """A formal, executable, representation of a Model object."""
        class_name = self.__class__.__name__ 
        traited_kwargs = self.trait.keys()
        formal = class_name + "(" + "=%s, ".join(traited_kwargs) + "=%s)"
        return formal % eval("(self." + ", self.".join(traited_kwargs) + ")")


    def __str__(self):
        """An informal, human readable, representation of a Model object."""
        class_name = self.__class__.__name__ 
        traited_kwargs = self.trait.keys()
        informal = class_name + "(" + ", ".join(traited_kwargs) + ")"
        return informal


    def scheme(self, X, dfun, coupling, local_coupling, stimulus):
        """
        The scheme of an integrator should take a state and provide the next
        state in time; e.g., for a differential equation, the scheme should take
        :math:`X` and provide an appropriate :math:`X + dX` (``dfun`` in the code).

        """

        pass
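
# --- Hedged sketch of a concrete scheme (illustrative; TVB ships its own
# --- Euler/Heun integrators, this only demonstrates the scheme() contract):
class EulerDeterministicSketch(Integrator):
    """Minimal forward-Euler step: X_{t+dt} = X_t + dt * (dfun(X_t) + stimulus)."""

    def scheme(self, X, dfun, coupling, local_coupling, stimulus):
        dX = dfun(X, coupling, local_coupling)
        return X + self.dt * (dX + stimulus)
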
Beispiel #19
0
class Simulator(core.Type):
    """
    The Simulator class coordinates classes from all other modules in the
    simulator package in order to perform simulations. 

    In general, it is necessary to initialize a simulator with the desired
    components and then call the simulator in a loop to obtain simulation
    data:
    
    >>> sim = Simulator(...)
    >>> for output in sim(simulation_length=1000):
            ...
    
    Please refer to the user guide and the demos for more detail.


    .. #Currently there seems to be a clash between traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Simulator.__init__
    .. automethod:: Simulator.configure
    .. automethod:: Simulator.__call__
    .. automethod:: Simulator.configure_history
    .. automethod:: Simulator.configure_integrator_noise
    .. automethod:: Simulator.memory_requirement
    .. automethod:: Simulator.runtime
    .. automethod:: Simulator.storage_requirement


    """

    connectivity = connectivity_dtype.Connectivity(
        label="Long-range connectivity",
        default=None,
        order=1,
        required=True,
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="region_mapping_data",
                     linked_elem_field=FilterChain.datatype + "._connectivity",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""A tvb.datatypes.Connectivity object which contains the
        structural long-range connectivity data (i.e., white-matter tracts). In
        combination with the ``Long-range coupling function`` it defines the inter-regional
        connections. These couplings undergo a time delay via signal propagation 
        with a propagation speed of ``Conduction Speed``""")

    conduction_speed = basic.Float(
        label="Conduction Speed",
        default=3.0,
        order=2,
        required=False,
        range=basic.Range(lo=0.01, hi=100.0, step=1.0),
        doc="""Conduction speed for ``Long-range connectivity`` (mm/ms)""")

    coupling = coupling_module.Coupling(
        label="Long-range coupling function",
        default=coupling_module.Linear(),
        required=True,
        order=2,
        doc="""The coupling function is applied to the activity propagated
        between regions by the ``Long-range connectivity`` before it enters the local
        dynamic equations of the Model. Its primary purpose is to 'rescale' the
        incoming activity to a level appropriate to Model.""")

    surface = Cortex(
        label="Cortical surface",
        default=None,
        order=3,
        required=False,
        filters_backend=FilterChain(
            fields=[FilterChain.datatype + '._valid_for_simulations'],
            operations=["=="],
            values=[True]),
        filters_ui=[
            UIFilter(linked_elem_name="projection_matrix_data",
                     linked_elem_field=FilterChain.datatype + "._sources",
                     linked_elem_parent_name="monitors",
                     linked_elem_parent_option="EEG"),
            UIFilter(linked_elem_name="local_connectivity",
                     linked_elem_field=FilterChain.datatype + "._surface",
                     linked_elem_parent_name="surface",
                     linked_elem_parent_option=None)
        ],
        doc="""By default, a tvb.datatypes.Cortex object which represents the
        cortical surface defined by points in the 3D physical space and their 
        neighborhood relationship. In the current TVB version, when setting up a 
        surface-based simulation, the option to configure the spatial spread of 
        the ``Local Connectivity`` is available.""")

    stimulus = patterns_dtype.SpatioTemporalPattern(
        label="Spatiotemporal stimulus",
        default=None,
        order=4,
        required=False,
        doc=
        """A ``Spatiotemporal stimulus`` can be defined at the region or surface level.
        It's composed of spatial and temporal components. For region defined stimuli
        the spatial component is just the strength with which the temporal
        component is applied to each region. For surface defined stimuli,  a
        (spatial) function, with finite-support, is used to define the strength 
        of the stimuli on the surface centred around one or more focal points. 
        In the current version of TVB, stimuli are applied to the first state 
        variable of the ``Local dynamic model``.""")

    model = models_module.Model(
        label="Local dynamic model",
        default=models_module.Generic2dOscillator,
        required=True,
        order=5,
        doc="""A tvb.simulator.Model object which describe the local dynamic
        equations, their parameters, and, to some extent, where connectivity
        (local and long-range) enters and which state-variables the Monitors
        monitor. By default the 'Generic2dOscillator' model is used. Read the 
        Scientific documentation to learn more about this model.""")

    integrator = integrators_module.Integrator(
        label="Integration scheme",
        default=integrators_module.HeunDeterministic,
        required=True,
        order=6,
        doc="""A tvb.simulator.Integrator object which is
            an integration scheme with supporting attributes such as 
            integration step size and noise specification for stochastic 
            methods. It is used to compute the time courses of the model state 
            variables.""")

    initial_conditions = arrays_dtype.FloatArray(
        label="Initial Conditions",
        default=None,
        order=-1,
        required=False,
        doc="""Initial conditions from which the simulation will begin. By
        default, random initial conditions are provided. Needs to be the same shape
        as simulator 'history', ie, initial history function which defines the 
        minimal initial state of the network with time delays before time t=0. 
        If the number of time points in the provided array is insufficient the 
        array will be padded with random values based on the 'state_variables_range'
        attribute.""")

    monitors = monitors_module.Monitor(
        label="Monitor(s)",
        default=monitors_module.TemporalAverage,
        required=True,
        order=8,
        select_multiple=True,
        doc="""A tvb.simulator.Monitor or a list of tvb.simulator.Monitor
        objects that 'know' how to record relevant data from the simulation. Two
        main types exist: 1) simple, spatial and temporal, reductions (subsets
        or averages); 2) physiological measurements, such as EEG, MEG and fMRI.
        By default the Model's specified variables_of_interest are returned,
        temporally downsampled from the raw integration rate to a sample rate of
        1024Hz.""")

    simulation_length = basic.Float(
        label="Simulation Length (ms)",
        default=1000.0,  # ie 1 second
        required=True,
        order=9,
        doc="""The length of a simulation in milliseconds (ms).""")

    def __init__(self, **kwargs):
        """
        Use the base class' mechanisms to initialise the traited attributes 
        declared above, overriding defaults with any provided keywords. Then
        declare any non-traited attributes.

        """
        super(Simulator, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        self.calls = 0
        self.current_step = 0

        self.number_of_nodes = None
        self.horizon = None
        self.good_history_shape = None
        self.history = None
        self._memory_requirement_guess = None
        self._memory_requirement_census = None
        self._storage_requirement = None
        self._runtime = None

    def __str__(self):
        return "Simulator(**kwargs)"

    def preconfigure(self):
        """
        Configure just the basic fields, so that memory can be estimated
        """
        self.connectivity.configure()

        if self.surface:
            self.surface.configure()

        if self.stimulus:
            self.stimulus.configure()

        self.coupling.configure()
        self.model.configure()
        self.integrator.configure()

        # monitors needs to be a list or tuple, even if there is only one...
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]

        # Configure monitors
        for monitor in self.monitors:
            monitor.configure()

        ##------------- Now the interdependent configuration -------------##

        #"Nodes" refers to either regions or vertices + non-cortical regions.
        if self.surface is None:
            self.number_of_nodes = self.connectivity.number_of_regions
        else:
            self.number_of_nodes = self.surface.region_mapping.shape[0]

        # Estimate of memory usage
        self._guesstimate_memory_requirement()

    def configure(self, full_configure=True):
        """
        The first step of configuration is to run the configure methods of all
        the Simulator's components, ie its traited attributes.

        Configuration of a Simulator primarily consists of calculating the
        attributes, etc, which depend on the combinations of the Simulator's
        traited attributes (keyword args).

        Converts delays from physical time units into integration steps
        and updates attributes that depend on combinations of the 6 inputs.
        """
        if full_configure:
            # When run from GUI, preconfigure is run separately, and we want to avoid running that part twice
            self.preconfigure()

        #Make sure spatialised model parameters have the right shape (number_of_nodes, 1)
        excluded_params = ("state_variable_range", "variables_of_interest",
                           "noise", "psi_table", "nerf_table")

        for param in self.model.trait.keys():
            if param in excluded_params:
                continue
            #If it's a surface sim and model parameters were provided at the region level
            region_parameters = getattr(self.model, param)
            if self.surface is not None:
                if region_parameters.size == self.connectivity.number_of_regions:
                    new_parameters = region_parameters[
                        self.surface.region_mapping].reshape((-1, 1))
                    setattr(self.model, param, new_parameters)
            region_parameters = getattr(self.model, param)
            if region_parameters.size == self.number_of_nodes:
                new_parameters = region_parameters.reshape((-1, 1))
                setattr(self.model, param, new_parameters)

        #Configure spatial component of any stimuli
        self.configure_stimuli()

        #Set delays, provided in physical units, in integration steps.
        self.connectivity.set_idelays(self.integrator.dt)

        self.horizon = numpy.max(self.connectivity.idelays) + 1
        LOG.info("horizon is %d steps" % self.horizon)

        # workspace -- minimal state of network with delays
        self.good_history_shape = (self.horizon, self.model.nvar,
                                   self.number_of_nodes,
                                   self.model.number_of_modes)
        msg = "%s: History shape will be: %s"
        LOG.debug(msg % (repr(self), str(self.good_history_shape)))

        #Reshape integrator.noise.nsig, if necessary.
        if isinstance(self.integrator,
                      integrators_module.IntegratorStochastic):
            self.configure_integrator_noise()

        self.configure_history(self.initial_conditions)

        #Configure Monitors to work with selected Model, etc...
        self.configure_monitors()

        #Estimate of memory usage.
        self._census_memory_requirement()

    def __call__(self, simulation_length=None, random_state=None):
        """
        When a Simulator is called it returns an iterator.

        kwargs:

        ``simulation_length``:
           total time of simulation

        ``random_state``: 
           a state for the NumPy random number generator, saved from a previous 
           call to permit consistent continuation of a simulation.

        """
        #The number of times this Simulator has been called.
        self.calls += 1

        #Update the simulator object's simulation_length attribute.
        if simulation_length is None:
            simulation_length = self.simulation_length
        else:
            self.simulation_length = simulation_length

        #Estimate run time and storage requirements, with logging.
        self._guesstimate_runtime()
        self._calculate_storage_requirement()

        if random_state is not None:
            if isinstance(self.integrator,
                          integrators_module.IntegratorStochastic):
                self.integrator.noise.random_stream.set_state(random_state)
                msg = "%s: random_state supplied. Seed is: %s"
                LOG.info(
                    msg %
                    (str(self),
                     str(self.integrator.noise.random_stream.get_state()[1][0])
                     ))
            else:
                msg = "%s: random_state supplied for non-stochastic integration"
                LOG.warn(msg % str(self))

        #Determine the number of integration steps required to produce
        #data of simulation_length
        int_steps = int(simulation_length / self.integrator.dt)
        LOG.info("%s: gonna do %d integration steps" % (str(self), int_steps))

        # locals for cleaner code.
        horizon = self.horizon
        history = self.history
        dfun = self.model.dfun
        coupling = self.coupling
        scheme = self.integrator.scheme
        npsum = numpy.sum
        npdot = numpy.dot
        ncvar = len(self.model.cvar)
        number_of_regions = self.connectivity.number_of_regions
        nsn = (number_of_regions, 1, number_of_regions)

        # Exact dtypes and alignment are required by the C speedups. Once we have history objects, these will be encapsulated.
        # cvar index array broadcastable to nodes, cvars, nodes
        cvar = numpy.array(self.model.cvar[numpy.newaxis, :, numpy.newaxis],
                           dtype=numpy.intc)
        LOG.debug("%s: cvar is: %s" % (str(self), str(cvar)))
        # idelays array broadcastable to nodes, cvars, nodes
        idelays = numpy.array(self.connectivity.idelays[:, numpy.newaxis, :],
                              dtype=numpy.intc,
                              order='c')
        LOG.debug("%s: idelays shape is: %s" % (str(self), str(idelays.shape)))
        # weights array broadcastable to nodes, cvars, nodes, modes
        weights = self.connectivity.weights[:, numpy.newaxis, :, numpy.newaxis]
        LOG.debug("%s: weights shape is: %s" % (str(self), str(weights.shape)))
        # node_ids broadcastable to nodes, cvars, nodes
        node_ids = numpy.array(
            numpy.arange(number_of_regions)[numpy.newaxis, numpy.newaxis, :],
            dtype=numpy.intc)
        LOG.debug("%s: node_ids shape is: %s" %
                  (str(self), str(node_ids.shape)))

        if self.surface is None:
            local_coupling = 0.0
        else:
            region_average = self.surface.region_average
            region_history = npdot(
                region_average, history
            )  # this may be very expensive ~60sec for epileptor (many states and modes ...)
            region_history = region_history.transpose((1, 2, 0, 3))
            region_history = numpy.ascontiguousarray(
                region_history)  # required by the c speedups
            if self.surface.coupling_strength.size == 1:
                local_coupling = (self.surface.coupling_strength[0] *
                                  self.surface.local_connectivity.matrix)
            elif self.surface.coupling_strength.size == self.surface.number_of_vertices:
                ind = numpy.arange(self.number_of_nodes, dtype=int)
                vec_cs = numpy.zeros((self.number_of_nodes, ))
                vec_cs[:self.surface.number_of_vertices] = self.surface.coupling_strength
                sp_cs = sparse.csc_matrix(
                    (vec_cs, (ind, ind)),
                    shape=(self.number_of_nodes, self.number_of_nodes))
                local_coupling = sp_cs * self.surface.local_connectivity.matrix

        if self.stimulus is None:
            stimulus = 0.0
        else:  # TODO: Consider changing to simulator absolute time... This is an open discussion, a matter of interpretation of the stimuli time axis.
            time = numpy.arange(0, simulation_length, self.integrator.dt)
            time = time[numpy.newaxis, :]
            self.stimulus.configure_time(time)
            stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
            LOG.debug("%s: stimulus shape is: %s" %
                      (str(self), str(stimulus.shape)))

        # initial state, history[timepoint[0], state_variables, nodes, modes]
        state = history[self.current_step % horizon, :]
        LOG.debug("%s: state shape is: %s" % (str(self), str(state.shape)))

        if self.surface is not None:
            # the vertex mapping array is huge but sparse.
            # csr because I expect the row to have one value and I expect the dot to proceed row wise.
            vertex_mapping = sparse.csr_matrix(self.surface.vertex_mapping)
            # this is big as well; same shape as the vertex mapping.
            region_average = sparse.csr_matrix(region_average)

            node_coupling_shape = (vertex_mapping.shape[0], ncvar,
                                   self.model.number_of_modes)

        delayed_state = numpy.zeros(
            (number_of_regions, ncvar, number_of_regions,
             self.model.number_of_modes))

        for step in xrange(self.current_step + 1,
                           self.current_step + int_steps + 1):
            time_indices = (step - 1 - idelays) % horizon
            if self.surface is None:
                get_state(history,
                          time_indices,
                          cvar,
                          node_ids,
                          out=delayed_state)
                node_coupling = coupling(weights, state[self.model.cvar],
                                         delayed_state)
            else:
                get_state(region_history,
                          time_indices,
                          cvar,
                          node_ids,
                          out=delayed_state)
                region_coupling = coupling(
                    weights, region_history[(step - 1) % horizon,
                                            self.model.cvar], delayed_state)
                node_coupling = numpy.empty(node_coupling_shape)

                # sparse matrices cannot multiply with 3d arrays so we use a loop over the modes
                for mi in xrange(self.model.number_of_modes):
                    node_coupling[..., mi] = vertex_mapping * region_coupling[..., mi].T

                node_coupling = node_coupling.transpose((1, 0, 2))

            if self.stimulus is not None:
                stimulus[self.model.cvar, :, :] = numpy.reshape(
                    self.stimulus(step - (self.current_step + 1)), (1, -1, 1))

            state = scheme(state, dfun, node_coupling, local_coupling,
                           stimulus)
            history[step % horizon, :] = state

            if self.surface is not None:
                # this optimisation is similar to the one done for vertex_mapping above
                step_avg = numpy.empty((number_of_regions, state.shape[0],
                                        self.model.number_of_modes))
                for mi in xrange(self.model.number_of_modes):
                    step_avg[..., mi] = region_average.dot(state[..., mi].T)

                region_history[step % horizon, :] = step_avg.transpose(
                    (1, 0, 2))

            # monitor.things e.g. raw, average, eeg, meg, fmri...
            output = [monitor.record(step, state) for monitor in self.monitors]
            if any(outputi is not None for outputi in output):
                yield output

        # The -1 avoids repeating the last point on resume
        self.current_step = self.current_step + int_steps - 1
        self.history = history

    def configure_history(self, initial_conditions=None):
        """
        Set initial conditions for the simulation using either the provided
        initial_conditions or, if none are provided, the model's initial()
        method. This method is called during the Simulator's configure().

        Any initial_conditions that are provided as an argument are expected
        to have dimensions 1, 2, and 3 with shapes corresponding to the number
        of state_variables, nodes and modes, respectively. If the provided
        initial_conditions are shorter in time (dim=0) than the required history
        the model's initial() method is called to make up the difference.

        """

        history = self.history
        if initial_conditions is None:
            msg = "%s: Setting default history using model's initial() method."
            LOG.info(msg % str(self))
            history = self.model.initial(self.integrator.dt,
                                         self.good_history_shape)
        else:
            # history should be [timepoints, state_variables, nodes, modes]
            LOG.info("%s: Received initial conditions as arg." % str(self))
            ic_shape = initial_conditions.shape
            if ic_shape[1:] != self.good_history_shape[1:]:
                msg = "%s: bad initial_conditions[1:] shape %s, should be %s"
                msg %= self, ic_shape[1:], self.good_history_shape[1:]
                raise ValueError(msg)
            else:
                if ic_shape[0] >= self.horizon:
                    msg = "%s: Using last %s time-steps for history."
                    LOG.info(msg % (str(self), self.horizon))
                    history = initial_conditions[
                        -self.horizon:, :, :, :].copy()
                else:
                    msg = "%s: initial_conditions shorter than required."
                    LOG.info(msg % str(self))
                    msg = "%s: Using model's initial() method for difference."
                    LOG.info(msg % str(self))
                    history = self.model.initial(self.integrator.dt,
                                                 self.good_history_shape)
                    csmh = self.current_step % self.horizon
                    history = numpy.roll(history, -csmh, axis=0)
                    history[:ic_shape[0], :, :, :] = initial_conditions
                    history = numpy.roll(history, csmh, axis=0)
                self.current_step += ic_shape[0] - 1
            msg = "%s: history shape is: %s"
            LOG.debug(msg % (str(self), str(history.shape)))
        self.history = history

    def configure_integrator_noise(self):
        """
        This enables noise to be state-variable specific and/or to enter
        only via specific brain structures, for example if we only want to
        consider noise as an external input entering the brain via appropriate
        thalamic nuclei.

        Support 3 possible shapes:
            1) number_of_nodes;

            2) number_of_state_variables; and 

            3) (number_of_state_variables, number_of_nodes).

        """

        noise = self.integrator.noise

        if self.integrator.noise.ntau > 0.0:
            self.integrator.noise.configure_coloured(
                self.integrator.dt, self.good_history_shape[1:])
        else:
            self.integrator.noise.configure_white(self.integrator.dt,
                                                  self.good_history_shape[1:])

        if self.surface is not None:
            if self.integrator.noise.nsig.size == self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[
                    self.surface.region_mapping]
            elif self.integrator.noise.nsig.size == self.model.nvar * self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[
                    :, self.surface.region_mapping]

        good_nsig_shape = (self.model.nvar, self.number_of_nodes,
                           self.model.number_of_modes)
        nsig = self.integrator.noise.nsig
        LOG.debug("Simulator.integrator.noise.nsig shape: %s" %
                  str(nsig.shape))
        if nsig.shape in (good_nsig_shape, (1, )):
            return
        elif nsig.shape == (self.model.nvar, ):
            nsig = nsig.reshape((self.model.nvar, 1, 1))
        elif nsig.shape == (self.number_of_nodes, ):
            nsig = nsig.reshape((1, self.number_of_nodes, 1))
        elif nsig.shape == (self.model.nvar, self.number_of_nodes):
            nsig = nsig.reshape((self.model.nvar, self.number_of_nodes, 1))
        else:
            msg = "Bad Simulator.integrator.noise.nsig shape: %s"
            LOG.error(msg % str(nsig.shape))

        LOG.debug("Simulator.integrator.noise.nsig shape: %s" %
                  str(nsig.shape))
        self.integrator.noise.nsig = nsig

    def configure_monitors(self):
        """ Configure the requested Monitors for this Simulator """
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]

        # Configure monitors
        for monitor in self.monitors:
            monitor.config_for_sim(self)

    def configure_stimuli(self):
        """ Configure the defined Stimuli for this Simulator """
        if self.stimulus is not None:
            if self.surface:
                self.stimulus.configure_space(self.surface.region_mapping)
            else:
                self.stimulus.configure_space()

    def memory_requirement(self):
        """
        Return an estimate of the memory requirements (Bytes) for this
        simulator's current configuration.
        """
        self._guesstimate_memory_requirement()
        return self._memory_requirement_guess

    def runtime(self, simulation_length):
        """
        Return an estimated run time (seconds) for the simulator's current 
        configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._guesstimate_runtime()
        return self._runtime

    def storage_requirement(self, simulation_length):
        """
        Return an estimated storage requirement (Bytes) for the simulator's
        current configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._calculate_storage_requirement()
        return self._storage_requirement

    def _guesstimate_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator.

        The guesstimate is based on the shape of the dominant arrays, and as such
        can operate before configuration.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant
            memory pigs...

        """
        if self.surface:
            number_of_nodes = self.surface.number_of_vertices
        else:
            number_of_nodes = self.connectivity.number_of_regions

        number_of_regions = self.connectivity.number_of_regions

        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        bits_64 = 8.0  # Bytes
        bits_32 = 4.0  # Bytes
        #NOTE: The speed hack for getting the first element of hist_shape should
        #      partially resolve calls to this method with a non-configured
        #      connectivity; the less common issue of missing tract_lengths remains...
        hist_shape = (
            self.connectivity.tract_lengths.max() /
            (self.conduction_speed or self.connectivity.speed or 3.0) /
            self.integrator.dt, self.model.nvar, number_of_nodes,
            self.model.number_of_modes)
        memreq = numpy.prod(hist_shape) * bits_64
        if self.surface:
            memreq += self.surface.number_of_triangles * 3 * bits_32 * 2  # triangles + triangle normals
            memreq += self.surface.number_of_vertices * 3 * bits_64 * 2  # vertices + vertex normals
            memreq += number_of_nodes * number_of_regions * bits_64 * 4  # vertex_mapping, region_average, region_sum
            #???memreq += self.surface.local_connectivity.matrix.nnz * 8

        if not isinstance(self.monitors, (list, tuple)):
            monitors = [self.monitors]
        else:
            monitors = self.monitors
        for monitor in monitors:
            if not isinstance(monitor, monitors_module.Bold):
                stock_shape = (monitor.period / self.integrator.dt,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                if hasattr(monitor, "sensors"):
                    try:
                        memreq += number_of_nodes * monitor.sensors.number_of_sensors * bits_64  # projection_matrix
                    except AttributeError:
                        LOG.debug(
                            "No sensors specified, guessing memory based on default EEG."
                        )
                        memreq += number_of_nodes * 62.0 * bits_64

            else:
                stock_shape = (monitor.hrf_length * monitor._stock_sample_rate,
                               self.model.variables_of_interest.shape[0],
                               number_of_nodes, self.model.number_of_modes)
                interim_stock_shape = (
                    1.0 / (2.0**-2 * self.integrator.dt),
                    self.model.variables_of_interest.shape[0], number_of_nodes,
                    self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                memreq += numpy.prod(interim_stock_shape) * bits_64

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.error("This is gonna get ugly...")

        self._memory_requirement_guess = magic_number * memreq
        msg = "Memory requirement guesstimate: simulation will need about %.1f MB"
        LOG.info(msg % (self._memory_requirement_guess / 1048576.0))

    def _census_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator. 

        Guesstimate is based on a census of the dominant arrays after the
        simulator has been configured.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant 
            memory pigs...

        """
        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        memreq = self.history.nbytes
        try:
            memreq += self.surface.triangles.nbytes * 2
            memreq += self.surface.vertices.nbytes * 2
            memreq += self.surface.vertex_mapping.nbytes * 4  # vertex_mapping, region_average, region_sum
            memreq += self.surface.eeg_projection.nbytes
            memreq += self.surface.local_connectivity.matrix.nnz * 8
        except AttributeError:
            pass

        for monitor in self.monitors:
            memreq += monitor._stock.nbytes
            if isinstance(monitor, monitors_module.Bold):
                memreq += monitor._interim_stock.nbytes

        if psutil and memreq > psutil.virtual_memory().total:
            LOG.error("This is gonna get ugly...")

        self._memory_requirement_census = magic_number * memreq
        #import pdb; pdb.set_trace()
        msg = "Memory requirement census: simulation will need about %.1f MB"
        LOG.info(msg % (self._memory_requirement_census / 1048576.0))

    def _guesstimate_runtime(self):
        """
        Estimate the runtime for this simulator.

        Spreading work across parallel executions of the larger arrays means this
        will be an over-estimate, or rather a single-threaded estimate...
        The choice of integrator and monitors has an additional, though relatively
        minor, effect on the magic number.

        """
        magic_number = 6.57e-06  # seconds
        self._runtime = (magic_number * self.number_of_nodes *
                         self.model.nvar * self.model.number_of_modes *
                         self.simulation_length / self.integrator.dt)
        msg = "Simulation single-threaded runtime should be about %s seconds!"
        LOG.info(msg % str(int(self._runtime)))

    def _calculate_storage_requirement(self):
        """
        Calculate the storage requirement for the simulator, configured with
        models, monitors, etc being run for a particular simulation length. 
        While this is only approximate, it is far more reliable/accurate than
        the memory and runtime guesstimates.
        """
        LOG.info("Calculating storage requirement for ...")
        strgreq = 0
        for monitor in self.monitors:
            # Avoid division by zero for monitor not yet configured
            # (in framework this is executed, when only preconfigure has been called):
            current_period = monitor.period or self.integrator.dt
            strgreq += (TvbProfile.current.MAGIC_NUMBER *
                        self.simulation_length * self.number_of_nodes *
                        self.model.nvar * self.model.number_of_modes /
                        current_period)
        LOG.info("Calculated storage requirement for simulation: %d " %
                 int(strgreq))
        self._storage_requirement = int(strgreq)
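
A quick back-of-envelope check of the per-monitor storage formula in
_calculate_storage_requirement above. This is only a sketch: MAGIC_NUMBER is
framework-defined (TvbProfile.current.MAGIC_NUMBER), so the value used here is
a placeholder, and the other numbers are assumed example settings.

    # Hypothetical numbers, not authoritative TVB defaults for every attribute.
    MAGIC_NUMBER = 2.42           # placeholder; the real value comes from TvbProfile
    simulation_length = 1000.0    # ms
    number_of_nodes = 76          # region-level simulation
    nvar, nmodes = 2, 1           # model state variables and modes
    period = 0.9765625            # ms, the default Monitor sampling period

    strgreq = (MAGIC_NUMBER * simulation_length * number_of_nodes
               * nvar * nmodes / period)
    print("storage guess: %d" % int(strgreq))   # one such term per monitor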
Beispiel #20
0
class Monitor(core.Type):
    """
    Abstract base class for monitor implementations.

    """

    # list of class names not shown in UI
    _base_classes = ['Monitor', 'Projection', 'ProgressLogger']

    period = basic.Float(
        label = "Sampling period (ms)", order=10,
        default = 0.9765625,  # ms; 0.9765625 ms => 1024 Hz, 0.5 ms => 2000 Hz
        doc = """Sampling period in milliseconds, must be an integral multiple
        of integration-step size. As a guide: 2048 Hz => 0.48828125 ms ;  
        1024 Hz => 0.9765625 ms ; 512 Hz => 1.953125 ms.""")

    variables_of_interest = arrays.IntegerArray(
        label = "Model variables to watch", order=11,
        doc = ("Indices of model's variables of interest (VOI) that this monitor should record. "
               "Note that the indices should start at zero, so that if a model offers VOIs V, W and "
               "V+W, and W is selected, and this monitor should record W, then the correct index is 0.")
        #order = -1
    )

    istep = None
    dt = None
    voi = None
    _stock = numpy.empty([])

    def __str__(self):
        clsname = self.__class__.__name__
        return '%s(period=%f, voi=%s)' % (clsname, self.period, self.variables_of_interest.tolist())

    def config_for_sim(self, simulator):
        """Configure monitor for given simulator.

        Grab the Simulator's integration step size. Set the monitor's variables
        of interest based on the Monitor's 'variables_of_interest' attribute, if
        it was specified, otherwise use the 'variables_of_interest' specified 
        for the Model. Calculate the number of integration steps (isteps)
        between returns by the record method. This method is called from within
        the Simulator's configure() method.

        """
        self.dt = simulator.integrator.dt
        self.istep = iround(self.period / self.dt)
        self.voi = self.variables_of_interest
        if self.voi is None or self.voi.size == 0:
            self.voi = numpy.r_[:len(simulator.model.variables_of_interest)]

    def record(self, step, observed):
        """Record a sample of the observed state at given step.

        This is a final method called by the simulator to obtain samples from a
        monitor instance. Monitor subclasses should not override this method, but
        rather implement the `sample` method.

        """

        return self.sample(step, observed)

    def sample(self, step, state):
        """
        This method provides monitor output, and should be overridden by subclasses.

        """

        raise NotImplementedError(
            "The Monitor base class does not provide any observation model and "
            "should be subclasses with an implementation of the `sample` method.")

    def create_time_series(self, storage_path, connectivity=None, surface=None,
                           region_map=None, region_volume_map=None):
        """
        Create a time series instance that will be populated by this monitor.
        :param surface: if present, a TimeSeriesSurface is returned
        :param connectivity: if present, a TimeSeriesRegion is returned
        Otherwise a plain TimeSeries will be returned.
        """
        if surface is not None:
            return TimeSeriesSurface(storage_path=storage_path,
                                     surface=surface,
                                     sample_period=self.period,
                                     title='Surface ' + self.__class__.__name__,
                                     **self._transform_user_tags())
        if connectivity is not None:
            return TimeSeriesRegion(storage_path=storage_path,
                                    connectivity=connectivity,
                                    region_mapping=region_map,
                                    region_mapping_volume=region_volume_map,
                                    sample_period=self.period,
                                    title='Regions ' + self.__class__.__name__,
                                    **self._transform_user_tags())

        return TimeSeries(storage_path=storage_path,
                          sample_period=self.period,
                          title=' ' + self.__class__.__name__,
                          **self._transform_user_tags())

    def _transform_user_tags(self):
        return {}
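
To make the record/sample contract above concrete, here is a minimal sketch of
a Monitor subclass that emits the instantaneous state once per sampling
period. It is an illustration in the spirit of TVB's simple monitors, not a
class from the library; the name InstantSample is hypothetical.

    class InstantSample(Monitor):
        """Hypothetical monitor: return the raw state every `istep` steps."""

        def sample(self, step, state):
            # istep, dt and voi are set by Monitor.config_for_sim().
            if step % self.istep == 0:
                time = step * self.dt
                return [time, state[self.voi]]
            # Returning None means "no sample at this integration step".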
Beispiel #21
0
class Bold(Monitor):
    """

    Base class for the Bold monitor.

    **Attributes**

        hrf_kernel: the haemodynamic response function (HRF) used to compute
                    the BOLD (Blood Oxygenation Level Dependent) signal.

        hrf_length: duration of the HRF kernel in milliseconds.

        period    : the monitor's sampling period

    **References**:

    .. [B_1997] Buxton, R. and Frank, L., *A Model for the Coupling between
        Cerebral Blood Flow and Oxygen Metabolism During Neural Stimulation*,
        17:64-72, 1997.

    .. [Fr_2000] Friston, K., Mechelli, A., Turner, R., and Price, C., *Nonlinear
        Responses in fMRI: The Balloon Model, Volterra Kernels, and Other
        Hemodynamics*, NeuroImage, 12, 466 - 477, 2000.

    .. [Bo_1996] Geoffrey M. Boynton, Stephen A. Engel, Gary H. Glover and David
        J. Heeger (1996). Linear Systems Analysis of Functional Magnetic Resonance
        Imaging in Human V1. J Neurosci 16: 4207-4221

    .. [Po_2000] Alex Polonsky, Randolph Blake, Jochen Braun and David J. Heeger
        (2000). Neuronal activity in human primary visual cortex correlates with
        perception during binocular rivalry. Nature Neuroscience 3: 1153-1159

    .. [Gl_1999] Glover, G. *Deconvolution of Impulse Response in Event-Related BOLD fMRI*.
        NeuroImage 9, 416-429, 1999.

    .. note:: gamma and polonsky are based on the nitime implementation
              http://nipy.org/nitime/api/generated/nitime.fmri.hrf.html

    .. note:: see Tutorial_Exploring_The_Bold_Monitor

    """
    _ui_name = "BOLD"

    period = basic.Float(
        label = "Sampling period (ms)",
        default = 2000.0,
        doc = """For the BOLD monitor, sampling period in milliseconds must be
        an integral multiple of 500. Typical measurment interval (repetition
        time TR) is between 1-3 s. If TR is 2s, then Bold period is 2000ms.""")

    hrf_kernel = equations.HRFKernelEquation(
        label = "Haemodynamic Response Function",
        default = equations.FirstOrderVolterra,
        required = True,
        doc = """A tvb.datatypes.equation object which describe the haemodynamic
        response function used to compute the BOLD signal.""")

    hrf_length = basic.Float(
        label = "Duration (ms)",
        default = 20000.,
        doc= """Duration of the hrf kernel""",
        order=-1)

    _interim_period = None
    _interim_istep = None
    _interim_stock = None
    _stock_steps = None
    _stock_time = None
    _stock_sample_rate = 2 ** -2
    hemodynamic_response_function = None

    def compute_hrf(self):
        """
        Compute the hemodynamic response function.

        """
        self._stock_sample_rate = 2.0**-2 #/ms    # NOTE: An integral multiple of dt
        magic_number = self.hrf_length #* 0.8      # truncates G, volterra kernel, once ~zero
        #Length of history needed for convolution in steps @ _stock_sample_rate
        required_history_length = self._stock_sample_rate * magic_number # 3840 for tau_s=0.8
        self._stock_steps = numpy.ceil(required_history_length).astype(int)
        stock_time_max    = magic_number/1000.0                                # [s]
        stock_time_step   = stock_time_max / self._stock_steps                 # [s]
        self._stock_time  = numpy.arange(0.0, stock_time_max, stock_time_step) # [s]
        LOG.debug("Bold requires %d steps for HRF kernel convolution", self._stock_steps)
        #Compute the HRF kernel
        self.hrf_kernel.pattern = self._stock_time
        G = self.hrf_kernel.pattern
        #Reverse it, need it into the past for matrix-multiply of stock
        G = G[::-1]
        self.hemodynamic_response_function = G[numpy.newaxis, :]
        #Interim stock configuration
        self._interim_period = 1.0 / self._stock_sample_rate #period in ms
        self._interim_istep = int(round(self._interim_period / self.dt)) # interim period in integration time steps
        LOG.debug('Bold HRF shape %s, interim period & istep %d & %d',
                  self.hemodynamic_response_function.shape, self._interim_period, self._interim_istep)

    def config_for_sim(self, simulator):
        super(Bold, self).config_for_sim(simulator)
        self.compute_hrf()
        sample_shape = self.voi.shape[0], simulator.number_of_nodes, simulator.model.number_of_modes
        self._interim_stock = numpy.zeros((self._interim_istep,) + sample_shape)
        LOG.debug("BOLD inner buffer %s %.2f MB" % (
            self._interim_stock.shape, self._interim_stock.nbytes/2**20))
        self._stock = numpy.zeros((self._stock_steps,) + sample_shape)
        LOG.debug("BOLD outer buffer %s %.2f MB" % (
            self._stock.shape, self._stock.nbytes/2**20))


    def sample(self, step, state):
        # Update the interim-stock at every step
        self._interim_stock[((step % self._interim_istep) - 1), :] = state[self.voi, :]
        # At stock's period update it with the temporal average of interim-stock
        if step % self._interim_istep == 0:
            avg_interim_stock = numpy.mean(self._interim_stock, axis=0)
            self._stock[((step/self._interim_istep % self._stock_steps) - 1), :] = avg_interim_stock
        # At the monitor's period, apply the haemodynamic response function to
        # the stock and return the resulting BOLD signal.
        if step % self.istep == 0:
            time = step * self.dt
            hrf = numpy.roll(self.hemodynamic_response_function,
                             ((step/self._interim_istep % self._stock_steps) - 1),
                             axis=1)
            if isinstance(self.hrf_kernel, equations.FirstOrderVolterra):
                k1_V0 = self.hrf_kernel.parameters["k_1"] * self.hrf_kernel.parameters["V_0"]
                bold = (numpy.dot(hrf, self._stock.transpose((1, 2, 0, 3))) - 1.0) * k1_V0
            else:
                bold = numpy.dot(hrf, self._stock.transpose((1, 2, 0, 3)))
            bold = bold.reshape(self._stock.shape[1:])
            return [time, bold]
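
The sizes of the two buffers set up in compute_hrf() and config_for_sim()
follow directly from the arithmetic above. A minimal sketch, assuming a
0.0625 ms integration step (the other numbers are the class defaults):

    import numpy

    dt = 0.0625                    # ms, assumed integration step
    hrf_length = 20000.0           # ms, default kernel duration
    stock_sample_rate = 2.0 ** -2  # 1/ms, i.e. one stock sample every 4 ms

    stock_steps = int(numpy.ceil(stock_sample_rate * hrf_length))  # 5000
    interim_period = 1.0 / stock_sample_rate                       # 4.0 ms
    interim_istep = int(round(interim_period / dt))                # 64 steps
    print(stock_steps, interim_istep)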
Beispiel #22
0
class EEG(Projection):
    """
    Forward solution monitor for electroencephalography (EEG). If a
    precomputed lead field is not available, a single-sphere analytic
    formula due to Sarvas 1987 is used.

    **References**:

    .. [Sarvas_1987] Sarvas, J., *Basic mathematical and electromagnetic
        concepts of the biomagnetic inverse problem*, Physics in Medicine and
        Biology, 1987.

    """
    _ui_name = "EEG"

    projection = ProjectionSurfaceEEG(
        default=None, label='Projection matrix', order=2,
        doc='Projection matrix to apply to sources.')

    reference = basic.String(required=False, label="EEG Reference", order=5,
                             doc='EEG Electrode to be used as reference, or "average" to '
                                 'apply an average reference. If none is provided, the '
                                 'produced time-series are idealized or reference-free.')

    sensors = SensorsEEG(required=True, label="EEG Sensors", order=1,
                         doc='Sensors to use for this EEG monitor')

    sigma = basic.Float(label="Conductivity (w/o projection)", default=1.0, order=4,
                        doc='When a projection matrix is not used, this provides '
                            'the value of conductivity in the formula for the single '
                            'sphere approximation of the head (Sarvas 1987).')


    @classmethod
    def from_file(cls, sensors_fname='eeg_brainstorm_65.txt', projection_fname='projection_eeg_65_surface_16k.npy', **kwargs):
        return Projection.from_file.im_func(cls, sensors_fname, projection_fname, **kwargs)

    def config_for_sim(self, simulator):
        super(EEG, self).config_for_sim(simulator)
        self._ref_vec = numpy.zeros((self.sensors.number_of_sensors, ))
        if self.reference:
            if self.reference.lower() != 'average':
                sensor_names = self.sensors.labels.tolist()
                self._ref_vec[sensor_names.index(self.reference)] = 1.0
            else:
                self._ref_vec[:] = 1.0 / self.sensors.number_of_sensors
        self._ref_vec_mask = numpy.isfinite(self.gain).all(axis=1)
        self._ref_vec = self._ref_vec[self._ref_vec_mask]

    def analytic(self, loc, ori):
        "Equation 12 of [Sarvas_1987]_"
        # r   => sensor positions
        # r_0 => source positions
        # a   => vectors from sources to sensor
        # Q   => source unit vectors
        r_0, Q = loc, ori
        center = numpy.mean(r_0, axis=0)[numpy.newaxis, ]
        radius = 1.05125 * max(numpy.sqrt(numpy.sum((r_0 - center)**2, axis=1)))
        loc = self.sensors.locations.copy()
        sen_dis = numpy.sqrt(numpy.sum((loc)**2, axis=1))
        loc = loc / sen_dis[:, numpy.newaxis] * radius + center
        V_r = numpy.zeros((loc.shape[0], r_0.shape[0]))
        for sensor_k in numpy.arange(loc.shape[0]):
            a = loc[sensor_k, :] - r_0
            na = numpy.sqrt(numpy.sum(a**2, axis=1))[:, numpy.newaxis]
            V_r[sensor_k, :] = numpy.sum(Q * (a / na**3), axis=1 ) / (4.0 * numpy.pi * self.sigma)
        return V_r

    def sample(self, step, state):
        maybe_sample = super(EEG, self).sample(step, state)
        if maybe_sample is not None:
            time, sample = maybe_sample
            sample -= self._ref_vec.dot(sample[:, self._ref_vec_mask])[:, numpy.newaxis]
            return time, sample.reshape((state.shape[0], -1, 1))

    def create_time_series(self, storage_path, connectivity=None, surface=None,
                           region_map=None, region_volume_map=None):
        return TimeSeriesEEG(storage_path=storage_path,
                             sensors=self.sensors,
                             sample_period=self.period,
                             title=' ' + self.__class__.__name__,
                             **self._transform_user_tags())
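
The core of EEG.analytic() is the potential formula from Sarvas (1987). A
standalone sketch of that inner computation on toy data; the helper name
point_potential and all the numbers are hypothetical, for illustration only:

    import numpy

    def point_potential(sensor_pos, source_pos, source_ori, sigma=1.0):
        # V = sum_i Q_i . a_i / |a_i|^3 / (4 pi sigma), with a_i = r - r0_i
        a = sensor_pos - source_pos
        na = numpy.sqrt(numpy.sum(a ** 2, axis=1))[:, numpy.newaxis]
        return numpy.sum(source_ori * a / na ** 3, axis=1) / (4.0 * numpy.pi * sigma)

    # Toy data: three radial dipoles below one sensor at (0, 0, 100) mm.
    r0 = numpy.array([[0., 0., 60.], [10., 0., 55.], [0., 10., 55.]])
    Q = numpy.array([[0., 0., 1.]] * 3)
    print(point_potential(numpy.array([0., 0., 100.]), r0, Q))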
Beispiel #23
0
class BalloonModel(core.Type):
    """

    A class for calculating the simulated BOLD signal given a TimeSeries
    object of TVB and returning another TimeSeries object.

    The haemodynamic model parameters are based on constants for a 1.5 T scanner.
        
    """

    #NOTE: a potential problem when the input is a TimeSeriesSurface.
    #TODO: add a spatial averaging step for TimeSeriesSurface.

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries that represents the input neural activity""",
        order=1)
    # It also sets the BOLD sampling period.
    dt = basic.Float(
        label=":math:`dt`",
        default=0.002,
        required=True,
        doc="""The integration time step size for the balloon model (s).
        If none is provided, by default, the TimeSeries sample period is used.""",
        order=2)

    integrator = integrators_module.Integrator(
        label="Integration scheme",
        default=integrators_module.HeunDeterministic,
        required=True,
        order=-1,
        doc=""" A tvb.simulator.Integrator object which is
        an integration scheme with supporting attributes such as 
        integration step size and noise specification for stochastic 
        methods. It is used to compute the time courses of the balloon model state 
        variables.""")

    bold_model = basic.Enumerate(
        label="Select BOLD model equations",
        options=["linear", "nonlinear"],
        default=["nonlinear"],
        select_multiple=False,
        doc="""Select the set of equations for the BOLD model.""",
        order=4)

    RBM = basic.Bool(
        label="Revised BOLD Model",
        default=True,
        required=True,
        doc="""Select classical vs revised BOLD model (CBM or RBM).
        Coefficients  k1, k2 and k3 will be derived accordingly.""",
        order=5)

    neural_input_transformation = basic.Enumerate(
        label="Neural input transformation",
        options=["none", "abs_diff", "sum"],
        default=["none"],
        select_multiple=False,
        doc="""The operation to perform on the state-variable(s) of the model
        used to generate the input TimeSeries. ``none`` takes the first
        state-variable as neural input; ``abs_diff`` is the absolute value of
        the derivative (first order difference) of the first state-variable;
        ``sum`` sums all the state-variables of the input TimeSeries.""",
        order=3)

    tau_s = basic.Float(
        label=r":math:`\tau_s`",
        default=0.65,
        required=True,
        doc="""Balloon model parameter. Time of signal decay (s)""",
        order=-1)

    tau_f = basic.Float(
        label=r":math:`\tau_f`",
        default=0.41,
        required=True,
        doc=""" Balloon model parameter. Time of flow-dependent elimination or
        feedback regulation (s). The average  time blood take to traverse the
        venous compartment. It is the  ratio of resting blood volume (V0) to
        resting blood flow (F0).""",
        order=-1)

    tau_o = basic.Float(label=r":math:`\tau_o`",
                        default=0.98,
                        required=True,
                        doc="""
        Balloon model parameter. Haemodynamic transit time (s). The average
        time blood takes to traverse the venous compartment. It is the ratio
        of resting blood volume (V0) to resting blood flow (F0).""",
                        order=-1)

    alpha = basic.Float(
        label=r":math:`\tau_f`",
        default=0.32,
        required=True,
        doc=
        """Balloon model parameter. Stiffness parameter. Grubb's exponent.""",
        order=-1)

    TE = basic.Float(label=":math:`TE`",
                     default=0.04,
                     required=True,
                     doc="""BOLD parameter. Echo Time""",
                     order=-1)

    V0 = basic.Float(label=":math:`V_0`",
                     default=4.0,
                     required=True,
                     doc="""BOLD parameter. Resting blood volume fraction.""",
                     order=-1)

    E0 = basic.Float(
        label=":math:`E_0`",
        default=0.4,
        required=True,
        doc="""BOLD parameter. Resting oxygen extraction fraction.""",
        order=-1)

    epsilon = arrays.FloatArray(
        label=":math:`\epsilon`",
        default=numpy.array([0.5]),
        range=basic.Range(lo=0.5, hi=2.0, step=0.25),
        required=True,
        doc=
        """ BOLD parameter. Ratio of intra- and extravascular signals. In principle  this
        parameter could be derived from empirical data and spatialized.""",
        order=-1)

    nu_0 = basic.Float(
        label=r":math:`\nu_0`",
        default=40.3,
        required=True,
        doc=
        """BOLD parameter. Frequency offset at the outer surface of magnetized vessels (Hz).""",
        order=-1)

    r_0 = basic.Float(
        label=":math:`r_0`",
        default=25.,
        required=True,
        doc=
        """ BOLD parameter. Slope r0 of intravascular relaxation rate (Hz). Only used for
        ``revised`` coefficients. """,
        order=-1)

    def evaluate(self):
        """
        Calculate simulated BOLD signal
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        #NOTE: Just using the first state variable, although in the Bold monitor
        #      input is the sum over the state-variables. Only time-series
        #      from basic monitors should be used as inputs.

        neural_activity, t_int = self.input_transformation(
            self.time_series, self.neural_input_transformation)
        input_shape = neural_activity.shape
        result_shape = self.result_shape(input_shape)
        LOG.debug("Result shape will be: %s" % str(result_shape))

        if self.dt is None:
            self.dt = self.time_series.sample_period / 1000.  # (s) integration time step
            msg = "Integration time step size for the balloon model is %s seconds" % str(
                self.dt)
            LOG.debug(msg)

        #NOTE: Avoid upsampling ...
        if self.dt < (self.time_series.sample_period / 1000.):
            msg = "Integration time step shouldn't be smaller than the sampling period of the input signal."
            LOG.error(msg)

        balloon_nvar = 4

        #NOTE: hard coded initial conditions
        state = numpy.zeros((input_shape[0], balloon_nvar, input_shape[2],
                             input_shape[3]))  # s
        state[0, 1, :] = 1.  # f
        state[0, 2, :] = 1.  # v
        state[0, 3, :] = 1.  # q

        # BOLD model coefficients
        k = self.compute_derived_parameters()
        k1, k2, k3 = k[0], k[1], k[2]

        # prepare integrator
        self.integrator.dt = self.dt
        self.integrator.configure()
        LOG.debug("Integration time step size will be: %s seconds" %
                  str(self.integrator.dt))

        scheme = self.integrator.scheme

        # NOTE: the following variables are not used in this integration but
        # required due to the way integrators scheme has been defined.

        local_coupling = 0.0
        stimulus = 0.0

        # Do some checks:
        if numpy.isnan(neural_activity).any():
            LOG.warning("NaNs detected in the neural activity!!")

        # normalise the time-series.
        neural_activity = neural_activity - neural_activity.mean(
            axis=0)[numpy.newaxis, :]

        # solve equations
        for step in range(1, t_int.shape[0]):
            state[step, :] = scheme(state[step - 1, :], self.balloon_dfun,
                                    neural_activity[step, :], local_coupling,
                                    stimulus)
            if numpy.isnan(state[step, :]).any():
                LOG.warning("NaNs detected...")

        # NOTE: just for the sake of clarity, define the variables used in the BOLD model
        s = state[:, 0, :]
        f = state[:, 1, :]
        v = state[:, 2, :]
        q = state[:, 3, :]

        #import pdb; pdb.set_trace()

        # BOLD models
        if self.bold_model == "nonlinear":
            """
            Non-linear BOLD model equations.
            Page 391. Eq. (13) top in [Stephan2007]_
            """
            y_bold = numpy.array(self.V0 * (k1 * (1. - q) + k2 *
                                            (1. - q / v) + k3 * (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]
            LOG.debug("Max value: %s" % str(y_b.max()))

        else:
            """
            Linear BOLD model equations.
            Page 391. Eq. (13) bottom in [Stephan2007]_ 
            """
            y_bold = numpy.array(self.V0 * ((k1 + k2) * (1. - q) + (k3 - k2) *
                                            (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]

        sample_period = self.dt  # (s); 1./dt would be the sampling rate, not the period

        bold_signal = time_series.TimeSeriesRegion(data=y_b,
                                                   time=t_int,
                                                   sample_period=sample_period,
                                                   sample_period_unit='s',
                                                   use_storage=False)

        return bold_signal

    def compute_derived_parameters(self):
        """
        Compute derived parameters :math:`k_1`, :math:`k_2` and :math:`k_3`.
        """

        if not self.RBM:
            """
            Classical BOLD Model Coefficients [Obata2004]_
            Page 389 in [Stephan2007]_, Eq. (3)
            """
            k1 = 7. * self.E0
            k2 = 2. * self.E0
            k3 = 1. - self.epsilon
        else:
            """
            Revised BOLD Model Coefficients.
            Generalized BOLD signal model.
            Page 400 in [Stephan2007]_, Eq. (12)
            """
            k1 = 4.3 * self.nu_0 * self.E0 * self.TE
            k2 = self.epsilon * self.r_0 * self.E0 * self.TE
            k3 = 1 - self.epsilon

        return numpy.array([k1, k2, k3])

    def input_transformation(self, time_series, mode):
        """
        Perform an operation on the input time-series.
        """

        LOG.debug("Computing: %s on the input time series" % str(mode))

        if mode == "none":
            ts = time_series.data[:, 0, :, :]
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        elif mode == "abs_diff":
            ts = abs(numpy.diff(time_series.data, axis=0))
            t_int = (time_series.time[1:] -
                     time_series.time[0:-1]) / 1000.  # (s)

        elif mode == "sum":
            ts = numpy.sum(time_series.data, axis=1)
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        else:
            LOG.error("Bad operation/transformation mode, must be one of:")
            LOG.error("('abs_diff', 'sum', 'none')")
            raise Exception("Bad transformation mode")

        return ts, t_int

    def balloon_dfun(self, state_variables, neural_input, local_coupling=0.0):
        r"""
        The Balloon model equations. See Eqs. (4-10) in [Stephan2007]_
        .. math::
                \frac{ds}{dt} &= x - \kappa\,s - \gamma \,(f-1) \\
                \frac{df}{dt} &= s \\
                \frac{dv}{dt} &= \frac{1}{\tau_o} \, (f - v^{1/\alpha})\\
                \frac{dq}{dt} &= \frac{1}{\tau_o}(f \, \frac{1-(1-E_0)^{1/\alpha}}{E_0} - v^{&/\alpha} \frac{q}{v})\\
                \kappa &= \frac{1}{\tau_s}\\
                \gamma &= \frac{1}{\tau_f}
        """

        s = state_variables[0, :]
        f = state_variables[1, :]
        v = state_variables[2, :]
        q = state_variables[3, :]

        x = neural_input[0, :]

        ds = x - (1. / self.tau_s) * s - (1. / self.tau_f) * (f - 1)
        df = s
        dv = (1. / self.tau_o) * (f - v**(1. / self.alpha))
        dq = (1. / self.tau_o) * ((f * (1. -
                                        (1. - self.E0)**(1. / f)) / self.E0) -
                                  (v**(1. / self.alpha)) * (q / v))

        return numpy.array([ds, df, dv, dq])

    def result_shape(self, input_shape):
        """Returns the shape of the main result of fmri balloon ..."""
        result_shape = (input_shape[0], input_shape[1], input_shape[2],
                        input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result.
        """
        result_size = numpy.prod(
            self.result_shape(input_shape)) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result, i.e. including
        storage of any evaluated derived attributes.
        """
        extend_size = self.result_size(
            input_shape)  # Currently no derived attributes.
        return extend_size
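
The revised (RBM) coefficients in compute_derived_parameters() can be checked
by hand with the class defaults; a minimal sketch of that arithmetic:

    # Class defaults: nu_0=40.3 Hz, E0=0.4, TE=0.04 s, r_0=25 Hz, epsilon=0.5
    nu_0, E0, TE, r_0, epsilon = 40.3, 0.4, 0.04, 25.0, 0.5
    k1 = 4.3 * nu_0 * E0 * TE     # ~2.773
    k2 = epsilon * r_0 * E0 * TE  # 0.2
    k3 = 1.0 - epsilon            # 0.5
    print(k1, k2, k3)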
Beispiel #24
0
class NodeComplexCoherence(core.Type):
    """
    A class for calculating the FFT of a TimeSeries and returning
    a ComplexCoherenceSpectrum datatype.

    This algorithm is based on the matlab function data2cs_event.m written by Guido Nolte:
        .. [Freyer_2012] Freyer, F.; Reinacher, M.; Nolte, G.; Dinse, H. R. and
            Ritter, P. *Repetitive tactile stimulation changes resting-state
            functional connectivity-implications for treatment of sensorimotor decline*.
            Front Hum Neurosci, Bernstein Focus State Dependencies of Learning and
            Bernstein Center for Computational Neuroscience Berlin, Germany., 2012, 6, 144

    Input:
    Originally the input could be 2D (tpts x nodes/channels), and it was possible
    to give a 3D array (e.g., tpts x nodes/channels x trials) via the segment_length
    attribute.
    The current TVB implementation can handle 4D or 2D TimeSeries datatypes.
    Be warned: a 4D TimeSeries will be averaged and squeezed.

    Output: (main arrays)
    - the cross-spectrum
    - the complex coherence, from which the imaginary part can be extracted

    By default the time series is segmented into 1 second `epoch` blocks and 0.5
    second 50% overlapping `segments` to which a Hanning function is applied.
    """

    time_series = TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries for which the CrossCoherence and ComplexCoherence
        is to be computed.""")

    epoch_length = basic.Float(
        label="Epoch length [ms]",
        default=1000.0,
        order=-1,
        required=False,
        doc="""In general for lengthy EEG recordings (~30 min), the timeseries 
        are divided into equally sized segments (~ 20-40s). These contain the 
        event that is to be characterized by means of the cross coherence. 
        Additionally each epoch block will be further divided into segments to 
        which the FFT will be applied.""")

    segment_length = basic.Float(
        label="Segment length [ms]",
        default=500.0,
        order=-1,
        required=False,
        doc="""The timeseries can be segmented into equally sized blocks
            (overlapping if necessary). The segement length determines the
            frequency resolution of the resulting power spectra -- longer
            windows produce finer frequency resolution. """)

    segment_shift = basic.Float(
        label="Segment shift [ms]",
        default=250.0,
        required=False,
        order=-1,
        doc="""Time length by which neighboring segments are shifted. e.g.
                `segment shift` = `segment_length` / 2 means 50% overlapping 
                segments.""")

    window_function = basic.String(
        label="Windowing function",
        default='hanning',
        required=False,
        order=-1,
        doc="""Windowing functions can be applied before the FFT is performed.
             Default is `hanning`, possibilities are: 'hamming'; 'bartlett';
            'blackman'; and 'hanning'. See, numpy.<function_name>.""")

    average_segments = basic.Bool(
        label="Average across segments",
        default=True,
        required=False,
        order=-1,
        doc="""Flag. If `True`, compute the mean Cross Spectrum across 
                segments.""")

    subtract_epoch_average = basic.Bool(
        label="Subtract average across epochs",
        default=True,
        required=False,
        order=-1,
        doc="""Flag. If `True` and if the number of epochs is > 1, you can 
                optionally subtract the mean across epochs before computing the 
                complex coherence.""")

    zeropad = basic.Integer(
        label="Zeropadding",
        default=0,
        required=False,
        order=-1,
        doc="""Adds `n` zeros at the end of each segment and at the end 
        of window_function. It is not yet functional.""")

    detrend_ts = basic.Bool(
        label="Detrend time series",
        default=False,
        required=False,
        order=-1,
        doc="""Flag. If `True` removes linear trend along the time dimension 
                before applying FFT.""")

    max_freq = basic.Float(
        label="Maximum frequency",
        default=1024.0,
        order=-1,
        required=False,
        doc="""Maximum frequency points (e.g. 32., 64., 128.) represented in 
                the output. Default is segment_length / 2 + 1.""")

    npat = basic.Float(
        label="dummy variable",
        default=1.0,
        required=False,
        order=-1,
        doc="""This attribute appears to be related to an input projection 
            matrix... Which is not yet implemented""")

    def evaluate(self):
        """
        Calculate the FFT, Cross Coherence and Complex Coherence of time_series 
        broken into (possibly) epochs and segments of length `epoch_length` and 
        `segment_length` respectively, filtered by `window_function`.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)
        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        if len(self.time_series.data.shape) > 2:
            time_series_data = numpy.squeeze(
                (self.time_series.data.mean(axis=-1)).mean(axis=1))
        else:
            time_series_data = self.time_series.data

        #nchan = time_series_data.shape[1]

        #NOTE: if we get a projection matrix ... then ...
        #if self.npat > 1:
        #    data = data * proj
        #    nchan = self.npat

        #Divide time-series into epochs, no overlapping
        if self.epoch_length > 0.0:
            nepochs = int(numpy.floor(time_series_length / self.epoch_length))
            epoch_tpts = int(self.epoch_length / self.time_series.sample_period)
            time_series_length = self.epoch_length
            tpts = epoch_tpts
        else:
            self.epoch_length = time_series_length
            nepochs = int(numpy.ceil(time_series_length / self.epoch_length))

        #Segment time-series, overlapping if necessary
        nseg = int(numpy.floor(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(self.segment_length / self.time_series.sample_period)
            seg_shift_tpts = int(self.segment_shift / self.time_series.sample_period)
            nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)
        else:
            self.segment_length = time_series_length
            seg_tpts = time_series_data.shape[0]

        # Frequency vectors
        freqs = numpy.fft.fftfreq(int(seg_tpts))
        nfreq = int(numpy.min(
            [self.max_freq,
             numpy.floor((seg_tpts + self.zeropad) / 2.0) + 1]))
        freqs = freqs[0:nfreq, ] * (1.0 / self.time_series.sample_period)

        result_shape, av_result_shape = self.result_shape(
            self.time_series.data.shape, self.max_freq, self.epoch_length,
            self.segment_length, self.segment_shift,
            self.time_series.sample_period, self.zeropad,
            self.average_segments)

        cs = numpy.zeros(result_shape, dtype=numpy.complex128)
        av = numpy.matrix(numpy.zeros(av_result_shape, dtype=numpy.complex128))
        coh = numpy.zeros(result_shape, dtype=numpy.complex128)

        # NOTE: result for individual epochs are kept only if npat > 1. Skipping ...
        #if self.npat > 1:
        #    if not self.average_segments:
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #        av = numpy.zeros((nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #    else:
        #        av = numpy.zeros((nchan, nfreq, nepochs), dtype=numpy.complex128)
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs), dtype=numpy.complex128)

        #Apply windowing function
        if self.window_function is not None:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                LOG.error("Windowing function is: %s" % self.window_function)
                LOG.error("Must be in: %s" %
                          str(SUPPORTED_WINDOWING_FUNCTIONS))

            window_function = getattr(numpy, self.window_function)
            win = window_function(seg_tpts)
            window_mask = (numpy.kron(
                numpy.ones((time_series_data.shape[1], 1)), win)).T

        nave = 0

        for j in numpy.arange(nepochs):
            data = time_series_data[j * epoch_tpts:(j + 1) * epoch_tpts, :]

            for i in numpy.arange(nseg):  #average over all segments;
                time_series = data[i * seg_shift_tpts:i * seg_shift_tpts +
                                   seg_tpts, :]

                if self.detrend_ts:
                    time_series = sp_signal.detrend(time_series, axis=0)

                datalocfft = numpy.fft.fft(time_series * window_mask, axis=0)
                datalocfft = numpy.matrix(datalocfft)

                for f in numpy.arange(nfreq):  # for all frequencies
                    if self.npat == 1:
                        if not self.average_segments:
                            cs[:, :, f, i] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f, i] += numpy.conjugate(
                                datalocfft[f, :].conj().T)
                        else:
                            cs[:, :, f] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f] += numpy.conjugate(
                                datalocfft[f, :].conj().T)
                    else:
                        if not self.average_segments:
                            cs[:, :, f, j, i] = numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f, j, i] = numpy.conjugate(
                                datalocfft[f, :].conj().T)
                        else:
                            cs[:, :, f, j] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f, j] += numpy.conjugate(
                                datalocfft[f, :].conj().T)
                del datalocfft

            nave += 1.0

        # End of FORs
        if not self.average_segments:
            cs = cs / nave
            av = av / nave
        else:
            nave = nave * nseg
            cs = cs / nave
            av = av / nave

        # Subtract average
        for f in numpy.arange(nfreq):
            if self.subtract_epoch_average:
                if self.npat == 1:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            cs[:, :, f, i] -= av[:, f, i] * av[:, f, i].conj().T
                    else:
                        cs[:, :, f] -= av[:, f] * av[:, f].conj().T
                else:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            for j in numpy.arange(nepochs):
                                cs[:, :, f, j, i] -= (av[:, f, j, i] *
                                                      av[:, f, j, i].conj().T)
                    else:
                        for j in numpy.arange(nepochs):
                            cs[:, :, f, j] -= av[:, f, j] * av[:, f, j].conj().T

        #Compute Complex Coherence
        ndim = len(cs.shape)
        if ndim == 3:
            for i in numpy.arange(cs.shape[2]):
                temp = numpy.matrix(cs[:, :, i])
                coh[:, :, i] = cs[:, :, i] / numpy.sqrt(
                    (temp.diagonal().conj().T) * temp.diagonal())

        elif ndim == 4:
            for i in numpy.arange(cs.shape[2]):
                for j in numpy.arange(cs.shape[3]):
                    temp = numpy.matrix(numpy.squeeze(cs[:, :, i, j]))
                    coh[:, :, i, j] = temp / numpy.sqrt(
                        (temp.diagonal().conj().T) * temp.diagonal().T)

        util.log_debug_array(LOG, cs, "result")
        spectra = spectral.ComplexCoherenceSpectrum(
            source=self.time_series,
            array_data=coh,
            cross_spectrum=cs,
            #                              frequency = freqs,
            epoch_length=self.epoch_length,
            segment_length=self.segment_length,
            windowing_function=self.window_function,
            #                             fft_points = seg_tpts,
            use_storage=False)
        return spectra

    @staticmethod
    def result_shape(input_shape, max_freq, epoch_length, segment_length,
                     segment_shift, sample_period, zeropad, average_segments):
        """
        Returns the shape of the main result and the average over epochs
        """
        # this is useless here unless the input could actually be a 2D timeseries
        nchan = numpy.where(
            len(input_shape) > 2, input_shape[2], input_shape[1])
        seg_tpts = segment_length / sample_period
        seg_shift_tpts = segment_shift / sample_period
        tpts = numpy.where(epoch_length > 0.0, epoch_length / sample_period,
                           input_shape[0])
        nfreq = numpy.min(
            [max_freq, numpy.floor((seg_tpts + zeropad) / 2.0) + 1])
        #nep   = int(numpy.floor(input_shape[0] / epoch_length))
        nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)

        if not average_segments:
            result_shape = (nchan, nchan, nfreq, nseg)
            av_result_shape = (nchan, nfreq, nseg)
        else:
            result_shape = (nchan, nchan, nfreq)
            av_result_shape = (nchan, nfreq)

        return [result_shape, av_result_shape]

    def result_size(self, input_shape, max_freq, epoch_length, segment_length,
                    segment_shift, sample_period, zeropad, average_segments):
        """
        Returns the storage size in Bytes of the main result (complex array) of 
        the ComplexCoherence
        """
        result_size = numpy.prod(
            self.result_shape(input_shape, max_freq, epoch_length,
                              segment_length, segment_shift, sample_period,
                              zeropad,
                              average_segments)[0]) * 2.0 * 8.0  #complex*Bytes
        return result_size

    def extended_result_size(self, input_shape, max_freq, epoch_length,
                             segment_length, segment_shift, sample_period,
                             zeropad, average_segments):
        """
        Returns the storage size in Bytes of the extended result of the
        ComplexCoherence, i.e. including storage of the evaluated attributes
        such as the frequency vector and the epoch and segment lengths.
        """
        result_shape = self.result_shape(input_shape, max_freq, epoch_length,
                                         segment_length, segment_shift,
                                         sample_period, zeropad,
                                         average_segments)[0]
        result_size = self.result_size(input_shape, max_freq, epoch_length,
                                       segment_length, segment_shift,
                                       sample_period, zeropad,
                                       average_segments)
        extend_size = result_size * 2.0  #Main arrays: cross spectrum and complex coherence
        extend_size = extend_size + result_shape[2] * 8.0  #Frequency
        extend_size = extend_size + 8.0  # Epoch length
        extend_size = extend_size + 8.0  # Segment length
        return extend_size
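
A worked example of the result_shape arithmetic, using the class defaults and
an assumed 2 ms sample period (500 Hz); the input shape is hypothetical:

    input_shape = (4000, 1, 64, 1)  # (tpts, svar, nodes, modes), assumed
    shapes = NodeComplexCoherence.result_shape(
        input_shape, max_freq=1024.0, epoch_length=1000.0,
        segment_length=500.0, segment_shift=250.0,
        sample_period=2.0, zeropad=0, average_segments=True)
    # seg_tpts = 250, nfreq = min(1024, 250/2 + 1) = 126, so:
    # [(64, 64, 126), (64, 126)] -- one cross-spectrum per channel pair
    print(shapes)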
Beispiel #25
0
class LocalConnectivityData(MappedType):
    """
    A sparse matrix for representing the local connectivity within the Cortex.
    """
    _ui_name = "Local connectivity"

    surface = CorticalSurfaceData(label="Surface", order=1)

    matrix = SparseMatrix(order=-1)

    equation = equations.FiniteSupportEquation(
        label="Spatial",
        required=False,
        default=equations.Gaussian,
        order=2)

    cutoff = basic.Float(
        label="Cutoff distance (mm)",
        default=40.0,
        doc="Distance at which to truncate the evaluation in mm.",
        order=3)


    def compute(self):
        """
        Compute current Matrix.
        """
        LOG.info("Mapping geodesic distance through the LocalConnectivity.")

        #Start with data being geodesic_distance_matrix, then map it through equation
        self.equation.pattern = self.matrix_gdist.data

        #Then replace original data with result...
        self.matrix_gdist.data = self.equation.pattern

        #Homogenise spatial discretisation effects across the surface
        nv = self.matrix_gdist.shape[0]
        ind = numpy.arange(nv, dtype=int)
        pos_mask = self.matrix_gdist.data > 0.0
        neg_mask = self.matrix_gdist.data < 0.0
        pos_con = self.matrix_gdist.copy()
        neg_con = self.matrix_gdist.copy()
        pos_con.data[neg_mask] = 0.0
        neg_con.data[pos_mask] = 0.0
        pos_contrib = pos_con.sum(axis=1)
        pos_contrib = numpy.array(pos_contrib).squeeze()
        neg_contrib = neg_con.sum(axis=1)
        neg_contrib = numpy.array(neg_contrib).squeeze()
        pos_mean = pos_contrib.mean()
        neg_mean = neg_contrib.mean()
        if ((pos_mean != 0.0 and any(pos_contrib == 0.0)) or
                (neg_mean != 0.0 and any(neg_contrib == 0.0))):
            msg = "Cortical mesh is too coarse for requested LocalConnectivity."
            LOG.warning(msg)
            bad_verts = ()
            if pos_mean != 0.0:
                bad_verts = bad_verts + numpy.nonzero(pos_contrib == 0.0)
            if neg_mean != 0.0:
                bad_verts = bad_verts + numpy.nonzero(neg_contrib == 0.0)
            LOG.debug("Problem vertices are: %s" % str(bad_verts))
        pos_hf = numpy.zeros(shape=pos_contrib.shape)
        pos_hf[pos_contrib != 0] = pos_mean / pos_contrib[pos_contrib != 0]
        neg_hf = numpy.zeros(shape=neg_contrib.shape)
        neg_hf[neg_contrib != 0] = neg_mean / neg_contrib[neg_contrib != 0]
        pos_hf_diag = sparse.csc_matrix((pos_hf, (ind, ind)), shape=(nv, nv))
        neg_hf_diag = sparse.csc_matrix((neg_hf, (ind, ind)), shape=(nv, nv))
        homogenious_conn = (pos_hf_diag * pos_con) + (neg_hf_diag * neg_con)

        #Then replace unhomogenised result with the spatially homogeneous one...
        if not homogenious_conn.has_sorted_indices:
            homogenious_conn.sort_indices()

        self.matrix = homogenious_conn


    def _validate_before_store(self):
        """
        Overrides MappedType._validate_before_store to use a custom error for missing matrix.
        """
        # Sparse Matrix is required so we should check if there is any data stored for it
        if self.matrix is None:
            msg = ("LocalConnectivity can not be stored because it "
                   "has no SparseMatrix attached.")
            raise exceptions.ValidationException(msg)

        super(LocalConnectivityData, self)._validate_before_store()
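
The homogenisation step in compute() above rescales each vertex's positive and
negative contributions so that every vertex receives the same mean total
weight. A toy sketch of that row-normalisation idea on a small dense matrix
(dense only for clarity; the real code works on the sparse geodesic matrix):

    import numpy

    W = numpy.array([[0.0, 2.0, 1.0],
                     [0.5, 0.0, 0.5],
                     [3.0, 0.0, 0.0]])
    row_sums = W.sum(axis=1)            # per-vertex total contribution
    scale = row_sums.mean() / row_sums  # homogenising factor per vertex
    W_homog = W * scale[:, numpy.newaxis]
    print(W_homog.sum(axis=1))          # every row now sums to the mean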
Beispiel #26
0
class ContinuousWaveletTransform(core.Type):
    """
    A class for calculating the wavelet transform of a TimeSeries object of TVB
    and returning a WaveletSpectrum object. The sampling period and frequency
    range of the result can be specified. The mother wavelet can also be 
    specified... (So far, only Morlet.)
    
    References:
        .. [TBetal_1996] C. Tallon-Baudry et al, *Stimulus Specificity of 
            Phase-Locked and Non-Phase-Locked 40 Hz Visual Responses in Human.*,
            J Neurosci 16(13):4240-4249, 1996. 
        
        .. [Mallat_1999] S. Mallat, *A wavelet tour of signal processing.*,
            book, Academic Press, 1999.
        
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the wavelet is to be applied.""")

    mother = basic.String(
        label="Wavelet function",
        default="morlet",
        required=True,
        doc="""The mother wavelet function used in the transform. Default is
            'morlet', possibilities are: 'morlet'...""")

    sample_period = basic.Float(
        label="Sample period of result (ms)",
        default=7.8125,  #7.8125 => 128 Hz
        required=True,
        doc="""The sampling period of the computed wavelet spectrum. NOTE:
            This should be an integral multiple of the sampling period of the
            source time series, otherwise the actual resulting sample period
            will be the first correct value below that requested.""")

    frequencies = basic.Range(
        label="Frequency range of result (kHz).",
        default=basic.Range(lo=0.008, hi=0.060, step=0.002),
        required=True,
        doc="""The frequency resolution and range returned. Requested
            frequencies are converted internally into appropriate scales.""")

    normalisation = basic.String(
        label="Normalisation",
        default="energy",
        required=True,
        doc="""The type of normalisation for the resulting wavet spectrum.
            Default is 'energy', options are: 'energy'; 'gabor'.""")

    q_ratio = basic.Float(
        label="Q-ratio",
        default=5.0,
        required=True,
        doc="""Ratio of each wavelet's centre frequency to its bandwidth.
            Must be at least 5.""")

    def evaluate(self):
        """
        Calculate the continuous wavelet transform of time_series.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        ts_shape = self.time_series.data.shape

        if self.frequencies.step == 0:
            LOG.warning(
                "Frequency step can't be 0! Trying default step, 2e-3.")
            self.frequencies.step = 0.002

        freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                             self.frequencies.step)

        #TODO: Maybe should limit number of freqs... ~100 is probably a
        #      reasonable upper bound.
        if (freqs.size == 0) or any(freqs <= 0.0):
            LOG.warning("Invalid frequency range! Falling back to default.")
            util.log_debug_array(LOG, freqs, "freqs")
            self.frequencies = basic.Range(lo=0.008, hi=0.060, step=0.002)
            freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                                 self.frequencies.step)

        util.log_debug_array(LOG, freqs, "freqs")

        sample_rate = self.time_series.sample_rate

        # Duke: code below is as given by Andreas Spiegler, I've just wrapped
        # some of the original argument names
        nf = len(freqs)
        temporal_step = max(
            (1, iround(self.sample_period / self.time_series.sample_period)))
        nt = int(numpy.ceil(ts_shape[0] / temporal_step))

        if not isinstance(self.q_ratio, numpy.ndarray):
            Q_ratio = self.q_ratio * numpy.ones((1, nf))
        else:
            Q_ratio = self.q_ratio

        if numpy.nanmin(Q_ratio) < 5:
            msg = "Q_ratio must not be lower than 5!"
            LOG.error(msg)
            raise Exception(msg)

        if numpy.nanmax(freqs) > sample_rate / 2.0:
            msg = "Sampling rate is too low for the requested frequency range!"
            LOG.error(msg)
            raise Exception(msg)

        #TODO: This isn't used, but min frequency seems like it should be important... Check with A.S.
        #  fmin = 3.0 * numpy.nanmin(Q_ratio) * sample_rate / numpy.pi / nt
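        # Q_ratio fixes the wavelets' relative bandwidth: for each centre
        # frequency f0, the spectral width is sigma_f = f0 / Q, and the
        # matching temporal width follows from the Gaussian time-frequency
        # trade-off, sigma_t = 1 / (2 * pi * sigma_f).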
        sigma_f = freqs / Q_ratio
        sigma_t = 1.0 / (2.0 * numpy.pi * sigma_f)

        if self.normalisation == 'energy':
            Amp = 1.0 / numpy.sqrt(
                sample_rate * numpy.sqrt(numpy.pi) * sigma_t)
        elif self.normalisation == 'gabor':
            Amp = numpy.sqrt(2.0 / numpy.pi) / sample_rate / sigma_t
        else:
            msg = "Unsupported normalisation: %s" % self.normalisation
            LOG.error(msg)
            raise Exception(msg)

        coef_shape = (nf, nt, ts_shape[1], ts_shape[2], ts_shape[3])

        coef = numpy.zeros(coef_shape, dtype=numpy.complex128)
        util.log_debug_array(LOG, coef, "coef")
        scales = numpy.arange(0, nf, 1)
        for i in scales:
            f0 = freqs[i]
            SDt = sigma_t[(0, i)]
            A = Amp[(0, i)]
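            # Build half of a Morlet wavelet: a complex exponential at f0
            # under a Gaussian envelope of width SDt, sampled out to ~4
            # standard deviations, then mirrored (conjugated) so the final
            # kernel is symmetric about t = 0.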
            x = numpy.arange(0, 4.0 * SDt * sample_rate, 1) / sample_rate
            wvlt = A * numpy.exp(-x**2 / (2.0 * SDt**2)) * numpy.exp(
                2j * numpy.pi * f0 * x)
            wvlt = numpy.hstack((numpy.conjugate(wvlt[-1:0:-1]), wvlt))
            #util.log_debug_array(LOG, wvlt, "wvlt")

            for var in range(ts_shape[1]):
                for node in range(ts_shape[2]):
                    for mode in range(ts_shape[3]):
                        data = self.time_series.data[:, var, node, mode]
                        wt = signal.convolve(data, wvlt, 'same')
                        #util.log_debug_array(LOG, wt, "wt")
                        res = wt[0::temporal_step]
                        #NOTE: Quick hack -- truncate the convolution result
                        #      when the requested sample period is not an
                        #      integral multiple of the source's, so the
                        #      assignment below never broadcasts incorrectly.
                        coef[i, :, var, node, mode] = res if len(
                            res) == nt else res[:coef.shape[1]]

        util.log_debug_array(LOG, coef, "coef")

        spectra = spectral.WaveletCoefficients(
            source=self.time_series,
            mother=self.mother,
            sample_period=self.sample_period,
            frequencies=self.frequencies,
            normalisation=self.normalisation,
            q_ratio=self.q_ratio,
            array_data=coef,
            use_storage=False)

        return spectra

    def result_shape(self, input_shape):
        """
        Returns the shape of the main result (complex array) of the continuous
        wavelet transform.
        """
        freq_len = int((self.frequencies.hi - self.frequencies.lo) /
                       self.frequencies.step)
        temporal_step = max(
            (1, iround(self.sample_period / self.time_series.sample_period)))
        nt = int(numpy.ceil(input_shape[0] / temporal_step))
        result_shape = (
            freq_len,
            nt,
        ) + input_shape[1:]
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result (complex array) of
        the continuous wavelet transform.
        """
        result_size = numpy.prod(
            self.result_shape(input_shape)) * 2.0 * 8.0  #complex128 = 2 * 8 bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the 
        continuous wavelet transform.  That is, it includes storage of the
        evaluated WaveletCoefficients attributes such as power, phase, 
        amplitude, etc.
        """
        result_shape = self.result_shape(input_shape)
        result_size = self.result_size(input_shape)
        extend_size = result_size  #Main array
        extend_size = extend_size + 0.5 * result_size  #Amplitude
        extend_size = extend_size + 0.5 * result_size  #Phase
        extend_size = extend_size + 0.5 * result_size  #Power
        extend_size = extend_size + result_shape[0] * 8.0  #Frequency
        return extend_size
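
For orientation, a hypothetical usage sketch of the analyser above; `tsr` stands in for an existing TVB TimeSeries (with sample_period in ms) and is not defined in this listing:

# Hypothetical usage -- assumes the same `basic` module import as above.
cwt = ContinuousWaveletTransform(
    time_series=tsr,
    mother="morlet",
    sample_period=7.8125,  # => ~128 Hz output sampling
    frequencies=basic.Range(lo=0.008, hi=0.060, step=0.002),  # 8-60 Hz, in kHz
    normalisation="energy",
    q_ratio=5.0)
wavelet_spectrum = cwt.evaluate()  # a WaveletCoefficients instance
print(cwt.result_shape(tsr.data.shape))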
Beispiel #27
0
class FFT(core.Type):
    """
    A class for calculating the FFT of a TimeSeries object of TVB and returning
    a FourierSpectrum object. A segment length and windowing function can be
    optionally specified. By default the time series is segmented into 1 second
    blocks and no windowing function is applied.
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the FFT is to be applied.""")

    segment_length = basic.Float(
        label="Segment(window) length (ms)",
        default=1000.0,
        required=False,
        doc="""The timeseries can be segmented into equally sized blocks
            (overlapping if necessary). The segement length determines the
            frequency resolution of the resulting power spectra -- longer
            windows produce finer frequency resolution.""")

    window_function = basic.String(
        label="Windowing function",
        default=None,
        required=False,
        doc="""Windowing functions can be applied before the FFT is performed.
             Default is None, possibilities are: 'hamming'; 'bartlett';
            'blackman'; and 'hanning'. See, numpy.<function_name>.""")

    def evaluate(self):
        """
        Calculate the FFT of time_series broken into segments of length
        segment_length and filtered by window_function.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        #Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(self.segment_length / self.time_series.sample_period)
            overlap = ((seg_tpts * nseg) - tpts) / (nseg - 1)
            starts = [
                int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)
            ]
            segments = [
                self.time_series.data[start:start + seg_tpts]
                for start in starts
            ]
            segments = [
                segment[:, :, :, numpy.newaxis] for segment in segments
            ]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            self.segment_length = time_series_length
            time_series = self.time_series.data[:, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]

        LOG.debug("Segment length being used is: %s" % self.segment_length)

        #Base-line correct the segmented time-series
        time_series = sp_signal.detrend(time_series, axis=0)
        util.log_debug_array(LOG, time_series, "time_series")

        #Apply windowing function
        if self.window_function is not None:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                msg = "Windowing function %s is not supported; must be in %s" % (
                    self.window_function, str(SUPPORTED_WINDOWING_FUNCTIONS))
                LOG.error(msg)
                raise Exception(msg)

            window_function = getattr(numpy, self.window_function)
            window_mask = numpy.reshape(window_function(seg_tpts),
                                        (seg_tpts, 1, 1, 1, 1))
            time_series = time_series * window_mask

        #Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        nfreq = result.shape[0] // 2
        result = result[1:nfreq + 1, :]
        util.log_debug_array(LOG, result, "result")

        spectra = spectral.FourierSpectrum(
            source=self.time_series,
            segment_length=self.segment_length,
            window_function=self.window_function,
            array_data=result,
            use_storage=False)

        return spectra

    def result_shape(self, input_shape, segment_length, sample_period):
        """Returns the shape of the main result (complex array) of the FFT."""
        freq_len = (segment_length / sample_period) / 2.0
        freq_len = int(min((input_shape[0], freq_len)))
        nseg = max(
            (1,
             int(numpy.ceil(input_shape[0] * sample_period / segment_length))))
        result_shape = (freq_len, input_shape[1], input_shape[2],
                        input_shape[3], nseg)
        return result_shape

    def result_size(self, input_shape, segment_length, sample_period):
        """
        Returns the storage size in Bytes of the main result (complex array) of 
        the FFT.
        """
        result_size = numpy.prod(
            self.result_shape(input_shape, segment_length,
                              sample_period)) * 2.0 * 8.0  #complex128 = 2 * 8 bytes
        return result_size

    def extended_result_size(self, input_shape, segment_length, sample_period):
        """
        Returns the storage size in Bytes of the extended result of the FFT. 
        That is, it includes storage of the evaluated FourierSpectrum attributes
        such as power, phase, amplitude, etc.
        """
        result_shape = self.result_shape(input_shape, segment_length,
                                         sample_period)
        result_size = self.result_size(input_shape, segment_length,
                                       sample_period)
        extend_size = result_size  #Main array
        extend_size = extend_size + 0.5 * result_size  #Amplitude
        extend_size = extend_size + 0.5 * result_size  #Phase
        extend_size = extend_size + 0.5 * result_size  #Power
        extend_size = extend_size + 0.5 * result_size / result_shape[4]  #Average power
        extend_size = extend_size + 0.5 * result_size / result_shape[4]  #Normalised average power
        extend_size = extend_size + result_shape[0] * 8.0  #Frequency
        return extend_size
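
The overlap arithmetic in `evaluate` is easiest to check in isolation: `nseg` windows of `seg_tpts` points covering `tpts` samples must share `((seg_tpts * nseg) - tpts) / (nseg - 1)` points between consecutive windows. A standalone sketch with made-up numbers:

import numpy

tpts = 2500      # total samples in the time series
seg_tpts = 1000  # samples per 1 s segment at a 1 ms sample period

nseg = int(numpy.ceil(tpts / float(seg_tpts)))     # 3 segments needed
overlap = ((seg_tpts * nseg) - tpts) / (nseg - 1)  # 250.0 shared points
starts = [int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)]
print(starts)  # [0, 750, 1500] -- the last window ends exactly at 2500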
Beispiel #28
0
class WaveletCoefficients(arrays.MappedArray):
    """
    This class bundles all the elements of a Wavelet Analysis into a single
    object, including the input TimeSeries datatype and the output results as
    arrays (FloatArray)
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray()

    source = time_series.TimeSeries(label="Source time-series")

    mother = basic.String(
        label="Mother wavelet",
        default="morlet",
        doc="""A string specifying the type of mother wavelet to use,
            default is 'morlet'.""")

    sample_period = basic.Float(label="Sample period")
    #sample_rate = basic.Integer(label = "")  inversely related

    frequencies = arrays.FloatArray(
        label="Frequencies", doc="A vector that maps scales to frequencies.")

    normalisation = basic.String(label="Normalisation type")
    # 'unit energy' | 'gabor'

    q_ratio = basic.Float(label="Q-ratio", default=5.0)

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    _frequency = None
    _time = None

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1),
                    int(self.read_data_shape()[i]))

        if self.trait.use_storage is False and sum(
                self.get_data_shape('array_data')) != 0:
            if self.amplitude.size == 0:
                self.compute_amplitude()
            if self.phase.size == 0:
                self.compute_phase()
            if self.power.size == 0:
                self.compute_power()

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Wavelet type": self.mother,
            "Normalisation": self.normalisation,
            "Q-ratio": self.q_ratio,
            "Sample period": self.sample_period,
            "Number of scales": self.frequencies.shape[0],
            "Minimum frequency": self.frequencies[0],
            "Maximum frequency": self.frequencies[-1]
        }
        return summary

    @property
    def frequency(self):
        """ Frequencies represented by the wavelet spectrogram."""
        if self._frequency is None:
            # NOTE: expects `frequencies` to expose lo/hi/step (a Range-like
            #       object) at this point, not a plain FloatArray.
            self._frequency = numpy.arange(self.frequencies.lo,
                                           self.frequencies.hi,
                                           self.frequencies.step)
            util.log_debug_array(LOG, self._frequency, "frequency")
        return self._frequency

    def compute_amplitude(self):
        """ Amplitude of the complex Wavelet coefficients."""
        self.amplitude = numpy.abs(self.array_data)

    def compute_phase(self):
        """ Phase of the Wavelet coefficients."""
        self.phase = numpy.angle(self.array_data)

    def compute_power(self):
        """ Power of the complex Wavelet coefficients."""
        self.power = numpy.abs(self.array_data)**2

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_amplitude()
        self.store_data_chunk('amplitude',
                              partial_result.amplitude,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_phase()
        self.store_data_chunk('phase',
                              partial_result.phase,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_power()
        self.store_data_chunk('power',
                              partial_result.power,
                              grow_dimension=2,
                              close_file=False)
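
The derived arrays stored by `write_data_slice` are pointwise functions of the complex coefficients; a minimal numpy illustration of `compute_amplitude`, `compute_phase` and `compute_power`:

import numpy

coef = numpy.array([3.0 + 4.0j, 1.0 - 1.0j])  # stand-in wavelet coefficients
amplitude = numpy.abs(coef)                   # [5.0, 1.4142...]
phase = numpy.angle(coef)                     # radians, in (-pi, pi]
power = numpy.abs(coef) ** 2                  # [25.0, 2.0] == amplitude**2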
Beispiel #29
0
class Noise(core.Type):
    """
    Defines a base class for noise. Specific noises are derived from this class
    for use in stochastic integrations. 

    .. [KloedenPlaten_1995] Kloeden and Platen, Springer 1995, *Numerical
        solution of stochastic differential equations.*

    .. [ManellaPalleschi_1989] Manella, R. and Palleschi V., *Fast and precise
        algorithm for computer simulation of stochastic differential equations*,
        Physical Review A, Vol. 40, Number 6, 1989. [3381-3385]

    .. [Mannella_2002] Mannella, R.,  *Integration of Stochastic Differential
        Equations on a Computer*, Int J. of Modern Physics C 13(9): 1177--1194,
        2002.

    .. [FoxVemuri_1988] Fox, R., Gatland, I., Roy, R. and Vemuri, G., *Fast,
        accurate algorithm for numerical simulation of exponentially correlated
        colored noise*, Physical Review A, Vol. 38, Number 11, 1988. [5938-5940]


    .. #Currently there seems to be a clash between traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Noise.__init__
    .. automethod:: Noise.configure_white
    .. automethod:: Noise.generate
    .. automethod:: Noise.white
    .. automethod:: Noise.coloured

    """
    _base_classes = ['Noise', 'MultiplicativeSimple']

    #NOTE: nsig is not declared here because we use this class directly as the
    #      initial conditions noise source, and in that use the job of nsig is
    #      filled by the state_variable_range attribute of the Model.

    ntau = basic.Float(label=r":math:`\tau`",
                       required=True,
                       default=0.0,
                       doc="""The noise correlation time""")

    random_stream = RandomStream(
        label="Random Stream",
        #fixed_type = True, #Can only be RandomStream in UI
        #default = RandomStream,
        required=True,
        doc="""An instance of numpy's RandomState associated with this 
        specific Noise object.""")

    def __init__(self, **kwargs):
        """
        Initialise the noise with parameters given as keyword arguments; a
        sensible default parameter set should be provided via the trait
        mechanism.

        """
        super(Noise, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        self.dt = None

        #For use if coloured
        self._E = None
        self._sqrt_1_E2 = None
        self._eta = None
        self._h = None

    def configure(self):
        """
        Run base classes configure to setup traited attributes, then ensure that
        the ``random_stream`` attribute is properly configured.

        """
        super(Noise, self).configure()
        #self.random_stream.configure()
        self.trait["random_stream"].configure()  #stupid wraps
        #TODO: Check with Lia that explicit cascading of configure() fits with
        #       intended/expected usage...

    def __repr__(self):
        """A formal, executable, representation of a Noise object."""
        class_name = self.__class__.__name__
        traited_kwargs = self.trait.keys()
        formal = class_name + "(" + "=%s, ".join(traited_kwargs) + "=%s)"
        return formal % tuple(getattr(self, key) for key in traited_kwargs)

    def __str__(self):
        """An informal, human readable, representation of a Noise object."""
        class_name = self.__class__.__name__
        traited_kwargs = self.trait.keys()
        informal = class_name + "(" + ", ".join(traited_kwargs) + ")"
        return informal

    def configure_white(self, dt, shape=None):
        """Set the time step (dt) of noise or integration time"""
        self.dt = dt

    def configure_coloured(self, dt, shape):
        r"""
        One of the simplest forms for coloured noise is exponentially correlated
        Gaussian noise [KloedenPlaten_1995]_. 

        We give the initial conditions for coloured noise using the integral
        algorithm for simulating exponentially correlated noise proposed by
        [FoxVemuri_1988]_.

        To start the simulation, an initial value for :math:`\eta` is needed.
        It is obtained in accord with Eqs.[13-15]:

            .. math::
                m &= \text{random number}\\
                n &= \text{random number}\\
                \eta &= \sqrt{-2D\lambda\ln(m)}\,\cos(2\pi\,n)

        where :math:`D` is the standard deviation of the noise amplitude and
        :math:`\lambda = \frac{1}{\tau_n}` is the inverse of the noise
        correlation time. Then we set :math:`E = \exp(-\lambda\,\delta t)`,
        where :math:`\delta t` is the integration time step.

        After that, the exponentially correlated coloured noise is obtained:

            .. math::
                a &= \text{random number}\\
                b &= \text{random number}\\
                h &= \sqrt{-2D\lambda\,(1 - E^2)\,\ln{a}}\,\cos(2\pi\,b)\\
                \eta_{t+\delta\,t} &= \eta_{t}E + h

        """
        #TODO: Probably best to change the docstring to be consistent with the
        #      below, ie, factoring out the explicit Box-Muller.
        #NOTE: The actual implementation factors out the explicit Box-Muller,
        #      using numpy's normal() instead.
        self.dt = dt
        self._E = numpy.exp(-self.dt / self.ntau)
        self._sqrt_1_E2 = numpy.sqrt((1.0 - self._E**2))
        self._eta = self.random_stream.normal(size=shape)
        self._dt_sqrt_lambda = self.dt * numpy.sqrt(1.0 / self.ntau)

    #TODO: Check performance, if issue, inline coloured and white...
    def generate(
        self,
        shape,
        truncate=False,
        lo=-1.0,
        hi=1.0,
    ):
        """Generate and return some "noise" of the requested ``shape``."""
        if self.ntau > 0.0:
            noise = self.coloured(shape)
        else:
            if truncate:
                noise = self.truncated_white(shape, lo, hi)
            else:
                noise = self.white(shape)
        return noise

    def coloured(self, shape):
        """See, [FoxVemuri_1988]_"""

        self._h = self._sqrt_1_E2 * self.random_stream.normal(size=shape)
        self._eta = self._eta * self._E + self._h
        return self._dt_sqrt_lambda * self._eta

    def white(self, shape):
        """
        Return Gaussian random variates as an array of shape ``shape``, with the
        amplitude scaled by :math:`\\sqrt{dt}`.

        """

        noise = numpy.sqrt(self.dt) * self.random_stream.normal(size=shape)
        return noise

    def truncated_white(self, shape, lo, hi):
        """

        Return truncated Gaussian random variates in the range ``[lo, hi]``, as an
        array of shape ``shape``, with the amplitude scaled by
        :math:`\\sqrt{dt}`.

        See:
        http://docs.scipy.org/doc/scipy-0.7.x/reference/generated/scipy.stats.truncnorm.html

        """

        noise = numpy.sqrt(self.dt) * scipy_stats.truncnorm.rvs(
            lo, hi, size=shape)
        return noise
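
Stripped of the traits machinery, the update implemented by `configure_coloured` and `coloured` fits in a few lines. A minimal sketch, assuming unit noise strength and hypothetical parameter values:

import numpy

rng = numpy.random.RandomState(42)
dt, ntau, shape = 0.1, 5.0, (4,)

# The constants precomputed in configure_coloured.
E = numpy.exp(-dt / ntau)             # decay of the old value over one step
sqrt_1_E2 = numpy.sqrt(1.0 - E ** 2)  # keeps the variance stationary
dt_sqrt_lambda = dt * numpy.sqrt(1.0 / ntau)
eta = rng.normal(size=shape)          # initial condition

def coloured_step(eta):
    """One update: eta_{t+dt} = eta_t * E + h, with h ~ N(0, 1 - E**2)."""
    h = sqrt_1_E2 * rng.normal(size=shape)
    return eta * E + h

for _ in range(3):
    eta = coloured_step(eta)
    print(dt_sqrt_lambda * eta)  # the noise value actually returned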
Beispiel #30
0
class ComplexCoherenceSpectrum(arrays.MappedArray):
    """
    Result of a NodeComplexCoherence Analysis.
    """

    cross_spectrum = arrays.ComplexArray(
        label="The cross spectrum",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc=""" A complex ndarray that contains the nodes x nodes cross
                spectrum for every frequency frequency and for every segment."""
    )

    array_data = arrays.ComplexArray(
        label="Complex Coherence",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc="""The complex coherence coefficients calculated from the cross
                spectrum. The imaginary values of this complex ndarray represent the
                imaginary coherence.""")

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the node_coherence is
                applied.""")

    epoch_length = basic.Float(
        label="Epoch length",
        doc="""The timeseries was divided into equally sized epochs, each of
                which was further segmented (overlapping if necessary) prior
                to the application of the FFT.""")

    segment_length = basic.Float(
        label="Segment length",
        doc="""Each epoch was segmented into equally sized blocks
                (overlapping if necessary), prior to the application of the
                FFT. The segment length determines the frequency resolution
                of the resulting spectra.""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
                application of the FFT.""")

    __generate_table__ = True

    _frequency = None
    _freq_step = None
    _max_freq = None

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.configure_chunk_safe()

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        self.store_data_chunk('cross_spectrum',
                              partial_result.cross_spectrum,
                              grow_dimension=2,
                              close_file=False)

        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=2,
                              close_file=False)

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Frequency step": self.freq_step,
            "Maximum frequency": self.max_freq
        }
        # summary["FFT length (time-points)"] = self.fft_points
        # summary["Number of epochs"] = self.number_of_epochs
        return summary

    @property
    def freq_step(self):
        """ Frequency step size of the Complex Coherence Spectrum."""
        if self._freq_step is None:
            self._freq_step = 1.0 / self.segment_length
            msg = "%s: Frequency step size is %s"
            LOG.debug(msg % (str(self), str(self._freq_step)))
        return self._freq_step

    @property
    def max_freq(self):
        """ Maximum frequency represented in the Complex Coherence Spectrum."""
        if self._max_freq is None:
            self._max_freq = 0.5 / self.source.sample_period
            msg = "%s: Max frequency is %s"
            LOG.debug(msg % (str(self), str(self._max_freq)))
        return self._max_freq

    @property
    def frequency(self):
        """ Frequencies represented in the Complex Coherence Spectrum."""
        if self._frequency is None:
            self._frequency = numpy.arange(self.freq_step,
                                           self.max_freq + self.freq_step,
                                           self.freq_step)
            util.log_debug_array(LOG, self._frequency, "frequency")
        return self._frequency
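
As a concrete check of the three properties above, assuming the millisecond units used throughout this listing (so frequencies come out in kHz):

import numpy

segment_length = 1000.0  # ms
sample_period = 1.0      # ms

freq_step = 1.0 / segment_length  # 0.001 kHz, i.e. 1 Hz resolution
max_freq = 0.5 / sample_period    # 0.5 kHz, i.e. the 500 Hz Nyquist limit
frequency = numpy.arange(freq_step, max_freq + freq_step, freq_step)
print(frequency[0], frequency[-1])  # ~0.001 ... ~0.5, 500 points (modulo float rounding)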