Example 1
class FcdCalculator(HasTraits):
    """
    Model class defining the traited attributes used by the FcdAdapter.
    """
    time_series = Attr(
        field_type=TimeSeriesRegion,
        label="Time Series",
        required=True,
        doc="""The time-series for which the fcd matrices are calculated.""")

    sw = Float(
        label="Sliding window length (ms)",
        default=120000,
        doc="""Length of the time window used to divided the time series.
        FCD matrix is calculated in the following way: the time series is divided in time window of fixed length and
        with an overlapping of fixed length. The data-points within each window, centered at time ti, are used to
        calculate FC(ti) as Pearson correlation. The ij element of the FCD matrix is calculated as the Pearson
        Correlation between FC(ti) and FC(tj) arranged in a vector.""")

    sp = Float(
        label="Spanning between two consecutive sliding window (ms)",
        default=2000,
        doc=
        """Spanning= (time windows length)-(overlapping between two consecutive time window). FCD matrix is
        calculated in the following way: the time series is divided in time window of fixed length and with an
        overlapping of fixed length. The data-points within each window, centered at time ti, are used to calculate
        FC(ti) as Pearson Correlation. The ij element of the FCD matrix is calculated as the Pearson correlation
        between FC(ti) and FC(tj) arranged in a vector""")
Example 2
class AllenConnectModel(ViewModel):
    resolution = Int(label="Spatial resolution (micron)",
                     default=list(RESOLUTION_OPTIONS.values())[2],
                     choices=RESOLUTION_OPTIONS.values(),
                     required=True,
                     doc="""Definition of the weights of the connectivity :""")

    weighting = Int(label="Definition of the weights of the connectivity :",
                    default=list(WEIGHTS_OPTIONS.values())[0],
                    choices=WEIGHTS_OPTIONS.values(),
                    required=True,
                    doc="""""")

    inj_f_thresh = Float(
        label="Injected percentage of voxels in the inj site",
        default=80,
        required=True,
        doc="""To build the connectivity, select only the experiments where the percentage of infected voxels
        in the injection structure is greater than: """)

    vol_thresh = Float(
        label="Min volume",
        default=1000000000,
        required=True,
        doc="""To build the volume and the connectivity, select only the areas that have a volume
        greater than (micron^3): """)
Example 3
class AllenConnectModel(ViewModel):
    resolution = EnumAttr(
        label="Spatial resolution (micron)",
        default=ResolutionOptionsEnum.ONE_HUNDRED,
        doc="""Definition of the weights of the connectivity :""")

    weighting = EnumAttr(
        label="Definition of the weights of the connectivity :",
        default=WeightsOptionsEnum.PROJECTION_DENSITY_INJECTION_DENSITY,
        doc="""""")

    inj_f_thresh = Float(
        label="Injected percentage of voxels in the inj site",
        default=80,
        required=True,
        doc="""To build the connectivity, select only the experiments where the percentage of infected voxels
        in the injection structure is greater than: """)

    vol_thresh = Float(
        label="Min volume",
        default=1000000000,
        required=True,
        doc="""To build the volume and the connectivity, select only the areas that have a volume
        greater than (micron^3): """)
Example 4
 def __init__(self):
     super(EquationTemporalPlotForm, self).__init__()
     self.min_tmp_x = FloatField(Float(label='Temporal Start Time(ms)', default=0, doc="The minimum value of the "
                                             "x-axis for temporal equation plot. Not persisted, used only for "
                                             "visualization."), name='min_tmp_x')
     self.max_tmp_x = FloatField(Float(label='Temporal End Time(ms)', default=100, doc="The maximum value of the"
                                             " x-axis for temporal equation plot. Not persisted, used only for"
                                             " visualization."), name='max_tmp_x')
Example 5
class BalloonModelAdapterModel(ViewModel):
    time_series = DataTypeGidAttr(
        linked_datatype=TimeSeriesRegion,
        label="Time Series",
        required=True,
        doc="""The timeseries that represents the input neural activity"""
    )

    dt = Float(
        label=":math:`dt`",
        default=0.002,
        required=True,
        doc="""The integration time step size for the balloon model (s).
            If none is provided, by default, the TimeSeries sample period is used."""
    )

    tau_s = Float(
        label=r":math:`\tau_s`",
        default=1.54,
        required=True,
        doc="""Balloon model parameter. Time of signal decay (s)""")

    tau_f = Float(
        label=r":math:`\tau_f`",
        default=1.44,
        required=True,
        doc=""" Balloon model parameter. Time of flow-dependent elimination or
            feedback regulation (s). The average  time blood take to traverse the
            venous compartment. It is the  ratio of resting blood volume (V0) to
            resting blood flow (F0).""")

    neural_input_transformation = EnumAttr(
        label="Neural input transformation",
        default=NeuralInputTransformations.NONE,
        doc=""" This represents the operation to perform on the state-variable(s) of
            the model used to generate the input TimeSeries. ``none`` takes the
            first state-variable as neural input; `` abs_diff`` is the absolute
            value of the derivative (first order difference) of the first state variable; 
            ``sum``: sum all the state-variables of the input TimeSeries."""
    )

    bold_model = EnumAttr(
        label="Select BOLD model equations",
        default=BoldModels.NONLINEAR,
        doc="""Select the set of equations for the BOLD model."""
    )

    RBM = Attr(
        field_type=bool,
        label="Revised BOLD Model",
        default=True,
        required=True,
        doc="""Select classical vs revised BOLD model (CBM or RBM).
            Coefficients k1, k2 and k3 will be derived accordingly."""
    )
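As a reading aid, here is a hedged sketch of the three transformations the `neural_input_transformation` doc describes; the (time, state_variables, nodes, modes) array layout and the function name are assumptions, not the adapter's actual code.

import numpy as np

def transform_neural_input(ts_data, mode="none"):
    if mode == "none":
        return ts_data[:, 0]                           # first state variable
    if mode == "abs_diff":
        return np.abs(np.diff(ts_data[:, 0], axis=0)) # |first-order difference|
    if mode == "sum":
        return ts_data.sum(axis=1)                     # sum over state variables
    raise ValueError("unknown transformation: %s" % mode)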
Example 6
 def __init__(self, prefix=''):
     super(RangeForm, self).__init__(prefix)
     self.lo = FloatField(
         Float(label='Lo', default=ContinuousWaveletTransform.frequencies.default.lo, doc='start of range'), self,
         name='Lo')
     self.hi = FloatField(
         Float(label='Hi', default=ContinuousWaveletTransform.frequencies.default.hi, doc='end of range'), self,
         name='Hi')
     self.step = FloatField(
         Float(label='Step', default=ContinuousWaveletTransform.frequencies.default.step, doc='step of range'), self,
         name='Step')
Example 7
 def __init__(self):
     super(RangeForm, self).__init__()
     self.lo = FloatField(
         Float(label='Lo', default=WaveletAdapterModel.frequencies.default.lo, doc='start of range'),
         name='Lo')
     self.hi = FloatField(
         Float(label='Hi', default=WaveletAdapterModel.frequencies.default.hi, doc='end of range'),
         name='Hi')
     self.step = FloatField(
         Float(label='Step', default=WaveletAdapterModel.frequencies.default.step, doc='step of range'),
         name='Step')
Example 8
 def __init__(self):
     super(EquationPlotForm, self).__init__()
     self.min_x = FloatField(Float(
         label='Min distance(mm)',
         default=0,
         doc="The minimum value of the x-axis for spatial equation plot."),
                             name='min_x')
     self.max_x = FloatField(Float(
         label='Max distance(mm)',
         default=100,
         doc="The maximum value of the x-axis for spatial equation plot."),
                             name='max_x')
Example 9
 def __init__(self):
     super(EquationSpatialPlotForm, self).__init__()
     self.min_space_x = FloatField(Float(
         label='Spatial Start Distance(mm)',
         default=0,
         doc="The minimum value of"
         " the x-axis for spatial equation plot."),
                                   name='min_space_x')
     self.max_space_x = FloatField(Float(
         label='Spatial End Distance(mm)',
         default=100,
         doc="The maximum value of "
         "the x-axis for spatial equation plot."),
                                   name='max_space_x')
Example 10
class TimeseriesMetricsAdapterModel(ViewModel):
    time_series = DataTypeGidAttr(
        linked_datatype=TimeSeries,
        label="Time Series",
        required=True,
        doc="The TimeSeries for which the metric(s) will be computed.")

    algorithms = List(
        of=str,
        choices=tuple(ALGORITHMS.keys()),
        label='Selected metrics to be applied',
        doc=
        'The selected algorithms will all be applied on the input TimeSeries')

    start_point = Float(
        label="Start point (ms)",
        default=500.0,
        required=False,
        doc=""" The start point determines how many points of the TimeSeries will
        be discarded before computing the metric. By default it drops the
        first 500 ms.""")

    segment = Int(
        label="Segmentation factor",
        default=4,
        required=False,
        doc=
        """ Divide the input time-series into discrete equally sized sequences and
        use the last segment to compute the metric. It is only used when
        the start point is larger than the time-series length.""")
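A hedged sketch of how `start_point` and `segment` could interact when trimming the series before a metric is computed; names and shapes are illustrative, not the TVB implementation.

def select_samples(data, sample_period_ms, start_point=500.0, segment=4):
    start_idx = int(start_point / sample_period_ms)
    if start_idx < len(data):
        return data[start_idx:]        # drop everything before start_point
    # start point beyond the series: fall back to the last of `segment` chunks
    seg_len = len(data) // segment
    return data[-seg_len:]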
Example 11
class Raw(Monitor):
    """
    A monitor that records the raw output data from a TVB simulation.
    It collects:

        - all state variables and modes from class :Model:
        - all nodes of a region- or surface-based simulation
        - all the integration time steps

    """
    _ui_name = "Raw recording"

    period = Float(default=0.0,
                   label="Sampling period is ignored for Raw Monitor")
    # order = -1

    variables_of_interest = NArray(
        dtype=int,
        label="Raw Monitor sees all!!! Resistance is futile...",
        required=False)

    def config_for_sim(self, simulator):
        if self.period != simulator.integrator.dt:
            self.log.debug(
                'Raw period not equal to integration time step, overriding')
        self.period = simulator.integrator.dt
        super(Raw, self).config_for_sim(simulator)
        self.istep = 1
        self.voi = numpy.arange(len(simulator.model.variables_of_interest))

    def sample(self, step, state):
        time = step * self.dt
        return [time, state]
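For context, the base-class record/sample contract that this monitor's `istep = 1` relies on looks roughly like the following sketch (inferred from the surrounding code, not quoted from TVB):

    def record(self, step, observed):
        # With istep == 1, as configured above, every integration step
        # is passed to sample() verbatim.
        if step % self.istep == 0:
            return self.sample(step, observed)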
Example 12
    def test_float_field_required_empty(self):
        float_attr = Float(label='Dummy Float', default=0.)
        float_field = FloatField(float_attr, self.name)

        post_data = {'dummy_name': ''}
        float_field.fill_from_post(post_data)
        assert float_field.validate() is False, "Validation should have failed on FloatField!"
        assert float_field.value == ''
Example 13
    def test_float_field_optional(self):
        float_attr = Float(label='Dummy Float', default=0., required=False)
        float_field = FloatField(float_attr, self.name)

        post_data = {'dummy_name': ''}
        float_field.fill_from_post(post_data)
        assert float_field.data is None, "Empty data was not set correctly on FloatField!"
        assert float_field.value == ''
Example 14
    def test_float_field_required(self):
        float_attr = Float(label='Dummy Float', default=0.)
        float_field = FloatField(float_attr, self.name)

        post_data = {'dummy_name': '10.5'}
        float_field.fill_from_post(post_data)
        assert float_field.data == float(post_data[self.name]), "Float data was not set correctly on FloatField!"
        assert float_field.value == float_field.data
Example 15
 def _add_fields_for_float(self, param, param_key):
     # type: (RangeParameter, str) -> None
     pse_param_lo = FloatField(
         Float(label='LO for {}'.format(param.name),
               default=param.range_definition.lo, required=True),
         name=self.LO_FIELD.format(param_key))
     self.__setattr__(self.LO_FIELD.format(param_key), pse_param_lo)
     pse_param_hi = FloatField(
         Float(label='HI for {}'.format(param.name),
               default=param.range_definition.hi, required=True),
         name=self.HI_FIELD.format(param_key))
     self.__setattr__(self.HI_FIELD.format(param_key), pse_param_hi)
     pse_param_step = FloatField(
         Float(label='STEP for {}'.format(param.name),
               default=param.range_definition.step, required=True),
         name=self.STEP_FIELD.format(param_key))
     self.__setattr__(self.STEP_FIELD.format(param_key), pse_param_step)
Example 16
class Fcd(HasTraits):
    array_data = NArray()

    source = Attr(field_type=time_series.TimeSeries,
                  label="Source time-series",
                  doc="Links to the time-series on which FCD is calculated.")

    sw = Float(
        label="Sliding window length (ms)",
        default=120000,
        doc="""Length of the time window used to divided the time series.
                FCD matrix is calculated in the following way: the time series is divided in time window of fixed length and with an overlapping of fixed length.
                The datapoints within each window, centered at time ti, are used to calculate FC(ti) as Pearson correlation.
                The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj) arranged in a vector."""
    )

    sp = Float(
        label="Spanning between two consecutive sliding window (ms)",
        default=2000,
        doc=
        """Spanning= (time windows length)-(overlapping between two consecutive time window).
                FCD matrix is calculated in the following way: the time series is divided in time window of fixed length and with an overlapping of fixed length.
                The datapoints within each window, centered at time ti, are used to calculate FC(ti) as Pearson correlation.
                The ij element of the FCD matrix is calculated as the Pearson correlation between FC(ti) and FC(tj) arranged in a vector"""
    )

    labels_ordering = List(
        of=str,
        label="Dimension Names",
        default=("Time", "Time", "State Variable", "Mode"),
        doc="""List of strings representing names of each data dimension""")

    def summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "FCD type": self.__class__.__name__,
            "Source": self.source.title,
            "Dimensions": self.labels_ordering
        }
        summary.update(narray_summary_info(self.array_data))
        return summary
Example 17
 def __init__(self):
     super(EquationForm, self).__init__()
     self.equation = StrField(self.get_traited_equation().equation,
                              disabled=True)
     for param_key, param in self.get_traited_equation().parameters.default().items():
         setattr(self, param_key,
                 FloatField(Float(label=param_key, default=param), name=param_key))
Example 18
class WaveletAdapterModel(ViewModel):
    time_series = DataTypeGidAttr(
        linked_datatype=TimeSeries,
        label="Time Series",
        required=True,
        doc="""The timeseries to which the wavelet is to be applied.""")

    mother = Attr(
        field_type=str,
        label="Wavelet function",
        default="morlet",
        doc="""The mother wavelet function used in the transform. Default is
            'morlet', possibilities are: 'morlet'...""")

    sample_period = Float(
        label="Sample period of result (ms)",
        default=7.8125,  # 7.8125 => 128 Hz
        doc="""The sampling period of the computed wavelet spectrum. NOTE:
            This should be an integral multiple of the of the sampling period
            of the source time series, otherwise the actual resulting sample
            period will be the first correct value below that requested.""")

    frequencies = Attr(
        field_type=Range,
        label="Frequency range of result (kHz).",
        default=Range(lo=0.008, hi=0.060, step=0.002),
        doc="""The frequency resolution and range returned. Requested
            frequencies are converted internally into appropriate scales.""")

    normalisation = Attr(
        field_type=str,
        label="Normalisation",
        default="energy",
        doc="""The type of normalisation for the resulting wavet spectrum.
            Default is 'energy', options are: 'energy'; 'gabor'.""")

    q_ratio = Float(
        label="Q-ratio",
        default=5.0,
        doc=
        """NFC. Must be greater than 5. Ratios of the center frequencies to bandwidths."""
    )
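The NOTE in `sample_period` implies a floor-to-multiple coercion; here is a hedged one-liner of that rule (the function name is made up):

def coerce_sample_period(requested_ms, source_period_ms):
    # Largest integral multiple of the source period not exceeding the request.
    return max(1, int(requested_ms / source_period_ms)) * source_period_ms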
Example 19
class PearsonCorrelationCoefficientAdapterModel(ViewModel):
    time_series = DataTypeGidAttr(
        linked_datatype=TimeSeries,
        label="Time Series",
        required=True,
        doc="""The time-series for which the cross correlation matrices are calculated."""
    )

    t_start = Float(
        label=":math:`t_{start}`",
        default=0.9765625,
        required=True,
        doc="""Time start point (ms). By default it uses the default Monitor sample period.
        The starting time point of a time series is not zero, but the monitor's sample period. """)

    t_end = Float(
        label=":math:`t_{end}`",
        default=1000.,
        required=True,
        doc=""" End time point (ms) """)
Example 20
class iEEG(Projection):
    "Forward solution for intracranial EEG (not ECoG!)."

    _ui_name = "Intracerebral / Stereo EEG"

    projection = Attr(
        projections_module.ProjectionSurfaceSEEG,
        default=None,
        label='Projection matrix',  #order=2,
        doc='Projection matrix to apply to sources.')

    sigma = Float(label="conductivity", default=1.0)  #, order=4)

    sensors = Attr(
        sensors_module.SensorsInternal,
        label="Internal brain sensors",
        default=None,
        required=True,  #order=1,
        doc=
        "The set of SEEG sensors for which the forward solution will be calculated."
    )

    @classmethod
    def from_file(cls,
                  sensors_fname='seeg_588.txt',
                  projection_fname='projection_seeg_588_surface_16k.npy',
                  **kwargs):
        return Projection.from_file.__func__(cls, sensors_fname,
                                             projection_fname, **kwargs)

    def analytic(self, loc, ori):
        """Compute the projection matrix -- simple distance weight for now.
        Equation 12 from sarvas1987basic (point dipole in homogeneous space):
          V(r) = 1/(4*pi*\sigma)*Q*(r-r_0)/|r-r_0|^3
        """
        r_0, Q = loc, ori
        V_r = numpy.zeros((self.sensors.locations.shape[0], r_0.shape[0]))
        for sensor_k in numpy.arange(self.sensors.locations.shape[0]):
            a = self.sensors.locations[sensor_k, :] - r_0
            na = numpy.sqrt(numpy.sum(a**2, axis=1))[:, numpy.newaxis]
            V_r[sensor_k, :] = numpy.sum(
                Q * (a / na**3), axis=1) / (4.0 * numpy.pi * self.sigma)
        return V_r

    def create_time_series(self,
                           connectivity=None,
                           surface=None,
                           region_map=None,
                           region_volume_map=None):
        return TimeSeriesSEEG(sensors=self.sensors,
                              sample_period=self.period,
                              title=' ' + self.__class__.__name__)
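A usage sketch for the point-dipole gain computation above, with made-up sensor and source arrays (not TVB fixtures; the keyword construction of SensorsInternal is an assumption):

import numpy

seeg = iEEG(sigma=1.0)
seeg.sensors = sensors_module.SensorsInternal(locations=numpy.random.randn(5, 3))
loc = numpy.random.randn(10, 3)   # dipole positions r_0
ori = numpy.random.randn(10, 3)   # dipole moments Q
gain = seeg.analytic(loc, ori)    # shape (5 sensors, 10 sources)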
Example 21
class CorrelationCoefficient(HasTraits):
    """
    Model class defining the traited attributes used by the CorrelationCoefficientAdapter.
    """
    time_series = Attr(
        field_type=TimeSeries,
        label="Time Series",
        required=True,
        doc="""The time-series for which the cross correlation matrices are
        calculated.""")

    t_start = Float(
        label=":math:`t_{start}`",
        default=0.9765625,
        required=True,
        doc="""Time start point (ms). By default it uses the default Monitor sample period.
        The starting time point of a time series is not zero, but the monitor's sample period. """)

    t_end = Float(
        label=":math:`t_{end}`",
        default=1000.,
        required=True,
        doc=""" End time point (ms) """)
Example 22
class BalloonModelAdapterModel(ViewModel):
    time_series = DataTypeGidAttr(
        linked_datatype=TimeSeriesRegion,
        label="Time Series",
        required=True,
        doc="""The timeseries that represents the input neural activity"""
    )

    dt = Float(
        label=":math:`dt`",
        default=0.002,
        required=True,
        doc="""The integration time step size for the balloon model (s).
            If none is provided, by default, the TimeSeries sample period is used."""
    )

    neural_input_transformation = Attr(
        field_type=str,
        label="Neural input transformation",
        choices=("none", "abs_diff", "sum"),
        default="none",
        doc=""" This represents the operation to perform on the state-variable(s) of
            the model used to generate the input TimeSeries. ``none`` takes the
            first state-variable as neural input; `` abs_diff`` is the absolute
            value of the derivative (first order difference) of the first state variable; 
            ``sum``: sum all the state-variables of the input TimeSeries."""
    )

    bold_model = Attr(
        field_type=str,
        label="Select BOLD model equations",
        choices=("linear", "nonlinear"),
        default="nonlinear",
        doc="""Select the set of equations for the BOLD model."""
    )

    RBM = Attr(
        field_type=bool,
        label="Revised BOLD Model",
        default=True,
        required=True,
        doc="""Select classical vs revised BOLD model (CBM or RBM).
            Coefficients k1, k2 and k3 will be derived accordingly."""
    )
Example 23
class BaseTimeseriesMetricAlgorithm(HasTraits):
    """
    This is a base class for all metrics on timeSeries dataTypes.
    Metric means an algorithm computing a single value for an entire TimeSeries.

    """

    time_series = Attr(
        field_type=time_series_module.TimeSeries,
        label="Time Series",
        required=True,
        doc="The TimeSeries for which the metric(s) will be computed.")

    start_point = Float(
        label="Start point (ms)",
        default=500.0,
        required=False,
        doc=""" The start point determines how many points of the TimeSeries will
        be discarded before computing the metric. By default it drops the
        first 500 ms.""")

    segment = Int(
        label="Segmentation factor",
        default=4,
        required=False,
        doc=
        """ Divide the input time-series into discrete equally sized sequences and
        use the last segment to compute the metric. It is only used when
        the start point is larger than the time-series length.""")

    def evaluate(self):
        """
        This method needs to be implemented in each subclass, and should
        compute the metric for the current algorithm.

        :return: single numeric value or a dictionary (displayLabel: numeric value) to be persisted.
        """
        raise Exception(
            "Every metric algorithm should implement an 'evaluate' method that returns the metric result."
        )
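A hedged sketch of a concrete subclass honouring the `evaluate` contract above; the variance metric is illustrative only, not one of TVB's shipped algorithms:

class VarianceMetric(BaseTimeseriesMetricAlgorithm):
    def evaluate(self):
        # self.time_series.data assumed to be a (time, svar, node, mode) array
        return float(self.time_series.data.var())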
Example 24
    def __init__(self, path):
        super(SimulationStateH5, self).__init__(path)
        self.history = DataSet(NArray(), self, name='history')
        self.current_state = DataSet(NArray(), self, name='current_state')
        self.current_step = Scalar(Int(), self, name='current_step')

        for i in range(1, 16):
            setattr(self, 'monitor_stock_%i' % i,
                    DataSet(NArray(), self, name='monitor_stock_%i' % i))

        self.integrator_noise_rng_state_algo = Scalar(
            Attr(str), self, name='integrator_noise_rng_state_algo')
        self.integrator_noise_rng_state_keys = DataSet(
            NArray(dtype='uint32'),
            self,
            name='integrator_noise_rng_state_keys')
        self.integrator_noise_rng_state_pos = Scalar(
            Int(), self, name='integrator_noise_rng_state_pos')
        self.integrator_noise_rng_state_has_gauss = Scalar(
            Int(), self, name='integrator_noise_rng_state_has_gauss')
        self.integrator_noise_rng_state_cached_gauss = Scalar(
            Float(), self, name='integrator_noise_rng_state_cached_gauss')
Example 25
class FFTAdapterModel(ViewModel):
    """
    Parameters have the following meaning:
    - time_series: the input time series to which the fft is to be applied
    - segment_length: the block size which determines the frequency resolution of the resulting power spectra
    - window_function: windowing functions can be applied before the FFT is performed
    - detrend: None; specify if detrend is performed on the time series
    """
    time_series = DataTypeGidAttr(
        linked_datatype=TimeSeries,
        label="Time Series",
        doc="""The TimeSeries to which the FFT is to be applied.""")

    segment_length = Float(
        label="Segment(window) length (ms)",
        default=1000.0,
        required=False,
        doc="""The TimeSeries can be segmented into equally sized blocks
            (overlapping if necessary). The segment length determines the
            frequency resolution of the resulting power spectra -- longer
            windows produce finer frequency resolution.""")

    window_function = Attr(
        field_type=str,
        label="Windowing function",
        choices=tuple(SUPPORTED_WINDOWING_FUNCTIONS),
        required=False,
        doc="""Windowing functions can be applied before the FFT is performed.
             Default is None, possibilities are: 'hamming'; 'bartlett';
            'blackman'; and 'hanning'. See numpy.<function_name>.""")

    detrend = Attr(field_type=bool,
                   label="Detrending",
                   default=True,
                   required=False,
                   doc="""Detrending is not always appropriate.
            Default is True; False means no detrending is performed on the time series"""
                   )
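The relationship stated in the `segment_length` doc, as a quick worked check: resolution (Hz) = 1000 / segment_length (ms), so the default 1000 ms window yields 1 Hz bins (helper name is made up):

def frequency_resolution_hz(segment_length_ms):
    # e.g. frequency_resolution_hz(1000.0) == 1.0
    return 1000.0 / segment_length_ms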
Example 26
class LocalConnectivity(HasTraits):
    """
    A sparse matrix for representing the local connectivity within the Cortex.
    """
    surface = Attr(field_type=surfaces.CorticalSurface, label="Surface")

    matrix = Attr(field_type=scipy.sparse.spmatrix, required=False)

    equation = Attr(
        field_type=equations.FiniteSupportEquation,
        label="Spatial",
        required=False,
        default=equations.Gaussian())

    cutoff = Float(
        label="Cutoff distance (mm)",
        default=40.0,
        doc="Distance at which to truncate the evaluation in mm.")

    # Temporary obj
    matrix_gdist = None

    def compute(self):
        """
        Compute current Matrix.
        """
        self.log.info("Mapping geodesic distance through the LocalConnectivity.")

        # Start with data being geodesic_distance_matrix, then map it through equation
        # Then replace original data with result...
        self.matrix_gdist.data = self.equation.evaluate(self.matrix_gdist.data)

        # Homogenise spatial discretisation effects across the surface
        nv = self.matrix_gdist.shape[0]
        ind = numpy.arange(nv, dtype=int)
        pos_mask = self.matrix_gdist.data > 0.0
        neg_mask = self.matrix_gdist.data < 0.0
        pos_con = self.matrix_gdist.copy()
        neg_con = self.matrix_gdist.copy()
        pos_con.data[neg_mask] = 0.0
        neg_con.data[pos_mask] = 0.0
        pos_contrib = pos_con.sum(axis=1)
        pos_contrib = numpy.array(pos_contrib).squeeze()
        neg_contrib = neg_con.sum(axis=1)
        neg_contrib = numpy.array(neg_contrib).squeeze()
        pos_mean = pos_contrib.mean()
        neg_mean = neg_contrib.mean()
        if ((pos_mean != 0.0 and any(pos_contrib == 0.0)) or
                (neg_mean != 0.0 and any(neg_contrib == 0.0))):
            msg = "Cortical mesh is too coarse for requested LocalConnectivity."
            self.log.warning(msg)
            bad_verts = ()
            if pos_mean != 0.0:
                bad_verts = bad_verts + numpy.nonzero(pos_contrib == 0.0)
            if neg_mean != 0.0:
                bad_verts = bad_verts + numpy.nonzero(neg_contrib == 0.0)
            self.log.debug("Problem vertices are: %s" % str(bad_verts))
        pos_hf = numpy.zeros(shape=pos_contrib.shape)
        pos_hf[pos_contrib != 0] = pos_mean / pos_contrib[pos_contrib != 0]
        neg_hf = numpy.zeros(shape=neg_contrib.shape)
        neg_hf[neg_contrib != 0] = neg_mean / neg_contrib[neg_contrib != 0]
        pos_hf_diag = scipy.sparse.csc_matrix((pos_hf, (ind, ind)), shape=(nv, nv))
        neg_hf_diag = scipy.sparse.csc_matrix((neg_hf, (ind, ind)), shape=(nv, nv))
        homogeneous_conn = (pos_hf_diag * pos_con) + (neg_hf_diag * neg_con)

        # Then replace the unhomogenised result with the spatially homogeneous one...
        if not homogeneous_conn.has_sorted_indices:
            homogeneous_conn.sort_indices()

        self.matrix = homogeneous_conn

    @staticmethod
    def from_file(source_file="local_connectivity_16384.mat"):

        result = LocalConnectivity()

        source_full_path = try_get_absolute_path("tvb_data.local_connectivity", source_file)
        reader = FileReader(source_full_path)

        result.matrix = reader.read_array(matlab_data_name="LocalCoupling")
        return result

    def summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        _, _, v = scipy.sparse.find(self.matrix)
        return narray_summary_info(v, ar_name='matrix-nonzero')

    def compute_sparse_matrix(self):
        """
        NOTE: Before calling this method, the surface field
        should already be set on the local connectivity.

        Computes the sparse matrix for this local connectivity.
        """
        if self.surface is None:
            raise AttributeError('Require surface to compute local connectivity.')

        self.matrix_gdist = surfaces.gdist.local_gdist_matrix(
            self.surface.vertices.astype(numpy.float64),
            self.surface.triangles.astype(numpy.int32),
            max_distance=self.cutoff)

        self.compute()
        # Avoid having a large data-set in memory.
        self.matrix_gdist = None
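A hedged usage sketch for the class above, assuming a CorticalSurface loaded from the bundled tvb_data files:

surf = surfaces.CorticalSurface.from_file()
surf.configure()
local_conn = LocalConnectivity(surface=surf, cutoff=40.0,
                               equation=equations.Gaussian())
local_conn.compute_sparse_matrix()   # computes and stores local_conn.matrix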
Example 27
class Simulator(HasTraits):
    """A Simulator assembles components required to perform simulations."""

    connectivity = Attr(
        field_type=connectivity.Connectivity,
        label="Long-range connectivity",
        default=None,
        required=True,
        doc="""A tvb.datatypes.Connectivity object which contains the
         structural long-range connectivity data (i.e., white-matter tracts). In
         combination with the ``Long-range coupling function`` it defines the inter-regional
         connections. These couplings undergo a time delay via signal propagation
         with a propagation speed of ``Conduction Speed``""")

    conduction_speed = Float(
        label="Conduction Speed",
        default=3.0,
        required=False,
        # range=basic.Range(lo=0.01, hi=100.0, step=1.0),
        doc="""Conduction speed for ``Long-range connectivity`` (mm/ms)""")

    coupling = Attr(
        field_type=coupling.Coupling,
        label="Long-range coupling function",
        default=coupling.Linear(),
        required=True,
        doc="""The coupling function is applied to the activity propagated
        between regions by the ``Long-range connectivity`` before it enters the local
        dynamic equations of the Model. Its primary purpose is to 'rescale' the
        incoming activity to a level appropriate to Model.""")

    surface = Attr(field_type=cortex.Cortex,
                   label="Cortical surface",
                   default=None,
                   required=False,
                   doc="""By default, a Cortex object which represents the
        cortical surface defined by points in the 3D physical space and their
        neighborhood relationship. In the current TVB version, when setting up a
        surface-based simulation, the option to configure the spatial spread of
        the ``Local Connectivity`` is available.""")

    stimulus = Attr(
        field_type=patterns.SpatioTemporalPattern,
        label="Spatiotemporal stimulus",
        default=None,
        required=False,
        doc=
        """A ``Spatiotemporal stimulus`` can be defined at the region or surface level.
        It's composed of spatial and temporal components. For region defined stimuli
        the spatial component is just the strength with which the temporal
        component is applied to each region. For surface defined stimuli,  a
        (spatial) function, with finite-support, is used to define the strength
        of the stimuli on the surface centred around one or more focal points.
        In the current version of TVB, stimuli are applied to the first state
        variable of the ``Local dynamic model``.""")

    model = Attr(
        field_type=models.Model,
        label="Local dynamic model",
        default=models.Generic2dOscillator(),
        required=True,
        doc="""A tvb.simulator.Model object which describe the local dynamic
        equations, their parameters, and, to some extent, where connectivity
        (local and long-range) enters and which state-variables the Monitors
        monitor. By default the 'Generic2dOscillator' model is used. Read the
        Scientific documentation to learn more about this model.""")

    integrator = Attr(field_type=integrators.Integrator,
                      label="Integration scheme",
                      default=integrators.HeunDeterministic(),
                      required=True,
                      doc="""A tvb.simulator.Integrator object which is
            an integration scheme with supporting attributes such as
            integration step size and noise specification for stochastic
            methods. It is used to compute the time courses of the model state
            variables.""")

    initial_conditions = NArray(
        label="Initial Conditions",
        required=False,
        doc="""Initial conditions from which the simulation will begin. By
        default, random initial conditions are provided. Needs to be the same shape
        as the simulator's 'history', i.e. the initial history which defines the
        minimal initial state of the network with time delays before time t=0.
        If the number of time points in the provided array is insufficient the 
        array will be padded with random values based on the 'state_variables_range'
        attribute.""")

    monitors = List(
        of=monitors.Monitor,
        label="Monitor(s)",
        default=(monitors.TemporalAverage(), ),
        doc="""A tvb.simulator.Monitor or a list of tvb.simulator.Monitor
        objects that 'know' how to record relevant data from the simulation. Two
        main types exist: 1) simple, spatial and temporal, reductions (subsets
        or averages); 2) physiological measurements, such as EEG, MEG and fMRI.
        By default the Model's specified variables_of_interest are returned,
        temporally downsampled from the raw integration rate to a sample rate of
        1024Hz.""")

    simulation_length = Float(
        label="Simulation Length (ms, s, m, h)",
        default=1000.0,  # ie 1 second
        required=True,
        doc="""The length of a simulation (default in milliseconds).""")

    history = None  # type: SparseHistory

    @property
    def good_history_shape(self):
        """Returns expected history shape."""
        n_reg = self.connectivity.number_of_regions
        shape = self.horizon, len(
            self.model.state_variables), n_reg, self.model.number_of_modes
        return shape

    calls = 0
    current_step = 0
    number_of_nodes = None
    _memory_requirement_guess = None
    _memory_requirement_census = None
    _storage_requirement = None
    _runtime = None

    # methods consist of
    # 1) generic configure
    # 2) component specific configure
    # 3) loop preparation
    # 4) loop step
    # 5) estimations

    @property
    def is_surface_simulation(self):
        if self.surface:
            return True
        return False

    def _configure_integrator_boundaries(self):
        if self.model.state_variable_boundaries is not None:
            indices = []
            boundaries = []
            for sv, sv_bounds in self.model.state_variable_boundaries.items():
                indices.append(self.model.state_variables.index(sv))
                boundaries.append(sv_bounds)
            sort_inds = numpy.argsort(indices)
            self.integrator.bounded_state_variable_indices = numpy.array(
                indices)[sort_inds]
            self.integrator.state_variable_boundaries = numpy.array(
                boundaries).astype("float64")[sort_inds]
        else:
            self.integrator.bounded_state_variable_indices = None
            self.integrator.state_variable_boundaries = None

    def preconfigure(self):
        """Configure just the basic fields, so that memory can be estimated."""
        self.connectivity.configure()
        if self.surface:
            self.surface.configure()
        if self.stimulus:
            self.stimulus.configure()
        self.coupling.configure()
        self.model.configure()
        self.integrator.configure()
        self._configure_integrator_boundaries()
        # monitors needs to be a list or tuple, even if there is only one...
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.configure()
        # "Nodes" refers to either regions or vertices + non-cortical regions.
        if self.surface is None:
            self.number_of_nodes = self.connectivity.number_of_regions
            self.log.info('Region simulation with %d ROI nodes',
                          self.number_of_nodes)
        else:
            rm = self.surface.region_mapping
            unmapped = self.connectivity.unmapped_indices(rm)
            self._regmap = numpy.r_[rm, unmapped]
            self.number_of_nodes = self._regmap.shape[0]
            self.log.info(
                'Surface simulation with %d vertices + %d non-cortical, %d total nodes',
                rm.size, unmapped.size, self.number_of_nodes)
        self._guesstimate_memory_requirement()

    def configure(self, full_configure=True):
        """Configure simulator and its components.

        The first step of configuration is to run the configure methods of all
        the Simulator's components, ie its traited attributes.

        Configuration of a Simulator primarily consists of calculating the
        attributes, etc, which depend on the combinations of the Simulator's
        traited attributes (keyword args).

        Converts delays from physical time units into integration steps
        and updates attributes that depend on combinations of the 6 inputs.

        Returns
        -------
        sim: Simulator
            The configured Simulator instance.

        """
        if full_configure:
            # When run from GUI, preconfigure is run separately, and we want to avoid running that part twice
            self.preconfigure()
        # Make sure spatialised model parameters have the right shape (number_of_nodes, 1)
        # todo: this exclusion list is fragile, consider excluding declarative attrs that are not arrays
        excluded_params = ("state_variable_range", "state_variable_boundaries",
                           "variables_of_interest", "noise", "psi_table",
                           "nerf_table", "gid")
        spatial_reshape = self.model.spatial_param_reshape
        for param in type(self.model).declarative_attrs:
            if param in excluded_params:
                continue
            # If it's a surface sim and model parameters were provided at the region level
            region_parameters = getattr(self.model, param)
            if self.surface is not None:
                if region_parameters.size == self.connectivity.number_of_regions:
                    new_parameters = region_parameters[
                        self.surface.region_mapping].reshape(spatial_reshape)
                    setattr(self.model, param, new_parameters)
            region_parameters = getattr(self.model, param)
            if region_parameters.size == self.number_of_nodes:
                new_parameters = region_parameters.reshape(spatial_reshape)
                setattr(self.model, param, new_parameters)
        # Configure spatial component of any stimuli
        self._configure_stimuli()
        # Set delays, provided in physical units, in integration steps.
        self.connectivity.set_idelays(self.integrator.dt)
        self.horizon = self.connectivity.idelays.max() + 1
        # Reshape integrator.noise.nsig, if necessary.
        if isinstance(self.integrator, integrators.IntegratorStochastic):
            self._configure_integrator_noise()
        # Setup history
        self._configure_history(self.initial_conditions)
        # Configure Monitors to work with selected Model, etc...
        self._configure_monitors()
        # Estimate of memory usage.
        self._census_memory_requirement()
        # Allow user to chain configure to another call or assignment.
        return self

    def _handle_random_state(self, random_state):
        if random_state is not None:
            if isinstance(self.integrator, integrators.IntegratorStochastic):
                self.integrator.noise.random_stream.set_state(random_state)
                msg = "random_state supplied with seed %s"
                self.log.info(
                    msg,
                    self.integrator.noise.random_stream.get_state()[1][0])
            else:
                self.log.warning(
                    "random_state supplied for non-stochastic integration")

    def _prepare_local_coupling(self):
        if self.surface is None:
            local_coupling = 0.0
        else:
            if self.surface.coupling_strength.size == 1:
                local_coupling = (self.surface.coupling_strength[0] *
                                  self.surface.local_connectivity.matrix)
            elif self.surface.coupling_strength.size == self.surface.number_of_vertices:
                ind = numpy.arange(self.number_of_nodes, dtype=numpy.intc)
                vec_cs = numpy.zeros((self.number_of_nodes, ))
                vec_cs[:self.surface.
                       number_of_vertices] = self.surface.coupling_strength
                sp_cs = scipy.sparse.csc_matrix(
                    (vec_cs, (ind, ind)),
                    shape=(self.number_of_nodes, self.number_of_nodes))
                local_coupling = sp_cs * self.surface.local_connectivity.matrix
            if local_coupling.shape[1] < self.number_of_nodes:
                # must match unmapped indices handling in preconfigure
                from scipy.sparse import csr_matrix, vstack, hstack
                nn = self.number_of_nodes
                npad = nn - local_coupling.shape[0]
                rpad = csr_matrix((local_coupling.shape[0], npad))
                bpad = csr_matrix((npad, nn))
                local_coupling = vstack([hstack([local_coupling, rpad]), bpad])
        return local_coupling

    def _prepare_stimulus(self):
        if self.stimulus is None:
            stimulus = 0.0
        else:
            time = numpy.r_[0.0:self.simulation_length:self.integrator.dt]
            self.stimulus.configure_time(time.reshape((1, -1)))
            stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
            self.log.debug("stimulus shape is: %s", stimulus.shape)
        return stimulus

    def _loop_compute_node_coupling(self, step):
        """Compute delayed node coupling values."""
        coupling = self.coupling(step, self.history)
        if self.surface is not None:
            coupling = coupling[:, self._regmap]
        return coupling

    def _loop_update_stimulus(self, step, stimulus):
        """Update stimulus values for current time step."""
        if self.stimulus is not None:
            # TODO stim_step != current step
            stim_step = step - (self.current_step + 1)
            stimulus[self.model.stvar, :, :] = self.stimulus(
                stim_step).reshape((1, -1, 1))

    def _loop_update_history(self, step, n_reg, state):
        """Update history."""
        if self.surface is not None and state.shape[
                1] > self.connectivity.number_of_regions:
            region_state = numpy.zeros(
                (n_reg, state.shape[0],
                 state.shape[2]))  # temp (node, cvar, mode)
            numpy_add_at(region_state, self._regmap, state.transpose(
                (1, 0, 2)))  # sum within region
            region_state /= numpy.bincount(self._regmap).reshape(
                (-1, 1, 1))  # div by n node in region
            state = region_state.transpose((1, 0, 2))  # (cvar, node, mode)
        self.history.update(step, state)

    def _loop_monitor_output(self, step, state):
        observed = self.model.observe(state)
        output = [monitor.record(step, observed) for monitor in self.monitors]
        if any(outputi is not None for outputi in output):
            return output

    def __call__(self, simulation_length=None, random_state=None):
        """
        Return an iterator which steps through simulation time, generating monitor outputs.

        See the run method for a convenient way to collect all output in one call.

        :param simulation_length: Length of the simulation to perform in ms.
        :param random_state:  State of NumPy RNG to use for stochastic integration.
        :return: Iterator over monitor outputs.
        """

        self.calls += 1
        if simulation_length is not None:
            self.simulation_length = float(simulation_length)

        # initialization
        self._guesstimate_runtime()
        self._calculate_storage_requirement()
        self._handle_random_state(random_state)
        n_reg = self.connectivity.number_of_regions
        local_coupling = self._prepare_local_coupling()
        stimulus = self._prepare_stimulus()
        state = self.current_state

        # integration loop
        n_steps = int(math.ceil(self.simulation_length / self.integrator.dt))
        for step in range(self.current_step + 1,
                          self.current_step + n_steps + 1):
            # needs implementing by history + coupling?
            node_coupling = self._loop_compute_node_coupling(step)
            self._loop_update_stimulus(step, stimulus)
            state = self.integrator.scheme(state, self.model.dfun,
                                           node_coupling, local_coupling,
                                           stimulus)
            self._loop_update_history(step, n_reg, state)
            output = self._loop_monitor_output(step, state)
            if output is not None:
                yield output

        self.current_state = state
        self.current_step = self.current_step + n_steps

    def _configure_history(self, initial_conditions):
        """
        Set initial conditions for the simulation using either the provided
        initial_conditions or, if none are provided, the model's initial()
        method. This method is called during the Simulator's __init__().

        Any initial_conditions that are provided as an argument are expected
        to have dimensions 1, 2, and 3 with shapes corresponding to the number
        of state_variables, nodes and modes, respectively. If the provided
        initial_conditions are shorter in time (dim=0) than the required history,
        the model's initial() method is called to make up the difference.

        """
        rng = numpy.random
        if hasattr(self.integrator, 'noise'):
            rng = self.integrator.noise.random_stream
        # Default initial conditions
        if initial_conditions is None:
            n_time, n_svar, n_node, n_mode = self.good_history_shape
            self.log.info(
                'Preparing initial history of shape %r using model.initial()',
                self.good_history_shape)
            if self.surface is not None:
                n_node = self.number_of_nodes
            history = self.model.initial(self.integrator.dt,
                                         (n_time, n_svar, n_node, n_mode), rng)
        # ICs provided
        else:
            # history should be [timepoints, state_variables, nodes, modes]
            self.log.info('Using provided initial history of shape %r',
                          initial_conditions.shape)
            n_time, n_svar, n_node, n_mode = ic_shape = initial_conditions.shape
            nr = self.connectivity.number_of_regions
            if self.surface is not None and n_node == nr:
                initial_conditions = initial_conditions[:, :, self._regmap]
                return self._configure_history(initial_conditions)
            elif ic_shape[1:] != self.good_history_shape[1:]:
                raise ValueError(
                    "Incorrect history sample shape %s, expected %s" %
                    (ic_shape[1:], self.good_history_shape[1:]))
            else:
                if ic_shape[0] >= self.horizon:
                    self.log.debug("Using last %d time-steps for history.",
                                   self.horizon)
                    history = initial_conditions[
                        -self.horizon:, :, :, :].copy()
                else:
                    self.log.debug(
                        'Padding initial conditions with model.initial')
                    history = self.model.initial(self.integrator.dt,
                                                 self.good_history_shape, rng)
                    shift = self.current_step % self.horizon
                    history = numpy.roll(history, -shift, axis=0)
                    history[:ic_shape[0], :, :, :] = initial_conditions
                    history = numpy.roll(history, shift, axis=0)
                self.current_step += ic_shape[0] - 1

        if self.integrator.state_variable_boundaries is not None:
            self.integrator.bound_state(numpy.swapaxes(history, 0, 1))
        self.log.info('Final initial history shape is %r', history.shape)

        # create initial state from history
        self.current_state = history[self.current_step % self.horizon].copy()
        self.log.debug('initial state has shape %r' %
                       (self.current_state.shape, ))
        if self.surface is not None and history.shape[
                2] > self.connectivity.number_of_regions:
            n_reg = self.connectivity.number_of_regions
            (nt, ns, _, nm), ax = history.shape, (2, 0, 1, 3)
            region_history = numpy.zeros((nt, ns, n_reg, nm))
            numpy_add_at(region_history.transpose(ax), self._regmap,
                         history.transpose(ax))
            region_history /= numpy.bincount(self._regmap).reshape((-1, 1))
            history = region_history
        # create history query implementation
        self.history = SparseHistory(self.connectivity.weights,
                                     self.connectivity.idelays,
                                     self.model.cvar,
                                     self.model.number_of_modes)
        # initialize its buffer
        self.history.initialize(history)

    def _configure_integrator_noise(self):
        """
        This enables noise to be state-variable specific and/or to enter
        only via specific brain structures, for example if we only want to
        consider noise as an external input entering the brain via appropriate
        thalamic nuclei.

        Support 3 possible shapes:
            1) number_of_nodes;

            2) number_of_state_variables; and 

            3) (number_of_state_variables, number_of_nodes).

        """

        noise = self.integrator.noise

        if self.integrator.noise.ntau > 0.0:
            self.integrator.noise.configure_coloured(
                self.integrator.dt, self.good_history_shape[1:])
        else:
            self.integrator.noise.configure_white(self.integrator.dt,
                                                  self.good_history_shape[1:])

        if self.surface is not None:
            if self.integrator.noise.nsig.size == self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[
                    self.surface.region_mapping]
            elif self.integrator.noise.nsig.size == self.model.nvar * self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[
                    :, self.surface.region_mapping]

        good_nsig_shape = (self.model.nvar, self.number_of_nodes,
                           self.model.number_of_modes)
        nsig = self.integrator.noise.nsig
        self.log.debug("Given noise shape is %s", nsig.shape)
        if nsig.shape in (good_nsig_shape, (1, )):
            return
        elif nsig.shape == (self.model.nvar, ):
            nsig = nsig.reshape((self.model.nvar, 1, 1))
        elif nsig.shape == (self.number_of_nodes, ):
            nsig = nsig.reshape((1, self.number_of_nodes, 1))
        elif nsig.shape == (self.model.nvar, self.number_of_nodes):
            nsig = nsig.reshape((self.model.nvar, self.number_of_nodes, 1))
        else:
            msg = "Bad Simulator.integrator.noise.nsig shape: %s"
            self.log.error(msg % str(nsig.shape))

        self.log.debug("Corrected noise shape is %s", nsig.shape)
        self.integrator.noise.nsig = nsig

    def _configure_monitors(self):
        """ Configure the requested Monitors for this Simulator """
        # Coerce to list if required
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.config_for_sim(self)

    def _configure_stimuli(self):
        """ Configure the defined Stimuli for this Simulator """
        if self.stimulus is not None:
            if self.surface:
                self.stimulus.configure_space(self.surface.region_mapping)
            else:
                self.stimulus.configure_space()

    # used by simulator adaptor
    def memory_requirement(self):
        """
        Return an estimated of the memory requirements (Bytes) for this
        simulator's current configuration.
        """
        self._guesstimate_memory_requirement()
        return self._memory_requirement_guess

    # appears to be unused
    def runtime(self, simulation_length):
        """
        Return an estimated run time (seconds) for the simulator's current 
        configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._guesstimate_runtime()
        return self._runtime

    # used by simulator adaptor
    def storage_requirement(self):
        """
        Return an estimated storage requirement (Bytes) for the simulator's
        current configuration and a specified simulation length.

        """
        self._calculate_storage_requirement()
        return self._storage_requirement

    def _guesstimate_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator.

        Guesstimate is based on the shape of the dominant arrays, and as such
        can operate before configuration.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant 
            memory pigs...

        """
        if self.surface:
            number_of_nodes = self.surface.number_of_vertices
        else:
            number_of_nodes = self.connectivity.number_of_regions

        number_of_regions = self.connectivity.number_of_regions

        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        bits_64 = 8.0  # Bytes
        bits_32 = 4.0  # Bytes
        # NOTE: The speed hack for getting the first element of hist shape
        #     partially resolves calls to this method with a non-configured
        #     connectivity; there remains the less common issue of missing tract_lengths...
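        # History length in integration steps: the longest tract delay
        # (max tract length / conduction speed) divided by dt.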
        hist_shape = (
            self.connectivity.tract_lengths.max() /
            (self.conduction_speed or self.connectivity.speed or 3.0) /
            self.integrator.dt, self.model.nvar, number_of_nodes,
            self.model.number_of_modes)
        self.log.debug("Estimated history shape is %r", hist_shape)

        memreq = numpy.prod(hist_shape) * bits_64
        if self.surface:
            memreq += self.surface.number_of_triangles * 3 * bits_32 * 2  # triangles + triangle normals
            memreq += self.surface.number_of_vertices * 3 * bits_64 * 2  # vertices + vertex normals
            memreq += number_of_nodes * number_of_regions * bits_64 * 4  # region_mapping, region_average, region_sum
            # ???memreq += self.surface.local_connectivity.matrix.nnz * 8

        if not hasattr(self.monitors, '__len__'):
            self.monitors = [self.monitors]

        for monitor in self.monitors:
            if not isinstance(monitor, monitors.Bold):
                stock_shape = (monitor.period / self.integrator.dt,
                               len(self.model.variables_of_interest),
                               number_of_nodes, self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                if hasattr(monitor, "sensors"):
                    try:
                        memreq += number_of_nodes * monitor.sensors.number_of_sensors * bits_64  # projection_matrix
                    except AttributeError:
                        self.log.debug(
                            "No sensors specified, guessing memory based on default EEG."
                        )
                        memreq += number_of_nodes * 62.0 * bits_64

            else:
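                # Bold keeps both a full HRF-length stock and an interim,
                # down-sampled stock; both are counted here.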
                stock_shape = (monitor.hrf_length * monitor._stock_sample_rate,
                               len(self.model.variables_of_interest),
                               number_of_nodes, self.model.number_of_modes)
                interim_stock_shape = (1.0 / (2.0**-2 * self.integrator.dt),
                                       len(self.model.variables_of_interest),
                                       number_of_nodes,
                                       self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                memreq += numpy.prod(interim_stock_shape) * bits_64

        if psutil and memreq > psutil.virtual_memory().total:
            self.log.warning(
                "There may be insufficient memory for this simulation.")

        self._memory_requirement_guess = magic_number * memreq
        msg = "Memory requirement estimate: simulation will need about %.1f MB"
        self.log.info(msg, self._memory_requirement_guess / 2**20)

    def _census_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator. 

        Guesstimate is based on a census of the dominant arrays after the
        simulator has been configured.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant 
            memory pigs...

        """
        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        memreq = self.history.nbytes
        try:
            memreq += self.surface.triangles.nbytes * 2
            memreq += self.surface.vertices.nbytes * 2
            memreq += self.surface.region_mapping.nbytes * self.number_of_nodes * 8. * 4  # region_average, region_sum
            memreq += self.surface.local_connectivity.matrix.nnz * 8
        except AttributeError:
            pass

        for monitor in self.monitors:
            memreq += monitor._stock.nbytes
            if isinstance(monitor, monitors.Bold):
                memreq += monitor._interim_stock.nbytes

        if psutil and memreq > psutil.virtual_memory().total:
            self.log.warning("Memory estimate exceeds total available RAM.")

        self._memory_requirement_census = magic_number * memreq
        msg = "Memory requirement census: simulation will need about %.1f MB"
        self.log.info(msg, self._memory_requirement_census / 2**20)

    def _guesstimate_runtime(self):
        """
        Estimate the runtime for this simulator.

        Parallel execution over larger arrays means this will be an
        over-estimate; it is effectively a single-threaded estimate. Different
        choices of integrator and monitors also affect the magic number,
        though relatively little.

        """
        magic_number = 6.57e-06  # seconds
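        # Runtime ~ magic_number seconds per state-variable update, times
        # nodes * nvar * modes updates per step, times simulation_length / dt steps.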
        self._runtime = (magic_number * self.number_of_nodes *
                         self.model.nvar * self.model.number_of_modes *
                         self.simulation_length / self.integrator.dt)
        msg = "Simulation runtime should be about %0.3f seconds"
        self.log.info(msg, self._runtime)

    def _calculate_storage_requirement(self):
        """
        Calculate the storage requirement for the simulator, configured with
        models, monitors, etc being run for a particular simulation length. 
        While this is only approximate, it is far more reliable/accurate than
        the memory and runtime guesstimates.
        """
        self.log.info("Calculating storage requirement for ...")
        strgreq = 0
        for monitor in self.monitors:
            # Avoid division by zero for monitor not yet configured
            # (in framework this is executed, when only preconfigure has been called):
            current_period = monitor.period or self.integrator.dt
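            # Samples recorded per monitor: simulation_length / period; each
            # sample stores nvar * number_of_nodes * number_of_modes values.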
            strgreq += (TvbProfile.current.MAGIC_NUMBER *
                        self.simulation_length * self.number_of_nodes *
                        self.model.nvar * self.model.number_of_modes /
                        current_period)
        self.log.info("Calculated storage requirement for simulation: %d " %
                      int(strgreq))
        self._storage_requirement = int(strgreq)

    def run(self, **kwds):
        """Convenience method to call the simulator with **kwds and collect output data."""
        ts, xs = [], []
        for _ in self.monitors:
            ts.append([])
            xs.append([])
        wall_time_start = time.time()
        for data in self(**kwds):
            for tl, xl, t_x in zip(ts, xs, data):
                if t_x is not None:
                    t, x = t_x
                    tl.append(t)
                    xl.append(x)
        elapsed_wall_time = time.time() - wall_time_start
        self.log.info("%.3f s elapsed, %.3fx real time", elapsed_wall_time,
                      elapsed_wall_time * 1e3 / self.simulation_length)
        for i in range(len(ts)):
            ts[i] = numpy.array(ts[i])
            xs[i] = numpy.array(xs[i])
        return list(zip(ts, xs))
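
For context, a minimal end-to-end sketch of driving run() (a sketch assuming TVB's tvb.simulator.lab convenience imports and the bundled default connectivity dataset; the parameter values are illustrative, not recommendations):

from tvb.simulator.lab import (connectivity, coupling, integrators, models,
                               monitors, simulator)

sim = simulator.Simulator(
    model=models.Generic2dOscillator(),
    connectivity=connectivity.Connectivity.from_file(),  # bundled default dataset
    coupling=coupling.Linear(),
    integrator=integrators.HeunDeterministic(dt=0.1),
    monitors=(monitors.TemporalAverage(period=1.0),),  # integral multiple of dt
    simulation_length=1000.0)
sim.configure()

# run() returns one (times, data) pair per configured monitor.
(tavg_time, tavg_data), = sim.run()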
Example n. 28
class Integrator(HasTraits):
    """
    The Integrator class is a base class for the integration methods...

    .. [1] Kloeden and Platen, Springer 1995, *Numerical solution of stochastic
        differential equations.*

    .. [2] Riccardo Mannella, *Integration of Stochastic Differential Equations
        on a Computer*, Int J. of Modern Physics C 13(9): 1177--1194, 2002.

    .. [3] R. Mannella and V. Palleschi, *Fast and precise algorithm for 
        computer simulation of stochastic differential equations*, Phys. Rev. A
        40: 3381, 1989.

    """

    dt = Float(
        label="Integration-step size (ms)",
        default=0.01220703125,
        required=True,
        doc="""The step size used by the integration routine in ms. This
        should be chosen to be small enough for the integration to be
        numerically stable. It is also necessary to consider the desired sample
        period of the Monitors, as they are restricted to being integral
        multiples of this value. The default value is set such that all built-in
        models are numerically stable with their default parameters, and because
        it is consistent with Monitors using sample periods corresponding to
        powers of 2 from 128 to 4096 Hz.""")

    bounded_state_variable_indices = NArray(
        dtype=int,
        label="indices of the state variables to be bounded by the integrators "
        "within the boundaries in the boundaries' values array",
        required=False)

    state_variable_boundaries = NArray(
        label="The boundary values of the state variables", required=False)

    clamped_state_variable_indices = NArray(
        dtype=int,
        label="indices of the state variables to be clamped by the integrators "
        "to the values in the clamped_values array",
        required=False)

    clamped_state_variable_values = NArray(
        label="The values of the state variables which are clamped ",
        required=False)

    _bounded_integration_state_variable_indices = None
    _integration_state_variable_boundaries = None
    _clamped_integration_state_variable_indices = None
    _clamped_integration_state_variable_values = None

    @abc.abstractmethod
    def scheme(self, X, dfun, coupling, local_coupling, stimulus):
        """
        The scheme of an integrator should take a state and provide the next
        state in time, e.g. for a differential equation, scheme should take
        :math:`X` and provide an appropriate :math:`X + dX` (dfun in the code).

        """

    def set_random_state(self, random_state):
        self.log.warning(
            "random_state supplied for non-stochastic integration")

    def configure(self):
        # Set default configurations:
        self._clamped_integration_state_variable_indices = self.clamped_state_variable_indices
        self._clamped_integration_state_variable_values = self.clamped_state_variable_values
        self._bounded_integration_state_variable_indices = self.bounded_state_variable_indices
        self._integration_state_variable_boundaries = self.state_variable_boundaries
        super(Integrator, self).configure()

    def configure_boundaries(self, model):
        if model.state_variable_boundaries is not None:
            indices = []
            boundaries = []
            for sv, sv_bounds in model.state_variable_boundaries.items():
                indices.append(model.state_variables.index(sv))
                boundaries.append(sv_bounds)
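            # Sort by state-variable index so boundaries stay aligned with indices.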
            sort_inds = numpy.argsort(indices)
            self.bounded_state_variable_indices = numpy.array(
                indices)[sort_inds]
            self.state_variable_boundaries = numpy.array(boundaries).astype(
                "float64")[sort_inds]
            self._bounded_integration_state_variable_indices = numpy.copy(
                self.bounded_state_variable_indices)
            self._integration_state_variable_boundaries = numpy.copy(
                self.state_variable_boundaries)

    def reconfigure_boundaries_and_clamping_for_integration_state_variables(
            self, model):
        integration_state_variable_indices = numpy.where(
            model.state_variable_mask)[0].tolist()
        if self.state_variable_boundaries is not None:
            # If there are any state_variable_boundaries...
            bounded_integration_state_variable_indices = []
            integration_state_variable_boundaries = []
            # ...for each one of the bounded state variable indices and boundary values...
            for bound_sv_ind, bounds in zip(
                    self._bounded_integration_state_variable_indices,
                    self.state_variable_boundaries):
                # ...if the boundary index corresponds to an integrated state variable...
                if bound_sv_ind in integration_state_variable_indices:
                    # ...add its integration state vector index...
                    bounded_integration_state_variable_indices.append(
                        integration_state_variable_indices.index(bound_sv_ind))
                    # ...and the corresponding boundaries
                    integration_state_variable_boundaries.append(bounds)
            self._bounded_integration_state_variable_indices = \
                numpy.array(bounded_integration_state_variable_indices)
            self._integration_state_variable_boundaries = \
                numpy.array(integration_state_variable_boundaries)
        if self.clamped_state_variable_values is not None:
            # If there are any clamped values...
            clamped_integration_state_variable_indices = []
            clamped_integration_state_variable_values = []
            # ...for each one of the clamped state variable indices and clamped values...
            for clamp_sv_ind, clampval in zip(
                    self.clamped_state_variable_indices,
                    self.clamped_state_variable_values):
                # ...if the clamped index corresponds to an integrated state variable...
                if clamp_sv_ind in integration_state_variable_indices:
                    # ...add its integration state vector index...
                    clamped_integration_state_variable_indices.append(
                        integration_state_variable_indices.index(clamp_sv_ind))
                    # ...and the corresponding clamped value
                    clamped_integration_state_variable_values.append(clampval)
            self._clamped_integration_state_variable_indices = \
                numpy.array(clamped_integration_state_variable_indices)
            self._clamped_integration_state_variable_values = \
                numpy.array(clamped_integration_state_variable_values)

    def _bound_state(self, X, indices, boundaries):
        for sv_ind, sv_bounds in zip(indices, boundaries):
            if sv_bounds[0] is not None:
                X[sv_ind][X[sv_ind] < sv_bounds[0]] = sv_bounds[0]
            if sv_bounds[1] is not None:
                X[sv_ind][X[sv_ind] > sv_bounds[1]] = sv_bounds[1]

    def bound_state(self, X):
        self._bound_state(X, self.bounded_state_variable_indices,
                          self.state_variable_boundaries)

    def bound_integration_state(self, X):
        self._bound_state(X, self._bounded_integration_state_variable_indices,
                          self._integration_state_variable_boundaries)

    def clamp_state(self, X):
        X[self.clamped_state_variable_indices] = self.clamped_state_variable_values

    def clamp_integration_state(self, X):
        X[self._clamped_integration_state_variable_indices] = \
            self._clamped_integration_state_variable_values

    def bound_and_clamp(self, state):
        # If there is a state boundary...
        if self.state_variable_boundaries is not None:
            # ...use the integrator's bound_state
            self.bound_state(state)
        # If there is a state clamping...
        if self.clamped_state_variable_values is not None:
            # ...use the integrator's clamp_state
            self.clamp_state(state)

    def integration_bound_and_clamp(self, state):
        # If there is a state boundary...
        if self._integration_state_variable_boundaries is not None:
            # ...use the integrator's bound_state
            self.bound_integration_state(state)
        # If there is a state clamping...
        if self._clamped_integration_state_variable_values is not None:
            # ...use the integrator's clamp_state
            self.clamp_integration_state(state)

    def integrate_with_update(self, X, model, coupling, local_coupling,
                              stimulus):
        temp = model.update_state_variables_before_integration(
            X, coupling, local_coupling, stimulus)
        if temp is not None:
            X = temp
            self.bound_and_clamp(X)
        X = self.integrate(X, model, coupling, local_coupling, stimulus)
        temp = model.update_state_variables_after_integration(X)
        if temp is not None:
            X = temp
            self.bound_and_clamp(X)
        return X

    def integrate(self, X, model, coupling, local_coupling, stimulus):
        X[model.state_variable_mask] = self.scheme(
            X[model.state_variable_mask], model.dfun, coupling, local_coupling,
            stimulus)
        return X

    def __str__(self):
        return simple_gen_astr(self, 'dt')
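
For illustration, a minimal deterministic subclass satisfying the scheme contract above (a sketch only; EulerSketch is a hypothetical name, not TVB's shipped Euler integrator):

class EulerSketch(Integrator):
    """Explicit Euler step: X_next = X + dt * (dX + stimulus)."""

    def scheme(self, X, dfun, coupling, local_coupling, stimulus):
        # dfun evaluates the model's right-hand side at the current state.
        dX = dfun(X, coupling, local_coupling)
        X_next = X + self.dt * (dX + stimulus)
        # Respect any boundaries/clamping configured for the integrated variables.
        self.integration_bound_and_clamp(X_next)
        return X_next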
Example n. 29
import numpy as np

class A(HasTraits):
    a = Float()
    b = Float(field_type=np.float32)
    c = Float(field_type=np.float16)
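
A hypothetical usage sketch (assuming, as the field_type arguments above suggest, that Float stores its value as the given numpy scalar type; the attribute values are illustrative):

a = A()
a.a = 1.0
a.b = np.float32(0.5)
a.c = np.float16(0.25)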
Example n. 30
class FourierSpectrum(HasTraits):
    """
    Result of a Fourier analysis.
    """
    # Overwrite attribute from superclass
    array_data = NArray(dtype=numpy.complex128)

    source = Attr(
        field_type=time_series.TimeSeries,
        label="Source time-series",
        doc="Links to the time-series on which the FFT is applied.")

    segment_length = Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segment length determines the frequency resolution of the
            resulting spectra.""")

    windowing_function = Attr(
        field_type=str,
        required=False,
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
            application of the FFT.""")

    amplitude = NArray(label="Amplitude")

    phase = NArray(label="Phase")

    power = NArray(label="Power")

    average_power = NArray(label="Average Power")

    normalised_average_power = NArray(label="Normalised Power", required=False)

    _frequency = None
    _freq_step = None
    _max_freq = None

    def configure(self):
        """ compute dependent fields like amplitude """
        self.compute_amplitude()
        self.compute_phase()
        self.compute_average_power()
        self.compute_normalised_average_power()

    def summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        return {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Segment length": self.segment_length,
            "Windowing function": self.windowing_function,
            "Frequency step": self.freq_step,
            "Maximum frequency": self.max_freq
        }

    @property
    def freq_step(self):
        """ Frequency step size of the complex Fourier spectrum."""
        if self._freq_step is None:
            self._freq_step = 1.0 / self.segment_length
            msg = "%s: Frequency step size is %s"
            self.log.debug(msg % (str(self), str(self._freq_step)))
        return self._freq_step

    @property
    def max_freq(self):
        """ Amplitude of the complex Fourier spectrum."""
        if self._max_freq is None:
            self._max_freq = 0.5 / self.source.sample_period
            msg = "%s: Max frequency is %s"
            self.log.debug(msg % (str(self), str(self._max_freq)))
        return self._max_freq

    @property
    def frequency(self):
        """ Frequencies represented the complex Fourier spectrum."""
        if self._frequency is None:
            self._frequency = numpy.arange(self.freq_step,
                                           self.max_freq + self.freq_step,
                                           self.freq_step)
        return self._frequency

    def compute_amplitude(self):
        """ Amplitude of the complex Fourier spectrum."""
        self.amplitude = numpy.abs(self.array_data)

    def compute_phase(self):
        """ Phase of the Fourier spectrum."""
        self.phase = numpy.angle(self.array_data)

    def compute_power(self):
        """ Power of the complex Fourier spectrum."""
        self.power = numpy.abs(self.array_data) ** 2

    def compute_average_power(self):
        """ Average-power of the complex Fourier spectrum."""
        self.average_power = numpy.mean(numpy.abs(self.array_data) ** 2, axis=-1)

    def compute_normalised_average_power(self):
        """ Normalised-average-power of the complex Fourier spectrum."""
        self.normalised_average_power = (self.average_power /
                                         numpy.sum(self.average_power, axis=0))
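
As a closing illustration, the same spectral quantities computed with plain numpy (a standalone sketch mirroring the compute_* methods above, without the traits machinery; the input spectrum is synthetic):

import numpy

# Synthetic complex spectrum, shape (frequency, segment).
spec = numpy.fft.rfft(numpy.random.randn(256, 4), axis=0)

amplitude = numpy.abs(spec)
phase = numpy.angle(spec)
power = numpy.abs(spec) ** 2
average_power = numpy.mean(power, axis=-1)  # mean over segments
normalised_average_power = average_power / numpy.sum(average_power, axis=0)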