class HyperbolicTangent(Coupling):
    """
    Hyperbolic tangent coupling function.

    """

    a = arrays.FloatArray(
        label = ":math:`a`", 
        default = numpy.array([0.0]),
        range = basic.Range(lo = -1000.0, hi = 1000.0, step = 10.0),
        doc = """Minimum of the sigmoid function""",
        order = 1)

    midpoint = arrays.FloatArray(
        label = "midpoint", 
        default = numpy.array([0.0,]),
        range = basic.Range(lo = -1000.0, hi = 1000.0, step = 10.0),
        doc = """Midpoint of the linear portion of the sigmoid""",
        order = 3)

    sigma = arrays.FloatArray(
        label = r":math:`\sigma`",
        default = numpy.array([1.0,]),
        range = basic.Range(lo = 0.01, hi = 1000.0, step = 10.0),
        doc = """Standard deviation of the ...""",
        order = 4)

    normalise = basic.Bool(
        label = "normalise by in-strength",
        default = True,
        doc = """Normalise the node coupling by the node's in-strenght""",
        order = 4)


    def __call__(self, g_ij, x_i, x_j):
        r"""
        Evaluate the hyperbolic tangent coupling for the delayed state ``x_j``.
        The equation being evaluated has the following form:

            .. math::
                a \left[1 + \tanh\left(\frac{x - midpoint}{\sigma}\right)\right]

        """
        temp = self.a * (1 + numpy.tanh((x_j - self.midpoint) / self.sigma))

        if self.normalise:  # NOTE: normalisation adds an extra reduction over the weights
            # Normalising by the in-strength can divide by zero and yield NaNs,
            # so zero in-strengths are replaced with inf, making those weights 0.
            in_strength = g_ij.sum(axis=2)[:, :, numpy.newaxis, :]
            in_strength[in_strength == 0] = numpy.inf
            temp *= (g_ij / in_strength)  # region-mode normalisation
            
            coupled_input = temp.mean(axis=0)
        else: 
            coupled_input = (g_ij*temp).mean(axis=0)
        
        return coupled_input
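
# Illustrative sketch (not part of the original class): the same tanh transform and
# in-strength normalisation written with plain numpy on a toy 2-D weight matrix.
# The name `_demo_tanh_coupling` and the (nodes, nodes) layout are assumptions made
# here for illustration; the simulator itself passes the 4-D arrays seen in __call__.
def _demo_tanh_coupling():
    import numpy
    a, midpoint, sigma = 1.0, 0.0, 1.0                 # toy values (the default a is 0.0)
    weights = numpy.array([[0.0, 1.0], [2.0, 0.0]])    # toy connectivity g_ij
    x_j = numpy.array([0.5, -0.5])                     # toy delayed state per node
    temp = a * (1 + numpy.tanh((x_j - midpoint) / sigma))
    in_strength = weights.sum(axis=1, keepdims=True)
    in_strength[in_strength == 0] = numpy.inf          # avoid division by zero
    return (weights / in_strength * temp).sum(axis=1)  # normalised coupled input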

# Example 2

class SigmoidalJansenRit(Coupling):
    r"""
    Provides a sigmoidal coupling function as described in the
    Jansen and Rit model, of the form

    .. math::
        \frac{c_{max}}{1 + \exp\left(r\,(midpoint - (x_{j,0} - x_{j,1}))\right)}

    applied to each afferent node before summation, with the weighted sum then
    scaled by :math:`a`. Assumes that x has two state variables.

    """

    cmin = arrays.FloatArray(
        label=":math:`c_{min}`",
        default=numpy.array([0.0,]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Minimum of the sigmoid function",
        order=1)

    cmax = arrays.FloatArray(
        label=":math:`c_{max}`",
        default=numpy.array([2.0 * 0.0025,]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Maximum of the sigmoid function",
        order=2)

    midpoint = arrays.FloatArray(
        label="midpoint",
        default=numpy.array([6.0,]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Midpoint of the linear portion of the sigmoid",
        order=3)

    r = arrays.FloatArray(
        label=r":math:`r`",
        default=numpy.array([1.0,]),
        range=basic.Range(lo=0.01, hi=1000.0, step=10.0),
        doc="The steepness of the sigmoidal transformation",
        order=4)

    a = arrays.FloatArray(
        label=r":math:`a`",
        default=numpy.array([0.56,]),
        range=basic.Range(lo=0.01, hi=1000.0, step=10.0),
        doc="Scaling of the coupling term",
        order=5)

    def __str__(self):
        return simple_gen_astr(self, 'cmin cmax midpoint a r')

    def pre(self, x_i, x_j):
        pre = self.cmax / (1.0 + numpy.exp(self.r * (self.midpoint - (x_j[:, 0] - x_j[:, 1]))))
        return pre[:, numpy.newaxis]

    def post(self, gx):
        return self.a * gx
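
# Illustrative sketch (assumed toy values, not the simulator's calling convention):
# the Jansen-Rit sigmoid of pre() applied to the difference of two state variables
# of the afferent nodes, then scaled by `a` as in post().
def _demo_jansen_rit_sigmoid():
    import numpy
    cmax, midpoint, r, a = 2.0 * 0.0025, 6.0, 1.0, 0.56   # defaults from the traits above
    x_j0 = numpy.array([7.0, 5.0])                         # toy first state variable
    x_j1 = numpy.array([1.0, 2.0])                         # toy second state variable
    pre = cmax / (1.0 + numpy.exp(r * (midpoint - (x_j0 - x_j1))))
    gx = pre.sum()        # stand-in for the connectivity-weighted sum
    return a * gx         # same scaling as post()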

# Example 3

class TimeSeriesData(MappedType):
    """
    Base time-series dataType.
    """

    title = basic.String

    data = arrays.FloatArray(
        label="Time-series data",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc=
        """An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions"""
    )

    nr_dimensions = basic.Integer(label="Number of dimensions in timeseries",
                                  default=4)

    length_1d, length_2d, length_3d, length_4d = [basic.Integer] * 4

    labels_ordering = basic.List(
        default=["Time", "State Variable", "Space", "Mode"],
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = basic.Dict(
        default={},
        label=
        "Specific labels for each dimension for the data stored in this timeseries.",
        doc=
        """ A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension] }"""
    )
    ## TODO (for Stuart) : remove TimeLine and make sure the correct Period/start time is returned by different monitors in the simulator

    time = arrays.FloatArray(
        file_storage=core.FILE_STORAGE_EXPAND,
        label="Time-series time",
        required=False,
        doc=
        """An array of time values for the time-series, with a shape of [tpts,].
        This is 'time' as returned by the simulator's monitors.""")

    start_time = basic.Float(label="Start Time:")

    sample_period = basic.Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g sec, msec, usec, ...)
    sample_period_unit = basic.String(label="Sample Period Measure Unit",
                                      default="ms")

    sample_rate = basic.Float(label="Sample rate",
                              doc="""The sample rate of the timeseries""")

    has_surface_mapping = basic.Bool(default=True)
    has_volume_mapping = basic.Bool(default=False)

# Example 4

class Sigmoidal(Coupling):
    r"""
    Provides a sigmoidal coupling function of the form

    .. math::
        c_{min} + (c_{max} - c_{min}) / (1.0 + \exp(-a(x-midpoint)/\sigma))

    NB: using a = numpy.pi / numpy.sqrt(3.0) and the default parameter 
        produces something close to the current default for
        Linear (a=0.00390625, b=0) over the linear portion of the sigmoid,
        with saturation at -1 and 1.

    """

    cmin = arrays.FloatArray(
        label=":math:`c_{min}`",
        default=numpy.array([-1.0,]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="""Minimum of the sigmoid function""",
        order=1)

    cmax = arrays.FloatArray(
        label=":math:`c_{max}`",
        default=numpy.array([1.0,]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="""Maximum of the sigmoid function""",
        order=2)

    midpoint = arrays.FloatArray(
        label="midpoint",
        default=numpy.array([0.0,]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Midpoint of the linear portion of the sigmoid",
        order=3)

    a = arrays.FloatArray(
        label=r":math:`a`",
        default=numpy.array([1.0,]),
        range=basic.Range(lo=0.01, hi=1000.0, step=10.0),
        doc="Scaling of sigmoidal",
        order=4)

    sigma = arrays.FloatArray(
        label=r":math:`\sigma`",
        default=numpy.array([230.0,]),
        range=basic.Range(lo=0.01, hi=1000.0, step=10.0),
        doc="Standard deviation of the sigmoidal",
        order=5)

    def __str__(self):
        return simple_gen_astr(self, 'cmin cmax midpoint a sigma')

    def post(self, gx):
        return self.cmin + ((self.cmax - self.cmin) / (1.0 + numpy.exp(-self.a *((gx - self.midpoint) / self.sigma))))
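
# Illustrative sketch (assumed toy input): the post-summation sigmoid above applied
# to an already-aggregated coupling value gx, saturating between cmin and cmax.
def _demo_sigmoidal_post():
    import numpy
    cmin, cmax, midpoint, a, sigma = -1.0, 1.0, 0.0, 1.0, 230.0   # defaults above
    gx = numpy.linspace(-1000.0, 1000.0, 5)    # toy aggregated coupling values
    return cmin + (cmax - cmin) / (1.0 + numpy.exp(-a * (gx - midpoint) / sigma))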

# Example 5

class Linear(Coupling):
    """
    Linear Coupling function.

    """

    a = arrays.FloatArray(
        label=":math:`a`",
        default=numpy.array([
            0.00390625,
        ]),
        range=basic.Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Rescales the connection strength while maintaining the ratio
        between different values.""",
        order=1)

    b = arrays.FloatArray(
        label=":math:`b`",
        default=numpy.array([
            0.0,
        ]),
        doc="""Shifts the base of the connection strength while maintaining
        the absolute difference between different values.""",
        order=2)

    def __call__(self, g_ij, x_i, x_j):
        """
        Evaluate the Linear function for the arg ``x``. The equation being
        evaluated has the following form:

            .. math::
                a x + b


        """
        coupled_input = (g_ij.transpose((2, 1, 0, 3)) * x_j).sum(axis=0)

        return self.a * coupled_input + self.b

    device_info = coupling_device_info(pars=['a', 'b'],
                                       kernel="""

        // parameters
        float a = P(0)
            , b = P(1);

        I = 0.0;
        for (int j_node=0; j_node<n_node; j_node++, idel++, conn++)
            I += a*GIJ*XJ;
        I += b;

        """)
class Kuramoto(Coupling):
    """
    Kuramoto-style coupling: couples oscillators through the sine of their
    phase differences.
    """

    a = arrays.FloatArray(
        label = ":math:`a`",
        default=numpy.array([0.00390625,]),
        range = basic.Range(lo = 0.0, hi = 0.2, step = 0.01),
        doc = """Rescales the connection strength while maintaining the ratio
        between different values.""",
        order = 1)


    def __call__(self, g_ij, x_i, x_j, sin=numpy.sin):
        r"""
        Evaluates the Kuramoto-style coupling, a periodic difference:

            .. math::
                a \sum_{j=1}^{N} g_{ij} \sin(x_j - x_i)

        """

        return self.a*(g_ij*sin(x_j - x_i)).sum(axis=0)

    device_info = coupling_device_info(
        pars = ['a'],
        kernel = """
        // load parameters
        float a = P(0);

        I = 0.0;
        for (int j_node=0; j_node<n_node; j_node++, idel++, conn++)
            I += a*GIJ*sin(XJ - XI);
        """
        )
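
# Illustrative sketch (assumed 2-D toy arrays): the Kuramoto coupling sums the sine of
# phase differences between each node and its afferents, scaled by `a`.
def _demo_kuramoto_coupling():
    import numpy
    a = 0.00390625                                     # default from the trait above
    weights = numpy.array([[0.0, 1.0], [2.0, 0.0]])    # toy connectivity
    theta = numpy.array([0.0, numpy.pi / 2])           # toy phases
    diff = theta[numpy.newaxis, :] - theta[:, numpy.newaxis]   # theta_j - theta_i
    return a * (weights * numpy.sin(diff)).sum(axis=1)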

# Example 7

class Difference(Coupling):
    """
    Difference coupling: couples nodes through the difference between the
    afferent and local states.
    """

    a = arrays.FloatArray(
        label=":math:`a`",
        default=numpy.array([
            0.1,
        ]),
        range=basic.Range(lo=0.0, hi=10., step=0.1),
        doc="""Rescales the connection strength while maintaining the ratio
        between different values.""",
        order=1)

    def __call__(self, g_ij, x_i, x_j):
        r"""
        Evaluates a difference coupling:

            .. math::
                a \sum_{j=1}^{N} g_{ij} (x_j - x_i)

        """

        return self.a * (g_ij.transpose((2, 1, 0, 3)) * (x_j - x_i)).sum(axis=0)

    device_info = coupling_device_info(pars=['a'],
                                       kernel="""
        // load parameters
        float a = P(0);

        I = 0.0;
        for (int j_node=0; j_node<n_node; j_node++, idel++, conn++)
            I += a*GIJ*(XJ - XI);
        """)
class VolumeData(MappedType):
    """
    Data having voxels as their elementary units.
    """
    origin = arrays.PositionArray(label="Volume origin coordinates")
    voxel_size = arrays.FloatArray(label="Voxel size")  # need a triplet, xyz
    voxel_unit = basic.String(label="Voxel Measure Unit", default="mm")

# Example 9

class Difference(SparseCoupling):
    r"""
    Provides a difference coupling function, between pre and post synaptic
    activity of the form

    .. math::

        a G_{ij} (x_j - x_i)

    """

    a = arrays.FloatArray(
        label=":math:`a`",
        default=numpy.array([0.1,]),
        range=basic.Range(lo=0.0, hi=10., step=0.1),
        doc="Rescales the connection strength.",
        order=1)

    def __str__(self):
        return simple_gen_astr(self, 'a')

    def pre(self, x_i, x_j):
        return x_j - x_i

    def post(self, gx):
        return self.a * gx

# Example 10

class CorrelationCoefficients(arrays.MappedArray):
    """Correlation coefficients datatype."""

    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_DEFAULT)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which Correlation (coefficients) is applied.")

    labels_ordering = basic.List(
        label="Dimension Names",
        default=["Node", "Node", "State Variable", "Mode"],
        doc="""List of strings representing names of each data dimension""")

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1), int(self.read_data_shape()[i]))

    def _find_summary_info(self):
        summary = {"Graph type": self.__class__.__name__,
                   "Source": self.source.title,
                   "Dimensions": self.labels_ordering}
        summary.update(self.get_info_about_array('array_data'))
        return summary

# Example 11

class Additive(Noise):
    """
    Additive noise which, assuming the source noise is Gaussian with unit
    variance, will result in noise with a standard deviation of nsig.

    """

    nsig = arrays.FloatArray(
        configurable_noise=True,
        label=":math:`D`",
        required=True,
        default=numpy.array([1.0]),
        range=basic.Range(lo=0.0, hi=10.0, step=0.1),
        order=1,
        doc="""The noise dispersion, it is the standard deviation of the
        distribution from which the Gaussian random variates are drawn. NOTE:
        Sensible values are typically ~<< 1% of the dynamic range of a Model's
        state variables.""")

    def gfun(self, state_variables):
        r"""
        Linear additive noise, thus it ignores the state_variables.

        .. math::
            g(x) = \sqrt{2D}

        """
        g_x = numpy.sqrt(2.0 * self.nsig)
        return g_x
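
# Illustrative sketch (an assumption, not this module's integrator): how the additive
# diffusion term g(x) = sqrt(2 D) typically enters a stochastic (Euler-Maruyama) step,
# x_{n+1} = x_n + dt * f(x_n) + g * sqrt(dt) * xi, with xi drawn from N(0, 1).
def _demo_additive_noise_step(x, dt=0.1, nsig=1.0, f=lambda x: -x):
    import numpy
    g_x = numpy.sqrt(2.0 * nsig)                       # same form as gfun above
    xi = numpy.random.normal(size=numpy.shape(x))
    return x + dt * f(x) + g_x * numpy.sqrt(dt) * xi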

# Example 12

class Kuramoto(SparseCoupling):
    r"""
    Provides a Kuramoto-style coupling, a periodic difference of the form
    
    .. math::
        a / N G_ij sin(x_j - x_i)
    
    """
   

    a = arrays.FloatArray(
        label=":math:`a`",
        default=numpy.array([1.0,]),
        range=basic.Range(lo=0.0, hi=1.0, step=0.01),
        doc="Rescales the connection strength.",
        order=1)

    def __str__(self):
        return simple_gen_astr(self, 'a')

    def pre(self, x_i, x_j):
        return numpy.sin(x_j - x_i)

    def post(self, gx):
        return self.a / gx.shape[0] * gx

# Example 13

class HyperbolicTangent(SparseCoupling):
    r"""
    Provides a sigmoidal coupling function of the form

    .. math::
        a \left[1 + \tanh\left(\frac{b\,x - midpoint}{\sigma}\right)\right]

    NB: This coupling function is applied pre-summation. For a post-summation
        sigmoidal, see `Sigmoidal`.

    """

    a = arrays.FloatArray(label=":math:`a`",
                          default=numpy.array([1.0]),
                          range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
                          doc="Minimum of the sigmoid function",
                          order=1)

    b = arrays.FloatArray(label=":math:`b`",
                          default=numpy.array([1.0]),
                          range=basic.Range(lo=-1.0, hi=1.0, step=10.0),
                          doc="Scaling factor for the variable",
                          order=2)

    midpoint = arrays.FloatArray(
        label="midpoint",
        default=numpy.array([
            0.0,
        ]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Midpoint of the linear portion of the sigmoid",
        order=3)

    sigma = arrays.FloatArray(label=r":math:`\sigma`",
                              default=numpy.array([
                                  1.0,
                              ]),
                              range=basic.Range(lo=0.01, hi=1000.0, step=10.0),
                              doc="Standard deviation of the coupling",
                              order=4)

    def pre(self, x_i, x_j):
        return self.a * (1 + numpy.tanh(
            (self.b * x_j - self.midpoint) / self.sigma))

    def __str__(self):
        return simple_gen_astr(self, 'a b midpoint sigma')
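
# Illustrative sketch (assumed toy arrays): the NB above distinguishes pre- from
# post-summation coupling. Here the tanh transform is applied to each afferent state
# *before* the connectivity-weighted sum, in contrast to Sigmoidal, which transforms
# the already-summed input.
def _demo_pre_summation_tanh():
    import numpy
    a, b, midpoint, sigma = 1.0, 1.0, 0.0, 1.0         # defaults from the traits above
    weights = numpy.array([[0.0, 1.0], [2.0, 0.0]])    # toy connectivity
    x_j = numpy.array([0.5, -0.5])                     # toy delayed states
    pre = a * (1 + numpy.tanh((b * x_j - midpoint) / sigma))   # as in pre() above
    return weights.dot(pre)                            # summation happens afterwards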

# Example 14

class CoherenceSpectrum(arrays.MappedArray):
    """
    Result of a NodeCoherence Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the node_coherence is
            applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""NOTE: must be a power of 2""")

    frequency = arrays.FloatArray(label="Frequency")

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.configure_chunk_safe()

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Number of frequencies": self.frequency.shape[0],
            "Minimum frequency": self.frequency[0],
            "Maximum frequency": self.frequency[-1],
            "FFT length (time-points)": self.nfft
        }
        return summary

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=3,
                              close_file=False)

# Example 15

 def test_float_array(self):
     """
     Create a float array, check that shape is correct.
     """
     data = numpy.random.random((10, 10))
     array_dt = arrays.FloatArray()
     array_dt.data = data
     self.assertEqual(array_dt.shape, (10, 10))

# Example 16

class Additive(Noise):
    """
    Additive noise which, assuming the source noise is Gaussian with unit
    variance, will result in noise with a standard deviation of nsig.

    .. #Currently there seems to be a clash betwen traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: Additive.__init__
    .. automethod:: Additive.gfun

    """

    nsig = arrays.FloatArray(
        configurable_noise=True,
        label=":math:`D`",
        required=True,
        default=numpy.array([1.0]), range=basic.Range(lo=0.0, hi=10.0, step=0.1),
        order=1,
        doc="""The noise dispersion, it is the standard deviation of the
        distribution from which the Gaussian random variates are drawn. NOTE:
        Sensible values are typically ~<< 1% of the dynamic range of a Model's
        state variables.""")


    def __init__(self, **kwargs):
        """Initialise an Additive noise source."""
        LOG.info('%s: initing...' % str(self))
        super(Additive, self).__init__(**kwargs)
        LOG.debug('%s: inited.' % repr(self))


    def gfun(self, state_variables):
        r"""
        Linear additive noise, thus it ignores the state_variables.

        .. math::
            g(x) = \sqrt{2D}

        """
        g_x = numpy.sqrt(2.0 * self.nsig)

        return g_x


    device_info = noise_device_info(
        pars=['nsig'],
        kernel="""
        float nsig;
        for (int i_svar=0; i_svar<n_svar; i_svar++)
        {
            nsig = P(i_svar);
            GX(i_svar) = sqrt(2.0*nsig);
        }
        """
    )

# Example 17

class CoherenceSpectrumData(arrays.MappedArray):
    """
    Result of a NodeCoherence Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the node_coherence is
            applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""NOTE: must be a power of 2""")

    frequency = arrays.FloatArray(label="Frequency")

    __generate_table__ = True

# Example 18

 def test_traits_default(self):
     """
     Tests for default values upon creation of a FloatArray traited class.
     """
     array_dt = arrays.FloatArray()
     assert array_dt.trait.file_storage == FILE_STORAGE_DEFAULT
     assert array_dt.trait.order_number == 0
     assert array_dt.trait.required == True
     assert array_dt.trait.use_storage == True
     assert array_dt.trait.range_interval == None

# Example 19

 def test_traits_default(self):
     """
     Tests for default values upon creation of a FloatArray traited class.
     """
     array_dt = arrays.FloatArray()
     self.assertEqual(array_dt.trait.file_storage, FILE_STORAGE_DEFAULT)
     self.assertEqual(array_dt.trait.order_number, 0)
     self.assertEqual(array_dt.trait.required, True)
     self.assertEqual(array_dt.trait.use_storage, True)
     self.assertEqual(array_dt.trait.range_interval, None)

# Example 20

class Fcd(arrays.MappedArray):
    """
    Functional Connectivity Dynamics (FCD) matrix datatype.
    """

    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_DEFAULT)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which FCD is calculated.")

    sw = basic.Float(
        label="Sliding window length (ms)",
        default=120000,
        doc="""Length of the time window used to divide the time series.
                The FCD matrix is calculated as follows: the time series is divided into windows of fixed length with a fixed overlap.
                The data points within each window, centered at time ti, are used to calculate FC(ti) as Pearson correlation.
                The ij element of the FCD matrix is the Pearson correlation between FC(ti) and FC(tj), each arranged as a vector."""
    )

    sp = basic.Float(
        label="Spanning between two consecutive sliding windows (ms)",
        default=2000,
        doc="""Spanning = (time window length) - (overlap between two consecutive time windows).
                The FCD matrix is calculated as follows: the time series is divided into windows of fixed length with a fixed overlap.
                The data points within each window, centered at time ti, are used to calculate FC(ti) as Pearson correlation.
                The ij element of the FCD matrix is the Pearson correlation between FC(ti) and FC(tj), each arranged as a vector."""
    )

    labels_ordering = basic.List(
        label="Dimension Names",
        default=["Time", "Time", "State Variable", "Mode"],
        doc="""List of strings representing names of each data dimension""")

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1),
                    int(self.read_data_shape()[i]))

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "FCD type": self.__class__.__name__,
            "Source": self.source.title,
            "Dimensions": self.labels_ordering
        }

        summary.update(self.get_info_about_array('array_data'))
        return summary
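
# Illustrative sketch (a simplified numpy version, not this class's analyser): the
# sliding-window FCD computation described in the docs above, for a (tpts, nodes)
# array `ts`. Window length and step are given in samples here rather than in ms.
def _demo_fcd(ts, win_len, win_step):
    import numpy
    fc_vectors = []
    for start in range(0, ts.shape[0] - win_len + 1, win_step):
        fc = numpy.corrcoef(ts[start:start + win_len].T)   # FC(ti) within one window
        iu = numpy.triu_indices_from(fc, k=1)
        fc_vectors.append(fc[iu])                          # upper triangle as a vector
    fc_vectors = numpy.array(fc_vectors)
    return numpy.corrcoef(fc_vectors)                      # FCD: correlation of FC(ti), FC(tj)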

# Example 21

 def test_index_array(self):
     """
     Create an index array, check that shape is correct.
     """
     target_data = numpy.random.random((10, 3))
     target_array = arrays.FloatArray()
     target_array.data = target_data
     array_dt = arrays.IndexArray(target=target_array)
     array_dt.data = numpy.arange(30).reshape((10, 3))
     self.assertEqual(array_dt.shape, (10, 3))
     self.assertEqual(array_dt.target.shape, (10, 3))

# Example 22

class CrossCorrelationData(MappedType):
    """
    Result of a CrossCorrelation Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the cross_correlation is
            applied.""")

    time = arrays.FloatArray(label="Temporal Offsets")

    labels_ordering = basic.List(
        label="Dimension Names",
        default=["Offsets", "Node", "Node", "State Variable", "Mode"],
        doc="""List of strings representing names of each data dimension""")

    __generate_table__ = True

# Example 23

class ProjectionData(MappedType):
    """
    Base DataType for representing a ProjectionMatrix.
    The projection is between a source of type CorticalSurface and a set of Sensors.
    """

    projection_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'projection_type'}

    brain_skull = surfaces.BrainSkull(
        label="Brain Skull",
        default=None,
        required=False,
        doc="""Boundary between skull and cortex domains.""")

    skull_skin = surfaces.SkullSkin(
        label="Skull Skin",
        default=None,
        required=False,
        doc="""Boundary between skull and skin domains.""")

    skin_air = surfaces.SkinAir(
        label="Skin Air",
        default=None,
        required=False,
        doc="""Boundary between skin and air domains.""")

    conductances = basic.Dict(
        label="Domain conductances",
        required=False,
        default={
            'air': 0.0,
            'skin': 1.0,
            'skull': 0.01,
            'brain': 1.0
        },
        doc=""" A dictionary representing the conductances of ... """)

    sources = surfaces.CorticalSurface(label="surface or region",
                                       default=None,
                                       required=True)

    sensors = sensors.Sensors(
        label="Sensors",
        default=None,
        required=False,
        doc=""" A set of sensors to compute projection matrix for them. """)

    projection_data = arrays.FloatArray(label="Projection Matrix Data",
                                        default=None,
                                        required=True)

# Example 24

class Multiplicative(Noise):
    r"""
    With "external" fluctuations the intensity of the noise often depends on
    the state of the system. This results in the (general) stochastic
    differential formulation:

    .. math::
        dX_t = a(X_t)\,dt + b(X_t)\,dW_t

    for appropriate coefficients :math:`a(x)` and :math:`b(x)`, which might be
    constants.

    From [KloedenPlaten_1995]_, Equation 1.9, page 104.

    """

    nsig = arrays.FloatArray(
        configurable_noise=True,
        label=":math:`D`",
        required=True,
        default=numpy.array([
            1.0,
        ]),
        range=basic.Range(lo=0.0, hi=10.0, step=0.1),
        order=1,
        doc="""The noise dispersion, it is the standard deviation of the
        distribution from which the Gaussian random variates are drawn. NOTE:
        Sensible values are typically ~<< 1% of the dynamic range of a Model's
        state variables.""")

    b = equations.TemporalApplicableEquation(
        label=":math:`b`",
        default=equations.Linear(parameters={
            "a": 1.0,
            "b": 0.0
        }),
        doc=
        """A function evaluated on the state-variables, the result of which enters as the diffusion coefficient."""
    )

    def gfun(self, state_variables):
        """
        Scale the noise by the noise dispersion and the diffusion coefficient.
        By default, the diffusion coefficient :math:`b` is a constant.
        It reduces to the simplest scheme of a linear SDE with Multiplicative
        Noise: homogeneous constant coefficients. See [KloedenPlaten_1995]_,
        Equation 4.6, page 119.

        """
        self.b.pattern = state_variables
        g_x = numpy.sqrt(2.0 * self.nsig) * self.b.pattern
        return g_x
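
# Illustrative sketch (assumed scalar/toy values): the multiplicative diffusion term
# above with the default Linear equation b(x) = 1.0 * x + 0.0, so g(x) = sqrt(2 D) * x.
def _demo_multiplicative_gfun(state, nsig=1.0, slope=1.0, intercept=0.0):
    import numpy
    b_of_x = slope * numpy.asarray(state) + intercept   # the Linear equation's pattern
    return numpy.sqrt(2.0 * nsig) * b_of_x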

# Example 25

class FourierSpectrumData(arrays.MappedArray):
    """
    Result of a Fourier Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the FFT is applied.")

    segment_length = basic.Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segment length determines the frequency resolution of the
            resulting spectra.""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
            application of the FFT.""")

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    average_power = arrays.FloatArray(label="Average Power",
                                      file_storage=core.FILE_STORAGE_EXPAND)

    normalised_average_power = arrays.FloatArray(
        label="Normalised Power", file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True

# Example 26

class Ornstein_Ulhenbeck_process(Additive):
    tau_OU = basic.Float(label="time scale of decay",
                         required=True,
                         default=1.0,
                         doc="""The noise time scale """)
    mu = arrays.FloatArray(label=r":math:`\mu`",
                           required=True,
                           default=numpy.array([1.0]),
                           doc="""Mean of the noise""")
    weights = arrays.FloatArray(label="weights",
                                required=True,
                                default=numpy.array([0.0]),
                                doc="""Weights applied to the noise""")
    _noise = None

    def configure_white(self, dt, shape):
        """
        Configure the coloured-noise state: store the integration time step,
        reset the accumulated Ornstein-Uhlenbeck noise and reshape the mean.

        """
        self.dt = dt
        self._noise = 0.0
        self.mu = numpy.reshape(self.mu, (7, 1, 1))  # NOTE: assumes 7 state variables

    def generate(self, shape, lo=-1.0, hi=1.0):
        self._noise = self._noise - self.dt / self.tau_OU * self._noise + numpy.sqrt(
            self.dt) * self.random_stream.normal(size=shape)
        noise = self.mu + self.nsig * self._noise
        return noise

    def gfun(self, state_variables):
        """
            Drop the noise in order to avoid negative frequencies.

        """
        # drop value for negative noise
        return self.weights * 1e-3

# Example 27

class WaveletCoefficientsData(arrays.MappedArray):
    """
    This class bundles all the elements of a Wavelet Analysis into a single 
    object, including the input TimeSeries datatype and the output results as 
    arrays (FloatArray)
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray()

    source = time_series.TimeSeries(label="Source time-series")

    mother = basic.String(
        label="Mother wavelet",
        default="morlet",
        doc="""A string specifying the type of mother wavelet to use,
            default is 'morlet'.""")  # default to 'morlet'

    sample_period = basic.Float(label="Sample period")
    #sample_rate = basic.Integer(label = "")  inversely related

    frequencies = arrays.FloatArray(
        label="Frequencies", doc="A vector that maps scales to frequencies.")

    normalisation = basic.String(label="Normalisation type")
    # 'unit energy' | 'gabor'

    q_ratio = basic.Float(label="Q-ratio", default=5.0)

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True

# Example 28

 def test_traits_specific(self):
     """
     Tests for correct creation of a FloatArray traited class with initial values specified.
     """
     array_dt = arrays.FloatArray(file_storage = "txt",
                                  order = 6,
                                  required = False,
                                  use_storage = False,
                                  range = [1, 2, 3])
     self.assertEqual(array_dt.trait.file_storage, 'txt')
     self.assertEqual(array_dt.trait.order_number, 6)
     self.assertEqual(array_dt.trait.required, False)
     self.assertEqual(array_dt.trait.use_storage, False)
     self.assertEqual(array_dt.trait.range_interval, [1, 2, 3]) 

# Example 29

 def test_traits_specific(self):
     """
     Tests for correct creation of a FloatArray traited class with initial values specified.
     """
     array_dt = arrays.FloatArray(file_storage="txt",
                                  order=6,
                                  required=False,
                                  use_storage=False,
                                  range=[1, 2, 3])
     assert array_dt.trait.file_storage == 'txt'
     assert array_dt.trait.order_number == 6
     assert array_dt.trait.required is False
     assert array_dt.trait.use_storage is False
     assert array_dt.trait.range_interval == [1, 2, 3]

# Example 30

class StructuralMRI(VolumetricDataMixin, arrays.MappedArray):
    """
    Quantitative volumetric data recorded by means of Magnetic Resonance Imaging.

    """
    # Without the field below, the weighting and volume columns would be added to the MAPPED_ARRAY table.
    __generate_table__ = True

    array_data = arrays.FloatArray(label="contrast")

    weighting = basic.String(
        label="MRI weighting")  # eg, "T1", "T2", "T2*", "PD", ...

    volume = volumes.Volume