Example #1
class BarDatatype(FooDatatype):
    array_str = NArray(dtype='S32', shape=(Dim.any, ))
Example #2
class BarAndBaz(HasTraits):
    bar = Attr(field_type=Bar)
    baz = Attr(field_type=Baz)
    array = NArray(dtype=int,
                   shape=(Dim.any, Dim.any),
                   default=numpy.arange(6).reshape((2, 3)))
Example #3
class Linear(Linear):
    I_o = NArray(
        label=r":math:`I_o`",
        default=numpy.array([0.0]),
        domain=Range(lo=-100.0, hi=100.0, step=1.0),
        doc="External stimulus")

    G = NArray(
        label=r":math:`G`",
        default=numpy.array([0.0]),
        domain=Range(lo=-0.0, hi=100.0, step=1.0),
        doc="Global coupling scaling")

    tau = NArray(
        label=r":math:`\tau`",
        default=numpy.array([1.0]),
        domain=Range(lo=-0.1, hi=100.0, step=0.1),
        doc="Time constant")

    tau_rin = NArray(
        label=r":math:`\tau_rin_e`",
        default=numpy.array([10., ]),
        domain=Range(lo=1., hi=100., step=1.0),
        doc="""[ms]. Excitatory population instant spiking rate time constant.""")

    # Used for phase-plane axis ranges and to bound random initial() conditions.
    state_variable_boundaries = Final(
        default={"R": numpy.array([0.0, None]),
                 "Rin": numpy.array([0.0, None])},
        label="State Variable boundaries [lo, hi]",
        doc="""The values for each state-variable should be set to encompass
                the boundaries of the dynamic range of that state-variable. 
                Set None for one-sided boundaries""")

    state_variable_range = Final(
        label="State Variable ranges [lo, hi]",
        default={"R": numpy.array([0, 100]),
                 "Rin": numpy.array([0, 100])},
        doc="Range used for state variable initialization and visualization.")

    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=("R", 'Rin'),
        default=("R", 'Rin'), )

    state_variables = ('R', 'Rin')
    integration_variables = ('R',)
    _nvar = 2
    cvar = numpy.array([0], dtype=numpy.int32)

    def update_derived_parameters(self):
        """
        When needed, this should be a method for calculating parameters that are
        calculated based on parameters directly set by the caller. For example,
        see ReducedSetFitzHughNagumo. When not needed, this pass simplifies
        code that updates an arbitrary model's parameters -- i.e., this can be
        safely called on any model, whether it's used or not.
        """
        if hasattr(self, "Rin"):
            setattr(self, "_Rin", getattr(self, "Rin") > 0)
        else:
            setattr(self, "Rin", numpy.array([0.0, ]))
            setattr(self, "_Rin", numpy.array([False, ]))

    def update_non_state_variables_after_integration(self, state_variables):
        # Reset to 0 the Rin for nodes not updated by Spiking Network
        state_variables[1] = numpy.where(self._Rin, state_variables[1], 0.0)
        return state_variables

    def dfun(self, state, coupling, local_coupling=0.0):
        """
        .. math::
            dR/dt = (-R + G * coupling) / {\tau} + I_o
        """
        dR = numpy.where(self._Rin,
                         (- state[0] + state[1]) / self.tau_rin,
                         (-state[0] + self.G * coupling[0] + local_coupling * state[0]) / self.tau + self.I_o)
        return numpy.array([dR, 0.0*dR])
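
A minimal sketch (not part of the example above) of how the switched derivative in dfun behaves, using the class defaults quoted above and a made-up rin_mask in place of self._Rin:

import numpy

tau, tau_rin, G, I_o = 1.0, 10.0, 0.0, 0.0       # defaults of the model above
R = numpy.array([0.2, 0.5, 0.8])                 # state[0]
Rin = numpy.array([0.0, 1.0, 0.0])               # state[1]
coupling = numpy.array([0.1, 0.1, 0.1])
rin_mask = Rin > 0                               # stands in for self._Rin

# numpy.where selects, node by node, between the spiking-rate-driven branch
# and the plain linear branch, as in Linear.dfun above.
dR = numpy.where(rin_mask,
                 (-R + Rin) / tau_rin,
                 (-R + G * coupling) / tau + I_o)
print(dR)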
Example #4
class Sigmoidal(Coupling):
    r"""
    Provides a sigmoidal coupling function of the form

    .. math::
        c_{min} + (c_{max} - c_{min}) / (1.0 + \exp(-a(x-midpoint)/\sigma))

    NB: using a = numpy.pi / numpy.sqrt(3.0) and the default parameter 
        produces something close to the current default for
        Linear (a=0.00390625, b=0) over the linear portion of the sigmoid,
        with saturation at -1 and 1.

    """

    cmin = NArray(
        label=":math:`c_{min}`",
        default=numpy.array([
            -1.0,
        ]),
        domain=Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="""Minimum of the sigmoid function""",
    )

    cmax = NArray(
        label=":math:`c_{max}`",
        default=numpy.array([
            1.0,
        ]),
        domain=Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="""Maximum of the sigmoid function""",
    )

    midpoint = NArray(
        label="midpoint",
        default=numpy.array([
            0.0,
        ]),
        domain=Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Midpoint of the linear portion of the sigmoid",
    )

    a = NArray(
        label=r":math:`a`",
        default=numpy.array([
            1.0,
        ]),
        domain=Range(lo=0.01, hi=1000.0, step=10.0),
        doc="Scaling of sigmoidal",
    )

    sigma = NArray(
        label=r":math:`\sigma`",
        default=numpy.array([
            230.0,
        ]),
        domain=Range(lo=0.01, hi=1000.0, step=10.0),
        doc="Standard deviation of the sigmoidal",
    )

    parameter_names = 'cmin cmax midpoint a sigma'.split()
    pre_expr = 'x_j'
    post_expr = 'cmin + ((cmax - cmin) / (1.0 + exp(-a *((gx - midpoint) / sigma))))'

    def __str__(self):
        return simple_gen_astr(self, 'cmin cmax midpoint a sigma')

    def post(self, gx):
        return self.cmin + ((self.cmax - self.cmin) /
                            (1.0 + numpy.exp(-self.a * (
                                (gx - self.midpoint) / self.sigma))))
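
A minimal numeric sketch of the coupling expression above; the helper name sigmoidal_post is hypothetical, and the parameter values are the class defaults:

import numpy

def sigmoidal_post(gx, cmin=-1.0, cmax=1.0, midpoint=0.0, a=1.0, sigma=230.0):
    # Same expression as Sigmoidal.post above.
    return cmin + (cmax - cmin) / (1.0 + numpy.exp(-a * ((gx - midpoint) / sigma)))

gx = numpy.array([-1000.0, 0.0, 1000.0])
print(sigmoidal_post(gx))   # saturates near cmin/cmax, equals 0 at the midpoint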
Example #5
class PreSigmoidal(Coupling):
    r"""
    Provides a pre-summation sigmoidal coupling function with a static or dynamic
    and local or global threshold.

    .. math::
        H * (Q + \tanh(G * (P*x - \theta)))

    The dynamic threshold is given by the second state variable; in that case,
    the coupling term also returns the direct node output used for the dynamic threshold.

    """

    H = NArray(
        label="H",
        default=numpy.array([
            0.5,
        ]),
        domain=Range(lo=-100.0, hi=100.0, step=1.0),
        doc="Global Factor.",
    )

    Q = NArray(
        label="Q",
        default=numpy.array([
            1.,
        ]),
        domain=Range(lo=-100.0, hi=100.0, step=1.0),
        doc="Average.",
    )

    G = NArray(
        label="G",
        default=numpy.array([
            60.,
        ]),
        domain=Range(lo=-1000.0, hi=1000.0, step=1.),
        doc="Gain.",
    )

    P = NArray(
        label="P",
        default=numpy.array([
            1.,
        ]),
        domain=Range(lo=-100.0, hi=100.0, step=0.01),
        doc="Excitation-Inhibition ratio.",
    )

    theta = NArray(
        label=":math:`\\theta`",
        default=numpy.array([
            0.5,
        ]),
        domain=Range(lo=-100.0, hi=100.0, step=0.01),
        doc="Threshold.",
    )

    dynamic = Attr(
        field_type=bool,
        label="Dynamic",
        default=True,
        doc="Use dynamic threshold (otherwise static).",
    )

    globalT = Attr(
        field_type=bool,
        label=":math:`global_{\\theta}`",
        default=False,
        doc="Use global threshold (otherwise local).",
    )

    def __str__(self):
        return simple_gen_astr(self, 'H Q G P theta dynamic globalT')

    def configure(self):
        """Set the right indirect call."""
        super(PreSigmoidal, self).configure()
        self.sliceT = 0 if self.globalT else slice(None)

    # Overriding __call__ directly is simpler than the pre/post form
    # TODO check use of arrays dims here
    def __call__(self, step, history, na=numpy.newaxis):
        g_ij = history.es_weights
        x_i, x_j = history.query(step)
        if self.dynamic:
            _ = (self.P * x_j[:, 0] - x_j[:, 1, self.sliceT])[:, na]
        else:
            _ = self.P * x_j - self.theta[self.sliceT, na]
        A_j = self.H * (self.Q + numpy.tanh(self.G * _))
        if self.dynamic:
            c_0 = (g_ij[:, 0] * A_j[:, 0]).sum(axis=0)
            c_1 = numpy.diag(A_j[:, 0, :, 0])[:, na]
            if self.globalT:
                c_1[:] = c_1.mean()
            return numpy.array([c_0, c_1])
        else:  # static threshold
            return (g_ij.transpose((2, 1, 0, 3)) * A_j).sum(axis=0)
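
For intuition, a small sketch (made-up inputs, class defaults) of the static-threshold expression H * (Q + tanh(G * (P*x - theta))) that the post-synaptic activity A_j is built from:

import numpy

H, Q, G, P, theta = 0.5, 1.0, 60.0, 1.0, 0.5     # defaults of the class above
x_j = numpy.array([0.0, 0.5, 1.0])               # hypothetical pre-synaptic values
A_j = H * (Q + numpy.tanh(G * (P * x_j - theta)))
print(A_j)   # ~0 well below the threshold, H at the threshold, ~2H well above it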
Example #6
class ReducedSetFitzHughNagumo(ReducedSetBase):
    r"""
    A reduced representation of a set of Fitz-Hugh Nagumo oscillators,
    [SJ_2008]_.

    The models (:math:`\xi`, :math:`\eta`) phase-plane, including a
    representation of the vector field as well as its nullclines, using default
    parameters, can be seen below:

        .. _phase-plane-rFHN_0:
        .. figure :: img/ReducedSetFitzHughNagumo_01_mode_0_pplane.svg
            :alt: Reduced set of FitzHughNagumo phase plane (xi, eta), 1st mode.

            The (:math:`\xi`, :math:`\eta`) phase-plane for the first mode of
            a reduced set of Fitz-Hugh Nagumo oscillators.

        .. _phase-plane-rFHN_1:
        .. figure :: img/ReducedSetFitzHughNagumo_01_mode_1_pplane.svg
            :alt: Reduced set of FitzHughNagumo phase plane (xi, eta), 2nd mode.

            The (:math:`\xi`, :math:`\eta`) phase-plane for the second mode of
            a reduced set of Fitz-Hugh Nagumo oscillators.

        .. _phase-plane-rFHN_2:
        .. figure :: img/ReducedSetFitzHughNagumo_01_mode_2_pplane.svg
            :alt: Reduced set of FitzHughNagumo phase plane (xi, eta), 3rd mode.

            The (:math:`\xi`, :math:`\eta`) phase-plane for the third mode of
            a reduced set of Fitz-Hugh Nagumo oscillators.


    The system's equations for the i-th mode at node q are:

    .. math::
                \dot{\xi}_{i}    &=  c\left(\xi_i-e_i\frac{\xi_{i}^3}{3} -\eta_{i}\right)
                                  + K_{11}\left[\sum_{k=1}^{o} A_{ik}\xi_k-\xi_i\right]
                                  - K_{12}\left[\sum_{k =1}^{o} B_{i k}\alpha_k-\xi_i\right] + cIE_i \\
                                 &\, + \left[\sum_{k=1}^{o} \mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                  +  \left[\sum_{k=1}^{o} W_{\zeta}\cdot\xi_{kr} \right] \\
                \dot{\eta}_i     &= \frac{1}{c}\left(\xi_i-b\eta_i+m_i\right) \\
                & \\
                \dot{\alpha}_i   &= c\left(\alpha_i-f_i\frac{\alpha_i^3}{3}-\beta_i\right)
                                  + K_{21}\left[\sum_{k=1}^{o} C_{ik}\xi_i-\alpha_i\right] + cII_i \\
                                 & \, + \left[\sum_{k=1}^{o} \mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                  + \left[\sum_{k=1}^{o} W_{\zeta}\cdot\xi_{kr}\right] \\
                                 & \\
                \dot{\beta}_i    &= \frac{1}{c}\left(\alpha_i-b\beta_i+n_i\right)

    .. automethod:: ReducedSetFitzHughNagumo.update_derived_parameters

    #NOTE: In the article this model is called StefanescuJirsa2D

    """

    # Define traited attributes for this model, these represent possible kwargs.
    tau = NArray(
        label=r":math:`\tau`",
        default=numpy.array([3.0]),
        domain=Range(lo=1.5, hi=4.5, step=0.01),
        doc="""doc...(prob something about timescale seperation)""")

    a = NArray(
        label=":math:`a`",
        default=numpy.array([0.45]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""doc...""")

    b = NArray(
        label=":math:`b`",
        default=numpy.array([0.9]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""doc...""")

    K11 = NArray(
        label=":math:`K_{11}`",
        default=numpy.array([0.5]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Internal coupling, excitatory to excitatory""")

    K12 = NArray(
        label=":math:`K_{12}`",
        default=numpy.array([0.15]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Internal coupling, inhibitory to excitatory""")

    K21 = NArray(
        label=":math:`K_{21}`",
        default=numpy.array([0.15]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Internal coupling, excitatory to inhibitory""")

    sigma = NArray(
        label=r":math:`\sigma`",
        default=numpy.array([0.35]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Standard deviation of Gaussian distribution""")

    mu = NArray(
        label=r":math:`\mu`",
        default=numpy.array([0.0]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Mean of Gaussian distribution""")

    # Used for phase-plane axis ranges and to bound random initial() conditions.
    state_variable_range = Final(
        label="State Variable ranges [lo, hi]",
        default={"xi": numpy.array([-4.0, 4.0]),
                 "eta": numpy.array([-3.0, 3.0]),
                 "alpha": numpy.array([-4.0, 4.0]),
                 "beta": numpy.array([-3.0, 3.0])},
        doc="""The values for each state-variable should be set to encompass
        the expected dynamic range of that state-variable for the current
        parameters, it is used as a mechanism for bounding random inital
        conditions when the simulation isn't started from an explicit history,
        it is also provides the default range of phase-plane plots.""")


    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=("xi", "eta", "alpha", "beta"),
        default=("xi", "alpha"),
        doc=r"""This represents the default state-variables of this Model to be
                monitored. It can be overridden for each Monitor if desired. The
                corresponding state-variable indices for this model are :math:`\xi = 0`,
                :math:`\eta = 1`, :math:`\alpha = 2`, and :math:`\beta= 3`.""")

    state_variables = tuple('xi eta alpha beta'.split())
    _nvar = 4
    cvar = numpy.array([0, 2], dtype=numpy.int32)
    # Derived parameters
    Aik = None
    Bik = None
    Cik = None
    e_i = None
    f_i = None
    IE_i = None
    II_i = None
    m_i = None
    n_i = None

    def dfun(self, state_variables, coupling, local_coupling=0.0):
        r"""


        The system's equations for the i-th mode at node q are:

        .. math::
                \dot{\xi}_{i}    &=  c\left(\xi_i-e_i\frac{\xi_{i}^3}{3} -\eta_{i}\right)
                                  + K_{11}\left[\sum_{k=1}^{o} A_{ik}\xi_k-\xi_i\right]
                                  - K_{12}\left[\sum_{k =1}^{o} B_{i k}\alpha_k-\xi_i\right] + cIE_i                       \\
                                 &\, + \left[\sum_{k=1}^{o} \mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                  +  \left[\sum_{k=1}^{o} W_{\zeta}\cdot\xi_{kr} \right] \\
                \dot{\eta}_i     &= \frac{1}{c}\left(\xi_i-b\eta_i+m_i\right)                                              \\
                & \\
                \dot{\alpha}_i   &= c\left(\alpha_i-f_i\frac{\alpha_i^3}{3}-\beta_i\right)
                                  + K_{21}\left[\sum_{k=1}^{o} C_{ik}\xi_i-\alpha_i\right] + cII_i                          \\
                                 & \, + \left[\sum_{k=1}^{o} \mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                  + \left[\sum_{k=1}^{o} W_{\zeta}\cdot\xi_{kr}\right] \\
                                 & \\
                \dot{\beta}_i    &= \frac{1}{c}\left(\alpha_i-b\beta_i+n_i\right)

        """

        xi = state_variables[0, :]
        eta = state_variables[1, :]
        alpha = state_variables[2, :]
        beta = state_variables[3, :]
        derivative = numpy.empty_like(state_variables)
        # sum the activity from the modes
        c_0 = coupling[0, :].sum(axis=1)[:, numpy.newaxis]

        # TODO: generalize coupling variables to a matrix form
        # c_1 = coupling[1, :] # this cv represents alpha

        derivative[0] = (self.tau * (xi - self.e_i * xi ** 3 / 3.0 - eta) +
               self.K11 * (numpy.dot(xi, self.Aik) - xi) -
               self.K12 * (numpy.dot(alpha, self.Bik) - xi) +
               self.tau * (self.IE_i + c_0 + local_coupling * xi))

        derivative[1] = (xi - self.b * eta + self.m_i) / self.tau

        derivative[2] = (self.tau * (alpha - self.f_i * alpha ** 3 / 3.0 - beta) +
                  self.K21 * (numpy.dot(xi, self.Cik) - alpha) +
                  self.tau * (self.II_i + c_0 + local_coupling * xi))

        derivative[3] = (alpha - self.b * beta + self.n_i) / self.tau

        return derivative

    def update_derived_parameters(self):
        """
        Calculate coefficients for the Reduced FitzHugh-Nagumo oscillator based
        neural field model. Specifically, this method implements equations for
        calculating coefficients found in the supplemental material of
        [SJ_2008]_.

        Include equations here...

        """

        newaxis = numpy.newaxis
        trapz = scipy_integrate_trapz

        stepu = 1.0 / (self.nu + 2 - 1)
        stepv = 1.0 / (self.nv + 2 - 1)

        norm = scipy_stats_norm(loc=self.mu, scale=self.sigma)

        Zu = norm.ppf(numpy.arange(stepu, 1.0, stepu))
        Zv = norm.ppf(numpy.arange(stepv, 1.0, stepv))

        # Define the modes
        V = numpy.zeros((self.number_of_modes, self.nv))
        U = numpy.zeros((self.number_of_modes, self.nu))

        nv_per_mode = self.nv // self.number_of_modes
        nu_per_mode = self.nu // self.number_of_modes

        for i in range(self.number_of_modes):
            V[i, i * nv_per_mode:(i + 1) * nv_per_mode] = numpy.ones(nv_per_mode)
            U[i, i * nu_per_mode:(i + 1) * nu_per_mode] = numpy.ones(nu_per_mode)

        # Normalise the modes
        V = V / numpy.tile(numpy.sqrt(trapz(V * V, Zv, axis=1)), (self.nv, 1)).T
        U = U / numpy.tile(numpy.sqrt(trapz(U * U, Zu, axis=1)), (self.nu, 1)).T

        # Get Normal PDF's evaluated with sampling Zv and Zu
        g1 = norm.pdf(Zv)
        g2 = norm.pdf(Zu)
        G1 = numpy.tile(g1, (self.number_of_modes, 1))
        G2 = numpy.tile(g2, (self.number_of_modes, 1))

        cV = numpy.conj(V)
        cU = numpy.conj(U)

        intcVdZ = trapz(cV, Zv, axis=1)[:, newaxis]
        intG1VdZ = trapz(G1 * V, Zv, axis=1)[newaxis, :]
        intcUdZ = trapz(cU, Zu, axis=1)[:, newaxis]
        # import pdb; pdb.set_trace()
        # Calculate coefficients
        self.Aik = numpy.dot(intcVdZ, intG1VdZ).T
        self.Bik = numpy.dot(intcVdZ, trapz(G2 * U, Zu, axis=1)[newaxis, :])
        self.Cik = numpy.dot(intcUdZ, intG1VdZ).T

        self.e_i = trapz(cV * V ** 3, Zv, axis=1)[newaxis, :]
        self.f_i = trapz(cU * U ** 3, Zu, axis=1)[newaxis, :]

        self.IE_i = trapz(Zv * cV, Zv, axis=1)[newaxis, :]
        self.II_i = trapz(Zu * cU, Zu, axis=1)[newaxis, :]

        self.m_i = (self.a * intcVdZ).T
        self.n_i = (self.a * intcUdZ).T
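
A simplified sketch (toy sizes, numpy.trapz instead of scipy's trapz) of the mode construction and normalisation step in update_derived_parameters, using the class defaults mu=0.0 and sigma=0.35:

import numpy
from scipy.stats import norm

number_of_modes, nv = 3, 15
stepv = 1.0 / (nv + 2 - 1)
Zv = norm(loc=0.0, scale=0.35).ppf(numpy.arange(stepv, 1.0, stepv))

# Box-car modes, one block of ones per mode
V = numpy.zeros((number_of_modes, nv))
nv_per_mode = nv // number_of_modes
for i in range(number_of_modes):
    V[i, i * nv_per_mode:(i + 1) * nv_per_mode] = 1.0

# Normalise each mode so that trapz(V_i * V_i, Zv) == 1, as in the class above
V = V / numpy.sqrt(numpy.trapz(V * V, Zv, axis=1))[:, numpy.newaxis]
print(numpy.trapz(V * V, Zv, axis=1))   # ~[1., 1., 1.]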
Example #7
class LookUpTable(HasTraits):
    """
    Lookup Tables for storing pre-computed functions.
    Specific table subclasses are implemented below.
    """

    xmin = Float(
        label="x-min",
        doc="""Minimum value""")

    xmax = Float(
        label="x-max",
        doc="""Maximum value""")

    data = NArray(
        label="data",
        doc="""Tabulated values""")

    number_of_values = Int(
        label="Number of values",
        default=0,
        doc="""The number of values in the table """)

    df = NArray(
        label="df",
        doc=""".""")

    dx = Float(
        label="dx",
        default=0.,
        doc="""Tabulation step""")

    invdx = Float(
        label="invdx",
        default=0.,
        doc=""".""")

    @staticmethod
    def populate_table(result, source_file):
        source_full_path = try_get_absolute_path("tvb_data.tables", source_file)
        zip_data = numpy.load(source_full_path)

        result.df = zip_data['df']
        result.xmin = zip_data['min_max'][0]
        result.xmax = zip_data['min_max'][1]
        result.data = zip_data['f']
        return result

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """
        super(LookUpTable, self).configure()

        # Infer the number of values from the data if it was not set explicitly
        if self.number_of_values == 0:
            self.number_of_values = self.data.shape[0]

        self.compute_search_indices()

    def summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this dataType, if any ...
        """
        return {"Number of values": self.number_of_values}

    def compute_search_indices(self):
        """
        ...
        """
        self.dx = ((self.xmax - self.xmin) / (self.number_of_values) - 1)
        self.invdx = 1 / self.dx

    def search_value(self, val):
        """
        Search a value in this look up table
        """

        if self.xmin:
            y = val - self.xmin
        else:
            y = val

        ind = numpy.array(y * self.invdx, dtype=int)

        try:
            return self.data[ind] + self.df[ind] * (y - ind * self.dx)
        except IndexError:  # out of bounds
            return numpy.NaN
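
An illustrative, self-contained sketch (hypothetical table of exp(x) values) of the interpolated lookup performed by search_value:

import numpy

xmin, xmax, n = 0.0, 1.0, 11
x = numpy.linspace(xmin, xmax, n)
data = numpy.exp(x)                      # tabulated values ("data")
df = numpy.gradient(data, x)             # tabulated derivative ("df")
dx = (xmax - xmin) / (n - 1)
invdx = 1.0 / dx

val = 0.37
y = val - xmin
ind = int(y * invdx)                     # index of the nearest lower grid point
approx = data[ind] + df[ind] * (y - ind * dx)
print(approx, numpy.exp(val))            # interpolated value vs. the exact one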
Example #8
class SpatialAverage(Monitor):
    """
    Monitors the averaged value for the model's variables of interest over sets of
    nodes -- defined by spatial_mask. This is primarily intended for use with
    surface simulations, with a default behaviour, when no spatial_mask is
    specified, of using surface.region_mapping in order to reduce a surface
    simulation back to a single average timeseries for each region in the
    associated Connectivity. However, any vector of length nodes containing
    integers, from a set contiguous from zero, specifying the new grouping to
    which each node belongs should work.

    Additionally, this monitor temporally sub-samples the simulation every `istep` 
    integration steps.

    """
    _ui_name = "Spatial average with temporal sub-sample"

    spatial_mask = NArray(  #TODO: Check it's a vector of length Nodes (like region mapping for surface)
        dtype=int,
        label="An index mask of nodes into areas",
        required=False,
        doc="""A vector of length==nodes that assigns an index to each node
            specifying the "region" to which it belongs. The default usage is
            for mapping a surface based simulation back to the regions used in 
            its `Long-range Connectivity.`""")

    default_mask = Attr(
        str,
        choices=("cortical", "hemispheres"),
        default="hemispheres",
        label="Default Mask",
        doc=("Fallback in case spatial mask is none and no surface provided" 
             "to use either connectivity hemispheres or cortical attributes."))
        # order = -1)

    def config_for_sim(self, simulator):

        # initialize base attributes
        super(SpatialAverage, self).config_for_sim(simulator)
        self.is_default_special_mask = False

        # setup given spatial mask or default to region mapping
        if self.spatial_mask is None:
            self.is_default_special_mask = True
            if not (simulator.surface is None):
                self.spatial_mask = simulator.surface.region_mapping
            else:
                conn = simulator.connectivity
                if self.default_mask == 'cortical':
                    if conn is not None and conn.cortical is not None and conn.cortical.size > 0:
                        ## Use as spatial-mask cortical/non cortical areas
                        self.spatial_mask = numpy.array([int(c) for c in conn.cortical])
                    else:
                        msg = "Must fill Spatial Mask parameter for non-surface simulations when using SpatioTemporal monitor!"
                        raise Exception(msg)
                elif self.default_mask == 'hemispheres':
                    if conn is not None and conn.hemispheres is not None and conn.hemispheres.size > 0:
                        ## Use as spatial-mask left/right hemisphere
                        self.spatial_mask = numpy.array([int(h) for h in conn.hemispheres])
                    else:
                        msg = "Must fill Spatial Mask parameter for non-surface simulations when using SpatioTemporal monitor!"
                        raise Exception(msg)

        number_of_nodes = simulator.number_of_nodes
        if self.spatial_mask.size != number_of_nodes:
            msg = "spatial_mask must be a vector of length number_of_nodes."
            raise Exception(msg)

        areas = numpy.unique(self.spatial_mask)
        number_of_areas = len(areas)
        if not numpy.all(areas == numpy.arange(number_of_areas)):
            msg = ("Areas in the spatial_mask must be specified as a "
                    "contiguous set of indices starting from zero.")
            raise Exception(msg)

        self.log.debug("spatial_mask")
        self.log.debug(narray_describe(self.spatial_mask))
        spatial_sum = numpy.zeros((number_of_nodes, number_of_areas))
        spatial_sum[numpy.arange(number_of_nodes), self.spatial_mask] = 1
        spatial_sum = spatial_sum.T
        self.log.debug("spatial_sum")
        self.log.debug(narray_describe(spatial_sum))
        nodes_per_area = numpy.sum(spatial_sum, axis=1)[:, numpy.newaxis]
        self.spatial_mean = spatial_sum / nodes_per_area
        self.log.debug("spatial_mean")
        self.log.debug(narray_describe(self.spatial_mean))


    def sample(self, step, state):
        if step % self.istep == 0:
            time = step * self.dt
            monitored_state = numpy.dot(self.spatial_mean, state[self.voi, :])
            return [time, monitored_state.transpose((1, 0, 2))]

    def create_time_series(self, connectivity=None, surface=None,
                           region_map=None, region_volume_map=None):
        if self.is_default_special_mask:
            return TimeSeriesRegion(sample_period=self.period,
                                    region_mapping=region_map,
                                    region_mapping_volume=region_volume_map,
                                    title='Regions ' + self.__class__.__name__,
                                    connectivity=connectivity)
        else:
            # mask does not correspond to the number of regions
            # let the parent create a plain TimeSeries
            return super(SpatialAverage, self).create_time_series()
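
A toy-sized sketch of how config_for_sim turns a spatial mask into the node-to-region averaging matrix, and how sample then applies it:

import numpy

spatial_mask = numpy.array([0, 0, 1, 1, 1, 2])      # 6 nodes grouped into 3 areas
n_nodes = spatial_mask.size
n_areas = spatial_mask.max() + 1

spatial_sum = numpy.zeros((n_nodes, n_areas))
spatial_sum[numpy.arange(n_nodes), spatial_mask] = 1.0
spatial_sum = spatial_sum.T                          # (areas, nodes) indicator matrix
nodes_per_area = spatial_sum.sum(axis=1)[:, numpy.newaxis]
spatial_mean = spatial_sum / nodes_per_area          # each row averages one area

state = numpy.array([1.0, 3.0, 2.0, 4.0, 6.0, 10.0]) # hypothetical per-node values
print(spatial_mean @ state)                          # [2., 4., 10.]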
Example #9
class ReducedWongWangExcInh(ModelNumbaDfun):
    r"""
    .. [WW_2006] Kong-Fatt Wong and Xiao-Jing Wang,  *A Recurrent Network
                Mechanism of Time Integration in Perceptual Decisions*.
                Journal of Neuroscience 26(4), 1314-1328, 2006.

    .. [DPA_2014] Deco Gustavo, Ponce Alvarez Adrian, Patric Hagmann,
                  Gian Luca Romani, Dante Mantini, and Maurizio Corbetta. *How Local
                  Excitation–Inhibition Ratio Impacts the Whole Brain Dynamics*.
                  The Journal of Neuroscience 34(23), 7886 –7898, 2014.


    Equations taken from [DPA_2013]_ , page 11242

    .. math::
                 x_{ek}       &=   w_p\,J_N \, S_{ek} - J_iS_{ik} + W_eI_o + GJ_N \mathbf\Gamma(S_{ek}, S_{ej}, u_{kj}) \\
                 H(x_{ek})    &=  \dfrac{a_ex_{ek}- b_e}{1 - \exp(-d_e(a_ex_{ek} -b_e))} \\
                 \dot{S}_{ek} &= -\dfrac{S_{ek}}{\tau_e} + (1 - S_{ek}) \, {\gamma}H(x_{ek}) \\

                 x_{ik}       &=   J_N \, S_{ek} - S_{ik} + W_iI_o + {\lambda}GJ_N \mathbf\Gamma(S_{ik}, S_{ej}, u_{kj}) \\
                 H(x_{ik})    &=  \dfrac{a_ix_{ik} - b_i}{1 - \exp(-d_i(a_ix_{ik} -b_i))} \\
                 \dot{S}_{ik} &= -\dfrac{S_{ik}}{\tau_i} + \gamma_iH(x_{ik}) \

    """

    # Define traited attributes for this model, these represent possible kwargs.

    a_e = NArray(
        label=":math:`a_e`",
        default=numpy.array([
            310.,
        ]),
        domain=Range(lo=0., hi=500., step=1.),
        doc=
        "[n/C]. Excitatory population input gain parameter, chosen to fit numerical solutions."
    )

    b_e = NArray(
        label=":math:`b_e`",
        default=numpy.array([
            125.,
        ]),
        domain=Range(lo=0., hi=200., step=1.),
        doc=
        "[Hz]. Excitatory population input shift parameter chosen to fit numerical solutions."
    )

    d_e = NArray(
        label=":math:`d_e`",
        default=numpy.array([
            0.160,
        ]),
        domain=Range(lo=0.0, hi=0.2, step=0.001),
        doc=
        """[s]. Excitatory population input scaling parameter chosen to fit numerical solutions."""
    )

    gamma_e = NArray(label=r":math:`\gamma_e`",
                     default=numpy.array([
                         0.641 / 1000,
                     ]),
                     domain=Range(lo=0.0, hi=1.0 / 1000, step=0.01 / 1000),
                     doc="""Excitatory population kinetic parameter""")

    tau_e = NArray(
        label=r":math:`\tau_e`",
        default=numpy.array([
            100.,
        ]),
        domain=Range(lo=50., hi=150., step=1.),
        doc="""[ms]. Excitatory population NMDA decay time constant.""")

    w_p = NArray(label=r":math:`w_p`",
                 default=numpy.array([
                     1.4,
                 ]),
                 domain=Range(lo=0.0, hi=2.0, step=0.01),
                 doc="""Excitatory population recurrence weight""")

    J_N = NArray(label=r":math:`J_N`",
                 default=numpy.array([
                     0.15,
                 ]),
                 domain=Range(lo=0.001, hi=0.5, step=0.001),
                 doc="""[nA] NMDA current""")

    W_e = NArray(label=r":math:`W_e`",
                 default=numpy.array([
                     1.0,
                 ]),
                 domain=Range(lo=0.0, hi=2.0, step=0.01),
                 doc="""Excitatory population external input scaling weight""")

    a_i = NArray(
        label=":math:`a_i`",
        default=numpy.array([
            615.,
        ]),
        domain=Range(lo=0., hi=1000., step=1.),
        doc=
        "[n/C]. Inhibitory population input gain parameter, chosen to fit numerical solutions."
    )

    b_i = NArray(
        label=":math:`b_i`",
        default=numpy.array([
            177.0,
        ]),
        domain=Range(lo=0.0, hi=200.0, step=1.0),
        doc=
        "[Hz]. Inhibitory population input shift parameter chosen to fit numerical solutions."
    )

    d_i = NArray(
        label=":math:`d_i`",
        default=numpy.array([
            0.087,
        ]),
        domain=Range(lo=0.0, hi=0.2, step=0.001),
        doc=
        """[s]. Inhibitory population input scaling parameter chosen to fit numerical solutions."""
    )

    gamma_i = NArray(label=r":math:`\gamma_i`",
                     default=numpy.array([
                         1.0 / 1000,
                     ]),
                     domain=Range(lo=0.0, hi=2.0 / 1000, step=0.01 / 1000),
                     doc="""Inhibitory population kinetic parameter""")

    tau_i = NArray(
        label=r":math:`\tau_i`",
        default=numpy.array([
            10.,
        ]),
        domain=Range(lo=5., hi=100., step=1.0),
        doc="""[ms]. Inhibitory population NMDA decay time constant.""")

    J_i = NArray(label=r":math:`J_{i}`",
                 default=numpy.array([
                     1.0,
                 ]),
                 domain=Range(lo=0.001, hi=2.0, step=0.001),
                 doc="""[nA] Local inhibitory current""")

    W_i = NArray(label=r":math:`W_i`",
                 default=numpy.array([
                     0.7,
                 ]),
                 domain=Range(lo=0.0, hi=1.0, step=0.01),
                 doc="""Inhibitory population external input scaling weight""")

    I_o = NArray(label=":math:`I_{o}`",
                 default=numpy.array([
                     0.382,
                 ]),
                 domain=Range(lo=0.0, hi=1.0, step=0.001),
                 doc="""[nA]. Effective external input""")

    I_ext = NArray(label=":math:`I_{ext}`",
                   default=numpy.array([
                       0.0,
                   ]),
                   domain=Range(lo=0.0, hi=1.0, step=0.001),
                   doc="""[nA]. Effective external stimulus input""")

    G = NArray(label=":math:`G`",
               default=numpy.array([
                   2.0,
               ]),
               domain=Range(lo=0.0, hi=10.0, step=0.01),
               doc="""Global coupling scaling""")

    lamda = NArray(label=r":math:`\lambda`",
                   default=numpy.array([
                       0.0,
                   ]),
                   domain=Range(lo=0.0, hi=1.0, step=0.01),
                   doc="""Inhibitory global coupling scaling""")

    state_variable_range = Final(default={
        "S_e": numpy.array([0.0, 1.0]),
        "S_i": numpy.array([0.0, 1.0])
    },
                                 label="State variable ranges [lo, hi]",
                                 doc="Population firing rate")

    # Used for phase-plane axis ranges and to bound random initial() conditions.
    state_variable_boundaries = Final(
        label="State Variable boundaries [lo, hi]",
        default={
            "S_e": numpy.array([0.0, 1.0]),
            "S_i": numpy.array([0.0, 1.0])
        },
        doc="""The values for each state-variable should be set to encompass
            the boundaries of the dynamic range of that state-variable. Set None for one-sided boundaries"""
    )

    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=('S_e', 'S_i'),
        default=('S_e', 'S_i'),
        doc="""default state variables to be monitored""")

    state_variables = ['S_e', 'S_i']
    _nvar = 2
    cvar = numpy.array([0], dtype=numpy.int32)

    def configure(self):
        """  """
        super(ReducedWongWangExcInh, self).configure()
        self.update_derived_parameters()

    def _numpy_dfun(self, state_variables, coupling, local_coupling=0.0):
        S = state_variables[:, :]

        c_0 = coupling[0, :]

        # if applicable
        lc_0 = local_coupling * S[0]

        coupling = self.G * self.J_N * (c_0 + lc_0)

        J_N_S_e = self.J_N * S[0]

        x_e = self.w_p * J_N_S_e - self.J_i * S[
            1] + self.W_e * self.I_o + coupling + self.I_ext

        x_e = self.a_e * x_e - self.b_e
        H_e = x_e / (1 - numpy.exp(-self.d_e * x_e))

        dS_e = -(S[0] / self.tau_e) + (1 - S[0]) * H_e * self.gamma_e

        x_i = J_N_S_e - S[1] + self.W_i * self.I_o + self.lamda * coupling

        x_i = self.a_i * x_i - self.b_i
        H_i = x_i / (1 - numpy.exp(-self.d_i * x_i))

        dS_i = -(S[1] / self.tau_i) + H_i * self.gamma_i

        derivative = numpy.array([dS_e, dS_i])

        return derivative

    def dfun(self, x, c, local_coupling=0.0, **kwargs):
        r"""
        Equations taken from [DPA_2013]_ , page 11242

        .. math::
                 x_{ek}       &=   w_p\,J_N \, S_{ek} - J_iS_{ik} + W_eI_o + GJ_N \mathbf\Gamma(S_{ek}, S_{ej}, u_{kj}) \\
                 H(x_{ek})    &=  \dfrac{a_ex_{ek}- b_e}{1 - \exp(-d_e(a_ex_{ek} -b_e))} \\
                 \dot{S}_{ek} &= -\dfrac{S_{ek}}{\tau_e} + (1 - S_{ek}){\gamma}H(x_{ek}) \\

                 x_{ik}       &=   J_N \, S_{ek} - S_{ik} + W_iI_o + {\lambda}GJ_N \mathbf\Gamma(S_{ik}, S_{ej}, u_{kj}) \\
                 H(x_{ik})    &=  \dfrac{a_ix_{ik} - b_i}{1 - \exp(-d_i(a_ix_{ik} -b_i))} \\
                 \dot{S}_{ik} &= -\dfrac{S_{ik}}{\tau_i} + \gamma_iH(x_{ik}) \

        """
        x_ = x.reshape(x.shape[:-1]).T
        c_ = c.reshape(c.shape[:-1]).T + local_coupling * x[0]
        deriv = _numba_dfun(x_, c_, self.a_e, self.b_e, self.d_e, self.gamma_e,
                            self.tau_e, self.w_p, self.W_e, self.J_N, self.a_i,
                            self.b_i, self.d_i, self.gamma_i, self.tau_i,
                            self.W_i, self.J_i, self.G, self.lamda, self.I_o,
                            self.I_ext)
        return deriv.T[..., numpy.newaxis]
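
For intuition, a sketch of the excitatory transfer function and gating derivative from _numpy_dfun, written out for a single node with the class defaults and made-up state/coupling values:

import numpy

a_e, b_e, d_e = 310.0, 125.0, 0.160        # defaults of the class above
gamma_e, tau_e = 0.641 / 1000, 100.0
w_p, J_N, W_e, I_o, G = 1.4, 0.15, 1.0, 0.382, 2.0
J_i, I_ext = 1.0, 0.0

S_e, S_i, c_0 = 0.3, 0.2, 0.1              # hypothetical state and coupling values

x_e = w_p * J_N * S_e - J_i * S_i + W_e * I_o + G * J_N * c_0 + I_ext
x_e = a_e * x_e - b_e
H_e = x_e / (1.0 - numpy.exp(-d_e * x_e))  # sigmoidal firing-rate response
dS_e = -(S_e / tau_e) + (1.0 - S_e) * H_e * gamma_e
print(H_e, dS_e)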
Example #10
class TimeSeries(HasTraits):
    """
    Base time-series dataType.
    """
    title = Attr(str)

    data = NArray(
        label="Time-series data",
        doc=
        """An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions"""
    )

    labels_ordering = List(
        default=("Time", "State Variable", "Space", "Mode"),
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = Attr(
        field_type=dict,
        default={},
        label=
        "Specific labels for each dimension for the data stored in this timeseries.",
        doc=
        """ A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension] }"""
    )

    time = NArray(
        label="Time-series time",
        required=False,
        doc=
        """An array of time values for the time-series, with a shape of [tpts,].
            This is 'time' as returned by the simulator's monitors.""")

    start_time = Float(label="Start Time:")

    sample_period = Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g sec, msec, usec, ...)
    sample_period_unit = Attr(field_type=str,
                              label="Sample Period Measure Unit",
                              default="ms")

    @property
    def nr_dimensions(self):
        return self.data.ndim

    @property
    def sample_rate(self):
        """:returns samples per second [kHz] """
        if self.sample_period_unit in ("ms", "msec"):
            return 1000.0 / self.sample_period
        if self.sample_period_unit in ("us", "usec"):
            return 1000000.0 / self.sample_period
        return 1.0 / self.sample_period

    def summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Time-series type": self.__class__.__name__,
            "Time-series name": self.title,
            "Dimensions": self.labels_ordering,
            "Time units": self.sample_period_unit,
            "Sample period": self.sample_period,
            "Length": self.sample_period * self.data.shape[0]
        }
        summary.update(narray_summary_info(self.data))
        return summary

    def duplicate(self, **kwargs):
        duplicate = deepcopy(self)
        for attr, value in kwargs.items():
            setattr(duplicate, attr, value)
        duplicate.configure()
        return duplicate

    def _get_index_of_state_variable(self, sv_label):
        try:
            sv_index = numpy.where(self.variables_labels == sv_label)[0][0]
        except KeyError:
            self.logger.error(
                "There are no state variables defined for this instance. Its shape is: %s",
                self.data.shape)
            raise
        except IndexError:
            self.logger.error(
                "Cannot access index of state variable label: %s. Existing state variables: %s"
                % (sv_label, self.variables_labels))
            raise
        return sv_index

    def get_state_variable(self, sv_label):
        sv_data = self.data[:,
                            self._get_index_of_state_variable(sv_label), :, :]
        subspace_labels_dimensions = deepcopy(self.labels_dimensions)
        subspace_labels_dimensions[self.labels_ordering[1]] = [sv_label]
        if sv_data.ndim == 3:
            sv_data = numpy.expand_dims(sv_data, 1)
        return self.duplicate(data=sv_data,
                              labels_dimensions=subspace_labels_dimensions)

    def _get_indices_for_labels(self, list_of_labels):
        list_of_indices_for_labels = []
        for label in list_of_labels:
            try:
                space_index = numpy.where(self.space_labels == label)[0][0]
            except ValueError:
                self.logger.error(
                    "Cannot access index of space label: %s. Existing space labels: %s"
                    % (label, self.space_labels))
                raise
            list_of_indices_for_labels.append(space_index)
        return list_of_indices_for_labels

    def get_subspace_by_index(self, list_of_index, **kwargs):
        self._check_space_indices(list_of_index)
        subspace_data = self.data[:, :, list_of_index, :]
        subspace_labels_dimensions = deepcopy(self.labels_dimensions)
        subspace_labels_dimensions[self.labels_ordering[
            2]] = self.space_labels[list_of_index].tolist()
        if subspace_data.ndim == 3:
            subspace_data = numpy.expand_dims(subspace_data, 2)
        return self.duplicate(data=subspace_data,
                              labels_dimensions=subspace_labels_dimensions,
                              **kwargs)

    def get_subspace_by_labels(self, list_of_labels):
        list_of_indices_for_labels = self._get_indices_for_labels(
            list_of_labels)
        return self.get_subspace_by_index(list_of_indices_for_labels)

    def __getattr__(self, attr_name):
        if self.labels_ordering[1] in self.labels_dimensions.keys():
            if attr_name in self.variables_labels:
                return self.get_state_variable(attr_name)
        if self.labels_ordering[2] in self.labels_dimensions.keys():
            if attr_name in self.space_labels:
                return self.get_subspace_by_labels([attr_name])
        raise AttributeError("%r object has no attribute %r" %
                             (self.__class__.__name__, attr_name))

    def _get_index_for_slice_label(self, slice_label, slice_idx):
        if slice_idx == 1:
            return self._get_indices_for_labels([slice_label])[0]
        if slice_idx == 2:
            return self._get_index_of_state_variable(slice_label)

    @property
    def shape(self):
        return self.data.shape

    @property
    def time_unit(self):
        return self.sample_period_unit

    @property
    def space_labels(self):
        return numpy.array(
            self.labels_dimensions.get(self.labels_ordering[2], []))

    @property
    def variables_labels(self):
        return numpy.array(
            self.labels_dimensions.get(self.labels_ordering[1], []))

    def _check_space_indices(self, list_of_index):
        for index in list_of_index:
            if index < 0 or index > self.data.shape[1]:
                self.logger.error(
                    "Some of the given indices are out of space range: [0, %s]",
                    self.data.shape[1])
                raise IndexError
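
A plain-numpy sketch of the label-to-index lookup behind get_subspace_by_labels, on a toy (time, state variable, space, mode) array with hypothetical region labels:

import numpy

data = numpy.random.rand(100, 2, 4, 1)            # [tpts, state vars, nodes, modes]
space_labels = numpy.array(["rA", "rB", "rC", "rD"])

wanted = ["rB", "rD"]
indices = [numpy.where(space_labels == lbl)[0][0] for lbl in wanted]
subspace = data[:, :, indices, :]                 # slice the Space dimension
print(indices, subspace.shape)                    # [1, 3] (100, 2, 2, 1)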
Example #11
class SpatialPatternVolume(SpatialPattern):
    """ A spatio-temporal pattern defined in a volume. """

    volume = Attr(volumes.Volume, label="Volume")

    focal_points_volume = NArray(dtype=int, label="Focal points")
Example #12
class Epileptor(ModelNumbaDfun):
    r"""
    The Epileptor is a composite neural mass model of six dimensions which
    has been crafted to model the phenomenology of epileptic seizures.
    (see [Jirsaetal_2014]_)
    Equations and default parameters are taken from [Jirsaetal_2014]_.
          +------------------------------------------------------+
          |                         Table 1                      |
          +----------------------+-------------------------------+
          |        Parameter     |           Value               |
          +======================+===============================+
          |         I_rest1      |              3.1              |
          +----------------------+-------------------------------+
          |         I_rest2      |              0.45             |
          +----------------------+-------------------------------+
          |         r            |            0.00035            |
          +----------------------+-------------------------------+
          |         x_0          |             -1.6              |
          +----------------------+-------------------------------+
          |         slope        |              0.0              |
          +----------------------+-------------------------------+
          |             Integration parameter                    |
          +----------------------+-------------------------------+
          |           dt         |              0.1              |
          +----------------------+-------------------------------+
          |  simulation_length   |              4000             |
          +----------------------+-------------------------------+
          |                    Noise                             |
          +----------------------+-------------------------------+
          |         nsig         | [0., 0., 0., 1e-3, 1e-3, 0.]  |
          +----------------------+-------------------------------+
          |              Jirsa et al. 2014                       |
          +------------------------------------------------------+
    .. figure :: img/Epileptor_01_mode_0_pplane.svg
        :alt: Epileptor phase plane
    .. [Jirsaetal_2014] Jirsa, V. K.; Stacey, W. C.; Quilichini, P. P.;
        Ivanov, A. I.; Bernard, C. *On the nature of seizure dynamics.* Brain,
        2014.
    Variables of interest to be used by monitors: -y[0] + y[3]
        .. math::
            \dot{x_{1}} &=& y_{1} - f_{1}(x_{1}, x_{2}) - z + I_{ext1} \\
            \dot{y_{1}} &=& c - d x_{1}^{2} - y{1} \\
            \dot{z} &=&
            \begin{cases}
            r(4 (x_{1} - x_{0}) - z-0.1 z^{7}) & \text{if } x<0 \\
            r(4 (x_{1} - x_{0}) - z) & \text{if } x \geq 0
            \end{cases} \\
            \dot{x_{2}} &=& -y_{2} + x_{2} - x_{2}^{3} + I_{ext2} + 0.002 g - 0.3 (z-3.5) \\
            \dot{y_{2}} &=& 1 / \tau (-y_{2} + f_{2}(x_{2}))\\
            \dot{g} &=& -0.01 (g - 0.1 x_{1})
    where:
        .. math::
            f_{1}(x_{1}, x_{2}) =
            \begin{cases}
            a x_{1}^{3} - b x_{1}^2 & \text{if } x_{1} <0\\
            -(slope - x_{2} + 0.6(z-4)^2) x_{1} &\text{if }x_{1} \geq 0
            \end{cases}
    and:
        .. math::
            f_{2}(x_{2}) =
            \begin{cases}
            0 & \text{if } x_{2} <-0.25\\
            a_{2}(x_{2} + 0.25) & \text{if } x_{2} \geq -0.25
            \end{cases}
    Note Feb. 2017: the slow permittivity variable can be modified to account for the time
    difference between interictal and ictal states (see [Proixetal_2014]).
    .. [Proixetal_2014] Proix, T.; Bartolomei, F; Chauvel, P; Bernard, C; Jirsa, V.K. *
        Permittivity coupling across brain regions determines seizure recruitment in
        partial epilepsy.* J Neurosci 2014, 34:15009-21.
    """

    a = NArray(label=":math:`a`",
               default=numpy.array([1.0]),
               doc="Coefficient of the cubic term in the first state variable")

    b = NArray(
        label=":math:`b`",
        default=numpy.array([3.0]),
        doc="Coefficient of the squared term in the first state variabel")

    c = NArray(label=":math:`c`",
               default=numpy.array([1.0]),
               doc="Additive coefficient for the second state variable, \
        called :math:`y_{0}` in Jirsa paper")

    d = NArray(
        label=":math:`d`",
        default=numpy.array([5.0]),
        doc="Coefficient of the squared term in the second state variable")

    r = NArray(label=":math:`r`",
               domain=Range(lo=0.0, hi=0.001, step=0.00005),
               default=numpy.array([0.00035]),
               doc="Temporal scaling in the third state variable, \
        called :math:`1/\\tau_{0}` in Jirsa paper")

    s = NArray(label=":math:`s`",
               default=numpy.array([4.0]),
               doc="Linear coefficient in the third state variable")

    x0 = NArray(label=":math:`x_0`",
                domain=Range(lo=-3.0, hi=-1.0, step=0.1),
                default=numpy.array([-1.6]),
                doc="Epileptogenicity parameter")

    Iext = NArray(label=":math:`I_{ext}`",
                  domain=Range(lo=1.5, hi=5.0, step=0.1),
                  default=numpy.array([3.1]),
                  doc="External input current to the first population")

    slope = NArray(label=":math:`slope`",
                   domain=Range(lo=-16.0, hi=6.0, step=0.1),
                   default=numpy.array([0.]),
                   doc="Linear coefficient in the first state variable")

    Iext2 = NArray(label=":math:`I_{ext2}`",
                   domain=Range(lo=0.0, hi=1.0, step=0.05),
                   default=numpy.array([0.45]),
                   doc="External input current to the second population")

    tau = NArray(label=":math:`\\tau`",
                 default=numpy.array([10.0]),
                 doc="Temporal scaling coefficient in fifth state variable")

    aa = NArray(label=":math:`aa`",
                default=numpy.array([6.0]),
                doc="Linear coefficient in fifth state variable")

    bb = NArray(
        label=":math:`bb`",
        default=numpy.array([2.0]),
        doc=
        "Linear coefficient of lowpass excitatory coupling in fourth state variable"
    )

    Kvf = NArray(label=":math:`K_{vf}`",
                 default=numpy.array([0.0]),
                 domain=Range(lo=0.0, hi=4.0, step=0.5),
                 doc="Coupling scaling on a very fast time scale.")

    Kf = NArray(label=":math:`K_{f}`",
                default=numpy.array([0.0]),
                domain=Range(lo=0.0, hi=4.0, step=0.5),
                doc="Correspond to the coupling scaling on a fast time scale.")

    Ks = NArray(
        label=":math:`K_{s}`",
        default=numpy.array([0.0]),
        domain=Range(lo=-4.0, hi=4.0, step=0.1),
        doc=
        "Permittivity coupling, that is from the fast time scale toward the slow time scale"
    )

    tt = NArray(label=":math:`K_{tt}`",
                default=numpy.array([1.0]),
                domain=Range(lo=0.001, hi=10.0, step=0.001),
                doc="Time scaling of the whole system")

    modification = NArray(
        dtype=bool,
        label=":math:`modification`",
        default=numpy.array([False]),
        doc="When modification is True, then use nonlinear influence on z. \
        The default value is False, i.e., linear influence.")

    state_variable_range = Final(
        default={
            "x1": numpy.array([-2., 1.]),
            "y1": numpy.array([-20., 2.]),
            "z": numpy.array([2.0, 5.0]),
            "x2": numpy.array([-2., 0.]),
            "y2": numpy.array([0., 2.]),
            "g": numpy.array([-1., 1.])
        },
        label="State variable ranges [lo, hi]",
        doc="Typical bounds on state variables in the Epileptor model.")

    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=('x1', 'y1', 'z', 'x2', 'y2', 'g', 'x2 - x1'),
        default=("x2 - x1", 'z'),
        doc="Quantities of the Epileptor available to monitor.",
    )

    state_variables = ('x1', 'y1', 'z', 'x2', 'y2', 'g')

    _nvar = 6
    cvar = numpy.array(
        [0, 3], dtype=numpy.int32)  # should these not be constant Attr's?
    cvar.setflags(write=False)  # todo review this

    def _numpy_dfun(self,
                    state_variables,
                    coupling,
                    local_coupling=0.0,
                    array=numpy.array,
                    where=numpy.where,
                    concat=numpy.concatenate):

        y = state_variables
        ydot = numpy.empty_like(state_variables)

        Iext = self.Iext + local_coupling * y[0]
        c_pop1 = coupling[0, :]
        c_pop2 = coupling[1, :]

        # population 1
        if_ydot0 = -self.a * y[0]**2 + self.b * y[0]
        else_ydot0 = self.slope - y[3] + 0.6 * (y[2] - 4.0)**2
        ydot[0] = self.tt * (y[1] - y[2] + Iext + self.Kvf * c_pop1 +
                             where(y[0] < 0., if_ydot0, else_ydot0) * y[0])
        ydot[1] = self.tt * (self.c - self.d * y[0]**2 - y[1])

        # energy
        if_ydot2 = -0.1 * y[2]**7
        else_ydot2 = 0
        if self.modification:
            h = self.x0 + 3. / (1. + numpy.exp(-(y[0] + 0.5) / 0.1))
        else:
            h = 4 * (y[0] - self.x0) + where(y[2] < 0., if_ydot2, else_ydot2)
        ydot[2] = self.tt * (self.r * (h - y[2] + self.Ks * c_pop1))

        # population 2
        ydot[3] = self.tt * (-y[4] + y[3] - y[3]**3 + self.Iext2 +
                             self.bb * y[5] - 0.3 *
                             (y[2] - 3.5) + self.Kf * c_pop2)
        if_ydot4 = 0
        else_ydot4 = self.aa * (y[3] + 0.25)
        ydot[4] = self.tt * (
            (-y[4] + where(y[3] < -0.25, if_ydot4, else_ydot4)) / self.tau)

        # filter
        ydot[5] = self.tt * (-0.01 * (y[5] - 0.1 * y[0]))

        return ydot

    def dfun(self, x, c, local_coupling=0.0):
        r"""
        Computes the derivatives of the state variables of the Epileptor
        with respect to time.
        Implementation note: we expect this version of the Epileptor to be used
        in a vectorized manner. Concretely, y has a shape of (6, n) where n is
        the number of nodes in the network. A consequence is that
        the original use of if/else is translated by calculating both the true
        and false forms and mixing them using a boolean mask.
        Variables of interest to be used by monitors: -y[0] + y[3]
            .. math::
                \dot{x_{1}} &=& y_{1} - f_{1}(x_{1}, x_{2}) - z + I_{ext1} \\
                \dot{y_{1}} &=& c - d x_{1}^{2} - y{1} \\
                \dot{z} &=&
                \begin{cases}
                r(4 (x_{1} - x_{0}) - z-0.1 z^{7}) & \text{if } x<0 \\
                r(4 (x_{1} - x_{0}) - z) & \text{if } x \geq 0
                \end{cases} \\
                \dot{x_{2}} &=& -y_{2} + x_{2} - x_{2}^{3} + I_{ext2} + 0.002 g - 0.3 (z-3.5) \\
                \dot{y_{2}} &=& 1 / \tau (-y_{2} + f_{2}(x_{2}))\\
                \dot{g} &=& -0.01 (g - 0.1 x_{1})
        where:
            .. math::
                f_{1}(x_{1}, x_{2}) =
                \begin{cases}
                a x_{1}^{3} - b x_{1}^2 & \text{if } x_{1} <0\\
                -(slope - x_{2} + 0.6(z-4)^2) x_{1} &\text{if }x_{1} \geq 0
                \end{cases}
        and:
            .. math::
                f_{2}(x_{2}) =
                \begin{cases}
                0 & \text{if } x_{2} <-0.25\\
                a_{2}(x_{2} + 0.25) & \text{if } x_{2} \geq -0.25
                \end{cases}
        """
        x_ = x.reshape(x.shape[:-1]).T
        c_ = c.reshape(c.shape[:-1]).T
        Iext = self.Iext + local_coupling * x[0, :, 0]
        deriv = _numba_dfun(x_, c_, self.x0, Iext, self.Iext2, self.a, self.b,
                            self.slope, self.tt, self.Kvf, self.c, self.d,
                            self.r, self.Ks, self.Kf, self.aa, self.bb,
                            self.tau, self.modification)
        return deriv.T[..., numpy.newaxis]
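
A small sketch (made-up state values, class defaults for a, b and slope) of the masked if/else pattern the docstring describes: both branches of the switched term in ydot[0] are computed, then mixed per node with numpy.where:

import numpy

a, b, slope = 1.0, 3.0, 0.0                      # defaults of the class above
x1 = numpy.array([-0.5, 0.2, 0.8])               # y[0] for three nodes
x2 = numpy.array([-1.0, -1.0, -1.0])             # y[3]
z = numpy.array([3.0, 3.5, 4.0])                 # y[2]

if_branch = -a * x1 ** 2 + b * x1                # used where x1 < 0
else_branch = slope - x2 + 0.6 * (z - 4.0) ** 2  # used where x1 >= 0
switched_term = numpy.where(x1 < 0.0, if_branch, else_branch) * x1
print(switched_term)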
Example #13
class Epileptor2D(ModelNumbaDfun):
    r"""
        Two-dimensional reduction of the Epileptor.

        .. moduleauthor:: [email protected]

        Taking advantage of time scale separation and focusing on the slower time scale,
        the five-dimensional Epileptor reduces to a two-dimensional system (see [Proixetal_2014,
        Proixetal_2017]).

        Note: the slow permittivity variable can be modified to account for the time
        difference between interictal and ictal states (see [Proixetal_2014]).

        Equations and default parameters are taken from [Proixetal_2014]:

        .. math::
            \dot{x_{1,i}} &=& - x_{1,i}^{3} - 2x_{1,i}^{2}  + 1 - z_{i} + I_{ext1,i} \\
            \dot{z_{i}} &=& r(h - z_{i})

        with:

        .. math::
            h =
            \begin{cases}
            x_{0} + 3 / (1 + \exp(-(x_{1,i} + 0.5)/0.1)) & \text{if } modification\\
            4 (x_{1,i} - x_{0}) & \text{else }
            \end{cases}
        References:
            [Proixetal_2014] Proix, T.; Bartolomei, F; Chauvel, P; Bernard, C; Jirsa, V.K. *
            Permittivity coupling across brain regions determines seizure recruitment in
            partial epilepsy.* J Neurosci 2014, 34:15009-21.

            [Proixetal_2017] Proix, T.; Bartolomei, F; Guye, M.; Jirsa, V.K. *Individual brain
            structure and modelling predict seizure propagation.* Brain 2017, 140; 641–654.
    """

    a = NArray(
        label=":math:`a`",
        default=numpy.array([1.0]),
        doc="Coefficient of the cubic term in the first state-variable.")

    b = NArray(
        label=":math:`b`",
        default=numpy.array([3.0]),
        doc="Coefficient of the squared term in the first state-variable.")

    c = NArray(label=":math:`c`",
               default=numpy.array([1.0]),
               doc="Additive coefficient for the second state-variable x_{2}, \
        called :math:`y_{0}` in Jirsa paper.")

    d = NArray(
        label=":math:`d`",
        default=numpy.array([5.0]),
        doc=
        "Coefficient of the squared term in the second state-variable x_{2}.")

    r = NArray(label=":math:`r`",
               domain=Range(lo=0.0, hi=0.001, step=0.00005),
               default=numpy.array([0.00035]),
               doc="Temporal scaling in the slow state-variable, \
        called :math:`1\\tau_{0}` in Jirsa paper (see class Epileptor).")

    x0 = NArray(label=":math:`x_0`",
                domain=Range(lo=-3.0, hi=-1.0, step=0.1),
                default=numpy.array([-1.6]),
                doc="Epileptogenicity parameter.")

    Iext = NArray(label=":math:`I_{ext}`",
                  domain=Range(lo=1.5, hi=5.0, step=0.1),
                  default=numpy.array([3.1]),
                  doc="External input current to the first state-variable.")

    slope = NArray(label=":math:`slope`",
                   domain=Range(lo=-16.0, hi=6.0, step=0.1),
                   default=numpy.array([0.]),
                   doc="Linear coefficient in the first state-variable.")

    Kvf = NArray(label=":math:`K_{vf}`",
                 default=numpy.array([0.0]),
                 domain=Range(lo=0.0, hi=4.0, step=0.5),
                 doc="Coupling scaling on a very fast time scale.")

    Ks = NArray(
        label=":math:`K_{s}`",
        default=numpy.array([0.0]),
        domain=Range(lo=-4.0, hi=4.0, step=0.1),
        doc=
        "Permittivity coupling, that is from the fast time scale toward the slow time scale."
    )

    tt = NArray(
        label=":math:`tt`",
        default=numpy.array([1.0]),
        domain=Range(lo=0.001, hi=1.0, step=0.001),
        doc="Time scaling of the whole system to the system in real time.")

    modification = NArray(
        dtype=bool,
        label=":math:`modification`",
        default=numpy.array([False]),
        doc="When modification is True, then use nonlinear influence on z. \
        The default value is False, i.e., linear influence.")

    state_variable_range = Final(
        default={
            "x1": numpy.array([-2., 1.]),
            "z": numpy.array([2.0, 5.0])
        },
        label="State variable ranges [lo, hi]",
        doc="Typical bounds on state-variables in the Epileptor 2D model.")

    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=('x1', 'z'),
        default=('x1', ),
        doc="Quantities of the Epileptor 2D available to monitor.")

    state_variables = ('x1', 'z')

    _nvar = 2
    cvar = numpy.array([0], dtype=numpy.int32)

    def _numpy_dfun(self,
                    state_variables,
                    coupling,
                    local_coupling=0.0,
                    array=numpy.array,
                    where=numpy.where,
                    concat=numpy.concatenate):

        y = state_variables
        ydot = numpy.empty_like(state_variables)

        Iext = self.Iext + local_coupling * y[0]
        c_pop = coupling[0, :]

        # population 1
        if_ydot0 = self.a * y[0]**2 + (self.d - self.b) * y[0]
        else_ydot0 = -self.slope - 0.6 * (y[1] - 4.0)**2 + self.d * y[0]

        ydot[0] = self.tt * (self.c - y[1] + Iext + self.Kvf * c_pop -
                             (where(y[0] < 0., if_ydot0, else_ydot0)) * y[0])

        # energy
        if_ydot1 = -0.1 * y[1]**7
        else_ydot1 = 0

        if self.modification:
            h = self.x0 + 3. / (1. + numpy.exp(-(y[0] + 0.5) / 0.1))
        else:
            h = 4 * (y[0] - self.x0) + where(y[1] < 0., if_ydot1, else_ydot1)

        ydot[1] = self.tt * (self.r * (h - y[1] + self.Ks * c_pop))

        return ydot

    def dfun(self, x, c, local_coupling=0.0):
        r"""
        Computes the derivatives of the state-variables of the Epileptor 2D
        with respect to time.
        Equations and default parameters are taken from [Proixetal_2014]:
        .. math::
            \dot{x_{1,i}} &=& - x_{1,i}^{3} - 2x_{1,i}^{2}  + 1 - z_{i} + I_{ext1,i} \\
            \dot{z_{i}} &=& r(h - z_{i})
        with
            h =
            \begin{cases}
            x_{0} + 3 / (1 + \exp(-(x_{1} + 0.5)/0.1)) & \text{if } modification\\
            4 (x_{1,i} - x_{0}) & \text{else }
            \end{cases}
        """

        x_ = x.reshape(x.shape[:-1]).T
        c_ = c.reshape(c.shape[:-1]).T
        Iext = self.Iext + local_coupling * x[0, :, 0]
        deriv = _numba_dfun_epi2d(x_, c_, self.x0, Iext, self.a, self.b,
                                  self.slope, self.c, self.d, self.r, self.Kvf,
                                  self.Ks, self.tt, self.modification)
        return deriv.T[..., numpy.newaxis]
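
# --- Hedged usage sketch (not part of the original listing) -------------------
# Minimal example of evaluating the 2D Epileptor derivatives directly, assuming
# this Epileptor2D class and numpy are importable. Array shapes follow the TVB
# convention (n_var, n_nodes, n_modes); values are illustrative only. The pure
# numpy dfun is called so the numba helper is not required.
import numpy

epi2d = Epileptor2D(x0=numpy.array([-1.8]))
epi2d.configure()

n_nodes = 4
state = numpy.zeros((2, n_nodes, 1))        # state variables: x1, z
state[0] = -1.6                             # x1 near its typical resting value
state[1] = 3.0                              # z inside its expected range
no_coupling = numpy.zeros((1, n_nodes, 1))  # single coupling variable (cvar = [0])

dstate = epi2d._numpy_dfun(state, no_coupling)
print(dstate.shape)                         # -> (2, 4, 1)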
Exemplo n.º 14
0
class Simulator(HasTraits):
    """A Simulator assembles components required to perform simulations."""

    connectivity = Attr(
        field_type=connectivity.Connectivity,
        label="Long-range connectivity",
        default=None,
        required=True,
        doc="""A tvb.datatypes.Connectivity object which contains the
         structural long-range connectivity data (i.e., white-matter tracts). In
         combination with the ``Long-range coupling function`` it defines the inter-regional
         connections. These couplings undergo a time delay via signal propagation
         with a propagation speed of ``Conduction Speed``""")

    conduction_speed = Float(
        label="Conduction Speed",
        default=3.0,
        required=False,
        # range=basic.Range(lo=0.01, hi=100.0, step=1.0),
        doc="""Conduction speed for ``Long-range connectivity`` (mm/ms)""")

    coupling = Attr(
        field_type=coupling.Coupling,
        label="Long-range coupling function",
        default=coupling.Linear(),
        required=True,
        doc="""The coupling function is applied to the activity propagated
        between regions by the ``Long-range connectivity`` before it enters the local
        dynamic equations of the Model. Its primary purpose is to 'rescale' the
        incoming activity to a level appropriate to Model.""")

    surface: cortex.Cortex = Attr(
        field_type=cortex.Cortex,
        label="Cortical surface",
        default=None,
        required=False,
        doc="""By default, a Cortex object which represents the
        cortical surface defined by points in the 3D physical space and their
        neighborhood relationship. In the current TVB version, when setting up a
        surface-based simulation, the option to configure the spatial spread of
        the ``Local Connectivity`` is available.""")

    stimulus = Attr(
        field_type=patterns.SpatioTemporalPattern,
        label="Spatiotemporal stimulus",
        default=None,
        required=False,
        doc="""A ``Spatiotemporal stimulus`` can be defined at the region or surface level.
        It's composed of spatial and temporal components. For region defined stimuli
        the spatial component is just the strength with which the temporal
        component is applied to each region. For surface defined stimuli,  a
        (spatial) function, with finite-support, is used to define the strength
        of the stimuli on the surface centred around one or more focal points.
        In the current version of TVB, stimuli are applied to the first state
        variable of the ``Local dynamic model``.""")

    model: Model = Attr(
        field_type=models.Model,
        label="Local dynamic model",
        default=models.Generic2dOscillator(),
        required=True,
        doc="""A tvb.simulator.Model object which describe the local dynamic
        equations, their parameters, and, to some extent, where connectivity
        (local and long-range) enters and which state-variables the Monitors
        monitor. By default the 'Generic2dOscillator' model is used. Read the
        Scientific documentation to learn more about this model.""")

    integrator = Attr(
        field_type=integrators.Integrator,
        label="Integration scheme",
        default=integrators.HeunDeterministic(),
        required=True,
        doc="""A tvb.simulator.Integrator object which is
            an integration scheme with supporting attributes such as
            integration step size and noise specification for stochastic
            methods. It is used to compute the time courses of the model state
            variables.""")

    initial_conditions = NArray(
        label="Initial Conditions",
        required=False,
        doc="""Initial conditions from which the simulation will begin. By
        default, random initial conditions are provided. Needs to be the same shape
        as simulator 'history', ie, initial history function which defines the 
        minimal initial state of the network with time delays before time t=0. 
        If the number of time points in the provided array is insufficient the 
        array will be padded with random values based on the 'state_variables_range'
        attribute.""")

    monitors = List(
        of=monitors.Monitor,
        label="Monitor(s)",
        default=(monitors.TemporalAverage(),),
        doc="""A tvb.simulator.Monitor or a list of tvb.simulator.Monitor
        objects that 'know' how to record relevant data from the simulation. Two
        main types exist: 1) simple, spatial and temporal, reductions (subsets
        or averages); 2) physiological measurements, such as EEG, MEG and fMRI.
        By default the Model's specified variables_of_interest are returned,
        temporally downsampled from the raw integration rate to a sample rate of
        1024Hz.""")

    simulation_length = Float(
        label="Simulation Length (ms, s, m, h)",
        default=1000.0,  # ie 1 second
        required=True,
        doc="""The length of a simulation (default in milliseconds).""")

    backend = ReferenceBackend()

    history = None  # type: SparseHistory

    @property
    def good_history_shape(self):
        """Returns expected history shape."""
        n_reg = self.connectivity.number_of_regions
        shape = self.connectivity.horizon, len(self.model.state_variables), n_reg, self.model.number_of_modes
        return shape

    calls = 0
    current_step = 0
    number_of_nodes = None
    _memory_requirement_guess = None
    _memory_requirement_census = None
    _storage_requirement = None
    _runtime = None

    integrate_next_step = None

    # methods consist of
    # 1) generic configure
    # 2) component specific configure
    # 3) loop preparation
    # 4) loop step
    # 5) estimations

    @property
    def is_surface_simulation(self):
        if self.surface:
            return True
        return False

    def configure_integration_for_model(self):
        self.integrator.configure_boundaries(self.model)
        if self.model.has_nonint_vars:
            self.integrate_next_step = self.integrator.integrate_with_update
            self.integrator. \
                reconfigure_boundaries_and_clamping_for_integration_state_variables(self.model)
        else:
            self.integrate_next_step = self.integrator.integrate

    def preconfigure(self):
        """Configure just the basic fields, so that memory can be estimated."""
        self.connectivity.configure()
        if self.surface:
            self.surface.configure()
        if self.stimulus:
            self.stimulus.configure()
        self.coupling.configure()
        # ------- Keep this order of configurations ----
        self.model.configure()  # 1
        self.integrator.configure()  # 2
        # Configure integrators' next step computation
        # and state variables' boundaries and clamping,
        # based on model attributes  # 3
        self.configure_integration_for_model()
        # ----------------------------------------------
        # monitors needs to be a list or tuple, even if there is only one...
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.configure()
        self._set_number_of_nodes()
        self._guesstimate_memory_requirement()

    def _set_number_of_nodes(self):
        # "Nodes" refers to either regions or vertices + non-cortical regions.
        if self.surface is None:
            self.number_of_nodes = self.connectivity.number_of_regions
            self.log.info('Region simulation with %d ROI nodes', self.number_of_nodes)
        else:
            self._regmap, nc, nsc = self.backend.full_region_map(self.surface, self.connectivity)
            self.number_of_nodes = nc + nsc
            self.log.info('Surface simulation with %d vertices + %d non-cortical, %d total nodes',
                          nc, nsc, self.number_of_nodes)

    def configure(self, full_configure=True):
        """Configure simulator and its components.

        The first step of configuration is to run the configure methods of all
        the Simulator's components, i.e., its traited attributes.

        Configuration of a Simulator primarily consists of calculating the
        attributes, etc., which depend on the combinations of the Simulator's
        traited attributes (keyword args).

        Converts delays from physical time units into integration steps
        and updates attributes that depend on combinations of the 6 inputs.

        Returns
        -------
        sim: Simulator
            The configured Simulator instance.

        """
        if full_configure:
            # When run from GUI, preconfigure is run separately, and we want to avoid running that part twice
            self.preconfigure()
        self.model._spatialize_model_parameters(sim=self)
        # Configure spatial component of any stimuli
        self._configure_stimuli()
        # Set delays, provided in physical units, in integration steps.
        self.connectivity.set_idelays(self.integrator.dt)
        # Reshape integrator.noise.nsig, if necessary.
        if isinstance(self.integrator, integrators.IntegratorStochastic):
            self._configure_integrator_noise()
        # create history
        # TODO refactor history impl to backend
        self._configure_history()
        # Configure Monitors to work with selected Model, etc...
        self._configure_monitors()
        # Estimate of memory usage.
        self._census_memory_requirement()
        # Allow user to chain configure to another call or assignment.
        return self

    def _prepare_local_coupling(self):
        if self.surface is None:
            return 0.0
        return self.surface.prepare_local_coupling(self.number_of_nodes)

    def _loop_compute_node_coupling(self, step):
        """Compute delayed node coupling values."""
        coupling = self.coupling(step, self.history)
        if self.surface is not None:
            coupling = coupling[:, self._regmap]
        return coupling

    def _prepare_stimulus(self):
        if self.stimulus is None:
            stimulus = 0.0
        else:
            # TODO time grid wrong for continuations
            time = numpy.r_[0.0: self.simulation_length: self.integrator.dt]
            self.stimulus.configure_time(time.reshape((1, -1)))
            stimulus = numpy.zeros((self.model.nvar, self.number_of_nodes, 1))
            self.log.debug("stimulus shape is: %s", stimulus.shape)
        return stimulus

    def _loop_update_stimulus(self, step, stimulus):
        """Update stimulus values for current time step."""
        if self.stimulus is not None:
            # TODO stim_step != current step
            stim_step = step - (self.current_step + 1)
            stimulus[self.model.stvar, :, :] = self.stimulus(stim_step).reshape((1, -1, 1))

    def _loop_update_history(self, step, state):
        """Update history."""
        if self.surface is not None and state.shape[1] > self.connectivity.number_of_regions:
            state = self.backend.surface_state_to_rois(self._regmap, self.connectivity.number_of_regions, state)
        self.history.update(step, state)

    def _loop_monitor_output(self, step, state, node_coupling):
        observed = self.model.observe(state)
        output = [monitor.record(step,
                                 node_coupling if isinstance(monitor, monitors.AfferentCoupling) else observed)
                  for monitor in self.monitors]
        if any(outputi is not None for outputi in output):
            return output

    def __call__(self, simulation_length=None, random_state=None, n_steps=None):
        """
        Return an iterator which steps through simulation time, generating monitor outputs.

        See the run method for a convenient way to collect all output in one call.

        :param simulation_length: Length of the simulation to perform in ms.
        :param random_state:  State of NumPy RNG to use for stochastic integration.
        :param n_steps: Length of the simulation to perform in integration steps. Overrides simulation_length.
        :return: Iterator over monitor outputs.
        """

        self.calls += 1
        if simulation_length is not None:
            self.simulation_length = float(simulation_length)

        # initialization
        self._guesstimate_runtime()
        self._calculate_storage_requirement()
        # TODO a provided random_state should be used for history init
        self.integrator.set_random_state(random_state)
        local_coupling = self._prepare_local_coupling()
        stimulus = self._prepare_stimulus()
        state = self.current_state
        start_step = self.current_step + 1
        node_coupling = self._loop_compute_node_coupling(start_step)

        # integration loop
        if n_steps is None:
            n_steps = int(math.ceil(self.simulation_length / self.integrator.dt))
        else:
            if not numpy.issubdtype(type(n_steps), numpy.integer):
                raise TypeError("Incorrect type for n_steps: %s, expected integer" % type(n_steps))

        for step in range(start_step, start_step + n_steps):
            self._loop_update_stimulus(step, stimulus)
            state = self.integrate_next_step(state, self.model, node_coupling, local_coupling, stimulus)
            self._loop_update_history(step, state)
            node_coupling = self._loop_compute_node_coupling(step + 1)
            output = self._loop_monitor_output(step, state, node_coupling)
            if output is not None:
                yield output

        self.current_state = state
        self.current_step = self.current_step + n_steps

    def _configure_history(self, initial_conditions=None):
        self.history = SparseHistory.from_simulator(self, initial_conditions)

    def _configure_integrator_noise(self):
        """
        This enables noise to be state-variable specific and/or to enter
        only via specific brain structures, for example if we only want to
        consider noise as an external input entering the brain via appropriate
        thalamic nuclei.

        Supports 3 possible shapes:
            1) number_of_nodes;

            2) number_of_state_variables or number_of_integrated_state_variables; and

            3) (number_of_state_variables or number_of_integrated_state_variables, number_of_nodes).

        """
        # Noise has to have a shape corresponding to only the integrated state variables!
        good_history_shape = list(self.good_history_shape[1:])
        good_history_shape[0] = self.model.nintvar
        if self.integrator.noise.ntau > 0.0:
            self.integrator.noise.configure_coloured(self.integrator.dt, tuple(good_history_shape))
        else:
            self.integrator.noise.configure_white(self.integrator.dt, tuple(good_history_shape))

        if self.surface is not None:
            if self.integrator.noise.nsig.size == self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[self.surface.region_mapping]
            elif self.integrator.noise.nsig.size == self.model.nvar * self.connectivity.number_of_regions:
                self.integrator.noise.nsig = \
                    self.integrator.noise.nsig[self.model.state_variable_mask][:, self.surface.region_mapping]
            elif self.integrator.noise.nsig.size == self.model.nintvar * self.connectivity.number_of_regions:
                self.integrator.noise.nsig = self.integrator.noise.nsig[:, self.surface.region_mapping]

        good_nsig_shape = (self.model.nintvar, self.number_of_nodes, self.model.number_of_modes)
        nsig = self.integrator.noise.nsig
        self.log.debug("Given noise shape is %s", nsig.shape)
        if nsig.shape in (good_nsig_shape, (1,)):
            return
        elif nsig.shape == (self.model.nvar,):
            nsig = nsig[self.model.state_variable_mask].reshape((self.model.nintvar, 1, 1))
        elif nsig.shape == (self.model.nintvar,):
            nsig = nsig.reshape((self.model.nintvar, 1, 1))
        elif nsig.shape == (self.number_of_nodes,):
            nsig = nsig.reshape((1, self.number_of_nodes, 1))
        elif nsig.shape == (self.model.nvar, self.number_of_nodes):
            nsig = nsig[self.model.state_variable_mask].reshape((self.model.nintvar, self.number_of_nodes, 1))
        elif nsig.shape == (self.model.nintvar, self.number_of_nodes):
            nsig = nsig.reshape((self.model.nintvar, self.number_of_nodes, 1))
        else:
            msg = "Bad Simulator.integrator.noise.nsig shape: %s"
            self.log.error(msg % str(nsig.shape))

        self.log.debug("Corrected noise shape is %s", nsig.shape)
        self.integrator.noise.nsig = nsig

    def _configure_monitors(self):
        """ Configure the requested Monitors for this Simulator """
        # Coerce to list if required
        if not isinstance(self.monitors, (list, tuple)):
            self.monitors = [self.monitors]
        # Configure monitors
        for monitor in self.monitors:
            monitor.config_for_sim(self)

    def _configure_stimuli(self):
        """ Configure the defined Stimuli for this Simulator """
        if self.stimulus is not None:
            if self.surface:
                # NOTE the region mapping of the stimuli should also include the subcortical areas
                self.stimulus.configure_space(region_mapping=self._regmap)
            else:
                self.stimulus.configure_space()

    # used by simulator adaptor
    def memory_requirement(self):
        """
        Return an estimate of the memory requirements (Bytes) for this
        simulator's current configuration.
        """
        self._guesstimate_memory_requirement()
        return self._memory_requirement_guess

    # appears to be unused
    def runtime(self, simulation_length):
        """
        Return an estimated run time (seconds) for the simulator's current
        configuration and a specified simulation length.

        """
        self.simulation_length = simulation_length
        self._guesstimate_runtime()
        return self._runtime

    # used by simulator adaptor
    def storage_requirement(self):
        """
        Return an estimated storage requirement (Bytes) for the simulator's
        current configuration and a specified simulation length.

        """
        self._calculate_storage_requirement()
        return self._storage_requirement

    def _guesstimate_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator.

        Guesstimate is based on the shape of the dominant arrays, and as such
        can operate before configuration.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant
            memory pigs...

        """
        if self.surface:
            number_of_nodes = self.surface.number_of_vertices
        else:
            number_of_nodes = self.connectivity.number_of_regions

        number_of_regions = self.connectivity.number_of_regions

        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        bits_64 = 8.0  # Bytes
        bits_32 = 4.0  # Bytes
        # NOTE: The speed hack for getting the first element of hist shape should
        #       partially resolve calling this method with a non-configured
        #       connectivity; there remains the less common issue of missing tract_lengths...
        hist_shape = (self.connectivity.tract_lengths.max() / (self.conduction_speed or
                                                               self.connectivity.speed or 3.0) / self.integrator.dt,
                      self.model.nvar, number_of_nodes,
                      self.model.number_of_modes)
        self.log.debug("Estimated history shape is %r", hist_shape)

        memreq = numpy.prod(hist_shape) * bits_64
        if self.surface:
            memreq += self.surface.number_of_triangles * 3 * bits_32 * 2  # normals
            memreq += self.surface.number_of_vertices * 3 * bits_64 * 2  # normals
            memreq += number_of_nodes * number_of_regions * bits_64 * 4  # region_mapping, region_average, region_sum
            # ???memreq += self.surface.local_connectivity.matrix.nnz * 8

        if not hasattr(self.monitors, '__len__'):
            self.monitors = [self.monitors]

        for monitor in self.monitors:
            if not isinstance(monitor, monitors.Bold):
                stock_shape = (monitor.period / self.integrator.dt,
                               len(self.model.variables_of_interest),
                               number_of_nodes,
                               self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                if hasattr(monitor, "sensors"):
                    try:
                        memreq += number_of_nodes * monitor.sensors.number_of_sensors * bits_64  # projection_matrix
                    except AttributeError:
                        self.log.debug("No sensors specified, guessing memory based on default EEG.")
                        memreq += number_of_nodes * 62.0 * bits_64

            else:
                stock_shape = (monitor.hrf_length * monitor._stock_sample_rate,
                               len(self.model.variables_of_interest),
                               number_of_nodes,
                               self.model.number_of_modes)
                interim_stock_shape = (1.0 / (2.0 ** -2 * self.integrator.dt),
                                       len(self.model.variables_of_interest),
                                       number_of_nodes,
                                       self.model.number_of_modes)
                memreq += numpy.prod(stock_shape) * bits_64
                memreq += numpy.prod(interim_stock_shape) * bits_64

        if psutil and memreq > psutil.virtual_memory().total:
            self.log.warning("There may be insufficient memory for this simulation.")

        self._memory_requirement_guess = magic_number * memreq
        msg = "Memory requirement estimate: simulation will need about %.1f MB"
        self.log.info(msg, self._memory_requirement_guess / 2 ** 20)

    def _census_memory_requirement(self):
        """
        Guesstimate the memory required for this simulator.

        Guesstimate is based on a census of the dominant arrays after the
        simulator has been configured.

        NOTE: Assumes returned/yielded data is in some sense "taken care of" in
            the world outside the simulator, and so doesn't consider it, making
            the simulator's history, and surface if present, the dominant
            memory pigs...

        """
        magic_number = 2.42  # Current guesstimate is low by about a factor of 2, seems safer to over estimate...
        memreq = self.history.nbytes
        try:
            memreq += self.surface.triangles.nbytes * 2
            memreq += self.surface.vertices.nbytes * 2
            memreq += self.surface.region_mapping.nbytes * self.number_of_nodes * 8. * 4  # region_average, region_sum
            memreq += self.surface.local_connectivity.matrix.nnz * 8
        except AttributeError:
            pass

        for monitor in self.monitors:
            memreq += monitor._stock.nbytes
            if isinstance(monitor, monitors.Bold):
                memreq += monitor._interim_stock.nbytes

        if psutil and memreq > psutil.virtual_memory().total:
            self.log.warning("Memory estimate exceeds total available RAM.")

        self._memory_requirement_census = magic_number * memreq
        # import pdb; pdb.set_trace()
        msg = "Memory requirement census: simulation will need about %.1f MB"
        self.log.info(msg % (self._memory_requirement_census / 1048576.0))

    def _guesstimate_runtime(self):
        """
        Estimate the runtime for this simulator.

        Spreading the work over parallel executions of larger arrays means this will be
        an over-estimation, or rather a single-threaded estimation...
        Different choices of integrator and monitors have an additional, though
        relatively minor, effect on the magic number.

        """
        magic_number = 6.57e-06  # seconds
        self._runtime = (magic_number * self.number_of_nodes * self.model.nvar * self.model.number_of_modes *
                         self.simulation_length / self.integrator.dt)
        msg = "Simulation runtime should be about %0.3f seconds"
        self.log.info(msg, self._runtime)

    def _calculate_storage_requirement(self):
        """
        Calculate the storage requirement for the simulator, configured with
        models, monitors, etc being run for a particular simulation length.
        While this is only approximate, it is far more reliable/accurate than
        the memory and runtime guesstimates.
        """
        self.log.info("Calculating storage requirement for ...")
        strgreq = 0
        for monitor in self.monitors:
            # Avoid division by zero for monitor not yet configured
            # (in framework this is executed, when only preconfigure has been called):
            current_period = monitor.period or self.integrator.dt
            strgreq += (TvbProfile.current.MAGIC_NUMBER * self.simulation_length *
                        self.number_of_nodes * self.model.nvar *
                        self.model.number_of_modes / current_period)
        self.log.info("Calculated storage requirement for simulation: %d " % int(strgreq))
        self._storage_requirement = int(strgreq)

    def run(self, **kwds):
        """Convenience method to call the simulator with **kwds and collect output data."""
        ts, xs = [], []
        for _ in self.monitors:
            ts.append([])
            xs.append([])
        wall_time_start = time.time()
        for data in self(**kwds):
            for tl, xl, t_x in zip(ts, xs, data):
                if t_x is not None:
                    t, x = t_x
                    tl.append(t)
                    xl.append(x)
        elapsed_wall_time = time.time() - wall_time_start
        self.log.info("%.3f s elapsed, %.3fx real time", elapsed_wall_time,
                      elapsed_wall_time * 1e3 / self.simulation_length)
        for i in range(len(ts)):
            ts[i] = numpy.array(ts[i])
            xs[i] = numpy.array(xs[i])
        return list(zip(ts, xs))
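
# --- Hedged usage sketch (not part of the original listing) -------------------
# End-to-end wiring of a Simulator from TVB's stock components. This assumes the
# usual tvb.simulator / tvb.datatypes imports and the default demo connectivity
# (Connectivity.from_file()); all parameter values are illustrative, not recommended.
import numpy
from tvb.simulator import models, coupling, integrators, monitors
from tvb.datatypes import connectivity

sim = Simulator(
    connectivity=connectivity.Connectivity.from_file(),   # default 76-region connectome
    model=models.Generic2dOscillator(),
    coupling=coupling.Linear(a=numpy.array([0.01])),
    integrator=integrators.HeunDeterministic(dt=0.1),     # ms
    monitors=(monitors.TemporalAverage(period=1.0),),     # ms
    simulation_length=250.0,                               # ms
).configure()

(tavg_time, tavg_data), = sim.run()   # one (times, data) pair per configured monitor
print(tavg_data.shape)                # (time, state_vars, nodes, modes)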
Exemplo n.º 15
0
class MorrisLecar(models.Model):
    """
    The Morris-Lecar model is a mathematically simple excitation model having
    two nonlinear, non-inactivating conductances.

    .. [ML_1981] Morris, C. and Lecar, H. *Voltage oscillations in the Barnacle
        giant muscle fibre*, Biophysical Journal 35: 193, 1981.

    See also, http://www.scholarpedia.org/article/Morris-Lecar_model
    
        .. figure :: img/MorrisLecar_01_mode_0_pplane.svg
            :alt: Morris-Lecar phase plane (V, N)
            
            The (:math:`V`, :math:`N`) phase-plane for the Morris-Lecar model.

    .. automethod:: MorrisLecar.dfun

    """

    # Define traited attributes for this model, these represent possible kwargs.
    gCa = NArray(
        label=":math:`g_{Ca}`",
        default=numpy.array([4.0]),
        domain=Range(lo=2.0, hi=6.0, step=0.01),
        doc="""Conductance of population of Ca++ channels [mmho/cm2]""")

    gK = NArray(label=":math:`g_K`",
                default=numpy.array([8.0]),
                domain=Range(lo=4.0, hi=12.0, step=0.01),
                doc="""Conductance of population of K+ channels [mmho/cm2]""")

    gL = NArray(
        label=":math:`g_L`",
        default=numpy.array([2.0]),
        domain=Range(lo=1.0, hi=3.0, step=0.01),
        doc="""Conductance of population of leak channels [mmho/cm2]""")

    C = NArray(label=":math:`C`",
               default=numpy.array([20.0]),
               domain=Range(lo=10.0, hi=30.0, step=0.01),
               doc="""Membrane capacitance [uF/cm2]""")

    lambda_Nbar = NArray(label=":math:`\\lambda_{Nbar}`",
                         default=numpy.array([0.06666667]),
                         domain=Range(lo=0.0, hi=1.0, step=0.00000001),
                         doc="""Maximum rate for K+ channel opening [1/s]""")

    V1 = NArray(
        label=":math:`V_1`",
        default=numpy.array([10.0]),
        domain=Range(lo=5.0, hi=15.0, step=0.01),
        doc="""Potential at which half of the Ca++ channels are open at steady
        state [mV]""")

    V2 = NArray(
        label=":math:`V_2`",
        default=numpy.array([15.0]),
        domain=Range(lo=7.5, hi=22.5, step=0.01),
        doc="""1/slope of voltage dependence of the fraction of Ca++ channels
        that are open at steady state [mV].""")

    V3 = NArray(
        label=":math:`V_3`",
        default=numpy.array([-1.0]),
        domain=Range(lo=-1.5, hi=-0.5, step=0.01),
        doc="""Potential at which half of the K+ channels are open at steady
        state [mV].""")

    V4 = NArray(
        label=":math:`V_4`",
        default=numpy.array([14.5]),
        domain=Range(lo=7.25, hi=22.0, step=0.01),
        doc="""1/slope of voltage dependence of the fraction of K+ channels
        that are open at steady state [mV].""")

    VCa = NArray(label=":math:`V_{Ca}`",
                 default=numpy.array([100.0]),
                 domain=Range(lo=50.0, hi=150.0, step=0.01),
                 doc="""Ca++ Nernst potential [mV]""")

    VK = NArray(label=":math:`V_K`",
                default=numpy.array([-70.0]),
                domain=Range(lo=-105.0, hi=-35.0, step=0.01),
                doc="""K+ Nernst potential [mV]""")

    VL = NArray(label=":math:`V_L`",
                default=numpy.array([-50.0]),
                domain=Range(lo=-75.0, hi=-25.0, step=0.01),
                doc="""Nernst potential leak channels [mV]""")

    # Used for phase-plane axis ranges and to bound random initial() conditions.
    state_variable_range = Final(
        {
            "V": numpy.array([-70.0, 50.0]),
            "N": numpy.array([-0.2, 0.8])
        },
        label="State Variable ranges [lo, hi]",
        doc="""The values for each state-variable should be set to encompass
        the expected dynamic range of that state-variable for the current 
        parameters, it is used as a mechanism for bounding random inital 
        conditions when the simulation isn't started from an explicit history,
        it is also provides the default range of phase-plane plots.""")

    variables_of_interest = NArray(
        dtype=numpy.int32,
        label="Variables watched by Monitors",
        domain=Range(lo=0, hi=2, step=1),
        default=numpy.array([0], dtype=numpy.int32),
        doc="""This represents the default state-variables of this Model to be
        monitored. It can be overridden for each Monitor if desired. The 
        corresponding state-variable indices for this model are :math:`V = 0`,
        and :math:`N = 1`.""")

    #    coupling_variables = arrays.IntegerArray(
    #        label = "Variables to couple activity through",
    #        default = numpy.array([0], dtype=numpy.int32))

    #    nsig = arrays.FloatArray(
    #        label = "Noise dispersion",
    #        default = numpy.array([0.0]),
    #        range = basic.Range(lo = 0.0, hi = 1.0))

    def __init__(self, **kwargs):
        """
        Initialize the MorrisLecar model's traited attributes, any provided
        as keywords will override their traited default.
        
        """
        LOG.info('%s: initing...' % str(self))
        super(MorrisLecar, self).__init__(**kwargs)

        self._state_variables = ["V", "N"]
        self._nvar = 2

        self.cvar = numpy.array([0], dtype=numpy.int32)

        LOG.debug('%s: inited.' % repr(self))

    def dfun(self, state_variables, coupling, local_coupling=0.0):
        """
        The dynamics of the membrane potential :math:`V` rely on the fraction
        of Ca++ channels :math:`M` and K+ channels :math:`N` open at a given
        time. In order to have a planar model, we make the simplifying
        assumption (following [ML_1981]_, Equation 9) that Ca++ system is much
        faster than K+ system so that :math:`M = M_{\\infty}` at all times:
        
        .. math::
             C \\, \\dot{V} &= I - g_{L}(V - V_L) - g_{Ca} \\, M_{\\infty}(V)
                              (V - V_{Ca}) - g_{K} \\, N \\, (V - V_{K}) \\\\
             \\dot{N} &= \\lambda_{N}(V) \\, (N_{\\infty}(V) - N) \\\\
             M_{\\infty}(V) &= 1/2 \\, (1 + \\tanh((V - V_{1})/V_{2}))\\\\
             N_{\\infty}(V) &= 1/2 \\, (1 + \\tanh((V - V_{3})/V_{4}))\\\\
             \\lambda_{N}(V) &= \\overline{\\lambda_{N}}
                                \\cosh((V - V_{3})/2V_{4})
        
        where external currents :math:`I` provide the entry point for local and
        long-range connectivity. Default parameters are set as per Figure 9 of
        [ML_1981]_ so that the model shows oscillatory behaviour as :math:`I` is
        varied.
        """

        V = state_variables[0, :]
        N = state_variables[1, :]

        c_0 = coupling[0, :]

        M_inf = 0.5 * (1 + numpy.tanh((V - self.V1) / self.V2))
        N_inf = 0.5 * (1 + numpy.tanh((V - self.V3) / self.V4))
        lambda_N = self.lambda_Nbar * numpy.cosh(
            (V - self.V3) / (2.0 * self.V4))

        dV = (1.0 / self.C) * (c_0 + (local_coupling * V) - self.gL *
                               (V - self.VL) - self.gCa * M_inf *
                               (V - self.VCa) - self.gK * N * (V - self.VK))

        dN = lambda_N * (N_inf - N)

        derivative = numpy.array([dV, dN])

        return derivative
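
# --- Hedged usage sketch (not part of the original listing) -------------------
# Evaluating the Morris-Lecar derivatives at a single point, assuming the base
# `models.Model` class and the module-level LOG used in __init__ are available.
# Shapes follow the (n_var, n_nodes, n_modes) convention; values are illustrative.
import numpy

ml = MorrisLecar()
state = numpy.array([[[-30.0]],   # V  [mV]
                     [[0.2]]])    # N  (fraction of open K+ channels)
no_coupling = numpy.zeros((1, 1, 1))

dV_dN = ml.dfun(state, no_coupling)
print(dV_dN.reshape(-1))          # -> [dV/dt, dN/dt]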
Exemplo n.º 16
0
##  Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
##  Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
##      The Virtual Brain: a simulator of primate brain network dynamics.
##   Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
##
##

from tvb.simulator.models.base import Model, ModelNumbaDfun
import numpy
from numpy import *
from numba import guvectorize, float64
from tvb.basic.neotraits.api import NArray, Final, List, Range

class ${modelname}(ModelNumbaDfun):
    %for mconst in const:
        ${NArray(mconst)}
    %endfor

    state_variable_range = Final(
        label="State Variable ranges [lo, hi]",
        default={\
%for itemA in dynamics.state_variables:
"${itemA.name}": numpy.array([${itemA.dimension}])${'' if loop.last else ', \n\t\t\t\t '}\
%endfor
},
        doc="""state variables"""
    )

% if svboundaries:
    state_variable_boundaries = Final(
        label="State Variable boundaries [lo, hi]",
Exemplo n.º 17
0
class ReducedSetHindmarshRose(ReducedSetBase):
    r"""
    .. [SJ_2008] Stefanescu and Jirsa, PLoS Computational Biology, *A Low
        Dimensional Description of Globally Coupled Heterogeneous Neural
        Networks of Excitatory and Inhibitory Neurons*, 4, 11, 26--36, 2008.

    The models (:math:`\xi`, :math:`\eta`) phase-plane, including a
    representation of the vector field as well as its nullclines, using default
    parameters, can be seen below:

        .. _phase-plane-rHR_0:
        .. figure :: img/ReducedSetHindmarshRose_01_mode_0_pplane.svg
            :alt: Reduced set of Hindmarsh-Rose phase plane (xi, eta), 1st mode.

            The (:math:`\xi`, :math:`\eta`) phase-plane for the first mode of
            a reduced set of Hindmarsh-Rose oscillators.

        .. _phase-plane-rHR_1:
        .. figure :: img/ReducedSetHindmarshRose_01_mode_1_pplane.svg
            :alt: Reduced set of Hindmarsh-Rose phase plane (xi, eta), 2nd mode.

            The (:math:`\xi`, :math:`\eta`) phase-plane for the second mode of
            a reduced set of Hindmarsh-Rose oscillators.

        .. _phase-plane-rHR_2:
        .. figure :: img/ReducedSetHindmarshRose_01_mode_2_pplane.svg
            :alt: Reduced set of Hindmarsh-Rose phase plane (xi, eta), 3rd mode.

            The (:math:`\xi`, :math:`\eta`) phase-plane for the third mode of
            a reduced set of Hindmarsh-Rose oscillators.


    The dynamic equations were originally taken from [SJ_2008]_.

    The equations of the population model for i-th mode at node q are:

    .. math::
                \dot{\xi}_i     &=  \eta_i-a_i\xi_i^3 + b_i\xi_i^2- \tau_i
                                 + K_{11} \left[\sum_{k=1}^{o} A_{ik} \xi_k - \xi_i \right]
                                 - K_{12} \left[\sum_{k=1}^{o} B_{ik} \alpha_k - \xi_i\right] + IE_i \\
                                &\, + \left[\sum_{k=1}^{o} \mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                 + \left[\sum_{k=1}^{o} W_{\zeta}\cdot\xi_{kr} \right] \\
                & \\
                \dot{\eta}_i    &=  c_i-d_i\xi_i^2 -\tau_i \\
                & \\
                \dot{\tau}_i    &=  rs\xi_i - r\tau_i -m_i \\
                & \\
                \dot{\alpha}_i  &=  \beta_i - e_i \alpha_i^3 + f_i \alpha_i^2 - \gamma_i
                                 + K_{21} \left[\sum_{k=1}^{o} C_{ik} \xi_k - \alpha_i \right] + II_i \\
                                &\, +\left[\sum_{k=1}^{o}\mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                 + \left[\sum_{k=1}^{o}W_{\zeta}\cdot\xi_{kr}\right] \\
                & \\
                \dot{\beta}_i   &= h_i - p_i \alpha_i^2 - \beta_i \\
                \dot{\gamma}_i  &= rs \alpha_i - r \gamma_i - n_i

    .. automethod:: ReducedSetHindmarshRose.update_derived_parameters

    # NOTE: In the article this model is called StefanescuJirsa3D.

    """

    # Define traited attributes for this model, these represent possible kwargs.
    r = NArray(
        label=":math:`r`",
        default=numpy.array([0.006]),
        domain=Range(lo=0.0, hi=0.1, step=0.0005),
        doc="""Adaptation parameter""")

    a = NArray(
        label=":math:`a`",
        default=numpy.array([1.0]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Dimensionless parameter as in the Hindmarsh-Rose model""")

    b = NArray(
        label=":math:`b`",
        default=numpy.array([3.0]),
        domain=Range(lo=0.0, hi=3.0, step=0.01),
        doc="""Dimensionless parameter as in the Hindmarsh-Rose model""")

    c = NArray(
        label=":math:`c`",
        default=numpy.array([1.0]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Dimensionless parameter as in the Hindmarsh-Rose model""")

    d = NArray(
        label=":math:`d`",
        default=numpy.array([5.0]),
        domain=Range(lo=2.5, hi=7.5, step=0.01),
        doc="""Dimensionless parameter as in the Hindmarsh-Rose model""")

    s = NArray(
        label=":math:`s`",
        default=numpy.array([4.0]),
        domain=Range(lo=2.0, hi=6.0, step=0.01),
        doc="""Adaptation paramters, governs feedback""")

    xo = NArray(
        label=":math:`x_{o}`",
        default=numpy.array([-1.6]),
        domain=Range(lo=-2.4, hi=-0.8, step=0.01),
        doc="""Leftmost equilibrium point of x""")

    K11 = NArray(
        label=":math:`K_{11}`",
        default=numpy.array([0.5]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Internal coupling, excitatory to excitatory""")

    K12 = NArray(
        label=":math:`K_{12}`",
        default=numpy.array([0.1]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Internal coupling, inhibitory to excitatory""")

    K21 = NArray(
        label=":math:`K_{21}`",
        default=numpy.array([0.15]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Internal coupling, excitatory to inhibitory""")

    sigma = NArray(
        label=r":math:`\sigma`",
        default=numpy.array([0.3]),
        domain=Range(lo=0.0, hi=1.0, step=0.01),
        doc="""Standard deviation of Gaussian distribution""")

    mu = NArray(
        label=r":math:`\mu`",
        default=numpy.array([3.3]),
        domain=Range(lo=1.1, hi=3.3, step=0.01),
        doc="""Mean of Gaussian distribution""")

    # Used for phase-plane axis ranges and to bound random initial() conditions.
    state_variable_range = Final(
        label="State Variable ranges [lo, hi]",
        default={"xi": numpy.array([-4.0, 4.0]),
                 "eta": numpy.array([-25.0, 20.0]),
                 "tau": numpy.array([2.0, 10.0]),
                 "alpha": numpy.array([-4.0, 4.0]),
                 "beta": numpy.array([-20.0, 20.0]),
                 "gamma": numpy.array([2.0, 10.0])},
        doc="""The values for each state-variable should be set to encompass
        the expected dynamic range of that state-variable for the current
        parameters, it is used as a mechanism for bounding random inital
        conditions when the simulation isn't started from an explicit history,
        it is also provides the default range of phase-plane plots.""")

    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=("xi", "eta", "tau", "alpha", "beta", "gamma"),
        default=("xi", "eta", "tau"),
        doc=r"""This represents the default state-variables of this Model to be
                monitored. It can be overridden for each Monitor if desired. The
                corresponding state-variable indices for this model are :math:`\xi = 0`,
                :math:`\eta = 1`, :math:`\tau = 2`, :math:`\alpha = 3`,
                :math:`\beta = 4`, and :math:`\gamma = 5`""")

    state_variables = 'xi eta tau alpha beta gamma'.split()
    _nvar = 6
    cvar = numpy.array([0, 3], dtype=numpy.int32)
    # derived parameters
    A_ik = None
    B_ik = None
    C_ik = None
    a_i = None
    b_i = None
    c_i = None
    d_i = None
    e_i = None
    f_i = None
    h_i = None
    p_i = None
    IE_i = None
    II_i = None
    m_i = None
    n_i = None

    def dfun(self, state_variables, coupling, local_coupling=0.0):
        r"""
        The equations of the population model for i-th mode at node q are:

        .. math::
                \dot{\xi}_i     &=  \eta_i-a_i\xi_i^3 + b_i\xi_i^2- \tau_i
                                 + K_{11} \left[\sum_{k=1}^{o} A_{ik} \xi_k - \xi_i \right]
                                 - K_{12} \left[\sum_{k=1}^{o} B_{ik} \alpha_k - \xi_i\right] + IE_i \\
                                &\, + \left[\sum_{k=1}^{o} \mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                 + \left[\sum_{k=1}^{o} W_{\zeta}\cdot\xi_{kr} \right] \\
                & \\
                \dot{\eta}_i    &=  c_i-d_i\xi_i^2 -\tau_i \\
                & \\
                \dot{\tau}_i    &=  rs\xi_i - r\tau_i -m_i \\
                & \\
                \dot{\alpha}_i  &=  \beta_i - e_i \alpha_i^3 + f_i \alpha_i^2 - \gamma_i
                                 + K_{21} \left[\sum_{k=1}^{o} C_{ik} \xi_k - \alpha_i \right] + II_i \\
                                &\, +\left[\sum_{k=1}^{o}\mathbf{\Gamma}(\xi_{kq}, \xi_{kr}, u_{qr})\right]
                                 + \left[\sum_{k=1}^{o}W_{\zeta}\cdot\xi_{kr}\right] \\
                & \\
                \dot{\beta}_i   &= h_i - p_i \alpha_i^2 - \beta_i \\
                \dot{\gamma}_i  &= rs \alpha_i - r \gamma_i - n_i

        """

        xi = state_variables[0, :]
        eta = state_variables[1, :]
        tau = state_variables[2, :]
        alpha = state_variables[3, :]
        beta = state_variables[4, :]
        gamma = state_variables[5, :]
        derivative = numpy.empty_like(state_variables)

        c_0 = coupling[0, :].sum(axis=1)[:, numpy.newaxis]
        # c_1 = coupling[1, :]

        derivative[0] = (eta - self.a_i * xi ** 3 + self.b_i * xi ** 2 - tau +
               self.K11 * (numpy.dot(xi, self.A_ik) - xi) -
               self.K12 * (numpy.dot(alpha, self.B_ik) - xi) +
               self.IE_i + c_0 + local_coupling * xi)

        derivative[1] = self.c_i - self.d_i * xi ** 2 - eta

        derivative[2] = self.r * self.s * xi - self.r * tau - self.m_i

        derivative[3] = (beta - self.e_i * alpha ** 3 + self.f_i * alpha ** 2 - gamma +
                  self.K21 * (numpy.dot(xi, self.C_ik) - alpha) +
                  self.II_i + c_0 + local_coupling * xi)

        derivative[4] = self.h_i - self.p_i * alpha ** 2 - beta

        derivative[5] = self.r * self.s * alpha - self.r * gamma - self.n_i

        return derivative

    def update_derived_parameters(self, corrected_d_p=True):
        """
        Calculate coefficients for the neural field model based on a Reduced set
        of Hindmarsh-Rose oscillators. Specifically, this method implements
        equations for calculating coefficients found in the supplemental
        material of [SJ_2008]_.

        Include equations here...

        """

        newaxis = numpy.newaxis
        trapz = scipy_integrate_trapz

        stepu = 1.0 / (self.nu + 2 - 1)
        stepv = 1.0 / (self.nv + 2 - 1)

        norm = scipy_stats_norm(loc=self.mu, scale=self.sigma)

        Iu = norm.ppf(numpy.arange(stepu, 1.0, stepu))
        Iv = norm.ppf(numpy.arange(stepv, 1.0, stepv))

        # Define the modes
        V = numpy.zeros((self.number_of_modes, self.nv))
        U = numpy.zeros((self.number_of_modes, self.nu))

        nv_per_mode = self.nv // self.number_of_modes
        nu_per_mode = self.nu // self.number_of_modes

        for i in range(self.number_of_modes):
            V[i, i * nv_per_mode:(i + 1) * nv_per_mode] = numpy.ones(nv_per_mode)
            U[i, i * nu_per_mode:(i + 1) * nu_per_mode] = numpy.ones(nu_per_mode)

        # Normalise the modes
        V = V / numpy.tile(numpy.sqrt(trapz(V * V, Iv, axis=1)), (self.nv, 1)).T
        U = U / numpy.tile(numpy.sqrt(trapz(U * U, Iu, axis=1)), (self.nu, 1)).T

        # Get Normal PDF's evaluated with sampling Zv and Zu
        g1 = norm.pdf(Iv)
        g2 = norm.pdf(Iu)
        G1 = numpy.tile(g1, (self.number_of_modes, 1))
        G2 = numpy.tile(g2, (self.number_of_modes, 1))

        cV = numpy.conj(V)
        cU = numpy.conj(U)

        #import pdb; pdb.set_trace()
        intcVdI = trapz(cV, Iv, axis=1)[:, newaxis]
        intG1VdI = trapz(G1 * V, Iv, axis=1)[newaxis, :]
        intcUdI = trapz(cU, Iu, axis=1)[:, newaxis]

        #Calculate coefficients
        self.A_ik = numpy.dot(intcVdI, intG1VdI).T
        self.B_ik = numpy.dot(intcVdI, trapz(G2 * U, Iu, axis=1)[newaxis, :])
        self.C_ik = numpy.dot(intcUdI, intG1VdI).T

        self.a_i = self.a * trapz(cV * V ** 3, Iv, axis=1)[newaxis, :]
        self.e_i = self.a * trapz(cU * U ** 3, Iu, axis=1)[newaxis, :]
        self.b_i = self.b * trapz(cV * V ** 2, Iv, axis=1)[newaxis, :]
        self.f_i = self.b * trapz(cU * U ** 2, Iu, axis=1)[newaxis, :]
        self.c_i = (self.c * intcVdI).T
        self.h_i = (self.c * intcUdI).T

        self.IE_i = trapz(Iv * cV, Iv, axis=1)[newaxis, :]
        self.II_i = trapz(Iu * cU, Iu, axis=1)[newaxis, :]

        if corrected_d_p:
            # correction identified by Shrey Dutta & Arpan Bannerjee, confirmed by RS
            self.d_i = self.d * trapz(cV * V ** 2, Iv, axis=1)[newaxis, :]
            self.p_i = self.d * trapz(cU * U ** 2, Iu, axis=1)[newaxis, :]
        else:
            # typo in the original paper by RS & VJ, kept for comparison purposes.
            self.d_i = (self.d * intcVdI).T
            self.p_i = (self.d * intcUdI).T

        self.m_i = (self.r * self.s * self.xo * intcVdI).T
        self.n_i = (self.r * self.s * self.xo * intcUdI).T
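
# --- Hedged illustration (not part of the original listing) -------------------
# Stand-alone sketch of the mode-construction step used in update_derived_parameters
# above: box-car modes defined over quantiles of a normal distribution, normalised so
# that trapz(V * V, Iv) == 1 for every mode. nv, number_of_modes, mu and sigma are
# illustrative stand-ins for the corresponding model attributes.
import numpy
from scipy.integrate import trapezoid
from scipy.stats import norm

nv, number_of_modes, mu, sigma = 1500, 3, 3.3, 0.3
stepv = 1.0 / (nv + 2 - 1)
Iv = norm(loc=mu, scale=sigma).ppf(numpy.arange(stepv, 1.0, stepv))  # nv sample points

V = numpy.zeros((number_of_modes, nv))
nv_per_mode = nv // number_of_modes
for i in range(number_of_modes):
    V[i, i * nv_per_mode:(i + 1) * nv_per_mode] = 1.0                # box-car modes

V /= numpy.sqrt(trapezoid(V * V, Iv, axis=1))[:, numpy.newaxis]      # normalise each mode
print(trapezoid(V * V, Iv, axis=1))                                  # -> approx. [1. 1. 1.]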
Exemplo n.º 18
0
class LinearReducedWongWangExcIO(ReducedWongWangExcIO):

    d = NArray(label=":math:`d`",
               default=numpy.array([
                   0.2,
               ]),
               domain=Range(lo=0.0, hi=0.200, step=0.001),
               doc="""[s]. Parameter chosen to fit numerical solutions.""")

    non_integrated_variables = ["R", "Rin", "I"]

    def update_state_variables_before_integration(self,
                                                  state_variables,
                                                  coupling,
                                                  local_coupling=0.0,
                                                  stimulus=0.0):
        if self.use_numba:
            state_variables = \
                _numba_update_non_state_variables(state_variables.reshape(state_variables.shape[:-1]).T,
                                                  coupling.reshape(coupling.shape[:-1]).T +
                                                  local_coupling * state_variables[0],
                                                  self.a, self.b, self.d,
                                                  self.w, self.J_N, self.Rin,
                                                  self.G, self.I_o)
            return state_variables.T[..., numpy.newaxis]

        # In this case, rates (H_e, H_i) are non-state variables,
        # i.e., they form part of state_variables but have no dynamics assigned on them
        # Most of the computations of this dfun aim at computing rates, including coupling considerations.
        # Therefore, we compute and update them only once a new state is computed,
        # and we consider them constant for any subsequent possible call to this function,
        # by any integration scheme

        S = state_variables[0, :]  # synaptic gating dynamics
        Rint = state_variables[1, :]  # Rates from Spiking Network, integrated
        Rin = state_variables[3, :]  # Input rates from Spiking Network

        c_0 = coupling[0, :]

        # if applicable
        lc_0 = local_coupling * S[0]

        coupling = self.G * self.J_N * (c_0 + lc_0)

        # Currents
        I = self.w * self.J_N * S + self.I_o + coupling
        x = self.a * I - self.b

        # Rates
        # Only rates for nodes with _Rin == False (i.e., Rin <= 0) will be updated by TVB.
        # The rest are updated from the Spiking Network.
        R = numpy.where(self._Rin, Rint, x / (1 - numpy.exp(-self.d * x)))

        Rin = numpy.where(
            self._Rin, Rin,
            0.0)  # Reset to 0 the Rin for nodes not updated by Spiking Network

        # We now update the state_variable vector with the new rates and currents:
        state_variables[2, :] = R
        state_variables[3, :] = Rin
        state_variables[4, :] = I

        # Keep them here so that they are not recomputed in the dfun
        self._R = numpy.copy(R)
        self._Rin = numpy.copy(Rin)

        return state_variables

    def _numpy_dfun(self, integration_variables, R, Rin):
        r"""
        Equations taken from [DPA_2013]_ , page 11242

        .. math::
                  x_k       &=   w\,J_N \, S_k + I_o + J_N \mathbf\Gamma(S_k, S_j, u_{kj}),\\
                 H(x_k)    &=  \dfrac{ax_k - b}{1 - \exp(-d(ax_k -b))},\\
                 \dot{S}_k &= -\dfrac{S_k}{\tau_s} + (1 - S_k) \, H(x_k) \, \gamma

        """

        S = integration_variables[0, :]  # Synaptic gating dynamics
        Rint = integration_variables[1, :]  # Rates from Spiking Network, integrated

        # Synaptic gating dynamics
        dS = -(S / self.tau_s) + R * self.gamma

        # Rates
        # Low pass filtering, linear dynamics for rates updated from the spiking network
        # No dynamics in the case of TVB rates
        dRint = numpy.where(self._Rin_mask, (-Rint + Rin) / self.tau_rin, 0.0)

        return numpy.array([dS, dRint])

    def dfun(self, x, c, local_coupling=0.0):
        if self._R is None or self._Rin is None:
            state_variables = self._integration_to_state_variables(x)
            state_variables = \
                self.update_state_variables_before_integration(state_variables, c, local_coupling,
                                                               self._stimulus)
            R = state_variables[2]  # Rates
            Rin = state_variables[3]  # input instant spiking rates
        else:
            R = self._R
            Rin = self._Rin
        if self.use_numba:
            deriv = _numba_dfun(
                x.reshape(x.shape[:-1]).T, R, Rin, self.gamma, self.tau_s,
                self.Rin, self.tau_rin).T[..., numpy.newaxis]
        else:
            deriv = self._numpy_dfun(x, R, Rin)
        #  Set them to None so that they are recomputed on subsequent steps
        #  for multistep integration schemes such as Runge-Kutta:
        self._R = None
        self._Rin = None
        return deriv
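# Illustrative sketch (not from the model above): the masking pattern used in
# update_state_variables_before_integration, reduced to plain numpy. Nodes flagged
# by a boolean mask take their rate from a (hypothetical) spiking network; the rest
# use the Wong-Wang-like transfer function H(x) = x / (1 - exp(-d*x)).
# All values below are assumed toy numbers.
import numpy

d = 0.154                                               # assumed transfer-function slope
x = numpy.array([0.5, 1.0, 2.0, 4.0])                   # effective inputs (a*I - b already applied)
Rint = numpy.array([12.0, 0.0, 30.0, 0.0])              # integrated spiking-network rates
spiking_mask = numpy.array([True, False, True, False])  # nodes driven by the spiking network

H = x / (1.0 - numpy.exp(-d * x))                       # TVB rate for the remaining nodes
R = numpy.where(spiking_mask, Rint, H)                  # per-node choice, as in the model
print(R)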
Exemplo n.º 19
0
class EpileptorRestingState(ModelNumbaDfun):
    r"""
        EpileptorRestingState is an extension of the phenomenological neural mass model of partial seizures 
        Epileptor [Jirsaetal_2014], tuned to express regionally specific physiological oscillations in addition
        to the epileptiform discharges. This extension was made using the Generic 2-dimensional Oscillator model
        (parametrized close to a supercritical Hopf Bifurcation) [SanzLeonetal_2013] to reproduce the spontaneous
        local field potential-like signal.
        
        This model, its motivation and derivation can be found in the published article [Courtioletal_2020].
        
        .. Tutorial: Modeling_Resting-State_in_Epilepsy.ipynb
        
        .. References:
            [Jirsaetal_2014] Jirsa, V. K.; Stacey, W. C.; Quilichini, P. P.; Ivanov, A. I.; Bernard, 
            C. *On the nature of seizure dynamics.* Brain, 2014.
            [SanzLeonetal_2013] Sanz Leon, P.; Knock, S. A.; Woodman, M. M.; Domide, L.; Mersmann, 
            J.; McIntosh, A. R.; Jirsa, V. K. *The Virtual Brain: a simulator of primate brain 
            network dynamics.* Front.Neuroinf., 2013.
            [Courtioletal_2020] Courtiol, J.; Guye, M.; Bartolomei, F.; Petkoski, S.; Jirsa, V. K.
            *Dynamical Mechanisms of Interictal Resting-State Functional Connectivity in Epilepsy.*
            J.Neurosci., 2020.
        
        Variables of interest to be used by monitors: p * (-x_{1} + x_{2}) + (1 - p) * x_{rs}
        
            .. math::
                \dot{x_{1}} &=& y_{1} - f_{1}(x_{1}, x_{2}) - z + I_{ext1} \\
                \dot{y_{1}} &=& c - d x_{1}^{2} - y_{1} \\
                \dot{z} &=&
                    \begin{cases}
                        r(4 (x_{1} - x_{0}) - z - 0.1 z^{7}) & \text{if } z < 0 \\
                        r(4 (x_{1} - x_{0}) - z) & \text{if } z \geq 0
                    \end{cases} \\
                \dot{x_{2}} &=& -y_{2} + x_{2} - x_{2}^{3} + I_{ext2} + b_{2} g(x_{1}) - 0.3 (z-3.5) \\
                \dot{y_{2}} &=& 1 / \tau (-y_{2} + f_{2}(x_{2}))\\
                \dot{g} &=& -0.01 (g - 0.1 x_{1})\\
                \dot{x_{rs}} &=& d_{rs} \tau_{rs} (-f_{rs} x_{rs}^3 + e_{rs} x_{rs}^2 + \alpha_{rs} y_{rs} +
                \gamma_{rs} I_{rs}) \\
                \dot{y_{rs}} &=& d_{rs} (b_{rs}  x_{rs} - \beta_{rs} y_{rs} + a_{rs}) / \tau_{rs}
        
        where:
            .. math::
                f_{1}(x_{1}, x_{2}) =
                    \begin{cases}
                        a x_{1}^{3} - b x_{1}^2 & \text{if } x_{1} <0\\
                        -(slope - x_{2} + 0.6(z-4)^2) x_{1} &\text{if }x_{1} \geq 0
                    \end{cases}
            
        and:
            .. math::
                f_{2}(x_{2}) =
                    \begin{cases}
                        0 & \text{if } x_{2} <-0.25\\
                        a_{2}(x_{2} + 0.25) & \text{if } x_{2} \geq -0.25
                    \end{cases}


    """

    a = NArray(
        label=":math:`a`",
        default=numpy.array([1.0]),
        doc="Coefficient of the cubic term in the first state-variable x1.")

    b = NArray(
        label=":math:`b`",
        default=numpy.array([3.0]),
        doc="Coefficient of the squared term in the first state-variable x1.")

    c = NArray(label=":math:`c`",
               default=numpy.array([1.0]),
               doc="Additive coefficient for the second state-variable y1, \
        called :math:`y_{0}` in Jirsa et al. (2014).")

    d = NArray(
        label=":math:`d`",
        default=numpy.array([5.0]),
        doc="Coefficient of the squared term in the second state-variable y1.")

    r = NArray(label=":math:`r`",
               domain=Range(lo=0.0, hi=0.001, step=0.00005),
               default=numpy.array([0.00035]),
               doc="Temporal scaling in the third state-variable z, \
        called :math:`1/\\tau_{0}` in Jirsa et al. (2014).")

    s = NArray(label=":math:`s`",
               default=numpy.array([4.0]),
               doc="Linear coefficient in the third state-variable z.")

    x0 = NArray(label=":math:`x_0`",
                domain=Range(lo=-3.0, hi=-1.0, step=0.1),
                default=numpy.array([-1.6]),
                doc="Epileptogenicity parameter.")

    Iext = NArray(
        label=":math:`I_{ext}`",
        domain=Range(lo=1.5, hi=5.0, step=0.1),
        default=numpy.array([3.1]),
        doc="External input current to the first population (x1, y1).")

    slope = NArray(label=":math:`slope`",
                   domain=Range(lo=-16.0, hi=6.0, step=0.1),
                   default=numpy.array([0.]),
                   doc="Linear coefficient in the first state-variable x1.")

    Iext2 = NArray(
        label=":math:`I_{ext2}`",
        domain=Range(lo=0.0, hi=1.0, step=0.05),
        default=numpy.array([0.45]),
        doc="External input current to the second population (x2, y2).")

    tau = NArray(
        label=r":math:`\tau`",
        default=numpy.array([10.0]),
        doc="Temporal scaling coefficient in the fifth state-variable y2.")

    aa = NArray(label=":math:`aa`",
                default=numpy.array([6.0]),
                doc="Linear coefficient in the fifth state-variable y2.")

    bb = NArray(
        label=":math:`bb`",
        default=numpy.array([2.0]),
        doc="Linear coefficient of lowpass excitatory coupling in the fourth \
        state-variable x2.")

    Kvf = NArray(label=":math:`K_{vf}`",
                 default=numpy.array([0.0]),
                 domain=Range(lo=0.0, hi=4.0, step=0.5),
                 doc="Coupling scaling on a very fast time scale.")

    Kf = NArray(label=":math:`K_f`",
                default=numpy.array([0.0]),
                domain=Range(lo=0.0, hi=4.0, step=0.5),
                doc="Coupling scaling on a fast time scale.")

    Ks = NArray(
        label=":math:`K_s`",
        default=numpy.array([0.0]),
        domain=Range(lo=-4.0, hi=4.0, step=0.1),
        doc="Permittivity coupling, that is from the very fast time scale \
        toward the slow time scale.")

    tt = NArray(label=":math:`tt`",
                default=numpy.array([1.0]),
                domain=Range(lo=0.001, hi=10.0, step=0.001),
                doc="Time scaling of the Epileptor.")

    # Generic-2D's parameters
    tau_rs = NArray(
        label=r":math:`\tau_{rs}`",
        default=numpy.array([1.0]),
        domain=Range(lo=1.0, hi=5.0, step=0.01),
        doc="Temporal scaling coefficient in the third population (x_rs, y_rs)."
    )

    I_rs = NArray(
        label=":math:`I_{rs}`",
        default=numpy.array([0.0]),
        domain=Range(lo=-5.0, hi=5.0, step=0.01),
        doc="External input current to the third population (x_rs, y_rs).")

    a_rs = NArray(label=":math:`a_{rs}`",
                  default=numpy.array([-2.0]),
                  domain=Range(lo=-5.0, hi=5.0, step=0.01),
                  doc="Vertical shift of the configurable nullcline \
        in the state-variable y_rs.")

    b_rs = NArray(label=":math:`b_{rs}`",
                  default=numpy.array([-10.0]),
                  domain=Range(lo=-20.0, hi=15.0, step=0.01),
                  doc="Linear coefficient of the state-variable y_rs.")

    d_rs = NArray(
        label=":math:`d_{rs}`",
        default=numpy.array([0.02]),
        domain=Range(lo=0.0001, hi=1.0, step=0.0001),
        doc="Temporal scaling of the whole third system (x_rs, y_rs).")

    e_rs = NArray(
        label=":math:`e_{rs}`",
        default=numpy.array([3.0]),
        domain=Range(lo=-5.0, hi=5.0, step=0.0001),
        doc="Coefficient of the squared term in the sixth state-variable x_rs."
    )

    f_rs = NArray(
        label=":math:`f_{rs}`",
        default=numpy.array([1.0]),
        domain=Range(lo=-5.0, hi=5.0, step=0.0001),
        doc="Coefficient of the cubic term in the sixth state-variable x_rs.")

    alpha_rs = NArray(
        label=r":math:`\alpha_{rs}`",
        default=numpy.array([1.0]),
        domain=Range(lo=-5.0, hi=5.0, step=0.0001),
        doc="Constant parameter to scale the rate of feedback from the \
        slow variable y_rs to the fast variable x_rs.")

    beta_rs = NArray(
        label=r":math:`\beta_{rs}`",
        default=numpy.array([1.0]),
        domain=Range(lo=-5.0, hi=5.0, step=0.0001),
        doc="Constant parameter to scale the rate of feedback from the \
        slow variable y_rs to itself.")

    gamma_rs = NArray(label=r":math:`\gamma_{rs}`",
                      default=numpy.array([1.0]),
                      domain=Range(lo=-1.0, hi=1.0, step=0.1),
                      doc="Constant parameter to reproduce FHN dynamics where \
        excitatory input currents are negative.\
        Note: It scales both I_rs and the long-range coupling term.")

    K_rs = NArray(label=r":math:`K_{rs}`",
                  default=numpy.array([1.0]),
                  domain=Range(lo=0.0, hi=10.0, step=0.001),
                  doc="Coupling scaling on a fast time scale.")

    # Combination 2 models
    p = NArray(label=r":math:`p`",
               default=numpy.array([0.]),
               domain=Range(lo=-1.0, hi=1.0, step=0.1),
               doc="Linear coefficient.")

    # Initialization.
    # Epileptor model is set in a fixed point by default.
    state_variable_range = Final(
        label="State variable ranges [lo, hi]",
        default={
            "x1": numpy.array([-1.8, -1.4]),
            "y1": numpy.array([-15, -10]),
            "z": numpy.array([3.6, 4.0]),
            "x2": numpy.array([-1.1, -0.9]),
            "y2": numpy.array([0.001, 0.01]),
            "g": numpy.array([-1., 1.]),
            "x_rs": numpy.array([-2.0, 4.0]),
            "y_rs": numpy.array([-6.0, 6.0])
        },
        doc="Typical bounds on state-variables in EpileptorRestingState model."
    )

    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=("x1", "y1", "z", "x2", "y2", "g", "x_rs", "y_rs", "x2 - x1"),
        default=("x2 - x1", "z", "x_rs"),
        doc="Quantities of EpileptorRestingState available to monitor.")

    state_variables = ("x1", "y1", "z", "x2", "y2", "g", "x_rs", "y_rs")

    _nvar = 8  # number of state-variables
    cvar = numpy.array([0, 3, 6], dtype=numpy.int32)  # coupling variables

    def _numpy_dfun(self,
                    state_variables,
                    coupling,
                    local_coupling=0.0,
                    array=numpy.array,
                    where=numpy.where,
                    concat=numpy.concatenate):

        y = state_variables
        ydot = numpy.empty_like(state_variables)

        # long-range coupling
        c_pop1 = coupling[0]
        c_pop2 = coupling[1]
        c_pop3 = coupling[2]

        # short-range (local) coupling
        Iext = self.Iext + local_coupling * y[0]
        lc_1 = local_coupling * y[6]

        # Epileptor's equations:
        # population 1
        if_ydot0 = -self.a * y[0]**2 + self.b * y[0]
        else_ydot0 = self.slope - y[3] + 0.6 * (y[2] - 4.0)**2
        ydot[0] = self.tt * (y[1] - y[2] + Iext + self.Kvf * c_pop1 +
                             where(y[0] < 0., if_ydot0, else_ydot0) * y[0])
        ydot[1] = self.tt * (self.c - self.d * y[0]**2 - y[1])

        # energy
        if_ydot2 = -0.1 * y[2]**7
        else_ydot2 = 0
        ydot[2] = self.tt * (self.r * (4 * (y[0] - self.x0) - y[2] + where(
            y[2] < 0., if_ydot2, else_ydot2) + self.Ks * c_pop1))

        # population 2
        ydot[3] = self.tt * (-y[4] + y[3] - y[3]**3 + self.Iext2 +
                             self.bb * y[5] - 0.3 *
                             (y[2] - 3.5) + self.Kf * c_pop2)
        if_ydot4 = 0
        else_ydot4 = self.aa * (y[3] + 0.25)
        ydot[4] = self.tt * (
            (-y[4] + where(y[3] < -0.25, if_ydot4, else_ydot4)) / self.tau)

        # filter
        ydot[5] = self.tt * (-0.01 * (y[5] - 0.1 * y[0]))  # 0.01 = \gamma

        # G2D's equations:
        ydot[6] = self.d_rs * self.tau_rs * (
            self.alpha_rs * y[7] - self.f_rs * y[6]**3 + self.e_rs * y[6]**2 +
            self.gamma_rs * self.I_rs + self.gamma_rs * self.K_rs * c_pop3 +
            lc_1)
        ydot[7] = self.d_rs * (self.a_rs + self.b_rs * y[6] -
                               self.beta_rs * y[7]) / self.tau_rs

        # output: LFP
        self.output = self.p * (-y[0] + y[3]) + (1 - self.p) * y[6]

        return ydot

    def dfun(self, x, c, local_coupling=0.0):
        r"""
            Computes the derivatives of the state-variables of EpileptorRestingState
            with respect to time.
        """

        x_ = x.reshape(x.shape[:-1]).T
        c_ = c.reshape(c.shape[:-1]).T
        Iext = self.Iext + local_coupling * x[0, :, 0]
        lc_1 = local_coupling * x[6, :, 0]
        deriv = _numba_dfun(x_, c_, self.x0, Iext, self.Iext2, self.a, self.b,
                            self.slope, self.tt, self.Kvf, self.c, self.d,
                            self.r, self.Ks, self.Kf, self.aa, self.bb,
                            self.tau, self.tau_rs, self.I_rs, self.a_rs,
                            self.b_rs, self.d_rs, self.e_rs, self.f_rs,
                            self.beta_rs, self.alpha_rs, self.gamma_rs,
                            self.K_rs, lc_1)
        return deriv.T[..., numpy.newaxis]
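# Illustrative sketch (not from the class above): how the monitored LFP-like output
# mixes the Epileptor field (x2 - x1) with the resting-state oscillator x_rs through
# the weight p. Values below are assumed toy numbers.
import numpy

p = numpy.array([0.3])                   # mixing weight: 0 -> pure G2D, 1 -> pure Epileptor
x1 = numpy.array([-1.6, -1.5])           # example per-node values
x2 = numpy.array([-0.9, -1.0])
x_rs = numpy.array([0.2, -0.1])

lfp = p * (-x1 + x2) + (1.0 - p) * x_rs  # same expression as self.output in _numpy_dfun
print(lfp)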
Exemplo n.º 20
0
class Connectivity(HasTraits):
    region_labels = NArray(
        dtype='U128',
        label="Region labels",
        doc=
        """Short strings, 'labels', for the regions represented by the connectivity matrix."""
    )

    weights = NArray(
        label="Connection strengths",
        doc=
        """Matrix of values representing the strength of connections between regions, arbitrary units."""
    )

    undirected = Attr(
        field_type=bool,
        default=False,
        required=False,
        doc=
        "True when the weights matrix is square and symmetric over the main diagonal, False for a directed graph."
    )

    tract_lengths = NArray(
        label="Tract lengths",
        doc="""The length of myelinated fibre tracts between regions.
            If not provided, the Euclidean distance between region centres is used."""
    )

    speed = NArray(
        label="Conduction speed",
        default=numpy.array([3.0]),
        doc=
        """A single number or matrix of conduction speeds for the myelinated fibre tracts between regions."""
    )

    centres = NArray(
        label="Region centres",
        doc="An array specifying the location of the centre of each region.")

    cortical = NArray(
        dtype=bool,
        label="Cortical",
        required=False,
        doc=
        """A boolean vector specifying whether or not a region is part of the cortex."""
    )

    hemispheres = NArray(
        dtype=bool,
        label="Hemispheres (True for Right and False for Left Hemisphere)",
        required=False,
        doc=
        """A boolean vector specifying whether or not a region is part of the right hemisphere"""
    )

    orientations = NArray(
        label="Average region orientation",
        required=False,
        doc=
        """Unit vectors of the average orientation of the regions represented in the connectivity matrix.
            NOTE: Unknown data should be zeros.""")

    areas = NArray(
        label="Area of regions",
        required=False,
        doc=
        """Estimated area represented by the regions in the connectivity matrix.
            NOTE: Unknown data should be zeros.""")

    idelays = NArray(
        dtype=int,
        label="Conduction delay indices",
        required=False,
        doc="An array of time delays between regions in integration steps.")

    delays = NArray(
        label="Conduction delay",
        required=False,
        doc=
        """Matrix of time delays between regions in physical units, setting conduction speed automatically
            combines with tract lengths to update this matrix, i.e. don't try and change it manually."""
    )

    number_of_regions = Int(
        field_type=int,
        label="Number of regions",
        doc="""The number of regions represented in this Connectivity """)

    number_of_connections = Int(
        field_type=int,
        label="Number of connections",
        doc=
        """The number of non-zero entries represented in this Connectivity """)

    # Original Connectivity, from which current connectivity was edited.
    parent_connectivity = Attr(field_type=str, required=False)

    # In case of an edited Connectivity, these are the nodes left in the area of interest;
    # the rest were part of a lesion, so they were removed.
    saved_selection = List(of=int)

    @property
    def display_name(self):
        """
        Overwrite from superclass and add number of regions field (as title on DataStructure tree)
        """
        previous = "Connectivity"
        return previous + " " + str(self.number_of_regions)

    @property
    def saved_selection_labels(self):
        """
        Taking the entity field saved_selection, convert indexes in that array
        into labels.
        """
        if self.saved_selection:
            idxs = [int(i) for i in self.saved_selection]
            return ','.join(self.region_labels[i] for i in idxs)
        else:
            return ''

    def is_right_hemisphere(self, idx):
        """
        :param idx:  Region IDX
        :return: True when hemispheres information is present and it shows that the current node is in the right
        hemisphere. When hemispheres info is not present, return True for the second half of the indices and
        False otherwise.
        """
        if self.hemispheres is not None and self.hemispheres.size:
            return self.hemispheres[idx]
        return idx >= self.number_of_regions / 2

    @property
    def hemisphere_order_indices(self):
        """
        A sequence of indices of rows/columns.
        These permute rows/columns so that the first half belongs to the first hemisphere.
        If there is no hemisphere information, the identity permutation is returned.
        """
        if self.hemispheres is not None and self.hemispheres.size:
            li, ri = [], []
            for i, is_right in enumerate(self.hemispheres):
                if is_right:
                    ri.append(i)
                else:
                    li.append(i)
            return numpy.array(li + ri)
        else:
            return numpy.arange(self.number_of_regions)

    @property
    def ordered_weights(self):
        """
        This view of the weights matrix lists all left hemisphere nodes before the right ones.
        It is used by viewers of the connectivity.
        """
        permutation = self.hemisphere_order_indices
        # how this works:
        # w[permutation, :] selects all rows at the indices present in the permutation array thus permuting the rows
        # [:, permutation] does the same to columns. See numpy index arrays
        return self.weights[permutation, :][:, permutation]

    @property
    def ordered_tracts(self):
        """
        Similar to :meth:`ordered_weights`
        """
        permutation = self.hemisphere_order_indices
        return self.tract_lengths[permutation, :][:, permutation]

    @property
    def ordered_labels(self):
        """
        Similar to :meth:`ordered_weights`
        """
        permutation = self.hemisphere_order_indices
        return self.region_labels[permutation]

    @property
    def ordered_centres(self):
        """
        Similar to :meth:`ordered_weights`
        """
        permutation = self.hemisphere_order_indices
        return self.centres[permutation]

    def get_grouped_space_labels(self):
        """
        :return: A list [('left', [lh_labels]), ('right', [rh_labels])]
        """
        if self.hemispheres is not None and self.hemispheres.size:
            l, r = [], []

            for i, (is_right, label) in enumerate(
                    zip(self.hemispheres, self.region_labels)):
                if is_right:
                    r.append((i, label))
                else:
                    l.append((i, label))
            return [('left', l), ('right', r)]
        else:
            return [('', list(enumerate(self.region_labels)))]

    def get_default_selection(self):
        # should this be sub-selection or all always?
        sel = self.saved_selection
        if sel is not None and len(sel) > 0:
            return sel
        else:
            return list(range(len(self.region_labels)))

    def get_measure_points_selection_gid(self):
        """
        :return: the associated connectivity gid
        """
        return self.gid

    @property
    def binarized_weights(self):
        """
        :return: a matrix of the same size as weights, with 1 where weight > 0, and 0 elsewhere
        """
        result = numpy.zeros_like(self.weights)
        result = numpy.where(self.weights > 0, 1, result)
        return result

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """

        self.number_of_regions = int(self.weights.shape[0])
        # NOTE: numpy.count_nonzero could do this more directly
        self.number_of_connections = int(self.weights.nonzero()[0].shape[0])

        if self.tract_lengths is None or self.tract_lengths.size == 0:
            self.compute_tract_lengths()
        if self.region_labels is None or self.region_labels.size == 0:
            self.compute_region_labels()
        if self.hemispheres is None or self.hemispheres.size == 0:
            self.try_compute_hemispheres()

        # This cannot go into a compute_ method, as it is too complex a reference
        # if self.delays.size == 0:
        # TODO: Because delays are stored and loaded, the size was never 0 and
        #      so this wasn't being run, making the conduction_speed hack on the
        #      simulator non-functional. In the longer run it'll probably be
        #      necessary for delays to never be stored but always calculated
        #      from tract lengths and speed...
        if self.speed is None:  # TODO: this is a hack fix...
            self.log.warning(
                "Connectivity.speed attribute not initialized properly, setting it to 3.0..."
            )
            self.speed = numpy.array([3.0])

        # NOTE: Because of the conduction_speed hack for UI this must be evaluated here, even if delays
        # already has a value, otherwise setting speed in the UI has no effect...
        self.delays = self.tract_lengths / self.speed

        if (self.weights.transpose() == self.weights).all():
            self.undirected = True

        self.validate()

    def summary_info(self):
        summary = {
            "Number of regions": self.number_of_regions,
            "Number of connections": self.number_of_connections,
            "Undirected": self.undirected,
        }
        summary.update(narray_summary_info(self.areas, ar_name='areas'))
        summary.update(narray_summary_info(self.weights, ar_name='weights'))
        summary.update(
            narray_summary_info(self.weights[self.weights.nonzero()],
                                ar_name='weights-non-zero',
                                omit_shape=True))
        summary.update(
            narray_summary_info(self.tract_lengths,
                                ar_name='tract_lengths',
                                omit_shape=True))
        summary.update(
            narray_summary_info(
                self.tract_lengths[self.tract_lengths.nonzero()],
                ar_name='tract_lengths-non-zero',
                omit_shape=True))
        summary.update(
            narray_summary_info(self.tract_lengths[self.weights.nonzero()],
                                ar_name='tract_lengths (connections)',
                                omit_shape=True))
        return summary

    def set_idelays(self, dt):
        """
        Convert the time delays between regions in physical units into an array
        of linear indices into the simulator's history attribute.

        args:
            ``dt (float64)``: Length of integration time step...

        Updates attribute:
            ``idelays (numpy.array)``: Transmission delay between brain regions
            in integration steps.
        """
        # Express delays in integration steps
        self.idelays = numpy.rint(self.delays / dt).astype(numpy.int32)
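        # Worked example (assumed numbers): with dt = 0.1 ms, a 17.3 ms delay
        # becomes rint(17.3 / 0.1) = 173 integration steps in the history buffer.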

    def compute_tract_lengths(self):
        """
        If no tract lengths data are available, this can be used to calculate
        the Euclidean distance between region centres to use as a proxy.

        """
        nor = self.number_of_regions
        tract_lengths = numpy.zeros((nor, nor))
        # TODO: redundant by half, do half triangle then flip...
        for region in range(nor):
            temp = self.centres - self.centres[region, :][numpy.newaxis, :]
            tract_lengths[region, :] = numpy.sqrt(numpy.sum(temp**2, axis=1))

        self.tract_lengths = tract_lengths

    def compute_region_labels(self):
        """
        Compute some labels, if missing.
        """
        labels = ["region_%03d" % n for n in range(self.number_of_regions)]
        self.region_labels = numpy.array(labels, dtype="U128")

    def try_compute_hemispheres(self):
        """
        If all region labels are prefixed with L or R, then compute hemisphere side with that.
        """
        if self.region_labels is not None and self.region_labels.size > 0:
            hemispheres = []
            # Check if all labels are prefixed with R / L
            for label in self.region_labels:
                if label is not None and label.lower().startswith('r'):
                    hemispheres.append(True)
                elif label is not None and label.lower().startswith('l'):
                    hemispheres.append(False)
                else:
                    hemispheres = None
                    break
            # Check if all labels are sufixed with R / L
            if hemispheres is None:
                hemispheres = []
                for label in self.region_labels:
                    if label is not None and label.lower().endswith('r'):
                        hemispheres.append(True)
                    elif label is not None and label.lower().endswith('l'):
                        hemispheres.append(False)
                    else:
                        hemispheres = None
                        break
            if hemispheres is not None:
                self.hemispheres = numpy.array(hemispheres, dtype=bool)

    def transform_remove_self_connections(self):
        """
        Remove the values from the main diagonal (self-connections)
        """
        nor = self.number_of_regions
        result = copy(self.weights)
        result = result - result * numpy.eye(nor, nor)
        return result

    def scaled_weights(self, mode='tract'):
        """
        Scale the connection strengths (weights) and return the scaled matrix.
        Three simple types of scaling are supported.
        The ``mode`` is one of the following:

            'tract': Scale by a value such that the maximum absolute value of a single
                connection is 1.0. (Global scaling)

            'region': Scale by a value such that the maximum absolute value of the
                cumulative input to any region is 1.0. (Node-wise scaling)

            None: does nothing.

        NOTE: Currently multiple 'tract' and/or 'region' scalings without
            intermediate 'none' scaling mode destroy the ability to recover
            the original un-scaled weights matrix.

        """
        # NOTE: It is not yet clear how or if we will integrate this functionality
        #      into the UI. Currently the same effect can be achieved manually
        #      by using the coupling functions, it is just that, in certain
        #      situations, things are simplified by starting from a normalised
        #      weights matrix. However, in other situations it is not desirable
        #      to have a simple normalisation of this sort.
        # NOTE: We should probably separate the two cases implemented here into
        #      'scaling' and 'normalisation'. Normalisation implies that the norm
        #      of the samples is equal to 1, while here it is only scaling by a factor.

        self.log.info("Starting to normalize to mode: %s" % str(mode))

        normalisation_factor = None
        if mode in ("tract", "edge"):
            # global scaling
            normalisation_factor = numpy.abs(self.weights).max()
        elif mode in ("region", "node"):
            # node-wise scaling
            normalisation_factor = numpy.max(
                numpy.abs(self.weights.sum(axis=1)))
        elif mode in (None, "none"):
            normalisation_factor = 1.0
        else:
            self.log.error("Bad weights normalisation mode, must be one of:")
            self.log.error("('tract', 'edge', 'region', 'node', 'none')")
            raise Exception("Bad weights normalisation mode")

        self.log.debug("Normalization factor is: %s" %
                       str(normalisation_factor))
        mask = self.weights != 0.0
        result = copy(self.weights)
        result[mask] = self.weights[mask] / normalisation_factor
        return result

    def transform_binarize_matrix(self):
        """
        Transforms the weights matrix into a binary (unweighted) matrix
        """
        self.log.info("Transforming weighted matrix into unweighted matrix")

        result = copy(self.weights)
        result = numpy.where(result > 0, 1, result)
        return result

    def motif_linear_directed(self,
                              number_of_regions=4,
                              max_radius=100.,
                              return_type=None):
        """
        Generates a linear (open chain) unweighted directed graph with equidistant nodes.
        """

        iu1 = numpy.triu_indices(number_of_regions, 1)
        iu2 = numpy.triu_indices(number_of_regions, 2)

        self.weights = numpy.zeros((number_of_regions, number_of_regions))
        self.weights[iu1] = 1.0
        self.weights[iu2] = 0.0

        self.tract_lengths = max_radius * copy(self.weights)
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

        if return_type is not None:
            return self.weights, self.tract_lengths
        else:
            pass

    def motif_linear_undirected(self, number_of_regions=4, max_radius=42.):
        """
        Generates a linear (open chain) unweighted undirected graph with equidistant nodes.
        """

        self.weights, self.tract_lengths = self.motif_linear_directed(
            number_of_regions=number_of_regions,
            max_radius=max_radius,
            return_type=True)

        self.weights += self.weights.T
        self.tract_lengths += self.tract_lengths.T
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

    def motif_chain_directed(self,
                             number_of_regions=4,
                             max_radius=42.,
                             return_type=None):
        """
        Generates a closed unweighted directed graph with equidistant nodes.
        Depending on the centres it could be a box or a ring.
        """

        self.weights, self.tract_lengths = self.motif_linear_directed(
            number_of_regions=number_of_regions,
            max_radius=max_radius,
            return_type=True)

        self.weights[-1, 0] = 1.0
        self.tract_lengths[-1, 0] = max_radius
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

        if return_type is not None:
            return self.weights, self.tract_lengths
        else:
            pass

    def motif_chain_undirected(self, number_of_regions=4, max_radius=42.):
        """
        Generates a closed unweighted undirected graph with equidistant nodes.
        Depending on the centres it could be a box or a ring.
        """

        self.weights, self.tract_lengths = self.motif_chain_directed(
            number_of_regions=number_of_regions,
            max_radius=max_radius,
            return_type=True)

        self.weights[0, -1] = 1.0
        self.tract_lengths[0, -1] = max_radius
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

    def motif_all_to_all(self, number_of_regions=4, max_radius=42.):
        """
        Generates an all-to-all closed unweighted undirected graph with equidistant nodes.
        Self-connections are not included.
        """

        diagonal_elements = numpy.diag_indices(number_of_regions)

        self.weights = numpy.ones((number_of_regions, number_of_regions))
        self.weights[diagonal_elements] = 0.0
        self.tract_lengths = max_radius * copy(self.weights)
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

    def centres_spherical(self,
                          number_of_regions=4,
                          max_radius=42.,
                          flat=False):
        """
        The nodes positions are distributed on a sphere.
        See: http://mathworld.wolfram.com/SphericalCoordinates.html

        If flat is True, cos(phi) = 0, so the nodes lie inside a circle (the z = 0 plane).

        r    : radial
        theta: azimuthal
        phi  : polar
        """

        # azimuth
        theta = numpy.random.uniform(low=-numpy.pi,
                                     high=numpy.pi,
                                     size=number_of_regions)

        # uniform variate; r = max_radius * u**(1/3) gives a uniform density inside the ball
        u = numpy.random.uniform(low=0.0, high=1.0, size=number_of_regions)

        if flat:
            cosphi = 0.0
        else:
            # cos(elevation)
            cosphi = numpy.random.uniform(low=-1.0,
                                          high=1.0,
                                          size=number_of_regions)

        phi = numpy.arccos(cosphi)
        r = max_radius * pow(u, 1 / 3.0)

        # To Cartesian coordinates
        x = r * numpy.sin(phi) * numpy.cos(theta)
        y = r * numpy.sin(phi) * numpy.sin(theta)
        z = r * numpy.cos(phi)

        self.centres = numpy.array([x, y, z]).T
        # Radial unit vectors: normalise each centre (row-wise) to get orientations
        norm_xyz = numpy.sqrt(numpy.sum(self.centres**2, axis=1))
        self.orientations = self.centres / norm_xyz[:, numpy.newaxis]

    def centres_toroidal(self,
                         number_of_regions=4,
                         max_radius=77.,
                         min_radius=13.,
                         mu=numpy.pi,
                         kappa=numpy.pi / 6):
        """
        The nodes lie on a torus.
        See: http://mathworld.wolfram.com/Torus.html

        """

        u = scipy.stats.vonmises.rvs(kappa, loc=mu, size=number_of_regions)
        v = scipy.stats.vonmises.rvs(kappa, loc=mu, size=number_of_regions)

        # To cartesian coordinates
        x = (max_radius + min_radius * numpy.cos(v)) * numpy.cos(u)
        y = (max_radius + min_radius * numpy.cos(v)) * numpy.sin(u)
        z = min_radius * numpy.sin(v)

        # Tangent vector with respect to max_radius
        tx = -numpy.sin(u)
        ty = -numpy.cos(u)
        tz = 0

        # Tangent vector with respect to min_radius
        sx = -numpy.cos(u) * (-numpy.sin(v))
        sy = numpy.sin(u) * (-numpy.sin(v))
        sz = numpy.cos(v)

        # Normal vector
        nx = ty * sz - tz * sy
        ny = tz * sx - tx * sz
        nz = tx * sy - ty * sx

        # Normalize normal vectors
        norm = numpy.sqrt(nx**2 + ny**2 + nz**2)
        nx /= norm
        ny /= norm
        nz /= norm

        self.orientations = numpy.array([nx, ny, nz]).T
        self.centres = numpy.array([x, y, z]).T

    def centres_annular(self,
                        number_of_regions=4,
                        max_radius=77.,
                        min_radius=13.,
                        mu=numpy.pi,
                        kappa=numpy.pi / 6):
        """
        The nodes are lying inside an annulus.

        """

        r = numpy.random.uniform(low=min_radius,
                                 high=max_radius,
                                 size=number_of_regions)
        theta = scipy.stats.vonmises.rvs(kappa, loc=mu, size=number_of_regions)

        # To cartesian coordinates
        x = r * numpy.cos(theta)
        y = r * numpy.sin(theta)
        z = numpy.zeros(number_of_regions)

        self.centres = numpy.array([x, y, z]).T

    def centres_cubic(self, number_of_regions=4, max_radius=42., flat=False):
        """
        The nodes are positioned in a 3D grid inside the cube centred at the origin,
        with edges parallel to the axes and an edge length of 2*max_radius.

        """

        # To cartesian coordinates
        x = numpy.linspace(-max_radius, max_radius, number_of_regions)
        y = numpy.linspace(-max_radius, max_radius, number_of_regions)

        if flat:
            z = numpy.zeros(number_of_regions)
        else:
            z = numpy.linspace(-max_radius, max_radius, number_of_regions)

        self.centres = numpy.array([x, y, z]).T

    def generate_surrogate_connectivity(self,
                                        number_of_regions,
                                        motif='chain',
                                        undirected=True,
                                        these_centres='spherical'):
        """
        This one generates some defaults.
        For more specific motifs, invoke each method separately.

        """

        # NOTE: Luckily I went for 5 motifs ...
        if motif == 'chain' and undirected:
            self.motif_chain_undirected(number_of_regions=number_of_regions)
        elif motif == "chain" and not undirected:
            self.motif_chain_directed(number_of_regions=number_of_regions)
        elif motif == 'linear' and undirected:
            self.motif_linear_undirected(number_of_regions=number_of_regions)
        elif motif == 'linear' and not undirected:
            self.motif_linear_directed(number_of_regions=number_of_regions)
        else:
            self.log.info("Generating all-to-all connectivity \\")
            self.motif_all_to_all(number_of_regions=number_of_regions)

        # centres
        if these_centres in ("spherical", "annular", "toroidal", "cubic"):
            getattr(self, "centres_" + these_centres)(
                number_of_regions=number_of_regions)
        else:
            raise Exception("Bad centres geometry")

    def create_region_labels(self, mode="numeric"):
        """
        Assumes the weights matrix already exists.
        """

        self.log.info("Create labels: %s" % str(mode))

        if mode in ("numeric", "num"):
            region_labels = [n for n in range(self.number_of_regions)]
            self.region_labels = numpy.array(region_labels).astype(str)
        elif mode in ("alphabetic", "alpha"):
            if self.number_of_regions < 26:
                self.region_labels = numpy.array(
                    list(map(chr, list(range(
                        65, 65 + self.number_of_regions))))).astype(str)
            else:
                self.log.info(
                    "I'm too lazy to create several strategies to label regions. \\"
                )
                self.log.info(
                    "Please choose mode 'numeric' or set your own labels\\")
        else:
            self.log.error("Bad region labels mode, must be one of:")
            self.log.error("('numeric', 'num', 'alphabetic', 'alpha')")
            raise Exception("Bad region labels mode")

    def unmapped_indices(self, region_mapping):
        """
        Compute vector of indices of regions in connectivity which are not in the given
        region mapping.

        """

        return numpy.setdiff1d(numpy.r_[:self.number_of_regions],
                               region_mapping)

    @staticmethod
    def from_file(source_file="connectivity_76.zip"):

        result = Connectivity()
        source_full_path = try_get_absolute_path("tvb_data.connectivity",
                                                 source_file)

        if source_file.endswith(".h5"):
            reader = H5Reader(source_full_path)

            result.weights = reader.read_field("weights")
            result.centres = reader.read_field("centres")
            result.region_labels = reader.read_field("region_labels")
            result.orientations = reader.read_optional_field("orientations")
            result.cortical = reader.read_optional_field("cortical")
            result.hemispheres = reader.read_field("hemispheres")
            result.areas = reader.read_optional_field("areas")
            result.tract_lengths = reader.read_field("tract_lengths")

        else:
            reader = ZipReader(source_full_path)

            result.weights = reader.read_array_from_file("weights")
            if reader.has_file_like("centres"):
                result.centres = reader.read_array_from_file("centres",
                                                             use_cols=(1, 2,
                                                                       3))
                result.region_labels = reader.read_array_from_file(
                    "centres", dtype=str, use_cols=(0, ))
            else:
                result.centres = reader.read_array_from_file("centers",
                                                             use_cols=(1, 2,
                                                                       3))
                result.region_labels = reader.read_array_from_file(
                    "centers", dtype=str, use_cols=(0, ))
            result.orientations = reader.read_optional_array_from_file(
                "average_orientations")
            result.cortical = reader.read_optional_array_from_file(
                "cortical", dtype=bool)
            result.hemispheres = reader.read_optional_array_from_file(
                "hemispheres", dtype=bool)
            result.areas = reader.read_optional_array_from_file("areas")
            result.tract_lengths = reader.read_array_from_file("tract_lengths")

        return result
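# Illustrative sketch (not from the class above): what scaled_weights does,
# reproduced on a plain numpy matrix. 'tract' mode divides by the largest single
# connection; 'region' mode divides by the largest cumulative input to a node.
# The weights below are assumed toy numbers.
import numpy

weights = numpy.array([[0.0, 2.0, 0.5],
                       [1.0, 0.0, 4.0],
                       [0.5, 3.0, 0.0]])

tract_factor = numpy.abs(weights).max()                    # global scaling factor (4.0 here)
region_factor = numpy.max(numpy.abs(weights.sum(axis=1)))  # node-wise scaling factor (5.0 here)

mask = weights != 0.0
tract_scaled = weights.copy()
tract_scaled[mask] = weights[mask] / tract_factor          # strongest single weight becomes 1.0
region_scaled = weights.copy()
region_scaled[mask] = weights[mask] / region_factor        # strongest cumulative input becomes 1.0
print(tract_scaled)
print(region_scaled)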
Exemplo n.º 21
0
class EpileptorT(ModelNumbaDfun):

    a = NArray(label=":math:`a`", default=numpy.array([1.0]), doc="""""")

    b = NArray(label=":math:`b`", default=numpy.array([3.0]), doc="""""")

    c = NArray(label=":math:`c`", default=numpy.array([1.0]), doc="""""")

    d = NArray(label=":math:`d`", default=numpy.array([5.0]), doc="""""")

    r = NArray(label=":math:`r`",
               default=numpy.array([0.00035]),
               domain=Range(lo=0.0, hi=0.001, step=0.00005),
               doc="""""")

    s = NArray(label=":math:`s`", default=numpy.array([4.0]), doc="""""")

    x0 = NArray(label=":math:`x0`",
                default=numpy.array([-1.6]),
                domain=Range(lo=-3.0, hi=-1.0, step=0.1),
                doc="""""")

    Iext = NArray(label=":math:`Iext`",
                  default=numpy.array([3.1]),
                  domain=Range(lo=1.5, hi=5.0, step=0.1),
                  doc="""""")

    slope = NArray(label=":math:`slope`",
                   default=numpy.array([0.]),
                   domain=Range(lo=-16.0, hi=6.0, step=0.1),
                   doc="""""")

    Iext2 = NArray(label=":math:`Iext2`",
                   default=numpy.array([0.45]),
                   domain=Range(lo=0.0, hi=1.0, step=0.05),
                   doc="""""")

    tau = NArray(label=":math:`tau`", default=numpy.array([10.0]), doc="""""")

    aa = NArray(label=":math:`aa`", default=numpy.array([6.0]), doc="""""")

    bb = NArray(label=":math:`bb`", default=numpy.array([2.0]), doc="""""")

    Kvf = NArray(label=":math:`Kvf`",
                 default=numpy.array([0.0]),
                 domain=Range(lo=0.0, hi=4.0, step=0.5),
                 doc="""""")

    Kf = NArray(label=":math:`Kf`",
                default=numpy.array([0.0]),
                domain=Range(lo=0.0, hi=4.0, step=0.5),
                doc="""""")

    Ks = NArray(label=":math:`Ks`",
                default=numpy.array([0.0]),
                domain=Range(lo=-4.0, hi=4.0, step=0.1),
                doc="""""")

    tt = NArray(label=":math:`tt`",
                default=numpy.array([1.0]),
                domain=Range(lo=0.001, hi=10.0, step=0.001),
                doc="""""")

    modification = NArray(label=":math:`modification`",
                          default=numpy.array([0]),
                          doc="""""")

    state_variable_range = Final(label="State Variable ranges [lo, hi]",
                                 default={
                                     "x1": numpy.array([0.0]),
                                     "y1": numpy.array([0.0]),
                                     "z": numpy.array([0.0]),
                                     "x2": numpy.array([0.0]),
                                     "y2": numpy.array([0.0]),
                                     "g": numpy.array([0.0])
                                 },
                                 doc="""state variables""")

    state_variable_boundaries = Final(
        label="State Variable boundaries [lo, hi]",
        default={
            "x1": numpy.array([-2.0, 1.0]),
            "y1": numpy.array([-20.0, 2.0]),
            "z": numpy.array([-2.0, 5.0]),
            "x2": numpy.array([-2.0, 0.0]),
            "y2": numpy.array([0.0, 2.0]),
            "g": numpy.array([-1.0, 1.0])
        },
    )
    variables_of_interest = List(
        of=str,
        label="Variables or quantities available to Monitors",
        choices=(
            'x1 ** x2',
            'x2',
        ),
        default=(
            'x1',
            'y1',
            'z',
            'x2',
            'y2',
            'g',
        ),
        doc="Variables to monitor")

    state_variables = ['x1', 'y1', 'z', 'x2', 'y2', 'g']

    _nvar = 6
    cvar = numpy.array([
        0,
        1,
        2,
        3,
        4,
        5,
    ], dtype=numpy.int32)

    def dfun(self, vw, c, local_coupling=0.0):
        vw_ = vw.reshape(vw.shape[:-1]).T
        c_ = c.reshape(c.shape[:-1]).T
        deriv = _numba_dfun_EpileptorT(vw_, c_, self.a, self.b, self.c, self.d,
                                       self.r, self.s, self.x0, self.Iext,
                                       self.slope, self.Iext2, self.tau,
                                       self.aa, self.bb, self.Kvf, self.Kf,
                                       self.Ks, self.tt, self.modification,
                                       local_coupling)

        return deriv.T[..., numpy.newaxis]
Exemplo n.º 22
0
class Sensors(HasTraits):
    """
    Base Sensors class.
    All sensors have locations.
    Some will have orientations, e.g. MEG.
    """
    sensors_type = Attr(str, required=False)

    labels = NArray(dtype='U128', label="Sensor labels")

    locations = NArray(label="Sensor locations")

    has_orientation = Attr(field_type=bool, default=False)

    orientations = NArray(required=False)

    number_of_sensors = Int(field_type=int, label="Number of sensors",
                            doc="""The number of sensors described by these Sensors.""")

    # introduced to accommodate real sensors sets which have sensors
    # that should be zero during simulation i.e. ECG (heart), EOG,
    # reference gradiometers, etc.
    usable = NArray(dtype=bool, required=False, label="Usable sensors",
                    doc="The sensors in set which are used for signal data.")

    @classmethod
    def from_file(cls, source_file="eeg_brainstorm_65.txt"):

        result = cls()
        source_full_path = try_get_absolute_path("tvb_data.sensors", source_file)
        reader = FileReader(source_full_path)

        result.labels = reader.read_array(dtype=str, use_cols=(0,))
        result.locations = reader.read_array(use_cols=(1, 2, 3))
        return result

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """
        super(Sensors, self).configure()
        self.number_of_sensors = int(self.labels.shape[0])

    def summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        return {
            "Sensor type": self.sensors_type,
            "Number of Sensors": self.number_of_sensors
        }

    def sensors_to_surface(self, surface_to_map):
        """
        Map EEG sensors onto the head surface (skin-air).

        EEG sensor locations are typically only given on a unit sphere, that is,
        they are effectively only identified by their orientation with respect
        to a coordinate system. This method is used to map these unit vector
        sensor "locations" to a specific location on the surface of the skin.

        Assumes coordinate systems are aligned, i.e. common x,y,z and origin.

        """
        # Normalize sensor and vertex locations to unit vectors
        norm_sensors = numpy.sqrt(numpy.sum(self.locations ** 2, axis=1))
        unit_sensors = self.locations / norm_sensors[:, numpy.newaxis]
        norm_verts = numpy.sqrt(numpy.sum(surface_to_map.vertices ** 2, axis=1))
        unit_vertices = surface_to_map.vertices / norm_verts[:, numpy.newaxis]

        sensor_locations = numpy.zeros((self.number_of_sensors, 3))
        for k in range(self.number_of_sensors):
            # Find the surface vertex most closely aligned with current sensor.
            current_sensor = unit_sensors[k]
            alignment = numpy.dot(current_sensor, unit_vertices.T)
            one_ring = []

            while not one_ring:
                closest_vertex = alignment.argmax()
                # Get the set of triangles in the neighbourhood of that vertex.
                # NOTE: Intersection doesn't always fall within the 1-ring, so, all
                #      triangles contained in the 2-ring are considered.
                one_ring = surface_to_map.vertex_neighbours[closest_vertex]
                if not one_ring:
                    alignment[closest_vertex] = min(alignment)

            local_tri = [surface_to_map.vertex_triangles[v] for v in one_ring]
            local_tri = list(set([tri for subar in local_tri for tri in subar]))

            # Calculate a parametrized plane line intersection [t,u,v] for the
            # set of local triangles, which are considered as defining a plane.
            tuv = numpy.zeros((len(local_tri), 3))
            for i, tri in enumerate(local_tri):
                edge_01 = (surface_to_map.vertices[surface_to_map.triangles[tri, 0]] -
                           surface_to_map.vertices[surface_to_map.triangles[tri, 1]])
                edge_02 = (surface_to_map.vertices[surface_to_map.triangles[tri, 0]] -
                           surface_to_map.vertices[surface_to_map.triangles[tri, 2]])
                see_mat = numpy.vstack((current_sensor, edge_01, edge_02))

                tuv[i] = numpy.linalg.solve(see_mat.T, surface_to_map.vertices[surface_to_map.triangles[tri, 0].T])

            # Find  which line-plane intersection falls within its triangle
            # by imposing the condition that u, v, & u+v are contained in [0 1]
            local_triangle_index = ((0 <= tuv[:, 1]) * (tuv[:, 1] < 1) *
                                    (0 <= tuv[:, 2]) * (tuv[:, 2] < 1) *
                                    (0 <= (tuv[:, 1] + tuv[:, 2])) * ((tuv[:, 1] + tuv[:, 2]) < 2)).nonzero()[0]

            if len(local_triangle_index) == 1:
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[local_triangle_index[0], 0]

            elif len(local_triangle_index) < 1:
                # No triangle was found in proximity. Place the sensor approximately, in the surface extension area.
                self.log.warning("Could not find a proper position on the given surface for sensor %d:%s. "
                                 "with direction %s" % (k, self.labels[k], str(self.locations[k])))
                distances = (abs(tuv[:, 1] + tuv[:, 2]))
                local_triangle_index = distances.argmin()
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[local_triangle_index, 0]

            else:
                # More than one triangle was found in proximity. Pick the first.
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[local_triangle_index[0], 0]

        return sensor_locations
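# Illustrative sketch (not from the class above): the parametrized line-plane
# intersection solved per triangle in sensors_to_surface. We look for t, u, v with
#     t * direction + u * (v0 - v1) + v * (v0 - v2) = v0,
# so t scales the sensor direction onto the triangle's plane, and (u, v) tell
# whether the hit falls inside the triangle. Vertices and direction are assumed toy values.
import numpy

v0 = numpy.array([10.0, 0.0, 0.0])                    # triangle vertices
v1 = numpy.array([0.0, 10.0, 0.0])
v2 = numpy.array([0.0, 0.0, 10.0])
direction = numpy.array([1.0, 1.0, 1.0])
direction = direction / numpy.linalg.norm(direction)  # unit sensor direction

see_mat = numpy.vstack((direction, v0 - v1, v0 - v2))
t, u, v = numpy.linalg.solve(see_mat.T, v0)           # same solve as in the method
print(t, u, v)                                        # u and v in [0, 1] -> intersection inside the triangle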
Exemplo n.º 23
0
class SigmoidalJansenRit(Coupling):
    r"""
    Provides a sigmoidal coupling function as described in the 
    Jansen and Rit model, of the following form

    .. math::
        c_{min} + (c_{max} - c_{min}) / (1.0 + \exp(-a(x-midpoint)/\sigma))

    Assumes that x has two state variables.

    """

    cmin = NArray(
        label=":math:`c_{min}`",
        default=numpy.array([
            0.0,
        ]),
        domain=Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Minimum of the sigmoid function",
    )

    cmax = NArray(
        label=":math:`c_{max}`",
        default=numpy.array([
            2.0 * 0.0025,
        ]),
        domain=Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Maximum of the sigmoid function",
    )

    midpoint = NArray(
        label="midpoint",
        default=numpy.array([
            6.0,
        ]),
        domain=Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="Midpoint of the linear portion of the sigmoid",
    )

    r = NArray(
        label=r":math:`r`",
        default=numpy.array([
            1.0,
        ]),
        domain=Range(lo=0.01, hi=1000.0, step=10.0),
        doc="the steepness of the sigmoidal transformation",
    )

    a = NArray(
        label=r":math:`a`",
        default=numpy.array([
            0.56,
        ]),
        domain=Range(lo=0.01, hi=1000.0, step=10.0),
        doc="Scaling of the coupling term",
    )

    def __str__(self):
        return simple_gen_astr(self, 'cmin cmax midpoint a r')

    def pre(self, x_i, x_j):
        pre = self.cmax / (1.0 + numpy.exp(self.r * (self.midpoint -
                                                     (x_j[:, 0] - x_j[:, 1]))))
        return pre[:, numpy.newaxis]

    def post(self, gx):
        return self.a * gx
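# Illustrative sketch (not from the class above): evaluating the Jansen-Rit sigmoid
# on the difference of the two coupled state variables, using the default parameter
# values defined above. The node values are assumed toy numbers.
import numpy

cmax, midpoint, r, a = 2.0 * 0.0025, 6.0, 1.0, 0.56
diff = numpy.array([-2.0, 0.0, 6.0, 12.0])             # y1 - y2 at connected nodes

pre = cmax / (1.0 + numpy.exp(r * (midpoint - diff)))  # as in pre(), before summation over nodes
post = a * pre                                         # as in post(), global scaling of the summed input
print(pre)
print(post)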
Exemplo n.º 24
0
class LarterBreakspear(models.Model):
    """
    A modified Morris-Lecar model that includes a third equation which simulates
    the effect of a population of inhibitory interneurons synapsing on
    the pyramidal cells.
    
    .. [Larteretal_1999] Larter et al. *A coupled ordinary differential equation
        lattice model for the simulation of epileptic seizures.* Chaos. 9(3):
        795, 1999.
    

    .. [Breaksetal_2003_a] Breakspear, M.; Terry, J. R. & Friston, K. J.  *Modulation of excitatory
        synaptic coupling facilitates synchronization and complex dynamics in a
        nonlinear model of neuronal dynamics*. Neurocomputing 52–54 (2003), 151–158.

    .. [Breaksetal_2003_b] M. J. Breakspear et al. *Modulation of excitatory
        synaptic coupling facilitates synchronization and complex dynamics in a
        biophysical model of neuronal dynamics.* Network: Computation in Neural
        Systems 14: 703-732, 2003.
    
    Equations and default parameters are taken from [Breaksetal_2003_b]_. 
    All equations and parameters are non-dimensional and normalized.
    For values of d_v  < 0.55, the dynamics of a single column settles onto a 
    solitary fixed point attractor.


    Parameters used for simulations in [Breaksetal_2003_a]_, Table 1, page 153.
    Two nodes were coupled.

    +---------------------------+
    |          Table 1          | 
    +--------------+------------+
    |Parameter     |  Value     |
    +--------------+------------+
    | I            |      0.3   |
    | a_ee         |      0.4   |
    | a_ei         |      0.1   |
    | a_ie         |      1.0   |
    | a_ne         |      1.0   |
    | a_ni         |      0.4   |
    | r_NMDA       |      0.2   |
    | delta        |      0.001 |
    +--------------+------------+



    +---------------------------+
    |          Table 2          | 
    +--------------+------------+
    |Parameter     |  Value     |
    +--------------+------------+
    | gK           |      2.0   |
    | gL           |      0.5   |
    | gNa          |      6.7   |
    | gCa          |      1.0   |
    | a_ne         |      1.0   |
    | a_ni         |      0.4   |
    | a_ee         |      0.36  |
    | a_ei         |      2.0   |
    | a_ie         |      2.0   |
    | VK           |     -0.7   |
    | VL           |     -0.5   |
    | VNa          |      0.53  |
    | VCa          |      1.0   |
    | phi          |      0.7   | 
    | b            |      0.1   |
    | I            |      0.3   |
    | r_NMDA       |      0.25  |
    | C            |      0.1   |
    | TCa          |     -0.01  |
    | d_Ca         |      0.15  |
    | TK           |      0.0   |
    | d_K          |      0.3   |
    | VT           |      0.0   |
    | ZT           |      0.0   |
    | TNa          |      0.3   |
    | d_Na         |      0.15  |
    | d_V          |      0.65  |
    | d_Z          |      d_V   |
    | QV_max       |      1.0   |
    | QZ_max       |      1.0   |
    +--------------+------------+
    |   Alstott et al. 2009     |
    +---------------------------+

    Note: d_Z may be spatialized, e.g. ones(N,1).*0.65 + modn*(rand(N,1)-0.5).


    NOTES about parameters

    d_V
    For d_V < 0.55 (uncoupled network), the system exhibits fixed-point dynamics;
    for 0.55 < d_V < 0.59, limit-cycle attractors;
    and for d_V > 0.59, chaotic attractors (e.g. d_V=0.6, aee=0.5, aie=0.5,
    gNa=0, Iext=0.165).

    C
    The long-range coupling 'C' is 'weak' in the sense that the authors
    investigated parameter values for which C < a_ee and C << a_ie.


    
    .. figure:: img/LarterBreakspear_01_mode_0_pplane.svg
            :alt: Larter-Breakspear phase plane (V, W)
            
            The (:math:`V`, :math:`W`) phase-plane for the Larter-Breakspear model.
    
    """

    # Define traited attributes for this model, these represent possible kwargs.
    gCa = NArray(label=":math:`g_{Ca}`",
                 default=numpy.array([1.1]),
                 domain=Range(lo=0.9, hi=1.5, step=0.1),
                 doc="""Conductance of population of Ca++ channels.""")

    gK = NArray(label=":math:`g_{K}`",
                default=numpy.array([2.0]),
                domain=Range(lo=1.95, hi=2.05, step=0.025),
                doc="""Conductance of population of K channels.""")

    gL = NArray(label=":math:`g_{L}`",
                default=numpy.array([0.5]),
                domain=Range(lo=0.45, hi=0.55, step=0.05),
                doc="""Conductance of population of leak channels.""")

    phi = NArray(label=":math:`\\phi`",
                 default=numpy.array([0.7]),
                 domain=Range(lo=0.3, hi=0.9, step=0.1),
                 doc="""Temperature scaling factor.""")

    gNa = NArray(label=":math:`g_{Na}`",
                 default=numpy.array([6.7]),
                 domain=Range(lo=0.0, hi=10.0, step=0.1),
                 doc="""Conductance of population of Na channels.""")

    TK = NArray(label=":math:`T_{K}`",
                default=numpy.array([0.0]),
                domain=Range(lo=0.0, hi=0.0001, step=0.00001),
                doc="""Threshold value for K channels.""")

    TCa = NArray(label=":math:`T_{Ca}`",
                 default=numpy.array([-0.01]),
                 domain=Range(lo=-0.02, hi=-0.01, step=0.0025),
                 doc="Threshold value for Ca channels.")

    TNa = NArray(label=":math:`T_{Na}`",
                 default=numpy.array([0.3]),
                 domain=Range(lo=0.25, hi=0.3, step=0.025),
                 doc="Threshold value for Na channels.")

    VCa = NArray(label=":math:`V_{Ca}`",
                 default=numpy.array([1.0]),
                 domain=Range(lo=0.9, hi=1.1, step=0.05),
                 doc="""Ca Nernst potential.""")

    VK = NArray(label=":math:`V_{K}`",
                default=numpy.array([-0.7]),
                domain=Range(lo=-0.8, hi=1., step=0.1),
                doc="""K Nernst potential.""")

    VL = NArray(label=":math:`V_{L}`",
                default=numpy.array([-0.5]),
                domain=Range(lo=-0.7, hi=-0.4, step=0.1),
                doc="""Nernst potential leak channels.""")

    VNa = NArray(label=":math:`V_{Na}`",
                 default=numpy.array([0.53]),
                 domain=Range(lo=0.51, hi=0.55, step=0.01),
                 doc="""Na Nernst potential.""")

    d_K = NArray(label=":math:`\\delta_{K}`",
                 default=numpy.array([0.3]),
                 domain=Range(lo=0.1, hi=0.4, step=0.1),
                 doc="""Variance of K channel threshold.""")

    tau_K = NArray(label=":math:`\\tau_{K}`",
                   default=numpy.array([1.0]),
                   domain=Range(lo=0.01, hi=10.0, step=0.1),
                   doc="""Time constant for K relaxation time (ms)""")

    d_Na = NArray(label=":math:`\\delta_{Na}`",
                  default=numpy.array([0.15]),
                  domain=Range(lo=0.1, hi=0.2, step=0.05),
                  doc="Variance of Na channel threshold.")

    d_Ca = NArray(label=":math:`\\delta_{Ca}`",
                  default=numpy.array([0.15]),
                  domain=Range(lo=0.1, hi=0.2, step=0.05),
                  doc="Variance of Ca channel threshold.")

    aei = NArray(label=":math:`a_{ei}`",
                 default=numpy.array([2.0]),
                 domain=Range(lo=0.1, hi=2.0, step=0.1),
                 doc="""Excitatory-to-inhibitory synaptic strength.""")

    aie = NArray(label=":math:`a_{ie}`",
                 default=numpy.array([2.0]),
                 domain=Range(lo=0.5, hi=2.0, step=0.1),
                 doc="""Inhibitory-to-excitatory synaptic strength.""")

    b = NArray(
        label=":math:`b`",
        default=numpy.array([0.1]),
        domain=Range(lo=0.0001, hi=1.0, step=0.0001),
        doc="""Time constant scaling factor. The original value is 0.1""")

    C = NArray(
        label=":math:`c`",
        default=numpy.array([0.0]),
        domain=Range(lo=0.0, hi=0.2, step=0.05),
        doc="""Strength of excitatory coupling. Balance between internal and
        local (and global) coupling strength. C > 0 introduces interdependencies between
        consecutive columns/nodes. C=1 corresponds to maximum coupling.
        This strength should be set to sensible values when a whole network is connected. """
    )

    ane = NArray(label=":math:`a_{ne}`",
                 default=numpy.array([1.0]),
                 domain=Range(lo=0.4, hi=1.0, step=0.05),
                 doc="""Non-specific-to-excitatory synaptic strength.""")

    ani = NArray(label=":math:`a_{ni}`",
                 default=numpy.array([0.4]),
                 domain=Range(lo=0.3, hi=0.5, step=0.05),
                 doc="""Non-specific-to-inhibitory synaptic strength.""")

    aee = NArray(label=":math:`a_{ee}`",
                 default=numpy.array([0.4]),
                 domain=Range(lo=0.4, hi=0.6, step=0.05),
                 doc="""Excitatory-to-excitatory synaptic strength.""")

    Iext = NArray(
        label=":math:`I_{ext}`",
        default=numpy.array([0.3]),
        domain=Range(lo=0.165, hi=0.3, step=0.005),
        doc="""Subcortical input strength. It represents a non-specific
       excitation or thalamic inputs.""")

    rNMDA = NArray(label=":math:`r_{NMDA}`",
                   default=numpy.array([0.25]),
                   domain=Range(lo=0.2, hi=0.3, step=0.05),
                   doc="""Ratio of NMDA to AMPA receptors.""")

    VT = NArray(label=":math:`V_{T}`",
                default=numpy.array([0.0]),
                domain=Range(lo=0.0, hi=0.7, step=0.01),
                doc="""Threshold potential (mean) for excitatory neurons. 
        In [Breaksetal_2003_b]_ this values is 0.""")

    d_V = NArray(
        label=":math:`\\delta_{V}`",
        default=numpy.array([0.65]),
        domain=Range(lo=0.49, hi=0.7, step=0.01),
        doc="""Variance of the excitatory threshold. It is one of the main
        parameters explored in [Breaksetal_2003_b]_.""")

    ZT = NArray(label=":math:`Z_{T}`",
                default=numpy.array([0.0]),
                domain=Range(lo=0.0, hi=0.1, step=0.005),
                doc="""Threshold potential (mean) for inihibtory neurons.""")

    d_Z = NArray(label=":math:`\\delta_{Z}`",
                 default=numpy.array([0.7]),
                 domain=Range(lo=0.001, hi=0.75, step=0.05),
                 doc="""Variance of the inhibitory threshold.""")

    # NOTE: these values were not given in the article; they are taken from Destexhe 2001.
    QV_max = NArray(
        label=":math:`QV_{max}`",
        default=numpy.array([1.0]),
        domain=Range(lo=0.1, hi=1., step=0.001),
        doc="""Maximal firing rate for excitatory populations (kHz)""")

    QZ_max = NArray(
        label=":math:`QZ_{max}`",
        default=numpy.array([1.0]),
        domain=Range(lo=0.1, hi=1., step=0.001),
        doc="""Maximal firing rate for inhibitory populations (kHz)""")

    variables_of_interest = List(
        of=str,
        label="Variables watched by Monitors",
        choices=("V", "W", "Z"),
        default=("V", "W", "Z"),
        doc="""This represents the default state-variables of this Model to be
        monitored. It can be overridden for each Monitor if desired.""")

    # Informational attribute, used for phase-plane and initial()
    state_variable_range = Final(
        {
            "V": numpy.array([-1.5, 1.5]),
            "W": numpy.array([-1.0, 1.0]),
            "Z": numpy.array([-1.5, 1.5])
        },
        label="State Variable ranges [lo, hi]",
        doc="""The values for each state-variable should be set to encompass
            the expected dynamic range of that state-variable for the current 
            parameters, it is used as a mechanism for bounding random inital 
            conditions when the simulation isn't started from an explicit
            history, it is also provides the default range of phase-plane plots."""
    )

    state_variables = ["V", "W", "Z"]
    _nvar = 3

    def __init__(self, **kwargs):
        """
        .. May need to put kwargs back if we can't get them from trait...
        
        """
        super(LarterBreakspear, self).__init__(**kwargs)

        LOG.info('%s: initing...' % str(self))

        self.cvar = numpy.array([0], dtype=numpy.int32)

        LOG.debug('%s: inited.' % repr(self))

    def dfun(self, state_variables, coupling, local_coupling=0.0):
        """
        .. math::
            \\dot{V} &= - (g_{Ca} + (1 - C) \\, r_{NMDA} \\, a_{ee} \\, Q_V^i +
            C \\, r_{NMDA} \\, a_{ee} \\, \\langle Q_V \\rangle) \\, m_{Ca} \\, (V - V_{Ca})
            - g_K \\, W \\, (V - V_K) - g_L \\, (V - V_L)
            - (g_{Na} \\, m_{Na} + (1 - C) \\, a_{ee} \\, Q_V^i +
            C \\, a_{ee} \\, \\langle Q_V \\rangle) \\, (V - V_{Na})
            - a_{ie} \\, Z \\, Q_Z^i + a_{ne} \\, I_{ext} \\\\
            \\dot{W} &= \\frac{\\phi \\, (m_K - W)}{\\tau_K} \\\\
            \\dot{Z} &= b \\, (a_{ni} \\, I_{ext} + a_{ei} \\, V \\, Q_V) \\\\
            m_{ion}(X) &= 0.5 \\, (1 + \\tanh(\\frac{X - T_{ion}}{\\delta_{ion}}))

        See Equations (7), (3), (6) and (2), respectively, in [Breaksetal_2003_b]_,
        pp. 705-706.

        NOTE: Equation (8) has an error: the sign before the term :math:`a_{ie}\\, Z \\, Q_Z^i`
        should be a minus (-) and not a plus (+).
        
        """
        V = state_variables[0, :]
        W = state_variables[1, :]
        Z = state_variables[2, :]

        c_0 = coupling[0, :]
        lc_0 = local_coupling

        # relationship between membrane voltage and channel conductance
        m_Ca = 0.5 * (1 + numpy.tanh((V - self.TCa) / self.d_Ca))
        m_Na = 0.5 * (1 + numpy.tanh((V - self.TNa) / self.d_Na))
        m_K = 0.5 * (1 + numpy.tanh((V - self.TK) / self.d_K))

        # voltage to firing rate
        QV = 0.5 * self.QV_max * (1 + numpy.tanh((V - self.VT) / self.d_V))
        QZ = 0.5 * self.QZ_max * (1 + numpy.tanh((Z - self.ZT) / self.d_Z))

        dV = (-(self.gCa + (1.0 - self.C) * self.rNMDA * self.aee * QV +
                self.C * self.rNMDA * self.aee * c_0) * m_Ca * (V - self.VCa) -
              self.gK * W * (V - self.VK) - self.gL * (V - self.VL) -
              (self.gNa * m_Na +
               (1.0 - self.C) * self.aee * QV + self.C * self.aee * c_0) *
              (V - self.VNa) - self.aei * Z * QZ + self.ane * self.Iext)

        dW = (self.phi * (m_K - W) / self.tau_K)

        dZ = (self.b * (self.ani * self.Iext + self.aei * V * QV))

        derivative = numpy.array([dV, dW, dZ])

        return derivative
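
# A self-contained numerical sketch (not part of the model class above) of the
# two nonlinearities used in LarterBreakspear.dfun: the channel activation
# m_ion(X) = 0.5*(1 + tanh((X - T_ion)/delta_ion)) and the voltage-to-rate
# sigmoid QV = 0.5*QV_max*(1 + tanh((V - VT)/d_V)). Parameter values are the
# class defaults; the voltage samples are arbitrary.
import numpy

V = numpy.linspace(-1.5, 1.5, 7)                        # membrane potential samples
TCa, d_Ca = -0.01, 0.15                                 # Ca channel threshold and variance
VT, d_V, QV_max = 0.0, 0.65, 1.0                        # firing-rate sigmoid parameters

m_Ca = 0.5 * (1 + numpy.tanh((V - TCa) / d_Ca))         # fraction of open Ca channels
QV = 0.5 * QV_max * (1 + numpy.tanh((V - VT) / d_V))    # excitatory firing rate (kHz)

print(m_Ca)   # ~0 well below TCa, ~1 well above it
print(QV)     # smooth sigmoid between 0 and QV_max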
Exemplo n.º 25
0
class DummyTrait(HasTraits):
    """ Test class with traited attributes"""

    test_array = NArray(label="State Variables range [[lo],[hi]]",
                        default=numpy.array([[-3.0, -6.0], [3.0, 6.0]]), dtype="float")
Exemplo n.º 26
0
class SpatialAverage(Monitor):
    """
    Monitors the averaged value of the model's variables of interest over sets of
    nodes -- defined by spatial_mask. This is primarily intended for use with
    surface simulations: by default, when no spatial_mask is specified,
    surface.region_mapping is used to reduce a surface simulation back to a single
    average time series for each region in the associated Connectivity. However,
    any vector of length nodes containing integers, from a set contiguous from
    zero, that specifies the new grouping to which each node belongs will work.

    Additionally, this monitor temporally sub-samples the simulation every `istep`
    integration steps.

    """
    _ui_name = "Spatial average with temporal sub-sample"
    CORTICAL = "cortical"
    HEMISPHERES = "hemispheres"
    REGION_MAPPING = "region mapping"

    spatial_mask = NArray(  #TODO: Check it's a vector of length Nodes (like region mapping for surface)
        dtype=int,
        label="Spatial Mask",
        required=False,
        doc="""A vector of length==nodes that assigns an index to each node
            specifying the "region" to which it belongs. The default usage is
            for mapping a surface-based simulation back to the regions used in
            its `Long-range Connectivity`.""")

    default_mask = Attr(
        str,
        choices=(CORTICAL, HEMISPHERES, REGION_MAPPING),
        default=HEMISPHERES,
        label="Default Mask",
        required=False,
        doc=("Fallback in case spatial mask is none and no surface provided"
             "to use either connectivity hemispheres or cortical attributes."))
    # order = -1)

    backend = ReferenceBackend()

    def _support_bool_mask(self, mask):
        """
        Ensure we also support a boolean mask (e.g. connectivity.cortical) with all values being 1,
        by transforming them all to 0.
        Otherwise, the later check `not numpy.all(areas == numpy.arange(number_of_areas))` would fail
        when all regions are cortical or in one hemisphere.
        """
        spatial_mask = numpy.array([int(val) for val in mask])
        unique_mask = numpy.unique(spatial_mask)
        if len(unique_mask) == 1 and unique_mask[0] == 1:
            return numpy.zeros(len(spatial_mask), dtype=int)
        return spatial_mask

    def config_for_sim(self, simulator):

        # initialize base attributes
        super(SpatialAverage, self).config_for_sim(simulator)
        self.is_default_special_mask = False

        # setup given spatial mask or default to region mapping
        if self.spatial_mask is None:
            self.is_default_special_mask = True
            if simulator.surface is not None:
                self.spatial_mask, _, _ = self.backend.full_region_map(
                    simulator.surface, simulator.connectivity)
            else:
                conn = simulator.connectivity
                if self.default_mask == self.CORTICAL:
                    self.spatial_mask = self._support_bool_mask(conn.cortical)
                elif self.default_mask == self.HEMISPHERES:
                    self.spatial_mask = self._support_bool_mask(
                        conn.hemispheres)
                else:
                    msg = "Must fill either the Spatial Mask parameter or choose a Default Mask for non-surface" \
                          " simulations when using SpatioTemporal monitor!"
                    raise Exception(msg)

        number_of_nodes = simulator.number_of_nodes
        if self.spatial_mask.size != number_of_nodes:
            msg = "spatial_mask must be a vector of length number_of_nodes."
            raise Exception(msg)

        areas = numpy.unique(self.spatial_mask)
        number_of_areas = len(areas)
        if not numpy.all(areas == numpy.arange(number_of_areas)):
            msg = ("Areas in the spatial_mask must be specified as a "
                   "contiguous set of indices starting from zero.")
            raise Exception(msg)

        self.log.debug("spatial_mask")
        self.log.debug(narray_describe(self.spatial_mask))
        spatial_sum = numpy.zeros((number_of_nodes, number_of_areas))
        spatial_sum[numpy.arange(number_of_nodes), self.spatial_mask] = 1
        spatial_sum = spatial_sum.T
        self.log.debug("spatial_sum")
        self.log.debug(narray_describe(spatial_sum))
        nodes_per_area = numpy.sum(spatial_sum, axis=1)[:, numpy.newaxis]
        self.spatial_mean = spatial_sum / nodes_per_area
        self.log.debug("spatial_mean")
        self.log.debug(narray_describe(self.spatial_mean))

    def sample(self, step, state):
        if step % self.istep == 0:
            time = step * self.dt
            monitored_state = numpy.dot(self.spatial_mean, state[self.voi, :])
            return [time, monitored_state.transpose((1, 0, 2))]

    def create_time_series(self,
                           connectivity=None,
                           surface=None,
                           region_map=None,
                           region_volume_map=None):
        if self.is_default_special_mask:
            return TimeSeriesRegion(sample_period=self.period,
                                    region_mapping=region_map,
                                    region_mapping_volume=region_volume_map,
                                    title='Regions ' + self.__class__.__name__,
                                    connectivity=connectivity)
        else:
            # mask does not correspond to the number of regions
            # let the parent create a plain TimeSeries
            return super(SpatialAverage, self).create_time_series()
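
# A toy, stand-alone sketch of the averaging matrix that config_for_sim builds
# above: an indicator matrix mapping nodes to areas, normalised by the number
# of nodes per area, which sample() then applies with a dot product. The mask
# and state values below are made up for illustration.
import numpy

spatial_mask = numpy.array([0, 0, 1, 1, 1])          # 5 nodes grouped into 2 areas
number_of_nodes = spatial_mask.size
number_of_areas = len(numpy.unique(spatial_mask))

spatial_sum = numpy.zeros((number_of_nodes, number_of_areas))
spatial_sum[numpy.arange(number_of_nodes), spatial_mask] = 1
spatial_sum = spatial_sum.T                          # (areas, nodes) indicator matrix
nodes_per_area = numpy.sum(spatial_sum, axis=1)[:, numpy.newaxis]
spatial_mean = spatial_sum / nodes_per_area          # each row sums to 1

state = numpy.array([1.0, 3.0, 2.0, 4.0, 6.0])       # one variable on 5 nodes
print(spatial_mean.dot(state))                       # [2.0, 4.0] -> per-area means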
Exemplo n.º 27
0
class Surface(TVBSurface, BaseModel):
    vox2ras = NArray(dtype=float,
                     label="vox2ras",
                     default=np.array([]),
                     required=False,
                     doc="""Voxel to RAS coordinates transformation array.""")

    def get_vertex_normals(self):
        # If there are at least 3 vertices and 1 triangle...
        if self.number_of_vertices > 2 and self.number_of_triangles > 0:
            if self.vertex_normals.shape[0] != self.number_of_vertices:
                self.vertex_normals = self.compute_vertex_normals()
        return self.vertex_normals

    def get_triangle_normals(self):
        # If there are at least 3 vertices and 1 triangle...
        if self.number_of_vertices > 2 and self.number_of_triangles > 0:
            if self.triangle_normals.shape[0] != self.number_of_triangles:
                self.triangle_normals = self.compute_triangle_normals()
        return self.triangle_normals

    def get_vertex_areas(self):
        triangle_areas = self._find_triangle_areas()
        vertex_areas = np.zeros((self.number_of_vertices, ))
        for triang, vertices in enumerate(self.triangles):
            for i in range(3):
                vertex_areas[vertices[i]] += 1. / 3. * triangle_areas[triang]
        return vertex_areas

    def add_vertices_and_triangles(self,
                                   new_vertices,
                                   new_triangles,
                                   new_vertex_normals=np.array([]),
                                   new_triangle_normals=np.array([])):
        self.triangles = np.array(self.triangles.tolist() +
                                  (new_triangles +
                                   self.number_of_vertices).tolist())
        self.vertices = np.array(self.vertices.tolist() +
                                 new_vertices.tolist())
        self.vertex_normals = np.array(self.vertex_normals.tolist() +
                                       new_vertex_normals.tolist())
        self.triangle_normals = np.array(self.triangle_normals.tolist() +
                                         new_triangle_normals.tolist())
        self.get_vertex_normals()
        self.get_triangle_normals()

    def compute_surface_area(self):
        """
            This function computes the surface area
            :param: surface: input surface object
            :return: (sub)surface area, float
            """
        return np.sum(self._find_triangle_areas())

    def configure(self):
        try:
            self.zero_based_triangles
        except AttributeError:
            self.zero_based_triangles = False
        super(Surface, self).configure()

    def to_tvb_instance(self, datatype=TVBSurface, **kwargs):
        return super(Surface, self).to_tvb_instance(datatype, **kwargs)
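
# A small stand-alone sketch of the vertex-area rule implemented by
# get_vertex_areas above: every triangle contributes one third of its area to
# each of its three vertices. The one-triangle mesh below is hypothetical, and
# the triangle areas are computed directly here instead of via
# _find_triangle_areas.
import numpy as np

vertices = np.array([[0., 0., 0.],
                     [1., 0., 0.],
                     [0., 1., 0.]])
triangles = np.array([[0, 1, 2]])

# triangle areas from the cross product of two edge vectors
edge1 = vertices[triangles[:, 1]] - vertices[triangles[:, 0]]
edge2 = vertices[triangles[:, 2]] - vertices[triangles[:, 0]]
triangle_areas = 0.5 * np.linalg.norm(np.cross(edge1, edge2), axis=1)

vertex_areas = np.zeros((vertices.shape[0],))
for triang, verts in enumerate(triangles):
    for i in range(3):
        vertex_areas[verts[i]] += triangle_areas[triang] / 3.0

print(triangle_areas)   # [0.5]
print(vertex_areas)     # [0.1666..., 0.1666..., 0.1666...]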
Exemplo n.º 28
0
class Monitor(HasTraits):
    """
    Abstract base class for monitor implementations.
    """

    period = Float(
        label="Sampling period (ms)",  # order = 10
        default=0.9765625,  # ms; 0.9765625 => 1024 Hz, 0.5 => 2000 Hz
        doc="""Sampling period in milliseconds, must be an integral multiple
        of integration-step size. As a guide: 2048 Hz => 0.48828125 ms ;  
        1024 Hz => 0.9765625 ms ; 512 Hz => 1.953125 ms.""")

    variables_of_interest = NArray(
        dtype=int,
        label="Model variables to watch",  # order=11,
        doc=("Indices of the model's variables of interest (VOI) that this monitor should record. "
             "Note that the indices start at zero and refer to the model's selected VOIs, so if a "
             "model offers VOIs V, W and V+W, and only W is selected, then the correct index for "
             "this monitor to record W is 0."),
        required=False)

    istep = None
    dt = None
    voi = None
    _stock = numpy.empty([])

    def __str__(self):
        clsname = self.__class__.__name__
        return '%s(period=%f, voi=%s)' % (clsname, self.period,
                                          self.variables_of_interest.tolist())

    def _config_vois(self, simulator):
        self.voi = self.variables_of_interest
        if self.voi is None or self.voi.size == 0:
            self.voi = numpy.r_[:len(simulator.model.variables_of_interest)]

    def _config_time(self, simulator):
        self.dt = simulator.integrator.dt
        self.istep = ReferenceBackend.iround(self.period / self.dt)

    def config_for_sim(self, simulator):
        """Configure monitor for given simulator.

        Grab the Simulator's integration step size. Set the monitor's variables
        of interest based on the Monitor's 'variables_of_interest' attribute, if
        it was specified, otherwise use the 'variables_of_interest' specified 
        for the Model. Calculate the number of integration steps (istep)
        between returns by the record method. This method is called from within
        the Simulator's configure() method.

        """
        self._config_vois(simulator)
        self._config_time(simulator)

    def record(self, step, observed):
        """Record a sample of the observed state at given step.

        This is a final method called by the simulator to obtain samples from a
        monitor instance. Monitor subclasses should not override this method, but
        rather implement the `sample` method.

        """
        return self.sample(step, observed)

    @abc.abstractmethod
    def sample(self, step, state):
        """
        This method provides monitor output, and should be overridden by subclasses.

        """

    def create_time_series(self,
                           connectivity=None,
                           surface=None,
                           region_map=None,
                           region_volume_map=None):
        """
        Create a time series instance that will be populated by this monitor
        :param surface: if present a TimeSeriesSurface is returned
        :param connectivity: if present a TimeSeriesRegion is returned
        Otherwise a plain TimeSeries will be returned
        """
        if surface is not None:
            return TimeSeriesSurface(
                surface=surface.region_mapping_data.surface,
                sample_period=self.period,
                title='Surface ' + self.__class__.__name__)
        if connectivity is not None:
            return TimeSeriesRegion(connectivity=connectivity,
                                    region_mapping=region_map,
                                    region_mapping_volume=region_volume_map,
                                    sample_period=self.period,
                                    title='Regions ' + self.__class__.__name__)

        return TimeSeries(sample_period=self.period,
                          title=' ' + self.__class__.__name__)
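
# A minimal arithmetic sketch of what _config_time above computes: the
# sampling period must be an integral multiple of the integration step, and
# istep is that multiple. The values are the defaults quoted in the docstrings.
period = 0.9765625            # ms, i.e. 1024 Hz sampling
dt = 0.01220703125            # ms, the default Integrator step size
istep = int(round(period / dt))
print(istep)                  # 80 -> one sample every 80 integration steps
print(istep * dt == period)   # True: the period is an exact multiple of dt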
Exemplo n.º 29
0
class Integrator(HasTraits):
    """
    The Integrator class is a base class for the integration methods...

    .. [1] Kloeden and Platen, Springer 1995, *Numerical solution of stochastic
        differential equations.*

    .. [2] Riccardo Mannella, *Integration of Stochastic Differential Equations
        on a Computer*, Int J. of Modern Physics C 13(9): 1177--1194, 2002.

    .. [3] R. Mannella and V. Palleschi, *Fast and precise algorithm for 
        computer simulation of stochastic differential equations*, Phys. Rev. A
        40: 3381, 1989.

    """

    dt = Float(
        label="Integration-step size (ms)",
        default=0.01220703125,  #0.015625,
        #range = basic.Range(lo= 0.0048828125, hi=0.244140625, step= 0.1, base=2.)  mh: was commented
        required=True,
        doc="""The step size used by the integration routine in ms. This
        should be chosen to be small enough for the integration to be
        numerically stable. It is also necessary to consider the desired sample
        period of the Monitors, as they are restricted to being integral
        multiples of this value. The default value is set such that all built-in
        models are numerically stable with their default parameters and because
        it is consistent with Monitors using sample periods corresponding to
        powers of 2 from 128 to 4096 Hz.""")

    bounded_state_variable_indices = NArray(
        dtype=int,
        label="indices of the state variables to be bounded by the integrators "
        "within the boundaries in the boundaries' values array",
        required=False)

    state_variable_boundaries = NArray(
        label="The boundary values of the state variables", required=False)

    clamped_state_variable_indices = NArray(
        dtype=int,
        label="indices of the state variables to be clamped by the integrators "
        "to the values in the clamped_values array",
        required=False)

    clamped_state_variable_values = NArray(
        label="The values of the state variables which are clamped ",
        required=False)

    _bounded_integration_state_variable_indices = None
    _integration_state_variable_boundaries = None
    _clamped_integration_state_variable_indices = None
    _clamped_integration_state_variable_values = None

    @abc.abstractmethod
    def scheme(self, X, dfun, coupling, local_coupling, stimulus):
        """
        The scheme of integrator should take a state and provide the next
        state in time, e.g. for a differential equation, scheme should take
        :math:`X` and provide an appropriate :math:`X + dX` (dfun in the code).

        """

    def set_random_state(self, random_state):
        self.log.warning(
            "random_state supplied for non-stochastic integration")

    def configure(self):
        # Set default configurations:
        self._clamped_integration_state_variable_indices = self.clamped_state_variable_indices
        self._clamped_integration_state_variable_values = self.clamped_state_variable_values
        self._bounded_integration_state_variable_indices = self.bounded_state_variable_indices
        self._integration_state_variable_boundaries = self.state_variable_boundaries
        super(Integrator, self).configure()

    def configure_boundaries(self, model):
        if model.state_variable_boundaries is not None:
            indices = []
            boundaries = []
            for sv, sv_bounds in model.state_variable_boundaries.items():
                indices.append(model.state_variables.index(sv))
                boundaries.append(sv_bounds)
            sort_inds = numpy.argsort(indices)
            self.bounded_state_variable_indices = numpy.array(
                indices)[sort_inds]
            self.state_variable_boundaries = numpy.array(boundaries).astype(
                "float64")[sort_inds]
            self._bounded_integration_state_variable_indices = numpy.copy(
                self.bounded_state_variable_indices)
            self._integration_state_variable_boundaries = numpy.copy(
                self.state_variable_boundaries)

    def reconfigure_boundaries_and_clamping_for_integration_state_variables(
            self, model):
        integration_state_variable_indices = numpy.where(
            model.state_variable_mask)[0].tolist()
        if self.state_variable_boundaries is not None:
            # If there are any state_variable_boundaries...
            bounded_integration_state_variable_indices = []
            integration_state_variable_boundaries = []
            # ...for each one of the bounded state variable indices and boundary values...
            for bound_sv_ind, bounds in zip(
                    self._bounded_integration_state_variable_indices,
                    self.state_variable_boundaries):
                # ...if the bounded index corresponds to an integrated state variable...
                if bound_sv_ind in integration_state_variable_indices:
                    # ...add its index within the integration state vector...
                    bounded_integration_state_variable_indices.append(
                        integration_state_variable_indices.index(bound_sv_ind))
                    # ...and the corresponding boundaries
                    integration_state_variable_boundaries.append(bounds)
            self._bounded_integration_state_variable_indices = \
                numpy.array(bounded_integration_state_variable_indices)
            self._integration_state_variable_boundaries = \
                numpy.array(integration_state_variable_boundaries)
        if self.clamped_state_variable_values is not None:
            # If there are any clamped values...
            clamped_integration_state_variable_indices = []
            clamped_integration_state_variable_values = []
            # ...for each one of the clamped state variable indices and clamped values...
            for clamp_sv_ind, clampval in zip(
                    self.clamped_state_variable_indices,
                    self.clamped_state_variable_values):
                # ...if the clamped index corresponds to an integrated state variable...
                if clamp_sv_ind in integration_state_variable_indices:
                    # ...add its index within the integration state vector...
                    clamped_integration_state_variable_indices.append(
                        integration_state_variable_indices.index(clamp_sv_ind))
                    # ...and the corresponding clamped value
                    clamped_integration_state_variable_values.append(clampval)
            self._clamped_integration_state_variable_indices = \
                numpy.array(clamped_integration_state_variable_indices)
            self._clamped_integration_state_variable_values = \
                numpy.array(clamped_integration_state_variable_values)

    def _bound_state(self, X, indices, boundaries):
        for sv_ind, sv_bounds in zip(indices, boundaries):
            if sv_bounds[0] is not None:
                X[sv_ind][X[sv_ind] < sv_bounds[0]] = sv_bounds[0]
            if sv_bounds[1] is not None:
                X[sv_ind][X[sv_ind] > sv_bounds[1]] = sv_bounds[1]

    def bound_state(self, X):
        self._bound_state(X, self.bounded_state_variable_indices,
                          self.state_variable_boundaries)

    def bound_integration_state(self, X):
        self._bound_state(X, self._bounded_integration_state_variable_indices,
                          self._integration_state_variable_boundaries)

    def clamp_state(self, X):
        X[self.
          clamped_state_variable_indices] = self.clamped_state_variable_values

    def clamp_integration_state(self, X):
        X[self.
          _clamped_integration_state_variable_indices] = self._clamped_integration_state_variable_values

    def bound_and_clamp(self, state):
        # If there is a state boundary...
        if self.state_variable_boundaries is not None:
            # ...use the integrator's bound_state
            self.bound_state(state)
        # If there is a state clamping...
        if self.clamped_state_variable_values is not None:
            # ...use the integrator's clamp_state
            self.clamp_state(state)

    def integration_bound_and_clamp(self, state):
        # If there is a state boundary...
        if self._integration_state_variable_boundaries is not None:
            # ...use the integrator's bound_state
            self.bound_integration_state(state)
        # If there is a state clamping...
        if self._clamped_integration_state_variable_values is not None:
            # ...use the integrator's clamp_state
            self.clamp_integration_state(state)

    def integrate_with_update(self, X, model, coupling, local_coupling,
                              stimulus):
        temp = model.update_state_variables_before_integration(
            X, coupling, local_coupling, stimulus)
        if temp is not None:
            X = temp
            self.bound_and_clamp(X)
        X = self.integrate(X, model, coupling, local_coupling, stimulus)
        temp = model.update_state_variables_after_integration(X)
        if temp is not None:
            X = temp
            self.bound_and_clamp(X)
        return X

    def integrate(self, X, model, coupling, local_coupling, stimulus):
        X[model.state_variable_mask] = self.scheme(
            X[model.state_variable_mask], model.dfun, coupling, local_coupling,
            stimulus)
        return X

    def __str__(self):
        return simple_gen_astr(self, 'dt')
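
# A stand-alone numpy sketch of what bound_state and clamp_state above do to a
# state array: out-of-range values of bounded variables are pushed back onto
# the boundary (None means one-sided), and clamped variables are overwritten
# with fixed values. The indices, boundaries and values are made up.
import numpy

X = numpy.array([[-1.0, 0.5, 3.0],    # state variable 0
                 [10.0, -2.0, 0.0]])  # state variable 1

# bound state variable 0 to [0.0, None] (no upper boundary)
lo, hi = 0.0, None
if lo is not None:
    X[0][X[0] < lo] = lo
if hi is not None:
    X[0][X[0] > hi] = hi

# clamp state variable 1 to a fixed value
X[1] = 0.0

print(X)   # [[0.0, 0.5, 3.0], [0.0, 0.0, 0.0]]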
Exemplo n.º 30
0
class FooDatatype(HasTraits):
    array_float = NArray()
    array_int = NArray(dtype=int, shape=(Dim.any, Dim.any))
    scalar_int = Attr(int)
    abaz = Attr(field_type=BazDataType)
    some_transient = NArray(shape=(Dim.any, Dim.any, Dim.any), required=False)