Example 1
class TimeSeriesData(MappedType):
    """
    Base time-series dataType.
    """

    title = basic.String

    data = arrays.FloatArray(
        label="Time-series data",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc="""An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions""")

    nr_dimensions = basic.Integer(
        label="Number of dimensions in the timeseries",
        default=4)

    length_1d, length_2d, length_3d, length_4d = [basic.Integer] * 4

    labels_ordering = basic.List(
        default=["Time", "State Variable", "Space", "Mode"],
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = basic.Dict(
        default={},
        label="Specific labels for each dimension for the data stored in this timeseries.",
        doc="""A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension]}""")
    ## TODO (for Stuart) : remove TimeLine and make sure the correct Period/start time is returned by different monitors in the simulator

    time = arrays.FloatArray(
        file_storage=core.FILE_STORAGE_EXPAND,
        label="Time-series time",
        required=False,
        doc="""An array of time values for the time-series, with a shape of [tpts,].
        This is 'time' as returned by the simulator's monitors.""")

    start_time = basic.Float(label="Start Time:")

    sample_period = basic.Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g. sec, msec, usec, ...)
    sample_period_unit = basic.String(label="Sample Period Measure Unit",
                                      default="ms")

    sample_rate = basic.Float(label="Sample rate",
                              doc="""The sample rate of the timeseries""")

    has_surface_mapping = basic.Bool(default=True)
    has_volume_mapping = basic.Bool(default=False)
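
The labels_ordering default above fixes the 4D layout [Time, State Variable, Space, Mode]. A minimal standalone sketch of that convention in plain numpy (the sizes are assumed for illustration; this is not part of the TVB traits machinery):

import numpy

# Assumed sizes, illustrative only: 100 time points, 2 state variables,
# 76 regions, 1 mode -- the [Time, State Variable, Space, Mode] layout.
tpts, nsvar, nspace, nmodes = 100, 2, 76, 1
data = numpy.zeros((tpts, nsvar, nspace, nmodes))
sample_period = 1.0                        # ms, matching the trait default
time = numpy.arange(tpts) * sample_period  # what the 'time' field would hold
sample_rate = 1.0 / sample_period          # what the 'sample_rate' field would hold
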
Example 2
class SensorsEEGData(SensorsData):
    """
    EEG sensor locations are represented as unit vectors; these need to be
    combined with a head (outer-skin) surface to obtain actual sensor locations
    ::
        
                              position
                                 |
                                / \\
                               /   \\
        file columns: labels, x, y, z
        
    """
    _ui_name = "EEG Sensors"

    __tablename__ = None

    __mapper_args__ = {'polymorphic_identity': EEG_POLYMORPHIC_IDENTITY}

    sensors_type = basic.String(default=EEG_POLYMORPHIC_IDENTITY)

    has_orientation = basic.Bool(default=False, order=-1)

    def __init__(self, **kwargs):
        super(SensorsEEGData, self).__init__(**kwargs)
        self.default.reload(
            self.__class__,
            folder_path="sensors",
            file_name="EEG_unit_vectors_BrainProducts_62.txt.bz2")
Example 3
class SensorsMEGData(SensorsData):
    """
    These are actually just SQUIDS. Axial or planar gradiometers are achieved
    by calculating lead fields for two sets of sensors and then subtracting...
    ::
        
                              position  orientation
                                 |           |
                                / \         / \\
                               /   \       /   \\
        file columns: labels, x, y, z,   dx, dy, dz
        
    """
    _ui_name = "MEG sensors"

    __tablename__ = None

    __mapper_args__ = {'polymorphic_identity': MEG_POLYMORPHIC_IDENTITY}

    sensors_type = basic.String(default=MEG_POLYMORPHIC_IDENTITY)

    orientations = arrays.OrientationArray(
        label="Sensor orientations",
        doc="An array representing the orientation of the MEG SQUIDs")

    has_orientation = basic.Bool(default=True, order=-1)
Example 4
class SensorsData(MappedType):
    """
    Base Sensors class.
    All sensors have locations. 
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    labels = arrays.StringArray(label="Sensor labels")

    locations = arrays.PositionArray(label="Sensor locations")

    has_orientation = basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")

    # introduced to accommodate real sensors sets which have sensors
    # that should be zero during simulation i.e. ECG (heart), EOG,
    # reference gradiometers, etc.
    usable = arrays.BoolArray(
        required=False,
        label="Usable sensors",
        doc="The sensors in set which are used for signal data.")
Example 5
class SensorsMEGData(SensorsData):
    """
    These are actually just SQUIDS. Axial or planar gradiometers are achieved
    by calculating lead fields for two sets of sensors and then subtracting...
    ::
        
                              position  orientation
                                 |           |
                                / \         / \\
                               /   \       /   \\
        file columns: labels, x, y, z,   dx, dy, dz
        
    """
    _ui_name = "MEG sensors"

    __tablename__ = None

    __mapper_args__ = {'polymorphic_identity': MEG_POLYMORPHIC_IDENTITY}

    sensors_type = basic.String(default=MEG_POLYMORPHIC_IDENTITY)

    orientations = arrays.OrientationArray(
        label="Sensor orientations",
        console_default=SensorsData.default.read_data(usecols=(4, 5, 6),
                                                      field="orientations",
                                                      lazy_load=True),
        doc="An array representing the orientation of the MEG SQUIDs")

    has_orientation = basic.Bool(default=True, order=-1)

    def __init__(self, **kwargs):
        super(SensorsMEGData, self).__init__(**kwargs)
        self.default.reload(self.__class__,
                            folder_path="sensors",
                            file_name="meg_channels_reg13.txt.bz2")
Example 6
class SensorsData(MappedType):
    """
    Base Sensors class.
    All sensors have locations. 
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    default = readers.File(
        folder_path="sensors",
        file_name='EEG_unit_vectors_BrainProducts_62.txt.bz2')

    labels = arrays.StringArray(label="Sensor labels",
                                console_default=default.read_data(
                                    usecols=(0, ),
                                    dtype="string",
                                    field="labels"))

    locations = arrays.PositionArray(label="Sensor locations",
                                     console_default=default.read_data(
                                         usecols=(1, 2, 3), field="locations"))

    has_orientation = basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")
Example 7
class HyperbolicTangent(Coupling):
    """
    Hyperbolic tangent.

    """

    a = arrays.FloatArray(
        label=":math:`a`",
        default=numpy.array([0.0]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="""Minimum of the sigmoid function""",
        order=1)

    midpoint = arrays.FloatArray(
        label="midpoint",
        default=numpy.array([0.0]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=10.0),
        doc="""Midpoint of the linear portion of the sigmoid""",
        order=3)

    sigma = arrays.FloatArray(
        label=r":math:`\sigma`",
        default=numpy.array([1.0]),
        range=basic.Range(lo=0.01, hi=1000.0, step=10.0),
        doc="""Standard deviation of the ...""",
        order=4)

    normalise = basic.Bool(
        label="normalise by in-strength",
        default=True,
        doc="""Normalise the node coupling by the node's in-strength""",
        order=4)


    def __call__(self, g_ij, x_i, x_j):
        r"""
        Evaluate the Sigmoidal function for the arg ``x``. The equation being
        evaluated has the following form:

            .. math::
                        a * (1 + tanh((x - midpoint)/sigma))

        """
        temp = self.a * (1 + numpy.tanh((x_j - self.midpoint) / self.sigma))

        if self.normalise:
            # NOTE: normalising by the strength or degrees may yield NaNs,
            # so fill these values with inf
            in_strength = g_ij.sum(axis=2)[:, :, numpy.newaxis, :]
            in_strength[in_strength == 0] = numpy.inf
            temp *= (g_ij / in_strength)  # region mode normalisation

            coupled_input = temp.mean(axis=0)
        else:
            coupled_input = (g_ij * temp).mean(axis=0)

        return coupled_input
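
A quick numeric sketch of the expression evaluated above, a * (1 + tanh((x_j - midpoint) / sigma)) with in-strength normalisation, using plain arrays in place of the simulator's g_ij and x_j (the shapes are assumed, chosen to match the axis=2 sum and axis=0 mean):

import numpy

nnodes, nsvar, nmodes = 5, 1, 1
g_ij = numpy.random.rand(nnodes, nsvar, nnodes, nmodes)  # assumed weights
x_j = numpy.random.randn(nnodes, nsvar, nnodes, nmodes)  # assumed activity
a, midpoint, sigma = 1.0, 0.0, 1.0                       # trait defaults

temp = a * (1 + numpy.tanh((x_j - midpoint) / sigma))
in_strength = g_ij.sum(axis=2)[:, :, numpy.newaxis, :]
in_strength[in_strength == 0] = numpy.inf  # avoid 0/0 for isolated nodes
coupled_input = (g_ij / in_strength * temp).mean(axis=0)  # shape (1, 5, 1)
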
Example 8
class SensorsEEGData(SensorsData):
    """
    EEG sensor locations are represented as unit vectors; these need to be
    combined with a head (outer-skin) surface to obtain actual sensor locations
    ::
        
                              position
                                 |
                                / \\
                               /   \\
        file columns: labels, x, y, z
        
    """
    _ui_name = "EEG Sensors"

    __tablename__ = None

    __mapper_args__ = {'polymorphic_identity': EEG_POLYMORPHIC_IDENTITY}

    sensors_type = basic.String(default=EEG_POLYMORPHIC_IDENTITY)

    has_orientation = basic.Bool(default=False, order=-1)
Example 9
class SensorsMEG(Sensors):
    """
    These are actually just SQUIDS. Axial or planar gradiometers are achieved
    by calculating lead fields for two sets of sensors and then subtracting...
    ::

                              position  orientation
                                 |           |
                                / \         / \\
                               /   \       /   \\
        file columns: labels, x, y, z,   dx, dy, dz

    """
    _ui_name = "MEG sensors"

    __tablename__ = None

    __mapper_args__ = {'polymorphic_identity': MEG_POLYMORPHIC_IDENTITY}

    sensors_type = types_basic.String(default=MEG_POLYMORPHIC_IDENTITY)

    orientations = arrays.OrientationArray(
        label="Sensor orientations",
        doc="An array representing the orientation of the MEG SQUIDs")

    has_orientation = types_basic.Bool(default=True, order=-1)

    @classmethod
    def from_file(cls, source_file="meg_151.txt.bz2", instance=None):
        result = super(SensorsMEG, cls).from_file(source_file, instance)

        source_full_path = try_get_absolute_path("tvb_data.sensors",
                                                 source_file)
        reader = FileReader(source_full_path)
        result.orientations = reader.read_array(use_cols=(4, 5, 6))

        return result
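
The file layout sketched in the docstring (columns labels, x, y, z, dx, dy, dz) can be parsed with plain numpy. A minimal sketch, assuming an uncompressed whitespace-separated file with that layout exists at the illustrative path below:

import numpy

# Hypothetical path; real TVB files such as meg_151.txt.bz2 are bz2-compressed.
path = "meg_sensors.txt"
labels = numpy.loadtxt(path, usecols=(0,), dtype=str)
locations = numpy.loadtxt(path, usecols=(1, 2, 3))     # x, y, z
orientations = numpy.loadtxt(path, usecols=(4, 5, 6))  # dx, dy, dz
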
Example 10
class SensorsData(MappedType):
    """
    Base Sensors class.
    All sensors have locations. 
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    labels = arrays.StringArray(label="Sensor labels")

    locations = arrays.PositionArray(label="Sensor locations")

    has_orientation = basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")
Example 11
class SurfaceData(MappedType):
    """
    This class primarily exists to bundle the structural Surface data into a 
    single object.
    """

    default = readers.File(folder_path="surfaces/cortex_reg13")

    vertices = arrays.PositionArray(
        label="Vertex positions",
        order=-1,
        console_default=default.read_data(file_name="vertices.txt.bz2",
                                          field="vertices"),
        doc="""An array specifying coordinates for the surface vertices.""")

    triangles = arrays.IndexArray(
        label="Triangles",
        order=-1,
        target=vertices,
        console_default=default.read_data(file_name="triangles.txt.bz2",
                                          dtype=numpy.int32,
                                          field="triangles"),
        doc="""Array of indices into the vertices, specifying the triangles which define the surface.""")

    vertex_normals = arrays.OrientationArray(
        label="Vertex normal vectors",
        order=-1,
        console_default=default.read_data(file_name="vertex_normals.txt.bz2",
                                          field="vertex_normals"),
        doc="""An array of unit normal vectors for the surfaces vertices.""")

    triangle_normals = arrays.OrientationArray(
        label="Triangle normal vectors",
        order=-1,
        doc="""An array of unit normal vectors for the surfaces triangles.""")

    geodesic_distance_matrix = SparseMatrix(
        label="Geodesic distance matrix",
        order=-1,
        required=False,
        file_storage=FILE_STORAGE_NONE,
        doc="""A sparse matrix of truncated geodesic distances""")  # 'CS'

    number_of_vertices = basic.Integer(
        label="Number of vertices",
        order=-1,
        doc="""The number of vertices making up this surface.""")

    number_of_triangles = basic.Integer(
        label="Number of triangles",
        order=-1,
        doc="""The number of triangles making up this surface.""")

    ##--------------------- FRAMEWORK ATTRIBUTES -----------------------------##

    hemisphere_mask = arrays.BoolArray(
        label="An array specifying if a vertex belongs to the right hemisphere",
        file_storage=FILE_STORAGE_NONE,
        required=False,
        order=-1)

    zero_based_triangles = basic.Bool(order=-1)

    split_triangles = arrays.IndexArray(order=-1, required=False)

    number_of_split_slices = basic.Integer(order=-1)

    split_slices = basic.Dict(order=-1)

    bi_hemispheric = basic.Bool(order=-1)

    surface_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'surface_type'}
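
The triangle_normals field above stores unit normals per triangle. They can be derived from vertices and triangles with a generic mesh computation; a minimal sketch (not TVB's exact routine; degenerate triangles are ignored):

import numpy

vertices = numpy.random.randn(10, 3)             # assumed vertex cloud
triangles = numpy.array([[0, 1, 2], [3, 4, 5]])  # assumed connectivity
tri = vertices[triangles]                        # (ntri, 3, 3)
normals = numpy.cross(tri[:, 1] - tri[:, 0], tri[:, 2] - tri[:, 0])
normals /= numpy.linalg.norm(normals, axis=1)[:, numpy.newaxis]  # unit length
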
Example 12
class BalloonModel(core.Type):
    """

    A class for calculating the simulated BOLD signal given a TimeSeries
    object of TVB and returning another TimeSeries object.

    The haemodynamic model parameters are based on constants for a 1.5 T scanner.
        
    """

    #NOTE: a potential problem when the input is a TimeSeriesSurface.
    #TODO: add a spatial averaging for TimeSeriesSurface.

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries that represents the input neural activity""",
        order=1)
    # It also sets the BOLD sampling period.
    dt = basic.Float(
        label=":math:`dt`",
        default=0.002,
        required=True,
        doc="""The integration time step size for the balloon model (s).
        If none is provided, by default, the TimeSeries sample period is used.""",
        order=2)

    integrator = integrators_module.Integrator(
        label="Integration scheme",
        default=integrators_module.HeunDeterministic,
        required=True,
        order=-1,
        doc=""" A tvb.simulator.Integrator object which is
        an integration scheme with supporting attributes such as 
        integration step size and noise specification for stochastic 
        methods. It is used to compute the time courses of the balloon model state 
        variables.""")

    bold_model = basic.Enumerate(
        label="Select BOLD model equations",
        options=["linear", "nonlinear"],
        default=["nonlinear"],
        select_multiple=False,
        doc="""Select the set of equations for the BOLD model.""",
        order=4)

    RBM = basic.Bool(
        label="Revised BOLD Model",
        default=True,
        required=True,
        doc="""Select classical vs revised BOLD model (CBM or RBM).
        Coefficients  k1, k2 and k3 will be derived accordingly.""",
        order=5)

    neural_input_transformation = basic.Enumerate(
        label="Neural input transformation",
        options=["none", "abs_diff", "sum"],
        default=["none"],
        select_multiple=False,
        doc="""This represents the operation to perform on the state-variable(s) of
        the model used to generate the input TimeSeries. ``none`` takes the
        first state-variable as neural input; ``abs_diff`` is the absolute
        value of the derivative (first order difference) of the first state variable;
        ``sum`` sums all the state-variables of the input TimeSeries.""",
        order=3)

    tau_s = basic.Float(
        label=r":math:`\tau_s`",
        default=0.65,
        required=True,
        doc="""Balloon model parameter. Time of signal decay (s)""",
        order=-1)

    tau_f = basic.Float(
        label=r":math:`\tau_f`",
        default=0.41,
        required=True,
        doc="""Balloon model parameter. Time of flow-dependent elimination or
        feedback regulation (s). The average time blood takes to traverse the
        venous compartment. It is the ratio of resting blood volume (V0) to
        resting blood flow (F0).""",
        order=-1)

    tau_o = basic.Float(
        label=r":math:`\tau_o`",
        default=0.98,
        required=True,
        doc="""Balloon model parameter. Haemodynamic transit time (s). The average
        time blood takes to traverse the venous compartment. It is the ratio
        of resting blood volume (V0) to resting blood flow (F0).""",
        order=-1)

    alpha = basic.Float(
        label=r":math:`\alpha`",
        default=0.32,
        required=True,
        doc="""Balloon model parameter. Stiffness parameter. Grubb's exponent.""",
        order=-1)

    TE = basic.Float(label=":math:`TE`",
                     default=0.04,
                     required=True,
                     doc="""BOLD parameter. Echo Time""",
                     order=-1)

    V0 = basic.Float(label=":math:`V_0`",
                     default=4.0,
                     required=True,
                     doc="""BOLD parameter. Resting blood volume fraction.""",
                     order=-1)

    E0 = basic.Float(
        label=":math:`E_0`",
        default=0.4,
        required=True,
        doc="""BOLD parameter. Resting oxygen extraction fraction.""",
        order=-1)

    epsilon = arrays.FloatArray(
        label=r":math:`\epsilon`",
        default=numpy.array([0.5]),
        range=basic.Range(lo=0.5, hi=2.0, step=0.25),
        required=True,
        doc="""BOLD parameter. Ratio of intra- and extravascular signals. In principle this
        parameter could be derived from empirical data and spatialized.""",
        order=-1)

    nu_0 = basic.Float(
        label=r":math:`\nu_0`",
        default=40.3,
        required=True,
        doc="""BOLD parameter. Frequency offset at the outer surface of magnetized vessels (Hz).""",
        order=-1)

    r_0 = basic.Float(
        label=":math:`r_0`",
        default=25.,
        required=True,
        doc="""BOLD parameter. Slope r0 of intravascular relaxation rate (Hz). Only used for
        ``revised`` coefficients.""",
        order=-1)

    def evaluate(self):
        """
        Calculate simulated BOLD signal
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        #NOTE: Just using the first state variable, although in the Bold monitor
        #      input is the sum over the state-variables. Only time-series
        #      from basic monitors should be used as inputs.

        neural_activity, t_int = self.input_transformation(
            self.time_series, self.neural_input_transformation)
        input_shape = neural_activity.shape
        result_shape = self.result_shape(input_shape)
        LOG.debug("Result shape will be: %s" % str(result_shape))

        if self.dt is None:
            self.dt = self.time_series.sample_period / 1000.  # (s) integration time step
            msg = "Integration time step size for the balloon model is %s seconds" % str(
                self.dt)
            LOG.debug(msg)

        #NOTE: Avoid upsampling ...
        if self.dt < (self.time_series.sample_period / 1000.):
            msg = "Integration time step shouldn't be smaller than the sampling period of the input signal."
            LOG.error(msg)

        balloon_nvar = 4

        #NOTE: hard coded initial conditions
        state = numpy.zeros((input_shape[0], balloon_nvar, input_shape[2],
                             input_shape[3]))  # s
        state[0, 1, :] = 1.  # f
        state[0, 2, :] = 1.  # v
        state[0, 3, :] = 1.  # q

        # BOLD model coefficients
        k = self.compute_derived_parameters()
        k1, k2, k3 = k[0], k[1], k[2]

        # prepare integrator
        self.integrator.dt = self.dt
        self.integrator.configure()
        LOG.debug("Integration time step size will be: %s seconds" %
                  str(self.integrator.dt))

        scheme = self.integrator.scheme

        # NOTE: the following variables are not used in this integration but
        # required due to the way integrators scheme has been defined.

        local_coupling = 0.0
        stimulus = 0.0

        # Do some checks:
        if numpy.isnan(neural_activity).any():
            LOG.warning("NaNs detected in the neural activity!!")

        # normalise the time-series.
        neural_activity = neural_activity - neural_activity.mean(
            axis=0)[numpy.newaxis, :]

        # solve equations
        for step in range(1, t_int.shape[0]):
            state[step, :] = scheme(state[step - 1, :], self.balloon_dfun,
                                    neural_activity[step, :], local_coupling,
                                    stimulus)
            if numpy.isnan(state[step, :]).any():
                LOG.warning("NaNs detected...")

        # NOTE: just for the sake of clarity, define the variables used in the BOLD model
        s = state[:, 0, :]
        f = state[:, 1, :]
        v = state[:, 2, :]
        q = state[:, 3, :]

        #import pdb; pdb.set_trace()

        # BOLD models
        if self.bold_model == "nonlinear":
            """
            Non-linear BOLD model equations.
            Page 391. Eq. (13) top in [Stephan2007]_
            """
            y_bold = numpy.array(self.V0 * (k1 * (1. - q) + k2 *
                                            (1. - q / v) + k3 * (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]
            LOG.debug("Max value: %s" % str(y_b.max()))

        else:
            """
            Linear BOLD model equations.
            Page 391. Eq. (13) bottom in [Stephan2007]_ 
            """
            y_bold = numpy.array(self.V0 * ((k1 + k2) * (1. - q) + (k3 - k2) *
                                            (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]

        sample_period = self.dt  # (s); the BOLD time axis t_int advances in steps of dt

        bold_signal = time_series.TimeSeriesRegion(data=y_b,
                                                   time=t_int,
                                                   sample_period=sample_period,
                                                   sample_period_unit='s',
                                                   use_storage=False)

        return bold_signal

    def compute_derived_parameters(self):
        """
        Compute derived parameters :math:`k_1`, :math:`k_2` and :math:`k_3`.
        """

        if not self.RBM:
            """
            Classical BOLD Model Coefficients [Obata2004]_
            Page 389 in [Stephan2007]_, Eq. (3)
            """
            k1 = 7. * self.E0
            k2 = 2. * self.E0
            k3 = 1. - self.epsilon
        else:
            """
            Revised BOLD Model Coefficients.
            Generalized BOLD signal model.
            Page 400 in [Stephan2007]_, Eq. (12)
            """
            k1 = 4.3 * self.nu_0 * self.E0 * self.TE
            k2 = self.epsilon * self.r_0 * self.E0 * self.TE
            k3 = 1 - self.epsilon

        return numpy.array([k1, k2, k3])

    def input_transformation(self, time_series, mode):
        """
        Perform an operation on the input time-series.
        """

        LOG.debug("Computing: %s on the input time series" % str(mode))

        if mode == "none":
            ts = time_series.data[:, 0, :, :]
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        elif mode == "abs_diff":
            ts = abs(numpy.diff(time_series.data, axis=0))
            t_int = (time_series.time[1:] -
                     time_series.time[0:-1]) / 1000.  # (s)

        elif mode == "sum":
            ts = numpy.sum(time_series.data, axis=1)
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        else:
            LOG.error("Bad operation/transformation mode, must be one of:")
            LOG.error("('abs_diff', 'sum', 'none')")
            raise Exception("Bad transformation mode")

        return ts, t_int

    def balloon_dfun(self, state_variables, neural_input, local_coupling=0.0):
        r"""
        The Balloon model equations. See Eqs. (4-10) in [Stephan2007]_
        .. math::
                \frac{ds}{dt} &= x - \kappa\,s - \gamma \,(f-1) \\
                \frac{df}{dt} &= s \\
                \frac{dv}{dt} &= \frac{1}{\tau_o} \, (f - v^{1/\alpha})\\
                \frac{dq}{dt} &= \frac{1}{\tau_o}(f \, \frac{1-(1-E_0)^{1/\alpha}}{E_0} - v^{&/\alpha} \frac{q}{v})\\
                \kappa &= \frac{1}{\tau_s}\\
                \gamma &= \frac{1}{\tau_f}
        """

        s = state_variables[0, :]
        f = state_variables[1, :]
        v = state_variables[2, :]
        q = state_variables[3, :]

        x = neural_input[0, :]

        ds = x - (1. / self.tau_s) * s - (1. / self.tau_f) * (f - 1)
        df = s
        dv = (1. / self.tau_o) * (f - v**(1. / self.alpha))
        dq = (1. / self.tau_o) * ((f * (1. -
                                        (1. - self.E0)**(1. / f)) / self.E0) -
                                  (v**(1. / self.alpha)) * (q / v))

        return numpy.array([ds, df, dv, dq])

    def result_shape(self, input_shape):
        """Returns the shape of the main result of fmri balloon ..."""
        result_shape = (input_shape[0], input_shape[1], input_shape[2],
                        input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of the balloon model.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the balloon
        model. Currently there are no derived attributes, so this equals the
        main result size.
        """
        extend_size = self.result_size(input_shape)
        return extend_size
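
Two quick numeric checks of the model above, using the trait defaults (standalone arithmetic, not part of TVB). First, the revised (RBM) coefficients from compute_derived_parameters; second, that balloon_dfun has a resting fixed point at s=0, f=v=q=1 with zero input:

import numpy

# Revised coefficients, Eq. (12) in [Stephan2007]_, with the defaults
# nu_0=40.3, E0=0.4, TE=0.04, r_0=25., epsilon=0.5:
nu_0, E0, TE, r_0, epsilon = 40.3, 0.4, 0.04, 25., 0.5
k1 = 4.3 * nu_0 * E0 * TE     # ~2.773
k2 = epsilon * r_0 * E0 * TE  # 0.2
k3 = 1 - epsilon              # 0.5

# Resting fixed point: every derivative vanishes for s=0, f=v=q=1, x=0.
tau_s, tau_f, tau_o, alpha = 0.65, 0.41, 0.98, 0.32
s, f, v, q, x = 0., 1., 1., 1., 0.
ds = x - (1. / tau_s) * s - (1. / tau_f) * (f - 1)  # 0.0
dv = (1. / tau_o) * (f - v ** (1. / alpha))         # 0.0
dq = (1. / tau_o) * ((f * (1. - (1. - E0) ** (1. / f)) / E0)
                     - (v ** (1. / alpha)) * (q / v))  # 0.0
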
Example 13
class FFT(core.Type):
    """
    A class for calculating the FFT of a TimeSeries object of TVB and returning
    a FourierSpectrum object. A segment length and windowing function can be
    optionally specified. By default the time series is segmented into 1 second
    blocks and no windowing function is applied.
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The TimeSeries to which the FFT is to be applied.""",
        order=1)

    segment_length = basic.Float(
        label="Segment(window) length (ms)",
        default=1000.0,
        required=False,
        doc="""The TimeSeries can be segmented into equally sized blocks
            (overlapping if necessary). The segment length determines the
            frequency resolution of the resulting power spectra -- longer
            windows produce finer frequency resolution.""",
        order=2)

    window_function = basic.Enumerate(
        label="Windowing function",
        options=SUPPORTED_WINDOWING_FUNCTIONS,
        default=None,
        required=False,
        select_multiple=False,
        doc="""Windowing functions can be applied before the FFT is performed.
             Default is None, possibilities are: 'hamming'; 'bartlett';
            'blackman'; and 'hanning'. See, numpy.<function_name>.""",
        order=3)

    detrend = basic.Bool(label="Detrending",
                         default=True,
                         required=False,
                         doc="""Detrending is not always appropriate.
            Default is True, False means no detrending is performed on the time series""",
                         order=4)

    def evaluate(self):
        """
        Calculate the FFT of time_series broken into segments of length
        segment_length and filtered by window_function.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        #Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = numpy.ceil(self.segment_length /
                                  self.time_series.sample_period)
            overlap = (seg_tpts * nseg - tpts) / (nseg - 1.0)
            starts = [
                max(seg * (seg_tpts - overlap), 0) for seg in range(nseg)
            ]
            segments = [
                self.time_series.data[int(start):int(start) + int(seg_tpts)]
                for start in starts
            ]
            segments = [
                segment[:, :, :, :, numpy.newaxis] for segment in segments
            ]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            self.segment_length = time_series_length
            time_series = self.time_series.data[:, :, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]

        LOG.debug("Segment length being used is: %s" % self.segment_length)

        #Base-line correct the segmented time-series
        if self.detrend:
            time_series = sp_signal.detrend(time_series, axis=0)
            util.log_debug_array(LOG, time_series, "time_series")

        #Apply windowing function
        #Enumerate basic type wraps single values into a list
        if self.window_function != [None]:
            window_function = SUPPORTED_WINDOWING_FUNCTIONS[
                self.window_function[0]]
            window_mask = numpy.reshape(window_function(int(seg_tpts)),
                                        (int(seg_tpts), 1, 1, 1, 1))
            time_series = time_series * window_mask

        #Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        nfreq = result.shape[0] / 2
        result = result[1:nfreq + 1, :]
        util.log_debug_array(LOG, result, "result")

        spectra = spectral.FourierSpectrum(source=self.time_series,
                                           segment_length=self.segment_length,
                                           array_data=result,
                                           use_storage=False)

        return spectra

    def result_shape(self, input_shape, segment_length, sample_period):
        """Returns the shape of the main result (complex array) of the FFT."""
        freq_len = (segment_length / sample_period) / 2.0
        freq_len = int(min((input_shape[0], freq_len)))
        nseg = max(
            (1,
             int(numpy.ceil(input_shape[0] * sample_period / segment_length))))
        result_shape = (freq_len, input_shape[1], input_shape[2],
                        input_shape[3], nseg)
        return result_shape

    def result_size(self, input_shape, segment_length, sample_period):
        """
        Returns the storage size in Bytes of the main result (complex array) of 
        the FFT.
        """
        result_size = numpy.prod(
            self.result_shape(input_shape, segment_length,
                              sample_period)) * 2.0 * 8.0  # complex*Bytes
        return result_size

    def extended_result_size(self, input_shape, segment_length, sample_period):
        """
        Returns the storage size in Bytes of the extended result of the FFT. 
        That is, it includes storage of the evaluated FourierSpectrum attributes
        such as power, phase, amplitude, etc.
        """
        result_shape = self.result_shape(input_shape, segment_length,
                                         sample_period)
        result_size = self.result_size(input_shape, segment_length,
                                       sample_period)
        extend_size = result_size  # Main array
        extend_size += 0.5 * result_size  # Amplitude
        extend_size += 0.5 * result_size  # Phase
        extend_size += 0.5 * result_size  # Power
        extend_size += 0.5 * result_size / result_shape[4]  # Average power
        extend_size += 0.5 * result_size / result_shape[
            4]  # Normalised Average power
        extend_size += result_shape[0] * 8.0  # Frequency
        return extend_size
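
The segment_length doc above states that longer windows give finer frequency resolution; the resolution is 1 / (segment length). A minimal numpy check, assuming a 1 ms sample period:

import numpy

sample_period = 1.0  # ms, an assumed value
for segment_length in (500.0, 1000.0, 2000.0):  # ms
    seg_tpts = int(segment_length / sample_period)
    freqs = numpy.fft.fftfreq(seg_tpts, d=sample_period / 1000.0)  # Hz
    print("%.0f ms -> %.1f Hz resolution" % (segment_length, freqs[1]))
# 500 ms -> 2.0 Hz, 1000 ms -> 1.0 Hz, 2000 ms -> 0.5 Hz
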
Example 14
class TimeSeries(types_mapped.MappedType):
    """
    Base time-series dataType.
    """

    title = basic.String

    data = arrays.FloatArray(
        label="Time-series data",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc="""An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions""")

    nr_dimensions = basic.Integer(
        label="Number of dimensions in the timeseries",
        default=4)

    length_1d, length_2d, length_3d, length_4d = [basic.Integer] * 4

    labels_ordering = basic.List(
        default=["Time", "State Variable", "Space", "Mode"],
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = basic.Dict(
        default={},
        label="Specific labels for each dimension for the data stored in this timeseries.",
        doc=""" A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension] }""")

    time = arrays.FloatArray(
        file_storage=core.FILE_STORAGE_EXPAND,
        label="Time-series time",
        required=False,
        doc="""An array of time values for the time-series, with a shape of [tpts,].
        This is 'time' as returned by the simulator's monitors.""")

    start_time = basic.Float(label="Start Time:")

    sample_period = basic.Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g. sec, msec, usec, ...)
    sample_period_unit = basic.String(
        label="Sample Period Measure Unit",
        default="ms")

    sample_rate = basic.Float(
        label="Sample rate",
        doc="""The sample rate of the timeseries""")

    has_surface_mapping = basic.Bool(default=True)
    has_volume_mapping = basic.Bool(default=False)

    def configure(self):
        """
        After populating a few fields, compute the rest of the fields.
        """
        super(TimeSeries, self).configure()
        data_shape = self.read_data_shape()
        self.nr_dimensions = len(data_shape)
        self.sample_rate = 1.0 / self.sample_period

        for i in range(min(self.nr_dimensions, 4)):
            setattr(self, 'length_%dd' % (i + 1), int(data_shape[i]))

    def read_data_shape(self):
        """
        Expose shape read on field data.
        """
        try:
            return self.get_data_shape('data')
        except exceptions.TVBException:
            self.logger.exception("Could not read data shape for TS!")
            raise exceptions.TVBException("Invalid empty TimeSeries!")

    def read_data_slice(self, data_slice):
        """
        Expose chunked-data access.
        """
        return self.get_data('data', data_slice)

    def read_time_page(self, current_page, page_size, max_size=None):
        """
        Compute time for current page.
        :param current_page: Starting from 0
        """
        current_page = int(current_page)
        page_size = int(page_size)

        if max_size is None:
            max_size = page_size
        else:
            max_size = int(max_size)

        page_real_size = page_size * self.sample_period
        start_time = self.start_time + current_page * page_real_size
        end_time = start_time + min(page_real_size, max_size * self.sample_period)

        return numpy.arange(start_time, end_time, self.sample_period)

    def read_channels_page(self, from_idx, to_idx, step=None, specific_slices=None, channels_list=None):
        """
        Read and return only the data page for the specified channels list.

        :param from_idx: the starting time idx from which to read data
        :param to_idx: the end time idx up until to which you read data
        :param step: increments in which to read the data. Optional, default to 1.
        :param specific_slices: optional parameter. If speficied slices the data accordingly.
        :param channels_list: the list of channels for which we want data
        """
        if channels_list:
            channels_list = json.loads(channels_list)
            for i in range(len(channels_list)):
                channels_list[i] = int(channels_list[i])

        if channels_list:
            channel_slice = tuple(channels_list)
        else:
            channel_slice = slice(None)

        data_page = self.read_data_page(from_idx, to_idx, step, specific_slices)
        # This is just a 1D array like in the case of Global Average monitor.
        # No need for the channels list
        if len(data_page.shape) == 1:
            return data_page.reshape(data_page.shape[0], 1)
        else:
            return data_page[:, channel_slice]

    def read_data_page(self, from_idx, to_idx, step=None, specific_slices=None):
        """
        Retrieve one page of data (paging done based on time).
        """
        from_idx, to_idx = int(from_idx), int(to_idx)

        if isinstance(specific_slices, basestring):
            specific_slices = json.loads(specific_slices)
        if step is None:
            step = 1
        else:
            step = int(step)

        slices = []
        overall_shape = self.read_data_shape()
        for i in range(len(overall_shape)):
            if i == 0:
                # Time slice
                slices.append(
                    slice(from_idx, min(to_idx, overall_shape[0]), step))
                continue
            if i == 2:
                # Read full of the main_dimension (space for the simulator)
                slices.append(slice(overall_shape[i]))
                continue
            if specific_slices is None:
                slices.append(slice(0, 1))
            else:
                slices.append(slice(specific_slices[i], min(specific_slices[i] + 1, overall_shape[i]), 1))

        data = self.read_data_slice(tuple(slices))
        if len(data) == 1:
            # Do not allow the time dimension to get squeezed; a 2D result
            # needs to come out of this method.
            data = data.squeeze()
            data = data.reshape((1, len(data)))
        else:
            data = data.squeeze()

        return data

    def read_data_page_split(self, from_idx, to_idx, step=None, specific_slices=None):
        """
        No Split needed in case of basic TS (sensors and region level)
        """
        return self.read_data_page(from_idx, to_idx, step, specific_slices)


    def write_time_slice(self, partial_result):
        """
        Append a new value to the ``time`` attribute.
        """
        self.store_data_chunk("time", partial_result, grow_dimension=0, close_file=False)

    def write_data_slice(self, partial_result, grow_dimension=0):
        """
        Append a chunk of time-series data to the ``data`` attribute.
        """
        self.store_data_chunk("data", partial_result, grow_dimension=grow_dimension, close_file=False)

    def get_min_max_values(self):
        """
        Retrieve the minimum and maximum values from the metadata.
        :returns: (minimum_value, maximum_value)
        """
        metadata = self.get_metadata('data')
        return metadata[self.METADATA_ARRAY_MIN], metadata[self.METADATA_ARRAY_MAX]

    def get_space_labels(self):
        """
        It assumes that we want to select in the 3'rd dimension,
        and generates labels for each point in that dimension.
        Subclasses are more specific.
        :return: An array of strings.
        """
        if self.nr_dimensions > 2:
            return ['signal-%d' % i for i in range(self._length_3d)]
        else:
            return []

    def get_grouped_space_labels(self):
        """
        :return: A list of label groups. A label group is a tuple (name, [(label_idx, label)...]).
                 Default all labels in a group named ''
        """
        return [('', list(enumerate(self.get_space_labels())))]

    def get_default_selection(self):
        """
        :return: The measure point indices that have to be shown by default. By default show all.
        """
        return range(len(self.get_space_labels()))

    def get_measure_points_selection_gid(self):
        """
        :return: a datatype gid with which to obtain al valid measure point selection for this time series
                 We have to decide if the default should be all selections or none
        """
        return ''

    @staticmethod
    def accepted_filters():
        filters = types_mapped.MappedType.accepted_filters()
        filters.update({'datatype_class._nr_dimensions': {'type': 'int', 'display': 'No of Dimensions',
                                                          'operations': ['==', '<', '>']},
                        'datatype_class._sample_period': {'type': 'float', 'display': 'Sample Period',
                                                          'operations': ['==', '<', '>']},
                        'datatype_class._sample_rate': {'type': 'float', 'display': 'Sample Rate',
                                                        'operations': ['==', '<', '>']},
                        'datatype_class._title': {'type': 'string', 'display': 'Title',
                                                  'operations': ['==', '!=', 'like']}})
        return filters

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {"Time-series type": self.__class__.__name__,
                   "Time-series name": self.title,
                   "Dimensions": self.labels_ordering,
                   "Time units": self.sample_period_unit,
                   "Sample period": self.sample_period,
                   "Length": self.sample_period * self.get_data_shape('data')[0]}
        summary.update(self.get_info_about_array('data'))
        return summary
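
The paging arithmetic in read_time_page above maps a page index to a vector of time values. The same computation as a standalone sketch (values assumed for illustration):

import numpy

start_time, sample_period = 0.0, 1.0  # ms, assumed
current_page, page_size = 2, 500      # read the third page of 500 samples
page_real_size = page_size * sample_period
page_start = start_time + current_page * page_real_size
page_end = page_start + page_real_size
time = numpy.arange(page_start, page_end, sample_period)  # 1000.0 .. 1499.0
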
Example 15
class PreSigmoidal(Coupling):
    r"""
    Provides a pre-summation sigmoidal coupling function with a static or dynamic
    and local or global threshold.

    .. math::
        H * (Q + \tanh(G * (P*x - \theta)))

    When the threshold is dynamic it is given by the second state variable, and
    the coupling then returns the node's direct output as the input to that
    threshold variable.

    """

    H = arrays.FloatArray(
        label="H",
        default=numpy.array([0.5,]),
        range=basic.Range(lo=-100.0, hi=100.0, step=1.0),
        doc="Global Factor.",
        order=1)

    Q = arrays.FloatArray(
        label="Q",
        default=numpy.array([1.,]),
        range=basic.Range(lo=-100.0, hi=100.0, step=1.0),
        doc="Average.",
        order=2)

    G = arrays.FloatArray(
        label="G",
        default=numpy.array([60.,]),
        range=basic.Range(lo=-1000.0, hi=1000.0, step=1.),
        doc="Gain.",
        order=3)

    P = arrays.FloatArray(
        label="P",
        default=numpy.array([1.,]),
        range=basic.Range(lo=-100.0, hi=100.0, step=0.01),
        doc="Excitation-Inhibition ratio.",
        order=4)

    theta = arrays.FloatArray(
        label=":math:`\\theta`",
        default=numpy.array([0.5,]),
        range=basic.Range(lo=-100.0, hi=100.0, step=0.01),
        doc="Threshold.",
        order=5)

    dynamic = basic.Bool(
        label="Dynamic",
        default=True,
        doc="Use dynamic threshold (otherwise static).",
        order=6)

    globalT = basic.Bool(
        label=":math:`global_{\\theta}`",
        default=False,
        doc="Use global threshold (otherwise local).",
        order=7)

    def __str__(self):
        return simple_gen_astr(self, 'H Q G P theta dynamic globalT')

    def configure(self):
        """Set the right indirect call."""
        super(PreSigmoidal, self).configure()
        self.sliceT = 0 if self.globalT else slice(None)

    # override __call__ directly simpler than pre/post form
    # TODO check use of arrays dims here
    def __call__(self, step, history, na=numpy.newaxis):
        g_ij = history.es_weights
        x_i, x_j = history.query(step)
        if self.dynamic:
            _ = (self.P * x_j[:,0] - x_j[:,1,self.sliceT])[:,na]
        else:
            _ = self.P * x_j - self.theta[self.sliceT,na]
        A_j = self.H * (self.Q + numpy.tanh(self.G * _))
        if self.dynamic:
            c_0 = (g_ij[:,0] * A_j[:,0]).sum(axis=0)
            c_1 = numpy.diag(A_j[:,0,:,0])[:, na]
            if self.globalT:
                c_1[:] = c_1.mean()
            return numpy.array([c_0, c_1])
        else: # static threshold
            return (g_ij.transpose((2, 1, 0, 3)) * A_j).sum(axis=0)
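
A quick numeric sketch of the pre-summation sigmoid H * (Q + tanh(G * (P*x - theta))) defined above, evaluated with the trait defaults outside the Coupling machinery:

import numpy

H, Q, G, P, theta = 0.5, 1.0, 60.0, 1.0, 0.5  # trait defaults
x = numpy.linspace(0.0, 1.0, 5)
A = H * (Q + numpy.tanh(G * (P * x - theta)))
# ~0 well below the threshold, 0.5 at x = theta, ~1 well above it
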
Example 16
class NodeComplexCoherence(core.Type):
    """
    A class for calculating the FFT of a TimeSeries and returning
    a ComplexCoherenceSpectrum datatype.
   
  
    This algorithm is based on the matlab function data2cs_event.m written by Guido Nolte:
        .. [Freyer_2012] Freyer, F.; Reinacher, M.; Nolte, G.; Dinse, H. R. and
            Ritter, P. *Repetitive tactile stimulation changes resting-state
            functional connectivity-implications for treatment of sensorimotor decline*.
            Front Hum Neurosci, Bernstein Focus State Dependencies of Learning and
            Bernstein Center for Computational Neuroscience Berlin, Germany., 2012, 6, 144
    
    Input:
    Originally the input could be 2D (tpts x nodes/channels), and it was possible
    to give a 3D array (e.g., tpts x nodes/channels x trials) via the segment_length
    attribute.
    The current TVB implementation can handle 4D or 2D TimeSeries datatypes.
    Be warned: a 4D TimeSeries will be averaged and squeezed.
    
    Output: (main arrays)
    - the cross-spectrum
    - the complex coherence, from which the imaginary part can be extracted 
        
    By default the time series is segmented into 1 second `epoch` blocks and 0.5
    second 50% overlapping `segments` to which a Hanning function is applied. 
    
    """

    time_series = TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries for which the CrossCoherence and ComplexCoherence
        is to be computed.""")

    epoch_length = basic.Float(
        label="Epoch length [ms]",
        default=1000.0,
        order=-1,
        required=False,
        doc="""In general for lengthy EEG recordings (~30 min), the timeseries 
        are divided into equally sized segments (~ 20-40s). These contain the 
        event that is to be characterized by means of the cross coherence. 
        Additionally each epoch block will be further divided into segments to 
        which the FFT will be applied.""")

    segment_length = basic.Float(
        label="Segment length [ms]",
        default=500.0,
        order=-1,
        required=False,
        doc="""The timeseries can be segmented into equally sized blocks
            (overlapping if necessary). The segement length determines the
            frequency resolution of the resulting power spectra -- longer
            windows produce finer frequency resolution. """)

    segment_shift = basic.Float(
        label="Segment shift [ms]",
        default=250.0,
        required=False,
        order=-1,
        doc="""Time length by which neighboring segments are shifted. e.g.
                `segment shift` = `segment_length` / 2 means 50% overlapping 
                segments.""")

    window_function = basic.String(
        label="Windowing function",
        default='hanning',
        required=False,
        order=-1,
        doc="""Windowing functions can be applied before the FFT is performed.
             Default is `hanning`, possibilities are: 'hamming'; 'bartlett';
            'blackman'; and 'hanning'. See, numpy.<function_name>.""")

    average_segments = basic.Bool(
        label="Average across segments",
        default=True,
        required=False,
        order=-1,
        doc="""Flag. If `True`, compute the mean Cross Spectrum across 
                segments.""")

    subtract_epoch_average = basic.Bool(
        label="Subtract average across epochs",
        default=True,
        required=False,
        order=-1,
        doc="""Flag. If `True` and if the number of epochs is > 1, you can 
                optionally subtract the mean across epochs before computing the 
                complex coherence.""")

    zeropad = basic.Integer(
        label="Zeropadding",
        default=0,
        required=False,
        order=-1,
        doc="""Adds `n` zeros at the end of each segment and at the end 
        of window_function. It is not yet functional.""")

    detrend_ts = basic.Bool(
        label="Detrend time series",
        default=False,
        required=False,
        order=-1,
        doc="""Flag. If `True` removes linear trend along the time dimension 
                before applying FFT.""")

    max_freq = basic.Float(
        label="Maximum frequency",
        default=1024.0,
        order=-1,
        required=False,
        doc="""Maximum frequency points (e.g. 32., 64., 128.) represented in 
                the output. Default is segment_length / 2 + 1.""")

    npat = basic.Float(
        label="dummy variable",
        default=1.0,
        required=False,
        order=-1,
        doc="""This attribute appears to be related to an input projection
            matrix, which is not yet implemented.""")

    def evaluate(self):
        """
        Calculate the FFT, Cross Coherence and Complex Coherence of time_series 
        broken into (possibly) epochs and segments of length `epoch_length` and 
        `segment_length` respectively, filtered by `window_function`.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)
        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        if len(self.time_series.data.shape) > 2:
            time_series_data = numpy.squeeze(
                (self.time_series.data.mean(axis=-1)).mean(axis=1))
        else:
            time_series_data = self.time_series.data

        #nchan = time_series_data.shape[1]

        #NOTE: if we get a projection matrix ... then ...
        #if self.npat > 1:
        #    data = data * proj
        #    nchan = self.npat

        #Divide time-series into epochs, no overlapping
        if self.epoch_length > 0.0:
            nepochs = int(numpy.floor(time_series_length / self.epoch_length))
            epoch_tpts = int(self.epoch_length / self.time_series.sample_period)
            time_series_length = self.epoch_length
            tpts = epoch_tpts
        else:
            self.epoch_length = time_series_length
            nepochs = int(numpy.ceil(time_series_length / self.epoch_length))
            epoch_tpts = tpts  # single epoch spanning the whole time series

        #Segment time-series, overlapping if necessary
        nseg = int(numpy.floor(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(self.segment_length / self.time_series.sample_period)
            seg_shift_tpts = int(self.segment_shift / self.time_series.sample_period)
            nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)
        else:
            self.segment_length = time_series_length
            seg_tpts = time_series_data.shape[0]
            seg_shift_tpts = seg_tpts  # single segment; the shift is unused

        # Frequency vectors
        freqs = numpy.fft.fftfreq(seg_tpts)
        nfreq = int(numpy.min(
            [self.max_freq,
             numpy.floor((seg_tpts + self.zeropad) / 2.0) + 1]))
        freqs = freqs[0:nfreq] * (1.0 / self.time_series.sample_period)

        result_shape, av_result_shape = self.result_shape(
            self.time_series.data.shape, self.max_freq, self.epoch_length,
            self.segment_length, self.segment_shift,
            self.time_series.sample_period, self.zeropad,
            self.average_segments)

        cs = numpy.zeros(result_shape, dtype=numpy.complex128)
        av = numpy.matrix(numpy.zeros(av_result_shape, dtype=numpy.complex128))
        coh = numpy.zeros(result_shape, dtype=numpy.complex128)

        # NOTE: result for individual epochs are kept only if npat > 1. Skipping ...
        #if self.npat > 1:
        #    if not self.average_segments:
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #        av = numpy.zeros((nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #    else:
        #        av = numpy.zeros((nchan, nfreq, nepochs), dtype=numpy.complex128)
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs), dtype=numpy.complex128)

        #Apply windowing function
        if self.window_function is not None:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                LOG.error("Windowing function is: %s" % self.window_function)
                LOG.error("Must be in: %s" %
                          str(SUPPORTED_WINDOWING_FUNCTIONS))

            window_function = getattr(numpy, self.window_function)
            win = window_function(seg_tpts)
            window_mask = (numpy.kron(
                numpy.ones((time_series_data.shape[1], 1)), win)).T

        nave = 0

        for j in numpy.arange(nepochs):
            data = time_series_data[j * epoch_tpts:(j + 1) * epoch_tpts, :]

            for i in numpy.arange(nseg):  #average over all segments;
                time_series = data[i * seg_shift_tpts:i * seg_shift_tpts +
                                   seg_tpts, :]

                if self.detrend_ts:
                    time_series = sp_signal.detrend(time_series, axis=0)

                datalocfft = numpy.fft.fft(time_series * window_mask, axis=0)
                datalocfft = numpy.matrix(datalocfft)

                for f in numpy.arange(nfreq):  # for all frequencies
                    # Outer product of the segment spectrum with itself, plus
                    # the conjugated spectrum, shared by all four cases below.
                    outer = numpy.conjugate(datalocfft[f, :].conj().T *
                                            datalocfft[f, :])
                    avg = numpy.conjugate(datalocfft[f, :].conj().T)
                    if self.npat == 1:
                        if not self.average_segments:
                            cs[:, :, f, i] += outer
                            av[:, f, i] += avg
                        else:
                            cs[:, :, f] += outer
                            av[:, f] += avg
                    else:
                        if not self.average_segments:
                            cs[:, :, f, j, i] = outer
                            av[:, f, j, i] = avg
                        else:
                            cs[:, :, f, j] += outer
                            av[:, f, j] += avg
                del datalocfft

            nave += 1.0

        # End of FORs; normalize by the number of averaged terms (epochs, and
        # additionally segments when average_segments is set)
        if self.average_segments:
            nave = nave * nseg
        cs = cs / nave
        av = av / nave

        # Subtract average
        if self.subtract_epoch_average:
            for f in numpy.arange(nfreq):
                if self.npat == 1:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            cs[:, :, f, i] -= av[:, f, i] * \
                                              av[:, f, i].conj().T
                    else:
                        cs[:, :, f] -= av[:, f] * av[:, f].conj().T
                else:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            for j in numpy.arange(nepochs):
                                cs[:, :, f, j, i] -= av[:, f, j, i] * \
                                                     av[:, f, j, i].conj().T
                    else:
                        for j in numpy.arange(nepochs):
                            cs[:, :, f, j] -= av[:, f, j] * \
                                              av[:, f, j].conj().T

        # Compute complex coherence: coh_ij = cs_ij / sqrt(cs_ii * cs_jj)
        ndim = len(cs.shape)
        if ndim == 3:
            for i in numpy.arange(cs.shape[2]):
                temp = numpy.matrix(cs[:, :, i])
                coh[:, :, i] = cs[:, :, i] / numpy.sqrt(
                    temp.diagonal().conj().T * temp.diagonal())
        elif ndim == 4:
            for i in numpy.arange(cs.shape[2]):
                for j in numpy.arange(cs.shape[3]):
                    temp = numpy.matrix(numpy.squeeze(cs[:, :, i, j]))
                    coh[:, :, i, j] = temp / numpy.sqrt(
                        temp.diagonal().conj().T * temp.diagonal())

        util.log_debug_array(LOG, cs, "result")
        spectra = spectral.ComplexCoherenceSpectrum(
            source=self.time_series,
            array_data=coh,
            cross_spectrum=cs,
            #                              frequency = freqs,
            epoch_length=self.epoch_length,
            segment_length=self.segment_length,
            windowing_function=self.window_function,
            #                             fft_points = seg_tpts,
            use_storage=False)
        return spectra

    @staticmethod
    def result_shape(input_shape, max_freq, epoch_length, segment_length,
                     segment_shift, sample_period, zeropad, average_segments):
        """
        Returns the shape of the main result and the average over epochs
        """
        # The check below matters only if the input could actually be a 2D
        # timeseries; for the standard 4D shape the channel count is axis 2.
        nchan = input_shape[2] if len(input_shape) > 2 else input_shape[1]
        seg_tpts = segment_length / sample_period
        seg_shift_tpts = segment_shift / sample_period
        tpts = (epoch_length / sample_period) if epoch_length > 0.0 \
               else input_shape[0]
        nfreq = int(numpy.min(
            [max_freq, numpy.floor((seg_tpts + zeropad) / 2.0) + 1]))
        #nep   = int(numpy.floor(input_shape[0] / epoch_length))
        nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)

        if not average_segments:
            result_shape = (nchan, nchan, nfreq, nseg)
            av_result_shape = (nchan, nfreq, nseg)
        else:
            result_shape = (nchan, nchan, nfreq)
            av_result_shape = (nchan, nfreq)

        return [result_shape, av_result_shape]
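
        # Worked example (hypothetical numbers, not from the source): a 4D
        # series of 74 channels sampled every 0.5 ms, analysed with
        # epoch_length=1000.0, segment_length=500.0, segment_shift=250.0,
        # zeropad=0, max_freq=1024.0 and average_segments=True gives:
        #   seg_tpts        = 500.0 / 0.5 = 1000  (seg_shift_tpts = 500)
        #   tpts            = 1000.0 / 0.5 = 2000
        #   nfreq           = min(1024, floor(1000 / 2) + 1) = 501
        #   nseg            = floor((2000 - 1000) / 500) + 1 = 3
        #   result_shape    = (74, 74, 501), av_result_shape = (74, 501)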

    def result_size(self, input_shape, max_freq, epoch_length, segment_length,
                    segment_shift, sample_period, zeropad, average_segments):
        """
        Returns the storage size in Bytes of the main result (complex array) of 
        the ComplexCoherence
        """
        result_size = numpy.prod(
            self.result_shape(input_shape, max_freq, epoch_length,
                              segment_length, segment_shift, sample_period,
                              zeropad, average_segments)[0]
        ) * 2.0 * 8.0  # complex128: 2 floats * 8 bytes each
        return result_size
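
        # With the worked example above, the main result holds 74 * 74 * 501
        # complex128 values: 74 * 74 * 501 * 16 bytes ~= 43.9 MB for the cross
        # spectrum alone; the extended result below roughly doubles this
        # (coherence + cross spectrum) plus the frequency axis and two scalars.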

    def extended_result_size(self, input_shape, max_freq, epoch_length,
                             segment_length, segment_shift, sample_period,
                             zeropad, average_segments):
        """
        Returns the storage size in Bytes of the extended result of the ComplexCoherence. 
        That is, it includes storage of the evaluated ComplexCoherence attributes
        such as ...
        """
        result_shape = self.result_shape(input_shape, max_freq, epoch_length,
                                         segment_length, segment_shift,
                                         sample_period, zeropad,
                                         average_segments)[0]
        result_size = self.result_size(input_shape, max_freq, epoch_length,
                                       segment_length, segment_shift,
                                       sample_period, zeropad,
                                       average_segments)
        extend_size = result_size * 2.0  #Main arrays: cross spectrum and complex coherence
        extend_size = extend_size + result_shape[2] * 8.0  #Frequency
        extend_size = extend_size + 8.0  # Epoch length
        extend_size = extend_size + 8.0  # Segment length
        return extend_size
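
# For orientation, a minimal self-contained sketch (illustrative names only,
# not part of the class above) of the same estimator on a plain [tpts, nchan]
# array: segment the signal, FFT each segment, accumulate the cross-spectrum,
# then normalise by the auto-spectra, mirroring evaluate() above.
import numpy


def complex_coherence_sketch(ts, nseg=4):
    tpts, nchan = ts.shape
    seg_tpts = tpts // nseg
    nfreq = seg_tpts // 2 + 1
    cs = numpy.zeros((nchan, nchan, nfreq), dtype=numpy.complex128)
    for s in range(nseg):
        seg = ts[s * seg_tpts:(s + 1) * seg_tpts, :]
        spec = numpy.fft.fft(seg, axis=0)[:nfreq, :]  # [nfreq, nchan]
        for f in range(nfreq):
            x = spec[f, :]
            cs[:, :, f] += numpy.outer(x, x.conj())  # cross-spectrum at f
    cs /= nseg
    coh = numpy.empty_like(cs)
    for f in range(nfreq):
        # coh_ij = cs_ij / sqrt(cs_ii * cs_jj); diagonal entries become 1
        d = numpy.sqrt(numpy.abs(numpy.diag(cs[:, :, f])))
        coh[:, :, f] = cs[:, :, f] / numpy.outer(d, d)
    return coh


# Two noisy channels sharing a 10 Hz component show |coherence| near 1 there:
_rng = numpy.random.RandomState(0)
_t = numpy.arange(1024) / 256.0
_common = numpy.sin(2 * numpy.pi * 10.0 * _t)
_ts = numpy.column_stack([_common + 0.5 * _rng.randn(_t.size),
                          _common + 0.5 * _rng.randn(_t.size)])
_coh = complex_coherence_sketch(_ts)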
Example n. 17
class Sensors(MappedType):
    """
    Base Sensors class.
    All sensors have locations.
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = types_basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    labels = arrays.StringArray(label="Sensor labels")

    locations = arrays.PositionArray(label="Sensor locations")

    has_orientation = types_basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = types_basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")

    # Introduced to accommodate real sensor sets which include channels that
    # should be zero during simulation, e.g. ECG (heart), EOG, reference
    # gradiometers, etc.
    usable = arrays.BoolArray(
        required=False,
        label="Usable sensors",
        doc="The sensors in set which are used for signal data.")

    @classmethod
    def from_file(cls, source_file="eeg_brainstorm_65.txt", instance=None):

        if instance is None:
            result = cls()
        else:
            result = instance

        source_full_path = try_get_absolute_path("tvb_data.sensors",
                                                 source_file)
        reader = FileReader(source_full_path)

        result.labels = reader.read_array(dtype="string", use_cols=(0, ))
        result.locations = reader.read_array(use_cols=(1, 2, 3))

        return result

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """
        super(Sensors, self).configure()
        self.number_of_sensors = self.labels.shape[0]

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        summary = {
            "Sensor type": self.sensors_type,
            "Number of Sensors": self.number_of_sensors
        }
        return summary

    def sensors_to_surface(self, surface_to_map):
        """
        Map EEG sensors onto the head surface (skin-air).

        EEG sensor locations are typically only given on a unit sphere, that is,
        they are effectively only identified by their orientation with respect
        to a coordinate system. This method is used to map these unit vector
        sensor "locations" to a specific location on the surface of the skin.

        Assumes coordinate systems are aligned, i.e. common x,y,z and origin.

        """
        # Normalize sensor and vertex locations to unit vectors
        norm_sensors = numpy.sqrt(numpy.sum(self.locations**2, axis=1))
        unit_sensors = self.locations / norm_sensors[:, numpy.newaxis]
        norm_verts = numpy.sqrt(numpy.sum(surface_to_map.vertices**2, axis=1))
        unit_vertices = surface_to_map.vertices / norm_verts[:, numpy.newaxis]

        sensor_locations = numpy.zeros((self.number_of_sensors, 3))
        for k in xrange(self.number_of_sensors):
            # Find the surface vertex most closely aligned with current sensor.
            current_sensor = unit_sensors[k]
            alignment = numpy.dot(current_sensor, unit_vertices.T)
            one_ring = []

            while not one_ring:
                closest_vertex = alignment.argmax()
                # Get the set of triangles in the neighbourhood of that vertex.
                # NOTE: The intersection doesn't always fall within the 1-ring,
                #       so all triangles contained in the 2-ring are considered.
                one_ring = surface_to_map.vertex_neighbours[closest_vertex]
                if not one_ring:
                    alignment[closest_vertex] = min(alignment)

            local_tri = [surface_to_map.vertex_triangles[v] for v in one_ring]
            local_tri = list(set([tri for subar in local_tri
                                  for tri in subar]))

            # Calculate a parametrized plane line intersection [t,u,v] for the
            # set of local triangles, which are considered as defining a plane.
            tuv = numpy.zeros((len(local_tri), 3))
            for i, tri in enumerate(local_tri):
                edge_01 = (
                    surface_to_map.vertices[surface_to_map.triangles[tri, 0]] -
                    surface_to_map.vertices[surface_to_map.triangles[tri, 1]])
                edge_02 = (
                    surface_to_map.vertices[surface_to_map.triangles[tri, 0]] -
                    surface_to_map.vertices[surface_to_map.triangles[tri, 2]])
                see_mat = numpy.vstack((current_sensor, edge_01, edge_02))

                tuv[i] = numpy.linalg.solve(
                    see_mat.T,
                    surface_to_map.vertices[surface_to_map.triangles[tri,
                                                                     0]].T)

            # Find which line-plane intersection falls within its triangle by
            # requiring that u and v lie in [0, 1) and that u + v < 2.
            local_triangle_index = ((0 <= tuv[:, 1]) * (tuv[:, 1] < 1) *
                                    (0 <= tuv[:, 2]) * (tuv[:, 2] < 1) *
                                    (0 <= (tuv[:, 1] + tuv[:, 2])) *
                                    ((tuv[:, 1] + tuv[:, 2]) < 2)).nonzero()[0]

            if len(local_triangle_index) == 1:
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[
                    local_triangle_index[0], 0]

            elif len(local_triangle_index) < 1:
                # No triangle was found in proximity; fall back to the triangle
                # whose intersection comes closest to satisfying the condition.
                LOG.warning(
                    "Could not find a proper position on the given surface "
                    "for sensor %d (%s) with direction %s" %
                    (k, self.labels[k], str(self.locations[k])))
                distances = abs(tuv[:, 1] + tuv[:, 2])
                local_triangle_index = distances.argmin()
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[
                    local_triangle_index, 0]

            else:
                # More than one triangle was found in proximity. Pick the first.
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[
                    local_triangle_index[0], 0]

        return sensor_locations
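
# A hedged usage sketch: load the bundled EEG sensors and project them onto a
# head surface. `SkinAir` (a skin-air Surface subclass) and the exact behaviour
# of its configure() are assumed from elsewhere in this codebase; they are not
# defined in this listing.
from tvb.datatypes.surfaces import SkinAir  # assumed import path

skin = SkinAir.from_file()
skin.configure()  # assumed to prepare vertex_triangles / vertex_neighbours
eeg = Sensors.from_file("eeg_brainstorm_65.txt")
eeg.configure()   # sets number_of_sensors from the labels array
positions = eeg.sensors_to_surface(skin)  # shape: [number_of_sensors, 3]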