Example #1
class SensorsData(MappedType):
    """
    Base Sensors class.
    All sensors have locations. 
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    labels = arrays.StringArray(label="Sensor labels")

    locations = arrays.PositionArray(label="Sensor locations")

    has_orientation = basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")

    # introduced to accommodate real sensors sets which have sensors
    # that should be zero during simulation i.e. ECG (heart), EOG,
    # reference gradiometers, etc.
    usable = arrays.BoolArray(
        required=False,
        label="Usable sensors",
        doc="The sensors in set which are used for signal data.")
Example #2
class SensorsData(MappedType):
    """
    Base Sensors class.
    All sensors have locations. 
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    default = readers.File(
        folder_path="sensors",
        file_name='EEG_unit_vectors_BrainProducts_62.txt.bz2')

    labels = arrays.StringArray(label="Sensor labels",
                                console_default=default.read_data(
                                    usecols=(0, ),
                                    dtype="string",
                                    field="labels"))

    locations = arrays.PositionArray(label="Sensor locations",
                                     console_default=default.read_data(
                                         usecols=(1, 2, 3), field="locations"))

    has_orientation = basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")
Example #3
class RandomStream(core.Type):
    """
    This class provides the ability to create multiple random streams which can
    be independently seeded or set to an explicit state.

    .. #Currently there seems to be a clash between traits and autodoc, autodoc
    .. #can't find the methods of the class, the class specific names below get
    .. #us around this...
    .. automethod:: RandomStream.set_state
    .. automethod:: RandomStream.reset

    """
    _ui_name = "Random state"
    wraps = numpy.random.RandomState
    defaults = ((42,), {}) # for init wrapped value: wraps(*def[0], **def[1])

    init_seed = basic.Integer(
        label = "A random seed", 
        default = 42,
        doc = """A random seed used to initialise the state of an instance of
        numpy's RandomState.""")


    def configure(self):
        """
        Run base classes configure to setup traited attributes, then initialise
        the stream's state using ``init_seed``.
        """
        super(RandomStream, self).configure()
        self.reset()


    def __str__(self):
        """An informal, 'human readable', representation of a RandomStream."""
        informal = "RandomStream(init_seed=%s)" % str(self.init_seed)
        return informal


    def set_state(self, value):
        """
        Set the state of the random number stream based on a previously stored
        state. This is to enable consistent noise state on continuation from a
        previous simulation.

        """
        try:
            numpy.random.RandomState.set_state(self, state=value)
            LOG.info("%s: set with state %s" % (str(self), str(value)))
        except TypeError:
            msg = "%s: bad state, see numpy.random.set_state" % str(self)
            LOG.error(msg)
            raise TypeError(msg)


    def reset(self):
        """Reset the random stream to its initial state, using initial seed."""
        numpy.random.RandomState.__init__(self.value, seed=self.init_seed)
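A round-trip sketch of the intended use (assuming the `wraps` machinery exposes the wrapped RandomState as `self.value`, which is how `reset` above addresses it):

stream = RandomStream(init_seed=42)
stream.configure()                  # seeds the wrapped RandomState via reset()

saved = stream.value.get_state()    # snapshot of the underlying numpy state
first = stream.value.normal(size=4)

stream.value.set_state(saved)       # restore -> the next draws repeat exactly
again = stream.value.normal(size=4)
# first == again element-wise; set_state above wraps this same call so a
# simulation can continue from a stored noise state.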
Example #4
class TimeSeriesData(MappedType):
    """
    Base time-series dataType.
    """

    title = basic.String

    data = arrays.FloatArray(
        label="Time-series data",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc=
        """An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions"""
    )

    nr_dimensions = basic.Integer(label="Number of dimensions in timeseries",
                                  default=4)

    length_1d, length_2d, length_3d, length_4d = [basic.Integer] * 4

    labels_ordering = basic.List(
        default=["Time", "State Variable", "Space", "Mode"],
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = basic.Dict(
        default={},
        label=
        "Specific labels for each dimension for the data stored in this timeseries.",
        doc=
        """ A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension] }"""
    )
    ## TODO (for Stuart) : remove TimeLine and make sure the correct Period/start time is returned by different monitors in the simulator

    time = arrays.FloatArray(
        file_storage=core.FILE_STORAGE_EXPAND,
        label="Time-series time",
        required=False,
        doc=
        """An array of time values for the time-series, with a shape of [tpts,].
        This is 'time' as returned by the simulator's monitors.""")

    start_time = basic.Float(label="Start Time:")

    sample_period = basic.Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g sec, msec, usec, ...)
    sample_period_unit = basic.String(label="Sample Period Measure Unit",
                                      default="ms")

    sample_rate = basic.Float(label="Sample rate",
                              doc="""The sample rate of the timeseries""")

    has_surface_mapping = basic.Bool(default=True)
    has_volume_mapping = basic.Bool(default=False)
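The `labels_ordering` default fixes the 4D layout [Time, State Variable, Space, Mode]. A sketch of data honouring that convention (sizes are illustrative; the ms-to-Hz conversion assumes `sample_period_unit` keeps its "ms" default):

import numpy

tpts, nsvar, nspace, nmodes = 1000, 2, 76, 1      # illustrative sizes

ts = TimeSeriesData()
ts.data = numpy.zeros((tpts, nsvar, nspace, nmodes))
ts.nr_dimensions = ts.data.ndim                   # 4
ts.sample_period = 1.0                            # ms
ts.sample_rate = 1000.0 / ts.sample_period        # 1000 Hz
ts.time = numpy.arange(tpts) * ts.sample_period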
Example #5
class NodeCoherence(core.Type):
    "Adapter for cross-coherence algorithm(s)"

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the FFT is to be applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""Should be a power of 2...""")

    def evaluate(self):
        "Evaluate coherence on time series."
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)
        srate = self.time_series.sample_rate
        coh, freq = coherence(self.time_series.data, srate, nfft=self.nfft)
        util.log_debug_array(LOG, coh, "coherence")
        util.log_debug_array(LOG, freq, "freq")
        spec = spectral.CoherenceSpectrum(source=self.time_series,
                                          nfft=self.nfft,
                                          array_data=coh,
                                          frequency=freq,
                                          use_storage=False)
        return spec

    def result_shape(self, input_shape):
        """Returns the shape of the main result of NodeCoherence."""
        freq_len = self.nfft // 2 + 1
        freq_shape = (freq_len, )
        result_shape = (freq_len, input_shape[2], input_shape[2],
                        input_shape[1], input_shape[3])
        return [result_shape, freq_shape]

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of NodeCoherence.
        """
        # TODO This depends on input array dtype!
        result_size = numpy.sum([numpy.prod(shape) for shape in
                                 self.result_shape(input_shape)]) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the FFT.
        That is, it includes storage of the evaluated FourierSpectrum attributes
        such as power, phase, amplitude, etc.
        """
        extend_size = self.result_size(
            input_shape)  #Currently no derived attributes.
        return extend_size
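To make the size bookkeeping concrete: with the default nfft=256 and an input of shape (time, state-variables, nodes, modes) = (4000, 2, 76, 1), the arithmetic of result_shape and result_size works out as follows (float64, 8 bytes per value):

nfft = 256
freq_len = nfft // 2 + 1                            # 129 frequency bins
input_shape = (4000, 2, 76, 1)

result_shape = (freq_len, input_shape[2], input_shape[2],
                input_shape[1], input_shape[3])     # (129, 76, 76, 2, 1)

n_values = 129 * 76 * 76 * 2 * 1 + 129              # main result + freq vector
size_bytes = n_values * 8.0                         # 11922696.0, i.e. ~11.9 MB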
Example #6
class Internal_Class(MappedType):
    """ Dummy persisted class"""
    x = 5
    z = basic.Integer()
    j = basic.JSONType()

    class In_Internal_Class(object):
        """Internal of Internal class"""
        t = numpy.array(range(10))

    @property
    def y(self):
        return self.x
Example #7
class LookUpTableData(MappedType):
    """
    Lookup Tables for storing pre-computed functions.
    Specific table subclasses are implemented below.
    
    """
    
    _base_classes = ['LookUpTables']

    table = readers.Table(folder_path = "tables")
    
    equation = basic.String(
        label = "String representation of the precalculated function",
        doc = """A latex representation of the function whose values are stored 
            in the table, with the extra escaping needed for interpretation via sphinx.""")
    
    xmin = arrays.FloatArray(
        label = "x-min",
        console_default = table.read_dimension('min_max', 0, field = "xmin"), 
        doc = """Minimum value""")
        
    xmax = arrays.FloatArray(
        label = "x-max",
        console_default = table.read_dimension('min_max', 1, field = "xmax"), 
        doc = """Maximum value""")
    
    data = arrays.FloatArray(
        label = "data",
        console_default = table.read_dimension('f', field = "data"), 
        doc = """Tabulated values""")
    
    number_of_values = basic.Integer(
        label = "Number of values",
        default = 0,
        doc = """The number of values in the table """)
        
    df = arrays.FloatArray(
        label = "df",
        console_default = table.read_dimension('df', field = "df"), 
        doc = """.""")
     
    dx = arrays.FloatArray(
        label = "dx",
        default = numpy.array([]), 
        doc = """Tabulation step""")    
    
    invdx = arrays.FloatArray(
       label = "invdx",
       default = numpy.array([]),
       doc = """.""")
Example #8
class CoherenceSpectrum(arrays.MappedArray):
    """
    Result of a NodeCoherence Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the node_coherence is
            applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""NOTE: must be a power of 2""")

    frequency = arrays.FloatArray(label="Frequency")

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.configure_chunk_safe()

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Number of frequencies": self.frequency.shape[0],
            "Minimum frequency": self.frequency[0],
            "Maximum frequency": self.frequency[-1],
            "FFT length (time-points)": self.nfft
        }
        return summary

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=3,
                              close_file=False)
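A sketch of how write_data_slice is meant to be driven (`ts` and `partial_results` are hypothetical stand-ins for a source TimeSeries and an analyser yielding chunks; the grow_dimension=3 append matches the 4D coherence layout):

spectrum = CoherenceSpectrum(source=ts, nfft=256, use_storage=True)
for partial in partial_results:          # hypothetical chunk iterator
    spectrum.write_data_slice(partial)   # append along grow_dimension=3
spectrum.configure()                     # chunk-safe post-population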
Example #9
class BaseTimeseriesMetricAlgorithm(core.Type):
    """
    This is a base class for all metrics on timeSeries dataTypes.
    Metric means an algorithm computing a single value for an entire TimeSeries.

    """
    ### Make sure this "abstract" class does not get listed in UI.
    _base_classes = ['BaseTimeseriesMetricAlgorithm']

    accept_filter = None

    time_series = time_series_module.TimeSeries(
        label="Time Series",
        required=True,
        order=1,
        doc="The TimeSeries for which the metric(s) will be computed.")

    start_point = basic.Float(
        label="Start point (ms)",
        default=500.0,
        required=False,
        order=2,
        doc=""" The start point determines how many points of the TimeSeries will
        be discarded before computing the metric. By default it drops the
        first 500 ms.""")

    segment = basic.Integer(
        label="Segmentation factor",
        default=4,
        required=False,
        order=3,
        doc=
        """ Divide the input time-series into discrete equally sized sequences and
        use the last segment to compute the metric. It is only used when
        the start point is larger than the time-series length.""")

    def evaluate(self):
        """
        This method needs to be implemented in each subclass.
        The subclass docstring should describe the specific algorithm.

        :return: single numeric value or a dictionary (displayLabel: numeric value) to be persisted.
        """
        raise NotImplementedError(
            "Every metric algorithm should implement an 'evaluate' method that returns the metric result."
        )
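A hypothetical subclass illustrating the contract: drop start_point milliseconds, fall back to the last of `segment` equal slices when the series is too short, and return a single number (this metric is a sketch, not an existing TVB algorithm; it assumes sample_period shares the millisecond unit of start_point):

class VarianceMetric(BaseTimeseriesMetricAlgorithm):
    """Hypothetical metric: variance of the series after the start point."""

    def evaluate(self):
        data = self.time_series.data
        start_tpt = int(self.start_point / self.time_series.sample_period)
        if start_tpt >= data.shape[0]:
            # start point beyond the series: keep the last 1/segment slice
            start_tpt = (self.segment - 1) * data.shape[0] // self.segment
        return float(data[start_tpt:].var())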
Example #10
class RandomStream(core.Type):
    """
    This class provides the ability to create multiple random streams which can
    be independently seeded or set to an explicit state.

    """
    _ui_name = "Random state"
    wraps = numpy.random.RandomState
    defaults = ((42, ), {})  # for init wrapped value: wraps(*def[0], **def[1])

    init_seed = basic.Integer(
        label="A random seed",
        default=42,
        doc="""A random seed used to initialise the state of an instance of
        numpy's RandomState.""")

    def configure(self):
        """
        Run base classes configure to setup traited attributes, then initialise
        the stream's state using ``init_seed``.
        """
        super(RandomStream, self).configure()
        self.reset()

    def __str__(self):
        return simple_gen_astr(self, 'init_seed')

    # TODO how does this method work?
    def set_state(self, value):
        """
        Set the state of the random number stream based on a previously stored
        state. This is to enable consistent noise state on continuation from a
        previous simulation.

        """
        try:
            numpy.random.RandomState.set_state(self, state=value)
        except TypeError:
            msg = "%s: bad state, see numpy.random.set_state" % str(self)
            LOG.error(msg)
            raise TypeError(msg)

    def reset(self):
        """Reset the random stream to its initial state, using initial seed."""
        numpy.random.RandomState.__init__(self.value, seed=self.init_seed)
Example #11
class CoherenceSpectrumData(arrays.MappedArray):
    """
    Result of a NodeCoherence Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the node_coherence is
            applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""NOTE: must be a power of 2""")

    frequency = arrays.FloatArray(label="Frequency")

    __generate_table__ = True
Example #12
class IndependentComponentsData(MappedType):
    """
    Result of TEMPORAL (Fast) Independent Component Analysis
    """

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the ICA is applied.")

    mixing_matrix = arrays.FloatArray(
        label="Mixing matrix - Spatial Maps",
        doc="""The linear mixing matrix (Mixing matrix) """)

    unmixing_matrix = arrays.FloatArray(
        label="Unmixing matrix - Spatial maps",
        doc="""The estimated unmixing matrix used to obtain the unmixed
            sources from the data""")

    prewhitening_matrix = arrays.FloatArray(
        label="Pre-whitening matrix",
        doc=""" """)

    n_components = basic.Integer(
        label="Number of independent components",
        doc=""" Observed data matrix is considered to be a linear combination
        of :math:`n` non-Gaussian independent components""")

    norm_source = arrays.FloatArray(
        label="Normalised source time series. Zero centered and whitened.",
        file_storage=core.FILE_STORAGE_EXPAND)

    component_time_series = arrays.FloatArray(
        label="Component time series. Unmixed sources.",
        file_storage=core.FILE_STORAGE_EXPAND)

    normalised_component_time_series = arrays.FloatArray(
        label="Normalised component time series",
        file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True
Example #13
class SensorsData(MappedType):
    """
    Base Sensors class.
    All sensors have locations. 
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    labels = arrays.StringArray(label="Sensor labels")

    locations = arrays.PositionArray(label="Sensor locations")

    has_orientation = basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")
Example #14
class SimulationState(MappedType):
    """
    Simulation State, prepared for H5 file storage.
    """

    # History Array
    history = arrays.FloatArray(required=False)

    # State array, all state variables
    current_state = arrays.FloatArray(required=False)

    # Simulator's current step number (in time)
    current_step = basic.Integer()

    # Array with _stock array for every monitor configured in current simulation.
    # As the monitors are dynamic, we prepare a bunch of arrays for storage in H5 file.
    monitor_stock_1 = arrays.FloatArray(required=False)
    monitor_stock_2 = arrays.FloatArray(required=False)
    monitor_stock_3 = arrays.FloatArray(required=False)
    monitor_stock_4 = arrays.FloatArray(required=False)
    monitor_stock_5 = arrays.FloatArray(required=False)
    monitor_stock_6 = arrays.FloatArray(required=False)
    monitor_stock_7 = arrays.FloatArray(required=False)
    monitor_stock_8 = arrays.FloatArray(required=False)
    monitor_stock_9 = arrays.FloatArray(required=False)
    monitor_stock_10 = arrays.FloatArray(required=False)
    monitor_stock_11 = arrays.FloatArray(required=False)
    monitor_stock_12 = arrays.FloatArray(required=False)
    monitor_stock_13 = arrays.FloatArray(required=False)
    monitor_stock_14 = arrays.FloatArray(required=False)
    monitor_stock_15 = arrays.FloatArray(required=False)

    def __init__(self, **kwargs):
        """ 
        Constructor for Simulator State
        """
        super(SimulationState, self).__init__(**kwargs)
        self.visible = False

    def populate_from(self, simulator_algorithm):
        """
        Prepare a state for storage from a Simulator object.
        """
        self.history = simulator_algorithm.history.buffer.copy()
        self.current_step = simulator_algorithm.current_step
        self.current_state = simulator_algorithm.current_state

        for i, monitor in enumerate(simulator_algorithm.monitors):
            field_name = "monitor_stock_" + str(i + 1)
            setattr(self, field_name, monitor._stock)

            if hasattr(monitor, "_ui_name"):
                self.set_metadata({'monitor_name': monitor._ui_name},
                                  field_name)
            else:
                self.set_metadata({'monitor_name': monitor.__class__.__name__},
                                  field_name)

    def fill_into(self, simulator_algorithm):
        """
        Populate a Simulator object from current stored-state.
        """
        simulator_algorithm.history.initialize(self.history)
        simulator_algorithm.current_step = self.current_step
        simulator_algorithm.current_state = self.current_state

        for i, monitor in enumerate(simulator_algorithm.monitors):
            monitor._stock = getattr(self, "monitor_stock_" + str(i + 1))
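The two methods above form a save/restore pair. The intended round trip, as a sketch (`sim` stands for a configured Simulator exposing the `history`, `current_step`, `current_state` and `monitors` attributes the methods access):

state = SimulationState()
state.populate_from(sim)   # snapshot history, step, state and monitor stocks
# ... persist the state to H5 and reload it in a later session ...
state.fill_into(sim)       # resume the simulator exactly where it stopped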
Example #15
class Poisson_noise(Noise):
    nsig = arrays.FloatArray(label=r"rate",
                             required=True,
                             default=numpy.array([0.0]),
                             doc="""rate of neurons in Hz""")
    weights = arrays.FloatArray(label=r"unit firing rate",
                                required=True,
                                default=numpy.array([0.0]),
                                doc="""TODO""")

    noise_seed = basic.Integer(
        default=42,
        doc=
        "A random seed used to initialise the random_stream if it is missing.")

    random_stream = RandomStream(
        required=False,
        label="Random Stream",
        doc="An instance of numpy's RandomState associated with this"
        "specific Noise object. Used when you need to resume a simulation from a state saved to disk"
    )

    def __init__(self, **kwargs):
        super(Poisson_noise, self).__init__(**kwargs)
        if self.random_stream is None:
            self.random_stream = numpy.random.RandomState(self.noise_seed)

        self.dt = None

    # For use if coloured
    def configure(self):
        """
        Run base classes configure to setup traited attributes, then ensure that
        the ``random_stream`` attribute is properly configured.

        """
        super(Poisson_noise, self).configure()
        # XXX: reseeding here will destroy a maybe carefully set random_stream!
        # self.random_stream.seed(self.noise_seed)

    def configure_white(self, dt, shape=None):
        """Set the time step (dt) of noise or integration time"""
        self.dt = dt

    def generate(self, shape, lo=0.0, hi=1.0):
        "Generate noise realization."
        lambda_poisson = self.nsig * self.dt * 1e-3  # rate over the interval dt (dt is in ms)
        noise = self.random_stream.poisson(
            lam=lambda_poisson,
            size=shape)  # firing rate in kHz of the Poisson generator
        # print(numpy.max(noise))
        noise[3] = numpy.sqrt(noise[0])
        return noise * self.weights

    def gfun(self, state_variables):
        r"""
        Linear additive noise, thus it ignores the state_variables.

        .. math::
            g(x) = \sqrt{2D}

        """
        g_x = numpy.ones(state_variables.shape)
        return g_x
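Stripped of the traits machinery, generate() reduces to a few numpy calls; this standalone sketch mirrors the rate conversion (nsig in Hz, dt in ms, hence the 1e-3 factor) with illustrative values:

import numpy

rng = numpy.random.RandomState(42)
nsig = numpy.array([10.0])        # neuron rate, Hz
weights = numpy.array([0.5])      # unit firing rate
dt = 0.1                          # integration step, ms

lam = nsig * dt * 1e-3            # expected event count per dt interval
noise = rng.poisson(lam=lam, size=(4, 1, 76, 1))   # illustrative 4D shape
scaled = noise * weights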
Example #16
class IndependentComponents(MappedType):
    """
    Result of an Independent Component Analysis.

    """
    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the ICA is applied.")

    mixing_matrix = arrays.FloatArray(
        label="Mixing matrix - Spatial Maps",
        doc="""The linear mixing matrix (Mixing matrix) """)

    unmixing_matrix = arrays.FloatArray(
        label="Unmixing matrix - Spatial maps",
        doc="""The estimated unmixing matrix used to obtain the unmixed
            sources from the data""")

    prewhitening_matrix = arrays.FloatArray(
        label="Pre-whitening matrix",
        doc=""" """)

    n_components = basic.Integer(
        label="Number of independent components",
        doc=""" Observed data matrix is considered to be a linear combination
        of :math:`n` non-Gaussian independent components""")

    norm_source = arrays.FloatArray(
        label="Normalised source time series. Zero centered and whitened.",
        file_storage=core.FILE_STORAGE_EXPAND)

    component_time_series = arrays.FloatArray(
        label="Component time series. Unmixed sources.",
        file_storage=core.FILE_STORAGE_EXPAND)

    normalised_component_time_series = arrays.FloatArray(
        label="Normalised component time series",
        file_storage=core.FILE_STORAGE_EXPAND)


    def write_data_slice(self, partial_result):
        """
        Append chunk.

        """
        self.store_data_chunk('unmixing_matrix', partial_result.unmixing_matrix, grow_dimension=2, close_file=False)
        self.store_data_chunk('prewhitening_matrix', partial_result.prewhitening_matrix,
                              grow_dimension=2, close_file=False)
        partial_result.compute_norm_source()
        self.store_data_chunk('norm_source', partial_result.norm_source, grow_dimension=1, close_file=False)
        partial_result.compute_component_time_series()
        self.store_data_chunk('component_time_series', partial_result.component_time_series,
                              grow_dimension=1, close_file=False)
        partial_result.compute_normalised_component_time_series()
        self.store_data_chunk('normalised_component_time_series', partial_result.normalised_component_time_series,
                              grow_dimension=1, close_file=False)
        partial_result.compute_mixing_matrix()
        self.store_data_chunk('mixing_matrix', partial_result.mixing_matrix, grow_dimension=2, close_file=False)

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialisation.
        """
        super(IndependentComponents, self).configure()
        if self.trait.use_storage is False and sum(self.get_data_shape('unmixing_matrix')) != 0:
            if self.norm_source.size == 0:
                self.compute_norm_source()
            if self.component_time_series.size == 0:
                self.compute_component_time_series()
            if self.normalised_component_time_series.size == 0:
                self.compute_normalised_component_time_series()

    def compute_norm_source(self):
        """Normalised source time-series."""
        self.norm_source = ((self.source.data - self.source.data.mean(axis=0)) /
                            self.source.data.std(axis=0))

    def compute_component_time_series(self):
        ts_shape = self.source.data.shape
        component_ts_shape = (ts_shape[0], ts_shape[1], self.n_components, ts_shape[3])
        component_ts = numpy.zeros(component_ts_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.unmixing_matrix[:, :, var, mode]
                k = self.prewhitening_matrix[:, :, var, mode]
                ts = self.source.data[:, var, :, mode]
                component_ts[:, var, :, mode] = numpy.dot(w, numpy.dot(k, ts.T)).T
        self.component_time_series = component_ts

    def compute_normalised_component_time_series(self):
        ts_shape = self.source.data.shape
        component_ts_shape = (ts_shape[0], ts_shape[1], self.n_components, ts_shape[3])
        component_nts = numpy.zeros(component_ts_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.unmixing_matrix[:, :, var, mode]
                k = self.prewhitening_matrix[:, :, var, mode]
                nts = self.norm_source[:, var, :, mode]
                component_nts[:, var, :, mode] = numpy.dot(w, numpy.dot(k, nts.T)).T
        self.normalised_component_time_series = component_nts

    def compute_mixing_matrix(self):
        """
        Compute the linear mixing matrix A, so X = A * S,
        where X is the observed data and S contains the independent components.
        """
        ts_shape = self.source.data.shape
        mixing_matrix_shape = (ts_shape[2], self.n_components, ts_shape[1], ts_shape[3])
        mixing_matrix = numpy.zeros(mixing_matrix_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.unmixing_matrix[:, :, var, mode]
                k = self.prewhitening_matrix[:, :, var, mode]
                temp = numpy.matrix(numpy.dot(w, k))
                mixing_matrix[:, :, var, mode] = numpy.array(numpy.dot(temp.T, (numpy.dot(temp, temp.T)).T))
        self.mixing_matrix = mixing_matrix

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        summary = {"Mode decomposition type": self.__class__.__name__}
        summary["Source"] = self.source.title
        return summary
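Every per-(state-variable, mode) loop above applies the same two-step projection. On a single slice, with toy shapes, the unmixing step looks like this (W and K stand for one [:, :, var, mode] slice of the unmixing and pre-whitening matrices):

import numpy

tpts, nodes, n_comp = 100, 76, 5
X = numpy.random.randn(tpts, nodes)       # one (var, mode) slice of the data
W = numpy.random.randn(n_comp, n_comp)    # unmixing slice, toy values
K = numpy.random.randn(n_comp, nodes)     # pre-whitening slice, toy values

S = W.dot(K.dot(X.T)).T                   # component time series, (tpts, n_comp)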
Example #17
class PowerSpectraInteractive(core.Type):
    """
    The graphical interface for visualising the power-spectra (FFT) of a
    timeseries provides controls for setting:

        - which state-variable and mode to display [sets]
        - log or linear scaling for the power or frequency axis [binary]
        - segmentation length [set]
        - windowing function [set]
        - power normalisation [binary] (emphasise relative frequency contribution)
        - show std or sem [binary]


    """

    time_series = time_series_datatypes.TimeSeries(
        label="Timeseries",
        default=None,
        required=True,
        doc=""" The timeseries to which the FFT is to be applied.""")

    first_n = basic.Integer(
        label="Display the first 'n'",
        default=-1,
        required=True,
        doc="""Primarily intended for displaying the first N components of a 
            surface PCA timeseries. Defaults to -1, meaning it'll display all
            of 'space' (ie, regions or vertices or channels). In other words,
            for Region or M/EEG timeseries you can ignore this, but, for a 
            surface timeseries it really must be set.""")

    def __init__(self, **kwargs):
        """
        Initialise based on provided keywords or their traited defaults. Also,
        initialise the place-holder attributes that aren't filled until the
        show() method is called.

        """
        #figure
        self.ifft_fig = None

        #time-series
        self.fft_ax = None

        #Current state
        self.xscale = "linear"
        self.yscale = "log"
        self.mode = 0
        self.variable = 0
        self.show_sem = False
        self.show_std = False
        self.normalise_power = "no"
        self.window_length = 0.25
        self.window_function = "None"

        #Selectors
        self.xscale_selector = None
        self.yscale_selector = None
        self.mode_selector = None
        self.variable_selector = None
        self.show_sem_selector = None
        self.show_std_selector = None
        self.normalise_power_selector = None
        self.window_length_selector = None
        self.window_function_selector = None

        #
        possible_freq_steps = [2**x for x in range(-2, 7)]  #Hz
        #possible_freq_steps.append(1.0 / self.time_series_length) #Hz
        self.possible_window_lengths = 1.0 / numpy.array(
            possible_freq_steps)  #s
        self.freq_step = 1.0 / self.window_length
        self.frequency = None
        self.spectra = None
        self.spectra_norm = None

        #Sliders
        #self.window_length_slider = None

    def configure(self):
        """ Seperate configure cause ttraits be busted... """
        LOG.debug("time_series shape: %s" % str(self.time_series.data.shape))
        #TODO: if isinstance(self.time_series, TimeSeriesSurface) and self.first_n == -1: #LOG.error, return.
        self.data = self.time_series.data[:, :, :self.first_n, :]
        self.period = self.time_series.sample_period
        self.max_freq = 0.5 / self.period
        self.units = "Hz"
        self.tpts = self.data.shape[0]
        self.nsrs = self.data.shape[2]
        self.time_series_length = self.tpts * self.period
        self.time = numpy.arange(self.tpts) * self.period
        self.labels = ["channel_%0.3d" % k for k in range(self.nsrs)]

    def show(self):
        """ Generate the interactive power-spectra figure. """
        #Make sure everything is configured
        self.configure()

        #Make the figure:
        self.create_figure()

        #Selectors
        self.add_xscale_selector()
        self.add_yscale_selector()
        self.add_mode_selector()
        self.add_variable_selector()
        self.add_normalise_power_selector()
        self.add_window_length_selector()
        self.add_window_function_selector()

        #Sliders
        #self.add_window_length_slider() #Want discrete values
        #self.add_scaling_slider()

        #...
        self.calc_fft()

        #Plot timeseries
        self.plot_spectra()

        pylab.show()

    ##------------------------------------------------------------------------##
    ##------------------ Functions for building the figure -------------------##
    ##------------------------------------------------------------------------##
    def create_figure(self):
        """ Create the figure and time-series axes. """
        time_series_type = self.time_series.__class__.__name__
        try:
            figure_window_title = "Interactive power spectra: " + time_series_type
            pylab.close(figure_window_title)
            self.ifft_fig = pylab.figure(num=figure_window_title,
                                         figsize=(16, 8),
                                         facecolor=BACKGROUNDCOLOUR,
                                         edgecolor=EDGECOLOUR)
        except ValueError:
            LOG.info("My life would be easier if you'd update your PyLab...")
            figure_number = 42
            pylab.close(figure_number)
            self.ifft_fig = pylab.figure(num=figure_number,
                                         figsize=(16, 8),
                                         facecolor=BACKGROUNDCOLOUR,
                                         edgecolor=EDGECOLOUR)

        self.fft_ax = self.ifft_fig.add_axes([0.15, 0.2, 0.7, 0.75])

    def add_xscale_selector(self):
        """
        Add a radio button to the figure for selecting which scaling the x-axes
        should use.
        """
        pos_shp = [0.45, 0.02, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="xscale")
        xscale_tuple = ("log", "linear")
        self.xscale_selector = widgets.RadioButtons(rax,
                                                    xscale_tuple,
                                                    active=1)
        self.xscale_selector.on_clicked(self.update_xscale)

    def add_yscale_selector(self):
        """
        Add a radio button to the figure for selecting which scaling the y-axes
        should use.
        """
        pos_shp = [0.02, 0.5, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="yscale")
        yscale_tuple = ("log", "linear")
        self.yscale_selector = widgets.RadioButtons(rax,
                                                    yscale_tuple,
                                                    active=0)
        self.yscale_selector.on_clicked(self.update_yscale)

    def add_mode_selector(self):
        """
        Add a radio button to the figure for selecting which mode of the model
        should be displayed.
        """
        pos_shp = [0.02, 0.07, 0.05, 0.1 + 0.002 * self.data.shape[3]]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="Mode")
        mode_tuple = tuple(range(self.data.shape[3]))
        self.mode_selector = widgets.RadioButtons(rax, mode_tuple, active=0)
        self.mode_selector.on_clicked(self.update_mode)

    def add_variable_selector(self):
        """
        Generate radio selector buttons to set which state variable is 
        displayed.
        """
        noc = self.data.shape[1]  # number of choices
        #State variable for the x axis
        pos_shp = [0.02, 0.22, 0.05, 0.12 + 0.008 * noc]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="state variable")
        self.variable_selector = widgets.RadioButtons(rax,
                                                      tuple(range(noc)),
                                                      active=0)
        self.variable_selector.on_clicked(self.update_variable)

    def add_window_length_selector(self):
        """
        Generate radio selector buttons to set the window length in seconds.
        """
        noc = self.possible_window_lengths.shape[0]  # number of choices
        #State variable for the x axis
        pos_shp = [0.88, 0.07, 0.1, 0.12 + 0.02 * noc]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="Segment length")
        wl_tup = tuple(self.possible_window_lengths)
        self.window_length_selector = widgets.RadioButtons(rax,
                                                           wl_tup,
                                                           active=4)
        self.window_length_selector.on_clicked(self.update_window_length)

    def add_window_function_selector(self):
        """
        Generate radio selector buttons to set the windowing function.
        """
        #TODO: add support for kaiser, requires specification of beta.
        wf_tup = ("None", "hamming", "bartlett", "blackman", "hanning")
        noc = len(wf_tup)  # number of choices
        #State variable for the x axis
        pos_shp = [0.88, 0.77, 0.085, 0.12 + 0.01 * noc]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="Windowing function")
        self.window_function_selector = widgets.RadioButtons(rax,
                                                             wf_tup,
                                                             active=0)
        self.window_function_selector.on_clicked(self.update_window_function)

    def add_normalise_power_selector(self):
        """
        Add a radio button to choose whether or not the power of all spectra
        should be normalised to 1.
        """
        pos_shp = [0.02, 0.8, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="normalise")
        np_tuple = ("yes", "no")
        self.normalise_power_selector = widgets.RadioButtons(rax,
                                                             np_tuple,
                                                             active=1)
        self.normalise_power_selector.on_clicked(self.update_normalise_power)

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the state --------------------##
    ##------------------------------------------------------------------------##
    def calc_fft(self):
        """
        Calculate FFT using current state of the window_length, window_function,
        """
        #Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(self.time_series_length / self.window_length))
        if nseg != 1:
            seg_tpts = int(self.window_length / self.period)
            overlap = ((seg_tpts * nseg) - self.tpts) / (nseg - 1)
            starts = [
                int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)
            ]
            segments = [self.data[start:start + seg_tpts] for start in starts]
            segments = [
                segment[:, :, :, numpy.newaxis] for segment in segments
            ]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            time_series = self.data[:, :, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]

        #Base-line correct segmented time-series
        time_series = time_series - time_series.mean(axis=0)[numpy.newaxis, :]

        #Apply windowing function
        if self.window_function != "None":
            window_function = getattr(numpy, self.window_function)
            window_mask = numpy.reshape(window_function(seg_tpts),
                                        (seg_tpts, 1, 1, 1, 1))
            time_series = time_series * window_mask

        #Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        nfreq = len(result) // 2

        self.frequency = numpy.arange(0, self.max_freq, self.freq_step)
        LOG.debug("frequency shape: %s" % str(self.frequency.shape))

        self.spectra = numpy.mean(numpy.abs(result[1:nfreq + 1])**2, axis=-1)
        LOG.debug("spectra shape: %s" % str(self.spectra.shape))

        self.spectra_norm = (self.spectra / numpy.sum(self.spectra, axis=0))
        LOG.debug("spectra_norm shape: %s" % str(self.spectra_norm.shape))

        #import pdb; pdb.set_trace()
#        self.spectra_std = numpy.std(numpy.abs(result[:nfreq]), axis=4)
#        self.spectra_sem = self.spectra_std / time_series.shape[4]

##------------------------------------------------------------------------##
##------------------ Functions for updating the figure -------------------##
##------------------------------------------------------------------------##

    def update_xscale(self, xscale):
        """ 
        Update the FFT axes' xscale to either log or linear based on radio
        button selection.
        """
        self.xscale = xscale
        self.fft_ax.set_xscale(self.xscale)
        pylab.draw()

    def update_yscale(self, yscale):
        """ 
        Update the FFT axes' yscale to either log or linear based on radio
        button selection.
        """
        self.yscale = yscale
        self.fft_ax.set_yscale(self.yscale)
        pylab.draw()

    def update_mode(self, mode):
        """ Update the visualised mode based on radio button selection. """
        self.mode = mode
        self.plot_spectra()

    def update_variable(self, variable):
        """ 
        Update state variable being plotted based on radio button selection.
        """
        self.variable = variable
        self.plot_spectra()

    def update_normalise_power(self, normalise_power):
        """ Update whether to normalise based on radio button selection. """
        self.normalise_power = normalise_power
        self.plot_spectra()

    def update_window_length(self, length):
        """
        Update timeseries window length based on the selected value.
        """
        #TODO: need this casting but not sure why, don't need int() with mode...
        self.window_length = numpy.float64(length)
        #import pdb; pdb.set_trace()
        self.freq_step = 1.0 / self.window_length
        self.update_spectra()

    def update_window_function(self, window_function):
        """
        Update windowing function based on the radio button selection.
        """
        self.window_function = window_function
        self.update_spectra()

    def update_spectra(self):
        """ Clear the axes and redraw the power-spectra. """
        self.calc_fft()
        self.plot_spectra()

#    def plot_std(self):
#        """ Plot """
#        std = (self.spectra[:, self.variable, :, self.mode] +
#               self.spectra_std[:, self.variable, :, self.mode])
#        self.fft_ax.plot(self.frequency, std, "--")
#
#
#    def plot_sem(self):
#        """  """
#        sem = (self.spectra[:, self.variable, :, self.mode] +
#               self.spectra_sem[:, self.variable, :, self.mode])
#        self.fft_ax.plot(self.frequency, sem, ":")

    def plot_spectra(self):
        """ Plot the power spectra. """
        self.fft_ax.clear()
        # Set title and axis labels
        time_series_type = self.time_series.__class__.__name__
        self.fft_ax.set(title=time_series_type)
        self.fft_ax.set(xlabel="Frequency (%s)" % self.units)
        self.fft_ax.set(ylabel="Power")

        # Set x and y scale based on current radio button selection.
        self.fft_ax.set_xscale(self.xscale)
        self.fft_ax.set_yscale(self.yscale)

        if hasattr(self.fft_ax, 'autoscale'):
            self.fft_ax.autoscale(enable=True, axis='both', tight=True)

        #import pdb; pdb.set_trace()
        #Plot the power spectra
        if self.normalise_power == "yes":
            self.fft_ax.plot(self.frequency,
                             self.spectra_norm[:, self.variable, :, self.mode])
        else:
            self.fft_ax.plot(self.frequency, self.spectra[:, self.variable, :,
                                                          self.mode])


#        #TODO: Need to ensure colour matching... and allow region selection.
#        #If requested, add standard deviation
#        if self.show_std:
#            self.plot_std(self)
#
#        #If requested, add standard error in mean
#        if self.show_sem:
#            self.plot_sem(self)

        pylab.draw()
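The overlap arithmetic inside calc_fft is easy to verify in isolation: nseg windows of seg_tpts points must tile tpts points, so neighbours overlap by (seg_tpts*nseg - tpts)/(nseg - 1) points. A standalone check with illustrative numbers:

import numpy

tpts, period = 4096, 0.25e-3        # points and sample period (s)
window_length = 0.25                # s
series_length = tpts * period       # 1.024 s

nseg = int(numpy.ceil(series_length / window_length))          # 5
seg_tpts = int(window_length / period)                         # 1000
overlap = (seg_tpts * nseg - tpts) / (nseg - 1)                # 226.0
starts = [int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)]
# starts[-1] + seg_tpts == tpts, so the last window ends exactly at the data.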
Example #18
class ConnectivityData(MappedType):
    """
    This class primarily exists to bundle the long range structural connectivity
    data into a single object. 
    """

    region_labels = arrays.StringArray(
        label="Region labels",
        doc=
        """Short strings, 'labels', for the regions represented by the connectivity matrix."""
    )

    weights = arrays.FloatArray(
        label="Connection strengths",
        stored_metadata=[key for key in MappedType.DEFAULT_WITH_ZERO_METADATA],
        doc=
        """Matrix of values representing the strength of connections between regions, arbitrary units."""
    )

    undirected = basic.Integer(
        default=0,
        required=False,
        doc=
        "1, when the weights matrix is square and symmetric over the main diagonal, 0 when directed graph."
    )

    tract_lengths = arrays.FloatArray(
        label="Tract lengths",
        stored_metadata=[key for key in MappedType.DEFAULT_WITH_ZERO_METADATA],
        doc="""The length of myelinated fibre tracts between regions.
        If not provided Euclidean distance between region centres is used.""")

    speed = arrays.FloatArray(
        label="Conduction speed",
        default=numpy.array([3.0]),
        file_storage=core.FILE_STORAGE_NONE,
        doc=
        """A single number or matrix of conduction speeds for the myelinated fibre tracts between regions."""
    )

    centres = arrays.PositionArray(
        label="Region centres",
        doc="An array specifying the location of the centre of each region.")

    cortical = arrays.BoolArray(
        label="Cortical",
        required=False,
        doc=
        """A boolean vector specifying whether or not a region is part of the cortex."""
    )

    hemispheres = arrays.BoolArray(
        label="Hemispheres (True for Right and False for Left Hemisphere",
        required=False,
        doc=
        """A boolean vector specifying whether or not a region is part of the right hemisphere"""
    )

    orientations = arrays.OrientationArray(
        label="Average region orientation",
        required=False,
        doc=
        """Unit vectors of the average orientation of the regions represented in the connectivity matrix.
        NOTE: Unknown data should be zeros.""")

    areas = arrays.FloatArray(
        label="Area of regions",
        required=False,
        doc=
        """Estimated area represented by the regions in the connectivity matrix.
        NOTE: Unknown data should be zeros.""")

    idelays = arrays.IndexArray(
        label="Conduction delay indices",
        required=False,
        file_storage=core.FILE_STORAGE_NONE,
        doc="An array of time delays between regions in integration steps.")

    delays = arrays.FloatArray(
        label="Conduction delay",
        file_storage=core.FILE_STORAGE_NONE,
        required=False,
        doc=
        """Matrix of time delays between regions in physical units, setting conduction speed automatically
        combines with tract lengths to update this matrix, i.e. don't try and change it manually."""
    )

    number_of_regions = basic.Integer(
        label="Number of regions",
        doc="""The number of regions represented in this Connectivity """)

    number_of_connections = basic.Integer(
        label="Number of connections",
        doc=
        """The number of non-zero entries represented in this Connectivity """)

    # ------------- FRAMEWORK ATTRIBUTES -----------------------------

    # Original Connectivity, from which current connectivity was edited.
    parent_connectivity = basic.String(required=False)

    # In case of edited Connectivity, these are the nodes left in the interest
    # area; the rest were part of a lesion, so they were removed.
    saved_selection = basic.JSONType(required=False)
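The delays doc above says conduction speed and tract lengths combine into the delay matrix. The relationship, sketched with a scalar speed (units follow the TVB convention of mm and mm/ms, an assumption here; dt is the integration step used for the index delays):

import numpy

tract_lengths = numpy.array([[0.0, 80.0],
                             [80.0, 0.0]])   # mm, toy values
speed = numpy.array([3.0])                   # mm/ms, the default above
dt = 0.1                                     # ms

delays = tract_lengths / speed                      # physical delays, ms
idelays = numpy.round(delays / dt).astype(int)      # delays in integration steps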
Example #19
class NodeCoherence(core.Type):
    """
    Adapter for the cross-coherence algorithm (matplotlib.mlab implementation).
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the FFT is to be applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""Should be a power of 2...""")

    def evaluate(self):
        """ 
        Coherence function.  Matplotlib.mlab implementation.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        data_shape = self.time_series.data.shape

        #(frequency, nodes, nodes, state-variables, modes)
        result_shape = (self.nfft // 2 + 1, data_shape[2], data_shape[2],
                        data_shape[1], data_shape[3])
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        #TODO: For region level, 4s, 2000Hz, this takes ~2min... (which is stupidly slow)
        #One inter-node coherence, across frequencies for each state-var & mode.
        for mode in range(data_shape[3]):
            for var in range(data_shape[1]):
                data = self.time_series.data[:, var, :, mode]
                data = data - data.mean(axis=0)[numpy.newaxis, :]
                #TODO: Work out a way around the 4 level loop,
                #TODO: coherence isn't directional, so, get rid of redundancy...
                for n1 in range(data_shape[2]):
                    for n2 in range(data_shape[2]):
                        cxy, freq = mlab.cohere(
                            data[:, n1],
                            data[:, n2],
                            NFFT=self.nfft,
                            Fs=self.time_series.sample_rate,
                            detrend=detrend_linear,
                            window=mlab.window_none)
                        result[:, n1, n2, var, mode] = cxy

        util.log_debug_array(LOG, result, "result")
        util.log_debug_array(LOG, freq, "freq")

        coherence = spectral.CoherenceSpectrum(source=self.time_series,
                                               nfft=self.nfft,
                                               array_data=result,
                                               frequency=freq,
                                               use_storage=False)

        return coherence

    def result_shape(self, input_shape):
        """Returns the shape of the main result of NodeCoherence."""
        freq_len = self.nfft // 2 + 1
        freq_shape = (freq_len, )
        result_shape = (freq_len, input_shape[2], input_shape[2],
                        input_shape[1], input_shape[3])
        return [result_shape, freq_shape]

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of NodeCoherence.
        """
        result_size = numpy.sum([numpy.prod(shape) for shape in
                                 self.result_shape(input_shape)]) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the FFT.
        That is, it includes storage of the evaluated FourierSpectrum attributes
        such as power, phase, amplitude, etc.
        """
        extend_size = self.result_size(
            input_shape)  #Currently no derived attributes.
        return extend_size
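A toy run of the inner mlab.cohere call, outside the four-level loop: two noisy copies of one sinusoid should come out strongly coherent at the shared frequency.

import numpy
from matplotlib import mlab

fs = 1000.0
t = numpy.arange(0, 4.0, 1.0 / fs)
common = numpy.sin(2 * numpy.pi * 40.0 * t)
x = common + 0.5 * numpy.random.randn(t.size)
y = common + 0.5 * numpy.random.randn(t.size)

cxy, freq = mlab.cohere(x, y, NFFT=256, Fs=fs, window=mlab.window_none)
# cxy peaks near freq == 40.0 Hz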
Example #20
class SurfaceData(MappedType):
    """
    This class primarily exists to bundle the structural Surface data into a 
    single object.
    """

    default = readers.File(folder_path="surfaces/cortex_reg13")

    vertices = arrays.PositionArray(
        label="Vertex positions",
        order=-1,
        console_default=default.read_data(file_name="vertices.txt.bz2",
                                          field="vertices"),
        doc="""An array specifying coordinates for the surface vertices.""")

    triangles = arrays.IndexArray(
        label="Triangles",
        order=-1,
        target=vertices,
        console_default=default.read_data(file_name="triangles.txt.bz2",
                                          dtype=numpy.int32,
                                          field="triangles"),
        doc=
        """Array of indices into the vertices, specifying the triangles which define the surface."""
    )

    vertex_normals = arrays.OrientationArray(
        label="Vertex normal vectors",
        order=-1,
        console_default=default.read_data(file_name="vertex_normals.txt.bz2",
                                          field="vertex_normals"),
        doc="""An array of unit normal vectors for the surfaces vertices.""")

    triangle_normals = arrays.OrientationArray(
        label="Triangle normal vectors",
        order=-1,
        doc="""An array of unit normal vectors for the surfaces triangles.""")

    geodesic_distance_matrix = SparseMatrix(
        label="Geodesic distance matrix",
        order=-1,
        required=False,
        file_storage=FILE_STORAGE_NONE,
        doc="""A sparse matrix of truncated geodesic distances""")  # 'CS'

    number_of_vertices = basic.Integer(
        label="Number of vertices",
        order=-1,
        doc="""The number of vertices making up this surface.""")

    number_of_triangles = basic.Integer(
        label="Number of triangles",
        order=-1,
        doc="""The number of triangles making up this surface.""")

    ##--------------------- FRAMEWORK ATTRIBUTES -----------------------------##

    hemisphere_mask = arrays.BoolArray(
        label="An array specifying if a vertex belongs to the right hemisphere",
        file_storage=FILE_STORAGE_NONE,
        required=False,
        order=-1)

    zero_based_triangles = basic.Bool(order=-1)

    split_triangles = arrays.IndexArray(order=-1, required=False)

    number_of_split_slices = basic.Integer(order=-1)

    split_slices = basic.Dict(order=-1)

    bi_hemispheric = basic.Bool(order=-1)

    surface_type = basic.String

    __mapper_args__ = {'polymorphic_on': 'surface_type'}
Example #21
class NodeComplexCoherence(core.Type):
    """
    A class for calculating the FFT of a TimeSeries and returning
    a ComplexCoherenceSpectrum datatype.

    This algorithm is based on the matlab function data2cs_event.m written by Guido Nolte:
        .. [Freyer_2012] Freyer, F.; Reinacher, M.; Nolte, G.; Dinse, H. R. and
            Ritter, P. *Repetitive tactile stimulation changes resting-state
            functional connectivity-implications for treatment of sensorimotor decline*.
            Front Hum Neurosci, Bernstein Focus State Dependencies of Learning and
            Bernstein Center for Computational Neuroscience Berlin, Germany., 2012, 6, 144
    
    Input:
    originally the input could be 2D (tpts x nodes/channels), and it was possible
    to give a 3D array (e.g., tpts x nodes/channels x trials) via the segment_length
    attribute.
    Current TVB implementation can handle 4D or 2D TimeSeries datatypes.
    Be warned: the 4D TimeSeries will be averaged and squeezed.
    Output: (main arrays)
    - the cross-spectrum
    - the complex coherence, from which the imaginary part can be extracted 
        
    By default the time series is segmented into 1 second `epoch` blocks and 0.5
    second 50% overlapping `segments` to which a Hanning function is applied. 
    
    """

    time_series = TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries for which the CrossCoherence and ComplexCoherence
        is to be computed.""")

    epoch_length = basic.Float(
        label="Epoch length [ms]",
        default=1000.0,
        order=-1,
        required=False,
        doc="""In general for lengthy EEG recordings (~30 min), the timeseries 
        are divided into equally sized segments (~ 20-40s). These contain the 
        event that is to be characterized by means of the cross coherence. 
        Additionally each epoch block will be further divided into segments to 
        which the FFT will be applied.""")

    segment_length = basic.Float(
        label="Segment length [ms]",
        default=500.0,
        order=-1,
        required=False,
        doc="""The timeseries can be segmented into equally sized blocks
            (overlapping if necessary). The segment length determines the
            frequency resolution of the resulting power spectra -- longer
            windows produce finer frequency resolution. """)

    segment_shift = basic.Float(
        label="Segment shift [ms]",
        default=250.0,
        required=False,
        order=-1,
        doc="""Time length by which neighboring segments are shifted. e.g.
                `segment shift` = `segment_length` / 2 means 50% overlapping 
                segments.""")

    window_function = basic.String(
        label="Windowing function",
        default='hanning',
        required=False,
        order=-1,
        doc="""Windowing functions can be applied before the FFT is performed.
             Default is `hanning`, possibilities are: 'hamming'; 'bartlett';
            'blackman'; and 'hanning'. See, numpy.<function_name>.""")

    average_segments = basic.Bool(
        label="Average across segments",
        default=True,
        required=False,
        order=-1,
        doc="""Flag. If `True`, compute the mean Cross Spectrum across 
                segments.""")

    subtract_epoch_average = basic.Bool(
        label="Subtract average across epochs",
        default=True,
        required=False,
        order=-1,
        doc="""Flag. If `True` and if the number of epochs is > 1, you can 
                optionally subtract the mean across epochs before computing the 
                complex coherence.""")

    zeropad = basic.Integer(
        label="Zeropadding",
        default=0,
        required=False,
        order=-1,
        doc="""Adds `n` zeros at the end of each segment and at the end 
        of window_function. It is not yet functional.""")

    detrend_ts = basic.Bool(
        label="Detrend time series",
        default=False,
        required=False,
        order=-1,
        doc="""Flag. If `True` removes linear trend along the time dimension 
                before applying FFT.""")

    max_freq = basic.Float(
        label="Maximum frequency",
        default=1024.0,
        order=-1,
        required=False,
        doc="""Maximum frequency points (e.g. 32., 64., 128.) represented in 
                the output. Default is segment_length / 2 + 1.""")

    npat = basic.Float(
        label="dummy variable",
        default=1.0,
        required=False,
        order=-1,
        doc="""This attribute appears to be related to an input projection 
            matrix... Which is not yet implemented""")

    def evaluate(self):
        """
        Calculate the FFT, Cross Coherence and Complex Coherence of time_series 
        broken into (possibly) epochs and segments of length `epoch_length` and 
        `segment_length` respectively, filtered by `window_function`.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)
        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        if len(self.time_series.data.shape) > 2:
            time_series_data = numpy.squeeze(
                (self.time_series.data.mean(axis=-1)).mean(axis=1))
        else:
            # 2D input (tpts x nodes/channels) is used as-is
            time_series_data = self.time_series.data

        #nchan = time_series_data.shape[1]

        #NOTE: if we get a projection matrix ... then ...
        #if self.npat > 1:
        #    data = data * proj
        #    nchan = self.npat

        #Divide time-series into epochs, no overlapping
        if self.epoch_length > 0.0:
            nepochs = int(numpy.floor(time_series_length / self.epoch_length))
            epoch_tpts = int(self.epoch_length / self.time_series.sample_period)
            time_series_length = self.epoch_length
            tpts = epoch_tpts
        else:
            self.epoch_length = time_series_length
            nepochs = int(numpy.ceil(time_series_length / self.epoch_length))
            epoch_tpts = tpts  # a single epoch spanning the whole time series

        #Segment time-series, overlapping if necessary
        nseg = int(numpy.floor(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(self.segment_length / self.time_series.sample_period)
            seg_shift_tpts = int(self.segment_shift / self.time_series.sample_period)
            nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)
        else:
            self.segment_length = time_series_length
            seg_tpts = time_series_data.shape[0]
            seg_shift_tpts = seg_tpts  # a single, non-overlapping segment
            nseg = 1

        # Frequency vectors
        freqs = numpy.fft.fftfreq(int(seg_tpts))
        nfreq = int(numpy.min(
            [self.max_freq,
             numpy.floor((seg_tpts + self.zeropad) / 2.0) + 1]))
        freqs = freqs[:nfreq] * (1.0 / self.time_series.sample_period)

        result_shape, av_result_shape = self.result_shape(
            self.time_series.data.shape, self.max_freq, self.epoch_length,
            self.segment_length, self.segment_shift,
            self.time_series.sample_period, self.zeropad,
            self.average_segments)

        cs = numpy.zeros(result_shape, dtype=numpy.complex128)
        av = numpy.matrix(numpy.zeros(av_result_shape, dtype=numpy.complex128))
        coh = numpy.zeros(result_shape, dtype=numpy.complex128)

        # NOTE: result for individual epochs are kept only if npat > 1. Skipping ...
        #if self.npat > 1:
        #    if not self.average_segments:
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #        av = numpy.zeros((nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #    else:
        #        av = numpy.zeros((nchan, nfreq, nepochs), dtype=numpy.complex128)
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs), dtype=numpy.complex128)

        #Apply windowing function
        if self.window_function is not None:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                LOG.error("Windowing function is: %s" % self.window_function)
                LOG.error("Must be in: %s" %
                          str(SUPPORTED_WINDOWING_FUNCTIONS))

            window_function = getattr(numpy, self.window_function)
            win = window_function(seg_tpts)
            window_mask = (numpy.kron(
                numpy.ones((time_series_data.shape[1], 1)), win)).T

        nave = 0

        for j in numpy.arange(nepochs):
            data = time_series_data[j * epoch_tpts:(j + 1) * epoch_tpts, :]

            for i in numpy.arange(nseg):  #average over all segments;
                time_series = data[i * seg_shift_tpts:i * seg_shift_tpts +
                                   seg_tpts, :]

                if self.detrend_ts:
                    time_series = sp_signal.detrend(time_series, axis=0)

                datalocfft = numpy.fft.fft(time_series * window_mask, axis=0)
                datalocfft = numpy.matrix(datalocfft)

                for f in numpy.arange(nfreq):  # for all frequencies
                    if self.npat == 1:
                        if not self.average_segments:
                            cs[:, :, f, i] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f, i] += numpy.conjugate(
                                datalocfft[f, :].conj().T)
                        else:
                            cs[:, :, f] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f] += numpy.conjugate(
                                datalocfft[f, :].conj().T)
                    else:
                        if not self.average_segments:
                            cs[:, :, f, j, i] = numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f, j, i] = numpy.conjugate(
                                datalocfft[f, :].conj().T)
                        else:
                            cs[:, :, f, j] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f, j] += numpy.conjugate(
                                datalocfft[f, :].conj().T)
                del datalocfft

            nave += 1.0

        # End of FORs
        if not self.average_segments:
            cs = cs / nave
            av = av / nave
        else:
            nave = nave * nseg
            cs = cs / nave
            av = av / nave

        # Subtract average
        for f in numpy.arange(nfreq):
            if self.subtract_epoch_average:
                if self.npat == 1:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            cs[:, :, f, i] = (cs[:, :, f, i] -
                                              av[:, f, i] * av[:, f, i].conj().T)
                    else:
                        cs[:, :, f] = (cs[:, :, f] -
                                       av[:, f] * av[:, f].conj().T)
                else:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            for j in numpy.arange(nepochs):
                                cs[:, :, f, j, i] = (cs[:, :, f, j, i] -
                                                     av[:, f, j, i] * av[:, f, j, i].conj().T)
                    else:
                        for j in numpy.arange(nepochs):
                            cs[:, :, f, j] = (cs[:, :, f, j] -
                                              av[:, f, j] * av[:, f, j].conj().T)

        #Compute Complex Coherence
        ndim = len(cs.shape)
        if ndim == 3:
            for i in numpy.arange(cs.shape[2]):
                temp = numpy.matrix(cs[:, :, i])
                coh[:, :, i] = cs[:, :, i] / numpy.sqrt(
                    (temp.diagonal().conj().T) * temp.diagonal())

        elif ndim == 4:
            for i in numpy.arange(cs.shape[2]):
                for j in numpy.arange(cs.shape[3]):
                    temp = numpy.matrix(numpy.squeeze(cs[:, :, i, j]))
                    coh[:, :, i, j] = temp / numpy.sqrt(
                        (temp.diagonal().conj().T) * temp.diagonal())

        util.log_debug_array(LOG, cs, "result")
        spectra = spectral.ComplexCoherenceSpectrum(
            source=self.time_series,
            array_data=coh,
            cross_spectrum=cs,
            #                              frequency = freqs,
            epoch_length=self.epoch_length,
            segment_length=self.segment_length,
            windowing_function=self.window_function,
            #                             fft_points = seg_tpts,
            use_storage=False)
        return spectra

    @staticmethod
    def result_shape(input_shape, max_freq, epoch_length, segment_length,
                     segment_shift, sample_period, zeropad, average_segments):
        """
        Returns the shape of the main result and the average over epochs
        """
        # the branch below only matters if the input could actually be a 2D timeseries
        nchan = input_shape[2] if len(input_shape) > 2 else input_shape[1]
        seg_tpts = segment_length / sample_period
        seg_shift_tpts = segment_shift / sample_period
        tpts = (epoch_length / sample_period) if epoch_length > 0.0 else input_shape[0]
        nfreq = int(numpy.min(
            [max_freq, numpy.floor((seg_tpts + zeropad) / 2.0) + 1]))
        #nep   = int(numpy.floor(input_shape[0] / epoch_length))
        nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)

        if not average_segments:
            result_shape = (nchan, nchan, nfreq, nseg)
            av_result_shape = (nchan, nfreq, nseg)
        else:
            result_shape = (nchan, nchan, nfreq)
            av_result_shape = (nchan, nfreq)

        return [result_shape, av_result_shape]

    def result_size(self, input_shape, max_freq, epoch_length, segment_length,
                    segment_shift, sample_period, zeropad, average_segments):
        """
        Returns the storage size in Bytes of the main result (complex array) of 
        the ComplexCoherence
        """
        result_size = numpy.prod(
            self.result_shape(input_shape, max_freq, epoch_length,
                              segment_length, segment_shift, sample_period,
                              zeropad,
                              average_segments)[0]) * 2.0 * 8.0  #complex*Bytes
        return result_size

    def extended_result_size(self, input_shape, max_freq, epoch_length,
                             segment_length, segment_shift, sample_period,
                             zeropad, average_segments):
        """
        Returns the storage size in Bytes of the extended result of the ComplexCoherence. 
        That is, it includes storage of the evaluated ComplexCoherence attributes
        such as ...
        """
        result_shape = self.result_shape(input_shape, max_freq, epoch_length,
                                         segment_length, segment_shift,
                                         sample_period, zeropad,
                                         average_segments)[0]
        result_size = self.result_size(input_shape, max_freq, epoch_length,
                                       segment_length, segment_shift,
                                       sample_period, zeropad,
                                       average_segments)
        extend_size = result_size * 2.0  #Main arrays: cross spectrum and complex coherence
        extend_size = extend_size + result_shape[2] * 8.0  #Frequency
        extend_size = extend_size + 8.0  # Epoch length
        extend_size = extend_size + 8.0  # Segment length
        return extend_size
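
# A hedged usage sketch, not part of the library: driving NodeComplexCoherence
# from a 4D TVB TimeSeries (time, state-variables, nodes, modes). The import
# path and constructor keywords are assumptions and may differ across TVB versions.
import numpy
from tvb.datatypes.time_series import TimeSeries

data = numpy.random.randn(4096, 1, 16, 1)       # (tpts, svar, nodes, modes)
ts = TimeSeries(data=data, sample_period=1.0)   # 1 ms sampling period
analyser = NodeComplexCoherence(time_series=ts)
spectrum = analyser.evaluate()                  # -> ComplexCoherenceSpectrum
imag_coh = spectrum.array_data.imag             # imaginary part of the coherence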
Example #22
class LookUpTable(types_mapped.MappedType):
    """
    Lookup Tables for storing pre-computed functions.
    Specific table subclasses are implemented below.
    """

    _base_classes = ['LookUpTables']

    equation = basic.String(
        label="String representation of the precalculated function",
        doc="""A latex representation of the function whose values are stored
            in the table, with the extra escaping needed for interpretation via sphinx."""
    )

    xmin = arrays.FloatArray(label="x-min", doc="""Minimum value""")

    xmax = arrays.FloatArray(label="x-max", doc="""Maximum value""")

    data = arrays.FloatArray(label="data", doc="""Tabulated values""")

    number_of_values = basic.Integer(
        label="Number of values",
        default=0,
        doc="""The number of values in the table """)

    df = arrays.FloatArray(
        label="df",
        doc="""Derivative values of the tabulated function, used for the
            first-order interpolation correction.""")

    dx = arrays.FloatArray(label="dx",
                           default=numpy.array([]),
                           doc="""Tabulation step""")

    invdx = arrays.FloatArray(label="invdx",
                              default=numpy.array([]),
                              doc="""Inverse of the tabulation step (1 / dx).""")

    @staticmethod
    def populate_table(result, source_file):
        source_full_path = try_get_absolute_path("tvb_data.tables",
                                                 source_file)
        zip_data = numpy.load(source_full_path)

        result.df = zip_data['df']
        result.xmin, result.xmax = zip_data['min_max']
        result.data = zip_data['f']
        return result

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """
        super(LookUpTable, self).configure()

        # Check if dx and invdx have been computed
        if self.number_of_values == 0:
            self.number_of_values = self.data.shape[0]

        if self.dx.size == 0:
            self.compute_search_indices()

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this dataType, if any ...
        """
        summary = {"Number of values": self.number_of_values}
        return summary

    def compute_search_indices(self):
        """
        Compute the tabulation step (``dx``) and its inverse (``invdx``),
        which are used to locate table entries when interpolating.
        """
        self.dx = (self.xmax - self.xmin) / (self.number_of_values - 1)
        self.invdx = 1 / self.dx

    def search_value(self, val):
        """
        Search a value in this look up table
        """

        if self.xmin:
            y = val - self.xmin
        else:
            y = val

        ind = numpy.array(y * self.invdx, dtype=int)

        try:
            return self.data[ind] + self.df[ind] * (y - ind * self.dx)
        except IndexError:  # out of bounds
            return numpy.NaN
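
# A standalone sketch of the lookup logic above, in plain NumPy: values are
# tabulated on a uniform grid, and a query is answered with a first-order
# correction, data[ind] + df[ind] * remainder. All names here are illustrative.
import numpy

xmin, xmax, n = 0.0, 10.0, 101
x = numpy.linspace(xmin, xmax, n)
table = numpy.tanh(x)                 # tabulated function values ("data")
deriv = 1.0 - table ** 2              # derivative of tanh ("df")
dx = (xmax - xmin) / (n - 1)          # tabulation step, as in compute_search_indices
invdx = 1.0 / dx

def table_lookup(val):
    y = val - xmin
    ind = int(y * invdx)
    return table[ind] + deriv[ind] * (y - ind * dx)

print(table_lookup(3.14), numpy.tanh(3.14))   # the two values agree closely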
class ConnectivityData(MappedType):
    """
    This class primarily exists to bundle the long range structural connectivity
    data into a single object. 
    """

    default = readers.File(folder_path="connectivity/o52r00_irp2008")

    parcellation_mask = volumes.ParcellationMask(
        label="Parcellation mask (volume)",
        required=False,
        doc=
        """A 3D volume mask defining the parcellation of the brain into distinct regions."""
    )

    region_labels = arrays.StringArray(
        label="Region labels",
        console_default=default.read_data(file_name="centres.txt.bz2",
                                          usecols=(0, ),
                                          dtype="string",
                                          field="region_labels"),
        doc=
        """Short strings, 'labels', for the regions represented by the connectivity matrix."""
    )

    weights = arrays.FloatArray(
        label="Connection strengths",
        console_default=default.read_data(file_name="weights.txt.bz2",
                                          field="weights"),
        doc=
        """Matrix of values representing the strength of connections between regions, arbitrary units."""
    )

    unidirectional = basic.Integer(
        default=0,
        required=False,
        doc="""1 when the weights matrix is square and symmetric over the
        main diagonal, 0 when connections are bi-directional (asymmetric).""")

    tract_lengths = arrays.FloatArray(
        label="Tract lengths",
        console_default=default.read_data(file_name="tract_lengths.txt.bz2",
                                          field="tract_lengths"),
        doc="""The length of myelinated fibre tracts between regions.
        If not provided Euclidean distance between region centres is used.""")

    speed = arrays.FloatArray(
        label="Conduction speed",
        default=numpy.array([3.0]),
        file_storage=core.FILE_STORAGE_NONE,
        doc=
        """A single number or matrix of conduction speeds for the myelinated fibre tracts between regions."""
    )

    centres = arrays.PositionArray(
        label="Region centres",
        console_default=default.read_data(file_name="centres.txt.bz2",
                                          usecols=(1, 2, 3),
                                          field="centres"),
        doc="An array specifying the location of the centre of each region.")

    cortical = arrays.BoolArray(
        label="Cortical",
        console_default=default.read_data(file_name="cortical.txt.bz2",
                                          dtype=numpy.bool,
                                          field="cortical"),
        required=False,
        doc=
        """A boolean vector specifying whether or not a region is part of the cortex."""
    )

    hemispheres = arrays.BoolArray(
        label="Hemispheres (True for Right and False for Left Hemisphere",
        required=False,
        doc=
        """A boolean vector specifying whether or not a region is part of the right hemisphere"""
    )

    orientations = arrays.OrientationArray(
        label="Average region orientation",
        console_default=default.read_data(
            file_name="average_orientations.txt.bz2", field="orientations"),
        required=False,
        doc=
        """Unit vectors of the average orientation of the regions represented in the connectivity matrix.
        NOTE: Unknown data should be zeros.""")

    areas = arrays.FloatArray(
        label="Area of regions",
        console_default=default.read_data(file_name="areas.txt.bz2",
                                          field="areas"),
        required=False,
        doc=
        """Estimated area represented by the regions in the connectivity matrix.
        NOTE: Unknown data should be zeros.""")

    idelays = arrays.IndexArray(
        label="Conduction delay indices",
        required=False,
        file_storage=core.FILE_STORAGE_NONE,
        doc="An array of time delays between regions in integration steps.")

    delays = arrays.FloatArray(
        label="Conduction delay",
        file_storage=core.FILE_STORAGE_NONE,
        required=False,
        doc=
        """Matrix of time delays between regions in physical units, setting conduction speed automatically
        combines with tract lengths to update this matrix, i.e. don't try and change it manually."""
    )

    number_of_regions = basic.Integer(
        label="Number of regions",
        doc="""The number of regions represented in this Connectivity """)

    # ------------- FRAMEWORK ATTRIBUTES -----------------------------

    # Rotation if positions are not normalized.
    nose_correction = basic.JSONType(required=False)

    # Original Connectivity, from which current connectivity was edited.
    parent_connectivity = basic.String(required=False)

    # In case of edited Connectivity, this are the nodes left in interest area,
    # the rest were part of a lesion, so they were removed.
    saved_selection = basic.JSONType(required=False)
Example #24
class fastICA(core.Type):
    """
    Takes a TimeSeries datatype (x) and returns the unmixed temporal sources (S) 
    and the estimated mixing matrix (A).
    
    .. math:: x = A S
    
    ICA takes time-points as observations and nodes as variables.
    
    It uses the FastICA algorithm implemented in the scikit-learn toolkit, and
    its intended usage is as a `blind source separation` method.
    
    See also: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.fastica.html#sklearn.decomposition.fastica

    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="The timeseries to which the ICA is to be applied.")

    n_components = basic.Integer(
        label="Number of principal components to unmix.",
        required=False,
        default=None,
        doc="Number of principal components to unmix.")

    def evaluate(self):
        "Run FastICA on the given time series data."

        # problem dimensions
        data = self.time_series.data
        n_time, n_svar, n_node, n_mode = data.shape
        self.n_components = n_comp = self.n_components or n_node

        if n_time < n_comp:
            msg = (
                "ICA requires more time points (received %d) than number of components (received %d)."
                " Please run a longer simulation, use a higher sampling frequency or specify a lower"
                " number of components to extract.")
            msg %= n_time, n_comp
            raise ValueError(msg)

        # ICA operates on matrices, here we perform for all state variables and modes
        W = numpy.zeros((n_comp, n_comp, n_svar, n_mode))  # unmixing
        K = numpy.zeros((n_comp, n_node, n_svar, n_mode))  # whitening matrix
        src = numpy.zeros(
            (n_time, n_comp, n_svar, n_mode))  # component time series

        for mode in range(n_mode):
            for var in range(n_svar):
                sl = Ellipsis, var, mode
                K[sl], W[sl], src[sl] = fastica(data[:, var, :, mode],
                                                self.n_components)

        return mode_decompositions.IndependentComponents(
            source=self.time_series,
            component_time_series=src,
            prewhitening_matrix=K,
            unmixing_matrix=W,
            n_components=n_comp,
            use_storage=False)

    def result_shape(self, input_shape):
        "Returns the shape of the mixing matrix."
        n = self.n_components or input_shape[2]
        return n, n, input_shape[1], input_shape[3]

    def result_size(self, input_shape):
        "Returns the storage size in bytes of the mixing matrix of the ICA analysis, assuming 64-bit float."
        return numpy.prod(self.result_shape(input_shape)) * 8

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in bytes of the extended result of the ICA.
        """

        n_time, n_svar, n_node, n_mode = input_shape
        n_comp = self.n_components or n_node

        n = numpy.prod(self.result_shape(input_shape))
        n += numpy.prod((n_comp, n_comp, n_svar, n_mode))  # unmixing
        n += numpy.prod((n_comp, n_node, n_svar, n_mode))  # whitening
        n += numpy.prod((n_time, n_comp, n_svar, n_mode))  # sources

        return n * 8
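
# For one state variable and mode, each per-slice call above reduces to the
# functional interface of scikit-learn; a standalone sketch on synthetic mixed
# sources (all names illustrative, not part of TVB):
import numpy
from sklearn.decomposition import fastica

rng = numpy.random.RandomState(42)
S = numpy.c_[numpy.sin(numpy.linspace(0, 8 * numpy.pi, 1000)),
             rng.laplace(size=1000)]            # two independent sources
A = numpy.array([[1.0, 0.5],
                 [0.5, 1.0]])                   # mixing matrix
X = S.dot(A.T)                                  # observations: (time, nodes)

K, W, src = fastica(X, n_components=2)          # whitening, unmixing, sources
# src recovers S up to permutation, sign and scale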
Example #25
class NodeCoherence(core.Type):
    """Compute cross coherence between nodes.
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the FFT is to be applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""Should be a power of 2...""")

    def evaluate(self):
        """ 
        Coherence function.  Matplotlib.mlab implementation.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        data_shape = self.time_series.data.shape

        #(frequency, nodes, nodes, state-variables, modes)
        result_shape = (self.nfft // 2 + 1, data_shape[2], data_shape[2],
                        data_shape[1], data_shape[3])
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        #TODO: For region level, 4s, 2000Hz, this takes ~2min... (which is stupidly slow)
        #One inter-node coherence, across frequencies for each state-var & mode.
        for mode in range(data_shape[3]):
            for var in range(data_shape[1]):
                data = self.time_series.data[:, var, :, mode]
                data = data - data.mean(axis=0)[numpy.newaxis, :]
                #TODO: Work out a way around the 4 level loop,
                #TODO: coherence isn't directional, so, get rid of redundancy...
                for n1 in range(data_shape[2]):
                    for n2 in range(data_shape[2]):
                        cxy, freq = mlab.cohere(
                            data[:, n1],
                            data[:, n2],
                            NFFT=self.nfft,
                            Fs=self.time_series.sample_rate,
                            detrend=detrend_linear,
                            window=mlab.window_none)
                        result[:, n1, n2, var, mode] = cxy

        util.log_debug_array(LOG, result, "result")
        util.log_debug_array(LOG, freq, "freq")

        coherence = spectral.CoherenceSpectrum(source=self.time_series,
                                               nfft=self.nfft,
                                               array_data=result,
                                               frequency=freq,
                                               use_storage=False)

        return coherence

    def _new_evaluate(self):
        "New implementation of cross-coherence w/o for loops"
        # TODO: adapt to tvb timeseries shape
        t, Y = unknown  # placeholder: time vector and (channels x time) data, not yet wired in
        nfft = self.nfft
        imag = False
        fs = numpy.fft.fftfreq(nfft, t[1] - t[0])
        # shape [ch_i, ch_j, ..., window, time]
        wY = Y.reshape((Y.shape[0], -1, nfft)) * numpy.hamming(nfft)
        F = numpy.fft.fft(wY)
        G = F[:, numpy.newaxis] * F
        if imag:
            G = G.imag
        dG = numpy.array([G[i, i] for i in range(G.shape[0])])
        C = (numpy.abs(G) ** 2 / (dG[:, numpy.newaxis] * dG)).mean(axis=-2)
        mask = fs > 0.0
        C_ = numpy.abs(C.mean(axis=0).mean(axis=0))
        return fs[mask], C_[mask], C[..., mask]

    def result_shape(self, input_shape):
        """Returns the shape of the main result of NodeCoherence."""
        freq_len = self.nfft // 2 + 1
        freq_shape = (freq_len, )
        result_shape = (freq_len, input_shape[2], input_shape[2],
                        input_shape[1], input_shape[3])
        return [result_shape, freq_shape]

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of NodeCoherence.
        """
        # TODO This depends on input array dtype!
        result_size = numpy.sum([numpy.prod(shape) for shape in
                                 self.result_shape(input_shape)]) * 8.0  #Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the FFT.
        That is, it includes storage of the evaluated FourierSpectrum attributes
        such as power, phase, amplitude, etc.
        """
        extend_size = self.result_size(
            input_shape)  #Currently no derived attributes.
        return extend_size
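
# The per-pair computation above is standard Welch-style magnitude-squared
# coherence; a standalone sketch using scipy.signal instead of matplotlib's
# mlab.cohere (all names illustrative):
import numpy
from scipy.signal import coherence

fs = 2000.0                                     # Hz; cf. the 2000Hz case in the TODO above
t = numpy.arange(0, 4.0, 1.0 / fs)
shared = numpy.sin(2 * numpy.pi * 40 * t)       # common 40 Hz component
x = shared + numpy.random.randn(t.size)
y = shared + numpy.random.randn(t.size)

freq, cxy = coherence(x, y, fs=fs, nperseg=256) # nperseg plays the role of nfft
# cxy peaks near 40 Hz; freq has nperseg // 2 + 1 points, matching result_shape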
Example #26
class Sensors(MappedType):
    """
    Base Sensors class.
    All sensors have locations.
    Some will have orientations, e.g. MEG.
    """

    _ui_name = "Unknown sensors"

    sensors_type = types_basic.String

    __mapper_args__ = {'polymorphic_on': 'sensors_type'}

    labels = arrays.StringArray(label="Sensor labels")

    locations = arrays.PositionArray(label="Sensor locations")

    has_orientation = types_basic.Bool(default=False)

    orientations = arrays.OrientationArray(required=False)

    number_of_sensors = types_basic.Integer(
        label="Number of sensors",
        doc="""The number of sensors described by these Sensors.""")

    # introduced to accommodate real sensors sets which have sensors
    # that should be zero during simulation i.e. ECG (heart), EOG,
    # reference gradiometers, etc.
    usable = arrays.BoolArray(
        required=False,
        label="Usable sensors",
        doc="The sensors in set which are used for signal data.")

    @classmethod
    def from_file(cls, source_file="eeg_brainstorm_65.txt", instance=None):

        if instance is None:
            result = cls()
        else:
            result = instance

        source_full_path = try_get_absolute_path("tvb_data.sensors",
                                                 source_file)
        reader = FileReader(source_full_path)

        result.labels = reader.read_array(dtype="string", use_cols=(0, ))
        result.locations = reader.read_array(use_cols=(1, 2, 3))

        return result

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """
        super(Sensors, self).configure()
        self.number_of_sensors = self.labels.shape[0]

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        summary = {
            "Sensor type": self.sensors_type,
            "Number of Sensors": self.number_of_sensors
        }
        return summary

    def sensors_to_surface(self, surface_to_map):
        """
        Map EEG sensors onto the head surface (skin-air).

        EEG sensor locations are typically only given on a unit sphere, that is,
        they are effectively only identified by their orientation with respect
        to a coordinate system. This method is used to map these unit vector
        sensor "locations" to a specific location on the surface of the skin.

        Assumes coordinate systems are aligned, i.e. common x,y,z and origin.

        """
        # Normalize sensor and vertex locations to unit vectors
        norm_sensors = numpy.sqrt(numpy.sum(self.locations**2, axis=1))
        unit_sensors = self.locations / norm_sensors[:, numpy.newaxis]
        norm_verts = numpy.sqrt(numpy.sum(surface_to_map.vertices**2, axis=1))
        unit_vertices = surface_to_map.vertices / norm_verts[:, numpy.newaxis]

        sensor_locations = numpy.zeros((self.number_of_sensors, 3))
        for k in xrange(self.number_of_sensors):
            # Find the surface vertex most closely aligned with current sensor.
            current_sensor = unit_sensors[k]
            alignment = numpy.dot(current_sensor, unit_vertices.T)
            one_ring = []

            while not one_ring:
                closest_vertex = alignment.argmax()
                # Get the set of triangles in the neighbourhood of that vertex.
                # NOTE: Intersection doesn't always fall within the 1-ring, so, all
                #      triangles contained in the 2-ring are considered.
                one_ring = surface_to_map.vertex_neighbours[closest_vertex]
                if not one_ring:
                    alignment[closest_vertex] = min(alignment)

            local_tri = [surface_to_map.vertex_triangles[v] for v in one_ring]
            local_tri = list(set([tri for subar in local_tri
                                  for tri in subar]))

            # Calculate a parametrized plane line intersection [t,u,v] for the
            # set of local triangles, which are considered as defining a plane.
            tuv = numpy.zeros((len(local_tri), 3))
            for i, tri in enumerate(local_tri):
                edge_01 = (
                    surface_to_map.vertices[surface_to_map.triangles[tri, 0]] -
                    surface_to_map.vertices[surface_to_map.triangles[tri, 1]])
                edge_02 = (
                    surface_to_map.vertices[surface_to_map.triangles[tri, 0]] -
                    surface_to_map.vertices[surface_to_map.triangles[tri, 2]])
                see_mat = numpy.vstack((current_sensor, edge_01, edge_02))

                tuv[i] = numpy.linalg.solve(
                    see_mat.T,
                    surface_to_map.vertices[surface_to_map.triangles[tri, 0]])

            # Find  which line-plane intersection falls within its triangle
            # by imposing the condition that u, v, & u+v are contained in [0 1]
            local_triangle_index = ((0 <= tuv[:, 1]) * (tuv[:, 1] < 1) *
                                    (0 <= tuv[:, 2]) * (tuv[:, 2] < 1) *
                                    (0 <= (tuv[:, 1] + tuv[:, 2])) *
                                    ((tuv[:, 1] + tuv[:, 2]) < 2)).nonzero()[0]

            if len(local_triangle_index) == 1:
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[
                    local_triangle_index[0], 0]

            elif len(local_triangle_index) < 1:
                # No triangle was found in proximity. Fall back to the plane
                # whose intersection is closest, effectively placing the sensor
                # in the surface extension area.
                LOG.warning(
                    "Could not find a proper position on the given surface for sensor %d:%s. "
                    "with direction %s" %
                    (k, self.labels[k], str(self.locations[k])))
                distances = (abs(tuv[:, 1] + tuv[:, 2]))
                local_triangle_index = distances.argmin()
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[
                    local_triangle_index, 0]

            else:
                # More than one triangle was found in proximity. Pick the first.
                # Scale sensor unit vector by t so that it lies on the surface.
                sensor_locations[k] = current_sensor * tuv[
                    local_triangle_index[0], 0]

        return sensor_locations
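
# A minimal standalone sketch of the parametrised line-plane intersection used
# in sensors_to_surface: solve t*d + u*(v0 - v1) + v*(v0 - v2) = v0 for
# [t, u, v]; the point t*d lies inside the triangle when u, v and u + v fall
# in [0, 1]. The names (d, v0, v1, v2) are illustrative.
import numpy

v0, v1, v2 = numpy.array([[1.0, 0.0, 0.0],
                          [0.0, 1.0, 0.0],
                          [0.0, 0.0, 1.0]])
d = numpy.array([1.0, 1.0, 1.0])                # ray from the origin (a unit-sphere sensor)
A = numpy.vstack((d, v0 - v1, v0 - v2))         # rows: direction and the two edges
t, u, v = numpy.linalg.solve(A.T, v0)           # here t = u = v = 1/3
point = t * d                                   # [1/3, 1/3, 1/3], the triangle centroid
inside = (0 <= u <= 1) and (0 <= v <= 1) and (u + v <= 1)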
Example #27
class Connectivity(MappedType):

    # data
    region_labels = arrays.StringArray(
        label="Region labels",
        doc="""Short strings, 'labels', for the regions represented by the connectivity matrix.""")

    weights = arrays.FloatArray(
        label="Connection strengths",
        stored_metadata=[key for key in MappedType.DEFAULT_WITH_ZERO_METADATA],
        doc="""Matrix of values representing the strength of connections between regions, arbitrary units.""")

    undirected = basic.Integer(
        default=0, required=False,
        doc="1, when the weights matrix is square and symmetric over the main diagonal, 0 when directed graph.")

    tract_lengths = arrays.FloatArray(
        label="Tract lengths",
        stored_metadata=[key for key in MappedType.DEFAULT_WITH_ZERO_METADATA],
        doc="""The length of myelinated fibre tracts between regions.
        If not provided Euclidean distance between region centres is used.""")

    speed = arrays.FloatArray(
        label="Conduction speed",
        default=numpy.array([3.0]), file_storage=core.FILE_STORAGE_NONE,
        doc="""A single number or matrix of conduction speeds for the myelinated fibre tracts between regions.""")

    centres = arrays.PositionArray(
        label="Region centres",
        doc="An array specifying the location of the centre of each region.")

    cortical = arrays.BoolArray(
        label="Cortical",
        required=False,
        doc="""A boolean vector specifying whether or not a region is part of the cortex.""")

    hemispheres = arrays.BoolArray(
        label="Hemispheres (True for Right and False for Left Hemisphere",
        required=False,
        doc="""A boolean vector specifying whether or not a region is part of the right hemisphere""")

    orientations = arrays.OrientationArray(
        label="Average region orientation",
        required=False,
        doc="""Unit vectors of the average orientation of the regions represented in the connectivity matrix.
        NOTE: Unknown data should be zeros.""")

    areas = arrays.FloatArray(
        label="Area of regions",
        required=False,
        doc="""Estimated area represented by the regions in the connectivity matrix.
        NOTE: Unknown data should be zeros.""")

    idelays = arrays.IndexArray(
        label="Conduction delay indices",
        required=False, file_storage=core.FILE_STORAGE_NONE,
        doc="An array of time delays between regions in integration steps.")

    delays = arrays.FloatArray(
        label="Conduction delay",
        file_storage=core.FILE_STORAGE_NONE, required=False,
        doc="""Matrix of time delays between regions in physical units, setting conduction speed automatically
        combines with tract lengths to update this matrix, i.e. don't try and change it manually.""")

    number_of_regions = basic.Integer(
        label="Number of regions",
        doc="""The number of regions represented in this Connectivity """)

    number_of_connections = basic.Integer(
        label="Number of connections",
        doc="""The number of non-zero entries represented in this Connectivity """)

    # Original Connectivity, from which current connectivity was edited.
    parent_connectivity = basic.String(required=False)

    # In case of edited Connectivity, this are the nodes left in interest area,
    # the rest were part of a lesion, so they were removed.
    saved_selection = basic.JSONType(required=False)

    # framework
    @property
    def display_name(self):
        """
        Overwrite from superclass and add number of regions field (as title on DataStructure tree)
        """
        previous = "Connectivity"
        return previous + " " + str(self.number_of_regions)

    def branch_connectivity(self, new_weights, interest_areas, storage_path, new_tracts=None):
        """
        Generate new Connectivity based on current one, by changing weights (e.g. simulate lesion).
        The returned connectivity has the same number of nodes. The edges of unselected nodes will have weight 0.
        :param new_weights: weights matrix for the new connectivity
        :param interest_areas: ndarray of the selected node id's
        :param new_tracts: tracts matrix for the new connectivity
        """
        if new_tracts is None:
            new_tracts = self.tract_lengths

        for i in xrange(len(self.weights)):
            for j in xrange(len(self.weights)):
                if i not in interest_areas or j not in interest_areas:
                    new_weights[i][j] = 0

        final_conn = self.__class__()
        final_conn.parent_connectivity = self.gid
        final_conn.storage_path = storage_path
        final_conn.weights = new_weights
        final_conn.centres = self.centres
        final_conn.region_labels = self.region_labels
        final_conn.orientations = self.orientations
        final_conn.cortical = self.cortical
        final_conn.hemispheres = self.hemispheres
        final_conn.areas = self.areas
        final_conn.tract_lengths = new_tracts
        final_conn.saved_selection = interest_areas.tolist()
        final_conn.subject = self.subject
        return final_conn

    def cut_connectivity(self, new_weights, interest_areas, storage_path, new_tracts=None):
        """
        Generate new Connectivity object based on current one, by removing nodes (e.g. simulate lesion).
        Only the selected nodes will get used in the result. The order of the indices in interest_areas matters.
        If indices are not sorted then the nodes will be permuted accordingly.
        :param new_weights: weights matrix for the new connectivity
        :param interest_areas: ndarray with the selected node id's.
        :param new_tracts: tracts matrix for the new connectivity
        """
        if new_tracts is None:
            new_tracts = self.tract_lengths[interest_areas, :][:, interest_areas]
        else:
            new_tracts = new_tracts[interest_areas, :][:, interest_areas]
        new_weights = new_weights[interest_areas, :][:, interest_areas]

        final_conn = self.__class__()
        final_conn.parent_connectivity = None
        final_conn.storage_path = storage_path
        final_conn.weights = new_weights
        final_conn.centres = self.centres[interest_areas, :]
        final_conn.region_labels = self.region_labels[interest_areas]
        if self.orientations is not None and len(self.orientations):
            final_conn.orientations = self.orientations[interest_areas, :]
        if self.cortical is not None and len(self.cortical):
            final_conn.cortical = self.cortical[interest_areas]
        if self.hemispheres is not None and len(self.hemispheres):
            final_conn.hemispheres = self.hemispheres[interest_areas]
        if self.areas is not None and len(self.areas):
            final_conn.areas = self.areas[interest_areas]
        final_conn.tract_lengths = new_tracts
        final_conn.saved_selection = None
        final_conn.subject = self.subject
        return final_conn

    def _reorder_arrays(self, new_weights, interest_areas, new_tracts=None):
        """
        Returns ordered versions of the parameters according to the hemisphere permutation.
        """
        permutation = self.hemisphere_order_indices
        inverse_permutation = numpy.argsort(permutation)  # trick to invert a permutation represented as an array
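        # e.g. for permutation = [2, 0, 1], numpy.argsort gives [1, 2, 0],
        # and permutation[inverse_permutation] == [0, 1, 2]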
        interest_areas = inverse_permutation[interest_areas]
        # see :meth"`ordered_weights` for why [p:][:p]
        new_weights = new_weights[inverse_permutation, :][:, inverse_permutation]

        if new_tracts is not None:
            new_tracts = new_tracts[inverse_permutation, :][:, inverse_permutation]

        return new_weights, interest_areas, new_tracts

    def branch_connectivity_from_ordered_arrays(self, new_weights, interest_areas, storage_path, new_tracts=None):
        """
        Similar to :meth:`branch_connectivity` but the parameters are consistent with the ordered versions of weights, tracts, labels
        Used by the connectivity viewer to save a lesion.
        """
        new_weights, interest_areas, new_tracts = self._reorder_arrays(new_weights, interest_areas, new_tracts)
        return self.branch_connectivity(new_weights, interest_areas, storage_path, new_tracts)

    def cut_new_connectivity_from_ordered_arrays(self, new_weights, interest_areas, storage_path, new_tracts=None):
        """
        Similar to :meth:`cut_connectivity` but using hemisphere ordered parameters.
        Used by the connectivity viewer to save a smaller connectivity.
        """
        new_weights, interest_areas, new_tracts = self._reorder_arrays(new_weights, interest_areas, new_tracts)
        return self.cut_connectivity(new_weights, interest_areas, storage_path, new_tracts)

    @property
    def saved_selection_labels(self):
        """
        Taking the entity field saved_selection, convert indexes in that array
        into labels.
        """
        if self.saved_selection:
            idxs = [int(i) for i in self.saved_selection]
            result = ''
            for i in idxs:
                result += self.region_labels[i] + ','
            return result[:-1]
        else:
            return ''

    @staticmethod
    def accepted_filters():
        filters = MappedType.accepted_filters()
        filters.update({'datatype_class._number_of_regions': {'type': 'int', 'display': 'No of Regions',
                                                              'operations': ['==', '<', '>']}})
        return filters

    def is_right_hemisphere(self, idx):
        """
        :param idx:  Region IDX
        :return: True when hemispheres information is present and it shows that the current node is in the right
        hemisphere. When hemispheres info is not present, return True for the second half of the indices and
        False otherwise.
        """
        if self.hemispheres is not None and self.hemispheres.size:
            return self.hemispheres[idx]
        return idx >= self.number_of_regions / 2

    @property
    def hemisphere_order_indices(self):
        """
        A sequence of indices of rows/columns.
        These permute rows/columns so that the first half belongs to the first hemisphere.
        If there is no hemisphere information, the identity permutation is returned.
        """
        if self.hemispheres is not None and self.hemispheres.size:
            li, ri = [], []
            for i, is_right in enumerate(self.hemispheres):
                if is_right:
                    ri.append(i)
                else:
                    li.append(i)
            return numpy.array(li + ri)
        else:
            return numpy.arange(self.number_of_regions)

    @property
    def ordered_weights(self):
        """
        This view of the weights matrix lists all left hemisphere nodes before the right ones.
        It is used by viewers of the connectivity.
        """
        permutation = self.hemisphere_order_indices
        # how this works:
        # w[permutation, :] selects all rows at the indices present in the permutation array thus permuting the rows
        # [:, permutation] does the same to columns. See numpy index arrays
        return self.weights[permutation, :][:, permutation]

    @property
    def ordered_tracts(self):
        """
        Similar to :meth:`ordered_weights`
        """
        permutation = self.hemisphere_order_indices
        return self.tract_lengths[permutation, :][:, permutation]

    @property
    def ordered_labels(self):
        """
        Similar to :meth:`ordered_weights`
        """
        permutation = self.hemisphere_order_indices
        return self.region_labels[permutation]

    @property
    def ordered_centres(self):
        """
        Similar to :meth:`ordered_weights`
        """
        permutation = self.hemisphere_order_indices
        return self.centres[permutation]

    def get_grouped_space_labels(self):
        """
        :return: A list [('left', [lh_labels]), ('right', [rh_labels])]
        """
        if self.hemispheres is not None and self.hemispheres.size:
            l, r = [], []

            for i, (is_right, label) in enumerate(zip(self.hemispheres, self.region_labels)):
                if is_right:
                    r.append((i, label))
                else:
                    l.append((i, label))
            return [('left', l), ('right', r)]
        else:
            return [('', list(enumerate(self.region_labels)))]

    def get_default_selection(self):
        # should this be sub-selection or all always?
        sel = self.saved_selection
        if sel is not None:
            return sel
        else:
            return range(len(self.region_labels))

    def get_measure_points_selection_gid(self):
        """
        :return: the associated connectivity gid
        """
        return self.gid

    @property
    def binarized_weights(self):
        """
        :return: a matrix of the same size as weights, with 1 where weight > 0, and 0 elsewhere
        """
        result = numpy.zeros_like(self.weights)
        result = numpy.where(self.weights > 0, 1, result)
        return result

    # scientific


    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """
        super(Connectivity, self).configure()

        self.number_of_regions = self.weights.shape[0]
        # NOTE: In numpy 1.8 there is a function called count_zeros
        self.number_of_connections = self.weights.nonzero()[0].shape[0]

        self.trait["weights"].log_debug(owner=self.__class__.__name__)
        self.trait["tract_lengths"].log_debug(owner=self.__class__.__name__)
        self.trait["speed"].log_debug(owner=self.__class__.__name__)
        self.trait["centres"].log_debug(owner=self.__class__.__name__)
        self.trait["orientations"].log_debug(owner=self.__class__.__name__)
        self.trait["areas"].log_debug(owner=self.__class__.__name__)

        if self.tract_lengths.size == 0:
            self.compute_tract_lengths()

        if self.region_labels.size == 0:
            self.compute_region_labels()

        if self.hemispheres is None or self.hemispheres.size == 0:
            self.try_compute_hemispheres()

        # This cannot go into compute, as the reference is too complex
        # if self.delays.size == 0:
        # TODO: Because delays are stored and loaded the size was never 0.0 and
        #      so this wasn't being run, making the conduction_speed hack on the
        #      simulator non-functional. In the longer run it'll probably be
        #      necessary for delays to never be stored but always calculated
        #      from tract-lengths and speed...
        if self.speed is None:  # TODO: this is a hack fix...
            LOG.warning("Connectivity.speed attribute not initialized properly, setting it to 3.0...")
            self.speed = numpy.array([3.0])

        # NOTE: Because of the conduction_speed hack for UI this must be evaluated here, even if delays
        # already has a value, otherwise setting speed in the UI has no effect...
        self.delays = self.tract_lengths / self.speed
        self.trait["delays"].log_debug(owner=self.__class__.__name__)

        if (self.weights.transpose() == self.weights).all():
            self.undirected = 1

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this dataType.
        """
        summary = {"Number of regions": self.number_of_regions,
                   "Number of connections": self.number_of_connections,
                   "Undirected": self.undirected}

        summary.update(self.get_info_about_array('areas',
                                                 [self.METADATA_ARRAY_MAX,
                                                  self.METADATA_ARRAY_MIN,
                                                  self.METADATA_ARRAY_MEAN]))

        summary.update(self.get_info_about_array('weights',
                                                 [self.METADATA_ARRAY_MAX,
                                                  self.METADATA_ARRAY_MEAN,
                                                  self.METADATA_ARRAY_VAR,
                                                  self.METADATA_ARRAY_MIN_NON_ZERO,
                                                  self.METADATA_ARRAY_MEAN_NON_ZERO,
                                                  self.METADATA_ARRAY_VAR_NON_ZERO]))

        summary.update(self.get_info_about_array('tract_lengths',
                                                 [self.METADATA_ARRAY_MAX,
                                                  self.METADATA_ARRAY_MEAN,
                                                  self.METADATA_ARRAY_VAR,
                                                  self.METADATA_ARRAY_MIN_NON_ZERO,
                                                  self.METADATA_ARRAY_MEAN_NON_ZERO,
                                                  self.METADATA_ARRAY_VAR_NON_ZERO]))

        summary.update(self.get_info_about_array('tract_lengths',
                                                 [self.METADATA_ARRAY_MAX_NON_ZERO,
                                                  self.METADATA_ARRAY_MIN_NON_ZERO,
                                                  self.METADATA_ARRAY_MEAN_NON_ZERO,
                                                  self.METADATA_ARRAY_VAR_NON_ZERO],
                                                 mask_array_name='weights', key_suffix=" (connections)"))

        return summary

    def set_idelays(self, dt):
        """
        Convert the time delays between regions in physical units into an array
        of linear indices into the simulator's history attribute.

        args:
            ``dt (float64)``: Length of integration time step...

        Updates attribute:
            ``idelays (numpy.array)``: Transmission delay between brain regions
            in integration steps.
        """
        # Express delays in integration steps
        self.idelays = numpy.rint(self.delays / dt).astype(numpy.int32)
        self.trait["idelays"].log_debug(owner=self.__class__.__name__)

    def compute_tract_lengths(self):
        """
        If no tract lengths data are available, this can be used to calculate
        the Euclidean distance between region centres to use as a proxy.

        """
        nor = self.number_of_regions
        tract_lengths = numpy.zeros((nor, nor))
        # TODO: redundant by half, do half triangle then flip...
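        # (a fully vectorised alternative would be
        #  numpy.sqrt(((self.centres[:, None, :] - self.centres[None, :, :]) ** 2).sum(-1)),
        #  kept here as a loop for clarity)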
        for region in range(nor):
            temp = self.centres - self.centres[region, :][numpy.newaxis, :]
            tract_lengths[region, :] = numpy.sqrt(numpy.sum(temp ** 2, axis=1))

        self.tract_lengths = tract_lengths
        self.trait["tract_lengths"].log_debug(owner=self.__class__.__name__)

    def compute_region_labels(self):
        """ """
        labels = ["region_%03d" % n for n in range(self.number_of_regions)]
        self.region_labels = numpy.array(labels, dtype="128a")

    def try_compute_hemispheres(self):
        """
        If all region labels are prefixed with L or R, then compute hemisphere side with that.
        """
        if self.region_labels is not None and self.region_labels.size > 0:
            hemispheres = []
            ## Check if all labels are prefixed with R / L
            for label in self.region_labels:
                if label is not None and label.lower().startswith('r'):
                    hemispheres.append(True)
                elif label is not None and label.lower().startswith('l'):
                    hemispheres.append(False)
                else:
                    hemispheres = None
                    break
            ## Check if all labels are suffixed with R / L
            if hemispheres is None:
                hemispheres = []
                for label in self.region_labels:
                    if label is not None and label.lower().endswith('r'):
                        hemispheres.append(True)
                    elif label is not None and label.lower().endswith('l'):
                        hemispheres.append(False)
                    else:
                        hemispheres = None
                        break
            if hemispheres is not None:
                self.hemispheres = numpy.array(hemispheres, dtype=bool)
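    # Example of the heuristic above (a sketch): labels like ['rA1', 'lA1']
    # (prefix) or ['A1R', 'A1L'] (suffix) both yield
    # hemispheres == numpy.array([True, False]); mixed or unprefixed labels
    # leave self.hemispheres untouched.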

    def transform_remove_self_connections(self):
        """
        Remove the values from the main diagonal (self-connections)

        """

        nor = self.number_of_regions
        result = copy(self.weights)
        result = result - result * numpy.eye(nor, nor)
        return result
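    # An equivalent, arguably clearer sketch using numpy.fill_diagonal
    # (operates in place on the copy, same result as above):
    #
    #     result = copy(self.weights)
    #     numpy.fill_diagonal(result, 0.0)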

    def scaled_weights(self, mode='tract'):
        """
        Scale the connection strengths (weights) and return the scaled matrix.
        Three simple types of scaling are supported.
        The ``mode`` argument is one of the following:

            'tract': Scale by a value such that the maximum absolute value of a single
                connection is 1.0. (Global scaling)

            'region': Scale by a value such that the maximum absolute value of the
                cumulative input to any region is 1.0. (Node-wise scaling)

            None: does nothing.

        NOTE: Currently multiple 'tract' and/or 'region' scalings without
            intermediate 'none' scaling mode destroy the ability to recover
            the original un-scaled weights matrix.

        """
        # NOTE: It is not yet clear how or if we will integrate this functionality
        #      into the UI. Currently the same effect can be achieved manually
        #      by using the coupling functions, it is just that, in certain
        #      situations, things are simplified by starting from a normalised
        #      weights matrix. However, in other situations it is not desirable
        #      to have a simple normalisation of this sort.
        # NOTE: We should probably separate the two cases implemented here into
        #      'scaling' and 'normalisation'. Normalisation implies that the norm
        #      of the samples is equal to 1, while here it is only scaling by a factor.

        LOG.info("Starting to normalize to mode: %s" % str(mode))

        normalisation_factor = None
        if mode in ("tract", "edge"):
            # global scaling
            normalisation_factor = numpy.abs(self.weights).max()
        elif mode in ("region", "node"):
            # node-wise scaling
            normalisation_factor = numpy.max(numpy.abs(self.weights.sum(axis=1)))
        elif mode in (None, "none"):
            normalisation_factor = 1.0
        else:
            LOG.error("Bad weights normalisation mode, must be one of:")
            LOG.error("('tract', 'edge', 'region', 'node', 'none')")
            raise Exception("Bad weights normalisation mode")

        LOG.debug("Normalization factor is: %s" % str(normalisation_factor))
        mask = self.weights != 0.0
        result = copy(self.weights)
        result[mask] = self.weights[mask] / normalisation_factor
        return result
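    # A usage sketch for the scaling modes (hypothetical; assumes a configured
    # Connectivity instance `conn` with a populated weights matrix):
    #
    #     scaled = conn.scaled_weights(mode='tract')
    #     assert numpy.abs(scaled).max() == 1.0  # strongest single edge is 1.0
    #     scaled = conn.scaled_weights(mode='region')
    #     assert numpy.allclose(numpy.abs(scaled.sum(axis=1)).max(), 1.0)  # strongest input is 1.0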

    def transform_binarize_matrix(self):
        """
        Transforms the weights matrix into a binary (unweighted) matrix
        """
        LOG.info("Transforming weighted matrix into unweighted matrix")

        result = copy(self.weights)
        result = numpy.where(result > 0, 1, result)
        return result

    def switch_distribution(self, matrix='tract_lengths', mode='none', seed=42):
        """
        Permutation and resampling methods for the weights and distance
        (tract_lengths) matrices.
        'none'      : leaves the matrix unchanged.
        'shuffle'   : randomize the elements of the 'matrix' matrix. Fisher-Yates
                      algorithm.

                      for i from n - 1 downto 1 do
                          j <- random integer with 0 :math:`\leq` j :math:`\leq` i
                          exchange a[j] and a[i]

        'mean'      : sets all the values to the sample mean value.
        'empirical' : uses the gaussian_kde to estimate the underlying pdf of the
                      values and randomly samples a new matrix.

        'analytical': defined pdf. Fits the data to the distribution to get the
                      corresponding parameters and then randomly samples a new
                      matrix.
        """
        # Empirical seems to fail on some scipy installations. Error is not pinned down
        # so far, it seems to only happen on some machines. Most relevant related to this:
        #
        # http://projects.scipy.org/scipy/ticket/1735
        # http://comments.gmane.org/gmane.comp.python.scientific.devel/14816
        # http://permalink.gmane.org/gmane.comp.python.numeric.general/42082
        rng = numpy.random.RandomState(seed)  # keep a handle so the seed is actually used
        temp = getattr(self, matrix)
        D = copy(temp)
        msg = "The distribution of the %s matrix will be changed" % matrix
        LOG.info(msg)

        if mode == 'none':
            LOG.info("Maybe not ... Doing nothing")

        elif mode == 'shuffle':

            for i in reversed(xrange(1, D.shape[0])):
                j = int(rng.rand() * (i + 1))
                D[:, i], D[:, j] = D[:, j].copy(), D[:, i].copy()
                D[i, :], D[j, :] = D[j, :].copy(), D[i, :].copy()

        elif mode == 'mean':
            D[:] = D[self.weights > 0].mean()

        elif mode == 'empirical':

            from scipy import stats
            kernel = stats.gaussian_kde(D[D > 0].flatten())
            D = kernel.resample(size=D.size).reshape(D.shape)

            if numpy.any(D < 0):
                # NOTE: The KDE method is not perfect, there are still very
                #       small probabilities for negative values around 0.
                # TODO: change the kde bandwidth method
                LOG.warning("Found negative values. Setting them to 0.0")
                D = numpy.where(D < 0.0, 0.0, D)

                # NOTE: if we need the cdf: kernel.integrate_box_1d(lo, hi)
                # TODO: make a subclass using rv_continous, might be more accurate

        elif mode == 'analytical':
            LOG.warning("Analytical mode has not been implemented yet.")
            # NOTE: pdf name could be an argument.
        # Keep the original sparsity pattern: entries that were zero stay zero.
        D = numpy.where(temp > 0, D, 0)
        # NOTE: Consider saving a copy of the original matrix?
        setattr(self, matrix, D)

    def motif_linear_directed(self, number_of_regions=4, max_radius=100., return_type=None):
        """
        Generates a linear (open chain) unweighted directed graph with equidistant nodes.
        """

        iu1 = numpy.triu_indices(number_of_regions, 1)
        iu2 = numpy.triu_indices(number_of_regions, 2)

        self.weights = numpy.zeros((number_of_regions, number_of_regions))
        self.weights[iu1] = 1.0
        self.weights[iu2] = 0.0

        self.tract_lengths = max_radius * copy(self.weights)
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

        if return_type is not None:
            return self.weights, self.tract_lengths
        else:
            pass

    def motif_linear_undirected(self, number_of_regions=4, max_radius=42.):
        """
        Generates a linear (open chain) unweighted undirected graph with equidistant nodes.
        """

        self.weights, self.tract_lengths = self.motif_linear_directed(number_of_regions=number_of_regions,
                                                                      max_radius=max_radius,
                                                                      return_type=True)

        self.weights += self.weights.T
        self.tract_lengths += self.tract_lengths.T
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

    def motif_chain_directed(self, number_of_regions=4, max_radius=42., return_type=None):
        """
        Generates a closed unweighted directed graph with equidistant nodes.
        Depending on the centres it could be a box or a ring.
        """

        self.weights, self.tract_lengths = self.motif_linear_directed(number_of_regions=number_of_regions,
                                                                      max_radius=max_radius,
                                                                      return_type=True)

        self.weights[-1, 0] = 1.0
        self.tract_lengths[-1, 0] = max_radius
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

        if return_type is not None:
            return self.weights, self.tract_lengths
        else:
            pass

    def motif_chain_undirected(self, number_of_regions=4, max_radius=42.):
        """
        Generates a closed unweighted undirected graph with equidistant nodes.
        Depending on the centres it could be a box or a ring.
        """

        self.weights, self.tract_lengths = self.motif_chain_directed(number_of_regions=number_of_regions,
                                                                     max_radius=max_radius,
                                                                     return_type=True)

        self.weights[0, -1] = 1.0
        self.tract_lengths[0, -1] = max_radius
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')

    def motif_all_to_all(self, number_of_regions=4, max_radius=42.):
        """
        Generates an all-to-all closed unweighted undirected graph with equidistant nodes.
        Self-connections are not included.
        """

        diagonal_elements = numpy.diag_indices(number_of_regions)

        self.weights = numpy.ones((number_of_regions, number_of_regions))
        self.weights[diagonal_elements] = 0.0
        self.tract_lengths = max_radius * copy(self.weights)
        self.number_of_regions = number_of_regions
        self.create_region_labels(mode='numeric')
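    # A sketch of the motif builders above (hypothetical usage):
    #
    #     conn = Connectivity()
    #     conn.motif_all_to_all(number_of_regions=4, max_radius=42.)
    #     # conn.weights is now a 4x4 ones matrix with a zero diagonal, and
    #     # conn.tract_lengths puts every connected pair at distance 42.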

    def centres_spherical(self, number_of_regions=4, max_radius=42., flat=False):
        """
        The nodes positions are distributed on a sphere.
        See: http://mathworld.wolfram.com/SphericalCoordinates.html

        If flat is True, the polar angle is fixed at phi = pi/2, so the nodes
        lie inside a circle in the z = 0 plane.

        r    : radial coordinate
        theta: azimuthal angle
        phi  : polar angle
        """

        # azimuth
        theta = numpy.random.uniform(low=-numpy.pi, high=numpy.pi, size=number_of_regions)

        # uniform variate used to invert the radial CDF
        u = numpy.random.uniform(low=0.0, high=1.0, size=number_of_regions)

        if flat:
            cosphi = 0.0
        else:
            # cos(elevation)
            cosphi = numpy.random.uniform(low=-1.0, high=1.0, size=number_of_regions)

        phi = numpy.arccos(cosphi)
        r = max_radius * pow(u, 1 / 3.0)  # cube root gives uniform density inside the ball

        # To Cartesian coordinates
        x = r * numpy.sin(phi) * numpy.cos(theta)
        y = r * numpy.sin(phi) * numpy.sin(theta)
        z = r * numpy.cos(phi)

        self.centres = numpy.array([x, y, z]).T
        # Unit radial orientation per node (norm taken per row, i.e. per node)
        norm_xyz = numpy.sqrt(numpy.sum(self.centres ** 2, axis=1))
        self.orientations = self.centres / norm_xyz[:, numpy.newaxis]
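    # Why the transforms above give uniform points: taking cos(phi) uniform in
    # [-1, 1] (rather than phi itself) compensates for the sin(phi) area
    # element on the sphere, and the cube root of a uniform variate gives a
    # radius with density proportional to r**2, i.e. uniform density in the
    # ball. A quick check (a sketch):
    #
    #     u = numpy.random.uniform(size=100000)
    #     r = 42. * u ** (1 / 3.0)
    #     # roughly 50% of samples fall inside r < 42. * 0.5 ** (1 / 3.0)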

    def centres_toroidal(self, number_of_regions=4, max_radius=77., min_radius=13., mu=numpy.pi, kappa=numpy.pi / 6):
        """
        The nodes lie on a torus.
        See: http://mathworld.wolfram.com/Torus.html

        """

        u = scipy.stats.vonmises.rvs(kappa, loc=mu, size=number_of_regions)
        v = scipy.stats.vonmises.rvs(kappa, loc=mu, size=number_of_regions)

        # To cartesian coordinates
        x = (max_radius + min_radius * numpy.cos(v)) * numpy.cos(u)
        y = (max_radius + min_radius * numpy.cos(v)) * numpy.sin(u)
        z = min_radius * numpy.sin(v)

        # Tangent vector in the direction of increasing u (around the main ring)
        tx = -numpy.sin(u)
        ty = numpy.cos(u)
        tz = 0

        # Tangent vector in the direction of increasing v (around the tube)
        sx = -numpy.sin(v) * numpy.cos(u)
        sy = -numpy.sin(v) * numpy.sin(u)
        sz = numpy.cos(v)

        # Normal vector
        nx = ty * sz - tz * sy
        ny = tz * sx - tx * sz
        nz = tx * sy - ty * sx

        # Normalize normal vectors
        norm = numpy.sqrt(nx ** 2 + ny ** 2 + nz ** 2)
        nx /= norm
        ny /= norm
        nz /= norm

        self.orientations = numpy.array([nx, ny, nz]).T
        self.centres = numpy.array([x, y, z]).T

    def centres_annular(self, number_of_regions=4, max_radius=77., min_radius=13., mu=numpy.pi, kappa=numpy.pi / 6):
        """
        The nodes are lying inside an annulus.

        """

        r = numpy.random.uniform(low=min_radius, high=max_radius, size=number_of_regions)
        theta = scipy.stats.vonmises.rvs(kappa, loc=mu, size=number_of_regions)

        # To cartesian coordinates
        x = r * numpy.cos(theta)
        y = r * numpy.sin(theta)
        z = numpy.zeros(number_of_regions)

        self.centres = numpy.array([x, y, z]).T

    def centres_cubic(self, number_of_regions=4, max_radius=42., flat=False):
        """
        The nodes are placed at equal steps along the main diagonal of the cube
        centred at the origin, with edges parallel to the axes and an edge
        length of 2*max_radius (if flat is True, in the z = 0 plane).

        """

        # Equally spaced coordinates along each axis
        x = numpy.linspace(-max_radius, max_radius, number_of_regions)
        y = numpy.linspace(-max_radius, max_radius, number_of_regions)

        if flat:
            z = numpy.zeros(number_of_regions)
        else:
            z = numpy.linspace(-max_radius, max_radius, number_of_regions)

        self.centres = numpy.array([x, y, z]).T

    def generate_surrogate_connectivity(self, number_of_regions, motif='chain', undirected=True,
                                        these_centres='spherical'):
        """
        Generates a default surrogate connectivity.
        For more specific motifs, invoke each builder method separately.

        """

        # NOTE: Luckily I went for 5 motifs ...
        if motif == 'chain' and undirected:
            self.motif_chain_undirected(number_of_regions=number_of_regions)
        elif motif == "chain" and not undirected:
            self.motif_chain_directed(number_of_regions=number_of_regions)
        elif motif == 'linear' and undirected:
            self.motif_linear_undirected(number_of_regions=number_of_regions)
        elif motif == 'linear' and not undirected:
            self.motif_linear_directed(number_of_regions=number_of_regions)
        else:
            LOG.info("Generating all-to-all connectivity \\")
            self.motif_all_to_all(number_of_regions=number_of_regions)

        # centres
        if these_centres in ("spherical", "annular", "toroidal", "cubic"):
            eval("self.centres_" + these_centres + "(number_of_regions=number_of_regions)")
        else:
            raise Exception("Bad centres geometry")

    def create_region_labels(self, mode="numeric"):

        """
        Assumes weights already exists
        """

        LOG.info("Create labels: %s" % str(mode))

        if mode in ("numeric", "num"):
            self.region_labels = [n for n in xrange(self.number_of_regions)]
            self.region_labels = numpy.array(self.region_labels).astype(str)
        elif mode in ("alphabetic", "alpha"):
            import string
            if self.number_of_regions <= 26:
                self.region_labels = numpy.array(list(map(chr, range(65, 65 + self.number_of_regions)))).astype(str)
            else:
                LOG.info("I'm too lazy to create several strategies to label regions. \\")
                LOG.info("Please choose mode 'numeric' or set your own labels\\")
        else:
            LOG.error("Bad region labels mode, must be one of:")
            LOG.error("('numeric', 'num', 'alphabetic', 'alpha')")
            raise Exception("Bad region labels mode")

    def unmapped_indices(self, region_mapping):
        """
        Compute vector of indices of regions in connectivity which are not in the given
        region mapping.

        """

        return numpy.setdiff1d(numpy.r_[:self.number_of_regions], region_mapping)

    @staticmethod
    def from_file(source_file="connectivity_76.zip", instance=None):

        if instance is None:
            result = Connectivity()
        else:
            result = instance

        source_full_path = try_get_absolute_path("tvb_data.connectivity", source_file)

        if source_file.endswith(".h5"):

            reader = H5Reader(source_full_path)

            result.weights = reader.read_field("weights")
            result.centres = reader.read_field("centres")
            result.region_labels = reader.read_field("region_labels")
            result.orientations = reader.read_optional_field("orientations")
            result.cortical = reader.read_optional_field("cortical")
            result.hemispheres = reader.read_field("hemispheres")
            result.areas = reader.read_optional_field("areas")
            result.tract_lengths = reader.read_field("tract_lengths")

        else:
            reader = ZipReader(source_full_path)

            result.weights = reader.read_array_from_file("weights")
            result.centres = reader.read_array_from_file("centres", use_cols=(1, 2, 3))
            result.region_labels = reader.read_array_from_file("centres", dtype=str, use_cols=(0,))
            result.orientations = reader.read_optional_array_from_file("average_orientations")
            result.cortical = reader.read_optional_array_from_file("cortical", dtype=bool)
            result.hemispheres = reader.read_optional_array_from_file("hemispheres", dtype=bool)
            result.areas = reader.read_optional_array_from_file("areas")
            result.tract_lengths = reader.read_array_from_file("tract_lengths")

        return result
Example #28
0
class TimeSeries(types_mapped.MappedType):
    """
    Base time-series dataType.
    """

    title = basic.String

    data = arrays.FloatArray(
        label="Time-series data",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc="""An array of time-series data, with a shape of [tpts, :], where ':' represents 1 or more dimensions""")

    nr_dimensions = basic.Integer(
        label="Number of dimension in timeseries",
        default=4)

    length_1d, length_2d, length_3d, length_4d = [basic.Integer] * 4

    labels_ordering = basic.List(
        default=["Time", "State Variable", "Space", "Mode"],
        label="Dimension Names",
        doc="""List of strings representing names of each data dimension""")

    labels_dimensions = basic.Dict(
        default={},
        label="Specific labels for each dimension for the data stored in this timeseries.",
        doc=""" A dictionary containing mappings of the form {'dimension_name' : [labels for this dimension] }""")

    time = arrays.FloatArray(
        file_storage=core.FILE_STORAGE_EXPAND,
        label="Time-series time",
        required=False,
        doc="""An array of time values for the time-series, with a shape of [tpts,].
        This is 'time' as returned by the simulator's monitors.""")

    start_time = basic.Float(label="Start Time:")

    sample_period = basic.Float(label="Sample period", default=1.0)

    # Specify the measure unit for sample period (e.g sec, msec, usec, ...)
    sample_period_unit = basic.String(
        label="Sample Period Measure Unit",
        default="ms")

    sample_rate = basic.Float(
        label="Sample rate",
        doc="""The sample rate of the timeseries""")

    has_surface_mapping = basic.Bool(default=True)
    has_volume_mapping = basic.Bool(default=False)

    def configure(self):
        """
        After populating a few fields, compute the remaining derived fields.
        """
        super(TimeSeries, self).configure()
        data_shape = self.read_data_shape()
        self.nr_dimensions = len(data_shape)
        self.sample_rate = 1.0 / self.sample_period

        for i in range(min(self.nr_dimensions, 4)):
            setattr(self, 'length_%dd' % (i + 1), int(data_shape[i]))

    def read_data_shape(self):
        """
        Expose shape read on field data.
        """
        try:
            return self.get_data_shape('data')
        except exceptions.TVBException:
            self.logger.exception("Could not read data shape for TS!")
            raise exceptions.TVBException("Invalid empty TimeSeries!")

    def read_data_slice(self, data_slice):
        """
        Expose chunked-data access.
        """
        return self.get_data('data', data_slice)

    def read_time_page(self, current_page, page_size, max_size=None):
        """
        Compute time for current page.
        :param current_page: Starting from 0
        """
        current_page = int(current_page)
        page_size = int(page_size)

        if max_size is None:
            max_size = page_size
        else:
            max_size = int(max_size)

        page_real_size = page_size * self.sample_period
        start_time = self.start_time + current_page * page_real_size
        end_time = start_time + min(page_real_size, max_size * self.sample_period)

        return numpy.arange(start_time, end_time, self.sample_period)
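    # A worked sketch of the paging arithmetic above (hypothetical values):
    # with sample_period=2.0 ms, start_time=0.0, page_size=5 and
    # current_page=1, page_real_size is 10.0 ms, so the page covers
    # t = 10, 12, 14, 16, 18.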

    def read_channels_page(self, from_idx, to_idx, step=None, specific_slices=None, channels_list=None):
        """
        Read and return only the data page for the specified channels list.

        :param from_idx: the starting time idx from which to read data
        :param to_idx: the end time idx up until to which you read data
        :param step: increments in which to read the data. Optional, default to 1.
        :param specific_slices: optional parameter. If specified, slices the data accordingly.
        :param channels_list: the list of channels for which we want data
        """
        if channels_list:
            channels_list = json.loads(channels_list)
            for i in range(len(channels_list)):
                channels_list[i] = int(channels_list[i])

        if channels_list:
            channel_slice = tuple(channels_list)
        else:
            channel_slice = slice(None)

        data_page = self.read_data_page(from_idx, to_idx, step, specific_slices)
        # This is just a 1D array like in the case of Global Average monitor.
        # No need for the channels list
        if len(data_page.shape) == 1:
            return data_page.reshape(data_page.shape[0], 1)
        else:
            return data_page[:, channel_slice]

    def read_data_page(self, from_idx, to_idx, step=None, specific_slices=None):
        """
        Retrieve one page of data (paging done based on time).
        """
        from_idx, to_idx = int(from_idx), int(to_idx)

        if isinstance(specific_slices, basestring):
            specific_slices = json.loads(specific_slices)
        if step is None:
            step = 1
        else:
            step = int(step)

        slices = []
        overall_shape = self.read_data_shape()
        for i in range(len(overall_shape)):
            if i == 0:
                # Time slice
                slices.append(
                    slice(from_idx, min(to_idx, overall_shape[0]), step))
                continue
            if i == 2:
                # Read full of the main_dimension (space for the simulator)
                slices.append(slice(overall_shape[i]))
                continue
            if specific_slices is None:
                slices.append(slice(0, 1))
            else:
                slices.append(slice(specific_slices[i], min(specific_slices[i] + 1, overall_shape[i]), 1))

        data = self.read_data_slice(tuple(slices))
        if len(data) == 1:
            # Do not allow the time dimension to get squeezed; a 2D result needs
            # to come out of this method.
            data = data.squeeze()
            data = data.reshape((1, len(data)))
        else:
            data = data.squeeze()

        return data
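    # A sketch of the slices built above for a 4D series shaped
    # (tpts, state_vars, space, modes), e.g. (1000, 2, 76, 1), with
    # from_idx=0, to_idx=100, step=1 and specific_slices=None:
    #
    #     (slice(0, 100, 1), slice(0, 1), slice(76), slice(0, 1))
    #
    # i.e. a time window, the first state variable, all of space, first mode.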

    def read_data_page_split(self, from_idx, to_idx, step=None, specific_slices=None):
        """
        No Split needed in case of basic TS (sensors and region level)
        """
        return self.read_data_page(from_idx, to_idx, step, specific_slices)


    def write_time_slice(self, partial_result):
        """
        Append a new value to the ``time`` attribute.
        """
        self.store_data_chunk("time", partial_result, grow_dimension=0, close_file=False)

    def write_data_slice(self, partial_result, grow_dimension=0):
        """
        Append a chunk of time-series data to the ``data`` attribute.
        """
        self.store_data_chunk("data", partial_result, grow_dimension=grow_dimension, close_file=False)

    def get_min_max_values(self):
        """
        Retrieve the minimum and maximum values from the metadata.
        :returns: (minimum_value, maximum_value)
        """
        metadata = self.get_metadata('data')
        return metadata[self.METADATA_ARRAY_MIN], metadata[self.METADATA_ARRAY_MAX]

    def get_space_labels(self):
        """
        It assumes that we want to select along the 3rd dimension,
        and generates labels for each point in that dimension.
        Subclasses are more specific.
        :return: An array of strings.
        """
        if self.nr_dimensions > 2:
            return ['signal-%d' % i for i in range(self._length_3d)]
        else:
            return []

    def get_grouped_space_labels(self):
        """
        :return: A list of label groups. A label group is a tuple (name, [(label_idx, label)...]).
                 Default all labels in a group named ''
        """
        return [('', list(enumerate(self.get_space_labels())))]

    def get_default_selection(self):
        """
        :return: The measure point indices that have to be shown by default. By default show all.
        """
        return range(len(self.get_space_labels()))

    def get_measure_points_selection_gid(self):
        """
        :return: a datatype gid with which to obtain all valid measure point selections for this
                 time series. We have to decide if the default should be all selections or none.
        """
        return ''

    @staticmethod
    def accepted_filters():
        filters = types_mapped.MappedType.accepted_filters()
        filters.update({'datatype_class._nr_dimensions': {'type': 'int', 'display': 'No of Dimensions',
                                                          'operations': ['==', '<', '>']},
                        'datatype_class._sample_period': {'type': 'float', 'display': 'Sample Period',
                                                          'operations': ['==', '<', '>']},
                        'datatype_class._sample_rate': {'type': 'float', 'display': 'Sample Rate',
                                                        'operations': ['==', '<', '>']},
                        'datatype_class._title': {'type': 'string', 'display': 'Title',
                                                  'operations': ['==', '!=', 'like']}})
        return filters

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {"Time-series type": self.__class__.__name__,
                   "Time-series name": self.title,
                   "Dimensions": self.labels_ordering,
                   "Time units": self.sample_period_unit,
                   "Sample period": self.sample_period,
                   "Length": self.sample_period * self.get_data_shape('data')[0]}
        summary.update(self.get_info_about_array('data'))
        return summary
Example #29
0
class TimeSeriesInteractive(core.Type):
    """
    For generating an interactive time-series figure, given one of TVB's 
    TimeSeries datatypes to initialise it. The graphical interface for 
    visualising a timeseries provides controls for setting:

        - Window length
        - Amplitude scaling
        - Stepping forward/backward through time.


    """

    time_series = time_series_datatypes.TimeSeries(
        label="Timeseries",
        default=None,
        required=True,
        doc="""The TVB TimeSeries datatype to be displayed.""")

    first_n = basic.Integer(
        label="Display the first 'n'",
        default=-1,
        required=True,
        doc="""Primarily intended for displaying the first N components of a 
            surface PCA timeseries. Defaults to -1, meaning it'll display all
            of 'space' (i.e. regions, vertices, or channels). In other words,
            for Region or M/EEG timeseries you can ignore this, but, for a 
            surface timeseries it really must be set.""")

    def __init__(self, **kwargs):
        """
        Initialise figure handles, view state and widget references to None;
        the real setup happens in configure().

        """
        super(TimeSeriesInteractive, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        #figure
        self.its_fig = None

        #time-series
        self.ts_ax = None
        self.ts_view = None
        self.whereami_ax = None
        self.hereiam = None

        #Current state
        self.window_length = None
        self.scaling = 0.42
        self.offset = None
        self.view_step = None
        self.time_view = None
        self.channel_view = None
        #self.mode = 0

        #Selectors
        #self.mode_selector = None

        #Sliders
        self.window_length_slider = None
        self.scaling_slider = None
        self.time_slider = None

        #time-view buttons
        self.step_back_button = None
        self.step_forward_button = None
        self.big_step_back_button = None
        self.big_step_forward_button = None
        self.start_button = None
        self.end_button = None

    def configure(self):
        """ Seperate configure cause ttraits be busted... """
        #TODO: if isinstance(self.time_series, TimeSeriesSurface) and self.first_n == -1: #LOG.error, return.
        self.data = (self.time_series.data[:, :, :self.first_n, :] -
                     self.time_series.data[:, :, :self.first_n, :].mean(
                         axis=0)[numpy.newaxis, :])
        self.period = self.time_series.sample_period
        self.tpts = self.data.shape[0]
        self.nsrs = self.data.shape[2]
        self.time = numpy.arange(self.tpts) * self.period
        self.start_time = self.time[0]
        self.end_time = self.time[-1]
        self.time_series_length = self.end_time - self.start_time
        self.peak_to_peak = (numpy.max(self.data) - numpy.min(self.data))

        #Use actual labels if they exist.
        if (isinstance(self.time_series,
                       time_series_datatypes.TimeSeriesRegion)
                and self.time_series.connectivity is not None):
            self.labels = self.time_series.connectivity.region_labels
        elif (isinstance(self.time_series,
                         (time_series_datatypes.TimeSeriesEEG,
                          time_series_datatypes.TimeSeriesMEG))
              and self.time_series.sensors is not None):
            self.labels = self.time_series.sensors.labels
        else:
            self.labels = ["channel_%0.2d" % k for k in range(self.nsrs)]

        #Current state
        self.window_length = self.tpts * self.period
        self.view_step = max(int(self.tpts / TIME_RESOLUTION), 1)
        self.time_view = range(0, self.tpts, self.view_step)

    def show(self):
        """ Generate the interactive time-series figure. """
        time_series_type = self.time_series.__class__.__name__
        msg = "Generating an interactive time-series plot for %s"
        if isinstance(self.time_series,
                      time_series_datatypes.TimeSeriesSurface):
            LOG.warning("Intended for region and sensors, not surfaces.")
        LOG.info(msg % time_series_type)

        #Make the figure:
        self.create_figure()

        #Selectors
        #self.add_mode_selector()

        #Sliders
        self.add_window_length_slider()
        self.add_scaling_slider()
        #self.add_time_slider()

        #time-view buttons
        self.add_step_back_button()
        self.add_step_forward_button()
        self.add_big_step_back_button()
        self.add_big_step_forward_button()
        self.add_start_button()
        self.add_end_button()

        #Plot timeseries
        self.plot_time_series()

        pylab.show()

    ##------------------------------------------------------------------------##
    ##------------------ Functions for building the figure -------------------##
    ##------------------------------------------------------------------------##
    def create_figure(self):
        """ Create the figure and time-series axes. """
        #time_series_type = self.time_series.__class__.__name__
        try:
            figure_window_title = "Interactive time series: "  #+ time_series_type
            #            pylab.close(figure_window_title)
            self.its_fig = pylab.figure(num=figure_window_title,
                                        figsize=(14, 8),
                                        facecolor=BACKGROUNDCOLOUR,
                                        edgecolor=EDGECOLOUR)
        except ValueError:
            LOG.info("My life would be easier if you'd update your PyLab...")
            figure_number = 42
            pylab.close(figure_number)
            self.its_fig = pylab.figure(num=figure_number,
                                        figsize=(14, 8),
                                        facecolor=BACKGROUNDCOLOUR,
                                        edgecolor=EDGECOLOUR)

        self.ts_ax = self.its_fig.add_axes([0.1, 0.1, 0.85, 0.85])

        self.whereami_ax = self.its_fig.add_axes([0.1, 0.95, 0.85, 0.025],
                                                 axisbg=BACKGROUNDCOLOUR)
        self.whereami_ax.set_axis_off()
        if hasattr(self.whereami_ax, 'autoscale'):
            self.whereami_ax.autoscale(enable=True, axis='both', tight=True)
        self.whereami_ax.plot(self.time_view,
                              numpy.zeros((len(self.time_view), )),
                              color="0.3",
                              linestyle="--")
        self.hereiam = self.whereami_ax.plot(self.time_view,
                                             numpy.zeros(
                                                 (len(self.time_view), )),
                                             'b-',
                                             linewidth=4)

#    def add_mode_selector(self):
#        """
#        Add a radio button to the figure for selecting which mode of the model
#        should be displayed.
#        """
#        pos_shp = [0.02, 0.07, 0.04, 0.1 + 0.002 * self.data.shape[3]]
#        mode_ax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="Mode")
#        mode_tuple = tuple(range(self.model.number_of_modes))
#        self.mode_selector = widgets.RadioButtons(mode_ax, mode_tuple, active=0)
#        self.mode_selector.on_clicked(self.update_mode)

#    def add_time_sliders(self):
#        """
#        Add a slider to allow the time-series window length to be adjusted.
#        """
#        pos_shp = [0.2, 0.02, 0.7, 0.025]
#        slax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR)
#
#        self.current_time_slider = widgets.Slider(slax, "Time", self.start_time,
#                                          self.end_time,
#                                          valinit = self.current_time)
#        self.current_time.on_changed(self.update_time)

    def add_window_length_slider(self):
        """
        Add a slider to allow the time-series window length to be adjusted.
        """
        pos_shp = [0.15, 0.02, 0.175, 0.035]
        slax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR)

        self.window_length_slider = widgets.Slider(slax,
                                                   "Window length",
                                                   TIME_RESOLUTION *
                                                   self.period,
                                                   self.time_series_length,
                                                   valinit=self.window_length,
                                                   valfmt="%d")
        self.window_length_slider.on_changed(self.update_window_length)

    #TODO: Add a conversion so this is an amplitude scaling, say 1.0-20.0
    def add_scaling_slider(self):
        """ Add a slider to allow scaling of the offset of time-series. """
        pos_shp = [0.75, 0.02, 0.175, 0.035]
        sax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR)

        self.scaling_slider = widgets.Slider(sax,
                                             "Spacing",
                                             0.0,
                                             1.25,
                                             valinit=self.scaling,
                                             valfmt="%4.2f")
        self.scaling_slider.on_changed(self.update_scaling)

    def add_step_back_button(self):
        """ Add a button to step back by 4 view_steps. """
        bax = self.its_fig.add_axes([0.5, 0.015, 0.04, 0.045])
        self.step_back_button = widgets.Button(bax,
                                               '<',
                                               color=BUTTONCOLOUR,
                                               hovercolor=HOVERCOLOUR)

        self.step_back_button.on_clicked(self.step_back)

    def add_step_forward_button(self):
        """ Add a button to step forward by 4 view_steps. """
        bax = self.its_fig.add_axes([0.54, 0.015, 0.04, 0.045])
        self.step_forward_button = widgets.Button(bax,
                                                  '>',
                                                  color=BUTTONCOLOUR,
                                                  hovercolor=HOVERCOLOUR)

        self.step_forward_button.on_clicked(self.step_forward)

    def add_big_step_back_button(self):
        """ Add a button to step back by 1/4 window_length. """
        bax = self.its_fig.add_axes([0.46, 0.015, 0.04, 0.045])
        self.big_step_back_button = widgets.Button(bax,
                                                   '<<',
                                                   color=BUTTONCOLOUR,
                                                   hovercolor=HOVERCOLOUR)

        self.big_step_back_button.on_clicked(self.bigstep_back)

    def add_big_step_forward_button(self):
        """ Add a button to step forward by 1/4 window_length. """
        bax = self.its_fig.add_axes([0.58, 0.015, 0.04, 0.045])
        self.big_step_forward_button = widgets.Button(bax,
                                                      '>>',
                                                      color=BUTTONCOLOUR,
                                                      hovercolor=HOVERCOLOUR)

        self.big_step_forward_button.on_clicked(self.bigstep_forward)

    def add_start_button(self):
        """ Add a button to jump back to the start of the timeseries. """
        bax = self.its_fig.add_axes([0.42, 0.015, 0.04, 0.045])
        self.start_button = widgets.Button(bax,
                                           '|<<<',
                                           color=BUTTONCOLOUR,
                                           hovercolor=HOVERCOLOUR)

        self.start_button.on_clicked(self.jump_to_start)

    def add_end_button(self):
        """ Add a button to jump forward to the end of the timeseries. """
        bax = self.its_fig.add_axes([0.62, 0.015, 0.04, 0.045])
        self.end_button = widgets.Button(bax,
                                         '>>>|',
                                         color=BUTTONCOLOUR,
                                         hovercolor=HOVERCOLOUR)

        self.end_button.on_clicked(self.jump_to_end)

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the state --------------------##
    ##------------------------------------------------------------------------##

    def step_back(self, event=None):
        """ Step the timeview back by a single view step. """
        LOG.debug("step_back accessed with event: %s" % str(event))
        step = 4 * self.view_step
        if self.time_view[0] - step >= 0:
            self.time_view = [k - step for k in self.time_view]
            self.update_time_series()

    def step_forward(self, event=None):
        """ Step the timeview forward by a single view step. """
        LOG.debug("step_forward accessed with event: %s" % str(event))
        step = 4 * self.view_step
        if self.time_view[-1] + step < self.tpts:
            self.time_view = [k + step for k in self.time_view]
            self.update_time_series()

    def bigstep_back(self, event=None):
        """ Step the timeview back by 1/4 window length. """
        LOG.debug("bigstep_back accessed with event: %s" % str(event))
        step = int(self.view_step * TIME_RESOLUTION / 4)
        if self.time_view[0] - step >= 0:
            self.time_view = [k - step for k in self.time_view]
            self.update_time_series()
        else:
            self.jump_to_start()

    def bigstep_forward(self, event=None):
        """ Step the timeview forward by 1/4 window length. """
        LOG.debug("bigstep_forward accessed with event: %s" % str(event))
        step = int(self.view_step * TIME_RESOLUTION / 4)
        if self.time_view[-1] + step < self.tpts:
            self.time_view = [k + step for k in self.time_view]
            self.update_time_series()
        else:
            self.jump_to_end()

    def jump_to_start(self, event=None):
        """ Jump to the start of the timeseries. """
        LOG.debug("jump_to_start accessed with event: %s" % str(event))
        step = self.time_view[0]
        self.time_view = [k - step for k in self.time_view]
        self.update_time_series()

    def jump_to_end(self, event=None):
        """ Jump to the end of the timeseries."""
        LOG.debug("jump_to_end accessed with event: %s" % str(event))
        step = self.tpts - 1 - self.time_view[-1]
        self.time_view = [k + step for k in self.time_view]
        self.update_time_series()

    def update_time_view(self):
        """ Update the time_view when window length is changed. """
        tpts = self.window_length / self.period
        self.view_step = max(int(tpts / TIME_RESOLUTION), 1)
        window_start = self.time_view[0]
        window_end = min(window_start + self.view_step * (TIME_RESOLUTION - 1),
                         self.tpts)
        self.time_view = range(window_start, window_end, self.view_step)

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the figure -------------------##
    ##------------------------------------------------------------------------##


#    def update_mode(self, label):
#        """ Update the visualised mode based on radio button selection. """
#        self.mode = label
#        self.update_time_series()

    def update_window_length(self, length):
        """
        Update timeseries window length based on the time window slider value.
        """
        self.window_length = length
        self.update_time_view()
        self.update_time_series()

    def update_scaling(self, scaling):
        """
        Update timeseries scaling based on the scaling slider value.
        """
        self.scaling = scaling
        self.update_time_series()

    def update_time_series(self):
        """ Clear the axes and redraw the time-series. """
        self.ts_ax.clear()
        self.plot_time_series()

    def plot_time_series(self):
        """ Plot a view on the timeseries. """
        # Set title and axis labels
        #time_series_type = self.time_series.__class__.__name__
        #self.ts_ax.set(title = time_series_type)
        #self.ts_ax.set(xlabel = "Time (%s)" % self.units)

        # This assumes shape => (time, space)
        step = self.scaling * self.peak_to_peak
        if step == 0:
            offset = 0.0
        else:  #NOTE: specifying step in arange is faster, but it fence-posts.
            offset = numpy.arange(0, self.nsrs) * step
        if hasattr(self.ts_ax, 'autoscale'):
            self.ts_ax.autoscale(enable=True, axis='both', tight=True)

        self.ts_ax.set_yticks(offset)
        self.ts_ax.set_yticklabels(self.labels, fontsize=10)
        #import pdb; pdb.set_trace()

        #Light gray guidelines
        self.ts_ax.plot([
            self.nsrs * [self.time[self.time_view[0]]],
            self.nsrs * [self.time[self.time_view[-1]]]
        ], numpy.vstack(2 * (offset, )), "0.85")

        #Plot the timeseries
        self.ts_view = self.ts_ax.plot(
            self.time[self.time_view],
            offset + self.data[self.time_view, 0, :, 0])

        self.hereiam[0].remove()
        self.hereiam = self.whereami_ax.plot(self.time_view,
                                             numpy.zeros(
                                                 (len(self.time_view), )),
                                             'b-',
                                             linewidth=4)

        pylab.draw()
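# A minimal usage sketch (hypothetical; assumes `tsr` is an already populated
# TVB TimeSeriesRegion, e.g. collected from a simulation run):
#
#     tsi = TimeSeriesInteractive(time_series=tsr)
#     tsi.configure()
#     tsi.show()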
Example #30
0
class fastICA(core.Type):
    """
    Takes a TimeSeries datatype (x) and returns the unmixed temporal sources (S) 
    and the estimated mixing matrix (A).
    
    .. math:: x = AS
    
    ICA takes time-points as observations and nodes as variables.
    
    It uses the fastICA algorithm implemented in the scikit-learn toolkit, and 
    its intended usage is as a `blind source separation` method.
    
    See also: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.fastica.html#sklearn.decomposition.fastica
    
    Before the fastICA algorithm can be applied, the input vector data 
    should be whitened (`sphering`). This means that any correlations in the 
    data are removed, i.e. the signals are forced to be uncorrelated. To this end,
    the `whiten` parameter is always set to `True`.
    
    NOTE: As for PCA the TimeSeries datatype must be longer (more time-points)
          than the number of nodes -- Mostly a problem for TimeSeriesSurface 
          datatypes, which, if sampled at 1024Hz, would need to be greater than 
          16 seconds long.
    """
    
    time_series = time_series.TimeSeries(
        label = "Time Series",
        required = True,
        doc = """The timeseries to which the ICA is to be applied. NOTE: The 
            TimeSeries must be longer(more time-points) than the number of nodes
            -- Mostly a problem for surface times-series, which, if sampled at
            1024Hz, would need to be greater than 16 seconds long.""")
            
    n_components = basic.Integer(
        label = "Number of components to extract",
        required = False,
        default = None,
        doc = """Number of components to extract and to perform dimension reduction.
            The number of components must be less than the number of variables.
            By default it takes number of components = number of nodes. Definitely
            a problem for surface time-series.""")
    
    # NOTE: For default surface the weights matrix has a size ~ 2GB * modes * vars...
    
    def evaluate(self):
        """
        Compute the independent sources 
        """
        cls_attr_name = self.__class__.__name__+".time_series"
        self.time_series.trait["data"].log_debug(owner = cls_attr_name)
        
        ts_shape = self.time_series.data.shape
        
        #Need more observations than variables
        if ts_shape[0] < ts_shape[2]:
            msg = "ICA requires a longer timeseries (tpts > number of nodes)."
            LOG.error(msg)
            raise Exception(msg)
            
        # Default: number of components = number of nodes (set before validating)
        if self.n_components is None:
            self.n_components = ts_shape[2]

        #Need more variables than components
        if self.n_components > ts_shape[2]:
            msg = "ICA requires more variables than components to extract (number of nodes > number of components)."
            LOG.error(msg)
            raise Exception(msg)
        
        #(n_components, n_components, state-variables, modes) --  unmixing matrix
        unmixing_matrix_shape = (self.n_components, self.n_components, ts_shape[1], ts_shape[3])
        LOG.info("unmixing matrix shape will be: %s" % str(unmixing_matrix_shape))
        
        # (n_components, nodes, state_variables, modes) -- prewhitening matrix
        prewhitening_matrix_shape = (self.n_components, ts_shape[2], ts_shape[1], ts_shape[3])
        LOG.info("prewhitening matrix shape will be: %s" % str(prewhitening_matrix_shape))
        
        
        unmixing_matrix = numpy.zeros(unmixing_matrix_shape)
        prewhitening_matrix = numpy.zeros(prewhitening_matrix_shape)
        
        
        #(tpts, n_components, state_variables, modes) -- unmixed sources time series
        data_ica = numpy.zeros((ts_shape[0], self.n_components, ts_shape[1], ts_shape[3]))
        
        #One un/mixing matrix for each state-var & mode.
        for mode in range(ts_shape[3]):
            for var in range(ts_shape[1]):
                # Assumes data must be whitened
                ica = fastica(self.time_series.data[:, var, :, mode], 
                                            n_components = self.n_components,
                                            whiten = True)
                # unmixed sources - component_time_series
                data_ica[:, :, var, mode] = ica[2]
                # prewhitening matrix
                prewhitening_matrix[:, :, var, mode] = ica[0]
                # unmixing matrix
                unmixing_matrix[:, :, var, mode] = ica[1]
        
        util.log_debug_array(LOG, prewhitening_matrix, "whitening_matrix")
        util.log_debug_array(LOG, unmixing_matrix, "unmixing_matrix")

        
        ica_result = mode_decompositions.IndependentComponents(source = self.time_series,
                                         component_time_series = data_ica, 
                                         #mixing_matrix = mixing_matrix,
                                         prewhitening_matrix = prewhitening_matrix,
                                         unmixing_matrix = unmixing_matrix,
                                         n_components = self.n_components, 
                                         use_storage = False)
        
        return ica_result
    
    
    def result_shape(self, input_shape):
        """
        Returns the shape of the main result of the ICA analysis -- component
        mixing matrix.
        """
        unmixing_matrix_shape = ((self.n_components or input_shape[2]), 
                                 (self.n_components or input_shape[2]),
                                 input_shape[1], input_shape[3])
        return unmixing_matrix_shape
    
    
    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the results of the ICA analysis.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  # Bytes (float64)
        return result_size
    
    
    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the ICA.
        That is, it includes storage of the evaluated IndependentComponents
        attributes such as norm_source, component_time_series, etc.
        """
        result_size = self.result_size(input_shape)
        extend_size = result_size #Main arrays
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #norm_source
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #component_time_series
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #normalised_component_time_series
        return extend_size
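# A minimal usage sketch (hypothetical; assumes `tsr` is a TimeSeries with
# more time points than nodes):
#
#     ica = fastICA(time_series=tsr, n_components=10)
#     result = ica.evaluate()          # an IndependentComponents datatype
#     sources = result.component_time_series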