def test_complexcoherence(self):
     data = numpy.random.random((10, 10))
     ts = time_series.TimeSeries(data=data)
     dt = spectral.ComplexCoherenceSpectrum(
         source=ts,
         array_data=numpy.random.random((10, 10)),
         cross_spectrum=numpy.random.random((10, 10)),
         epoch_length=10,
         segment_length=5)
     summary_info = dt.summary_info
     self.assertEqual(summary_info['Frequency step'], 0.2)
     self.assertEqual(summary_info['Maximum frequency'], 0.5)
     self.assertEqual(summary_info['Source'], '')
     self.assertEqual(summary_info['Spectral type'],
                      'ComplexCoherenceSpectrum')
     self.assertTrue(dt.aggregation_functions is None)
     self.assertEqual(dt.epoch_length, 10)
     self.assertEqual(dt.segment_length, 5)
     self.assertEqual(dt.shape, (10, 10))
     self.assertTrue(dt.source is not None)
     self.assertEqual(dt.windowing_function, '')
 def test_independentcomponents(self):
     data = numpy.random.random((10, 10, 10, 10))
     ts = time_series.TimeSeries(data=data)
     n_comp = 5
     dt = mode_decompositions.IndependentComponents(
         source=ts,
         component_time_series=numpy.random.random((10, n_comp, 10, 10)),
         prewhitening_matrix=numpy.random.random((n_comp, 10, 10, 10)),
         unmixing_matrix=numpy.random.random((n_comp, n_comp, 10, 10)),
         n_components=n_comp)
     dt.compute_norm_source()
     dt.compute_component_time_series()
     dt.compute_normalised_component_time_series()
     summary = dt.summary_info()
     assert summary['Mode decomposition type'] == 'IndependentComponents'
     assert dt.source is not None
     assert dt.mixing_matrix is None
     assert dt.unmixing_matrix.shape == (n_comp, n_comp, 10, 10)
     assert dt.prewhitening_matrix.shape == (n_comp, 10, 10, 10)
     assert dt.norm_source.shape == (10, 10, 10, 10)
     assert dt.component_time_series.shape == (10, 10, n_comp, 10)
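
The summary values asserted in test_complexcoherence above follow from simple spectral relations. A minimal sketch of the presumed derivation, assuming the TimeSeries default sample period of 1.0 (an assumption; the test never sets it explicitly):

# Hypothetical derivation of the asserted summary values; the sample
# period of 1.0 is an assumption, not something the test sets.
sample_period = 1.0                         # assumed TimeSeries default
segment_length = 5.0                        # as passed to the constructor
freq_step = 1.0 / segment_length            # 0.2 -> 'Frequency step'
max_freq = 0.5 / sample_period              # 0.5 -> Nyquist, 'Maximum frequency'
assert (freq_step, max_freq) == (0.2, 0.5)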
Example #3
 def test_principalcomponents(self):
     data = numpy.random.random((10, 10, 10, 10))
     ts = time_series.TimeSeries(data=data)
     dt = mode_decompositions.PrincipalComponents(
         source=ts,
         fractions=numpy.random.random((10, 10, 10)),
         weights=data)
     dt.configure()
     dt.compute_norm_source()
     dt.compute_component_time_series()
     dt.compute_normalised_component_time_series()
     summary = dt.summary_info
     self.assertEqual(summary['Mode decomposition type'],
                      'PrincipalComponents')
     self.assertTrue(dt.source is not None)
     self.assertEqual(dt.weights.shape, (10, 10, 10, 10))
     self.assertEqual(dt.fractions.shape, (10, 10, 10))
     self.assertEqual(dt.norm_source.shape, (10, 10, 10, 10))
     self.assertEqual(dt.component_time_series.shape, (10, 10, 10, 10))
     self.assertEqual(dt.normalised_component_time_series.shape,
                      (10, 10, 10, 10))
Example #4
class IndependentComponentsData(MappedType):
    """
    Result of TEMPORAL (Fast) Independent Component Analysis
    """

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the ICA is applied.")

    mixing_matrix = arrays.FloatArray(
        label="Mixing matrix - Spatial Maps",
        doc="""The linear mixing matrix (Mixing matrix) """)

    unmixing_matrix = arrays.FloatArray(
        label="Unmixing matrix - Spatial maps",
        doc="""The estimated unmixing matrix used to obtain the unmixed
            sources from the data""")

    prewhitening_matrix = arrays.FloatArray(
        label="Pre-whitening matrix",
        doc=""" """)

    n_components = basic.Integer(
        label="Number of independent components",
        doc=""" Observed data matrix is considered to be a linear combination
        of :math:`n` non-Gaussian independent components""")

    norm_source = arrays.FloatArray(
        label="Normalised source time series. Zero centered and whitened.",
        file_storage=core.FILE_STORAGE_EXPAND)

    component_time_series = arrays.FloatArray(
        label="Component time series. Unmixed sources.",
        file_storage=core.FILE_STORAGE_EXPAND)

    normalised_component_time_series = arrays.FloatArray(
        label="Normalised component time series",
        file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True
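
The fields above describe the standard temporal ICA decomposition: the observed data are treated as a linear mixture of independent sources, recovered by applying the pre-whitening and unmixing matrices in turn. A toy numpy sketch of that relation (the matrices here are random stand-ins, not estimated by any ICA routine):

import numpy

# S = W . K . X: unmixing (W) after pre-whitening (K) applied to data (X).
rng = numpy.random.default_rng(0)
n_obs, n_comp, tpts = 10, 5, 100
X = rng.standard_normal((n_obs, tpts))      # observed, zero-centered data
K = rng.standard_normal((n_comp, n_obs))    # pre-whitening matrix (stand-in)
W = rng.standard_normal((n_comp, n_comp))   # unmixing matrix (stand-in)
S = W @ K @ X                               # unmixed component time series
assert S.shape == (n_comp, tpts)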
Example #5
class FourierSpectrumData(arrays.MappedArray):
    """
    Result of a Fourier analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the FFT is applied.")

    segment_length = basic.Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segment length determines the frequency resolution of the
            resulting spectra.""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
            application of the FFT.""")

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    average_power = arrays.FloatArray(label="Average Power",
                                      file_storage=core.FILE_STORAGE_EXPAND)

    normalised_average_power = arrays.FloatArray(
        label="Normalised Power", file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True
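
The derived fields of FourierSpectrumData follow the conventional definitions for a complex spectrum. A minimal sketch of how they relate (illustration only; whether TVB applies exactly this normalisation is not shown here):

import numpy

spectrum = numpy.fft.fft(numpy.random.random(256))   # stand-in for array_data
amplitude = numpy.abs(spectrum)
phase = numpy.angle(spectrum)
power = amplitude ** 2
average_power = power.mean()
normalised_average_power = power / power.sum()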
Example #6
class WaveletCoefficientsData(arrays.MappedArray):
    """
    This class bundles all the elements of a Wavelet Analysis into a single 
    object, including the input TimeSeries datatype and the output results as 
    arrays (FloatArray)
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray()

    source = time_series.TimeSeries(label="Source time-series")

    mother = basic.String(
        label="Mother wavelet",
        default="morlet",
        doc="""A string specifying the type of mother wavelet to use,
            default is 'morlet'.""")  # default to 'morlet'

    sample_period = basic.Float(label="Sample period")
    #sample_rate = basic.Integer(label = "")  inversely related

    frequencies = arrays.FloatArray(
        label="Frequencies", doc="A vector that maps scales to frequencies.")

    normalisation = basic.String(label="Normalisation type")
    # 'unit energy' | 'gabor'

    q_ratio = basic.Float(label="Q-ratio", default=5.0)

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    __generate_table__ = True
Example #7
class CorrelationCoefficients(arrays.MappedArray):
    """Correlation coefficients datatype."""

    # Extreme values for Pearson correlation coefficients
    PEARSON_MIN = -1
    PEARSON_MAX = 1

    array_data = arrays.FloatArray(file_storage=core.FILE_STORAGE_DEFAULT)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which Correlation (coefficients) is applied.")

    labels_ordering = basic.List(
        label="Dimension Names",
        default=["Node", "Node", "State Variable", "Mode"],
        doc="""List of strings representing names of each data dimension""")

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1), int(self.read_data_shape()[i]))

    def _find_summary_info(self):
        summary = {"Graph type": self.__class__.__name__,
                   "Source": self.source.title,
                   "Dimensions": self.labels_ordering}
        summary.update(self.get_info_about_array('array_data'))
        return summary

    def get_correlation_data(self, selected_state, selected_mode):
        matrix_to_display = self.array_data[:, :, int(selected_state), int(selected_mode)]
        return list(matrix_to_display.flat)
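
Under the hood the array_data above is a stack of Pearson correlation matrices, one per (state variable, mode) pair. A toy sketch of a single slice, using numpy.corrcoef directly (shapes are illustrative, not tied to any simulation):

import numpy

tpts, nodes = 100, 4
data = numpy.random.random((tpts, nodes))        # one (time, nodes) slice
cc = numpy.corrcoef(data.T)                      # (nodes, nodes) matrix
assert cc.shape == (nodes, nodes)
assert (cc >= -1).all() and (cc <= 1).all()      # PEARSON_MIN .. PEARSON_MAX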
Example #8
 def test_waveletcoefficients(self):
     data = numpy.random.random((10, 10))
     ts = time_series.TimeSeries(data=data)
     dt = spectral.WaveletCoefficients(source=ts,
                                       mother='morlet',
                                       sample_period=7.8125,
                                       frequencies=[0.008, 0.028, 0.048, 0.068],
                                       normalisation="energy",
                                       q_ratio=5.0,
                                       array_data=numpy.random.random((10, 10)),)
     dt.configure()
     summary_info = dt.summary_info
     self.assertEqual(summary_info['Maximum frequency'], 0.068)  
     self.assertEqual(summary_info['Minimum frequency'], 0.008)  
     self.assertEqual(summary_info['Normalisation'], 'energy')  
     self.assertEqual(summary_info['Number of scales'], 4)
     self.assertEqual(summary_info['Q-ratio'], 5.0)  
     self.assertEqual(summary_info['Sample period'], 7.8125) 
     self.assertEqual(summary_info['Spectral type'], 'WaveletCoefficients')  
     self.assertEqual(summary_info['Wavelet type'], 'morlet')     
     self.assertEqual(dt.q_ratio, 5.0)
     self.assertEqual(dt.sample_period, 7.8125)
     self.assertEqual(dt.shape, (10, 10))
     self.assertTrue(dt.source is not None)
Example #9
 def test_waveletcoefficients(self):
     data = numpy.random.random((10, 10))
     ts = time_series.TimeSeries(data=data)
     dt = spectral.WaveletCoefficients(source=ts,
                                       mother='morlet',
                                       sample_period=7.8125,
                                       frequencies=numpy.array([0.008, 0.028, 0.048, 0.068]),
                                       normalisation="energy",
                                       q_ratio=5.0,
                                       array_data=numpy.random.random((10, 10)), )
     # dt.configure()
     summary_info = dt.summary_info()
     assert summary_info['Maximum frequency'] == 0.068
     assert summary_info['Minimum frequency'] == 0.008
      assert summary_info['Normalisation'] == 'energy'
     assert summary_info['Number of scales'] == 4
     assert summary_info['Q-ratio'] == 5.0
     assert summary_info['Sample period'] == 7.8125
     assert summary_info['Spectral type'] == 'WaveletCoefficients'
     assert summary_info['Wavelet type'] == 'morlet'
     assert dt.q_ratio == 5.0
     assert dt.sample_period == 7.8125
     assert dt.array_data.shape == (10, 10)
     assert dt.source is not None
Example #10
class PrincipalComponents(MappedType):
    """
    Result of a Principal Component Analysis (PCA).
    """

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the PCA is applied.")

    weights = arrays.FloatArray(
        label="Principal vectors",
        doc="""The vectors of the 'weights' with which each time-series is
            represented in each component.""",
        file_storage=core.FILE_STORAGE_EXPAND)

    fractions = arrays.FloatArray(
        label="Fraction explained",
        doc="""A vector or collection of vectors representing the fraction of
            the variance explained by each principal component.""",
        file_storage=core.FILE_STORAGE_EXPAND)

    norm_source = arrays.FloatArray(
        label="Normalised source time series",
        file_storage=core.FILE_STORAGE_EXPAND)

    component_time_series = arrays.FloatArray(
        label="Component time series",
        file_storage=core.FILE_STORAGE_EXPAND)

    normalised_component_time_series = arrays.FloatArray(
        label="Normalised component time series",
        file_storage=core.FILE_STORAGE_EXPAND)


    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        self.store_data_chunk('weights', partial_result.weights, grow_dimension=2, close_file=False)

        self.store_data_chunk('fractions', partial_result.fractions, grow_dimension=1, close_file=False)

        partial_result.compute_norm_source()
        self.store_data_chunk('norm_source', partial_result.norm_source, grow_dimension=1, close_file=False)

        partial_result.compute_component_time_series()
        self.store_data_chunk('component_time_series', partial_result.component_time_series,
                              grow_dimension=1, close_file=False)

        partial_result.compute_normalised_component_time_series()
        self.store_data_chunk('normalised_component_time_series', partial_result.normalised_component_time_series,
                              grow_dimension=1, close_file=False)

    def read_fractions_data(self, from_comp, to_comp):
        """
        Return a list with fractions for components in interval from_comp, to_comp and in
        addition have in position n the sum of the fractions for the rest of the components.
        """
        from_comp = int(from_comp)
        to_comp = int(to_comp)
        all_data = self.get_data('fractions').flat
        sum_others = 0
        for idx, val in enumerate(all_data):
            if idx < from_comp or idx > to_comp:
                sum_others += val
        return numpy.array(all_data[from_comp:to_comp].tolist() + [sum_others])

    def read_weights_data(self, from_comp, to_comp):
        """
        Return the weights data for the components in the interval [from_comp, to_comp].
        """
        from_comp = int(from_comp)
        to_comp = int(to_comp)
        data_slice = slice(from_comp, to_comp, None)
        weights_shape = self.get_data_shape('weights')
        weights_slice = [slice(size) for size in weights_shape]
        weights_slice[0] = data_slice
        weights_data = self.get_data('weights', tuple(weights_slice))
        return weights_data.flatten()

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialization.
        """
        super(PrincipalComponents, self).configure()

        if self.trait.use_storage is False and sum(self.get_data_shape('weights')) != 0:
            if self.norm_source.size == 0:
                self.compute_norm_source()

            if self.component_time_series.size == 0:
                self.compute_component_time_series()

            if self.normalised_component_time_series.size == 0:
                self.compute_normalised_component_time_series()

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        summary = {"Mode decomposition type": self.__class__.__name__}
        summary["Source"] = self.source.title
        # summary["Number of variables"] = self...
        # summary["Number of mewasurements"] = self...
        # summary["Number of components"] = self...
        # summary["Number required for 95%"] = self...
        return summary

    def compute_norm_source(self):
        """Normalised source time-series."""
        self.norm_source = ((self.source.data - self.source.data.mean(axis=0)) /
                            self.source.data.std(axis=0))
        self.trait["norm_source"].log_debug(owner=self.__class__.__name__)

    # TODO: ??? Any value in making this a TimeSeries datatypes ???
    def compute_component_time_series(self):
        """Compnent time-series."""
        # TODO: Generalise -- it currently assumes 4D TimeSeriesSimulator...
        ts_shape = self.source.data.shape
        component_ts = numpy.zeros(ts_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.weights[:, :, var, mode]
                ts = self.source.data[:, var, :, mode]
                component_ts[:, var, :, mode] = numpy.dot(w, ts.T).T

        self.component_time_series = component_ts
        self.trait["component_time_series"].log_debug(owner=self.__class__.__name__)

    # TODO: ??? Any value in making this a TimeSeries datatypes ???
    def compute_normalised_component_time_series(self):
        """normalised_Compnent time-series."""
        # TODO: Generalise -- it currently assumes 4D TimeSeriesSimulator...
        ts_shape = self.source.data.shape
        component_ts = numpy.zeros(ts_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.weights[:, :, var, mode]
                nts = self.norm_source[:, var, :, mode]
                component_ts[:, var, :, mode] = numpy.dot(w, nts.T).T

        self.normalised_component_time_series = component_ts
        self.trait["normalised_component_time_series"].log_debug(owner=self.__class__.__name__)
class PowerSpectraInteractive(core.Type):
    """
    The graphical interface for visualising the power-spectra (FFT) of a
    timeseries provides controls for setting:

        - which state-variable and mode to display [sets]
        - log or linear scaling for the power or frequency axis [binary]
        - segmentation length [set]
        - windowing function [set]
        - power normalisation [binary] (emphasise relative frequency contribution)
        - show std or sem [binary]


    """

    time_series = time_series_datatypes.TimeSeries(
        label="Timeseries",
        default=None,
        required=True,
        doc=""" The timeseries to which the FFT is to be applied.""")

    first_n = basic.Integer(
        label="Display the first 'n'",
        default=-1,
        required=True,
        doc="""Primarily intended for displaying the first N components of a 
            surface PCA timeseries. Defaults to -1, meaning it'll display all
            of 'space' (i.e., regions or vertices or channels). In other words,
            for Region or M/EEG timeseries you can ignore this, but, for a 
            surface timeseries it really must be set.""")

    def __init__(self, **kwargs):
        """
        Initialise based on provided keywords or their traited defaults. Also,
        initialise the place-holder attributes that aren't filled until the
        show() method is called.

        """
        #figure
        self.ifft_fig = None

        #time-series
        self.fft_ax = None

        #Current state
        self.xscale = "linear"
        self.yscale = "log"
        self.mode = 0
        self.variable = 0
        self.show_sem = False
        self.show_std = False
        self.normalise_power = "no"
        self.window_length = 0.25
        self.window_function = "None"

        #Selectors
        self.xscale_selector = None
        self.yscale_selector = None
        self.mode_selector = None
        self.variable_selector = None
        self.show_sem_selector = None
        self.show_std_selector = None
        self.normalise_power_selector = None
        self.window_length_selector = None
        self.window_function_selector = None

        #
        possible_freq_steps = [2**x for x in range(-2, 7)]  #Hz
        #possible_freq_steps.append(1.0 / self.time_series_length) #Hz
        self.possible_window_lengths = 1.0 / numpy.array(
            possible_freq_steps)  #s
        self.freq_step = 1.0 / self.window_length
        self.frequency = None
        self.spectra = None
        self.spectra_norm = None

        #Sliders
        #self.window_length_slider = None

    def configure(self):
        """ Seperate configure cause ttraits be busted... """
        LOG.debug("time_series shape: %s" % str(self.time_series.data.shape))
        #TODO: if isinstance(self.time_series, TimeSeriesSurface) and self.first_n == -1: #LOG.error, return.
        self.data = self.time_series.data[:, :, :self.first_n, :]
        self.period = self.time_series.sample_period
        self.max_freq = 0.5 / self.period
        self.units = "Hz"
        self.tpts = self.data.shape[0]
        self.nsrs = self.data.shape[2]
        self.time_series_length = self.tpts * self.period
        self.time = numpy.arange(self.tpts) * self.period
        self.labels = ["channel_%0.3d" % k for k in range(self.nsrs)]

    def show(self):
        """ Generate the interactive power-spectra figure. """
        #Make sure everything is configured
        self.configure()

        #Make the figure:
        self.create_figure()

        #Selectors
        self.add_xscale_selector()
        self.add_yscale_selector()
        self.add_mode_selector()
        self.add_variable_selector()
        self.add_normalise_power_selector()
        self.add_window_length_selector()
        self.add_window_function_selector()

        #Sliders
        #self.add_window_length_slider() #Want discrete values
        #self.add_scaling_slider()

        #...
        self.calc_fft()

        #Plot timeseries
        self.plot_spectra()

        pylab.show()

    ##------------------------------------------------------------------------##
    ##------------------ Functions for building the figure -------------------##
    ##------------------------------------------------------------------------##
    def create_figure(self):
        """ Create the figure and time-series axes. """
        time_series_type = self.time_series.__class__.__name__
        try:
            figure_window_title = "Interactive power spectra: " + time_series_type
            pylab.close(figure_window_title)
            self.ifft_fig = pylab.figure(num=figure_window_title,
                                         figsize=(16, 8),
                                         facecolor=BACKGROUNDCOLOUR,
                                         edgecolor=EDGECOLOUR)
        except ValueError:
            LOG.info("My life would be easier if you'd update your PyLab...")
            figure_number = 42
            pylab.close(figure_number)
            self.ifft_fig = pylab.figure(num=figure_number,
                                         figsize=(16, 8),
                                         facecolor=BACKGROUNDCOLOUR,
                                         edgecolor=EDGECOLOUR)

        self.fft_ax = self.ifft_fig.add_axes([0.15, 0.2, 0.7, 0.75])

    def add_xscale_selector(self):
        """
        Add a radio button to the figure for selecting which scaling the x-axes
        should use.
        """
        pos_shp = [0.45, 0.02, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="xscale")
        xscale_tuple = ("log", "linear")
        self.xscale_selector = widgets.RadioButtons(rax,
                                                    xscale_tuple,
                                                    active=1)
        self.xscale_selector.on_clicked(self.update_xscale)

    def add_yscale_selector(self):
        """
        Add a radio button to the figure for selecting which scaling the y-axes
        should use.
        """
        pos_shp = [0.02, 0.5, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="yscale")
        yscale_tuple = ("log", "linear")
        self.yscale_selector = widgets.RadioButtons(rax,
                                                    yscale_tuple,
                                                    active=0)
        self.yscale_selector.on_clicked(self.update_yscale)

    def add_mode_selector(self):
        """
        Add a radio button to the figure for selecting which mode of the model
        should be displayed.
        """
        pos_shp = [0.02, 0.07, 0.05, 0.1 + 0.002 * self.data.shape[3]]
        rax = self.ifft_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="Mode")
        mode_tuple = tuple(range(self.data.shape[3]))
        self.mode_selector = widgets.RadioButtons(rax, mode_tuple, active=0)
        self.mode_selector.on_clicked(self.update_mode)

    def add_variable_selector(self):
        """
        Generate radio selector buttons to set which state variable is 
        displayed.
        """
        noc = self.data.shape[1]  # number of choices
        #State variable for the x axis
        pos_shp = [0.02, 0.22, 0.05, 0.12 + 0.008 * noc]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="state variable")
        self.variable_selector = widgets.RadioButtons(rax,
                                                      tuple(range(noc)),
                                                      active=0)
        self.variable_selector.on_clicked(self.update_variable)

    def add_window_length_selector(self):
        """
        Generate radio selector buttons to set the window length in seconds.
        """
        noc = self.possible_window_lengths.shape[0]  # number of choices
        #State variable for the x axis
        pos_shp = [0.88, 0.07, 0.1, 0.12 + 0.02 * noc]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="Segment length")
        wl_tup = tuple(self.possible_window_lengths)
        self.window_length_selector = widgets.RadioButtons(rax,
                                                           wl_tup,
                                                           active=4)
        self.window_length_selector.on_clicked(self.update_window_length)

    def add_window_function_selector(self):
        """
        Generate radio selector buttons to set the windowing function.
        """
        #TODO: add support for kaiser, requires specification of beta.
        wf_tup = ("None", "hamming", "bartlett", "blackman", "hanning")
        noc = len(wf_tup)  # number of choices
        #State variable for the x axis
        pos_shp = [0.88, 0.77, 0.085, 0.12 + 0.01 * noc]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="Windowing function")
        self.window_function_selector = widgets.RadioButtons(rax,
                                                             wf_tup,
                                                             active=0)
        self.window_function_selector.on_clicked(self.update_window_function)

    def add_normalise_power_selector(self):
        """
        Add a radio button to choose whether or not the power of all spectra
        should be normalised to 1.
        """
        pos_shp = [0.02, 0.8, 0.05, 0.104]
        rax = self.ifft_fig.add_axes(pos_shp,
                                     axisbg=AXCOLOUR,
                                     title="normalise")
        np_tuple = ("yes", "no")
        self.normalise_power_selector = widgets.RadioButtons(rax,
                                                             np_tuple,
                                                             active=1)
        self.normalise_power_selector.on_clicked(self.update_normalise_power)

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the state --------------------##
    ##------------------------------------------------------------------------##
    def calc_fft(self):
        """
        Calculate FFT using current state of the window_length, window_function,
        """
        #Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(self.time_series_length / self.window_length))
        if nseg != 1:
            seg_tpts = int(self.window_length / self.period)
            overlap = ((seg_tpts * nseg) - self.tpts) / (nseg - 1)
            starts = [
                int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)
            ]
            segments = [self.data[start:start + seg_tpts] for start in starts]
            segments = [
                segment[:, :, :, numpy.newaxis] for segment in segments
            ]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            time_series = self.data[:, :, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]

        #Base-line correct segmented time-series
        time_series = time_series - time_series.mean(axis=0)[numpy.newaxis, :]

        #Apply windowing function
        if self.window_function != "None":
            window_function = getattr(numpy, self.window_function)
            window_mask = numpy.reshape(window_function(seg_tpts),
                                        (seg_tpts, 1, 1, 1, 1))
            time_series = time_series * window_mask

        #Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        nfreq = len(result) // 2

        self.frequency = numpy.arange(0, self.max_freq, self.freq_step)
        LOG.debug("frequency shape: %s" % str(self.frequency.shape))

        self.spectra = numpy.mean(numpy.abs(result[1:nfreq + 1])**2, axis=-1)
        LOG.debug("spectra shape: %s" % str(self.spectra.shape))

        self.spectra_norm = (self.spectra / numpy.sum(self.spectra, axis=0))
        LOG.debug("spectra_norm shape: %s" % str(self.spectra_norm.shape))

        #import pdb; pdb.set_trace()
#        self.spectra_std = numpy.std(numpy.abs(result[:nfreq]), axis=4)
#        self.spectra_sem = self.spectra_std / time_series.shape[4]

##------------------------------------------------------------------------##
##------------------ Functions for updating the figure -------------------##
##------------------------------------------------------------------------##

    def update_xscale(self, xscale):
        """ 
        Update the FFT axes' xscale to either log or linear based on radio
        button selection.
        """
        self.xscale = xscale
        self.fft_ax.set_xscale(self.xscale)
        pylab.draw()

    def update_yscale(self, yscale):
        """ 
        Update the FFT axes' yscale to either log or linear based on radio
        button selection.
        """
        self.yscale = yscale
        self.fft_ax.set_yscale(self.yscale)
        pylab.draw()

    def update_mode(self, mode):
        """ Update the visualised mode based on radio button selection. """
        self.mode = mode
        self.plot_spectra()

    def update_variable(self, variable):
        """ 
        Update state variable being plotted based on radio button selection.
        """
        self.variable = variable
        self.plot_spectra()

    def update_normalise_power(self, normalise_power):
        """ Update whether to normalise based on radio button selection. """
        self.normalise_power = normalise_power
        self.plot_spectra()

    def update_window_length(self, length):
        """
        Update timeseries window length based on the selected value.
        """
        #TODO: need this casting but not sure why, don't need int() with mode...
        self.window_length = numpy.float64(length)
        #import pdb; pdb.set_trace()
        self.freq_step = 1.0 / self.window_length
        self.update_spectra()

    def update_window_function(self, window_function):
        """
        Update windowing function based on the radio button selection.
        """
        self.window_function = window_function
        self.update_spectra()

    def update_spectra(self):
        """ Clear the axes and redraw the power-spectra. """
        self.calc_fft()
        self.plot_spectra()

#    def plot_std(self):
#        """ Plot """
#        std = (self.spectra[:, self.variable, :, self.mode] +
#               self.spectra_std[:, self.variable, :, self.mode])
#        self.fft_ax.plot(self.frequency, std, "--")
#
#
#    def plot_sem(self):
#        """  """
#        sem = (self.spectra[:, self.variable, :, self.mode] +
#               self.spectra_sem[:, self.variable, :, self.mode])
#        self.fft_ax.plot(self.frequency, sem, ":")

    def plot_spectra(self):
        """ Plot the power spectra. """
        self.fft_ax.clear()
        # Set title and axis labels
        time_series_type = self.time_series.__class__.__name__
        self.fft_ax.set(title=time_series_type)
        self.fft_ax.set(xlabel="Frequency (%s)" % self.units)
        self.fft_ax.set(ylabel="Power")

        # Set x and y scale based on current radio button selection.
        self.fft_ax.set_xscale(self.xscale)
        self.fft_ax.set_yscale(self.yscale)

        if hasattr(self.fft_ax, 'autoscale'):
            self.fft_ax.autoscale(enable=True, axis='both', tight=True)

        #import pdb; pdb.set_trace()
        #Plot the power spectra
        if self.normalise_power == "yes":
            self.fft_ax.plot(self.frequency,
                             self.spectra_norm[:, self.variable, :, self.mode])
        else:
            self.fft_ax.plot(self.frequency, self.spectra[:, self.variable, :,
                                                          self.mode])


#        #TODO: Need to ensure colour matching... and allow region selection.
#        #If requested, add standard deviation
#        if self.show_std:
#            self.plot_std(self)
#
#        #If requested, add standard error in mean
#        if self.show_sem:
#            self.plot_sem(self)

        pylab.draw()
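
For reference, the segment-and-average FFT pipeline implemented by calc_fft above can be expressed in a few standalone numpy lines for a single channel (sampling rate, segment length, and window choice are toy values):

import numpy

fs = 250.0                                    # sampling rate (Hz), assumed
signal = numpy.random.random(1000)
seg_tpts = 250                                # one-second segments
segs = numpy.array([signal[i:i + seg_tpts]
                    for i in range(0, signal.size, seg_tpts)])
segs = segs - segs.mean(axis=1, keepdims=True)        # baseline correct
segs = segs * numpy.hamming(seg_tpts)                 # windowing function
spectra = numpy.abs(numpy.fft.rfft(segs, axis=1)) ** 2
mean_spectrum = spectra.mean(axis=0)                  # averaged over segments
freqs = numpy.fft.rfftfreq(seg_tpts, d=1.0 / fs)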
class NodeCoherence(core.Type):
    """
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the FFT is to be applied.""")

    nfft = basic.Integer(label="Data-points per block",
                         default=256,
                         doc="""Should be a power of 2...""")

    def evaluate(self):
        """ 
        Coherence function.  Matplotlib.mlab implementation.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        data_shape = self.time_series.data.shape

        #(frequency, nodes, nodes, state-variables, modes)
        result_shape = (self.nfft // 2 + 1, data_shape[2], data_shape[2],
                        data_shape[1], data_shape[3])
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        #TODO: For region level, 4s, 2000Hz, this takes ~2min... (which is stupidly slow)
        #One inter-node coherence, across frequencies for each state-var & mode.
        for mode in range(data_shape[3]):
            for var in range(data_shape[1]):
                data = self.time_series.data[:, var, :, mode]
                data = data - data.mean(axis=0)[numpy.newaxis, :]
                #TODO: Work out a way around the 4 level loop,
                #TODO: coherence isn't directional, so, get rid of redundancy...
                for n1 in range(data_shape[2]):
                    for n2 in range(data_shape[2]):
                        cxy, freq = mlab.cohere(
                            data[:, n1],
                            data[:, n2],
                            NFFT=self.nfft,
                            Fs=self.time_series.sample_rate,
                            detrend=detrend_linear,
                            window=mlab.window_none)
                        result[:, n1, n2, var, mode] = cxy

        util.log_debug_array(LOG, result, "result")
        util.log_debug_array(LOG, freq, "freq")

        coherence = spectral.CoherenceSpectrum(source=self.time_series,
                                               nfft=self.nfft,
                                               array_data=result,
                                               frequency=freq,
                                               use_storage=False)

        return coherence

    def result_shape(self, input_shape):
        """Returns the shape of the main result of NodeCoherence."""
        freq_len = self.nfft // 2 + 1
        freq_shape = (freq_len, )
        result_shape = (freq_len, input_shape[2], input_shape[2],
                        input_shape[1], input_shape[3])
        return [result_shape, freq_shape]

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of NodeCoherence.
        """
        result_size = numpy.sum([numpy.prod(shape) for shape in
                                 self.result_shape(input_shape)]) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of
        NodeCoherence. That is, it includes storage of the evaluated
        CoherenceSpectrum attributes.
        """
        extend_size = self.result_size(
            input_shape)  #Currently no derived attributes.
        return extend_size
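
A two-signal illustration of the per-pair call made inside evaluate(), using the same mlab.cohere signature (sample rate, NFFT, and the signals themselves are arbitrary stand-ins):

import numpy
from matplotlib import mlab

fs = 200.0
t = numpy.arange(2048) / fs
x = numpy.sin(2 * numpy.pi * 10 * t) + numpy.random.random(t.size)
y = numpy.sin(2 * numpy.pi * 10 * t + 0.5) + numpy.random.random(t.size)
cxy, freq = mlab.cohere(x, y, NFFT=256, Fs=fs,
                        detrend=mlab.detrend_linear, window=mlab.window_none)
assert cxy.shape == freq.shape == (256 // 2 + 1,)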
class CorrelationCoefficient(core.Type):
    """
    Compute the node-pairwise Pearson correlation coefficients of the
    given input 4D TimeSeries datatype.

    Returns a CorrelationCoefficients datatype, whose values are between -1
    and 1, inclusive.
    
    See: http://docs.scipy.org/doc/numpy/reference/generated/numpy.corrcoef.html
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The time-series for which the cross correlation matrices are
        calculated.""")

    t_start = basic.Float(
        label=":math:`t_{start}`",
        default=0.9765625,
        required=True,
        doc=
        """Time start point (ms). By default it uses the default Monitor sample period.
        The starting time point of a time series is not zero, but the monitor's sample period. """
    )

    t_end = basic.Float(label=":math:`t_{end}`",
                        default=1000.,
                        required=True,
                        doc=""" End time point (ms) """)

    def evaluate(self):
        """
        Compute the correlation coefficients of a 2D array (tpts x nodes).
        Yields an array of size nodes x nodes x state-variables x modes.

        The time interval over which the correlation coefficients are computed 
        is defined by t_start, t_end

        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        #(nodes, nodes, state-variables, modes)
        input_shape = self.time_series.read_data_shape()
        result_shape = self.result_shape(input_shape)
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        t_lo = int((1. / self.time_series.sample_period) *
                   (self.t_start - self.time_series.sample_period))
        t_hi = int((1. / self.time_series.sample_period) *
                   (self.t_end - self.time_series.sample_period))
        t_lo = max(t_lo, 0)
        t_hi = min(t_hi, input_shape[0])

        #One correlation coeff matrix, for each state-var & mode.
        for mode in range(result_shape[3]):
            for var in range(result_shape[2]):
                current_slice = tuple([
                    slice(t_lo, t_hi + 1),
                    slice(var, var + 1),
                    slice(input_shape[2]),
                    slice(mode, mode + 1)
                ])
                data = self.time_series.read_data_slice(
                    current_slice).squeeze()
                result[:, :, var, mode] = numpy.corrcoef(data.T)

        util.log_debug_array(LOG, result, "result")

        corr_coeff = graph.CorrelationCoefficients(source=self.time_series,
                                                   array_data=result,
                                                   use_storage=False)
        return corr_coeff

    def result_shape(self, input_shape):
        """Returns the shape of the main result of ...."""
        result_shape = (input_shape[2], input_shape[2], input_shape[1],
                        input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of CorrelationCoefficient.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of
        CorrelationCoefficient. Currently there are no derived attributes,
        so this equals the main result size.
        """
        extend_size = self.result_size(
            input_shape)  # Currently no derived attributes.
        return extend_size
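
The t_start/t_end handling in evaluate() reduces to a millisecond-to-sample-index conversion plus clipping to the available data. A sketch with example values (n_tpts is a stand-in for the first dimension of the input shape):

sample_period = 0.9765625             # ms, the default monitor period above
t_start, t_end, n_tpts = 0.9765625, 1000.0, 1024
t_lo = max(int((t_start - sample_period) / sample_period), 0)     # -> 0
t_hi = min(int((t_end - sample_period) / sample_period), n_tpts)  # -> 1023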
Example #14
class KuramotoIndex(metrics_base.BaseTimeseriesMetricAlgorithm):
    """
    Return the Kuramoto synchronization index. 
    
    Useful metric for a parameter analysis when the collective brain dynamics
    represent coupled oscillatory processes.
    
    The *order* parameters are :math:`r` and :math:`\\Psi`.

    .. math::
        r e^{i\\psi} = \\frac{1}{N}\\,\\sum_{k=1}^N(e^{i\\theta_k})

    The first is the phase coherence of the population of oscillators (KSI)
    and the second is the average phase.

    A value of :math:`r=0` means there is no coherence among the oscillators.
    
    
    Input:
    TimeSeries datatype 
    
    Output: 
    Float
    
    This is a crude indicator of synchronization among nodes over the entire 
    network.

    #NOTE: For the time being it is meant to be another global metric.
    However, introducing a dedicated TimeSeriesDatatype for this analyzer
    should be considered.
    
    """

    time_series = time_series_module.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The TimeSeries for which the Kuramoto Synchronization Index
        will be computed""")

    accept_filter = FilterChain(operations=["==", ">="],
                                values=[4, 2],
                                fields=[
                                    FilterChain.datatype + '._nr_dimensions',
                                    FilterChain.datatype + '._length_2d'
                                ])

    def evaluate(self):
        """
        Kuramoto Synchronization Index
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        if self.time_series.data.shape[1] < 2:
            msg = " The number of state variables should be at least 2."
            LOG.error(msg)
            raise Exception(msg)

        #TODO: Should be computed for each possible combination of var, mode
        #      for var, mode in itertools.product(range(self.time_series.data.shape[1]),
        #                                         range(self.time_series.data.shape[3])):

        #TODO: Generalise. The Kuramoto order parameter is computed over sliding
        #      time windows and then normalised

        theta_sum = numpy.sum(
            numpy.exp(0.0 + 1j * (numpy.vectorize(cmath.polar)
                                  (numpy.vectorize(complex)
                                   (self.time_series.data[:, 0, :, 0],
                                    self.time_series.data[:, 1, :, 0]))[1])),
            axis=1)

        result = numpy.vectorize(cmath.polar)(theta_sum /
                                              self.time_series.data.shape[2])

        return result[0].mean()

    def result_shape(self):
        """
        Returns the shape of the main result of KuramotoIndex.
        """
        return (1, )

    def result_size(self):
        """
        Returns the storage size in Bytes of the result of KuramotoIndex.
        """
        return 8.0  # Bytes

    def extended_result_size(self):
        """
        Returns the storage size in Bytes of the extended result of
        KuramotoIndex. That is, it includes storage of the evaluated index.
        """
        return 8.0  # Bytes
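
The docstring's order parameter translates directly into numpy. A synthetic-phase sketch (phases drawn uniformly at random, so r will be near zero; with real coupled oscillators it approaches 1):

import numpy

theta = numpy.random.uniform(0, 2 * numpy.pi, size=(100, 64))  # (time, nodes)
z = numpy.exp(1j * theta).mean(axis=1)     # r * exp(i*psi) per time point
r = numpy.abs(z)                           # phase coherence in [0, 1]
ksi = r.mean()                             # crude global synchronization index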
Example #15
class BalloonModel(core.Type):
    """

    A class for calculating the simulated BOLD signal given a TimeSeries
    object of TVB and returning another TimeSeries object.

    The haemodynamic model parameters are based on constants for a 1.5 T scanner.
        
    """

    #NOTE: a potential problem when the input is a TimeSeriesSurface.
    #TODO: add a spatial averaging for TimeSeriesSurface.

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries that represents the input neural activity""",
        order=1)
    # it also sets the bold sampling period.
    dt = basic.Float(
        label=":math:`dt`",
        default=0.002,
        required=True,
        doc="""The integration time step size for the balloon model (s).
        If none is provided, by default, the TimeSeries sample period is used.""",
        order=2)

    integrator = integrators_module.Integrator(
        label="Integration scheme",
        default=integrators_module.HeunDeterministic,
        required=True,
        order=-1,
        doc=""" A tvb.simulator.Integrator object which is
        an integration scheme with supporting attributes such as 
        integration step size and noise specification for stochastic 
        methods. It is used to compute the time courses of the balloon model state 
        variables.""")

    bold_model = basic.Enumerate(
        label="Select BOLD model equations",
        options=["linear", "nonlinear"],
        default=["nonlinear"],
        select_multiple=False,
        doc="""Select the set of equations for the BOLD model.""",
        order=4)

    RBM = basic.Bool(
        label="Revised BOLD Model",
        default=True,
        required=True,
        doc="""Select classical vs revised BOLD model (CBM or RBM).
        Coefficients  k1, k2 and k3 will be derived accordingly.""",
        order=5)

    neural_input_transformation = basic.Enumerate(
        label="Neural input transformation",
        options=["none", "abs_diff", "sum"],
        default=["none"],
        select_multiple=False,
        doc=
        """ This represents the operation to perform on the state-variable(s) of
        the model used to generate the input TimeSeries. ``none`` takes the
        first state-variable as neural input; ``abs_diff`` is the absolute
        value of the derivative (first order difference) of the first state variable; 
        ``sum``: sum all the state-variables of the input TimeSeries.""",
        order=3)

    tau_s = basic.Float(
        label=r":math:`\tau_s`",
        default=0.65,
        required=True,
        doc="""Balloon model parameter. Time of signal decay (s)""",
        order=-1)

    tau_f = basic.Float(
        label=r":math:`\tau_f`",
        default=0.41,
        required=True,
        doc=""" Balloon model parameter. Time of flow-dependent elimination or
        feedback regulation (s). The average time blood takes to traverse the
        venous compartment. It is the ratio of resting blood volume (V0) to
        resting blood flow (F0).""",
        order=-1)

    tau_o = basic.Float(label=r":math:`\tau_o`",
                        default=0.98,
                        required=True,
                        doc="""
        Balloon model parameter. Haemodynamic transit time (s). The average
        time blood takes to traverse the venous compartment. It is the ratio
        of resting blood volume (V0) to resting blood flow (F0).""",
                        order=-1)

    alpha = basic.Float(
        label=r":math:`\tau_f`",
        default=0.32,
        required=True,
        doc=
        """Balloon model parameter. Stiffness parameter. Grubb's exponent.""",
        order=-1)

    TE = basic.Float(label=":math:`TE`",
                     default=0.04,
                     required=True,
                     doc="""BOLD parameter. Echo Time""",
                     order=-1)

    V0 = basic.Float(label=":math:`V_0`",
                     default=4.0,
                     required=True,
                     doc="""BOLD parameter. Resting blood volume fraction.""",
                     order=-1)

    E0 = basic.Float(
        label=":math:`E_0`",
        default=0.4,
        required=True,
        doc="""BOLD parameter. Resting oxygen extraction fraction.""",
        order=-1)

    epsilon = arrays.FloatArray(
        label=":math:`\epsilon`",
        default=numpy.array([0.5]),
        range=basic.Range(lo=0.5, hi=2.0, step=0.25),
        required=True,
        doc=
        """ BOLD parameter. Ratio of intra- and extravascular signals. In principle  this
        parameter could be derived from empirical data and spatialized.""",
        order=-1)

    nu_0 = basic.Float(
        label=r":math:`\nu_0`",
        default=40.3,
        required=True,
        doc=
        """BOLD parameter. Frequency offset at the outer surface of magnetized vessels (Hz).""",
        order=-1)

    r_0 = basic.Float(
        label=":math:`r_0`",
        default=25.,
        required=True,
        doc=
        """ BOLD parameter. Slope r0 of intravascular relaxation rate (Hz). Only used for
        ``revised`` coefficients. """,
        order=-1)

    def evaluate(self):
        """
        Calculate simulated BOLD signal
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        #NOTE: Just using the first state variable, although in the Bold monitor
        #      input is the sum over the state-variables. Only time-series
        #      from basic monitors should be used as inputs.

        neural_activity, t_int = self.input_transformation(
            self.time_series, self.neural_input_transformation)
        input_shape = neural_activity.shape
        result_shape = self.result_shape(input_shape)
        LOG.debug("Result shape will be: %s" % str(result_shape))

        if self.dt is None:
            self.dt = self.time_series.sample_period / 1000.  # (s) integration time step
            msg = "Integration time step size for the balloon model is %s seconds" % str(
                self.dt)
            LOG.debug(msg)

        #NOTE: Avoid upsampling ...
        if self.dt < (self.time_series.sample_period / 1000.):
            msg = "Integration time step shouldn't be smaller than the sampling period of the input signal."
            LOG.error(msg)

        balloon_nvar = 4

        #NOTE: hard coded initial conditions
        state = numpy.zeros((input_shape[0], balloon_nvar, input_shape[2],
                             input_shape[3]))  # s
        state[0, 1, :] = 1.  # f
        state[0, 2, :] = 1.  # v
        state[0, 3, :] = 1.  # q

        # BOLD model coefficients
        k = self.compute_derived_parameters()
        k1, k2, k3 = k[0], k[1], k[2]

        # prepare integrator
        self.integrator.dt = self.dt
        self.integrator.configure()
        LOG.debug("Integration time step size will be: %s seconds" %
                  str(self.integrator.dt))

        scheme = self.integrator.scheme

        # NOTE: the following variables are not used in this integration but
        # required due to the way integrators scheme has been defined.

        local_coupling = 0.0
        stimulus = 0.0

        # Do some checks:
        if numpy.isnan(neural_activity).any():
            LOG.warning("NaNs detected in the neural activity!!")

        # normalise the time-series.
        neural_activity = neural_activity - neural_activity.mean(
            axis=0)[numpy.newaxis, :]

        # solve equations
        for step in range(1, t_int.shape[0]):
            state[step, :] = scheme(state[step - 1, :], self.balloon_dfun,
                                    neural_activity[step, :], local_coupling,
                                    stimulus)
            if numpy.isnan(state[step, :]).any():
                LOG.warning("NaNs detected...")

        # NOTE: just for the sake of clarity, define the variables used in the BOLD model
        s = state[:, 0, :]
        f = state[:, 1, :]
        v = state[:, 2, :]
        q = state[:, 3, :]

        #import pdb; pdb.set_trace()

        # BOLD models
        if self.bold_model == "nonlinear":
            """
            Non-linear BOLD model equations.
            Page 391. Eq. (13) top in [Stephan2007]_
            """
            y_bold = numpy.array(self.V0 * (k1 * (1. - q) + k2 *
                                            (1. - q / v) + k3 * (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]
            LOG.debug("Max value: %s" % str(y_b.max()))

        else:
            """
            Linear BOLD model equations.
            Page 391. Eq. (13) bottom in [Stephan2007]_ 
            """
            y_bold = numpy.array(self.V0 * ((k1 + k2) * (1. - q) + (k3 - k2) *
                                            (1. - v)))
            y_b = y_bold[:, numpy.newaxis, :, :]

        sample_period = 1. / self.dt

        bold_signal = time_series.TimeSeriesRegion(data=y_b,
                                                   time=t_int,
                                                   sample_period=sample_period,
                                                   sample_period_unit='s',
                                                   use_storage=False)

        return bold_signal

    def compute_derived_parameters(self):
        """
        Compute derived parameters :math:`k_1`, :math:`k_2` and :math:`k_3`.
        """

        if not self.RBM:
            """
            Classical BOLD Model Coefficients [Obata2004]_
            Page 389 in [Stephan2007]_, Eq. (3)
            """
            k1 = 7. * self.E0
            k2 = 2. * self.E0
            k3 = 1. - self.epsilon
        else:
            """
            Revised BOLD Model Coefficients.
            Generalized BOLD signal model.
            Page 400 in [Stephan2007]_, Eq. (12)
            """
            k1 = 4.3 * self.nu_0 * self.E0 * self.TE
            k2 = self.epsilon * self.r_0 * self.E0 * self.TE
            k3 = 1 - self.epsilon

        return numpy.array([k1, k2, k3])

    def input_transformation(self, time_series, mode):
        """
        Perform an operation on the input time-series.
        """

        LOG.debug("Computing: %s on the input time series" % str(mode))

        if mode == "none":
            ts = time_series.data[:, 0, :, :]
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        elif mode == "abs_diff":
            ts = abs(numpy.diff(time_series.data, axis=0))
            t_int = (time_series.time[1:] -
                     time_series.time[0:-1]) / 1000.  # (s)

        elif mode == "sum":
            ts = numpy.sum(time_series.data, axis=1)
            ts = ts[:, numpy.newaxis, :, :]
            t_int = time_series.time / 1000.  # (s)

        else:
            LOG.error("Bad operation/transformation mode, must be one of:")
            LOG.error("('abs_diff', 'sum', 'none')")
            raise Exception("Bad transformation mode")

        return ts, t_int

    def balloon_dfun(self, state_variables, neural_input, local_coupling=0.0):
        r"""
        The Balloon model equations. See Eqs. (4-10) in [Stephan2007]_
        .. math::
                \frac{ds}{dt} &= x - \kappa\,s - \gamma \,(f-1) \\
                \frac{df}{dt} &= s \\
                \frac{dv}{dt} &= \frac{1}{\tau_o} \, (f - v^{1/\alpha})\\
                \frac{dq}{dt} &= \frac{1}{\tau_o}\,(f \, \frac{1-(1-E_0)^{1/f}}{E_0} - v^{1/\alpha}\,\frac{q}{v})\\
                \kappa &= \frac{1}{\tau_s}\\
                \gamma &= \frac{1}{\tau_f}
        """

        s = state_variables[0, :]
        f = state_variables[1, :]
        v = state_variables[2, :]
        q = state_variables[3, :]

        x = neural_input[0, :]

        ds = x - (1. / self.tau_s) * s - (1. / self.tau_f) * (f - 1)
        df = s
        dv = (1. / self.tau_o) * (f - v**(1. / self.alpha))
        dq = (1. / self.tau_o) * ((f * (1. -
                                        (1. - self.E0)**(1. / f)) / self.E0) -
                                  (v**(1. / self.alpha)) * (q / v))

        return numpy.array([ds, df, dv, dq])

    def result_shape(self, input_shape):
        """Returns the shape of the main result of fmri balloon ..."""
        result_shape = (input_shape[0], input_shape[1], input_shape[2],
                        input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of the fMRI
        Balloon model.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  # Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the fMRI
        Balloon model. Currently identical to result_size, since no derived
        attributes are stored.
        """
        extend_size = self.result_size(
            input_shape)  # Currently no derived attributes.
        return extend_size
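As a quick numeric check of compute_derived_parameters above, here is a minimal standalone sketch using plausible parameter values from [Stephan2007]_; the numbers are illustrative assumptions, not necessarily the defaults of this adapter.

E0, epsilon, TE = 0.4, 0.5, 0.04   # resting O2 extraction, intra-/extravascular ratio, echo time (s) -- assumed values
nu_0, r_0 = 40.3, 25.0             # frequency offset (s**-1) and sensitivity slope (s**-1) -- assumed values

# Classical coefficients, Eq. (3) in [Stephan2007]_
k1, k2, k3 = 7.0 * E0, 2.0 * E0, 1.0 - epsilon   # (2.8, 0.8, 0.5)

# Revised coefficients, Eq. (12) in [Stephan2007]_
k1_r = 4.3 * nu_0 * E0 * TE        # ~2.77
k2_r = epsilon * r_0 * E0 * TE     # 0.2
k3_r = 1.0 - epsilon               # 0.5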
Example #16
class IndependentComponents(MappedType):
    """
    Result of an Independent Component Analysis.

    """
    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the ICA is applied.")

    mixing_matrix = arrays.FloatArray(
        label="Mixing matrix - Spatial Maps",
        doc="""The linear mixing matrix (Mixing matrix) """)

    unmixing_matrix = arrays.FloatArray(
        label="Unmixing matrix - Spatial maps",
        doc="""The estimated unmixing matrix used to obtain the unmixed
            sources from the data""")

    prewhitening_matrix = arrays.FloatArray(
        label="Pre-whitening matrix",
        doc=""" """)

    n_components = basic.Integer(
        label="Number of independent components",
        doc=""" Observed data matrix is considered to be a linear combination
        of :math:`n` non-Gaussian independent components""")

    norm_source = arrays.FloatArray(
        label="Normalised source time series. Zero centered and whitened.",
        file_storage=core.FILE_STORAGE_EXPAND)

    component_time_series = arrays.FloatArray(
        label="Component time series. Unmixed sources.",
        file_storage=core.FILE_STORAGE_EXPAND)

    normalised_component_time_series = arrays.FloatArray(
        label="Normalised component time series",
        file_storage=core.FILE_STORAGE_EXPAND)


    def write_data_slice(self, partial_result):
        """
        Append chunk.

        """
        self.store_data_chunk('unmixing_matrix', partial_result.unmixing_matrix, grow_dimension=2, close_file=False)
        self.store_data_chunk('prewhitening_matrix', partial_result.prewhitening_matrix,
                              grow_dimension=2, close_file=False)
        partial_result.compute_norm_source()
        self.store_data_chunk('norm_source', partial_result.norm_source, grow_dimension=1, close_file=False)
        partial_result.compute_component_time_series()
        self.store_data_chunk('component_time_series', partial_result.component_time_series,
                              grow_dimension=1, close_file=False)
        partial_result.compute_normalised_component_time_series()
        self.store_data_chunk('normalised_component_time_series', partial_result.normalised_component_time_series,
                              grow_dimension=1, close_file=False)
        partial_result.compute_mixing_matrix()
        self.store_data_chunk('mixing_matrix', partial_result.mixing_matrix, grow_dimension=2, close_file=False)

    def configure(self):
        """
        Invoke the compute methods for computable attributes that haven't been
        set during initialisation.
        """
        super(IndependentComponents, self).configure()
        if self.trait.use_storage is False and sum(self.get_data_shape('unmixing_matrix')) != 0:
            if self.norm_source.size == 0:
                self.compute_norm_source()
            if self.component_time_series.size == 0:
                self.compute_component_time_series()
            if self.normalised_component_time_series.size == 0:
                self.compute_normalised_component_time_series()

    def compute_norm_source(self):
        """Normalised source time-series."""
        self.norm_source = ((self.source.data - self.source.data.mean(axis=0)) /
                            self.source.data.std(axis=0))

    def compute_component_time_series(self):
        ts_shape = self.source.data.shape
        component_ts_shape = (ts_shape[0], ts_shape[1], self.n_components, ts_shape[3])
        component_ts = numpy.zeros(component_ts_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.unmixing_matrix[:, :, var, mode]
                k = self.prewhitening_matrix[:, :, var, mode]
                ts = self.source.data[:, var, :, mode]
                component_ts[:, var, :, mode] = numpy.dot(w, numpy.dot(k, ts.T)).T
        self.component_time_series = component_ts

    def compute_normalised_component_time_series(self):
        ts_shape = self.source.data.shape
        component_ts_shape = (ts_shape[0], ts_shape[1], self.n_components, ts_shape[3])
        component_nts = numpy.zeros(component_ts_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.unmixing_matrix[:, :, var, mode]
                k = self.prewhitening_matrix[:, :, var, mode]
                nts = self.norm_source[:, var, :, mode]
                component_nts[:, var, :, mode] = numpy.dot(w, numpy.dot(k, nts.T)).T
        self.normalised_component_time_series = component_nts

    def compute_mixing_matrix(self):
        """
        Compute the linear mixing matrix A, so X = A * S ,
        where X is the observed data and S contain the independent components
            """
        ts_shape = self.source.data.shape
        mixing_matrix_shape = (ts_shape[2], self.n_components, ts_shape[1], ts_shape[3])
        mixing_matrix = numpy.zeros(mixing_matrix_shape)
        for var in range(ts_shape[1]):
            for mode in range(ts_shape[3]):
                w = self.unmixing_matrix[:, :, var, mode]
                k = self.prewhitening_matrix[:, :, var, mode]
                temp = numpy.matrix(numpy.dot(w, k))
                mixing_matrix[:, :, var, mode] = numpy.array(numpy.dot(temp.T, numpy.dot(temp, temp.T).I))
        self.mixing_matrix = mixing_matrix

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.
        """
        summary = {"Mode decomposition type": self.__class__.__name__}
        summary["Source"] = self.source.title
        return summary
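The pseudo-inverse identity used in compute_mixing_matrix can be checked in isolation; a minimal sketch with hypothetical shapes, where M stands in for the product of the unmixing and pre-whitening matrices (components x nodes):

import numpy

M = numpy.random.random((3, 5))                            # stands in for W . K, full row rank
A = numpy.dot(M.T, numpy.linalg.inv(numpy.dot(M, M.T)))    # right pseudo-inverse, so X = A * S
assert numpy.allclose(numpy.dot(M, A), numpy.eye(3))       # M A = I when M has full row rank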
Example #17
class ComplexCoherenceSpectrum(arrays.MappedArray):
    """
    Result of a NodeComplexCoherence Analysis.
    """

    cross_spectrum = arrays.ComplexArray(
        label="The cross spectrum",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc=""" A complex ndarray that contains the nodes x nodes cross
                spectrum for every frequency frequency and for every segment."""
    )

    array_data = arrays.ComplexArray(
        label="Complex Coherence",
        file_storage=core.FILE_STORAGE_EXPAND,
        doc="""The complex coherence coefficients calculated from the cross
                spectrum. The imaginary values of this complex ndarray represent the
                imaginary coherence.""")

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="""Links to the time-series on which the node_coherence is
                applied.""")

    epoch_length = basic.Float(
        label="Epoch length",
        doc="""The timeseries was segmented into equally sized blocks
                (overlapping if necessary), prior to the application of the FFT.
                The segment length determines the frequency resolution of the
                resulting spectra.""")

    segment_length = basic.Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
                (overlapping if necessary), prior to the application of the FFT.
                The segment length determines the frequency resolution of the
                resulting spectra.""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
                application of the FFT.""")

    __generate_table__ = True

    _frequency = None
    _freq_step = None
    _max_freq = None

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.configure_chunk_safe()

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        self.store_data_chunk('cross_spectrum',
                              partial_result.cross_spectrum,
                              grow_dimension=2,
                              close_file=False)

        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=2,
                              close_file=False)

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Frequency step": self.freq_step,
            "Maximum frequency": self.max_freq
        }
        # summary["FFT length (time-points)"] = self.fft_points
        # summary["Number of epochs"] = self.number_of_epochs
        return summary

    @property
    def freq_step(self):
        """ Frequency step size of the Complex Coherence Spectrum."""
        if self._freq_step is None:
            self._freq_step = 1.0 / self.segment_length
            msg = "%s: Frequency step size is %s"
            LOG.debug(msg % (str(self), str(self._freq_step)))
        return self._freq_step

    @property
    def max_freq(self):
        """ Maximum frequency represented in the Complex Coherence Spectrum."""
        if self._max_freq is None:
            self._max_freq = 0.5 / self.source.sample_period
            msg = "%s: Max frequency is %s"
            LOG.debug(msg % (str(self), str(self._max_freq)))
        return self._max_freq

    @property
    def frequency(self):
        """ Frequencies represented in the Complex Coherence Spectrum."""
        if self._frequency is None:
            self._frequency = numpy.arange(self.freq_step,
                                           self.max_freq + self.freq_step,
                                           self.freq_step)
            util.log_debug_array(LOG, self._frequency, "frequency")
        return self._frequency
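The derived spectral properties follow directly from segment_length and the source sample period; a minimal sketch with hypothetical values (2 s segments, 4 ms sampling):

import numpy

segment_length, sample_period = 2.0, 0.004     # seconds -- assumed values
freq_step = 1.0 / segment_length               # 0.5 Hz frequency resolution
max_freq = 0.5 / sample_period                 # 125.0 Hz, the Nyquist frequency
frequency = numpy.arange(freq_step, max_freq + freq_step, freq_step)   # 250 bins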
Example #18
def test_datatypemeasure(self):
    data = numpy.random.random((10, 10, 10, 10))
    ts = time_series.TimeSeries(data=data)
    dt = mapped_values.DatatypeMeasure(analyzed_datatype=ts, metrics={"Dummy": 1})
    assert dt.display_name == "\nDummy : 1\n"
Example #19
class PCA(core.Type):
    """
    Return principal component weights and the fraction of the variance that 
    they explain. 
    
    PCA takes time-points as observations and nodes as variables.
    
    NOTE: The TimeSeries must be longer (more time-points) than the number of
          nodes -- Mostly a problem for TimeSeriesSurface datatypes, which, if
          sampled at 1024Hz, would need to be greater than 16 seconds long.
    """
    
    time_series = time_series.TimeSeries(
        label = "Time Series",
        required = True,
        doc = """The timeseries to which the PCA is to be applied. NOTE: The 
            TimeSeries must be longer(more time-points) than the number of nodes
            -- Mostly a problem for surface times-series, which, if sampled at
            1024Hz, would need to be greater than 16 seconds long.""")
    
    #TODO: Maybe should support first N components or neccessary components to
    #      explain X% of the variance. NOTE: For default surface the weights
    #      matrix has a size ~ 2GB * modes * vars...
    
    def evaluate(self):
        """
        Compute the temporal covariance between nodes in the time_series. 
        """
        cls_attr_name = self.__class__.__name__+".time_series"
        self.time_series.trait["data"].log_debug(owner = cls_attr_name)
        
        ts_shape = self.time_series.data.shape
        
        #Need more measurements than variables
        if ts_shape[0] < ts_shape[2]:
            msg = "PCA requires a longer timeseries (tpts > number of nodes)."
            LOG.error(msg)
            raise Exception(msg)
        
        #(nodes, nodes, state-variables, modes)
        weights_shape = (ts_shape[2], ts_shape[2], ts_shape[1], ts_shape[3])
        LOG.info("weights shape will be: %s" % str(weights_shape))
        
        fractions_shape = (ts_shape[2], ts_shape[1], ts_shape[3])
        LOG.info("fractions shape will be: %s" % str(fractions_shape))
        
        weights = numpy.zeros(weights_shape)
        fractions = numpy.zeros(fractions_shape)
        
        #One inter-node temporal covariance matrix for each state-var & mode.
        for mode in range(ts_shape[3]):
            for var in range(ts_shape[1]):
                data = self.time_series.data[:, var, :, mode]
                data_pca = mlab.PCA(data)
                fractions[:, var, mode] = data_pca.fracs
                weights[:, :, var, mode] = data_pca.Wt
        
        util.log_debug_array(LOG, fractions, "fractions")
        util.log_debug_array(LOG, weights, "weights")
        
        pca_result = mode_decompositions.PrincipalComponents(
            source = self.time_series,
            fractions = fractions,
            weights = weights,
            use_storage = False)
        
        return pca_result
    
    
    def result_shape(self, input_shape):
        """
        Returns the shape of the main result of the PCA analysis -- component
        weights matrix and a vector of fractions.
        """
        weights_shape = (input_shape[2], input_shape[2], input_shape[1],
                         input_shape[3])
        fractions_shape = (input_shape[2], input_shape[1], input_shape[3])
        return [weights_shape, fractions_shape]
    
    
    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the results of the PCA analysis.
        """
        result_size = numpy.sum(list(map(numpy.prod,
                                         self.result_shape(input_shape)))) * 8.0 #Bytes
        return result_size
    
    
    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the PCA.
        That is, it includes storage of the evaluated PrincipalComponents
        attributes such as norm_source, component_time_series, etc.
        """
        result_size = self.result_size(input_shape)
        extend_size = result_size #Main arrays
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #norm_source
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #component_time_series
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #normalised_component_time_series
        return extend_size
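A minimal usage sketch for the PCA analyser, assuming a working TVB environment and hypothetical shapes; note the time dimension (100) exceeds the node dimension (10), as the NOTE above requires:

import numpy
from tvb.datatypes import time_series

# hypothetical 4D data: (time, state-variables, nodes, modes)
ts = time_series.TimeSeries(data=numpy.random.random((100, 1, 10, 1)),
                            sample_period=1.0, use_storage=False)
pca = PCA(time_series=ts)
result = pca.evaluate()   # PrincipalComponents: weights (10, 10, 1, 1), fractions (10, 1, 1)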
Example #20
class fastICA(core.Type):
    """
    Takes a TimeSeries datatype (x) and returns the unmixed temporal sources (S) 
    and the estimated mixing matrix (A).
    
    :math:`x = A S`
    
    ICA takes time-points as observations and nodes as variables.
    
    It uses the FastICA algorithm implemented in the scikit-learn toolkit, and
    its intended usage is as a `blind source separation` method.
    
    See also: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.fastica.html#sklearn.decomposition.fastica

    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="The timeseries to which the ICA is to be applied.")

    n_components = basic.Integer(
        label="Number of principal components to unmix.",
        required=False,
        default=None,
        doc="Number of principal components to unmix.")

    def evaluate(self):
        "Run FastICA on the given time series data."

        # problem dimensions
        data = self.time_series.data
        n_time, n_svar, n_node, n_mode = data.shape
        self.n_components = n_comp = self.n_components or n_node

        if n_time < n_comp:
            msg = (
                "ICA requires more time points (received %d) than number of components (received %d)."
                " Please run a longer simulation, use a higher sampling frequency or specify a lower"
                " number of components to extract.")
            msg %= n_time, n_comp
            raise ValueError(msg)

        # ICA operates on matrices, here we perform for all state variables and modes
        W = numpy.zeros((n_comp, n_comp, n_svar, n_mode))  # unmixing
        K = numpy.zeros((n_comp, n_node, n_svar, n_mode))  # whitening matrix
        src = numpy.zeros(
            (n_time, n_comp, n_svar, n_mode))  # component time series

        for mode in range(n_mode):
            for var in range(n_svar):
                sl = Ellipsis, var, mode
                K[sl], W[sl], src[sl] = fastica(data[:, var, :, mode],
                                                self.n_components)

        return mode_decompositions.IndependentComponents(
            source=self.time_series,
            component_time_series=src,
            prewhitening_matrix=K,
            unmixing_matrix=W,
            n_components=n_comp,
            use_storage=False)

    def result_shape(self, input_shape):
        "Returns the shape of the mixing matrix."
        n = self.n_components or input_shape[2]
        return n, n, input_shape[1], input_shape[3]

    def result_size(self, input_shape):
        "Returns the storage size in bytes of the mixing matrix of the ICA analysis, assuming 64-bit float."
        return numpy.prod(self.result_shape(input_shape)) * 8

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in bytes of the extended result of the ICA.
        """

        n_time, n_svar, n_node, n_mode = input_shape
        n_comp = self.n_components or n_node

        n = numpy.prod(self.result_shape(input_shape))
        n += numpy.prod((n_comp, n_comp, n_svar, n_mode))  # unmixing
        n += numpy.prod((n_comp, n_node, n_svar, n_mode))  # whitening
        n += numpy.prod((n_time, n_comp, n_svar, n_mode))  # sources

        return n * 8
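For reference, a standalone sketch of the underlying scikit-learn call and the shapes it returns, on hypothetical data:

import numpy
from sklearn.decomposition import fastica

X = numpy.random.random((200, 10))     # time points x nodes -- hypothetical data
K, W, S = fastica(X, n_components=4)   # whitening K (4, 10), unmixing W (4, 4), sources S (200, 4)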
Example #21
class CrossCorrelate(core.Type):
    """
    Compute the node-pairwise cross-correlation of the given input 4D TimeSeries DataType.
    
    Return a CrossCorrelation DataType. It contains the cross-correlation
    sequences for all possible combinations of the nodes.
    
    See: http://www.scipy.org/doc/api_docs/SciPy.signal.signaltools.html#correlate
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc=
        """The time-series for which the cross correlation sequences are calculated."""
    )

    def evaluate(self):
        """
        Cross-correlate two one-dimensional arrays.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        #(tpts, nodes, nodes, state-variables, modes)
        result_shape = self.result_shape(self.time_series.data.shape)
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        #TODO: For region level, 4s, 2000Hz, this takes ~3hours...(which makes node_coherence seem positively speedy...)
        # Probably best to add a keyword for offsets, so we just compute +- some "small" range...
        # One inter-node correlation, across offsets, for each state-var & mode.
        for mode in range(result_shape[4]):
            for var in range(result_shape[3]):
                data = self.time_series.data[:, var, :, mode]
                data = data - data.mean(axis=0)[numpy.newaxis, :]
                #TODO: Work out a way around the 4 level loop:
                for n1 in range(result_shape[1]):
                    for n2 in range(result_shape[2]):
                        result[:, n1, n2, var, mode] = correlate(data[:, n1],
                                                                 data[:, n2],
                                                                 mode="same")

        util.log_debug_array(LOG, result, "result")

        offset = (self.time_series.sample_period *
                  numpy.arange(-numpy.floor(result_shape[0] / 2.0),
                               numpy.ceil(result_shape[0] / 2.0)))

        cross_corr = temporal_correlations.CrossCorrelation(
            source=self.time_series,
            array_data=result,
            time=offset,
            use_storage=False)

        return cross_corr

    def result_shape(self, input_shape):
        """Returns the shape of the main result of ...."""
        result_shape = (input_shape[0], input_shape[2], input_shape[2],
                        input_shape[1], input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result of the
        cross-correlation analysis.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  # Bytes
        return result_size
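A standalone sketch of the correlation call and the offset convention used in evaluate, on a pair of hypothetical signals; the peak of the sequence sits at the offset that best aligns the two:

import numpy
from scipy.signal import correlate

n = 100
a = numpy.sin(numpy.linspace(0, 10, n))
b = numpy.roll(a, 5)                                     # delayed copy of a
xc = correlate(a - a.mean(), b - b.mean(), mode="same")
offsets = numpy.arange(-numpy.floor(n / 2.0), numpy.ceil(n / 2.0))
print(offsets[xc.argmax()])                              # recovers the 5-sample shift (sign is convention-dependent)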
Example #22
class ContinuousWaveletTransform(core.Type):
    """
    A class for calculating the wavelet transform of a TimeSeries object of TVB
    and returning a WaveletSpectrum object. The sampling period and frequency
    range of the result can be specified. The mother wavelet can also be 
    specified... (So far, only Morlet.)
    
    References:
        .. [TBetal_1996] C. Tallon-Baudry et al, *Stimulus Specificity of 
            Phase-Locked and Non-Phase-Locked 40 Hz Visual Responses in Human.*,
            J Neurosci 16(13):4240-4249, 1996. 
        
        .. [Mallat_1999] S. Mallat, *A wavelet tour of signal processing.*,
            book, Academic Press, 1999.
        
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the wavelet is to be applied.""")

    mother = basic.String(
        label="Wavelet function",
        default="morlet",
        required=True,
        doc="""The mother wavelet function used in the transform. Default is
            'morlet', possibilities are: 'morlet'...""")

    sample_period = basic.Float(
        label="Sample period of result (ms)",
        default=7.8125,  #7.8125 => 128 Hz
        required=True,
        doc="""The sampling period of the computed wavelet spectrum. NOTE:
            This should be an integral multiple of the of the sampling period 
            of the source time series, otherwise the actual resulting sample
            period will be the first correct value below that requested.""")

    frequencies = basic.Range(
        label="Frequency range of result (kHz).",
        default=basic.Range(lo=0.008, hi=0.060, step=0.002),
        required=True,
        doc="""The frequency resolution and range returned. Requested
            frequencies are converted internally into appropriate scales.""")

    normalisation = basic.String(
        label="Normalisation",
        default="energy",
        required=True,
        doc="""The type of normalisation for the resulting wavet spectrum.
            Default is 'energy', options are: 'energy'; 'gabor'.""")

    q_ratio = basic.Float(
        label="Q-ratio",
        default=5.0,
        required=True,
        doc=
        """NFC. Must be greater than 5. Ratios of the center frequencies to bandwidths."""
    )

    def evaluate(self):
        """
        Calculate the continuous wavelet transform of time_series.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        ts_shape = self.time_series.data.shape

        if self.frequencies.step == 0:
            LOG.warning(
                "Frequency step can't be 0! Trying default step, 2e-3.")
            self.frequencies.step = 0.002

        freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                             self.frequencies.step)

        if (freqs.size == 0) or any(
                freqs <= 0.0
        ):  #TODO: Maybe should limit number of freqs... ~100 is probably a reasonable upper bound.
            LOG.warning("Invalid frequency range! Falling back to default.")
            util.log_debug_array(LOG, freqs, "freqs")
            self.frequencies = basic.Range(lo=0.008, hi=0.060, step=0.002)
            freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                                 self.frequencies.step)

        util.log_debug_array(LOG, freqs, "freqs")

        sample_rate = self.time_series.sample_rate

        # Duke: code below is as given by Andreas Spiegler, I've just wrapped
        # some of the original argument names
        nf = len(freqs)
        temporal_step = max(
            (1, iround(self.sample_period / self.time_series.sample_period)))
        nt = int(numpy.ceil(ts_shape[0] / temporal_step))

        if not isinstance(self.q_ratio, numpy.ndarray):
            Q_ratio = self.q_ratio * numpy.ones((1, nf))
        else:
            Q_ratio = self.q_ratio

        if numpy.nanmin(Q_ratio) < 5:
            msg = "Q_ratio must not be lower than 5!"
            LOG.error(msg)
            raise Exception(msg)

        if numpy.nanmax(freqs) > sample_rate / 2.0:
            msg = "Sampling rate is too low for the requested frequency range!"
            LOG.error(msg)
            raise Exception(msg)

        #TODO: This isn't used, but min frequency seems like it should be important... Check with A.S.
        #  fmin = 3.0 * numpy.nanmin(Q_ratio) * sample_rate / numpy.pi / nt
        sigma_f = freqs / Q_ratio
        sigma_t = 1.0 / (2.0 * numpy.pi * sigma_f)

        if self.normalisation == 'energy':
            Amp = 1.0 / numpy.sqrt(
                sample_rate * numpy.sqrt(numpy.pi) * sigma_t)
        elif self.normalisation == 'gabor':
            Amp = numpy.sqrt(2.0 / numpy.pi) / sample_rate / sigma_t

        coef_shape = (nf, nt, ts_shape[1], ts_shape[2], ts_shape[3])

        coef = numpy.zeros(coef_shape, dtype=numpy.complex128)
        util.log_debug_array(LOG, coef, "coef")
        scales = numpy.arange(0, nf, 1)
        for i in scales:
            f0 = freqs[i]
            SDt = sigma_t[(0, i)]
            A = Amp[(0, i)]
            x = numpy.arange(0, 4.0 * SDt * sample_rate, 1) / sample_rate
            wvlt = A * numpy.exp(-x**2 / (2.0 * SDt**2)) * numpy.exp(
                2j * numpy.pi * f0 * x)
            wvlt = numpy.hstack((numpy.conjugate(wvlt[-1:0:-1]), wvlt))
            #util.log_debug_array(LOG, wvlt, "wvlt")

            for var in range(ts_shape[1]):
                for node in range(ts_shape[2]):
                    for mode in range(ts_shape[3]):
                        data = self.time_series.data[:, var, node, mode]
                        wt = signal.convolve(data, wvlt, 'same')
                        #util.log_debug_array(LOG, wt, "wt")
                        res = wt[0::temporal_step]
                        #NOTE: this is a horrible horrible quick hack (alas, a solution) to avoid broadcasting errors
                        # when using dt and sample periods which are not powers of 2.
                        coef[i, :, var, node, mode] = res if len(
                            res) == nt else res[:coef.shape[1]]

        util.log_debug_array(LOG, coef, "coef")

        spectra = spectral.WaveletCoefficients(
            source=self.time_series,
            mother=self.mother,
            sample_period=self.sample_period,
            frequencies=self.frequencies,
            normalisation=self.normalisation,
            q_ratio=self.q_ratio,
            array_data=coef,
            use_storage=False)

        return spectra

    def result_shape(self, input_shape):
        """
        Returns the shape of the main result (complex array) of the continuous
        wavelet transform.
        """
        freq_len = int((self.frequencies.hi - self.frequencies.lo) /
                       self.frequencies.step)
        temporal_step = max(
            (1, self.sample_period / self.time_series.sample_period))
        nt = int(round(input_shape[0] / temporal_step))
        result_shape = (
            freq_len,
            nt,
        ) + input_shape[1:]
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the main result (complex array) of
        the continuous wavelet transform.
        """
        result_size = numpy.prod(
            self.result_shape(input_shape)) * 2.0 * 8.0  #complex*Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the 
        continuous wavelet transform.  That is, it includes storage of the
        evaluated WaveletCoefficients attributes such as power, phase, 
        amplitude, etc.
        """
        result_shape = self.result_shape(input_shape)
        result_size = self.result_size(input_shape)
        extend_size = result_size  #Main array
        extend_size = extend_size + 0.5 * result_size  #Amplitude
        extend_size = extend_size + 0.5 * result_size  #Phase
        extend_size = extend_size + 0.5 * result_size  #Power
        extend_size = extend_size + result_shape[0] * 8.0  #Frequency
        return extend_size
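The Morlet kernel built inside evaluate can be previewed in isolation; a minimal sketch with hypothetical numbers, using the 'energy' normalisation from above:

import numpy

sample_rate, f0, q_ratio = 256.0, 10.0, 5.0    # Hz, centre frequency, Q-ratio -- assumed values
sigma_f = f0 / q_ratio                         # frequency-domain spread
sigma_t = 1.0 / (2.0 * numpy.pi * sigma_f)     # time-domain spread
amp = 1.0 / numpy.sqrt(sample_rate * numpy.sqrt(numpy.pi) * sigma_t)

x = numpy.arange(0, 4.0 * sigma_t * sample_rate, 1) / sample_rate
half = amp * numpy.exp(-x ** 2 / (2.0 * sigma_t ** 2)) * numpy.exp(2j * numpy.pi * f0 * x)
wvlt = numpy.hstack((numpy.conjugate(half[-1:0:-1]), half))   # symmetric complex Morlet kernel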
Example #23
class NodeCovariance(core.Type):
    """
    Compute the temporal covariance of nodes in a TimeSeries dataType.
    A nodes x nodes matrix is returned for each (state-variable, mode).
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the NodeCovariance is to be applied.""")

    def evaluate(self):
        """
        Compute the temporal covariance between nodes in the time_series.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        data_shape = self.time_series.data.shape

        #(nodes, nodes, state-variables, modes)
        result_shape = (data_shape[2], data_shape[2], data_shape[1],
                        data_shape[3])
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        #One inter-node temporal covariance matrix for each state-var & mode.
        for mode in range(data_shape[3]):
            for var in range(data_shape[1]):
                data = self.time_series.data[:, var, :, mode]
                data = data - data.mean(axis=0)[numpy.newaxis, :]
                result[:, :, var, mode] = numpy.cov(data.T)

        util.log_debug_array(LOG, result, "result")

        covariance = graph.Covariance(source=self.time_series,
                                      array_data=result,
                                      use_storage=False)

        return covariance

    def result_shape(self, input_shape):
        """
        Returns the shape of the main result of the NodeCovariance analysis.
        """
        result_shape = (input_shape[2], input_shape[2], input_shape[1],
                        input_shape[3])
        return result_shape

    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the NodeCovariance result.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0  #Bytes
        return result_size

    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the NodeCovariance extended result.
        Currently identical to result_size, since no derived attributes are
        stored.
        """
        extend_size = self.result_size(
            input_shape)  #Currently no derived attributes.
        return extend_size
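The core of the computation is a demeaned numpy.cov per (state-variable, mode) slice; a minimal standalone sketch with hypothetical shapes:

import numpy

data = numpy.random.random((250, 16))    # time points x nodes -- hypothetical data
data = data - data.mean(axis=0)          # demean every node
cov = numpy.cov(data.T)                  # (16, 16) inter-node temporal covariance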
Example #24
class FourierSpectrum(arrays.MappedArray):
    """
    Result of a Fourier Analysis.
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray(file_storage=core.FILE_STORAGE_EXPAND)

    source = time_series.TimeSeries(
        label="Source time-series",
        doc="Links to the time-series on which the FFT is applied.")

    segment_length = basic.Float(
        label="Segment length",
        doc="""The timeseries was segmented into equally sized blocks
            (overlapping if necessary), prior to the application of the FFT.
            The segment length determines the frequency resolution of the
            resulting spectra.""")

    windowing_function = basic.String(
        label="Windowing function",
        doc="""The windowing function applied to each time segment prior to
            application of the FFT.""")

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    average_power = arrays.FloatArray(label="Average Power",
                                      file_storage=core.FILE_STORAGE_EXPAND)

    normalised_average_power = arrays.FloatArray(
        label="Normalised Power", file_storage=core.FILE_STORAGE_EXPAND)

    _frequency = None
    _freq_step = None
    _max_freq = None

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1),
                    int(self.read_data_shape()[i]))

        if self.trait.use_storage is False and sum(
                self.get_data_shape('array_data')) != 0:
            if self.amplitude.size == 0:
                self.compute_amplitude()
            if self.phase.size == 0:
                self.compute_phase()
            if self.power.size == 0:
                self.compute_power()
            if self.average_power.size == 0:
                self.compute_average_power()
            if self.normalised_average_power.size == 0:
                self.compute_normalised_average_power()

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        # self.store_data_chunk('array_data', partial_result, grow_dimension=2, close_file=False)

        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_amplitude()
        self.store_data_chunk('amplitude',
                              partial_result.amplitude,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_phase()
        self.store_data_chunk('phase',
                              partial_result.phase,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_power()
        self.store_data_chunk('power',
                              partial_result.power,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_average_power()
        self.store_data_chunk('average_power',
                              partial_result.average_power,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_normalised_average_power()
        self.store_data_chunk('normalised_average_power',
                              partial_result.normalised_average_power,
                              grow_dimension=2,
                              close_file=False)

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Segment length": self.segment_length,
            "Windowing function": self.windowing_function,
            "Frequency step": self.freq_step,
            "Maximum frequency": self.max_freq
        }
        return summary

    @property
    def freq_step(self):
        """ Frequency step size of the complex Fourier spectrum."""
        if self._freq_step is None:
            self._freq_step = 1.0 / self.segment_length
            msg = "%s: Frequency step size is %s"
            LOG.debug(msg % (str(self), str(self._freq_step)))
        return self._freq_step

    @property
    def max_freq(self):
        """ Amplitude of the complex Fourier spectrum."""
        if self._max_freq is None:
            self._max_freq = 0.5 / self.source.sample_period
            msg = "%s: Max frequency is %s"
            LOG.debug(msg % (str(self), str(self._max_freq)))
        return self._max_freq

    @property
    def frequency(self):
        """ Frequencies represented the complex Fourier spectrum."""
        if self._frequency is None:
            self._frequency = numpy.arange(self.freq_step,
                                           self.max_freq + self.freq_step,
                                           self.freq_step)
            util.log_debug_array(LOG, self._frequency, "frequency")
        return self._frequency

    def compute_amplitude(self):
        """ Amplitude of the complex Fourier spectrum."""
        self.amplitude = numpy.abs(self.array_data)
        self.trait["amplitude"].log_debug(owner=self.__class__.__name__)

    def compute_phase(self):
        """ Phase of the Fourier spectrum."""
        self.phase = numpy.angle(self.array_data)
        self.trait["phase"].log_debug(owner=self.__class__.__name__)

    def compute_power(self):
        """ Power of the complex Fourier spectrum."""
        self.power = numpy.abs(self.array_data)**2
        self.trait["power"].log_debug(owner=self.__class__.__name__)

    def compute_average_power(self):
        """ Average-power of the complex Fourier spectrum."""
        self.average_power = numpy.mean(numpy.abs(self.array_data)**2, axis=-1)
        self.trait["average_power"].log_debug(owner=self.__class__.__name__)

    def compute_normalised_average_power(self):
        """ Normalised-average-power of the complex Fourier spectrum."""
        self.normalised_average_power = (self.average_power /
                                         numpy.sum(self.average_power, axis=0))
        self.trait["normalised_average_power"].log_debug(
            owner=self.__class__.__name__)
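A standalone sketch of the power pipeline above on hypothetical spectra: power is the squared magnitude, average power is the mean over segments, and the normalised average power sums to one over frequency:

import numpy

X = numpy.fft.fft(numpy.random.random((128, 4)), axis=0)[1:65]   # 64 positive-frequency bins x 4 "segments"
power = numpy.abs(X) ** 2
average_power = power.mean(axis=-1)                              # average over segments
normalised = average_power / average_power.sum(axis=0)           # normalise over frequency
assert numpy.isclose(normalised.sum(), 1.0)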
Example #25
    def launch(self, model, model_parameters, integrator, integrator_parameters, connectivity,
               monitors, monitors_parameters=None, surface=None, surface_parameters=None, stimulus=None,
               coupling=None, coupling_parameters=None, initial_conditions=None,
               conduction_speed=None, simulation_length=0, simulation_state=None):
        """
        Called from the GUI to launch a simulation.
          *: string class name of chosen model, etc...
          *_parameters: dictionary of parameters for chosen model, etc...
          connectivity: tvb.datatypes.connectivity.Connectivity object.
          surface: tvb.datatypes.surfaces.CorticalSurface: or None.
          stimulus: tvb.datatypes.patterns.* object
        """
        result_datatypes = dict()
        start_time = self.algorithm.current_step * self.algorithm.integrator.dt
        m_ind = -1
        for m_name in monitors:
            m_ind += 1
            sample_period = self.algorithm.monitors[m_ind].period
            # Create the required output for each monitor that was submitted
            if (m_name in self.RESULTS_MAP[time_series.TimeSeriesEEG]
                    and hasattr(self.algorithm.monitors[m_ind], 'sensors')):
                result_datatypes[m_name] = time_series.TimeSeriesEEG(storage_path=self.storage_path,
                                                                     sensors=self.algorithm.monitors[m_ind].sensors,
                                                                     sample_period=sample_period,
                                                                     title=' ' + m_name, start_time=start_time, )

            elif (m_name in self.RESULTS_MAP[time_series.TimeSeriesMEG]
                  and hasattr(self.algorithm.monitors[m_ind], 'sensors')):
                result_datatypes[m_name] = time_series.TimeSeriesMEG(storage_path=self.storage_path,
                                                                     sensors=self.algorithm.monitors[m_ind].sensors,
                                                                     sample_period=sample_period,
                                                                     title=' ' + m_name, start_time=start_time)

            elif m_name in self.RESULTS_MAP[time_series.TimeSeries]:
                result_datatypes[m_name] = time_series.TimeSeries(storage_path=self.storage_path,
                                                                  sample_period=sample_period,
                                                                  title=' ' + m_name, start_time=start_time)

            elif surface is None:
                ## We do not have a surface selected from UI, or regions only result.
                result_datatypes[m_name] = time_series.TimeSeriesRegion(storage_path=self.storage_path,
                                                                        connectivity=connectivity,
                                                                        sample_period=sample_period,
                                                                        title='Regions ' + m_name,
                                                                        start_time=start_time)

            else:
                result_datatypes[m_name] = time_series.TimeSeriesSurface(storage_path=self.storage_path,
                                                                         surface=surface, sample_period=sample_period,
                                                                         title='Surface ' + m_name,
                                                                         start_time=start_time)
            # Now check if the monitor will return results for each state variable, in which case store
            # the labels for these state variables.
            if m_name in self.HAVE_STATE_VARIABLES:
                selected_state_vars = [self.algorithm.model.state_variables[idx]
                                       for idx in self.algorithm.monitors[m_ind].voi]
                state_variable_dimension_name = result_datatypes[m_name].labels_ordering[1]
                result_datatypes[m_name].labels_dimensions[state_variable_dimension_name] = selected_state_vars
        
        #### Create Simulator State entity and persist it in DB. H5 file will be empty now.
        if not self._is_group_launch():
            simulation_state = SimulationState(storage_path=self.storage_path)
            self._capture_operation_results([simulation_state])

        ### Run simulation
        self.log.debug("%s: Starting simulation..." % str(self))
        for result in self.algorithm(simulation_length=simulation_length):
            for j, monitor in enumerate(monitors):
                if result[j] is not None:
                    result_datatypes[monitor].write_time_slice([result[j][0]])
                    result_datatypes[monitor].write_data_slice([result[j][1]])

        self.log.debug("%s: Completed simulation, starting to store simulation state " % str(self))
        ### Populate H5 file for simulator state. This step could also be done while running sim, in background.
        if not self._is_group_launch():
            simulation_state.populate_from(self.algorithm)
            self._capture_operation_results([simulation_state])

        self.log.debug("%s: Simulation state persisted, returning results " % str(self))
        final_results = []
        for result in result_datatypes.values():
            result.close_file()
            final_results.append(result)
        self.log.info("%s: Adapter simulation finished!!" % str(self))
        return final_results
Example #26
class fastICA(core.Type):
    """
    Takes a TimeSeries datatype (x) and returns the unmixed temporal sources (S) 
    and the estimated mixing matrix (A).
    
    :math:`x = AS`
    
    ICA takes time-points as observations and nodes as variables.
    
    It uses the fastICA algorithm implemented in the scikit-learn toolkit, and 
    its intended usage is as a `blind source separation` method.
    
    See also: http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.fastica.html#sklearn.decomposition.fastica
    
    Before the fastICA algorithm can be applied, the input vector data 
    should be whitened (`sphering`). This means that any correlations in the 
    data are removed, i.e. the signals are forced to be uncorrelated. To this end,
    the `whiten` parameter is always set to `True`.
    
    NOTE: As for PCA the TimeSeries datatype must be longer (more time-points)
          than the number of nodes -- Mostly a problem for TimeSeriesSurface 
          datatypes, which, if sampled at 1024Hz, would need to be greater than 
          16 seconds long.
    """
    
    time_series = time_series.TimeSeries(
        label = "Time Series",
        required = True,
        doc = """The timeseries to which the ICA is to be applied. NOTE: The 
            TimeSeries must be longer(more time-points) than the number of nodes
            -- Mostly a problem for surface times-series, which, if sampled at
            1024Hz, would need to be greater than 16 seconds long.""")
            
    n_components = basic.Integer(
        label = "Number of components to extract",
        required = False,
        default = None,
        doc = """Number of components to extract and to perform dimension reduction.
            The number of components must be less than the number of variables.
            By default it takes number of components = number of nodes. Definitely
            a problem for surface time-series.""")
    
    # NOTE: For default surface the weights matrix has a size ~ 2GB * modes * vars...
    
    def evaluate(self):
        """
        Compute the independent sources 
        """
        cls_attr_name = self.__class__.__name__+".time_series"
        self.time_series.trait["data"].log_debug(owner = cls_attr_name)
        
        ts_shape = self.time_series.data.shape
        
        #Need more observations than variables
        if ts_shape[0] < ts_shape[2]:
            msg = "ICA requires a longer timeseries (tpts > number of nodes)."
            LOG.error(msg)
            raise Exception(msg)

        if self.n_components is None:
            self.n_components = ts_shape[2]

        #Need more variables than components
        if self.n_components > ts_shape[2]:
            msg = "ICA requires more variables than components to extract (number of nodes > number of components)."
            LOG.error(msg)
            raise Exception(msg)
        
        #(n_components, n_components, state-variables, modes) --  unmixing matrix
        unmixing_matrix_shape = (self.n_components, self.n_components, ts_shape[1], ts_shape[3])
        LOG.info("unmixing matrix shape will be: %s" % str(unmixing_matrix_shape))
        
        # (n_components, nodes, state_variables, modes) -- prewhitening matrix
        prewhitening_matrix_shape = (self.n_components, ts_shape[2], ts_shape[1], ts_shape[3])
        LOG.info("prewhitening matrix shape will be: %s" % str(prewhitening_matrix_shape))
        
        
        unmixing_matrix = numpy.zeros(unmixing_matrix_shape)
        prewhitening_matrix = numpy.zeros(prewhitening_matrix_shape)
        
        
        #(tpts, n_components, state_variables, modes) -- unmixed sources time series
        data_ica = numpy.zeros((ts_shape[0], self.n_components, ts_shape[1], ts_shape[3]))
        
        #One un/mixing matrix for each state-var & mode.
        for mode in range(ts_shape[3]):
            for var in range(ts_shape[1]):
                # Assumes data must be whitened
                ica = fastica(self.time_series.data[:, var, :, mode], 
                                            n_components = self.n_components,
                                            whiten = True)
                # unmixed sources - component_time_series
                data_ica[:, :, var, mode] = ica[2]
                # prewhitening matrix
                prewhitening_matrix[:, :, var, mode] = ica[0]
                # unmixing matrix
                unmixing_matrix[:, :, var, mode] = ica[1]
        
        util.log_debug_array(LOG, prewhitening_matrix, "prewhitening_matrix")
        util.log_debug_array(LOG, unmixing_matrix, "unmixing_matrix")

        
        ica_result = mode_decompositions.IndependentComponents(source = self.time_series,
                                         component_time_series = data_ica, 
                                         #mixing_matrix = mixing_matrix,
                                         prewhitening_matrix = prewhitening_matrix,
                                         unmixing_matrix = unmixing_matrix,
                                         n_components = self.n_components, 
                                         use_storage = False)
        
        return ica_result
    
    
    def result_shape(self, input_shape):
        """
        Returns the shape of the main result of the ICA analysis -- component
        mixing matrix.
        """
        unmixing_matrix_shape = ((self.n_components or input_shape[2]), 
                                 (self.n_components or input_shape[2]),
                                 input_shape[1], input_shape[3])
        return unmixing_matrix_shape
    
    
    def result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the results of the ICA analysis.
        """
        result_size = numpy.prod(self.result_shape(input_shape)) * 8.0 #Bytes
        return result_size
    
    
    def extended_result_size(self, input_shape):
        """
        Returns the storage size in Bytes of the extended result of the ICA.
        That is, it includes storage of the evaluated IndependentComponents
        attributes such as norm_source, component_time_series, etc.
        """
        result_size = self.result_size(input_shape)
        extend_size = result_size #Main arrays
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #norm_source
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #component_time_series
        extend_size = extend_size + numpy.prod(input_shape) * 8.0 #normalised_component_time_series
        return extend_size
Example #27
class FFT(core.Type):
    """
    A class for calculating the FFT of a TimeSeries object of TVB and returning
    a FourierSpectrum object. A segment length and windowing function can be
    optionally specified. By default the time series is segmented into 1 second
    blocks and no windowing function is applied.
    """

    time_series = time_series.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The timeseries to which the FFT is to be applied.""")

    segment_length = basic.Float(
        label="Segment(window) length (ms)",
        default=1000.0,
        required=False,
        doc="""The timeseries can be segmented into equally sized blocks
            (overlapping if necessary). The segement length determines the
            frequency resolution of the resulting power spectra -- longer
            windows produce finer frequency resolution.""")

    window_function = basic.String(
        label="Windowing function",
        default=None,
        required=False,
        doc="""Windowing functions can be applied before the FFT is performed.
             Default is None, possibilities are: 'hamming'; 'bartlett';
            'blackman'; and 'hanning'. See, numpy.<function_name>.""")

    def evaluate(self):
        """
        Calculate the FFT of time_series broken into segments of length
        segment_length and filtered by window_function.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        #Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(self.segment_length / self.time_series.sample_period)
            overlap = ((seg_tpts * nseg) - tpts) / (nseg - 1)
            starts = [
                int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)
            ]
            segments = [
                self.time_series.data[start:start + seg_tpts]
                for start in starts
            ]
            segments = [
                segment[:, :, :, numpy.newaxis] for segment in segments
            ]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            self.segment_length = time_series_length
            time_series = self.time_series.data[:, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]

        LOG.debug("Segment length being used is: %s" % self.segment_length)

        #Base-line correct the segmented time-series
        time_series = sp_signal.detrend(time_series, axis=0)
        util.log_debug_array(LOG, time_series, "time_series")

        #Apply windowing function
        if self.window_function is not None:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                LOG.error("Windowing function is: %s" % self.window_function)
                LOG.error("Must be in: %s" %
                          str(SUPPORTED_WINDOWING_FUNCTIONS))

            window_function = eval("".join(("numpy.", self.window_function)))
            window_mask = numpy.reshape(window_function(seg_tpts),
                                        (seg_tpts, 1, 1, 1, 1))
            time_series = time_series * window_mask

        #Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        nfreq = result.shape[0] // 2
        result = result[1:nfreq + 1, :]
        util.log_debug_array(LOG, result, "result")

        spectra = spectral.FourierSpectrum(
            source=self.time_series,
            segment_length=self.segment_length,
            windowing_function=self.window_function,
            array_data=result,
            use_storage=False)

        return spectra

    def result_shape(self, input_shape, segment_length, sample_period):
        """Returns the shape of the main result (complex array) of the FFT."""
        freq_len = (segment_length / sample_period) / 2.0
        freq_len = int(min((input_shape[0], freq_len)))
        nseg = max(
            (1,
             int(numpy.ceil(input_shape[0] * sample_period / segment_length))))
        result_shape = (freq_len, input_shape[1], input_shape[2],
                        input_shape[3], nseg)
        return result_shape

    def result_size(self, input_shape, segment_length, sample_period):
        """
        Returns the storage size in Bytes of the main result (complex array) of 
        the FFT.
        """
        result_size = numpy.prod(
            self.result_shape(input_shape, segment_length,
                              sample_period)) * 2.0 * 8.0  #complex*Bytes
        return result_size

    def extended_result_size(self, input_shape, segment_length, sample_period):
        """
        Returns the storage size in Bytes of the extended result of the FFT. 
        That is, it includes storage of the evaluated FourierSpectrum attributes
        such as power, phase, amplitude, etc.
        """
        result_shape = self.result_shape(input_shape, segment_length,
                                         sample_period)
        result_size = self.result_size(input_shape, segment_length,
                                       sample_period)
        extend_size = result_size                           #Main array
        extend_size += 0.5 * result_size                    #Amplitude
        extend_size += 0.5 * result_size                    #Phase
        extend_size += 0.5 * result_size                    #Power
        extend_size += 0.5 * result_size / result_shape[4]  #Average power
        extend_size += 0.5 * result_size / result_shape[4]  #Normalised average power
        extend_size += result_shape[0] * 8.0                #Frequency
        return extend_size
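
# What follows is a minimal, self-contained sketch (not TVB code) of the same
# segment -> detrend -> window -> FFT pipeline that evaluate() applies above,
# on a toy 2D (time, space) array. Names, sizes, and the hanning window are
# illustrative assumptions; it assumes segment_length fits within the series.
import numpy
import scipy.signal as sp_signal

def toy_segmented_fft(data, sample_period, segment_length):
    """One-sided FFT of detrended, Hanning-windowed, overlapping segments."""
    tpts = data.shape[0]
    seg_tpts = int(segment_length / sample_period)
    nseg = max(1, int(numpy.ceil(tpts / float(seg_tpts))))
    #Overlap the segments evenly when they do not tile the series exactly.
    overlap = ((seg_tpts * nseg) - tpts) / (nseg - 1) if nseg > 1 else 0
    starts = [int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)]
    segments = numpy.stack([data[s:s + seg_tpts] for s in starts], axis=-1)
    segments = sp_signal.detrend(segments, axis=0)    #base-line correct
    segments = segments * numpy.hanning(seg_tpts)[:, numpy.newaxis,
                                                  numpy.newaxis]
    result = numpy.fft.fft(segments, axis=0)
    nfreq = result.shape[0] // 2
    return result[1:nfreq + 1]    #drop DC, keep positive frequencies

# e.g. toy_segmented_fft(numpy.random.randn(1000, 4), 0.5, 125.0) has shape
# (125, 4, 4): 125 frequencies, 4 channels, 4 segments.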
Example #28
class WaveletCoefficients(arrays.MappedArray):
    """
    This class bundles all the elements of a Wavelet Analysis into a single
    object, including the input TimeSeries datatype and the output results as
    arrays (FloatArray)
    """
    #Overwrite attribute from superclass
    array_data = arrays.ComplexArray()

    source = time_series.TimeSeries(label="Source time-series")

    mother = basic.String(
        label="Mother wavelet",
        default="morlet",
        doc="""A string specifying the type of mother wavelet to use,
            default is 'morlet'.""")  # default to 'morlet'

    sample_period = basic.Float(label="Sample period")
    #sample_rate is omitted; it is inversely related to sample_period.

    frequencies = arrays.FloatArray(
        label="Frequencies", doc="A vector that maps scales to frequencies.")

    normalisation = basic.String(label="Normalisation type")
    # 'unit energy' | 'gabor'

    q_ratio = basic.Float(label="Q-ratio", default=5.0)

    amplitude = arrays.FloatArray(label="Amplitude",
                                  file_storage=core.FILE_STORAGE_EXPAND)

    phase = arrays.FloatArray(label="Phase",
                              file_storage=core.FILE_STORAGE_EXPAND)

    power = arrays.FloatArray(label="Power",
                              file_storage=core.FILE_STORAGE_EXPAND)

    _frequency = None
    _time = None

    __generate_table__ = True

    def configure(self):
        """After populating few fields, compute the rest of the fields"""
        # Do not call super, because that accesses data not-chunked
        self.nr_dimensions = len(self.read_data_shape())
        for i in range(self.nr_dimensions):
            setattr(self, 'length_%dd' % (i + 1),
                    int(self.read_data_shape()[i]))

        if self.trait.use_storage is False and sum(
                self.get_data_shape('array_data')) != 0:
            if self.amplitude.size == 0:
                self.compute_amplitude()
            if self.phase.size == 0:
                self.compute_phase()
            if self.power.size == 0:
                self.compute_power()

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance of this datatype.
        """
        summary = {
            "Spectral type": self.__class__.__name__,
            "Source": self.source.title,
            "Wavelet type": self.mother,
            "Normalisation": self.normalisation,
            "Q-ratio": self.q_ratio,
            "Sample period": self.sample_period,
            "Number of scales": self.frequencies.shape[0],
            "Minimum frequency": self.frequencies[0],
            "Maximum frequency": self.frequencies[-1]
        }
        return summary

    @property
    def frequency(self):
        """ Frequencies represented by the wavelet spectrogram."""
        if self._frequency is None:
            self._frequency = numpy.arange(self.frequencies.lo,
                                           self.frequencies.hi,
                                           self.frequencies.step)
            util.log_debug_array(LOG, self._frequency, "frequency")
        return self._frequency

    def compute_amplitude(self):
        """ Amplitude of the complex Wavelet coefficients."""
        self.amplitude = numpy.abs(self.array_data)

    def compute_phase(self):
        """ Phase of the Wavelet coefficients."""
        self.phase = numpy.angle(self.array_data)

    def compute_power(self):
        """ Power of the complex Wavelet coefficients."""
        self.power = numpy.abs(self.array_data)**2

    def write_data_slice(self, partial_result):
        """
        Append chunk.
        """
        self.store_data_chunk('array_data',
                              partial_result.array_data,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_amplitude()
        self.store_data_chunk('amplitude',
                              partial_result.amplitude,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_phase()
        self.store_data_chunk('phase',
                              partial_result.phase,
                              grow_dimension=2,
                              close_file=False)

        partial_result.compute_power()
        self.store_data_chunk('power',
                              partial_result.power,
                              grow_dimension=2,
                              close_file=False)
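
# A minimal sketch (synthetic coefficients, no TVB storage assumed) of the
# relations the compute_amplitude/phase/power methods above apply to the
# complex wavelet coefficients; the shapes here are illustrative.
import numpy

coeffs = numpy.random.randn(64, 16) + 1j * numpy.random.randn(64, 16)
amplitude = numpy.abs(coeffs)      # |W|
phase = numpy.angle(coeffs)        # arg(W), in radians
power = numpy.abs(coeffs) ** 2     # |W|**2 == amplitude**2
assert numpy.allclose(power, amplitude ** 2)
assert numpy.allclose(coeffs, amplitude * numpy.exp(1j * phase))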
Example #29
class TimeSeriesInteractive(core.Type):
    """
    For generating an interactive time-series figure, given one of TVB's 
    TimeSeries datatypes to initialise it. The graphical interface for 
    visualising a timeseries provides controls for setting:

        - Window length
        - Amplitude scaling
        - Stepping forward/backward through time.
    """

    time_series = time_series_datatypes.TimeSeries(
        label="Timeseries",
        default=None,
        required=True,
        doc="""The TVB TimeSeries datatype to be displayed.""")

    first_n = basic.Integer(
        label="Display the first 'n'",
        default=-1,
        required=True,
        doc="""Primarily intended for displaying the first N components of a
            surface PCA time-series. Defaults to -1, meaning all of 'space'
            (i.e., regions, vertices, or channels) is displayed. For Region
            or M/EEG time-series this can be ignored, but for a surface
            time-series it really must be set.""")

    def __init__(self, **kwargs):
        """
        Doc me...

        """
        super(TimeSeriesInteractive, self).__init__(**kwargs)
        LOG.debug(str(kwargs))

        #figure
        self.its_fig = None

        #time-series
        self.ts_ax = None
        self.ts_view = None
        self.whereami_ax = None
        self.hereiam = None

        #Current state
        self.window_length = None
        self.scaling = 0.42
        self.offset = None
        self.view_step = None
        self.time_view = None
        self.channel_view = None
        #self.mode = 0

        #Selectors
        #self.mode_selector = None

        #Sliders
        self.window_length_slider = None
        self.scaling_slider = None
        self.time_slider = None

        #time-view buttons
        self.step_back_button = None
        self.step_forward_button = None
        self.big_step_back_button = None
        self.big_step_forward_button = None
        self.start_button = None
        self.end_button = None

    def configure(self):
        """ Seperate configure cause ttraits be busted... """
        #TODO: if isinstance(self.time_series, TimeSeriesSurface) and self.first_n == -1: #LOG.error, return.
        self.data = (self.time_series.data[:, :, :self.first_n, :] -
                     self.time_series.data[:, :, :self.first_n, :].mean(
                         axis=0)[numpy.newaxis, :])
        self.period = self.time_series.sample_period
        self.tpts = self.data.shape[0]
        self.nsrs = self.data.shape[2]
        self.time = numpy.arange(self.tpts) * self.period
        self.start_time = self.time[0]
        self.end_time = self.time[-1]
        self.time_series_length = self.end_time - self.start_time
        self.peak_to_peak = (numpy.max(self.data) - numpy.min(self.data))

        #Use actual labels if they exist.
        if (isinstance(self.time_series,
                       time_series_datatypes.TimeSeriesRegion)
                and self.time_series.connectivity is not None):
            self.labels = self.time_series.connectivity.region_labels
        elif (isinstance(self.time_series,
                         (time_series_datatypes.TimeSeriesEEG,
                          time_series_datatypes.TimeSeriesMEG))
              and self.time_series.sensors is not None):
            self.labels = self.time_series.sensors.labels
        else:
            self.labels = ["channel_%0.2d" % k for k in range(self.nsrs)]

        #Current state
        self.window_length = self.tpts * self.period
        self.view_step = max(int(self.tpts / TIME_RESOLUTION), 1)
        self.time_view = range(0, self.tpts, self.view_step)

    def show(self):
        """ Generate the interactive time-series figure. """
        time_series_type = self.time_series.__class__.__name__
        msg = "Generating an interactive time-series plot for %s"
        if isinstance(self.time_series,
                      time_series_datatypes.TimeSeriesSurface):
            LOG.warning("Intended for region and sensors, not surfaces.")
        LOG.info(msg % time_series_type)

        #Make the figure:
        self.create_figure()

        #Selectors
        #self.add_mode_selector()

        #Sliders
        self.add_window_length_slider()
        self.add_scaling_slider()
        #self.add_time_slider()

        #time-view buttons
        self.add_step_back_button()
        self.add_step_forward_button()
        self.add_big_step_back_button()
        self.add_big_step_forward_button()
        self.add_start_button()
        self.add_end_button()

        #Plot timeseries
        self.plot_time_series()

        pylab.show()

    ##------------------------------------------------------------------------##
    ##------------------ Functions for building the figure -------------------##
    ##------------------------------------------------------------------------##
    def create_figure(self):
        """ Create the figure and time-series axes. """
        #time_series_type = self.time_series.__class__.__name__
        try:
            figure_window_title = "Interactive time series: "  #+ time_series_type
            #            pylab.close(figure_window_title)
            self.its_fig = pylab.figure(num=figure_window_title,
                                        figsize=(14, 8),
                                        facecolor=BACKGROUNDCOLOUR,
                                        edgecolor=EDGECOLOUR)
        except ValueError:
            LOG.info("My life would be easier if you'd update your PyLab...")
            figure_number = 42
            pylab.close(figure_number)
            self.its_fig = pylab.figure(num=figure_number,
                                        figsize=(14, 8),
                                        facecolor=BACKGROUNDCOLOUR,
                                        edgecolor=EDGECOLOUR)

        self.ts_ax = self.its_fig.add_axes([0.1, 0.1, 0.85, 0.85])

        self.whereami_ax = self.its_fig.add_axes([0.1, 0.95, 0.85, 0.025],
                                                 axisbg=BACKGROUNDCOLOUR)
        self.whereami_ax.set_axis_off()
        if hasattr(self.whereami_ax, 'autoscale'):
            self.whereami_ax.autoscale(enable=True, axis='both', tight=True)
        self.whereami_ax.plot(self.time_view,
                              numpy.zeros((len(self.time_view), )),
                              color="0.3",
                              linestyle="--")
        self.hereiam = self.whereami_ax.plot(self.time_view,
                                             numpy.zeros(
                                                 (len(self.time_view), )),
                                             'b-',
                                             linewidth=4)

#    def add_mode_selector(self):
#        """
#        Add a radio button to the figure for selecting which mode of the model
#        should be displayed.
#        """
#        pos_shp = [0.02, 0.07, 0.04, 0.1+0.002*self.data.shape[3]]
#        mode_ax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR, title="Mode")
#        mode_tuple = tuple(range(self.model.number_of_modes))
#        self.mode_selector = widgets.RadioButtons(mode_ax, mode_tuple, active=0)
#        self.mode_selector.on_clicked(self.update_mode)

#    def add_time_sliders(self):
#        """
#        Add a slider to allow the time-series window length to be adjusted.
#        """
#        pos_shp = [0.2, 0.02, 0.7, 0.025]
#        slax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR)
#
#        self.current_time_slider = widgets.Slider(slax, "Time", self.start_time,
#                                          self.end_time,
#                                          valinit = self.current_time)
#        self.current_time.on_changed(self.update_time)

    def add_window_length_slider(self):
        """
        Add a slider to allow the time-series window length to be adjusted.
        """
        pos_shp = [0.15, 0.02, 0.175, 0.035]
        slax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR)

        self.window_length_slider = widgets.Slider(slax,
                                                   "Window length",
                                                   TIME_RESOLUTION *
                                                   self.period,
                                                   self.time_series_length,
                                                   valinit=self.window_length,
                                                   valfmt="%d")
        self.window_length_slider.on_changed(self.update_window_length)

    #TODO: Add a conversion so this is an amplitude scaling, say 1.0-20.0
    def add_scaling_slider(self):
        """ Add a slider to allow scaling of the offset of time-series. """
        pos_shp = [0.75, 0.02, 0.175, 0.035]
        sax = self.its_fig.add_axes(pos_shp, axisbg=AXCOLOUR)

        self.scaling_slider = widgets.Slider(sax,
                                             "Spacing",
                                             0.0,
                                             1.25,
                                             valinit=self.scaling,
                                             valfmt="%4.2f")
        self.scaling_slider.on_changed(self.update_scaling)

    def add_step_back_button(self):
        """ Add a button to step back by 4 view_steps. """
        bax = self.its_fig.add_axes([0.5, 0.015, 0.04, 0.045])
        self.step_back_button = widgets.Button(bax,
                                               '<',
                                               color=BUTTONCOLOUR,
                                               hovercolor=HOVERCOLOUR)

        self.step_back_button.on_clicked(self.step_back)

    def add_step_forward_button(self):
        """ Add a button to step forward by 4 view_steps. """
        bax = self.its_fig.add_axes([0.54, 0.015, 0.04, 0.045])
        self.step_forward_button = widgets.Button(bax,
                                                  '>',
                                                  color=BUTTONCOLOUR,
                                                  hovercolor=HOVERCOLOUR)

        self.step_forward_button.on_clicked(self.step_forward)

    def add_big_step_back_button(self):
        """ Add a button to step back by 1/4 window_length. """
        bax = self.its_fig.add_axes([0.46, 0.015, 0.04, 0.045])
        self.big_step_back_button = widgets.Button(bax,
                                                   '<<',
                                                   color=BUTTONCOLOUR,
                                                   hovercolor=HOVERCOLOUR)

        self.big_step_back_button.on_clicked(self.bigstep_back)

    def add_big_step_forward_button(self):
        """ Add a button to step forward by 1/4 window_length. """
        bax = self.its_fig.add_axes([0.58, 0.015, 0.04, 0.045])
        self.big_step_forward_button = widgets.Button(bax,
                                                      '>>',
                                                      color=BUTTONCOLOUR,
                                                      hovercolor=HOVERCOLOUR)

        self.big_step_forward_button.on_clicked(self.bigstep_forward)

    def add_start_button(self):
        """ Add a button to jump back to the start of the timeseries. """
        bax = self.its_fig.add_axes([0.42, 0.015, 0.04, 0.045])
        self.start_button = widgets.Button(bax,
                                           '|<<<',
                                           color=BUTTONCOLOUR,
                                           hovercolor=HOVERCOLOUR)

        self.start_button.on_clicked(self.jump_to_start)

    def add_end_button(self):
        """ Add a button to jump forward to the end of the timeseries. """
        bax = self.its_fig.add_axes([0.62, 0.015, 0.04, 0.045])
        self.end_button = widgets.Button(bax,
                                         '>>>|',
                                         color=BUTTONCOLOUR,
                                         hovercolor=HOVERCOLOUR)

        self.end_button.on_clicked(self.jump_to_end)

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the state --------------------##
    ##------------------------------------------------------------------------##

    def step_back(self, event=None):
        """ Step the timeview back by a single view step. """
        LOG.debug("step_back accessed with event: %s" % str(event))
        step = 4 * self.view_step
        if self.time_view[0] - step >= 0:
            self.time_view = [k - step for k in self.time_view]
            self.update_time_series()

    def step_forward(self, event=None):
        """ Step the timeview forward by a single view step. """
        LOG.debug("step_forward accessed with event: %s" % str(event))
        step = 4 * self.view_step
        if self.time_view[-1] + step < self.tpts:
            self.time_view = [k + step for k in self.time_view]
            self.update_time_series()

    def bigstep_back(self, event=None):
        """ Step the timeview back by 1/4 window length. """
        LOG.debug("bigstep_back accessed with event: %s" % str(event))
        step = self.view_step * TIME_RESOLUTION // 4
        if self.time_view[0] - step >= 0:
            self.time_view = [k - step for k in self.time_view]
            self.update_time_series()
        else:
            self.jump_to_start()

    def bigstep_forward(self, event=None):
        """ Step the timeview forward by 1/4 window length. """
        LOG.debug("bigstep_forward accessed with event: %s" % str(event))
        step = self.view_step * TIME_RESOLUTION // 4
        if self.time_view[-1] + step < self.tpts:
            self.time_view = [k + step for k in self.time_view]
            self.update_time_series()
        else:
            self.jump_to_end()

    def jump_to_start(self, event=None):
        """ Jump to the start of the timeseries. """
        LOG.debug("jump_to_start accessed with event: %s" % str(event))
        step = self.time_view[0]
        self.time_view = [k - step for k in self.time_view]
        self.update_time_series()

    def jump_to_end(self, event=None):
        """ Jump to the end of the timeseries."""
        LOG.debug("jump_to_end accessed with event: %s" % str(event))
        step = self.tpts - 1 - self.time_view[-1]
        self.time_view = [k + step for k in self.time_view]
        self.update_time_series()

    def update_time_view(self):
        """ Update the time_view when window length is changed. """
        tpts = self.window_length / self.period
        self.view_step = max(int(tpts / TIME_RESOLUTION), 1)
        window_start = self.time_view[0]
        window_end = min(window_start + self.view_step * (TIME_RESOLUTION - 1),
                         self.tpts)
        self.time_view = range(window_start, window_end, self.view_step)

    ##------------------------------------------------------------------------##
    ##------------------ Functions for updating the figure -------------------##
    ##------------------------------------------------------------------------##


#    def update_mode(self, label):
#        """ Update the visualised mode based on radio button selection. """
#        self.mode = label
#        self.update_time_series()

    def update_window_length(self, length):
        """
        Update timeseries window length based on the time window slider value.
        """
        self.window_length = length
        self.update_time_view()
        self.update_time_series()

    def update_scaling(self, scaling):
        """
        Update timeseries scaling based on the scaling slider value.
        """
        self.scaling = scaling
        self.update_time_series()

    def update_time_series(self):
        """ Clear the axes and redraw the time-series. """
        self.ts_ax.clear()
        self.plot_time_series()

    def plot_time_series(self):
        """ Plot a view on the timeseries. """
        # Set title and axis labels
        #time_series_type = self.time_series.__class__.__name__
        #self.ts_ax.set(title = time_series_type)
        #self.ts_ax.set(xlabel = "Time (%s)" % self.units)

        # This assumes shape => (time, space)
        step = self.scaling * self.peak_to_peak
        if step == 0:
            #Keep offset an array so set_yticks and vstack below still work.
            offset = numpy.zeros(self.nsrs)
        else:  #NOTE: specifying step in arange is faster, but it fence-posts.
            offset = numpy.arange(0, self.nsrs) * step
        if hasattr(self.ts_ax, 'autoscale'):
            self.ts_ax.autoscale(enable=True, axis='both', tight=True)

        self.ts_ax.set_yticks(offset)
        self.ts_ax.set_yticklabels(self.labels, fontsize=10)
        #import pdb; pdb.set_trace()

        #Light gray guidelines
        self.ts_ax.plot([
            self.nsrs * [self.time[self.time_view[0]]],
            self.nsrs * [self.time[self.time_view[-1]]]
        ], numpy.vstack(2 * (offset, )), "0.85")

        #Plot the timeseries
        self.ts_view = self.ts_ax.plot(
            self.time[self.time_view],
            offset + self.data[self.time_view, 0, :, 0])

        self.hereiam[0].remove()
        self.hereiam = self.whereami_ax.plot(self.time_view,
                                             numpy.zeros(
                                                 (len(self.time_view), )),
                                             'b-',
                                             linewidth=4)

        pylab.draw()
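
# A minimal standalone sketch (hypothetical data, plain matplotlib instead of
# the TVB widget machinery) of the vertical-offset trick plot_time_series()
# uses above: each channel is shifted by scaling * peak-to-peak so the traces
# stack without overlapping.
import numpy
import matplotlib.pyplot as plt

def toy_stacked_plot(data, scaling=0.42):
    """data: (time, space) array; plot one vertically offset trace per column."""
    peak_to_peak = data.max() - data.min()
    offset = numpy.arange(data.shape[1]) * scaling * peak_to_peak
    plt.plot(data + offset)     #broadcasts one offset per channel/column
    plt.yticks(offset, ["channel_%02d" % k for k in range(data.shape[1])])
    plt.show()

# e.g. toy_stacked_plot(numpy.random.randn(500, 8))
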
class VarianceNodeVariance(metrics_base.BaseTimeseriesMetricAlgorithm):
    """
    Zero-centres all the time-series, calculates the variance for each node
    time-series, and returns the variance of the node variances.

    Input:
    TimeSeries datatype

    Output:
    Float

    This is a crude indicator of how much the "excitability" of the model
    varies from node to node.
    """
    time_series = time_series_module.TimeSeries(
        label="Time Series",
        required=True,
        doc="""The TimeSeries for which the variance of the zero-centred node
            variances is to be computed.""")
    
    def evaluate(self):
        """
        Compute the variance of the zero-centred node variances for the
        time_series.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        shape = self.time_series.data.shape

        zero_mean_data = (self.time_series.data -
                          self.time_series.data.mean(axis=0))
        #Reshape by concatenating the time-series of each state-variable and
        #mode, node by node.
        zero_mean_data = zero_mean_data.transpose((0, 1, 3, 2))
        cat_tpts = shape[0] * shape[1] * shape[3]
        zero_mean_data = zero_mean_data.reshape((cat_tpts, shape[2]),
                                                order="F")
        #Variance over time-points, state-variables, and modes for each node.
        node_variance = zero_mean_data.var(axis=0)
        #Variance of that variance over nodes.
        result = node_variance.var()
        return result
    
    
    def result_shape(self):
        """
        Returns the shape of the metric's main result, a single Float.
        """
        return (1,)

    def result_size(self):
        """
        Returns the storage size in Bytes of the metric's main result.
        """
        return 8.0  #Bytes

    def extended_result_size(self):
        """
        Returns the storage size in Bytes of the metric's extended result.
        As no extra attributes are evaluated, this equals result_size.
        """
        return 8.0  #Bytes
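
# A minimal numpy-only sketch (toy data in TVB's (time, state-variables,
# space, modes) layout; sizes are illustrative) of the computation
# evaluate() performs above. A C-order reshape is used here; for this
# transpose it groups elements per node exactly as the F-order reshape does.
import numpy

data = numpy.random.randn(100, 2, 16, 3)     #hypothetical 4D time-series
zero_mean = data - data.mean(axis=0)
#Collapse time, state-variables and modes into one axis per node.
per_node = zero_mean.transpose((0, 1, 3, 2)).reshape((-1, data.shape[2]))
node_variance = per_node.var(axis=0)         #one variance per node
metric = node_variance.var()                 #variance of those variances
print(metric)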