Example #1
def calculate_cross_coherence(time_series, nfft):
    """
    # type: (TimeSeries, int)  -> CoherenceSpectrum
    # Adapter for cross-coherence algorithm(s)
    # Evaluate coherence on time series.

    Parameters
    ----------
    time_series : TimeSeries
        The TimeSeries to which the Cross Coherence is to be applied.

    nfft : int
        Data-points per block (should be a power of 2).
    """

    srate = time_series.sample_rate
    coh, freq = _coherence(time_series.data, srate, nfft=nfft)
    log.debug("coherence")
    log.debug(narray_describe(coh))
    log.debug("freq")
    log.debug(narray_describe(freq))

    spec = spectral.CoherenceSpectrum(source=time_series,
                                      nfft=nfft,
                                      array_data=coh.astype(numpy.float64),
                                      frequency=freq)
    return spec
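The `_coherence` helper this adapter wraps is not shown. As a rough stand-in for the underlying idea, the same quantity for one pair of channels can be estimated with SciPy's Welch-based routine; this is a sketch under assumed parameter values, not the TVB implementation:

import numpy
from scipy import signal

srate = 250.0                                  # Hz, assumed sampling rate
t = numpy.arange(0, 10, 1 / srate)
x = numpy.sin(2 * numpy.pi * 10 * t) + numpy.random.randn(t.size)
y = numpy.sin(2 * numpy.pi * 10 * t) + numpy.random.randn(t.size)

# Welch-based magnitude-squared coherence, nperseg points per block.
freq, coh = signal.coherence(x, y, fs=srate, nperseg=512)
print(freq[coh.argmax()])                      # peaks near the shared 10 Hz component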
Example #2
    def config_for_sim(self, simulator):

        # initialize base attributes
        super(SpatialAverage, self).config_for_sim(simulator)
        self.is_default_special_mask = False

        # setup given spatial mask or default to region mapping
        if self.spatial_mask is None:
            self.is_default_special_mask = True
            if simulator.surface is not None:
                self.spatial_mask = simulator.surface.region_mapping
            else:
                conn = simulator.connectivity
                if self.default_mask[0] == 'cortical':
                    if conn is not None and conn.cortical is not None and conn.cortical.size > 0:
                        # Use cortical / non-cortical membership as the spatial mask
                        self.spatial_mask = numpy.array(
                            [int(c) for c in conn.cortical])
                    else:
                        msg = "Must fill Spatial Mask parameter for non-surface simulations when using SpatioTemporal monitor!"
                        raise Exception(msg)
                elif self.default_mask[0] == 'hemispheres':
                    if conn is not None and conn.hemispheres is not None and conn.hemispheres.size > 0:
                        # Use left / right hemisphere membership as the spatial mask
                        self.spatial_mask = numpy.array(
                            [int(h) for h in conn.hemispheres])
                    else:
                        msg = "Must fill Spatial Mask parameter for non-surface simulations when using SpatioTemporal monitor!"
                        raise Exception(msg)

        number_of_nodes = simulator.number_of_nodes
        if self.spatial_mask.size != number_of_nodes:
            msg = "spatial_mask must be a vector of length number_of_nodes."
            raise Exception(msg)

        areas = numpy.unique(self.spatial_mask)
        number_of_areas = len(areas)
        if not numpy.all(areas == numpy.arange(number_of_areas)):
            msg = ("Areas in the spatial_mask must be specified as a "
                   "contiguous set of indices starting from zero.")
            raise Exception(msg)

        self.log.debug("spatial_mask")
        self.log.debug(narray_describe(self.spatial_mask))
        spatial_sum = numpy.zeros((number_of_nodes, number_of_areas))
        spatial_sum[numpy.arange(number_of_nodes), self.spatial_mask] = 1
        spatial_sum = spatial_sum.T
        self.log.debug("spatial_sum")
        self.log.debug(narray_describe(spatial_sum))
        nodes_per_area = numpy.sum(spatial_sum, axis=1)[:, numpy.newaxis]
        self.spatial_mean = spatial_sum / nodes_per_area
        self.log.debug("spatial_mean")
        self.log.debug(narray_describe(self.spatial_mean))
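The core of this method is the one-hot matrix built at the end: each row of spatial_mean selects the nodes of one area and weights them by 1/nodes_per_area, so multiplying it with a per-node state vector yields per-area means. A minimal standalone sketch with a made-up mask:

import numpy

spatial_mask = numpy.array([0, 0, 1, 2, 2, 2])     # 6 nodes grouped into 3 areas
n_nodes, n_areas = spatial_mask.size, spatial_mask.max() + 1

spatial_sum = numpy.zeros((n_nodes, n_areas))
spatial_sum[numpy.arange(n_nodes), spatial_mask] = 1
spatial_sum = spatial_sum.T                        # (areas, nodes) one-hot rows
nodes_per_area = numpy.sum(spatial_sum, axis=1)[:, numpy.newaxis]
spatial_mean = spatial_sum / nodes_per_area        # each row sums to 1

state = numpy.arange(6.0)                          # one sample per node
print(spatial_mean @ state)                        # [0.5 2.  4. ]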
Example #3
def compute_pca(time_series):
    """
    # type: (TimeSeries)  -> PrincipalComponents
    Compute the temporal covariance between nodes in the time_series.

    Parameters
    ----------
    time_series : TimeSeries
        The time series to which the PCA is to be applied.
    """

    ts_shape = time_series.data.shape

    # Need more measurements than variables
    if ts_shape[0] < ts_shape[2]:
        msg = "PCA requires a longer timeseries (tpts > number of nodes)."
        log.error(msg)
        raise Exception(msg)

    # (nodes, nodes, state-variables, modes)
    weights_shape = (ts_shape[2], ts_shape[2], ts_shape[1], ts_shape[3])
    log.info("weights shape will be: %s" % str(weights_shape))

    fractions_shape = (ts_shape[2], ts_shape[1], ts_shape[3])
    log.info("fractions shape will be: %s" % str(fractions_shape))

    weights = numpy.zeros(weights_shape)
    fractions = numpy.zeros(fractions_shape)

    # One inter-node temporal covariance matrix for each state-var & mode.
    for mode in range(ts_shape[3]):
        for var in range(ts_shape[1]):
            data = time_series.data[:, var, :, mode]

            fracts, w = _compute_weights_and_fractions(data)
            fractions[:, var, mode] = fracts
            weights[:, :, var, mode] = w

    log.debug("fractions")
    log.debug(narray_describe(fractions))
    log.debug("weights")
    log.debug(narray_describe(weights))

    pca_result = mode_decompositions.PrincipalComponents(
        source=time_series,
        fractions=fractions,
        weights=weights,
        norm_source=numpy.array([]),
        component_time_series=numpy.array([]),
        normalised_component_time_series=numpy.array([]))

    return pca_result
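The `_compute_weights_and_fractions` helper is not shown. A plausible numpy stand-in (an assumption, not the TVB helper) diagonalises the covariance of one (timepoints, nodes) block, returning the variance fractions and component weights the caller expects:

import numpy

def compute_weights_and_fractions(data):
    """PCA of a (timepoints, nodes) block via the covariance eigendecomposition."""
    data = data - data.mean(axis=0)
    evals, evecs = numpy.linalg.eigh(numpy.cov(data, rowvar=False))
    order = numpy.argsort(evals)[::-1]       # largest variance first
    fractions = evals[order] / evals.sum()   # fraction of variance per component
    weights = evecs[:, order].T              # components as rows
    return fractions, weights

rng = numpy.random.default_rng(0)
fracs, w = compute_weights_and_fractions(rng.normal(size=(500, 4)))
print(fracs.sum(), w.shape)                  # ~1.0 (4, 4)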
Example #4
    def evaluate(self):
        """
        Compute the temporal covariance between nodes in the time_series. 
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        # self.time_series.trait["data"].log_debug(owner = cls_attr_name)

        ts_shape = self.time_series.data.shape

        # Need more measurements than variables
        if ts_shape[0] < ts_shape[2]:
            msg = "PCA requires a longer timeseries (tpts > number of nodes)."
            self.log.error(msg)
            raise Exception(msg)

        # (nodes, nodes, state-variables, modes)
        weights_shape = (ts_shape[2], ts_shape[2], ts_shape[1], ts_shape[3])
        self.log.info("weights shape will be: %s" % str(weights_shape))

        fractions_shape = (ts_shape[2], ts_shape[1], ts_shape[3])
        self.log.info("fractions shape will be: %s" % str(fractions_shape))

        weights = numpy.zeros(weights_shape)
        fractions = numpy.zeros(fractions_shape)

        # One inter-node temporal covariance matrix for each state-var & mode.
        for mode in range(ts_shape[3]):
            for var in range(ts_shape[1]):
                data = self.time_series.data[:, var, :, mode]

                data_pca = PCA_mlab(data)
                fractions[:, var, mode] = data_pca.fracs

                weights[:, :, var, mode] = data_pca.Wt

        self.log.debug("fractions")
        self.log.debug(narray_describe(fractions))
        self.log.debug("weights")
        self.log.debug(narray_describe(weights))

        pca_result = mode_decompositions.PrincipalComponents(
            source=self.time_series,
            fractions=fractions,
            weights=weights,
            norm_source=numpy.array([]),
            component_time_series=numpy.array([]),
            normalised_component_time_series=numpy.array([]))

        return pca_result
Example #5
    def evaluate(self):
        "Evaluate coherence on time series."
        cls_attr_name = self.__class__.__name__ + ".time_series"
        # self.time_series.trait["data"].log_debug(owner=cls_attr_name)
        srate = self.time_series.sample_rate
        coh, freq = coherence(self.time_series.data, srate, nfft=self.nfft)
        self.log.debug("coherence")
        self.log.debug(narray_describe(coh))
        self.log.debug("freq")
        self.log.debug(narray_describe(freq))

        spec = spectral.CoherenceSpectrum(source=self.time_series,
                                          nfft=self.nfft,
                                          array_data=coh.astype(numpy.float64),
                                          frequency=freq)
        return spec
Example #6
    def compute_vertex_normals(self):
        """
        Estimates vertex normals, based on triangle normals weighted by the
        angle they subtend at each vertex...
        """
        vert_norms = numpy.zeros((self.number_of_vertices, 3))
        bad_normal_count = 0
        for k in range(self.number_of_vertices):
            try:
                tri_list = list(self.vertex_triangles[k])
                angle_mask = self.triangles[tri_list, :] == k
                angles = self.triangle_angles[tri_list, :]
                angles = angles[angle_mask][:, numpy.newaxis]
                angle_scaling = angles / numpy.sum(angles, axis=0)
                # Scale each triangle normal by the angle subtended at this vertex.
                vert_norms[k, :] = numpy.mean(angle_scaling * self.triangle_normals[tri_list, :], axis=0)
                # Normalise to a unit vector.
                vert_norms[k, :] = vert_norms[k, :] / numpy.sqrt(numpy.sum(vert_norms[k, :] ** 2, axis=0))
            except (ValueError, FloatingPointError):
                # If normals are bad, default to the position vector.
                # A nicer solution would be to detect degenerate triangles and ignore their
                # contribution to the vertex normal.
                vert_norms[k, :] = self.vertices[k] / numpy.sqrt(self.vertices[k].dot(self.vertices[k]))
                bad_normal_count += 1
        if bad_normal_count:
            self.log.warning("%d vertices have bad normals" % bad_normal_count)
        self.vertex_normals = vert_norms
        self.log.debug("vertex_normals")
        self.log.debug(narray_describe(self.vertex_normals))
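A stripped-down, free-standing version of the same idea, averaging adjacent triangle normals per vertex on a toy tetrahedron. This sketch omits the angle weighting for brevity; it is an assumed simplification, not the class method:

import numpy

# A tetrahedron: 4 vertices, 4 triangular faces.
vertices = numpy.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
triangles = numpy.array([[0, 2, 1], [0, 1, 3], [0, 3, 2], [1, 2, 3]])

u = vertices[triangles[:, 1]] - vertices[triangles[:, 0]]
v = vertices[triangles[:, 2]] - vertices[triangles[:, 0]]
tri_norm = numpy.cross(u, v)
tri_norm /= numpy.linalg.norm(tri_norm, axis=1)[:, numpy.newaxis]

vert_norms = numpy.zeros_like(vertices)
for k in range(len(vertices)):
    adjacent = numpy.any(triangles == k, axis=1)       # faces touching vertex k
    mean_norm = tri_norm[adjacent].mean(axis=0)
    vert_norms[k] = mean_norm / numpy.linalg.norm(mean_norm)
print(vert_norms)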
Example #7
    def config_for_sim(self, simulator):

        # initialize base attributes
        super(SpatialAverage, self).config_for_sim(simulator)
        self.is_default_special_mask = False

        # setup given spatial mask or default to region mapping
        if self.spatial_mask is None:
            self.is_default_special_mask = True
            if simulator.surface is not None:
                self.spatial_mask, _, _ = self.backend.full_region_map(
                    simulator.surface, simulator.connectivity)
            else:
                conn = simulator.connectivity
                if self.default_mask == self.CORTICAL:
                    self.spatial_mask = self._support_bool_mask(conn.cortical)
                elif self.default_mask == self.HEMISPHERES:
                    self.spatial_mask = self._support_bool_mask(
                        conn.hemispheres)
                else:
                    msg = "Must fill either the Spatial Mask parameter or choose a Default Mask for non-surface" \
                          " simulations when using SpatioTemporal monitor!"
                    raise Exception(msg)

        number_of_nodes = simulator.number_of_nodes
        if self.spatial_mask.size != number_of_nodes:
            msg = "spatial_mask must be a vector of length number_of_nodes."
            raise Exception(msg)

        areas = numpy.unique(self.spatial_mask)
        number_of_areas = len(areas)
        if not numpy.all(areas == numpy.arange(number_of_areas)):
            msg = ("Areas in the spatial_mask must be specified as a "
                   "contiguous set of indices starting from zero.")
            raise Exception(msg)

        self.log.debug("spatial_mask")
        self.log.debug(narray_describe(self.spatial_mask))
        spatial_sum = numpy.zeros((number_of_nodes, number_of_areas))
        spatial_sum[numpy.arange(number_of_nodes), self.spatial_mask] = 1
        spatial_sum = spatial_sum.T
        self.log.debug("spatial_sum")
        self.log.debug(narray_describe(spatial_sum))
        nodes_per_area = numpy.sum(spatial_sum, axis=1)[:, numpy.newaxis]
        self.spatial_mean = spatial_sum / nodes_per_area
        self.log.debug("spatial_mean")
        self.log.debug(narray_describe(self.spatial_mean))
Example #8
    def _find_triangle_centres(self):
        """
        Calculate the location of the centre of all triangles comprising the mesh surface.
        """
        tri_verts = self.vertices[self.triangles, :]
        tri_centres = numpy.mean(tri_verts, axis=1)
        self.log.debug("tri_centres")
        self.log.debug(narray_describe(tri_centres))
        return tri_centres
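For a single triangle the centre is just the mean of its three vertices; a toy check with bare numpy arrays:

import numpy

vertices = numpy.array([[0., 0., 0.], [3., 0., 0.], [0., 3., 0.]])
triangles = numpy.array([[0, 1, 2]])
tri_centres = numpy.mean(vertices[triangles, :], axis=1)
print(tri_centres)                                 # [[1. 1. 0.]]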
Example #9
    def _find_triangle_areas(self):
        """Calculates the area of triangles making up a surface."""
        tri_u = self.vertices[self.triangles[:, 1], :] - self.vertices[self.triangles[:, 0], :]
        tri_v = self.vertices[self.triangles[:, 2], :] - self.vertices[self.triangles[:, 0], :]

        tri_norm = numpy.cross(tri_u, tri_v)
        triangle_areas = numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1)) / 2.0
        triangle_areas = triangle_areas[:, numpy.newaxis]
        self.log.debug("triangle_areas")
        self.log.debug(narray_describe(triangle_areas))

        return triangle_areas
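The cross product of two edge vectors has magnitude equal to twice the triangle's area, which is what the formula above exploits. A quick check on a 3-4-5 right triangle (toy data):

import numpy

vertices = numpy.array([[0., 0., 0.], [3., 0., 0.], [0., 4., 0.]])
triangles = numpy.array([[0, 1, 2]])
tri_u = vertices[triangles[:, 1], :] - vertices[triangles[:, 0], :]
tri_v = vertices[triangles[:, 2], :] - vertices[triangles[:, 0], :]
areas = numpy.sqrt(numpy.sum(numpy.cross(tri_u, tri_v) ** 2, axis=1)) / 2.0
print(areas)                                       # [6.]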
Example #10
    def compute_triangle_normals(self):
        """Calculates triangle normals."""
        tri_u = self.vertices[self.triangles[:, 1], :] - self.vertices[self.triangles[:, 0], :]
        tri_v = self.vertices[self.triangles[:, 2], :] - self.vertices[self.triangles[:, 0], :]

        tri_norm = numpy.cross(tri_u, tri_v)

        try:
            self.triangle_normals = tri_norm / numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1))[:, numpy.newaxis]
        except FloatingPointError:
            # TODO: NaN generation would stop execution, however for normals this case could maybe be
            # handled in a better way.
            self.triangle_normals = tri_norm
        self.log.debug("triangle_normals")
        self.log.debug(narray_describe(self.triangle_normals))
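Note that numpy only raises FloatingPointError when error trapping is enabled; under the default settings, dividing by a zero-length normal silently produces NaNs instead. A small demonstration of the trapped case:

import numpy

tri_norm = numpy.zeros((1, 3))                     # a degenerate (zero-area) triangle
with numpy.errstate(all='raise'):
    try:
        tri_norm / numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1))[:, numpy.newaxis]
    except FloatingPointError:
        print("degenerate normal caught")          # reached only with trapping enabled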
Example #11
    def _find_triangle_angles(self):
        """
        Calculates the inner angles of all the triangles which make up a surface
        """
        def _angle(a, b):
            """ Angle between normalized vectors. <a|b> = cos(alpha)"""
            return numpy.arccos(numpy.sum(a * b, axis=1, keepdims=True))

        edges = self._normalized_edge_vectors()
        a0 = _angle(edges[:, 1, :], edges[:, 2, :])
        a1 = _angle(edges[:, 0, :], -edges[:, 1, :])
        a2 = numpy.pi - a0 - a1
        angles = numpy.hstack([a0, a1, a2])
        self.log.debug("triangle_angles")
        self.log.debug(narray_describe(angles))

        return angles
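Because the inner angles of a planar triangle sum to pi, the third angle follows from the first two; a toy check with bare numpy on a right isosceles triangle (standalone, not using the class's edge-vector helper):

import numpy

a, b, c = numpy.array([0., 0.]), numpy.array([1., 0.]), numpy.array([0., 1.])

def angle(u, v):
    u, v = u / numpy.linalg.norm(u), v / numpy.linalg.norm(v)
    return numpy.arccos(u @ v)

a0 = angle(b - a, c - a)
a1 = angle(a - b, c - b)
a2 = numpy.pi - a0 - a1                            # angles of a triangle sum to pi
print(numpy.degrees([a0, a1, a2]))                 # [90. 45. 45.]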
Example #12
    def evaluate(self):
        """
        Calculate the FFT, Cross Coherence and Complex Coherence of time_series 
        broken into (possibly) epochs and segments of length `epoch_length` and 
        `segment_length` respectively, filtered by `window_function`.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        # self.time_series.trait["data"].log_debug(owner=cls_attr_name)
        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        if len(self.time_series.data.shape) > 2:
            time_series_data = numpy.squeeze(
                (self.time_series.data.mean(axis=-1)).mean(axis=1))
        else:
            time_series_data = self.time_series.data

        # Divide time-series into epochs, no overlapping
        if self.epoch_length > 0.0:
            nepochs = int(numpy.floor(time_series_length / self.epoch_length))
            epoch_tpts = int(self.epoch_length /
                             self.time_series.sample_period)
            time_series_length = self.epoch_length
            tpts = epoch_tpts
        else:
            self.epoch_length = time_series_length
            nepochs = int(numpy.ceil(time_series_length / self.epoch_length))
            epoch_tpts = tpts

        # Segment time-series, overlapping if necessary
        nseg = int(numpy.floor(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(self.segment_length /
                           self.time_series.sample_period)
            seg_shift_tpts = int(self.segment_shift /
                                 self.time_series.sample_period)
            nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)
        else:
            self.segment_length = time_series_length
            seg_tpts = time_series_data.shape[0]

        # Frequency
        nfreq = int(
            numpy.min([
                self.max_freq,
                numpy.floor((seg_tpts + self.zeropad) / 2.0) + 1
            ]))

        result_shape, av_result_shape = self.result_shape(
            self.time_series.data.shape, self.max_freq, self.epoch_length,
            self.segment_length, self.segment_shift,
            self.time_series.sample_period, self.zeropad,
            self.average_segments)
        cs = numpy.zeros(result_shape, dtype=numpy.complex128)
        av = numpy.matrix(numpy.zeros(av_result_shape, dtype=numpy.complex128))
        coh = numpy.zeros(result_shape, dtype=numpy.complex128)

        # Apply windowing function
        if self.window_function is not None:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                self.log.error("Windowing function is: %s" %
                               self.window_function)
                self.log.error("Must be in: %s" %
                               str(SUPPORTED_WINDOWING_FUNCTIONS))

            window_function = getattr(numpy, self.window_function)
            win = window_function(seg_tpts)
            window_mask = (numpy.kron(
                numpy.ones((time_series_data.shape[1], 1)), win)).T
        else:
            window_mask = numpy.ones((seg_tpts, time_series_data.shape[1]))

        nave = 0

        for j in numpy.arange(nepochs):
            data = time_series_data[j * epoch_tpts:(j + 1) * epoch_tpts, :]

            for i in numpy.arange(nseg):  # average over all segments;
                time_series = data[i * seg_shift_tpts:i * seg_shift_tpts +
                                   seg_tpts, :]

                if self.detrend_ts:
                    time_series = sp_signal.detrend(time_series, axis=0)

                datalocfft = numpy.fft.fft(time_series * window_mask, axis=0)
                datalocfft = numpy.matrix(datalocfft)

                for f in numpy.arange(nfreq):  # for all frequencies
                    if self.npat == 1:
                        if not self.average_segments:
                            cs[:, :, f, i] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f,
                               i] += numpy.conjugate(datalocfft[f, :].conj().T)
                        else:
                            cs[:, :, f] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:,
                               f] += numpy.conjugate(datalocfft[f, :].conj().T)
                    else:
                        if not self.average_segments:
                            cs[:, :, f, j, i] = numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f, j,
                               i] = numpy.conjugate(datalocfft[f, :].conj().T)
                        else:
                            cs[:, :, f, j] += numpy.conjugate(
                                datalocfft[f, :].conj().T * datalocfft[f, :])
                            av[:, f,
                               j] += numpy.conjugate(datalocfft[f, :].conj().T)
                del datalocfft

            nave += 1.0

        # End of FORs
        if not self.average_segments:
            cs = cs / nave
            av = av / nave
        else:
            nave = nave * nseg
            cs = cs / nave
            av = av / nave

        # Subtract average
        for f in numpy.arange(nfreq):
            if self.subtract_epoch_average:
                if self.npat == 1:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            cs[:, :, f,
                               i] = cs[:, :, f,
                                       i] - av[:, f, i] * av[:, f, i].conj().T
                    else:
                        cs[:, :,
                           f] = cs[:, :, f] - av[:, f] * av[:, f].conj().T
                else:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            for j in numpy.arange(nepochs):
                                cs[:, :, f, j,
                                   i] = cs[:, :, f, j,
                                           i] - av[:, f, j, i] * av[:, f, j,
                                                                    i].conj().T

                    else:
                        for j in numpy.arange(nepochs):
                            cs[:, :, f,
                               j] = cs[:, :, f,
                                       j] - av[:, f, j] * av[:, f, j].conj().T

        # Compute Complex Coherence
        ndim = len(cs.shape)
        if ndim == 3:
            for i in numpy.arange(cs.shape[2]):
                temp = numpy.matrix(cs[:, :, i])
                coh[:, :, i] = cs[:, :, i] / numpy.sqrt(
                    temp.diagonal().conj().T * temp.diagonal())

        elif ndim == 4:
            for i in numpy.arange(cs.shape[2]):
                for j in numpy.arange(cs.shape[3]):
                    temp = numpy.matrix(numpy.squeeze(cs[:, :, i, j]))
                    coh[:, :, i, j] = temp / numpy.sqrt(
                        temp.diagonal().conj().T * temp.diagonal().T)

        self.log.debug("result")
        self.log.debug(narray_describe(cs))
        spectra = spectral.ComplexCoherenceSpectrum(
            source=self.time_series,
            array_data=coh,
            cross_spectrum=cs,
            epoch_length=self.epoch_length,
            segment_length=self.segment_length,
            windowing_function=self.window_function)
        return spectra
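The final normalisation above implements C_ij = S_ij / sqrt(S_ii * S_jj): each cross-spectrum entry is divided by the geometric mean of the corresponding auto-spectra, giving a unit diagonal. A compact array sketch of that step at a single frequency, on a toy cross-spectral matrix (assumed data):

import numpy

rng = numpy.random.default_rng(0)
x = rng.normal(size=(4, 100)) + 1j * rng.normal(size=(4, 100))
cs = x @ x.conj().T / 100                          # toy cross-spectral matrix
d = numpy.sqrt(numpy.real(numpy.diag(cs)))         # auto-spectra magnitudes
coh = cs / numpy.outer(d, d)                       # complex coherence
print(numpy.allclose(numpy.diag(coh), 1.0))        # True: unit diagonal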
Example #13
def compute_continuous_wavelet_transform(time_series, frequencies,
                                         sample_period, q_ratio, normalisation,
                                         mother):
    """
    # type: (TimeSeries, Range, float, float, str, str)  -> WaveletCoefficients
    Calculate the continuous wavelet transform of time_series.

    Parameters
    ----------
    time_series : TimeSeries
        The time series to which the wavelet is to be applied.

    frequencies : Range
        The frequency resolution and range returned. Requested frequencies
        are converted internally into appropriate scales.

    sample_period : float
        The sampling period of the computed wavelet spectrum.

    q_ratio : float
        NFC. Must be greater than 5. Ratios of the center frequencies to bandwidths.

    normalisation : str
        The type of normalisation for the resulting wavelet spectrum.
        Default is 'energy'; options are: 'energy', 'gabor'.

    mother : str
        The mother wavelet function used in the transform.
    """
    ts_shape = time_series.data.shape

    if frequencies.step == 0:
        log.warning("Frequency step can't be 0! Trying default step, 2e-3.")
        frequencies.step = 0.002

    freqs = numpy.arange(frequencies.lo, frequencies.hi, frequencies.step)

    if (freqs.size == 0) or any(freqs <= 0.0):
        # TODO: Maybe should limit number of freqs... ~100 is probably a reasonable upper bound.
        log.warning("Invalid frequency range! Falling back to default.")
        log.debug("freqs")
        log.debug(narray_describe(freqs))
        frequencies = Range(lo=0.008, hi=0.060, step=0.002)
        freqs = numpy.arange(frequencies.lo, frequencies.hi, frequencies.step)

    log.debug("freqs")
    log.debug(narray_describe(freqs))

    sample_rate = time_series.sample_rate

    # Duke: code below is as given by Andreas Spiegler, I've just wrapped
    # some of the original argument names
    nf = len(freqs)
    temporal_step = max(
        (1,
         ReferenceBackend.iround(sample_period / time_series.sample_period)))
    nt = int(numpy.ceil(ts_shape[0] / temporal_step))

    if not isinstance(q_ratio, numpy.ndarray):
        new_q_ratio = q_ratio * numpy.ones((1, nf))
    else:
        new_q_ratio = q_ratio

    if numpy.nanmin(new_q_ratio) < 5:
        msg = "q_ratio must not be lower than 5!"
        log.error(msg)
        raise Exception(msg)

    if numpy.nanmax(freqs) > sample_rate / 2.0:
        msg = "Sampling rate is too low for the requested frequency range!"
        log.error(msg)
        raise Exception(msg)

    # TODO: This isn't used, but min frequency seems like it should be important... Check with A.S.
    #  fmin = 3.0 * numpy.nanmin(q_ratio) * sample_rate / numpy.pi / nt
    sigma_f = freqs / new_q_ratio
    sigma_t = 1.0 / (2.0 * numpy.pi * sigma_f)

    if normalisation == 'energy':
        Amp = 1.0 / numpy.sqrt(sample_rate * numpy.sqrt(numpy.pi) * sigma_t)
    elif normalisation == 'gabor':
        Amp = numpy.sqrt(2.0 / numpy.pi) / sample_rate / sigma_t
    else:
        msg = "Unsupported normalisation: %s" % normalisation
        log.error(msg)
        raise Exception(msg)

    coef_shape = (nf, nt, ts_shape[1], ts_shape[2], ts_shape[3])

    coef = numpy.zeros(coef_shape, dtype=numpy.complex128)
    log.debug("coef")
    log.debug(narray_describe(coef))

    scales = numpy.arange(0, nf, 1)
    for i in scales:
        f0 = freqs[i]
        SDt = sigma_t[0, i]
        A = Amp[0, i]
        x = numpy.arange(0, 4.0 * SDt * sample_rate, 1) / sample_rate
        wvlt = A * numpy.exp(-x**2 / (2.0 * SDt**2)) * numpy.exp(
            2j * numpy.pi * f0 * x)
        wvlt = numpy.hstack((numpy.conjugate(wvlt[-1:0:-1]), wvlt))
        # util.self.log_debug_array(self.log, wvlt, "wvlt")

        for var in range(ts_shape[1]):
            for node in range(ts_shape[2]):
                for mode in range(ts_shape[3]):
                    data = time_series.data[:, var, node, mode]
                    wt = signal.convolve(data, wvlt, 'same')
                    # util.self.log_debug_array(self.log, wt, "wt")
                    res = wt[0::temporal_step]
                    # NOTE: this is a horrible horrible quick hack (alas, a solution) to avoid broadcasting errors
                    # when using dt and sample periods which are not powers of 2.
                    coef[i, :, var, node,
                         mode] = res if len(res) == nt else res[:coef.shape[1]]

    log.debug("coef")
    log.debug(narray_describe(coef))

    spectra = spectral.WaveletCoefficients(source=time_series,
                                           mother=mother,
                                           sample_period=sample_period,
                                           frequencies=frequencies.to_array(),
                                           normalisation=normalisation,
                                           q_ratio=q_ratio,
                                           array_data=coef)

    return spectra
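The kernel assembled in the scale loop is a Morlet-style wavelet: a Gaussian envelope of width sigma_t multiplied by a complex exponential at the centre frequency, mirrored so the kernel is centred at t = 0. A self-contained sketch of one such kernel and its use, with assumed parameter values:

import numpy
from scipy import signal

sample_rate = 100.0                                # Hz (assumed)
f0, sd_t = 10.0, 0.05                              # centre frequency and temporal width
x = numpy.arange(0, 4.0 * sd_t * sample_rate) / sample_rate
wvlt = numpy.exp(-x ** 2 / (2 * sd_t ** 2)) * numpy.exp(2j * numpy.pi * f0 * x)
wvlt = numpy.hstack((numpy.conjugate(wvlt[-1:0:-1]), wvlt))   # mirror to centre at t=0

t = numpy.arange(0, 2, 1 / sample_rate)
data = numpy.sin(2 * numpy.pi * f0 * t)            # signal matched to the wavelet
wt = signal.convolve(data, wvlt, 'same')
print(numpy.abs(wt).max())                         # strong response at 10 Hz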
Example #14
    def evaluate(self):
        """
        Calculate the FFT of time_series broken into segments of length
        segment_length and filtered by window_function.
        """

        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period

        # Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = numpy.ceil(self.segment_length /
                                  self.time_series.sample_period)
            overlap = (seg_tpts * nseg - tpts) / (nseg - 1.0)
            starts = [
                max(seg * (seg_tpts - overlap), 0) for seg in range(nseg)
            ]
            segments = [
                self.time_series.data[int(start):int(start) + int(seg_tpts)]
                for start in starts
            ]
            segments = [
                segment[:, :, :, :, numpy.newaxis] for segment in segments
            ]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            self.segment_length = time_series_length
            time_series = self.time_series.data[:, :, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]

        self.log.debug("Segment length being used is: %s" %
                       self.segment_length)

        # Base-line correct the segmented time-series
        if self.detrend:
            time_series = scipy.signal.detrend(time_series, axis=0)
            self.log.debug("time_series " + narray_describe(time_series))

        # Apply windowing function
        if self.window_function is not None:
            window_function = SUPPORTED_WINDOWING_FUNCTIONS[
                self.window_function]
            window_mask = numpy.reshape(window_function(int(seg_tpts)),
                                        (int(seg_tpts), 1, 1, 1, 1))
            time_series = time_series * window_mask

        # Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        nfreq = result.shape[0] // 2
        result = result[1:nfreq + 1, :]

        self.log.debug("result " + narray_describe(result))

        spectra = FourierSpectrum(source=self.time_series,
                                  segment_length=self.segment_length,
                                  array_data=result,
                                  windowing_function=self.window_function)
        spectra.configure()

        return spectra
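The segmentation step stacks overlapping windows along a new trailing axis, spreading the overlap evenly so the segments exactly tile the series. A minimal sketch of the start/overlap indexing with toy numbers, the extra state-variable axes dropped for clarity:

import numpy

tpts, seg_tpts, nseg = 100, 40, 3
overlap = (seg_tpts * nseg - tpts) / (nseg - 1.0)  # 10 points shared per segment
starts = [max(seg * (seg_tpts - overlap), 0) for seg in range(nseg)]
data = numpy.arange(float(tpts))
segments = numpy.stack(
    [data[int(s):int(s) + seg_tpts] for s in starts], axis=-1)
print(segments.shape)                              # (40, 3); segments start at 0, 30, 60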
Example #15
    def evaluate(self):
        """
        Calculate the continuous wavelet transform of time_series.
        """
        ts_shape = self.time_series.data.shape
        
        if self.frequencies.step == 0:
            self.log.warning("Frequency step can't be 0! Trying default step, 2e-3.")
            self.frequencies.step = 0.002
        
        freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                             self.frequencies.step)
        
        if (freqs.size == 0) or any(freqs <= 0.0):
            # TODO: Maybe should limit number of freqs... ~100 is probably a reasonable upper bound.
            self.log.warning("Invalid frequency range! Falling back to default.")
            self.log.debug("freqs")
            self.log.debug(narray_describe(freqs))
            self.frequencies = Range(lo=0.008, hi=0.060, step=0.002)
            freqs = numpy.arange(self.frequencies.lo, self.frequencies.hi,
                                 self.frequencies.step)

        self.log.debug("freqs")
        self.log.debug(narray_describe(freqs))

        sample_rate = self.time_series.sample_rate
        
        # Duke: code below is as given by Andreas Spiegler, I've just wrapped 
        # some of the original argument names
        nf = len(freqs)
        temporal_step = max((1, iround(self.sample_period / self.time_series.sample_period)))
        nt = int(numpy.ceil(ts_shape[0] / temporal_step))
        
        if not isinstance(self.q_ratio, numpy.ndarray):
            q_ratio = self.q_ratio * numpy.ones((1, nf))
        else:
            q_ratio = self.q_ratio
        
        if numpy.nanmin(q_ratio) < 5:
            msg = "q_ratio must not be lower than 5!"
            self.log.error(msg)
            raise Exception(msg)
        
        if numpy.nanmax(freqs) > sample_rate / 2.0:
            msg = "Sampling rate is too low for the requested frequency range!"
            self.log.error(msg)
            raise Exception(msg)
        
        # TODO: This isn't used, but min frequency seems like it should be important... Check with A.S.
        #  fmin = 3.0 * numpy.nanmin(q_ratio) * sample_rate / numpy.pi / nt
        sigma_f = freqs / q_ratio
        sigma_t = 1.0 / (2.0 * numpy.pi * sigma_f)
        
        if self.normalisation == 'energy':
            Amp = 1.0 / numpy.sqrt(sample_rate * numpy.sqrt(numpy.pi) * sigma_t)
        elif self.normalisation == 'gabor':
            Amp = numpy.sqrt(2.0 / numpy.pi) / sample_rate / sigma_t
        else:
            msg = "Unsupported normalisation: %s" % self.normalisation
            self.log.error(msg)
            raise Exception(msg)
        
        coef_shape = (nf, nt, ts_shape[1], ts_shape[2], ts_shape[3])
        
        coef = numpy.zeros(coef_shape, dtype=numpy.complex128)
        self.log.debug("coef")
        self.log.debug(narray_describe(coef))

        scales = numpy.arange(0, nf, 1)
        for i in scales:
            f0 = freqs[i]
            SDt = sigma_t[0, i]
            A = Amp[0, i]
            x = numpy.arange(0, 4.0 * SDt * sample_rate, 1) / sample_rate
            wvlt = A * numpy.exp(-x**2 / (2.0 * SDt**2)) * numpy.exp(2j * numpy.pi * f0 * x)
            wvlt = numpy.hstack((numpy.conjugate(wvlt[-1:0:-1]), wvlt))
            #util.self.log_debug_array(self.log, wvlt, "wvlt")
            
            for var in range(ts_shape[1]):
                for node in range(ts_shape[2]):
                    for mode in range(ts_shape[3]):
                        data = self.time_series.data[:, var, node, mode]
                        wt = signal.convolve(data, wvlt, 'same')
                        #util.self.log_debug_array(self.log, wt, "wt")
                        res = wt[0::temporal_step]
                        # NOTE: this is a horrible horrible quick hack (alas, a solution) to avoid broadcasting errors
                        # when using dt and sample periods which are not powers of 2.
                        coef[i, :, var, node, mode] = res if len(res) == nt else res[:coef.shape[1]] 
                        

        self.log.debug("coef")
        self.log.debug(narray_describe(coef))

        spectra = spectral.WaveletCoefficients(
            source=self.time_series,
            mother=self.mother,
            sample_period=self.sample_period,
            frequencies=self.frequencies.to_array(),
            normalisation=self.normalisation,
            q_ratio=self.q_ratio,
            array_data=coef)
        
        return spectra