Code example #1
    def compute_vertex_normals(self):
        """
        Estimates vertex normals, based on triangle normals weighted by the 
        angle they subtend at each vertex...
        """
        vert_norms = numpy.zeros((self.number_of_vertices, 3))
        for k in xrange(self.number_of_vertices):
            tri_list = list(self.vertex_triangles[k])
            angle_mask = self.triangles[tri_list, :] == k
            angles = self.triangle_angles[tri_list, :]
            angles = angles[angle_mask][:, numpy.newaxis]
            angle_scaling = angles / numpy.sum(angles, axis=0)
            vert_norms[k, :] = numpy.mean(angle_scaling *
                                          self.triangle_normals[tri_list, :],
                                          axis=0)
            #Scale by angle subtended.
            vert_norms[k, :] = vert_norms[k, :] / numpy.sqrt(
                numpy.sum(vert_norms[k, :]**2, axis=0))
            #Normalise to unit vectors.

        util.log_debug_array(LOG,
                             vert_norms,
                             "vertex_normals",
                             owner=self.__class__.__name__)
        self.vertex_normals = vert_norms
Code example #2
    def _find_triangle_angles(self):
        """
        Calculates the inner angles of all the triangles which make up a surface
        """
        verts = self.vertices
        # TODO: Should be possible with arrays, ie not nested loops...
        # (this was a direct translation of some old matlab code)
        angles = numpy.zeros((self.number_of_triangles, 3))
        for tt in xrange(self.number_of_triangles):
            triangle = self.triangles[tt, :]
            for ta in xrange(3):
                ang = numpy.roll(triangle, -ta)
                angles[tt, ta] = numpy.arccos(
                    numpy.dot(
                        (verts[ang[1], :] - verts[ang[0], :]) / numpy.sqrt(
                            numpy.sum((verts[ang[1], :] - verts[ang[0], :])**2,
                                      axis=0)),
                        (verts[ang[2], :] - verts[ang[0], :]) / numpy.sqrt(
                            numpy.sum((verts[ang[2], :] - verts[ang[0], :])**2,
                                      axis=0))))

        util.log_debug_array(LOG,
                             angles,
                             "triangle_angles",
                             owner=self.__class__.__name__)
        return angles
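
The TODO above can indeed be vectorized. A minimal sketch, assuming `vertices` and `triangles` are NumPy arrays of shape (n_vertices, 3) and (n_triangles, 3) (a hypothetical helper, not part of the library):

    import numpy

    def find_triangle_angles_vectorized(vertices, triangles):
        """Inner angles of all triangles: one numpy.roll per corner instead of nested loops."""
        angles = numpy.zeros((triangles.shape[0], 3))
        for ta in range(3):
            corner = numpy.roll(triangles, -ta, axis=1)
            u = vertices[corner[:, 1]] - vertices[corner[:, 0]]
            v = vertices[corner[:, 2]] - vertices[corner[:, 0]]
            u /= numpy.sqrt(numpy.sum(u ** 2, axis=1))[:, numpy.newaxis]
            v /= numpy.sqrt(numpy.sum(v ** 2, axis=1))[:, numpy.newaxis]
            # Clip guards against rounding error pushing the dot product outside [-1, 1].
            angles[:, ta] = numpy.arccos(numpy.clip(numpy.sum(u * v, axis=1), -1.0, 1.0))
        return angles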
Code example #3
    def compute_region_orientation(self):
        """Update the region_orientation attribute."""
        regions = numpy.unique(self.region_mapping)
        average_orientation = numpy.zeros((len(regions), 3))
        if len(self.region_mapping) > len(self.vertex_normals):
            # Count how many vertices each region has.
            # (bincount indexes by region id; Counter.values() ordering is not guaranteed)
            vertices_per_region = numpy.bincount(self.region_mapping)
            # Presumably non-cortical regions will have a single vertex assigned.
            non_cortical_regions = numpy.where(vertices_per_region == 1)
            cortical_regions = numpy.where(vertices_per_region > 1)
            cortical_region_mapping = numpy.asarray(
                [x for x in self.region_mapping if x in cortical_regions[0]])
            #Average orientation of the region
            for k in cortical_regions[0]:
                orient = self.vertex_normals[cortical_region_mapping == k, :]
                avg_orient = numpy.mean(orient, axis=0)
                average_orientation[k, :] = avg_orient / numpy.sqrt(
                    numpy.sum(avg_orient**2))
            for nk in non_cortical_regions[0]:
                average_orientation[nk, :] = numpy.zeros((1, 3))
        else:
            #Average orientation of the region
            for k in regions:
                orient = self.vertex_normals[self.region_mapping == k, :]
                avg_orient = numpy.mean(orient, axis=0)
                average_orientation[k, :] = avg_orient / numpy.sqrt(
                    numpy.sum(avg_orient**2))

        util.log_debug_array(LOG,
                             average_orientation,
                             "region_orientation",
                             owner=self.__class__.__name__)
        self.region_orientation = average_orientation
Code example #4
File: fcd_adapter.py, Project: maedoc/tvb-framework
    def configure(self, time_series, sw, sp):
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

        :param time_series: the input time-series for which fcd matrix should be computed
        :param sw: length of the sliding window
        :param sp: spanning time: the distance between two consecutive sliding windows
        """
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.
        """

        self.input_shape = time_series.read_data_shape()
        log_debug_array(self.log, time_series, "time_series")
        actual_sp = float(sp) / time_series.sample_period
        actual_sw = float(sw) / time_series.sample_period
        actual_ts_length = self.input_shape[0]

        if actual_sw >= actual_ts_length or actual_sp >= actual_ts_length or actual_sp >= actual_sw:
            raise LaunchException(
                "Spanning (Sp) and Sliding (Sw) window size parameters need to be less than the TS length, "
                "and Sp < Sw. After calibration with sampling period, current values are: Sp=%d, Sw=%d, Ts=%d). "
                "Please configure valid input parameters." % (actual_sp, actual_sw, actual_ts_length))

        # -------------------- Fill Algorithm for Analysis -------------------##
        self.algorithm = FcdCalculator(time_series=time_series, sw=sw, sp=sp)
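
For intuition about `sw` and `sp`: after division by the sample period they are the window length and step expressed in samples. A small sketch of the resulting window start indices (a hypothetical helper, not part of the adapter):

    def sliding_window_starts(ts_length, sw, sp):
        """Start indices of length-`sw` windows advanced by `sp` samples."""
        return list(range(0, ts_length - sw + 1, sp))

    # e.g. sliding_window_starts(10, 4, 2) -> [0, 2, 4, 6]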
Code example #5
 def evaluate(self):
     """
     Compute the temporal covariance between nodes in the time_series.
     """
     cls_attr_name = self.__class__.__name__+".time_series"
     self.time_series.trait["data"].log_debug(owner = cls_attr_name)
     
     data_shape = self.time_series.data.shape
     
     #(nodes, nodes, state-variables, modes)
     result_shape = (data_shape[2], data_shape[2], data_shape[1], data_shape[3])
     LOG.info("result shape will be: %s" % str(result_shape))
     
     result = numpy.zeros(result_shape)
     
     #One inter-node temporal covariance matrix for each state-var & mode.
     for mode in range(data_shape[3]):
         for var in range(data_shape[1]):
             data = self.time_series.data[:, var, :, mode]
             data = data - data.mean(axis=0)[numpy.newaxis, :]
             result[:, :, var, mode] = numpy.cov(data.T)
     
     util.log_debug_array(LOG, result, "result")
     
     covariance = graph.Covariance(source = self.time_series,
                                   array_data = result,
                                   use_storage = False)
     
     return covariance
Code example #6
 def compute_vertex_normals(self):
     """
     Estimates vertex normals, based on triangle normals weighted by the 
     angle they subtend at each vertex...
     """
     vert_norms = numpy.zeros((self.number_of_vertices, 3))
     bad_normal_count = 0
     for k in xrange(self.number_of_vertices):
         try:
             tri_list = list(self.vertex_triangles[k])
             angle_mask = self.triangles[tri_list, :] == k
             angles = self.triangle_angles[tri_list, :]
             angles = angles[angle_mask][:, numpy.newaxis]
             angle_scaling = angles / numpy.sum(angles, axis=0)
             vert_norms[k, :] = numpy.mean(angle_scaling * self.triangle_normals[tri_list, :], axis=0)
             #Scale by angle subtended.
             vert_norms[k, :] = vert_norms[k, :] / numpy.sqrt(numpy.sum(vert_norms[k, :] ** 2, axis=0))
             #Normalise to unit vectors.
         except (ValueError, FloatingPointError):
             # If normals are bad, default to position vector
             # A nicer solution would be to detect degenerate triangles and ignore their
             # contribution to the vertex normal
             vert_norms[k, :] = self.vertices[k] / numpy.sqrt(self.vertices[k].dot(self.vertices[k]))
             bad_normal_count += 1
     if bad_normal_count:
         self.logger.warning("%d vertices have bad normals" % bad_normal_count)
     util.log_debug_array(LOG, vert_norms, "vertex_normals", owner=self.__class__.__name__)
     self.vertex_normals = vert_norms
Code example #7
    def evaluate(self):
        """
        Compute the temporal covariance between nodes in the time_series.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        data_shape = self.time_series.data.shape

        #(nodes, nodes, state-variables, modes)
        result_shape = (data_shape[2], data_shape[2], data_shape[1],
                        data_shape[3])
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        #One inter-node temporal covariance matrix for each state-var & mode.
        for mode in range(data_shape[3]):
            for var in range(data_shape[1]):
                data = self.time_series.data[:, var, :, mode]
                data = data - data.mean(axis=0)[numpy.newaxis, :]
                result[:, :, var, mode] = numpy.cov(data.T)

        util.log_debug_array(LOG, result, "result")

        covariance = graph.Covariance(source=self.time_series,
                                      array_data=result,
                                      use_storage=False)

        return covariance
Code example #8
    def configure(self, time_series, sw, sp):
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

        :param time_series: the input time-series for which fcd matrix should be computed
        :param sw: length of the sliding window
        :param sp: spanning time: the distance between two consecutive sliding windows
        """
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.
        """

        self.input_shape = time_series.read_data_shape()
        log_debug_array(self.log, time_series, "time_series")
        actual_sp = float(sp) / time_series.sample_period
        actual_sw = float(sw) / time_series.sample_period
        actual_ts_length = self.input_shape[0]

        if actual_sw >= actual_ts_length or actual_sp >= actual_ts_length or actual_sp >= actual_sw:
            raise LaunchException(
                "Spanning (Sp) and Sliding (Sw) window size parameters need to be less than the TS length, "
                "and Sp < Sw. After calibration with sampling period, current values are: Sp=%d, Sw=%d, Ts=%d). "
                "Please configure valid input parameters." %
                (actual_sp, actual_sw, actual_ts_length))

        # -------------------- Fill Algorithm for Analysis -------------------##
        self.algorithm = FcdCalculator(time_series=time_series, sw=sw, sp=sp)
Code example #9
    def configure(self, time_series, dt=None, bold_model=None, RBM=None, neural_input_transformation=None):
        """
        Store the input shape to be later used to estimate memory usage. Also
        create the algorithm instance.
        """
        self.input_shape = time_series.read_data_shape()
        log_debug_array(LOG, time_series, "time_series")
        
        ##-------------------- Fill Algorithm for Analysis -------------------##
        algorithm = BalloonModel()
        
        if dt is not None:
            algorithm.dt = dt
        else:
            algorithm.dt = time_series.sample_period / 1000.

        if bold_model is not None:
            algorithm.bold_model = bold_model
        if RBM is not None:
            algorithm.RBM = RBM
        if neural_input_transformation is not None:
            algorithm.neural_input_transformation = neural_input_transformation
        
        self.algorithm = algorithm
        self.algorithm.time_series = time_series
Code example #10
    def compute_region_areas(self):
        """Update the region_area attribute."""
        regions = numpy.unique(self.region_mapping)
        number_of_regions = len(regions)
        region_surface_area = numpy.zeros((number_of_regions, 1))
        avt = numpy.array(self.vertex_triangles)
        #NOTE: Slightly overestimates as it counts overlapping border triangles,
        #      but, not really a problem provided triangle-size << region-size.

        #NOTE: Check if there are non-cortical regions.

        if len(self.region_mapping) > len(self.vertex_normals):
            vertices_per_region = numpy.bincount(self.region_mapping)
            # Assume non-cortical regions have a single vertex assigned.
            non_cortical_regions, = numpy.where(vertices_per_region == 1)
            cortical_regions, = numpy.where(vertices_per_region > 1)
            #Keep only the cortical part of the region mapping.
            cortical_region_mapping = numpy.asarray([x for x in self.region_mapping if x in cortical_regions])

            for nk in non_cortical_regions:
                region_surface_area[nk, :] = 0.0
            for k in cortical_regions:
                regs = map(set, avt[cortical_region_mapping == k])
                region_triangles = set.union(*regs)
                region_surface_area[k] = self.triangle_areas[list(region_triangles)].sum()
        else:
            for k in regions:
                regs = map(set, avt[self.region_mapping == k])
                region_triangles = set.union(*regs)
                region_surface_area[k] = self.triangle_areas[list(region_triangles)].sum()

        util.log_debug_array(LOG, region_surface_area, "region_areas", owner=self.__class__.__name__)
        self.region_areas = region_surface_area
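
The `map(set, ...)`/`set.union` idiom above collects every triangle that touches any vertex of a region. A NumPy-only sketch of the same step, assuming `avt` is an object array of per-vertex triangle index sequences and `region_mapping` is an integer array:

    region_vertices, = numpy.where(region_mapping == k)
    region_triangles = numpy.unique(numpy.concatenate([list(avt[v]) for v in region_vertices]))
    area_k = triangle_areas[region_triangles].sum()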
Code example #11
    def configure(self,
                  time_series,
                  dt=None,
                  bold_model=None,
                  RBM=None,
                  neural_input_transformation=None):
        """
        Store the input shape to be later used to estimate memory usage. Also
        create the algorithm instance.
        """
        self.input_shape = time_series.read_data_shape()
        log_debug_array(LOG, time_series, "time_series")

        ##-------------------- Fill Algorithm for Analysis -------------------##
        algorithm = BalloonModel()

        if dt is not None:
            algorithm.dt = dt
        else:
            algorithm.dt = time_series.sample_period / 1000.

        if bold_model is not None:
            algorithm.bold_model = bold_model
        if RBM is not None:
            algorithm.RBM = RBM
        if neural_input_transformation is not None:
            algorithm.neural_input_transformation = neural_input_transformation

        self.algorithm = algorithm
        self.algorithm.time_series = time_series
Code example #12
 def configure(self, time_series, mother=None, sample_period=None, normalisation=None, q_ratio=None,
               frequencies='Range', frequencies_parameters=None):
     """
     Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.
     """
     self.input_shape = time_series.read_data_shape()
     log_debug_array(LOG, time_series, "time_series")
     
     ##-------------------- Fill Algorithm for Analysis -------------------##
     algorithm = ContinuousWaveletTransform()
     if mother is not None:
         algorithm.mother = mother
     
     if sample_period is not None:
         algorithm.sample_period = sample_period
     
     if (frequencies_parameters is not None and 'lo' in frequencies_parameters 
             and 'hi' in frequencies_parameters and frequencies_parameters['hi'] != frequencies_parameters['lo']):
         algorithm.frequencies = Range(**frequencies_parameters)
     
     if normalisation is not None:
         algorithm.normalisation = normalisation
     
     if q_ratio is not None:
         algorithm.q_ratio = q_ratio
     
     self.algorithm = algorithm
     self.algorithm.time_series = time_series
Code example #13
    def _set_vertex_mapping(self, spatial_mask):
        """ 
        Set self._region_average attribute based on region mapping...
        If there are subcortical structures, the code below assumes that the
        region mapping has length number_of_nodes = number_of_vertices + number_of_non_cortical_areas.

        """
        # number of nodes = number of vertices + non-cortical regions
        number_of_nodes = self.region_mapping.shape[0]   

        # number of areas = number of unique areas in the region mapping.
        # NOTE: Avoid using the index values in the spatial_mask in case we're dealing with only one hemisphere.
        number_of_areas = len(numpy.unique(spatial_mask))  
        vertex_mapping = numpy.zeros((number_of_nodes, number_of_areas))

        # If True, it means there are subcortical structures
        if number_of_nodes > self.number_of_vertices:
            # bincount indexes by region id and avoids the 'collections' dependency.
            vertices_per_region = numpy.bincount(self.region_mapping)
            non_cortical_regions = numpy.where(vertices_per_region == 1)
            LOG.info("set vertex mapping: There are %d non-cortical regions" % len(non_cortical_regions[0]))
            cortical_regions = numpy.where(vertices_per_region > 1)
            cortical_region_mapping = [x for x in self.region_mapping if x in cortical_regions[0]]
            non_cortical_region_mapping = [x for x in self.region_mapping if x in non_cortical_regions[0]]
            vertex_mapping[numpy.arange(self.number_of_vertices), cortical_region_mapping] = 1
            # The remaining (non-cortical) nodes map one-to-one onto their regions.
            vertex_mapping[self.number_of_vertices:, non_cortical_region_mapping] = 1

        else:
            vertex_mapping[numpy.arange(number_of_nodes), spatial_mask] = 1
        self._vertex_mapping = vertex_mapping

        util.log_debug_array(LOG, self._vertex_mapping, "vertex_mapping", owner=self.__class__.__name__)
Code example #14
 def _find_triangle_centres(self):
     """
     Calculate the location of the centre of all triangles comprising the mesh surface.
     """
     tri_verts = self.vertices[self.triangles, :]
     tri_centres = numpy.mean(tri_verts, axis=1)
     util.log_debug_array(LOG, tri_centres, "tri_centres")
     return tri_centres
Code example #15
 def frequency(self):
     """ Frequencies represented by the wavelet spectrogram."""
     if self._frequency is None:
         self._frequency = numpy.arange(self.frequencies.lo,
                                        self.frequencies.hi,
                                        self.frequencies.step)
         util.log_debug_array(LOG, self._frequency, "frequency")
     return self._frequency
Code example #16
 def frequency(self):
     """ Frequencies represented in the Complex Coherence Spectrum."""
     if self._frequency is None:
         self._frequency = numpy.arange(self.freq_step,
                                        self.max_freq + self.freq_step,
                                        self.freq_step)
     util.log_debug_array(LOG, self._frequency, "frequency")
     return self._frequency
Code example #17
 def frequency(self):
     """ Frequencies represented in the Complex Coherence Spectrum."""
     if self._frequency is None:
         self._frequency = numpy.arange(self.freq_step, 
                                        self.max_freq + self.freq_step,
                                        self.freq_step)
     util.log_debug_array(LOG, self._frequency, "frequency")
     return self._frequency
Code example #18
 def frequency(self):
     """ Frequencies represented by the wavelet spectrogram."""
     if self._frequency is None:
         self._frequency = numpy.arange(self.frequencies.lo, 
                                        self.frequencies.hi, 
                                        self.frequencies.step)
         util.log_debug_array(LOG, self._frequency, "frequency")
     return self._frequency
Code example #19
 def _find_triangle_centres(self):
     """
     Calculate the location of the centre of all triangles comprising the mesh surface.
     """
     tri_verts = self.vertices[self.triangles, :]
     tri_centres = numpy.mean(tri_verts, axis=1)
     util.log_debug_array(LOG, tri_centres, "tri_centres")
     return tri_centres
Code example #20
 def configure(self, time_series):
     """
     Store the input shape to be later used to estimate memory usage. Also
     create the algorithm instance.
     """
     self.input_shape = time_series.read_data_shape()
     log_debug_array(LOG, time_series, "time_series")
     ##-------------------- Fill Algorithm for Analysis -------------------##
     self.algorithm = PCA()
Code example #21
 def configure(self, time_series):
     """
     Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.
     """
     self.input_shape = time_series.read_data_shape()
     log_debug_array(LOG, time_series, "time_series")
     
     ##-------------------- Fill Algorithm for Analysis -------------------##
     self.algorithm = NodeCovariance()
Code example #22
 def evaluate(self):
     """
     Calculate the FFT of time_series broken into segments of length
     segment_length and filtered by window_function.
     """
     cls_attr_name = self.__class__.__name__+".time_series"
     self.time_series.trait["data"].log_debug(owner = cls_attr_name)
     
     tpts = self.time_series.data.shape[0]
     time_series_length = tpts * self.time_series.sample_period
     
     #Segment time-series, overlapping if necessary
     nseg = int(numpy.ceil(time_series_length / self.segment_length))
     if nseg > 1:
         seg_tpts = int(self.segment_length / self.time_series.sample_period)
         overlap = ((seg_tpts * nseg) - tpts) / (nseg - 1)
         starts = [int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)]
         segments = [self.time_series.data[start:start + seg_tpts]
                     for start in starts]
         segments = [segment[:, :, :, numpy.newaxis] for segment in segments]
         time_series = numpy.concatenate(segments, axis=4)
     else:
         self.segment_length = time_series_length
         time_series = self.time_series.data[:, :, :, numpy.newaxis]
         seg_tpts = time_series.shape[0]
     
     LOG.debug("Segment length being used is: %s" % self.segment_length)
     
     #Base-line correct the segmented time-series  
     time_series = sp_signal.detrend(time_series, axis=0)
     util.log_debug_array(LOG, time_series, "time_series")
     
     #Apply windowing function
     if self.window_function is not None:
         if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
             LOG.error("Windowing function is: %s" % self.window_function)
             LOG.error("Must be in: %s" % str(SUPPORTED_WINDOWING_FUNCTIONS))
         
         window_function = eval("".join(("numpy.", self.window_function)))
         window_mask = numpy.reshape(window_function(seg_tpts), 
                                     (seg_tpts, 1, 1, 1, 1))
         time_series = time_series * window_mask
     
     #Calculate the FFT
     result = numpy.fft.fft(time_series, axis=0)
     nfreq = result.shape[0] // 2
     result = result[1:nfreq + 1, :]
     util.log_debug_array(LOG, result, "result")
     
     spectra = spectral.FourierSpectrum(source = self.time_series, 
                               segment_length = self.segment_length,
                               window_function = self.window_function,
                               array_data = result,
                               use_storage = False)
     
     return spectra
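
The `eval` call above only performs an attribute lookup on numpy, so a safer equivalent is `getattr` (a sketch, assuming `self.window_function` names a numpy window such as 'hamming' or 'hanning'):

    window_function = getattr(numpy, self.window_function)  # e.g. numpy.hamming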
Code example #23
    def _set_region_sum(self, spatial_mask):
        """ 
        Set self._region_sum attribute based on region mapping...
        """

        self.vertex_mapping = spatial_mask

        self._region_sum = self.vertex_mapping.T

        util.log_debug_array(LOG, self._region_sum, "region_sum", owner=self.__class__.__name__)
Code example #24
    def _set_region_sum(self, spatial_mask):
        """ 
        Set self._region_sum attribute based on region mapping...
        """

        self.vertex_mapping = spatial_mask

        self._region_sum = self.vertex_mapping.T

        util.log_debug_array(LOG, self._region_sum, "region_sum", owner=self.__class__.__name__)
Code example #25
    def evaluate(self):
        """
        Calculate the FFT of time_series broken into segments of length
        segment_length and filtered by window_function.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)
        
        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period
        
        #Segment time-series, overlapping if necessary
        nseg = int(numpy.ceil(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(numpy.ceil(self.segment_length / self.time_series.sample_period))
            overlap = (seg_tpts * nseg - tpts) / (nseg - 1.0)
            starts = [int(max(seg * (seg_tpts - overlap), 0)) for seg in range(nseg)]
            segments = [self.time_series.data[start:start + seg_tpts]
                        for start in starts]
            segments = [segment[:, :, :, numpy.newaxis] for segment in segments]
            time_series = numpy.concatenate(segments, axis=4)
        else:
            self.segment_length = time_series_length
            time_series = self.time_series.data[:, :, :, numpy.newaxis]
            seg_tpts = time_series.shape[0]
        
        LOG.debug("Segment length being used is: %s" % self.segment_length)
        
        #Base-line correct the segmented time-series  
        time_series = sp_signal.detrend(time_series, axis=0)
        util.log_debug_array(LOG, time_series, "time_series")
        
        #Apply windowing function
        if self.window_function is not None and self.window_function != [None]:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                LOG.error("Windowing function is: %s" % self.window_function)
                LOG.error("Must be in: %s" % str(SUPPORTED_WINDOWING_FUNCTIONS))
            else:
                window_function = eval("".join(("numpy.", self.window_function[0])))
                window_mask = numpy.reshape(window_function(seg_tpts),
                                            (seg_tpts, 1, 1, 1, 1))
                time_series = time_series * window_mask

        #Calculate the FFT
        result = numpy.fft.fft(time_series, axis=0)
        nfreq = result.shape[0] // 2
        result = result[1:nfreq + 1, :]
        util.log_debug_array(LOG, result, "result")

        spectra = spectral.FourierSpectrum(source=self.time_series,
                                           segment_length=self.segment_length,
                                           array_data=result,
                                           use_storage=False)

        return spectra
Code example #26
    def configure(self, time_series):
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

        :param time_series: the input time-series for which cross correlation should be computed
        """
        self.input_shape = time_series.read_data_shape()
        log_debug_array(LOG, time_series, "time_series")
        
        ##-------------------- Fill Algorithm for Analysis -------------------##
        self.algorithm = CrossCorrelate()
Code example #27
    def _find_triangle_areas(self):
        """Calculates the area of triangles making up a surface."""
        tri_u = self.vertices[self.triangles[:, 1], :] - self.vertices[self.triangles[:, 0], :]
        tri_v = self.vertices[self.triangles[:, 2], :] - self.vertices[self.triangles[:, 0], :]

        tri_norm = numpy.cross(tri_u, tri_v)
        triangle_areas = numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1)) / 2.0
        triangle_areas = triangle_areas[:, numpy.newaxis]
        util.log_debug_array(LOG, triangle_areas, "triangle_areas", owner=self.__class__.__name__)

        return triangle_areas
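
The formula is the standard half cross-product area, |tri_u x tri_v| / 2. A quick sanity check on a right triangle with legs 3 and 4 (area 6):

    import numpy

    verts = numpy.array([[0., 0., 0.], [3., 0., 0.], [0., 4., 0.]])
    tris = numpy.array([[0, 1, 2]])
    tri_u = verts[tris[:, 1]] - verts[tris[:, 0]]
    tri_v = verts[tris[:, 2]] - verts[tris[:, 0]]
    areas = numpy.sqrt(numpy.sum(numpy.cross(tri_u, tri_v) ** 2, axis=1)) / 2.0
    # areas -> array([ 6.])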
Code example #28
    def _find_triangle_areas(self):
        """Calculates the area of triangles making up a surface."""
        tri_u = self.vertices[self.triangles[:, 1], :] - self.vertices[self.triangles[:, 0], :]
        tri_v = self.vertices[self.triangles[:, 2], :] - self.vertices[self.triangles[:, 0], :]

        tri_norm = numpy.cross(tri_u, tri_v)
        triangle_areas = numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1)) / 2.0
        triangle_areas = triangle_areas[:, numpy.newaxis]
        util.log_debug_array(LOG, triangle_areas, "triangle_areas", owner=self.__class__.__name__)

        return triangle_areas
Code example #29
    def _set_region_average(self, spatial_mask):
        """ 
        Set self._region_average attribute based on region mapping...
        """

        self.region_sum = spatial_mask

        nodes_per_area = numpy.sum(self.region_sum, axis=1)[:, numpy.newaxis]
        self._region_average = self.region_sum / nodes_per_area

        util.log_debug_array(LOG, self._region_average, "region_average", owner=self.__class__.__name__)
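
A tiny worked example of the normalisation above, with two regions over three nodes:

    # region_sum (regions x nodes):   nodes_per_area:   region_average:
    # [[1, 1, 0],                     [[2],             [[0.5, 0.5, 0.0],
    #  [0, 0, 1]]                      [1]]              [0.0, 0.0, 1.0]]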
Code example #30
 def configure(self, time_series, nfft=None):
     """
     Store the input shape to be later used to estimate memory usage.
     Also create the algorithm instance.
     """
     self.input_shape = time_series.read_data_shape()
     log_debug_array(LOG, time_series, "time_series")
     
     ##-------------------- Fill Algorithm for Analysis -------------------##
     self.algorithm = NodeCoherence()
     if nfft is not None:
         self.algorithm.nfft = nfft
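
The `nfft` parameter sets the FFT block size and hence the frequency resolution of the coherence spectrum. A sketch of the one-sided frequency grid this implies, assuming a sample rate `fs` (illustrative values, not taken from the adapter):

    import numpy

    fs, nfft = 250.0, 256
    freq = fs * numpy.arange(nfft // 2 + 1) / nfft  # 0 .. fs/2 in steps of fs/nfft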
Code example #31
    def _set_region_average(self, spatial_mask):
        """ 
        Set self._region_average attribute based on region mapping...
        """

        self.region_sum = spatial_mask

        nodes_per_area = numpy.sum(self.region_sum, axis=1)[:, numpy.newaxis]
        self._region_average = self.region_sum / nodes_per_area

        util.log_debug_array(LOG, self._region_average, "region_average", owner=self.__class__.__name__)
Code example #32
    def launch(self, time_series, algorithms=None):
        """ 
        Launch algorithm and build results.

        :param time_series: the time series on which the algorithms are run
        :param algorithms:  the algorithms to be run for computing measures on the time series
        :type  algorithms:  any subclass of BaseTimeseriesMetricAlgorithm (KuramotoIndex, \
                    GlobalVariance, VarianceNodeVariance)
        :rtype: `DatatypeMeasure`
        """
        if algorithms is None:
            algorithms = self.available_algorithms.keys()
        shape = time_series.read_data_shape()
        log_debug_array(LOG, time_series, "time_series")

        metrics_results = {}
        for algorithm_name in algorithms:
            ##------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
            node_slice = [
                slice(shape[0]),
                slice(shape[1]),
                slice(shape[2]),
                slice(shape[3])
            ]

            ##---------- Iterate over slices and compose final result ------------##
            unstored_ts = TimeSeries(use_storage=False)

            unstored_ts.data = time_series.read_data_slice(tuple(node_slice))

            ##-------------------- Fill Algorithm for Analysis -------------------##
            algorithm = self.available_algorithms[algorithm_name](
                time_series=unstored_ts)
            ## Validate that current algorithm's filter is valid.
            if (algorithm.accept_filter is not None and
                    not algorithm.accept_filter.get_python_filter_equivalent(time_series)):
                LOG.warning(
                    'Measure algorithm will not be computed because of incompatibility on input. '
                    'Filters failed on algo: ' + str(algorithm_name))
                continue
            else:
                LOG.debug("Applying measure: " + str(algorithm_name))

            unstored_result = algorithm.evaluate()
            ##----------------- Prepare a Float object for result ----------------##
            metrics_results[algorithm_name] = unstored_result

        result = DatatypeMeasure(analyzed_datatype=time_series,
                                 storage_path=self.storage_path,
                                 data_name=self._ui_name,
                                 metrics=metrics_results)
        return result
Code example #33
    def launch(self, time_series, algorithms=None, start_point=None, segment=None):
        """ 
        Launch algorithm and build results.

        :param time_series: the time series on which the algorithms are run
        :param algorithms:  the algorithms to be run for computing measures on the time series
        :type  algorithms:  any subclass of BaseTimeseriesMetricAlgorithm
                            (KuramotoIndex, GlobalVariance, VarianceNodeVariance)
        :rtype: `DatatypeMeasure`
        """
        if algorithms is None:
            algorithms = self.available_algorithms.keys()

        shape = time_series.read_data_shape()
        log_debug_array(LOG, time_series, "time_series")

        metrics_results = {}
        for algorithm_name in algorithms:
            ##------------- NOTE: Assumes 4D, Simulator timeSeries. --------------##
            node_slice = [slice(shape[0]), slice(shape[1]), slice(shape[2]), slice(shape[3])]

            ##---------- Iterate over slices and compose final result ------------##
            unstored_ts = TimeSeries(use_storage=False)

            unstored_ts.data = time_series.read_data_slice(tuple(node_slice))

            ##-------------------- Fill Algorithm for Analysis -------------------##
            algorithm = self.available_algorithms[algorithm_name](time_series=unstored_ts)
            if segment is not None:
                algorithm.segment = segment
            if start_point is not None:
                algorithm.start_point = start_point

            ## Validate that current algorithm's filter is valid.
            if (algorithm.accept_filter is not None and
                    not algorithm.accept_filter.get_python_filter_equivalent(time_series)):
                LOG.warning('Measure algorithm will not be computed because of incompatibility on input. '
                            'Filters failed on algo: ' + str(algorithm_name))
                continue
            else:
                LOG.debug("Applying measure: " + str(algorithm_name))

            unstored_result = algorithm.evaluate()
            ##----------------- Prepare a Float object(s) for result ----------------##
            if isinstance(unstored_result, dict):
                metrics_results.update(unstored_result)
            else:
                metrics_results[algorithm_name] = unstored_result

        result = DatatypeMeasure(analyzed_datatype=time_series, storage_path=self.storage_path,
                                 data_name=self._ui_name, metrics=metrics_results)
        return result
Code example #34
    def compute_region_orientation(self):
        """
        """
        regions = numpy.unique(self.region_mapping)
        average_orientation = numpy.zeros((len(regions), 3))
        #Average orientation of the region
        for k in regions:
            orient = self.vertex_normals[self.region_mapping == k, :]
            avg_orient = numpy.mean(orient, axis=0)
            average_orientation[k, :] = avg_orient / numpy.sqrt(numpy.sum(avg_orient ** 2))

        util.log_debug_array(LOG, average_orientation, "region_orientation", owner=self.__class__.__name__)
        self.region_orientation = average_orientation
Code example #35
    def _set_vertex_mapping(self, spatial_mask):
        """ 
        Set self._region_average attribute based on region mapping...
        """
        number_of_nodes = self.region_mapping.shape[0]  # TODO: need to support non-cortical regions here
        number_of_areas = len(numpy.unique(spatial_mask))  # TODO: need to support non-cortical regions here
        vertex_mapping = numpy.zeros((number_of_nodes, number_of_areas))
        vertex_mapping[numpy.arange(number_of_nodes), spatial_mask] = 1

        self._vertex_mapping = vertex_mapping

        util.log_debug_array(LOG, self._vertex_mapping, "vertex_mapping", owner=self.__class__.__name__)
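
The fancy-indexing assignment builds a one-hot node-to-area matrix. A minimal example:

    import numpy

    spatial_mask = numpy.array([0, 0, 1])        # 3 nodes, 2 areas
    vertex_mapping = numpy.zeros((3, 2))
    vertex_mapping[numpy.arange(3), spatial_mask] = 1
    # -> [[1., 0.],
    #     [1., 0.],
    #     [0., 1.]]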
Code example #36
 def configure_space(self, distance):
     """
     Stores the distance vector as an attribute of the spatiotemporal pattern
     and uses it to generate the spatial pattern vector.
     
     Depending on the equations used and their interpretation, distance can be
     an actual physical distance on a surface, a geodesic distance (along the
     surface) away from some focal point, or a per-node weighting...
     """
     util.log_debug_array(LOG, distance, "distance")
     #Set the discrete representation of space.
     self.space = distance
     self.spatial_pattern = self.space
Code example #37
    def _set_vertex_mapping(self, spatial_mask):
        """ 
        Set self._region_average attribute based on region mapping...
        """
        number_of_nodes = self.region_mapping.shape[0]  # TODO: need to support non-cortical regions here
        number_of_areas = len(numpy.unique(spatial_mask))  # TODO: need to support non-cortical regions here
        vertex_mapping = numpy.zeros((number_of_nodes, number_of_areas))
        vertex_mapping[numpy.arange(number_of_nodes), spatial_mask] = 1

        self._vertex_mapping = vertex_mapping

        util.log_debug_array(LOG, self._vertex_mapping, "vertex_mapping", owner=self.__class__.__name__)
Code example #38
 def configure_space(self, distance):
     """
     Stores the distance vector as an attribute of the spatiotemporal pattern
     and uses it to generate the spatial pattern vector.
     
     Depending on the equations used and their interpretation, distance can be
     an actual physical distance on a surface, a geodesic distance (along the
     surface) away from some focal point, or a per-node weighting...
     """
     util.log_debug_array(LOG, distance, "distance")
     #Set the discrete representation of space.
     self.space = distance
     self.spatial_pattern = self.space
Code example #39
    def compute_region_orientation(self):
        """
        """
        regions = numpy.unique(self.region_mapping)
        average_orientation = numpy.zeros((len(regions), 3))
        #Average orientation of the region
        for k in regions:
            orient = self.vertex_normals[self.region_mapping == k, :]
            avg_orient = numpy.mean(orient, axis=0)
            average_orientation[k, :] = avg_orient / numpy.sqrt(numpy.sum(avg_orient ** 2))

        util.log_debug_array(LOG, average_orientation, "region_orientation", owner=self.__class__.__name__)
        self.region_orientation = average_orientation
Code example #40
    def compute_triangle_normals(self):
        """Calculates triangle normals."""
        tri_u = self.vertices[self.triangles[:, 1], :] - self.vertices[self.triangles[:, 0], :]
        tri_v = self.vertices[self.triangles[:, 2], :] - self.vertices[self.triangles[:, 0], :]

        tri_norm = numpy.cross(tri_u, tri_v)

        try:
            self.triangle_normals = tri_norm / numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1))[:, numpy.newaxis]
        except FloatingPointError:
            #TODO: NaN generation would stop execution, however for normals this case could maybe be 
            # handled in a better way.
            self.triangle_normals = tri_norm
        util.log_debug_array(LOG, self.triangle_normals, "triangle_normals", owner=self.__class__.__name__)
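
A sketch for the TODO above: normalise only where the norm is nonzero, so degenerate (zero-area) triangles keep their raw zero normal instead of producing NaNs (assumes `tri_norm` as computed above):

    norms = numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1))
    ok = norms > 0
    unit_normals = tri_norm.copy()
    unit_normals[ok] = tri_norm[ok] / norms[ok][:, numpy.newaxis]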
Code example #41
    def compute_triangle_normals(self):
        """Calculates triangle normals."""
        tri_u = self.vertices[self.triangles[:, 1], :] - self.vertices[self.triangles[:, 0], :]
        tri_v = self.vertices[self.triangles[:, 2], :] - self.vertices[self.triangles[:, 0], :]

        tri_norm = numpy.cross(tri_u, tri_v)

        try:
            self.triangle_normals = tri_norm / numpy.sqrt(numpy.sum(tri_norm ** 2, axis=1))[:, numpy.newaxis]
        except FloatingPointError:
            #TODO: NaN generation would stop execution, however for normals this case could maybe be 
            # handled in a better way.
            self.triangle_normals = tri_norm
        util.log_debug_array(LOG, self.triangle_normals, "triangle_normals", owner=self.__class__.__name__)
Code example #42
File: node_coherence.py, Project: virati/tvb-library
 def evaluate(self):
     "Evaluate coherence on time series."
     cls_attr_name = self.__class__.__name__ + ".time_series"
     self.time_series.trait["data"].log_debug(owner=cls_attr_name)
     srate = self.time_series.sample_rate
     coh, freq = coherence(self.time_series.data, srate, nfft=self.nfft)
     util.log_debug_array(LOG, coh, "coherence")
     util.log_debug_array(LOG, freq, "freq")
     spec = spectral.CoherenceSpectrum(source=self.time_series,
                                       nfft=self.nfft,
                                       array_data=coh,
                                       frequency=freq,
                                       use_storage=False)
     return spec
Code example #43
    def compute_region_areas(self):
        """
        """
        regions = numpy.unique(self.region_mapping)
        region_surface_area = numpy.zeros((len(regions), 1))
        avt = numpy.array(self.vertex_triangles)
        #NOTE: Slightly overestimates as it counts overlapping border triangles,
        #      but, not really a problem provided triangle-size << region-size.
        for k in regions:
            regs = map(set, avt[self.region_mapping == k])
            region_triangles = set.union(*regs)
            region_surface_area[k] = self.triangle_areas[list(region_triangles)].sum()

        util.log_debug_array(LOG, region_surface_area, "region_areas", owner=self.__class__.__name__)
        self.region_areas = region_surface_area
Code example #44
    def compute_region_areas(self):
        """
        """
        regions = numpy.unique(self.region_mapping)
        region_surface_area = numpy.zeros((len(regions), 1))
        avt = numpy.array(self.vertex_triangles)
        #NOTE: Slightly overestimates as it counts overlapping border triangles,
        #      but, not really a problem provided triangle-size << region-size.
        for k in regions:
            regs = map(set, avt[self.region_mapping == k])
            region_triangles = set.union(*regs)
            region_surface_area[k] = self.triangle_areas[list(region_triangles)].sum()

        util.log_debug_array(LOG, region_surface_area, "region_areas", owner=self.__class__.__name__)
        self.region_areas = region_surface_area
Code example #45
    def configure(self, time_series, n_components=None):
        """
        Store the input shape to be later used to estimate memory usage. Also
        create the algorithm instance.
        """
        self.input_shape = time_series.read_data_shape()
        log_debug_array(LOG, time_series, "time_series")

        ##-------------------- Fill Algorithm for Analysis -------------------##
        algorithm = fastICA()
        if n_components is not None:
            algorithm.n_components = n_components
        else:
            ## It will only work for Simulator results.
            algorithm.n_components = self.input_shape[2]
        self.algorithm = algorithm
Code example #46
 def configure(self, time_series, n_components=None):
     """
     Store the input shape to be later used to estimate memory usage. Also
     create the algorithm instance.
     """
     self.input_shape = time_series.read_data_shape()
     log_debug_array(LOG, time_series, "time_series")
     
     ##-------------------- Fill Algorithm for Analysis -------------------##
     algorithm = fastICA()
     if n_components is not None:
         algorithm.n_components = n_components
     else:
         ## It will only work for Simulator results.
         algorithm.n_components = self.input_shape[2]
     self.algorithm = algorithm
Code example #47
    def configure(self, time_series, t_start, t_end):
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

        :param time_series: the input time-series for which correlation coefficient should be computed
        :param t_start: the physical time interval start for the analysis
        :param t_end: physical time, interval end
        """
        if t_start >= t_end or t_start < 0:
            raise LaunchException("Can not launch operation without monitors selected !!!")

        shape_tuple = time_series.read_data_shape()
        self.input_shape = [shape_tuple[0], shape_tuple[1], shape_tuple[2], shape_tuple[3]]
        self.input_shape[0] = int((t_end - t_start) / time_series.sample_period)
        log_debug_array(LOG, time_series, "time_series")

        self.algorithm = CorrelationCoefficient(time_series=time_series, t_start=t_start, t_end=t_end)
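
`t_start` and `t_end` are physical times; the adapter converts the interval into a sample count by dividing by `sample_period`. A sketch of that conversion (a hypothetical helper, not part of the adapter):

    def time_to_index(t, sample_period):
        """Convert a physical time offset into a sample index."""
        return int(t / sample_period)

    # e.g. an interval of 2.0 s at sample_period = 0.5 s spans 4 samples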
Code example #48
File: ica_adapter.py, Project: wvangeit/framework_tvb
 def configure(self, time_series, n_components=None):
     """
     Store the input shape to be later used to estimate memory usage. Also
     create the algorithm instance.
     """
     self.input_shape = time_series.read_data_shape()
     log_debug_array(LOG, time_series, "time_series")
     
     ##-------------------- Fill Algorithm for Analysis -------------------##
     algorithm = fastICA()
     if n_components is not None:
         algorithm.n_components = n_components
     else:
         ## TODO LD: Is this correct? when no number is specified, the number of nodes is taken
         ## It will only work for Simulator results. SK: Correct, the same thing is already done in the Analyser.
         algorithm.n_components = self.input_shape[2]
     self.algorithm = algorithm
Code example #49
    def evaluate(self):
        """ 
        Coherence function.  Matplotlib.mlab implementation.
        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        data_shape = self.time_series.data.shape

        #(frequency, nodes, nodes, state-variables, modes)
        result_shape = (self.nfft // 2 + 1, data_shape[2], data_shape[2],
                        data_shape[1], data_shape[3])
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)

        #TODO: For region level, 4s, 2000Hz, this takes ~2min... (which is stupidly slow)
        #One inter-node coherence, across frequencies for each state-var & mode.
        for mode in range(data_shape[3]):
            for var in range(data_shape[1]):
                data = self.time_series.data[:, var, :, mode]
                data = data - data.mean(axis=0)[numpy.newaxis, :]
                #TODO: Work out a way around the 4 level loop,
                #TODO: coherence isn't directional, so, get rid of redundancy...
                for n1 in range(data_shape[2]):
                    for n2 in range(data_shape[2]):
                        cxy, freq = mlab.cohere(
                            data[:, n1],
                            data[:, n2],
                            NFFT=self.nfft,
                            Fs=self.time_series.sample_rate,
                            detrend=detrend_linear,
                            window=mlab.window_none)
                        result[:, n1, n2, var, mode] = cxy

        util.log_debug_array(LOG, result, "result")
        util.log_debug_array(LOG, freq, "freq")

        coherence = spectral.CoherenceSpectrum(source=self.time_series,
                                               nfft=self.nfft,
                                               array_data=result,
                                               frequency=freq,
                                               use_storage=False)

        return coherence
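
Regarding the redundancy TODO: coherence is symmetric in (n1, n2), so roughly half the inner calls can be skipped by computing the upper triangle and mirroring it. A sketch, assuming the same names (`self`, `data`, `data_shape`, `result`, `var`, `mode`) as in the loop above:

    for n1 in range(data_shape[2]):
        for n2 in range(n1, data_shape[2]):
            cxy, freq = mlab.cohere(data[:, n1], data[:, n2],
                                    NFFT=self.nfft,
                                    Fs=self.time_series.sample_rate,
                                    detrend=detrend_linear,
                                    window=mlab.window_none)
            result[:, n1, n2, var, mode] = cxy
            result[:, n2, n1, var, mode] = cxy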
Code example #50
    def configure(self, time_series, sw, sp):
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.

        :param time_series: the input time-series for which fcd matrix should be computed
        :param sw: length of the sliding window
        :param sp: spanning time: the distance between two consecutive sliding windows
        """
        """
        Store the input shape to be later used to estimate memory usage. Also create the algorithm instance.
        """

        self.input_shape = time_series.read_data_shape()
        log_debug_array(self.log, time_series, "time_series")

        ##-------------------- Fill Algorithm for Analysis -------------------##

        self.algorithm = FcdCalculator(time_series=time_series, sw=sw, sp=sp)
Code example #51
    def compute_vertex_normals(self):
        """
        Estimates vertex normals, based on triangle normals weighted by the 
        angle they subtend at each vertex...
        """
        vert_norms = numpy.zeros((self.number_of_vertices, 3))
        for k in range(self.number_of_vertices):
            tri_list = list(self.vertex_triangles[k])
            angle_mask = self.triangles[tri_list, :] == k
            angles = self.triangle_angles[tri_list, :]
            angles = angles[angle_mask][:, numpy.newaxis]
            angle_scaling = angles / numpy.sum(angles, axis=0)
            vert_norms[k, :] = numpy.mean(angle_scaling * self.triangle_normals[tri_list, :], axis=0)
            #Scale by angle subtended.
            vert_norms[k, :] = vert_norms[k, :] / numpy.sqrt(numpy.sum(vert_norms[k, :] ** 2, axis=0))
            #Normalise to unit vectors.

        util.log_debug_array(LOG, vert_norms, "vertex_normals", owner=self.__class__.__name__)
        self.vertex_normals = vert_norms
Code example #52
 def evaluate(self):
     """
     Compute the temporal covariance between nodes in the time_series. 
     """
     cls_attr_name = self.__class__.__name__+".time_series"
     self.time_series.trait["data"].log_debug(owner = cls_attr_name)
     
     ts_shape = self.time_series.data.shape
     
     #Need more measurements than variables
     if ts_shape[0] < ts_shape[2]:
         msg = "PCA requires a longer timeseries (tpts > number of nodes)."
         LOG.error(msg)
         raise Exception(msg)
     
     #(nodes, nodes, state-variables, modes)
     weights_shape = (ts_shape[2], ts_shape[2], ts_shape[1], ts_shape[3])
     LOG.info("weights shape will be: %s" % str(weights_shape))
     
     fractions_shape = (ts_shape[2], ts_shape[1], ts_shape[3])
     LOG.info("fractions shape will be: %s" % str(fractions_shape))
     
     weights = numpy.zeros(weights_shape)
     fractions = numpy.zeros(fractions_shape)
     
     #One inter-node temporal covariance matrix for each state-var & mode.
     for mode in range(ts_shape[3]):
         for var in range(ts_shape[1]):
             data = self.time_series.data[:, var, :, mode]
             data_pca = mlab.PCA(data)
             fractions[:, var, mode] = data_pca.fracs
             weights[:, :, var, mode] = data_pca.Wt
     
     util.log_debug_array(LOG, fractions, "fractions")
     util.log_debug_array(LOG, weights, "weights")
     
     pca_result = mode_decompositions.PrincipalComponents(
         source = self.time_series,
         fractions = fractions,
         weights = weights,
         use_storage = False)
     
     return pca_result
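
`matplotlib.mlab.PCA` was removed in later matplotlib releases. A NumPy-only sketch of a comparable per-slice decomposition (note that mlab.PCA additionally standardizes each column by default), assuming `data` has shape (tpts, nodes):

    centered = data - data.mean(axis=0)
    u, s, vt = numpy.linalg.svd(centered, full_matrices=False)
    fracs = s ** 2 / numpy.sum(s ** 2)  # variance fraction per component, like data_pca.fracs
    weights = vt                        # rows are component weights, like data_pca.Wt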
Code example #53
 def evaluate(self):
     """ 
     Coherence function.  Matplotlib.mlab implementation.
     """
     cls_attr_name = self.__class__.__name__+".time_series"
     self.time_series.trait["data"].log_debug(owner = cls_attr_name)
     
     data_shape = self.time_series.data.shape
     
     #(frequency, nodes, nodes, state-variables, modes)
      result_shape = (self.nfft // 2 + 1, data_shape[2], data_shape[2], data_shape[1], data_shape[3])
     LOG.info("result shape will be: %s" % str(result_shape))
     
     result = numpy.zeros(result_shape)
     
     #TODO: For region level, 4s, 2000Hz, this takes ~2min... (which is stupidly slow) 
     #One inter-node coherence, across frequencies for each state-var & mode.
     for mode in range(data_shape[3]):
         for var in range(data_shape[1]):
             data = self.time_series.data[:, var, :, mode]
             data = data - data.mean(axis=0)[numpy.newaxis, :]
             #TODO: Work out a way around the 4 level loop,
             #TODO: coherence isn't directional, so, get rid of redundancy...
             for n1 in range(data_shape[2]):
                 for n2 in range(data_shape[2]):
                     cxy, freq = mlab.cohere(data[:, n1], data[:, n2],
                                             NFFT = self.nfft,
                                             Fs = self.time_series.sample_rate,
                                             detrend = detrend_linear,
                                             window = mlab.window_none)
                     result[:, n1, n2, var, mode] = cxy
     
     util.log_debug_array(LOG, result, "result")
     util.log_debug_array(LOG, freq, "freq")
     
     coherence = spectral.CoherenceSpectrum(source = self.time_series,
                                            nfft = self.nfft,
                                            array_data = result,
                                            frequency = freq,
                                            use_storage = False)
     
     return coherence
Code example #54
    def _find_triangle_angles(self):
        """
        Calculates the inner angles of all the triangles which make up a surface
        """
        verts = self.vertices
        # TODO: Should be possible with arrays, ie not nested loops...
        # (this was a direct translation of some old matlab code)
        angles = numpy.zeros((self.number_of_triangles, 3))
        for tt in range(self.number_of_triangles):
            triangle = self.triangles[tt, :]
            for ta in range(3):
                ang = numpy.roll(triangle, -ta)
                angles[tt, ta] = numpy.arccos(numpy.dot(
                    (verts[ang[1], :] - verts[ang[0], :]) /
                    numpy.sqrt(numpy.sum((verts[ang[1], :] - verts[ang[0], :]) ** 2, axis=0)),
                    (verts[ang[2], :] - verts[ang[0], :]) /
                    numpy.sqrt(numpy.sum((verts[ang[2], :] - verts[ang[0], :]) ** 2, axis=0))))

        util.log_debug_array(LOG, angles, "triangle_angles", owner=self.__class__.__name__)
        return angles
Code example #55
    def evaluate(self):
        """
        Compute the correlation coefficients of a 2D array (tpts x nodes).
        Yields an array of size nodes x nodes x state-variables x modes.

        The time interval over which the correlation coefficients are computed 
        is defined by t_start, t_end

        """
        cls_attr_name = self.__class__.__name__ + ".time_series"
        self.time_series.trait["data"].log_debug(owner=cls_attr_name)

        #(nodes, nodes, state-variables, modes)
        input_shape = self.time_series.read_data_shape()
        result_shape = self.result_shape(input_shape)
        LOG.info("result shape will be: %s" % str(result_shape))

        result = numpy.zeros(result_shape)


        t_lo = int((1. / self.time_series.sample_period) * (self.t_start - self.time_series.sample_period))
        t_hi = int((1. / self.time_series.sample_period) * (self.t_end - self.time_series.sample_period))
        t_lo = max(t_lo, 0)
        t_hi = min(t_hi, input_shape[0])

        #One correlation coeff matrix, for each state-var & mode.
        for mode in range(result_shape[3]):
            for var in range(result_shape[2]):
                current_slice = tuple([slice(t_lo, t_hi + 1), slice(var, var + 1),
                                       slice(input_shape[2]), slice(mode, mode + 1)])
                data = self.time_series.read_data_slice(current_slice).squeeze()
                result[:, :, var, mode] = numpy.corrcoef(data.T)


        util.log_debug_array(LOG, result, "result")

        corr_coeff = graph.CorrelationCoefficients(source=self.time_series,
                                                   array_data=result,
                                                   use_storage=False)
        return corr_coeff
Code example #56
 def evaluate(self):
     """
     Cross-correlate two one-dimensional arrays.
     """
     cls_attr_name = self.__class__.__name__+".time_series"
     self.time_series.trait["data"].log_debug(owner = cls_attr_name)
     
     #(tpts, nodes, nodes, state-variables, modes)
     result_shape = self.result_shape(self.time_series.data.shape)
     LOG.info("result shape will be: %s" % str(result_shape))
     
     result = numpy.zeros(result_shape)
     
     #TODO: For region level, 4s, 2000Hz, this takes ~3hours... (which makes node_coherence seem positively speedy...)
     #TODO: Probably best to add a keyword for offsets, so we just compute +- some "small" range...
     #One inter-node correlation, across offsets, for each state-var & mode.
     for mode in range(result_shape[4]):
         for var in range(result_shape[3]):
             data = self.time_series.data[:, var, :, mode]
             data = data - data.mean(axis=0)[numpy.newaxis, :]
             #TODO: Work out a way around the 4 level loop,
             for n1 in range(result_shape[1]):
                 for n2 in range(result_shape[2]):
                     result[:, n1, n2, var, mode] = correlate(data[:, n1],
                                                              data[:, n2],
                                                              mode="same")
     
     util.log_debug_array(LOG, result, "result")
     
     offset = (self.time_series.sample_period *
               numpy.arange(-(numpy.floor(result_shape[0] / 2.0)),
                            numpy.ceil(result_shape[0] / 2.0)))
     
     cross_corr = temporal_correlations.CrossCorrelation(
         source = self.time_series,
         array_data = result,
         time = offset,
         use_storage = False)
     
     return cross_corr
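
The TODOs above flag the cost of the four-level loop. Assuming `correlate` here is scipy.signal.correlate with mode="same", the two node loops can be collapsed with one FFT per node. This is a sketch, not the library's implementation, and it trades the loops for O(tpts * nodes**2) memory:

import numpy

def pairwise_cross_correlation(data):
    """All-pairs cross-correlation of (tpts, nodes) data, de-meaned along axis 0.

    Returns a (tpts, nodes, nodes) array whose [:, n1, n2] slice matches
    scipy.signal.correlate(data[:, n1], data[:, n2], mode="same").
    """
    tpts = data.shape[0]
    L = 2 * tpts - 1                     # zero-pad to avoid circular wrap-around
    F = numpy.fft.fft(data, n=L, axis=0)
    # r[:, i, j] is the circular cross-correlation of node i with node j.
    r = numpy.fft.ifft(F[:, :, None] * numpy.conj(F[:, None, :]), axis=0).real
    # Reorder lags to -(tpts-1)..(tpts-1), then keep the central tpts ("same").
    full = numpy.concatenate((r[L - (tpts - 1):], r[:tpts]), axis=0)
    lo = (tpts - 1) // 2
    return full[lo:lo + tpts]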
Code Example #57
    def evaluate(self):
        """
        Calculate the FFT, Cross Coherence and Complex Coherence of time_series 
        broken into (possibly) epochs and segments of length `epoch_length` and 
        `segment_length` respectively, filtered by `window_function`.
        """
        cls_attr_name = self.__class__.__name__+".time_series"
        self.time_series.trait["data"].log_debug(owner = cls_attr_name)
        tpts = self.time_series.data.shape[0]
        time_series_length = tpts * self.time_series.sample_period
        
        if len(self.time_series.data.shape) > 2:
            time_series_data = numpy.squeeze((self.time_series.data.mean(axis=-1)).mean(axis=1))
        else:
            #Assume the data is already 2D: (tpts, nchan).
            time_series_data = self.time_series.data
        
        #nchan = time_series_data.shape[1]
        
        #NOTE: if we get a projection matrix ... then ...
        #if self.npat > 1: 
        #    data = data * proj
        #    nchan = self.npat
        
        #Divide time-series into epochs, no overlapping
        if self.epoch_length > 0.0:
            nepochs = int(numpy.floor(time_series_length / self.epoch_length))
            epoch_tpts = int(self.epoch_length / self.time_series.sample_period)
            time_series_length = self.epoch_length
            tpts = epoch_tpts
        else:
            self.epoch_length = time_series_length
            nepochs = int(numpy.ceil(time_series_length / self.epoch_length))  # i.e. 1
            epoch_tpts = tpts
            
        #Segment time-series, overlapping if necessary
        nseg = int(numpy.floor(time_series_length / self.segment_length))
        if nseg > 1:
            seg_tpts = int(self.segment_length / self.time_series.sample_period)
            seg_shift_tpts = int(self.segment_shift / self.time_series.sample_period)
            nseg = int(numpy.floor((tpts - seg_tpts) / seg_shift_tpts) + 1)
        else:
            nseg = 1
            self.segment_length = time_series_length
            seg_tpts = time_series_data.shape[0]
            seg_shift_tpts = seg_tpts

        # Frequency vectors
        freqs = numpy.fft.fftfreq(int(seg_tpts))
        nfreq = int(numpy.min([self.max_freq, numpy.floor((seg_tpts + self.zeropad) / 2.0) + 1]))
        freqs = freqs[0:nfreq] * (1.0 / self.time_series.sample_period)
        
        
        result_shape, av_result_shape = self.result_shape(
                                        self.time_series.data.shape, 
                                        self.max_freq, 
                                        self.epoch_length, 
                                        self.segment_length, 
                                        self.segment_shift, 
                                        self.time_series.sample_period, 
                                        self.zeropad, 
                                        self.average_segments)
        
        cs = numpy.zeros(result_shape, dtype=numpy.complex128)
        av = numpy.matrix(numpy.zeros(av_result_shape, dtype=numpy.complex128))
        coh = numpy.zeros(result_shape, dtype=numpy.complex128)

        # NOTE: result for individual epochs are kept only if npat > 1. Skipping ...
        #if self.npat > 1:
        #    if not self.average_segments:
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #        av = numpy.zeros((nchan, nfreq, nepochs, nseg), dtype=numpy.complex128)
        #    else:
        #        av = numpy.zeros((nchan, nfreq, nepochs), dtype=numpy.complex128)
        #        cs = numpy.zeros((nchan, nchan, nfreq, nepochs), dtype=numpy.complex128) 
  


        #Apply windowing function, defaulting to no window (all ones)
        window_mask = numpy.ones((seg_tpts, time_series_data.shape[1]))
        if self.window_function is not None:
            if self.window_function not in SUPPORTED_WINDOWING_FUNCTIONS:
                LOG.error("Windowing function is: %s" % self.window_function)
                LOG.error("Must be in: %s" % str(SUPPORTED_WINDOWING_FUNCTIONS))

            window_function = getattr(numpy, self.window_function)
            win = window_function(seg_tpts)
            window_mask = (numpy.kron(numpy.ones((time_series_data.shape[1], 1)), win)).T

        nave = 0
    
        for j in numpy.arange(nepochs):
            data = time_series_data[j*epoch_tpts:(j+1)*epoch_tpts, :]
        
            for i in numpy.arange(nseg): #average over all segments;
                time_series = data[i*seg_shift_tpts: i*seg_shift_tpts + seg_tpts, :]
            
                if self.detrend_ts:
                    time_series = sp_signal.detrend(time_series, axis=0)
            
                datalocfft = numpy.fft.fft(time_series * window_mask, axis=0)
                datalocfft = numpy.matrix(datalocfft)
            
                for f in numpy.arange(nfreq): #for all frequencies
                    if self.npat == 1:
                        if not self.average_segments:
                            cs[:, :, f, i] += numpy.conjugate(
                                              datalocfft[f, :].conj().T * \
                                              datalocfft[f, :])
                            av[:, f, i] += numpy.conjugate(datalocfft[f, :].conj().T)
                        else:
                            cs[:, :, f] += numpy.conjugate(
                                           datalocfft[f,:].conj().T * \
                                           datalocfft[f, :])
                            av[:, f] += numpy.conjugate(datalocfft[f, :].conj().T)
                    else:
                        if not self.average_segments:
                            cs[:, :, f, j, i] = numpy.conjugate(
                                                datalocfft[f, :].conj().T * \
                                                datalocfft[f, :])
                            av[:, f, j, i] = numpy.conjugate(datalocfft[f, :].conj().T)  
                        else:
                            cs[:, :, f, j] += numpy.conjugate(
                                              datalocfft[f, :].conj().T * \
                                              datalocfft[f, :])
                            av[:, f, j] += numpy.conjugate(datalocfft[f, :].conj().T)
                del datalocfft
          
            nave += 1.0
        
        # End of FORs
        if not self.average_segments:
            cs = cs / nave
            av = av / nave
        else:
            nave = nave * nseg
            cs = cs / nave
            av = av / nave
        
        # Subtract average
        for f in numpy.arange(nfreq):
            if self.subtract_epoch_average:
                if self.npat == 1:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            cs[:, :, f, i] = cs[:, :, f, i] - av[:, f, i] *  av[:, f, i].conj().T
                    else:
                        cs[:, :, f] = cs[:, :, f] - av[:, f] * av[:, f].conj().T
                else:
                    if not self.average_segments:
                        for i in numpy.arange(nseg):
                            for j in numpy.arange(nepochs):
                                cs[:, :, f, j, i] = cs[:, :, f, j, i] - av[:, f, j, i] * av[:, f, j, i].conj().T
                            
                    else: 
                        for j in numpy.arange(nepochs):
                            cs[:, :, f, j] = cs[:, :, f, j] - av[:, f, j] * av[:, f, j].conj().T
        
        #Compute Complex Coherence        
        ndim = len(cs.shape)
        if ndim == 3:
            for i in numpy.arange(cs.shape[2]):
                temp = numpy.matrix(cs[:, :, i])
                coh[:, :, i] = cs[:, :, i] / numpy.sqrt((temp.diagonal().conj().T) * temp.diagonal())
            
        elif ndim == 4:
            for i in numpy.arange(cs.shape[2]):
                for j in numpy.arange(cs.shape[3]):
                    temp = numpy.matrix(numpy.squeeze(cs[:, :, i, j]))
                    coh[:, :, i, j] = temp / numpy.sqrt((temp.diagonal().conj().T) * temp.diagonal())
        
        
        util.log_debug_array(LOG, cs, "result")
        spectra = spectral.ComplexCoherenceSpectrum(source = self.time_series,
                                  array_data = coh,
                                  cross_spectrum = cs,
                                  #frequency = freqs,
                                  epoch_length = self.epoch_length,
                                  segment_length = self.segment_length,
                                  windowing_function = self.window_function,
                                  #fft_points = seg_tpts,
                                  use_storage = False)
        return spectra
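
To make the epoch/segment bookkeeping above concrete, a worked example with made-up numbers: 4 s sampled every 2 ms, 1 s epochs, 0.5 s segments shifted by 0.25 s.

sample_period = 2.0                                   # ms
tpts = 2000
time_series_length = tpts * sample_period             # 4000 ms
epoch_length, segment_length, segment_shift = 1000.0, 500.0, 250.0   # ms

nepochs = int(time_series_length // epoch_length)     # 4 epochs
epoch_tpts = int(epoch_length / sample_period)        # 500 points per epoch
seg_tpts = int(segment_length / sample_period)        # 250 points per segment
seg_shift_tpts = int(segment_shift / sample_period)   # 125 points
nseg = int((epoch_tpts - seg_tpts) / seg_shift_tpts) + 1   # 3 overlapping segments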
Code Example #58
    def evaluate(self):
        """
        Compute the independent sources 
        """
        cls_attr_name = self.__class__.__name__+".time_series"
        self.time_series.trait["data"].log_debug(owner = cls_attr_name)
        
        ts_shape = self.time_series.data.shape
        
        #Need more observations than variables
        if ts_shape[0] < ts_shape[2]:
            msg = "ICA requires a longer timeseries (tpts > number of nodes)."
            LOG.error(msg)
            raise Exception(msg)

        #Default to one component per node, before validating n_components
        if self.n_components is None:
            self.n_components = ts_shape[2]

        #Need at least as many variables as components
        if self.n_components > ts_shape[2]:
            msg = "ICA requires more variables than components to extract (number of nodes > number of components)."
            LOG.error(msg)
            raise Exception(msg)
        
        #(n_components, n_components, state-variables, modes) --  unmixing matrix
        unmixing_matrix_shape = (self.n_components, self.n_components, ts_shape[1], ts_shape[3])
        LOG.info("unmixing matrix shape will be: %s" % str(unmixing_matrix_shape))
        
        # (n_components, nodes, state_variables, modes) -- prewhitening matrix
        prewhitening_matrix_shape = (self.n_components, ts_shape[2], ts_shape[1], ts_shape[3])
        LOG.info("prewhitening matrix shape will be: %s" % str(prewhitening_matrix_shape))
        
        
        unmixing_matrix = numpy.zeros(unmixing_matrix_shape)
        prewhitening_matrix = numpy.zeros(prewhitening_matrix_shape)
        
        
        #(tpts, n_components, state_variables, modes) -- unmixed sources time series
        data_ica = numpy.zeros((ts_shape[0], self.n_components, ts_shape[1], ts_shape[3]))
        
        #One un/mixing matrix for each state-var & mode.
        for mode in range(ts_shape[3]):
            for var in range(ts_shape[1]):
                # Assumes data must be whitened
                ica = fastica(self.time_series.data[:, var, :, mode], 
                                            n_components = self.n_components,
                                            whiten = True)
                # unmixed sources - component_time_series
                data_ica[:, :, var, mode] = ica[2]
                # prewhitening matrix
                prewhitening_matrix[:, :, var, mode] = ica[0]
                # unmixing matrix
                unmixing_matrix[:, :, var, mode] = ica[1]
        
        util.log_debug_array(LOG, prewhitening_matrix, "whitening_matrix")
        util.log_debug_array(LOG, unmixing_matrix, "unmixing_matrix")

        
        ica_result = mode_decompositions.IndependentComponents(source = self.time_series,
                                         component_time_series = data_ica, 
                                         #mixing_matrix = mixing_matrix,
                                         prewhitening_matrix = prewhitening_matrix,
                                         unmixing_matrix = unmixing_matrix,
                                         n_components = self.n_components, 
                                         use_storage = False)
        
        return ica_result
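
The triple unpacked from `fastica` matches scikit-learn's sklearn.decomposition.fastica, which returns the prewhitening matrix K, the unmixing matrix W, and the sources S. A standalone sketch on synthetic data, assuming that library is in play (recent versions take whiten="unit-variance" where older ones took whiten=True):

import numpy
from sklearn.decomposition import fastica

rng = numpy.random.default_rng(0)
t = numpy.linspace(0, 8, 2000)
S_true = numpy.c_[numpy.sin(2 * t), numpy.sign(numpy.sin(3 * t))]  # two sources
A = numpy.array([[1.0, 0.5], [0.5, 2.0]])                          # mixing matrix
X = S_true @ A.T + 0.01 * rng.standard_normal((2000, 2))           # observations

# K: prewhitening, W: unmixing, S: recovered sources (tpts x n_components)
K, W, S = fastica(X, n_components=2, whiten="unit-variance")
print(K.shape, W.shape, S.shape)   # (2, 2) (2, 2) (2000, 2)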