Example #1
    def generate_affine_backtransformation(self):
        """ Generate synthetic examples and test them to determine transformation

        This is the key method!
        """
        if type(self.example) == FeatureVector:
            testsample = FeatureVector.replace_data(
                self.example, numpy.zeros(self.example.shape))
            self.offset = numpy.longdouble(self._execute(testsample))
            self.trafo = FeatureVector.replace_data(
                self.example, numpy.zeros(self.example.shape))
            for j in range(len(self.example.feature_names)):
                testsample = FeatureVector.replace_data(
                    self.example, numpy.zeros(self.example.shape))
                testsample[0][j] = 1.0
                self.trafo[0][j] = \
                    numpy.longdouble(self._execute(testsample) - self.offset)
        elif type(self.example) == TimeSeries:
            testsample = TimeSeries.replace_data(
                self.example, numpy.zeros(self.example.shape))
            self.offset = numpy.longdouble(
                numpy.squeeze(self._execute(testsample)))
            self.trafo = TimeSeries.replace_data(
                self.example, numpy.zeros(self.example.shape))
            for i in range(self.example.shape[0]):
                for j in range(self.example.shape[1]):
                    testsample = TimeSeries.replace_data(
                        self.example, numpy.zeros_like(self.example))
                    testsample[i][j] = 1.0
                    self.trafo[i][j] = \
                        numpy.longdouble(numpy.squeeze(self._execute(testsample))
                                       - self.offset)
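
The probing trick above generalizes to any affine map f(x) = A*x + b: evaluating f at the zero vector recovers the offset b, and evaluating it at each unit vector (then subtracting the offset) recovers the corresponding column of A. A minimal standalone sketch of the same idea, with a hypothetical function f standing in for self._execute:

    import numpy

    def f(x):
        # hypothetical affine map standing in for self._execute
        A = numpy.array([[2.0, -1.0], [0.5, 3.0]])
        b = numpy.array([1.0, -2.0])
        return A.dot(x) + b

    offset = f(numpy.zeros(2))                                # recovers b
    trafo = numpy.column_stack(
        [f(numpy.eye(2)[:, j]) - offset for j in range(2)])   # recovers A
    x = numpy.array([4.0, 7.0])
    assert numpy.allclose(trafo.dot(x) + offset, f(x))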
Example #3
 def setUp(self):
     self.test_data = numpy.zeros((128, 3))
     self.test_data[:,1] = numpy.ones(128)
     self.test_data[:,2] = numpy.random.random(128)
     
     self.test_time_series = TimeSeries(self.test_data, ["A","B", "C"], 64,
                                        start_time = 0, end_time = 2000)
Example #4
 def _execute(self, data):
     """ Apply the windowing to the given data and return the result """        
     #Create a window of the correct length for the given data
     if self.num_of_samples is None:
         self.num_of_samples = data.shape[0]
         self.create_window_array()
          
     data_array=data.view(numpy.ndarray)
     #Do the actual windowing
     # TODO: check if windowed_data = (self.window_array.T * data) works also???
     windowed_data = (self.window_array * data_array.T).T
     
     # Skip leading and trailing zeros of the window
     if self.window_has_zeros and self.reduce_window:
         windowed_data = windowed_data[
             range(self.window_not_equal_zero[0],
                   self.window_not_equal_zero[-1] + 1), :]
     
         result_time_series = TimeSeries.replace_data(data, windowed_data)
         
         # Adjust start and end time when chopping was done
         result_time_series.start_time = data.start_time + \
             self.window_not_equal_zero[0] * 1000.0 / data.sampling_frequency
         result_time_series.end_time = \
             data.end_time - (data.shape[0] - self.window_not_equal_zero[-1]
                              - 1) * 1000.0 / data.sampling_frequency
     else:
         result_time_series = TimeSeries.replace_data(data, windowed_data)
                 
     return result_time_series
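
Regarding the TODO above: (self.window_array * data_array.T).T scales each time sample (row) by the corresponding window coefficient; by numpy broadcasting it is equivalent to multiplying by the window reshaped to a column, no transposes needed. The TODO's (self.window_array.T * data) would broadcast the 1-D window across channels instead and is only correct after reshaping. A small sketch with assumed shapes (samples x channels):

    import numpy

    n_samples, n_channels = 4, 3
    data = numpy.arange(n_samples * n_channels, dtype=float).reshape(n_samples, n_channels)
    window = numpy.hanning(n_samples)        # any 1-D window of length n_samples

    a = (window * data.T).T                  # formulation used above
    b = data * window[:, numpy.newaxis]      # equivalent broadcast form
    assert numpy.allclose(a, b)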
Example #5
    def next(self, debug=False):
        """Return next labeled window when used in iterator context."""
        while len(self.cur_extract_windows) == 0:
            # fetch the next block from data_client
            if debug:
                print "reading next block"
            self._readnextblock()
            self._extract_windows_cur_block()
            if debug:
                print "  buffermarkers", self.buffermarkers
                print "  current block", self.samplebuf.get()[self.prebuflen][1, :]
                # print "  current extracted windows ", self.cur_extract_windows

        (windef_name, current_window, class_, start_time, end_time, markers_cur_win) = self.cur_extract_windows.pop(0)

        # TODO: Replace this by a decorator or something similar
        current_window = numpy.atleast_2d(current_window.transpose())
        current_window = TimeSeries(
            input_array=current_window,
            channel_names=self.data_client.channelNames,
            sampling_frequency=self.data_client.dSamplingInterval,
            start_time=start_time,
            end_time=end_time,
            name="Window extracted @ %d ms, length %d ms, class %s" % (start_time, end_time - start_time, class_),
            marker_name=markers_cur_win,
        )

        current_window.generate_meta()
        current_window.specs["sampling_frequency"] = self.data_client.dSamplingInterval
        current_window.specs["wdef_name"] = windef_name
        self.nwindow += 1

        # return (ndsamplewin, ndmarkerwin)
        return (current_window, class_)
Example #6
 def setUp(self):
     """Create some example data """
     # Create some TimeSeries:
     self.x1 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
                     marker_name='S4', name='Name_text ending with Standard',
                     start_time=1000.0, end_time=1004.0)
     
     self.x1.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
     self.x1.generate_meta() #automatically generate key and tag
                     
     self.x2 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
                     marker_name='S4', start_time=2000.0, end_time=2004.0, 
                     name='Name_text ending with Standard')
     
     #manually generate key and tag
     import uuid
     self.x2_key=uuid.uuid4()
     self.x2.key=self.x2_key
     self.x2.tag='Tag of x2'
     self.x2.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
                      
     self.x3 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,
                     marker_name='S4', start_time=3000.0, end_time=3004.0)
     
     self.x3.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
     self.x3.generate_meta()
     
     self.x4 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12,marker_name='S4')
     
     self.x4.specs={'Nice_Parameter': 1, 'Less_Nice_Param': '2'}
     
     self.x5 = TimeSeries([1,2], ['a','b'], 12)
     self.x5.inherit_meta_from(self.x2)
     
     self.x6 = TimeSeries([1,2,3,4,5,6], ['a','b','c','d','e','f'], 12)
     
     self.x6.specs={'Nice_Parameter': 11, 'Less_Nice_Param': '21'}
     self.x6.generate_meta()
     #save information
     self.x6_key=self.x6.key
     
     self.x6.inherit_meta_from(self.x2)
     
     self.some_nice_dict = {'guido': 4127, 'irv': 4127, 'jack': 4098}
     
     self.x6.add_to_history(self.x5, self.some_nice_dict)
     
     # Create some FeatureVectors:
     self.f1 = FeatureVector([1,2,3,4,5,6],['a','b','c','d','e','f'])
     
     self.f1.specs={'NiceParam':1,'LessNiceParam':2}
     
     self.f2 = FeatureVector([1,2,3,4,5,6],['a','b','c','d','e','f'], tag = 'Tag of f2')
     
     self.f2.specs={'NiceParam':1,'LessNiceParam':2}
     
     self.f3 = FeatureVector([1,2], ['a','b'])
     self.f3.inherit_meta_from(self.x2)
     self.f3.add_to_history(self.x5)
Example #7
 def test_generate_tag(self):
     self.assertEqual(
         TimeSeries._generate_tag(self.x1),
         'Epoch Start: 12004ms; End: 13004ms; Class: Standard')
     self.assertEqual(TimeSeries._generate_tag(self.x3),
                      'Epoch Start: 12004ms; End: 13004ms; Class: na')
     self.assertEqual(TimeSeries._generate_tag(self.x4), None)
     self.assertEqual(TimeSeries._generate_tag(self.x5),
                      'Epoch Start: 12004ms; End: nams; Class: na')
Example #8
    def testInheritAndAddStuff(self):
        """test inheritance of meta data from other objects"""
        # Inherit
        self.assertEqual(self.x5.tag, self.x2.tag)
        self.assertEqual(self.x5.key, self.x2.key)

        self.assertEqual(self.f3.tag, self.x2.tag)
        self.assertEqual(self.f3.key, self.x2.key)

        #Inherit

        #suppress warning of BaseData type and cast data back to numpy
        hist_x6 = self.x6.history[0].view(numpy.ndarray)
        data_x5 = self.x5.view(numpy.ndarray)

        # history
        self.assertEqual((hist_x6 == data_x5).all(), True)
        self.assertEqual(self.x6.history[0].key, self.x5.key)
        self.assertEqual(self.x6.history[0].tag, self.x5.tag)
        self.assertEqual(self.x6.history[0].specs['node_specs'],
                         self.some_nice_dict)

        hist_f3 = self.f3.history[0].view(numpy.ndarray)

        self.assertEqual((hist_f3 == data_x5).all(), True)
        self.assertEqual(self.f3.history[0].key, self.x5.key)
        self.assertEqual(self.f3.history[0].tag, self.x5.tag)

        #if key (and tag) were already set, these original values
        #have to be kept
        #
        self.assertEqual(self.x6.key, self.x6_key)
        self.assertEqual(self.x6.tag, self.x2.tag)

        self.x6.inherit_meta_from(self.f3)  #should not change tag and key

        self.assertEqual(self.x6.key, self.x6_key)
        self.assertEqual(self.x6.tag, self.x2.tag)

        #testing multiple histories
        x7 = TimeSeries([1, 2, 3, 4, 5, 6], ['a', 'b', 'c', 'd', 'e', 'f'],
                        12,
                        marker_name='S4')
        x7.add_to_history(self.x1)
        x7.add_to_history(self.x2)
        x7.add_to_history(self.x3)
        x7.add_to_history(self.x4)
        x7.add_to_history(self.x5)
        x7.add_to_history(self.x6)
        x7.add_to_history(self.x1)

        self.assertEqual(len(x7.history), 7)
        self.assertEqual(x7.history[0].key, x7.history[6].key)
        self.assertEqual(x7.history[5].history, [])
Example #9
    def _execute(self, data):
        # First check if all channels actually appear in the data

        # Determine the indices of the channels that are the basis for the
        # average reference.
        if not self.inverse:
            if self.avg_channels is None:
                self.avg_channels = data.channel_names
            channel_indices = [
                data.channel_names.index(channel_name)
                for channel_name in self.avg_channels
            ]
        else:
            channel_indices = [
                data.channel_names.index(channel_name)
                for channel_name in data.channel_names
                if channel_name not in self.avg_channels
            ]

        not_found_channels = \
            [channel_name for channel_name in self.avg_channels
             if channel_name not in data.channel_names]
        if not_found_channels:
            warnings.warn(
                "Couldn't find selected channel(s): %s. Ignoring." %
                not_found_channels, Warning)

        if self.old_ref is None:
            self.old_ref = 'avg'

        # Compute the actual data of the reference channel. This is the sum of all
        # channels divided by (the number of channels +1).
        ref_chen = -numpy.sum(data[:, channel_indices], axis=1) / (
            data.shape[1] + 1)
        ref_chen = numpy.atleast_2d(ref_chen).T
        # Reference all electrodes against average
        avg_referenced_data = data + ref_chen

        # Add average as new channel to the signal if enabled
        if self.keep_average:
            avg_referenced_data = numpy.hstack((avg_referenced_data, ref_chen))
            channel_names = data.channel_names + [self.old_ref]
            result_time_series = TimeSeries(avg_referenced_data, channel_names,
                                            data.sampling_frequency,
                                            data.start_time, data.end_time,
                                            data.name, data.marker_name)
        else:
            result_time_series = TimeSeries.replace_data(
                data, avg_referenced_data)

        return result_time_series
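
The (number of channels + 1) divisor accounts for the implicit old reference channel: after appending the computed average channel, the n+1 channels sum to zero, i.e. the data are referenced against their common average. A short sketch on a plain array, assuming all channels enter the average:

    import numpy

    data = numpy.random.randn(100, 8)              # samples x channels
    n_channels = data.shape[1]
    ref = -numpy.sum(data, axis=1, keepdims=True) / (n_channels + 1)
    rereferenced = data + ref

    with_ref = numpy.hstack((rereferenced, ref))   # keep_average=True case
    assert numpy.allclose(with_ref.sum(axis=1), 0.0)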
Example #10
    def _execute(self, data):
        """ Perform a shift and normalization according
        (whole_data - mean(specific_samples)) / std(specific_samples)
        """
        if self.devariance:
            # code copy from LocalStandardizationNode
            std = numpy.std(data[self.subset],axis=0)
            std = check_zero_division(self, std, tolerance=10**-15, data_ts=data)

            return TimeSeries.replace_data(data,
                        (data-numpy.mean(data[self.subset], axis=0)) / std)
        else:
            return TimeSeries.replace_data(data, \
                        data-numpy.mean(data[self.subset], axis=0))
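
A standalone sketch of the same shift/scale on a plain array, with a crude guard standing in for check_zero_division; the subset is assumed to be, e.g., a baseline interval:

    import numpy

    data = numpy.random.randn(200, 4)        # samples x channels
    subset = slice(0, 50)                    # assumed baseline samples
    mean = numpy.mean(data[subset], axis=0)
    std = numpy.std(data[subset], axis=0)
    std[std < 1e-15] = 1.0                   # crude stand-in for check_zero_division
    normalized = (data - mean) / std         # (whole_data - mean(subset)) / std(subset)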
Example #11
    def _execute(self, data, n=None):
        """ Execute learned transformation on *data*.
        
        Projects the given data to the axis of the most significant
        eigenvectors and returns the data in this lower-dimensional subspace.
        """
        # 'INITIALIZATION'
        if self.retained_channels is None:
            self.retained_channels = data.shape[1]
        if n is None:
            n = self.retained_channels
        if self.channel_names is None:
            self.channel_names = data.channel_names
        if len(self.channel_names) < self.retained_channels:
            self.retained_channels = len(self.channel_names)
            self._log(
                "Too many channels chosen for the retained channels! Replaced by maximum number.",
                level=logging.CRITICAL)
        if self.output_dim != self.retained_channels:
            # overwrite internal output_dim variable, since it is set wrong
            self._output_dim = self.retained_channels

        # 'Real' Processing
        #projected_data = super(PCANodeWrapper, self)._execute(data, n)
        x = data.view(numpy.ndarray)
        projected_data = mult(x - self.avg, self.v[:, :self.retained_channels])

        if self.new_channel_names is None:
            self.new_channel_names = [
                "pca%03d" % i for i in range(projected_data.shape[1])
            ]
        return TimeSeries(projected_data, self.new_channel_names,
                          data.sampling_frequency, data.start_time,
                          data.end_time, data.name, data.marker_name)
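
The projection step is a centered matrix product with the leading eigenvectors of the channel covariance. A sketch of how self.avg and self.v could be obtained and applied, assuming plain numpy in place of the node's training logic:

    import numpy

    X = numpy.random.randn(500, 6)                  # samples x channels
    avg = X.mean(axis=0)
    eigvals, eigvecs = numpy.linalg.eigh(numpy.cov(X - avg, rowvar=False))
    v = eigvecs[:, numpy.argsort(eigvals)[::-1]]    # sort by descending variance
    retained_channels = 2
    projected = (X - avg).dot(v[:, :retained_channels])  # same form as mult(x - avg, v[:, :k])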
Example #12
    def generate_normalized_test_data(self,
                                      channels,
                                      time_points,
                                      function,
                                      sampling_frequency,
                                      initial_phase=0.0):
        """
        A method which generates a normalized (mu = 0, sigma =1) signal for testing, with
        the specified number of "channels" which are all generated using the given function
        """

        #Generate an empty ndarray
        data = numpy.zeros((time_points, channels))

        #Compute the values for all channels
        for channel_index in range(channels):
            for time_index in range(time_points):
                data[time_index, channel_index] = function(
                    2.0 * numpy.pi * (channel_index + 1) *
                    (time_index / sampling_frequency + initial_phase))
            current_channel = data[:, channel_index]
            current_channel = (current_channel - pylab.mean(current_channel)
                               ) / pylab.std(current_channel)
            data[:, channel_index] = current_channel

        #Generate a time series built from the data
        test_data = TimeSeries(
            input_array=data,
            channel_names=[("test_channel_%s" % i) for i in range(channels)],
            sampling_frequency=sampling_frequency,
            start_time=initial_phase,
            end_time=float(time_points) / sampling_frequency + initial_phase)

        return test_data
Example #13
    def merge_time_series(self, input_collection):
        """ Merges all timeseries of the input_collection to one big timeseries """
        # Retrieve the time series from the input_collection
        input_timeseries = input_collection.get_data(0, 0, 'test')
        # Get the data from the first timeseries
        output_data = input_timeseries[0][0].get_data()
        # Change the endtime of the first timeseries to the one of the last
        # timeseries inside the input_collection
        input_timeseries[0][0].end_time = input_timeseries[-1][0].end_time
        # For all the remaining timeseries
        for ts in input_timeseries[1:]:
            # Concatenate the data...
            output_data = numpy.vstack((output_data, ts[0].get_data()))
            # ... and add the marker to the first timeseries
            if len(ts[0].marker_name) > 0:
                for k in ts[0].marker_name:
                    if k not in input_timeseries[0][0].marker_name:
                        input_timeseries[0][0].marker_name[k] = []
                    for time in ts[0].marker_name[k]:
                        input_timeseries[0][0].marker_name[k].append(
                            time + ts[0].start_time +
                            input_timeseries[0][0].start_time)
        # Use the meta information from the first timeseries e.g. marker start/end_time
        # and create a new timeseries with the concatenated data
        merged_time_series = TimeSeries.replace_data(input_timeseries[0][0],
                                                     output_data)
        # Change the name of the merged_time_series
        merged_time_series.name = "%s, length %d ms, %s" % (merged_time_series.name.split(',')[0], \
                                                            (len(merged_time_series)*1000.0)/merged_time_series.sampling_frequency,\
                                                            merged_time_series.name.split(',')[-1])

        return merged_time_series
Example #14
    def test_specgram_band_pass(self):
        time_points = 10000
        sampling_frequency = 100.0
        data = numpy.zeros((time_points,1))
        for time_index in range(time_points):
            data[time_index,0] = numpy.sin(2.0 * numpy.pi * 5.0 * (time_index / sampling_frequency))
            data[time_index,0] += numpy.sin(2.0 * numpy.pi * 15.0 * (time_index / sampling_frequency))
            data[time_index,0] += numpy.sin(2.0 * numpy.pi * 25.0 * (time_index / sampling_frequency))
            data[time_index,0] += numpy.sin(2.0 * numpy.pi * 35.0 * (time_index / sampling_frequency))
            data[time_index,0] += numpy.sin(2.0 * numpy.pi * 45.0 * (time_index / sampling_frequency))

        pass_band=(20.,30.)

        #Generate a time series built from the data
        from pySPACE.resources.data_types.time_series import TimeSeries
        test_data = TimeSeries(input_array = data,
                               channel_names = ["test_channel_1"],
                               sampling_frequency = sampling_frequency,
                               start_time = 0,
                               end_time = float(time_points) / sampling_frequency)

        lpf_node = filtering.FFTBandPassFilterNode(pass_band=pass_band)
        filtered_time_series = lpf_node.execute(test_data)

        lpf_node_fir = filtering.FIRFilterNode(pass_band=pass_band)
        filtered_time_series_fir = lpf_node_fir.execute(test_data)

        lpf_node_fir2 = filtering.FIRFilterNode(pass_band=pass_band,window='hann')
        filtered_time_series_fir2 = lpf_node_fir2.execute(test_data)

        lpf_node_iir = filtering.IIRFilterNode(pass_band=pass_band,stop_band_rifle=90)
        filtered_time_series_iir = lpf_node_iir.execute(test_data)
Example #15
    def _execute(self, data):
        """ Execute learned transformation on *data*."""
        # We must have computed the projection matrix
        assert self.filters is not None

        if self.retained_channels is None:
            self.retained_channels = data.shape[1]
        if self.channel_names is None:
            self.channel_names = data.channel_names

        if len(self.channel_names) < self.retained_channels:
            self.retained_channels = len(self.channel_names)
            self._log(
                "Too many channels chosen for the retained channels! Replaced by maximum number.",
                level=logging.CRITICAL)
        # Project the data using the learned FDA
        projected_data = numpy.dot(data,
                                   self.filters[:, :self.retained_channels])
        if self.new_channel_names is None:
            self.new_channel_names = [
                "fda%03d" % i for i in range(self.retained_channels)
            ]

        return TimeSeries(projected_data, self.new_channel_names,
                          data.sampling_frequency, data.start_time,
                          data.end_time, data.name, data.marker_name)
Example #16
    def test_specgram_low_pass(self):
        time_points = 10000
        sampling_frequency = 100.0
        data = numpy.zeros((time_points, 1))
        for time_index in range(time_points):
            data[time_index, 0] = numpy.sin(2.0 * numpy.pi * 5.0 *
                                            (time_index / sampling_frequency))
            data[time_index, 0] += numpy.sin(2.0 * numpy.pi * 15.0 *
                                             (time_index / sampling_frequency))
            data[time_index, 0] += numpy.sin(2.0 * numpy.pi * 25.0 *
                                             (time_index / sampling_frequency))
            data[time_index, 0] += numpy.sin(2.0 * numpy.pi * 35.0 *
                                             (time_index / sampling_frequency))
            data[time_index, 0] += numpy.sin(2.0 * numpy.pi * 45.0 *
                                             (time_index / sampling_frequency))

        #Generate a time series built from the data
        from pySPACE.resources.data_types.time_series import TimeSeries
        test_data = TimeSeries(input_array=data,
                               channel_names=["test_channel_1"],
                               sampling_frequency=sampling_frequency,
                               start_time=0,
                               end_time=float(time_points) /
                               sampling_frequency)

        lpf_node = filtering.SimpleLowPassFilterNode(cutoff_frequency=20.0)
        filtered_time_series = lpf_node.execute(test_data)

        lpf_node_fir = filtering.FIRFilterNode([20.0])
        filtered_time_series_fir = lpf_node_fir.execute(test_data)
Example #17
    def _execute(self, x):
        """ Compute the energy of the given signal x using the TKEO """
        #Determine the indices of the channels which will be filtered
        #Done only once...
        if self.selected_channel_indices is None:
            self.selected_channels = self.selected_channels \
                if self.selected_channels is not None else x.channel_names
            self.selected_channel_indices = [x.channel_names.index(channel_name)
                                             for channel_name in self.selected_channels]
            self.old_data = numpy.zeros((2, len(self.selected_channel_indices)))

        filtered_data = numpy.zeros(x.shape)
        channel_counter = -1
        for channel_index in self.selected_channel_indices:
            channel_counter += 1
            for i in range(len(x)):
                if i == 0:
                    filtered_data[i][channel_index] = \
                        math.pow(self.old_data[1][channel_counter], 2) - (
                        self.old_data[0][channel_counter] * x[0][channel_index])
                elif i == 1:
                    filtered_data[i][channel_index] = \
                        math.pow(x[0][channel_index], 2) - (
                        self.old_data[1][channel_counter] * x[1][channel_index])
                else:
                    filtered_data[i][channel_index] = \
                        math.pow(x[i-1][channel_index], 2) - (
                        x[i-2][channel_index] * x[i][channel_index])
            self.old_data[0][channel_counter] = x[-2][channel_index]
            self.old_data[1][channel_counter] = x[-1][channel_index]
        result_time_series = TimeSeries.replace_data(x, filtered_data)

        return result_time_series
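
The loop implements a one-sample-delayed Teager-Kaiser energy operator, psi[n] = x[n-1]^2 - x[n-2]*x[n], with the last two samples of the previous block carried as state in old_data. For a single channel, and ignoring block boundaries, a vectorized equivalent would be:

    import numpy

    def tkeo_delayed(x):
        # psi[n] = x[n-1]**2 - x[n-2]*x[n], as in the loop above (i >= 2)
        psi = numpy.zeros_like(x, dtype=float)
        psi[2:] = x[1:-1] ** 2 - x[:-2] * x[2:]
        return psi  # psi[0], psi[1] come from the carried old_data in the node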
Example #18
    def _execute(self, x):
        """ Apply low pass filter to data x and return the result """
        #Determine the indices of the channels which will be filtered
        selected_channel_names = self.selected_channels \
                           if self.selected_channels is not None else x.channel_names
        selected_channel_indices = [x.channel_names.index(channel_name) \
                                      for channel_name in selected_channel_names]

        # Compute the FIR window which is required for the low pass filter
        # This is quite slow!
        # filter_order = 2 * x.sampling_frequency / self.cutoff_frequency
        filter_order = 31
        if self.b is None:
            try:
                b = \
                    scipy.signal.firwin(numtaps = filter_order,
                                    cutoff = self.cutoff_frequency * 2.0 / x.sampling_frequency,
                                    width = self.width,
                                    window = self.window)
            except TypeError:
                b = \
                    scipy.signal.firwin(N = filter_order-1,
                                    cutoff = self.cutoff_frequency * 2.0 / x.sampling_frequency,
                                    width = self.width,
                                    window = self.window)
            self.set_permanent_attributes(b=b)
        #Do the actual filtering
        filtered_data = numpy.zeros(x.shape)
        y=x.view(type=numpy.ndarray)
        for channel_index in selected_channel_indices:
            filtered_data[:,channel_index] = scipy.signal.convolve(self.b, \
                              y[:,channel_index])[len(self.b)//2:-len(self.b)//2+1]
        result_time_series = TimeSeries.replace_data(x, filtered_data)

        return result_time_series
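
The slice of the full convolution keeps the centered, group-delay-compensated portion; note that the original -len(self.b)/2+1 relied on Python 2 floor division (-31/2 == -16). For an odd-length kernel the same result can be obtained from scipy's 'same' mode, as this sketch (with assumed demo values) suggests:

    import numpy
    import scipy.signal

    fs, cutoff = 100.0, 20.0                         # assumed values
    b = scipy.signal.firwin(31, cutoff * 2.0 / fs)   # 31-tap low-pass
    x = numpy.random.randn(1000)

    y1 = scipy.signal.convolve(b, x)[len(b) // 2:-(len(b) // 2)]  # centered slice
    y2 = scipy.signal.convolve(x, b, mode='same')                 # equivalent
    assert numpy.allclose(y1, y2)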
Example #19
   def _execute(self, x):
       """ Apply high pass filter to data x and return the result """

       #Determine the indices of the channels which will be filtered
       selected_channel_names = self.selected_channels \
                           if self.selected_channels is not None else x.channel_names
       selected_channel_indices = [x.channel_names.index(channel_name) \
                                      for channel_name in selected_channel_names]
       if self.b is None:
           # Compute the FIR window which is required for the high pass filter
           try:
               b = scipy.signal.firwin(
                   numtaps=self.taps,
                   cutoff=self.cutoff_frequency * 2.0 / x.sampling_frequency,
                   width=self.width,
                   window=self.window)
           except TypeError:
               b = scipy.signal.firwin(
                   N=self.taps - 1,
                   cutoff=self.cutoff_frequency * 2.0 / x.sampling_frequency,
                   width=self.width,
                   window=self.window)
           b = -b
           b[self.taps // 2] = b[self.taps // 2] + 1
           self.set_permanent_attributes(b=b)

       #Do the actual filtering
       y=x.view(numpy.ndarray)
       filtered_data = numpy.zeros(x.shape)
       for channel_index in selected_channel_indices:
           filtered_data[:,channel_index] = \
                                 scipy.signal.lfilter(self.b, [1], y[:,channel_index])

       result_time_series = TimeSeries.replace_data(x, filtered_data)

       return result_time_series
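
Negating the taps and adding one to the center tap is spectral inversion: the high-pass kernel equals a unit impulse minus the low-pass kernel, so the gains are complementary (about 0 at DC, about 1 near Nyquist). A quick sketch with assumed demo values:

    import numpy
    import scipy.signal

    taps, fs, cutoff = 31, 100.0, 10.0                 # assumed demo values
    b = scipy.signal.firwin(taps, cutoff * 2.0 / fs)   # low-pass prototype, unity DC gain
    b = -b
    b[taps // 2] += 1.0                                # delta minus low-pass = high-pass

    w, h = scipy.signal.freqz(b)
    print(abs(h[0]), abs(h[-1]))                       # ~0 at DC, ~1 near Nyquist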
Example #20
 def setUp(self):
     self.time_series = test_ts_generator.generate_test_data(
         channels=8,
         time_points=1000,
         function=test_sine,
         sampling_frequency=100.0)
     self.x1 = TimeSeries([[1, 2, 3], [6, 5, 3]], ['a', 'b', 'c'], 120)
Example #22
    def _execute(self, data):
        # Determine the new list of channel_names
        self.selected_channel_names = [
            channel_name + "-" + self.dual[channel_name]
            for channel_name in self.dual_list
            if channel_name in data.channel_names
            and self.dual[channel_name] in data.channel_names
        ]
        # Initialize the new array
        difference_data = numpy.zeros(
            (len(data), len(self.selected_channel_names)))
        current_index = 0

        # Do the same check as in the determination of the channel names...
        for channel_name in self.dual_list:
            if (channel_name in data.channel_names
                    and self.dual[channel_name] in data.channel_names):
                first_channel_index = data.channel_names.index(channel_name)
                second_channel_index = data.channel_names.index(
                    self.dual[channel_name])
                # ...and build the difference of the corresponding channels.
                difference_data[:, current_index] = (
                    data[:, first_channel_index] -
                    data[:, second_channel_index])
                current_index += 1
        # Create new TimeSeries object
        difference_time_series = TimeSeries(difference_data,
                                            self.selected_channel_names,
                                            data.sampling_frequency,
                                            data.start_time, data.end_time,
                                            data.name, data.marker_name)
        return difference_time_series
Example #24
    def _prepare_FV(self, data):
        """ Convert FeatureVector into TimeSeries and use it for plotting.

        .. note:: This function is not yet working as it should be.
                  Work in progress.
                  Commit due to LRP-Demo (DLR Review)
        """
        # visualization of transformation or history data times visualization
        if self.current_trafo_TS is None:
            transformation_list = self.get_previous_transformations(data)
            transformation_list.reverse() #first element is previous node

            for elem in transformation_list:
                if self.use_FN and elem[3]=="feature normalization":
                    # visualize Feature normalization scaling as feature vector
                    FN_FV = FeatureVector(numpy.atleast_2d(elem[0]),
                                      feature_names = elem[2])
                    self.current_trafo_TS = type_conversion.FeatureVector2TimeSeriesNode()._execute(FN_FV)
                    self.current_trafo_TS.reorder(sorted(self.current_trafo_TS.channel_names))
                    break


                # visualize spatial filter as times series,
                # where the time axis is the number of channel or virtual
                # channel name
                if self.use_SF and elem[3]=="spatial filter":
                    new_channel_names = elem[2]
                    SF_trafo = elem[0]
                    self.current_trafo_TS = TimeSeries(SF_trafo.T,
                                channel_names = new_channel_names,
                                sampling_frequency = 1)
                    self.current_trafo_TS.reorder(sorted(self.current_trafo_TS.channel_names))
                    break
        
        return self.current_trafo_TS
Example #25
    def _execute(self, data):
        """ Apply the learned spatial filters to the given data point """
        if self.channel_names is None:
            self.channel_names = data.channel_names

        if self.retained_channels in [None, 'None']:
            self.retained_channels = len(self.channel_names)

        if len(self.channel_names) < self.retained_channels:
            self.retained_channels = len(self.channel_names)
            self._log(
                "Too many channels chosen for the retained channels! "
                "Replaced by maximum number.",
                level=logging.CRITICAL)
        data_array = data.view(numpy.ndarray)
        # Project the data using the learned spatial filters
        projected_data = numpy.dot(data_array,
                                   self.filters[:, :self.retained_channels])

        if self.xDAWN_channel_names is None:
            self.xDAWN_channel_names = [
                "xDAWN%03d" % i for i in range(self.retained_channels)
            ]

        return TimeSeries(projected_data, self.xDAWN_channel_names,
                          data.sampling_frequency, data.start_time,
                          data.end_time, data.name, data.marker_name)
Example #26
    def merge_time_series(self, input_collection):
        """ Merges all timeseries of the input_collection to one big timeseries """
        # Retrieve the time series from the input_collection
        input_timeseries = input_collection.get_data(0,0,'test')
        # Get the data from the first timeseries
        output_data = input_timeseries[0][0]
        skiped_range = output_data.start_time

        # Change the endtime of the first timeseries to the one of the last
        # timeseries inside the input_collection
        input_timeseries[0][0].end_time = input_timeseries[-1][0].end_time
        # For all the remaining timeseries

        for ts in input_timeseries[1:]:
            # Concatenate the data...
            output_data = numpy.vstack((output_data,ts[0]))
            # ... and add the marker to the first timeseries
            if len(ts[0].marker_name) > 0:
                for k in ts[0].marker_name:
                    if k not in input_timeseries[0][0].marker_name:
                        input_timeseries[0][0].marker_name[k] = []
                    for time in ts[0].marker_name[k]:
                        input_timeseries[0][0].marker_name[k].append(
                            time + ts[0].start_time - skiped_range)
        # Use the meta information from the first timeseries e.g. marker start/end_time
        # and create a new timeseries with the concatenated data
        merged_time_series = TimeSeries.replace_data(input_timeseries[0][0],output_data)
        # Change the name of the merged_time_series
        merged_time_series.name = "%s, length %d ms, %s" % (merged_time_series.name.split(',')[0], \
                                                            (len(merged_time_series)*1000.0)/merged_time_series.sampling_frequency,\
                                                            merged_time_series.name.split(',')[-1])
        
        return merged_time_series
Example #27
    def _execute(self, data):
        """ Project the data onto the selected channels. """
        projected_data = data[:, self.selected_indices]

        return TimeSeries(projected_data, self.selected_channels,
                          data.sampling_frequency, data.start_time,
                          data.end_time, data.name, data.marker_name)
Example #28
    def get_data(self, run_nr, split_nr, train_test):
        """ Return the train or test data for the given split in the given run.
        
        **Parameters**
          
          :run_nr: The number of the run whose data should be loaded.
          
          :split_nr: The number of the split whose data should be loaded.
          
          :train_test: "train" if the training data should be loaded.
                       "test" if the test data should be loaded.
    
        """
        # Do lazy loading of the time series objects.
        filepath = self.data_directory + os.path.sep + self.file_path
        data = scipy.io.loadmat(filepath)
        signal = data['Signal']
        flashing = data['Flashing']
        stimulus_code = data['StimulusCode']
        stimulus_type = data['StimulusType']
        target_char = data['TargetChar']

        window = 240
        channels = 64
        epochs = signal.shape[0]
        data_collection = []

        responses = numpy.zeros((12, 15, window, channels))
        for epoch in range(epochs):
            counter = 0
            rowcolcnt = numpy.ones(12, dtype=int)  # counters double as indices
            for n in range(1, signal.shape[1]):
                if flashing[epoch, n] == 0 and flashing[epoch, n - 1] == 1:
                    rowcol = int(stimulus_code[epoch, n - 1])
                    responses[rowcol - 1, rowcolcnt[rowcol - 1] - 1, :, :] = \
                        signal[epoch, n - 24:n + window - 24, :]
                    rowcolcnt[rowcol - 1] = rowcolcnt[rowcol - 1] + 1

            avgresp = numpy.mean(responses, 1)

            targets = stimulus_code[epoch, :] * stimulus_type[epoch, :]
            target_rowcol = []
            for value in targets:
                if value not in target_rowcol:
                    target_rowcol.append(value)

            target_rowcol.sort()

            for i in range(avgresp.shape[0]):
                temp = avgresp[i, :, :]
                data = TimeSeries(input_array=temp,
                                  channel_names=range(64),
                                  sampling_frequency=window)
                if i == target_rowcol[1] - 1 or i == target_rowcol[2] - 1:
                    data_collection.append((data, "Target"))
                else:
                    data_collection.append((data, "Standard"))

        return data_collection
Example #29
 def _execute(self, x):
     """
     f' = (f(x+h)-f(x))
     """
      if self.datapoints is None:
         self.datapoints = len(x)
     
     #create new channel names
     new_names = []
     for channel in range(len(x.channel_names)):
         new_names.append("%s'" %  (x.channel_names[channel]))
      #Derive f' from the data x
     timeSeries = []
     for datapoint in range(self.datapoints):
         temp = []
          if (datapoint + 1) < self.datapoints:
             for channel in range(len(x.channel_names)):
                 temp.append(x[datapoint+1][channel]-x[datapoint][channel])#*8*sampling_frequency
             timeSeries.append(temp)
      #pad with zeros if the original length of the time series has to remain the same
     if self.keep_number_of_samples:
         temp = []
         for i in range(len(x.channel_names)):
             temp.append(0)
         timeSeries.append(temp)
     #Create a new time_series with the new data and channel names
     result_time_series = TimeSeries.replace_data(x, numpy.array(timeSeries))
     result_time_series.channel_names = new_names
     #if necessary adjust the length of the time series
     if not self.keep_number_of_samples:
         result_time_series.end_time -= 1
     
     return result_time_series
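
The inner loops compute a first difference along the time axis; numpy.diff does the same in one call, and keep_number_of_samples=True corresponds to zero-padding the final row. A sketch:

    import numpy

    x = numpy.random.randn(100, 3)               # samples x channels
    derived = numpy.diff(x, axis=0)              # f' = f(x+h) - f(x), one sample shorter
    padded = numpy.vstack((derived, numpy.zeros((1, x.shape[1]))))  # keep length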
Example #30
    def setUp(self):
        samples = 5000
        ranges = [-3.5, 3.5, -3.5, 3.5]

        numpy.random.seed(0)
        true_data = numpy.zeros((samples, 2))
        true_data[:, 0] = numpy.random.normal(loc=0.0,
                                              scale=1.0,
                                              size=(samples, ))
        true_data[:, 1] = numpy.random.normal(loc=0.0,
                                              scale=0.5,
                                              size=(samples, ))
        self.classes = [-1 if x < 0 else 1 for x in true_data[:, 1]]

        mixed_data = numpy.zeros((samples, 2))
        for i in range(samples):
            mixed_data[i, 0] = 0.6 * true_data[i, 0] + 0.4 * true_data[i, 1] + 1.0
            mixed_data[i, 1] = 0.4 * true_data[i, 0] - 0.6 * true_data[i, 1] + 1.5

        self.data = numpy.zeros(mixed_data.shape)
        self.data[:, 0] = mixed_data[:, 0] - numpy.mean(mixed_data[:, 1])
        self.data[:, 1] = mixed_data[:, 1] - numpy.mean(mixed_data[:, 1])

        self.data = [
            TimeSeries(data,
                       channel_names=[("test_channel_%s" % j)
                                      for j in range(2)],
                       sampling_frequency=10) for data in self.data
        ]
Example #31
 def setUp(self):
     """ Define basic needed FeatureVector instances """
     self.x = FeatureVector([[0, 1, 2, 3, 4, 5]],
                            ["a", "b", "ab", "cb", "c4", "abc"])
     self.y = FeatureVector(
         [[0, 1, 2, 3, 4, 5]],
         ["a_7ms", "b_7ms", "ab_7ms", "cb_7ms", "c4_7ms", "abc_7ms"])
     self.tx = TimeSeries([[0, 1, 2, 3, 4, 5]],
                          channel_names=["a", "b", "ab", "cb", "c4", "abc"],
                          sampling_frequency=1)
     self.ty = TimeSeries([[0, 1, 2, 3, 4, 5]],
                          channel_names=[
                              "a_7ms", "b_7ms", "ab_7ms", "cb_7ms",
                              "c4_7ms", "abc_7ms"
                          ],
                          sampling_frequency=1)
Example #32
    def test_fda(self):
        """
        Tests that FDA produces the expected transformation matrix on the data
        """
        fda_node = FDAFilterNode(retained_channels=2)
        for i, data in enumerate(self.data):
            fda_node.train(data, self.classes[i])
        fda_node.stop_training()

        self.assert_(
            numpy.allclose(
                fda_node.filters,
                numpy.array([[1.56207903, -1.15805762],
                             [-2.32599494, -0.79980837]])),
            "FDA transformation matrix is wrong!")

        transformed_data = []
        for i, data in enumerate(self.data):
            ts = TimeSeries(input_array=data,
                            channel_names=[("test_channel_%s" % j)
                                           for j in range(2)],
                            sampling_frequency=10,
                            start_time=0,
                            end_time=1)
            transformed_data.append(fda_node.execute(ts).view(numpy.ndarray))
        self.assert_(
            numpy.allclose(transformed_data[0:2], [
                numpy.array([[-0.45549484, -1.20700466]]),
                numpy.array([[-1.52271298, 0.16829469]])
            ]), "FDA-transformed data does not match expectation!")
Example #33
    def _execute(self, data):
        """ Apply the :func:`scipy.signal.lfilter` function with the DC removal coefficients on the data """
        #Determine the indices of the channels which will be filtered
        if self.selected_channel_indices is None:
            self.selected_channel_names = self.selected_channels \
                if self.selected_channels is not None else data.channel_names
            self.selected_channel_indices = [
                data.channel_names.index(channel_name)
                for channel_name in self.selected_channel_names]

        # create initial filter conditions for all channels
        if self.internal_state is None:
            self.internal_state = dict()
            for channel_index in range(data.shape[1]):
                self.internal_state[channel_index] = \
                    scipy.signal.lfiltic(self.b, self.a, [0, 0])

        # do the actual removal
        cleaned_data = numpy.zeros(data.shape)
        for channel_index in self.selected_channel_indices:
            (cleaned_data[:, channel_index],
             self.internal_state[channel_index]) = \
                scipy.signal.lfilter(self.b, self.a, data[:, channel_index],
                                     zi=self.internal_state[channel_index])

        result_time_series = TimeSeries.replace_data(data, cleaned_data)

        return result_time_series
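
Carrying the zi state between calls is what makes the filter continuous across successive windows; each block resumes exactly where the previous one stopped. A sketch of the pattern on one channel, with assumed DC-blocker coefficients (not the node's own):

    import numpy
    import scipy.signal

    b, a = numpy.array([1.0, -1.0]), numpy.array([1.0, -0.99])  # assumed DC blocker
    zi = scipy.signal.lfiltic(b, a, [0, 0])       # initial conditions, as above

    stream = numpy.random.randn(1000) + 5.0       # signal with a DC offset
    out = []
    for block in numpy.split(stream, 10):         # 10 successive windows
        y, zi = scipy.signal.lfilter(b, a, block, zi=zi)
        out.append(y)
    out = numpy.concatenate(out)

    y_all, _ = scipy.signal.lfilter(b, a, stream,
                                    zi=scipy.signal.lfiltic(b, a, [0, 0]))
    assert numpy.allclose(out, y_all)             # block-wise == one-shot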
Example #34
    def _execute(self, data):
        """ Apply filter to data and return the result
        .. todo:: check if other view is needed here please
        """
        # Compute the FIR window which is required for the low pass filter,
        # if it does not exist yet
        if self.filter_kernel is None:
            self.calc_filter_kernel(data)
            # Determine the indices of the channels which will be filtered
            self.selected_channel_names = self.selected_channels \
                if self.selected_channels is not None else data.channel_names
            self.selected_channel_indices = \
                [data.channel_names.index(channel_name)
                 for channel_name in self.selected_channel_names]

        if self.comp_type == 'normal':
            # normal filtering with scipy
            filtered_data = numpy.zeros(data.shape)

            for channel_index in self.selected_channel_indices:
                filtered_data[:, channel_index] = scipy.signal.lfilter(
                    self.filter_kernel[0], self.filter_kernel[1],
                    data[:, channel_index])

            result_time_series = TimeSeries.replace_data(data, filtered_data)

        elif self.comp_type == 'mirror':
            # filtering with scipy; mirror the data beforehand at the right border
            data_mirrored = numpy.vstack((data, numpy.flipud(data)))
            pre_filtered_data = numpy.zeros(data_mirrored.shape)
            for channel_index in self.selected_channel_indices:
                pre_filtered_data[:, channel_index] = scipy.signal.lfilter(
                    self.filter_kernel[0], self.filter_kernel[1],
                    data_mirrored[:, channel_index])

                pre_filtered_data[:, channel_index] = scipy.signal.lfilter(
                    self.filter_kernel[0], self.filter_kernel[1],
                    numpy.flipud(pre_filtered_data[:, channel_index]))

            result_time_series = \
                TimeSeries.replace_data(data, pre_filtered_data[:len(data)])

        else:
            raise ValueError("Computation type unknown")

        return result_time_series
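
The 'mirror' branch reflects the data at the right border, filters forward, then filters the reversed result a second time, which suppresses the edge transient and roughly cancels the phase delay; it is similar in spirit (though not identical) to scipy.signal.filtfilt. A one-channel sketch with an assumed kernel:

    import numpy
    import scipy.signal

    b, a = scipy.signal.butter(4, 0.2)            # assumed kernel; the node builds its own
    x = numpy.random.randn(300)

    mirrored = numpy.concatenate((x, x[::-1]))    # reflect at the right border
    y = scipy.signal.lfilter(b, a, mirrored)      # first, forward pass
    y = scipy.signal.lfilter(b, a, y[::-1])       # second pass over the reversed result
    result = y[:len(x)]                           # keep the original extent

    reference = scipy.signal.filtfilt(b, a, x)    # related zero-phase alternative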
Example #36
    def _execute(self, data):
        # Initialize the ringbuffers and variables one for each channel
        if self.ringbuffer is None:
            self.width /= 1000.0
            self.width = int(self.width * data.sampling_frequency)
            self.nChannels = len(data.channel_names)
            self.ringbuffer = numpy.zeros((self.width,self.nChannels),dtype=numpy.double)
            self.variables = numpy.zeros((2,self.nChannels),dtype=numpy.double)
            self.index = numpy.zeros(self.nChannels,'i')

        # Convert the input data to double
        x = data.view(numpy.ndarray).astype(numpy.double)
        # Initialize the result data array
        filtered_data = numpy.zeros(x.shape)
        # Lists which are passed to the standardization
        # TODO: make self
        processing_filtered_data = None
        processing_ringbuffer = None
        processing_variables = None
        processing_index = None
        if self.standardization:
            for channel_index in range(self.nChannels):
                # Copy the different data to the processing lists
                processing_filtered_data = numpy.array(filtered_data[:,channel_index],'d')
                processing_ringbuffer = numpy.array(self.ringbuffer[:,channel_index],'d')
                processing_variables = numpy.array(self.variables[:,channel_index],'d')
                processing_index = int(self.index[channel_index])
                if self.var_tools:
                    # Perform the standardization
                    # The module vt (variance_tools) is implemented in C, using Boost to wrap the code for Python
                    # The module is located in trunk/library/variance_tools and has to be compiled
                    self.index[channel_index] = vt.standardization(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                else:
                    self.index[channel_index] = self.standardisation(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                # Copy the processing lists back to the local variables
                filtered_data[:,channel_index] = processing_filtered_data
                self.ringbuffer[:,channel_index] = processing_ringbuffer
                self.variables[:,channel_index] = processing_variables
        else:
            for channel_index in range(self.nChannels):
                # Copy the different data to the processing lists
                processing_filtered_data = numpy.array(filtered_data[:,channel_index],'d')
                processing_ringbuffer = numpy.array(self.ringbuffer[:,channel_index],'d')
                processing_variables = numpy.array(self.variables[:,channel_index],'d')
                processing_index = int(self.index[channel_index])
                if self.var_tools:
                    # Perform the filtering with the variance
                    # The module vt (variance_tools) is implemented in C, using Boost to wrap the code for Python
                    # The module is located in trunk/library/variance_tools and has to be compiled
                    self.index[channel_index] = vt.filter(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                else:
                    self.index[channel_index] = self.variance(processing_filtered_data, numpy.array(x[:,channel_index],'d'), processing_ringbuffer, processing_variables, self.width, processing_index)
                # Copy the processing arrays back into the result and buffers
                filtered_data[:,channel_index] = processing_filtered_data
                self.ringbuffer[:,channel_index] = processing_ringbuffer
                self.variables[:,channel_index] = processing_variables
        # Return the result
        result_time_series = TimeSeries.replace_data(data, filtered_data)
        return result_time_series
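For reference, a minimal pure-Python sketch of what the sliding-window standardization fallback (`self.standardisation`) above could implement; the layout of `variables` (running sum and running sum of squares) is an assumption, not the documented variance_tools interface:

import numpy

def sliding_standardization(out, x, ringbuffer, variables, width, index):
    """Write (x - mean) / std into out, with mean/std over the last width samples."""
    for k, sample in enumerate(x):
        oldest = ringbuffer[index]
        # update the running sum and the running sum of squares
        variables[0] += sample - oldest
        variables[1] += sample ** 2 - oldest ** 2
        ringbuffer[index] = sample
        index = (index + 1) % width
        mean = variables[0] / width
        var = variables[1] / width - mean ** 2
        # guard against a (near-)zero variance window
        out[k] = (sample - mean) / numpy.sqrt(var) if var > 1e-15 else 0.0
    return index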
Example #37
0
 def testInheritAndAddStuff(self):
     """test inheritance of meta data from other objects"""
     # Inherit
     self.assertEqual(self.x5.tag, self.x2.tag)
     self.assertEqual(self.x5.key, self.x2.key)
     
     self.assertEqual(self.f3.tag, self.x2.tag)
     self.assertEqual(self.f3.key, self.x2.key)
     
     #Inherit
     
     # suppress the BaseData type warning and cast the data back to numpy arrays
     hist_x6 = self.x6.history[0].view(numpy.ndarray)
     data_x5 = self.x5.view(numpy.ndarray)
     
     # history
     self.assertTrue((hist_x6 == data_x5).all())
     self.assertEqual(self.x6.history[0].key, self.x5.key)
     self.assertEqual(self.x6.history[0].tag, self.x5.tag)
     self.assertEqual(self.x6.history[0].specs['node_specs'], self.some_nice_dict)
     
     hist_f3 = self.f3.history[0].view(numpy.ndarray)

     self.assertTrue((hist_f3 == data_x5).all())
     self.assertEqual(self.f3.history[0].key, self.x5.key)
     self.assertEqual(self.f3.history[0].tag, self.x5.tag)
     
     # if key (and tag) were already set, the original values
     # have to be kept
     self.assertEqual(self.x6.key, self.x6_key)
     self.assertEqual(self.x6.tag, self.x2.tag)
     
     self.x6.inherit_meta_from(self.f3)  # should not change tag and key
     
     self.assertEqual(self.x6.key, self.x6_key)
     self.assertEqual(self.x6.tag, self.x2.tag)
     
     # testing multiple histories
     x7 = TimeSeries([1, 2, 3, 4, 5, 6], ['a', 'b', 'c', 'd', 'e', 'f'], 12, marker_name='S4')
     x7.add_to_history(self.x1)
     x7.add_to_history(self.x2)
     x7.add_to_history(self.x3)
     x7.add_to_history(self.x4)
     x7.add_to_history(self.x5)
     x7.add_to_history(self.x6)
     x7.add_to_history(self.x1)
     
     self.assertEqual(len(x7.history), 7)
     self.assertEqual(x7.history[0].key, x7.history[6].key)
     self.assertEqual(x7.history[5].history, [])
 def setUp(self):
     # initialize the two channels
     self.channel_names = ['a', 'b']
     array = []
     # fill in the data points according to a preset linear equation
     for counter in range(100):
         array.append([4 * counter + 1, 4.36 * counter - 23.4])
     self.initial_data = TimeSeries(array, self.channel_names, 100)
Example #39
0
 def _execute(self, data):
     """ Subsample the given data and return a new time series """
     if self.new_len == 0:
         self.new_len = int(round(self.target_frequency * len(data) /
                                  (1.0 * data.sampling_frequency)))
     if not self.mirror:
         downsampled_time_series = \
             TimeSeries.replace_data(data,
                                     scipy.signal.resample(data, self.new_len,
                                                           t=None, axis=0,
                                                           window=self.window))
     else:
         # resample the signal stacked with its mirrored copy and keep the
         # first half to reduce edge artifacts of FFT-based resampling
         downsampled_time_series = \
             TimeSeries.replace_data(data,
                                     scipy.signal.resample(
                                         numpy.vstack((data, numpy.flipud(data))),
                                         self.new_len * 2, t=None, axis=0,
                                         window=self.window)[:self.new_len])
     downsampled_time_series.sampling_frequency = self.target_frequency
     return downsampled_time_series
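The mirror branch above resamples the signal stacked with its time-reversed copy and keeps the first half; a hypothetical standalone version for a samples-by-channels array (not the node itself) makes the trick explicit:

import numpy
import scipy.signal

def resample_mirrored(x, new_len):
    # resampling the mirrored concatenation limits the edge ringing of
    # FFT-based resampling
    padded = numpy.vstack((x, numpy.flipud(x)))
    return scipy.signal.resample(padded, 2 * new_len, axis=0)[:new_len]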
Example #40
0
    def _execute(self, data):
        """ Apply the cast """
        # Cast the data to the configured target dtype
        self._log("Cast data")
        casted_data = data.astype(self.datatype)

        result_time_series = TimeSeries.replace_data(data, casted_data)

        return result_time_series
Example #41
0
 def _execute(self, data):
     """ Subsample the given data and return a new time series """
     if self.new_len == 0:
         self.new_len = int(round(self.target_frequency * len(data) /
                                  (1.0 * data.sampling_frequency)))
     if not self.mirror:
         downsampled_time_series = \
             TimeSeries.replace_data(data,
                                     scipy.signal.resample(data, self.new_len,
                                                           t=None, axis=0,
                                                           window=self.window))
     else:
         # resample the signal stacked with its mirrored copy and keep the
         # first half to reduce edge artifacts of FFT-based resampling
         downsampled_time_series = \
             TimeSeries.replace_data(data,
                                     scipy.signal.resample(
                                         numpy.vstack((data, numpy.flipud(data))),
                                         self.new_len * 2, t=None, axis=0,
                                         window=self.window)[:self.new_len])
     downsampled_time_series.sampling_frequency = self.target_frequency
     return downsampled_time_series
Example #42
0
 def _execute(self, data):
     """ Apply the cast """
     # Cast the data to the configured target dtype
     self._log("Cast data")
     casted_data = data.astype(self.datatype)

     result_time_series = TimeSeries.replace_data(data, casted_data)

     return result_time_series
 def _execute(self, x):
     """
     Apply z-score transformation to the given data and return a modified
     time series.
     """
     data = x.view(numpy.ndarray)
     # Do the z-score transformation
     mean = numpy.mean(data, axis=0)
     std = numpy.std(data, axis=0)
     std = check_zero_division(self, std, tolerance=10**-15, data_ts=x)
     return TimeSeries.replace_data(x, (data - mean) / std)
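`check_zero_division` is a project-specific helper; a plausible stand-in (an assumption, not the actual implementation) that guards the division by near-zero standard deviations:

import numpy

def guard_zero_division(std, tolerance=1e-15):
    std = numpy.array(std, dtype=numpy.double)
    # leave channels with (near-)zero deviation unscaled instead of dividing by ~0
    std[numpy.abs(std) < tolerance] = 1.0
    return std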
Example #44
0
    def test_csp(self):
        """
        Tests that CSP produces the expected transformation matrix on the data
        """
        csp_node = CSPNode(retained_channels=2)
        for i in range(self.data.shape[0]):
            ts = TimeSeries(input_array=self.data[i:i + 1, :],
                            channel_names=[("test_channel_%s" % j)
                                           for j in range(2)],
                            sampling_frequency=10,
                            start_time=0,
                            end_time=1)
            csp_node.train(ts, self.classes[i])
        csp_node.stop_training()

        self.assertTrue(
            numpy.allclose(
                csp_node.filters,
                numpy.array([[-0.75319083, -0.35237094], [1., -1.]])),
            "CSP transformation matrix is wrong! Got:%s, expected:%s" %
            (str(csp_node.filters),
             str(numpy.array([[-0.75319083, -0.35237094], [1., -1.]]))))

        transformed_data = numpy.zeros(self.data.shape)
        for i in range(self.data.shape[0]):
            ts = TimeSeries(input_array=self.data[i:i + 1, :],
                            channel_names=[("test_channel_%s" % j)
                                           for j in range(2)],
                            sampling_frequency=10,
                            start_time=0,
                            end_time=1)
            transformed_data[i, :] = csp_node.execute(ts)

        self.assertTrue(
            numpy.allclose(
                transformed_data[0:2, :],
                numpy.array([[0.14525655, -0.83028934],
                             [0.68796176, -0.23672793]])),
            "CSP-transformed data (%s) does not match expectation (%s)!" %
            (str(transformed_data[0:2, :]),
             str(
                 numpy.array([[0.14525655, -0.83028934],
                              [0.68796176, -0.23672793]]))))
    def _execute(self, x):
        data = x.view(numpy.ndarray)
        mean = numpy.mean(data, axis=0)
        # note: in-place subtraction also modifies the input time series data
        data -= mean

        # abs of the per-channel (signed) maximum after mean removal,
        # not the maximum absolute value
        max_values = numpy.abs(numpy.max(data, axis=0))

        max_values = check_zero_division(self, max_values, tolerance=10**-15,
                                         data_ts=x)

        return TimeSeries.replace_data(x, data/max_values)
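CSP filters are commonly obtained as generalized eigenvectors of the two class covariance matrices; a hypothetical reference computation (the CSPNode internals are not shown here) could look like:

import numpy
import scipy.linalg

def csp_filters(cov_a, cov_b):
    # solve cov_a w = lambda (cov_a + cov_b) w; the columns of eigvecs are
    # the spatial filters, and both ends of the eigenvalue spectrum carry
    # the most discriminative components
    eigvals, eigvecs = scipy.linalg.eigh(cov_a, cov_a + cov_b)
    order = numpy.argsort(eigvals)[::-1]
    return eigvecs[:, order]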
class SimpleDifferentiationFeature(unittest.TestCase):

    def setUp(self):
        self.channel_names = ['a', 'b', 'c', 'd', 'e', 'f']
        self.x1 = TimeSeries(
            [[1, 2, 3, 4, 5, 6], [6, 5, 3, 1, 7, 7]], self.channel_names, 120)

    def test_sd_feature(self):
        sd_node = SimpleDifferentiationFeatureNode()
        features = sd_node.execute(self.x1)
        for f in range(features.shape[1]):
            # the channel name is encoded as the fifth character of the
            # feature name
            channel = features.feature_names[f][4]
            index = self.channel_names.index(channel)
            self.assertEqual(
                features.view(numpy.ndarray)[0][f],
                self.x1.view(numpy.ndarray)[1][index] -
                self.x1.view(numpy.ndarray)[0][index])
    def _execute(self, data):
        """
        Apply the scaling to the given data x
        and return a new time series.
        """
        x = data.view(numpy.ndarray).astype(numpy.double)
        x = x * self.factor

        result_time_series = TimeSeries.replace_data(data, x)

        return result_time_series
Example #48
0
    def _execute(self, data):
        """
        Apply the scaling to the given data x
        and return a new time series.
        """
        x = data.view(numpy.ndarray)

        x.clip(self.min_threshold, self.max_threshold, out = x)

        result_time_series = TimeSeries.replace_data(data, x)

        return result_time_series
    def _execute(self, data):
        """ Reorder the memory. """

        # exchange the data of the time series object for a correctly ordered
        # (column-major / Fortran) copy
        reordered = numpy.array(data, order='F')

        if self.convert_type and reordered.dtype != numpy.dtype('float64'):
            reordered = reordered.astype(numpy.float64)

        data = TimeSeries.replace_data(data, reordered)
        
        return data
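Column-major (Fortran) order makes each channel's column contiguous in memory, which compiled backends may expect; the effect of order='F' can be checked directly:

import numpy

a = numpy.arange(6, dtype=numpy.float64).reshape(2, 3)  # default C (row-major) order
b = numpy.array(a, order='F')                           # column-major copy
assert b.flags['F_CONTIGUOUS'] and not a.flags['F_CONTIGUOUS']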
    def _execute(self, x):
        """
        Apply devariancing to the given data and return the modified time series
        """
        # Determine the indices of the channels which will be devarianced
        selected_channel_indices = [x.channel_names.index(channel_name)
                                    for channel_name in self.selected_channels]

        # Do the actual devariancing, i.e. multiply with the scale factor
        devarianced_data = numpy.zeros(x.shape)
        for channel, channel_index in zip(self.selected_channels,
                                          selected_channel_indices):
            devarianced_data[:, channel_index] = \
                x[:, channel_index] * self.scale_factors[channel]

        return TimeSeries.replace_data(x, devarianced_data)
Example #51
0
    def _execute(self, data):
        # First check if all channels actually appear in the data

        # Determine the indices of the channels that are the basis for the 
        # average reference.
        if not self.inverse:
            if self.avg_channels is None:
                self.avg_channels = data.channel_names
            channel_indices = [data.channel_names.index(channel_name) 
                                for channel_name in self.avg_channels]
        else:
            channel_indices = [data.channel_names.index(channel_name)
                               for channel_name in data.channel_names
                               if channel_name not in self.avg_channels]

        not_found_channels = \
            [channel_name for channel_name in self.avg_channels 
                     if channel_name not in data.channel_names]
        if not_found_channels:
            warnings.warn("Couldn't find selected channel(s): %s. Ignoring." % 
                            not_found_channels, Warning)
                    
        if self.old_ref is None:
            self.old_ref = 'avg'
        
        # Compute the data of the reference channel: the negative sum of the
        # selected channels divided by (the total number of channels + 1).
        ref_chen = -numpy.sum(data[:, channel_indices], axis=1)/(data.shape[1]+1)
        ref_chen = numpy.atleast_2d(ref_chen).T
        # Reference all electrodes against average
        avg_referenced_data = data + ref_chen
        
        # Add average as new channel to the signal if enabled
        if self.keep_average:
            avg_referenced_data = numpy.hstack((avg_referenced_data, ref_chen))
            channel_names = data.channel_names + [self.old_ref]
            result_time_series = TimeSeries(avg_referenced_data, 
                                            channel_names,
                                            data.sampling_frequency, 
                                            data.start_time, data.end_time,
                                            data.name, data.marker_name)
        else:
            result_time_series = TimeSeries.replace_data(data, 
                                                            avg_referenced_data)
        
        return result_time_series
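Stripped of the TimeSeries bookkeeping, the re-referencing above amounts to adding r = -sum(selected channels) / (number of channels + 1) to every channel; a minimal sketch for a plain samples-by-channels array:

import numpy

def average_reference(x):
    # reference signal: negative channel sum scaled by (n_channels + 1)
    ref = -numpy.sum(x, axis=1, keepdims=True) / (x.shape[1] + 1)
    return x + ref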
    def _execute(self, x):
        """
        Apply memory z-score transformation to the given data
        and return a new time series.
        """
        data = x.view(numpy.ndarray)
        # calculate the important measures in an array format
        mean = numpy.mean(data, axis=0)
        var = numpy.var(data, axis=0)

        # initialize the memory when it is first used
        if self.memory is None:
            self.memory = dict()
            self.memory['mean'] = list()
            self.memory['var'] = list()
            for i in range(self.order):
                self.memory['mean'].append(mean)
                self.memory['var'].append(var)

        # extend the memory by the new mean and variance if the
        # std < 200 (i.e. var < 40000) and not zero, to exclude big artifacts
        if (var < 40000).all() and (var > 10**-9).all():
            self.memory['var'].append(var)
            self.memory['mean'].append(mean)

            # calculate the mean of the current mean and variance
            # and the latest (order) means and variances in memory,
            # then drop the oldest entry, which is no longer needed
            var = numpy.mean(self.memory['var'], axis=0)
            mean = numpy.mean(self.memory['mean'], axis=0)
            self.memory['var'].pop(0)
            self.memory['mean'].pop(0)
        std = numpy.sqrt(var)

        # # code for easy visualization
        # statistic = numpy.vstack((mean,std))
        # statistic = TimeSeries.replace_data(data, statistic)
        # statistic.sampling_frequency = 1
        # return statistic

        # Do the memory z-score transformation; use the memory-averaged
        # mean so that it matches the memory-averaged std
        std = check_zero_division(self, std, tolerance=10**-15, data_ts=x)

        return TimeSeries.replace_data(x, (data - mean) / std)
    def _execute(self, x):
        """ Executes the preprocessing on the given data vector x"""
     # Number of retained channels
     num_channels = numpy.size(x, 1)
     if self.below_threshold is None:
         # When the node is called for the first time initialize all parameters/variables
         # Convert the window width from ms to samples
         self.width_AT = int((self.width_AT * x.sampling_frequency) / 1000.)

         # Convert the resting time from ms to samples
         self.time_below_threshold = int((self.time_below_threshold * x.sampling_frequency) / 1000.)

         # Create and prefill the array which indicates how long a signal was below the threshold
         self.below_threshold = numpy.zeros(num_channels)
         self.below_threshold.fill(self.time_below_threshold + 1)

         # Create the ring buffer and the variables list for the adaptive threshold
         self.ringbuffer_AT = numpy.zeros((self.width_AT, num_channels))
         self.variables_AT = numpy.zeros((4, num_channels))
     data = x.view(numpy.ndarray)
     # Create the array for the thresholded data
     threshold_data = numpy.zeros(data.shape)
     # For each sample of each retained channel
     for i in range(num_channels):
         data_index = 0
         for sample in data[:, i]:
             # calculate the adaptive threshold
             value = self.adaptive_threshold(sample, i)
             # if the actual sample exceeds the threshold...
             if sample >= value:
                 # ...and the resting time was observed
                 if self.below_threshold[i] > self.time_below_threshold:
                     # store a 1 indicating an onset
                     threshold_data[data_index][i] = 1
                 # reset the resting time counter
                 self.below_threshold[i] = 0
             else:
                 # increase the time the signal was below the threshold
                 self.below_threshold[i] += 1
             data_index += 1

     # return the thresholded data
        result_time_series = TimeSeries.replace_data(x, threshold_data)
        return result_time_series
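`adaptive_threshold` itself is not shown; a common choice for such onset detectors is a running mean-plus-k-sigma over the ring buffer, sketched here as an assumption rather than the node's actual rule:

import numpy

def mean_plus_k_sigma_threshold(ringbuffer, k=3.0):
    # the threshold adapts to the recent signal statistics in the buffer
    return numpy.mean(ringbuffer) + k * numpy.std(ringbuffer)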
    def _execute(self, data):
        """ Apply the detrending method to the given data x and return a new time series """
        # Determine the indices of the channels which will be detrended
        x = data.view(numpy.ndarray)
        if self.selected_channel_indices is None:
            self.selected_channel_names = self.selected_channels \
                if self.selected_channels is not None else data.channel_names
            self.selected_channel_indices = \
                    [data.channel_names.index(channel_name)
                     for channel_name in self.selected_channel_names]

        # Do the actual detrending
        detrended_data = numpy.zeros(x.shape)
        for channel_index in self.selected_channel_indices:
            detrended_data[:, channel_index] = \
                self.detrend_method(x[:, channel_index])

        result_time_series = TimeSeries.replace_data(data, detrended_data)

        return result_time_series
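`detrend_method` is configurable; scipy's linear detrending matches the per-channel call signature used above and is a typical choice:

import numpy
import scipy.signal

t = numpy.arange(100, dtype=numpy.double)
channel = 0.5 * t + numpy.sin(t)           # linear trend plus oscillation
detrended = scipy.signal.detrend(channel)  # remove the best-fit line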
    def _train(self, data):
        """ Check which channels have constant values.

        The training data is considered and the invalid channel names
        are removed. The first data entry is saved and the starting
        assumption is that all channels have constant values. When a value
        different from the first data entry for a respective channel is found,
        that channel is removed from the list of channels that have constant
        values.
        """
        # copy the first data value
        if self.data_values is None:
            # copy the first entry
            self.data_values = TimeSeries.replace_data(data, data.get_data()[0])
            # invalidate all the channels in the beginning
            self.selected_channel_names = copy.deepcopy(data.channel_names)

        # iterate over a copy, since channels may be removed during the loop
        for channel in list(self.selected_channel_names):
            if (data.get_channel(channel) !=
                    self.data_values.get_channel(channel)[0]).any():
                self.selected_channel_names.remove(channel)
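On a plain array, the scan this training loop performs reduces to keeping the channels whose samples all equal their first value:

import numpy

data = numpy.array([[1.0, 2.0],
                    [1.0, 3.0],
                    [1.0, 2.0]])
constant = [c for c in range(data.shape[1])
            if (data[:, c] == data[0, c]).all()]  # -> [0]: only channel 0 is constant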