def truncate_data(signal, tau):
    '''
    Returns the truncated signal with respect to the time shift tau.

    Parameters
    ----------
    signal : Signal(ndarray), shape(n, )
        A time signal from the NIData or the VNavData.
    tau : float
        The time shift.

    Returns
    -------
    truncated : ndarray, shape(m, )
        The truncated time signal.

    '''
    t = time_vector(len(signal), signal.sampleRate)

    # shift the NI data because it is the cleaner signal
    tni = t - tau
    tvn = t

    # make the common time interval
    tcom = tvn[np.nonzero(tvn < tni[-1])]

    if signal.source == 'NI':
        truncated = np.interp(tcom, tni, signal)
    elif signal.source == 'VN':
        truncated = signal[np.nonzero(tvn <= tcom[-1])]
    else:
        raise ValueError('No source was defined in this signal.')

    return truncated
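
For reference, a minimal, self-contained sketch of the interpolation step above, using plain NumPy arrays in place of the Signal objects; the shift value and stand-in signals here are made up for illustration only.

import numpy as np

sample_rate = 100.0
tau = 0.05                          # NI data shifted by 50 ms (illustrative)
t = np.arange(10) / sample_rate     # stand-in for time_vector(10, sample_rate)
ni = np.sin(2 * np.pi * t)          # stand-in for an NI-sourced signal

# Shift the NI time base and resample onto the common interval, mirroring the
# 'NI' branch of truncate_data.
tni = t - tau
tcom = t[t < tni[-1]]
truncated = np.interp(tcom, tni, ni)
print(len(truncated), len(ni))      # the truncated signal is shorter (4 vs 10)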
Example #2
    def setup(self):

        time = time_vector(1000, 100)

        omega = 2 * np.pi

        right_grf = 1000 * (0.75 + np.sin(omega * time))
        right_grf[right_grf < 0.0] = 0.0
        right_grf += 2.0 * np.random.normal(size=right_grf.shape)

        left_grf = 1000 * (0.75 + np.cos(omega * time))
        left_grf[left_grf < 0.0] = 0.0
        left_grf += 2.0 * np.random.normal(size=left_grf.shape)

        right_knee_angle = np.arange(len(time))
        right_knee_moment = np.arange(len(time))

        self.data_frame = \
            pandas.DataFrame({'Right Vertical GRF': right_grf,
                              'Left Vertical GRF': left_grf,
                              'Right Knee Angle': right_knee_angle,
                              'Right Knee Moment': right_knee_moment},
                             index=time)

        self.threshold = 10.0
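
time_vector itself is not shown on this page; a plausible minimal implementation, assuming it simply returns evenly spaced time stamps at the given sample rate (consistent with how it is called in these examples), might look like this.

import numpy as np

def time_vector(num_samples, sample_rate):
    # num_samples time stamps starting at 0.0, spaced 1 / sample_rate apart
    return np.arange(num_samples) / float(sample_rate)

time = time_vector(1000, 100)  # 0.00, 0.01, ..., 9.99 seconds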
Example #3
    def test_resample_record_data(self):
        dflow_data = DFlowData(self.path_to_mocap_data_file,
                               self.path_to_record_data_file)
        dflow_data.mocap_data = self.mocap_data_frame
        dflow_data._generate_cortex_time_stamp(self.mocap_data_frame)
        record_data = dflow_data._resample_record_data(self.record_data_frame)
        expected_time = time_vector(self.cortex_number_of_samples,
                                    1.0 / self.cortex_sample_period)

        testing.assert_allclose(record_data['Time'], expected_time)
Example #5
    def _generate_cortex_time_stamp(self, data_frame):
        """Returns the data frame with a new index based on the constant
        sample rate from Cortex."""

        # It doesn't seem that cortex frames are ever dropped (i.e. missing
        # frame number in the sequence). But if that is ever the case, this
        # function needs to be modified to deal with that and to generate
        # the new time stamp with the frame number column instead of a
        # generic call to the time_vector function.

        self.cortex_num_samples = len(data_frame)
        self.cortex_time = process.time_vector(self.cortex_num_samples,
                                               self.cortex_sample_rate)
        data_frame['Cortex Time'] = self.cortex_time
        data_frame['D-Flow Time'] = data_frame['TimeStamp']

        return data_frame
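
If Cortex frames were ever dropped, the comment above suggests deriving the time stamp from the frame number column instead; a rough sketch of that idea follows (the helper and its inputs are hypothetical, not part of DFlowData).

import numpy as np

def time_from_frame_numbers(frame_numbers, sample_rate):
    # Anchor time at the first recorded frame; dropped frames then show up
    # as gaps in the time stamp instead of being silently closed.
    frames = np.asarray(frame_numbers, dtype=float)
    return (frames - frames[0]) / float(sample_rate)

# e.g. frames 0, 1, 2, 5, 6 at 100 Hz -> 0.00, 0.01, 0.02, 0.05, 0.06 s
print(time_from_frame_numbers([0, 1, 2, 5, 6], 100.0))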
Example #7
    def test_clean_data(self):
        data = DFlowData(mocap_tsv_path=self.path_to_mocap_data_file,
                         record_tsv_path=self.path_to_record_data_file,
                         meta_yml_path=self.path_to_meta_data_file)

        data.clean_data()

        # TODO : Check for an events dictionary if the record file included
        # events.

        assert not pandas.isnull(data.data).any().any()
        assert (data._marker_column_labels(
            data.mocap_column_labels) == self.cortex_marker_labels)
        expected_columns = self.mocap_labels_without_hbm + \
            self.record_labels + ['Cortex Time', 'D-Flow Time']
        for col in data.data.columns:
            assert col in expected_columns
            assert col not in self.dflow_hbm_labels
        expected_time = time_vector(self.cortex_number_of_samples,
                                    1.0 / self.cortex_sample_period)
        testing.assert_allclose(data.data['Cortex Time'], expected_time)

        try:
            data.meta
        except AttributeError:
            assert False

        # Without the record file.
        data = DFlowData(mocap_tsv_path=self.path_to_mocap_data_file,
                         meta_yml_path=self.path_to_meta_data_file)
        data.clean_data()

        assert not pandas.isnull(data.data).any().any()
        assert (data._marker_column_labels(
            data.mocap_column_labels) == self.cortex_marker_labels)
        expected_columns = self.mocap_labels_without_hbm + [
            'Cortex Time', 'D-Flow Time'
        ]
        for col in data.data.columns:
            assert col in expected_columns
            assert col not in self.dflow_hbm_labels
        expected_time = time_vector(self.cortex_number_of_samples,
                                    1.0 / self.cortex_sample_period)
        testing.assert_allclose(data.data['Cortex Time'], expected_time)

        try:
            data.meta
        except AttributeError:
            assert False

        assert_raises(AttributeError, lambda: data.record_data)

        # Without the mocap file.
        data = DFlowData(record_tsv_path=self.path_to_record_data_file,
                         meta_yml_path=self.path_to_meta_data_file)
        data.clean_data()

        assert not pandas.isnull(data.data).any().any()
        assert_raises(AttributeError, lambda: data.mocap_column_labels)

        expected_columns = self.record_labels
        for col in data.data.columns:
            assert col in expected_columns
            assert col not in [
                'TimeStamp', 'Cortex Time', 'D-Flow Time', 'FrameNumber'
            ]
            assert col not in self.dflow_hbm_labels
            assert col not in self.mocap_labels_with_hbm
            assert col not in self.cortex_analog_labels
        expected_time = time_vector(self.cortex_number_of_samples,
                                    1.0 / self.cortex_sample_period)
        testing.assert_allclose(data.data['Time'], self.record_time)

        try:
            data.meta
        except AttributeError:
            assert False

        assert_raises(AttributeError, lambda: data.mocap_data)

        # Without record file and meta data.
        data = DFlowData(mocap_tsv_path=self.path_to_mocap_data_file)
        data.clean_data()

        assert not pandas.isnull(data.data).any().any()
        assert (data._marker_column_labels(
            data.mocap_column_labels) == self.cortex_marker_labels)
        expected_columns = self.mocap_labels_without_hbm + [
            'Cortex Time', 'D-Flow Time'
        ]
        for col in data.data.columns:
            assert col in expected_columns
            assert col not in self.dflow_hbm_labels
            assert col not in ['Time', 'RightBeltSpeed', 'LeftBeltSpeed']
        expected_time = time_vector(self.cortex_number_of_samples,
                                    1.0 / self.cortex_sample_period)
        testing.assert_allclose(data.data['Cortex Time'], expected_time)

        assert_raises(AttributeError, lambda: data.meta)
        assert_raises(AttributeError, lambda: data.record_data)

        # Without mocap file and meta data.
        data = DFlowData(record_tsv_path=self.path_to_record_data_file)
        data.clean_data()

        assert not pandas.isnull(data.data).any().any()
        assert_raises(AttributeError, lambda: data.mocap_column_labels)

        expected_columns = self.record_labels
        for col in data.data.columns:
            assert col in expected_columns
            assert col not in ['Cortex Time', 'D-Flow Time']
            assert col not in self.mocap_labels_with_hbm
        expected_time = time_vector(self.cortex_number_of_samples,
                                    1.0 / self.cortex_sample_period)
        testing.assert_allclose(data.data['Time'], self.record_time)

        assert_raises(AttributeError, lambda: data.meta)
        assert_raises(AttributeError, lambda: data.mocap_data)
Example #8
def find_timeshift(niAcc, vnAcc, sampleRate, speed, plotError=False):
    '''Returns the timeshift, tau, of the VectorNav [VN] data relative to the
    National Instruments [NI] data.

    Parameters
    ----------
    niAcc : ndarray, shape(n, )
        The acceleration of the NI accelerometer in its local Y direction.
    vnAcc : ndarray, shape(n, )
        The acceleration of the VN-100 in its local Z direction. Should be the
        same length as niAcc and contain the same signal, albeit time shifted.
        The VectorNav signal should be leading the NI signal.
    sampleRate : integer or float
        Sample rate of the signals. This should be the same for each signal.
    speed : float
        The approximate forward speed of the bicycle.

    Returns
    -------
    tau : float
        The timeshift.

    Notes
    -----
    The Z direction for `vnAcc` is assumed to be aligned with the steer axis
    and pointing down and the Y direction for the NI accelerometer should be
    aligned with the steer axis and pointing up.

    '''
    # raise an error if the signals are not the same length
    N = len(niAcc)
    if N != len(vnAcc):
        raise TimeShiftError('Signals are not the same length!')

    # make a time vector
    time = time_vector(N, sampleRate)

    # the signals have opposite signs, so fix that
    niSig = -niAcc
    vnSig = vnAcc

    # some constants for find_bump
    wheelbase = 1.02 # this is the wheelbase of the rigid rider bike
    bumpLength = 1.
    cutoff = 30.
    # filter the NI Signal
    filNiSig = butterworth(niSig, cutoff, sampleRate)
    # find the bump in the filtered NI signal
    niBump = find_bump(filNiSig, sampleRate, speed, wheelbase, bumpLength)

    # remove the NaNs in the VN signal and the corresponding time
    valid = ~np.isnan(vnSig)
    v = vnSig[valid]
    t = time[valid]
    # fit a spline through the data
    vn_spline = UnivariateSpline(t, v, k=3, s=0)
    # and filter it
    filVnSig = butterworth(vn_spline(time), cutoff, sampleRate)
    # and find the bump in the filtered VN signal
    vnBump = find_bump(filVnSig, sampleRate, speed, wheelbase, bumpLength)

    if vnBump is None or niBump is None:
        guess = 0.3
    else:
        # get an initial guess for the time shift based on the bump index
        guess = (niBump[1] - vnBump[1]) / float(sampleRate)

    # Since vnSig may have NaNs we should only use contiguous data around the
    # bump. The first step is to split vnSig into sections bounded by the NaNs
    # and then select the section in which the bump falls. Then we select a
    # similar area in niSig to run the time shift algorithm on.
    if vnBump is None:
        bumpLocation = 800 # just a random guess so things don't crash
    else:
        bumpLocation = vnBump[1]
    indices, arrays = split_around_nan(vnSig)
    for pair in indices:
        if pair[0] <= bumpLocation < pair[1]:
            bSec = pair

    # subtract the mean and normalize both signals
    niSig = normalize(subtract_mean(niSig, hasNans=True), hasNans=True)
    vnSig = normalize(subtract_mean(vnSig, hasNans=True), hasNans=True)

    niBumpSec = niSig[bSec[0]:bSec[1]]
    vnBumpSec = vnSig[bSec[0]:bSec[1]]
    timeBumpSec = time[bSec[0]:bSec[1]]

    if len(niBumpSec) < 200:
        warn('The bump section is only {} samples wide.'.format(str(len(niBumpSec))))

    # set up the error landscape, error vs tau
    # The NI lags the VectorNav and the time shift is typically between 0 and
    # 1 seconds
    tauRange = np.linspace(0., 2., num=500)
    error = np.zeros_like(tauRange)
    for i, val in enumerate(tauRange):
        error[i] = sync_error(val, niBumpSec, vnBumpSec, timeBumpSec)

    if plotError:
        plt.figure()
        plt.plot(tauRange, error)
        plt.xlabel('tau')
        plt.ylabel('error')
        plt.show()

    # find initial condition from landscape
    tau0 = tauRange[np.argmin(error)]

    print "The minimun of the error landscape is %f and the provided guess is %f" % (tau0, guess)

    # Compute the minimum of the function using both the result from the error
    # landscape and the bump find for initial guesses to the minimizer. Choose
    # the best of the two.
    tauBump, fvalBump = fmin(sync_error, guess,
                             args=(niBumpSec, vnBumpSec, timeBumpSec),
                             full_output=True, disp=True)[0:2]

    tauLandscape, fvalLandscape = fmin(sync_error, tau0,
                                       args=(niBumpSec, vnBumpSec, timeBumpSec),
                                       full_output=True, disp=True)[0:2]

    if fvalBump < fvalLandscape:
        tau = tauBump
    else:
        tau = tauLandscape

    # If the minimization doesn't do a good job, just use tau0:
    # if np.abs(tau - tau0) > 0.01:
    #     tau = tau0
    #     print("Bad minimizer!! Using the guess, %f, instead." % tau)

    print "This is what came out of the minimization:", tau

    if not (0.05 < tau < 2.0):
        raise TimeShiftError('This tau, {} s, is probably wrong'.format(str(tau)))

    return tau
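
A self-contained sketch of the two-stage search used above (coarse error landscape, then fmin), with a synthetic delayed copy of a bump-like signal and a simple interpolation-based error standing in for sync_error; butterworth, find_bump, and the NaN handling are omitted, and the signal and shift are made up for illustration.

import numpy as np
from scipy.optimize import fmin

def shift_error(tau, reference, delayed, time):
    # Advance the delayed signal by tau (via interpolation) and measure the
    # squared mismatch against the reference signal.
    advanced = np.interp(time, time - tau, delayed)
    return np.sum((advanced - reference) ** 2)

sample_rate = 100.0
time = np.arange(1000) / sample_rate
true_tau = 0.37

reference = np.exp(-(time - 4.0) ** 2)                 # a bump-like reference
delayed = np.interp(time, time + true_tau, reference)  # same bump, delayed

# Coarse error landscape to seed the minimizer, then polish with fmin,
# mirroring the two-stage search in find_timeshift.
taus = np.linspace(0.0, 2.0, 500)
errors = [shift_error(tau, reference, delayed, time) for tau in taus]
tau0 = taus[np.argmin(errors)]
tau = fmin(shift_error, tau0, args=(reference, delayed, time), disp=False)[0]
print(tau)  # approximately 0.37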