Example #1
def metrics(x,
            dt,
            x_hat,
            dxdt_hat,
            x_truth=None,
            dxdt_truth=None,
            padding=None):
    if padding is None or padding == 'auto':
        padding = int(0.025 * len(x))
        if padding < 1:
            padding = 1
    if _np.isnan(x_hat).any():
        return _np.nan, _np.nan, _np.nan

    # RMS dxdt
    if dxdt_truth is not None:
        rms_dxdt = __rms_error__(dxdt_hat[padding:-padding],
                                 dxdt_truth[padding:-padding])
    else:
        rms_dxdt = None

    # RMS x
    if x_truth is not None:
        rms_x = __rms_error__(x_hat[padding:-padding],
                              x_truth[padding:-padding])
    else:
        rms_x = None

    # RMS reconstructed x
    rec_x = _utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = _utility.estimate_initial_condition(x, rec_x)
    rec_x = rec_x + x0
    rms_rec_x = __rms_error__(rec_x[padding:-padding], x[padding:-padding])

    return rms_rec_x, rms_x, rms_dxdt
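# A hedged sketch of what the private helper __rms_error__ is assumed to
# compute (root-mean-square error between two equal-length arrays); it is a
# stand-in for illustration, not this module's actual implementation:
import numpy as np

def rms_error_sketch(a, b):
    # RMS error: square root of the mean squared difference
    return np.sqrt(np.mean((np.asarray(a) - np.asarray(b))**2))

# rms_error_sketch(np.array([1.0, 2.0]), np.array([1.0, 2.5]))  # ~0.354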
Example #2
def __lineardiff__(x, dt, params, options=None):
    '''
    Estimate the parameters for a system xdot = Ax, and use that to calculate the derivative
    
    Inputs
    ------
    x       : (np.array of floats, 1xN) time series to differentiate
    dt      : (float) time step

    Parameters
    ----------
    params  : (list) [N,     : (int, >1) order (e.g. 2: velocity; 3: acceleration)
                      gamma] : (float) regularization term
    options : (dict) unused

    Outputs
    -------
    x_hat    : estimated (smoothed) x
    dxdt_hat : estimated derivative of x

    '''

    N, gamma = params
    mean = np.mean(x)
    x = x - mean

    # Generate the matrix of integrals of x
    X = [x]
    for n in range(1, N):
        X.append(utility.integrate_dxdt_hat(X[-1], dt))
    X = np.matrix(np.vstack(X[::-1]))
    integral_Xdot = X
    integral_X = __integrate_dxdt_hat_matrix__(X, dt)

    # Solve for A and the integration constants
    A, C = __solve_for_A_and_C_given_X_and_Xdot__(integral_X, integral_Xdot, N,
                                                  dt, gamma)

    # Add the integration constants
    Csum = 0
    t = np.arange(0, X.shape[1]) * dt
    for n in range(0, N - 1):
        C_subscript = n
        t_exponent = N - n - 2
        den = math.factorial(t_exponent)
        Cn = np.vstack([1 / den * C[i, C_subscript] * t**t_exponent
                        for i in range(X.shape[0])])
        Csum = Csum + Cn
    Csum = np.matrix(Csum)

    # Use A and C to calculate the derivative
    Xdot_reconstructed = (A * X + Csum)
    dxdt_hat = np.ravel(Xdot_reconstructed[-1, :])

    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x_hat = x_hat + utility.estimate_initial_condition(x + mean, x_hat)

    return x_hat, dxdt_hat
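# A minimal sketch of the X-matrix construction used by __lineardiff__ above:
# rows are x and its successive time-integrals, stacked deepest-integral-first.
# The cumulative trapezoid below is an assumption about what
# utility.integrate_dxdt_hat does, for illustration only:
import numpy as np

def cumtrapz_sketch(v, dt):
    # cumulative trapezoidal integral, same length as v, starting at 0
    out = np.zeros(len(v))
    out[1:] = np.cumsum((v[1:] + v[:-1]) * dt / 2.0)
    return out

dt_demo = 0.01
x_demo = np.sin(np.arange(0, 1, dt_demo))
rows = [x_demo]
for _ in range(1, 3):  # N = 3: [x, integral of x, double integral of x]
    rows.append(cumtrapz_sketch(rows[-1], dt_demo))
X_demo = np.vstack(rows[::-1])  # shape (3, len(x_demo))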
Example #3
def metrics(x, dt, x_hat, dxdt_hat, x_truth=None, dxdt_truth=None, padding=None):
    """
    Evaluate x_hat based on various metrics, depending on whether dxdt_truth and x_truth are known or not.

    :param x: time series that was differentiated
    :type x: np.array

    :param dt: time step in seconds
    :type dt: float

    :param x_hat: estimated (smoothed) x
    :type x_hat: np.array

    :param dxdt_hat: estimated xdot
    :type dxdt_hat: np.array

    :param x_truth: true value of x, if known, optional
    :type x_truth: np.array like x or None

    :param dxdt_truth: true value of dxdt, if known, optional
    :type dxdt_truth: np.array like x or None

    :param padding: number of snapshots on either side of the array to ignore when calculating the metric. If 'auto' or None, defaults to 2.5% of the size of x
    :type padding: int, None, or auto

    :return: a tuple containing the following:
            - rms_rec_x: RMS error between the integral of dxdt_hat and x
            - rms_x: RMS error between x_hat and x_truth; None if x_truth is None
            - rms_dxdt: RMS error between dxdt_hat and dxdt_truth; None if dxdt_truth is None
    :rtype: tuple -> (float, float, float)

    """
    if padding is None or padding == 'auto':
        padding = int(0.025*len(x))
        padding = max(padding, 1)
    if _np.isnan(x_hat).any():
        return _np.nan, _np.nan, _np.nan

    # RMS dxdt
    if dxdt_truth is not None:
        rms_dxdt = __rms_error__(dxdt_hat[padding:-padding], dxdt_truth[padding:-padding])
    else:
        rms_dxdt = None

    # RMS x
    if x_truth is not None:
        rms_x = __rms_error__(x_hat[padding:-padding], x_truth[padding:-padding])
    else:
        rms_x = None

    # RMS reconstructed x
    rec_x = _utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = _utility.estimate_initial_condition(x, rec_x)
    rec_x = rec_x + x0
    rms_rec_x = __rms_error__(rec_x[padding:-padding], x[padding:-padding])

    return rms_rec_x, rms_x, rms_dxdt
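# Usage sketch for metrics on a signal with known truth. first_order is assumed
# to be this package's finite-difference differentiator returning
# (x_hat, dxdt_hat); the import path and call are illustrative assumptions:
import numpy as np
# from pynumdiff.finite_difference import first_order

dt = 0.01
t = np.arange(0, 10, dt)
x_truth, dxdt_truth = np.sin(t), np.cos(t)
x_noisy = x_truth + np.random.normal(0, 0.05, len(t))
# x_hat, dxdt_hat = first_order(x_noisy, dt)
# rms_rec_x, rms_x, rms_dxdt = metrics(x_noisy, dt, x_hat, dxdt_hat,
#                                      x_truth=x_truth, dxdt_truth=dxdt_truth)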
Example #4
def __x_hat_using_finite_difference__(x, dt):
    """
    :param x:
    :param dt:
    :return:
    """
    x_hat, dxdt_hat = first_order(x, dt)
    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x, x_hat)
    return x_hat + x0
Example #5
def iterative_velocity(x, dt, params, options=None):
    """
    Use an iterative solver to find the total variation regularized 1st derivative.
    See __chartrand_tvregdiff__.py for details, author info, and license
    Methods described in: Rick Chartrand, "Numerical differentiation of noisy, nonsmooth data,"
    ISRN Applied Mathematics, Vol. 2011, Article ID 164564, 2011.
    Original code (MATLAB and python):  https://sites.google.com/site/dnartrahckcir/home/tvdiff-code

    :param x: array of time series to differentiate
    :type x: np.array (float)

    :param dt: time step size
    :type dt: float

    :param params: a list consisting of:

                    - iterations: Number of iterations to run the solver. More iterations result in blockier derivatives, which approach the convex result
                    - gamma: Regularization parameter.

    :type params: list (int, float)

    :param options: a dictionary with 2 key value pairs

                    - 'cg_maxiter': Max number of iterations to use in scipy.sparse.linalg.cg. If None, maxiter defaults to len(x), which works well in our test examples; the default options used here set it to 1000.
                    - 'scale': This method has two different numerical options. From __chartrand_tvregdiff__.py: 'large' or 'small' (case insensitive). Default is 'small'. 'small' has somewhat better boundary behavior, but becomes unwieldy for data larger than 1000 entries or so. 'large' has simpler numerics but is more efficient for large-scale problems. 'large' is more readily modified for higher-order derivatives, since the implicit differentiation matrix is square.

    :type options: dict {'cg_maxiter': (int), 'scale': (string)}, optional

    :return: a tuple consisting of:

            - x_hat: estimated (smoothed) x
            - dxdt_hat: estimated derivative of x

    :rtype: tuple -> (np.array, np.array)
    """

    if options is None:
        options = {'cg_maxiter': 1000, 'scale': 'small'}

    iterations, gamma = params
    dxdt_hat = __chartrand_tvregdiff__.TVRegDiff(x,
                                                 iterations,
                                                 gamma,
                                                 dx=dt,
                                                 maxit=options['cg_maxiter'],
                                                 scale=options['scale'],
                                                 ep=1e-6,
                                                 u0=None,
                                                 plotflag=False,
                                                 diagflag=1)
    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x, x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
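# Usage sketch for iterative_velocity (the parameter values below are
# illustrative assumptions, not tuned recommendations):
# x_hat, dxdt_hat = iterative_velocity(x_noisy, dt, [1, 0.01])  # [iterations, gamma]
# x_hat, dxdt_hat = iterative_velocity(x_noisy, dt, [1, 0.01],
#                                      options={'cg_maxiter': 1000, 'scale': 'small'})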
Example #6
def savgoldiff(x, dt, params, options=None):
    """
    Use the Savitzky-Golay filter to smooth the data and calculate the first derivative. It uses scipy.signal.savgol_filter. The Savitzky-Golay filter is very similar to the sliding polynomial fit, but slightly noisier and much faster.

    :param x: array of time series to differentiate
    :type x: np.array (float)

    :param dt: time step size
    :type dt: float

    :param params: a list of three elements:

                    - N: order of the polynomial
                    - window_size: size of the sliding window, must be odd (if not, 1 is added)
                    - smoothing_win: size of the window used for gaussian smoothing, a good default is window_size, but smaller for high-frequency data

    :type params: list (int)

    :return: a tuple consisting of:

            - x_hat: estimated (smoothed) x
            - dxdt_hat: estimated derivative of x


    :rtype: tuple -> (np.array, np.array)
    """
    n, window_size, smoothing_win = params

    if window_size > len(x) - 1:
        window_size = len(x) - 1

    if smoothing_win > len(x) - 1:
        smoothing_win = len(x) - 1

    if window_size <= n:
        window_size = n + 1

    if not window_size % 2:  # then make odd
        window_size += 1

    dxdt_hat = scipy.signal.savgol_filter(x, window_size, n, deriv=1) / dt

    kernel = __gaussian_kernel__(smoothing_win)
    dxdt_hat = smooth_finite_difference.__convolutional_smoother__(
        dxdt_hat, kernel, 1)

    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x, x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
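# Usage sketch for savgoldiff; the [N, window_size, smoothing_win] values are
# illustrative assumptions:
# x_hat, dxdt_hat = savgoldiff(x_noisy, dt, [3, 31, 31])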
Example #7
def savgoldiff(x, dt, params, options=None):
    '''
    Use the Savitzky-Golay filter to smooth the data and calculate the first derivative.
    Uses scipy.signal.savgol_filter.
    The Savitzky-Golay filter is very similar to the sliding polynomial fit, but slightly noisier and much faster.

    Inputs
    ------
    x       : (np.array of floats, 1xN) time series to differentiate
    dt      : (float) time step

    Parameters
    ----------
    params  : (list)  [N,              : (int)    order of the polynomial
                       window_size,    : (int)    size of the sliding window, must be odd (if not, 1 is added)
                       smoothing_win]  : (int)    size of the window used for gaussian smoothing, a good default is window_size, but smaller for high-frequency data

    Outputs
    -------
    x_hat    : estimated (smoothed) x
    dxdt_hat : estimated derivative of x

    '''

    if options is None:
        options = {'smooth': True}

    N, window_size, smoothing_win = params

    if window_size > len(x) - 1:
        window_size = len(x) - 1

    if smoothing_win > len(x) - 1:
        smoothing_win = len(x) - 1

    if window_size <= N:
        window_size = N + 1

    if not window_size % 2:  # then make odd
        window_size += 1

    dxdt_hat = scipy.signal.savgol_filter(x, window_size, N, deriv=1) / dt

    if options['smooth']:
        kernel = __gaussian_kernel__(smoothing_win)
        dxdt_hat = pynumdiff.smooth_finite_difference.__convolutional_smoother__(
            dxdt_hat, kernel, 1)

    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x, x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
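# A hedged sketch of the gaussian smoothing step above. __gaussian_kernel__ and
# __convolutional_smoother__ are private helpers; this stand-in assumes they
# amount to convolution with a normalized gaussian window:
import numpy as np

def gaussian_smooth_sketch(y, window_size):
    half = window_size // 2
    sigma = window_size / 6.0  # assumption: most of the kernel mass falls inside the window
    idx = np.arange(-half, half + 1)
    kernel = np.exp(-idx**2 / (2 * sigma**2))
    kernel /= kernel.sum()  # normalize so the smoothed signal keeps its mean
    return np.convolve(y, kernel, mode='same')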
Example #8
def smooth_acceleration(x, dt, params, options=None):
    """
    Use convex optimization (cvxpy) to solve for the acceleration total variation
    regularized derivative, then apply a convolutional gaussian smoother to the
    resulting derivative to smooth out the peaks. The end result is similar to
    the jerk method, but can be more time-efficient.

    Default solver is MOSEK: https://www.mosek.com/

    :param x: time series to differentiate
    :type x: np.array of floats, 1xN

    :param dt: time step
    :type dt: float

    :param params:  list with values [gamma, window_size], where gamma (float) is the regularization parameter and window_size (int) is the window size used for the gaussian kernel
    :type params: list -> [float, int]

    :param options: a dictionary indicating which SOLVER option to use, i.e. 'MOSEK' or 'CVXOPT'; in testing, 'MOSEK' was the most robust.
    :type options: dict {'solver': SOLVER}

    :return: a tuple consisting of:
            - x_hat: estimated (smoothed) x
            - dxdt_hat: estimated derivative of x
    :rtype: tuple -> (np.array, np.array)

    """
    if options is None:
        options = {'solver': 'MOSEK'}

    gamma, window_size = params

    x_hat, dxdt_hat = acceleration(x, dt, [gamma], options=options)
    kernel = __gaussian_kernel__(window_size)
    dxdt_hat = pynumdiff.smooth_finite_difference.__convolutional_smoother__(
        dxdt_hat, kernel, 1)

    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x, x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
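# Usage sketch for smooth_acceleration (requires cvxpy plus a solver; 'MOSEK'
# needs a license and 'CVXOPT' is a possible fallback; the parameter values
# are illustrative assumptions):
# x_hat, dxdt_hat = smooth_acceleration(x_noisy, dt, [1e-2, 51],
#                                       options={'solver': 'CVXOPT'})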
Example #9
def linearmodel(x, data, dt, params, options=None):
    '''
    Estimate the parameters for a system xdot = Ax + Bu, and use that to calculate the derivative
    
    Inputs
    ------
    x       : (np.array of floats, 1xN) time series to differentiate
    data    : (list of np.array of floats like x) additional time series data that may be relevant to modeling xdot = Ax + Bu
    dt      : (float) time step

    Parameters
    ----------
    params  : (list) [N,            : (int, >1) order (e.g. 2: velocity; 3: acceleration)
                      gammaA,       : (float) regularization term for A (try 1e-6)
                      gammaC,       : (float) regularization term for integration constants (try 1e-1)
                      window_size]  : (int) if options['smooth'] == True, window_size determines the size over which gaussian smoothing is applied
    options : (dict) {'smooth'}     : (bool) if True, apply gaussian smoothing to the result with the same window size

    Outputs
    -------
    x_hat    : estimated (smoothed) x
    dxdt_hat : estimated derivative of x

    '''

    if options is None:
        options = {'smooth': True}

    if len(params) == 4:
        N, gammaA, gammaC, window_size = params
    else:
        N, gammaA, gammaC = params
        window_size = None

    mean = np.mean(x)
    x = x - mean

    # Generate the matrix of integrals of x
    X = [x]
    for n in range(1, N):
        X.append(utility.integrate_dxdt_hat(X[-1], dt))
    X = X[::-1]
    for d in data:
        for n in range(1, N - 1):
            d = utility.integrate_dxdt_hat(d, dt)
        X.append(d)
    X = np.matrix(np.vstack(X))

    integral_Xdot = X
    integral_X = __integrate_dxdt_hat_matrix__(X, dt)

    # Solve for A and the integration constants
    A, C = __solve_for_A_and_C_given_X_and_Xdot__(integral_X,
                                                  integral_Xdot,
                                                  N,
                                                  dt,
                                                  gammaC=gammaC,
                                                  gammaA=gammaA,
                                                  solver='MOSEK',
                                                  A_known=None,
                                                  epsilon=1e-6,
                                                  rows_of_interest=[N - 1])

    # Add the integration constants
    Csum = 0
    t = np.arange(0, X.shape[1]) * dt
    for n in range(0, N - 1):
        C_subscript = n
        t_exponent = N - n - 2
        den = math.factorial(t_exponent)
        Cn = np.vstack([1 / den * C[i, C_subscript] * t**t_exponent
                        for i in range(X.shape[0])])
        Csum = Csum + Cn
    Csum = np.matrix(Csum)

    # Use A and C to calculate the derivative
    Xdot_reconstructed = (A * X + Csum)
    dxdt_hat = np.ravel(Xdot_reconstructed[N - 1, :])

    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x_hat = x_hat + utility.estimate_initial_condition(x + mean, x_hat)

    # smoothing requires window_size (the optional 4th element of params)
    if options['smooth'] and window_size is not None:
        kernel = __gaussian_kernel__(window_size)
        dxdt_hat = pynumdiff.smooth_finite_difference.__convolutional_smoother__(
            dxdt_hat, kernel, 1)

    return x_hat, dxdt_hat
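# Usage sketch for linearmodel; `data` may be an empty list if no auxiliary
# time series are available, and the parameter values are illustrative
# assumptions:
# x_hat, dxdt_hat = linearmodel(x_noisy, [], dt, [3, 1e-6, 1e-1, 51],
#                               options={'smooth': True})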
Example #10
def nonlinearmodel(x, library, dt, params, options=None):
    '''
    Use the integral form of SINDy to find a sparse dynamical system model for the output, x, given a library of features.
    Then take the derivative of that model to estimate the derivative.  
    
    Inputs
    ------
    x       : (np.array of floats, 1xN) time series to differentiate
    library : (list of 1D arrays) list of features to use for building the model
    dt      : (float) time step

    Parameters
    ----------
    params  : (list)  [gamma,        : (int)    sparsity knob (higher = more sparse model)
                       window_size], : (int)    if option smooth, this determines the smoothing window
    options : (dict)  {'smooth',     : (bool)   if True, apply gaussian smoothing to the result with the same window size
                       'solver'}     : (str)    solver to use with cvxpy, MOSEK is default

    Outputs
    -------
    x_hat    : estimated (smoothed) x
    dxdt_hat : estimated derivative of x

    '''

    if options is None:
        options = {'smooth': True, 'solver': 'MOSEK'}

    # Features
    int_library = integrate_library(library, dt)
    w_int_library, w_int_library_func, dw_int_library_func = whiten_library(
        int_library)

    # Whitened states
    w_state, w_state_func, dw_state_func = whiten_library([x])
    w_x_hat, = w_state

    # dewhiten integral library coefficients
    integrated_library_std = []
    integrated_library_mean = []
    for d in dw_int_library_func:
        integrated_library_std.append(d.std)
        integrated_library_mean.append(d.mean)
    integrated_library_std = np.array(integrated_library_std)
    integrated_library_mean = np.array(integrated_library_mean)

    # dewhiten state coefficients
    state_std = []
    state_mean = []
    for d in dw_state_func:
        state_std.append(d.std)
        state_mean.append(d.mean)
    state_std = np.array(state_std)
    state_mean = np.array(state_mean)

    # Define loss function
    var = cvxpy.Variable((1, len(library)))
    sum_squared_error_x = cvxpy.sum_squares(w_x_hat[1:-1] -
                                            (w_int_library * var[0, :])[1:-1])
    sum_squared_error = cvxpy.sum([sum_squared_error_x])

    # Solve convex optimization problem
    gamma = params[0]
    solver = options['solver']
    L = cvxpy.sum(sum_squared_error + gamma * cvxpy.norm1(var))
    obj = cvxpy.Minimize(L)
    prob = cvxpy.Problem(obj)
    prob.solve(solver=solver)
    sindy_coefficients = var.value

    integrated_library_offset = np.matrix(
        sindy_coefficients[0, :] /
        integrated_library_std) * np.matrix(integrated_library_mean).T
    estimated_coefficients = sindy_coefficients[
        0, :] / integrated_library_std * np.tile(state_std[0],
                                                 [len(int_library), 1]).T
    offset = -1 * (state_std[0] *
                   np.ravel(integrated_library_offset)) + state_mean

    # estimate derivative
    dxdt_hat = np.ravel(np.matrix(estimated_coefficients) * np.matrix(library))

    if options['smooth']:
        window_size = params[1]
        kernel = __gaussian_kernel__(window_size)
        dxdt_hat = pynumdiff.smooth_finite_difference.__convolutional_smoother__(
            dxdt_hat, kernel, 1)

    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x, x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
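# Usage sketch for nonlinearmodel with a simple feature library (requires
# cvxpy; the library entries and parameter values are illustrative
# assumptions):
# t = np.arange(len(x_noisy)) * dt
# library = [np.ones_like(x_noisy), x_noisy, x_noisy**2, np.sin(t)]
# x_hat, dxdt_hat = nonlinearmodel(x_noisy, library, dt, [1e-2, 51])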
Example #11
def spectraldiff(x, dt, params, options=None):
    '''
    Take a derivative in the fourier domain, with high frequency attenuation.
    
    Inputs
    ------
    x       : (np.array of floats, 1xN) time series to differentiate
    dt      : (float) time step

    Parameters
    ----------
    params  : (list) [wn]: (float) the high frequency cutoff
    options : (dict) {'even_extension',:   (bool) if True, extend the time series with
                                                  an even extension so the signal
                                                  starts and ends at the same value.
                      'pad_to_zero_dxdt'}: (bool) if True, extend the time series with
                                                  extensions that smoothly force the derivative
                                                  to zero. This allows the spectral derivative
                                                  to fit data which does not start and end
                                                  with derivatives equal to zero.


    Outputs
    -------
    x_hat    : estimated (smoothed) x
    dxdt_hat : estimated derivative of x

    '''
    if options is None:
        options = {'even_extension': True, 'pad_to_zero_dxdt': True}

    if isinstance(params, list):
        wn = params[0]
    else:
        wn = params

    original_L = len(x)

    # make derivative go to zero at ends (optional)
    if options['pad_to_zero_dxdt']:
        padding = 100
        pre = x[0] * np.ones(padding)
        post = x[-1] * np.ones(padding)
        x = np.hstack((pre, x, post))
        x_hat, _ = smooth_finite_difference.meandiff(
            x, dt, [int(padding / 2)], options={'iterate': False})
        x_hat[padding:-padding] = x[padding:-padding]
        x = x_hat
    else:
        padding = 0

    # Do even extension (optional)
    if options['even_extension'] is True:
        x = np.hstack((x, x[::-1]))

    # If odd, make N even, and pad x
    L = len(x)
    if L % 2 != 0:
        N = L + 1
        # linearly extrapolate one sample to make the length even
        x = np.hstack((x, x[-1] + dt * (x[-1] - x[-2])))
    else:
        N = L

    # Define the frequency range.
    k = np.asarray(
        list(range(0, int(N / 2))) + [0] + list(range(int(-N / 2) + 1, 0)))
    k = k * 2 * np.pi / (dt * N)

    # Frequency based smoothing: remove signals with a frequency higher than wn
    discrete_wn = int(wn * N)
    k[discrete_wn:N - discrete_wn] = 0

    # Derivative = 90 deg phase shift
    dxdt_hat = np.real(np.fft.ifft(1.0j * k * np.fft.fft(x)))
    dxdt_hat = dxdt_hat[padding:original_L + padding]

    # Integrate to get x_hat
    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x[padding:original_L + padding],
                                            x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
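# A hedged sketch of the spectral-derivative identity used above: for a
# periodic, band-limited signal, ifft(1j * k * fft(x)) recovers dx/dt.
# Demonstrated on a sine sampled over exactly one period:
import numpy as np

dt_demo = 0.01
N_demo = 1000
t_demo = np.arange(N_demo) * dt_demo
w = 2 * np.pi / (N_demo * dt_demo)  # one full period over the window
x_demo = np.sin(w * t_demo)
k_demo = 2 * np.pi * np.fft.fftfreq(N_demo, d=dt_demo)  # angular frequencies
dxdt_demo = np.real(np.fft.ifft(1.0j * k_demo * np.fft.fft(x_demo)))
# dxdt_demo closely matches the true derivative, w * np.cos(w * t_demo)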
Example #12
def spectraldiff(x, dt, params, options=None):
    """
    Take a derivative in the fourier domain, with high frequency attenuation.

    :param x: array of time series to differentiate
    :type x: np.array (float)

    :param dt: time step size
    :type dt: float

    :param params: the high frequency cutoff
    :type params: list (float) or float

    :param options: a dictionary consisting of 2 key value pairs:

                    - 'even_extension': if True, extend the time series with an even extension so signal starts and ends at the same value.
                    - 'pad_to_zero_dxdt': if True, extend the time series with extensions that smoothly force the derivative to zero. This allows the spectral derivative to fit data which does not start and end with derivatives equal to zero.

    :type options: dict {'even_extension': (bool), 'pad_to_zero_dxdt': (bool)}, optional

    :return: a tuple consisting of:

            - x_hat: estimated (smoothed) x
            - dxdt_hat: estimated derivative of x

    :rtype: tuple -> (np.array, np.array)
    """

    if options is None:
        options = {'even_extension': True, 'pad_to_zero_dxdt': True}

    if isinstance(params, list):
        wn = params[0]
    else:
        wn = params

    original_L = len(x)

    # make derivative go to zero at ends (optional)
    if options['pad_to_zero_dxdt']:
        padding = 100
        pre = x[0] * np.ones(padding)
        post = x[-1] * np.ones(padding)
        x = np.hstack((pre, x, post))
        x_hat, _ = smooth_finite_difference.meandiff(
            x, dt, [int(padding / 2)], options={'iterate': False})
        x_hat[padding:-padding] = x[padding:-padding]
        x = x_hat
    else:
        padding = 0

    # Do even extension (optional)
    if options['even_extension'] is True:
        x = np.hstack((x, x[::-1]))

    # If odd, make N even, and pad x
    L = len(x)
    if L % 2 != 0:
        N = L + 1
        # linearly extrapolate one sample to make the length even
        x = np.hstack((x, x[-1] + dt * (x[-1] - x[-2])))
    else:
        N = L

    # Define the frequency range.
    k = np.asarray(
        list(range(0, int(N / 2))) + [0] + list(range(int(-N / 2) + 1, 0)))
    k = k * 2 * np.pi / (dt * N)

    # Frequency based smoothing: remove signals with a frequency higher than wn
    discrete_wn = int(wn * N)
    k[discrete_wn:N - discrete_wn] = 0

    # Derivative = 90 deg phase shift
    dxdt_hat = np.real(np.fft.ifft(1.0j * k * np.fft.fft(x)))
    dxdt_hat = dxdt_hat[padding:original_L + padding]

    # Integrate to get x_hat
    x_hat = utility.integrate_dxdt_hat(dxdt_hat, dt)
    x0 = utility.estimate_initial_condition(x[padding:original_L + padding],
                                            x_hat)
    x_hat = x_hat + x0

    return x_hat, dxdt_hat
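# Usage sketch for spectraldiff; the cutoff wn is a fraction of the sampling
# frequency, and the value below is an illustrative assumption:
# x_hat, dxdt_hat = spectraldiff(x_noisy, dt, [0.05])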