Example #1
def Dn(x, y, Np, ndiv=1, axis=-1, mode='strip', cval=0.):
    """ central numerical derivative using Np points of order ndiv
        (Np>1 and odd), using convolution
        Data needs to be equally spaced in x
        can use mode= 'nearest', 'wrap', 'reflect', 'constant'
                      'strip'
                      'strip' will just cut the bad data at the ends
        cval is for 'constant'
        returns x', d^n y/dx^n(x')

        Note that the algorithm is not intended to remove noise,
        but to provide a more accurate derivative of a function.
        The larger Np, the more derivative orders are available.
        It basically finds the best Taylor series parameters,
        assuming Np points around the center are available:
          assuming f_k = f(xo + k dx),  k=-n .. n, Np=2*n+1
          and with f(x) = f(xo) + f' (x-xo) + f'' (x-xo)^2/2 + ...
                        = f(xo) + (f' dx) k + ((f'' dx**2)/2) k**2 + ...
          we solve for (f(xo), f' dx, f'' dx**2/2, ...)
          and pick the entry for the requested derivative.
    """
    dx = x[1] - x[0]
    kernel = central_diff_weights(Np, ndiv=ndiv)
    strip = False
    if mode == 'strip':
        strip = True
        mode = 'reflect'
    dy = filters.correlate1d(y, kernel, axis=axis, mode=mode, cval=cval)
    D = dy / dx**ndiv
    if strip:
        x, D = _do_strip(x, D, Np, axis=axis)
    return x, D
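The Taylor-series construction described in the docstring can be checked directly. A minimal standalone sketch (an editorial addition, assuming a SciPy version that still ships scipy.misc.central_diff_weights, which newer releases have removed): solve the Vandermonde system for (f(xo), f' dx, f'' dx**2/2, ...) and compare row ndiv with the kernel that Dn correlates against y.

import numpy as np
from math import factorial
from scipy.misc import central_diff_weights  # assumption: available in older SciPy

Np, ndiv = 5, 2
n = Np // 2
k = np.arange(-n, n + 1)                 # sample offsets k = -n .. n
V = np.vander(k, Np, increasing=True)    # V[j, m] = k_j**m
# f_k = sum_m (f^(m)(xo) dx**m / m!) * k**m, so row ndiv of inv(V), scaled by
# ndiv!, reproduces the convolution weights used above.
w = factorial(ndiv) * np.linalg.inv(V.astype(float))[ndiv]
print(np.allclose(w, central_diff_weights(Np, ndiv=ndiv)))   # True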
Example #2
def coef_restriction_diffseq(n_coeffs,
                             degree=1,
                             n_vars=None,
                             position=0,
                             base_idx=0):
    #check boundaries, returns "valid" ?

    if degree == 1:
        diff_coeffs = [-1, 1]
        n_points = 2
    elif degree > 1:
        from scipy import misc
        n_points = next_odd(degree + 1)  #next odd integer after degree+1
        diff_coeffs = misc.central_diff_weights(n_points, ndiv=degree)

    dff = np.concatenate((diff_coeffs, np.zeros(n_coeffs - len(diff_coeffs))))
    from scipy import linalg
    reduced = linalg.toeplitz(dff, np.zeros(n_coeffs - len(diff_coeffs) + 1)).T
    #reduced = np.kron(np.eye(n_coeffs-n_points), diff_coeffs)

    if n_vars is None:
        return reduced
    else:
        full = np.zeros((n_coeffs - 1, n_vars))
        full[:, position:position + n_coeffs] = reduced
        return full
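For the degree=1 branch the restriction matrix is just a first-difference stencil stacked into a banded matrix. A small standalone sketch of what reduced looks like for n_coeffs=5 (an editorial addition; it repeats the Toeplitz construction above outside the helper):

import numpy as np
from scipy import linalg

n_coeffs, diff_coeffs = 5, [-1, 1]
dff = np.concatenate((diff_coeffs, np.zeros(n_coeffs - len(diff_coeffs))))
reduced = linalg.toeplitz(dff, np.zeros(n_coeffs - len(diff_coeffs) + 1)).T
print(reduced)
# [[-1.  1.  0.  0.  0.]
#  [ 0. -1.  1.  0.  0.]
#  [ 0.  0. -1.  1.  0.]
#  [ 0.  0.  0. -1.  1.]]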
Example #3
def test_get_diff_coeff(self):
    forward_11 = get_diff_coeff([0, 1], 1)
    forward_13 = get_diff_coeff([0, 1, 2, 3], 1)
    backward_26 = get_diff_coeff(np.arange(-6, 1), 2)
    central_29 = get_diff_coeff(np.arange(-4, 5), 2)
    self.assertArrayAlmostEqual(forward_11, [-1, 1])
    self.assertArrayAlmostEqual(forward_13, [-11./6, 3, -3./2, 1./3])
    self.assertArrayAlmostEqual(backward_26, [137./180, -27./5, 33./2, -254./9,
                                              117./4, -87./5, 203./45])
    self.assertArrayAlmostEqual(central_29, central_diff_weights(9, 2))
Example #4
def raytraceX(obj, ps, sys_index, nimgs=None, eps=None):

    #---------------------------------------------------------------------------
    # Find the derivative of the arrival time surface.
    #---------------------------------------------------------------------------
    arrival = obj.basis.arrival_grid(ps)[sys_index]

    w = central_diff_weights(3)
    d = abs(correlate1d(arrival, w, axis=0, mode='constant')) \
      + abs(correlate1d(arrival, w, axis=1, mode='constant'))
    d = d[1:-1,1:-1]

    pl.matshow(d)

    xy = obj.basis.refined_xy_grid(ps)[1:-1,1:-1]

    # Create flattened *views*
    xy     = xy.ravel()
    dravel = d.ravel()

    imgs = []
    offs = []
    print('searching...')
    for i in argsort(dravel):

        if nimgs == len(imgs): break

        new_image = True
        for img in imgs:
            if abs(img-xy[i]) <= eps: 
                new_image = False

        if new_image:
            imgs.append(xy[i])
            offs.append(i)

    #---------------------------------------------------------------------------
    # Print the output
    #---------------------------------------------------------------------------

    if imgs:
        #print imgs
        #if len(imgs) % 2 == 1: imgs = imgs[:-1]  # XXX: Correct?
        imgs = array(imgs)

        g0 = array(arrival[1:-1,1:-1], copy=True)
        g0ravel = g0.ravel()
        times = g0ravel[offs]
        order = argsort(times)
    else:
        order = []

    return [(times[i], imgs[i]) for i in order]
Example #5
def diff_finite(
    dem: np.ndarray,
    cellsize: tuple,
    dx: int,
    dy: int,
) -> np.ndarray:
    """
    Calculate a higher-order derivative of the DEM by finite differencing
    successively in X and then Y direction. Uses a central differencing scheme.

    :param dem: DEM (z values at each pixel of a rectangular grid)
    :param cellsize: pixel ``(width, height)``
    :param dx: order of derivative in x direction
    :param dy: order of derivative in y direction
    """
    wx = misc.central_diff_weights(dx + 1 + dx % 2, dx).reshape(1, -1)
    wy = misc.central_diff_weights(dy + 1 + dy % 2, dy).reshape(-1, 1)
    image = _as_float_array(dem)
    image = ndimage.convolve(image, wx, mode='nearest')
    image = ndimage.convolve(image, wy, mode='nearest')
    norm = np.prod(cellsize**np.array([dy, dx]))
    return image / norm
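A quick standalone check of the scheme above (an editorial sketch rather than the original helper, again assuming scipy.misc.central_diff_weights is available): the second x-derivative of a parabolic surface should come out constant away from the borders.

import numpy as np
from scipy import ndimage
from scipy.misc import central_diff_weights  # assumption: available in older SciPy

width = 0.5                                  # toy pixel width
cols = np.arange(32)[np.newaxis, :] * np.ones((32, 1))
dem = (cols * width) ** 2                    # z = x**2 in physical units

dx = 2                                       # second derivative in x
wx = central_diff_weights(dx + 1 + dx % 2, dx).reshape(1, -1)
d2z = ndimage.convolve(dem, wx, mode='nearest') / width ** dx
print(d2z[16, 16])                           # ~2.0 in the interior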
Example #6
def _get_central_diff_weights(order):
    """Determine the weights for central differentiation"""

    if order == 3:
        weights = np.array([-1, 0, 1]) / 2.0
    elif order == 5:
        weights = np.array([1, -8, 0, 8, -1]) / 12.0
    elif order == 7:
        weights = np.array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
    elif order == 9:
        weights = np.array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
    else:
        weights = central_diff_weights(order, 1)
    return weights
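A quick consistency check (an editorial sketch, assuming the helper above is in scope together with numpy and scipy.misc.central_diff_weights): the hard-coded tables match the generic weights.

import numpy as np
from scipy.misc import central_diff_weights  # assumption: available in older SciPy

for order in (3, 5, 7, 9):
    assert np.allclose(_get_central_diff_weights(order),
                       central_diff_weights(order, 1))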
Example #7
def n_order_scheme(f, x, n, h):
    """
    n-order differentiable scheme
    """
    weights = central_diff_weights(n)
    grad_f = 0
    i_s, j_s = list(zip(*list(enumerate(np.arange(-n // 2, n // 2) + 1))))
    if PARALLEL:
        f_js = Parallel(n_jobs=len(j_s), prefer="threads")(delayed(f)(y=x + j * h) for j in j_s)
    else:
        f_js = [f(y=x + j * h) for j in j_s]
    for i, f_j in zip(i_s, f_js):
        if weights[i] != 0:
            grad_f += weights[i] * f_j
    return grad_f / h
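The same weighted sum can be evaluated without the joblib machinery. A serial sketch (an editorial addition; PARALLEL, Parallel and delayed are dropped, and scipy.misc.central_diff_weights is assumed available):

import numpy as np
from scipy.misc import central_diff_weights  # assumption: available in older SciPy

def central_first_derivative(f, x, n, h):
    weights = central_diff_weights(n)              # first-derivative weights, n odd
    offsets = np.arange(-(n // 2), n // 2 + 1)     # -n//2 .. n//2, matching the weights
    return sum(w * f(x + j * h) for w, j in zip(weights, offsets)) / h

print(central_first_derivative(np.sin, 1.0, 5, 1e-3), np.cos(1.0))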
Example #8
def deriv(env, model, obj_index, src_index, m, axis, R):
    w = central_diff_weights(5)
    #d = correlate1d(m, w, axis=axis, mode='constant')
    d = (correlate1d(m, -w, axis=0, mode='constant')) \
      + (correlate1d(m,  w, axis=1, mode='constant'))
    d = (correlate1d(d, -w, axis=0, mode='constant')) \
      + (correlate1d(d,  w, axis=1, mode='constant'))
    d = d[2:-2, 2:-2]
    d[d > .8] = .8
    d[d < -.8] = -.8
    #d = correlate1d(d, w, axis=axis, mode='constant')
    #d = diff(d, axis=axis)
    #d /= abs(d)
    #d = correlate1d(d, w, axis=axis, mode='constant')
    #d = diff(d, axis=axis)

    R -= model[0].basis.top_level_cell_size * 2
    #R -= model[0].basis.top_level_cell_size * 2
    pl.matshow(d, extent=[-R, R, -R, R])
    glspl.colorbar()
    arrival_plot(model, obj_index, src_index, only_contours=True, clevels=200)
Example #9
def grad_tau(env, model, obj_index, which, src_index):

    assert which in ['x', 'y'], "grad_tau: 'which' must be one of 'x' or 'y'"

    #print "grad_tau"
    obj, ps = model['obj,data'][obj_index]
    R = obj.basis.mapextent

    #---------------------------------------------------------------------------
    # Find the derivative of the arrival time surface.
    #---------------------------------------------------------------------------
    arrival = obj.basis.arrival_grid(ps)[src_index]

    w = central_diff_weights(3)

    which = 1 if which == 'x' else 0
    d = correlate1d(arrival, w, axis=which, mode='constant')

    d = d[1:-1, 1:-1]
    d[np.abs(d) < 1e-3] = 0
    d[d > 0] = 1
    d[d < 0] = -1
    pl.matshow(d, fignum=False, extent=[-R, R, -R, R], alpha=0.5)
Example #10
def gradient(derivative, points, shape, step=None, order='C'):
    ''' Approximate the derivative with a central difference.

    Parameters
    ----------
    derivative : int
        The order of the derivative to approximate.

    points : int
        The number of points in the central difference. Must be an odd
        integer greater than `derivative`.

    shape : tuple
        The shape of the array in non-vector form.

    step : tuple, optional
        The step sizes between adjacent values along each dimension in the
        array. Passing None defaults to step size 1.0 along all dimensions.

    order : {'C', 'F', 'A'}, optional
        The order by which the vectorized array is reshaped. This is the
        same parameter as given to functions like numpy.reshape. For a
        discussion of the memory efficiency of different orders and how that
        is determined by the underlying format, see the documentation of
        commands that take an order argument.

    Returns
    -------
    LinearOperator
        A LinearOperator implementing the central difference on vectorized
        inputs.

    Raises
    ------
    ValueError
        When the inputs are not the right dimensions.

    See Also
    --------
    convolve : The central difference is calculated using this LinearOperator.
    scipy.misc.central_diff_weights : The scipy function returning the
        required weights for calculating the central difference.

    Examples
    --------
    >>> import numpy as np
    >>> from pyop.operators import gradient
    >>> grad = gradient(1, 3, (5,6))
    >>> laplace = gradient(2, 3, (5,6), step=(0.5,2))
    >>> A = np.indices((5,6)).sum(0)**2 / 2.
    >>> grad(np.ravel(A)).reshape(5,6)
    array([[  0.5 ,   2.  ,   4.25,   7.  ,  10.25,   5.  ],
           [  2.  ,   4.  ,   6.  ,   8.  ,  10.  ,  -0.25],
           [  4.25,   6.  ,   8.  ,  10.  ,  12.  ,  -2.  ],
           [  7.  ,   8.  ,  10.  ,  12.  ,  14.  ,  -4.25],
           [  4.  ,   1.  ,  -0.25,  -2.  ,  -4.25, -32.  ]])
    >>> laplace(np.ravel(A)).reshape(5,6)
    array([[   2.125,    4.25 ,    2.25 ,   -3.75 ,  -13.75 ,  -32.25 ],
           [   4.25 ,    4.25 ,    4.25 ,    4.25 ,    4.25 ,   -1.875],
           [   4.125,    4.25 ,    4.25 ,    4.25 ,    4.25 ,   -3.75 ],
           [   3.75 ,    4.25 ,    4.25 ,    4.25 ,    4.25 ,   -5.875],
           [ -46.875,  -67.75 ,  -93.75 , -123.75 , -157.75 , -208.25 ]])
    '''
    # the central difference stops making sense once a dimension is shorter
    # than the stencil
    if any(s < points for s in shape):
        raise ValueError("Shape's dims must have at least as many "
                         "points as central difference weights.")

    if step is None:
        step = (1.0, ) * len(shape)
    elif len(step) != len(shape):
        raise ValueError("Shape and step must have same ndims (length).")

    step = enumerate(float(s) ** derivative for s in step)

    # reverses the order of the weights to compensate for convolution's
    # "flipped" shift
    weights = central_diff_weights(points, derivative)[::-1]

    # create a kernel with ndim specified by shape, each of length points
    kernel = np.zeros((points, ) * len(shape))
    # slice that gets the center values along first dimension
    slc = (slice(None), ) + (points // 2, ) * (kernel.ndim - 1)

    # fill in kernel along the center of each dimension
    for dim, s in step:
        kernel[slc[-dim:] + slc[:-dim]] += weights / s

    return convolve(kernel, shape, order)
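The non-obvious part of gradient() is the plus-shaped kernel it assembles. A short sketch of that construction for a 2-D array with a 3-point first derivative and unit steps (an editorial addition mirroring the slicing logic above):

import numpy as np
from scipy.misc import central_diff_weights  # assumption: available in older SciPy

points, derivative = 3, 1
weights = central_diff_weights(points, derivative)[::-1]   # flipped for convolution
kernel = np.zeros((points, points))
kernel[:, points // 2] += weights    # stencil down the center column (first axis)
kernel[points // 2, :] += weights    # stencil across the center row (second axis)
print(kernel)
# [[ 0.   0.5  0. ]
#  [ 0.5  0.  -0.5]
#  [ 0.  -0.5  0. ]]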
Example #11
import copy

import numpy as np
import math
from scipy.misc import central_diff_weights
import matplotlib.pyplot as plt
from scipy.fftpack import fft
from scipy.io import wavfile  # get the api
from scipy.interpolate import UnivariateSpline

fs, data = wavfile.read("c:/temp/06DieWasserflut.wav")  # load the data
a = data.T[0]  # this is a two-channel soundtrack; take the first channel
b = [(ele / 2**16.) * 2 - 1
     for ele in a]  # this is a 16-bit track; b is now normalized to [-1, 1)
i = 0
w1 = central_diff_weights(49, 1)

step = 10240


def freq2scale(freq):
    return 69 + 12 * math.log(freq, 2)


print(fs)
print("===========")
print("time in ms:intense:freq:name")
plot_x = []
plot_y = []
plot_count = 0
while i < len(a) - step:
Example #12
# deriv.py: numerical differentiation
import scipy.misc as scmisc
import numpy as np
from tktools import relerr  # relative error


# Print the coefficients of the central-difference formulas (odd numbers of points only)
for m in range(1, 6):
    print('--- derivative order', m, '---')
    if m >= 3:
        min_num_points = 2 * m - 1
    else:
        min_num_points = 3
    for n in range(min_num_points, 10, 2):
        print(n, '-point formula: ', scmisc.central_diff_weights(n, m))


# Original function
def org_func(x):
    return np.sin(np.cos(x))


# First derivative
def diff_func(x):
    return np.cos(np.cos(x)) * (-np.sin(x))


# Numerical differentiation based on central difference quotients
x = 1.5
print('true value           = ', diff_func(x))
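A possible continuation (an editorial sketch, not part of the original script): apply the n-point weights printed above to org_func and compare with the true value.

h = 1.0e-4
for n in (3, 5, 7):
    w = scmisc.central_diff_weights(n, 1)
    samples = np.array([org_func(x + k * h) for k in range(-(n // 2), n // 2 + 1)])
    print(n, '-point approximation =', np.dot(w, samples) / h)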
Example #13
b = funcion.diff(x,1,y,2)
c = S.Derivative(funcion,z,3)
c.doit()


import numpy as N
import scipy.misc as SM

# The number of points must be odd
x = S.symbols('x',real=True)
funsym = S.exp(-x**2)
funnum = S.lambdify(x,funsym,"numpy")
z= 2
solsym = funsym.diff(x,1).subs(x,z).evalf()
solnum = SM.derivative(funnum,z, dx=1e-6)
pesos = SM.central_diff_weights(3)
print(solnum,pesos)



xs = S.symbols('x',real=True)
fsym = S.exp(-xs**2)
fnum = S.lambdify(xs,fsym,"numpy")
x = N.arange(-1,1,1/4)
y = fnum(x)
derfsym = fsym.diff(xs,1)
solsym = list([derfsym.subs(xs,z)for z in x])
s0 = N.array(solsym).astype(float)
s1 = N.diff(y)/N.diff(x)
s2 = N.gradient(y,x)
s3 = N.gradient(y,x,edge_order=2)
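A possible final comparison (an editorial sketch, not in the original excerpt): the maximum deviation of each numpy estimate from the symbolic derivative s0.

print(N.max(N.abs(s2 - s0)))     # np.gradient
print(N.max(N.abs(s3 - s0)))     # np.gradient with edge_order=2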
Example #14
def toec_fit(strains, stresses, eq_stress = None, zero_crit=1e-10):
    """
    A third-order elastic constant fitting function based on 
    central-difference derivatives with respect to distinct
    strain states.  The algorithm is summarized as follows:

    1. Identify distinct strain states as sets of indices 
       for which nonzero strain values exist, typically
       [(0), (1), (2), (3), (4), (5), (0, 1) etc.]
    2. For each strain state, find and sort strains and
       stresses by strain value.
    3. Find first and second derivatives of each stress
       with respect to scalar variable corresponding to
       the smallest perturbation in the strain.
    4. Use the pseudoinverse of a matrix-vector expression 
       corresponding to the parameterized stress-strain
       relationship and multiply that matrix by the respective 
       calculated first or second derivatives from the
       previous step.
    5. Place the calculated second and third-order elastic 
       constants appropriately.

    Args:
        strains (nx3x3 array-like): Array of 3x3 strains
            to use in fitting of TOEC and SOEC
        stresses (nx3x3 array-like): Array of 3x3 stresses
            to use in fitting of TOEC and SOEC.  These
            should be PK2 stresses.
        eq_stress (3x3 array-like): stress corresponding to
            equilibrium strain (i.e. "0" strain state).
            If not specified, function will try to find
            the state in the list of provided stresses
            and strains.  If not found, defaults to 0.
        zero_crit (float): value for which strains below
            are ignored in identifying strain states.
    """

    if len(stresses) != len(strains):
        raise ValueError("Length of strains and stresses are not equivalent")
    vstresses = np.array([Stress(stress).voigt for stress in stresses])
    vstrains = np.array([Strain(strain).voigt for strain in strains])
    vstrains[np.abs(vstrains) < zero_crit] = 0

    # Try to find eq_stress if not specified
    if eq_stress is not None:
        veq_stress = Stress(eq_stress).voigt
    else:
        veq_stress = vstresses[np.all(vstrains == 0, axis=1)]
        if veq_stress.size:
            if len(veq_stress) > 1 and not \
               (abs(veq_stress - veq_stress[0]) < 1e-8).all():
                raise ValueError("Multiple stresses found for equilibrium strain"
                                 " state, please specify equilibrium stress or  "
                                 " remove extraneous stresses.")
            veq_stress = veq_stress[0]
        else:
            veq_stress = np.zeros(6)

    # Collect independent strain states:
    independent = set([tuple(np.nonzero(vstrain)[0].tolist())
                       for vstrain in vstrains])
    
    strain_states = []
    dsde = np.zeros((6, len(independent)))
    d2sde2 = np.zeros((6, len(independent)))
    for n, ind in enumerate(independent):
        # match strains with templates
        template = np.zeros(6, dtype=bool)
        np.put(template, ind, True)
        template = np.tile(template, [vstresses.shape[0], 1])
        mode = (template == (np.abs(vstrains) > 1e-10)).all(axis=1)
        mstresses = vstresses[mode]
        mstrains = vstrains[mode]
        # add zero strain state
        mstrains = np.vstack([mstrains, np.zeros(6)])
        mstresses = np.vstack([mstresses, np.zeros(6)])
        # sort strains/stresses by strain values
        mstresses = mstresses[mstrains[:, ind[0]].argsort()]
        mstrains = mstrains[mstrains[:, ind[0]].argsort()]
        strain_states.append(mstrains[-1] / \
                             np.min(mstrains[-1][np.nonzero(mstrains[0])]))
        diff = np.diff(mstrains, axis=0)
        if not (abs(diff - diff[0]) < 1e-8).all():
            raise ValueError("Stencil for strain state {} must be odd-sampling"
                             " centered at 0.".format(ind))
        h = np.min(diff[np.nonzero(diff)])
        coef1 = central_diff_weights(len(mstresses), 1)
        coef2 = central_diff_weights(len(mstresses), 2)
        if eq_stress is not None:
            mstresses[len(mstresses) // 2] = veq_stress
        dsde[:, n] = np.dot(np.transpose(mstresses), coef1) / h
        d2sde2[:, n] = np.dot(np.transpose(mstresses), coef2) / h**2

    m2i, m3i = generate_pseudo(strain_states)
    s2vec = np.ravel(dsde.T)
    c2vec = np.dot(m2i, s2vec)
    c2 = np.zeros((6, 6))
    c2[np.triu_indices(6)] = c2vec
    c2 = c2 + c2.T - np.diag(np.diag(c2))
    c3 = np.zeros((6, 6, 6))
    s3vec = np.ravel(d2sde2.T)
    c3vec = np.dot(m3i, s3vec)

    indices = list(itertools.combinations_with_replacement(range(6), r=3))
    for n, (i, j, k) in enumerate(indices):
        c3[i,j,k] = c3[i,k,j] = c3[j,i,k] = c3[j,k,i] = \
                c3[k,i,j] = c3[k,j,i] = c3vec[n]
    return TensorBase.from_voigt(c2), TensorBase.from_voigt(c3)
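Step 3 of the algorithm above reduces, for each strain state, to a one-dimensional central difference in the strain amplitude. A toy sketch of just that step (an editorial addition with made-up numbers, assuming scipy.misc.central_diff_weights):

import numpy as np
from scipy.misc import central_diff_weights  # assumption: available in older SciPy

h = 0.01                                     # smallest strain perturbation
amps = h * np.arange(-2, 3)                  # sorted strain amplitudes, centered on 0
stress = 50.0 * amps + 300.0 * amps ** 2     # toy stress(strain) response
coef1 = central_diff_weights(len(stress), 1)
coef2 = central_diff_weights(len(stress), 2)
print(np.dot(stress, coef1) / h)             # ~50,  first derivative at zero strain
print(np.dot(stress, coef2) / h ** 2)        # ~600, second derivative at zero strain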