Example #1
def W_accuracy(Mdict, W=16, Dt=1, frames=None):
    """
    Compute the error value for adjacent values of Dt.
    Mdict is a dictionary of Mdata; each key is a pair of parameters (Dt, W).
    """
    key = (Dt, W)
    M = Mdict[key]

    frames = get_frames(M, frames)
    n = min(len(frames), 10)

    Dtlist = range(1, 20)
    dim = Mdict[key].shape()

    U = np.zeros(dim[:-1] + (n, len(Dtlist)))
    Ux = np.zeros(dim[:-1] + (n, len(Dtlist)))
    Uy = np.zeros(dim[:-1] + (n, len(Dtlist)))

    for i, Dt in enumerate(Dtlist):
        key = (Dt, W)
        Ux[..., i] = access.get(Mdict[key], 'Ux', frames[0], Dt=n)
        Uy[..., i] = access.get(Mdict[key], 'Uy', frames[0], Dt=n)
        U[..., i] = np.sqrt(access.get(Mdict[key], 'E', frames[0], Dt=n))

    for i, Dt in enumerate(Dtlist[:-1]):
        std_moy_Ux, std_Ux = compare(Ux, U, start=i, n=2, b=2)
        std_moy_Uy, std_Uy = compare(Uy, U, start=i, n=2, b=2)

        std_moy_U = (std_moy_Ux + std_moy_Uy) / 2

        print(std_moy_U)

    return std_moy_U
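The compare helper used above is not shown in these examples. A minimal self-contained sketch of the underlying idea, assuming the relative error is the scatter of Ux over a small window of adjacent Dt values normalised by the mean velocity modulus (array shapes and the window size b=2 are illustrative):

import numpy as np

# synthetic stand-ins: Ux and U have shape (ny, nx, n_frames, n_Dt)
rng = np.random.default_rng(0)
Ux = rng.normal(size=(32, 32, 10, 19))
U = np.abs(rng.normal(loc=5.0, size=(32, 32, 10, 19)))

def relative_scatter(Ux, U, start, b=2):
    # scatter of Ux over b adjacent Dt values, normalised by the mean modulus U
    window = slice(start, start + b)
    std = np.nanstd(Ux[..., window], axis=-1)
    mean = np.nanmean(U[..., window], axis=-1)
    return np.nanmean(std / mean)

for i in range(18):
    print(i, relative_scatter(Ux, U, i))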
Example #2
def accuracy(var, M, frames=None, **kwargs):
    frames = get_frames(M, frames)

    n = min(len(frames), 10)
    Ux = access.get(M, 'Ux', frames[0], Dt=n)
    Uy = access.get(M, 'Uy', frames[0], Dt=n)
    U = np.sqrt(access.get(M, 'E', frames[0], Dt=n))

    dim = Ux.shape
    d = len(dim)
Example #3
def velocity(M, frame, scale=True, display=True, W=None):
    """
    Test the velocity criterion for each individual vector
    
    Ux = access.get(M,'Ux',frame)    
    Uy = access.get(M,'Uy',frame)    
    U = np.asarray([Ux,Uy])
    dim = U.shape
    N = np.prod(dim[1:])
    d = len(dim)
    U = np.transpose(U,tuple(range(1,d))+(0,))    
    U = np.sqrt(np.sum(np.power(U,2),axis=d-1)) #velocity modulus
    """
    U = np.sqrt(access.get(M, 'E', frame))
    N = np.prod(U.shape)

    if scale:
        Umin, Umax = bounds(M)
    else:
        Umin, Umax = bounds_pix(W)

    if N == 0:
        # print(U.shape)
        N = np.prod(U.shape[:-1])

    r = len(np.where(np.logical_and(U > Umin, U < Umax))[0]) * 100. / N

    if display:
        print("Percentage of good values (velocity test) : " + str(r) + " %")
    return r
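The counting step of the velocity test is plain NumPy. A minimal self-contained sketch with synthetic data; Umin and Umax below are placeholders for the values that bounds / bounds_pix would return:

import numpy as np

rng = np.random.default_rng(1)
U = np.abs(rng.normal(loc=10.0, scale=5.0, size=(64, 64)))  # velocity modulus
Umin, Umax = 1.0, 30.0                                      # placeholder bounds

good = np.count_nonzero((U > Umin) & (U < Umax))
r = 100. * good / U.size
print("Percentage of good values (velocity test) : " + str(r) + " %")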
Example #4
def pdf(M,
        field,
        frame,
        Dt=10,
        Dx=1024,
        label='ko-',
        fignum=1,
        a=15.,
        norm=True,
        sign=1):
    import turbulence.manager.access as access
    Up = access.get(M, field, frame, Dt=Dt)

    limits = [(0, Dx), (0, Dx)]
    Up = sign * access.get_cut(M, field, limits, frame, Dt=Dt)

    normfactor = M.Ux.shape[0] * M.Ux.shape[1]
    figs = distribution(Up,
                        normfactor=normfactor,
                        a=a,
                        label=label,
                        fignum=fignum,
                        norm=norm)

    return figs
Example #5
def shear_limit_M(M, W, Dt, type=1, **kwargs):
    """
    Test the shear criterion : dU/W < 0.1 
    """

    M, field = vgradient.compute(M,
                                 'strain',
                                 step=1,
                                 filter=False,
                                 Dt=1,
                                 rescale=False,
                                 type=type,
                                 compute=False)
    values = getattr(M, field)  # /W

    dUmin, dUmax = check.shear_limit_M(M, W)

    xbin, n = graphes.hist(values,
                           normalize=False,
                           num=200,
                           range=(-0.5, 0.5),
                           **kwargs)  # xfactor = Dt
    maxn = max(n) * 1.2

    graphes.graph([dUmin, dUmin], [0, maxn], label='r-', **kwargs)
    graphes.graph([dUmax, dUmax], [0, maxn], label='r-', **kwargs)
    graphes.legende('', '', '')
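The shear criterion named in the docstring, dU/W < 0.1, can be checked directly on the gradient values. A minimal self-contained sketch on a synthetic velocity-gradient tensor, with an assumed interrogation-window size W:

import numpy as np

W = 32                                                  # interrogation window size in pixels (assumed)
rng = np.random.default_rng(2)
dU = rng.normal(scale=0.05 * W, size=(64, 64, 2, 2))    # synthetic velocity-gradient tensor, pixel units

shear = np.abs(dU) / W
fraction_ok = np.count_nonzero(shear < 0.1) / shear.size
print("Fraction passing the shear criterion dU/W < 0.1 :", fraction_ok)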
Example #6
def Dt_accuracy(Mdict, W=32, frames=None):
    """
    Compute the error value for adjacent values of Dt
    Mdict is a dictionary of Mdata; each key is a pair of parameters (Dt, W).
    """
    key = (1, W)
    M = Mdict[key]

    frames = get_frames(M, frames)
    n = min(len(frames), 10)

    Dtlist = range(1, 20)
    dim = Mdict[key].shape()

    U = np.zeros(dim[:-1] + (n, len(Dtlist)))
    Ux = np.zeros(dim[:-1] + (n, len(Dtlist)))
    Uy = np.zeros(dim[:-1] + (n, len(Dtlist)))

    dU = np.zeros(dim[:-1] + (2, 2, n, len(Dtlist)))
    dU2 = np.zeros(dim[:-1] + (2, 2, n, len(Dtlist)))

    for i, Dt in enumerate(Dtlist):
        key = (Dt, W)
        Ux[..., i] = access.get(Mdict[key], 'Ux', frames[0], Dt=n)
        Uy[..., i] = access.get(Mdict[key], 'Uy', frames[0], Dt=n)
        U[..., i] = np.sqrt(access.get(Mdict[key], 'E', frames[0], Dt=n))

        dU[..., i] = access.get(Mdict[key], 'dU', frames[0], Dt=n)

        dU_norm = np.sqrt(np.sum(np.power(dU[..., i], 2), axis=(2, 3)))
        dU2[..., i] = np.transpose(np.tile(dU_norm, (2, 2, 1, 1, 1)),
                                   (2, 3) + (0, 1, 4))

    for i, Dt in enumerate(Dtlist[:-1]):
        std_moy_Ux, std_Ux = compare(Ux, U, start=i, n=2, b=2)
        std_moy_Uy, std_Uy = compare(Uy, U, start=i, n=2, b=2)
        std_moy_U = (std_moy_Ux + std_moy_Uy) / 2

        std_moy_dU, std_dU = compare(dU, dU2, start=i, n=2, b=2)

        print(std_moy_U, std_moy_dU)

    return std_moy_U
Example #7
def v_accuracy(M, frames=None, display=True, **kwargs):
    frames = get_frames(M, frames)

    n = min(len(frames), 10)
    Ux = access.get(M, 'Ux', frames[0], Dt=n)
    Uy = access.get(M, 'Uy', frames[0], Dt=n)
    U = np.sqrt(access.get(M, 'E', frames[0], Dt=n))

    dim = Ux.shape
    d = len(dim)

    std_moy_Ux, std_Ux = compare(Ux, U, n=10, b=3)
    std_moy_Uy, std_Uy = compare(Uy, U, n=10, b=3)

    std_moy_U = (std_moy_Ux + std_moy_Uy) / 2

    if display:
        print('Relative error velocity : ' + str(std_moy_U * 100) + " %")
    #    print('Relative error along y : '+str(std_moy_Uy*100)+ " %")

    return std_moy_U, std_Ux, std_Uy
Example #8
def accuracy(M, frames):
    """
    From a series of adjacent frames, compute:
        the ratio of measurements within the bounds (Umin, Umax),

        the noise level on the velocity field, obtained by averaging over adjacent frames in time (assuming well-resolved dynamics),

        the ratio of measurements within the shear limit (dUmax),

        the velocity gradient noise level, using the same time-averaging technique.
    """
    for frame in frames:
        Ux = access.get(M, 'Ux', frame)
Example #9
def chose_axe(M, t, axes, Dt=1):
    """
    Choose N axes of an Mdata set
    INPUT
    -----
    M : Mdata object
    t : int
        time index
    axes : string list
        Possible values are : 'E', 'Ux', 'Uy', 'strain', 'omega'
    OUTPUT
    -----
    data : tuple of numpy arrays, one per requested axis
    """
    data = tuple([access.get(M, ax, t, Dt=Dt) for ax in axes])
    return data
Example #10
def circulation_2(M, i, fignum=1, display=False):
    Omega = access.get(M, 'omega', i)
    x, y = space_axis_vorticity(M)

    X, Y, data, center, factor = normalize(x, y, Omega[..., 0])

    dx = M.x[0, 1] - M.x[0, 0]
    # print(dx)

    U, d = vgradient.make_Nvec(M, i)  # Z : d+1 dimension np array

    nx, ny = X.shape
    R_list = np.arange(1., 15., 0.5)
    Gamma = []
    divergence = []
    for b in R_list:
        # print(b)
        tau = strain_tensor.strain_tensor_loc(U,
                                              center[0],
                                              center[1],
                                              d=2,
                                              b=b)
        omega, enstrophy = strain_tensor.vorticity(tau, d=2, norm=False)
        div = strain_tensor.divergence_2d(tau, d=2)
        G = (omega[0, 0] - div[0, 0]) * np.pi * b**2 * dx**2
        Gamma.append(G)
        divergence.append(div[0, 0] / np.abs(omega[0, 0]))

    R_list = np.asarray(R_list) * dx

    if display:
        graphes.graph(R_list, Gamma, fignum=fignum, label='bo')
        graphes.legende('r (mm)', 'Circulation (mm^2/s)', '')

        graphes.graph(R_list, divergence, fignum=fignum + 1, label='ko')
        graphes.graph(R_list,
                      np.zeros(len(R_list)),
                      fignum=fignum + 1,
                      label='r--')

        graphes.legende('r (mm)', 'Relative 2d divergence', '')
        graphes.set_axis(0, 30 * dx, -0.3, 0.3)

    return R_list, Gamma, center, factor
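The circulation estimate above amounts to integrating the vorticity over a disc of growing radius. A self-contained sketch of that integral on a synthetic Gaussian vortex (grid spacing and radii are illustrative):

import numpy as np

dx = 0.5                                    # grid spacing in mm (illustrative)
x = np.arange(-20, 20) * dx
X, Y = np.meshgrid(x, x)
r2 = X ** 2 + Y ** 2
omega = np.exp(-r2 / 10.)                   # synthetic vorticity field (1/s)

for R in [2., 5., 10.]:
    mask = r2 <= R ** 2
    Gamma = np.sum(omega[mask]) * dx ** 2   # circulation = integral of the vorticity over the disc
    print(R, Gamma)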
Example #11
def gradient(M, frame, W=32, scale=True, display=True):
    dU = access.get(M, 'dU', frame)

    N = np.prod(dU.shape)
    if N == 0:
        # print(dU.shape)
        N = np.prod(dU.shape[:-1])

    dUmin, dUmax = shear_limit(W)
    r = len(np.where(np.logical_and(dU > dUmin, dU < dUmax))[0]) * 100. / N

    dU_opt = shear_optimum(W)
    dU_moy = np.nanstd(dU)
    ropt = dU_moy / dU_opt
    if display:
        print("Percentage of good values (gradient test) : " + str(r) + " %")
        print("ratio measured shear / optimal value : " +
              str(ropt))  # greater than 1 start to be bad
    return r, ropt
Example #12
def dv_accuracy(M, frames=None, display=True, **kwargs):
    frames = get_frames(M, frames)
    n = min(len(frames), 10)
    dU = access.get(M, 'dU', frames[0], Dt=n)

    dim = dU.shape
    d = len(dim)

    dU2 = np.sqrt(np.sum(np.power(dU, 2), axis=(d - 3, d - 2)))
    # dU2 = np.reshape(np.tile(dU2,dim),dim+(n,))

    d = len(dU2.shape)

    #    std_moy_U1 = compare(dU[...,0,0,:],n=10,b=3)  #need to normalizeby dU2
    std_U1 = np.nanmean([
        np.nanstd(dU[..., 0, 0, slice(i, i + 3)], axis=d - 1) /
        np.nanmean(dU2[..., slice(i, i + 3)], axis=d - 1) for i in range(10)
    ])  # standard deviation along x axis

    std_U2 = np.nanmean([
        np.nanstd(dU[..., 0, 1, slice(i, i + 3)], axis=d - 1) /
        np.nanmean(dU2[..., slice(i, i + 3)], axis=d - 1) for i in range(10)
    ])

    std_U3 = np.nanmean([
        np.nanstd(dU[..., 1, 0, slice(i, i + 3)], axis=d - 1) /
        np.nanmean(dU2[..., slice(i, i + 3)], axis=d - 1) for i in range(10)
    ])

    std_U4 = np.nanmean([
        np.nanstd(dU[..., 1, 1, slice(i, i + 3)], axis=d - 1) /
        np.nanmean(dU2[..., slice(i, i + 3)], axis=d - 1) for i in range(10)
    ])

    std_moy_dU = np.median((std_U1 + std_U2 + std_U3 + std_U4) / 4)

    if display:
        print('Relative error velocity gradient : ' + str(std_moy_dU * 100) +
              " %")

    return std_moy_dU, std_U1, std_U2, std_U3, std_U4
Example #13
def Space(M, field, tlist, N=30, Np=10**4, norm_d=1.):
    dlist = range(N)
    dx = np.diff(M.x[0, :])[0]

    indices = {}
    Corr_d = {}

    U = access.get(M, field, 0)

    for d in dlist:
        indices[d] = corr.d_2pts_rand(U[..., 0], d, Np)

    for i in tlist:
        C = np.zeros(len(dlist))
        for d in dlist:
            C[d] = compute(M, i, indices[d], axes=[field, field])

        Corr_d[(i, 'd_' + field)] = np.asarray(dlist) * dx / norm_d
        Corr_d[(i, 'C_' + field)] = C

    return Corr_d
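corr.d_2pts_rand and compute are not shown here; the underlying idea is a two-point correlation estimated from Np random point pairs at separation d. A minimal sketch, assuming separations taken along one axis of a synthetic scalar field:

import numpy as np

rng = np.random.default_rng(3)
U = rng.normal(size=(128, 128))             # synthetic scalar field
Np = 10 ** 4

def corr_at_separation(U, d, Np):
    # average product of the field at Np random point pairs separated by d along x
    ny, nx = U.shape
    i = rng.integers(0, ny, Np)
    j = rng.integers(0, nx - d, Np)
    return np.mean(U[i, j] * U[i, j + d]) / np.mean(U ** 2)

C = [corr_at_separation(U, d, Np) for d in range(30)]
print(C[:5])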
Example #14
def pdf_ensemble(Mlist, field, frame, Dt=10, Dx=1024, label='r-', fignum=1, a=10., norm=True, model=False):
    import turbulence.manager.access as access

    U_tot = []

    for M in Mlist:
        pdf(M, field, frame, Dt=Dt, Dx=Dx, label='k', fignum=fignum, a=a, norm=False)

        Up = access.get(M, field, frame, Dt=Dt)
        # limits = [(0,Dx),(0,Dx)]
        #    Up = access.get_cut(M,field,limits,frame,Dt=Dt)
        # if Dx is larger than the box size, just keep all the data
        U_tot = U_tot + np.ndarray.tolist(Up)

    N = len(Mlist)
    U_tot = np.asarray(U_tot)

    x, y, figs = distribution(U_tot, normfactor=N, a=a, label=label, fignum=fignum, norm=norm)

    if model:
        n = len(y)
        b = y[n // 2]
        Dy = np.log((y[n // 2 + n // 8] + y[n // 2 - n // 8]) / 2. / b)

        a = - Dy / x[n // 2 + n // 8] ** 2

        P = b * np.exp(-a * x ** 2)
        semilogy(x, P, label='b.-', fignum=fignum)

    set_axis(min(x), max(x), 1, max(y) * 2)
    if field == 'omega' or field == 'strain':
        unit = ' (s^-1)'
    elif field == 'E':
        unit = 'mm^2/s^2'
    else:
        unit = ' (mm/s)'
    figs = {}
    figs.update(legend(field + unit, field + ' PDF', time_label(M, frame)))
    return figs
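The model branch above fits a Gaussian through the measured PDF using only its centre value b and the log-ratio Dy taken an eighth of the range away from the centre. A self-contained check of that estimate on a synthetic Gaussian, where the recovered a and b should match the known values:

import numpy as np

x = np.linspace(-5, 5, 201)
y = 3.0 * np.exp(-0.7 * x ** 2)             # synthetic PDF with known a = 0.7 and b = 3.0

n = len(y)
b = y[n // 2]                               # value at the centre of the distribution
Dy = np.log((y[n // 2 + n // 8] + y[n // 2 - n // 8]) / 2. / b)
a = -Dy / x[n // 2 + n // 8] ** 2           # curvature of log(PDF), i.e. the Gaussian width parameter
print(a, b)                                 # recovers ~0.7 and 3.0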
Example #15
def Test_dv(M, frames=None, W=32, display=True, scale=True, type=1, **kwargs):
    frames = get_frames(M, frames)
    r = 0.
    ropt = 0.
    dU = access.get(M,
                    'dU',
                    frames[0],
                    Dt=len(frames),
                    compute=False,
                    rescale=False,
                    type=type)

    for frame in frames:
        r0, ropt0 = gradient(M, frame, display=False, W=W, scale=scale)
        r += r0
        ropt += ropt0

    R = r / len(frames)
    Ropt = ropt / len(frames)

    if display:
        import turbulence.display.graphes as graphes
        dUmin, dUmax = shear_limit(W)

        xbin, n = graphes.hist(dU,
                               normalize=False,
                               num=200,
                               range=(-0.5, 0.5),
                               **kwargs)  # xfactor = Dt
        maxn = max(n) * 1.2
        graphes.graph([dUmin, dUmin], [0, maxn], label='r-', **kwargs)
        graphes.graph([dUmax, dUmax], [0, maxn], label='r-', **kwargs)
        graphes.legende('', '', '')

        print("Percentage of good values (gradient test) : " + str(R) + " %")
        print("ratio measured shear / optimal value : " +
              str(Ropt))  # greater than 1 start to be bad

    return R
Example #16
def smoothing(M, i, field='omega', sigma=1.):
    Z = access.get(M, field, i)
    return filters.gaussian_filter(Z, sigma=sigma)[..., 0]
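filters here is presumably scipy.ndimage; a self-contained sketch of the same Gaussian smoothing applied to a synthetic 2-d field:

import numpy as np
from scipy import ndimage

rng = np.random.default_rng(4)
Z = rng.normal(size=(64, 64))               # synthetic noisy vorticity field
Z_smooth = ndimage.gaussian_filter(Z, sigma=1.)
print(Z.std(), Z_smooth.std())              # smoothing lowers the fluctuation level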
Example #17
def Mplot(M, field, frame, auto_axis=False, step=1, W=None, Dt=None, fignum=1, show=False, vmin=0, vmax=0, log=False,
          display=False, tstamp=False, compute=False, cbar=False, colorbar=False):
    """

    Parameters
    ----------
    M :
    field :
    frame :
    auto_axis :
    step :
    W :
    Dt :
    fignum :
    show :
    vmin :
    vmax :
    log :
    display :
    tstamp :
    compute :
    cbar :
    colorbar :

    Returns
    -------
    """
    import turbulence.pprocess.check_piv as check
    import turbulence.manager.access as access

    data = access.get(M, field, frame, step=1, compute=compute)
    dimensions = data.shape

    if field == 'strain':
        # tensor variable. chose the trace (2d divergence !):
        data = data[..., 1, 1, :] + data[..., 0, 0, :]
        # print(data)

    X, Y = get_axis_coord(M)
    jmin = 0
    data = data[:, jmin:]
    X = X[:, jmin:]
    Y = Y[:, jmin:]

    t = M.t[frame]
    ft = M.t[frame + 1] - M.t[frame]
    dx = np.mean(np.diff(M.x[0, :]))

    if dx == 0:
        dx = 1

    if vmin == 0 and vmax == 0:
        if auto_axis:
            std = np.sqrt(np.nanmedian(np.power(data, 2)))
            vmax = 10 * std
            vmin = -vmax
            if field in ['E', 'enstrophy']:
                vmin = 0

            else:
                if W is None:
                    vmin, vmax = check.bounds(M, t0=frame)
                else:
                    vmin, vmax = check.bounds_pix(W)

            if Dt is not None:
                data = data / Dt

            if field in ['Ux', 'Uy']:
                vmax = np.abs(vmax)
                vmin = -np.abs(vmax)  # *100

            if field in ['omega']:
                vmax = np.abs(vmax) / 5.  # *15#/5.
                vmin = -np.abs(vmax)  # *10#*100#vmax

            if field in ['strain']:
                vmax = np.abs(vmax) / 20.  # *15#/5.
                vmin = -np.abs(vmax)  # *10#*100#vmax

            if field in ['E']:
                # std = np.std(data[...,frame])
                vmax = vmax ** 2
                vmin = vmin ** 2

            if field in ['enstrophy']:
                vmax = (vmax / 5.) ** 2
                vmin = (vmin) ** 2

    if log:
        vmax = np.log10(vmax)
        if vmin > 0:
            vmin = np.log10(vmin)
        else:
            vmin = vmax / 100.
    n = (X.shape[0] - dimensions[0]) // 2  # integer crop margin so the slicing below works
    if n != 0:
        X = X[n:-n, n:-n]
        Y = Y[n:-n, n:-n]
    color_plot(X, Y, data[..., 0], show=show, fignum=fignum, vmin=vmin, vmax=vmax, log10=False, cbar=cbar)
    #    time_stamp(M,frame)
    if colorbar:
        plt.colorbar()

    # plt.axis('equal')
    if tstamp:
        t = M.t[frame]
        Dt = M.t[frame + 1] - M.t[frame]
        s = ', t = ' + str(np.round(t * 1000) / 1000) + ' s, Dt = ' + str(np.round(Dt * 10000) / 10) + 'ms'
    else:
        s = ''

    figs = {}
    figs.update(legend('X (mm)', 'Y (mm)', field + s, display=display, cplot=True, show=show))

    return figs
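color_plot belongs to the same plotting module and is not shown; the auto_axis branch essentially chooses symmetric colour limits from a robust, median-based amplitude. A minimal matplotlib sketch of that choice on a synthetic field (pcolormesh stands in for color_plot):

import numpy as np
import matplotlib.pyplot as plt

rng = np.random.default_rng(5)
data = rng.normal(size=(64, 64))                 # synthetic scalar field
X, Y = np.meshgrid(np.arange(64), np.arange(64))

std = np.sqrt(np.nanmedian(np.power(data, 2)))   # robust amplitude estimate
vmax = 10 * std
vmin = -vmax

plt.pcolormesh(X, Y, data, shading='auto', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('X (mm)')
plt.ylabel('Y (mm)')
plt.show()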