Exemplo n.º 1
0
def test_dataset(ds):
  """Check VTK-m's point-to-cell averaging against VTK's reference filter.

  Runs vtkPointDataToCellData and vtkmAverageToCells on the same input
  dataset and asserts that the resulting 'RTData' arrays agree to 1e-3.
  """
  p2c = vtk.vtkPointDataToCellData()
  p2c.SetInputData(ds)
  p2c.Update()

  d1 = dsa.WrapDataObject(p2c.GetOutput())

  vtkm_p2c = vtk.vtkmAverageToCells()
  vtkm_p2c.SetInputData(ds)
  vtkm_p2c.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, "RTData")
  vtkm_p2c.Update()

  d2 = dsa.WrapDataObject(vtkm_p2c.GetOutput())

  # BUG FIX: both filters produce *cell* data; the original read PointData,
  # which point-to-cell filters do not populate by default (the standalone
  # version of this test compares CellData).
  rtD1 = d1.CellData['RTData']
  rtD2 = d2.CellData['RTData']

  assert (algs.max(algs.abs(rtD1 - rtD2)) < 10E-4)
Exemplo n.º 2
0
def test_dataset(ds):
  """Compare VTK's cell-to-point averaging with vtkmAverageToPoints."""
  # First derive cell data from the input's point data.
  to_cells = vtk.vtkPointDataToCellData()
  to_cells.SetInputData(ds)
  to_cells.Update()

  # Reference result: VTK's own cell-to-point filter.
  vtk_filter = vtk.vtkCellDataToPointData()
  vtk_filter.SetInputConnection(to_cells.GetOutputPort())
  vtk_filter.Update()
  reference = dsa.WrapDataObject(vtk_filter.GetOutput())

  # Candidate result: the VTK-m accelerated filter on the same cell data.
  vtkm_filter = vtk.vtkmAverageToPoints()
  vtkm_filter.SetInputData(to_cells.GetOutput())
  vtkm_filter.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, "RTData")
  vtkm_filter.Update()
  candidate = dsa.WrapDataObject(vtkm_filter.GetOutput())

  # Allow a small tolerance for differing accumulation order.
  delta = reference.PointData['RTData'] - candidate.PointData['RTData']
  assert (algs.max(algs.abs(delta)) < 10E-4)
def test_dataset(ds):
  """Verify vtkmAverageToPoints agrees with vtkCellDataToPointData."""
  cell_src = vtk.vtkPointDataToCellData()
  cell_src.SetInputData(ds)
  cell_src.Update()

  baseline = vtk.vtkCellDataToPointData()
  baseline.SetInputConnection(cell_src.GetOutputPort())
  baseline.Update()

  expected = dsa.WrapDataObject(baseline.GetOutput()).PointData['RTData']

  accelerated = vtk.vtkmAverageToPoints()
  accelerated.SetInputData(cell_src.GetOutput())
  accelerated.SetInputArrayToProcess(
      0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS, "RTData")
  accelerated.Update()

  actual = dsa.WrapDataObject(accelerated.GetOutput()).PointData['RTData']

  # The two implementations must agree to within a small tolerance.
  assert algs.max(algs.abs(expected - actual)) < 10E-4
Exemplo n.º 4
0
def compare(arr, tol):
    """Assert that every entry of *arr* is smaller than *tol* in magnitude."""
    magnitudes = algs.abs(arr)
    assert algs.all(magnitudes < tol)
Exemplo n.º 5
0
    end = NUM_BLOCKS
else:
    start = rank * NUM_BLOCKS - 3
    end = start + NUM_BLOCKS

for i in range(start, end):
    a = vtk.vtkImageData()
    a.ShallowCopy(w.GetOutput())
    c.SetBlock(i, a)

if rank == 0:
    c.SetBlock(NUM_BLOCKS - 1, vtk.vtkPolyData())

cdata = dsa.WrapDataObject(c)
rtdata = cdata.PointData['RTData']
rtdata = algs.abs(rtdata)
g = algs.gradient(rtdata)
g2 = algs.gradient(g)

res = True
dummy = vtk.vtkDummyController()
for axis in [None, 0]:
    for array in [rtdata, g, g2]:
        if rank == 0:
            array2 = array / 2
            min = algs.min_per_block(array2, axis=axis)
            res &= numpy.all(min.Arrays[NUM_BLOCKS -
                                        1] == numpy.min(array, axis=axis))
            all_min = algs.min(min, controller=dummy)
            all_min_true = numpy.min([
                algs.min(array, controller=dummy),
Exemplo n.º 6
0
    end = NUM_BLOCKS
else:
    start = rank*NUM_BLOCKS - 3
    end = start + NUM_BLOCKS

for i in range(start, end):
    a = vtk.vtkImageData()
    a.ShallowCopy(w.GetOutput())
    c.SetBlock(i, a)

if rank == 0:
    c.SetBlock(NUM_BLOCKS - 1, vtk.vtkPolyData())

cdata = dsa.WrapDataObject(c)
rtdata = cdata.PointData['RTData']
rtdata = algs.abs(rtdata)
g = algs.gradient(rtdata)
g2 = algs.gradient(g)

res = True
dummy = vtk.vtkDummyController()
for axis in [None, 0]:
    for array in [rtdata, g, g2]:
        if rank == 0:
            array2 = array/2
            min = algs.min_per_block(array2, axis=axis)
            res &= numpy.all(min.Arrays[NUM_BLOCKS - 1] == numpy.min(array, axis=axis))
            all_min = algs.min(min, controller=dummy)
            all_min_true = numpy.min([algs.min(array, controller=dummy), algs.min(array2, controller=dummy)])
            res &= all_min == all_min_true
            max = algs.max_per_block(array2, axis=axis)
    print("This test requires numpy!")
    from vtk.test import Testing
    Testing.skip()

import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.numpy_interface import algorithms as algs

# Synthetic image source that provides the point-data 'RTData' array.
rt = vtk.vtkRTAnalyticSource()

# Reference pipeline: point data -> cell data -> back to point data,
# all with VTK's own averaging filters.
p2c = vtk.vtkPointDataToCellData()
p2c.SetInputConnection(rt.GetOutputPort())
p2c.Update()

c2p = vtk.vtkCellDataToPointData()
c2p.SetInputConnection(p2c.GetOutputPort())
c2p.Update()

d1 = dsa.WrapDataObject(c2p.GetOutput())

# Candidate: VTK-m accelerated cell-to-point averaging of the same cell data.
# (The c2p name is deliberately reused for the second filter.)
c2p = vtk.vtkmAverageToPoints()
c2p.SetInputData(p2c.GetOutput())
c2p.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_CELLS,
                           "RTData")
c2p.Update()

d2 = dsa.WrapDataObject(c2p.GetOutput())

# Both averaging paths should produce (nearly) identical point data.
assert (algs.max(algs.abs(d1.PointData['RTData'] - d2.PointData['RTData'])) <
        10E-4)
Exemplo n.º 8
0
def make_targets(les_vtk, y_type, Ls=1, Us=1, ros=1):
    """Construct machine-learning targets from an LES solution.

    Parameters
    ----------
    les_vtk : wrapped VTK/vista dataset
        Must carry point-data arrays 'uu', 'vv', 'ww', 'uv', 'uw', 'vw'
        (resolved Reynolds stresses) and 'U', 'V', 'W' (mean velocity).
    y_type : str
        'classification' -> 5 binary RANS-error markers;
        'regression'     -> 2 continuous targets.
    Ls, Us, ros : float, optional
        Reference length, velocity and density scales used to
        non-dimensionalise the targets.

    Returns
    -------
    y_raw : ndarray, shape (nnode, ntarg)
        Raw continuous quantity each target was derived from.
    y_targ : ndarray, shape (nnode, ntarg)
        Binary labels (classification) or continuous values (regression).
    target_labels : ndarray of object, shape (ntarg,)
        Human-readable name of each target.

    Raises
    ------
    ValueError
        If y_type is not one of the two recognised modes.
    """
    small = np.cbrt(np.finfo(float).tiny)  # tiny positive guard against /0

    les_nnode = les_vtk.number_of_points

    # Kronecker delta: one 3x3 identity per node.
    delij = np.zeros([les_nnode, 3, 3])
    for i in range(0, 3):
        delij[:, i, i] = 1.0

    # Wrap vista object in dsa wrapper
    les_dsa = dsa.WrapDataObject(les_vtk)

    if (y_type == 'classification'):
        ntarg = 5
        y_targ = np.zeros([les_nnode, ntarg], dtype=int)
        print('Classifier targets:')
    elif (y_type == 'regression'):
        ntarg = 2
        y_targ = np.zeros([les_nnode, ntarg], dtype=float)
        print('regressor targets:')
    else:
        # Previously an unrecognised y_type fell through to a NameError on
        # ntarg; fail loudly and clearly instead.
        raise ValueError(
            "y_type must be 'classification' or 'regression', got %r"
            % (y_type,))

    y_raw = np.zeros([les_nnode, ntarg])
    target_labels = np.empty(ntarg, dtype='object')
    targ = 0

    # Copy Reynolds stresses into a symmetric 3x3 tensor per node.
    uiuj = np.zeros([les_nnode, 3, 3])
    uiuj[:, 0, 0] = les_dsa.PointData['uu']
    uiuj[:, 1, 1] = les_dsa.PointData['vv']
    uiuj[:, 2, 2] = les_dsa.PointData['ww']
    uiuj[:, 0, 1] = les_dsa.PointData['uv']
    uiuj[:, 0, 2] = les_dsa.PointData['uw']
    uiuj[:, 1, 2] = les_dsa.PointData['vw']
    uiuj[:, 1, 0] = uiuj[:, 0, 1]
    uiuj[:, 2, 0] = uiuj[:, 0, 2]
    uiuj[:, 2, 1] = uiuj[:, 1, 2]

    # Resolved TKE (half the trace of the stress tensor).
    tke = 0.5 * (uiuj[:, 0, 0] + uiuj[:, 1, 1] + uiuj[:, 2, 2])

    # Velocity vector
    U = algs.make_vector(les_dsa.PointData['U'], les_dsa.PointData['V'],
                         les_dsa.PointData['W'])

    # Velocity gradient tensor and its transpose
    # J[:,i-1,j-1] is dUidxj
    # Jt[:,i-1,j-1] is dUjdxi
    Jt = algs.gradient(U)  # Jt is this one as algs uses j,i ordering
    J = algs.apply_dfunc(np.transpose, Jt, (0, 2, 1))

    # Strain tensor. (The vorticity tensor was only used by the disabled
    # CEVM non-linearity target, which has been removed.)
    Sij = 0.5 * (J + Jt)

    # Anisotropy tensor aij = uiuj/(2k) - delta_ij/3 and its eigenvalues.
    aij = copy.deepcopy(Sij) * 0.0
    for i in range(0, 3):
        for j in range(0, 3):
            aij[:, i, j] = (uiuj[:, i, j] / (2.0 * tke + small)
                            - delij[:, i, j] / 3.0)

    # Get eigenvalues of aij
    eig = algs.eigenvalue(aij)
    eig1 = eig[:, 0]
    eig2 = eig[:, 1]
    eig3 = eig[:, 2]

    # Get coords on barycentric triangle from eigenvalues
    xc = [1.0, 0.0, 0.5]  # x coords of the triangle corners
    yc = [0.0, 0.0, np.cos(np.pi / 6.0)]  # y coords of the triangle corners
    C1c = eig1 - eig2
    C2c = 2 * (eig2 - eig3)
    C3c = 3 * eig3 + 1
    x0 = C1c * xc[0] + C2c * xc[1] + C3c * xc[2]
    y0 = C1c * yc[0] + C2c * yc[1] + C3c * yc[2]

    # BUG FIX: this branch previously tested y_type == 'Classification'
    # (capital C) and therefore never matched the 'classification' mode
    # accepted above, leaving every classifier target at zero.
    if (y_type == 'classification'):
        # Target 1: Negative eddy viscosity
        #########################################
        print('1: Negative eddy viscosity')
        A = np.zeros(les_nnode)
        B = np.zeros(les_nnode)

        for i in range(0, 3):
            for j in range(0, 3):
                A += -uiuj[:, i, j] * Sij[:, i, j] + (
                    2.0 / 3.0) * tke * delij[:, i, j] * Sij[:, i, j]
                B += 2.0 * Sij[:, i, j] * Sij[:, i, j]

        Str = algs.sqrt(B)  # magnitude of Sij strain tensor (used later)
        nu_t = A / (B + small)
        nu_t = nu_t / (Us * Ls)  # non-dimensionalise
        y_raw[:, targ] = nu_t

        index = algs.where(nu_t < 0.0)
        y_targ[index, targ] = 1
        target_labels[targ] = 'Negative eddy viscosity'
        targ += 1

        # Target 2: Deviation from plane shear
        #################################################
        print('2: Deviation from plane shear turbulence')
        # Distance from the plane-shear line on the barycentric triangle
        # (point-to-line distance formula through p1 and p2).
        p1 = (1 / 3, 0)
        p2 = (0.5, np.sqrt(3) / 2)
        dist = abs((p2[1] - p1[1]) * x0 -
                   (p2[0] - p1[0]) * y0 + p2[0] * p1[1] -
                   p2[1] * p1[0]) / np.sqrt((p2[1] - p1[1])**2 +
                                            (p2[0] - p1[0])**2)
        y_raw[:, targ] = dist
        index = algs.where(dist > 0.25)

        y_targ[index, targ] = 1
        # TYPO FIX: label previously read 'plane shar turbulence'.
        target_labels[targ] = 'Deviation from plane shear turbulence'
        targ += 1

        # Target 3: Anisotropy of turbulence
        ##########################################
        print('3: Anisotropy of turbulence')
        Caniso = 1.0 - C3c
        y_raw[:, targ] = Caniso
        index = algs.where(Caniso > 0.5)
        y_targ[index, targ] = 1
        target_labels[targ] = 'Stress anisotropy'
        targ += 1

        # Target 4: Negative Pk
        ############################################
        print('4: Negative Pk')
        A = np.zeros(les_nnode)
        for i in range(0, 3):
            for j in range(0, 3):
                A[:] += (-uiuj[:, i, j] * J[:, i, j])

        A = A * Ls / Us**3  # non-dimensionalise production of k
        y_raw[:, targ] = A
        index = algs.where(A < -0.0005)

        y_targ[index, targ] = 1
        target_labels[targ] = 'Negative Pk'
        targ += 1

        # Target 5: 2-eqn Cmu constant
        ############################################
        print('5: 2-equation Cmu constant')
        # (A dead aij:Sij accumulation loop was removed here; it fed nothing.)
        Cmu = nu_t**2.0 * (Str / (tke + small))**2.0

        y_raw[:, targ] = Cmu
        # Flag nodes where Cmu exceeds the standard 0.09 by more than 10%.
        index = algs.where(Cmu > 1.1 * 0.09)
        y_targ[index, targ] = 1
        target_labels[targ] = 'Cmu != 0.09'
        targ += 1

    elif (y_type == 'regression'):
        # Target 1: Anisotropy of turbulence
        ##########################################
        print('1: Anisotropy of turbulence')
        Caniso = 1.0 - C3c
        y_raw[:, targ] = Caniso
        y_targ[:, targ] = Caniso
        target_labels[targ] = 'Stress anisotropy'
        targ += 1

    return y_raw, y_targ, target_labels
Exemplo n.º 9
0
def make_features(rans_vtk, Ls=1, Us=1, ros=1, nondim='local'):
    """Compute a 15-component feature vector q at every node of a RANS solution.

    Parameters
    ----------
    rans_vtk : wrapped VTK/vista dataset
        Must provide point-data arrays 'U', 'k', 'mu_l', 'mu_t', 'ro',
        'p', 'w', 'd'.
        NOTE(review): 'U' is indexed as U[:, i] below, so it appears to be
        the full velocity vector here -- confirm against the caller.
    Ls, Us, ros : float, optional
        Reference length, velocity and density scales (used when
        nondim == 'global').
    nondim : {'local', 'global'}, optional
        Choice of non-dimensionalisation for features 4, 8 and 12.

    Returns
    -------
    q : ndarray, shape (nnode, 15)
        Feature values per node.
    feature_labels : ndarray of object, shape (15,)
        Human-readable name of each feature.
    """
    from cfd2ml.utilities import build_cevm

    # Tiny positive number used to guard every division against zero.
    small = np.cbrt(np.finfo(float).tiny)
    # Reference dynamic pressure, used for 'global' non-dimensionalisation.
    Ps = 0.5 * ros * Us**2

    rans_nnode = rans_vtk.number_of_points

    # Kronecker delta: one 3x3 identity per node.
    delij = np.zeros([rans_nnode, 3, 3])
    for i in range(0, 3):
        delij[:, i, i] = 1.0

    # Wrap vista object in dsa wrapper
    rans_dsa = dsa.WrapDataObject(rans_vtk)

    print('Feature:')
    nfeat = 15
    feat = 0
    q = np.empty([rans_nnode, nfeat])
    feature_labels = np.empty(nfeat, dtype='object')

    # Feature 1: non-dim Q-criterion
    ################################
    print('1: non-dim Q-criterion...')
    # Velocity vector
    U = rans_dsa.PointData[
        'U']  # NOTE - Getting variables from dsa obj not vtk obj as want to use algs etc later

    # Velocity gradient tensor and its transpose
    # J[:,i-1,j-1] is dUidxj
    # Jt[:,i-1,j-1] is dUjdxi
    Jt = algs.gradient(U)  # Jt is this one as algs uses j,i ordering
    J = algs.apply_dfunc(np.transpose, Jt, (0, 2, 1))

    # Strain and vorticity tensors
    Sij = 0.5 * (J + Jt)
    Oij = 0.5 * (J - Jt)

    # Frob. norm of Sij and Oij  (Snorm and Onorm are actually S^2 and O^2, sqrt needed to get norms)
    Snorm = algs.sum(2.0 * Sij**2, axis=1)  # sum i axis
    Snorm = algs.sum(Snorm,
                     axis=1)  # sum previous summations i.e. along j axis
    Onorm = algs.sum(2.0 * Oij**2, axis=1)  # sum i axis
    Onorm = algs.sum(Onorm,
                     axis=1)  # sum previous summations i.e. along j axis

    # Store q1
    q[:, feat] = (Onorm - 0.5 * Snorm) / (Onorm + 0.5 * Snorm + small)
    feature_labels[feat] = 'Normalised strain'
    feat += 1

    # clean up
    Snorm = algs.sqrt(Snorm)  #revert to real Snorm for use later
    Onorm = algs.sqrt(Onorm)  #revert to real Onorm for use later

    # Feature 2: Turbulence intensity
    #################################
    print('2: Turbulence intensity')
    tke = rans_dsa.PointData['k']
    UiUi = algs.mag(U)**2.0
    #    q[:,feat] = tke/(0.5*UiUi+tke+small)
    q[:, feat] = tke / (0.5 * UiUi + small)
    feature_labels[feat] = 'Turbulence intensity'
    feat += 1

    # Feature 3: Turbulence Reynolds number
    #######################################
    print('3: Turbulence Reynolds number')
    nu = rans_dsa.PointData['mu_l'] / rans_dsa.PointData['ro']
    Red = (algs.sqrt(tke) * rans_dsa.PointData['d']) / (50.0 * nu)
    # Cap the feature at 2 so extreme near-wall values don't dominate.
    q[:, feat] = algs.apply_dfunc(np.minimum, Red, 2.0)
    #Red = 0.09**0.25*algs.sqrt(tke)*rans_dsa.PointData['d']/nu
    #q[:,feat] = algs.apply_dfunc(np.minimum, Red, 100.0)
    feature_labels[feat] = 'Turbulence Re'
    feat += 1

    # Feature 4: Pressure gradient along streamline
    ###############################################
    print('4: Stream-wise pressure gradient')
    A = np.zeros(rans_nnode)
    B = np.zeros(rans_nnode)

    dpdx = algs.gradient(rans_dsa.PointData['p'])
    ro = rans_dsa.PointData['ro']
    Umag = algs.mag(U)

    # A = U . grad(p), the pressure gradient along the flow direction.
    for k in range(0, 3):
        A += U[:, k] * dpdx[:, k]

    if nondim == 'global':
        A = A / Umag
        q[:, feat] = A * Ls / Ps
    elif nondim == 'local':
        for i in range(0, 3):
            for j in range(0, 3):
                B += U[:, i] * U[:, i] * dpdx[:, j] * dpdx[:, j]
        q[:, feat] = A / (algs.sqrt(B) + algs.abs(A) + small)

    feature_labels[feat] = 'Stream-wise Pgrad'
    feat += 1

    # Feature 5: Ratio of turb time scale to mean strain time scale
    ###############################################################
    print('5: Ratio of turb time scale to mean strain time scale')
    #    A = 1.0/rans_dsa.PointData['w']  #Turbulent time scale (eps = k*w therefore also A = k/eps)
    #    B = 1.0/Snorm
    #    q[:,feat] = A/(A+B+small)
    q[:, feat] = Snorm / (rans_dsa.PointData['w'] + small)
    feature_labels[feat] = 'turb/strain time-scale'
    feat += 1

    # Feature 6: Viscosity ratio
    ############################
    print('6: Viscosity ratio')
    nu_t = rans_dsa.PointData['mu_t'] / ro
    #    q[:,feat] = nu_t/(100.0*nu + nu_t)
    q[:, feat] = nu_t / (nu + small)
    feature_labels[feat] = 'Viscosity ratio'
    feat += 1

    # Feature 7: Vortex stretching
    ##############################
    print('7: Vortex stretching')
    A = np.zeros(rans_nnode)
    B = np.zeros(rans_nnode)

    vortvec = algs.vorticity(U)

    # A = |omega_j dUi/dxj|^2 summed over components (vortex stretching term).
    for j in range(0, 3):
        for i in range(0, 3):
            for k in range(0, 3):
                A += vortvec[:, j] * J[:, i, j] * vortvec[:, k] * J[:, i, k]

    B = Snorm

    #    q[:,feat] = algs.sqrt(A)/(algs.sqrt(A)+B+small)
    q[:, feat] = algs.sqrt(A)  #/(algs.sqrt(A)+B+small)
    feature_labels[feat] = 'Vortex stretching'
    feat += 1

    # Feature 8: Marker of Gorle et al. (deviation from parallel shear flow)
    ########################################################################
    print('8: Marker of Gorle et al. (deviation from parallel shear flow)')
    if nondim == 'global':
        g = np.zeros([rans_nnode, 3])
        m = np.zeros(rans_nnode)
        s = U / Umag
        for j in range(3):
            for i in range(3):
                g[:, j] += s[:, i] * J[:, i, j]
            m += g[:, j] * s[:, j]
        m = np.abs(m)
        q[:, feat] = m * Ls / Us
    elif nondim == 'local':
        A = np.zeros(rans_nnode)
        B = np.zeros(rans_nnode)
        for i in range(0, 3):
            for j in range(0, 3):
                A += U[:, i] * U[:, j] * J[:, i, j]
        for n in range(0, 3):
            for i in range(0, 3):
                for j in range(0, 3):
                    for m in range(0, 3):
                        B += U[:, n] * U[:, n] * U[:,
                                                   i] * J[:, i,
                                                          j] * U[:,
                                                                 m] * J[:, m,
                                                                        j]
        q[:, feat] = algs.abs(A) / (algs.sqrt(B) + algs.abs(A) + small)
    feature_labels[feat] = 'Deviation from parallel shear'
    feat += 1

    # Feature 9: Ratio of convection to production of k
    ####################################################
    print('9: Ratio of convection to production of k')
    # Boussinesq-modelled Reynolds stresses.
    # NOTE(review): tke is per-node (n,) while delij is (n,3,3); this relies
    # on dsa/numpy broadcasting of the product -- confirm the array shapes.
    uiuj = (2.0 / 3.0) * tke * delij - 2.0 * nu_t * Sij
    dkdx = algs.gradient(tke)
    A = np.zeros(rans_nnode)
    B = np.zeros(rans_nnode)
    for i in range(0, 3):
        A += U[:, i] * dkdx[:, i]
    for j in range(0, 3):
        for l in range(0, 3):
            B += uiuj[:, j, l] * Sij[:, j, l]
    q[:, feat] = A / (algs.abs(B) + small)
    feature_labels[feat] = 'Convection/production of k'
    feat += 1

    # Feature 10: Ratio of total Reynolds stresses to normal Reynolds stresses
    ##########################################################################
    print('10: Ratio of total Reynolds stresses to normal Reynolds stresses')
    # Frob. norm of uiuj
    A = algs.sum(uiuj**2, axis=1)  # sum i axis
    A = algs.sum(A, axis=1)  # sum previous summations i.e. along j axis
    A = algs.sqrt(A)
    B = tke
    q[:, feat] = A / (B + small)
    feature_labels[feat] = 'total/normal stresses'
    feat += 1

    # Feature 11: Cubic eddy viscosity comparision
    ##############################################
    print('11: Cubic eddy viscosity comparision')

    # Add quadratic and cubic terms to linear evm
    cevm_2nd, cevm_3rd = build_cevm(Sij, Oij)
    uiujSij = np.zeros(rans_nnode)
    for i in range(0, 3):
        for j in range(0, 3):
            uiujSij += uiuj[:, i, j] * Sij[:, i, j]
    uiujcevmSij = uiujSij + (cevm_2nd / tke) * nu_t**2.0 + (
        cevm_3rd / tke**2.0) * nu_t**3.0
    # Normalised difference between the cubic and linear model contractions.
    q[:, feat] = (uiujcevmSij -
                  uiujSij) / (0.5 *
                              (np.abs(uiujcevmSij) + np.abs(uiujSij)) + small)
    feature_labels[feat] = 'CEV comparison'
    feat += 1

    # Feature 12: Streamline normal pressure gradient
    #################################################
    print('12: Stream-normal pressure gradient')
    # |U x grad(p)| isolates the pressure gradient normal to the streamline.
    A = algs.cross(U, dpdx)
    A = np.sqrt(A[:, 0]**2 + A[:, 1]**2 + A[:, 2]**2)

    if nondim == 'global':
        A = A / Umag
        q[:, feat] = A * Ls / Ps
    elif nondim == 'local':
        B = np.zeros(rans_nnode)
        for i in range(0, 3):
            for j in range(0, 3):
                B += U[:, i] * U[:, i] * dpdx[:, j] * dpdx[:, j]
        q[:, feat] = A / (A + algs.sqrt(B) + small)

    feature_labels[feat] = 'Stream-normal Pgrad'
    feat += 1

    # Feature 13: Streamline curvature
    ##################################
    print('13: Streamline curvature')
    #    A = np.zeros([rans_nnode,3])
    #
    #    # Gradient of Gamma
    #    Gamma = U#/algs.mag(U)
    #    dGammadx = algs.gradient(Gamma)
    #
    #    for i in range(0,3):
    #        for j in range(0,3):
    #            A[:,i] += U[:,j]*dGammadx[:,j,i]
    #    A = algs.mag(A/algs.mag(U)*algs.mag(U))
    #
    #    q[:,feat] = A
    #    feature_labels[feat] = 'Streamline curvature'
    #    feat += 1
    D2 = 0.5 * (Snorm**2 + Onorm**2)
    #    cr1 = 1.0
    #    cr2 = 12.0
    #    cr3 = 1.0
    cr2 = 12
    cr3 = 1 / np.pi
    rstar = Snorm / (Onorm + small)

    # Material derivative of Sij, assembled column by column.
    dSijdx1 = algs.gradient(Sij[:, :, 0])
    dSijdx2 = algs.gradient(Sij[:, :, 1])
    dSijdx3 = algs.gradient(Sij[:, :, 2])

    DSijDt = np.zeros([rans_nnode, 3, 3])
    for i in range(3):
        for j in range(3):
            DSijDt[:, i,
                   j] = U[:,
                          0] * dSijdx1[:, j,
                                       i] + U[:,
                                              1] * dSijdx2[:, j,
                                                           i] + U[:,
                                                                  2] * dSijdx3[:,
                                                                               j,
                                                                               i]

    rhat = np.zeros(rans_nnode)
    for i in range(3):
        for j in range(3):
            for k in range(3):
                rhat += (2 * Oij[:, i, k] * Sij[:, j, k] /
                         D2**2) * DSijDt[:, i, j]


#    fr1 = -((2*rstar)/(1+rstar))*(cr3*algs.arctan(cr2*rhat))
#    fr1 = ( (1+cr1)*((2*rstar)/(1+rstar))*(1-cr3*algs.arctan(cr2*rhat)) ) - cr1
    # Map rhat smoothly onto (-1, 1); cr2/cr3/rstar feed the commented-out
    # fr1 rotation-correction forms above and are currently unused.
    rhathat = algs.arctan(0.25 * rhat) * 2 / np.pi
    q[:, feat] = rhathat  #fr1
    feature_labels[feat] = 'Streamline curvature'
    feat += 1

    # Feature 14: Anisotropy of pressure hessian
    ############################################
    print('14: Anisotropy of pressure hessian')
    # Calculate pressure hessian
    Hij = algs.gradient(dpdx)
    Hij = algs.apply_dfunc(np.transpose, Hij, (0, 2, 1))
    aniso = np.zeros(rans_nnode)
    iso = np.zeros(rans_nnode)
    # Frob. norm of Hij
    for i in range(3):
        for j in range(3):
            aniso += (Hij[:, i, j] - Hij[:, i, j] * delij[:, i, j])**2
        iso += Hij[:, i, i]**2

    aniso = np.sqrt(aniso)
    iso = np.sqrt(iso)
    q[:, feat] = (aniso) / (iso + small)

    feature_labels[feat] = 'Anisotropy of pressure hessian'
    feat += 1

    # Feature 15: White noise
    #########################
    # Random control feature: any model weighting this above real features
    # is fitting noise.
    print('15: White noise')
    q[:, feat] = np.random.uniform(low=-1.0, high=1.0, size=rans_nnode)
    feature_labels[feat] = 'White noise'
    feat += 1

    return q, feature_labels
Exemplo n.º 10
0
# BUG FIX: sys.exit() was called below without importing sys, which would
# raise a NameError on the numpy-missing path instead of exiting cleanly
# (compare the sibling version of this test, which imports sys first).
import sys

try:
    import numpy
except ImportError:
    print("Numpy (http://numpy.scipy.org) not found.")
    print("This test requires numpy!")
    sys.exit(0)

import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.numpy_interface import algorithms as algs

# Synthetic image source providing the point-data 'RTData' array.
rt = vtk.vtkRTAnalyticSource()

# Reference: VTK's own point-to-cell averaging.
p2c = vtk.vtkPointDataToCellData()
p2c.SetInputConnection(rt.GetOutputPort())
p2c.Update()

d1 = dsa.WrapDataObject(p2c.GetOutput())

# Candidate: VTK-m accelerated point-to-cell averaging on the same input.
vtkm_p2c = vtk.vtkmAverageToCells()
vtkm_p2c.SetInputData(rt.GetOutput())
vtkm_p2c.SetInputArrayToProcess(0, 0, 0,
                                vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS,
                                "RTData")
vtkm_p2c.Update()

d2 = dsa.WrapDataObject(vtkm_p2c.GetOutput())

# Both implementations should produce (nearly) identical cell data.
assert (algs.max(algs.abs(d1.CellData['RTData'] - d2.CellData['RTData'])) <
        10E-4)
Exemplo n.º 11
0
def compare(arr, tol):
    """Fail unless every element of *arr* lies strictly within +/- *tol*."""
    within_tol = algs.abs(arr) < tol
    assert algs.all(within_tol)
Exemplo n.º 12
0
import sys

# Skip this test cleanly when numpy is unavailable -- it is a hard
# requirement of the numpy_interface modules used below.
try:
    import numpy
except ImportError:
    print("Numpy (http://numpy.scipy.org) not found.")
    print("This test requires numpy!")
    from vtk.test import Testing
    Testing.skip()

import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.numpy_interface import algorithms as algs

# Synthetic image source providing the point-data 'RTData' array.
rt = vtk.vtkRTAnalyticSource()

# Reference: VTK's own point-to-cell averaging.
p2c = vtk.vtkPointDataToCellData()
p2c.SetInputConnection(rt.GetOutputPort())
p2c.Update()

d1 = dsa.WrapDataObject(p2c.GetOutput())

# Candidate: VTK-m accelerated point-to-cell averaging on the same input.
vtkm_p2c = vtk.vtkmAverageToCells()
vtkm_p2c.SetInputData(rt.GetOutput())
vtkm_p2c.SetInputArrayToProcess(0, 0, 0, vtk.vtkDataObject.FIELD_ASSOCIATION_POINTS, "RTData")
vtkm_p2c.Update()

d2 = dsa.WrapDataObject(vtkm_p2c.GetOutput())

# Both implementations should produce (nearly) identical cell data.
assert (algs.max(algs.abs(d1.CellData['RTData'] - d2.CellData['RTData'])) < 10E-4)
Exemplo n.º 13
0
# Filter that takes the absolute difference between two sets of Q.
# It works by loading testeA and testeB.
# QW is the variable.
# It may also be Q; in that case the Calculator filter is used to create
# Q/volume, giving Q in Watts.
# The absolute value makes the differences between the Q fields visible.
#
# NOTE(review): `inputs` and `output` are not defined in this snippet --
# presumably they are the implicit globals of a ParaView Programmable
# Filter; confirm before running standalone.

import vtk
import vtk.numpy_interface.dataset_adapter as dsa
import vtk.numpy_interface.algorithms as algs
Q1=inputs[0].CellData['QW']
Q2=inputs[1].CellData['QW']
K=algs.abs(Q1-Q2)
output.CellData.append(K,'QWdiff')
#T1=inputs[0].CellData['T']
#T2=inputs[1].CellData['T']
#output.CellData.append(T2-T1,'Tdiff')