Example #1
0
 def get_range(self, attr='scalars', mode='point'):
     """Return the (array name, [min, max]) range of the requested attribute.

     attr selects 'scalars' or 'vectors'; mode selects 'point' or 'cell'
     data. Returns (None, [0.0, 1.0]) when no matching array exists.
     """
     assert mode in ('point', 'cell')
     assert attr in ('scalars', 'vectors')
     ds = self.dataset
     attributes = ds.PointData if mode == 'point' else ds.CellData
     found = self._get_attr(attributes, attr, mode)
     if found is None:
         return None, [0.0, 1.0]
     name, arr = found

     if self._composite:
         # NaNs are not handled for composite data for now.
         if isinstance(arr, dsa.VTKNoneArray):
             return name, [0.0, 1.0]
         if attr == 'scalars':
             return name, [algs.min(arr), algs.max(arr)]
         # Vector magnitude range: [0, max ||v||].
         return name, [0.0, np.sqrt(algs.max(algs.sum(arr * arr, axis=1)))]

     # Non-composite: fall back to numpy's NaN-aware reductions when NaNs
     # are present, otherwise use the cheaper VTK range queries.
     has_nan = np.isnan(arr).any()
     if attr == 'scalars':
         rng = ([float(np.nanmin(arr)), float(np.nanmax(arr))]
                if has_nan else list(arr.GetRange()))
     elif has_nan:
         mag = np.sqrt((arr * arr).sum(axis=1))
         rng = [float(np.nanmin(mag)), float(np.nanmax(mag))]
     else:
         rng = [0.0, arr.GetMaxNorm()]
     return name, rng
Example #2
0
 def get_range(self, attr='scalars', mode='point'):
     """Compute the scalar range or vector-magnitude range of an attribute.

     Returns a (name, [low, high]) pair, or (None, [0.0, 1.0]) when the
     requested array is absent from the point/cell data.
     """
     assert mode in ('point', 'cell')
     assert attr in ('scalars', 'vectors')
     data = self.dataset.PointData if mode == 'point' else self.dataset.CellData
     located = self._get_attr(data, attr, mode)
     if located is None:
         return None, [0.0, 1.0]
     name, values = located
     if self._composite:
         # Composite path ignores NaNs for now.
         if isinstance(values, dsa.VTKNoneArray):
             res = [0.0, 1.0]
         else:
             if attr == 'scalars':
                 res = [algs.min(values), algs.max(values)]
             else:
                 sq = algs.sum(values * values, axis=1)
                 res = [0.0, np.sqrt(algs.max(sq))]
     else:
         if np.isnan(values).any():
             # NaN-aware reductions via numpy; vectors reduce on magnitude.
             target = (values if attr == 'scalars'
                       else np.sqrt((values * values).sum(axis=1)))
             res = [float(np.nanmin(target)), float(np.nanmax(target))]
         elif attr == 'scalars':
             res = list(values.GetRange())
         else:
             res = [0.0, values.GetMaxNorm()]
     return name, res
Example #3
0
def testArrays(rtData, rtData2, grad, grad2, total_npts):
    """Test various parallel algorithms.

    Each PRINT line reports the (relative) difference between a distributed
    ``algs`` reduction and its serial ``numpy`` reference.
    """
    if rank == 0:
        print('-----------------------')
    PRINT("SUM ones:", algs.sum(rtData / rtData) - total_npts)

    # Serial references are pure numpy; compute each once and reuse.
    sin_ref = numpy.sum(numpy.sin(rtData2) + 1)
    PRINT("SUM sin:", (algs.sum(algs.sin(rtData) + 1) - sin_ref) / sin_ref)

    PRINT("rtData min:", algs.min(rtData) - numpy.min(rtData2))
    PRINT("rtData max:", algs.max(rtData) - numpy.max(rtData2))
    sum_ref = numpy.sum(rtData2)
    PRINT("rtData sum:", (algs.sum(rtData) - sum_ref) / (2 * sum_ref))
    mean_ref = numpy.mean(rtData2)
    PRINT("rtData mean:", (algs.mean(rtData) - mean_ref) / (2 * mean_ref))
    var_ref = numpy.var(rtData2)
    PRINT("rtData var:", (algs.var(rtData) - var_ref) / var_ref)
    std_ref = numpy.std(rtData2)
    PRINT("rtData std:", (algs.std(rtData) - std_ref) / std_ref)

    PRINT("grad min:", algs.min(grad) - numpy.min(grad2))
    PRINT("grad max:", algs.max(grad) - numpy.max(grad2))
    PRINT("grad min 0:", algs.min(grad, 0) - numpy.min(grad2, 0))
    PRINT("grad max 0:", algs.max(grad, 0) - numpy.max(grad2, 0))
    PRINT("grad min 1:",
          algs.sum(algs.min(grad, 1)) - numpy.sum(numpy.min(grad2, 1)))
    PRINT("grad max 1:",
          algs.sum(algs.max(grad, 1)) - numpy.sum(numpy.max(grad2, 1)))
    PRINT("grad sum 1:",
          algs.sum(algs.sum(grad, 1)) - numpy.sum(numpy.sum(grad2, 1)))
    PRINT("grad var:", (algs.var(grad) - numpy.var(grad2)) / numpy.var(grad2))
    PRINT("grad var 0:",
          (algs.var(grad, 0) - numpy.var(grad2, 0)) / numpy.var(grad2, 0))
Example #4
0
def testArrays(rtData, rtData2, grad, grad2, total_npts):
    """Test various parallel algorithms.

    Each PRINT line reports the (relative) difference between a distributed
    ``algs`` reduction and its serial ``numpy`` reference; values near zero
    indicate agreement across ranks.
    """
    if rank == 0:
        # Fixed: this was a Python 2 `print "..."` statement, which is a
        # SyntaxError under Python 3; use the print() function instead.
        print('-----------------------')
    PRINT("SUM ones:", algs.sum(rtData / rtData) - total_npts)

    PRINT(
        "SUM sin:",
        (algs.sum(algs.sin(rtData) + 1) - numpy.sum(numpy.sin(rtData2) + 1)) / numpy.sum(numpy.sin(rtData2) + 1),
    )

    PRINT("rtData min:", algs.min(rtData) - numpy.min(rtData2))
    PRINT("rtData max:", algs.max(rtData) - numpy.max(rtData2))
    PRINT("rtData sum:", (algs.sum(rtData) - numpy.sum(rtData2)) / (2 * numpy.sum(rtData2)))
    PRINT("rtData mean:", (algs.mean(rtData) - numpy.mean(rtData2)) / (2 * numpy.mean(rtData2)))
    PRINT("rtData var:", (algs.var(rtData) - numpy.var(rtData2)) / numpy.var(rtData2))
    PRINT("rtData std:", (algs.std(rtData) - numpy.std(rtData2)) / numpy.std(rtData2))

    PRINT("grad min:", algs.min(grad) - numpy.min(grad2))
    PRINT("grad max:", algs.max(grad) - numpy.max(grad2))
    PRINT("grad min 0:", algs.min(grad, 0) - numpy.min(grad2, 0))
    PRINT("grad max 0:", algs.max(grad, 0) - numpy.max(grad2, 0))
    PRINT("grad min 1:", algs.sum(algs.min(grad, 1)) - numpy.sum(numpy.min(grad2, 1)))
    PRINT("grad max 1:", algs.sum(algs.max(grad, 1)) - numpy.sum(numpy.max(grad2, 1)))
    PRINT("grad sum 1:", algs.sum(algs.sum(grad, 1)) - numpy.sum(numpy.sum(grad2, 1)))
    PRINT("grad var:", (algs.var(grad) - numpy.var(grad2)) / numpy.var(grad2))
    PRINT("grad var 0:", (algs.var(grad, 0) - numpy.var(grad2, 0)) / numpy.var(grad2, 0))
Example #5
0
w2.Update()

# Re-wrap the filter output and recompute the serial reference arrays.
# NOTE(review): w2 appears to be the serial reference source used against
# the distributed data `rtData`/`grad` — confirm against the full file.
ds2 = dsa.WrapDataObject(w2.GetOutput())
rtData2 = ds2.PointData['RTData']
grad2 = algs.gradient(rtData2)

# Total point count across all MPI ranks (sum of per-rank local counts).
npts = numpy.array(numpy.int32(ds.GetNumberOfPoints()))
total_npts = numpy.array(npts)
MPI.COMM_WORLD.Allreduce([npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)

# Test simple distributed data.
testArrays(rtData, rtData2, grad, grad2, total_npts)

# Check that we can disable parallelism by using a dummy controller
# even when a global controller is set
assert algs.sum(rtData / rtData,
                controller=vtk.vtkDummyController()) != total_npts

# Test where arrays are NoneArray on one of the ranks.
if size > 1:
    if rank == 0:
        rtData3 = rtData2
        grad3 = grad2
    else:
        rtData3 = dsa.NoneArray
        grad3 = dsa.NoneArray

    testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test composite arrays
rtData3 = dsa.VTKCompositeDataArray([rtData, dsa.NoneArray])
grad3 = dsa.VTKCompositeDataArray([dsa.NoneArray, grad])
# NOTE(review): everything below repeats the sequence above verbatim —
# looks like a copy/paste duplication; verify before removing.
w2.Update()

ds2 = dsa.WrapDataObject(w2.GetOutput())
rtData2 = ds2.PointData['RTData']
grad2 = algs.gradient(rtData2)

npts = numpy.array(numpy.int32(ds.GetNumberOfPoints()))
total_npts = numpy.array(npts)
MPI.COMM_WORLD.Allreduce([npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)

# Test simple distributed data.
testArrays(rtData, rtData2, grad, grad2, total_npts)

# Check that we can disable parallelism by using a dummy controller
# even when a global controller is set
assert algs.sum(rtData / rtData, controller=vtk.vtkDummyController()) != total_npts

# Test where arrays are NoneArray on one of the ranks.
if size > 1:
    if rank == 0:
        rtData3 = rtData2
        grad3 = grad2
    else:
        rtData3 = dsa.NoneArray
        grad3 = dsa.NoneArray

    testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test composite arrays
rtData3 = dsa.VTKCompositeDataArray([rtData, dsa.NoneArray])
grad3 = dsa.VTKCompositeDataArray([dsa.NoneArray, grad])
def make_features_inv(rans_vtk, Ls=1, Us=1, ros=1, nondim='local'):
    """Build a (npoints, 50) matrix of tensor-invariant flow features.

    Reads the PointData arrays 'U', 'k', 'p', 'w', 'ro', 'mu_l' and 'd'
    from *rans_vtk*, forms the (non-dimensionalised) strain, vorticity,
    pressure-gradient and tke-gradient tensors, and evaluates traces of
    their matrix products as invariant features.

    Parameters: Ls, Us, ros are global length/velocity/density scales used
    for the additional global non-dimensionalisation; nondim selects the
    local non-dimensionalisation.
    NOTE(review): only nondim == 'local' defines Sij_h/Oij_h/dpdx_h/dkdx_h;
    any other value raises NameError at the global non-dim step below —
    confirm whether a 'global' branch was intended.

    Returns (q, feature_labels): the feature matrix and its label array.
    """
    from cfd2ml.utilities import eijk

    # Tiny positive float used to guard divisions against zero.
    small = np.finfo(float).tiny

    rans_nnode = rans_vtk.number_of_points

    # Wrap vista object in dsa wrapper
    rans_dsa = dsa.WrapDataObject(rans_vtk)

    print('Feature:')
    #    nfeat = 21
    nfeat = 50
    q = np.empty([rans_nnode, nfeat])
    feature_labels = np.empty(nfeat, dtype='object')

    # strain and vorticity
    ##############################
    print('Constructing strain and vorticity tensor')
    # Velocity vector
    U = rans_dsa.PointData[
        'U']  # NOTE - Getting variables from dsa obj not vtk obj as want to use algs etc later

    # Velocity gradient tensor and its transpose
    # J[:,i-1,j-1] is dUidxj
    # Jt[:,i-1,j-1] is dUjdxi
    Jt = algs.gradient(U)  # Jt is this one as algs uses j,i ordering
    J = algs.apply_dfunc(np.transpose, Jt, (0, 2, 1))

    # Strain and vorticity tensors
    Sij = 0.5 * (J + Jt)
    Oij = 0.5 * (J - Jt)

    # Frob. norm of Sij and Oij  (Snorm and Onorm are actually S^2 and O^2, sqrt needed to get norms)
    Snorm = algs.sum(2.0 * Sij**2, axis=1)  # sum i axis
    Snorm = algs.sum(Snorm,
                     axis=1)  # sum previous summations i.e. along j axis
    Onorm = algs.sum(2.0 * Oij**2, axis=1)  # sum i axis
    Onorm = algs.sum(Onorm,
                     axis=1)  # sum previous summations i.e. along j axis
    Snorm = algs.sqrt(Snorm)
    Onorm = algs.sqrt(Onorm)

    ########################################
    # Calculating pressure and tke gradients
    ########################################
    print('Calculating pressure and tke gradients')
    tke = rans_dsa.PointData['k']
    dkdx = algs.gradient(tke)
    dpdx = algs.gradient(rans_dsa.PointData['p'])

    #################################################
    # Non-dim everything here. Either local or global
    #################################################
    if nondim == 'local':
        # Non-dim Sij by eps/k
        w = rans_dsa.PointData['w']  #/0.09
        eps = w * tke
        Sij_h = Sij / w

        # Non-dim Oij by Onorm
        Oij_h = Oij / Onorm

        # Non-dim pressure gradient
        ro = rans_dsa.PointData['ro']
        # Material derivative of U: DU/Dt = (U . grad) U.
        DUDt = U[:, 0] * J[:, :, 0] + U[:, 1] * J[:, :, 1] + U[:, 2] * J[:, :,
                                                                         2]
        dpdx_h = dpdx / ro * algs.mag(DUDt)

        # Non-dim tke gradient
        dkdx_h = dkdx / (eps / algs.sqrt(tke))

    # Global non-dim on top
    Ps = ros * Us**2
    Sij_h = Sij_h / (Us / Ls)
    Oij_h = Oij_h / (Us / Ls)
    dpdx_h = dpdx_h / (Ps / Ls)
    dkdx_h = dkdx_h / (Us**2 / Ls)

    #    q[:,0]  = Sij_h[:,0,0]
    #    q[:,1]  = Sij_h[:,1,1]
    #    q[:,2]  = Sij_h[:,2,2]
    #    q[:,3]  = Sij_h[:,0,1]
    #    q[:,4]  = Sij_h[:,0,2]
    #    q[:,5]  = Sij_h[:,1,2]
    #    q[:,6]  = Oij_h[:,0,0]
    #    q[:,7]  = Oij_h[:,1,1]
    #    q[:,8]  = Oij_h[:,2,2]
    #    q[:,9]  = Oij_h[:,0,1]
    #    q[:,10] = Oij_h[:,0,2]
    #    q[:,11] = Oij_h[:,1,2]
    #    q[:,12] = dpdx_h[:,0]
    #    q[:,13] = dpdx_h[:,1]
    #    q[:,14] = dpdx_h[:,2]
    #    q[:,15] = dkdx_h[:,0]
    #    q[:,16] = dkdx_h[:,1]
    #    q[:,17] = dkdx_h[:,2]
    #    feature_labels[0]  = 'S11'
    #    feature_labels[1]  = 'S22'
    #    feature_labels[2]  = 'S33'
    #    feature_labels[3]  = 'S12'
    #    feature_labels[4]  = 'S13'
    #    feature_labels[5]  = 'S23'
    #    feature_labels[6]  = 'O11'
    #    feature_labels[7]  = 'O22'
    #    feature_labels[8]  = 'O33'
    #    feature_labels[9]  = 'O12'
    #    feature_labels[10] = 'O13'
    #    feature_labels[11] = 'O23'
    #    feature_labels[12] = 'dpdx'
    #    feature_labels[13] = 'dpdy'
    #    feature_labels[14] = 'dpdz'
    #    feature_labels[15] = 'dkdx'
    #    feature_labels[16] = 'dkdy'
    #    feature_labels[17] = 'dkdz'
    #    feat = 18

    # Transform dpdx into ani-symmetric tensor Ap=-I x dpdx
    # eijk is presumably the Levi-Civita permutation symbol — TODO confirm
    # against cfd2ml.utilities.
    Ap = np.zeros([rans_nnode, 3, 3])
    I = np.eye(3)
    for a in range(3):
        for b in range(3):
            for c in range(3):
                for d in range(3):
                    Ap[:, a, b] -= eijk(b, c, d) * I[a, c] * dpdx_h[:, d]

    # Transform dkdx into ani-symmetric tensor Ak=-I x dkdx
    Ak = np.zeros([rans_nnode, 3, 3])
    for a in range(3):
        for b in range(3):
            for c in range(3):
                for d in range(3):
                    Ak[:, a, b] -= eijk(b, c, d) * I[a, c] * dkdx_h[:, d]

    # Construct all invariant bases
    ###############################
    # Use numpy matmul to construct S^2, S^3 etc as we use these alot
    # (matmul can be used as for arrays of dim>2 as "it is treated as a stack of matrices residing in the last two indexes and is broadcast accordingly")
    S = Sij_h
    O = Oij_h
    S2 = np.matmul(S, S)
    S3 = np.matmul(S2, S)
    O2 = np.matmul(O, O)
    Ap2 = np.matmul(Ap, Ap)
    Ak2 = np.matmul(Ak, Ak)

    # Each feature below is the per-point trace of a product of the basis
    # tensors; the label records the product.
    # 1-2
    q[:, 0] = algs.trace(S2)
    feature_labels[0] = 'S2'
    q[:, 1] = algs.trace(S3)
    feature_labels[1] = 'S3'
    # 3-5
    q[:, 2] = algs.trace(O2)
    feature_labels[2] = 'O2'
    q[:, 3] = algs.trace(Ap2)
    feature_labels[3] = 'Ap2'
    q[:, 4] = algs.trace(Ak2)
    feature_labels[4] = 'Ak2'
    # 6-14
    q[:, 5] = algs.trace(np.matmul(O2, S))
    feature_labels[5] = 'O2*S'
    q[:, 6] = algs.trace(np.matmul(O2, S2))
    feature_labels[6] = 'O2*S2'
    q[:, 7] = algs.trace(np.matmul(np.matmul(O2, S), np.matmul(O, S2)))
    feature_labels[7] = 'O2*S*O*S2'
    q[:, 8] = algs.trace(np.matmul(Ap2, S))
    feature_labels[8] = 'Ap2*S'
    q[:, 9] = algs.trace(np.matmul(Ap2, S2))
    feature_labels[9] = 'Ap2*S2'
    q[:, 10] = algs.trace(np.matmul(np.matmul(Ap2, S), np.matmul(Ap, S2)))
    feature_labels[10] = 'Ap2*S*Ap*S2'
    q[:, 11] = algs.trace(np.matmul(Ak2, S))
    feature_labels[11] = 'Ak2*S'
    q[:, 12] = algs.trace(np.matmul(Ak2, S2))
    feature_labels[12] = 'Ak2*S2'
    q[:, 13] = algs.trace(np.matmul(np.matmul(Ak2, S), np.matmul(Ak, S2)))
    feature_labels[13] = 'Ak2*S*Ak*S2'

    # 15-17
    q[:, 14] = algs.trace(np.matmul(O, Ap))
    feature_labels[14] = 'O*Ap'
    q[:, 15] = algs.trace(np.matmul(Ap, Ak))
    feature_labels[15] = 'Ap*Ak'
    q[:, 16] = algs.trace(np.matmul(O, Ak))
    feature_labels[16] = 'O*Ak'

    # 18-41
    q[:, 17] = algs.trace(np.matmul(O, np.matmul(Ap, S)))
    feature_labels[17] = 'O*Ap*S'
    q[:, 18] = algs.trace(np.matmul(O, np.matmul(Ap, S2)))
    feature_labels[18] = 'O*Ap*S2'
    q[:, 19] = algs.trace(np.matmul(O2, np.matmul(Ap, S)))
    feature_labels[19] = 'O2*Ap*S'
    q[:, 20] = algs.trace(np.matmul(Ap2, np.matmul(O, S)))
    feature_labels[20] = 'Ap2*O*S'
    q[:, 21] = algs.trace(np.matmul(O2, np.matmul(Ap, S2)))
    feature_labels[21] = 'O2*Ap*S2'
    q[:, 22] = algs.trace(np.matmul(Ap2, np.matmul(O, S2)))
    feature_labels[22] = 'Ap2*O*S2'
    q[:, 23] = algs.trace(np.matmul(np.matmul(O2, S), np.matmul(Ap, S2)))
    feature_labels[23] = 'O2*S*Ap*S2'
    q[:, 24] = algs.trace(np.matmul(np.matmul(Ap2, S), np.matmul(O, S2)))
    feature_labels[24] = 'Ap2*S*O*S2'

    q[:, 25] = algs.trace(np.matmul(O, np.matmul(Ak, S)))
    feature_labels[25] = 'O*Ak*S'
    q[:, 26] = algs.trace(np.matmul(O, np.matmul(Ak, S2)))
    feature_labels[26] = 'O*Ak*S2'
    q[:, 27] = algs.trace(np.matmul(O2, np.matmul(Ak, S)))
    feature_labels[27] = 'O2*Ak*S'
    q[:, 28] = algs.trace(np.matmul(Ak2, np.matmul(O, S)))
    feature_labels[28] = 'Ak2*O*S'
    q[:, 29] = algs.trace(np.matmul(O2, np.matmul(Ak, S2)))
    feature_labels[29] = 'O2*Ak*S2'
    q[:, 30] = algs.trace(np.matmul(Ak2, np.matmul(O, S2)))
    feature_labels[30] = 'Ak2*O*S2'
    q[:, 31] = algs.trace(np.matmul(np.matmul(O2, S), np.matmul(Ak, S2)))
    feature_labels[31] = 'O2*S*Ak*S2'
    q[:, 32] = algs.trace(np.matmul(np.matmul(Ak2, S), np.matmul(O, S2)))
    feature_labels[32] = 'Ak2*S*O*S2'

    q[:, 33] = algs.trace(np.matmul(Ap, np.matmul(Ak, S)))
    feature_labels[33] = 'Ap*Ak*S'
    q[:, 34] = algs.trace(np.matmul(Ap, np.matmul(Ak, S2)))
    feature_labels[34] = 'Ap*Ak*S2'
    q[:, 35] = algs.trace(np.matmul(Ap2, np.matmul(Ak, S)))
    feature_labels[35] = 'Ap2*Ak*S'
    q[:, 36] = algs.trace(np.matmul(Ak2, np.matmul(Ap, S)))
    feature_labels[36] = 'Ak2*Ap*S'
    q[:, 37] = algs.trace(np.matmul(Ap2, np.matmul(Ak, S2)))
    feature_labels[37] = 'Ap2*Ak*S2'
    q[:, 38] = algs.trace(np.matmul(Ak2, np.matmul(Ap, S2)))
    feature_labels[38] = 'Ak2*Ap*S2'
    q[:, 39] = algs.trace(np.matmul(np.matmul(Ap2, S), np.matmul(Ak, S2)))
    feature_labels[39] = 'Ap2*S*Ak*S2'
    q[:, 40] = algs.trace(np.matmul(np.matmul(Ak2, S), np.matmul(Ap, S2)))
    feature_labels[40] = 'Ak2*S*Ap*S2'

    #    # 42
    q[:, 41] = algs.trace(np.matmul(O, np.matmul(Ap, Ak)))
    feature_labels[41] = 'O*Ap*Ak'

    #    # 43-47
    q[:, 42] = algs.trace(np.matmul(np.matmul(O, Ap), np.matmul(Ak, S)))
    feature_labels[42] = 'O*Ap*Ak*S'
    q[:, 43] = algs.trace(np.matmul(np.matmul(O, Ak), np.matmul(Ap, S)))
    feature_labels[43] = 'O*Ak*Ap*S'
    q[:, 44] = algs.trace(np.matmul(np.matmul(O, Ap), np.matmul(Ak, S2)))
    feature_labels[44] = 'O*Ap*Ak*S2'
    q[:, 45] = algs.trace(np.matmul(np.matmul(O, Ak), np.matmul(Ap, S2)))
    feature_labels[45] = 'O*Ak*Ap*S2'
    q[:, 46] = algs.trace(
        np.matmul(np.matmul(np.matmul(O, Ap), np.matmul(S, Ak)), S2))
    feature_labels[46] = 'O*Ap*S*Ak*S2'
    feat = 47

    # Supplementary features
    ########################
    print('Calculating supplementary features: ')

    # Wall distanced based Re
    print('Turbulence Reynolds number')
    nu = rans_dsa.PointData['mu_l'] / rans_dsa.PointData['ro']
    Red = (algs.sqrt(tke) * rans_dsa.PointData['d']) / (50.0 * nu)
    # Capped at 2.0.
    q[:, feat] = algs.apply_dfunc(np.minimum, Red, 2.0)
    feature_labels[feat] = 'Turbulence Re'
    feat += 1

    # Turbulence intensity
    print('Turbulence intensity')
    UiUi = algs.mag(U)**2.0
    q[:, feat] = tke / (0.5 * UiUi + tke + small)
    feature_labels[feat] = 'Turbulence intensity'
    feat += 1

    # Ratio of turb time scale to mean strain time scale
    print('Ratio of turb time scale to mean strain time scale')
    A = 1.0 / rans_dsa.PointData[
        'w']  #Turbulent time scale (eps = k*w therefore also A = k/eps)
    B = 1.0 / Snorm
    q[:, feat] = A / (A + B + small)
    feature_labels[feat] = 'turb/strain time-scale'
    feat += 1

    return q, feature_labels
def make_features(rans_vtk, Ls=1, Us=1, ros=1, nondim='local'):
    """Build a (npoints, 15) matrix of hand-crafted RANS flow features.

    Reads PointData arrays 'U', 'k', 'p', 'w', 'ro', 'mu_l', 'mu_t', 'd'
    from *rans_vtk* and computes 15 scalar features per point (Q-criterion,
    turbulence intensity, turbulence Re, pressure-gradient markers, etc.).

    Parameters: Ls, Us, ros are global length/velocity/density scales;
    nondim is 'local' or 'global' and selects how several features are
    normalised.

    Returns (q, feature_labels): the feature matrix and its label array.
    """
    from cfd2ml.utilities import build_cevm

    # Small positive constant guarding divisions against zero.
    small = np.cbrt(np.finfo(float).tiny)
    Ps = 0.5 * ros * Us**2

    rans_nnode = rans_vtk.number_of_points

    # Kronecker delta, one 3x3 identity per node.
    delij = np.zeros([rans_nnode, 3, 3])
    for i in range(0, 3):
        delij[:, i, i] = 1.0

    # Wrap vista object in dsa wrapper
    rans_dsa = dsa.WrapDataObject(rans_vtk)

    print('Feature:')
    nfeat = 15
    feat = 0
    q = np.empty([rans_nnode, nfeat])
    feature_labels = np.empty(nfeat, dtype='object')

    # Feature 1: non-dim Q-criterion
    ################################
    print('1: non-dim Q-criterion...')
    # Velocity vector
    U = rans_dsa.PointData[
        'U']  # NOTE - Getting variables from dsa obj not vtk obj as want to use algs etc later

    # Velocity gradient tensor and its transpose
    # J[:,i-1,j-1] is dUidxj
    # Jt[:,i-1,j-1] is dUjdxi
    Jt = algs.gradient(U)  # Jt is this one as algs uses j,i ordering
    J = algs.apply_dfunc(np.transpose, Jt, (0, 2, 1))

    # Strain and vorticity tensors
    Sij = 0.5 * (J + Jt)
    Oij = 0.5 * (J - Jt)

    # Frob. norm of Sij and Oij  (Snorm and Onorm are actually S^2 and O^2, sqrt needed to get norms)
    Snorm = algs.sum(2.0 * Sij**2, axis=1)  # sum i axis
    Snorm = algs.sum(Snorm,
                     axis=1)  # sum previous summations i.e. along j axis
    Onorm = algs.sum(2.0 * Oij**2, axis=1)  # sum i axis
    Onorm = algs.sum(Onorm,
                     axis=1)  # sum previous summations i.e. along j axis

    # Store q1
    q[:, feat] = (Onorm - 0.5 * Snorm) / (Onorm + 0.5 * Snorm + small)
    feature_labels[feat] = 'Normalised strain'
    feat += 1

    # clean up
    Snorm = algs.sqrt(Snorm)  #revert to revert to real Snorm for use later
    Onorm = algs.sqrt(Onorm)  #revert to revert to real Onorm for use later

    # Feature 2: Turbulence intensity
    #################################
    print('2: Turbulence intensity')
    tke = rans_dsa.PointData['k']
    UiUi = algs.mag(U)**2.0
    #    q[:,feat] = tke/(0.5*UiUi+tke+small)
    q[:, feat] = tke / (0.5 * UiUi + small)
    feature_labels[feat] = 'Turbulence intensity'
    feat += 1

    # Feature 3: Turbulence Reynolds number
    #######################################
    print('3: Turbulence Reynolds number')
    nu = rans_dsa.PointData['mu_l'] / rans_dsa.PointData['ro']
    Red = (algs.sqrt(tke) * rans_dsa.PointData['d']) / (50.0 * nu)
    # Capped at 2.0.
    q[:, feat] = algs.apply_dfunc(np.minimum, Red, 2.0)
    #Red = 0.09**0.25*algs.sqrt(tke)*rans_dsa.PointData['d']/nu
    #q[:,feat] = algs.apply_dfunc(np.minimum, Red, 100.0)
    feature_labels[feat] = 'Turbulence Re'
    feat += 1

    # Feature 4: Pressure gradient along streamline
    ###############################################
    print('4: Stream-wise pressure gradient')
    A = np.zeros(rans_nnode)
    B = np.zeros(rans_nnode)

    dpdx = algs.gradient(rans_dsa.PointData['p'])
    ro = rans_dsa.PointData['ro']
    Umag = algs.mag(U)

    # A = U . grad(p)
    for k in range(0, 3):
        A += U[:, k] * dpdx[:, k]

    if nondim == 'global':
        A = A / Umag
        q[:, feat] = A * Ls / Ps
    elif nondim == 'local':
        for i in range(0, 3):
            for j in range(0, 3):
                B += U[:, i] * U[:, i] * dpdx[:, j] * dpdx[:, j]
        q[:, feat] = A / (algs.sqrt(B) + algs.abs(A) + small)

    feature_labels[feat] = 'Stream-wise Pgrad'
    feat += 1

    # Feature 5: Ratio of turb time scale to mean strain time scale
    ###############################################################
    print('5: Ratio of turb time scale to mean strain time scale')
    #    A = 1.0/rans_dsa.PointData['w']  #Turbulent time scale (eps = k*w therefore also A = k/eps)
    #    B = 1.0/Snorm
    #    q[:,feat] = A/(A+B+small)
    q[:, feat] = Snorm / (rans_dsa.PointData['w'] + small)
    feature_labels[feat] = 'turb/strain time-scale'
    feat += 1

    # Feature 6: Viscosity ratio
    ############################
    print('6: Viscosity ratio')
    nu_t = rans_dsa.PointData['mu_t'] / ro
    #    q[:,feat] = nu_t/(100.0*nu + nu_t)
    q[:, feat] = nu_t / (nu + small)
    feature_labels[feat] = 'Viscosity ratio'
    feat += 1

    # Feature 7: Vortex stretching
    ##############################
    print('7: Vortex stretching')
    A = np.zeros(rans_nnode)
    B = np.zeros(rans_nnode)

    vortvec = algs.vorticity(U)

    for j in range(0, 3):
        for i in range(0, 3):
            for k in range(0, 3):
                A += vortvec[:, j] * J[:, i, j] * vortvec[:, k] * J[:, i, k]

    B = Snorm

    #    q[:,feat] = algs.sqrt(A)/(algs.sqrt(A)+B+small)
    q[:, feat] = algs.sqrt(A)  #/(algs.sqrt(A)+B+small)
    feature_labels[feat] = 'Vortex stretching'
    feat += 1

    # Feature 8: Marker of Gorle et al. (deviation from parallel shear flow)
    ########################################################################
    print('8: Marker of Gorle et al. (deviation from parallel shear flow)')
    if nondim == 'global':
        g = np.zeros([rans_nnode, 3])
        m = np.zeros(rans_nnode)
        s = U / Umag
        for j in range(3):
            for i in range(3):
                g[:, j] += s[:, i] * J[:, i, j]
            m += g[:, j] * s[:, j]
        m = np.abs(m)
        q[:, feat] = m * Ls / Us
    elif nondim == 'local':
        A = np.zeros(rans_nnode)
        B = np.zeros(rans_nnode)
        for i in range(0, 3):
            for j in range(0, 3):
                A += U[:, i] * U[:, j] * J[:, i, j]
        for n in range(0, 3):
            for i in range(0, 3):
                for j in range(0, 3):
                    for m in range(0, 3):
                        B += U[:, n] * U[:, n] * U[:,
                                                   i] * J[:, i,
                                                          j] * U[:,
                                                                 m] * J[:, m,
                                                                        j]
        q[:, feat] = algs.abs(A) / (algs.sqrt(B) + algs.abs(A) + small)
    feature_labels[feat] = 'Deviation from parallel shear'
    feat += 1

    # Feature 9: Ratio of convection to production of k
    ####################################################
    print('9: Ratio of convection to production of k')
    # Boussinesq Reynolds stresses from the linear eddy-viscosity model.
    uiuj = (2.0 / 3.0) * tke * delij - 2.0 * nu_t * Sij
    dkdx = algs.gradient(tke)
    A = np.zeros(rans_nnode)
    B = np.zeros(rans_nnode)
    for i in range(0, 3):
        A += U[:, i] * dkdx[:, i]
    for j in range(0, 3):
        for l in range(0, 3):
            B += uiuj[:, j, l] * Sij[:, j, l]
    q[:, feat] = A / (algs.abs(B) + small)
    feature_labels[feat] = 'Convection/production of k'
    feat += 1

    # Feature 10: Ratio of total Reynolds stresses to normal Reynolds stresses
    ##########################################################################
    print('10: Ratio of total Reynolds stresses to normal Reynolds stresses')
    # Frob. norm of uiuj
    A = algs.sum(uiuj**2, axis=1)  # sum i axis
    A = algs.sum(A, axis=1)  # sum previous summations i.e. along j axis
    A = algs.sqrt(A)
    B = tke
    q[:, feat] = A / (B + small)
    feature_labels[feat] = 'total/normal stresses'
    feat += 1

    # Feature 11: Cubic eddy viscosity comparision
    ##############################################
    print('11: Cubic eddy viscosity comparision')

    # Add quadratic and cubic terms to linear evm
    cevm_2nd, cevm_3rd = build_cevm(Sij, Oij)
    uiujSij = np.zeros(rans_nnode)
    for i in range(0, 3):
        for j in range(0, 3):
            uiujSij += uiuj[:, i, j] * Sij[:, i, j]
    uiujcevmSij = uiujSij + (cevm_2nd / tke) * nu_t**2.0 + (
        cevm_3rd / tke**2.0) * nu_t**3.0
    # Normalised difference between cubic-EVM and linear-EVM contractions.
    q[:, feat] = (uiujcevmSij -
                  uiujSij) / (0.5 *
                              (np.abs(uiujcevmSij) + np.abs(uiujSij)) + small)
    feature_labels[feat] = 'CEV comparison'
    feat += 1

    # Feature 12: Streamline normal pressure gradient
    #################################################
    print('12: Stream-normal pressure gradient')
    # |U x grad(p)| — component of the pressure gradient normal to U.
    A = algs.cross(U, dpdx)
    A = np.sqrt(A[:, 0]**2 + A[:, 1]**2 + A[:, 2]**2)

    if nondim == 'global':
        A = A / Umag
        q[:, feat] = A * Ls / Ps
    elif nondim == 'local':
        B = np.zeros(rans_nnode)
        for i in range(0, 3):
            for j in range(0, 3):
                B += U[:, i] * U[:, i] * dpdx[:, j] * dpdx[:, j]
        q[:, feat] = A / (A + algs.sqrt(B) + small)

    feature_labels[feat] = 'Stream-normal Pgrad'
    feat += 1

    # Feature 13: Streamline curvature
    ##################################
    print('13: Streamline curvature')
    #    A = np.zeros([rans_nnode,3])
    #
    #    # Gradient of Gamma
    #    Gamma = U#/algs.mag(U)
    #    dGammadx = algs.gradient(Gamma)
    #
    #    for i in range(0,3):
    #        for j in range(0,3):
    #            A[:,i] += U[:,j]*dGammadx[:,j,i]
    #    A = algs.mag(A/algs.mag(U)*algs.mag(U))
    #
    #    q[:,feat] = A
    #    feature_labels[feat] = 'Streamline curvature'
    #    feat += 1
    D2 = 0.5 * (Snorm**2 + Onorm**2)
    #    cr1 = 1.0
    #    cr2 = 12.0
    #    cr3 = 1.0
    # NOTE(review): cr2/cr3 are defined but only used in the commented-out
    # fr1 expressions below — presumably SA-RC rotation-correction constants.
    cr2 = 12
    cr3 = 1 / np.pi
    rstar = Snorm / (Onorm + small)

    dSijdx1 = algs.gradient(Sij[:, :, 0])
    dSijdx2 = algs.gradient(Sij[:, :, 1])
    dSijdx3 = algs.gradient(Sij[:, :, 2])

    # Material derivative of Sij: DS/Dt = (U . grad) S.
    DSijDt = np.zeros([rans_nnode, 3, 3])
    for i in range(3):
        for j in range(3):
            DSijDt[:, i,
                   j] = U[:,
                          0] * dSijdx1[:, j,
                                       i] + U[:,
                                              1] * dSijdx2[:, j,
                                                           i] + U[:,
                                                                  2] * dSijdx3[:,
                                                                               j,
                                                                               i]

    rhat = np.zeros(rans_nnode)
    for i in range(3):
        for j in range(3):
            for k in range(3):
                rhat += (2 * Oij[:, i, k] * Sij[:, j, k] /
                         D2**2) * DSijDt[:, i, j]


#    fr1 = -((2*rstar)/(1+rstar))*(cr3*algs.arctan(cr2*rhat))
#    fr1 = ( (1+cr1)*((2*rstar)/(1+rstar))*(1-cr3*algs.arctan(cr2*rhat)) ) - cr1
    # Map rhat smoothly onto (-1, 1) via arctan.
    rhathat = algs.arctan(0.25 * rhat) * 2 / np.pi
    q[:, feat] = rhathat  #fr1
    feature_labels[feat] = 'Streamline curvature'
    feat += 1

    # Feature 14: Anisotropy of pressure hessian
    ############################################
    print('14: Anisotropy of pressure hessian')
    # Calculate pressure hessian
    Hij = algs.gradient(dpdx)
    Hij = algs.apply_dfunc(np.transpose, Hij, (0, 2, 1))
    aniso = np.zeros(rans_nnode)
    iso = np.zeros(rans_nnode)
    # Frob. norm of Hij
    for i in range(3):
        for j in range(3):
            aniso += (Hij[:, i, j] - Hij[:, i, j] * delij[:, i, j])**2
        iso += Hij[:, i, i]**2

    aniso = np.sqrt(aniso)
    iso = np.sqrt(iso)
    q[:, feat] = (aniso) / (iso + small)

    feature_labels[feat] = 'Anisotropy of pressure hessian'
    feat += 1

    # Feature 15: White noise
    #########################
    print('15: White noise')
    # Random control feature: useful as a baseline for feature-importance.
    q[:, feat] = np.random.uniform(low=-1.0, high=1.0, size=rans_nnode)
    feature_labels[feat] = 'White noise'
    feat += 1

    return q, feature_labels
Example #9
0
w2.Update()

# Re-wrap the filter output and recompute the serial reference arrays.
# NOTE(review): w2 appears to be the serial reference source used against
# the distributed data `rtData`/`grad` — confirm against the full file.
ds2 = dsa.WrapDataObject(w2.GetOutput())
rtData2 = ds2.PointData['RTData']
grad2 = algs.gradient(rtData2)

# Total point count across all MPI ranks (sum of per-rank local counts).
npts = numpy.array(numpy.int32(ds.GetNumberOfPoints()))
total_npts = numpy.array(npts)
MPI.COMM_WORLD.Allreduce([npts, MPI.INT], [total_npts, MPI.INT], MPI.SUM)

# Test simple distributed data.
testArrays(rtData, rtData2, grad, grad2, total_npts)

# Check that we can disable parallelism by using a dummy controller
# even when a global controller is set
assert algs.sum(rtData / rtData, controller=vtk.vtkDummyController()) != total_npts

# Test where arrays are NoneArray on one of the ranks.
if size > 1:
    if rank == 0:
        rtData3 = rtData2
        grad3 = grad2
    else:
        rtData3 = dsa.NoneArray
        grad3 = dsa.NoneArray

    testArrays(rtData3, rtData2, grad3, grad2, total_npts)

# Test composite arrays
rtData3 = dsa.VTKCompositeDataArray([rtData, dsa.NoneArray])
grad3 = dsa.VTKCompositeDataArray([dsa.NoneArray, grad])