Example #1
0
def main():
    """Compute mean velocity and the diagonal Reynolds-stress components
    of a precursor database stored in HDF5, and write them as text files.

    Intended to run under MPI: the time dimension of the database is
    split across the processes, partial sums are gathered on rank 0,
    and rank 0 writes the z-averaged profiles to ``--writepath``.
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nProcs = comm.Get_size()

    # Define the command-line arguments
    parser = argparse.ArgumentParser(
        description="A utility for calculating the statistics of a \
                             precursor database stored in the HDF5 format. \
                             Outputs files with the components of mean \
                             velocity and the diagonal components of the \
                             Reynolds stress tensor.")

    parser.add_argument('--database',
                        '-d',
                        type=str,
                        help='The HDF5 file with the database.',
                        required=True)
    parser.add_argument('--writepath',
                        '-w',
                        type=str,
                        help='The location where to write the \
                             produced files.',
                        required=True)

    args = parser.parse_args()

    readPath = args.database
    writeDir = args.writepath

    # Open the hdf5 database collectively via MPI-IO

    if rank == 0:
        print("Opening the database")
    dbFile = h5py.File(readPath, 'r', driver='mpio', comm=MPI.COMM_WORLD)

    pointsY = dbFile["points"]["pointsY"][:, :]

    size = dbFile["velocity"]["uX"].shape[0]
    nPointsY = pointsY.shape[0]
    nPointsZ = pointsY.shape[1]

    uMean = np.zeros((nPointsY, nPointsZ, 3))
    uSquaredMean = np.zeros((nPointsY, nPointsZ, 3))

    if rank == 0:
        print("Calculating the statistics")

    [chunks, offsets] = chunks_and_offsets(nProcs, size)

    # Guard against chunks[rank] < 10, which would make the progress
    # interval zero and raise ZeroDivisionError in the modulo below.
    progressInterval = max(1, int(chunks[rank] / 10))

    for i in range(chunks[rank]):
        if rank == 0 and (i % progressInterval == 0):
            print("Computed about " + str(int(i / chunks[rank] * 100)) + "%")

        position = offsets[rank] + i

        # Accumulate first and second moments of the velocity field.
        uMean[:, :, 0] += dbFile["velocity"]["uX"][position, :, :]
        uMean[:, :, 1] += dbFile["velocity"]["uY"][position, :, :]
        uMean[:, :, 2] += dbFile["velocity"]["uZ"][position, :, :]
        uSquaredMean[:, :, 0] += dbFile["velocity"]["uX"][position, :, :]**2
        uSquaredMean[:, :, 1] += dbFile["velocity"]["uY"][position, :, :]**2
        uSquaredMean[:, :, 2] += dbFile["velocity"]["uZ"][position, :, :]**2

    comm.Barrier()
    dbFile.close()
    uMean = comm.gather(uMean, root=0)
    uSquaredMean = comm.gather(uSquaredMean, root=0)

    if rank == 0:
        # Sum the partial results from all processes into element 0.
        for i in range(nProcs - 1):
            uMean[0] += uMean[i + 1]
            uSquaredMean[0] += uSquaredMean[i + 1]

        uMean = uMean[0] / size
        uSquaredMean = uSquaredMean[0] / size

        # Variance: <u'u'> = <u^2> - <u>^2
        uPrime2Mean = uSquaredMean - uMean**2

        # Average along Z
        uMean = np.mean(uMean, axis=1)
        uPrime2Mean = np.mean(uPrime2Mean, axis=1)

        print("Outputting data")

        if not os.path.exists(writeDir):
            os.makedirs(writeDir)

        # "y" was previously written twice; once is enough.
        np.savetxt(os.path.join(writeDir, "y"), pointsY[:, 0])
        np.savetxt(os.path.join(writeDir, "uMeanX"), uMean[:, 0])
        np.savetxt(os.path.join(writeDir, "uMeanY"), uMean[:, 1])
        np.savetxt(os.path.join(writeDir, "uMeanZ"), uMean[:, 2])
        np.savetxt(os.path.join(writeDir, "uPrime2MeanXX"), uPrime2Mean[:, 0])
        np.savetxt(os.path.join(writeDir, "uPrime2MeanYY"), uPrime2Mean[:, 1])
        np.savetxt(os.path.join(writeDir, "uPrime2MeanZZ"), uPrime2Mean[:, 2])
def main():
    """Compute mean velocity and the diagonal Reynolds-stress components
    of a precursor database stored in HDF5, and write them as text files.

    Intended to run under MPI: the time dimension of the database is
    split across the processes, partial sums are gathered on rank 0,
    and rank 0 writes the z-averaged profiles to ``--writepath``.
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nProcs = comm.Get_size()

    # Define the command-line arguments
    parser = argparse.ArgumentParser(
                description="A utility for calculating the statistics of a \
                             precursor database stored in the HDF5 format. \
                             Outputs files with the components of mean \
                             velocity and the diagonal components of the \
                             Reynolds stress tensor.")

    parser.add_argument('--database', '-d',
                        type=str,
                        help='The HDF5 file with the database.',
                        required=True)
    parser.add_argument('--writepath', '-w',
                        type=str,
                        help='The location where to write the \
                             produced files.',
                        required=True)

    args = parser.parse_args()

    readPath = args.database
    writeDir = args.writepath

    # Open the hdf5 database collectively via MPI-IO

    if rank == 0:
        print("Opening the database")
    dbFile = h5py.File(readPath, 'r', driver='mpio', comm=MPI.COMM_WORLD)

    pointsY = dbFile["points"]["pointsY"][:, :]

    size = dbFile["velocity"]["uX"].shape[0]
    nPointsY = pointsY.shape[0]
    nPointsZ = pointsY.shape[1]

    uMean = np.zeros((nPointsY, nPointsZ, 3))
    uSquaredMean = np.zeros((nPointsY, nPointsZ, 3))

    if rank == 0:
        print("Calculating the statistics")

    [chunks, offsets] = chunks_and_offsets(nProcs, size)

    # Guard against chunks[rank] < 10, which would make the progress
    # interval zero and raise ZeroDivisionError in the modulo below.
    progressInterval = max(1, int(chunks[rank]/10))

    for i in range(chunks[rank]):
        if rank == 0 and (i % progressInterval == 0):
            print("Computed about "+str(int(i/chunks[rank]*100))+"%")

        position = offsets[rank] + i

        # Accumulate first and second moments of the velocity field.
        uMean[:, :, 0] += dbFile["velocity"]["uX"][position, :, :]
        uMean[:, :, 1] += dbFile["velocity"]["uY"][position, :, :]
        uMean[:, :, 2] += dbFile["velocity"]["uZ"][position, :, :]
        uSquaredMean[:, :, 0] += dbFile["velocity"]["uX"][position, :, :]**2
        uSquaredMean[:, :, 1] += dbFile["velocity"]["uY"][position, :, :]**2
        uSquaredMean[:, :, 2] += dbFile["velocity"]["uZ"][position, :, :]**2

    comm.Barrier()
    dbFile.close()
    uMean = comm.gather(uMean, root=0)
    uSquaredMean = comm.gather(uSquaredMean, root=0)

    if rank == 0:
        # Sum the partial results from all processes into element 0.
        for i in range(nProcs-1):
            uMean[0] += uMean[i+1]
            uSquaredMean[0] += uSquaredMean[i+1]

        uMean = uMean[0]/size
        uSquaredMean = uSquaredMean[0]/size

        # Variance: <u'u'> = <u^2> - <u>^2
        uPrime2Mean = uSquaredMean - uMean**2

        # Average along Z
        uMean = np.mean(uMean, axis=1)
        uPrime2Mean = np.mean(uPrime2Mean, axis=1)

        print("Outputting data")

        if not os.path.exists(writeDir):
            os.makedirs(writeDir)

        # "y" was previously written twice; once is enough.
        np.savetxt(os.path.join(writeDir, "y"), pointsY[:, 0])
        np.savetxt(os.path.join(writeDir, "uMeanX"), uMean[:, 0])
        np.savetxt(os.path.join(writeDir, "uMeanY"), uMean[:, 1])
        np.savetxt(os.path.join(writeDir, "uMeanZ"), uMean[:, 2])
        np.savetxt(os.path.join(writeDir, "uPrime2MeanXX"), uPrime2Mean[:, 0])
        np.savetxt(os.path.join(writeDir, "uPrime2MeanYY"), uPrime2Mean[:, 1])
        np.savetxt(os.path.join(writeDir, "uPrime2MeanZZ"), uPrime2Mean[:, 2])
Example #3
0
def main():
    """Convert a precursor database stored as foamFile-formatted sampled
    surfaces into a single HDF5 file.

    Intended to run under MPI: the time directories are split across the
    processes and each process writes its slice of the velocity datasets
    into the shared HDF5 file via MPI-IO.
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nProcs = comm.Get_size()

    # Define the command-line arguments
    parser = argparse.ArgumentParser(
        description="A utility for converting a database stored \
                            as a collection of foamFile-formatted files to \
                            a single HDF5 file.")

    parser.add_argument('--precursor',
                        type=str,
                        help='The location of the precusor case.',
                        required=True)
    parser.add_argument('--surface',
                        type=str,
                        help='The name of the surface that contains the data.',
                        required=True)
    parser.add_argument('--filename',
                        type=str,
                        help='The name of the hdf5 file to create.',
                        required=True)

    parser.add_argument('--umean',
                        type=str,
                        help='The file containing the mean velocity profile.',
                        required=True)

    args = parser.parse_args()

    precursorCaseDir = args.precursor
    surfaceName = args.surface
    uMeanFile = args.umean
    fileName = args.filename

    dataDir = os.path.join(precursorCaseDir, "postProcessing",
                           "sampledSurface")

    # Grab the existing times and sort them numerically: a plain
    # lexicographic sort would order "10" before "9".
    times = os.listdir(dataDir)
    times = np.array(sorted(times, key=float))

    # Get the mean profile and append zeros
    uMean = np.genfromtxt(uMeanFile)
    uMeanX = uMean[:, 1]
    if uMean.shape[1] == 3:
        uMeanY = uMean[:, 2]
    else:
        uMeanY = np.zeros(uMeanX.shape)

    y = uMean[:, 0]

    # Read in the points
    [pointsY, pointsZ, yInd,
     zInd] = read_points_from_foamfile(os.path.join(dataDir, times[0],
                                                    surfaceName,
                                                    "faceCentres"),
                                       addValBot=y[0],
                                       addValTop=y[-1])

    [nPointsY, nPointsZ] = pointsY.shape

    assert nPointsY == uMean.shape[0]

    # Remove a pre-existing output file, on rank 0 only.
    if rank == 0:
        if os.path.isfile(fileName):
            print("HDF5 file already exists. It will be overwritten.")
            os.remove(fileName)

    # All ranks must wait for the removal above before opening the file,
    # otherwise another rank could open the stale file first.
    comm.Barrier()

    dbFile = h5py.File(fileName, 'a', driver='mpio', comm=MPI.COMM_WORLD)

    pointsGroup = dbFile.create_group("points")
    velocityGroup = dbFile.create_group("velocity")

    pointsGroup.create_dataset("pointsY", data=pointsY)
    pointsGroup.create_dataset("pointsZ", data=pointsZ)

    velocityGroup.create_dataset("uMeanX", data=uMeanX)
    velocityGroup.create_dataset("uMeanY", data=uMeanY)

    velocityGroup.create_dataset(
        "times", data=[float(times[i]) for i in range(times.size)])

    uX = velocityGroup.create_dataset(
        "uX", (len(times), pointsY.shape[0], pointsY.shape[1]),
        dtype=np.float64)
    uY = velocityGroup.create_dataset(
        "uY", (len(times), pointsY.shape[0], pointsY.shape[1]),
        dtype=np.float64)
    uZ = velocityGroup.create_dataset(
        "uZ", (len(times), pointsY.shape[0], pointsY.shape[1]),
        dtype=np.float64)

    dbFile.attrs["nPointsY"] = pointsY.shape[0]
    dbFile.attrs["nPointsZ"] = pointsY.shape[1]
    dbFile.attrs["nPoints"] = pointsY.size

    [chunks, offsets] = chunks_and_offsets(nProcs, len(times))

    readFunc = read_velocity_from_foamfile(dataDir,
                                           surfaceName,
                                           nPointsZ,
                                           yInd,
                                           zInd,
                                           addValBot=(uMeanX[0], uMeanY[0], 0),
                                           addValTop=(uMeanX[-1], uMeanY[-1],
                                                      0))

    # Guard against chunks[rank] < 20, which would make the progress
    # interval zero and raise ZeroDivisionError in the modulo below.
    progressInterval = max(1, int(chunks[rank] / 20))

    # Read in the fluctuations
    for i in range(chunks[rank]):
        if rank == 0 and (i % progressInterval == 0):
            print("Converted about " + str(int(i / chunks[rank] * 100)) + "%")

        position = offsets[rank] + i
        # Read in U
        [uXVal, uYVal, uZVal] = readFunc(times[position])

        uX[position, :, :] = uXVal
        uY[position, :, :] = uYVal
        uZ[position, :, :] = uZVal

    if rank == 0:
        print("Process 0 done, waiting for the others...")

    comm.Barrier()
    dbFile.close()
    if rank == 0:
        print("Done")
def main():
    """Compute mean velocity and the diagonal Reynolds-stress components
    of an inflow-field database stored in HDF5, and write them as text
    files.

    Intended to run under MPI: the time dimension is split across the
    processes, partial sums are gathered on rank 0, and rank 0 sorts the
    scattered points into a (y, z) grid, averages along z, and writes
    the profiles to ``--writepath``.
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nProcs = comm.Get_size()

    # Define the command-line arguments
    parser = argparse.ArgumentParser(
        description="A utility for calculating statistics \
                             of a inflow field database stored in the HDF5 \
                             format. \
                             Outputs files with the components of mean \
                             velocity and the diagonal components of the \
                             Reynolds stress tensor.")

    parser.add_argument('--database',
                        '-d',
                        type=str,
                        help='The HDF5 file with the database.',
                        required=True)
    parser.add_argument('--writepath',
                        '-w',
                        type=str,
                        help='The location where to write the \
                              produced files.',
                        required=True)

    args = parser.parse_args()

    readPath = args.database
    writeDir = args.writepath

    # Open the hdf5 database collectively via MPI-IO

    if rank == 0:
        print("Opening the database")

    dbFile = h5py.File(readPath, 'r', driver='mpio', comm=MPI.COMM_WORLD)
    times = dbFile['time'][()]
    points = dbFile['points'][()]
    # Drop the x coordinate; keep (y, z).
    points = points[:, 1:]

    size = len(times)

    uMean = np.zeros((points.shape[0], 3))
    uSquaredMean = np.zeros((points.shape[0], 3))

    if rank == 0:
        print("Calculating the statistics")

    [chunks, offsets] = chunks_and_offsets(nProcs, size)

    # Guard against chunks[rank] < 10, which would make the progress
    # interval zero and raise ZeroDivisionError in the modulo below.
    progressInterval = max(1, int(chunks[rank] / 10))

    for i in range(chunks[rank]):
        if rank == 0 and (i % progressInterval == 0):

            print("Computed about " + str(int(i / chunks[rank] * 100)) + "%")

        position = offsets[rank] + i

        # Accumulate first and second moments of the velocity field.
        uMean += dbFile['velocity'][position, :, :]
        uSquaredMean += dbFile['velocity'][position, :, :]**2

    comm.Barrier()
    dbFile.close()
    if rank == 0:
        print("Done")

    uMean = comm.gather(uMean, root=0)
    uSquaredMean = comm.gather(uSquaredMean, root=0)

    if rank == 0:
        # Sum the partial results from all processes into element 0.
        for i in range(nProcs - 1):
            uMean[0] += uMean[i + 1]
            uSquaredMean[0] += uSquaredMean[i + 1]

        uMean = uMean[0] / size
        uSquaredMean = uSquaredMean[0] / size

        # Variance: <u'u'> = <u^2> - <u>^2
        uPrime2Mean = uSquaredMean - uMean**2

        print("Reshaping and averaging")
        # Sort along y first
        yInd = np.argsort(points[:, 0])
        points[:, 0] = points[yInd, 0]
        points[:, 1] = points[yInd, 1]
        uMean[:, 0] = uMean[yInd, 0]
        uMean[:, 1] = uMean[yInd, 1]
        uMean[:, 2] = uMean[yInd, 2]
        uPrime2Mean[:, 0] = uPrime2Mean[yInd, 0]
        uPrime2Mean[:, 1] = uPrime2Mean[yInd, 1]
        uPrime2Mean[:, 2] = uPrime2Mean[yInd, 2]

        # Find the number of points along z
        nPointsZ = 0
        for i in range(points[:, 0].size):
            if points[i, 0] == points[0, 0]:
                nPointsZ += 1
            else:
                break

        # Reshape into a 2d array
        pointsY = np.copy(np.reshape(points[:, 0], (-1, nPointsZ)))
        pointsZ = np.copy(np.reshape(points[:, 1], (-1, nPointsZ)))
        uMeanX = np.copy(np.reshape(uMean[:, 0], (-1, nPointsZ)))
        uMeanY = np.copy(np.reshape(uMean[:, 1], (-1, nPointsZ)))
        uMeanZ = np.copy(np.reshape(uMean[:, 2], (-1, nPointsZ)))
        uPrime2MeanXX = np.copy(np.reshape(uPrime2Mean[:, 0], (-1, nPointsZ)))
        uPrime2MeanYY = np.copy(np.reshape(uPrime2Mean[:, 1], (-1, nPointsZ)))
        uPrime2MeanZZ = np.copy(np.reshape(uPrime2Mean[:, 2], (-1, nPointsZ)))

        # For each y order the points in z.
        # NOTE: np.int was removed in NumPy 1.24; use the builtin int.
        zInd = np.zeros(pointsZ.shape, dtype=int)

        for i in range(pointsZ.shape[0]):
            zInd[i, :] = np.argsort(pointsZ[i, :])
            pointsZ[i, :] = pointsZ[i, zInd[i, :]]
            uMeanX[i, :] = uMeanX[i, zInd[i, :]]
            uMeanY[i, :] = uMeanY[i, zInd[i, :]]
            uMeanZ[i, :] = uMeanZ[i, zInd[i, :]]
            uPrime2MeanXX[i, :] = uPrime2MeanXX[i, zInd[i, :]]
            uPrime2MeanYY[i, :] = uPrime2MeanYY[i, zInd[i, :]]
            uPrime2MeanZZ[i, :] = uPrime2MeanZZ[i, zInd[i, :]]

        y = pointsY[:, 0]

        # Average along Z
        uMeanX = np.mean(uMeanX, axis=1)
        uMeanY = np.mean(uMeanY, axis=1)
        uMeanZ = np.mean(uMeanZ, axis=1)

        uPrime2MeanXX = np.mean(uPrime2MeanXX, axis=1)
        uPrime2MeanYY = np.mean(uPrime2MeanYY, axis=1)
        uPrime2MeanZZ = np.mean(uPrime2MeanZZ, axis=1)

        print("Outputting data")

        if not os.path.exists(writeDir):
            os.makedirs(writeDir)

        # "y" was previously written twice; once is enough.
        np.savetxt(os.path.join(writeDir, "y"), y)
        np.savetxt(os.path.join(writeDir, "uMeanX"), uMeanX)
        np.savetxt(os.path.join(writeDir, "uMeanY"), uMeanY)
        np.savetxt(os.path.join(writeDir, "uMeanZ"), uMeanZ)
        np.savetxt(os.path.join(writeDir, "uPrime2MeanXX"), uPrime2MeanXX)
        np.savetxt(os.path.join(writeDir, "uPrime2MeanYY"), uPrime2MeanYY)
        np.savetxt(os.path.join(writeDir, "uPrime2MeanZZ"), uPrime2MeanZZ)
Example #5
0
def test_chunks_and_offsets_n_procs_equals_data():
    # One data item per process: every chunk is 1, offsets run 0..9.
    chunks, offsets = chunks_and_offsets(10, 10)
    assert np.array_equal(chunks, np.ones(10))
    assert np.array_equal(offsets, np.arange(10, dtype=np.int64))
def main():
    """Compute mean velocity and the diagonal Reynolds-stress components
    of an inflow-field database stored in HDF5, and write them as text
    files.

    Intended to run under MPI: the time dimension is split across the
    processes, partial sums are gathered on rank 0, and rank 0 sorts the
    scattered points into a (y, z) grid, averages along z, and writes
    the profiles to ``--writepath``.
    """
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    nProcs = comm.Get_size()

    # Define the command-line arguments
    parser = argparse.ArgumentParser(
                description="A utility for calculating statistics \
                             of a inflow field database stored in the HDF5 \
                             format. \
                             Outputs files with the components of mean \
                             velocity and the diagonal components of the \
                             Reynolds stress tensor.")

    parser.add_argument('--database', '-d',
                        type=str,
                        help='The HDF5 file with the database.',
                        required=True)
    parser.add_argument('--writepath', '-w',
                        type=str,
                        help='The location where to write the \
                              produced files.',
                        required=True)

    args = parser.parse_args()

    readPath = args.database
    writeDir = args.writepath

    # Open the hdf5 database collectively via MPI-IO

    if rank == 0:
        print("Opening the database")

    dbFile = h5py.File(readPath, 'r', driver='mpio', comm=MPI.COMM_WORLD)
    times = dbFile['time'][()]
    points = dbFile['points'][()]
    # Drop the x coordinate; keep (y, z).
    points = points[:, 1:]

    size = len(times)

    uMean = np.zeros((points.shape[0], 3))
    uSquaredMean = np.zeros((points.shape[0], 3))

    if rank == 0:
        print("Calculating the statistics")

    [chunks, offsets] = chunks_and_offsets(nProcs, size)

    # Guard against chunks[rank] < 10, which would make the progress
    # interval zero and raise ZeroDivisionError in the modulo below.
    progressInterval = max(1, int(chunks[rank]/10))

    for i in range(chunks[rank]):
        if rank == 0 and (i % progressInterval == 0):

            print("Computed about " + str(int(i/chunks[rank]*100)) + "%")

        position = offsets[rank] + i

        # Accumulate first and second moments of the velocity field.
        uMean += dbFile['velocity'][position, :, :]
        uSquaredMean += dbFile['velocity'][position, :, :]**2

    comm.Barrier()
    dbFile.close()
    if rank == 0:
        print("Done")

    uMean = comm.gather(uMean, root=0)
    uSquaredMean = comm.gather(uSquaredMean, root=0)

    if rank == 0:
        # Sum the partial results from all processes into element 0.
        for i in range(nProcs-1):
            uMean[0] += uMean[i+1]
            uSquaredMean[0] += uSquaredMean[i+1]

        uMean = uMean[0]/size
        uSquaredMean = uSquaredMean[0]/size

        # Variance: <u'u'> = <u^2> - <u>^2
        uPrime2Mean = uSquaredMean - uMean**2

        print("Reshaping and averaging")
        # Sort along y first
        yInd = np.argsort(points[:, 0])
        points[:, 0] = points[yInd, 0]
        points[:, 1] = points[yInd, 1]
        uMean[:, 0] = uMean[yInd, 0]
        uMean[:, 1] = uMean[yInd, 1]
        uMean[:, 2] = uMean[yInd, 2]
        uPrime2Mean[:, 0] = uPrime2Mean[yInd, 0]
        uPrime2Mean[:, 1] = uPrime2Mean[yInd, 1]
        uPrime2Mean[:, 2] = uPrime2Mean[yInd, 2]

        # Find the number of points along z
        nPointsZ = 0
        for i in range(points[:, 0].size):
            if points[i, 0] == points[0, 0]:
                nPointsZ += 1
            else:
                break

        # Reshape into a 2d array
        pointsY = np.copy(np.reshape(points[:, 0], (-1, nPointsZ)))
        pointsZ = np.copy(np.reshape(points[:, 1], (-1, nPointsZ)))
        uMeanX = np.copy(np.reshape(uMean[:, 0], (-1, nPointsZ)))
        uMeanY = np.copy(np.reshape(uMean[:, 1], (-1, nPointsZ)))
        uMeanZ = np.copy(np.reshape(uMean[:, 2], (-1, nPointsZ)))
        uPrime2MeanXX = np.copy(np.reshape(uPrime2Mean[:, 0],
                                           (-1, nPointsZ)))
        uPrime2MeanYY = np.copy(np.reshape(uPrime2Mean[:, 1],
                                           (-1, nPointsZ)))
        uPrime2MeanZZ = np.copy(np.reshape(uPrime2Mean[:, 2],
                                           (-1, nPointsZ)))

        # For each y order the points in z.
        # NOTE: np.int was removed in NumPy 1.24; use the builtin int.
        zInd = np.zeros(pointsZ.shape, dtype=int)

        for i in range(pointsZ.shape[0]):
            zInd[i, :] = np.argsort(pointsZ[i, :])
            pointsZ[i, :] = pointsZ[i, zInd[i, :]]
            uMeanX[i, :] = uMeanX[i, zInd[i, :]]
            uMeanY[i, :] = uMeanY[i, zInd[i, :]]
            uMeanZ[i, :] = uMeanZ[i, zInd[i, :]]
            uPrime2MeanXX[i, :] = uPrime2MeanXX[i, zInd[i, :]]
            uPrime2MeanYY[i, :] = uPrime2MeanYY[i, zInd[i, :]]
            uPrime2MeanZZ[i, :] = uPrime2MeanZZ[i, zInd[i, :]]

        y = pointsY[:, 0]

        # Average along Z
        uMeanX = np.mean(uMeanX, axis=1)
        uMeanY = np.mean(uMeanY, axis=1)
        uMeanZ = np.mean(uMeanZ, axis=1)

        uPrime2MeanXX = np.mean(uPrime2MeanXX, axis=1)
        uPrime2MeanYY = np.mean(uPrime2MeanYY, axis=1)
        uPrime2MeanZZ = np.mean(uPrime2MeanZZ, axis=1)

        print("Outputting data")

        if not os.path.exists(writeDir):
            os.makedirs(writeDir)

        # "y" was previously written twice; once is enough.
        np.savetxt(os.path.join(writeDir, "y"), y)
        np.savetxt(os.path.join(writeDir, "uMeanX"), uMeanX)
        np.savetxt(os.path.join(writeDir, "uMeanY"), uMeanY)
        np.savetxt(os.path.join(writeDir, "uMeanZ"), uMeanZ)
        np.savetxt(os.path.join(writeDir, "uPrime2MeanXX"), uPrime2MeanXX)
        np.savetxt(os.path.join(writeDir, "uPrime2MeanYY"), uPrime2MeanYY)
        np.savetxt(os.path.join(writeDir, "uPrime2MeanZZ"), uPrime2MeanZZ)
Example #7
0
def test_chunks_and_offsets_data_smaller_than_n_procs():
    """chunks_and_offsets must reject more processes than data items."""
    with pytest.raises(Exception):
        # The return value is irrelevant; the call itself must raise.
        chunks_and_offsets(7, 5)
Example #8
0
def test_chunks_and_offsets_nonpositive_processor_number():
    """chunks_and_offsets must reject a non-positive process count."""
    with pytest.raises(Exception):
        # The return value is irrelevant; the call itself must raise.
        chunks_and_offsets(0, 5)
def test_chunks_and_offsets_n_procs_equals_data():
    # One data item per process: every chunk is 1, offsets run 0..9.
    chunks, offsets = chunks_and_offsets(10, 10)
    assert np.array_equal(chunks, np.ones(10))
    assert np.array_equal(offsets, np.arange(10, dtype=np.int64))
def test_chunks_and_offsets_data_smaller_than_n_procs():
    """chunks_and_offsets must reject more processes than data items."""
    with pytest.raises(Exception):
        # The return value is irrelevant; the call itself must raise.
        chunks_and_offsets(7, 5)
def test_chunks_and_offsets_nonpositive_processor_number():
    """chunks_and_offsets must reject a non-positive process count."""
    with pytest.raises(Exception):
        # The return value is irrelevant; the call itself must raise.
        chunks_and_offsets(0, 5)