Example #1
    def __init__(self, npxyz, xyzL, xyz_orig, ncxyz):

        #initialise MPI and CPL
        self.comm = MPI.COMM_WORLD
        self.CPL = CPL()
        self.CFD_COMM = self.CPL.init(CPL.CFD_REALM)
        self.nprocs_realm = self.CFD_COMM.Get_size()

        # Parameters of the cpu topology (cartesian grid)
        self.npxyz = np.array(npxyz, order='F', dtype=np.int32)
        self.NProcs = np.prod(npxyz)
        self.xyzL = np.array(xyzL, order='F', dtype=np.float64)
        self.xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)
        self.ncxyz = np.array(ncxyz, order='F', dtype=np.int32)

        if (self.nprocs_realm != self.NProcs):
            print("Inconsistent number of processes in CFD realm: ",
                  self.nprocs_realm, " not equal to ", self.npxyz[0], " x ",
                  self.npxyz[1], " x ", self.npxyz[2])
            self.comm.Abort(errorcode=1)

        #Setup coupled simulation
        self.cart_comm = self.CFD_COMM.Create_cart(
            [self.npxyz[0], self.npxyz[1], self.npxyz[2]])
        self.CPL.setup_cfd(self.cart_comm, self.xyzL, self.xyz_orig,
                           self.ncxyz)

        #Get limits of overlap region
        self.olap_limits = self.CPL.get_olap_limits()
        self.portion = self.CPL.my_proc_portion(self.olap_limits)
        [self.ncxl, self.ncyl, self.nczl] = self.CPL.get_no_cells(self.portion)

        self.dx = self.CPL.get("xl_cfd") / float(self.CPL.get("ncx"))
        self.dy = self.CPL.get("yl_cfd") / float(self.CPL.get("ncy"))
        self.dz = self.CPL.get("zl_cfd") / float(self.CPL.get("ncz"))
        self.ioverlap = (self.CPL.get("icmax_olap") -
                         self.CPL.get("icmin_olap") + 1)
        self.joverlap = (self.CPL.get("jcmax_olap") -
                         self.CPL.get("jcmin_olap") + 1)
        self.koverlap = (self.CPL.get("kcmax_olap") -
                         self.CPL.get("kcmin_olap") + 1)
        self.xoverlap = self.ioverlap * self.dx
        self.yoverlap = self.joverlap * self.dy
        self.zoverlap = self.koverlap * self.dz
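As a quick check of the cell-size and overlap arithmetic above, here is the same computation with illustrative stand-in values for the CPL.get(...) lookups (all numbers are assumptions for the sketch, not from the example):

# Worked example of the dx / ioverlap / xoverlap arithmetic, with
# illustrative values replacing the CPL.get(...) calls
xl_cfd, ncx = 1.0, 8                     # domain length and cell count in x
dx = xl_cfd / float(ncx)                 # cell size = 0.125
icmin_olap, icmax_olap = 0, 3            # inclusive overlap index range
ioverlap = icmax_olap - icmin_olap + 1   # 4 overlap cells
xoverlap = ioverlap * dx                 # overlap extent = 0.5
print(dx, ioverlap, xoverlap)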
Example #2
def setup():

    #Import CPL library, numpy and the analytical Couette solution
    from cplpy import CPL
    import numpy as np
    # CouetteAnalytical is assumed to be available on the path, as in
    # the cpl-library examples, to provide the CA class used below
    from CouetteAnalytical import CouetteAnalytical as CA

    #initialise MPI
    from mpi4py import MPI
    comm = MPI.COMM_WORLD

    #Check this is run as part of a coupled run
    rank = comm.rank

    # Parameters of the cpu topology (cartesian grid)
    npxyz = np.array([1, 1, 1], order='F', dtype=np.int32)
    xyzL = np.array([1., 1., 1.], order='F', dtype=np.float64)
    xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)

    #initialise CPL
    CPL = CPL()
    MD_COMM = CPL.init(CPL.MD_REALM)
    CPL.setup_md(MD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]]), xyzL, xyz_orig)
    recvbuf, sendbuf = CPL.get_arrays(recv_size=9, send_size=8)

    #Analytical solution
    dt = 0.05
    U = 1.0
    nu = 1.004e-2
    Re = xyzL[1]/nu   #Note Reynolds number is independent of velocity in the analytical fn
    ncx = CPL.get("ncx")
    ncy = CPL.get("ncy")
    ncz = CPL.get("ncz")
    CAObj = CA(Re=Re, U=U, Lmin=0., Lmax=xyzL[1], npoints=2*ncy+1, nmodes=100*ncy)

    #Yield statement delineates end of setup and start of teardown
    yield [CPL, MD_COMM, recvbuf, sendbuf, CAObj, dt, U, nu]
    CPL.finalize()
    MPI.Finalize()
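The yield in setup() splits it into setup and teardown halves, so it can be registered directly as a pytest yield-fixture. A minimal sketch of how a test might consume it; the pytest wiring and the test body are assumptions, not part of the original:

import pytest

# Register the generator above as a fixture: pytest runs the code before
# the yield as setup and the code after it as teardown
setup = pytest.fixture()(setup)

def test_buffer_sizes(setup):
    CPL, MD_COMM, recvbuf, sendbuf, CAObj, dt, U, nu = setup
    # first dimension of each buffer should match the sizes requested above
    assert recvbuf.shape[0] == 9
    assert sendbuf.shape[0] == 8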
Example #3
#!/usr/bin/env python2
from __future__ import print_function, division
import sys
import cPickle

try:
    from mpi4py import MPI
    from cplpy import CPL
    import numpy as np
except ImportError as exc:
    print("ERROR: ", sys.exc_info()[0], exc, file=sys.stderr)
    # MPI itself may have failed to import, so exit directly
    # rather than calling MPI.COMM_WORLD.Abort
    sys.exit(1)


cpllib = CPL()

cpllib.set("output_mode", 1)

try:
    # Load parameters for the run
    params = cPickle.load(open("cfd_params.dic", "rb"))

    # Parameters of the cpu topology (cartesian grid)
    NPx = params["npx"]
    NPy = params["npy"]
    NPz = params["npz"]

    # Parameters of the mesh topology (cartesian grid)
    NCx = params["ncx"]
    NCy = params["ncy"]
    NCz = params["ncz"]
except (IOError, KeyError) as exc:
    print("ERROR: ", sys.exc_info()[0], exc, file=sys.stderr)
    MPI.COMM_WORLD.Abort(errorcode=1)
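The cfd_params.dic file read above is just a pickled dict. A minimal sketch of how it might be generated; the values are placeholders, not from the original:

#!/usr/bin/env python2
# Hypothetical generator for cfd_params.dic; values are illustrative only
import cPickle

params = {"npx": 1, "npy": 1, "npz": 1,   # processes per direction
          "ncx": 8, "ncy": 8, "ncz": 8}   # cells per direction
with open("cfd_params.dic", "wb") as f:
    cPickle.dump(params, f)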
Example #4
import numpy as np
from mpi4py import MPI
from cplpy import CPL

g = 9.81
mi = -5.9490638385009208e-08

print("After import")

#initialise MPI and CPL
comm = MPI.COMM_WORLD
CPL = CPL()
MD_COMM = CPL.init(CPL.CFD_REALM)   # note: CFD realm, despite the MD_COMM name

print("After CPL init")

## Parameters of the cpu topology (cartesian grid)
npxyz = [1, 1, 1]
xyzL = [1.5E-003, 1.5E-003, 2.5E-003]
xyz_orig = [0.0, 0.0, 0.0]
ncxyz = [8, 8, 8]

#Setup coupled simulation
cart_comm = MD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)
recv_array, send_array = CPL.get_arrays(recv_size=4, send_size=3)

print("After CPL setup")

ft = True
for time in range(100):

    # Send and receive each step (the loop body is truncated in the
    # original example; this follows the same pattern as Example #5)
    send_array[2, :, :, :] = mi * g
    CPL.send(send_array)

    recv_array, ierr = CPL.recv(recv_array)

    print(time)

CPL.finalize()
MPI.Finalize()
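A script like this only makes sense as one half of a coupled MPMD job. A sketch of launching the pair from Python via mpiexec's colon syntax; the script names "cfd.py" and "md.py" and the process counts are assumptions:

# Hypothetical launcher: CPL couples two executables, so the CFD script
# above must be started alongside an MD partner in one MPMD mpiexec call
import subprocess

subprocess.check_call(
    ["mpiexec", "-n", "1", "python", "cfd.py",   # the script above
     ":", "-n", "1", "python", "md.py"])         # the coupled MD code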
Example #5
import numpy as np
from mpi4py import MPI
# import under an alias: the function below assigns to a local variable
# named CPL, which would otherwise shadow the class and raise
# UnboundLocalError at the CPL() call
from cplpy import CPL as CPLlib


def CFD(xyzL=[1.5E-003, 1.5E-003, 2.50E-003],
        g=9.81,
        ncxyz=[8, 8, 8],
        npxyz=[1, 1, 1],
        Nsteps=101):

    #initialise MPI and CPL
    comm = MPI.COMM_WORLD
    CPL = CPLlib()
    MD_COMM = CPL.init(CPL.CFD_REALM)
    nprocs_realm = MD_COMM.Get_size()

    ## Parameters of the cpu topology (cartesian grid)
    npxyz = np.array(npxyz, order='F', dtype=np.int32)
    NProcs = np.prod(npxyz)

    xyzL = np.array(xyzL, order='F', dtype=np.float64)
    xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
    ncxyz = np.array(ncxyz, order='F', dtype=np.int32)
    if (nprocs_realm != NProcs):
        print("Inconsistent number of processes in CFD realm: ", nprocs_realm,
              " not equal to ", npxyz[0], " x ", npxyz[1], " x ", npxyz[2])
        comm.Abort(errorcode=1)

    #Setup coupled simulation
    cart_comm = MD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
    CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)

    #Get constraint region
    cnst_limits = CPL.get_cnst_limits()
    cnst_portion = CPL.my_proc_portion(cnst_limits)
    [cnst_ncxl, cnst_ncyl, cnst_nczl] = CPL.get_no_cells(cnst_portion)

    #Get overlap region
    olap_limits = CPL.get_olap_limits()
    BC_limits = np.array(olap_limits[0:6], dtype=np.int32)
    BC_portion = CPL.my_proc_portion(BC_limits)
    [BC_ncxl, BC_ncyl, BC_nczl] = CPL.get_no_cells(BC_portion)

    #Allocate send and recv arrays
    recv_array = np.zeros((4, BC_ncxl, BC_ncyl, BC_nczl),
                          order='F',
                          dtype=np.float64)
    send_array = np.zeros((9, cnst_ncxl, cnst_ncyl, cnst_nczl),
                          order='F',
                          dtype=np.float64)

    for time in range(Nsteps):

        # send data to update
        send_array[2, :, :, :] = -5.9490638385009208e-08 * g  # mi * g, with mi the constant from Example #4
        CPL.send(send_array, cnst_portion)

        # recv data and plot
        recv_array, ierr = CPL.recv(recv_array, BC_portion)

        print(time)

    CPL.finalize()
    MPI.Finalize()
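For completeness, the function above can be driven as a standalone entry point using the defaults from its signature:

if __name__ == "__main__":
    CFD()   # defaults: 1x1x1 processes, 8x8x8 cells, 101 steps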