Example 1
if (nprocs_realm != NProcs):
    print("ERROR: ", "Number of processors is not coherent.", file=sys.stderr)
    comm_world.Abort(errorcode=1)

cart_comm = realm_comm.Create_cart([NPx, NPy, NPz])

CPL.setup_md(cart_comm, xyzL, xyz_orig)
lines = ""
test_passed = True

if CPL.overlap():
    # Receiving cell coordinates from CFD
    olap_region = CPL.get_olap_limits()
    portion = CPL.my_proc_portion(olap_region)
    [ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
    recv_array = np.zeros((3, ncxl, ncyl, nczl), order='F', dtype=np.float64)
    send_array = np.zeros((3, 0, 0, 0), order='F', dtype=np.float64)
    #CPL.scatter(send_array, olap_region, recv_array)
    CPL.recv(recv_array, olap_region)
    for imd in range(portion[0], portion[1] + 1):
        for jmd in range(portion[2], portion[3] + 1):
            for kmd in range(portion[4], portion[5] + 1):
                iloc, jloc, kloc = CPL.map_glob2loc_cell(
                    portion, [imd, jmd, kmd])
                # Receive cell or coord depending on the test
                ixmd, jymd, kzmd = imd, jmd, kmd
                ixcfd, jycfd, kzcfd = recv_array[0:3, iloc, jloc, kloc]
                if which_test == "cell_test":
                    # This has to be true for every cell for the test to pass
                    ixcfd, jycfd, kzcfd = int(ixcfd), int(jycfd), int(kzcfd)
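                    # Hypothetical completion of the truncated check (an
                    # assumption, not the library's own code): the coordinates
                    # received from CFD must equal the global MD cell indices
                    # in every cell for the test to pass.
                    if (ixcfd, jycfd, kzcfd) != (ixmd, jymd, kzmd):
                        test_passed = False
                        lines += "Cell mismatch at (%d, %d, %d)\n" % (imd, jmd, kmd)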
    print("ERROR: ", "Number of processes is not coherent.", file=sys.stderr)
    MPI.COMM_WORLD.Abort(errorcode=1)

# Create cartesian communicator and initialize
cpllib.set_timing(0, nsteps, dt)
cart_comm = realm_comm.Create_cart([NPx, NPy, NPz])
cpllib.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)

my_coords = cart_comm.Get_coords(cart_comm.Get_rank())
my_coords = np.array(my_coords, order='F', dtype=np.int32)
olap_limits = cpllib.get_olap_limits()

# Constrained region cell limits and number of cells
cnstFRegion = cpllib.get_cnst_limits()
cnstFPortion = cpllib.my_proc_portion(cnstFRegion)
[cnstncx, cnstncy, cnstncz] = cpllib.get_no_cells(cnstFPortion)

# Velocity averaging region cell limits and number of cells
velBCRegion = np.copy(olap_limits)
velBCRegion[3] = velBCRegion[2]
velBCPortion = cpllib.my_proc_portion(velBCRegion)
[velBCncx, velBCncy, velBCncz] = cpllib.get_no_cells(velBCPortion)

# Send a dummy random stress distribution to MD
np.random.seed(1000)
send_array = 5 * np.array(np.random.rand(9, cnstncx, cnstncy, cnstncz), order='F', dtype=np.float64)

# Receive averaged velocities from LAMMPS socket
recv_array = np.zeros((4, velBCncx, velBCncy, velBCncz), order='F',
                      dtype=np.float64)
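The excerpt stops before the actual transfer; a minimal, hypothetical sketch
of the exchange these buffers are built for, mirroring the send/recv calls
used in the later examples:

# Hypothetical exchange step (not in the original excerpt): push the
# stress field to MD, then pull back the averaged velocities.
cpllib.send(send_array, cnstFPortion)
recv_array, ierr = cpllib.recv(recv_array, velBCPortion)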
Example 3
if (nprocs_realm != NProcs):
    print("ERROR: ", "Number of processes is not coherent.", file=sys.stderr)
    MPI.COMM_WORLD.Abort(errorcode=1)

# Create cartesian communicator and initialize
cart_comm = realm_comm.Create_cart([NPx, NPy, NPz])
CPL.setup_cfd(nsteps, dt, cart_comm, xyzL, xyz_orig, ncxyz, 1.0)

my_coords = cart_comm.Get_coords(cart_comm.Get_rank())
my_coords = np.array(my_coords, order='F', dtype=np.int32)
olap_limits = CPL.get_olap_limits()

# Constrained region cell limits and number of cells
cnstFRegion = CPL.get_cnst_limits()
cnstFPortion = CPL.my_proc_portion(cnstFRegion)
[cnstncx, cnstncy, cnstncz] = CPL.get_no_cells(cnstFPortion)

# Velocity averaging region cell limits and number of cells
velBCRegion = np.copy(olap_limits)
velBCRegion[3] = velBCRegion[2]
velBCPortion = CPL.my_proc_portion(velBCRegion)
[velBCncx, velBCncy, velBCncz] = CPL.get_no_cells(velBCPortion)

# Send dummy random stress distribution to MD
scatter_array = np.array(np.random.rand(9, cnstncx, cnstncy, cnstncz),
                         order='F', dtype=np.float64)

recv_array = np.zeros((9, 0, 0, 0), order='F', dtype=np.float64)
CPL.scatter(scatter_array, cnstFRegion, recv_array)

# Receive averaged velocities from LAMMPS socket
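# The receive itself is missing from the excerpt; a hypothetical sketch,
# reusing the 4-component velocity buffer shape from the other examples:
recv_array = np.zeros((4, velBCncx, velBCncy, velBCncz), order='F',
                      dtype=np.float64)
recv_array, ierr = CPL.recv(recv_array, velBCPortion)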
Example 4

# Parameters of the cpu topology (cartesian grid)
npxyz = [1, 1, 1]
xyzL = np.array([2., 2., 2.], order='F', dtype=np.float64)
xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)

#initialise CPL
CPL = CPL()
MD_COMM = CPL.init(CPL.MD_REALM)
#CPL.set_timing(0, 0, dt)
CPL.setup_md(MD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]]), xyzL,
             xyz_orig)

#Setup send and recv buffers
cnst_limits = CPL.get_cnst_limits()
cnst_portion = CPL.my_proc_portion(cnst_limits)
[cnst_ncxl, cnst_ncyl, cnst_nczl] = CPL.get_no_cells(cnst_portion)
recvbuf = np.zeros((9, cnst_ncxl, cnst_ncyl, cnst_nczl),
                   order='F',
                   dtype=np.float64)

olap_limits = CPL.get_olap_limits()
BC_limits = np.array([olap_limits[0], olap_limits[1], olap_limits[2],
                      olap_limits[3], olap_limits[4], olap_limits[5]],
                     order='F', dtype=np.int32)
BC_portion = CPL.my_proc_portion(BC_limits)
[BC_ncxl, BC_ncyl, BC_nczl] = CPL.get_no_cells(BC_portion)
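# The coupled exchange is not shown in the excerpt; a hypothetical step
# using the buffers above (the 4-component send shape is an assumption
# borrowed from Example 7).
recvbuf, ierr = CPL.recv(recvbuf, cnst_portion)
sendbuf = np.zeros((4, BC_ncxl, BC_ncyl, BC_nczl), order='F',
                   dtype=np.float64)
CPL.send(sendbuf, BC_portion)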

#Plot output
Example 5
ncxyz = np.array([8, 8, 1], order='F', dtype=np.int32)

if (nprocs_realm != NProcs):
    print("Non-coherent number of processes in CFD ", nprocs_realm,
          " not equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
    MPI.COMM_WORLD.Abort(errorcode=1)

#Setup coupled simulation
cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)

#Setup buffer to get CFD BC from MD
ncx = CPL.get("ncx")
limits_CFD_BC = np.array([0, ncx, 0, 1, 0, 1], order='F', dtype=np.int32)
portion = CPL.my_proc_portion(limits_CFD_BC)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
A_recv = np.zeros((2, ncxl, ncyl, nczl), order='F', dtype=np.float64)

#Setup buffer to send constrained region
limits_MD_BC = np.array([0, ncx, 3, 4, 0, 1], order='F', dtype=np.int32)
portion = CPL.my_proc_portion(limits_MD_BC)
[ncxl, ncyl, nczl] = CPL.get_no_cells(portion)
A_send = np.zeros((2, ncxl, ncyl, nczl), order='F', dtype=np.float64)
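# Neither transfer appears in the excerpt; a hypothetical per-step
# exchange reusing the send/recv pattern of the other examples.
A_recv, ierr = CPL.recv(A_recv, limits_CFD_BC)
CPL.send(A_send, limits_MD_BC)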

#Set CFD simulation object
md_cfd_dt_ratio = 25
dt = 0.125
Nsteps = 100000 // md_cfd_dt_ratio  # integer division so Nsteps stays an int
tf = Nsteps * dt
time = np.arange(-dt, tf, dt)
uwall = 1.
cfd = CFD(nu=0.575, dt=dt, 
          xsize = ncxyz[0], ysize = ncxyz[1]+2,
Example 6
class CFD():
    def __init__(self, npxyz, xyzL, xyz_orig, ncxyz):

        #initialise MPI and CPL
        self.comm = MPI.COMM_WORLD
        self.CPL = CPL()
        self.CFD_COMM = self.CPL.init(CPL.CFD_REALM)
        self.nprocs_realm = self.CFD_COMM.Get_size()

        # Parameters of the cpu topology (cartesian grid)
        self.npxyz = np.array(npxyz, order='F', dtype=np.int32)
        self.NProcs = np.prod(npxyz)
        self.xyzL = np.array(xyzL, order='F', dtype=np.float64)
        self.xyz_orig = np.array(xyz_orig, order='F', dtype=np.float64)
        self.ncxyz = np.array(ncxyz, order='F', dtype=np.int32)

        if (self.nprocs_realm != self.NProcs):
            print("Non-coherent number of processes in CFD ",
                  self.nprocs_realm, " not equal to ", self.npxyz[0], " X ",
                  self.npxyz[1], " X ", self.npxyz[2])
            self.comm.Abort(errorcode=1)

        #Setup coupled simulation
        self.cart_comm = self.CFD_COMM.Create_cart(
            [self.npxyz[0], self.npxyz[1], self.npxyz[2]])
        self.CPL.setup_cfd(self.cart_comm, self.xyzL, self.xyz_orig,
                           self.ncxyz)

        #Get limits of overlap region
        self.olap_limits = self.CPL.get_olap_limits()
        self.portion = self.CPL.my_proc_portion(self.olap_limits)
        [self.ncxl, self.ncyl, self.nczl] = self.CPL.get_no_cells(self.portion)

        self.dx = self.CPL.get("xl_cfd") / float(self.CPL.get("ncx"))
        self.dy = self.CPL.get("yl_cfd") / float(self.CPL.get("ncy"))
        self.dz = self.CPL.get("zl_cfd") / float(self.CPL.get("ncz"))
        self.ioverlap = (self.CPL.get("icmax_olap") -
                         self.CPL.get("icmin_olap") + 1)
        self.joverlap = (self.CPL.get("jcmax_olap") -
                         self.CPL.get("jcmin_olap") + 1)
        self.koverlap = (self.CPL.get("kcmax_olap") -
                         self.CPL.get("kcmin_olap") + 1)
        self.xoverlap = self.ioverlap * self.dx
        self.yoverlap = self.joverlap * self.dy
        self.zoverlap = self.koverlap * self.dz

    def recv_CPL_data(self):

        # recv data to plot
        self.recv_array = np.zeros((1, self.ncxl, self.ncyl, self.nczl),
                                   order='F',
                                   dtype=np.float64)
        self.recv_array, ierr = self.CPL.recv(self.recv_array,
                                              self.olap_limits)

    def plot_grid(self, ax):

        #Plot CFD and coupler Grid
        draw_grid(ax,
                  nx=self.CPL.get("ncx"),
                  ny=self.CPL.get("ncy"),
                  nz=self.CPL.get("ncz"),
                  px=self.CPL.get("npx_cfd"),
                  py=self.CPL.get("npy_cfd"),
                  pz=self.CPL.get("npz_cfd"),
                  xmin=self.CPL.get("x_orig_cfd"),
                  ymin=self.CPL.get("y_orig_cfd"),
                  zmin=self.CPL.get("z_orig_cfd"),
                  xmax=(self.CPL.get("icmax_olap") + 1) * self.dx,
                  ymax=self.CPL.get("yl_cfd"),
                  zmax=(self.CPL.get("kcmax_olap") + 1) * self.dz,
                  lc='r',
                  label='CFD')

        #Plot MD domain
        draw_grid(ax,
                  nx=1,
                  ny=1,
                  nz=1,
                  px=self.CPL.get("npx_md"),
                  py=self.CPL.get("npy_md"),
                  pz=self.CPL.get("npz_md"),
                  xmin=self.CPL.get("x_orig_md"),
                  ymin=-self.CPL.get("yl_md") + self.yoverlap,
                  zmin=self.CPL.get("z_orig_md"),
                  xmax=(self.CPL.get("icmax_olap") + 1) * self.dx,
                  ymax=self.yoverlap,
                  zmax=(self.CPL.get("kcmax_olap") + 1) * self.dz,
                  label='MD')

    def plot_data(self, ax):

        # === Plot both grids ===

        #Plot x component on grid
        x = np.linspace(
            self.CPL.get("x_orig_cfd") + .5 * self.dx,
            self.xoverlap - .5 * self.dx, self.ioverlap)
        z = np.linspace(
            self.CPL.get("z_orig_cfd") + .5 * self.dz,
            self.zoverlap - .5 * self.dz, self.koverlap)

        try:
            for j in range(self.joverlap):
                ax.plot(
                    x,
                    0.5 * self.dy * (self.recv_array[0, :, j, 0] + 1. + 2 * j),
                    's-')
        except ValueError:
            print("Arrays not equal:", x.shape, z.shape, self.recv_array.shape)

    def finalise(self):

        self.CPL.finalize()
        MPI.Finalize()
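A short, hypothetical driver for the class above (the constructor arguments
and the matplotlib scaffolding are assumptions, not part of the original
example):

if __name__ == "__main__":
    import matplotlib.pyplot as plt

    # Hypothetical values for a small coupled run
    cfd = CFD(npxyz=[1, 1, 1], xyzL=[1., 1., 1.],
              xyz_orig=[0., 0., 0.], ncxyz=[32, 32, 32])
    fig, ax = plt.subplots()
    cfd.recv_CPL_data()   # pull one field from the MD side
    cfd.plot_grid(ax)     # draw the CFD and MD grids
    cfd.plot_data(ax)     # overlay the received profiles
    plt.show()
    cfd.finalise()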
Example 7
def CFD(xyzL=[1.5E-003, 1.5E-003, 2.50E-003],
        g=9.81,
        ncxyz=[8, 8, 8],
        npxyz=[1, 1, 1],
        Nsteps=101):

    #initialise MPI and CPL. Note that inside a function "CPL = CPL()"
    #shadows the imported class and raises UnboundLocalError, so the class
    #is qualified through its module here (assumes "import cplpy" at the
    #top of the file).
    comm = MPI.COMM_WORLD
    CPL = cplpy.CPL()
    CFD_COMM = CPL.init(CPL.CFD_REALM)
    nprocs_realm = CFD_COMM.Get_size()

    # Parameters of the cpu topology (cartesian grid)
    npxyz = np.array(npxyz, order='F', dtype=np.int32)
    NProcs = np.prod(npxyz)

    xyzL = np.array(xyzL, order='F', dtype=np.float64)
    xyz_orig = np.array([0.0, 0.0, 0.0], order='F', dtype=np.float64)
    ncxyz = np.array(ncxyz, order='F', dtype=np.int32)
    if (nprocs_realm != NProcs):
        print("Non-coherent number of processes in CFD ", nprocs_realm,
              " not equal to ", npxyz[0], " X ", npxyz[1], " X ", npxyz[2])
        comm.Abort(errorcode=1)

    #Setup coupled simulation
    cart_comm = CFD_COMM.Create_cart([npxyz[0], npxyz[1], npxyz[2]])
    CPL.setup_cfd(cart_comm, xyzL, xyz_orig, ncxyz)

    #Get constraint region
    cnst_limits = CPL.get_cnst_limits()
    cnst_portion = CPL.my_proc_portion(cnst_limits)
    [cnst_ncxl, cnst_ncyl, cnst_nczl] = CPL.get_no_cells(cnst_portion)

    #Get overlap region
    olap_limits = CPL.get_olap_limits()
    BC_limits = np.array([olap_limits[0], olap_limits[1], olap_limits[2],
                          olap_limits[3], olap_limits[4], olap_limits[5]],
                         order='F', dtype=np.int32)
    BC_portion = CPL.my_proc_portion(BC_limits)
    [BC_ncxl, BC_ncyl, BC_nczl] = CPL.get_no_cells(BC_portion)

    #Allocate send and recv arrays
    recv_array = np.zeros((4, BC_ncxl, BC_ncyl, BC_nczl),
                          order='F',
                          dtype=np.float64)
    send_array = np.zeros((9, cnst_ncxl, cnst_ncyl, cnst_nczl),
                          order='F',
                          dtype=np.float64)

    for time in range(Nsteps):

        # send data to update
        send_array[2, :, :, :] = -5.9490638385009208e-08 * g  # * mi
        CPL.send(send_array, cnst_portion)

        # recv data and plot
        recv_array, ierr = CPL.recv(recv_array, BC_portion)

        print(time)

    CPL.finalize()
    MPI.Finalize()
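A hypothetical entry point for running this function as the CFD half of a
coupled job (the launch detail in the comment is illustrative only):

if __name__ == "__main__":
    # e.g. launched alongside an MD partner script, such as via
    # cpl-library's cplexec utility or an MPMD mpiexec invocation
    CFD()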