from __future__ import division
import numpy as np
from numpy import pi, sqrt, sin, cos, tan, exp, arccos, fabs
from numpy.testing import assert_array_almost_equal as aa_equal
from mpi4py import MPI
import netCDF4 as nc
# Project-local modules (import paths as in the repository are assumed):
# MachinePlatform, CPU_F90, CubeGridMPI, CubeMPI, SpectralElementSphere,
# Array, ArrayAs


def test_gradient():
    ''' SEM on the sphere: gradient(), Y11 '''
    ne, ngq = 30, 4
    nproc, myrank = 1, 0

    # setup
    platform = MachinePlatform('CPU', 'f90')
    cubegrid = CubeGridMPI(ne, ngq, nproc, myrank)
    local_ep_size = cubegrid.local_ep_size
    lats = cubegrid.local_latlons[:,0]
    lons = cubegrid.local_latlons[:,1]

    # test function: spherical harmonic Y11 (Y25 kept as an alternative)
    theta, phi = lons, pi/2 - lats
    Y11 = -0.5*sqrt(1.5/pi)*sin(lats)*cos(lons)
    Y25 = (1/8)*sqrt(1155/(2*pi))*cos(2*theta)*sin(phi)**2*(3*cos(phi)**3 - cos(phi))

    scalar = ArrayAs(platform, Y11)
    ret = Array(platform, local_ep_size*2, 'f8')

    sep = SpectralElementSphere(platform, cubegrid)
    sep.gradient(scalar, ret)
    #sep.average_boundary(ret.get()[::2])
    #sep.average_boundary(ret.get()[1::2])

    # verify against the analytic longitudinal derivative of Y11
    ref_lon = 0.5*sqrt(1.5/pi)*tan(lats)*sin(lons)
    #ref_lon = -(1/4)*sqrt(1155/(2*pi))*sin(2*theta)*sin(phi)*(3*cos(phi)**3 - cos(phi))
    for ie in xrange(local_ep_size//(ngq*ngq)):
        print cubegrid.local_gq_indices[ie*16]
        #idxs = [ie*16+k for k in [5,6,9,10]]
        #aa_equal(ret.get()[::2][idxs], ref_lon[idxs], 5)
        idxs = [ie*16+k for k in xrange(16)]    # all 16 GQ points (ngq*ngq)
        aa_equal(ret.get()[::2][idxs], ref_lon[idxs], 5)
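# A minimal, hedged sanity sketch (not part of the original test): verify
# with plain numpy that ref_lon above is indeed the longitudinal component
# of the spherical gradient of Y11, i.e. (1/cos(lat)) * dY11/dlon, using a
# centered finite difference. No project modules are assumed.
def check_y11_ref_lon():
    lat, lon = np.meshgrid(np.linspace(-1.2, 1.2, 7),
                           np.linspace(0.0, 2*np.pi, 9, endpoint=False))
    Y11 = lambda la, lo: -0.5*np.sqrt(1.5/np.pi)*np.sin(la)*np.cos(lo)
    dlon = 1e-6
    fd = (Y11(lat, lon+dlon) - Y11(lat, lon-dlon))/(2*dlon)/np.cos(lat)
    ref = 0.5*np.sqrt(1.5/np.pi)*np.tan(lat)*np.sin(lon)
    np.testing.assert_array_almost_equal(fd, ref, 6)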
def check_impvis_mpi(ne, ngq, comm):
    ''' CubeMPI for Implicit Viscosity '''
    niter = 600
    init_type = 'Y35'       # constant, Y35, cosine_bell

    myrank = comm.Get_rank()
    nproc = comm.Get_size()

    platform = CPU_F90()
    cubegrid = CubeGridMPI(ne, ngq, nproc, myrank)
    cubempi = CubeMPI(cubegrid, method='IMPVIS')

    #----------------------------------------------------------
    # Generate an initial field on the cubed-sphere
    #----------------------------------------------------------
    local_ep_size = cubegrid.local_ep_size
    lats = cubegrid.local_latlons[:,0]      # (local_ep_size,2)
    lons = cubegrid.local_latlons[:,1]
    theta, phi = lons, pi/2 - lats

    if init_type == 'constant':
        init = np.ones(local_ep_size, 'f8')
    elif init_type == 'Y35':
        #init = special.sph_harm(3, 5, theta, phi).real     # (m,l,theta,phi)
        init = -(1/32)*sqrt(385/pi)*cos(3*theta)*sin(phi)**3*(9*cos(phi)**2 - 1)

    field = ArrayAs(platform, init)

    #----------------------------------------------------------
    # Apply the High-order Elliptic Filter with MPI
    #----------------------------------------------------------
    send_dsts = ArrayAs(platform, cubempi.send_dsts)
    send_srcs = ArrayAs(platform, cubempi.send_srcs)
    send_wgts = ArrayAs(platform, cubempi.send_wgts)
    recv_dsts = ArrayAs(platform, cubempi.recv_dsts)
    recv_srcs = ArrayAs(platform, cubempi.recv_srcs)
    send_buf = Array(platform, cubempi.send_buf_size, 'f8')
    recv_buf = Array(platform, cubempi.recv_buf_size, 'f8')

    src = open('cube_mpi.'+platform.code_type).read()
    lib = platform.source_compile(src)
    pre_send = platform.get_function(lib, 'pre_send')
    post_recv = platform.get_function(lib, 'post_recv')
    pre_send.prepare('iiiiioooooo', \
            local_ep_size, send_dsts.size, cubempi.send_buf_size, \
            cubempi.recv_buf_size, cubempi.local_src_size, \
            send_dsts, send_srcs, send_wgts, field, send_buf, recv_buf)
    post_recv.prepare('iiioooo', \
            local_ep_size, recv_dsts.size, cubempi.recv_buf_size, \
            recv_dsts, recv_srcs, recv_buf, field)

    # Send/Recv
    for seq in xrange(niter):
        pre_send.prepared_call()

        req_send_list = list()
        req_recv_list = list()

        for dest, start, size in cubempi.send_schedule:
            req = comm.Isend(send_buf.get()[start:start+size], dest, 0)
            req_send_list.append(req)

        for dest, start, size in cubempi.recv_schedule:
            req = comm.Irecv(recv_buf.get()[start:start+size], dest, 0)
            req_recv_list.append(req)

        MPI.Request.Waitall(req_send_list)
        MPI.Request.Waitall(req_recv_list)

        # After receive
        post_recv.prepared_call()

    #-----------------------------------------------------
    # Check that the filtered field matches the reference
    #-----------------------------------------------------
    local_up_sizes = comm.gather(cubegrid.local_up_size, root=0)

    if myrank == 0:
        sizes = np.cumsum(local_up_sizes)
        gids = np.zeros(sizes[-1], 'i4')
        f = np.zeros(sizes[-1], 'f8')

        gids[:sizes[0]] = cubegrid.local_gids[cubegrid.local_is_uvps]
        f[:sizes[0]] = field.get()[cubegrid.local_is_uvps]

        for src in xrange(1, nproc):
            comm.Recv(gids[sizes[src-1]:sizes[src]], src, 10)
        for src in xrange(1, nproc):
            comm.Recv(f[sizes[src-1]:sizes[src]], src, 20)

        idxs = np.argsort(gids)
        try:
            nc_fpath = 'impvis_diagnostic/impvis_%s_ne%dngq%d_600step.nc' % (init_type, ne, ngq)
            ncf = nc.Dataset(nc_fpath, 'r', format='NETCDF4')
        except Exception:
            raise IOError('Not found: %s' % nc_fpath)
        aa_equal(f[idxs], ncf.variables['field%d'%niter][:], 14)
    else:
        # Non-root ranks must send their gids and field values to match
        # the Recv calls above (tags 10 and 20).
        comm.Send(cubegrid.local_gids[cubegrid.local_is_uvps], 0, 10)
        comm.Send(field.get()[cubegrid.local_is_uvps], 0, 20)
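# Hedged usage sketch (not in the original source): how check_impvis_mpi
# might be driven, e.g. `mpiexec -n 4 python test_impvis.py`; the file
# name and the ne=30, ngq=4 values are illustrative only.
if __name__ == '__main__':
    check_impvis_mpi(30, 4, MPI.COMM_WORLD)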
#----------------------------------------------
# Setup
#----------------------------------------------
ne = 30         # elements / axis
ngq = 4         # GQ points / axis / element
cfl = 0.1       # Courant-Friedrichs-Lewy (CFL) number
nproc = 1

platform = MachinePlatform('CPU', 'f90')
cubegrid = CubeGridMPI(ne, ngq, nproc, myrank=0)

#----------------------------------------------
# Variables
#----------------------------------------------
ep_size = cubegrid.local_ep_size

psi = Array(platform, ep_size, 'f8', 'psi')
velocity = Array(platform, ep_size*2, 'f8', 'velocity')

#----------------------------------------------
# Initialize the velocity vector and the scalar field
#----------------------------------------------
# Initialize with the 2D Gaussian
lon0, lat0 = pi/2, -pi/5

latlons = cubegrid.local_latlons    # (local_ep_size,2)
lats = latlons[:,0]
lons = latlons[:,1]

dist = arccos( sin(lat0)*sin(lats) \
        + cos(lat0)*cos(lats)*cos( fabs(lons-lon0) ) )      # r=1
psi.set( exp( -dist**2/(pi/50) ) )
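# Hedged sanity sketch (not part of the original script): checks the
# great-circle distance formula used above with plain numpy. Note the
# np.clip guard: rounding can push the cosine marginally above 1 at the
# center point, where np.arccos would then return NaN.
def check_gaussian_init():
    lon0, lat0 = np.pi/2, -np.pi/5
    lons = lon0 + np.array([0.0, 0.1, 0.5])
    lats = np.full(3, lat0)
    cosd = np.sin(lat0)*np.sin(lats) \
            + np.cos(lat0)*np.cos(lats)*np.cos(np.fabs(lons - lon0))
    dist = np.arccos(np.clip(cosd, -1.0, 1.0))      # r=1
    psi = np.exp(-dist**2/(np.pi/50))
    assert abs(psi[0] - 1.0) < 1e-12    # peak value 1 at the center
    assert psi[0] > psi[1] > psi[2]     # decays away from the center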