Example 1
    def setUp(self):
        PeriodicBox2DTestCaseCPU.setUp(self)
        self.orig_n = self.fluid.get_number_of_particles()
        self.nnps = BoxSortNNPS(
            dim=2, particles=[self.fluid],
            domain=self.domain,
            radius_scale=self.kernel.radius_scale)
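
After setUp has built the BoxSortNNPS, a test method would typically query it through get_nearest_particles. The fragment below is only an illustrative sketch of such a query; the UIntArray import path differs between PySPH versions, and the choice of particle index 0 is arbitrary:

# illustrative only -- this would live inside a test method of the same class
from cyarray.carray import UIntArray   # older PySPH releases: pyzoltan.core.carray

nbrs = UIntArray()
# neighbours of destination particle 0, with the fluid array as both source and destination
self.nnps.get_nearest_particles(0, 0, 0, nbrs)
print(nbrs.length, "neighbours found for particle 0")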
Example 2
x = x.ravel(); y = y.ravel()
h = numpy.ones_like(x) * h0
m = numpy.ones_like(x) * volume
wij = numpy.zeros_like(x)

# use the helper function get_particle_array to create a ParticleArray
pa = utils.get_particle_array(x=x, y=y, h=h, m=m, wij=wij)

# the simulation domain (periodicity is disabled in this example)
domain = DomainManager(
    xmin=min, xmax=max, ymin=min, ymax=max,
    periodic_in_x=False, periodic_in_y=False)

print "NumPa:", pa.num_real_particles

# NNPS object for nearest neighbor queries
nps = BoxSortNNPS(dim=2, particles=[pa,], radius_scale=k.radius_scale, domain=domain)
#nps.bin()
DMP = DeviceMemoryPool()
"""
cells = nps.cells

max_cell_pop = 0
for cellkey in cells.keys():
        #print cellkey, cells[cellkey].nparticles[0]
        #print cells[cellkey].lindices[0].get_npy_array()
        if cells[cellkey].nparticles[0] > max_cell_pop:
            max_cell_pop = cells[cellkey].nparticles[0]
"""

max_cell_pop_gpu, nc, num_particles = nps.get_max_cell_pop(0, DMP)
max_cell_pop = max_cell_pop_gpu.get()
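
The snippet above relies on names (x, y, h0, volume, k, min, max) that are defined earlier in the full script. A minimal preamble along the following lines would make it self-contained; the grid spacing, domain extents and kernel choice are illustrative assumptions, and import paths may differ across PySPH versions (the DeviceMemoryPool and get_max_cell_pop calls come from a GPU-enabled branch and are not covered here):

import numpy
from pysph.base import utils
from pysph.base.kernels import CubicSpline
from pysph.base.nnps import BoxSortNNPS, DomainManager

# assumed setup: a uniform grid of particles on [0, 1] x [0, 1]
dx = dy = 0.01
min, max = 0.0, 1.0          # domain extents (shadowing the builtins, as the original does)
x, y = numpy.mgrid[min:max:dx, min:max:dy]
h0 = 1.3 * dx                # assumed smoothing length
volume = dx * dy             # assumed particle volume per grid cell
k = CubicSpline(dim=2)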
Example 3
def main():
    # Initialize MPI and find out number of local particles
    comm = mpi.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()

    # number of particles per array
    numMyPoints = 1 << 10
    numGlobalPoints = size * numMyPoints

    avg_vol = 1.0 / numGlobalPoints
    dx = numpy.power(avg_vol, 1.0 / dim)
    mass = avg_vol
    hdx = 1.3

    if numGlobalPoints % size != 0:
        raise RuntimeError("Run with 2^n num procs!")

    # everybody creates two particle arrays with numMyPoints
    x1 = random.random(numMyPoints)
    y1 = random.random(numMyPoints)
    z1 = random.random(numMyPoints)
    h1 = numpy.ones_like(x1) * hdx * dx
    rho1 = numpy.zeros_like(x1)

    x2 = random.random(numMyPoints)
    y2 = random.random(numMyPoints)
    z2 = random.random(numMyPoints)
    h2 = numpy.ones_like(x2) * hdx * dx
    rho2 = numpy.zeros_like(x2)

    # z1[:] = 1.0
    # z2[:] = 0.5

    # local particle arrays
    pa1 = get_particle_array_wcsph(x=x1, y=y1, h=h1, rho=rho1, z=z1)
    pa2 = get_particle_array_wcsph(x=x2, y=y2, h=h2, rho=rho2, z=z2)

    # gather the data on root
    X1 = numpy.zeros(numGlobalPoints)
    Y1 = numpy.zeros(numGlobalPoints)
    Z1 = numpy.zeros(numGlobalPoints)
    H1 = numpy.ones_like(X1) * hdx * dx
    RHO1 = numpy.zeros_like(X1)

    gathers = (numpy.ones(size) * numMyPoints, None)

    comm.Gatherv(sendbuf=[x1, mpi.DOUBLE], recvbuf=[X1, gathers, mpi.DOUBLE])
    comm.Gatherv(sendbuf=[y1, mpi.DOUBLE], recvbuf=[Y1, gathers, mpi.DOUBLE])
    comm.Gatherv(sendbuf=[z1, mpi.DOUBLE], recvbuf=[Z1, gathers, mpi.DOUBLE])
    comm.Gatherv(sendbuf=[rho1, mpi.DOUBLE],
                 recvbuf=[RHO1, gathers, mpi.DOUBLE])

    X2 = numpy.zeros(numGlobalPoints)
    Y2 = numpy.zeros(numGlobalPoints)
    Z2 = numpy.zeros(numGlobalPoints)
    H2 = numpy.ones_like(X2) * hdx * dx
    RHO2 = numpy.zeros_like(X2)

    comm.Gatherv(sendbuf=[x2, mpi.DOUBLE], recvbuf=[X2, gathers, mpi.DOUBLE])
    comm.Gatherv(sendbuf=[y2, mpi.DOUBLE], recvbuf=[Y2, gathers, mpi.DOUBLE])
    comm.Gatherv(sendbuf=[z2, mpi.DOUBLE], recvbuf=[Z2, gathers, mpi.DOUBLE])
    comm.Gatherv(sendbuf=[rho2, mpi.DOUBLE],
                 recvbuf=[RHO2, gathers, mpi.DOUBLE])

    # create the particle arrays and PM
    PA1 = get_particle_array_wcsph(x=X1, y=Y1, z=Z1, h=H1, rho=RHO1)
    PA2 = get_particle_array_wcsph(x=X2, y=Y2, z=Z2, h=H2, rho=RHO2)

    # create the parallel manager
    PARTICLES = [PA1, PA2]
    PM = ZoltanParallelManagerGeometric(dim=dim,
                                        particles=PARTICLES,
                                        comm=comm)

    # create the local NNPS object with all the particles
    Nnps = BoxSortNNPS(dim=dim, particles=PARTICLES)
    Nnps.update()

    # only root computes summation density
    if rank == 0:
        assert numpy.allclose(PA1.rho, 0)
        sd_evaluate(Nnps, PM, mass, src_index=1, dst_index=0)
        sd_evaluate(Nnps, PM, mass, src_index=0, dst_index=0)
        RHO1 = PA1.rho

        assert numpy.allclose(PA2.rho, 0)
        sd_evaluate(Nnps, PM, mass, src_index=0, dst_index=1)
        sd_evaluate(Nnps, PM, mass, src_index=1, dst_index=1)
        RHO2 = PA2.rho

    # wait for the root...
    comm.barrier()

    # create the local particle arrays
    particles = [pa1, pa2]

    # create the local nnps object and parallel manager
    pm = ZoltanParallelManagerGeometric(dim=dim,
                                        comm=comm,
                                        particles=particles)
    nnps = BoxSortNNPS(dim=dim, particles=particles)

    # set the Zoltan parameters (Optional)
    pz = pm.pz
    pz.set_lb_method("RCB")
    pz.Zoltan_Set_Param("DEBUG_LEVEL", "0")

    # Update the parallel manager (distribute particles)
    pm.update()

    # update the local nnps
    nnps.update()

    # Compute summation density individually on each processor
    sd_evaluate(nnps, pm, mass, src_index=0, dst_index=1)
    sd_evaluate(nnps, pm, mass, src_index=1, dst_index=1)

    sd_evaluate(nnps, pm, mass, src_index=0, dst_index=0)
    sd_evaluate(nnps, pm, mass, src_index=1, dst_index=0)

    # gather the density and global ids
    rho1 = pa1.rho
    tmp = comm.gather(rho1)
    if rank == 0:
        global_rho1 = numpy.concatenate(tmp)
        assert (global_rho1.size == numGlobalPoints)

    rho2 = pa2.rho
    tmp = comm.gather(rho2)
    if rank == 0:
        global_rho2 = numpy.concatenate(tmp)
        assert (global_rho2.size == numGlobalPoints)

    # gather global x1 and y1
    x1 = pa1.x
    tmp = comm.gather(x1)
    if rank == 0:
        global_x1 = numpy.concatenate(tmp)
        assert (global_x1.size == numGlobalPoints)

    y1 = pa1.y
    tmp = comm.gather(y1)
    if rank == 0:
        global_y1 = numpy.concatenate(tmp)
        assert (global_y1.size == numGlobalPoints)

    z1 = pa1.z
    tmp = comm.gather(z1)
    if rank == 0:
        global_z1 = numpy.concatenate(tmp)
        assert (global_z1.size == numGlobalPoints)

    # gather global x2 and y2
    x2 = pa2.x
    tmp = comm.gather(x2)
    if rank == 0:
        global_x2 = numpy.concatenate(tmp)
        assert (global_x2.size == numGlobalPoints)

    y2 = pa2.y
    tmp = comm.gather(y2)
    if rank == 0:
        global_y2 = numpy.concatenate(tmp)
        assert (global_y2.size == numGlobalPoints)

    z2 = pa2.z
    tmp = comm.gather(z2)
    if rank == 0:
        global_z2 = numpy.concatenate(tmp)
        assert (global_z2.size == numGlobalPoints)

    # gather global indices
    gid1 = pa1.gid
    tmp = comm.gather(gid1)
    if rank == 0:
        global_gid1 = numpy.concatenate(tmp)
        assert (global_gid1.size == numGlobalPoints)

    gid2 = pa2.gid
    tmp = comm.gather(gid2)
    if rank == 0:
        global_gid2 = numpy.concatenate(tmp)
        assert (global_gid2.size == numGlobalPoints)

    # check rho1
    if rank == 0:
        # make sure the arrays are of the same size
        assert (global_x1.size == X1.size)
        assert (global_y1.size == Y1.size)
        assert (global_z1.size == Z1.size)

        for i in range(numGlobalPoints):

            # make sure we're checking the right point
            assert abs(global_x1[i] - X1[global_gid1[i]]) < 1e-14
            assert abs(global_y1[i] - Y1[global_gid1[i]]) < 1e-14
            assert abs(global_z1[i] - Z1[global_gid1[i]]) < 1e-14

            diff = abs(global_rho1[i] - RHO1[global_gid1[i]])
            condition = diff < 1e-14
            assert condition, "diff = %g" % (diff)

    # check rho2
    if rank == 0:
        # make sure the arrays are of the same size
        assert (global_x2.size == X2.size)
        assert (global_y2.size == Y2.size)
        assert (global_z2.size == Z2.size)

        for i in range(numGlobalPoints):

            # make sure we're checking the right point
            assert abs(global_x2[i] - X2[global_gid2[i]]) < 1e-14
            assert abs(global_y2[i] - Y2[global_gid2[i]]) < 1e-14
            assert abs(global_z2[i] - Z2[global_gid2[i]]) < 1e-14

            diff = abs(global_rho2[i] - RHO2[global_gid2[i]])
            condition = diff < 1e-14
            assert condition, "diff = %g" % (diff)

    if rank == 0:
        print("Summation density test: OK")
Example 4
comm.Gatherv(sendbuf=[x2, mpi.DOUBLE], recvbuf=[X2, gathers, mpi.DOUBLE])
comm.Gatherv(sendbuf=[y2, mpi.DOUBLE], recvbuf=[Y2, gathers, mpi.DOUBLE])
comm.Gatherv(sendbuf=[z2, mpi.DOUBLE], recvbuf=[Z2, gathers, mpi.DOUBLE])
comm.Gatherv(sendbuf=[rho2, mpi.DOUBLE], recvbuf=[RHO2, gathers, mpi.DOUBLE])

# create the particle arrays and PM
PA1 = get_particle_array_wcsph(x=X1, y=Y1, z=Z1, h=H1, rho=RHO1)
PA2 = get_particle_array_wcsph(x=X2, y=Y2, z=Z2, h=H2, rho=RHO2)

# create the parallel manager
PARTICLES = [PA1, PA2]
PM = ZoltanParallelManagerGeometric(dim=dim, particles=PARTICLES, comm=comm)

# create the local NNPS object with all the particles
Nnps = BoxSortNNPS(dim=dim, particles=PARTICLES)
Nnps.update()

# only root computes summation density
if rank == 0:
    assert numpy.allclose(PA1.rho, 0)
    sd_evaluate(Nnps, PM, mass, src_index=1, dst_index=0)
    sd_evaluate(Nnps, PM, mass, src_index=0, dst_index=0)
    RHO1 = PA1.rho

    assert numpy.allclose(PA2.rho, 0)
    sd_evaluate(Nnps, PM, mass, src_index=0, dst_index=1)
    sd_evaluate(Nnps, PM, mass, src_index=1, dst_index=1)
    RHO2 = PA2.rho

# wait for the root...
comm.barrier()
Example 5
    def setup(self, solver, equations, nnps=None, particle_factory=None, *args, **kwargs):
        """Set the application's solver.  This will call the solver's
        `setup` method.

        The following solver options are set:

        dt -- the time step for the solver

        tf -- the final time for the simulation

        fname -- the file name for output file printing

        freq -- the output print frequency

        level -- the output detail level

        dir -- the output directory

        integration_type -- The integration method

        default_kernel -- the default kernel to use for operations

        Parameters
        ----------
        particle_factory : callable or None
            If supplied, particles will be created for the solver using the
            particle arrays returned by the callable. Else particles for the
            solver need to be set before calling this method

        """
        start_time = time.time()
        self._solver = solver
        solver_opts = solver.get_options(self.opt_parse)
        if solver_opts is not None:
            self.add_option(solver_opts)
        self._process_command_line()

        options = self.options

        # Create particles either from scratch or restart
        self._create_particles(particle_factory, *args, **kwargs)

        # setup the solver using any options
        self._solver.setup_solver(options.__dict__)

        # fixed smoothing lengths
        fixed_h = solver.fixed_h or options.fixed_h

        if nnps is None:
            kernel = self._solver.kernel

            # create the NNPS object
            if options.nnps == "box":
                nnps = BoxSortNNPS(
                    dim=solver.dim, particles=self.particles, radius_scale=kernel.radius_scale, domain=self.domain
                )

            elif options.nnps == "ll":
                nnps = LinkedListNNPS(
                    dim=solver.dim,
                    particles=self.particles,
                    radius_scale=kernel.radius_scale,
                    domain=self.domain,
                    fixed_h=fixed_h,
                )

        # once the NNPS has been set-up, we set the default Solver
        # post-stage callback to the DomainManager.setup_domain
        # method. This method is responsible for computing the new cell
        # size and doing any periodicity checks if needed.
        solver.add_post_stage_callback(nnps.update_domain)

        # inform NNPS if it's working in parallel
        if self.num_procs > 1:
            nnps.set_in_parallel(True)

        # save the NNPS with the application
        self.nnps = nnps

        dt = options.time_step
        if dt is not None:
            solver.set_time_step(dt)

        tf = options.final_time
        if tf is not None:
            solver.set_final_time(tf)

        # Setup the solver output file name
        fname = options.output

        if Has_MPI:
            rank = self.rank
            if self.num_procs > 1:
                fname += "_" + str(rank)

        # set the rank for the solver
        solver.rank = self.rank
        solver.pid = self.rank
        solver.comm = self.comm

        # set the in parallel flag for the solver
        if self.num_procs > 1:
            solver.in_parallel = True

        # output file name
        solver.set_output_fname(fname)

        # disable_output
        solver.set_disable_output(options.disable_output)

        # Cell iteration.
        solver.set_cell_iteration(options.cell_iteration)

        # output print frequency
        if options.freq is not None:
            solver.set_print_freq(options.freq)

        # output printing level (default is not detailed)
        if options.detailed_output is not None:
            solver.set_output_printing_level(options.detailed_output)

        # solver output behaviour in parallel
        if options.output_dump_remote:
            solver.set_output_only_real(False)

        # output directory
        solver.set_output_directory(abspath(options.output_dir))

        # set parallel output mode
        if options.parallel_output_mode is not None:
            solver.set_parallel_output_mode(options.parallel_output_mode)

        # Set the adaptive timestep
        if options.adaptive_timestep is not None:
            solver.set_adaptive_timestep(options.adaptive_timestep)

            # set solver cfl number
            solver.set_cfl(options.cfl)

        if options.integration is not None:
            # FIXME, this is bogus
            # solver.integrator_type = integration_methods[options.integration]
            pass

        # setup the solver. This is where the code is compiled
        solver.setup(particles=self.particles, equations=equations, nnps=nnps, fixed_h=fixed_h)

        # add solver interfaces
        self.command_manager = CommandManager(solver, self.comm)
        solver.set_command_handler(self.command_manager.execute_commands)

        if self.rank == 0:
            # commandline interface
            if options.cmd_line:
                from pysph.solver.solver_interfaces import CommandlineInterface

                self.command_manager.add_interface(CommandlineInterface().start)

            # XML-RPC interface
            if options.xml_rpc:
                from pysph.solver.solver_interfaces import XMLRPCInterface

                addr = options.xml_rpc
                idx = addr.find(":")
                host = "0.0.0.0" if idx == -1 else addr[:idx]
                port = int(addr[idx + 1 :])
                self.command_manager.add_interface(XMLRPCInterface((host, port)).start)

            # python MultiProcessing interface
            if options.multiproc:
                from pysph.solver.solver_interfaces import MultiprocessingInterface

                addr = options.multiproc
                idx = addr.find("@")
                authkey = "pysph" if idx == -1 else addr[:idx]
                addr = addr[idx + 1 :]
                idx = addr.find(":")
                host = "0.0.0.0" if idx == -1 else addr[:idx]
                port = addr[idx + 1 :]
                if port[-1] == "+":
                    try_next_port = True
                    port = port[:-1]
                else:
                    try_next_port = False
                port = int(port)

                interface = MultiprocessingInterface((host, port), authkey, try_next_port)

                self.command_manager.add_interface(interface.start)

                logger.info("started multiprocessing interface on %s" % (interface.address,))
        end_time = time.time()
        self._message("Setup took: %.5f secs" % (end_time - start_time))
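
The --multiproc address handled near the end of setup follows the pattern [authkey@]host:port[+], where a missing authkey defaults to "pysph", a missing host defaults to "0.0.0.0", and a trailing "+" means "try successive ports if this one is busy". A standalone restatement of that parsing logic, for illustration only (not part of the library):

def parse_multiproc_address(addr):
    # parse '[authkey@]host:port[+]' the same way Application.setup does above
    idx = addr.find("@")
    authkey = "pysph" if idx == -1 else addr[:idx]
    addr = addr[idx + 1:]
    idx = addr.find(":")
    host = "0.0.0.0" if idx == -1 else addr[:idx]
    port = addr[idx + 1:]
    try_next_port = port.endswith("+")
    if try_next_port:
        port = port[:-1]
    return authkey, host, int(port), try_next_port

# e.g. parse_multiproc_address("secret@127.0.0.1:8800+")
#      -> ("secret", "127.0.0.1", 8800, True)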
Example 6
    def setUp(self):
        PeriodicChannel2DTestCase.setUp(self)
        self.nnps = BoxSortNNPS(dim=2,
                                particles=self.particles,
                                domain=self.domain,
                                radius_scale=self.kernel.radius_scale)
Example 7
print('Volume estimates :: dx^2 = %g, Number density = %g' % (dx * dy, volume))

x = x.ravel(); y = y.ravel()
h = numpy.ones_like(x) * h0
m = numpy.ones_like(x) * volume
wij = numpy.zeros_like(x)

# use the helper function get_particle_array to create a ParticleArray
pa = utils.get_particle_array(x=x, y=y, h=h, m=m, wij=wij)

# the simulation domain used to request periodicity
domain = DomainManager(
    xmin=0., xmax=max, ymin=0., ymax=max,
    periodic_in_x=True, periodic_in_y=True)

# NNPS object for nearest neighbor queries
nps = BoxSortNNPS(dim=2, particles=[pa,], radius_scale=k.radius_scale, domain=domain)

cells = nps.cells
max_cell_pop = 0

for cellkey in cells.keys():
    print(cellkey, cells[cellkey].nparticles[0])
    print(cells[cellkey].lindices[0].get_npy_array())
    if cells[cellkey].nparticles[0] > max_cell_pop:
        max_cell_pop = cells[cellkey].nparticles[0]


print(len(cells))
print(nps.cell_size)
print(pa.num_real_particles)
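
As a quick sanity check on the values printed above: for box-sort binning the cell size is expected to be on the order of the kernel support, i.e. roughly radius_scale * max(h). This expectation is not asserted by the original script; the comparison below is just an illustrative check using the names already defined:

expected = k.radius_scale * h.max()
print("cell_size = %g, radius_scale * h_max = %g" % (nps.cell_size, expected))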