Example #1
    def dump(self, fname, particles, solver_data):
        self.particle_data = dict(get_particles_info(particles))
        self.all_array_data = {}
        for array in particles:
            self.all_array_data[array.name] = array.get_property_arrays(
                all=self.detailed_output, only_real=self.only_real)
        mpi_comm = self.mpi_comm
        if mpi_comm is not None:
            self.all_array_data = gather_array_data(self.all_array_data, mpi_comm)
        self.solver_data = solver_data
        if mpi_comm is None or mpi_comm.Get_rank() == 0:
            self._dump(fname)
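
Example #1 is the write side of the output path: it collects the property arrays for every particle array, gathers them across ranks when MPI is in use, and writes the file only on rank 0. For reference, here is a minimal sketch of reading such a dump back, mirroring the restart branch of Example #4; the file name is hypothetical and the `pysph.solver.utils` import path is an assumption:

from pysph.solver.utils import load  # assumed import path

# `load` returns a dict holding the particle arrays and the solver state,
# which is how the restart branch of Example #4 consumes it.
data = load('restart_output.npz')      # hypothetical file name
arrays = data['arrays']                # maps array name -> ParticleArray
solver_data = data['solver_data']      # contains 't', 'dt' and 'count'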
Example #2
    def test_that_get_particles_info_works(self):
        # Given.
        p = particle_array.ParticleArray(name='f', x=[1, 2, 3])
        c = [1.0, 2.0]
        p.add_constant('c', c)

        # When.
        info = utils.get_particles_info([p])
        pas = utils.create_dummy_particles(info)
        dummy = pas[0]

        # Then.
        self.assertTrue(check_array(dummy.c, c))
        self.assertEqual(dummy.name, 'f')
        self.assertTrue('x' in dummy.properties)
Example #3
    def test_that_get_particles_info_works(self):
        # Given.
        p = particle_array.ParticleArray(name='f', x=[1, 2, 3])
        p.add_property('A', data=numpy.arange(6), stride=2)
        c = [1.0, 2.0]
        p.add_constant('c', c)
        p.set_lb_props(['x'])

        # When.
        info = utils.get_particles_info([p])
        pas = utils.create_dummy_particles(info)
        dummy = pas[0]

        # Then.
        self.assertTrue(check_array(dummy.c, c))
        self.assertEqual(dummy.name, 'f')
        self.assertTrue('x' in dummy.properties)
        self.assertTrue('A' in dummy.properties)
        self.assertTrue('A' in dummy.stride)
        self.assertEqual(dummy.stride['A'], 2)
        self.assertEqual(dummy.get_lb_props(), p.get_lb_props())
Example #4
    def _create_particles(self, particle_factory, *args, **kw):
        """Create particles given a callable `particle_factory` and any
        arguments to it.

        This will also automatically distribute the particles among
        processors if this is a parallel run.  Returns a list of particle
        arrays that are created.
        """

        solver = self._solver
        num_procs = self.num_procs
        options = self.options
        rank = self.rank
        comm = self.comm

        # particle array info that is used to create dummy particles
        # on non-root processors
        particles_info = {}

        # Only master creates the particles.
        if rank == 0:
            if options.restart_file is not None:
                data = load(options.restart_file)

                arrays = data["arrays"]
                solver_data = data["solver_data"]

                # arrays and particles
                particles = []
                for array_name in arrays:
                    particles.append(arrays[array_name])

                # save the particles list
                self.particles = particles

                # time, timestep and solver iteration count at restart
                t, dt, count = solver_data["t"], solver_data["dt"], solver_data["count"]

                # rescale dt at restart
                dt *= options.rescale_dt
                solver.t, solver.dt, solver.count = t, dt, count

            else:
                self.particles = particle_factory(*args, **kw)

            # get the array info which will be broadcast to the other procs
            particles_info = utils.get_particles_info(self.particles)

        # Broadcast the particles_info to other processors for parallel runs
        if num_procs > 1:
            particles_info = comm.bcast(particles_info, root=0)

        # now all processors other than root create dummy particle arrays
        if rank != 0:
            self.particles = utils.create_dummy_particles(particles_info)

        # Instantiate the Parallel Manager here and do an initial LB
        self.pm = None
        if num_procs > 1:

            if options.with_zoltan:
                if not (Has_Zoltan and Has_MPI):
                    raise RuntimeError(
                        "Cannot run in parallel: Zoltan and MPI are required."
                    )
            else:
                raise ValueError(
                    "Sorry, you're stuck with Zoltan for now; "
                    "use the option '--with_zoltan' for parallel runs."
                )

            # create the parallel manager
            obj_weight_dim = "0"
            if options.zoltan_weights:
                obj_weight_dim = "1"

            zoltan_lb_method = options.zoltan_lb_method

            # ghost layers
            ghost_layers = options.ghost_layers

            # radius scale for the parallel update
            radius_scale = options.parallel_scale_factor * solver.kernel.radius_scale

            self.pm = pm = ZoltanParallelManagerGeometric(
                dim=solver.dim,
                particles=self.particles,
                comm=comm,
                lb_method=zoltan_lb_method,
                obj_weight_dim=obj_weight_dim,
                ghost_layers=ghost_layers,
                update_cell_sizes=options.update_cell_sizes,
                radius_scale=radius_scale,
            )

            ### ADDITIONAL LOAD BALANCING FUNCTIONS FOR ZOLTAN ###

            # RCB lock directions
            if options.zoltan_rcb_lock_directions:
                pm.set_zoltan_rcb_lock_directions()

            if options.zoltan_rcb_reuse:
                pm.set_zoltan_rcb_reuse()

            if options.zoltan_rcb_rectilinear:
                pm.set_zoltan_rcb_rectilinear_blocks()

            if options.zoltan_rcb_set_direction > 0:
                pm.set_zoltan_rcb_directions(str(options.zoltan_rcb_set_direction))

            # set zoltan options
            pm.pz.Zoltan_Set_Param("DEBUG_LEVEL", options.zoltan_debug_level)
            pm.pz.Zoltan_Set_Param("DEBUG_MEMORY", "0")

            # do an initial load balance
            pm.update()
            pm.initial_update = False

            # set subsequent load balancing frequency
            lb_freq = options.lb_freq
            if lb_freq < 1:
                raise ValueError("Invalid lb_freq %d" % lb_freq)
            pm.set_lb_freq(lb_freq)

            # wait till the initial partition is done
            comm.barrier()

        # set the solver's parallel manager
        solver.set_parallel_manager(self.pm)

        return self.particles
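
The distribution logic in Example #4 reduces to one pattern: the root rank creates the real particle arrays and captures their metadata with get_particles_info, that metadata is broadcast to every rank, and non-root ranks rebuild arrays with the same structure (properties, constants, strides) using create_dummy_particles. A minimal standalone sketch of that pattern, assuming mpi4py and the pysph.base.particle_array / pysph.solver.utils module paths:

from mpi4py import MPI

from pysph.base import particle_array   # assumed module path
from pysph.solver import utils          # assumed module path

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

particles_info = {}
if rank == 0:
    # Only the root rank creates the real particles.
    particles = [particle_array.ParticleArray(name='f', x=[1, 2, 3])]
    particles_info = utils.get_particles_info(particles)

# Every rank receives the same array metadata ...
particles_info = comm.bcast(particles_info, root=0)

# ... and non-root ranks build dummy arrays with the matching structure.
if rank != 0:
    particles = utils.create_dummy_particles(particles_info)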