Example #1
 def _compile_acceleration_eval(self, arrays):
     names = [x.name for x in self.particle_arrays]
     if self.equations is None:
         if self.method == 'shepard':
             equations = [
                 InterpolateFunction(dest='interpolate', sources=names)
             ]
         elif self.method == 'sph':
             equations = [InterpolateSPH(dest='interpolate', sources=names)]
         else:
             equations = [
                 Group(equations=[
                     SummationDensity(dest=name, sources=names)
                     for name in names
                 ],
                       real=False),
                 Group(equations=[
                     SPHFirstOrderApproximationPreStep(dest='interpolate',
                                                       sources=names,
                                                       dim=self.dim)
                 ],
                       real=True),
                 Group(equations=[
                     SPHFirstOrderApproximation(dest='interpolate',
                                                sources=names,
                                                dim=self.dim)
                 ],
                       real=True)
             ]
     else:
         equations = self.equations
     self.func_eval = AccelerationEval(arrays, equations, self.kernel)
     compiler = SPHCompiler(self.func_eval, None)
     compiler.compile()
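A minimal usage sketch (not part of the original snippet) of how such a compiled evaluator is typically driven: after compile() it still needs a nearest-neighbour search object. The LinkedListNNPS class and the update/set_nnps/compute calls are taken from the later examples in this listing; the t and dt values are placeholders.

 from pysph.base.nnps import LinkedListNNPS

 nnps = LinkedListNNPS(dim=self.dim, particles=arrays)
 nnps.update()
 self.func_eval.set_nnps(nnps)
 # For pure interpolation the time and time step are dummy values.
 self.func_eval.compute(0.0, 1e-4)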
Example #2
    def __init__(self,
                 arrays,
                 equations,
                 dim,
                 kernel=None,
                 domain_manager=None):
        """Constructor.

        Parameters
        ----------
        arrays: list(ParticleArray)
        equations: list
        dim: int
        kernel: kernel instance.
        domain_manager: DomainManager
        """
        self.arrays = arrays
        self.equations = equations
        self.domain_manager = domain_manager
        self.dim = dim
        if kernel is None:
            self.kernel = Gaussian(dim=dim)
        else:
            self.kernel = kernel

        self.func_eval = AccelerationEval(arrays, equations, self.kernel)
        compiler = SPHCompiler(self.func_eval, None)
        compiler.compile()
        self._create_nnps(arrays)
Example #3
 def _setup_integrator(self, equations, integrator):
     kernel = CubicSpline(dim=1)
     arrays = [self.pa]
     a_eval = AccelerationEval(particle_arrays=arrays,
                               equations=equations,
                               kernel=kernel)
     comp = SPHCompiler(a_eval, integrator=integrator)
     comp.compile()
     nnps = LinkedListNNPS(dim=kernel.dim, particles=arrays)
     a_eval.set_nnps(nnps)
     integrator.set_nnps(nnps)
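Once the evaluator and the integrator are compiled together and share the same NNPS, the integrator can advance the particles in time. A hedged sketch, assuming the step(t, dt) interface that PySPH's Solver uses to drive its integrator:

 t, dt = 0.0, 1e-4
 for _ in range(10):
     integrator.step(t, dt)
     t += dt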
Example #4
 def _compile_acceleration_eval(self, arrays):
     names = [x.name for x in self.particle_arrays]
     if self.equations is None:
         equations = [
             InterpolateFunction(dest='interpolate', sources=names)
         ]
     else:
         equations = self.equations
     self.func_eval = AccelerationEval(arrays, equations, self.kernel)
     compiler = SPHCompiler(self.func_eval, None)
     compiler.compile()
Example #5
 def _make_accel_eval(self, equations, cache_nnps=False):
     arrays = [self.pa]
     kernel = CubicSpline(dim=self.dim)
     a_eval = AccelerationEval(
         particle_arrays=arrays, equations=equations, kernel=kernel
     )
     comp = SPHCompiler(a_eval, integrator=None)
     comp.compile()
     nnps = NNPS(dim=kernel.dim, particles=arrays, cache=cache_nnps)
     nnps.update()
     a_eval.set_nnps(nnps)
     return a_eval
Example #6
 def _make_accel_eval(self, equations):
     arrays = [self.pa]
     kernel = CubicSpline(dim=self.dim)
     a_eval = AccelerationEval(
         particle_arrays=arrays, equations=equations, kernel=kernel
     )
     comp = SPHCompiler(a_eval, integrator=None)
     comp.compile()
     nnps = NNPS(dim=kernel.dim, particles=arrays)
     nnps.update()
     a_eval.set_nnps(nnps)
     return a_eval
Example #7
 def _setup_integrator(self, equations, integrator):
     pytest.importorskip('pysph.base.gpu_nnps')
     kernel = CubicSpline(dim=1)
     arrays = [self.pa]
     from pysph.base.gpu_nnps import BruteForceNNPS as GPUNNPS
     a_eval = AccelerationEval(
          particle_arrays=arrays, equations=equations, kernel=kernel,
          backend='opencl'
     )
     comp = SPHCompiler(a_eval, integrator=integrator)
     comp.compile()
     nnps = GPUNNPS(dim=kernel.dim, particles=arrays, cache=True)
     nnps.update()
     a_eval.set_nnps(nnps)
     integrator.set_nnps(nnps)
Example #8
 def _make_accel_eval(self, equations, cache_nnps=True):
     pytest.importorskip('pysph.base.gpu_nnps')
     from pysph.base.gpu_nnps import ZOrderGPUNNPS as GPUNNPS
     arrays = [self.pa]
     kernel = CubicSpline(dim=self.dim)
     a_eval = AccelerationEval(
         particle_arrays=arrays, equations=equations, kernel=kernel,
         backend='opencl'
     )
     comp = SPHCompiler(a_eval, integrator=None)
     comp.compile()
     self.sph_compiler = comp
     nnps = GPUNNPS(dim=kernel.dim, particles=arrays, cache=cache_nnps)
     nnps.update()
     a_eval.set_nnps(nnps)
     return a_eval
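With backend='opencl' the compiled evaluator is used exactly as in the CPU examples; only the NNPS class and the backend argument change. A short hedged sketch, reusing the compute(t, dt) call shown later in Example #14:

 a_eval = self._make_accel_eval(equations)
 a_eval.compute(0.0, 1e-4)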
Example #9
    def setup(self, particles, equations, nnps, kernel=None, fixed_h=False):
        """ Setup the solver.

        The solver's processor id is set if the in_parallel flag is set
        to true.

        The order of the integrating calcs is determined by the solver's
        order attribute.

        This is usually called at the start of a PySPH simulation.

        """

        self.particles = particles
        if kernel is not None:
            self.kernel = kernel

        mode = 'mpi' if self.in_parallel else 'serial'
        self.acceleration_evals = make_acceleration_evals(
            particles, equations, self.kernel, mode)

        sep = '-' * 70
        eqn_info = '[\n' + ',\n'.join([str(e) for e in equations]) + '\n]'
        logger.info('Using equations:\n%s\n%s\n%s' % (sep, eqn_info, sep))
        logger.info('Using integrator:\n%s\n  %s\n%s' %
                    (sep, self.integrator, sep))

        sph_compiler = SPHCompiler(self.acceleration_evals, self.integrator)
        sph_compiler.compile()

        # Set the nnps for all concerned objects.
        for ae in self.acceleration_evals:
            ae.set_nnps(nnps)
        self.integrator.set_nnps(nnps)

        # set the parallel manager for the integrator
        self.integrator.set_parallel_manager(self.pm)

        # Set the post_stage_callback.
        self.integrator.set_post_stage_callback(self._post_stage_callback)

        # set integrator option for constant smoothing length
        self.fixed_h = fixed_h
        self.integrator.set_fixed_h(fixed_h)

        logger.debug("Solver setup complete.")
Example #10
 def _make_integrator(self):
     arrays = [self.pa]
     kernel = CubicSpline(dim=self.dim)
     eqs = [[Eq1(dest='fluid', sources=['fluid'])],
            [Eq2(dest='fluid', sources=['fluid'])]]
     meqs = MultiStageEquations(eqs)
     a_evals = make_acceleration_evals(arrays,
                                       meqs,
                                       kernel,
                                       backend=self.backend)
     integrator = MyIntegrator(fluid=MyStepper())
     comp = SPHCompiler(a_evals, integrator=integrator)
     comp.compile()
     nnps = self.NNPS_cls(dim=kernel.dim, particles=arrays)
     nnps.update()
     for ae in a_evals:
         ae.set_nnps(nnps)
     return integrator
Example #11
    def __init__(self,
                 arrays,
                 equations,
                 dim,
                 kernel=None,
                 domain_manager=None,
                 backend=None,
                 nnps_factory=NNPS):
        """Constructor.

        Parameters
        ----------
        arrays: list(ParticleArray)
        equations: list
        dim: int
        kernel: kernel instance.
        domain_manager: DomainManager
        backend: str: indicates the backend to use.
            one of ('opencl', 'cython', '', None)
        nnps_factory: A factory that creates an NNPSBase instance.
        """
        self.arrays = arrays
        self.equations = equations
        self.domain_manager = domain_manager
        self.dim = dim
        if kernel is None:
            self.kernel = Gaussian(dim=dim)
        else:
            self.kernel = kernel

        self.nnps_factory = nnps_factory
        self.backend = backend

        self.func_eval = AccelerationEval(arrays,
                                          equations,
                                          self.kernel,
                                          backend=backend)
        compiler = SPHCompiler(self.func_eval, None)
        compiler.compile()
        self._create_nnps(arrays)
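The _create_nnps helper called at the end of this constructor is not shown here. A plausible sketch of what it might do, using the stored nnps_factory and domain_manager (the keyword arguments mirror those passed to LinkedListNNPS in Example #15); this is an assumption, not the actual implementation:

    def _create_nnps(self, arrays):
        # Hypothetical sketch: build the NNPS with the stored factory and
        # attach it to the compiled evaluator.
        self.nnps = self.nnps_factory(
            dim=self.kernel.dim, particles=arrays,
            radius_scale=self.kernel.radius_scale,
            domain=self.domain_manager, cache=True
        )
        self.nnps.update()
        self.func_eval.set_nnps(self.nnps)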
Example #12
    def test_detection_of_missing_arrays_for_integrator(self):
        # Given.
        x = np.asarray([1.0])
        u = np.asarray([0.0])
        h = np.ones_like(x)
        pa = get_particle_array(name='fluid', x=x, u=u, h=h, m=h)
        arrays = [pa]

        # When
        integrator = LeapFrogIntegrator(fluid=LeapFrogStep())
        equations = [SHM(dest="fluid", sources=None)]
        kernel = CubicSpline(dim=1)
        a_eval = AccelerationEval(particle_arrays=arrays,
                                  equations=equations,
                                  kernel=kernel)
        comp = SPHCompiler(a_eval, integrator=integrator)

        # Then
        self.assertRaises(RuntimeError, comp.compile)
Example #13
    def test_detect_missing_arrays_for_many_particle_arrays(self):
        # Given.
        x = np.asarray([1.0])
        u = np.asarray([0.0])
        h = np.ones_like(x)
        fluid = get_particle_array_wcsph(name='fluid', x=x, u=u, h=h, m=h)
        solid = get_particle_array(name='solid', x=x, u=u, h=h, m=h)
        arrays = [fluid, solid]

        # When
        integrator = PECIntegrator(fluid=TwoStageRigidBodyStep(),
                                   solid=TwoStageRigidBodyStep())
        equations = [SHM(dest="fluid", sources=None)]
        kernel = CubicSpline(dim=1)
        a_eval = AccelerationEval(particle_arrays=arrays,
                                  equations=equations,
                                  kernel=kernel)
        comp = SPHCompiler(a_eval, integrator=integrator)

        # Then
        self.assertRaises(RuntimeError, comp.compile)
Example #14
                ], update_nnps=False
            )
    ]

    from pysph.sph.acceleration_eval import AccelerationEval
    from pysph.sph.sph_compiler import SPHCompiler
    kernel = CubicSpline(dim=2)
    integrator = SWEIntegrator(fluid=SWEStep())
    tf = 2
    solver = Solver(
        kernel=kernel,
        dim=2,
        integrator=integrator,
        cfl=0.1,
        adaptive_timestep=True,
        output_at_times=[0.1, 0.2, 0.3, 0.4, 0.5, 0.6],
        tf=tf
        )
    part_arr = app.create_particles
    eqns = app.create_equations()
    app.setup(solver=solver, equations=eqns,
              particle_factory=part_arr)
    a_eval = AccelerationEval(
        solver.particles, equations=one_time_equations, kernel=CubicSpline(dim=2))
    compiler = SPHCompiler(a_eval, None)
    compiler.compile()
    a_eval.set_nnps(app.nnps)
    a_eval.compute(0.0, 1e-4)

    app.run()
Example #15
    def __init__(self,
                 all_particles,
                 scheme,
                 domain=None,
                 innerloop=True,
                 updates=True,
                 parallel=False,
                 steps=None,
                 D=0):
        """The second integrator is a simple Euler-Integrator (accurate
        enough due to very small time steps; very fast) using EBGSteps.
        EBGSteps are basically the same as EulerSteps, exept for the fact
        that they work with an intermediate ebg velocity [eu, ev, ew].
        This velocity does not interfere with the actual velocity, which
        is neseccery to not disturb the real velocity through artificial
        damping in this step. The ebg velocity is initialized for each
        inner loop again and reset in the outer loop."""
        from math import ceil
        from pysph.base.kernels import CubicSpline
        from pysph.sph.integrator_step import EBGStep
        from compyle.config import get_config
        from pysph.sph.integrator import EulerIntegrator
        from pysph.sph.scheme import BeadChainScheme
        from pysph.sph.equation import Group
        from pysph.sph.fiber.utils import (HoldPoints, Contact,
                                           ComputeDistance)
        from pysph.sph.fiber.beadchain import (Tension, Bending,
                                               ArtificialDamping)
        from pysph.base.nnps import DomainManager, LinkedListNNPS
        from pysph.sph.acceleration_eval import AccelerationEval
        from pysph.sph.sph_compiler import SPHCompiler

        if not isinstance(scheme, BeadChainScheme):
            raise TypeError("Scheme must be BeadChainScheme")

        self.innerloop = innerloop
        self.dt = scheme.dt
        self.fiber_dt = scheme.fiber_dt
        self.domain_updates = updates
        self.steps = steps
        self.D = D
        self.eta0 = scheme.rho0 * scheme.nu

        # If more than one particle is involved, the elastic equations are
        # iterated in an inner loop.
        if self.innerloop:
            # second integrator
            # self.fiber_integrator = EulerIntegrator(fiber=EBGStep())
            steppers = {}
            for f in scheme.fibers:
                steppers[f] = EBGStep()
            self.fiber_integrator = EulerIntegrator(**steppers)
            # The type of spline has no influence here; it must only be large
            # enough to contain the next particle.
            kernel = CubicSpline(dim=scheme.dim)
            equations = []
            g1 = []
            for fiber in scheme.fibers:
                g1.append(ComputeDistance(dest=fiber, sources=[fiber]))
            equations.append(Group(equations=g1))

            g2 = []
            for fiber in scheme.fibers:
                g2.append(
                    Tension(dest=fiber, sources=None, ea=scheme.E * scheme.A))
                g2.append(
                    Bending(dest=fiber, sources=None, ei=scheme.E * scheme.Ip))
                g2.append(
                    Contact(dest=fiber,
                            sources=scheme.fibers,
                            E=scheme.E,
                            d=scheme.dx,
                            dim=scheme.dim,
                            k=scheme.k,
                            lim=scheme.lim,
                            eta0=self.eta0))
                g2.append(ArtificialDamping(dest=fiber, sources=None,
                                            d=self.D))
            equations.append(Group(equations=g2))

            g3 = []
            for fiber in scheme.fibers:
                g3.append(HoldPoints(dest=fiber, sources=None, tag=100))
            equations.append(Group(equations=g3))

            # These equations are applied to fiber particles only - that is
            # the reason for the computational speed-up.
            particles = [p for p in all_particles if p.name in scheme.fibers]
            # A separate DomainManager is needed to ensure that particles
            # don't leave the domain.
            if domain:
                xmin = domain.manager.xmin
                ymin = domain.manager.ymin
                zmin = domain.manager.zmin
                xmax = domain.manager.xmax
                ymax = domain.manager.ymax
                zmax = domain.manager.zmax
                periodic_in_x = domain.manager.periodic_in_x
                periodic_in_y = domain.manager.periodic_in_y
                periodic_in_z = domain.manager.periodic_in_z
                gamma_yx = domain.manager.gamma_yx
                gamma_zx = domain.manager.gamma_zx
                gamma_zy = domain.manager.gamma_zy
                n_layers = domain.manager.n_layers
                N = self.steps or int(ceil(self.dt / self.fiber_dt))
                # dt = self.dt/N
                self.domain = DomainManager(xmin=xmin,
                                            xmax=xmax,
                                            ymin=ymin,
                                            ymax=ymax,
                                            zmin=zmin,
                                            zmax=zmax,
                                            periodic_in_x=periodic_in_x,
                                            periodic_in_y=periodic_in_y,
                                            periodic_in_z=periodic_in_z,
                                            gamma_yx=gamma_yx,
                                            gamma_zx=gamma_zx,
                                            gamma_zy=gamma_zy,
                                            n_layers=n_layers,
                                            dt=self.dt,
                                            calls_per_step=N)
            else:
                self.domain = None
            # A separate list for the nearest-neighbour search is beneficial
            # since it is much smaller than the original one.
            nnps = LinkedListNNPS(dim=scheme.dim,
                                  particles=particles,
                                  radius_scale=kernel.radius_scale,
                                  domain=self.domain,
                                  fixed_h=False,
                                  cache=False,
                                  sort_gids=False)
            # The acceleration evaluator needs to be set up in order to compile
            # it together with the integrator.
            if parallel:
                self.acceleration_eval = AccelerationEval(
                    particle_arrays=particles,
                    equations=equations,
                    kernel=kernel)
            else:
                self.acceleration_eval = AccelerationEval(
                    particle_arrays=particles,
                    equations=equations,
                    kernel=kernel,
                    mode='serial')
            # Compile the integrator without OpenMP, because the overhead is
            # too large for these few fiber particles.
            comp = SPHCompiler(self.acceleration_eval, self.fiber_integrator)
            if parallel:
                comp.compile()
            else:
                config = get_config()
                config.use_openmp = False
                comp.compile()
                config.use_openmp = True
            self.acceleration_eval.set_nnps(nnps)

            # Connecting neighbourhood list to integrator.
            self.fiber_integrator.set_nnps(nnps)
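The docstring above implies an inner loop in which the fiber integrator is sub-stepped many times per outer time step. A hedged sketch of such a loop, using the dt, fiber_dt and steps attributes stored by this constructor; the step(t, dt) interface is assumed from PySPH's Solver:

    def step_fibers(self, time):
        # Hedged sketch, not part of the original class: sub-step the inner
        # fiber integrator N times per outer time step.
        from math import ceil
        n = self.steps or int(ceil(self.dt / self.fiber_dt))
        dt_inner = self.dt / n
        for _ in range(n):
            self.fiber_integrator.step(time, dt_inner)
            time += dt_inner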
Example #16
 def _compile_acceleration_eval(self, arrays):
     names = [x.name for x in self.particle_arrays]
     equations = [InterpolateFunction(dest='interpolate', sources=names)]
     self.func_eval = AccelerationEval(arrays, equations, self.kernel)
     compiler = SPHCompiler(self.func_eval, None)
     compiler.compile()