Example No. 1
 def __init__(self, system, parttype, span, geometry, center):
     if geometry not in ['spherical', 'bounded-x', 'bounded-y', 'bounded-z']:
         raise ValueError('Error: Geometry must be in ["spherical", "bounded-x", "bounded-y", "bounded-z"]. Your input: {}'.format(geometry))
     if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         geometrydict = {'spherical': 0, 'bounded-x': 1, 'bounded-y': 2, 'bounded-z': 3}
         cxxinit(self, analysis_NPartSubregion, system, parttype, span, geometrydict[geometry])
         self.cxxclass.setCenter(self, center[0], center[1], center[2])
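A hedged usage sketch of the constructor above (assumptions: the class is exported as espressopp.analysis.NPartSubregion, observables expose the usual compute() method, and system, Lx, Ly, Lz come from an existing setup):

    # Count particles of type 1 inside a sphere of radius `span`
    # around the box center.
    center = [Lx / 2.0, Ly / 2.0, Lz / 2.0]
    subcount = espressopp.analysis.NPartSubregion(
        system, parttype=1, span=2.5, geometry='spherical', center=center)
    print(subcount.compute())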
Example No. 2
    def __init__(self,
                 system,
                 cutoff,
                 adrcut,
                 dEx,
                 dHy,
                 adrCenter=[],
                 pids=[],
                 exclusionlist=[],
                 sphereAdr=False):

        if pmi.workerIsActive():
            cxxinit(self, _espressopp.VerletListAdress, system, cutoff, adrcut,
                    False, dEx, dHy)
            #self.cxxclass.setAtType(self, atType)
            # check for exclusions
            if (exclusionlist != []):
                # add exclusions
                for pair in exclusionlist:
                    pid1, pid2 = pair
                    self.cxxclass.exclude(self, pid1, pid2)
            # add adress particles
            if (pids != []):
                for pid in pids:
                    self.cxxclass.addAdrParticle(self, pid)
            # set adress center
            if (adrCenter != []):
                self.cxxclass.setAdrCenter(self, adrCenter[0], adrCenter[1],
                                           adrCenter[2])
            # set adress region type (slab or spherical)
            self.cxxclass.setAdrRegionType(self, sphereAdr)

            # rebuild list now
            self.cxxclass.rebuild(self)
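A minimal construction sketch for this AdResS list (assumptions: system, rc, skin, Lx, Ly, Lz come from an existing setup; dEx and dHy set the explicit and hybrid region sizes):

    # Slab-shaped AdResS region centered in the box.
    vl = espressopp.VerletListAdress(system, cutoff=rc + skin, adrcut=rc + skin,
                                     dEx=2.0, dHy=1.0,
                                     adrCenter=[Lx / 2.0, Ly / 2.0, Lz / 2.0],
                                     sphereAdr=False)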
Example No. 3
    def __init__(self,
                 type=None,
                 mass=None,
                 q=None,
                 lambda_adr=None,
                 incr_state=None,
                 state=None):
        if pmi.workerIsActive():
            cxxinit(self, _espressopp._ParticleProperties)
            self.cxxclass.init(self)
            if incr_state is not None and state is not None:
                raise RuntimeError(
                    'Ambiguity, cannot set incr_state and state at the same time'
                )

            if type is not None:
                self.type = int(type)
            if mass is not None:
                self.mass = mass
            if q is not None:
                self.q = q
            if lambda_adr is not None:
                self.lambda_adr = lambda_adr
            if incr_state is not None:
                self.incr_state = incr_state
            if state is not None:
                self.state = state
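Since incr_state and state are mutually exclusive, only one of them may be passed; a short sketch (assumption: the wrapper is exported as espressopp.ParticleProperties):

    # Valid: an absolute chemical state.
    props = espressopp.ParticleProperties(type=1, mass=1.0, q=0.0, state=2)
    # Invalid, raises RuntimeError('Ambiguity, cannot set incr_state and
    # state at the same time'):
    # props = espressopp.ParticleProperties(state=2, incr_state=1)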
Example No. 4
 def __init__(self, integrator, exclusionlist=None):
     if pmi.workerIsActive():
         cxxinit(self, _espressopp.DynamicExcludeList, integrator)
         if exclusionlist is not None:
             for pid1, pid2 in exclusionlist:
                 self.cxxclass.exclude(self, pid1, pid2)
             self.cxxclass.update(self)
Example No. 5
    def __init__(self, system, center=[], sphereAdr=False, ntrotter=1, slow=False):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, integrator_FreeEnergyCompensation, system, sphereAdr, ntrotter, slow)

            # set center of FreeEnergyCompensation force
            if (center != []):
                self.cxxclass.setCenter(self, center[0], center[1], center[2])
Example No. 6
 def __init__(self, system, center=[]):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, integrator_OnTheFlyFEC, system)
                     
         # set center of OnTheFlyFEC
         if (center != []):
             self.cxxclass.setCenter(self, center[0], center[1], center[2])
Example No. 7
 def __init__(self,
              type=None,
              mass=None,
              q=None,
              lambda_adr=None,
              incr_state=None,
              state=None,
              velocity=None,
              force=None):
     if pmi.workerIsActive():
         cxxinit(self, integrator_TopologyParticleProperties)
         if incr_state is not None and state is not None:
             raise RuntimeError(
                 'Ambiguity, cannot set incr_state and state at the same time'
             )
         if type is not None:
             self.type_id = int(type)
         if mass is not None:
             self.mass = mass
         if q is not None:
             self.q = q
         if lambda_adr is not None:
             self.lambda_adr = lambda_adr
         if incr_state is not None:
             self.incr_state = incr_state
         if state is not None:
             self.state = state
         if velocity is not None:
             self.v = velocity
         if force is not None:
             self.f = force
Example No. 8
 def __init__(self, system, center=[]):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, integrator_FreeEnergyCompensation, system)
         
         # set center of FreeEnergyCompensation force
         if (center != []):
             self.cxxclass.setCenter(self, center[0], center[1], center[2])
Example No. 9
 def __init__(self, system, chainlength=None, start_pid=0):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()
             ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         if chainlength is None:
             cxxinit(self, analysis_MeanSquareDispl, system)
         else:
             cxxinit(self, analysis_MeanSquareDispl, system, chainlength,
                     start_pid)
Example No. 10
 def __init__(self, system, analysis_pressure=None):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or \
             pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         if analysis_pressure:
             cxxinit(self, integrator_BerendsenBarostat, system,
                     analysis_pressure)
         else:
             cxxinit(self, integrator_BerendsenBarostat, system)
Example No. 11
 def __init__(self, system, particle_type, total_count=None):
     if pmi.workerIsActive():
         if total_count is None:
             cxxinit(self, analysis_ChemicalConversion, system,
                     particle_type)
         else:
             cxxinit(self, analysis_ChemicalConversion, system,
                     particle_type, total_count)
Example No. 12
 def __init__(self, system, capForce, particleGroup=None):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()
             ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         if (particleGroup is None) or (particleGroup.size() == 0):
             cxxinit(self, integrator_CapForce, system, capForce)
         else:
             cxxinit(self, integrator_CapForce, system, capForce,
                     particleGroup)
Example No. 13
    def __init__(self, system, cutoff, angular_momentum=6,
                 do_cluster_analysis=False, include_surface_particles=False,
                 ql_low=-1.0, ql_high=1.0):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            # note: coupled cluster analysis is currently broken
            cxxinit(self, analysis_OrderParameter, system, cutoff, angular_momentum,
                    do_cluster_analysis, include_surface_particles,
                    ql_low, ql_high)
Example No. 14
 def __init__(self,
              system,
              integrator,
              filename='out.xtc',
              unfolded=False,
              length_factor=1.0,
              append=True):
     cxxinit(self, io_DumpXTC, system, integrator, filename, unfolded,
             length_factor, append)
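A hedged usage sketch (assumption: DumpXTC exposes the dump() call of the other espressopp trajectory writers):

    # Write one frame every 100 MD steps into an XTC trajectory.
    dump_xtc = espressopp.io.DumpXTC(system, integrator, filename='traj.xtc',
                                     unfolded=True)
    for block in range(10):
        integrator.run(100)
        dump_xtc.dump()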
Example No. 15
    def __init__(self):

        if pmi._PMIComm and pmi._PMIComm.isActive():
            if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
                cxxinit(self, _espressopp.Version)
        else:
            cxxinit(self, _espressopp.Version)
Example No. 16
    def __init__(self):

        if pmi._PMIComm and pmi._PMIComm.isActive():
            if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
                cxxinit(self, _espressopp.Version)
        else:
            cxxinit(self, _espressopp.Version)
Example No. 17
    def __init__(self):

        if pmi._PMIComm and pmi._PMIComm.isActive():
            if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
                cxxinit(self, _espressopp.System, pmi._PMIComm.getMPIsubcomm())
        else:
            cxxinit(self, _espressopp.System, pmi._MPIcomm)
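This constructor is what runs underneath the usual top-level setup; a minimal sketch of the standard espressopp idiom it supports:

    import espressopp

    system = espressopp.System()
    system.rng = espressopp.esutil.RNG()
    system.bc = espressopp.bc.OrthorhombicBC(system.rng, (10.0, 10.0, 10.0))
    system.skin = 0.3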
Example No. 18
 def __init__(self,
              system,
              gamma,
              ftol,
              max_displacement,
              variable_step_flag=False):
     if pmi.workerIsActive():
         cxxinit(self, integrator_MinimizeEnergy, system, gamma,
                 ftol * ftol, max_displacement, variable_step_flag)
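Note that the wrapper squares ftol before handing it to the C++ class, so callers pass the force tolerance itself, not its square. A hedged usage sketch (assumption: run() advances the minimizer by a step budget, as in the usual espressopp extension interface; parameter values are illustrative only):

    # Relax overlaps before starting MD.
    minimize = espressopp.integrator.MinimizeEnergy(system, gamma=0.001,
                                                    ftol=0.01,
                                                    max_displacement=0.001)
    minimize.run(1000)  # assumed signature: advance up to 1000 steps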
Example No. 19
 def __init__(self, system, nodeGrid, cellGrid, neiListx, neiListy, neiListz):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         p1 = pmi._MPIcomm.rank % nodeGrid[0]
         aux1 = pmi._MPIcomm.rank // nodeGrid[0]  # HDD comment: getting the order of processors
         p2 = aux1 % nodeGrid[1]
         aux2 = aux1 // nodeGrid[1]
         p3 = aux2  # HDD comment: obtaining the processors per axis (x, y, z)
         cellGrid = Int3D(neiListx[p1+1]-neiListx[p1], neiListy[p2+1]-neiListy[p2], neiListz[p3+1]-neiListz[p3])
         # print("real Cells are", cellGrid)
         cxxinit(self, storage_DomainDecompositionAdress, system, nodeGrid, cellGrid, neiListx, neiListy, neiListz)
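The p1/p2/p3 arithmetic unflattens the MPI rank into an (x, y, z) processor triplet with x varying fastest; a self-contained check of that arithmetic:

    # Rank 5 on a 2x2x2 node grid maps to the triplet (1, 0, 1).
    rank, nodeGrid = 5, (2, 2, 2)
    p1 = rank % nodeGrid[0]     # 5 % 2 = 1
    aux1 = rank // nodeGrid[0]  # 5 // 2 = 2
    p2 = aux1 % nodeGrid[1]     # 2 % 2 = 0
    p3 = aux1 // nodeGrid[1]    # 2 // 2 = 1
    assert (p1, p2, p3) == (1, 0, 1)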
Example No. 20
 def __init__(self,
              system,
              integrator,
              filename='out.gro',
              unfolded=False,
              length_factor=1.0,
              length_unit='LJ',
              append=True):
     cxxinit(self, io_DumpGRO, system, integrator, filename, unfolded,
             length_factor, length_unit, append)
Example No. 21
 def __init__(self,
              system,
              verletlist,
              startdist=0.0,
              enddist=0.0,
              edgeweightmultiplier=20):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()
             ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, integrator_TDforce, system, verletlist, startdist,
                 enddist, edgeweightmultiplier)
Example No. 22
 def __init__(self,
              _system,
              _verletlist,
              _fixedtuplelist,
              KTI=False,
              regionupdates=1):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()
             ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         if pmi.workerIsActive():
             cxxinit(self, integrator_Adress, _system, _verletlist,
                     _fixedtuplelist, KTI, regionupdates)
Example No. 23
 def __init__(self, system, integrator, mode=""):
     if pmi.workerIsActive():
         if mode == "" or mode == SOA:
             mode_int = _espressopp.VectorizationMode.SOA
         elif mode == AOS:
             mode_int = _espressopp.VectorizationMode.AOS
         else:
             raise ValueError("Incorrect mode [{}]".format(mode))
         cxxinit(self, _espressopp.Vectorization, system, integrator,
                 mode_int)
         system.storage.decompose()
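A construction sketch; SOA and AOS are the module-level mode constants compared against above, and the exported class path here is an assumption:

    # Default mode ("") selects the structure-of-arrays (SOA) layout;
    # the constructor also re-decomposes the storage, as seen above.
    vec = espressopp.Vectorization(system, integrator)  # path assumed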
Example No. 24
 def __init__(self,
              system,
              pos=True,
              vel=False,
              force=False,
              radius=False,
              folded=True):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()
             ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, analysis_Configurations, system, pos, vel, force,
                 radius, folded)
Example No. 25
    def __init__(self, system, interaction, compute_method=None):
        if pmi.workerIsActive():
            if compute_method is None:
                compute_method = "ALL"
            if compute_method not in ["AT", "CG", "ALL"]:
                raise ValueError("Wrong compute_method, should be ALL, AT or CG")

            if compute_method == "ALL":
                cxxinit(self, analysis_PotentialEnergy, system, interaction)
            else:
                cxxinit(self, analysis_PotentialEnergy, system, interaction, compute_method == "AT")
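A hedged usage sketch ('AT'/'CG' restrict the sum to the atomistic or coarse-grained contribution in AdResS runs; the class path and the standard compute() entry point are assumptions):

    pe_all = espressopp.analysis.PotentialEnergy(system, interaction)
    pe_at = espressopp.analysis.PotentialEnergy(system, interaction,
                                                compute_method='AT')
    print(pe_all.compute(), pe_at.compute())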
Example No. 26
 def __init__(self, system, nodeGrid, neiListx, neiListy, neiListz):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()
             ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         p1 = pmi._MPIcomm.rank % nodeGrid[0]
         aux1 = pmi._MPIcomm.rank // nodeGrid[0]  # HeSpaDDA comment: refers to the order in which processors are given within the Linked-Cell-List
         p2 = aux1 % nodeGrid[1]
         aux2 = aux1 // nodeGrid[1]
         p3 = aux2  # HeSpaDDA comment: the processor triplet (p1, p2, p3) has been extracted and is ready for the construction of the cell neighbor list
         #cellGrid = Int3D(neiListx[p1+1]-neiListx[p1],neiListy[p2+1]-neiListy[p2],neiListz[p3+1]-neiListz[p3])
         cxxinit(self, storage_DomainDecomposition, system, nodeGrid,
                 neiListx, neiListy, neiListz)
Example No. 27
    def __init__(self):

        if pmi._PMIComm and pmi._PMIComm.isActive():
            if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
                cxxinit(self, _espressopp.System, pmi._PMIComm.getMPIsubcomm())
        else:
            cxxinit(self, _espressopp.System, pmi._MPIcomm)

        self._integrator = None
        self._interaction2id = {}
        self._interaction_pid = 0
Example No. 28
 def __init__(self,
              system,
              integrator,
              filename='out.xyz',
              unfolded=False,
              length_factor=1.0,
              length_unit='LJ',
              store_pids=False,
              store_velocities=False,
              append=True):
     cxxinit(self, io_DumpXYZ, system, integrator, filename, unfolded,
             length_factor, length_unit, store_pids, store_velocities,
             append)
Example No. 29
    def __init__(self, system, interaction, compute_method=None):
        if pmi.workerIsActive():
            if compute_method is None:
                compute_method = 'ALL'
            if compute_method not in ['AT', 'CG', 'ALL']:
                raise ValueError(
                    'Wrong compute_method, should be ALL, AT or CG')

            if compute_method == 'ALL':
                cxxinit(self, analysis_PotentialEnergy, system, interaction)
            else:
                cxxinit(self, analysis_PotentialEnergy, system, interaction,
                        compute_method == 'AT')
Example No. 30
    def __init__(self):

        if pmi._PMIComm and pmi._PMIComm.isActive():
            if pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
                cxxinit(self, _espressopp.System, pmi._PMIComm.getMPIsubcomm())
        else:
            cxxinit(self, _espressopp.System, pmi._MPIcomm)

        self._integrator = None
        self._interaction2id = {}
        self._interaction_pid = 0
Example No. 31
 def __init__(self,
              system,
              cs_list=None,
              anchor_type=None,
              target_type=None):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or \
             pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         if anchor_type is not None and target_type is not None:
             cxxinit(self, integrator_FixDistances, system, anchor_type,
                     target_type)
         else:
             cxxinit(self, integrator_FixDistances, system)
         if cs_list:
             self.addConstraints(cs_list, False)
Example No. 32
 def __init__(self, system, integrator, h5md_file):
     if pmi.workerIsActive():
         cxxinit(self, io_DumpTopology, system, integrator)
         self.h5md_file = h5md_file
         self.tuple_index = 0
         self.triple_index = 0
         self.quadruple_index = 0
         self.tuple_data = {}
         self.triple_data = {}
         self.quadruple_data = {}
         if 'connectivity' not in self.h5md_file.file:
             self.h5md_file.file.create_group('connectivity')
         self.connectivity = self.h5md_file.file['connectivity']
         self.chunk_size = h5md_file.chunk_size
         self.dt = integrator.dt
Example No. 33
 def __init__(self,
              system,
              capForce,
              particleGroup=None,
              particleTypes=None):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()
             ) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         if (particleGroup is None) or (particleGroup.size() == 0):
             cxxinit(self, integrator_CapForce, system, capForce)
             if particleTypes:
                 for type_id in particleTypes:
                     self.cxxclass.set_type(self, type_id)
         else:
             cxxinit(self, integrator_CapForce, system, capForce,
                     particleGroup)
Example No. 34
    def __init__(self, system, cutoff, exclusionlist=None):

        if pmi.workerIsActive():
            if exclusionlist is None:
                # rebuild list in constructor
                cxxinit(self, _espressopp.VerletListHybridCG, system, cutoff, True)
            else:
                # do not rebuild list in constructor
                cxxinit(self, _espressopp.VerletListHybridCG, system, cutoff, False)
                # add exclusions
                for pair in exclusionlist:
                    pid1, pid2 = pair
                    self.cxxclass.exclude(self, pid1, pid2)
                # now rebuild list with exclusions
                self.cxxclass.rebuild(self)
Example No. 35
    def __init__(self, system, cutoff, exclusionlist=[]):

        if pmi.workerIsActive():
            if (exclusionlist == []):
                # rebuild list in constructor
                cxxinit(self, _espressopp.VerletList, system, cutoff, True)
            else:
                # do not rebuild list in constructor
                cxxinit(self, _espressopp.VerletList, system, cutoff, False)
                # add exclusions
                for pair in exclusionlist:
                    pid1, pid2 = pair
                    self.cxxclass.exclude(self, pid1, pid2)
                # now rebuild list with exclusions
                self.cxxclass.rebuild(self)
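A usage sketch of the exclusion path: the constructor skips the initial rebuild, registers the excluded pairs, then rebuilds once. system, rc, and skin are assumed to come from an existing setup:

    # Exclude directly bonded pairs from the non-bonded neighbor list.
    bonds = [(1, 2), (2, 3), (3, 4)]
    vl = espressopp.VerletList(system, cutoff=rc + skin, exclusionlist=bonds)
    print(vl.totalSize())  # total number of neighbor pairs across all nodes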
Example No. 36
    def __init__(self, system, vec, cutoff, exclusionlist=[]):

        if pmi.workerIsActive():
            if (exclusionlist == []):
                # rebuild list in constructor
                cxxinit(self, _espressopp.vectorization_VerletList, system,
                        vec, cutoff, True)
            else:
                # do not rebuild list in constructor
                cxxinit(self, _espressopp.vectorization_VerletList, system,
                        vec, cutoff, False)
                # add exclusions
                for pair in exclusionlist:
                    pid1, pid2 = pair
                    self.cxxclass.exclude(self, pid1, pid2)
                # now rebuild list with exclusions
                self.cxxclass.rebuild(self)
Example No. 37
    def __init__(self, system, cutoff, exclusionlist=[]):

        if pmi.workerIsActive():
            if (exclusionlist == []):
                # rebuild list in constructor
                cxxinit(self, _espressopp.VerletListTriple, system, cutoff, True)
            else:
                # do not rebuild list in constructor
                cxxinit(self, _espressopp.VerletListTriple, system, cutoff, False)
                # add exclusions (note: the triple list excludes single
                # particle ids, not pairs)
                for pid in exclusionlist:
                    self.cxxclass.exclude(self, pid)
                # now rebuild list with exclusions
                self.cxxclass.rebuild(self)
Example No. 38
    def __init__(self, system, cutoff, adrcut, dEx, dHy, adrCenter=[], pids=[], exclusionlist=[], sphereAdr=False):

        if pmi.workerIsActive():
            cxxinit(self, _espressopp.VerletListAdress, system, cutoff, adrcut, False, dEx, dHy)
            #self.cxxclass.setAtType(self, atType)
            # check for exclusions
            if (exclusionlist != []):
                # add exclusions
                for pair in exclusionlist:
                    pid1, pid2 = pair
                    self.cxxclass.exclude(self, pid1, pid2)
            # add adress particles
            if (pids != []):
                for pid in pids:
                    self.cxxclass.addAdrParticle(self, pid)
            # set adress center
            if (adrCenter != []):
                self.cxxclass.setAdrCenter(self, adrCenter[0], adrCenter[1], adrCenter[2])
            # set adress region type (slab or spherical)
            self.cxxclass.setAdrRegionType(self,sphereAdr)
            
            # rebuild list now
            self.cxxclass.rebuild(self)
Example No. 39
    def __init__(self, system):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_ConfigurationsExt, system)
Example No. 40
 def __init__(self, system):
     if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, analysis_PressureTensor, system)
Example No. 41
 def __init__(self, system):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, integrator_Isokinetic, system)
Example No. 42
    def __init__(self, system):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_TotalVelocity, system)
Example No. 43
 def __init__(self, rng, boxL=1.0):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup() or pmi.isController:
         cxxinit(self, bc_OrthorhombicBC, rng, toReal3D(boxL))
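Because the box length goes through toReal3D, both a scalar (cubic box) and a length-3 sequence are accepted; a short sketch:

    rng = espressopp.esutil.RNG()
    bc_cubic = espressopp.bc.OrthorhombicBC(rng, 10.0)
    bc_rect = espressopp.bc.OrthorhombicBC(rng, (10.0, 20.0, 30.0))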
Example No. 44
    def __init__(self, storage):

        if pmi.workerIsActive():
            cxxinit(self, _espressopp.FixedTupleListAdress, storage)
Example No. 45
 def __init__(self, system, chainlength=None):
   if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
     if chainlength is None:
       cxxinit(self, analysis_MeanSquareDispl, system)
     else:
       cxxinit(self, analysis_MeanSquareDispl, system, chainlength)
Example No. 46
 def __init__(self, system):
     if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, analysis_NPart, system)
Example No. 47
 def __init__(self, system, nodeGrid, cellGrid):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, storage_DomainDecompositionNonBlocking, system, nodeGrid, cellGrid)
Example No. 48
 def __init__(self, system, chainlength):
     if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, analysis_MeanSquareInternalDist, system, chainlength)
Example No. 49
 def __init__(self, storage):
     if pmi.workerIsActive():
         cxxinit(self, _espressopp.ParticleGroup, storage)
Example No. 50
 def __init__(self, system, group):
     cxxinit(self, integrator_VelocityVerletOnGroup, system, group)
Example No. 51
    def __init__(self, system, latticeboltzmann):
        if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
            cxxinit(self, analysis_LBOutput_Screen, system, latticeboltzmann)
Example No. 52
 def __init__(self, system, latticeboltzmann):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, integrator_LBInit_PopUniform, system, latticeboltzmann)
Example No. 53
  def __init__(self, system):

    if not (pmi._PMIComm and pmi._PMIComm.isActive()) or \
            pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
      cxxinit(self, integrator_BerendsenBarostat, system)
Example No. 54
 def __init__(self, system):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, integrator_GeneralizedLangevinThermostat, system)
Example No. 55
    def __init__(self, storage):

        if pmi.workerIsActive():
            cxxinit(self, _espressopp.FixedPairList, storage)
Example No. 56
 def __init__(self, system):
     if not pmi._PMIComm or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, analysis_ParticleRadiusDistribution, system)
Example No. 57
 def __init__(self, system):
     if not (pmi._PMIComm and pmi._PMIComm.isActive()) or pmi._MPIcomm.rank in pmi._PMIComm.getMPIcpugroup():
         cxxinit(self, integrator_StochasticVelocityRescaling, system)
Example No. 58
 def __init__(self, system, gamma, ftol, max_displacement, variable_step_flag=False):
     if pmi.workerIsActive():
         cxxinit(self, integrator_MinimizeEnergy, system, gamma, ftol*ftol, max_displacement, variable_step_flag)