def __init__(self, group):
    util.print_status_line()

    # Error out in MPI simulations
    if hoomd.is_MPI_available():
        if globals.system_definition.getParticleData().getDomainDecomposition():
            globals.msg.error("charge.pppm is not supported in multi-processor simulations.\n\n")
            raise RuntimeError("Error initializing PPPM.")

    # initialize the base class
    force._force.__init__(self)

    # create the c++ mirror class

    # update the neighbor list; PPPM needs no cutoff of its own, so the
    # subscription callback always contributes 0.0 to the global r_cut
    neighbor_list = pair._update_global_nlist(0.0)
    neighbor_list.subscribe(lambda: self.log * 0.0)
    if not globals.exec_conf.isCUDAEnabled():
        self.cpp_force = hoomd.PPPMForceCompute(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group)
    else:
        self.cpp_force = hoomd.PPPMForceComputeGPU(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # error check flag - must be set to True by set_params() before run() will commence
    self.params_set = False

    # initialize the short-range part of electrostatics; status lines are
    # suppressed so the internal pair.ewald is not echoed to the user
    util._disable_status_lines = True
    self.ewald = pair.ewald(r_cut=0.0)
    util._disable_status_lines = False
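# A usage sketch for this constructor. The input filename and parameter
# values are illustrative assumptions; the set_params-before-run requirement
# follows from the self.params_set flag set above.
from hoomd_script import init, group, charge

init.read_xml(filename="charged_system.xml")   # any charged configuration
all = group.all()

pppm = charge.pppm(group=all)                  # invokes the __init__ above
# run() refuses to start until set_params() flips self.params_set to True
pppm.set_params(Nx=32, Ny=32, Nz=32, order=6, rcut=2.0)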
def pair_ewald_setup():
    from hoomd_script import pair, globals

    fc = pair.ewald(r_cut=3.0)
    fc.pair_coeff.set('A', 'A', kappa=1.0, grid=16, order=4)

    # no valid run() occurs, so we need to manually update the nlist
    globals.neighbor_list.update_rcut()
    return fc
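# A hypothetical driver for the fixture above. It assumes a one-type system
# initialized beforehand (default particle type 'A') and a standard NVE
# integrator so run() can step; these setup calls are assumptions, not part
# of the original fixture.
from hoomd_script import init, group, integrate, run

init.create_random(N=100, phi_p=0.05)      # illustrative system, type 'A'
fc = pair_ewald_setup()

integrate.mode_standard(dt=0.005)
integrate.nve(group=group.all())
run(1)                                     # one step so forces are computed

f0 = fc.forces[0]                          # per-particle force data proxy
print(f0.energy)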
def __init__(self, group, nlist=None):
    util.print_status_line()

    # Error out in MPI simulations
    if hoomd.is_MPI_available():
        if globals.system_definition.getParticleData().getDomainDecomposition():
            globals.msg.error("charge.pppm is not supported in multi-processor simulations.\n\n")
            raise RuntimeError("Error initializing PPPM.")

    # initialize the base class
    force._force.__init__(self)

    # create the c++ mirror class

    # PPPM doesn't really need a neighbor list, so subscribe a callback that returns None
    if nlist is None:
        self.nlist = nl._subscribe_global_nlist(lambda: None)
    else:
        # otherwise, subscribe the specified neighbor list
        self.nlist = nlist
        self.nlist.subscribe(lambda: None)
        self.nlist.update_rcut()

    if not globals.exec_conf.isCUDAEnabled():
        self.cpp_force = hoomd.PPPMForceCompute(globals.system_definition, self.nlist.cpp_nlist, group.cpp_group)
    else:
        self.cpp_force = hoomd.PPPMForceComputeGPU(globals.system_definition, self.nlist.cpp_nlist, group.cpp_group)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # error check flag - must be set to True by set_params() before run() will commence
    self.params_set = False

    # initialize the short-range part of electrostatics
    util._disable_status_lines = True
    self.ewald = pair.ewald(r_cut=0.0, nlist=self.nlist)
    util._disable_status_lines = False
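# A sketch of the nlist keyword this version adds. The nl.cell() constructor
# follows the hoomd_script.nlist module naming and is an assumption here;
# the point shown is that the internal pair.ewald shares the same list, so
# the short- and long-range parts see identical neighbor data.
from hoomd_script import init, group, charge
from hoomd_script import nlist as nl

init.read_xml(filename="charged_system.xml")        # illustrative input file

cl = nl.cell()                                      # explicitly built cell list (assumed API)
pppm = charge.pppm(group=group.all(), nlist=cl)     # reuses cl instead of the global list
pppm.set_params(Nx=32, Ny=32, Nz=32, order=6, rcut=2.0)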
def __init__(self, group):
    util.print_status_line()

    # initialize the base class
    force._force.__init__(self)

    # create the c++ mirror class

    # update the neighbor list
    neighbor_list = pair._update_global_nlist(0.0)
    neighbor_list.subscribe(lambda: self.log * 0.0)
    if not globals.exec_conf.isCUDAEnabled():
        self.cpp_force = hoomd.PPPMForceCompute(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group)
    else:
        self.cpp_force = hoomd.PPPMForceComputeGPU(globals.system_definition, neighbor_list.cpp_nlist, group.cpp_group)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # error check flag - must be set to true by set_params in order for the run() to commence
    self.params_set = False

    # initialize the short range part of electrostatics
    util._disable_status_lines = True
    self.ewald = pair.ewald(r_cut=0.0)
    util._disable_status_lines = False
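# The neighbor-list subscription contract used by both constructors,
# summarized with a minimal stand-in written for illustration (a
# simplification, not the hoomd_script implementation): each subscriber
# reports the cutoff it needs, and PPPM's callbacks (self.log * 0.0 above,
# None in the nlist-aware version) deliberately contribute nothing, so
# PPPM never widens the list it piggybacks on.
class _MiniNlist(object):
    """Simplified stand-in for the global neighbor list's r_cut bookkeeping."""

    def __init__(self):
        self.subscribers = []

    def subscribe(self, callback):
        # each force registers a zero-argument callable returning its cutoff
        self.subscribers.append(callback)

    def update_rcut(self):
        # the list only has to cover the largest requested cutoff;
        # callbacks returning 0.0 or None (like PPPM's) never enlarge it
        if not self.subscribers:
            return 0.0
        return max((cb() or 0.0) for cb in self.subscribers)


nl = _MiniNlist()
nl.subscribe(lambda: 3.0)          # e.g. the real-space ewald pair force
nl.subscribe(lambda: None)         # PPPM-style no-op subscription
assert nl.update_rcut() == 3.0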