def __init__(self, r_cut, name=None):
    """Construct the LJ2 pair force.

    Sets up the base pair class, subscribes to the global neighbor
    list, instantiates the CPU or GPU C++ compute, and registers the
    required per-pair coefficients.
    """
    util.print_status_line()

    # initialize the base class; it handles the common pair-potential plumbing
    pair.pair.__init__(self, r_cut, name)

    # subscribe to the global neighbor list so it covers our cutoff
    neighbor_list = pair._update_global_nlist(r_cut)
    neighbor_list.subscribe(lambda: self.log * self.get_max_rcut())

    # create the C++ mirror class for the current execution mode
    if globals.exec_conf.isCUDAEnabled():
        # GPU kernels require the full (non-half) neighbor list storage
        neighbor_list.cpp_nlist.setStorageMode(hoomd.NeighborList.storageMode.full)
        self.cpp_force = _evaluators_ext_template.PotentialPairLJ2GPU(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _evaluators_ext_template.PotentialPairLJ2GPU
        # any multiple of 32 up to 1024 is valid; lj.benchmark() reports
        # which block size runs fastest on the current hardware
        self.cpp_force.setBlockSize(64)
    else:
        self.cpp_force = _evaluators_ext_template.PotentialPairLJ2(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _evaluators_ext_template.PotentialPairLJ2

    globals.system.addCompute(self.cpp_force, self.force_name)

    # declare the per-pair coefficients this potential needs
    self.required_coeffs = ['epsilon', 'sigma', 'alpha']
    self.pair_coeff.set_default_coeff('alpha', 1.0)
def __init__(self, r_cut, name=None):
    """Initialize the LJ2 pair potential compute.

    Wires the base pair class, the global neighbor list, and the
    CPU/GPU C++ backend, then declares the coefficient names.
    """
    util.print_status_line()

    # base class handles the shared pair-potential machinery
    pair.pair.__init__(self, r_cut, name)

    # make sure the global neighbor list reaches out to our cutoff
    nlist = pair._update_global_nlist(r_cut)
    nlist.subscribe(lambda: self.log * self.get_max_rcut())

    # pick the C++ backend matching the execution configuration
    if not globals.exec_conf.isCUDAEnabled():
        self.cpp_force = _evaluators_ext_template.PotentialPairLJ2(
            globals.system_definition, nlist.cpp_nlist, self.name)
        self.cpp_class = _evaluators_ext_template.PotentialPairLJ2
    else:
        # the GPU path needs full neighbor-list storage
        nlist.cpp_nlist.setStorageMode(hoomd.NeighborList.storageMode.full)
        self.cpp_force = _evaluators_ext_template.PotentialPairLJ2GPU(
            globals.system_definition, nlist.cpp_nlist, self.name)
        self.cpp_class = _evaluators_ext_template.PotentialPairLJ2GPU
        # tunable: any multiple of 32 up to 1024; use lj.benchmark()
        # to discover the fastest block size
        self.cpp_force.setBlockSize(64)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # per-pair coefficients required before run()
    self.required_coeffs = ['epsilon', 'sigma', 'alpha']
    self.pair_coeff.set_default_coeff('alpha', 1.0)
def __init__(self, group):
    """Set up the PPPM long-range electrostatics compute for *group*.

    Refuses to run under domain decomposition, builds the CPU or GPU
    PPPM force, and silently creates the short-range ewald pair force
    that complements it.
    """
    util.print_status_line()

    # PPPM has no multi-processor implementation: bail out early when
    # the simulation is domain-decomposed under MPI
    if hoomd.is_MPI_available():
        if globals.system_definition.getParticleData().getDomainDecomposition():
            globals.msg.error("charge.pppm is not supported in multi-processor simulations.\n\n")
            raise RuntimeError("Error initializing PPPM.")

    # base force class setup
    force._force.__init__(self)

    # subscribe to the global neighbor list (zero cutoff contribution)
    nlist = pair._update_global_nlist(0.0)
    nlist.subscribe(lambda: self.log * 0.0)

    # instantiate the C++ mirror class for the current device
    if globals.exec_conf.isCUDAEnabled():
        self.cpp_force = hoomd.PPPMForceComputeGPU(
            globals.system_definition, nlist.cpp_nlist, group.cpp_group)
    else:
        self.cpp_force = hoomd.PPPMForceCompute(
            globals.system_definition, nlist.cpp_nlist, group.cpp_group)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # run() must not start until set_params() has flipped this to True
    self.params_set = False

    # quietly create the short-range ewald companion potential
    util._disable_status_lines = True
    self.ewald = pair.ewald(r_cut=0.0)
    util._disable_status_lines = False
def __init__(self, r_cut=1.2, name=None):
    """Create the Martini shifted-Coulomb pair potential.

    Default cutoff of 1.2 matches the standard Martini setup.
    """
    util.print_status_line()

    # remember the cutoff for later use by this potential
    self.r_cut = r_cut

    # base class handles the common pair-potential plumbing
    pair.pair.__init__(self, r_cut, name)

    # keep the global neighbor list wide enough for our cutoff
    nlist = pair._update_global_nlist(r_cut)
    nlist.subscribe(lambda: self.log * self.get_max_rcut())

    # construct the C++ mirror class for the active device
    if globals.exec_conf.isCUDAEnabled():
        # GPU path requires full neighbor-list storage
        nlist.cpp_nlist.setStorageMode(hoomd.NeighborList.storageMode.full)
        self.cpp_force = _martini_plugin.PotentialPairCoulombMGPU(
            globals.system_definition, nlist.cpp_nlist, self.name)
        self.cpp_class = _martini_plugin.PotentialPairCoulombMGPU
        self.cpp_force.setBlockSize(128)
    else:
        self.cpp_force = _martini_plugin.PotentialPairCoulombM(
            globals.system_definition, nlist.cpp_nlist, self.name)
        self.cpp_class = _martini_plugin.PotentialPairCoulombM

    globals.system.addCompute(self.cpp_force, self.force_name)

    # per-pair coefficients required before run()
    self.required_coeffs = ['f', 'er', 'ron']
def __init__(self, r_cut, name=None):
    """Set up the Lowe conservative pair potential.

    Initializes the base pair class, subscribes to the global neighbor
    list, creates the CPU or GPU C++ compute, and declares the single
    required coefficient 'A'.
    """
    util.print_status_line()

    # tell the base class how we operate
    pair.pair.__init__(self, r_cut, name)

    # update the neighbor list
    neighbor_list = pair._update_global_nlist(r_cut)
    neighbor_list.subscribe(lambda: self.log * self.get_max_rcut())

    # create the c++ mirror class
    if not globals.exec_conf.isCUDAEnabled():
        self.cpp_force = _custom_pair_potentials.PotentialPairLowe(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _custom_pair_potentials.PotentialPairLowe
    else:
        # GPU kernels need full neighbor-list storage
        neighbor_list.cpp_nlist.setStorageMode(
            hoomd.NeighborList.storageMode.full)
        self.cpp_force = _custom_pair_potentials.PotentialPairLoweGPU(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _custom_pair_potentials.PotentialPairLoweGPU
        # BUGFIX: the original called setBlockSize with
        # tune._get_optimal_block_size('pair.dpd_conservative') and then
        # immediately overwrote it with 64 — the tuned call was dead code
        # (and used another potential's tuning key). The effective block
        # size was always 64, preserved here.
        # NOTE(review): if the tuned value was the real intent, restore
        # the tune call and delete this fixed value.
        self.cpp_force.setBlockSize(64)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # setup the coefficient options
    self.required_coeffs = ['A']
def __init__(self, T=1.0, gdt=0.1, rcut=1.0, seed=1, period=1):
    """Create a Lowe-Andersen updater.

    Subscribes a neighbor list at cutoff *rcut*, builds the C++ updater
    with temperature *T*, gamma*dt product *gdt*, and RNG *seed*, and
    schedules it to run every *period* steps.
    """
    util.print_status_line()

    # base updater setup
    _updater.__init__(self)

    # keep the global neighbor list wide enough for this cutoff
    nlist = pair._update_global_nlist(rcut)
    nlist.subscribe(lambda: rcut)

    # construct the reflected C++ updater
    # NOTE(review): only a CPU implementation is wired up here; a GPU
    # variant existed in earlier (commented-out) code — confirm whether
    # one should be selected when CUDA is enabled.
    self.cpp_updater = _custom_updaters.LoweAndersenUpdater(
        globals.system_definition,
        nlist.cpp_nlist,
        T,
        gdt,
        rcut,
        int(seed))

    # register with the system at the requested period
    self.setupUpdater(period)
def __init__(self, r_cut, d_max=None, name=None):
    """Set up the shifted Yukawa pair potential.

    When *d_max* is not given it is computed as the largest particle
    diameter in the system and reported via a notice message; the
    neighbor list's maximum diameter is widened accordingly.
    """
    util.print_status_line()

    # tell the base class how we operate
    pair.pair.__init__(self, r_cut, name)

    # update the neighbor list
    if d_max is None:
        # scan all particles for the largest diameter
        sysdef = globals.system_definition
        d_max = max([
            x.diameter
            for x in data.particle_data(sysdef.getParticleData())
        ])
        # BUGFIX: the notice previously said "sgauss" (copied from
        # another potential) although this block builds the SYukawa
        # potential — the message now names the right potential.
        globals.msg.notice(2, "Notice: syukawa set d_max=" + str(d_max) + "\n")

    neighbor_list = pair._update_global_nlist(r_cut)
    neighbor_list.subscribe(lambda: self.log * self.get_max_rcut())
    # widen neighbor search to account for particle diameters
    neighbor_list.cpp_nlist.setMaximumDiameter(d_max)

    # create the c++ mirror class
    if not globals.exec_conf.isCUDAEnabled():
        self.cpp_force = _custom_pair_potentials.PotentialPairSYukawa(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _custom_pair_potentials.PotentialPairSYukawa
    else:
        # GPU kernels need full neighbor-list storage
        neighbor_list.cpp_nlist.setStorageMode(
            hoomd.NeighborList.storageMode.full)
        self.cpp_force = _custom_pair_potentials.PotentialPairSYukawaGPU(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _custom_pair_potentials.PotentialPairSYukawaGPU
        self.cpp_force.setBlockSize(128)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # setup the coefficient options
    self.required_coeffs = ['epsilon', 'kappa']
def __init__(self, r_cut, T, seed=1, name=None):
    """Set up the Lowe thermostatted pair potential.

    Builds the CPU or GPU C++ compute, seeds the thermostat RNG, and
    installs *T* (a constant or variant) as the temperature.
    """
    util.print_status_line()

    # tell the base class how we operate
    pair.pair.__init__(self, r_cut, name)

    # update the neighbor list
    neighbor_list = pair._update_global_nlist(r_cut)
    neighbor_list.subscribe(lambda: self.log * self.get_max_rcut())

    # create the c++ mirror class
    # BUGFIX: the condition was inverted ("if isCUDAEnabled" selected the
    # CPU class and the else branch the GPU class). Every sibling
    # potential in this file uses "if not ... isCUDAEnabled()" for the
    # CPU path, and only the GPU class needs full-storage neighbor lists
    # and a tuned block size, so the CPU class must be built when CUDA
    # is NOT enabled.
    if not globals.exec_conf.isCUDAEnabled():
        self.cpp_force = _custom_pair_potentials.PotentialPairLoweThermoLowe(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _custom_pair_potentials.PotentialPairLoweThermoLowe
    else:
        # GPU kernels need full neighbor-list storage
        neighbor_list.cpp_nlist.setStorageMode(
            hoomd.NeighborList.storageMode.full)
        self.cpp_force = _custom_pair_potentials.PotentialPairLoweThermoLoweGPU(
            globals.system_definition, neighbor_list.cpp_nlist, self.name)
        self.cpp_class = _custom_pair_potentials.PotentialPairLoweThermoLoweGPU
        self.cpp_force.setBlockSize(
            tune._get_optimal_block_size('pair.dpd'))

    globals.system.addCompute(self.cpp_force, self.force_name)

    # setup the coefficient options
    self.required_coeffs = ['A', 'gamma']

    # set the seed for the dpd thermostat
    self.cpp_force.setSeed(seed)

    # set the temperature; accept a constant or a variant
    T = variant._setup_variant_input(T)
    self.cpp_force.setT(T.cpp_variant)
def __init__(self, group):
    """Set up the PPPM long-range electrostatics compute for *group*.

    Builds the CPU or GPU PPPM force and silently creates the
    complementary short-range ewald pair force.
    """
    util.print_status_line()

    # NOTE(review): unlike the other PPPM initializer in this file,
    # this variant performs no MPI / domain-decomposition guard —
    # confirm whether that check is needed here as well.

    # base force class setup
    force._force.__init__(self)

    # subscribe to the global neighbor list (zero cutoff contribution)
    nlist = pair._update_global_nlist(0.0)
    nlist.subscribe(lambda: self.log * 0.0)

    # instantiate the C++ mirror class for the current device
    if globals.exec_conf.isCUDAEnabled():
        self.cpp_force = hoomd.PPPMForceComputeGPU(
            globals.system_definition, nlist.cpp_nlist, group.cpp_group)
    else:
        self.cpp_force = hoomd.PPPMForceCompute(
            globals.system_definition, nlist.cpp_nlist, group.cpp_group)

    globals.system.addCompute(self.cpp_force, self.force_name)

    # run() must not start until set_params() has flipped this to True
    self.params_set = False

    # quietly create the short-range ewald companion potential
    util._disable_status_lines = True
    self.ewald = pair.ewald(r_cut=0.0)
    util._disable_status_lines = False