def propagate(self, segments):
    # Instantiate the integrator here since cython extension classes do not
    # automatically implement the pickling protocol
    self.model['seed'] = genrandint()
    integrator = ElasticNetwork(**self.model)

    for segment in segments:
        starttime = time.time()

        new_pcoords = np.empty((self.nsteps, self.ndim), dtype=pcoord_dtype)
        new_pcoords[0, :] = segment.pcoord[0, :]

        # Each progress-coordinate frame stores the flattened positions in its
        # first half and the flattened velocities in its second half
        cpos = self.ndim // 2
        x = new_pcoords[0, :cpos].copy().reshape((-1, 3)).astype(np.float64)
        v = new_pcoords[0, cpos:].copy().reshape((-1, 3)).astype(np.float64)

        # Advance the system, recording one frame per outer step
        for istep in xrange(1, self.nsteps):
            integrator.step(x, v, self.nsubsteps)
            new_pcoords[istep, :] = np.hstack((x.ravel(), v.ravel()))

        segment.pcoord = new_pcoords[...].astype(pcoord_dtype)
        segment.status = Segment.SEG_STATUS_COMPLETE
        segment.walltime = time.time() - starttime

    del integrator

    return segments
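# The propagate() loop above relies on a fixed frame layout: positions occupy
# the first half of each progress-coordinate frame and velocities the second
# half, each flattened from an (natoms, 3) array. A minimal, self-contained
# numpy sketch of that round trip, with a made-up atom count for illustration:

import numpy as np

natoms = 4                      # hypothetical size, not taken from the model
ndim = 2 * 3 * natoms           # positions + velocities per frame
cpos = ndim // 2

x = np.arange(natoms * 3, dtype=np.float64).reshape((-1, 3))   # positions
v = -np.arange(natoms * 3, dtype=np.float64).reshape((-1, 3))  # velocities

# Flatten into a single frame, as done after each integrator.step() call
frame = np.hstack((x.ravel(), v.ravel()))
assert frame.shape == (ndim,)

# Recover the (natoms, 3) arrays, as done for the segment's first frame
x_back = frame[:cpos].reshape((-1, 3))
v_back = frame[cpos:].reshape((-1, 3))
assert np.array_equal(x_back, x) and np.array_equal(v_back, v)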
def run(config):
    # Set up logging
    logname = config.get('outputs', 'log')
    print('Setting up logging: {}'.format(logname))
    logging.basicConfig(filename=logname, level=logging.DEBUG, filemode='w')

    NUM_BLOCKS = config.getint('parameters', 'num_blocks')
    STEPS_PER_BLOCK = config.getint('parameters', 'steps_per_block')

    # Collect the model parameters and force-field data for the integrator
    model = {}
    model['mass'] = config.getfloat('model', 'mass')
    model['gamma'] = config.getfloat('model', 'gamma')
    model['temp'] = config.getfloat('model', 'temp')
    model['dt'] = config.getfloat('model', 'dt')
    model['sigma'] = config.getfloat('model', 'sigma')
    model['eps'] = config.getfloat('model', 'eps')
    model['betamix'] = config.getfloat('model', 'betamix')
    model.update(np.load(config.get('model', 'ff_data')))
    model['seed'] = genrandint()

    # The end-state coordinate sets are kept only for choosing the starting
    # structure; they are not passed to the integrator
    init_pos = {}
    init_pos['coordsA'] = model['coordsA']
    init_pos['coordsB'] = model['coordsB']
    del model['coordsA']
    del model['coordsB']

    system = ElasticNetwork(**model)

    # Set up storage
    print('Setting up netcdf4 trajectory storage')
    nc = trajstore(natoms=system.natoms)
    nc.initialize_netcdf(config.get('outputs', 'trajname'))

    # Initial coordinates
    pos = init_pos[config.get('model', 'init_pos')]

    # Assign velocities drawn from a Maxwell-Boltzmann distribution;
    # 0.001987191 is kB in kcal/(mol K)
    sigma = np.sqrt(model['temp'] * 0.001987191 / model['mass'])
    vel = sigma * np.random.normal(size=pos.shape)

    print('Starting Simulation')
    for dk in xrange(NUM_BLOCKS):
        t1 = time.time()
        system.step(pos, vel, STEPS_PER_BLOCK)
        nc.write_frame(pos, vel)

        # Instantaneous kinetic energy and temperature from equipartition
        # over 3*natoms degrees of freedom
        EKin = 0.5 * model['mass'] * np.sum(vel**2)
        T = EKin * (2. / 3) / (0.001987191 * system.natoms)
        logging.info('Completed {} of {} steps: {} s Ekin: {} Temp: {}'.format(
            dk, NUM_BLOCKS - 1, time.time() - t1, EKin, T))

    nc.ncfile.close()
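# run() only calls get(), getint(), and getfloat() on its config argument, so
# a standard-library ConfigParser works as a driver. A minimal sketch: the
# section and option names mirror what run() reads above, but the numeric
# values and file paths here are placeholders, and the ff_data archive is
# assumed to contain coordsA/coordsB plus whatever ElasticNetwork expects.

try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2

config = configparser.RawConfigParser()

config.add_section('parameters')
config.set('parameters', 'num_blocks', '100')
config.set('parameters', 'steps_per_block', '500')

config.add_section('model')
for key, val in [('mass', '12.0'), ('gamma', '50.0'), ('temp', '300.0'),
                 ('dt', '0.002'), ('sigma', '3.8'), ('eps', '1.0'),
                 ('betamix', '0.5'), ('ff_data', 'ff_data.npz'),
                 ('init_pos', 'coordsA')]:
    config.set('model', key, val)

config.add_section('outputs')
config.set('outputs', 'log', 'sim.log')
config.set('outputs', 'trajname', 'traj.nc')

run(config)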