def setUp(self):
    """Build a template SystemState and a DataStore pre-populated with a
    communicator, REMD runner, and one stage of per-replica states."""
    self.setUpTempDir()

    self.N_ATOMS = 500
    self.N_REPLICAS = 16
    self.N_DISCRETE = 10
    self.N_CONTINUOUS = 5
    self.N_MAPPINGS = 10

    blank_params = param_sampling.ParameterState(
        np.zeros(self.N_DISCRETE, dtype=np.int32),
        np.zeros(self.N_CONTINUOUS, dtype=np.float64),
    )
    self.state_template = state.SystemState(
        np.zeros((self.N_ATOMS, 3)),
        np.zeros((self.N_ATOMS, 3)),
        0.0,
        0.0,
        np.zeros(3),
        blank_params,
        np.arange(self.N_MAPPINGS),
    )

    # objects that will be round-tripped through the store
    communicator = comm.MPICommunicator(self.N_ATOMS, self.N_REPLICAS)
    exchange_ladder = ladder.NearestNeighborLadder(n_trials=100)
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    accept_adaptor = adaptor.EqualAcceptanceAdaptor(
        n_replicas=self.N_REPLICAS, adaptation_policy=policy
    )

    def make_state(index, n_atoms):
        # tag each replica's coordinates/velocities with its own index
        coords = index * np.ones((n_atoms, 3))
        speeds = index * np.ones((n_atoms, 3))
        params = param_sampling.ParameterState(
            np.zeros(self.N_DISCRETE, dtype=np.int32),
            np.zeros(self.N_CONTINUOUS, dtype=np.float64),
        )
        return state.SystemState(
            coords,
            speeds,
            index / 100.0,
            index,
            np.zeros(3),
            params,
            np.arange(self.N_MAPPINGS),
        )

    states = [make_state(i, self.N_ATOMS) for i in range(self.N_REPLICAS)]
    runner = leader.LeaderReplicaExchangeRunner(
        self.N_REPLICAS,
        max_steps=100,
        ladder=exchange_ladder,
        adaptor=accept_adaptor,
    )

    # dummy pdb writer; can't use a mock because they can't be pickled
    pdb_writer = object()

    self.store = vault.DataStore(self.state_template, self.N_REPLICAS, pdb_writer)
    self.store.initialize(mode="w")

    # persist everything
    self.store.save_data_store()
    self.store.save_communicator(communicator)
    self.store.save_remd_runner(runner)
    self.store.save_states(states, stage=0)
def setup_system():
    """Create a 16-residue poly-alanine system with helical secondary-structure
    restraints and write the system, options, REMD runner, communicator, and
    initial states into a freshly initialized DataStore.

    Returns the number of atoms in the system.
    """
    # parse the 16-alanine sequence
    sequence = parse.get_sequence_from_AA1(contents="AAAAAAAAAAAAAAAA")

    # build the system from a single molecule
    mol = system.ProteinMoleculeFromSequence(sequence)
    builder = system.SystemBuilder()
    sys_ = builder.build_system_from_molecules([mol])
    sys_.temperature_scaler = system.LinearTemperatureScaler(0, 1, 300, 310)

    # fully-helical secondary structure restraints
    scaler = sys_.restraints.create_scaler(
        "nonlinear", alpha_min=0, alpha_max=1, factor=4.0
    )
    secondary = "H" * 16
    ss_restraints = parse.get_secondary_structure_restraints(
        system=sys_, scaler=scaler, contents=secondary
    )
    sys_.restraints.add_selectively_active_collection(
        ss_restraints, len(ss_restraints)
    )

    # run options
    options = system.RunOptions()
    options.use_big_timestep = True

    # persistent store
    store = vault.DataStore(
        sys_.n_atoms, N_REPLICAS, sys_.get_pdb_writer(), block_size=BACKUP_FREQ
    )
    store.initialize(mode="w")
    store.save_system(sys_)
    store.save_run_options(options)

    # replica-exchange machinery
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    accept_adaptor = adaptor.EqualAcceptanceAdaptor(
        n_replicas=N_REPLICAS, adaptation_policy=policy
    )
    remd_runner = leader.LeaderReplicaExchangeRunner(
        N_REPLICAS,
        max_steps=N_STEPS,
        ladder=ladder.NearestNeighborLadder(n_trials=1),
        adaptor=accept_adaptor,
    )
    store.save_remd_runner(remd_runner)
    store.save_communicator(comm.MPICommunicator(sys_.n_atoms, N_REPLICAS))

    # initial per-replica states; replica 1 starts at alpha = 1.0
    states = [gen_state(sys_) for _ in range(N_REPLICAS)]
    states[1].alpha = 1.0
    store.save_states(states, 0)

    store.save_data_store()
    return sys_.n_atoms
def setUp(self):
    """Write 100 stages of states to a fresh store, close it, then reopen it
    read-only as ``self.store`` for the tests."""
    self.setUpTempDir()
    self.N_ATOMS = 500
    self.N_REPLICAS = 16

    # objects to save to disk
    communicator = comm.MPICommunicator(self.N_ATOMS, self.N_REPLICAS)
    exchange_ladder = ladder.NearestNeighborLadder(n_trials=100)
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    accept_adaptor = adaptor.EqualAcceptanceAdaptor(
        n_replicas=self.N_REPLICAS, adaptation_policy=policy
    )

    def make_state(index, n_atoms):
        # fill coordinates/velocities with the index so stages are distinguishable
        coords = index * np.ones((n_atoms, 3))
        speeds = index * np.ones((n_atoms, 3))
        return system.SystemState(
            coords, speeds, index / 100.0, index, np.zeros(3)
        )

    runner = leader.LeaderReplicaExchangeRunner(
        self.N_REPLICAS,
        max_steps=100,
        ladder=exchange_ladder,
        adaptor=accept_adaptor,
    )

    # dummy pdb writer; mocks can't be pickled
    self.pdb_writer = object()
    store = vault.DataStore(
        self.N_ATOMS, self.N_REPLICAS, self.pdb_writer, block_size=10
    )
    store.initialize(mode="w")

    store.save_communicator(communicator)
    store.save_remd_runner(runner)
    store.save_system(object())

    # one hundred stages of states
    for stage in range(100):
        stage_states = [
            make_state(stage, self.N_ATOMS) for _ in range(self.N_REPLICAS)
        ]
        store.save_states(stage_states, stage=stage)
    store.close()
    store.save_data_store()

    # reopen read-only
    self.store = vault.DataStore.load_data_store()
    self.store.initialize(mode="r")
def test_save_and_load_remd_runner(self):
    """An REMD runner should round-trip through the store and leave a data file."""
    with in_temp_dir():
        # dummy pdb writer; can't use a mock because they can't be pickled
        writer = object()
        store = vault.DataStore(self.state_template, self.N_REPLICAS, writer)
        store.initialize(mode="w")

        policy = adaptor.AdaptationPolicy(1.0, 50, 100)
        accept_adaptor = adaptor.EqualAcceptanceAdaptor(
            n_replicas=self.N_REPLICAS, adaptation_policy=policy
        )
        runner = leader.LeaderReplicaExchangeRunner(
            self.N_REPLICAS,
            max_steps=100,
            ladder=ladder.NearestNeighborLadder(n_trials=100),
            adaptor=accept_adaptor,
        )

        store.save_remd_runner(runner)
        reloaded = store.load_remd_runner()

        self.assertEqual(runner.n_replicas, reloaded.n_replicas)
        self.assertTrue(os.path.exists("Data/remd_runner.dat"))
def setup_system():
    """Initialize a DataStore holding a fake system, REMD runner, communicator,
    run options, and one stage of initial states for the functional test."""
    # PDB writer over N_ATOMS alanine CA atoms
    writer = pdb_writer.PDBWriter(
        range(N_ATOMS), ["CA"] * N_ATOMS, [1] * N_ATOMS, ["ALA"] * N_ATOMS
    )
    store = vault.DataStore(gen_state(0), N_REPLICAS, writer, block_size=BACKUP_FREQ)
    store.initialize(mode="w")

    # replica-exchange machinery
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    accept_adaptor = adaptor.EqualAcceptanceAdaptor(
        n_replicas=N_REPLICAS, adaptation_policy=policy
    )
    remd_runner = leader.LeaderReplicaExchangeRunner(
        N_REPLICAS,
        max_steps=N_STEPS,
        ladder=ladder.NearestNeighborLadder(n_trials=100),
        adaptor=accept_adaptor,
    )
    store.save_remd_runner(remd_runner)

    # communicator
    store.save_communicator(comm.MPICommunicator(N_ATOMS, N_REPLICAS))

    # fake system held at a constant 300 K
    fake_system = helper.FakeSystem()
    fake_system.temperature_scaler = temperature.ConstantTemperatureScaler(300.0)
    store.save_system(fake_system)

    # options selecting the fake runner
    run_options = options.RunOptions()
    run_options.runner = "fake_runner"
    store.save_run_options(run_options)

    # initial per-replica states
    store.save_states([gen_state(i) for i in range(N_REPLICAS)], 0)
    store.save_data_store()
def setUp(self):
    """Wire up mocked ladder, adaptor, communicator, system runner, and store
    for the leader replica-exchange loop tests."""
    self.N_REPS = 6
    self.MAX_STEPS = 5

    # ladder always proposes a full reversal of the replica order
    self.mock_ladder = mock.Mock(spec_set=ladder.NearestNeighborLadder)
    self.mock_ladder.compute_exchanges.return_value = list(
        reversed(range(self.N_REPS))
    )
    self.mock_adaptor = mock.Mock(adaptor.EqualAcceptanceAdaptor)
    self.runner = leader.LeaderReplicaExchangeRunner(
        self.N_REPS, self.MAX_STEPS, self.mock_ladder, self.mock_adaptor
    )

    self.mock_comm = mock.Mock(spec_set=comm.MPICommunicator)
    self.mock_comm.n_replicas = 6
    self.mock_comm.broadcast_states_to_workers.return_value = sentinel.MY_STATE_INIT

    # six mock states, each tagged with a distinct position sentinel
    # (sentinel.pos1 .. sentinel.pos6) and unit velocities
    self.fake_states_after_run = []
    for i in range(1, 7):
        st = mock.Mock()
        st.positions = getattr(sentinel, "pos%d" % i)
        st.velocities = 1.0
        setattr(self, "mock_state_%d" % i, st)
        self.fake_states_after_run.append(st)

    self.mock_comm.gather_states_from_workers.return_value = (
        self.fake_states_after_run
    )
    self.mock_energy_matrix = mock.MagicMock()
    self.mock_comm.gather_energies_from_workers.return_value = (
        self.mock_energy_matrix
    )
    self.mock_comm.exchange_states_for_energy_calc.return_value = (
        self.fake_states_after_run
    )

    # system runner returns a sentinel state and cycles through six
    # sentinel energies (E1 .. E6) on every step
    self.mock_system_runner = mock.Mock(spec=interfaces.IRunner)
    self.mock_system_runner.minimize_then_run.return_value = sentinel.MY_STATE
    self.mock_system_runner.run.return_value = sentinel.MY_STATE
    self.FAKE_ENERGIES_AFTER_GET_ENERGY = [
        getattr(sentinel, "E%d" % i) for i in range(1, 7)
    ]
    self.mock_system_runner.get_energy.side_effect = (
        self.FAKE_ENERGIES_AFTER_GET_ENERGY * self.MAX_STEPS
    )
    self.mock_system_runner.temperature_scaler = mock.MagicMock()
    self.mock_system_runner.temperature_scaler.return_value = 1.0

    self.mock_store = mock.Mock(spec_set=vault.DataStore)
    self.mock_store.n_replicas = 6
    self.mock_store.load_states.return_value = sentinel.ALL_STATES
def setup_system():
    """Build a 16-alanine system with helical, COM, and absolute-COM restraints,
    persist everything to a new DataStore, and return the atom count.

    Fix: the function previously ended with a bare ``return`` followed by an
    unreachable ``s.n_atoms`` expression, so callers got ``None``; it now
    returns ``s.n_atoms`` like the sibling ``setup_system`` helpers do.
    """
    # get the sequence
    sequence = "AAAAAAAAAAAAAAAA"
    sequence = parse.get_sequence_from_AA1(contents=sequence)

    # create the system
    p = system.SubSystemFromSequence(sequence)
    b = system.SystemBuilder()
    s = b.build_system([p])
    s.temperature_scaler = system.LinearTemperatureScaler(0, 1, 300, 310)

    # fully-helical secondary structure restraints
    rest_scaler = s.restraints.create_scaler(
        "nonlinear", alpha_min=0, alpha_max=1, factor=4.0
    )
    secondary = "H" * 16
    secondary_restraints = parse.get_secondary_structure_restraints(
        system=s, scaler=rest_scaler, contents=secondary
    )
    s.restraints.add_selectively_active_collection(
        secondary_restraints, len(secondary_restraints)
    )

    # create com restraint between the CA atoms of residues 1 and 3
    com = s.restraints.create_restraint(
        "com",
        rest_scaler,
        ramp=None,
        group1=[(1, "CA")],
        group2=[(3, "CA")],
        weights1=None,
        weights2=None,
        dims="xyz",
        force_const=100.0,
        distance=0.5,
    )
    s.restraints.add_as_always_active(com)

    # create absolute com restraint pinning residue 1 CA near the origin
    com = s.restraints.create_restraint(
        "abs_com",
        rest_scaler,
        ramp=None,
        group=[(1, "CA")],
        weights=None,
        dims="xyz",
        force_const=1.0,
        position=[0.0, 0.0, 0.0],
    )
    s.restraints.add_as_always_active(com)

    # create the options (defaults)
    options = system.RunOptions()

    # create a store
    store = vault.DataStore(
        s.n_atoms, N_REPLICAS, s.get_pdb_writer(), block_size=BACKUP_FREQ
    )
    store.initialize(mode="w")
    store.save_system(s)
    store.save_run_options(options)

    # create and store the remd_runner
    l = ladder.NearestNeighborLadder(n_trials=1)
    policy = adaptor.AdaptationPolicy(1.0, 50, 100)
    a = adaptor.EqualAcceptanceAdaptor(
        n_replicas=N_REPLICAS, adaptation_policy=policy
    )
    remd_runner = leader.LeaderReplicaExchangeRunner(
        N_REPLICAS, max_steps=N_STEPS, ladder=l, adaptor=a
    )
    store.save_remd_runner(remd_runner)

    # create and store the communicator
    c = comm.MPICommunicator(s.n_atoms, N_REPLICAS)
    store.save_communicator(c)

    # create and save the initial states; replica 1 starts at alpha = 1.0
    states = [gen_state(s) for i in range(N_REPLICAS)]
    states[1].alpha = 1.0
    store.save_states(states, 0)

    # save data_store
    store.save_data_store()

    # fixed: previously a bare ``return`` made ``s.n_atoms`` unreachable
    return s.n_atoms