Example #1
import logging

import numpy as np
import torch

# NOTE: the conformer_rl import paths below mirror Example #2; the locations of
# `utils` and `test_alkane` are assumptions, not shown in the original fragment.
from conformer_rl import utils
from conformer_rl.agents import PPOAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGN
from conformer_rl.molecule_generation import test_alkane

logging.basicConfig(level=logging.DEBUG)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    utils.set_one_thread()

    mol_config = test_alkane()

    config = Config()
    config.tag = 'example2'
    config.network = RTGN(6, 128, edge_dim=6, node_dim=5).to(device)
    # Batch Hyperparameters
    config.num_workers = 20
    config.rollout_length = 20
    config.optimization_epochs = 4
    config.max_steps = 10000000
    config.save_interval = config.num_workers*200*5
    config.eval_interval = config.num_workers*200*5
    config.eval_episodes = 2
    config.mini_batch_size = 50

    # Coefficient Hyperparameters
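    # Scale the base learning rate with sqrt(num_workers); square-root scaling
    # with effective batch size is a common heuristic (a reading of the intent,
    # not stated in the original).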
    lr = 5e-6 * np.sqrt(config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=lr, eps=1e-5)

    # Task Settings
    config.train_env = Task('GibbsScorePruningEnv-v0',
                            concurrency=True,
                            num_envs=config.num_workers,
                            seed=np.random.randint(0, 1e5),
                            mol_config=mol_config,
                            max_steps=200)
    config.eval_env = Task('GibbsScorePruningEnv-v0',
                           seed=np.random.randint(0, 7e4),
                           mol_config=mol_config,
                           max_steps=200)

    agent = PPOAgent(config)
    agent.run_steps()
Example #2
import torch

from conformer_rl.agents import PPORecurrentAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent

from conformer_rl.molecule_generation import branched_alkane

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    # Set up the environment.
    alkane_env_config = branched_alkane(num_atoms=18)
    training_env = Task('GibbsScorePruningEnv-v0',
                        concurrency=True,
                        num_envs=20,
                        mol_config=alkane_env_config,
                        max_steps=200)

    # initialize Config
    config = Config()
    config.tag = 'tutorial'
    config.train_env = training_env

    # Set up neural network
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=5).to(device)
    config.num_workers = 20

    # Logging Parameters
    config.save_interval = 20000
    config.data_dir = 'data'
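
    # The tutorial fragment stops after the logging settings. A minimal sketch
    # of launching training with this config, following the agent pattern used
    # in the other examples (PPORecurrentAgent pairs with the recurrent
    # RTGNRecurrent network above):
    agent = PPORecurrentAgent(config)
    agent.run_steps()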
Example #3
import torch

from conformer_rl import utils
from conformer_rl.agents import PPOAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGN
# NOTE: import locations are assumed to match the package layout in Example #2.
from conformer_rl.molecule_generation import generate_lignin, config_from_rdkit

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    utils.set_one_thread()

    # configure molecule
    mol = generate_lignin(3)
    mol_config = config_from_rdkit(mol,
                                   num_conformers=200,
                                   calc_normalizers=True,
                                   save_file='lignin')

    # create agent config and set environment
    config = Config()
    config.tag = 'example2'
    config.train_env = Task('GibbsScorePruningEnv-v0',
                            concurrency=True,
                            num_envs=10,
                            mol_config=mol_config)

    # Neural Network
    config.network = RTGN(6, 128, edge_dim=6, node_dim=5).to(device)

    # Logging Parameters
    config.save_interval = 20000
    config.data_dir = 'data'
    config.use_tensorboard = True

    # Set up evaluation
    eval_mol = generate_lignin(4)
    eval_mol_config = config_from_rdkit(eval_mol,
                                        num_conformers=200,
                                        calc_normalizers=True)
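
    # The original fragment is truncated at the call above. A minimal sketch of
    # wiring up the evaluation environment and running the agent, mirroring
    # Example #1 (the eval Task arguments here are assumptions):
    config.eval_env = Task('GibbsScorePruningEnv-v0',
                           mol_config=eval_mol_config,
                           max_steps=200)

    agent = PPOAgent(config)
    agent.run_steps()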
Example #4
import pickle

import numpy as np
import torch

from conformer_rl.agents import PPORecurrentAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent
# NOTE: import locations are assumed to match the package layout in Example #2.
from conformer_rl.molecule_generation import generate_branched_alkane, config_from_rdkit

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    # Generate the molecule config, saving it to disk, then reload it from the
    # saved pickle to demonstrate the round trip.
    mol_config = config_from_rdkit(generate_branched_alkane(10),
                                   num_conformers=4,
                                   calc_normalizers=True,
                                   save_file='10_alkane')
    with open('10_alkane.pkl', 'rb') as file:
        mol_config = pickle.load(file)

    config = Config()
    config.tag = 'example1'
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=5).to(device)
    # Batch Hyperparameters
    config.rollout_length = 2
    config.recurrence = 1
    config.optimization_epochs = 1
    config.max_steps = 24
    config.save_interval = 8
    config.eval_interval = 8
    config.eval_episodes = 1
    config.mini_batch_size = 4


    # Coefficient Hyperparameters
    lr = 5e-6 * np.sqrt(2)
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=lr, eps=1e-5)

    # Task Settings
    config.train_env = Task('GibbsScoreEnv-v0',
                            concurrency=True,
                            num_envs=2,
                            seed=np.random.randint(0, 1e5),
                            mol_config=mol_config)
    config.eval_env = Task('GibbsScorePruningEnv-v0',
                           seed=np.random.randint(0, 7e4),
                           mol_config=mol_config)
    config.curriculum = None

    agent = PPORecurrentAgent(config)
    agent.run_steps()
Example #5
import numpy as np
import torch

# NOTE: the curriculum agent's import location is assumed; the others mirror
# Example #2.
from conformer_rl.agents import PPORecurrentExternalCurriculumAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent
from conformer_rl.molecule_generation import generate_branched_alkane, config_from_rdkit

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    config = Config()
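
    # `mol_configs` and `eval_mol_config` are used below but were not defined
    # in the original fragment. A minimal sketch, assuming the generators from
    # the earlier examples: branched alkanes of increasing size form the
    # curriculum, with a larger alkane held out for evaluation.
    mol_configs = [config_from_rdkit(generate_branched_alkane(i),
                                     num_conformers=200,
                                     calc_normalizers=True)
                   for i in range(8, 16)]
    eval_mol_config = config_from_rdkit(generate_branched_alkane(16),
                                        num_conformers=200,
                                        calc_normalizers=True)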
    config.tag = 'curriculum_test'
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=5).to(device)

    # Batch Hyperparameters
    config.max_steps = 100000

    # training Hyperparameters
    lr = 5e-6 * np.sqrt(10)
    config.optimizer_fn = lambda params: torch.optim.Adam(
        params, lr=lr, eps=1e-5)

    # Task Settings
    config.train_env = Task('GibbsScorePruningCurriculumEnv-v0',
                            concurrency=True,
                            num_envs=10,
                            seed=np.random.randint(0, 1e5),
                            mol_configs=mol_configs)
    config.eval_env = Task('GibbsScorePruningEnv-v0',
                           seed=np.random.randint(0, 7e4),
                           mol_config=eval_mol_config)
    config.eval_interval = 20000

    # curriculum Hyperparameters
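    # Rough reading of these knobs (check the conformer_rl docs for the exact
    # semantics): the buffer holds the most recent episode rewards, an episode
    # counts as a success when its reward clears the threshold, and the
    # curriculum steps difficulty up or down when the buffer's success rate
    # crosses the success/fail rates.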
    config.curriculum_agent_buffer_len = 20
    config.curriculum_agent_reward_thresh = 0.4
    config.curriculum_agent_success_rate = 0.7
    config.curriculum_agent_fail_rate = 0.2

    agent = PPORecurrentExternalCurriculumAgent(config)
    agent.run_steps()