Example #1
import numpy as np

from conformer_rl.agents.base_agent import BaseAgent  # import paths assumed; the original shows only the method body
from conformer_rl.config import Config


class Agent(BaseAgent):  # enclosing class not shown in the original; BaseAgent is assumed
    def __init__(self, config: Config):
        super().__init__(config)

        self.network = config.network  # neural network / model
        self.optimizer = config.optimizer_fn(self.network.parameters())

        self.total_rewards = np.zeros(config.num_workers)
        self.states = self.task.reset()  # self.task is assumed to be created in BaseAgent.__init__
        self.prediction = None
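The config.optimizer_fn consumed above is a factory rather than an optimizer instance; Examples #2, #6, and #7 build it as a lambda. A minimal sketch of the round trip (import path assumed, base learning rate taken from Example #2):

import torch

from conformer_rl.config import Config  # import path assumed

config = Config()
config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=5e-6, eps=1e-5)

# Inside the agent, the factory is applied to the network's parameters:
# optimizer = config.optimizer_fn(network.parameters())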
Example #2
import logging

import numpy as np
import torch

from conformer_rl import utils  # utils/Config import paths assumed; not shown in the original
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGN
from conformer_rl.molecule_generation import test_alkane

logging.basicConfig(level=logging.DEBUG)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    utils.set_one_thread()

    mol_config = test_alkane()

    config = Config()
    config.tag = 'example2'
    config.network = RTGN(6, 128, edge_dim=6, node_dim=5).to(device)
    # Batch Hyperparameters
    config.num_workers = 20
    config.rollout_length = 20
    config.optimization_epochs = 4
    config.max_steps = 10000000
    config.save_interval = config.num_workers*200*5
    config.eval_interval = config.num_workers*200*5
    config.eval_episodes = 2
    config.mini_batch_size = 50

    # Coefficient Hyperparameters
    lr = 5e-6 * np.sqrt(config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=lr, eps=1e-5)
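This example (like Examples #3, #4, #6, and #7) breaks off before an agent is created. Judging from Example #5, the finished Config would then be passed to an agent; a minimal sketch, with PPOAgent assumed since the RTGN model here is non-recurrent:

from conformer_rl.agents import PPOAgent  # assumed; Example #5 uses PPORecurrentAgent

agent = PPOAgent(config)
agent.run_steps()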
Example #3
import torch

from conformer_rl.config import Config  # import paths assumed; not shown in the original
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent
from conformer_rl.molecule_generation import branched_alkane

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    # Set up the environment.
    alkane_env_config = branched_alkane(num_atoms=18)
    training_env = Task('GibbsScorePruningEnv-v0',
                        concurrency=True,
                        num_envs=20,
                        mol_config=alkane_env_config,
                        max_steps=200)

    # Initialize Config
    config = Config()
    config.tag = 'tutorial'
    config.train_env = training_env

    # Set up neural network
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=5).to(device)
    config.num_workers = 20

    # Logging Parameters
    config.save_interval = 20000
    config.data_dir = 'data'
    config.use_tensorboard = True

    # Set up evaluation
    config.eval_env = Task('GibbsScorePruningEnv-v0',
                           num_envs=1,
                           mol_config=alkane_env_config,
                           max_steps=200)  # remaining arguments assumed; the original snippet breaks off here
Example #4
import logging
import pickle

import torch

from conformer_rl import utils  # utils/Config import paths assumed; not shown in the original
from conformer_rl.config import Config
from conformer_rl.models import RTGNRecurrent
from conformer_rl.molecule_generation.generate_alkanes import generate_branched_alkane
from conformer_rl.molecule_generation.generate_molecule_config import config_from_rdkit

logging.basicConfig(level=logging.DEBUG)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    utils.set_one_thread()

    # Generate the molecule config, saving it to '10_alkane.pkl', then reload it from
    # disk; the reload overwrites the first assignment and demonstrates the save/load round trip.
    mol_config = config_from_rdkit(generate_branched_alkane(10), num_conformers=4, calc_normalizers=True, save_file='10_alkane')
    with open('10_alkane.pkl', 'rb') as file:
        mol_config = pickle.load(file)

    config = Config()
    config.tag = 'example1'
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=5).to(device)
    # Batch Hyperparameters
    config.rollout_length = 2
    config.recurrence = 1
    config.optimization_epochs = 1
    config.max_steps = 24
    config.save_interval = 8
    config.eval_interval = 8
    config.eval_episodes = 1
    config.mini_batch_size = 4


    # Coefficient Hyperparameters
Example #5
import logging

import numpy as np
import torch

from conformer_rl import utils  # import paths assumed; not shown in the original
from conformer_rl.agents import PPORecurrentAgent
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent
from conformer_rl.molecule_generation.generate_alkanes import generate_branched_alkane
from conformer_rl.molecule_generation.generate_molecule_config import config_from_rdkit

logging.basicConfig(level=logging.DEBUG)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    utils.set_one_thread()

    # Create molecule config object
    mol = generate_branched_alkane(14)
    mol_config = config_from_rdkit(mol,
                                   num_conformers=200,
                                   calc_normalizers=True,
                                   save_file='alkane')

    # Create agent training config object
    config = Config()
    config.tag = 'example1'

    # The original snippet never sets config.network, which the agent requires;
    # this line is an assumption mirroring the other examples.
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=5).to(device)

    # Configure Environment
    config.train_env = Task('GibbsScorePruningEnv-v0',
                            concurrency=True,
                            num_envs=5,
                            seed=np.random.randint(0, 1e5),
                            mol_config=mol_config)
    config.eval_env = Task('GibbsScorePruningEnv-v0',
                           seed=np.random.randint(0, 7e4),
                           mol_config=mol_config)
    config.eval_episodes = 10000

    agent = PPORecurrentAgent(config)
    agent.run_steps()
Example #6
import logging

import numpy as np
import torch

from conformer_rl import utils  # import paths assumed; not shown in the original
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent
from conformer_rl.molecule_generation.generate_alkanes import generate_branched_alkane
from conformer_rl.molecule_generation.generate_molecule_config import config_from_rdkit

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

logging.basicConfig(level=logging.DEBUG)

if __name__ == '__main__':
    utils.set_one_thread()

    # Create mol_configs for the curriculum
    mol_configs = [
        config_from_rdkit(generate_branched_alkane(i),
                          num_conformers=200,
                          calc_normalizers=True) for i in range(8, 16)
    ]
    eval_mol_config = config_from_rdkit(generate_branched_alkane(16),
                                        num_conformers=200,
                                        calc_normalizers=True)

    config = Config()
    config.tag = 'curriculum_test'
    config.network = RTGNRecurrent(6, 128, edge_dim=6, node_dim=5).to(device)

    # Batch Hyperparameters
    config.max_steps = 100000

    # Training Hyperparameters
    lr = 5e-6 * np.sqrt(10)  # scaled by the number of parallel environments (10, matching num_envs below)
    config.optimizer_fn = lambda params: torch.optim.Adam(
        params, lr=lr, eps=1e-5)

    # Task Settings
    config.train_env = Task('GibbsScorePruningCurriculumEnv-v0',
                            concurrency=True,
                            num_envs=10,
                            mol_configs=mol_configs)  # remaining arguments assumed; the original snippet breaks off here
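The truncation also leaves eval_mol_config unused; a plausible evaluation setup, mirroring Example #3 (arguments assumed):

    config.eval_env = Task('GibbsScorePruningEnv-v0',
                           num_envs=1,
                           mol_config=eval_mol_config)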
Example #7
import logging

import numpy as np
import torch

from conformer_rl import utils  # utils/Config import paths assumed; not shown in the original
from conformer_rl.config import Config
from conformer_rl.environments import Task
from conformer_rl.models import RTGNRecurrent, RTGN, RTGNGat
from conformer_rl.molecule_generation import xorgate

logging.basicConfig(level=logging.DEBUG)

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

if __name__ == '__main__':
    utils.set_one_thread()

    mol_config = xorgate(2, 3)

    config = Config()
    config.tag = 'example1'
    # config.network = RTGN(6, 128, edge_dim=6, node_dim=5).to(device)
    config.network = RTGNGat(6, 128, node_dim=5).to(device)
    # Batch Hyperparameters
    config.num_workers = 2
    config.rollout_length = 2
    config.recurrence = 1
    config.max_steps = 16
    config.save_interval = 8
    config.eval_interval = 8
    config.eval_episodes = 1

    # Coefficient Hyperparameters
    lr = 5e-6 * np.sqrt(config.num_workers)
    config.optimizer_fn = lambda params: torch.optim.Adam(
        params, lr=lr, eps=1e-5)  # completion mirrors Examples #2 and #6; the original snippet breaks off here