def retrain(self, n, keep_top_n, smiles_and_scores):
        """Write the best-scoring molecules to a dataset and train a new LM.

        :param n: iteration number, used to name the dataset/model files
        :param keep_top_n: number of top-scoring molecules to keep
        :param smiles_and_scores: iterable of (smiles, score) pairs
        """
        print("writing dataset...")
        name = 'molexit-%d' % n
        dataset = '../models/molexit/%s.txt' % name
        dataset_scores = []
        # Single descending sort instead of list(reversed(sorted(...))),
        # which built two intermediate lists.
        top_pairs = sorted(smiles_and_scores,
                           key=lambda p: p[1],
                           reverse=True)[:keep_top_n]
        with open(dataset, 'w') as f:
            for smi, score in top_pairs:
                # Canonicalize via Open Babel, re-encode as DeepSMILES, and
                # write the space-separated tokens as one training line.
                dsmi = self.converter.encode(
                    pybel.readstring("smi", smi.strip()).write("can").strip())
                tok = DeepSMILESTokenizer(dsmi)
                f.write(' '.join(t.value for t in tok.get_tokens()))
                f.write("\n")
                dataset_scores.append(score)

        print('dataset: size: %s, mean score: %s, max score: %s' %
              (len(dataset_scores), np.mean(dataset_scores),
               np.max(dataset_scores)))
        print('training new LM...')
        self.lm_trainer.train(10, dataset, '../models/molexit', name)

        # Swap in the newly trained model.
        vocab = get_arpa_vocab('../models/molexit/%s.arpa' % name)
        self.lm = KenLMDeepSMILESLanguageModel(
            '../models/molexit/%s.klm' % name, vocab)
    def generate_optimized_molecules(self,
                                     scoring_function,
                                     number_molecules,
                                     starting_population=None):
        """Sample molecules from the LM in timed iterations, scoring each
        with scoring_function and retraining the LM on the best molecules
        after every iteration; return the top number_molecules SMILES.
        """
        self.new_model_dir()

        # Start from the pre-trained ChEMBL 10-gram DeepSMILES model.
        vocab = get_arpa_vocab(
            '../resources/chembl_25_deepsmiles_klm_10gram_200503.arpa')
        self.lm = KenLMDeepSMILESLanguageModel(
            '../resources/chembl_25_deepsmiles_klm_10gram_200503.klm', vocab)

        print("generating %s samples..." % number_molecules)
        smiles_and_scores = []

        TIME_PER_ITERATION = self.time_per_iteration_minutes * 60  # in seconds

        found = False
        for n in range(1, self.num_iterations + 1):
            print("iteration %s" % n)
            num_valid = 0

            start = time.time()
            elapsed = time.time() - start
            # Keep sampling until the per-iteration time budget is used up.
            while elapsed < TIME_PER_ITERATION:
                try:
                    generated = self.lm.generate(num_chars=100,
                                                 text_seed='<s>')

                    decoded = DeepSMILESLanguageModelUtils.decode(generated,
                                                                  start='<s>',
                                                                  end='</s>')
                    smiles = DeepSMILESLanguageModelUtils.sanitize(decoded)

                    score = scoring_function.score(smiles)
                    num_valid += 1
                    smiles_and_scores.append((smiles, score))

                    # A perfect score means the goal is reached; stop early.
                    if score == 1.0:
                        found = True
                        break

                except Exception:
                    # Best-effort: invalid generations are simply skipped.
                    pass
                elapsed = time.time() - start

            print("num valid: %s" % num_valid)

            if found:
                break

            # Retrain the LM on the best molecules seen so far.
            self.retrain(n, self.keep_top_n, smiles_and_scores)

        # Highest-scoring molecules first.
        return [
            pair[0] for pair in list(
                reversed(sorted(smiles_and_scores, key=lambda p: p[1])))
            [:number_molecules]
        ]
Ejemplo n.º 3
0
 def __init__(self):
     """Load the pre-trained ChEMBL DeepSMILES language model and reset
     the best-result trackers."""
     arpa_path = '../resources/chembl_25_deepsmiles_klm_10gram_200503.arpa'
     klm_path = '../resources/chembl_25_deepsmiles_klm_10gram_200503.klm'
     self.vocab = get_arpa_vocab(arpa_path)
     self.lm = KenLMDeepSMILESLanguageModel(klm_path, self.vocab)
     self.best_smiles = None
     self.best_score = -1.0
Ejemplo n.º 4
0
class ChemgramsSmilesSampler(DistributionMatchingGenerator):
    """Samples SMILES strings from a pre-trained KenLM DeepSMILES model."""

    def __init__(self):
        arpa_path = '../resources/chembl_25_deepsmiles_klm_10gram_200503.arpa'
        klm_path = '../resources/chembl_25_deepsmiles_klm_10gram_200503.klm'
        self.vocab = get_arpa_vocab(arpa_path)
        self.lm = KenLMDeepSMILESLanguageModel(klm_path, self.vocab)

    def generate(self, number_samples):
        """Return number_samples SMILES strings; failed generations are
        reported as the string "invalid"."""
        print("generating %s samples..." % number_samples)
        samples = []
        for _ in range(number_samples):
            try:
                raw = self.lm.generate(num_chars=100, text_seed='<s>')
                decoded = DeepSMILESLanguageModelUtils.decode(
                    raw, start='<s>', end='</s>')
                smiles = DeepSMILESLanguageModelUtils.sanitize(decoded)
            except Exception:
                smiles = "invalid"
            samples.append(smiles)
        return samples
Ejemplo n.º 5
0
)
# Record the experiment configuration in the log.
logger.info("width = 12, max_depth = 50, start_state = ['<s>'], c = 15")
logger.info(
    "score: -1.0 if invalid; -1.0 if seen in iteration; tanimoto distance from abilify if valid; rescaling from [0,1] to [-1,1]"
)
logger.info("LanguageModelMCTSWithPUCTTerminating")
logger.info(
    "TanimotoScorer(abilify, radius=6); distance only (no SA or cycle scoring)"
)
logger.info("num_iterations = 100")
logger.info("time per iteration = 45 min.")
logger.info("keep_top_n = 20000 unique")

# Load the pre-trained ChEMBL 10-gram DeepSMILES language model.
vocab = get_arpa_vocab(
    '../resources/chembl_25_deepsmiles_klm_10gram_200503.arpa')
lm = KenLMDeepSMILESLanguageModel(
    '../resources/chembl_25_deepsmiles_klm_10gram_200503.klm', vocab)

# Reference molecule for the Tanimoto similarity scorer.
abilify = "Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl"
distance_scorer = TanimotoScorer(abilify, radius=6)

# DeepSMILES converter and KenLM trainer; the KenLM binaries are expected
# at the hard-coded path prepended to PATH below.
converter = Converter(rings=True, branches=True)
env = os.environ.copy()
env["PATH"] = "/Users/luis/kenlm/build/bin:" + env["PATH"]
lm_trainer = KenLMTrainer(env)


def smiles_to_deepsmiles(smiles):
    """Canonicalize *smiles* via Open Babel, then encode it as DeepSMILES."""
    mol = pybel.readstring("smi", smiles)
    canonical = mol.write("can").strip()
    return converter.encode(canonical)

    # Record the MCTS configuration in the log.
    logger.info("width = 12, max_depth = 50, start_state = ['<s>'], c = 5")
    logger.info("score: -1.0 if invalid; -1.0 if seen previously; TanimotoScorer(abilify, radius=6) if valid; rescaling from [0,1] to [-1,1]")
    logger.info("LanguageModelMCTSWithPUCTTerminating")

    TIME_LIMIT = 3 * 60 * 60  # three hours in seconds
    # TIME_LIMIT = 2*60  # 2 minutes in seconds

    LOG_INTERVAL = 1 * 60 * 60  # one hour in seconds
    # LOG_INTERVAL = 30.0  # 30 seconds

    KEEP_TOP_N = 20000

    logger.info("loading language model...")

    vocab = get_arpa_vocab('../resources/chembl_25_deepsmiles_klm_10gram_200503.arpa')
    lm = KenLMDeepSMILESLanguageModel('../resources/chembl_25_deepsmiles_klm_10gram_200503.klm', vocab)

    # Reference molecule for the Tanimoto similarity scorer.
    abilify = "Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl"
    distance_scorer = TanimotoScorer(abilify, radius=6)

    # MCTS search parameters.
    num_simulations = 15000000  # much more than 8 hours
    width = 12
    max_depth = 50
    start_state = ["<s>"]
    c = 5

    # Accumulators for results across the search.
    all_unique = {}
    all_valid = []
    num_valid = 0
    simulations = 0
Ejemplo n.º 7
0
    # Decode and log the best sequence found by the MCTS search.
    best = mcts.get_best_sequence()
    generated_text = ''.join(best[0])
    logger.info("best generated text: %s" % generated_text)
    decoded = DeepSMILESLanguageModelUtils.decode(generated_text,
                                                  start='<s>',
                                                  end='</s>')
    smiles = DeepSMILESLanguageModelUtils.sanitize(decoded)
    logger.info("best SMILES: %s, J: %s (%s seconds)" %
                (smiles, distance_scorer.score(smiles), str((end - start))))

    log_top_best(all_smiles, 5, logger)

    # Write the top-scoring molecules, as space-separated DeepSMILES tokens,
    # to a dataset file for retraining.
    logger.info("writing dataset...")
    name = 'molexit-%d' % n
    dataset = '../models/molexit/%s.txt' % name
    with open(dataset, 'w') as f:
        for smi in list(
                reversed(sorted(all_smiles.items(),
                                key=lambda kv: kv[1][0])))[:keep_top_n]:
            dsmi = smiles_to_deepsmiles(smi[0].strip())
            tok = DeepSMILESTokenizer(dsmi)
            tokens = tok.get_tokens()
            f.write(' '.join([t.value for t in tokens]))
            f.write("\n")

    # Train a new KenLM model on the dataset and swap it in.
    logger.info('training new LM...')
    lm_trainer.train(6, dataset, '../models/molexit', name)

    vocab = get_arpa_vocab('../models/molexit/%s.arpa' % name)
    lm = KenLMDeepSMILESLanguageModel('../models/molexit/%s.klm' % name, vocab)
from chemgrams import get_arpa_vocab, KenLMDeepSMILESLanguageModel, DeepSMILESLanguageModelUtils, DeepSMILESTokenizer, \
    LanguageModelMCTSWithUCB1

from rdkit import rdBase
rdBase.DisableLog('rdApp.error')
rdBase.DisableLog('rdApp.warning')

logger = logger()

# Entry point: load the ChemTS 250k LM and configure a small search.
if __name__ == '__main__':

    logger.info("loading language model...")
    vocab = get_arpa_vocab(
        '../resources/chemts_250k_deepsmiles_klm_10gram_200429.arpa')
    lm = KenLMDeepSMILESLanguageModel(
        '../resources/chemts_250k_deepsmiles_klm_10gram_200429.klm', vocab)

    # Search parameters.
    num_simulations = 1000
    width = 3
    text_length = 25
    start_state = ["<s>"]

    def eval_function(text):
        generated = ''.join(text)
        try:
            decoded = DeepSMILESLanguageModelUtils.decode(generated,
                                                          start='<s>',
                                                          end='</s>')
            DeepSMILESLanguageModelUtils.sanitize(decoded)
        except Exception:
            return 0
Ejemplo n.º 9
0
from rdkit.RDLogger import logger
from rdkit import rdBase
# Silence RDKit error/warning output.
rdBase.DisableLog('rdApp.error')
rdBase.DisableLog('rdApp.warning')

logger = logger()
THIS_DIR = os.path.dirname(os.path.abspath(__file__))

if __name__ == '__main__':

    logger.info("loading language model...")

    # Pre-trained ChemTS 250k 6-gram DeepSMILES model.
    vocab = get_arpa_vocab(
        '../resources/chemts_250k_deepsmiles_klm_6gram_190414.arpa')
    lm = KenLMDeepSMILESLanguageModel(
        '../resources/chemts_250k_deepsmiles_klm_6gram_190414.klm', vocab)

    # MCTS search parameters.
    num_simulations = 100000
    width = 24
    max_depth = 100
    start_state = ["<s>"]
    c = 5

    qedscorer = QEDScorer()

    # Accumulator for generated molecules.
    all_smiles = {}

    def eval_function(text):
        generated = ''.join(text)
        try:
            decoded = DeepSMILESLanguageModelUtils.decode(generated,
    LanguageModelMCTSWithUCB1

from rdkit.RDLogger import logger
from rdkit import rdBase, Chem
# Silence RDKit error/warning output.
rdBase.DisableLog('rdApp.error')
rdBase.DisableLog('rdApp.warning')

logger = logger()

if __name__ == '__main__':

    logger.info("loading language model...")

    # Pre-trained ChEMBL 6-gram DeepSMILES model.
    vocab = get_arpa_vocab(
        '../models/chembl_25_deepsmiles_klm_6gram_190413.arpa')
    lm = KenLMDeepSMILESLanguageModel(
        '../models/chembl_25_deepsmiles_klm_6gram_190413.klm', vocab)

    # Search parameters.
    num_simulations = 1000
    width = 3
    text_length = 25
    start_state = ["<s>"]

    def eval_function(text):
        generated = ''.join(text)
        try:
            decoded = DeepSMILESLanguageModelUtils.decode(generated,
                                                          start='<s>',
                                                          end='</s>')
            DeepSMILESLanguageModelUtils.sanitize(decoded)
        except Exception:
            return 0
from rdkit import rdBase, Chem
# Silence RDKit error/warning output.
rdBase.DisableLog('rdApp.error')
rdBase.DisableLog('rdApp.warning')
logger = get_logger('chemgrams.log')

THIS_DIR = os.path.dirname(os.path.abspath(__file__))

# Record the experiment configuration in the log.
logger.info(os.path.basename(__file__))
logger.info("KenLMDeepSMILESLanguageModel('../models/chembl_25_deepsmiles_klm_10gram_200503.klm', vocab)")
logger.info("TanimotoScorer(abilify, radius=6)")
logger.info("num_iterations = 100")
logger.info("time per iteration = 45 min.")
logger.info("keep_top_n = 20000")

# Load the pre-trained ChEMBL 10-gram DeepSMILES language model.
vocab = get_arpa_vocab('../models/chembl_25_deepsmiles_klm_10gram_200503.arpa')
lm = KenLMDeepSMILESLanguageModel('../models/chembl_25_deepsmiles_klm_10gram_200503.klm', vocab)

# Reference molecule for the Tanimoto similarity scorer.
abilify = "Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl"
distance_scorer = TanimotoScorer(abilify, radius=6)

cycle_scorer = CycleScorer()

# DeepSMILES converter and KenLM trainer (KenLM binaries prepended to PATH).
converter = Converter(rings=True, branches=True)
env = os.environ.copy()
env["PATH"] = "/Users/luis/kenlm/build/bin:" + env["PATH"]
lm_trainer = KenLMTrainer(env)


def smiles_to_deepsmiles(smiles):
    """Canonicalize the SMILES via Open Babel and encode it as DeepSMILES."""
    canonical = pybel.readstring("smi", smiles).write("can").strip()
    return converter.encode(canonical)
    # Record the MCTS configuration in the log.
    logger.info("width = 24, max_depth = 100, start_state = ['<s>'], c = 5")
    logger.info(
        "score: -1.0 if invalid; -1.0 if seen previously; 1.0 if valid")
    logger.info("LanguageModelMCTSWithPUCTTerminating")

    # TIME_LIMIT = 8 * 60 * 60  # eight hours in seconds
    TIME_LIMIT = 2 * 60  # 2 minutes in seconds

    # LOG_INTERVAL = 2 * 60 * 60  # two hours in seconds
    LOG_INTERVAL = 30.0  # 30 seconds

    logger.info("loading language model...")

    vocab = get_arpa_vocab(
        '../resources/chemts_250k_deepsmiles_klm_10gram_200429.arpa')
    lm = KenLMDeepSMILESLanguageModel(
        '../resources/chemts_250k_deepsmiles_klm_10gram_200429.klm', vocab)

    # Search parameters.
    num_simulations = 15000000  # much more than 8 hours
    width = 24
    max_depth = 100
    start_state = ["<s>"]
    c = 5

    # Accumulators for results across the search.
    all_smiles = set()
    num_valid = 0
    i = 0

    def log_progress():
        global t
        logger.info("--results--")
        logger.info("num valid: %d" % num_valid)
Ejemplo n.º 13
0
)
# Record the experiment configuration in the log.
logger.info("width = 12, max_depth = 50, start_state = ['<s>'], c = 5")
logger.info(
    "score: -1.0 if invalid; -1.0 if seen previously; tanimoto distance from abilify if valid"
)
logger.info("LanguageModelMCTSWithPUCTTerminating")
logger.info("TanimotoScorer(abilify, radius=6)")
logger.info("num_iterations = 100")
logger.info("simulations_per_iteration = 50000")
logger.info("keep_top_n = 5000")

logger.info("loading language model...")

# Pre-trained ZINC12 fragments 6-gram DeepSMILES model.
vocab = get_arpa_vocab(
    '../resources/zinc12_fragments_deepsmiles_klm_6gram_190421.arpa')
lm = KenLMDeepSMILESLanguageModel(
    '../resources/zinc12_fragments_deepsmiles_klm_6gram_190421.klm', vocab)

# Reference molecule for the Tanimoto similarity scorer.
abilify = "Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl"
distance_scorer = TanimotoScorer(abilify, radius=6)

cycle_scorer = CycleScorer()

# DeepSMILES converter and KenLM trainer (KenLM binaries prepended to PATH).
converter = Converter(rings=True, branches=True)
env = os.environ.copy()
env["PATH"] = "/Users/luis/kenlm/build/bin:" + env["PATH"]
lm_trainer = KenLMTrainer(env)

def log_best(j, all_best, n_valid, lggr):
    if j % 10000 == 0:
        lggr.info("--iteration: %d--" % j)
# Silence RDKit error/warning output.
rdBase.DisableLog('rdApp.error')
rdBase.DisableLog('rdApp.warning')

logger = get_logger('chemgrams.log')
THIS_DIR = os.path.dirname(os.path.abspath(__file__))

# Record the configuration: plain LM sampling, no search.
logger.info("LM-only")
logger.info(
    "KenLMDeepSMILESLanguageModel(n=10, 'chemts_250k_deepsmiles_klm_10gram_200429.klm')"
)
logger.info("num_chars=100, text_seed='<s>'")

vocab = get_arpa_vocab(
    '../resources/chemts_250k_deepsmiles_klm_10gram_200429.arpa')
lm = KenLMDeepSMILESLanguageModel(
    '../resources/chemts_250k_deepsmiles_klm_10gram_200429.klm', vocab)

# Accumulators for generated molecules.
all_smiles = set()
num_valid = 0

start = time.time()
for i in range(500000):  # about enough to get ~250,000 valid molecules
    try:
        generated = lm.generate(num_chars=100, text_seed='<s>')

        decoded = DeepSMILESLanguageModelUtils.decode(generated,
                                                      start='<s>',
                                                      end='</s>')
        sanitized = DeepSMILESLanguageModelUtils.sanitize(decoded)

        num_valid += 1
Ejemplo n.º 15
0
THIS_DIR = os.path.dirname(os.path.abspath(__file__))

# Record the experiment configuration in the log.
logger.info(os.path.basename(__file__))
logger.info(
    "KenLMDeepSMILESLanguageModel('../resources/zinc12_fragments_deepsmiles_klm_10gram_200502.klm', vocab)"
)
logger.info("TanimotoScorer(abilify, radius=6)")
logger.info("num_iterations = 100")
logger.info("attempts_per_iteration = 400000")
logger.info("keep_top_n = 20000")

logger.info("loading language model...")

# Pre-trained ZINC12 fragments 10-gram DeepSMILES model.
vocab = get_arpa_vocab(
    '../resources/zinc12_fragments_deepsmiles_klm_10gram_200502.arpa')
lm = KenLMDeepSMILESLanguageModel(
    '../resources/zinc12_fragments_deepsmiles_klm_10gram_200502.klm', vocab)

# Reference molecule for the Tanimoto similarity scorer.
abilify = "Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl"
distance_scorer = TanimotoScorer(abilify, radius=6)

cycle_scorer = CycleScorer()

# DeepSMILES converter and KenLM trainer (KenLM binaries prepended to PATH).
converter = Converter(rings=True, branches=True)
env = os.environ.copy()
env["PATH"] = "/Users/luis/kenlm/build/bin:" + env["PATH"]
lm_trainer = KenLMTrainer(env)


def smiles_to_deepsmiles(smiles):
    """Canonicalize the SMILES via Open Babel and encode it as DeepSMILES."""
    canonical = pybel.readstring("smi", smiles).write("can").strip()
    return converter.encode(canonical)
Ejemplo n.º 16
0
)
# Record the experiment configuration in the log.
logger.info("width = 12, max_depth = 35, start_state = ['<s>'], c = 5")
logger.info(
    "score: -1.0 if invalid; -1.0 if seen previously; tanimoto distance from abilify if valid"
)
logger.info("LanguageModelMCTSWithPUCTTerminating")
logger.info("TanimotoScorer(abilify, radius=6)")
logger.info("num_iterations = 100")
logger.info("simulations_per_iteration = 50000")
logger.info("keep_top_n = 5000")

logger.info("loading language model...")

# Pre-trained ZINC12 fragments 10-gram DeepSMILES model.
vocab = get_arpa_vocab(
    '../resources/zinc12_fragments_deepsmiles_klm_10gram_200502.arpa')
lm = KenLMDeepSMILESLanguageModel(
    '../resources/zinc12_fragments_deepsmiles_klm_10gram_200502.klm', vocab)

# Reference molecule for the Tanimoto similarity scorer.
abilify = "Clc4cccc(N3CCN(CCCCOc2ccc1c(NC(=O)CC1)c2)CC3)c4Cl"
scorer = TanimotoScorer(abilify, radius=6)

# DeepSMILES converter and KenLM trainer (KenLM binaries prepended to PATH).
converter = Converter(rings=True, branches=True)
env = os.environ.copy()
env["PATH"] = "/Users/luis/kenlm/build/bin:" + env["PATH"]
lm_trainer = KenLMTrainer(env)


def log_best(j, all_best, n_valid, lggr):
    """Every 1000 iterations, log the iteration number, the count of valid
    molecules, and the current top-5 best molecules."""
    if j % 1000 != 0:
        return
    lggr.info("--iteration: %d--" % j)
    lggr.info("num valid: %d" % n_valid)
    log_top_best(all_best, 5, lggr)
class ChemgramsGoalDirectedGenerator(GoalDirectedGenerator):
    """Goal-directed molecule generator: repeatedly samples SMILES from a
    KenLM DeepSMILES language model, scores them, and retrains the model on
    the best-scoring molecules found so far ("molexit" iterations).
    """

    def __init__(self, num_iterations, keep_top_n, time_per_iteration_minutes):
        # num_iterations: maximum number of sample-and-retrain rounds.
        # keep_top_n: how many top-scoring molecules feed each retraining.
        # time_per_iteration_minutes: sampling budget per round.
        self.num_iterations = num_iterations
        self.keep_top_n = keep_top_n
        self.time_per_iteration_minutes = time_per_iteration_minutes

        # Assigned in generate_optimized_molecules and replaced by retrain.
        self.lm = None

        # KenLM binaries are expected at the hard-coded path below.
        env = os.environ.copy()
        env["PATH"] = "/Users/luis/kenlm/build/bin:" + env["PATH"]
        self.lm_trainer = KenLMTrainer(env)

        self.converter = Converter(rings=True, branches=True)

    def generate_optimized_molecules(self,
                                     scoring_function,
                                     number_molecules,
                                     starting_population=None):
        """Sample molecules from the LM in timed iterations, scoring each
        with scoring_function and retraining the LM on the best molecules
        after every iteration; return the top number_molecules SMILES.
        """
        self.new_model_dir()

        # Start from the pre-trained ChEMBL 10-gram DeepSMILES model.
        vocab = get_arpa_vocab(
            '../resources/chembl_25_deepsmiles_klm_10gram_200503.arpa')
        self.lm = KenLMDeepSMILESLanguageModel(
            '../resources/chembl_25_deepsmiles_klm_10gram_200503.klm', vocab)

        print("generating %s samples..." % number_molecules)
        smiles_and_scores = []

        TIME_PER_ITERATION = self.time_per_iteration_minutes * 60  # in seconds

        found = False
        for n in range(1, self.num_iterations + 1):
            print("iteration %s" % n)
            num_valid = 0

            start = time.time()
            elapsed = time.time() - start
            # Keep sampling until the per-iteration time budget is used up.
            while elapsed < TIME_PER_ITERATION:
                try:
                    generated = self.lm.generate(num_chars=100,
                                                 text_seed='<s>')

                    decoded = DeepSMILESLanguageModelUtils.decode(generated,
                                                                  start='<s>',
                                                                  end='</s>')
                    smiles = DeepSMILESLanguageModelUtils.sanitize(decoded)

                    score = scoring_function.score(smiles)
                    num_valid += 1
                    smiles_and_scores.append((smiles, score))

                    # A perfect score means the goal is reached; stop early.
                    if score == 1.0:
                        found = True
                        break

                except Exception:
                    # Best-effort: invalid generations are simply skipped.
                    pass
                elapsed = time.time() - start

            print("num valid: %s" % num_valid)

            if found:
                break

            # Retrain the LM on the best molecules seen so far.
            self.retrain(n, self.keep_top_n, smiles_and_scores)

        # Highest-scoring molecules first.
        return [
            pair[0] for pair in list(
                reversed(sorted(smiles_and_scores, key=lambda p: p[1])))
            [:number_molecules]
        ]

    def new_model_dir(self):
        """Delete and recreate the molexit model directory."""
        print(
            "deleting any existing molexit directory, and creating a new one..."
        )
        path = Path("../models/molexit/")
        if os.path.exists(path) and os.path.isdir(path):
            shutil.rmtree(path)
        path.mkdir(parents=True, exist_ok=True)

    def retrain(self, n, keep_top_n, smiles_and_scores):
        """Write the keep_top_n best-scoring molecules to a dataset file,
        train a new KenLM model on it, and replace self.lm with it.
        """
        print("writing dataset...")
        name = 'molexit-%d' % n
        dataset = '../models/molexit/%s.txt' % name
        dataset_scores = []
        with open(dataset, 'w') as f:
            # Highest-scoring pairs first.
            for smi, score in list(
                    reversed(sorted(smiles_and_scores,
                                    key=lambda p: p[1])))[:keep_top_n]:
                # Canonicalize via Open Babel, re-encode as DeepSMILES, and
                # write the space-separated tokens as one training line.
                dsmi = self.converter.encode(
                    pybel.readstring("smi", smi.strip()).write("can").strip())
                tok = DeepSMILESTokenizer(dsmi)
                tokens = tok.get_tokens()
                f.write(' '.join([t.value for t in tokens]))
                f.write("\n")
                dataset_scores.append(score)

        print('dataset: size: %s, mean score: %s, max score: %s' %
              (len(dataset_scores), np.mean(dataset_scores),
               np.max(dataset_scores)))
        print('training new LM...')
        self.lm_trainer.train(10, dataset, '../models/molexit', name)

        # Swap in the newly trained model.
        vocab = get_arpa_vocab('../models/molexit/%s.arpa' % name)
        self.lm = KenLMDeepSMILESLanguageModel(
            '../models/molexit/%s.klm' % name, vocab)