Example #1
    from rdkit.Chem import MolFromSmiles, MolToSmiles
    from rdkit.Chem import Draw
    import image  # local helper module shipped with the repo (assumed), not a PyPI package
    import copy
    import time
    import numpy as np

    import sys
    sys.path.insert(0, '../../../')
    import molecule_vae
    grammar_weights = '../../../pretrained/zinc_vae_str_L56_E100_val.hdf5'
    # despite the "grammar" names, these are the character (string) VAE weights
    grammar_model = molecule_vae.ZincCharacterModel(grammar_weights)

    # We pick the next 50 inputs via batched greedy expected improvement
    # (sgp is the trained SparseGP and X_train the latent training features,
    # both defined earlier in the enclosing script)

    next_inputs = sgp.batched_greedy_ei(50, np.min(X_train, 0),
                                        np.max(X_train, 0))

    valid_smiles_final = decode_from_latent_space(next_inputs, grammar_model)

    from rdkit.Chem import Descriptors

    new_features = next_inputs

    # save_object and iteration are defined in the enclosing BO loop
    save_object(valid_smiles_final,
                "results/valid_smiles{}.dat".format(iteration))

    logP_values = np.loadtxt(
        '../../latent_features_and_targets_character/logP_values.txt')
    SA_scores = np.loadtxt(
        '../../latent_features_and_targets_character/SA_scores.txt')
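Example #1 calls save_object without defining it. A minimal sketch of matching gzip+pickle helpers, consistent with the definitions that appear in Example #5 below:

    import gzip
    import pickle

    def save_object(obj, filename):
        # serialize with pickle, then compress with gzip
        with gzip.GzipFile(filename, 'wb') as dest:
            dest.write(pickle.dumps(obj))

    def load_object(filename):
        # the reverse of save_object: decompress, then unpickle
        with gzip.GzipFile(filename, 'rb') as source:
            return pickle.loads(source.read())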
Example #2
    # 4 September - saving the BNN is optional

    # We would save the trained BNN here, but it is disabled: the
    # predictions in the 56-D latent space are never plotted, so the
    # saved model would not be used.

    # sys.setrecursionlimit(4000)
    # save_object(bb_alpha, "results_QED_solo/bb_alpha{}.dat".format(iteration))

    # We pick the next 50 inputs (4 September - bb_alpha_samples is a misleading name: it is the size of the batch of collected data points)

    bb_alpha_samples = 50

    next_inputs = sgp.batched_greedy_ei(bb_alpha, 50, np.min(X_train, 0),
                                        np.max(X_train, 0), bb_alpha_samples)

    # We load the decoder to obtain the molecules

    preproc = lasp.PreProcessing(dataset='drugs')
    enc_dec = lasp.EncoderDecoder()
    encoder, decoder = enc_dec.get_functions()

    postprocessor = lasp.PostProcessing(enc_dec)

    # We collect the molecule statistics

    # 4 September - these variables need descriptions; they are highly unclear

    # 4 September - each variable is a list of length decode_attempts
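
The notes above say the statistics variables are lists of length decode_attempts. A minimal sketch of how such lists might be collected; decode_fn is a hypothetical stand-in for the lasp decoder, and validity is checked with RDKit:

    from rdkit import Chem

    def collect_decode_stats(decode_fn, z, decode_attempts=100):
        # Decode the same latent point repeatedly with a stochastic decoder
        # and record, per attempt, the SMILES string and whether RDKit
        # can parse it. Both returned lists have length decode_attempts.
        smiles_per_attempt = []
        valid_per_attempt = []
        for _ in range(decode_attempts):
            s = decode_fn(z)
            smiles_per_attempt.append(s)
            valid_per_attempt.append(Chem.MolFromSmiles(s) is not None)
        return smiles_per_attempt, valid_per_attempt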
Пример #3
0
                for ga_iter in ga_pbar:
                    pred = model.predictor(grid)
                    ga_pbar.set_description(
                        'Max pred: {:.4f}, mean pred: {:.4f}, std pred: {:.4f}'
                        .format(pred.max(), pred.mean(), pred.std()))
                    grads = torch.autograd.grad([x for x in pred], grid)[0]
                    grid = grid + ga_lr * grads
                pred = pred.detach().cpu().numpy()
                selected_idxs = np.argsort(-pred[:, 0])[:batch_size]
                next_inputs = grid[selected_idxs]
            next_inputs = next_inputs.detach().cpu().numpy()

        else:
            next_inputs = sgp.batched_greedy_ei(batch_size,
                                                np.min(X_train, 0),
                                                np.max(X_train, 0),
                                                np.mean(X_train, 0),
                                                np.std(X_train, 0),
                                                sample=sample_dist)
        valid_arcs_final = util.decode_from_latent_space(
            torch.FloatTensor(next_inputs).to(device), model, None, 500, max_n,
            False, data_type)

        if random_baseline:
            if args.sample_dist == 'uniform':
                random_inputs = np.random.rand(batch_size, gdim) * (
                    X_train.max(0) - X_train.min(0)) + X_train.min(0)
            elif args.sample_dist == 'normal':
                random_inputs = np.random.randn(
                    batch_size, gdim) * X_train.std(0) + X_train.mean(0)
            random_inputs = torch.FloatTensor(random_inputs).to(device)
            valid_arcs_random = util.decode_from_latent_space(
                random_inputs, model, None, 500, max_n, False, data_type)
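
The gradient-ascent branch above works because torch.autograd.grad accepts a list of single-element tensors as outputs. A self-contained toy version of the same loop; the two-layer predictor and all dimensions here are made up for illustration:

    import torch

    # stand-in for model.predictor: latent point -> predicted property
    predictor = torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.Tanh(),
                                    torch.nn.Linear(16, 1))

    grid = torch.randn(64, 8, requires_grad=True)  # candidate latent points
    ga_lr = 0.1
    for _ in range(20):
        pred = predictor(grid)
        # each element of the list has a single entry, so implicit
        # grad_outputs of ones are created, exactly as in the snippet above
        grads = torch.autograd.grad([x for x in pred], grid)[0]
        # detach to avoid growing the autograd graph across iterations
        grid = (grid + ga_lr * grads).detach().requires_grad_(True)

    # keep the highest-scoring candidates, mirroring the argsort above
    pred = predictor(grid).detach()
    best = grid[torch.argsort(-pred[:, 0])[:10]]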
Example #4
    pred, uncert = sgp.predict(X_test, 0 * X_test)
    error = np.sqrt(np.mean((pred - y_test)**2))
    testll = np.mean(sps.norm.logpdf(pred - y_test, scale=np.sqrt(uncert)))
    print('Test RMSE: ', error)
    print('Test ll: ', testll)

    pred, uncert = sgp.predict(X_train, 0 * X_train)
    error = np.sqrt(np.mean((pred - y_train)**2))
    trainll = np.mean(sps.norm.logpdf(pred - y_train, scale=np.sqrt(uncert)))
    print('Train RMSE: ', error)
    print('Train ll: ', trainll)

    # We pick the next batch of inputs (args.bo_batch_size of them)

    next_inputs = sgp.batched_greedy_ei(args.bo_batch_size, np.min(X_train, 0),
                                        np.max(X_train, 0))

    # We decode the chosen latent vectors into SMILES strings
    with torch.no_grad():
        gen_seq = model.decode(torch.FloatTensor(next_inputs).to(device))
        smiles = model.probas_to_smiles(gen_seq)
        valid_smiles_final = []
        for s in smiles:
            s = decoder(s)
            m = Chem.MolFromSmiles(s)
            if m is None:
                valid_smiles_final.append(None)
            else:
                Chem.Kekulize(m)
                s = Chem.MolToSmiles(m, kekuleSmiles=True)
                valid_smiles_final.append(s)
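
The validity check and kekulization in the decode loop above can be distilled into one helper (the function name is ours):

    from rdkit import Chem

    def to_kekulized_smiles(s):
        # returns a kekulized canonical SMILES, or None when RDKit
        # cannot parse the string, matching the loop above
        m = Chem.MolFromSmiles(s)
        if m is None:
            return None
        Chem.Kekulize(m)
        return Chem.MolToSmiles(m, kekuleSmiles=True)

    print(to_kekulized_smiles('c1ccccc1'))    # C1=CC=CC=C1
    print(to_kekulized_smiles('not-smiles'))  # None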
Example #5
def run_bo_demo():
    import sys
    sys.path.append('/home/icml18-jtnn')
    import pickle
    import gzip
    from sparse_gp import SparseGP
    import scipy.stats as sps
    import numpy as np
    import os.path
    import time
    import rdkit
    from rdkit.Chem import MolFromSmiles, MolToSmiles
    from rdkit.Chem import Descriptors
    from rdkit.Chem import PandasTools
    import torch
    import torch.nn as nn
    from jtnn import create_var, JTNNVAE, Vocab

    start_time = time.time()
    lg = rdkit.RDLogger.logger()
    lg.setLevel(rdkit.RDLogger.CRITICAL)

    # We define the functions used to load and save objects
    def save_object(obj, filename):
        result = pickle.dumps(obj)
        with gzip.GzipFile(filename, 'wb') as dest:
            dest.write(result)

    def load_object(filename):
        with gzip.GzipFile(filename, 'rb') as source:
            result = source.read()
        return pickle.loads(result)

    vocab_path = '../data/vocab.txt'
    vocab = [x.strip("\r\n ") for x in open(vocab_path)]
    vocab = Vocab(vocab)
    hidden_size = 450
    latent_size = 56
    depth = 3
    random_seed = 1
    model = JTNNVAE(vocab, hidden_size, latent_size, depth)
    model.load_state_dict(
        torch.load('../molvae/MPNVAE-h450-L56-d3-beta0.005/model.iter-4',
                   map_location=lambda storage, loc: storage))
    #model = model.cuda()

    # We set the random seed
    np.random.seed(random_seed)

    # We load the data (y is negated so that maximization becomes minimization)
    X = np.loadtxt('latent_features_demo.txt')
    y = -np.loadtxt('targets_demo.txt')
    y = y.reshape((-1, 1))

    n = X.shape[0]
    permutation = np.random.choice(n, n, replace=False)
    X_train = X[permutation, :][0:int(np.round(0.8 * n)), :]
    X_test = X[permutation, :][int(np.round(0.8 * n)):, :]
    y_train = y[permutation][0:int(np.round(0.8 * n))]
    y_test = y[permutation][int(np.round(0.8 * n)):]

    np.random.seed(random_seed)

    logP_values = np.loadtxt('logP_values_demo.txt')
    SA_scores = np.loadtxt('SA_scores_demo.txt')
    cycle_scores = np.loadtxt('cycle_scores_demo.txt')
    SA_scores_normalized = (np.array(SA_scores) -
                            np.mean(SA_scores)) / np.std(SA_scores)
    logP_values_normalized = (np.array(logP_values) -
                              np.mean(logP_values)) / np.std(logP_values)
    cycle_scores_normalized = (np.array(cycle_scores) -
                               np.mean(cycle_scores)) / np.std(cycle_scores)

    iteration = 0
    while iteration < 1:
        # We fit the GP
        np.random.seed(iteration * random_seed)
        M = 1
        sgp = SparseGP(X_train, 0 * X_train, y_train, M)
        sgp.train_via_ADAM(X_train,
                           0 * X_train,
                           y_train,
                           X_test,
                           X_test * 0,
                           y_test,
                           minibatch_size=2,
                           max_iterations=100,
                           learning_rate=0.001)

        pred, uncert = sgp.predict(X_test, 0 * X_test)
        error = np.sqrt(np.mean((pred - y_test)**2))
        testll = np.mean(sps.norm.logpdf(pred - y_test, scale=np.sqrt(uncert)))
        # print('Test RMSE: ', error)
        # print('Test ll: ', testll)

        pred, uncert = sgp.predict(X_train, 0 * X_train)
        error = np.sqrt(np.mean((pred - y_train)**2))
        trainll = np.mean(
            sps.norm.logpdf(pred - y_train, scale=np.sqrt(uncert)))
        # print('Train RMSE: ', error)
        # print('Train ll: ', trainll)

        # We pick the next 60 inputs
        next_inputs = sgp.batched_greedy_ei(60, np.min(X_train, 0),
                                            np.max(X_train, 0))
        valid_smiles = []
        valid_mols = []
        new_features = []
        for i in range(60):
            all_vec = next_inputs[i].reshape((1, -1))
            tree_vec, mol_vec = np.hsplit(all_vec, 2)
            tree_vec = create_var(torch.from_numpy(tree_vec).float())
            mol_vec = create_var(torch.from_numpy(mol_vec).float())
            s = model.decode(tree_vec, mol_vec, prob_decode=False)
            if s is not None:
                valid_smiles.append(s)
                # print(MolFromSmiles(s))
                valid_mols.append(str(MolFromSmiles(s)))
                new_features.append(all_vec)

        print(len(valid_smiles), "molecules are found")
        valid_smiles = valid_smiles[:50]
        valid_mols = valid_mols[:50]
        new_features = new_features[:50]  # keep features aligned with the kept molecules
        new_features = np.vstack(new_features)

        # save_object(valid_smiles, save_dir + "/valid_smiles{}.dat".format(iteration))
        # save_object(valid_mols, save_dir + "/valid_mols{}.png".format(iteration))
        import sascorer
        import networkx as nx
        from rdkit.Chem import rdmolops

        scores = []
        for i in range(len(valid_smiles)):
            current_log_P_value = Descriptors.MolLogP(
                MolFromSmiles(valid_smiles[i]))
            current_SA_score = -sascorer.calculateScore(
                MolFromSmiles(valid_smiles[i]))
            cycle_list = nx.cycle_basis(
                nx.Graph(
                    rdmolops.GetAdjacencyMatrix(MolFromSmiles(
                        valid_smiles[i]))))
            if len(cycle_list) == 0:
                cycle_length = 0
            else:
                cycle_length = max([len(j) for j in cycle_list])
            if cycle_length <= 6:
                cycle_length = 0
            else:
                cycle_length = cycle_length - 6

            current_cycle_score = -cycle_length

            current_SA_score_normalized = (
                current_SA_score - np.mean(SA_scores)) / np.std(SA_scores)
            current_log_P_value_normalized = (
                current_log_P_value -
                np.mean(logP_values)) / np.std(logP_values)
            current_cycle_score_normalized = (
                current_cycle_score -
                np.mean(cycle_scores)) / np.std(cycle_scores)

            score = (current_SA_score_normalized +
                     current_log_P_value_normalized +
                     current_cycle_score_normalized)
            scores.append(-score)  # the target is negated, matching y above

    # save_object(scores, save_dir + "/scores{}.dat".format(iteration))

        if len(new_features) > 0:
            X_train = np.concatenate([X_train, new_features], 0)
            y_train = np.concatenate([y_train, np.array(scores)[:, None]], 0)

        iteration += 1

    # print('Seconds taken: %s' % (time.time() - start_time))
    all_smiles = []
    all_smiles.extend(zip(valid_smiles, scores, valid_mols))
    all_smiles = [(x, -y, z) for x, y, z in all_smiles]  # undo the negation for reporting
    all_smiles = sorted(all_smiles, key=lambda x: x[1], reverse=True)
    return all_smiles[0:3]
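
The scoring loop in Example #5 computes the raw components of the standard penalized-logP objective. Distilled into one helper (our own packaging of the same logic; each component still needs the training-set normalization shown above before summing):

    import networkx as nx
    import sascorer
    from rdkit.Chem import Descriptors, MolFromSmiles, rdmolops

    def penalized_logp_components(smiles):
        # raw (unnormalized) components: logP, negated SA score, and a
        # penalty for rings with more than six atoms
        mol = MolFromSmiles(smiles)
        log_p = Descriptors.MolLogP(mol)
        sa = -sascorer.calculateScore(mol)
        cycle_list = nx.cycle_basis(
            nx.Graph(rdmolops.GetAdjacencyMatrix(mol)))
        cycle_length = max((len(c) for c in cycle_list), default=0)
        cycle_score = -max(cycle_length - 6, 0)
        return log_p, sa, cycle_score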
Example #6
        sgp.train_via_ADAM(X_train, 0 * X_train, y_train, X_test, X_test * 0,
                           y_test, minibatch_size=10 * M,
                           max_iterations=cmd_args.num_epochs,
                           learning_rate=args.gp_lr)
    
        # pred, uncert = sgp.predict(X_test, 0 * X_test)
        # error = np.sqrt(np.mean((pred - y_test)**2))
        # testll = np.mean(sps.norm.logpdf(pred - y_test, scale=np.sqrt(uncert)))
        # print('Test RMSE: ', error)
        # print('Test ll: ', testll)

        # pred, uncert = sgp.predict(X_train, 0 * X_train)
        # error = np.sqrt(np.mean((pred - y_train)**2))
        # trainll = np.mean(sps.norm.logpdf(pred - y_train, scale=np.sqrt(uncert)))
        # print('Train RMSE: ', error)
        # print('Train ll: ', trainll)

        next_inputs = sgp.batched_greedy_ei(50, np.min(X_train, 0),
                                            np.max(X_train, 0))
        valid_smiles_final = decode_from_latent_space(next_inputs, model)
        save_object(valid_smiles_final,
                    "%s/valid_smiles-seed-%d-iter-%d.dat" %
                    (cmd_args.save_dir, args.seed, iteration))
        new_features = next_inputs

        scores = []
        for i in range(len(valid_smiles_final)):
            if valid_smiles_final[i] is not None:
                current_log_P_value = Descriptors.MolLogP(
                    MolFromSmiles(valid_smiles_final[i]))
                current_SA_score = -sascorer.calculateScore(
                    MolFromSmiles(valid_smiles_final[i]))
                cycle_list = nx.cycle_basis(
                    nx.Graph(rdmolops.GetAdjacencyMatrix(
                        MolFromSmiles(valid_smiles_final[i]))))
                if len(cycle_list) == 0:
                    cycle_length = 0
                else:
                    cycle_length = max([len(j) for j in cycle_list])
                if cycle_length <= 6: