Example #1
 def test_split(self):
     raw_data_list = [
         {
             'smiles': 'CCOc1ccc2nc(S(N)(=O)=O)sc2c1'
         },
         {
             'smiles': 'CCOc1ccc2nc(S(N)(=O)=O)sc2c1'
         },
         {
             'smiles': 'CCOc1ccc2nc(S(N)(=O)=O)sc2c1'
         },
         {
             'smiles': 'CCOc1ccc2nc(S(N)(=O)=O)sc2c1'
         },
         {
             'smiles': 'CC(C)CCCCCCCOP(OCCCCCCCC(C)C)Oc1ccccc1'
         },
         {
             'smiles': 'CC(C)CCCCCCCOP(OCCCCCCCC(C)C)Oc1ccccc1'
         },
         {
             'smiles': 'CC(C)CCCCCCCOP(OCCCCCCCC(C)C)Oc1ccccc1'
         },
         {
             'smiles': 'CC(C)CCCCCCCOP(OCCCCCCCC(C)C)Oc1ccccc1'
         },
         {
             'smiles': 'CC(C)CCCCCCCOP(OCCCCCCCC(C)C)Oc1ccccc1'
         },
         {
             'smiles': 'CCCCCCCCCCOCC(O)CN'
         },
         {
             'smiles': 'CCCCCCCCCCOCC(O)CN'
         },
         {
             'smiles': 'CCCCCCCCCCOCC(O)CN'
         },
         {
             'smiles': 'CCCCCCCCCCOCC(O)CN'
         },
     ]
     dataset = InMemoryDataset(raw_data_list)
     splitter = RandomSplitter()
     train_dataset, valid_dataset, test_dataset = splitter.split(
         dataset, frac_train=0.34, frac_valid=0.33, frac_test=0.33)
     n = len(train_dataset) + len(valid_dataset) + len(test_dataset)
     self.assertEqual(n, len(dataset))
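The three fractions passed to `split` are expected to sum to 1.0, so every sample lands in exactly one subset. A minimal sketch of a reproducible split, assuming the same `InMemoryDataset` and `RandomSplitter` as above and that `split` accepts the optional `seed` argument used in Example #6:

raw_data_list = [{'smiles': 'CCOc1ccc2nc(S(N)(=O)=O)sc2c1'}] * 10
dataset = InMemoryDataset(raw_data_list)
splitter = RandomSplitter()
train_dataset, valid_dataset, test_dataset = splitter.split(
    dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1, seed=32)
# Every sample ends up in exactly one of the three subsets.
assert len(train_dataset) + len(valid_dataset) + len(test_dataset) == len(dataset)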
Example #2
def create_splitter(split_type):
    """tbd"""
    if split_type == 'random':
        splitter = RandomSplitter()
    elif split_type == 'index':
        splitter = IndexSplitter()
    elif split_type == 'scaffold':
        splitter = ScaffoldSplitter()
    elif split_type == 'random_scaffold':
        splitter = RandomScaffoldSplitter()
    else:
        raise ValueError('%s not supported' % split_type)
    return splitter
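A short usage sketch of `create_splitter`; the call site is hypothetical and assumes the splitter classes above are importable:

# Hypothetical usage of create_splitter.
splitter = create_splitter('scaffold')
assert isinstance(splitter, ScaffoldSplitter)

try:
    create_splitter('cluster')       # not one of the supported split types
except ValueError as err:
    print(err)                       # "cluster not supported"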
def main(args):
    """tbd"""
    model_config = json.load(open(args.model_config, 'r'))
    model_config['context_pooling'] = args.context_pooling

    ### build model
    train_prog = fluid.Program()
    test_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        with fluid.unique_name.guard():
            model = PreGNNContextpredModel(model_config)
            model.forward()
            opt = fluid.optimizer.Adam(learning_rate=args.lr)
            if args.distributed:
                opt = get_distributed_optimizer(opt)
            opt.minimize(model.loss)
    with fluid.program_guard(test_prog, fluid.Program()):
        with fluid.unique_name.guard():
            model = PreGNNContextpredModel(model_config)
            model.forward(is_test=True)

    place = fluid.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0))) \
            if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if args.init_model is not None and args.init_model != "":
        load_partial_params(exe, args.init_model, train_prog)

    ### load data
    k = model_config['layer_num']
    l1 = k - 1
    l2 = l1 + args.context_size
    featurizer = PreGNNContextPredFeaturizer(
            model.substruct_graph_wrapper, 
            model.context_graph_wrapper, 
            k, l1, l2)
    dataset = load_zinc_dataset(args.data_path, featurizer=featurizer)

    splitter = RandomSplitter()
    train_dataset, _, test_dataset = splitter.split(
            dataset, frac_train=0.9, frac_valid=0, frac_test=0.1)
    if args.distributed:
        # Round-robin shard of the training set: worker i keeps samples
        # i, i + worker_num, i + 2 * worker_num, ... (see the sketch after this function).
        indices = list(range(fleet.worker_index(), len(train_dataset), fleet.worker_num()))
        train_dataset = train_dataset[indices]
    print("Train/Test num: %s/%s" % (len(train_dataset), len(test_dataset)))

    ### start train
    list_test_loss = []
    for epoch_id in range(args.max_epoch):
        train_loss = train(args, exe, train_prog, model, train_dataset, featurizer)
        test_loss = evaluate(args, exe, test_prog, model, test_dataset, featurizer)
        if not args.distributed or fleet.worker_index() == 0:
            fluid.io.save_params(exe, '%s/epoch%s' % (args.model_dir, epoch_id), train_prog)
            list_test_loss.append(test_loss)
            print("epoch:%d train/loss:%s" % (epoch_id, train_loss))
            print("epoch:%d test/loss:%s" % (epoch_id, test_loss))

    if not args.distributed or fleet.worker_index() == 0:
        # The best epoch is the one with the lowest test loss.
        best_epoch_id = np.argmin(list_test_loss)
        fluid.io.load_params(exe, '%s/epoch%d' % (args.model_dir, best_epoch_id), train_prog)
        fluid.io.save_params(exe, '%s/epoch_best' % (args.model_dir), train_prog)
        return list_test_loss[best_epoch_id]
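The distributed branch above shards the training set round-robin across workers. A minimal, framework-free sketch of that indexing pattern (plain Python, no `fleet` required):

worker_num, dataset_len = 4, 10
shards = [list(range(worker_index, dataset_len, worker_num))
          for worker_index in range(worker_num)]
print(shards)  # [[0, 4, 8], [1, 5, 9], [2, 6], [3, 7]]
# Every sample index appears in exactly one shard.
assert sorted(i for shard in shards for i in shard) == list(range(dataset_len))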
def main(args):
    """
    Call the configuration function of the model, build the model and load data, then start training.

    model_config:
        a json file  with the  model configurations,such as dropout rate ,learning rate,num tasks and so on;
    task_num:
        It means the number of chembl filtered task;
    
    PreGNNSupervisedModel:
        It means the PretrainGNNModel for supervised strategy.
        Graph-level multi-task supervised pre-training to jointly predict a diverse set of supervised labels of individual graphs.
    """
    model_config = json.load(open(args.model_config, 'r'))
    if args.dropout_rate is not None:
        model_config['dropout_rate'] = args.dropout_rate
    task_num = get_chembl_filtered_task_num()
    model_config['task_num'] = task_num

    ### build model
    train_prog = fluid.Program()
    test_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        with fluid.unique_name.guard():
            model = PreGNNSupervisedModel(model_config)
            model.forward()
            opt = fluid.optimizer.Adam(learning_rate=args.lr)
            if args.distributed:
                opt = get_distributed_optimizer(opt)
            opt.minimize(model.loss)
    with fluid.program_guard(test_prog, fluid.Program()):
        with fluid.unique_name.guard():
            model = PreGNNSupervisedModel(model_config)
            model.forward(is_test=True)
    """
    Use CUDAPlace for GPU training, or use CPUPlace for CPU training.

    """

    place = fluid.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0))) \
            if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if args.init_model is not None and args.init_model != "":
        load_partial_params(exe, args.init_model, train_prog)

    ### load data
    """
    PreGNNSupervisedFeaturizer:
        It is used along with `PreGNNSupervised`. It inherits from the super class `Featurizer` which is used for feature extractions. The `Featurizer` has two functions: `gen_features` for converting from a single raw smiles to a single graph data, `collate_fn` for aggregating a sublist of graph data into a big batch.
        
    splitter:
        split type of the dataset:random,scaffold,random with scaffold. Here is randomsplit.
        `ScaffoldSplitter` will firstly order the compounds according to Bemis-Murcko scaffold, 
        then take the first `frac_train` proportion as the train set, the next `frac_valid` proportion as the valid set 
        and the rest as the test set. `ScaffoldSplitter` can better evaluate the generalization ability of the model on 
        out-of-distribution samples. Note that other splitters like `RandomSplitter`, `RandomScaffoldSplitter` 
        and `IndexSplitter` is also available."
    
    """
    featurizer = PreGNNSupervisedFeaturizer(model.graph_wrapper)
    dataset = load_chembl_filtered_dataset(args.data_path,
                                           featurizer=featurizer)

    splitter = RandomSplitter()
    train_dataset, _, test_dataset = splitter.split(dataset,
                                                    frac_train=0.9,
                                                    frac_valid=0,
                                                    frac_test=0.1)
    if args.distributed:
        indices = list(
            range(fleet.worker_index(), len(train_dataset),
                  fleet.worker_num()))
        train_dataset = train_dataset[indices]
    print("Train/Test num: %s/%s" % (len(train_dataset), len(test_dataset)))

    ### start train
    """
    Load the train function and calculate the train loss and test loss in each epoch.
    Here we set the epoch is in range of max epoch,you can change it if you want. 

    Then we will calculate the train loss ,test loss and print them.
    Finally we save the best epoch to the model according to the dataset.

    """
    list_test_loss = []
    for epoch_id in range(args.max_epoch):
        train_loss = train(args, exe, train_prog, model, train_dataset,
                           featurizer)
        test_loss = evaluate(args, exe, test_prog, model, test_dataset,
                             featurizer)
        if not args.distributed or fleet.worker_index() == 0:
            fluid.io.save_params(exe,
                                 '%s/epoch%s' % (args.model_dir, epoch_id),
                                 train_prog)
            list_test_loss.append(test_loss)
            print("epoch:%d train/loss:%s" % (epoch_id, train_loss))
            print("epoch:%d test/loss:%s" % (epoch_id, test_loss))

    if not args.distributed or fleet.worker_index() == 0:
        # The best epoch is the one with the lowest test loss.
        best_epoch_id = np.argmin(list_test_loss)
        fluid.io.load_params(exe,
                             '%s/epoch%d' % (args.model_dir, best_epoch_id),
                             train_prog)
        fluid.io.save_params(exe, '%s/epoch_best' % (args.model_dir),
                             train_prog)
        return list_test_loss[best_epoch_id]
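As mentioned in the comments above, the random split can be swapped for a scaffold split. A minimal sketch, assuming `ScaffoldSplitter` is importable from the same package as `RandomSplitter` and that `dataset` is built as in the example:

splitter = ScaffoldSplitter()
train_dataset, valid_dataset, test_dataset = splitter.split(
    dataset, frac_train=0.8, frac_valid=0.1, frac_test=0.1)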
Example #5
def main(args):
    """
    Call the configuration function of the model, build the model and load data, then start training.

    model_config:
        a json file  with the  model configurations,such as dropout rate ,learning rate,num tasks and so on;

    context_pooling:
        it means the pooling type of context prediction;
    
    PreGNNContextpredModel:
        It is an unsupervised pretraining model which use subgraphs to predict their surrounding graph structures. Our goal is to pre-train a GNN so that it maps nodes appearing in similar structural contexts to nearby embeddings.

    """
    model_config = json.load(open(args.model_config, 'r'))
    if args.dropout_rate is not None:
        model_config['dropout_rate'] = args.dropout_rate
    model_config['context_pooling'] = args.context_pooling

    ### build model
    train_prog = fluid.Program()
    test_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(train_prog, startup_prog):
        with fluid.unique_name.guard():
            model = PreGNNContextpredModel(model_config)
            model.forward()
            opt = fluid.optimizer.Adam(learning_rate=args.lr)
            if args.distributed:
                opt = get_distributed_optimizer(opt)
            opt.minimize(model.loss)
    with fluid.program_guard(test_prog, fluid.Program()):
        with fluid.unique_name.guard():
            model = PreGNNContextpredModel(model_config)
            model.forward(is_test=True)

    # Use CUDAPlace for GPU training, or use CPUPlace for CPU training.
    place = fluid.CUDAPlace(int(os.environ.get('FLAGS_selected_gpus', 0))) \
            if args.use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)

    if args.init_model is not None and args.init_model != "":
        load_partial_params(exe, args.init_model, train_prog)

    ### load data
    # PreGNNContextPredFeaturizer:
    #     Used along with `PreGNNContextpredModel`. It inherits from the base class
    #     `Featurizer`, which is used for feature extraction. `Featurizer` has two
    #     functions: `gen_features` converts a single raw SMILES string into a single
    #     graph, and `collate_fn` aggregates a sublist of graphs into a big batch.
    # k is the number of GNN layers; l1 and l2 are the inner and outer context sizes,
    # and usually l1 < l2 (see the worked example after this listing).
    # splitter:
    #     The dataset can be split randomly, by scaffold, or by random scaffold; a random
    #     split is used here. `ScaffoldSplitter` first orders the compounds by their
    #     Bemis-Murcko scaffold, then takes the first `frac_train` proportion as the train
    #     set, the next `frac_valid` proportion as the valid set, and the rest as the test
    #     set. `ScaffoldSplitter` better evaluates the generalization ability of the model
    #     on out-of-distribution samples. Other splitters such as `RandomSplitter`,
    #     `RandomScaffoldSplitter` and `IndexSplitter` are also available.
    k = model_config['layer_num']
    l1 = k - 1
    l2 = l1 + args.context_size
    featurizer = PreGNNContextPredFeaturizer(
            model.substruct_graph_wrapper, 
            model.context_graph_wrapper, 
            k, l1, l2)
    dataset = load_zinc_dataset(args.data_path, featurizer=featurizer)

    splitter = RandomSplitter()
    train_dataset, _, test_dataset = splitter.split(
            dataset, frac_train=0.9, frac_valid=0, frac_test=0.1)
    if args.distributed:
        indices = list(range(fleet.worker_index(), len(train_dataset), fleet.worker_num()))
        train_dataset = train_dataset[indices]
    print("Train/Test num: %s/%s" % (len(train_dataset), len(test_dataset)))

    ### start train
    # Run the train and evaluate functions once per epoch, for up to `max_epoch` epochs,
    # and print the train and test loss of each epoch.
    # Finally, reload the parameters of the best epoch (lowest test loss) and save them
    # as `epoch_best`.
    list_test_loss = []
    for epoch_id in range(args.max_epoch):
        train_loss = train(args, exe, train_prog, model, train_dataset, featurizer)
        test_loss = evaluate(args, exe, test_prog, model, test_dataset, featurizer)
        if not args.distributed or fleet.worker_index() == 0:
            fluid.io.save_params(exe, '%s/epoch%s' % (args.model_dir, epoch_id), train_prog)
            list_test_loss.append(test_loss)
            print("epoch:%d train/loss:%s" % (epoch_id, train_loss))
            print("epoch:%d test/loss:%s" % (epoch_id, test_loss))

    if not args.distributed or fleet.worker_index() == 0:
        best_epoch_id = np.argmin(list_test_loss)
        fluid.io.load_params(exe, '%s/epoch%d' % (args.model_dir, best_epoch_id), train_prog)
        fluid.io.save_params(exe, '%s/epoch_best' % (args.model_dir), train_prog)
        return list_test_loss[best_epoch_id]
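A worked example of the context sizes computed above, with illustrative values for `layer_num` and `context_size`:

layer_num, context_size = 5, 3
k = layer_num            # number of GNN layers
l1 = k - 1               # 4: inner boundary of the context
l2 = l1 + context_size   # 7: outer boundary of the context, so l1 < l2
print(k, l1, l2)         # 5 4 7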
Example #6
def main(args):
    """
    Call the configuration function of the compound encoder and model, 
    build the model and load data, then start training.

    compound_encoder_config: a json file with the compound encoder configurations,
    such as dropout rate ,learning rate,num tasks and so on;

    model_config: a json file with the pretrain_gnn model configurations,such as dropout rate ,
    learning rate,num tasks and so on;

    lr: It means the learning rate of different optimizer;
    
    AttrmaskModel: It is an unsupervised pretraining model which randomly masks the atom type 
    of some node and then use the masked atom type as the prediction targets.
    """
    if args.dist:
        dist.init_parallel_env()

    compound_encoder_config = load_json_config(args.compound_encoder_config)
    model_config = load_json_config(args.model_config)
    if args.dropout_rate is not None:
        compound_encoder_config['dropout_rate'] = args.dropout_rate
        model_config['dropout_rate'] = args.dropout_rate

    ### build model
    compound_encoder = PretrainGNNModel(compound_encoder_config)
    model = AttrmaskModel(model_config, compound_encoder)
    if args.dist:
        model = paddle.DataParallel(model)
    opt = paddle.optimizer.Adam(args.lr, parameters=model.parameters())

    if args.init_model is not None and args.init_model != "":
        compound_encoder.set_state_dict(paddle.load(args.init_model))
        print('Load state_dict from %s' % args.init_model)

    ### load data
    dataset = load_zinc_dataset(args.data_path)
    splitter = RandomSplitter()
    train_dataset, _, test_dataset = splitter.split(dataset,
                                                    frac_train=0.9,
                                                    frac_valid=0.0,
                                                    frac_test=0.1,
                                                    seed=32)
    if args.dist:
        train_dataset = train_dataset[dist.get_rank()::dist.get_world_size()]
    transform_fn = AttrmaskTransformFn()
    train_dataset.transform(transform_fn, num_workers=args.num_workers)
    test_dataset.transform(transform_fn, num_workers=args.num_workers)
    print("Train/Test num: %s/%s" % (len(train_dataset), len(test_dataset)))

    ### start train
    collate_fn = AttrmaskCollateFn(
        atom_names=compound_encoder_config['atom_names'],
        bond_names=compound_encoder_config['bond_names'],
        mask_ratio=model_config['mask_ratio'])
    for epoch_id in range(args.max_epoch):
        train_loss = train(args, model, train_dataset, collate_fn, opt)
        test_loss = evaluate(args, model, test_dataset, collate_fn)
        if not args.dist or dist.get_rank() == 0:
            print("epoch:%d train/loss:%s" % (epoch_id, train_loss))
            print("epoch:%d test/loss:%s" % (epoch_id, test_loss))
            paddle.save(
                compound_encoder.state_dict(),
                '%s/epoch%d/compound_encoder.pdparams' %
                (args.model_dir, epoch_id))
            paddle.save(
                model.state_dict(),
                '%s/epoch%d/model.pdparams' % (args.model_dir, epoch_id))
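The encoder parameters saved above can later be restored to warm-start a downstream model, either through `--init_model` or directly with `paddle.load`. A minimal sketch, assuming the same `PretrainGNNModel`, `compound_encoder_config` and `args` as above; the epoch in the path is illustrative:

# Reload the pretrained compound encoder saved by the loop above.
compound_encoder = PretrainGNNModel(compound_encoder_config)
state_dict = paddle.load('%s/epoch%d/compound_encoder.pdparams' % (args.model_dir, 0))
compound_encoder.set_state_dict(state_dict)
print('Loaded pretrained compound encoder')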