Example 1
    def naive_split(self) -> dict:
        frac_list = np.asarray(
            list(self.data_config.get("splits", DEFAULT_SPLITS).values()))
        # assert np.allclose(np.sum(frac_list), 1.), f'Expected frac_list to sum to 1, got {np.sum(frac_list)}'

        num_data = len(self.dataset)
        lengths = (num_data * frac_list).astype(int)
        lengths[-1] = num_data - np.sum(lengths[:-1])

        if self.data_config["shuffle"]:
            indices = np.random.RandomState(seed=None).permutation(num_data)
        else:
            indices = np.arange(num_data)

        if self.bool_mask:
            return self.indices_to_mask([
                Subset(self.dataset, indices[offset - length:offset])
                for offset, length in zip(accumulate(lengths), lengths)
            ])
        else:
            # https://docs.dgl.ai/en/0.4.x/api/python/data.html?highlight=subset#dgl.data.utils.Subset
            return dict(
                zip(self.data_config["splits"].keys(), [
                    Subset(self.dataset, indices[offset - length:offset])
                    for offset, length in zip(accumulate(lengths), lengths)
                ]))
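For reference, a small self-contained illustration of the fraction-to-length arithmetic this method (and several snippets below) relies on: the `accumulate`/`zip` pattern turns fractions into consecutive index chunks, with the last chunk absorbing any rounding error. Only NumPy and the standard library are assumed.

from itertools import accumulate

import numpy as np

frac_list = np.asarray([0.8, 0.1, 0.1])
num_data = 10
lengths = (num_data * frac_list).astype(int)   # [8, 1, 1]
lengths[-1] = num_data - np.sum(lengths[:-1])  # last chunk absorbs rounding error
indices = np.arange(num_data)
chunks = [indices[offset - length:offset]
          for offset, length in zip(accumulate(lengths), lengths)]
print(chunks)  # [array([0, ..., 7]), array([8]), array([9])]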
Example 2
def indices_split(dataset, frac_train, frac_val, frac_test, indices):
    """Reorder datapoints based on the specified indices and then take consecutive
    chunks as subsets.

    Parameters
    ----------
    dataset
        We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``
        gives the ith datapoint.
    frac_train : float
        Fraction of data to use for training.
    frac_val : float
        Fraction of data to use for validation.
    frac_test : float
        Fraction of data to use for test.
    indices : list or ndarray
        Indices specifying the order of datapoints.

    Returns
    -------
    list of length 3
        Subsets for training, validation and test, which are all :class:`Subset` instances.
    """
    frac_list = np.array([frac_train, frac_val, frac_test])
    assert np.allclose(np.sum(frac_list), 1.), \
        'Expected frac_list to sum to 1, got {:.4f}'.format(np.sum(frac_list))
    num_data = len(dataset)
    lengths = (num_data * frac_list).astype(int)
    lengths[-1] = num_data - np.sum(lengths[:-1])

    return [
        Subset(dataset, list(indices[offset - length:offset]))
        for offset, length in zip(accumulate(lengths), lengths)
    ]
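A hedged usage sketch for `indices_split`; it assumes `Subset` in the snippet is `dgl.data.utils.Subset` and that any object with `__len__` and `__getitem__` can serve as the dataset:

import numpy as np

toy_dataset = list(range(100))          # any indexable object works
order = np.argsort(toy_dataset)[::-1]   # e.g. order datapoints by descending value
train, val, test = indices_split(toy_dataset, frac_train=0.8,
                                 frac_val=0.1, frac_test=0.1, indices=order)
print(len(train), len(val), len(test))  # 80 10 10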
Example 3
    def __getitem__(self, idx):
        """Get datapoint with index"""

        if isinstance(idx, int):
            return self.graphs[idx], self.labels[idx]
        elif torch.is_tensor(idx) and idx.dtype == torch.long:
            if idx.dim() == 0:
                return self.graphs[idx], self.labels[idx]
            elif idx.dim() == 1:
                return Subset(self, idx.cpu())

        raise IndexError('Only integers and long tensors are valid '
                         'indices (got {}).'.format(type(idx).__name__))
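A brief, hypothetical illustration of the indexing behaviors above, assuming `dataset` is an instance of the (not shown) class defining this `__getitem__`:

import torch

g, y = dataset[0]                         # int index -> a single (graph, label) pair
g, y = dataset[torch.tensor(3)]           # 0-dim long tensor behaves like an int
sub = dataset[torch.tensor([0, 2, 5])]    # 1-D long tensor -> a Subset over those indices
# dataset[0.5]  would raise IndexError: only integers and long tensors are valid indices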
Example 4
def split_dataset(dataset,
                  num_train,
                  num_valid,
                  shuffle=False,
                  random_state=None):
    """Split dataset into training, validation and test set.

    Parameters
    ----------
    dataset
        We assume that ``len(dataset)`` gives the number of datapoints and ``dataset[i]``
        gives the ith datapoint.
    num_train : int
        Number of training datapoints.
    num_valid : int
        Number of validation datapoints.
    shuffle : bool, optional
        By default we perform a consecutive split of the dataset. If True,
        we will first randomly shuffle the dataset.
    random_state : None, int or array_like, optional
        Random seed used to initialize the pseudo-random number generator.
        This can be any integer between 0 and 2^32 - 1 inclusive, an array
        (or other sequence) of such integers, or None (the default value).
        If seed is None, then RandomState will try to read data from /dev/urandom
        (or the Windows analogue) if available or seed from the clock otherwise.

    Returns
    -------
    list of length 3
        Subsets for training, validation and test.
    """
    from itertools import accumulate
    num_data = len(dataset)
    assert num_train + num_valid < num_data
    lengths = [num_train, num_valid, num_data - num_train - num_valid]
    if shuffle:
        indices = np.random.RandomState(
            seed=random_state).permutation(num_data)
    else:
        indices = np.arange(num_data)
    return [
        Subset(dataset, indices[offset - length:offset])
        for offset, length in zip(accumulate(lengths), lengths)
    ]
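A minimal usage sketch, assuming `Subset` and `np` are already imported in the module that defines `split_dataset`:

toy_dataset = ['sample_{}'.format(i) for i in range(10)]
train, valid, test = split_dataset(toy_dataset, num_train=6, num_valid=2,
                                   shuffle=True, random_state=0)
print(len(train), len(valid), len(test))       # 6 2 2
print([toy_dataset[i] for i in test.indices])  # the two held-out test samples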
Example 5
def base_k_fold_split(split_method, dataset, k, log):
    """Split dataset for k-fold cross validation.

    Parameters
    ----------
    split_method : callable
        Arbitrary method for splitting the dataset
        into training, validation and test subsets.
    dataset
        We assume ``len(dataset)`` gives the size for the dataset and ``dataset[i]``
        gives the ith datapoint.
    k : int
        Number of folds to use and should be no smaller than 2.
    log : bool
        Whether to print a message at the start of preparing each fold.

    Returns
    -------
    all_folds : list of 2-tuples
        Each element of the list represents a fold and is a 2-tuple (train_set, val_set),
        which are all :class:`Subset` instances.
    """
    assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(
        k)
    all_folds = []
    frac_per_part = 1. / k
    for i in range(k):
        if log:
            print('Processing fold {:d}/{:d}'.format(i + 1, k))
        # We are reusing the code for train-validation-test split.
        train_set1, val_set, train_set2 = split_method(
            dataset,
            frac_train=i * frac_per_part,
            frac_val=frac_per_part,
            frac_test=1. - (i + 1) * frac_per_part)
        # For cross validation, each fold consists of only a train subset and
        # a validation subset.
        train_set = Subset(
            dataset,
            np.concatenate([train_set1.indices,
                            train_set2.indices]).astype(np.int64))
        all_folds.append((train_set, val_set))
    return all_folds
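For illustration, a hedged sketch of driving `base_k_fold_split` with a consecutive splitting method; it assumes dgllife's `ConsecutiveSplitter.train_val_test_split` with its usual `frac_train`/`frac_val`/`frac_test` keyword signature:

from dgllife.utils import ConsecutiveSplitter

toy_dataset = list(range(50))
folds = base_k_fold_split(ConsecutiveSplitter.train_val_test_split,
                          toy_dataset, k=5, log=True)
for train_set, val_set in folds:
    print(len(train_set), len(val_set))  # 40 10 for each fold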
Example 6
    device = torch.device("cuda")
    metrics = ['pearson_r2', 'mae']
    set_random_seed(123)
    if True:  # not os.path.isfile(split_dataset_cache):
        dataset = PDBBind2(subset=subset,
                           load_binding_pocket=True, sanitize=True,
                           zero_padding=True, num_processes=1)

        # Temporal split: order datapoints by release year, then take consecutive chunks.
        years = dataset.df['release_year'].values.astype(np.float32)
        indices = np.argsort(years).tolist()
        frac_list = np.array([experiment_config[subset]['frac_train'],
                              experiment_config[subset]['frac_val'],
                              experiment_config[subset]['frac_test']])
        num_data = len(dataset)
        lengths = (num_data * frac_list).astype(int)
        lengths[-1] = num_data - np.sum(lengths[:-1])
        train_set, val_set, test_set = [
            Subset(dataset, list(indices[offset - length:offset]))
            for offset, length in zip(accumulate(lengths), lengths)]
        # torch.save([train_set, val_set, test_set], split_dataset_cache)
    # else:
    #     train_set, val_set, test_set = torch.load(split_dataset_cache)

    batch_size = experiment_config[subset]['batch_size']
    num_epochs = experiment_config[subset]['num_epochs']
    lr = experiment_config[subset]['lr']

    train_loader = DataLoader(dataset=train_set,
                              batch_size=batch_size,
                              shuffle=True,
                              collate_fn=collate2)

    energy_model = SchNet_energy(norm=True,
Example 7
    def train_val_test_split(dataset,
                             labels,
                             task_id,
                             frac_train=0.8,
                             frac_val=0.1,
                             frac_test=0.1,
                             bucket_size=10,
                             random_state=None):
        """Split the dataset into training, validation and test subsets as stated above.

        Parameters
        ----------
        dataset
            We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``
            gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the
            ith datapoint.
        labels : tensor of shape (N, T)
            Dataset labels all tasks. N for the number of datapoints and T for the number
            of tasks.
        task_id : int
            Index for the task.
        frac_train : float
            Fraction of data to use for training. By default, we set this to be 0.8, i.e.
            80% of the dataset is used for training.
        frac_val : float
            Fraction of data to use for validation. By default, we set this to be 0.1, i.e.
            10% of the dataset is used for validation.
        frac_test : float
            Fraction of data to use for test. By default, we set this to be 0.1, i.e.
            10% of the dataset is used for test.
        bucket_size : int
            Size of bucket of datapoints. Default to 10.
        random_state : None, int or array_like, optional
            Random seed used to initialize the pseudo-random number generator.
            Can be any integer between 0 and 2**32 - 1 inclusive, an array
            (or other sequence) of such integers, or None (the default).
            If seed is None, then RandomState will try to read data from /dev/urandom
            (or the Windows analogue) if available or seed from the clock otherwise.

        Returns
        -------
        list of length 3
            Subsets for training, validation and test, which also have ``len(dataset)``
            and ``dataset[i]`` behaviors
        """
        train_val_test_sanity_check(frac_train, frac_val, frac_test)

        if random_state is not None:
            np.random.seed(random_state)

        if not isinstance(labels, np.ndarray):
            labels = F.asnumpy(labels)
        task_labels = labels[:, task_id]
        sorted_indices = np.argsort(task_labels)

        train_bucket_cutoff = int(np.round(frac_train * bucket_size))
        val_bucket_cutoff = int(np.round(
            frac_val * bucket_size)) + train_bucket_cutoff

        train_indices, val_indices, test_indices = [], [], []

        while sorted_indices.shape[0] >= bucket_size:
            current_batch, sorted_indices = np.split(sorted_indices,
                                                     [bucket_size])
            shuffled = np.random.permutation(range(bucket_size))
            train_indices.extend(
                current_batch[shuffled[:train_bucket_cutoff]].tolist())
            val_indices.extend(current_batch[
                shuffled[train_bucket_cutoff:val_bucket_cutoff]].tolist())
            test_indices.extend(
                current_batch[shuffled[val_bucket_cutoff:]].tolist())

        # Place the remaining samples in the training set.
        train_indices.extend(sorted_indices.tolist())

        return [
            Subset(dataset, train_indices),
            Subset(dataset, val_indices),
            Subset(dataset, test_indices)
        ]
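A hedged usage sketch, assuming this method is dgllife's `SingleTaskStratifiedSplitter.train_val_test_split` (as its signature suggests) and that labels form a float tensor of shape (N, T):

import torch
from dgllife.utils import SingleTaskStratifiedSplitter

toy_dataset = list(range(100))
labels = torch.randn(100, 3)             # 100 datapoints, 3 tasks
train, val, test = SingleTaskStratifiedSplitter.train_val_test_split(
    toy_dataset, labels=labels, task_id=0,
    frac_train=0.8, frac_val=0.1, frac_test=0.1, random_state=42)
print(len(train), len(val), len(test))   # 80 10 10 with the default bucket_size=10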
Example 8
    def k_fold_split(dataset,
                     mols=None,
                     sanitize=True,
                     k=5,
                     log_every_n=1000,
                     scaffold_func='decompose'):
        """Group molecules based on their scaffolds and sort groups based on their sizes.
        The groups are then split for k-fold cross validation.

        As with the usual k-fold splitting methods, each molecule appears in the
        validation set exactly once across all folds. In addition, this method ensures
        that molecules sharing the same scaffold are placed collectively in either the
        training set or the validation set for each fold.

        Note that the folds can be highly imbalanced depending on the
        scaffold distribution in the dataset.

        Parameters
        ----------
        dataset
            We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``
            gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the
            ith datapoint.
        mols : None or list of rdkit.Chem.rdchem.Mol
            None or pre-computed RDKit molecule instances. If not None, we expect a
            one-on-one correspondence between ``dataset.smiles`` and ``mols``, i.e.
            ``mols[i]`` corresponds to ``dataset.smiles[i]``. Default to None.
        sanitize : bool
            This argument only comes into effect when ``mols`` is None and decides whether
            sanitization is performed in initializing RDKit molecule instances. See
            https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.
            Default to True.
        k : int
            Number of folds to use and should be no smaller than 2. Default to be 5.
        log_every_n : None or int
            Molecule related computation can take a long time for a large dataset and we want
            to learn the progress of processing. This can be done by printing a message whenever
            a batch of ``log_every_n`` molecules have been processed. If None, no messages will
            be printed. Default to 1000.
        scaffold_func : str
            The function to use for computing scaffolds, which can be 'decompose' for
            using rdkit.Chem.AllChem.MurckoDecompose or 'smiles' for using
            rdkit.Chem.Scaffolds.MurckoScaffold.MurckoScaffoldSmiles.

        Returns
        -------
        list of 2-tuples
            Each element of the list represents a fold and is a 2-tuple ``(train_set, val_set)``.
            ``train_set`` and ``val_set`` also have ``len(dataset)`` and ``dataset[i]`` behaviors.
        """
        assert k >= 2, 'Expect the number of folds to be no smaller than 2, got {:d}'.format(
            k)

        molecules = prepare_mols(dataset, mols, sanitize)
        scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(
            molecules, log_every_n, scaffold_func)

        # k buckets that form a relatively balanced partition of the dataset
        index_buckets = [[] for _ in range(k)]
        for group_indices in scaffold_sets:
            bucket_chosen = int(
                np.argmin([len(bucket) for bucket in index_buckets]))
            index_buckets[bucket_chosen].extend(group_indices)

        all_folds = []
        for i in range(k):
            if log_every_n is not None:
                print('Processing fold {:d}/{:d}'.format(i + 1, k))
            train_indices = list(
                chain.from_iterable(index_buckets[:i] + index_buckets[i + 1:]))
            val_indices = index_buckets[i]
            all_folds.append(
                (Subset(dataset, train_indices), Subset(dataset, val_indices)))

        return all_folds
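A hedged usage sketch for the scaffold-based k-fold split; it assumes the method above is `dgllife.utils.ScaffoldSplitter.k_fold_split` and uses a tiny stand-in class (hypothetical, not part of dgllife) that satisfies the `len`/`__getitem__`/`smiles` contract from the docstring:

from dgllife.utils import ScaffoldSplitter

class TinySmilesDataset(object):
    """Minimal stand-in exposing len(dataset), dataset[i] and dataset.smiles[i]."""
    def __init__(self, smiles):
        self.smiles = smiles
    def __len__(self):
        return len(self.smiles)
    def __getitem__(self, i):
        return self.smiles[i]

dataset = TinySmilesDataset(['CCO', 'CCN', 'CC(=O)O', 'CCCC', 'CCOC',
                             'c1ccccc1', 'c1ccccc1O', 'c1ccncc1'])
folds = ScaffoldSplitter.k_fold_split(dataset, k=2, log_every_n=None,
                                      scaffold_func='smiles')
for train_set, val_set in folds:
    print(len(train_set), len(val_set))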
Example 9
    def train_val_test_split(dataset,
                             mols=None,
                             sanitize=True,
                             frac_train=0.8,
                             frac_val=0.1,
                             frac_test=0.1,
                             log_every_n=1000,
                             scaffold_func='decompose'):
        """Split the dataset into training, validation and test set based on molecular scaffolds.

        This splitting method ensures that molecules with the same scaffold are placed
        collectively in only one of the training, validation or test sets. As a result,
        the fraction of the dataset used for training and validation tends to be smaller
        than ``frac_train`` and ``frac_val``, while the fraction used for test tends to
        be larger than ``frac_test``.

        Parameters
        ----------
        dataset
            We assume ``len(dataset)`` gives the size for the dataset, ``dataset[i]``
            gives the ith datapoint and ``dataset.smiles[i]`` gives the SMILES for the
            ith datapoint.
        mols : None or list of rdkit.Chem.rdchem.Mol
            None or pre-computed RDKit molecule instances. If not None, we expect a
            one-on-one correspondence between ``dataset.smiles`` and ``mols``, i.e.
            ``mols[i]`` corresponds to ``dataset.smiles[i]``. Default to None.
        sanitize : bool
            This argument only comes into effect when ``mols`` is None and decides whether
            sanitization is performed in initializing RDKit molecule instances. See
            https://www.rdkit.org/docs/RDKit_Book.html for details of the sanitization.
            Default to True.
        frac_train : float
            Fraction of data to use for training. By default, we set this to be 0.8, i.e.
            80% of the dataset is used for training.
        frac_val : float
            Fraction of data to use for validation. By default, we set this to be 0.1, i.e.
            10% of the dataset is used for validation.
        frac_test : float
            Fraction of data to use for test. By default, we set this to be 0.1, i.e.
            10% of the dataset is used for test.
        log_every_n : None or int
            Molecule related computation can take a long time for a large dataset and we want
            to learn the progress of processing. This can be done by printing a message whenever
            a batch of ``log_every_n`` molecules have been processed. If None, no messages will
            be printed. Default to 1000.
        scaffold_func : str
            The function to use for computing scaffolds, which can be 'decompose' for
            using rdkit.Chem.AllChem.MurckoDecompose or 'smiles' for using
            rdkit.Chem.Scaffolds.MurckoScaffold.MurckoScaffoldSmiles.

        Returns
        -------
        list of length 3
            Subsets for training, validation and test, which also have ``len(dataset)`` and
            ``dataset[i]`` behaviors
        """
        # Perform sanity check first as molecule related computation can take a long time.
        train_val_test_sanity_check(frac_train, frac_val, frac_test)
        molecules = prepare_mols(dataset, mols, sanitize)
        scaffold_sets = ScaffoldSplitter.get_ordered_scaffold_sets(
            molecules, log_every_n, scaffold_func)
        train_indices, val_indices, test_indices = [], [], []
        train_cutoff = int(frac_train * len(molecules))
        val_cutoff = int((frac_train + frac_val) * len(molecules))
        for group_indices in scaffold_sets:
            if len(train_indices) + len(group_indices) > train_cutoff:
                if len(train_indices) + len(val_indices) + len(
                        group_indices) > val_cutoff:
                    test_indices.extend(group_indices)
                else:
                    val_indices.extend(group_indices)
            else:
                train_indices.extend(group_indices)
        return [
            Subset(dataset, train_indices),
            Subset(dataset, val_indices),
            Subset(dataset, test_indices)
        ]
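And a matching sketch for the three-way scaffold split, reusing the hypothetical `TinySmilesDataset` stand-in introduced after Example 8:

train_set, val_set, test_set = ScaffoldSplitter.train_val_test_split(
    dataset, frac_train=0.8, frac_val=0.1, frac_test=0.1,
    log_every_n=None, scaffold_func='smiles')
# Whole scaffold groups are assigned together, so the realized fractions can
# drift from 0.8/0.1/0.1, with the test set typically absorbing the excess.
print(len(train_set), len(val_set), len(test_set))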
Example 10
    graphs = dataset.graphs

    # Train/Val/Test Splitting
    N = len(graphs)
    all_idx = np.arange(N)
    np.random.shuffle(all_idx)

    val_num = 10000
    test_num = 10000

    val_idx = all_idx[:val_num]
    test_idx = all_idx[val_num : val_num + test_num]
    train_idx = all_idx[val_num + test_num : val_num + test_num + args.train_num]

    train_data = Subset(dataset, train_idx)
    val_data = Subset(dataset, val_idx)
    test_data = Subset(dataset, test_idx)

    unsup_idx = all_idx[val_num + test_num:]
    unsup_data = Subset(dataset, unsup_idx)

    # generate supervised training dataloader and unsupervised training dataloader
    train_loader = GraphDataLoader(train_data,
                                   batch_size=args.batch_size,
                                   collate_fn=collate,
                                   drop_last=False,
                                   shuffle=True)

    unsup_loader = GraphDataLoader(unsup_data,
                                   batch_size=args.batch_size,
Example 11
def load_dataset(args):
    """Load the dataset.
    Parameters
    ----------
    args : dict
        Input arguments.
    Returns
    -------
    dataset
        Full dataset.
    train_set
        Train subset of the dataset.
    val_set
        Validation subset of the dataset.
    """
    assert args['dataset'] in ['PDBBind'], 'Unexpected dataset {}'.format(args['dataset'])
    if args['dataset'] == 'PDBBind':
        if args['model'] == 'PotentialNet':
            from functools import partial
            from dgllife.utils import potentialNet_graph_construction_featurization
            dataset = PDBBind(subset=args['subset'], pdb_version=args['version'],
                              load_binding_pocket=args['load_binding_pocket'],
                              construct_graph_and_featurize=partial(
                                  potentialNet_graph_construction_featurization,
                                  distance_bins=args['distance_bins'],
                                  max_num_neighbors=args['max_num_neighbors']))
        elif args['model'] == 'ACNN':
            dataset = PDBBind(subset=args['subset'], pdb_version=args['version'],
                              load_binding_pocket=args['load_binding_pocket'])

        if args['split'] == 'random':
            train_set, val_set, test_set = RandomSplitter.train_val_test_split(
                dataset,
                frac_train=args['frac_train'],
                frac_val=args['frac_val'],
                frac_test=args['frac_test'],
                random_state=args['random_seed'])

        elif args['split'] == 'scaffold':
            train_set, val_set, test_set = ScaffoldSplitter.train_val_test_split(
                dataset,
                mols=dataset.ligand_mols,
                sanitize=False,
                frac_train=args['frac_train'],
                frac_val=args['frac_val'],
                frac_test=args['frac_test'])

        elif args['split'] == 'stratified':
            train_set, val_set, test_set = SingleTaskStratifiedSplitter.train_val_test_split(
                dataset,
                labels=dataset.labels,
                task_id=0,
                frac_train=args['frac_train'],
                frac_val=args['frac_val'],
                frac_test=args['frac_test'],
                random_state=args['random_seed'])

        elif args['split'] == 'temporal':
            years = dataset.df['release_year'].values.astype(np.float32)
            indices = np.argsort(years).tolist()
            frac_list = np.array([args['frac_train'], args['frac_val'], args['frac_test']])
            num_data = len(dataset)
            lengths = (num_data * frac_list).astype(int)
            lengths[-1] = num_data - np.sum(lengths[:-1])
            train_set, val_set, test_set = [
                Subset(dataset, list(indices[offset - length:offset]))
                for offset, length in zip(accumulate(lengths), lengths)]

        else:
            raise ValueError('Expect the splitting method '
                             'to be "random", "scaffold", "stratified" or "temporal", got {}'.format(args['split']))
        if args['frac_train'] > 0:
            train_labels = torch.stack([train_set.dataset.labels[i] for i in train_set.indices])
            train_set.labels_mean = train_labels.mean(dim=0)
            train_set.labels_std = train_labels.std(dim=0)

    return dataset, train_set, val_set, test_set
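A hedged sketch of an `args` dictionary that would exercise `load_dataset` above; the keys mirror those read in the snippet, while the values are placeholders:

args = {
    'dataset': 'PDBBind',
    'model': 'ACNN',
    'subset': 'core',
    'version': 'v2015',
    'load_binding_pocket': True,
    'split': 'random',
    'frac_train': 0.8,
    'frac_val': 0.1,
    'frac_test': 0.1,
    'random_seed': 123,
    # 'distance_bins' and 'max_num_neighbors' would also be needed for PotentialNet.
}
dataset, train_set, val_set, test_set = load_dataset(args)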
Example 12
File: utils.py Project: hacors/Drug
def load_dataset(args):
    """Load the dataset.

    Parameters
    ----------
    args : dict
        Input arguments.

    Returns
    -------
    dataset
        Full dataset.
    train_set
        Train subset of the dataset.
    val_set
        Validation subset of the dataset.
    """
    assert args['dataset'] in ['PDBBind'], 'Unexpected dataset {}'.format(args['dataset'])
    if args['dataset'] == 'PDBBind':
        dataset = PDBBind(subset=args['subset'],
                          load_binding_pocket=args['load_binding_pocket'],
                          zero_padding=True)
        # No validation set is used and frac_val = 0.
        if args['split'] == 'random':
            train_set, _, test_set = RandomSplitter.train_val_test_split(
                dataset,
                frac_train=args['frac_train'],
                frac_val=args['frac_val'],
                frac_test=args['frac_test'],
                random_state=args['random_seed'])

        elif args['split'] == 'scaffold':
            train_set, _, test_set = ScaffoldSplitter.train_val_test_split(
                dataset,
                mols=dataset.ligand_mols,
                sanitize=False,
                frac_train=args['frac_train'],
                frac_val=args['frac_val'],
                frac_test=args['frac_test'])

        elif args['split'] == 'stratified':
            train_set, _, test_set = SingleTaskStratifiedSplitter.train_val_test_split(
                dataset,
                labels=dataset.labels,
                task_id=0,
                frac_train=args['frac_train'],
                frac_val=args['frac_val'],
                frac_test=args['frac_test'],
                random_state=args['random_seed'])

        elif args['split'] == 'temporal':
            years = dataset.df['release_year'].values.astype(np.float32)
            indices = np.argsort(years).tolist()
            frac_list = np.array([args['frac_train'], args['frac_val'], args['frac_test']])
            num_data = len(dataset)
            lengths = (num_data * frac_list).astype(int)
            lengths[-1] = num_data - np.sum(lengths[:-1])
            train_set, val_set, test_set = [
                Subset(dataset, list(indices[offset - length:offset]))
                for offset, length in zip(accumulate(lengths), lengths)]

        else:
            raise ValueError('Expect the splitting method to be "random", "scaffold", '
                             '"stratified" or "temporal", got {}'.format(args['split']))
        train_labels = torch.stack([train_set.dataset.labels[i] for i in train_set.indices])
        train_set.labels_mean = train_labels.mean(dim=0)
        train_set.labels_std = train_labels.std(dim=0)

    return dataset, train_set, test_set
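Since the function attaches `labels_mean` and `labels_std` to the train subset, a typical follow-up (assumed here, not shown in the project) is to standardize labels during training and undo the transform for reporting, e.g.:

def normalize_labels(labels, train_set):
    # Standardize raw labels with the training-set statistics attached above.
    return (labels - train_set.labels_mean) / train_set.labels_std

def denormalize_predictions(preds, train_set):
    # Map model outputs back to the original label scale.
    return preds * train_set.labels_std + train_set.labels_mean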
Example 13
    elif args.dataset.lower() == 'cora_path':
        dataset = Cora_Count_Path()
    elif args.dataset.lower() == 'regular':
        dataset = RegularGraph_Count_Path(1000, 6, length_path=3)
    else:
        dataset = CitationGraphDataset(args.dataset.lower())

    train_mask = torch.BoolTensor(dataset.train_mask)
    # val_mask = torch.BoolTensor(dataset.val_mask)
    test_mask = torch.BoolTensor(dataset.test_mask)
    graph = dataset[0].to(device)
elif args.dataset.lower() in ['imdbbinary', 'imdbmulti', 'redditbinary',
                              'redditmulti5k', 'collab'] and args.graph_level:
    dataset = GINDataset(args.dataset.upper(), self_loop=args.self_loop)
    random_permutation = list(permutation(len(dataset)))
    train_mask = random_permutation[:int(0.9 * len(dataset))]
    test_mask = random_permutation[int(0.9 * len(dataset)):]
    train_loader = DataLoader(Subset(dataset, train_mask), batch_size=32,
                              shuffle=True, collate_fn=collate)
    test_loader = DataLoader(Subset(dataset, test_mask), batch_size=32,
                             shuffle=False, collate_fn=collate)
else:
    print('Either dataset or task is wrong!', 'Dataset:', args.dataset.lower(), 'Graph-level:', args.graph_level)
    assert False

# model
if args.op_base not in ['adj', 'laplacian', 'chebyshev']:
    print("Wrong operator base!")
    assert False

if args.graph_level:
    params_dict = {'input_dim': 1,
                   'input_channel': 'attr',
                   'hid_dim': args.hid_dim,
                   'output_dim': args.hid_dim,