    def load_data(self):
        """
        Load data from the Purchase100 dataset, downloading and caching it
        on first use.

        # Returns
            all_data : train data, train labels, test data and test labels
        """

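        # get_file (the Keras download utility, judging by its keyword
        # arguments) fetches the archive once, verifies the md5 hash and
        # caches it locally, returning the path to the cached file.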
        path_features = get_file(
            "purchase100",
            origin=
            "https://github.com/xehartnort/Purchase100-dataset/releases/download/v1.1/purchase100.npz",
            extract=True,
            file_hash="0d7538b9806e7ee622e1a252585e7768",  # md5 hash
            cache_dir='~/.sherpa-ai')

        all_data = np.load(path_features)
        data = all_data['features']
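        # to_categorical converts the integer class labels into one-hot vectors.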
        labels = to_categorical(all_data['labels'])

        test_size = int(len(data) * 0.1)
        self._train_data, self._train_labels,\
            self._test_data, self._test_labels = db.split_train_test(
                data, labels, test_size)

        self.shuffle()

        return self.data
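
    # `db.split_train_test` is defined elsewhere in the package; a minimal
    # sketch of what it is assumed to do (hold out the last `test_size`
    # samples; the real implementation may differ, e.g. by sampling randomly):
    #
    #     def split_train_test(data, labels, test_size):
    #         train_data, test_data = data[:-test_size], data[-test_size:]
    #         train_labels, test_labels = labels[:-test_size], labels[-test_size:]
    #         return train_data, train_labels, test_data, test_labels
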
    def load_data(self):
        """
        Load data from iris package

        # Returns
            all_data : train data, train labels, test data and test labels
        """
        all_data = sklearn.datasets.load_iris()
        data = all_data["data"]
        labels = all_data["target"]

        test_size = int(len(data) * 0.1)
        self._train_data, self._train_labels,\
            self._test_data, self._test_labels = db.split_train_test(data, labels, test_size)

        self.shuffle()

        return self.data
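
    # Hedged usage sketch; the enclosing class name `Iris` is an assumption
    # made for illustration, and the structure of the returned `self.data`
    # is taken from the docstring above:
    #
    #     dataset = Iris()
    #     train_x, train_y, test_x, test_y = dataset.load_data()
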
    def load_data(self):
        """
        Load data from lfw package

        # Returns
            all_data : train data, train labels, test data and test labels
        """
        all_data = fetch_lfw_people()
        data = all_data["data"]
        labels = all_data["target"]

        test_size = int(len(data) * 0.1)
        self._train_data, self._train_labels,\
            self._test_data, self._test_labels = db.split_train_test(data, labels, test_size)

        self.shuffle()

        return self.data
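
    # fetch_lfw_people (from sklearn.datasets) downloads the Labeled Faces in
    # the Wild images into scikit-learn's data cache on first use; its `data`
    # field holds the flattened face images and `target` the person ids.
    # A hedged variant that restricts the data to frequent identities, using
    # keyword arguments from the scikit-learn API, could look like:
    #
    #     all_data = fetch_lfw_people(min_faces_per_person=70, resize=0.4)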