    def clear(self) -> None:
        """
        Clears (unfits) the model.

        Raises
        ------
        UnfittedModelError
            Raised when trying to clear a model that has not been fitted yet.
            Try using the ``fit`` method to fit the model first.
        """
        if not self._is_fitted:
            raise UnfittedModelError('This model has not been fitted yet.')

        self._is_fitted = False
        self._X = np.ndarray((0, 0))
        self._y = np.ndarray((0, ))
        self._X_n = int()
        self._unique_y = np.ndarray((0, ))
        self._unique_y_counts = np.ndarray((0, ))
        self._unique_y_probabilities = np.ndarray((0, ))
        self._majority_label = None
        self._is_structured = False
        self._categorical_indices = np.ndarray((0, ))
        self._numerical_indices = np.ndarray((0, ))
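
    # A minimal usage sketch of the fit/predict/clear life cycle (purely
    # illustrative; it assumes the usual ``fit(X, y)`` interface of this
    # class and plain 2-dimensional, unstructured input arrays):
    #
    #     model.fit(np.array([[0, 0], [1, 1], [2, 2]]), np.array([0, 1, 1]))
    #     model.predict(np.array([[1, 2]]))  # a 1-element array of labels
    #     model.clear()                      # resets the internal state
    #     model.clear()                      # raises UnfittedModelError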

    def predict_proba(self, X: np.ndarray) -> np.ndarray:
        """
        Calculates label probabilities for new instances with the fitted model.

        Parameters
        ----------
        X : numpy.ndarray
            The data for which label probabilities will be predicted.

        Raises
        ------
        IncorrectShapeError
            X is not a 2-dimensional array, has 0 rows or has a different
            number of columns than the training data.
        UnfittedModelError
            Raised when trying to predict data when the model has not been
            fitted yet. Try using the ``fit`` method to fit the model first.
        RuntimeError
            Raised when trying to use this method when the predictor is
            initialised as a regressor.
        ValueError
            X has a different dtype than the data used to fit the model.

        Returns
        -------
        probabilities : numpy.ndarray
            Probabilities of each instance belonging to every class. The
            columns of the returned array correspond to the unique class
            labels ordered lexicographically.
        """
        if not self._is_classifier:
            raise RuntimeError('This functionality is not available for a '
                               'regressor.')

        if not self._is_fitted:
            raise UnfittedModelError('This model has not been fitted yet.')
        if not fuav.is_2d_array(X):
            raise IncorrectShapeError('X must be a 2-dimensional array. If '
                                      'you want to predict a single data '
                                      'point please format it as a single row '
                                      'in a 2-dimensional array.')
        if not fuav.are_similar_dtype_arrays(X, self._X):
            raise ValueError('X must have the same dtype as the training '
                             'data.')
        if not X.shape[0]:
            raise IncorrectShapeError('X must have at least one row.')
        # No need to check for columns in a structured array -> this is handled
        # by the dtype checker.
        if not fuav.is_structured_array(X):
            if X.shape[1] != self._X.shape[1]:
                raise IncorrectShapeError(('X must have the same number of '
                                           'columns as the training data '
                                           '({}).').format(self._X.shape[1]))

        probabilities = np.empty((X.shape[0], self._unique_y.shape[0]))

        if self._k < self._X_n:
            distances = self._get_distances(X)
            knn = np.argpartition(distances, self._k, axis=0)
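            # Each column of ``knn`` corresponds to one row of X; the
            # distances array is assumed to be of shape (number of training
            # points, number of rows in X), hence partitioning along axis 0
            # places the indices of the k nearest training points in the
            # first k entries of every column (in no particular order).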
            probabilities = []
            for column in knn.T:
                close_labels = self._y[column[:self._k]]
                values, counts = np.unique(close_labels, return_counts=True)
                total_counts = np.sum(counts)
                probs = np.zeros((self._unique_y.shape[0], ))
                for i in range(values.shape[0]):
                    ind = np.where(self._unique_y == values[i])[0]
                    probs[ind] = counts[i] / total_counts
                probabilities.append(probs)
            probabilities = np.array(probabilities)
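            # A worked example (purely illustrative): with unique training
            # labels [0, 1, 2], k=3 and close_labels == [1, 1, 2] for a given
            # row of X, the resulting probability row is [0., 2/3, 1/3];
            # classes absent from the neighbourhood keep a probability of 0.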
        else:
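            # k is at least as large as the number of training points, so
            # every row of X simply gets the label proportions observed in
            # the training data (the class prior).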
            probabilities = np.tile(self._unique_y_probabilities,
                                    (X.shape[0], 1))
        return probabilities

    def predict(self, X: np.ndarray) -> np.ndarray:
        """
        Predicts labels of new instances with the fitted model.

        Parameters
        ----------
        X : numpy.ndarray
            The data for which labels will be predicted.

        Raises
        ------
        IncorrectShapeError
            X is not a 2-dimensional array, has 0 rows or has a different
            number of columns than the training data.
        UnfittedModelError
            Raised when trying to predict data when the model has not been
            fitted yet. Try using the ``fit`` method to fit the model first.
        ValueError
            X has a different dtype than the data used to fit the model.

        Returns
        -------
        predictions : numpy.ndarray
            Predicted labels for each data point (class labels for a
            classifier, numerical values for a regressor).
        """
        # pylint: disable=too-many-locals,too-many-branches
        if not self._is_fitted:
            raise UnfittedModelError('This model has not been fitted yet.')
        if not fuav.is_2d_array(X):
            raise IncorrectShapeError('X must be a 2-dimensional array. If '
                                      'you want to predict a single data '
                                      'point please format it as a single row '
                                      'in a 2-dimensional array.')
        if not fuav.are_similar_dtype_arrays(X, self._X):
            raise ValueError('X must have the same dtype as the training '
                             'data.')
        if not X.shape[0]:
            raise IncorrectShapeError('X must have at least one row.')
        # No need to check for columns in a structured array -> this is handled
        # by the dtype checker.
        if not fuav.is_structured_array(X):
            if X.shape[1] != self._X.shape[1]:
                raise IncorrectShapeError(('X must have the same number of '
                                           'columns as the training data '
                                           '({}).').format(self._X.shape[1]))

        predictions = np.empty((X.shape[0], ))

        if self._k < self._X_n:
            distances = self._get_distances(X)
            # If the three nearest neighbours lie at distances 1, 2 and 2 and
            # k is set to 2, argpartition will always pick the first of the
            # two points at distance 2.
            knn = np.argpartition(distances, self._k, axis=0)
            predictions = []
            for column in knn.T:
                close_labels = self._y[column[:self._k]]
                if self._is_classifier:
                    values, counts = np.unique(close_labels,
                                               return_counts=True)
                    # If there is a tie in the counts, take the overall label
                    # counts in the training data into consideration to
                    # resolve it.
                    top_label_index = counts == counts.max()
                    top_label_unique_sorted = np.sort(values[top_label_index])
                    assert len(top_label_unique_sorted.shape) == 1, \
                        'This should be a flat array.'
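                    # A purely illustrative example of the tie-breaking below:
                    # if close_labels == ['a', 'b', 'b', 'a'], both labels are
                    # equally frequent among the neighbours, so the label that
                    # is more frequent in the whole training set wins; if that
                    # is also a tie, the lexicographically smallest label is
                    # chosen.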
                    if top_label_unique_sorted.shape[0] > 1:
                        # Resolve the tie.
                        # Get the counts of these labels in the training data.
                        labels_filter = np.array(self._unique_y.shape[0] *
                                                 [False])
                        for top_prediction in top_label_unique_sorted:
                            unique_y_filter = self._unique_y == top_prediction
                            np.logical_or(labels_filter,
                                          unique_y_filter,
                                          out=labels_filter)
                        g_top_label = self._unique_y[labels_filter]
                        g_top_label_counts = (
                            self._unique_y_counts[labels_filter])

                        # If the global counts are also tied, fall back to the
                        # lexicographically smallest of the tied labels.
                        g_top_label_index = g_top_label_counts == np.max(
                            g_top_label_counts)
                        g_top_label_sorted = np.sort(
                            g_top_label[g_top_label_index])

                        prediction = g_top_label_sorted[0]
                    else:
                        prediction = top_label_unique_sorted[0]
                else:
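                    # For a regressor the prediction is the mean of the
                    # labels (target values) of the k nearest neighbours.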
                    prediction = close_labels.mean()

                predictions.append(prediction)
            predictions = np.array(predictions)
        else:
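            # k is at least as large as the number of training points, so
            # every row of X gets the fallback ``self._majority_label``
            # computed during fitting.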
            predictions = np.array(X.shape[0] * [self._majority_label])

        return predictions