Example 1
    def predict(self, X, confidence=False):
        """Predict the class for each input vector.

    Parameters
    ----------
    X : 2D numpy array, shape (n_samples, n_features)
      Data vectors, where n_samples is the number of samples and n_features
      is the number of features.

    confidence : bool, default: False
      If True, also compute and return a confidence score per prediction.

    Returns
    -------
    y_pred : 1D numpy array, shape (n_samples,)
      Prediction target vector relative to X.

    confidence_score : 1D numpy array, shape (n_samples,)
      Only returned when confidence is True; the fraction of the score mass
      of the k nearest subclass neurons that agrees with the prediction.
    """
        y_pred = []
        confidence_score = []
        # Use roughly 5% of the subclass neurons as neighbors, but never
        # fewer than 1 — with k == 0 the denominator b below would be 0
        # and a / b would raise ZeroDivisionError.
        k = max(1, self._n_subclass // 20)

        if confidence:
            # The class of each subclass neuron is determined solely by the
            # linear layer weights, so it is loop-invariant: compute it once
            # instead of once per sample.
            neuron_classes = [
                int(argmax(self._linear_layer_weights[:, j]))
                for j in range(self._n_subclass)
            ]

        for x in X:
            win = self.winner(x)
            y_i = int(self.classify(win))
            y_pred.append(y_i)

            # Computing confidence score
            if confidence:
                # Bias-adjusted distance from x to every subclass neuron.
                distances = array([
                    euclidean_distance(x, self._competitive_layer_weights[j])
                    - self._biases[j]
                    for j in range(self._n_subclass)
                ])
                neighbors = argsort(distances)
                a = 0.0
                b = 0.0
                for j in range(k):
                    idx = neighbors[j]
                    weight = exp(-(distances[idx] ** 2))
                    if neuron_classes[idx] == y_i:
                        a += weight
                    b += weight
                confidence_score.append(a / b)

        # np.append promoted the original int8 accumulator to the default
        # integer dtype on first append; array(list_of_ints) matches that.
        y_pred = array(y_pred)
        if confidence:
            return y_pred, array(confidence_score)
        else:
            return y_pred
Example 2
    def winner(self, x):
        """Determine the winner neuron in the competitive layer.

    Parameters
    ----------
    x : 1D numpy array, shape (n_features,)
      Input vector where n_features is the number of features.

    Returns
    -------
    n : 1D numpy array, shape (n_subclass,)
      Array where the element at the index of the winner neuron has value 1,
      all others have value 0.
    """
        # Negated distance plus bias: the neuron closest to x (after bias
        # adjustment) gets the largest activation. Build a plain list and
        # convert once — repeated numpy.append reallocates the whole array
        # on every iteration (quadratic).
        activations = [
            self._biases[i]
            - euclidean_distance(x, self._competitive_layer_weights[i])
            for i in range(self._n_subclass)
        ]
        return compet(array(activations))
Example 3
    def quantization_error(self, X):
        """Compute the quantization error of the network.

    The quantization error is the average Euclidean distance between each
    sample and the weights of its winning competitive-layer neuron.

    Parameters
    ----------
    X : 2D numpy array, shape (n_samples, n_features)
      Data vectors, where n_samples is the number of samples and n_features
      is the number of features.

    Returns
    -------
    error : float
      Quantization error of the network.
    """
        total = 0
        for sample in X:
            # winner() returns a one-hot vector; argmax recovers the index.
            winner_idx = argmax(self.winner(sample))
            total += euclidean_distance(
                sample, self._competitive_layer_weights[winner_idx])
        return total / len(X)
Example 4
    def label_neurons(self, X, y):
        """Label each competitive-layer neuron with a class and a confidence.

    Dispatches on self._label_weight:
      - 'exponential_distance': weight the k nearest samples by exp(-d**2)
      - 'inverse_distance':     weight the k nearest samples by 1/d
      - 'uniform':              count wins of each neuron per class
      - anything else:          split neurons evenly among classes

    Parameters
    ----------
    X : 2D numpy array, shape (n_samples, n_features)
      Training vectors, where n_samples is the number of samples and
      n_features is the number of features.

    y : 1D numpy array, shape (n_samples,)
      Target vector relative to X.

    Returns
    -------
    self : object
      Returns self.
    """
        self._n_class = len(unique(y))
        self._n_neurons_each_classes = zeros(self._n_class)
        self._neurons_confidence = zeros((self._n_subclass, self._n_class))

        # Initializing linear layer weights
        if self._linear_layer_weights is None:
            self._linear_layer_weights = zeros(
                (self._n_class, self._n_subclass))

        if self._label_weight == 'exponential_distance':
            self._label_by_distance(X, y, lambda d: exp(-(d ** 2)))
        elif self._label_weight == 'inverse_distance':
            # NOTE(review): d can be zero or negative because the neuron
            # bias is subtracted from the raw distance; 1/d is then
            # undefined or negative. Behavior kept identical to the
            # original implementation — confirm upstream guarantees.
            self._label_by_distance(X, y, lambda d: 1 / d)
        elif self._label_weight == 'uniform':
            self._label_uniform(X, y)
        else:
            self._label_evenly()

        return self

    def _label_by_distance(self, X, y, weight_fn, k=10):
        """Label neurons from the k nearest samples, weighted by weight_fn.

        For each neuron, accumulates weight_fn(distance) per class over its
        k nearest (bias-adjusted) samples, normalizes into a confidence
        distribution, and assigns the best class.
        """
        neurons_weight = zeros((self._n_subclass, self._n_class))
        m = len(X)
        for i in range(self._n_subclass):
            neuron = self._competitive_layer_weights[i]
            # Bias-adjusted distance from this neuron to every sample.
            distances = array([
                euclidean_distance(neuron, X[j]) - self._biases[i]
                for j in range(m)
            ])
            neighbors = argsort(distances)
            # NOTE(review): assumes m >= k, as the original code did.
            for j in range(k):
                neurons_weight[i][y[neighbors[j]]] += weight_fn(
                    distances[neighbors[j]])

            self._neurons_confidence[i] = neurons_weight[i] / sum(
                neurons_weight[i])
            self._assign_neuron_class(i, self._neurons_confidence[i])

    def _label_uniform(self, X, y):
        """Label neurons by how often each one wins per class."""
        class_win = zeros((self._n_subclass, self._n_class))
        for idx in range(len(X)):
            win_idx = argmax(self.winner(X[idx]))
            class_win[win_idx][y[idx]] += 1
        for idx in range(self._n_subclass):
            self._assign_neuron_class(idx, class_win[idx])
            total = sum(class_win[idx])
            if total == 0:
                # Neuron never won any sample: fall back to a flat
                # confidence distribution over all classes.
                self._neurons_confidence[idx] = [1 / self._n_class
                                                 ] * self._n_class
            else:
                self._neurons_confidence[idx] = class_win[idx] / total

    def _label_evenly(self):
        """Split neurons evenly among classes; the last class takes the
        remainder. Every neuron gets a flat confidence distribution."""
        n_subclass_per_class = self._n_subclass // self._n_class
        for i in range(self._n_class):
            start = i * n_subclass_per_class
            # The last class absorbs any leftover neurons.
            stop = (self._n_subclass if i == self._n_class - 1
                    else (i + 1) * n_subclass_per_class)
            for j in range(start, stop):
                self._linear_layer_weights[i][j] = 1
                self._n_neurons_each_classes[i] += 1
                self._neurons_confidence[j] = [1 / self._n_class
                                               ] * self._n_class

    def _assign_neuron_class(self, idx, scores):
        """Assign neuron idx to the best-scoring class, breaking ties in
        favor of the class that currently owns the fewest neurons."""
        candidates = argwhere(scores == amax(scores)).ravel()
        class_name = candidates[argmin(
            self._n_neurons_each_classes[candidates])]
        self._n_neurons_each_classes[class_name] += 1
        self._linear_layer_weights[class_name][idx] = 1
Example 5
 def distance_from_winner(self, x):
     """Return the Euclidean distance from x to its winning neuron's weights."""
     # winner() yields a one-hot vector; argmax recovers the winner's index.
     winner_idx = argmax(self.winner(x))
     return euclidean_distance(x, self._competitive_layer_weights[winner_idx])