Example #1
    def train(self, func, x0):
        """Optimize parameters to minimze loss.

        Arguments:
            - func: A function of the parameters that returns a tuple of (gradient, loss).
            - x0: Parameters to use as starting point.

        Returns the parameters that minimize the loss.
        """
        if self._uW is None:
            # TODO: should we not refcast here?
            self._uW = numx.zeros_like(x0)
            self.deltaW = numx.ones_like(x0) * self.deltainit

        updated_params = x0.copy()

        for _ in range(self.epochs):
            # TODO: properly name variables
            uW = func(updated_params)[0]  # current gradient

            # Sign agreement between the previous and current gradient.
            WW = self._uW * uW

            # Grow the step size where the gradient kept its sign,
            # shrink it where the sign flipped, keep it where it is zero.
            self.deltaW *= self.etaplus * (WW > 0) + self.etamin * (
                WW < 0) + 1 * (WW == 0)

            # Clamp step sizes to [deltamin, deltamax].
            self.deltaW = numx.maximum(self.deltaW, self.deltamin)
            self.deltaW = numx.minimum(self.deltaW, self.deltamax)

            # Rprop-style update: step against the sign of the gradient.
            updated_params -= self.deltaW * numx.sign(uW)

            self._uW = uW

        return updated_params
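
This is a sign-based Rprop update: each parameter keeps its own step size, which grows while the gradient keeps its sign and shrinks when it flips. A minimal self-contained sketch of the same scheme in plain numpy (`numx` above is the numpy-like backend; the function name and the hyperparameter defaults below are illustrative, not taken from the original class):

    import numpy as np

    def rprop(func, x0, epochs=50, deltainit=0.1, etaplus=1.2,
              etamin=0.5, deltamin=1e-6, deltamax=50.0):
        """Sign-based Rprop mirroring the loop above (illustrative defaults)."""
        prev_grad = np.zeros_like(x0)          # last gradient seen
        delta = np.ones_like(x0) * deltainit   # per-parameter step sizes
        params = x0.copy()
        for _ in range(epochs):
            grad = func(params)[0]             # func returns (gradient, loss)
            agree = prev_grad * grad           # >0: same sign, <0: sign flip
            delta *= np.where(agree > 0, etaplus,
                              np.where(agree < 0, etamin, 1.0))
            delta = np.clip(delta, deltamin, deltamax)
            params -= delta * np.sign(grad)    # step against the gradient sign
            prev_grad = grad
        return params

    # Minimize the quadratic f(x) = |x|^2, whose gradient is 2x.
    result = rprop(lambda x: (2 * x, np.sum(x ** 2)), np.array([3.0, -4.0]))
    print(result)  # approximately [0, 0]

Because only the sign of the gradient is used, the update is insensitive to badly scaled gradients, which is the main appeal of Rprop.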
Example #2
    def _label(self, x):
        """Label the observations with the perceptron's decision rule.

        :param x: A matrix having different variables on different columns
            and observations on the rows.
        :type x: numpy.ndarray
        :return: An array with class labels from the perceptron.
        :rtype: numpy.ndarray
        """
        # todo: consider iterables
        return numx.sign(numx.dot(x, self.weights) + self.offset_weight)
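
The decision rule itself is just the sign of an affine map of the inputs. A quick standalone check in plain numpy, where `weights`, `offset_weight`, and the data are all made up for illustration:

    import numpy as np

    # Hypothetical trained parameters: 2 input features, scalar bias.
    weights = np.array([0.7, -1.3])
    offset_weight = 0.2

    x = np.array([[1.0, 0.5],    # rows are observations,
                  [0.1, 2.0]])   # columns are variables
    labels = np.sign(x @ weights + offset_weight)
    print(labels)  # [ 1. -1.]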
Exemple #4
0
    def _label(self, words):
        """Classifies the words.
        """
        # clear input dim hack
        self._set_input_dim(None)

        p = self._prob(words)
        p_spam_W = p[-1]
        p_nospam_W = p[1]
        try:
            q = p_spam_W / p_nospam_W
        except ZeroDivisionError:
            return 1
        return -numx.sign(q - 1)
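
The method is a likelihood-ratio test: with q = P(spam | words) / P(not spam | words), any q > 1 maps to the spam label -1 via -sign(q - 1). A standalone sketch of the same rule, with `prob` standing in for whatever `self._prob` returns (assumed here to be a mapping from class label to probability):

    import numpy as np

    def label(prob):
        """Likelihood-ratio decision mirroring the method above: `prob` maps
        class label (-1 = spam, +1 = not spam) to its probability."""
        p_spam, p_nospam = prob[-1], prob[1]
        try:
            q = p_spam / p_nospam
        except ZeroDivisionError:
            return 1                 # mirrors the original when p_nospam is zero
        return -np.sign(q - 1)       # q > 1 means spam, mapped to the -1 label

    print(label({-1: 0.8, 1: 0.2}))  # -1.0 (spam)
    print(label({-1: 0.1, 1: 0.9}))  # 1.0 (not spam)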
Example #4
    def _label_one(self, pattern, threshold):
        """Run asynchronous sign updates on one pattern until it no
        longer changes (Hopfield-style recall)."""
        pattern = mdp.utils.bool_to_sign(pattern)

        has_converged = False
        while not has_converged:
            has_converged = True
            # list() so that shuffle works on Python 3, where range
            # objects do not support in-place shuffling.
            iter_order = list(range(len(self._weight_matrix)))
            if self._shuffled_update:
                numx_rand.shuffle(iter_order)
            for row in iter_order:
                w_row = self._weight_matrix[row]

                thresh_row = threshold[row]
                new_pattern_row = numx.sign(numx.dot(w_row, pattern) - thresh_row)

                if new_pattern_row == 0:
                    # Following MacKay, Neural Networks, we do nothing
                    # when the new pattern is zero
                    pass
                elif pattern[row] != new_pattern_row:
                    has_converged = False
                    pattern[row] = new_pattern_row
        return mdp.utils.sign_to_bool(pattern)
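
This is asynchronous Hopfield recall: rows are updated one at a time until the pattern is a fixed point of the network. A self-contained sketch in plain numpy, with a Hebbian weight matrix, a scalar threshold, and a fixed sweep order (the original uses a per-row threshold array and can shuffle the order); all names and data below are illustrative:

    import numpy as np

    def recall(weights, pattern, threshold=0.0):
        """Asynchronous Hopfield recall in +/-1 coding, as in the loop above."""
        pattern = pattern.copy()
        converged = False
        while not converged:
            converged = True
            for row in range(len(weights)):
                new = np.sign(weights[row] @ pattern - threshold)
                if new != 0 and new != pattern[row]:
                    pattern[row] = new
                    converged = False
        return pattern

    # Store one pattern with a Hebbian outer product (zero diagonal).
    stored = np.array([1.0, -1.0, 1.0, -1.0])
    W = np.outer(stored, stored)
    np.fill_diagonal(W, 0.0)

    noisy = np.array([1.0, 1.0, 1.0, -1.0])   # one bit flipped
    print(recall(W, noisy))                    # recovers [ 1. -1.  1. -1.]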
Example #5
    def _label(self, x):
        """Returns an array with class labels from the perceptron.
        """
        return numx.sign(numx.dot(x, self.weights) + self.offset_weight)
Example #6
    def _label(self, x):
        # Label each observation by the sign of the sum of its features.
        ret = [xi.sum() for xi in x]
        return numx.sign(ret)
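
This variant ignores any learned weights and labels each observation by the sign of its feature sum, equivalent to a perceptron with unit weights and zero offset. A quick standalone check in plain numpy:

    import numpy as np

    x = np.array([[0.5, 1.0, -0.2],    # sum  1.3 -> label  1
                  [-2.0, 0.3, 0.1]])   # sum -1.6 -> label -1
    print(np.sign([xi.sum() for xi in x]))  # [ 1. -1.]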