Example #1
    def test_sigmoid_numpy_array(self):
        data = np.array([1, .85])
        result = activations.sigmoid(data)
        np.testing.assert_array_almost_equal(
            result,
            np.array([0.7310585786300049, 0.7005671424739729]),
            decimal=14)
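The tests in this section assume an activations.sigmoid that accepts Python scalars, lists, and NumPy arrays alike and applies the logistic function element-wise. The module and function names come from the tests themselves; the body below is a minimal sketch, not the original implementation:

import numpy as np

def sigmoid(x):
    # Element-wise logistic function 1 / (1 + exp(-x)).
    # np.asarray lets scalars, lists, and ndarrays all pass through,
    # matching the input types exercised by the tests (assumed behavior).
    x = np.asarray(x, dtype=float)
    return 1.0 / (1.0 + np.exp(-x))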
Example #2
    def predict(self, features):
        """
        Predict the class labels for the provided data

        Parameters
        ----------
        features : array-like, shape (n_samples, n_features)

        Returns
        -------
        target : array of shape [n_samples]
            Class labels for each data sample.
        """

        if self.fit_intercept:
            intercept = np.ones((features.shape[0], 1))
            features = np.hstack((intercept, features))

        final_scores = np.dot(features, self.weights)
        predictions = np.round(sigmoid(final_scores))

        return predictions
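A quick illustration of the thresholding above, using hypothetical scores. Note that np.round rounds halves to even, so a decision score of exactly 0 (probability 0.5) is classified as 0:

import numpy as np

scores = np.array([-2.0, 0.0, 1.5])       # hypothetical decision scores
probs = 1.0 / (1.0 + np.exp(-scores))     # sigmoid -> [0.1192, 0.5, 0.8176]
print(np.round(probs))                    # [0. 0. 1.] -- 0.5 rounds to even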
Example #3
    def fit(self, features, target):
        """
        Fit model to training data

        Parameters
        ----------
        features : array-like, shape (n_samples, n_features)
            features matrix

        target : array-like, shape [n_samples]
            target array
        """

        if not isinstance(target, np.ndarray):
            try:
                # Try to convert (e.g. a pandas Series) to a flat ndarray
                target = target.values.ravel()
            except Exception:
                print("Error - Couldn't convert target to ndarray")
                raise

        if self.fit_intercept:
            intercept = np.ones((features.shape[0], 1))
            features = np.hstack((intercept, features))

        # Initialize weights
        self.weights = np.zeros(features.shape[1])
        if self.optimization == 'adam':
            # Constants for Adam optimization (paper-recommended values)
            beta1 = 0.9
            beta2 = 0.999
            eps = 1E-8
            # First-moment vector for Adam optimization
            m = np.zeros_like(self.weights)
            # Second-moment vector for Adam optimization
            v = np.zeros_like(self.weights)
        elif self.optimization == 'radam':
            # Constants for Rectified Adam optimization (paper-recommended values)
            beta1 = 0.9
            beta2 = 0.999
            eps = 1E-8
            # First-moment vector for Rectified Adam optimization
            m = np.zeros_like(self.weights)
            # Second-moment vector for Rectified Adam optimization
            v = np.zeros_like(self.weights)
            # Maximum length of the approximated SMA (rho_infinity in the paper)
            p_inf = 2 / (1 - beta2) - 1

        # Print settings if verbose
        if self.verbose > 0:
            print(f'Optimization: {self.optimization}')

        # Initialize previous loss to an arbitrarily high value for the stopping check
        previous_loss = 100000
        stop = False
        iteration = 0

        while not stop:
            iteration += 1

            scores = np.dot(features, self.weights)
            predictions = sigmoid(scores)

            # Compute gradient
            # The gradient of logistic regression is the derivative of the
            # log loss (negative log-likelihood)
            # Note: the gradient differs between binary and multi-class problems
            gradient = -np.dot(features.T, target - predictions)

            # Update Weights
            if self.optimization == 'adam':
                # Update weights by using Adam Optimization
                # (as opposed to simply learning_rate * gradient)
                # https://arxiv.org/pdf/1412.6980.pdf
                # http://cs231n.github.io/neural-networks-3/
                # (See Section: Per-parameter adaptive learning rate methods)
                m = beta1 * m + (1 - beta1) * gradient
                mt = m / (1 - beta1**iteration)
                v = beta2 * v + (1 - beta2) * (gradient**2)
                vt = v / (1 - beta2**iteration)
                self.weights += -self.learning_rate * mt / (np.sqrt(vt) + eps)
            elif self.optimization == 'radam':
                # Update exponential moving averages of the 1st and 2nd moments
                m = beta1 * m + (1 - beta1) * gradient
                v = beta2 * v + (1 - beta2) * (gradient**2)
                # Compute bias-corrected moving average
                mt = m / (1 - beta1**iteration)
                # Compute length of the approximated SMA
                pt = p_inf - ((2 * iteration * (beta2**iteration)) /
                              (1 - (beta2**iteration)))
                # If the variance is tractable, update with adaptive momentum;
                # otherwise fall back to un-adapted momentum
                if pt > 4:
                    # Compute bias-corrected moving 2nd moment
                    vt = v / (1 - beta2**iteration)
                    # Compute the variance rectification term
                    rt = np.sqrt(((pt - 4) * (pt - 2) * p_inf) /
                                 ((p_inf - 4) * (p_inf - 2) * pt))
                    # Update weights with adaptive momentum
                    self.weights += -self.learning_rate * rt * mt / (np.sqrt(vt) + eps)
                else:
                    # Update weights with un-adapted momentum
                    self.weights += -self.learning_rate * mt
            else:
                self.weights -= self.learning_rate * gradient

            # Check to see if stopping criterion is reached
            loss = log_loss(features, target, self.weights)
            if loss > previous_loss - self.tol:
                stop = True
            else:
                previous_loss = loss

            # Verbose Output
            if self.verbose > 0 and iteration % self.output_freq == 0:
                print("Iteration: {0}, Log Loss: {1}".format(iteration, loss))
                if stop:
                    print("Stopping Criterion Reached.")

        return self
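A hypothetical end-to-end run of the class these methods belong to. The class name LogisticRegression and its constructor arguments are assumptions inferred from the attributes that fit() and predict() read (fit_intercept, optimization, learning_rate, tol, verbose, output_freq); only the method bodies come from the examples:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))                # toy features
y = (X[:, 0] + X[:, 1] > 0).astype(float)    # linearly separable labels

# Class name and constructor signature are assumed, not from the source.
model = LogisticRegression(fit_intercept=True, optimization='adam',
                           learning_rate=0.01, tol=1e-6,
                           verbose=1, output_freq=100)
model.fit(X, y)
accuracy = np.mean(model.predict(X) == y)
print(f'Training accuracy: {accuracy:.3f}')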
Example #4
    def test_sigmoid_int(self):
        data = 1
        result = activations.sigmoid(data)
        np.testing.assert_almost_equal(result, 0.7310585786300049, decimal=14)
Example #5
    def test_sigmoid_python_array(self):
        data = [1, .85]
        result = activations.sigmoid(data)
        np.testing.assert_array_almost_equal(
            result, [0.7310585786300049, 0.7005671424739729], decimal=14)
Example #6
    def test_sigmoid_float(self):
        data = .85
        result = activations.sigmoid(data)
        np.testing.assert_almost_equal(result, 0.7005671424739729, decimal=14)
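As a plain-NumPy sanity check on the expected constants used throughout these tests (nothing assumed beyond the logistic formula):

import numpy as np

for x in (1.0, 0.85):
    print(x, 1.0 / (1.0 + np.exp(-x)))
# 1.0  -> 0.7310585786300049
# 0.85 -> 0.7005671424739729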