Example #1
def inplace_logistic(X):
    """Compute the logistic function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    """
    logistic_sigmoid(X, out=X)
    def predict(self, X):
        """Predict using the multi-layer perceptron model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples)
            Predicted target values per element in X.
        """
        X = atleast2d_or_csr(X)
        scores = self.decision_function(X)

        if len(scores.shape) == 1 or self.multi_label is True:
            scores = logistic_sigmoid(scores)
            results = (scores > 0.5).astype(int)

            if self.multi_label:
                return self._lbin.inverse_transform(results)

        else:
            scores = _softmax(scores)
            results = scores.argmax(axis=1)

        return self.classes_[results]
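All of the examples on this page call a logistic_sigmoid helper (historically available in scikit-learn's internal utilities). A minimal NumPy stand-in, assuming it computes 1 / (1 + exp(-x)) and optionally writes the result into an out array, could look like this:

import numpy as np

def logistic_sigmoid(x, out=None):
    """Hypothetical stand-in: 1 / (1 + exp(-x)), optionally computed in place via out."""
    if out is None:
        out = np.empty_like(x, dtype=float)
    np.negative(x, out=out)      # out = -x
    np.exp(out, out=out)         # out = exp(-x)
    out += 1.0                   # out = 1 + exp(-x)
    np.reciprocal(out, out=out)  # out = 1 / (1 + exp(-x))
    return out

This naive form can emit overflow warnings for large negative inputs; a numerically stable variant would branch on the sign of x.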
Example #3
def sigmoid(x):
    '''Activation function for the hidden-layer output.
    '''
    #exp_x = np.exp(-x)
    #sigmoid_x = 1 / (1 + exp_x)
    sigmoid_x = logistic_sigmoid(x)
    return sigmoid_x
def unigram_idf_mean_difference(entry):
    entry['unigram_idf_mean_difference_feature'] = logistic_sigmoid(
        abs(
            geometric_mean_of_unigram_idfs(entry['question1_document']) -
            geometric_mean_of_unigram_idfs(entry['question2_document'])
        )
    )
    return entry
Example #5
def test_logistic() -> None:
    print('\ttest_logistic():')
    X = np.concatenate((np.full((1, ), -np.inf), np.arange(-5, 5),
                        np.full((1, ), np.inf))).reshape(1, -1)
    X_true = logistic_sigmoid(X)
    ACTIVATIONS["logistic"](X)
    np.testing.assert_array_equal(X, X_true)
    X_true = np.negative(np.log(1 - X))
    ACTIVATIONS_INVERSE["logistic"](X)
    np.testing.assert_array_equal(X, X_true)
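The test above exercises the endpoints of the logistic function: it is 0 at -inf, 0.5 at the origin, and 1 at +inf. A quick sanity check with plain NumPy, independent of the ACTIVATIONS tables under test:

import numpy as np

x = np.array([-np.inf, 0.0, np.inf])
print(1.0 / (1.0 + np.exp(-x)))  # -> [0.  0.5 1. ]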
Example #6
def logistic(X):
    """Compute the logistic function inplace.
    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.
    Returns
    -------
    X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
        The transformed data.
    """
    return logistic_sigmoid(X, out=X)
Example #7
def logistic(X):
    """Compute the logistic function inplace.

    Parameters
    ----------
    X : {array-like, sparse matrix}, shape (n_samples, n_features)
        The input data.

    Returns
    -------
    X_new : {array-like, sparse matrix}, shape (n_samples, n_features)
        The transformed data.
    """
    return logistic_sigmoid(X, out=X)
    def predict_proba(self, X):
        """Probability estimates.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples, n_outputs)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        """
        scores = self.decision_function(X)

        if len(scores.shape) == 1:
            scores = logistic_sigmoid(scores)
            return np.vstack([1 - scores, scores]).T
        else:
            return _softmax(scores)
Example #9
    def forward_propagate(self, word):
        activations = []

        input_to_layer = word

        # Hidden layers: affine transform followed by a ReLU nonlinearity.
        for layer in range(len(self.layers) - 2):
            input_to_layer = np.dot(input_to_layer, self.network.coefs_[layer])
            input_to_layer += self.network.intercepts_[layer]
            input_to_layer = np.maximum(input_to_layer, 0)
            activations.append(input_to_layer)

        # Output layer: affine transform followed by the logistic function.
        input_to_layer = np.dot(input_to_layer, self.network.coefs_[-1])
        input_to_layer += self.network.intercepts_[-1]
        input_to_layer = logistic_sigmoid(input_to_layer)
        activations.append(input_to_layer)

        if self.verbose: print("Activations: {}".format(activations))
        word = word.reshape(1,-1)
        if self.verbose: print("Proba from net: {}".format(self.network.predict_proba(word)))
        return activations
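The forward pass above replays a fitted scikit-learn MLPClassifier by hand from its coefs_ and intercepts_ attributes, with ReLU on the hidden layers and the logistic function on the output layer. A self-contained sketch of the same idea, assuming a binary problem so that the output activation really is logistic:

import numpy as np
from sklearn.neural_network import MLPClassifier

rng = np.random.default_rng(0)
X = rng.standard_normal((200, 10))
y = (X[:, 0] + X[:, 1] > 0).astype(int)

net = MLPClassifier(hidden_layer_sizes=(8,), activation="relu",
                    max_iter=1000, random_state=0).fit(X, y)

# Manual forward pass: ReLU on hidden layers, logistic on the output layer.
a = X
for W, b in zip(net.coefs_[:-1], net.intercepts_[:-1]):
    a = np.maximum(a @ W + b, 0)
proba = 1.0 / (1.0 + np.exp(-(a @ net.coefs_[-1] + net.intercepts_[-1])))

# Should match the library's own probability for the positive class.
np.testing.assert_allclose(proba.ravel(), net.predict_proba(X)[:, 1], atol=1e-6)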
Example #10
    def _run(self, x):  # pylint: disable=W0221
        y = logistic_sigmoid(x)
        return (y, )
Example #11
File: utils.py  Project: ZikangZhou/mlp
def logistic(X):
    return logistic_sigmoid(X, out=X)
        lr=0.5,
        train_split=None,
        max_epochs=100)
    lr_clf.initialize()

    print("Weights:")
    print(lr_clf.module_.linear_transform_layer.weight)
    print("Bias:")
    print(lr_clf.module_.linear_transform_layer.bias)

    print("Random Data!")
    # Generate random data
    x_NF = np.random.randn(N, F)

    true_w_F = np.arange(F) + 1
    true_y_proba_N = logistic_sigmoid(np.dot(x_NF, true_w_F))
    
    true_y_N = np.asarray(
        true_y_proba_N >= np.random.rand(N),
        dtype=np.float64)

    clf = sklearn.linear_model.LogisticRegression(
        C=0.5/lr_clf.l2_penalty_weights,
        solver='lbfgs')
    clf.fit(x_NF, true_y_N)

    lr_clf.fit(x_NF, true_y_N)

    print("EST BY SKORCH w:")