Example #1
    def _fit(self, V0):
        # Positive phase: hidden probabilities, samples and gradients from V0.
        Ph0 = np.zeros((self.n_hiddens, ) + self.h_shape)
        H0 = np.zeros((self.n_hiddens, ) + self.h_shape)
        Grad0 = np.zeros((self.n_hiddens, ) + (self.w_size, self.w_size))
        for k in range(self.n_hiddens):
            Ph0[k] = logistic_sigmoid(
                convolve(V0, self.weights[k]) + self.h_intercepts[k])
            Grad0[k] = convolve(V0, Ph0[k])
            H0[k][self.rng.uniform(size=self.h_shape) < Ph0[k]] = 1

        # Gibbs step: reconstruct the interior of the visible layer from the
        # sampled hidden units (flipped filters give the transpose convolution).
        h_convolved = self.v_intercept
        for k in range(self.n_hiddens):
            h_convolved += convolve(H0[k],
                                    np.flipud(np.fliplr(self.weights[k])))
        V1m = logistic_sigmoid(h_convolved)
        V1 = V0.copy()
        middle_offset = self.w_size - 1
        V1[middle_offset:-middle_offset, middle_offset:-middle_offset] = V1m

        # Negative phase and per-filter contrastive-divergence update.
        Ph1 = np.zeros((self.n_hiddens, ) + self.h_shape)
        Grad1 = np.zeros((self.n_hiddens, ) + (self.w_size, self.w_size))
        for k in range(self.n_hiddens):
            Ph1[k] = logistic_sigmoid(
                convolve(V1, self.weights[k]) + self.h_intercepts[k])
            Grad1[k] = convolve(V1, Ph1[k])
            self.weights[k] += self.lr * (Grad0[k] - Grad1[k])
        return self._net_probability(V0)
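Every snippet on this page calls a logistic_sigmoid helper. As a point of reference, here is a minimal, numerically stable sketch of what such a helper can look like, including the log=True variant some examples use; it follows the branch-on-sign trick shown in the test examples further down, and the exact upstream implementation may differ:

    import numpy as np

    def logistic_sigmoid(x, log=False):
        """Stable elementwise 1 / (1 + exp(-x)); its log when log=True."""
        x = np.asarray(x, dtype=np.float64)
        out = np.empty_like(x)
        pos, neg = x >= 0, x < 0
        if log:
            # log sig(x) = -log1p(exp(-x)) if x >= 0, else x - log1p(exp(x))
            out[pos] = -np.log1p(np.exp(-x[pos]))
            out[neg] = x[neg] - np.log1p(np.exp(x[neg]))
        else:
            out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
            ex = np.exp(x[neg])
            out[neg] = ex / (1.0 + ex)
        return out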
Example #2
 def _fit(self, V0):
     Ph0 = np.zeros((self.n_hiddens,) + self.h_shape)
     H0 = np.zeros((self.n_hiddens,) + self.h_shape)
     Grad0 = np.zeros((self.n_hiddens,) + (self.w_size, self.w_size))
     for k in range(self.n_hiddens):
         Ph0[k] = logistic_sigmoid(convolve(V0, self.weights[k])
                                   + self.h_intercepts[k])
         Grad0[k] = convolve(V0, Ph0[k])
         H0[k][self.rng.uniform(size=self.h_shape) < Ph0[k]] = 1

     h_convolved = self.v_intercept
     for k in range(self.n_hiddens):
         h_convolved += convolve(H0[k], np.flipud(np.fliplr(self.weights[k])))
     V1m = logistic_sigmoid(h_convolved)
     V1 = V0.copy()
     middle_offset = self.w_size - 1
     V1[middle_offset:-middle_offset, middle_offset:-middle_offset] = V1m

     Ph1 = np.zeros((self.n_hiddens,) + self.h_shape)
     Grad1 = np.zeros((self.n_hiddens,) + (self.w_size, self.w_size))
     for k in range(self.n_hiddens):
         Ph1[k] = logistic_sigmoid(convolve(V1, self.weights[k])
                                   + self.h_intercepts[k])
         Grad1[k] = convolve(V1, Ph1[k])
         self.weights[k] += self.lr * (Grad0[k] - Grad1[k])
     return self._net_probability(V0)
Example #3
    def score_samples(self, v):
        """Compute the pseudo-likelihood of v.

        Parameters
        ----------
        v : {array-like, sparse matrix} shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        pseudo_likelihood : array-like, shape (n_samples,)
            Value of the pseudo-likelihood (proxy to likelihood).
        """
        rng = check_random_state(self.random_state)
        fe = self._free_energy(v)

        if issparse(v):
            v_ = v.toarray()
        else:
            v_ = v.copy()
        i_ = rng.randint(0, v.shape[1], v.shape[0])
        v_[np.arange(v.shape[0]), i_] = 1 - v_[np.arange(v.shape[0]), i_]
        fe_ = self._free_energy(v_)
        return v.shape[1] * logistic_sigmoid(fe_ - fe, log=True)
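The pseudo-likelihood above flips exactly one randomly chosen visible unit per sample and compares free energies before and after the flip. A standalone illustration of the bit-flip indexing (names invented for the demo):

    import numpy as np

    rng = np.random.RandomState(0)
    v = rng.randint(2, size=(4, 6)).astype(np.float64)  # 4 samples, 6 units
    v_ = v.copy()
    i_ = rng.randint(0, v.shape[1], v.shape[0])         # one column per row
    v_[np.arange(v.shape[0]), i_] = 1 - v_[np.arange(v.shape[0]), i_]
    assert (v != v_).sum(axis=1).tolist() == [1, 1, 1, 1]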
Example #4
 def _mean_visibles_theano(self, h, v):
     """
     Computes the probabilities P(v=1|h).

     Parameters
     ----------
     h : array-like, shape (n_samples, n_groups, n_components)
         Values of the hidden layer.
     v : array-like, shape (n_samples, n_features)
         The original visible layer input.

     Returns
     -------
     v : array-like, shape (n_samples, n_features)
     """
     activations = np.array([
         convTheano(h[:, i, :], self.components_[i], border='full') +
         self.intercept_visible_ for i in range(self.n_groups)
     ]).sum(axis=0)

     visibles = np.array(v)
     windowSize = self.window_size
     visualSize = int(sqrt(v.shape[1]))
     innerSize = visualSize - 2 * windowSize + 2
     n_sample = v.shape[0]
     innerV = logistic_sigmoid(activations)
     innerV = innerV.reshape(
         n_sample, visualSize,
         visualSize)[:, windowSize - 1:visualSize - windowSize + 1,
                     windowSize - 1:visualSize - windowSize + 1]
     visibles = visibles.reshape(n_sample, visualSize, visualSize)

     visibles[:, windowSize - 1:visualSize - windowSize + 1,
              windowSize - 1:visualSize - windowSize + 1] = innerV
     visibles = visibles.reshape(n_sample, -1)

     return visibles
Example #5
    def cnnConvolve(self, X, W, b):
        n_images = X.shape[0]
        dim_images = X.shape[1]
        dim_conv = dim_images - self.dim_filter + 1

        convolved_features = np.zeros(
            (n_images, self.n_filters, dim_conv, dim_conv))

        for image_num in range(n_images):
            for filter_num in range(self.n_filters):

                filter_ = W[filter_num]
                b_ = b[filter_num]

                filter_ = np.rot90(np.squeeze(filter_), 2)

                im = np.squeeze(X[image_num])

                convolved_image = convolve2d(im, filter_, 'valid')

                convolved_image = logistic_sigmoid(convolved_image + b_)

                convolved_features[image_num, filter_num] = convolved_image

        return convolved_features
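The np.rot90(..., 2) before the 'valid' convolution turns SciPy's convolution (which flips the kernel) into the cross-correlation a CNN layer actually computes. A sanity check of that identity, assuming convolve2d here is scipy.signal.convolve2d:

    import numpy as np
    from scipy.signal import convolve2d, correlate2d

    rng = np.random.RandomState(0)
    im, filt = rng.rand(5, 5), rng.rand(3, 3)
    assert np.allclose(convolve2d(im, np.rot90(filt, 2), 'valid'),
                       correlate2d(im, filt, 'valid'))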
Example #6
    def predict(self, X):
        """Predict using the multi-layer perceptron model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)

        Returns
        -------
        array, shape (n_samples)
            Predicted target values per element in X.
        """
        X = atleast2d_or_csr(X)
        scores = self.decision_function(X)

        if len(scores.shape) == 1 or self.multi_label is True:
            scores = logistic_sigmoid(scores)
            results = (scores > 0.5).astype(int)

            if self.multi_label:
                return self._lbin.inverse_transform(results)

        else:
            scores = _softmax(scores)
            results = scores.argmax(axis=1)

        return self.classes_[results]
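The multiclass branch relies on a _softmax helper that is not shown on this page; a minimal, numerically stable sketch of the row-wise behaviour it is assumed to have:

    import numpy as np

    def _softmax(scores):
        # Subtract the row max so exp() cannot overflow; result rows sum to 1.
        shifted = scores - scores.max(axis=1, keepdims=True)
        exp_scores = np.exp(shifted)
        return exp_scores / exp_scores.sum(axis=1, keepdims=True)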
Example #8
    def _mean_hiddens_theano(self, v):
        """Computes the probabilities P(h=1|v).
        
        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_groups, n_components)
            Corresponding mean field values for the hidden layer.
            
        """
        activationsWithoutIntercept = convTheano(v, self.components_)
        activations = np.array([
            activationsWithoutIntercept[:, i, :] + self.intercept_hidden_[i]
            for i in range(self.n_groups)
        ])
        n_samples = v.shape[0]
        return logistic_sigmoid(
            activations.reshape(n_samples * self.n_groups,
                                self.n_components)).reshape(
                                    n_samples, self.n_groups,
                                    self.n_components)
Example #9
    def _mean_visibles_theano(self, h, v):
        """
        Computes the probabilities P(v=1|h).

        Parameters
        ----------
        h : array-like, shape (n_samples, n_groups, n_components)
            Values of the hidden layer.
        v : array-like, shape (n_samples, n_features)
            The original visible layer input.

        Returns
        -------
        v : array-like, shape (n_samples, n_features)
        """
        activations = np.array([
            convTheano(h[:, i, :], self.components_[i], border='full') +
            self.intercept_visible_ for i in range(self.n_groups)
        ]).sum(axis=0)

        visibles = np.array(v)
        windowSize = self.window_size
        visualSize = int(sqrt(v.shape[1]))
        innerSize = visualSize - 2 * windowSize + 2
        n_sample = v.shape[0]
        innerV = logistic_sigmoid(activations)
        innerV = innerV.reshape(
            n_sample, visualSize,
            visualSize)[:, windowSize - 1:visualSize - windowSize + 1,
                        windowSize - 1:visualSize - windowSize + 1]
        visibles = visibles.reshape(n_sample, visualSize, visualSize)

        visibles[:, windowSize - 1:visualSize - windowSize + 1,
                 windowSize - 1:visualSize - windowSize + 1] = innerV
        visibles = visibles.reshape(n_sample, -1)

        return visibles
Example #10
    def score_samples(self, v):
        """Compute the pseudo-likelihood of v.

        Parameters
        ----------
        v : {array-like, sparse matrix} shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        pseudo_likelihood : array-like, shape (n_samples,)
            Value of the pseudo-likelihood (proxy to likelihood).
        """
        rng = check_random_state(self.random_state)
        fe = self._free_energy(v)

        if issparse(v):
            v_ = v.toarray()
        else:
            v_ = v.copy()
        i_ = rng.randint(0, v.shape[1], v.shape[0])
        v_[np.arange(v.shape[0]), i_] = 1 - v_[np.arange(v.shape[0]), i_]
        fe_ = self._free_energy(v_)
        return v.shape[1] * logistic_sigmoid(fe_ - fe, log=True)
Example #11
 def sample(self, rng, act=None, assign=False):
     act = self.value if act is None else act
     # Noisy ReLU: Gaussian noise whose variance is sigmoid(act), then rectify.
     h = act + rng.normal(0, 1, act.shape) * np.sqrt(logistic_sigmoid(act))
     h[h < 0] = 0
     if assign:
         self.value = h
     return h
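This resembles the noisy rectified linear unit of Nair and Hinton: the noise variance sigmoid(act) vanishes for strongly negative pre-activations, so those samples almost always rectify to zero. A standalone run of the same rule, with the sigmoid inlined:

    import numpy as np

    rng = np.random.RandomState(0)
    act = np.array([-5.0, 0.0, 5.0])
    sig = 1.0 / (1.0 + np.exp(-act))              # noise variance per unit
    h = act + rng.normal(0, 1, act.shape) * np.sqrt(sig)
    h[h < 0] = 0                                  # rectification
    print(h)                                      # first entry is almost surely 0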
Example #12
    def predict_proba(self, X):
        scores = self.decision_function(X)

        if len(scores.shape) == 1:
            scores = logistic_sigmoid(scores)
            return np.vstack([1 - scores, scores]).T
        else:
            return _softmax(scores)
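For the one-dimensional (binary) branch, stacking 1 - scores and scores gives one row per sample whose two columns are P(class 0) and P(class 1) and sum to one. A quick check with made-up scores:

    import numpy as np

    scores = np.array([-2.0, 0.0, 3.0])
    p1 = 1.0 / (1.0 + np.exp(-scores))            # logistic_sigmoid(scores)
    proba = np.vstack([1 - p1, p1]).T             # shape (3, 2)
    assert np.allclose(proba.sum(axis=1), 1.0)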
Example #13
    def predict_proba(self, X):
        scores = self.decision_function(X)

        if len(scores.shape) == 1:
            scores = logistic_sigmoid(scores)
            return np.vstack([1 - scores, scores]).T
        else:
            return _softmax(scores)
Example #14
    def _get_hidden_activations(self, X):

        A = safe_sparse_dot(X, self.coef_hidden_)

        A += self.intercept_hidden_

        Z = logistic_sigmoid(A)

        return Z
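safe_sparse_dot is assumed to behave like np.dot while also accepting SciPy sparse matrices; a crude dense-oriented stand-in, only to make the snippet's contract concrete (the real scikit-learn helper covers more cases):

    import numpy as np

    def safe_sparse_dot(a, b):
        # Sparse inputs expose .toarray(); densify, then fall back to np.dot.
        if hasattr(a, "toarray"):
            a = a.toarray()
        if hasattr(b, "toarray"):
            b = b.toarray()
        return np.dot(a, b)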
Example #15
    def _get_hidden_activations(self, X):

        A = safe_sparse_dot(X, self.coef_hidden_)

        A += self.intercept_hidden_

        Z = logistic_sigmoid(A)

        return Z
Example #16
def test_logistic_sigmoid():
    """Check correctness and robustness of logistic sigmoid implementation"""
    naive_logistic = lambda x: 1 / (1 + np.exp(-x))
    naive_log_logistic = lambda x: np.log(naive_logistic(x))

    x = np.linspace(-2, 2, 50)
    with warnings.catch_warnings(record=True):
        assert_array_almost_equal(logistic_sigmoid(x), naive_logistic(x))
    assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))

    extreme_x = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
Example #17
def test_logistic_sigmoid():
    """Check correctness and robustness of logistic sigmoid implementation"""
    naive_logistic = lambda x: 1 / (1 + np.exp(-x))
    naive_log_logistic = lambda x: np.log(naive_logistic(x))

    x = np.linspace(-2, 2, 50)
    with warnings.catch_warnings(record=True):
        assert_array_almost_equal(logistic_sigmoid(x), naive_logistic(x))
    assert_array_almost_equal(log_logistic(x), naive_log_logistic(x))

    extreme_x = np.array([-100., 100.])
    assert_array_almost_equal(log_logistic(extreme_x), [-100, 0])
Example #18
def test_logistic_sigmoid():
    """Check correctness and robustness of logistic sigmoid implementation"""
    naive_logsig = lambda x: 1 / (1 + np.exp(-x))
    naive_log_logsig = lambda x: np.log(naive_logsig(x))

    # Simulate the previous Cython implementations of logistic_sigmoid, based on
    # http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression
    def stable_logsig(x):
        out = np.zeros_like(x)
        positive = x > 0
        negative = x <= 0
        out[positive] = 1. / (1 + np.exp(-x[positive]))
        out[negative] = np.exp(x[negative]) / (1. + np.exp(x[negative]))
        return out

    x = np.linspace(-2, 2, 50)
    assert_array_almost_equal(logistic_sigmoid(x), naive_logsig(x))
    assert_array_almost_equal(logistic_sigmoid(x, log=True),
                              naive_log_logsig(x))
    assert_array_almost_equal(logistic_sigmoid(x), stable_logsig(x),
                              decimal=16)

    extreme_x = np.array([-100, 100], dtype=np.float64)
    assert_array_almost_equal(logistic_sigmoid(extreme_x), [0, 1])
    assert_array_almost_equal(logistic_sigmoid(extreme_x, log=True), [-100, 0])
    assert_array_almost_equal(logistic_sigmoid(extreme_x),
                              stable_logsig(extreme_x),
                              decimal=16)
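The branch-on-sign trick matters because the naive formulas overflow. Shown concretely for the log variant tested above (a small demo, not part of the test suite):

    import numpy as np

    x = -800.0
    with np.errstate(over='ignore', divide='ignore'):
        naive = np.log(1.0 / (1.0 + np.exp(-x)))   # exp(800) overflows -> -inf
    stable = x - np.log1p(np.exp(x))               # -> -800.0, as expected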
Example #20
    def _mean_hiddens(self, v):
        """Computes the probabilities P(h=1|v), i.e. the mean-field values
        of the hidden layer.

        Parameters
        ----------
        v : array of shape (input_size,)
            Treated as 1 x input_size in the dot product below.

        Returns
        -------
        h : array of shape (hidden_size,)
            Corresponding mean field values for the hidden layer.
        """

        return logistic_sigmoid(np.dot(v, self.W) + self.b)
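A tiny worked run under the shapes this snippet assumes (all names invented for the demo): with W of shape (input_size, hidden_size) and b of shape (hidden_size,), the result holds one activation probability per hidden unit.

    import numpy as np

    rng = np.random.RandomState(0)
    v = rng.randint(2, size=4).astype(np.float64)   # input_size = 4
    W = rng.randn(4, 3) * 0.1                       # hidden_size = 3
    b = np.zeros(3)
    h = 1.0 / (1.0 + np.exp(-(np.dot(v, W) + b)))   # shape (3,)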
Example #21
 def _net_probability(self, V):
     """
     Computes the pseudo probability of the current network.
     """
     v_energy = 0
     for k in range(self.n_hiddens):
         v_energy -= (self.hiddens[k] * convolve(V, self.weights[k])).sum()
     h_int_energy = 0
     for k in range(self.n_hiddens):
         h_int_energy -= self.h_intercepts[k].sum() * self.hiddens[k].sum()
     v_int_energy = -self.v_intercept.sum() * V.sum()
     energy = v_energy + h_int_energy + v_int_energy
     print(energy)
     return logistic_sigmoid(-energy)
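The returned value is just the logistic of the negative energy, so lower-energy (more probable) configurations map closer to 1:

    import numpy as np

    for energy in (-3.0, 0.0, 3.0):
        print(energy, 1.0 / (1.0 + np.exp(energy)))   # sigmoid(-energy)
    # -3.0 -> ~0.953, 0.0 -> 0.5, 3.0 -> ~0.047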
Example #22
 def _net_probability(self, V):
     """
     Computes the pseudo probability of the current network.
     """
     v_energy = 0
     for k in range(self.n_hiddens):
         v_energy -= (self.hiddens[k] * convolve(V, self.weights[k])).sum()
     h_int_energy = 0
     for k in range(self.n_hiddens):
         h_int_energy -= self.h_intercepts[k].sum() * self.hiddens[k].sum()
     v_int_energy = -self.v_intercept.sum() * V.sum()
     energy = v_energy + h_int_energy + v_int_energy
     print(energy)
     return logistic_sigmoid(-energy)
Example #23
 def _sample_visibles(self, h, rng):
     """Sample from the distribution P(v|h).

     Parameters
     ----------
     h : array of shape (hidden_size,)
         Treated as 1 x hidden_size in the dot product below.
     rng : RandomState
         Random number generator to use.

     Returns
     -------
     v : array of shape (input_size,)
         Values of the visible layer.
     """
     p = logistic_sigmoid(np.dot(h, self.W.T) + self.c)
     p[rng.uniform(size=p.shape) < p] = 1.
     return np.floor(p, p)
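The last two lines draw Bernoulli samples in place: entries whose uniform draw falls below p are set to exactly 1.0, and np.floor(p, p) then truncates every remaining probability (all strictly below 1) down to 0.0, reusing p as the output buffer. A standalone illustration:

    import numpy as np

    rng = np.random.RandomState(0)
    p = np.array([0.1, 0.5, 0.9])
    p[rng.uniform(size=p.shape) < p] = 1.0
    samples = np.floor(p, p)          # in-place floor; entries now 0.0 or 1.0
    assert set(samples.tolist()) <= {0.0, 1.0}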
Example #24
    def _mean_hiddens(self, v):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        return logistic_sigmoid(safe_sparse_dot(v, self.components_.T)
                                + self.intercept_hidden_)
Example #25
    def _mean_hiddens(self, v):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        return logistic_sigmoid(
            safe_sparse_dot(v, self.components_.T) + self.intercept_hidden_)
Example #26
 def _mean_visibles(self, h):
     """
     Computes the probabilities P(v=1|h).

     Parameters
     ----------
     h : array-like, shape (n_samples, n_groups, n_components)
         Values of the hidden layer.

     Returns
     -------
     v : array-like, shape (n_samples, n_features)
     """
     n_samples = h.shape[0]
     activations = np.array([
         convExpendGroup(h[i], self.components_) + self.intercept_visible_
         for i in range(n_samples)
     ])
     return logistic_sigmoid(activations)
Example #27
    def _mean_hiddens(self, v, k):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        n_samples = v.shape[0]
        activations = np.array([
            conv(v[i, :], self.components_[k]) + self.intercept_hidden_[k]
            for i in range(n_samples)
        ])
        return logistic_sigmoid(activations)
Example #28
 def score_samples(self, v):
     """Computes the pseudo-likelihood of v.

     Returns
     -------
     pseudo_likelihood : array-like, shape (n_samples,)
     """
     v = np.hstack(v)
     fe = self.free_energy(v)

     v_ = v.copy()
     rng = check_random_state(self.random_state)
     i_ = rng.randint(0, v.shape[1], v.shape[0])
     v_[np.arange(v.shape[0]), i_] = 1 - v_[np.arange(v.shape[0]), i_]
     fe_ = self.free_energy(v_)

     return v.shape[1] * logistic_sigmoid(fe_ - fe, log=True)
Example #29
    def _mean_hiddens_theano(self, v):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_groups, n_components)
            Corresponding mean field values for the hidden layer.
        """
        activationsWithoutIntercept = convTheano(v, self.components_)
        activations = np.array([
            activationsWithoutIntercept[:, i, :] + self.intercept_hidden_[i]
            for i in range(self.n_groups)
        ])
        n_samples = v.shape[0]
        return logistic_sigmoid(
            activations.reshape(n_samples * self.n_groups,
                                self.n_components)).reshape(
                                    n_samples, self.n_groups, self.n_components)
Example #30
    def _mean_hiddens(self, v, k):
        """Computes the probabilities P(h=1|v).

        Parameters
        ----------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.

        Returns
        -------
        h : array-like, shape (n_samples, n_components)
            Corresponding mean field values for the hidden layer.
        """
        n_samples = v.shape[0]
        activations = np.array([
            conv(v[i, :], self.components_[k]) + self.intercept_hidden_[k]
            for i in range(n_samples)
        ])
        return logistic_sigmoid(activations)
Example #31
    def predict_proba(self, X):
        """Probability estimates.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples, n_outputs]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        """
        scores = super(DBNClassifier, self).decision_function(X)
        if len(scores.shape) == 1:
            scores = logistic_sigmoid(scores)
            return np.vstack([1 - scores, scores]).T
        else:
            return _softmax(scores)
Example #32
    def predict_proba(self, X):
        """Probability estimates.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples, n_outputs]
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in `self.classes_`.
        """
        scores = super(DBNClassifier, self).decision_function(X)
        if len(scores.shape) == 1:
            scores = logistic_sigmoid(scores)
            return np.vstack([1 - scores, scores]).T
        else:
            return _softmax(scores)
Example #33
 def _mean_visibles(self, h):
     """
     Computes the probabilities P(v=1|h).

     Parameters
     ----------
     h : array-like, shape (n_samples, n_groups, n_components)
         Values of the hidden layer.

     Returns
     -------
     v : array-like, shape (n_samples, n_features)
     """
     n_samples = h.shape[0]
     activations = np.array([
         convExpendGroup(h[i], self.components_) + self.intercept_visible_
         for i in range(n_samples)
     ])
     return logistic_sigmoid(activations)
Example #34
    def _sample_visibles(self, h, rng):
        """Sample from the distribution P(v|h).

        Parameters
        ----------
        h : array-like, shape (n_samples, n_components)
            Values of the hidden layer to sample from.

        rng : RandomState
            Random number generator to use.

        Returns
        -------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.
        """
        p = logistic_sigmoid(np.dot(h, self.components_)
                             + self.intercept_visible_)
        p[rng.uniform(size=p.shape) < p] = 1.
        return np.floor(p, p)
Example #35
    def _sample_visibles(self, h, rng):
        """Sample from the distribution P(v|h).

        Parameters
        ----------
        h : array-like, shape (n_samples, n_components)
            Values of the hidden layer to sample from.

        rng : RandomState
            Random number generator to use.

        Returns
        -------
        v : array-like, shape (n_samples, n_features)
            Values of the visible layer.
        """
        p = logistic_sigmoid(
            np.dot(h, self.components_) + self.intercept_visible_)
        p[rng.uniform(size=p.shape) < p] = 1.
        return np.floor(p, p)
Example #36
    def predict(self, X):
        """Predict using the multi-layer perceptron model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples]
           Predicted target values per element in X.
        """
        X = atleast2d_or_csr(X)
        scores = super(DBNClassifier, self).decision_function(X)
        if len(scores.shape) == 1:
            scores = logistic_sigmoid(scores)
            indices = (scores > 0.5).astype(int)
        else:
            scores = _softmax(scores)
            indices = scores.argmax(axis=1)
        return self._lbin.classes_[indices]
Example #37
    def predict(self, X):
        """Predict using the multi-layer perceptron model

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = [n_samples, n_features]

        Returns
        -------
        array, shape = [n_samples]
           Predicted target values per element in X.
        """
        X = atleast2d_or_csr(X)
        scores = super(DBNClassifier, self).decision_function(X)
        if len(scores.shape) == 1:
            scores = logistic_sigmoid(scores)
            indices = (scores > 0.5).astype(int)
        else:
            scores = _softmax(scores)
            indices = scores.argmax(axis=1)
        return self._lbin.classes_[indices]
Example #38
 def _mean_visible(self, h):
     v = np.zeros(self.v_shape)
     for k in range(self.n_hiddens):
         v += conv2(h[k], self.W[k], 'full')
     return logistic_sigmoid(v + self.b)
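The 'full' mode is what lets each hidden map grow back to the visible shape: an (H, W) map convolved with an (F, F) filter in 'full' mode yields (H + F - 1, W + F - 1). A quick shape check, assuming conv2 behaves like scipy.signal.convolve2d:

    from numpy.random import rand
    from scipy.signal import convolve2d

    h, W = rand(6, 6), rand(3, 3)
    assert convolve2d(h, W, 'full').shape == (8, 8)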