def test_add_matvec():
    """op.add_matvec must match numpy broadcasting, on both CPU and GPU."""
    X = np.random.randn(3, 4).astype(np.float32)
    bias_row = np.random.randn(4, 1).astype(np.float32)  # broadcast along axis 1
    bias_col = np.random.randn(3, 1).astype(np.float32)  # broadcast along axis 0
    want_row = X + bias_row.T
    want_col = X + bias_col

    # CPU path
    assert_allclose(want_row, op.add_matvec(X, bias_row, 1))
    assert_allclose(want_col, op.add_matvec(X, bias_col, 0))

    # GPU path: upload, run the same op on device, pull the result back
    X_dev = op.to_gpu(X)
    row_dev = op.to_gpu(bias_row)
    col_dev = op.to_gpu(bias_col)
    assert_allclose(want_row, op.to_cpu(op.add_matvec(X_dev, row_dev, 1)))
    assert_allclose(want_col, op.to_cpu(op.add_matvec(X_dev, col_dev, 0)))
def fprop(self, X, stream=None):
    """Forward propagation; returns (and caches) the layer activations.

    NOTE: If we do dropout, X will get mutated. Usually, X == lowerlayer.A.
    Thus, we don't need to multiply by a dropout mask in bprop.
    """
    self.X, self.M = self._corrupt_input(X, stream=stream)
    # Pre-activation: X @ W.T + b, computed in place on the dot result.
    pre = op.dot(self.X, self.W, False, True, stream=stream)
    pre = op.add_matvec(pre, self.b, out=pre, stream=stream)
    self.Z = pre
    self.A = self.func(self.Z, stream=stream)
    return self.A
def _mean_hiddens(self, v):
    """Compute P(h=1|v), the mean-field activations of the hidden layer.

    Parameters
    ----------
    v : array-like, shape (n_samples, n_features)
        Values of the visible layer.

    Returns
    -------
    h : array-like, shape (n_samples, n_components)
        Corresponding mean field values for the hidden layer.
    """
    # sigmoid(v @ W.T + bh), computed in place to avoid extra allocations.
    act = op.dot(v, self.W, False, True)
    act = op.add_matvec(act, self.bh, out=act)
    return op.sigmoid(act, out=act)
def _mean_visibles(self, h):
    """Compute the visible-layer means given hidden values.

    Parameters
    ----------
    h : array-like, shape (n_samples, n_components)
        Values of the hidden layer to sample from.

    Returns
    -------
    v : array-like, shape (n_samples, n_features)
        Values of the visible layer.

    NOTE(review): unlike _mean_hiddens, no sigmoid is applied here — the
    linear activation h @ W + bv is returned directly. Presumably this is
    intentional (e.g. Gaussian visible units); confirm against the model.
    """
    act = op.dot(h, self.W, False, False)
    return op.add_matvec(act, self.bv, out=act)