Example #1
0
File: sbn.py — Project: yburda/reweighted-ws
    def sample(self, Y):
        """ Given samples from the upper layer Y, sample values from X
            and return them together with their log probability.

        Parameters
        ----------
        Y:      T.tensor
            samples from the upper layer

        Returns
        -------
        X:      T.tensor
            samples from the lower layer
        log_p:  T.tensor
            log-posterior for the samples returned in X
        """
        n_X, = self.get_hyper_params(['n_X'])
        W, b = self.get_model_params(['W', 'b'])

        batch = Y.shape[0]

        # Bernoulli probabilities for each lower-layer unit given Y.
        p = self.sigmoid(T.dot(Y, W) + b)

        # Threshold uniform noise against the probabilities to obtain
        # binary samples.
        noise = theano_rng.uniform((batch, n_X), nstreams=512)
        X = T.cast(noise <= p, dtype=floatX)

        # Elementwise Bernoulli log-likelihood, summed over the units.
        log_prob = (X * T.log(p) + (1 - X) * T.log(1 - p)).sum(axis=1)

        return X, log_prob
Example #2
0
    def sample(self, Y):
        """ Given samples from the upper layer Y, sample values from X
            and return them together with their log-probability.

        Parameters
        ----------
        Y:      T.tensor
            samples from the upper layer

        Returns
        -------
        X:      T.tensor
            samples from the lower layer
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, n_Y, n_hid = self.get_hyper_params(['n_X', 'n_Y', 'n_hid'])
        b, c, W, V, Ub, Uc = self.get_model_params(
            ['b', 'c', 'W', 'V', 'Ub', 'Uc'])

        batch_size = Y.shape[0]
        cond = Y

        #------------------------------------------------------------------
        # Condition the visible and hidden biases on the upper-layer sample.
        b_cond = b + T.dot(cond, Ub)  # shape (batch, n_vis)
        c_cond = c + T.dot(cond, Uc)  # shape (batch, n_hid)

        a_init = c_cond
        post_init = T.zeros([batch_size], dtype=floatX)
        vis_init = T.zeros([batch_size], dtype=floatX)
        rand = theano_rng.uniform((n_X, batch_size), nstreams=512)

        # One scan step per visible unit: compute its Bernoulli probability
        # from the running hidden activation `a`, sample it by thresholding
        # uniform noise, accumulate its log-probability, and fold the sample
        # back into `a` for the next unit (autoregressive, NADE-style).
        def one_iter(Wi, Vi, bi, rand_i, a, vis_i, post):
            hid = self.sigmoid(a)
            pi = self.sigmoid(T.dot(hid, Vi) + bi)
            vis_i = T.cast(rand_i <= pi, floatX)
            post = post + T.log(pi * vis_i + (1 - pi) * (1 - vis_i))
            a = a + T.outer(vis_i, Wi)
            return a, vis_i, post

        [a, vis, post
         ], updates = unrolled_scan(fn=one_iter,
                                    sequences=[W, V.T, b_cond.T, rand],
                                    outputs_info=[a_init, vis_init, post_init],
                                    unroll=self.unroll_scan)
        # The scan draws no new random state updates; everything was
        # pre-generated in `rand`.
        assert len(updates) == 0
        # vis is (n_X, batch) -> transpose to (batch, n_X); post[-1] holds
        # the fully accumulated log-probability.
        return vis.T, post[-1, :]
Example #3
0
    def sample(self, Y):
        """ Given samples from the upper layer Y, draw samples X ~ P(X|Y)
            and return them together with their log-probability.

        Parameters
        ----------
        Y:      T.tensor
            samples from the upper layer

        Returns
        -------
        X:      T.tensor
            samples from the lower layer
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, n_Y, n_hid = self.get_hyper_params(['n_X', 'n_Y', 'n_hid'])
        b, c, W, V, Ub, Uc = self.get_model_params(['b', 'c', 'W', 'V', 'Ub', 'Uc'])

        n_samples = Y.shape[0]

        #------------------------------------------------------------------
        # Visible and hidden biases, conditioned on the upper-layer sample.
        vis_bias = b + T.dot(Y, Ub)     # shape (batch, n_vis)
        hid_bias = c + T.dot(Y, Uc)     # shape (batch, n_hid)

        acc0  = hid_bias
        logp0 = T.zeros([n_samples], dtype=floatX)
        x0    = T.zeros([n_samples], dtype=floatX)
        urand = theano_rng.uniform((n_X, n_samples), nstreams=512)

        # One step per visible unit: sample it from the running hidden
        # activation, accumulate its log-probability, and fold the new
        # sample back into the activation (autoregressive).
        def step(w_col, v_col, b_col, u, acc, x_prev, logp):
            h = self.sigmoid(acc)
            p = self.sigmoid(T.dot(h, v_col) + b_col)
            x = T.cast(u <= p, floatX)
            logp = logp + T.log(p * x + (1 - p) * (1 - x))
            acc = acc + T.outer(x, w_col)
            return acc, x, logp

        outputs, updates = unrolled_scan(
                    fn=step,
                    sequences=[W, V.T, vis_bias.T, urand],
                    outputs_info=[acc0, x0, logp0],
                    unroll=self.unroll_scan
                )
        acc, vis, logp = outputs
        assert len(updates) == 0
        return vis.T, logp[-1, :]
Example #4
0
    def sample(self, Y):
        """ Given samples from the upper layer Y, sample values from X
            and return them together with their log-probability.

        Parameters
        ----------
        Y:      T.tensor
            samples from the upper layer

        Returns
        -------
        X:      T.tensor
            samples from the lower layer
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, n_Y = self.get_hyper_params(['n_X', 'n_Y'])
        b, W, U = self.get_model_params(['b', 'W', 'U'])

        batch_size = Y.shape[0]

        #------------------------------------------------------------------
        # Initial activation: bias plus the contribution from Y; each
        # sampled unit later adds its own contribution via W.

        a_init    = T.dot(Y, U) + T.shape_padleft(b)   # shape (batch, n_vis)
        post_init = T.zeros([batch_size], dtype=floatX)
        x_init    = T.zeros([batch_size], dtype=floatX)
        rand      = theano_rng.uniform((n_X, batch_size), nstreams=512)

        # One step per visible unit i: sample unit i from its current
        # activation column, accumulate its log-probability, and propagate
        # the sample to the remaining units' activations (autoregressive).
        def one_iter(i, Wi, rand_i, a, X, post):
            pi   = self.sigmoid(a[:,i])
            xi   = T.cast(rand_i <= pi, floatX)
            post = post + T.log(pi*xi + (1-pi)*(1-xi))            
            a    = a + T.outer(xi, Wi) 
            return a, xi, post

        [a, X, post], updates = unrolled_scan(
                    fn=one_iter,
                    sequences=[T.arange(n_X), W, rand],
                    outputs_info=[a_init, x_init, post_init],
                    unroll=self.unroll_scan
                )
        # No random-state updates expected: the noise was pre-generated.
        assert len(updates) == 0
        # X is (n_X, batch) -> transpose; post[-1] is the total log-prob.
        return X.T, post[-1,:]
Example #5
0
    def sample(self, n_samples):
        """ Sample from this toplevel module and return X ~ P(X), log(P(X))

        Parameters
        ----------
        n_samples:
            number of samples to drawn

        Returns
        -------
        X:      T.tensor
            samples from this module
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, n_hid = self.get_hyper_params(['n_X', 'n_hid'])
        b, c, W, V = self.get_model_params(['b', 'c', 'W', 'V'])

        #------------------------------------------------------------------
    
        # Hidden activation starts at the hidden bias c for every sample.
        a_init    = T.zeros([n_samples, n_hid]) + T.shape_padleft(c)
        post_init = T.zeros([n_samples], dtype=floatX)
        vis_init  = T.zeros([n_samples], dtype=floatX)
        rand      = theano_rng.uniform((n_X, n_samples), nstreams=512)

        # One step per visible unit: compute its probability from the
        # running hidden activation, sample by thresholding uniform noise,
        # accumulate log-probability, and fold the sample back into the
        # activation for the next unit (autoregressive, NADE-style).
        def one_iter(Wi, Vi, bi, rand_i, a, vis_i, post):
            hid  = self.sigmoid(a)
            pi   = self.sigmoid(T.dot(hid, Vi) + bi)
            vis_i = T.cast(rand_i <= pi, floatX)
            post  = post + T.log(pi*vis_i + (1-pi)*(1-vis_i))
            a     = a + T.outer(vis_i, Wi)
            return a, vis_i, post

        [a, vis, post], updates = unrolled_scan(
                    fn=one_iter,
                    sequences=[W, V.T, b, rand], 
                    outputs_info=[a_init, vis_init, post_init],
                    unroll=self.unroll_scan
                )
        # No random-state updates expected: noise was pre-generated.
        assert len(updates) == 0
        # vis is (n_X, n_samples) -> transpose; post[-1] is the total log-prob.
        return vis.T, post[-1,:]
Example #6
0
    def sample(self, Y):
        """ Given samples from the upper layer Y, draw samples X ~ P(X|Y)
            and return them together with their log-probability.

        Parameters
        ----------
        Y:      T.tensor
            samples from the upper layer

        Returns
        -------
        X:      T.tensor
            samples from the lower layer
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, n_Y = self.get_hyper_params(['n_X', 'n_Y'])
        b, W, U = self.get_model_params(['b', 'W', 'U'])

        n_samples = Y.shape[0]

        #------------------------------------------------------------------
        # Activation conditioned on Y; updated autoregressively below.
        act0  = T.dot(Y, U) + T.shape_padleft(b)   # shape (batch, n_vis)
        logp0 = T.zeros([n_samples], dtype=floatX)
        x0    = T.zeros([n_samples], dtype=floatX)
        urand = theano_rng.uniform((n_X, n_samples), nstreams=512)

        # One step per visible unit: sample unit idx from its activation
        # column, accumulate its log-probability, then propagate the sample
        # to the remaining activations through W.
        def step(idx, w_row, u, act, x_prev, logp):
            p = self.sigmoid(act[:, idx])
            x = T.cast(u <= p, floatX)
            logp = logp + T.log(p * x + (1 - p) * (1 - x))
            act = act + T.outer(x, w_row)
            return act, x, logp

        outs, updates = unrolled_scan(
                    fn=step,
                    sequences=[T.arange(n_X), W, urand],
                    outputs_info=[act0, x0, logp0],
                    unroll=self.unroll_scan
                )
        act, X, logp = outs
        assert len(updates) == 0
        return X.T, logp[-1, :]
Example #7
0
    def sample(self, n_samples):
        """ Sample from this toplevel module and return X ~ P(X), log(P(X))

        Parameters
        ----------
        n_samples:
            number of samples to drawn

        Returns
        -------
        X:      T.tensor
            samples from this module
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, n_hid = self.get_hyper_params(['n_X', 'n_hid'])
        b, c, W, V = self.get_model_params(['b', 'c', 'W', 'V'])

        #------------------------------------------------------------------

        # Hidden activation starts at the hidden bias c for every sample.
        a_init = T.zeros([n_samples, n_hid]) + T.shape_padleft(c)
        post_init = T.zeros([n_samples], dtype=floatX)
        vis_init = T.zeros([n_samples], dtype=floatX)
        rand = theano_rng.uniform((n_X, n_samples), nstreams=512)

        # One step per visible unit: compute its probability from the
        # running hidden activation, sample by thresholding uniform noise,
        # accumulate log-probability, and fold the sample back into the
        # activation for the next unit (autoregressive, NADE-style).
        def one_iter(Wi, Vi, bi, rand_i, a, vis_i, post):
            hid = self.sigmoid(a)
            pi = self.sigmoid(T.dot(hid, Vi) + bi)
            vis_i = T.cast(rand_i <= pi, floatX)
            post = post + T.log(pi * vis_i + (1 - pi) * (1 - vis_i))
            a = a + T.outer(vis_i, Wi)
            return a, vis_i, post

        [a, vis, post
         ], updates = unrolled_scan(fn=one_iter,
                                    sequences=[W, V.T, b, rand],
                                    outputs_info=[a_init, vis_init, post_init],
                                    unroll=self.unroll_scan)
        # No random-state updates expected: noise was pre-generated.
        assert len(updates) == 0
        # vis is (n_X, n_samples) -> transpose; post[-1] is the total log-prob.
        return vis.T, post[-1, :]
Example #8
0
    def sample(self, n_samples):
        """ Sample from this toplevel module and return X ~ P(X), log(P(X))

        Parameters
        ----------
        n_samples:
            number of samples to drawn

        Returns
        -------
        X:      T.tensor
            samples from this module
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, = self.get_hyper_params(['n_X'])
        b, W = self.get_model_params(['b', 'W'])

        #------------------------------------------------------------------

        # Activation starts at the visible bias b for every sample; each
        # sampled unit later adds its own contribution via W.
        a_init    = T.zeros([n_samples, n_X]) + T.shape_padleft(b)
        post_init = T.zeros([n_samples], dtype=floatX)
        x_init    = T.zeros([n_samples], dtype=floatX)
        rand      = theano_rng.uniform((n_X, n_samples), nstreams=512)

        # One step per visible unit i: sample unit i from its current
        # activation column, accumulate its log-probability, and propagate
        # the sample to the remaining units' activations (autoregressive).
        def one_iter(i, Wi, rand_i, a, X, post):
            pi   = self.sigmoid(a[:,i])
            xi   = T.cast(rand_i <= pi, floatX)
            post = post + T.log(pi*xi + (1-pi)*(1-xi))            
            a    = a + T.outer(xi, Wi) 
            return a, xi, post

        [a, X, post], updates = unrolled_scan(
                    fn=one_iter,
                    sequences=[T.arange(n_X), W, rand],
                    outputs_info=[a_init, x_init, post_init],
                    unroll=self.unroll_scan
                )
        # No random-state updates expected: noise was pre-generated.
        assert len(updates) == 0
        # X is (n_X, n_samples) -> transpose; post[-1] is the total log-prob.
        return X.T, post[-1,:]
Example #9
0
    def sample(self, n_samples):
        """ Sample from this toplevel module and return X ~ P(X), log(P(X))

        Parameters
        ----------
        n_samples:
            number of samples to drawn

        Returns
        -------
        X:      T.tensor
            samples from this module
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, = self.get_hyper_params(['n_X'])
        b, W = self.get_model_params(['b', 'W'])

        #------------------------------------------------------------------
        # Activation starts at the visible bias; each sampled unit adds
        # its own contribution via W as sampling proceeds.
        act0  = T.zeros([n_samples, n_X]) + T.shape_padleft(b)
        logp0 = T.zeros([n_samples], dtype=floatX)
        x0    = T.zeros([n_samples], dtype=floatX)
        urand = theano_rng.uniform((n_X, n_samples), nstreams=512)

        # One step per visible unit: sample unit idx from its activation
        # column, accumulate its log-probability, then propagate the sample
        # to the remaining activations through W.
        def step(idx, w_row, u, act, x_prev, logp):
            p = self.sigmoid(act[:, idx])
            x = T.cast(u <= p, floatX)
            logp = logp + T.log(p * x + (1 - p) * (1 - x))
            act = act + T.outer(x, w_row)
            return act, x, logp

        outs, updates = unrolled_scan(
                    fn=step,
                    sequences=[T.arange(n_X), W, urand],
                    outputs_info=[act0, x0, logp0],
                    unroll=self.unroll_scan
                )
        act, X, logp = outs
        assert len(updates) == 0
        return X.T, logp[-1, :]
Example #10
0
File: sbn.py — Project: yburda/reweighted-ws
    def sample(self, n_samples):
        """ Sample from this toplevel module and return X ~ P(X), log(P(X))

        Parameters
        ----------
        n_samples:
            number of samples to drawn

        Returns
        -------
        X:      T.tensor
            samples from this module
        log_p:  T.tensor
            log-probabilities for the samples returned in X
        """
        n_X, = self.get_hyper_params(['n_X'])
        a, = self.get_model_params(['a'])

        # Factorized Bernoulli probabilities from the bias parameter a;
        # sample by thresholding uniform noise.
        p = self.sigmoid(a)
        noise = theano_rng.uniform((n_samples, n_X), nstreams=512)
        X = T.cast(noise <= p, dtype=floatX)

        # Delegate the log-probability computation to log_prob().
        return X, self.log_prob(X)