Code Example #1
    def sample_hidden_from_visible(self,
                                   visible,
                                   pos='first',
                                   status='train',
                                   hidden=None):
        """ Sample the hidden units from the visible units.
        This is the Positive phase of the Contrastive Divergence algorithm.

        :param visible: activations of the visible units

        :return: tuple(hidden probabilities, hidden binary states)
        """
        if status == 'train':
            if pos == 'first':
                # Bottom-up input is doubled, a common DBM pre-training
                # trick to compensate for the missing top-down signal.
                transform_activation = 2 * (tf.matmul(visible, self.W_1) +
                                            self.bh_1)
                hprobs = tf.nn.sigmoid(transform_activation)
                hstates = utils.sample_prob(hprobs, self.hrand)
            else:
                transform_activation = (tf.matmul(visible, self.W_2) +
                                        self.bh_2)
                hprobs = tf.nn.sigmoid(transform_activation)
                hstates = utils.sample_prob(hprobs, self.hrand2)

        else:
            # Inference: combine the contributions that arrive through both
            # weight matrices before squashing and sampling.
            transform_activation = (tf.matmul(visible, self.W_1) +
                                    self.bh_1) + tf.matmul(
                                        hidden, self.W_2) + self.bv_2
            hprobs = tf.nn.sigmoid(transform_activation)
            hstates = utils.sample_prob(hprobs, self.hrand1)

        return hprobs, hstates
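
Most of the examples on this page lean on a `utils.sample_prob` helper to turn activation probabilities into binary states. Its definition is not shown here; a minimal sketch, assuming the common TF 1.x pattern of comparing probabilities against pre-fed uniform noise (the `hrand`/`vrand` placeholders seen in these snippets):

    import tensorflow as tf

    def sample_prob(probs, rand):
        """Bernoulli sampling: 1 where the probability exceeds uniform noise."""
        # sign(probs - rand) is in {-1, 0, +1}; relu clips it to {0, 1}.
        return tf.nn.relu(tf.sign(probs - rand))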
Code Example #2
    def sample_visible_from_hidden(self, hidden, pos='first', status='train'):
        """ Sample the visible units from the hidden units.
        This is the Negative phase of the Contrastive Divergence algorithm.

        :param hidden: activations of the hidden units

        :return: visible probabilities
        """

        if status == 'train':
            if pos == 'first':
                # Gaussian visible units: sample around the linear activation.
                visible_activation = tf.matmul(hidden, tf.transpose(
                    self.W_1)) + self.bv_1
                vprobs = tf.truncated_normal((1, self.num_visible),
                                             mean=visible_activation,
                                             stddev=self.stddev)
            else:
                # Top-down input is doubled for the top RBM during greedy
                # pre-training, mirroring the doubled bottom-up input above.
                visible_activation = 2 * (
                    tf.matmul(hidden, tf.transpose(self.W_2)) + self.bv_2)
                vprobs = tf.nn.sigmoid(visible_activation)
                vprobs = utils.sample_prob(vprobs, self.hrand1)
        else:
            visible_activation = tf.matmul(hidden, tf.transpose(
                self.W_1)) + self.bv_1
            vprobs = tf.truncated_normal((1, self.num_visible),
                                         mean=visible_activation,
                                         stddev=self.stddev)
            # Also push the hidden activations through the second weight
            # matrix and sample binary states from the result.
            hidden_activation = (tf.matmul(hidden, tf.transpose(self.W_2)) +
                                 self.bv_2)
            h2probs = tf.nn.sigmoid(hidden_activation)
            h2states = utils.sample_prob(h2probs, self.hrand2)

            return vprobs, h2states

        return vprobs
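
Together, Examples #1 and #2 form the up and down halves of one Gibbs step. A sketch of how a single reconstruction might be wired and run, assuming TF 1.x sessions and an input placeholder `x` on the instance; `dbn`, `batch`, and `n_hidden` are illustrative names, not from the snippets:

    import numpy as np
    import tensorflow as tf

    # Up pass (positive phase), then down pass (negative phase).
    h1probs, h1states = dbn.sample_hidden_from_visible(dbn.x, pos='first')
    v1probs = dbn.sample_visible_from_hidden(h1states, pos='first')

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        recon = sess.run(v1probs, feed_dict={
            dbn.x: batch,  # (batch_size, num_visible) minibatch
            dbn.hrand: np.random.rand(batch.shape[0], n_hidden),
        })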
Code Example #3
    def sample_hidden_from_visible(self, visible):
        """ Sample the hidden units from the visible units.
        This is the Positive phase of the Contrastive Divergence algorithm.
        :param visible: activations of the visible units
        :return: tuple(hidden probabilities, hidden binary states)
        """

        hprobs = tf.nn.sigmoid(tf.matmul(visible, self.W) + self.bh_)
        hstates = utils.sample_prob(hprobs, self.hrand)

        return hprobs, hstates
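
Example #3 shows only the positive phase. A matching negative-phase method for the same single-layer RBM, sketched from the bin/gauss pattern used in Example #4; the attributes `visible_unit_type`, `num_visible`, and `stddev` are assumed to exist on the class:

    def sample_visible_from_hidden(self, hidden):
        """ Sample the visible units from the hidden units (Negative phase). """
        visible_activation = tf.matmul(hidden, tf.transpose(self.W)) + self.bv_

        if self.visible_unit_type == 'bin':
            # Binary visible units: sigmoid probabilities.
            vprobs = tf.nn.sigmoid(visible_activation)
        elif self.visible_unit_type == 'gauss':
            # Gaussian visible units: sample around the linear activation.
            vprobs = tf.truncated_normal((1, self.num_visible),
                                         mean=visible_activation,
                                         stddev=self.stddev)
        else:
            vprobs = None

        return vprobs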
Code Example #4
    def _create_graph(self):

        # Symbolic variables
        self.x = tf.placeholder('float', [None, self.nvis], name='x-input')

        self.hrand = tf.placeholder('float', [None, self.nhid], name='hrand')
        self.vrand = tf.placeholder('float', [None, self.nvis],
                                    name='vrand-train')

        # Biases
        self.bh_ = tf.Variable(tf.zeros([self.nhid]), name='hidden-bias')
        self.bv_ = tf.Variable(tf.zeros([self.nvis]), name='visible-bias')

        self.W = tf.Variable(tf.random_normal((self.nvis, self.nhid),
                                              mean=0.0,
                                              stddev=0.01),
                             name='weights')

        nn_input = self.x

        # Initialization
        hprobs0 = None
        hprobs = None
        positive = None
        vprobs = None
        hprobs1 = None
        hstates1 = None

        for step in range(self.gibbs_k):

            # Positive Contrastive Divergence phase
            hprobs = tf.nn.sigmoid(tf.matmul(nn_input, self.W) + self.bh_)
            hstates = utils.sample_prob(hprobs, self.hrand)

            # Compute positive associations in step 0
            if step == 0:
                hprobs0 = hprobs  # save the activation probabilities of the first step
                if self.vis_type == 'bin':
                    positive = tf.matmul(tf.transpose(nn_input), hstates)

                elif self.vis_type == 'gauss':
                    positive = tf.matmul(tf.transpose(nn_input), hprobs)

            # Negative Contrastive Divergence phase
            visible_activation = tf.matmul(hprobs, tf.transpose(
                self.W)) + self.bv_

            if self.vis_type == 'bin':
                vprobs = tf.nn.sigmoid(visible_activation)

            elif self.vis_type == 'gauss':
                vprobs = tf.truncated_normal((1, self.nvis),
                                             mean=visible_activation,
                                             stddev=self.stddev)

            # Sample again from the hidden units
            hprobs1 = tf.nn.sigmoid(tf.matmul(vprobs, self.W) + self.bh_)
            hstates1 = utils.sample_prob(hprobs1, self.hrand)

            # Use the reconstructed visible units as input for the next step
            nn_input = vprobs

        negative = tf.matmul(tf.transpose(vprobs), hprobs1)

        self.encode = hprobs  # encoded data

        # CD-k parameter updates: W += lr * (positive - negative), with the
        # analogous mean differences for the two bias vectors.
        self.w_upd8 = self.W.assign_add(self.learning_rate *
                                        (positive - negative))
        self.bh_upd8 = self.bh_.assign_add(
            self.learning_rate * tf.reduce_mean(hprobs0 - hprobs1, 0))
        self.bv_upd8 = self.bv_.assign_add(self.learning_rate *
                                           tf.reduce_mean(self.x - vprobs, 0))

        # Reconstruction RMSE as the monitored cost.
        self.cost = tf.sqrt(tf.reduce_mean(tf.square(self.x - vprobs)))
        _ = tf.scalar_summary("cost", self.cost)  # tf.summary.scalar in TF >= 1.0
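
The graph above only defines the update ops; a training step evaluates them with fresh uniform noise fed to the sampling placeholders. A minimal usage sketch under TF 1.x, where `rbm` is assumed to be an instance that has already run `_create_graph` and `batch` is an illustrative (batch_size, nvis) NumPy array:

    import numpy as np
    import tensorflow as tf

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        updates = [rbm.w_upd8, rbm.bh_upd8, rbm.bv_upd8]
        feed = {
            rbm.x: batch,
            rbm.hrand: np.random.rand(batch.shape[0], rbm.nhid),
            rbm.vrand: np.random.rand(batch.shape[0], rbm.nvis),
        }
        _, cost = sess.run([updates, rbm.cost], feed_dict=feed)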
Code Example #5
    def _build_h2v(self, h, token):
        # Hidden-to-visible pass: probabilities and a binary sample.
        wx_b_ = tf.matmul(h, tf.transpose(self.weights)) + self.visible_bias
        p_v = tf.sigmoid(wx_b_)
        sample_v = sample_prob(p_v, 'sample-visible-%s' % token)
        return p_v, sample_v
Code Example #6
    def _build_v2h(self, v, token):
        # Visible-to-hidden pass: probabilities and a binary sample.
        wx_b = tf.matmul(v, self.weights) + self.hidden_bias
        p_h = tf.sigmoid(wx_b)
        sample_h = sample_prob(p_h, 'sample-hidden-%s' % token)
        return p_h, sample_h
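
Examples #5 and #6 are the two halves of a Gibbs step factored into reusable graph builders. A sketch of how they might compose into a CD-1 chain, assuming an instance `rbm` of the class and an input tensor `v0` (both names are illustrative):

    # One Gibbs step: up to the hidden layer, back down, and up again.
    p_h0, h0 = rbm._build_v2h(v0, token='pos')     # positive phase
    p_v1, v1 = rbm._build_h2v(h0, token='neg')     # reconstruction
    p_h1, h1 = rbm._build_v2h(p_v1, token='neg2')  # hidden given reconstruction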