Example #1
    def build_model(self, n_features, W_=None, bh_=None, bv_=None):
        """Create the computational graph.

        Parameters
        ----------

        n_features : int
            Number of units in the input layer.

        W_ : array_like, optional (default = None)
            Weight matrix (NumPy array).

        bh_ : array_like, optional (default = None)
            Hidden bias vector (NumPy array).

        bv_ : array_like, optional (default = None)
            Visible bias vector (NumPy array).

        Returns
        -------

        self
        """
        self._create_placeholders(n_features)
        self._create_variables(n_features, W_, bh_, bv_)

        self._create_encode_layer()
        self._create_decode_layer()

        variables = [self.W_, self.bh_, self.bv_]
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(
            self.reconstruction, self.input_data_orig, regterm=regterm)
        self.train_step = self.trainer.compile(self.cost)
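For orientation, every variant on this page wires up the same pattern: placeholders, variables, encode/decode layers, a regularized cost, and a train step. Below is a minimal, self-contained TF1-style sketch of that pattern; the layer sizes, sigmoid activations, tied (transposed) decoder weights, plain L2 penalty and SGD trainer are illustrative assumptions, not the library's actual _create_* helpers, Layers.regularization, or trainer.

import tensorflow as tf

n_features, n_hidden = 784, 256          # illustrative sizes

# placeholders (the role of _create_placeholders)
input_data = tf.placeholder(tf.float32, [None, n_features])

# variables (the role of _create_variables); W_, bh_, bv_ as in the snippet above
W_ = tf.Variable(tf.truncated_normal([n_features, n_hidden], stddev=0.1))
bh_ = tf.Variable(tf.zeros([n_hidden]))
bv_ = tf.Variable(tf.zeros([n_features]))

# encode / decode layers
encode = tf.nn.sigmoid(tf.matmul(input_data, W_) + bh_)
reconstruction = tf.nn.sigmoid(tf.matmul(encode, tf.transpose(W_)) + bv_)

# regularized reconstruction cost (stand-in for Layers.regularization + loss.compile)
regcoef = 5e-4
regterm = regcoef * tf.add_n([tf.nn.l2_loss(v) for v in [W_, bh_, bv_]])
cost = tf.reduce_mean(tf.square(reconstruction - input_data)) + regterm

# train step (stand-in for trainer.compile)
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)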
Example #2
    def build_model(self, n_features, regtype='none',
                    encoding_w=None, encoding_b=None):
        """Create the computational graph for the reconstruction task.

        :param n_features: Number of features.
        :param regtype: Regularization type.
        :param encoding_w: List of weights for the encoding layers.
        :param encoding_b: List of biases for the encoding layers.
        :return: self
        """
        self._create_placeholders(n_features, n_features)

        if encoding_w and encoding_b:
            self.encoding_w_ = encoding_w
            self.encoding_b_ = encoding_b
        else:
            self._create_variables(n_features)

        self._create_encoding_layers()
        self._create_decoding_layers()

        variables = []
        variables.extend(self.encoding_w_)
        variables.extend(self.encoding_b_)
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(
            self.reconstruction, self.input_labels, regterm=regterm)
        self.train_step = self.trainer.compile(self.cost)
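The deep-autoencoder variants (this one and the next) differ from Example #1 mainly in that encoding_w_ and encoding_b_ are lists with one entry per encoding layer, optionally injected from a pre-training phase, and the decoder mirrors the encoder. A minimal, self-contained sketch of that layer loop follows; the layer widths, sigmoid activations and tied (transposed) decoder weights are assumptions for illustration, and the library may create separate decoding variables instead.

import tensorflow as tf

n_features = 784
layer_sizes = [512, 256]                 # encoder widths, illustrative
sizes = [n_features] + layer_sizes

input_data = tf.placeholder(tf.float32, [None, n_features])
input_labels = tf.placeholder(tf.float32, [None, n_features])   # reconstruction target

# role of _create_variables when no pretrained encoding_w / encoding_b are passed in
encoding_w_, encoding_b_ = [], []
for n_in, n_out in zip(sizes[:-1], sizes[1:]):
    encoding_w_.append(tf.Variable(tf.truncated_normal([n_in, n_out], stddev=0.1)))
    encoding_b_.append(tf.Variable(tf.zeros([n_out])))

# role of _create_encoding_layers: a stack of sigmoid layers
layer = input_data
for w, b in zip(encoding_w_, encoding_b_):
    layer = tf.nn.sigmoid(tf.matmul(layer, w) + b)
encode = layer

# role of _create_decoding_layers: walk back out with transposed weights
for w, n_out in zip(reversed(encoding_w_), reversed(sizes[:-1])):
    layer = tf.nn.sigmoid(tf.matmul(layer, tf.transpose(w)) + tf.Variable(tf.zeros([n_out])))
reconstruction = layer

# regularize every encoding variable, as in the snippet above
regterm = 1e-4 * tf.add_n([tf.nn.l2_loss(v) for v in encoding_w_ + encoding_b_])
cost = tf.reduce_mean(tf.square(reconstruction - input_labels)) + regterm
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cost)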
Example #3
    def build_model(self, n_features, encoding_w=None, encoding_b=None):
        """Create the computational graph for the reconstruction task.

        :param n_features: Number of features.
        :param encoding_w: List of weights for the encoding layers.
        :param encoding_b: List of biases for the encoding layers.
        :return: self
        """
        self._create_placeholders(n_features, n_features)

        if encoding_w and encoding_b:
            self.encoding_w_ = encoding_w
            self.encoding_b_ = encoding_b
        else:
            self._create_variables(n_features)

        self._create_encoding_layers()
        self._create_decoding_layers()

        variables = []
        variables.extend(self.encoding_w_)
        variables.extend(self.encoding_b_)
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(self.reconstruction,
                                      self.input_labels,
                                      regterm=regterm)
        self.train_step = self.trainer.compile(self.cost)
Example #4
    def build_model(self, n_features, regtype='none'):
        """Build the Restricted Boltzmann Machine model in TensorFlow.

        :param n_features: Number of features.
        :param regtype: Regularization type.
        :return: self
        """
        self._create_placeholders(n_features)
        self._create_variables(n_features)
        self.encode = self.sample_hidden_from_visible(self.input_data)[0]
        self.reconstruction = self.sample_visible_from_hidden(
            self.encode, n_features)

        hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            self.input_data, n_features)
        positive = self.compute_positive_association(self.input_data, hprob0,
                                                     hstate0)

        nn_input = vprob

        for step in range(self.gibbs_sampling_steps - 1):
            hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
                nn_input, n_features)
            nn_input = vprob

        negative = tf.matmul(tf.transpose(vprob), hprob1)

        self.w_upd8 = self.W.assign_add(
            self.learning_rate * (positive - negative) / self.batch_size)

        self.bh_upd8 = self.bh_.assign_add(
            tf.multiply(self.learning_rate,
                        tf.reduce_mean(tf.subtract(hprob0, hprob1), 0)))

        self.bv_upd8 = self.bv_.assign_add(
            tf.multiply(self.learning_rate,
                        tf.reduce_mean(tf.subtract(self.input_data, vprob),
                                       0)))

        variables = [self.W, self.bh_, self.bv_]
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
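Both RBM examples lean on gibbs_sampling_step, sample_hidden_from_visible / sample_visible_from_hidden and compute_positive_association, which amount to standard contrastive-divergence bookkeeping. Here is a minimal, self-contained sketch of those helpers under assumed names and binary units; the library's versions may sample states differently or handle Gaussian visible units.

import tensorflow as tf

def sample_prob(probs):
    # Bernoulli-sample binary states from activation probabilities.
    return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))

def sample_hidden_from_visible(visible, W, bh):
    hprobs = tf.nn.sigmoid(tf.matmul(visible, W) + bh)
    hstates = sample_prob(hprobs)
    return hprobs, hstates

def sample_visible_from_hidden(hidden, W, bv):
    return tf.nn.sigmoid(tf.matmul(hidden, tf.transpose(W)) + bv)

def gibbs_sampling_step(visible, W, bh, bv):
    # visible -> hidden -> reconstructed visible -> hidden again
    hprob0, hstate0 = sample_hidden_from_visible(visible, W, bh)
    vprob = sample_visible_from_hidden(hstate0, W, bv)
    hprob1, hstate1 = sample_hidden_from_visible(vprob, W, bh)
    return hprob0, hstate0, vprob, hprob1, hstate1

def compute_positive_association(visible, hprob0, hstate0):
    # <v h> under the data distribution; probabilities are commonly used for
    # real-valued visible units, sampled states for binary ones.
    return tf.matmul(tf.transpose(visible), hprob0)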
Example #5
    def build_model(self, n_features, regtype='none'):
        """Build the Restricted Boltzmann Machine model in TensorFlow.

        :param n_features: Number of features.
        :param regtype: Regularization type.
        :return: self
        """
        self._create_placeholders(n_features)
        self._create_variables(n_features)
        self.encode = self.sample_hidden_from_visible(self.input_data)[0]
        self.reconstruction = self.sample_visible_from_hidden(
            self.encode, n_features)

        hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            self.input_data, n_features)
        positive = self.compute_positive_association(self.input_data,
                                                     hprob0, hstate0)

        nn_input = vprob

        for step in range(self.gibbs_sampling_steps - 1):
            hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
                nn_input, n_features)
            nn_input = vprob

        negative = tf.matmul(tf.transpose(vprob), hprob1)

        self.w_upd8 = self.W.assign_add(
            self.learning_rate * (positive - negative) / self.batch_size)

        self.bh_upd8 = self.bh_.assign_add(
            tf.multiply(self.learning_rate,
                        tf.reduce_mean(tf.subtract(hprob0, hprob1), 0)))

        self.bv_upd8 = self.bv_.assign_add(
            tf.multiply(self.learning_rate,
                        tf.reduce_mean(tf.subtract(self.input_data, vprob),
                                       0)))

        variables = [self.W, self.bh_, self.bv_]
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
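Unlike the autoencoder examples, the RBM graphs expose w_upd8, bh_upd8 and bv_upd8 as assignment ops rather than a single optimizer train_step, so a training loop runs all three per batch. A hypothetical driver follows; the rbm instance, its constructor and the input_data placeholder name are assumptions based on the snippet above, not shown on this page.

import numpy as np
import tensorflow as tf

# rbm is assumed to be an instance of the class these methods belong to
rbm.build_model(n_features=784)
X = np.random.rand(64, 784).astype(np.float32)   # one toy batch

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    update_ops = [rbm.w_upd8, rbm.bh_upd8, rbm.bv_upd8]
    for epoch in range(10):
        sess.run(update_ops, feed_dict={rbm.input_data: X})
        print(epoch, sess.run(rbm.cost, feed_dict={rbm.input_data: X}))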