def build_model(self, n_features, W_=None, bh_=None, bv_=None):
    """Create the computational graph.

    Parameters
    ----------

    n_features : int
        Number of units in the input layer.

    W_ : array_like, optional (default = None)
        Weight matrix np array.

    bh_ : array_like, optional (default = None)
        Hidden bias np array.

    bv_ : array_like, optional (default = None)
        Visible bias np array.

    Returns
    -------

    self
    """
    self._create_placeholders(n_features)
    self._create_variables(n_features, W_, bh_, bv_)

    self._create_encode_layer()
    self._create_decode_layer()

    variables = [self.W_, self.bh_, self.bv_]
    regterm = Layers.regularization(variables, self.regtype, self.regcoef)

    self.cost = self.loss.compile(
        self.reconstruction, self.input_data_orig, regterm=regterm)
    self.train_step = self.trainer.compile(self.cost)
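# Hedged sketch, not the library's actual Layers.regularization: a plain
# TensorFlow expression of the kind of penalty term the build_model methods in
# this file attach to their cost. It assumes TF 1.x-style graph code (matching
# the snippets here) and only covers the L2 case; the function name is
# hypothetical.
import tensorflow as tf

def l2_regularization_sketch(variables, regcoef):
    """Sum an L2 penalty over a list of tf.Variable, scaled by regcoef."""
    return tf.multiply(regcoef, tf.add_n([tf.nn.l2_loss(v) for v in variables]))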
def build_model(self, n_features, regtype='none',
                encoding_w=None, encoding_b=None):
    """Create the computational graph for the reconstruction task.

    :param n_features: Number of features
    :param regtype: regularization type
    :param encoding_w: list of weights for the encoding layers.
    :param encoding_b: list of biases for the encoding layers.
    :return: self
    """
    self._create_placeholders(n_features, n_features)

    if encoding_w and encoding_b:
        self.encoding_w_ = encoding_w
        self.encoding_b_ = encoding_b
    else:
        self._create_variables(n_features)

    self._create_encoding_layers()
    self._create_decoding_layers()

    variables = []
    variables.extend(self.encoding_w_)
    variables.extend(self.encoding_b_)
    regterm = Layers.regularization(variables, self.regtype, self.regcoef)

    self.cost = self.loss.compile(
        self.reconstruction, self.input_labels, regterm=regterm)
    self.train_step = self.trainer.compile(self.cost)
def build_model(self, n_features, encoding_w=None, encoding_b=None):
    """Create the computational graph for the reconstruction task.

    :param n_features: Number of features
    :param encoding_w: list of weights for the encoding layers.
    :param encoding_b: list of biases for the encoding layers.
    :return: self
    """
    self._create_placeholders(n_features, n_features)

    if encoding_w and encoding_b:
        self.encoding_w_ = encoding_w
        self.encoding_b_ = encoding_b
    else:
        self._create_variables(n_features)

    self._create_encoding_layers()
    self._create_decoding_layers()

    variables = []
    variables.extend(self.encoding_w_)
    variables.extend(self.encoding_b_)
    regterm = Layers.regularization(variables, self.regtype, self.regcoef)

    self.cost = self.loss.compile(
        self.reconstruction, self.input_labels, regterm=regterm)
    self.train_step = self.trainer.compile(self.cost)
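# Hedged sketch, assumed rather than taken from the library's Loss class: the
# kind of regularized reconstruction cost that self.loss.compile builds in the
# autoencoder graphs above, written as a mean-squared-error term plus the
# optional regularization term. The function name is hypothetical.
import tensorflow as tf

def mse_loss_sketch(reconstruction, reference, regterm=None):
    """Mean squared error between reconstruction and reference, plus regterm."""
    cost = tf.reduce_mean(tf.square(tf.subtract(reconstruction, reference)))
    return cost if regterm is None else tf.add(cost, regterm)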
def build_model(self, n_features, n_classes):
    """Create the computational graph.

    This graph is intended to be created for finetuning,
    i.e. after unsupervised pretraining.

    :param n_features: Number of features.
    :param n_classes: number of classes.
    :return: self
    """
    self._create_placeholders(n_features, n_classes)
    self._create_variables(n_features)

    next_train = self._create_encoding_layers()
    self.mod_y, _, _ = Layers.linear(next_train, n_classes)
    self.layer_nodes.append(self.mod_y)

    self.cost = self.loss.compile(self.mod_y, self.input_labels)
    self.train_step = self.trainer.compile(self.cost)
    self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)
def build_model(self, n_features, regtype='none'):
    """Build the Restricted Boltzmann Machine model in TensorFlow.

    :param n_features: number of features
    :param regtype: regularization type
    :return: self
    """
    self._create_placeholders(n_features)
    self._create_variables(n_features)

    self.encode = self.sample_hidden_from_visible(self.input_data)[0]
    self.reconstruction = self.sample_visible_from_hidden(
        self.encode, n_features)

    hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
        self.input_data, n_features)
    positive = self.compute_positive_association(self.input_data,
                                                 hprob0, hstate0)

    nn_input = vprob

    for step in range(self.gibbs_sampling_steps - 1):
        hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            nn_input, n_features)
        nn_input = vprob

    negative = tf.matmul(tf.transpose(vprob), hprob1)

    self.w_upd8 = self.W.assign_add(
        self.learning_rate * (positive - negative) / self.batch_size)

    self.bh_upd8 = self.bh_.assign_add(
        tf.multiply(self.learning_rate,
                    tf.reduce_mean(tf.subtract(hprob0, hprob1), 0)))

    self.bv_upd8 = self.bv_.assign_add(
        tf.multiply(self.learning_rate,
                    tf.reduce_mean(tf.subtract(self.input_data, vprob), 0)))

    variables = [self.W, self.bh_, self.bv_]
    regterm = Layers.regularization(variables, self.regtype, self.regcoef)

    self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
def build_model(self, n_features, regtype='none'):
    """Build the Restricted Boltzmann Machine model in TensorFlow.

    :param n_features: number of features
    :param regtype: regularization type
    :return: self
    """
    self._create_placeholders(n_features)
    self._create_variables(n_features)

    self.encode = self.sample_hidden_from_visible(self.input_data)[0]
    self.reconstruction = self.sample_visible_from_hidden(
        self.encode, n_features)

    hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
        self.input_data, n_features)
    positive = self.compute_positive_association(self.input_data,
                                                 hprob0, hstate0)

    nn_input = vprob

    for step in range(self.gibbs_sampling_steps - 1):
        hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            nn_input, n_features)
        nn_input = vprob

    negative = tf.matmul(tf.transpose(vprob), hprob1)

    self.w_upd8 = self.W.assign_add(
        self.learning_rate * (positive - negative) / self.batch_size)

    self.bh_upd8 = self.bh_.assign_add(
        tf.multiply(self.learning_rate,
                    tf.reduce_mean(tf.subtract(hprob0, hprob1), 0)))

    self.bv_upd8 = self.bv_.assign_add(
        tf.multiply(self.learning_rate,
                    tf.reduce_mean(tf.subtract(self.input_data, vprob), 0)))

    variables = [self.W, self.bh_, self.bv_]
    regterm = Layers.regularization(variables, self.regtype, self.regcoef)

    self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)
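# Illustrative sketch of the contrastive divergence update performed by the two
# RBM build_model variants above, written in plain NumPy so the math is visible
# outside the TensorFlow graph. All names here are hypothetical; only the update
# rule (positive minus negative associations, averaged over the batch, plus the
# mean probability differences for the biases) mirrors the code above.
import numpy as np

def cd1_update_sketch(v0, W, bh, bv, learning_rate):
    """One CD-1 step on a batch v0 of visible units; returns updated W, bh, bv."""
    sigmoid = lambda x: 1.0 / (1.0 + np.exp(-x))
    h0_prob = sigmoid(v0 @ W + bh)                  # hidden probs given the data
    h0_state = (np.random.rand(*h0_prob.shape) < h0_prob).astype(v0.dtype)
    v1_prob = sigmoid(h0_state @ W.T + bv)          # reconstruction of the visibles
    h1_prob = sigmoid(v1_prob @ W + bh)             # hidden probs given reconstruction
    positive = v0.T @ h0_state                      # data-driven associations
    negative = v1_prob.T @ h1_prob                  # model-driven associations
    batch_size = v0.shape[0]
    W = W + learning_rate * (positive - negative) / batch_size
    bh = bh + learning_rate * np.mean(h0_prob - h1_prob, axis=0)
    bv = bv + learning_rate * np.mean(v0 - v1_prob, axis=0)
    return W, bh, bv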
def build_model(self, n_features, n_classes):
    """Create the computational graph.

    This graph is intended to be created for finetuning,
    i.e. after unsupervised pretraining.

    :param n_features: Number of features.
    :param n_classes: number of classes.
    :return: self
    """
    self._create_placeholders(n_features, n_classes)
    self._create_variables(n_features)

    self.global_step = tf.Variable(0, dtype=tf.int32)
    self.trainer = Trainer(
        self.opt, global_step=self.global_step,
        decay_step=self.decay_step, decay_rate=self.decay_rate,
        learning_rate=self.learning_rate, momentum=self.momentum)

    self.next_train = self._create_encoding_layers()
    self.mod_y, _, _ = Layers.linear(self.next_train, n_classes)
    self.layer_nodes.append(self.mod_y)

    self.cost = self.loss.compile(self.mod_y, self.input_labels)
    self.train_step = self.trainer.compile(self.cost, self.global_step)
    self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)
    self.precision = Evaluation.precision(self.mod_y, self.input_labels)
    self.recall = Evaluation.recall(self.mod_y, self.input_labels)
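# Hedged sketch, assumed rather than taken from the library's Evaluation class:
# an accuracy node of the kind the finetuning graphs above attach, built with
# plain TF 1.x ops on one-hot labels. The function name is hypothetical.
import tensorflow as tf

def accuracy_sketch(mod_y, input_labels):
    """Fraction of rows where the argmax of the model output matches the labels."""
    correct = tf.equal(tf.argmax(mod_y, 1), tf.argmax(input_labels, 1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))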