Code example #1
    def __init__(self,
                 layers,
                 original_shape,
                 name='convnet',
                 loss_func='softmax_cross_entropy',
                 num_epochs=10,
                 batch_size=10,
                 opt='sgd',
                 learning_rate=0.01,
                 momentum=0.5,
                 dropout=0.5,
                 batch_norm=False):
        """Constructor.

        :param layers: string used to build the model.
            This string is a comma-separated specification of the layers.
            Supported values:
                conv2d-FX-FY-Z-S: 2d convolution with Z feature maps as output
                    and FX x FY filters. S is the strides size
                maxpool-X: max pooling on the previous layer. X is the size of
                    the max pooling
                full-X: fully connected layer with X units
                softmax: softmax layer
            For example:
                conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-128,full-128,softmax

        :param original_shape: original shape of the images in the dataset
        :param dropout: Dropout parameter
        """
        print("correct cnn file")
        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.opt = opt
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.momentum = momentum

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(opt,
                               learning_rate=learning_rate,
                               momentum=momentum)

        self.layers = layers
        self.original_shape = original_shape
        self.dropout = dropout

        self.batch_norm = batch_norm

        self.W_vars = None
        self.B_vars = None

        self.accuracy = None
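
The layers string described in the docstring above is the only thing that defines the network topology. A minimal, self-contained sketch of how such a specification decomposes (the concrete string below is illustrative; the parsing mirrors _create_layers in code example #8, which splits on commas and then on dashes):

spec = 'conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-128,full-128,softmax'
for node in spec.split(','):
    fields = node.split('-')
    print(fields[0], fields[1:])
# conv2d ['5', '5', '32', '1']  -> 5x5 filters, 32 output feature maps, stride 1
# maxpool ['2']                 -> 2x2 max pooling
# full ['128']                  -> fully connected layer with 128 units
# softmax []                    -> final softmax layer
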
Code example #2
    def __init__(self,
                 num_hidden,
                 visible_unit_type='bin',
                 name='rbm',
                 loss_func='mse',
                 learning_rate=0.01,
                 regcoef=5e-4,
                 regtype='none',
                 gibbs_sampling_steps=1,
                 batch_size=10,
                 num_epochs=10,
                 stddev=0.1):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
Code example #3
    def __init__(self, name='lr', loss_func='cross_entropy',
                 learning_rate=0.01, num_epochs=10, batch_size=10):
        """Constructor."""
        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size

        self.loss = Loss(self.loss_func)

        # Computational graph nodes
        self.input_data = None
        self.input_labels = None

        self.W_ = None
        self.b_ = None

        self.accuracy = None
Code example #4
    def __init__(
        self, layers, original_shape, name='convnet',
        loss_func='softmax_cross_entropy', num_epochs=10, batch_size=10,
            opt='sgd', learning_rate=0.01, momentum=0.5, dropout=0.5):
        """Constructor.

        :param layers: string used to build the model.
            This string is a comma-separated specification of the layers.
            Supported values:
                conv2d-FX-FY-Z-S: 2d convolution with Z feature maps as output
                    and FX x FY filters. S is the strides size
                maxpool-X: max pooling on the previous layer. X is the size of
                    the max pooling
                full-X: fully connected layer with X units
                softmax: softmax layer
            For example:
                conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-128,full-128,softmax

        :param original_shape: original shape of the images in the dataset
        :param dropout: Dropout parameter
        """
        assert layers.split(",")[-1] == "softmax"

        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.opt = opt
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.momentum = momentum

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            opt, learning_rate=learning_rate,
            momentum=momentum)

        self.layers = layers
        self.original_shape = original_shape
        self.dropout = dropout

        self.W_vars = None
        self.B_vars = None

        self.accuracy = None
Code example #5
    def __init__(
        self, num_hidden, visible_unit_type='bin',
        name='rbm', loss_func='mse', learning_rate=0.01,
        regcoef=5e-4, regtype='none', gibbs_sampling_steps=1,
            batch_size=10, num_epochs=10, stddev=0.1):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
Code example #6
    def __init__(self, name='lr', loss_func='cross_entropy',
                 learning_rate=0.01, num_epochs=10, batch_size=10):
        """Constructor."""
        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size

        self.loss = Loss(self.loss_func)

        # Computational graph nodes
        self.input_data = None
        self.input_labels = None

        self.W_ = None
        self.b_ = None

        self.accuracy = None
Code example #7
class RBM(UnsupervisedModel):
    """Restricted Boltzmann Machine implementation using TensorFlow.

    The interface of the class is sklearn-like.
    """
    def __init__(self,
                 num_hidden,
                 visible_unit_type='bin',
                 name='rbm',
                 loss_func='mse',
                 learning_rate=0.01,
                 regcoef=5e-4,
                 regtype='none',
                 gibbs_sampling_steps=1,
                 batch_size=10,
                 num_epochs=10,
                 stddev=0.1):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None

    def _train_model(self,
                     train_set,
                     validation_set,
                     train_ref=None,
                     validation_ref=None):
        """Train the model.

        :param train_set: training set
        :param validation_set: validation set. optional, default None
        :return: self
        """
        pbar = tqdm(list(range(self.num_epochs)))
        for i in pbar:
            self._run_train_step(train_set)

            if validation_set is not None:
                feed = self._create_feed_dict(validation_set)
                err = tf_utils.run_summaries(self.tf_session,
                                             self.tf_merged_summaries,
                                             self.tf_summary_writer, i, feed,
                                             self.cost)
                pbar.set_description("Reconstruction loss: %s" % (err))

    def _run_train_step(self, train_set):
        """Run a training step.

        A training step is made by randomly shuffling the training set,
        dividing it into batches and running the variable update nodes
        for each batch.
        :param train_set: training set
        :return: self
        """
        np.random.shuffle(train_set)

        batches = [
            _ for _ in utilities.gen_batches(train_set, self.batch_size)
        ]
        updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]

        for batch in batches:
            self.tf_session.run(updates,
                                feed_dict=self._create_feed_dict(batch))

    def _create_feed_dict(self, data):
        """Create the dictionary of data to feed to tf session during training.

        :param data: training/validation set batch
        :return: dictionary(self.input_data: data, self.hrand: random_uniform,
                            self.vrand: random_uniform)
        """
        return {
            self.input_data: data,
            self.hrand: np.random.rand(data.shape[0], self.num_hidden),
            self.vrand: np.random.rand(data.shape[0], data.shape[1])
        }

    def build_model(self, n_features, regtype='none'):
        """Build the Restricted Boltzmann Machine model in TensorFlow.

        :param n_features: number of features
        :param regtype: regularization type
        :return: self
        """
        self._create_placeholders(n_features)
        self._create_variables(n_features)
        self.encode = self.sample_hidden_from_visible(self.input_data)[0]
        self.reconstruction = self.sample_visible_from_hidden(
            self.encode, n_features)

        hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            self.input_data, n_features)
        positive = self.compute_positive_association(self.input_data, hprob0,
                                                     hstate0)

        nn_input = vprob

        for step in range(self.gibbs_sampling_steps - 1):
            hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
                nn_input, n_features)
            nn_input = vprob

        negative = tf.matmul(tf.transpose(vprob), hprob1)

        self.w_upd8 = self.W.assign_add(
            self.learning_rate * (positive - negative) / self.batch_size)

        self.bh_upd8 = self.bh_.assign_add(
            tf.multiply(self.learning_rate,
                        tf.reduce_mean(tf.subtract(hprob0, hprob1), 0)))

        self.bv_upd8 = self.bv_.assign_add(
            tf.multiply(self.learning_rate,
                        tf.reduce_mean(tf.subtract(self.input_data, vprob),
                                       0)))

        variables = [self.W, self.bh_, self.bv_]
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)

    def _create_placeholders(self, n_features):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features
        :return: self
        """
        self.input_data = tf.placeholder(tf.float32, [None, n_features],
                                         name='x-input')
        self.hrand = tf.placeholder(tf.float32, [None, self.num_hidden],
                                    name='hrand')
        self.vrand = tf.placeholder(tf.float32, [None, n_features],
                                    name='vrand')
        # not used in this model, created just to comply with
        # unsupervised_model.py
        self.input_labels = tf.placeholder(tf.float32)
        self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')

    def _create_variables(self, n_features):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :return: self
        """
        w_name = 'weights'
        self.W = tf.Variable(tf.truncated_normal(
            shape=[n_features, self.num_hidden], stddev=0.1),
                             name=w_name)
        tf.summary.histogram(w_name, self.W)

        bh_name = 'hidden-bias'
        self.bh_ = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]),
                               name=bh_name)
        tf.summary.histogram(bh_name, self.bh_)

        bv_name = 'visible-bias'
        self.bv_ = tf.Variable(tf.constant(0.1, shape=[n_features]),
                               name=bv_name)
        tf.summary.histogram(bv_name, self.bv_)

    def gibbs_sampling_step(self, visible, n_features):
        """Perform one step of gibbs sampling.

        :param visible: activations of the visible units
        :param n_features: number of features
        :return: tuple(hidden probs, hidden states, visible probs,
                       new hidden probs, new hidden states)
        """
        hprobs, hstates = self.sample_hidden_from_visible(visible)
        vprobs = self.sample_visible_from_hidden(hprobs, n_features)
        hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)

        return hprobs, hstates, vprobs, hprobs1, hstates1

    def sample_hidden_from_visible(self, visible):
        """Sample the hidden units from the visible units.

        This is the Positive phase of the Contrastive Divergence algorithm.

        :param visible: activations of the visible units
        :return: tuple(hidden probabilities, hidden binary states)
        """
        hprobs = tf.nn.sigmoid(tf.add(tf.matmul(visible, self.W), self.bh_))
        hstates = utilities.sample_prob(hprobs, self.hrand)

        return hprobs, hstates

    def sample_visible_from_hidden(self, hidden, n_features):
        """Sample the visible units from the hidden units.

        This is the Negative phase of the Contrastive Divergence algorithm.
        :param hidden: activations of the hidden units
        :param n_features: number of features
        :return: visible probabilities
        """
        visible_activation = tf.add(tf.matmul(hidden, tf.transpose(self.W)),
                                    self.bv_)

        if self.visible_unit_type == 'bin':
            vprobs = tf.nn.sigmoid(visible_activation)

        elif self.visible_unit_type == 'gauss':
            vprobs = tf.truncated_normal((1, n_features),
                                         mean=visible_activation,
                                         stddev=self.stddev)

        else:
            vprobs = None

        return vprobs

    def compute_positive_association(self, visible, hidden_probs,
                                     hidden_states):
        """Compute positive associations between visible and hidden units.

        :param visible: visible units
        :param hidden_probs: hidden units probabilities
        :param hidden_states: hidden units states
        :return: positive association = dot(visible.T, hidden)
        """
        if self.visible_unit_type == 'bin':
            positive = tf.matmul(tf.transpose(visible), hidden_states)

        elif self.visible_unit_type == 'gauss':
            positive = tf.matmul(tf.transpose(visible), hidden_probs)

        else:
            positive = None

        return positive

    def load_model(self, shape, gibbs_sampling_steps, model_path):
        """Load a trained model from disk.

        The shape of the model (num_visible, num_hidden) and the number
        of Gibbs sampling steps must be known in order to restore the model.
        :param shape: tuple(num_visible, num_hidden)
        :param gibbs_sampling_steps:
        :param model_path:
        :return: self
        """
        n_features, self.num_hidden = shape[0], shape[1]
        self.gibbs_sampling_steps = gibbs_sampling_steps

        self.build_model(n_features)

        init_op = tf.global_variables_initializer()
        self.tf_saver = tf.train.Saver()

        with tf.Session() as self.tf_session:

            self.tf_session.run(init_op)
            self.tf_saver.restore(self.tf_session, model_path)

    def get_parameters(self, graph=None):
        """Return the model parameters in the form of numpy arrays.

        :param graph: tf graph object
        :return: model parameters
        """
        g = graph if graph is not None else self.tf_graph

        with g.as_default():
            with tf.Session() as self.tf_session:
                self.tf_saver.restore(self.tf_session, self.model_path)

                return {
                    'W': self.W.eval(),
                    'bh_': self.bh_.eval(),
                    'bv_': self.bv_.eval()
                }
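
A minimal training sketch for the RBM class above. It assumes that the UnsupervisedModel base class (not shown in this excerpt) provides a sklearn-like fit method that calls build_model and _train_model internally; the data and hyperparameters below are illustrative only:

import numpy as np

train_X = np.random.rand(500, 784)   # toy data scaled to [0, 1]
val_X = np.random.rand(100, 784)

r = RBM(num_hidden=250,
        visible_unit_type='bin',     # use 'gauss' (with stddev) for real-valued inputs
        gibbs_sampling_steps=1,      # CD-1
        learning_rate=0.01,
        num_epochs=5,
        batch_size=32)
# Assumed base-class entry point (signature not shown in this excerpt):
# r.fit(train_X, val_X)
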
Code example #8
class ConvolutionalNetwork(SupervisedModel):
    """Implementation of Convolutional Neural Networks using TensorFlow.

    The interface of the class is sklearn-like.
    """
    def __init__(self,
                 layers,
                 original_shape,
                 name='convnet',
                 loss_func='softmax_cross_entropy',
                 num_epochs=10,
                 batch_size=10,
                 opt='sgd',
                 learning_rate=0.01,
                 momentum=0.5,
                 dropout=0.5):
        """Constructor.

        :param layers: string used to build the model.
            This string is a comma-separated specification of the layers.
            Supported values:
                conv2d-FX-FY-Z-S: 2d convolution with Z feature maps as output
                    and FX x FY filters. S is the strides size
                maxpool-X: max pooling on the previous layer. X is the size of
                    the max pooling
                full-X: fully connected layer with X units
                softmax: softmax layer
            For example:
                conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-128,full-128,softmax

        :param original_shape: original shape of the images in the dataset
        :param dropout: Dropout parameter
        """
        assert layers.split(",")[-1] == "softmax"

        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.opt = opt
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.momentum = momentum

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(opt,
                               learning_rate=learning_rate,
                               momentum=momentum)

        self.layers = layers
        self.original_shape = original_shape
        self.dropout = dropout

        self.W_vars = None
        self.B_vars = None

        self.accuracy = None

    def _train_model(self, train_set, train_labels, validation_set,
                     validation_labels):
        """Train the model.

        :param train_set: training set
        :param train_labels: training labels
        :param validation_set: validation set
        :param validation_labels: validation labels
        :return: self
        """
        shuff = list(zip(train_set, train_labels))

        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            np.random.shuffle(shuff)
            batches = [
                _ for _ in utilities.gen_batches(shuff, self.batch_size)
            ]

            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.tf_session.run(self.train_step,
                                    feed_dict={
                                        self.input_data: x_batch,
                                        self.input_labels: y_batch,
                                        self.keep_prob: self.dropout
                                    })

            if validation_set is not None:
                feed = {
                    self.input_data: validation_set,
                    self.input_labels: validation_labels,
                    self.keep_prob: 1
                }
                acc = tf_utils.run_summaries(self.tf_session,
                                             self.tf_merged_summaries,
                                             self.tf_summary_writer, i, feed,
                                             self.accuracy)
                pbar.set_description("Accuracy: %s" % (acc))

    def build_model(self, n_features, n_classes):
        """Create the computational graph of the model.

        :param n_features: Number of features.
        :param n_classes: number of classes.
        :return: self
        """
        self._create_placeholders(n_features, n_classes)
        self._create_layers(n_classes)

        self.cost = self.loss.compile(self.mod_y, self.input_labels)
        self.train_step = self.trainer.compile(self.cost)
        self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features of the first layer
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(tf.float32, [None, n_features],
                                         name='x-input')
        self.input_labels = tf.placeholder(tf.float32, [None, n_classes],
                                           name='y-input')
        self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')

    def _create_layers(self, n_classes):
        """Create the layers of the model from self.layers.

        :param n_classes: number of classes
        :return: self
        """
        next_layer_feed = tf.reshape(self.input_data, [
            -1, self.original_shape[0], self.original_shape[1],
            self.original_shape[2]
        ])
        prev_output_dim = self.original_shape[2]
        # this flag indicates whether we are building the first dense layer
        first_full = True

        self.W_vars = []
        self.B_vars = []

        for i, l in enumerate(self.layers.split(',')):

            node = l.split('-')
            node_type = node[0]

            if node_type == 'conv2d':

                # ################### #
                # Convolutional Layer #
                # ################### #

                # fx, fy = shape of the convolutional filter
                # feature_maps = number of output dimensions
                fx, fy, feature_maps, stride = (
                    int(node[1]), int(node[2]), int(node[3]), int(node[4]))

                print('Building Convolutional layer with %d input channels '
                      'and %d %dx%d filters with stride %d' %
                      (prev_output_dim, feature_maps, fx, fy, stride))

                # Create weights and biases
                W_conv = self.weight_variable(
                    [fx, fy, prev_output_dim, feature_maps])
                b_conv = self.bias_variable([feature_maps])
                self.W_vars.append(W_conv)
                self.B_vars.append(b_conv)

                # Convolution and Activation function
                h_conv = tf.nn.relu(
                    self.conv2d(next_layer_feed, W_conv, stride) + b_conv)

                # keep track of the number of output dims of the previous layer
                prev_output_dim = feature_maps
                # output node of the last layer
                next_layer_feed = h_conv

            elif node_type == 'maxpool':

                # ################# #
                # Max Pooling Layer #
                # ################# #

                ksize = int(node[1])

                print('Building Max Pooling layer with size %d' % ksize)

                next_layer_feed = self.max_pool(next_layer_feed, ksize)

            elif node_type == 'full':

                # ####################### #
                # Densely Connected Layer #
                # ####################### #

                if first_full:  # first fully connected layer

                    dim = int(node[1])
                    shp = next_layer_feed.get_shape()
                    tmpx = shp[1].value
                    tmpy = shp[2].value
                    fanin = tmpx * tmpy * prev_output_dim

                    print('Building fully connected layer with %d in units '
                          'and %d out units' % (fanin, dim))

                    W_fc = self.weight_variable([fanin, dim])
                    b_fc = self.bias_variable([dim])
                    self.W_vars.append(W_fc)
                    self.B_vars.append(b_fc)

                    h_pool_flat = tf.reshape(next_layer_feed, [-1, fanin])
                    h_fc = tf.nn.relu(
                        tf.add(tf.matmul(h_pool_flat, W_fc), b_fc))
                    h_fc_drop = tf.nn.dropout(h_fc, self.keep_prob)

                    prev_output_dim = dim
                    next_layer_feed = h_fc_drop

                    first_full = False

                else:  # not first fully connected layer

                    dim = int(node[1])
                    W_fc = self.weight_variable([prev_output_dim, dim])
                    b_fc = self.bias_variable([dim])
                    self.W_vars.append(W_fc)
                    self.B_vars.append(b_fc)

                    h_fc = tf.nn.relu(
                        tf.add(tf.matmul(next_layer_feed, W_fc), b_fc))
                    h_fc_drop = tf.nn.dropout(h_fc, self.keep_prob)

                    prev_output_dim = dim
                    next_layer_feed = h_fc_drop

            elif node_type == 'softmax':

                # ############# #
                # Softmax Layer #
                # ############# #

                print('Building softmax layer with %d in units and '
                      '%d out units' % (prev_output_dim, n_classes))

                W_sm = self.weight_variable([prev_output_dim, n_classes])
                b_sm = self.bias_variable([n_classes])
                self.W_vars.append(W_sm)
                self.B_vars.append(b_sm)

                self.mod_y = tf.add(tf.matmul(next_layer_feed, W_sm), b_sm)

    @staticmethod
    def weight_variable(shape):
        """Create a weight variable."""
        initial = tf.truncated_normal(shape=shape, stddev=0.1)
        return tf.Variable(initial)

    @staticmethod
    def bias_variable(shape):
        """Create a bias variable."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    @staticmethod
    def conv2d(x, W, stride):
        """2D Convolution operation."""
        return tf.nn.conv2d(x,
                            W,
                            strides=[1, stride, stride, 1],
                            padding='SAME')

    @staticmethod
    def max_pool(x, dim):
        """Max pooling operation."""
        return tf.nn.max_pool(x,
                              ksize=[1, dim, dim, 1],
                              strides=[1, dim, dim, 1],
                              padding='SAME')
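
A hedged usage sketch for the ConvolutionalNetwork class above. It assumes the SupervisedModel base class (not shown here) exposes a sklearn-like fit(train_X, train_Y, val_X, val_Y) method that calls build_model and _train_model; the layers string must end with 'softmax', as enforced by the assert in the constructor:

import numpy as np

train_X = np.random.rand(1000, 784)                     # flattened 28x28x1 images
train_Y = np.eye(10)[np.random.randint(0, 10, 1000)]    # one-hot labels

cnn = ConvolutionalNetwork(
    layers='conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-1024,softmax',
    original_shape=[28, 28, 1],
    num_epochs=5,
    batch_size=64,
    learning_rate=0.01,
    dropout=0.5)
# Assumed base-class entry point:
# cnn.fit(train_X, train_Y, val_X, val_Y)
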
Code example #9
class StackedDenoisingAutoencoder(SupervisedModel):
    """Implementation of Stacked Denoising Autoencoders using TensorFlow.

    The interface of the class is sklearn-like.
    """

    def __init__(
        self, layers, name='sdae',
        enc_act_func=[tf.nn.tanh], dec_act_func=[None],
        loss_func=['cross_entropy'], num_epochs=[10], batch_size=[10],
        opt=['sgd'], regcoef=[5e-4], learning_rate=[0.01], momentum=0.5,
        finetune_dropout=1, corr_type=['none'], corr_frac=[0.],
        finetune_loss_func='softmax_cross_entropy',
        finetune_act_func=tf.nn.relu, finetune_opt='sgd',
        finetune_learning_rate=0.001, finetune_num_epochs=10,
            finetune_batch_size=20, do_pretrain=False):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'softmax_cross_entropy'.
            Supported values: ['softmax_cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function or else
        # other local variables will also be sent to expanded_args().
        # expand_args() takes every parameter that was passed as a list and
        # repeats its values to match the number of layers whenever the list
        # is shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        SupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            finetune_opt, learning_rate=finetune_learning_rate,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.finetune_act_func = finetune_act_func

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.last_W = None
        self.last_b = None

        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    name=self.name + '-' + dae_str,
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    opt=expanded_args['opt'][l],
                    regcoef=expanded_args['regcoef'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())

    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_parameters(graph=autoencgraph)
            self.encoding_w_.append(params['enc_w'])
            self.encoding_b_.append(params['enc_b'])

        return SupervisedModel.pretrain_procedure(
            self, self.autoencoders, self.autoencoder_graphs,
            set_params_func=set_params_func, train_set=train_set,
            validation_set=validation_set)

    def _train_model(self, train_set, train_labels,
                     validation_set, validation_labels):
        """Train the model.

        :param train_set: training set
        :param train_labels: training labels
        :param validation_set: validation set
        :param validation_labels: validation labels
        :return: self
        """
        shuff = list(zip(train_set, train_labels))

        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            np.random.shuffle(shuff)

            batches = [_ for _ in utilities.gen_batches(
                shuff, self.batch_size)]

            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.tf_session.run(
                    self.train_step,
                    feed_dict={self.input_data: x_batch,
                               self.input_labels: y_batch,
                               self.keep_prob: self.dropout})

            if validation_set is not None:
                feed = {self.input_data: validation_set,
                        self.input_labels: validation_labels,
                        self.keep_prob: 1}
                acc = tf_utils.run_summaries(
                    self.tf_session, self.tf_merged_summaries,
                    self.tf_summary_writer, i, feed, self.accuracy)
                pbar.set_description("Accuracy: %s" % (acc))

    def build_model(self, n_features, n_classes):
        """Create the computational graph.

        This graph is intended to be created for finetuning,
        i.e. after unsupervised pretraining.
        :param n_features: Number of features.
        :param n_classes: number of classes.
        :return: self
        """
        self._create_placeholders(n_features, n_classes)
        self._create_variables(n_features)

        next_train = self._create_encoding_layers()
        self.mod_y, _, _ = Layers.linear(next_train, n_classes)
        self.layer_nodes.append(self.mod_y)

        self.cost = self.loss.compile(self.mod_y, self.input_labels)
        self.train_step = self.trainer.compile(self.cost)
        self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features of the first layer
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(
            tf.float32, [None, n_features], name='x-input')

        self.input_labels = tf.placeholder(
            tf.float32, [None, n_classes], name='y-input')

        self.keep_prob = tf.placeholder(
            tf.float32, name='keep-probs')

    def _create_variables(self, n_features):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :return: self
        """
        if self.do_pretrain:
            self._create_variables_pretrain()
        else:
            self._create_variables_no_pretrain(n_features)

    def _create_variables_no_pretrain(self, n_features):
        """Create model variables (no previous unsupervised pretraining).

        :param n_features: number of features
        :return: self
        """
        self.encoding_w_ = []
        self.encoding_b_ = []

        for l, layer in enumerate(self.layers):

            w_name = 'enc-w-{}'.format(l)
            b_name = 'enc-b-{}'.format(l)

            if l == 0:
                w_shape = [n_features, self.layers[l]]
            else:
                w_shape = [self.layers[l - 1], self.layers[l]]

            w_init = tf.truncated_normal(shape=w_shape, stddev=0.1)
            W = tf.Variable(w_init, name=w_name)
            tf.summary.histogram(w_name, W)
            self.encoding_w_.append(W)

            b_init = tf.constant(0.1, shape=[self.layers[l]])
            b = tf.Variable(b_init, name=b_name)
            tf.summary.histogram(b_name, b)
            self.encoding_b_.append(b)

    def _create_variables_pretrain(self):
        """Create model variables (previous unsupervised pretraining).

        :return: self
        """
        for l, layer in enumerate(self.layers):
            w_name = 'enc-w-{}'.format(l)
            b_name = 'enc-b-{}'.format(l)

            self.encoding_w_[l] = tf.Variable(
                self.encoding_w_[l], name=w_name)
            tf.summary.histogram(w_name, self.encoding_w_[l])

            self.encoding_b_[l] = tf.Variable(
                self.encoding_b_[l], name=b_name)
            tf.summary.histogram(b_name, self.encoding_b_[l])

    def _create_encoding_layers(self):
        """Create the encoding layers for supervised finetuning.

        :return: output of the final encoding layer.
        """
        next_train = self.input_data
        self.layer_nodes = []

        for l, layer in enumerate(self.layers):

            with tf.name_scope("encode-{}".format(l)):

                y_act = tf.add(
                    tf.matmul(next_train, self.encoding_w_[l]),
                    self.encoding_b_[l]
                )

                if self.finetune_act_func:
                    layer_y = self.finetune_act_func(y_act)
                else:
                    layer_y = y_act  # no activation function

                # the input to the next layer is the output of this layer
                next_train = tf.nn.dropout(layer_y, self.keep_prob)

            self.layer_nodes.append(next_train)

        return next_train
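
A hedged usage sketch for the StackedDenoisingAutoencoder class above. pretrain() is defined in the class; the supervised fit() call is assumed to come from the SupervisedModel base class, which is not part of this excerpt. Single-element lists are expanded to one value per layer by expand_args, as noted in the constructor:

sdae = StackedDenoisingAutoencoder(
    layers=[512, 256, 128],          # hidden units of each stacked autoencoder
    enc_act_func=[tf.nn.tanh],       # expanded to all three layers
    corr_type=['masking'],
    corr_frac=[0.3],
    num_epochs=[10],
    finetune_num_epochs=10,
    do_pretrain=True)
# sdae.pretrain(train_X, val_X)               # greedy layer-wise pretraining
# sdae.fit(train_X, train_Y, val_X, val_Y)    # assumed base-class finetuning call
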
Code example #10
class DeepBeliefNetwork(SupervisedModel):
    """Implementation of Deep Belief Network for Supervised Learning.

    The interface of the class is sklearn-like.
    """
    def __init__(self,
                 rbm_layers,
                 name='dbn',
                 do_pretrain=False,
                 rbm_num_epochs=[10],
                 rbm_gibbs_k=[1],
                 rbm_gauss_visible=False,
                 rbm_stddev=0.1,
                 rbm_batch_size=[10],
                 rbm_learning_rate=[0.01],
                 finetune_dropout=1,
                 finetune_loss_func='softmax_cross_entropy',
                 finetune_act_func=tf.nn.sigmoid,
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10,
                 finetune_batch_size=20,
                 momentum=0.5):
        """Constructor.

        :param rbm_layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'softmax_cross_entropy'.
            Supported values: ['softmax_cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        SupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = rbm_layers
        self.finetune_act_func = finetune_act_func

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.softmax_W = None
        self.softmax_b = None

        rbm_params = {
            'num_epochs': rbm_num_epochs,
            'gibbs_k': rbm_gibbs_k,
            'batch_size': rbm_batch_size,
            'learning_rate': rbm_learning_rate
        }

        for p in rbm_params:
            if len(rbm_params[p]) != len(rbm_layers):
                # This parameter was not specified per layer by the user,
                # so use its first value for all the layers
                rbm_params[p] = [rbm_params[p][0] for _ in rbm_layers]

        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(rbm_layers):
            rbm_str = 'rbm-' + str(l + 1)

            if l == 0 and rbm_gauss_visible:
                self.rbms.append(
                    rbm.RBM(name=self.name + '-' + rbm_str,
                            num_hidden=layer,
                            learning_rate=rbm_params['learning_rate'][l],
                            num_epochs=rbm_params['num_epochs'][l],
                            batch_size=rbm_params['batch_size'][l],
                            gibbs_sampling_steps=rbm_params['gibbs_k'][l],
                            visible_unit_type='gauss',
                            stddev=rbm_stddev))

            else:
                self.rbms.append(
                    rbm.RBM(name=self.name + '-' + rbm_str,
                            num_hidden=layer,
                            learning_rate=rbm_params['learning_rate'][l],
                            num_epochs=rbm_params['num_epochs'][l],
                            batch_size=rbm_params['batch_size'][l],
                            gibbs_sampling_steps=rbm_params['gibbs_k'][l]))

            self.rbm_graphs.append(tf.Graph())

    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the DBN."""
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return SupervisedModel.pretrain_procedure(
            self,
            self.rbms,
            self.rbm_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set)

    def _train_model(self, train_set, train_labels, validation_set,
                     validation_labels):
        """Train the model.

        :param train_set: training set
        :param train_labels: training labels
        :param validation_set: validation set
        :param validation_labels: validation labels
        :return: self
        """
        shuff = list(zip(train_set, train_labels))

        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            np.random.shuffle(shuff)
            batches = [
                _ for _ in utilities.gen_batches(shuff, self.batch_size)
            ]

            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.tf_session.run(self.train_step,
                                    feed_dict={
                                        self.input_data: x_batch,
                                        self.input_labels: y_batch,
                                        self.keep_prob: self.dropout
                                    })

            if validation_set is not None:
                feed = {
                    self.input_data: validation_set,
                    self.input_labels: validation_labels,
                    self.keep_prob: 1
                }
                acc = tf_utils.run_summaries(self.tf_session,
                                             self.tf_merged_summaries,
                                             self.tf_summary_writer, i, feed,
                                             self.accuracy)
                pbar.set_description("Accuracy: %s" % (acc))

    def build_model(self, n_features, n_classes):
        """Create the computational graph.

        This graph is intended to be created for finetuning,
        i.e. after unsupervised pretraining.
        :param n_features: Number of features.
        :param n_classes: number of classes.
        :return: self
        """
        self._create_placeholders(n_features, n_classes)
        self._create_variables(n_features)

        next_train = self._create_encoding_layers()
        self.mod_y, _, _ = Layers.linear(next_train, n_classes)
        self.layer_nodes.append(self.mod_y)

        self.cost = self.loss.compile(self.mod_y, self.input_labels)
        self.train_step = self.trainer.compile(self.cost)
        self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features of the first layer
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(tf.float32, [None, n_features],
                                         name='x-input')

        self.input_labels = tf.placeholder(tf.float32, [None, n_classes],
                                           name='y-input')

        self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')

    def _create_variables(self, n_features):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :return: self
        """
        if self.do_pretrain:
            self._create_variables_pretrain()
        else:
            self._create_variables_no_pretrain(n_features)

    def _create_variables_no_pretrain(self, n_features):
        """Create model variables (no previous unsupervised pretraining).

        :param n_features: number of features
        :return: self
        """
        self.encoding_w_ = []
        self.encoding_b_ = []

        for l, layer in enumerate(self.layers):

            w_name = 'enc-w-{}'.format(l)
            b_name = 'enc-b-{}'.format(l)

            if l == 0:
                w_shape = [n_features, self.layers[l]]
            else:
                w_shape = [self.layers[l - 1], self.layers[l]]

            w_init = tf.truncated_normal(shape=w_shape, stddev=0.1)
            W = tf.Variable(w_init, name=w_name)
            tf.summary.histogram(w_name, W)
            self.encoding_w_.append(W)

            b_init = tf.constant(0.1, shape=[self.layers[l]])
            b = tf.Variable(b_init, name=b_name)
            tf.summary.histogram(b_name, b)
            self.encoding_b_.append(b)

    def _create_variables_pretrain(self):
        """Create model variables (previous unsupervised pretraining).

        :return: self
        """
        for l, layer in enumerate(self.layers):

            w_name = 'enc-w-{}'.format(l)
            b_name = 'enc-b-{}'.format(l)

            self.encoding_w_[l] = tf.Variable(self.encoding_w_[l], name=w_name)
            tf.summary.histogram(w_name, self.encoding_w_[l])

            self.encoding_b_[l] = tf.Variable(self.encoding_b_[l], name=b_name)
            tf.summary.histogram(b_name, self.encoding_b_[l])

    def _create_encoding_layers(self):
        """Create the encoding layers for supervised finetuning.

        :return: output of the final encoding layer.
        """
        next_train = self.input_data
        self.layer_nodes = []

        for l, layer in enumerate(self.layers):

            with tf.name_scope("encode-{}".format(l)):

                y_act = tf.add(tf.matmul(next_train, self.encoding_w_[l]),
                               self.encoding_b_[l])

                if self.finetune_act_func:
                    layer_y = self.finetune_act_func(y_act)
                else:
                    layer_y = y_act  # no activation function

                # the input to the next layer is the output of this layer
                next_train = tf.nn.dropout(layer_y, self.keep_prob)

            self.layer_nodes.append(next_train)

        return next_train
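
A hedged usage sketch for the DeepBeliefNetwork class above, mirroring the SDAE example: pretrain() stacks the RBMs created in the constructor, while the supervised fit() call is assumed to be provided by the SupervisedModel base class (not shown in this excerpt):

dbn = DeepBeliefNetwork(
    rbm_layers=[500, 250],            # hidden units of the stacked RBMs
    rbm_num_epochs=[10],              # single-element lists apply to every layer
    rbm_gibbs_k=[1],
    rbm_gauss_visible=False,          # True (with rbm_stddev) for real-valued inputs
    finetune_act_func=tf.nn.sigmoid,
    finetune_num_epochs=10,
    do_pretrain=True)
# dbn.pretrain(train_X, val_X)                # unsupervised RBM stacking
# dbn.fit(train_X, train_Y, val_X, val_Y)     # assumed base-class finetuning call
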
Code example #11
class DenoisingAutoencoder(UnsupervisedModel):
    """Implementation of Denoising Autoencoders using TensorFlow.

    The interface of the class is sklearn-like.
    """

    def __init__(
        self, n_components, name='dae', loss_func='mse',
        enc_act_func=tf.nn.tanh, dec_act_func=None, num_epochs=10,
        batch_size=10, opt='sgd', learning_rate=0.01, momentum=0.9,
            corr_type='none', corr_frac=0., regtype='none', regcoef=5e-4):
        """Constructor.

        Parameters
        ----------

        n_components : int
            Number of hidden units.

        name : str, optional (default = "dae")
            Model name (used for save/load from disk).

        loss_func : str, optional (default = "mse")
            Loss function. ['mse', 'cross_entropy']

        enc_act_func : tf.nn.[activation]
            Activation function for the encoder.

        dec_act_func : tf.nn.[activation]
            Activation function for the decoder.

        num_epochs : int, optional (default = 10)
            Number of epochs.

        batch_size : int, optional (default = 10)
            Size of each mini-batch.

        opt : str, optional (default = "sgd")
            Which tensorflow optimizer to use.
            Possible values: ['sgd', 'momentum', 'adagrad', 'adam']

        learning_rate : float, optional (default = 0.01)
            Initial learning rate.

        momentum : float, optional (default = 0.9)
            Momentum parameter (only used if opt = "momentum").

        corr_type : str, optional (default = "none")
            Type of input corruption.
            Can be one of: ["none", "masking", "salt_and_pepper"]

        corr_frac : float, optional (default = 0.0)
            Fraction of the input to corrupt.

        regtype : str, optional (default = "none")
            Type of regularization to apply.
            Can be one of: ["none", "l1", "l2"].

        regcoef : float, optional (default = 5e-4)
            Regularization parameter. If 0, no regularization.
            Only considered if regtype != "none".
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.opt = opt
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.momentum = momentum
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            opt, learning_rate=learning_rate, momentum=momentum)

        self.n_components = n_components
        self.enc_act_func = enc_act_func
        self.dec_act_func = dec_act_func
        self.corr_type = corr_type
        self.corr_frac = corr_frac

        self.input_data_orig = None
        self.input_data = None

        self.W_ = None
        self.bh_ = None
        self.bv_ = None

    def _train_model(self, train_X, train_Y=None, val_X=None, val_Y=None):
        """Train the model.

        Parameters
        ----------

        train_X : array_like
            Training data, shape (num_samples, num_features).

        train_Y : array_like, optional (default = None)
            Reference training data, shape (num_samples, num_features).

        val_X : array_like, optional, default None
            Validation data, shape (num_val_samples, num_features).

        val_Y : array_like, optional, default None
            Reference validation data, shape (num_val_samples, num_features).

        Returns
        -------

        self : trained model instance
        """
        pbar = tqdm(range(self.num_epochs))
        for i in pbar:
            self._run_train_step(train_X)
            if val_X is not None:
                feed = {self.input_data_orig: val_X,
                        self.input_data: val_X}
                err = tf_utils.run_summaries(
                    self.tf_session, self.tf_merged_summaries,
                    self.tf_summary_writer, i, feed, self.cost)
                pbar.set_description("Reconstruction loss: %s" % (err))
        return self

    def _run_train_step(self, train_X):
        """Run a training step.

        A training step is made by randomly corrupting the training set,
        randomly shuffling it, dividing it into batches and running the
        optimizer for each batch.

        Parameters
        ----------

        train_X : array_like
            Training data, shape (num_samples, num_features).

        Returns
        -------

        self
        """
        x_corrupted = utilities.corrupt_input(
            train_X, self.tf_session, self.corr_type, self.corr_frac)

        shuff = list(zip(train_X, x_corrupted))
        np.random.shuffle(shuff)

        batches = [_ for _ in utilities.gen_batches(shuff, self.batch_size)]

        for batch in batches:
            x_batch, x_corr_batch = zip(*batch)
            tr_feed = {self.input_data_orig: x_batch,
                       self.input_data: x_corr_batch}
            self.tf_session.run(self.train_step, feed_dict=tr_feed)

    def build_model(self, n_features, W_=None, bh_=None, bv_=None):
        """Create the computational graph.

        Parameters
        ----------

        n_features : int
            Number of units in the input layer.

        W_ : array_like, optional (default = None)
            Weight matrix np array.

        bh_ : array_like, optional (default = None)
            Hidden bias np array.

        bv_ : array_like, optional (default = None)
            Visible bias np array.

        Returns
        -------

        self
        """
        self._create_placeholders(n_features)
        self._create_variables(n_features, W_, bh_, bv_)

        self._create_encode_layer()
        self._create_decode_layer()

        variables = [self.W_, self.bh_, self.bv_]
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(
            self.reconstruction, self.input_data_orig, regterm=regterm)
        self.train_step = self.trainer.compile(self.cost)

    def _create_placeholders(self, n_features):
        """Create the TensorFlow placeholders for the model.

        :return: self
        """
        self.input_data_orig = tf.placeholder(
            tf.float32, [None, n_features], name='x-input')
        self.input_data = tf.placeholder(
            tf.float32, [None, n_features], name='x-corr-input')
        # not used in this model, created just to comply
        # with unsupervised_model.py
        self.input_labels = tf.placeholder(tf.float32)
        self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')

    def _create_variables(self, n_features, W_=None, bh_=None, bv_=None):
        """Create the TensorFlow variables for the model.

        :return: self
        """
        if W_ is not None:
            self.W_ = tf.Variable(W_, name='enc-w')
        else:
            self.W_ = tf.Variable(
                tf.truncated_normal(
                    shape=[n_features, self.n_components], stddev=0.1),
                name='enc-w')

        if bh_ is not None:
            self.bh_ = tf.Variable(bh_, name='hidden-bias')
        else:
            self.bh_ = tf.Variable(tf.constant(
                0.1, shape=[self.n_components]), name='hidden-bias')

        if bv_ is not None:
            self.bv_ = tf.Variable(bv_, name='visible-bias')
        else:
            self.bv_ = tf.Variable(tf.constant(
                0.1, shape=[n_features]), name='visible-bias')

    def _create_encode_layer(self):
        """Create the encoding layer of the network.

        Returns
        -------

        self
        """
        with tf.name_scope("encoder"):

            activation = tf.add(
                tf.matmul(self.input_data, self.W_),
                self.bh_
            )

            if self.enc_act_func:
                self.encode = self.enc_act_func(activation)
            else:
                self.encode = activation

            return self

    def _create_decode_layer(self):
        """Create the decoding layer of the network.

        Returns
        -------

        self
        """
        with tf.name_scope("decoder"):

            activation = tf.add(
                tf.matmul(self.encode, tf.transpose(self.W_)),
                self.bv_
            )

            if self.dec_act_func:
                self.reconstruction = self.dec_act_func(activation)
            else:
                self.reconstruction = activation

            return self

    def get_parameters(self, graph=None):
        """Return the model parameters in the form of numpy arrays.

        Parameters
        ----------

        graph : tf.Graph, optional (default = None)
            Tensorflow graph object.

        Returns
        -------

        dict : model parameters dictionary.
        """
        g = graph if graph is not None else self.tf_graph

        with g.as_default():
            with tf.Session() as self.tf_session:
                self.tf_saver.restore(self.tf_session, self.model_path)

                return {
                    'enc_w': self.W_.eval(),
                    'enc_b': self.bh_.eval(),
                    'dec_b': self.bv_.eval()
                }
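
The corruption step in _run_train_step above delegates to utilities.corrupt_input. For reference only, the snippet below is a minimal NumPy sketch of masking-noise corruption; it assumes masking noise and is not the library's implementation (the real utility also supports other corruption types and runs inside the TensorFlow session).

import numpy as np

def masking_noise_sketch(X, corr_frac, rng=None):
    """Zero out a random fraction of each sample's features (masking noise).

    Illustrative stand-in only, not utilities.corrupt_input.
    """
    rng = rng or np.random.default_rng()
    X_corr = X.copy()
    n_corrupt = int(round(corr_frac * X.shape[1]))
    for row in X_corr:
        idx = rng.choice(X.shape[1], n_corrupt, replace=False)
        row[idx] = 0.0
    return X_corr

# Example: corrupt 30% of the features of two dummy samples.
X = np.random.rand(2, 10)
print(masking_noise_sketch(X, 0.3))
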
コード例 #12
0
class RBM(UnsupervisedModel):
    """Restricted Boltzmann Machine implementation using TensorFlow.

    The interface of the class is sklearn-like.
    """

    def __init__(
        self, num_hidden, visible_unit_type='bin',
        name='rbm', loss_func='mse', learning_rate=0.01,
        regcoef=5e-4, regtype='none', gibbs_sampling_steps=1,
            batch_size=10, num_epochs=10, stddev=0.1):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_function: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None

    def _train_model(self, train_set, train_ref=None, validation_set=None,
                     validation_ref=None):
        """Train the model.

        :param train_set: training set
        :param train_ref: training reference data used to balance the
            mini-batches. optional, default None
        :param validation_set: validation set. optional, default None
        :param validation_ref: validation reference data. optional, default
            None (not used by this model)
        :return: self
        """
        pbar = tqdm(range(self.num_epochs))
        for i in pbar:
            self._run_train_step(train_set, train_ref)

            if validation_set is not None:
                feed = self._create_feed_dict(validation_set)
                err = tf_utils.run_summaries(
                    self.tf_session, self.tf_merged_summaries,
                    self.tf_summary_writer, i, feed, self.cost)
                pbar.set_description("Reconstruction loss: %s" % (err))

    def _run_train_step(self, train_set, train_ref):
        """Run a training step.

        A training step splits the training set into buggy and clean samples
        according to train_ref, then repeatedly draws class-balanced
        mini-batches (half clean, half buggy) and runs the variable update
        nodes on each batch.
        :param train_set: training set
        :param train_ref: reference labels used to split the training set
        :return: self
        """
        # Split samples into buggy and clean according to the reference labels.
        clean_set = []
        buggy_set = []
        for i in range(len(train_ref)):
            if train_ref[i][0] == 1:
                buggy_set.append(train_set[i])
            else:
                clean_set.append(train_set[i])

        # Draw class-balanced mini-batches: half clean, half buggy samples.
        for i in range(0, len(clean_set), self.batch_size//2):
            np.random.shuffle(clean_set)
            np.random.shuffle(buggy_set)
            clean_batch = clean_set[:self.batch_size//2]
            buggy_batch = buggy_set[:self.batch_size//2]
            batch = clean_batch + buggy_batch
            batch = np.array(batch)
            updates = [self.w_upd8, self.bh_upd8, self.bv_upd8]

            self.tf_session.run(updates,
                                feed_dict=self._create_feed_dict(batch))

    def _create_feed_dict(self, data):
        """Create the dictionary of data to feed to tf session during training.

        :param data: training/validation set batch
        :return: dictionary(self.input_data: data, self.hrand: random_uniform,
                            self.vrand: random_uniform)
        """
        return {
            self.input_data: data,
            self.hrand: np.random.rand(data.shape[0], self.num_hidden),
            self.vrand: np.random.rand(data.shape[0], data.shape[1])
        }

    def build_model(self, n_features, regtype='none'):
        """Build the Restricted Boltzmann Machine model in TensorFlow.

        :param n_features: number of features
        :param regtype: regularization type
        :return: self
        """
        self._create_placeholders(n_features)
        self._create_variables(n_features)
        self.encode = self.sample_hidden_from_visible(self.input_data)[0]
        self.reconstruction = self.sample_visible_from_hidden(
            self.encode, n_features)

        hprob0, hstate0, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
            self.input_data, n_features)
        positive = self.compute_positive_association(self.input_data,
                                                     hprob0, hstate0)

        nn_input = vprob

        for step in range(self.gibbs_sampling_steps - 1):
            hprob, hstate, vprob, hprob1, hstate1 = self.gibbs_sampling_step(
                nn_input, n_features)
            nn_input = vprob

        negative = tf.matmul(tf.transpose(vprob), hprob1)

        self.w_upd8 = self.W.assign_add(
            self.learning_rate * (positive - negative) / self.batch_size)

        self.bh_upd8 = self.bh_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
            tf.subtract(hprob0, hprob1), 0)))

        self.bv_upd8 = self.bv_.assign_add(tf.multiply(self.learning_rate, tf.reduce_mean(
            tf.subtract(self.input_data, vprob), 0)))

        variables = [self.W, self.bh_, self.bv_]
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(vprob, self.input_data, regterm=regterm)

    def _create_placeholders(self, n_features):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features
        :return: self
        """
        self.input_data = tf.placeholder(tf.float32, [None, n_features],
                                         name='x-input')
        self.hrand = tf.placeholder(tf.float32, [None, self.num_hidden],
                                    name='hrand')
        self.vrand = tf.placeholder(tf.float32, [None, n_features],
                                    name='vrand')
        # not used in this model, created just to comply with
        # unsupervised_model.py
        self.input_labels = tf.placeholder(tf.float32)
        self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')

    def _create_variables(self, n_features):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :return: self
        """
        w_name = 'weights'
        self.W = tf.Variable(tf.truncated_normal(
            shape=[n_features, self.num_hidden], stddev=0.1), name=w_name)
        tf.summary.histogram(w_name, self.W)

        bh_name = 'hidden-bias'
        self.bh_ = tf.Variable(tf.constant(0.1, shape=[self.num_hidden]),
                               name=bh_name)
        tf.summary.histogram(bh_name, self.bh_)

        bv_name = 'visible-bias'
        self.bv_ = tf.Variable(tf.constant(0.1, shape=[n_features]),
                               name=bv_name)
        tf.summary.histogram(bv_name, self.bv_)

    def gibbs_sampling_step(self, visible, n_features):
        """Perform one step of gibbs sampling.

        :param visible: activations of the visible units
        :param n_features: number of features
        :return: tuple(hidden probs, hidden states, visible probs,
                       new hidden probs, new hidden states)
        """
        hprobs, hstates = self.sample_hidden_from_visible(visible)
        vprobs = self.sample_visible_from_hidden(hprobs, n_features)
        hprobs1, hstates1 = self.sample_hidden_from_visible(vprobs)

        return hprobs, hstates, vprobs, hprobs1, hstates1

    def sample_hidden_from_visible(self, visible):
        """Sample the hidden units from the visible units.

        This is the Positive phase of the Contrastive Divergence algorithm.

        :param visible: activations of the visible units
        :return: tuple(hidden probabilities, hidden binary states)
        """
        hprobs = tf.nn.sigmoid(tf.add(tf.matmul(visible, self.W), self.bh_))
        hstates = utilities.sample_prob(hprobs, self.hrand)

        return hprobs, hstates

    def sample_visible_from_hidden(self, hidden, n_features):
        """Sample the visible units from the hidden units.

        This is the Negative phase of the Contrastive Divergence algorithm.
        :param hidden: activations of the hidden units
        :param n_features: number of features
        :return: visible probabilities
        """
        visible_activation = tf.add(
            tf.matmul(hidden, tf.transpose(self.W)),
            self.bv_
        )

        if self.visible_unit_type == 'bin':
            vprobs = tf.nn.sigmoid(visible_activation)

        elif self.visible_unit_type == 'gauss':
            vprobs = tf.truncated_normal(
                (1, n_features), mean=visible_activation, stddev=self.stddev)

        else:
            vprobs = None

        return vprobs

    def compute_positive_association(self, visible,
                                     hidden_probs, hidden_states):
        """Compute positive associations between visible and hidden units.

        :param visible: visible units
        :param hidden_probs: hidden units probabilities
        :param hidden_states: hidden units states
        :return: positive association = dot(visible.T, hidden)
        """
        if self.visible_unit_type == 'bin':
            positive = tf.matmul(tf.transpose(visible), hidden_states)

        elif self.visible_unit_type == 'gauss':
            positive = tf.matmul(tf.transpose(visible), hidden_probs)

        else:
            positive = None

        return positive

    def load_model(self, shape, gibbs_sampling_steps, model_path):
        """Load a trained model from disk.

        The shape of the model (num_visible, num_hidden) and the number
        of gibbs sampling steps must be known in order to restore the model.
        :param shape: tuple(num_visible, num_hidden)
        :param gibbs_sampling_steps: number of gibbs sampling steps
        :param model_path: path to the stored model
        :return: self
        """
        n_features, self.num_hidden = shape[0], shape[1]
        self.gibbs_sampling_steps = gibbs_sampling_steps

        self.build_model(n_features)

        init_op = tf.global_variables_initializer()
        self.tf_saver = tf.train.Saver()

        with tf.Session() as self.tf_session:

            self.tf_session.run(init_op)
            self.tf_saver.restore(self.tf_session, model_path)

    def get_parameters(self, graph=None):
        """Return the model parameters in the form of numpy arrays.

        :param graph: tf graph object
        :return: model parameters
        """
        g = graph if graph is not None else self.tf_graph

        with g.as_default():
            with tf.Session() as self.tf_session:
                self.tf_saver.restore(self.tf_session, self.model_path)

                return {
                    'W': self.W.eval(),
                    'bh_': self.bh_.eval(),
                    'bv_': self.bv_.eval()
                }
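
For reference, the contrastive-divergence update that build_model wires into w_upd8, bh_upd8 and bv_upd8 can be written out directly. The NumPy sketch below mirrors those update rules for binary visible units (CD-1); it is illustrative only and simplifies away the hrand/vrand sampling placeholders.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def cd1_update_sketch(v0, W, bh, bv, lr=0.01):
    """One CD-1 parameter update for binary visible/hidden units (sketch)."""
    h0_prob = sigmoid(v0 @ W + bh)
    h0_state = (h0_prob > np.random.rand(*h0_prob.shape)).astype(float)
    v1_prob = sigmoid(h0_state @ W.T + bv)
    h1_prob = sigmoid(v1_prob @ W + bh)

    positive = v0.T @ h0_state       # positive association ('bin' units)
    negative = v1_prob.T @ h1_prob   # negative association after one Gibbs step

    W += lr * (positive - negative) / v0.shape[0]
    bh += lr * np.mean(h0_prob - h1_prob, axis=0)
    bv += lr * np.mean(v0 - v1_prob, axis=0)
    return W, bh, bv
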
コード例 #13
0
class LogisticRegression(SupervisedModel):
    """Simple Logistic Regression using TensorFlow.

    The interface of the class is sklearn-like.
    """

    def __init__(self, name='lr', loss_func='cross_entropy',
                 learning_rate=0.01, num_epochs=10, batch_size=10):
        """Constructor."""
        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size

        self.loss = Loss(self.loss_func)

        # Computational graph nodes
        self.input_data = None
        self.input_labels = None

        self.W_ = None
        self.b_ = None

        self.accuracy = None

    def build_model(self, n_features, n_classes):
        """Create the computational graph.

        :param n_features: number of features
        :param n_classes: number of classes
        :return: self
        """
        self._create_placeholders(n_features, n_classes)
        self._create_variables(n_features, n_classes)

        self.mod_y = tf.nn.softmax(
            tf.add(tf.matmul(self.input_data, self.W_), self.b_))

        self.cost = self.loss.compile(self.mod_y, self.input_labels)
        self.train_step = tf.train.GradientDescentOptimizer(
            self.learning_rate).minimize(self.cost)
        self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(
            tf.float32, [None, n_features], name='x-input')
        self.input_labels = tf.placeholder(
            tf.float32, [None, n_classes], name='y-input')
        self.keep_prob = tf.placeholder(
            tf.float32, name='keep-probs')

    def _create_variables(self, n_features, n_classes):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :param n_classes: number of classes
        :return: self
        """
        self.W_ = tf.Variable(
            tf.zeros([n_features, n_classes]), name='weights')
        self.b_ = tf.Variable(
            tf.zeros([n_classes]), name='biases')

    def _train_model(self, train_set, train_labels,
                     validation_set, validation_labels):
        """Train the model.

        :param train_set: training set
        :param train_labels: training labels
        :param validation_set: validation set
        :param validation_labels: validation labels
        :return: self
        """
        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            shuff = list(zip(train_set, train_labels))
            np.random.shuffle(shuff)

            batches = [_ for _ in utilities.gen_batches(shuff, self.batch_size)]

            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.tf_session.run(
                    self.train_step,
                    feed_dict={self.input_data: x_batch,
                               self.input_labels: y_batch})

            if validation_set is not None:
                feed = {self.input_data: validation_set,
                        self.input_labels: validation_labels}
                acc = tf_utils.run_summaries(
                    self.tf_session, self.tf_merged_summaries,
                    self.tf_summary_writer, i, feed, self.accuracy)
                pbar.set_description("Accuracy: %s" % (acc))
コード例 #14
0
    def __init__(self,
                 layers,
                 name='srbm',
                 num_epochs=[10],
                 batch_size=[10],
                 learning_rate=[0.01],
                 gibbs_k=[1],
                 loss_func=['mse'],
                 momentum=0.5,
                 finetune_dropout=1,
                 finetune_loss_func='cross_entropy',
                 finetune_enc_act_func=[tf.nn.relu],
                 finetune_dec_act_func=[tf.nn.sigmoid],
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 regcoef=5e-4,
                 finetune_num_epochs=10,
                 noise=['gauss'],
                 stddev=0.1,
                 finetune_batch_size=20,
                 do_pretrain=False,
                 tied_weights=False,
                 regtype=['none'],
                 finetune_regtype='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function, or other
        # local variables would leak into expanded_args().
        # expand_args takes every parameter that is passed as a list and
        # repeats it so that there is one value per layer whenever the list is
        # shorter than the number of layers. A standalone sketch of this
        # expansion follows this code example.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(name=self.name + '-' + rbm_str,
                              loss_func=expanded_args['loss_func'][l],
                              visible_unit_type=expanded_args['noise'][l],
                              stddev=stddev,
                              num_hidden=expanded_args['layers'][l],
                              learning_rate=expanded_args['learning_rate'][l],
                              gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                              num_epochs=expanded_args['num_epochs'][l],
                              batch_size=expanded_args['batch_size'][l],
                              regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())
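
The constructor relies on utilities.expand_args to broadcast hyperparameters given as short lists to one value per layer. The helper below is only a hedged sketch of that behaviour, inferred from the comment in the constructor; the real expand_args may differ in signature and edge cases.

def expand_args_sketch(layers, **params):
    """Repeat single-element list parameters so there is one value per layer.

    Hedged sketch only; not the real utilities.expand_args.
    """
    n_layers = len(layers)
    expanded = {'layers': layers}
    for name, value in params.items():
        if isinstance(value, list) and len(value) == 1:
            expanded[name] = value * n_layers
        else:
            expanded[name] = value
    return expanded

# Three RBM layers, but a single learning rate and gibbs_k given:
print(expand_args_sketch([256, 128, 64], learning_rate=[0.01], gibbs_k=[1]))
# {'layers': [256, 128, 64], 'learning_rate': [0.01, 0.01, 0.01], 'gibbs_k': [1, 1, 1]}
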
コード例 #15
0
class DeepAutoencoder(UnsupervisedModel):
    """Implementation of a Deep Unsupervised Autoencoder as a stack of RBMs.

    The interface of the class is sklearn-like.
    """
    def __init__(self,
                 layers,
                 name='srbm',
                 num_epochs=[10],
                 batch_size=[10],
                 learning_rate=[0.01],
                 gibbs_k=[1],
                 loss_func=['mse'],
                 momentum=0.5,
                 finetune_dropout=1,
                 finetune_loss_func='cross_entropy',
                 finetune_enc_act_func=[tf.nn.relu],
                 finetune_dec_act_func=[tf.nn.sigmoid],
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 regcoef=5e-4,
                 finetune_num_epochs=10,
                 noise=['gauss'],
                 stddev=0.1,
                 finetune_batch_size=20,
                 do_pretrain=False,
                 tied_weights=False,
                 regtype=['none'],
                 finetune_regtype='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function, or other
        # local variables would leak into expanded_args().
        # expand_args takes every parameter that is passed as a list and
        # repeats it so that there is one value per layer whenever the list is
        # shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(name=self.name + '-' + rbm_str,
                              loss_func=expanded_args['loss_func'][l],
                              visible_unit_type=expanded_args['noise'][l],
                              stddev=stddev,
                              num_hidden=expanded_args['layers'][l],
                              learning_rate=expanded_args['learning_rate'][l],
                              gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                              num_epochs=expanded_args['num_epochs'][l],
                              batch_size=expanded_args['batch_size'][l],
                              regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())

    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return UnsupervisedModel.pretrain_procedure(
            self,
            self.rbms,
            self.rbm_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set)

    def _train_model(self, train_set, train_ref, validation_set,
                     validation_ref):
        """Train the model.

        :param train_set: training set
        :param train_ref: training reference data
        :param validation_set: validation set
        :param validation_ref: validation reference data
        :return: self
        """
        shuff = list(zip(train_set, train_ref))

        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            np.random.shuffle(shuff)
            batches = [
                _ for _ in utilities.gen_batches(shuff, self.batch_size)
            ]

            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.tf_session.run(self.train_step,
                                    feed_dict={
                                        self.input_data: x_batch,
                                        self.input_labels: y_batch,
                                        self.keep_prob: self.dropout
                                    })

            if validation_set is not None:
                feed = {
                    self.input_data: validation_set,
                    self.input_labels: validation_ref,
                    self.keep_prob: 1
                }
                err = tf_utils.run_summaries(self.tf_session,
                                             self.tf_merged_summaries,
                                             self.tf_summary_writer, i, feed,
                                             self.cost)
                pbar.set_description("Reconstruction loss: %s" % (err))

    def build_model(self,
                    n_features,
                    regtype='none',
                    encoding_w=None,
                    encoding_b=None):
        """Create the computational graph for the reconstruction task.

        :param n_features: Number of features
        :param regtype: regularization type
        :param encoding_w: list of weights for the encoding layers.
        :param encoding_b: list of biases for the encoding layers.
        :return: self
        """
        self._create_placeholders(n_features, n_features)

        if encoding_w and encoding_b:
            self.encoding_w_ = encoding_w
            self.encoding_b_ = encoding_b
        else:
            self._create_variables(n_features)

        self._create_encoding_layers()
        self._create_decoding_layers()

        variables = []
        variables.extend(self.encoding_w_)
        variables.extend(self.encoding_b_)
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(self.reconstruction,
                                      self.input_labels,
                                      regterm=regterm)
        self.train_step = self.trainer.compile(self.cost)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features of the first layer
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(tf.float32, [None, n_features],
                                         name='x-input')
        self.input_labels = tf.placeholder(tf.float32, [None, n_classes],
                                           name='y-input')
        self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')

    def _create_variables(self, n_features):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :return: self
        """
        if self.do_pretrain:
            self._create_variables_pretrain()
        else:
            self._create_variables_no_pretrain(n_features)

    def _create_variables_no_pretrain(self, n_features):
        """Create model variables (no previous unsupervised pretraining).

        :param n_features: number of features
        :return: self
        """
        self.encoding_w_ = []
        self.encoding_b_ = []

        for l, layer in enumerate(self.layers):

            if l == 0:
                self.encoding_w_.append(
                    tf.Variable(
                        tf.truncated_normal(shape=[n_features, self.layers[l]],
                                            stddev=0.1)))
                self.encoding_b_.append(
                    tf.Variable(
                        tf.truncated_normal([self.layers[l]], stddev=0.1)))
            else:
                self.encoding_w_.append(
                    tf.Variable(
                        tf.truncated_normal(
                            shape=[self.layers[l - 1], self.layers[l]],
                            stddev=0.1)))
                self.encoding_b_.append(
                    tf.Variable(
                        tf.truncated_normal([self.layers[l]], stddev=0.1)))

    def _create_variables_pretrain(self):
        """Create model variables (previous unsupervised pretraining).

        :return: self
        """
        for l, layer in enumerate(self.layers):
            self.encoding_w_[l] = tf.Variable(self.encoding_w_[l],
                                              name='enc-w-{}'.format(l))
            self.encoding_b_[l] = tf.Variable(self.encoding_b_[l],
                                              name='enc-b-{}'.format(l))

    def _create_encoding_layers(self):
        """Create the encoding layers for supervised finetuning.

        :return: output of the final encoding layer.
        """
        next_train = self.input_data
        self.layer_nodes = []

        for l, layer in enumerate(self.layers):

            with tf.name_scope("encode-{}".format(l)):

                y_act = tf.add(tf.matmul(next_train, self.encoding_w_[l]),
                               self.encoding_b_[l])

                if self.finetune_enc_act_func[l] is not None:
                    layer_y = self.finetune_enc_act_func[l](y_act)

                else:
                    # No activation function given: keep the linear activation.
                    layer_y = y_act

                # the input to the next layer is the output of this layer
                next_train = tf.nn.dropout(layer_y, self.keep_prob)

            self.layer_nodes.append(next_train)

        self.encode = next_train

    def _create_decoding_layers(self):
        """Create the decoding layers for reconstruction finetuning.

        :return: output of the final decoding layer (the reconstruction).
        """
        next_decode = self.encode

        for l, layer in reversed(list(enumerate(self.layers))):

            with tf.name_scope("decode-{}".format(l)):

                # Create decoding variables
                if self.tied_weights:
                    dec_w = tf.transpose(self.encoding_w_[l])
                else:
                    dec_w = tf.Variable(
                        tf.transpose(self.encoding_w_[l].initialized_value()))

                dec_b = tf.Variable(
                    tf.constant(0.1, shape=[dec_w.get_shape().dims[1].value]))
                self.decoding_w.append(dec_w)
                self.decoding_b.append(dec_b)

                y_act = tf.add(tf.matmul(next_decode, dec_w), dec_b)

                if self.finetune_dec_act_func[l] is not None:
                    layer_y = self.finetune_dec_act_func[l](y_act)

                else:
                    # No activation function given: keep the linear activation.
                    layer_y = y_act

                # the input to the next layer is the output of this layer
                next_decode = tf.nn.dropout(layer_y, self.keep_prob)

            self.layer_nodes.append(next_decode)

        self.reconstruction = next_decode
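
When tied_weights is True, _create_decoding_layers above reuses each encoding matrix transposed instead of allocating fresh decoder weights. The NumPy fragment below illustrates that tied-weights reconstruction pass for a dummy two-layer stack; the shapes and activation functions are assumptions made for the example, not the class API.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

rng = np.random.default_rng(0)

# Dummy encoder parameters for layers [64, 32] on 100 input features.
enc_w = [rng.normal(0, 0.1, (100, 64)), rng.normal(0, 0.1, (64, 32))]
enc_b = [np.zeros(64), np.zeros(32)]
dec_b = [np.zeros(64), np.zeros(100)]  # one bias per decoding layer

x = rng.random((5, 100))

# Encoding pass (relu, the default finetune_enc_act_func).
h = x
for W, b in zip(enc_w, enc_b):
    h = np.maximum(h @ W + b, 0.0)

# Decoding pass with tied weights: reuse each W transposed, in reverse order.
r = h
for W, b in zip(reversed(enc_w), dec_b):
    r = sigmoid(r @ W.T + b)

print(r.shape)  # (5, 100): the reconstruction has the input dimensionality
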
コード例 #16
0
class DeepAutoencoder(UnsupervisedModel):
    """Implementation of a Deep Unsupervised Autoencoder as a stack of RBMs.

    The interface of the class is sklearn-like.
    """

    def __init__(
        self, layers, name='srbm',
        num_epochs=[10], batch_size=[10],
        learning_rate=[0.01], gibbs_k=[1], loss_func=['mse'],
        momentum=0.5, finetune_dropout=1,
        finetune_loss_func='cross_entropy', finetune_enc_act_func=[tf.nn.relu],
        finetune_dec_act_func=[tf.nn.sigmoid], finetune_opt='sgd',
        finetune_learning_rate=0.001, regcoef=5e-4, finetune_num_epochs=10,
        noise=['gauss'], stddev=0.1, finetune_batch_size=20, do_pretrain=False,
            tied_weights=False, regtype=['none'], finetune_regtype='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function, or other
        # local variables would leak into expanded_args().
        # expand_args takes every parameter that is passed as a list and
        # repeats it so that there is one value per layer whenever the list is
        # shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            finetune_opt, learning_rate=finetune_learning_rate,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(
                name=self.name + '-' + rbm_str,
                loss_func=expanded_args['loss_func'][l],
                visible_unit_type=expanded_args['noise'][l], stddev=stddev,
                num_hidden=expanded_args['layers'][l],
                learning_rate=expanded_args['learning_rate'][l],
                gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                num_epochs=expanded_args['num_epochs'][l],
                batch_size=expanded_args['batch_size'][l],
                regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())

    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return UnsupervisedModel.pretrain_procedure(
            self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
            train_set=train_set, validation_set=validation_set)

    def _train_model(self, train_set, train_ref,
                     validation_set, validation_ref):
        """Train the model.

        :param train_set: training set
        :param train_ref: training reference data
        :param validation_set: validation set
        :param validation_ref: validation reference data
        :return: self
        """
        shuff = list(zip(train_set, train_ref))

        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            np.random.shuffle(shuff)
            batches = [_ for _ in utilities.gen_batches(
                shuff, self.batch_size)]

            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.tf_session.run(
                    self.train_step,
                    feed_dict={self.input_data: x_batch,
                               self.input_labels: y_batch,
                               self.keep_prob: self.dropout})

            if validation_set is not None:
                feed = {self.input_data: validation_set,
                        self.input_labels: validation_ref,
                        self.keep_prob: 1}
                err = tf_utils.run_summaries(
                    self.tf_session, self.tf_merged_summaries,
                    self.tf_summary_writer, i, feed, self.cost)
                pbar.set_description("Reconstruction loss: %s" % (err))

    def build_model(self, n_features, regtype='none',
                    encoding_w=None, encoding_b=None):
        """Create the computational graph for the reconstruction task.

        :param n_features: Number of features
        :param regtype: regularization type
        :param encoding_w: list of weights for the encoding layers.
        :param encoding_b: list of biases for the encoding layers.
        :return: self
        """
        self._create_placeholders(n_features, n_features)

        if encoding_w and encoding_b:
            self.encoding_w_ = encoding_w
            self.encoding_b_ = encoding_b
        else:
            self._create_variables(n_features)

        self._create_encoding_layers()
        self._create_decoding_layers()

        variables = []
        variables.extend(self.encoding_w_)
        variables.extend(self.encoding_b_)
        regterm = Layers.regularization(variables, self.regtype, self.regcoef)

        self.cost = self.loss.compile(
            self.reconstruction, self.input_labels, regterm=regterm)
        self.train_step = self.trainer.compile(self.cost)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features of the first layer
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(
            tf.float32, [None, n_features], name='x-input')
        self.input_labels = tf.placeholder(
            tf.float32, [None, n_classes], name='y-input')
        self.keep_prob = tf.placeholder(
            tf.float32, name='keep-probs')

    def _create_variables(self, n_features):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :return: self
        """
        if self.do_pretrain:
            self._create_variables_pretrain()
        else:
            self._create_variables_no_pretrain(n_features)

    def _create_variables_no_pretrain(self, n_features):
        """Create model variables (no previous unsupervised pretraining).

        :param n_features: number of features
        :return: self
        """
        self.encoding_w_ = []
        self.encoding_b_ = []

        for l, layer in enumerate(self.layers):

            if l == 0:
                self.encoding_w_.append(tf.Variable(tf.truncated_normal(
                    shape=[n_features, self.layers[l]], stddev=0.1)))
                self.encoding_b_.append(tf.Variable(tf.truncated_normal(
                    [self.layers[l]], stddev=0.1)))
            else:
                self.encoding_w_.append(tf.Variable(tf.truncated_normal(
                    shape=[self.layers[l - 1], self.layers[l]], stddev=0.1)))
                self.encoding_b_.append(tf.Variable(tf.truncated_normal(
                    [self.layers[l]], stddev=0.1)))

    def _create_variables_pretrain(self):
        """Create model variables (previous unsupervised pretraining).

        :return: self
        """
        for l, layer in enumerate(self.layers):
            self.encoding_w_[l] = tf.Variable(
                self.encoding_w_[l], name='enc-w-{}'.format(l))
            self.encoding_b_[l] = tf.Variable(
                self.encoding_b_[l], name='enc-b-{}'.format(l))

    def _create_encoding_layers(self):
        """Create the encoding layers for supervised finetuning.

        :return: output of the final encoding layer.
        """
        next_train = self.input_data
        self.layer_nodes = []

        for l, layer in enumerate(self.layers):

            with tf.name_scope("encode-{}".format(l)):

                y_act = tf.add(
                    tf.matmul(next_train, self.encoding_w_[l]),
                    self.encoding_b_[l]
                )

                if self.finetune_enc_act_func[l] is not None:
                    layer_y = self.finetune_enc_act_func[l](y_act)

                else:
                    # No activation function given: keep the linear activation.
                    layer_y = y_act

                # the input to the next layer is the output of this layer
                next_train = tf.nn.dropout(layer_y, self.keep_prob)

            self.layer_nodes.append(next_train)

        self.encode = next_train

    def _create_decoding_layers(self):
        """Create the decoding layers for reconstruction finetuning.

        :return: output of the final decoding layer (the reconstruction).
        """
        next_decode = self.encode

        for l, layer in reversed(list(enumerate(self.layers))):

            with tf.name_scope("decode-{}".format(l)):

                # Create decoding variables
                if self.tied_weights:
                    dec_w = tf.transpose(self.encoding_w_[l])
                else:
                    dec_w = tf.Variable(tf.transpose(
                        self.encoding_w_[l].initialized_value()))

                dec_b = tf.Variable(tf.constant(
                    0.1, shape=[dec_w.get_shape().dims[1].value]))
                self.decoding_w.append(dec_w)
                self.decoding_b.append(dec_b)

                y_act = tf.add(
                    tf.matmul(next_decode, dec_w),
                    dec_b
                )

                if self.finetune_dec_act_func[l] is not None:
                    layer_y = self.finetune_dec_act_func[l](y_act)

                else:
                    # No activation function given: keep the linear activation.
                    layer_y = y_act

                # the input to the next layer is the output of this layer
                next_decode = tf.nn.dropout(layer_y, self.keep_prob)

            self.layer_nodes.append(next_decode)

        self.reconstruction = next_decode
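
pretrain trains the stacked RBMs greedily: each machine is fitted on the representation produced by the layer below it, and its weights and hidden biases become the corresponding encoding layer. Below is a hedged sketch of that flow; it assumes the sklearn-like fit/transform interface the docstrings describe, while the real pretrain_procedure also manages a separate tf.Graph per RBM.

def greedy_pretrain_sketch(rbms, train_set):
    """Greedy layer-wise pretraining flow (illustrative sketch only)."""
    next_train = train_set
    params = []
    for machine in rbms:
        machine.fit(next_train)
        params.append(machine.get_parameters())
        # The hidden representation of this layer feeds the next machine.
        next_train = machine.transform(next_train)
    return params
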
コード例 #17
0
    def __init__(
        self, layers, name='srbm',
        num_epochs=[10], batch_size=[10],
        learning_rate=[0.01], gibbs_k=[1], loss_func=['mse'],
        momentum=0.5, finetune_dropout=1,
        finetune_loss_func='cross_entropy', finetune_enc_act_func=[tf.nn.relu],
        finetune_dec_act_func=[tf.nn.sigmoid], finetune_opt='sgd',
        finetune_learning_rate=0.001, regcoef=5e-4, finetune_num_epochs=10,
        noise=['gauss'], stddev=0.1, finetune_batch_size=20, do_pretrain=False,
            tied_weights=False, regtype=['none'], finetune_regtype='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function, or other
        # local variables would leak into expanded_args().
        # expand_args takes every parameter that is passed as a list and
        # repeats it so that there is one value per layer whenever the list is
        # shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            finetune_opt, learning_rate=finetune_learning_rate,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(
                name=self.name + '-' + rbm_str,
                loss_func=expanded_args['loss_func'][l],
                visible_unit_type=expanded_args['noise'][l], stddev=stddev,
                num_hidden=expanded_args['layers'][l],
                learning_rate=expanded_args['learning_rate'][l],
                gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                num_epochs=expanded_args['num_epochs'][l],
                batch_size=expanded_args['batch_size'][l],
                regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())
コード例 #18
0
class StackedDenoisingAutoencoder(SupervisedModel):
    """Implementation of Stacked Denoising Autoencoders using TensorFlow.

    The interface of the class is sklearn-like.
    """
    def __init__(self,
                 layers,
                 name='sdae',
                 enc_act_func=[tf.nn.tanh],
                 dec_act_func=[None],
                 loss_func=['cross_entropy'],
                 num_epochs=[10],
                 batch_size=[10],
                 opt=['sgd'],
                 regcoef=[5e-4],
                 learning_rate=[0.01],
                 momentum=0.5,
                 finetune_dropout=1,
                 corr_type=['none'],
                 corr_frac=[0.],
                 finetune_loss_func='softmax_cross_entropy',
                 finetune_act_func=tf.nn.relu,
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10,
                 finetune_batch_size=20,
                 do_pretrain=False):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default ['softmax_cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function, or other
        # local variables would leak into expanded_args().
        # expand_args takes every parameter that is passed as a list and
        # repeats it so that there is one value per layer whenever the list is
        # shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        SupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.finetune_act_func = finetune_act_func

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.last_W = None
        self.last_b = None

        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    name=self.name + '-' + dae_str,
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    opt=expanded_args['opt'][l],
                    regcoef=expanded_args['regcoef'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())

    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_parameters(graph=autoencgraph)
            self.encoding_w_.append(params['enc_w'])
            self.encoding_b_.append(params['enc_b'])

        return SupervisedModel.pretrain_procedure(
            self,
            self.autoencoders,
            self.autoencoder_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set)

    def _train_model(self, train_set, train_labels, validation_set,
                     validation_labels):
        """Train the model.

        :param train_set: training set
        :param train_labels: training labels
        :param validation_set: validation set
        :param validation_labels: validation labels
        :return: self
        """
        shuff = list(zip(train_set, train_labels))

        pbar = tqdm(list(range(self.num_epochs)))
        for i in pbar:

            np.random.shuffle(shuff)

            batches = [
                _ for _ in utilities.gen_batches(shuff, self.batch_size)
            ]

            for batch in batches:
                x_batch, y_batch = list(zip(*batch))
                self.tf_session.run(self.train_step,
                                    feed_dict={
                                        self.input_data: x_batch,
                                        self.input_labels: y_batch,
                                        self.keep_prob: self.dropout
                                    })

            if validation_set is not None:
                feed = {
                    self.input_data: validation_set,
                    self.input_labels: validation_labels,
                    self.keep_prob: 1
                }
                acc = tf_utils.run_summaries(self.tf_session,
                                             self.tf_merged_summaries,
                                             self.tf_summary_writer, i, feed,
                                             self.accuracy)
                pbar.set_description("Accuracy: %s" % (acc))

    def build_model(self, n_features, n_classes):
        """Create the computational graph.

        This graph is intended to be created for finetuning,
        i.e. after unsupervised pretraining.
        :param n_features: Number of features.
        :param n_classes: number of classes.
        :return: self
        """
        self._create_placeholders(n_features, n_classes)
        self._create_variables(n_features)

        next_train = self._create_encoding_layers()
        self.mod_y, _, _ = Layers.linear(next_train, n_classes)
        self.layer_nodes.append(self.mod_y)

        self.cost = self.loss.compile(self.mod_y, self.input_labels)
        self.train_step = self.trainer.compile(self.cost)
        self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features of the first layer
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(tf.float32, [None, n_features],
                                         name='x-input')

        self.input_labels = tf.placeholder(tf.float32, [None, n_classes],
                                           name='y-input')

        self.keep_prob = tf.placeholder(tf.float32, name='keep-probs')

    def _create_variables(self, n_features):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :return: self
        """
        if self.do_pretrain:
            self._create_variables_pretrain()
        else:
            self._create_variables_no_pretrain(n_features)

    def _create_variables_no_pretrain(self, n_features):
        """Create model variables (no previous unsupervised pretraining).

        :param n_features: number of features
        :return: self
        """
        self.encoding_w_ = []
        self.encoding_b_ = []

        for l, layer in enumerate(self.layers):

            w_name = 'enc-w-{}'.format(l)
            b_name = 'enc-b-{}'.format(l)

            if l == 0:
                w_shape = [n_features, self.layers[l]]
            else:
                w_shape = [self.layers[l - 1], self.layers[l]]

            w_init = tf.truncated_normal(shape=w_shape, stddev=0.1)
            W = tf.Variable(w_init, name=w_name)
            tf.summary.histogram(w_name, W)
            self.encoding_w_.append(W)

            b_init = tf.constant(0.1, shape=[self.layers[l]])
            b = tf.Variable(b_init, name=b_name)
            tf.summary.histogram(b_name, b)
            self.encoding_b_.append(b)

    def _create_variables_pretrain(self):
        """Create model variables (previous unsupervised pretraining).

        :return: self
        """
        for l, layer in enumerate(self.layers):
            w_name = 'enc-w-{}'.format(l)
            b_name = 'enc-b-{}'.format(l)

            self.encoding_w_[l] = tf.Variable(self.encoding_w_[l], name=w_name)
            tf.summary.histogram(w_name, self.encoding_w_[l])

            self.encoding_b_[l] = tf.Variable(self.encoding_b_[l], name=b_name)
            tf.summary.histogram(b_name, self.encoding_b_[l])

    def _create_encoding_layers(self):
        """Create the encoding layers for supervised finetuning.

        :return: output of the final encoding layer.
        """
        next_train = self.input_data
        self.layer_nodes = []

        for l, layer in enumerate(self.layers):

            with tf.name_scope("encode-{}".format(l)):

                y_act = tf.add(tf.matmul(next_train, self.encoding_w_[l]),
                               self.encoding_b_[l])

                if self.finetune_act_func:
                    layer_y = self.finetune_act_func(y_act)
                else:
                    # no activation function: pass the linear output through
                    layer_y = y_act

                # the input to the next layer is the output of this layer
                next_train = tf.nn.dropout(layer_y, self.keep_prob)

            self.layer_nodes.append(next_train)

        return next_train
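
A short usage sketch for the finetuning flow above. The class name `StackedDenoisingAutoencoder`, the toy data, and the `fit()` call (assumed to come from the sklearn-like `SupervisedModel` base class) are illustrative assumptions; only `pretrain()` is defined in the snippet itself.

import numpy as np

# toy data, illustrative shapes only (e.g. flattened 28x28 images, 10 classes)
trX = np.random.rand(500, 784).astype(np.float32)
trY = np.eye(10)[np.random.randint(0, 10, 500)]
vlX, vlY = trX[:100], trY[:100]

sdae = StackedDenoisingAutoencoder(      # assumed name of the enclosing class
    layers=[256, 128],                   # two stacked denoising autoencoders
    do_pretrain=True,
    finetune_num_epochs=10)

sdae.pretrain(trX, vlX)                  # layer-wise unsupervised pretraining
sdae.fit(trX, trY, vlX, vlY)             # assumed sklearn-like finetuning call
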
Code example #19
0
    def __init__(
        self, n_components, name='dae', loss_func='mse',
        enc_act_func=tf.nn.tanh, dec_act_func=None, num_epochs=10,
        batch_size=10, opt='sgd', learning_rate=0.01, momentum=0.9,
            corr_type='none', corr_frac=0., regtype='none', regcoef=5e-4):
        """Constructor.

        Parameters
        ----------

        n_components : int
            Number of hidden units.

        name : str, optional (default = "dae")
            Model name (used for save/load from disk).

        loss_func : str, optional (default = "mse")
            Loss function. ['mse', 'cross_entropy']

        enc_act_func : tf.nn.[activation]
            Activation function for the encoder.

        dec_act_func : tf.nn.[activation]
            Activation function for the decoder.

        num_epochs : int, optional (default = 10)
            Number of epochs.

        batch_size : int, optional (default = 10)
            Size of each mini-batch.

        opt : str, optional (default = "sgd")
            Which tensorflow optimizer to use.
            Possible values: ['sgd', 'momentum', 'adagrad', 'adam']

        learning_rate : float, optional (default = 0.01)
            Initial learning rate.

        momentum : float, optional (default = 0.9)
            Momentum parameter (only used if opt = "momentum").

        corr_type : str, optional (default = "none")
            Type of input corruption.
            Can be one of: ["none", "masking", "salt_and_pepper"]

        corr_frac : float, optional (default = 0.0)
            Fraction of the input to corrupt.

        regtype : str, optional (default = "none")
            Type of regularization to apply.
            Can be one of: ["none", "l1", "l2"].

        regcoef : float, optional (default = 5e-4)
            Regularization parameter. If 0, no regularization.
            Only considered if regtype != "none".
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.opt = opt
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.momentum = momentum
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            opt, learning_rate=learning_rate, momentum=momentum)

        self.n_components = n_components
        self.enc_act_func = enc_act_func
        self.dec_act_func = dec_act_func
        self.corr_type = corr_type
        self.corr_frac = corr_frac

        self.input_data_orig = None
        self.input_data = None

        self.W_ = None
        self.bh_ = None
        self.bv_ = None
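
A minimal configuration sketch for the constructor documented above; the parameter values are illustrative only, and training calls are omitted because they are not shown in this snippet.

import tensorflow as tf

dae = DenoisingAutoencoder(
    n_components=256,             # hidden units
    enc_act_func=tf.nn.sigmoid,   # encoder activation
    dec_act_func=None,            # linear decoder
    loss_func='mse',
    corr_type='masking',          # zero out a fraction of each input
    corr_frac=0.3,
    opt='momentum',
    momentum=0.9,
    num_epochs=20,
    batch_size=64)
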
Code example #20
0
    def __init__(self,
                 layers,
                 name='sdae',
                 enc_act_func=[tf.nn.tanh],
                 dec_act_func=[None],
                 loss_func=['cross_entropy'],
                 num_epochs=[10],
                 batch_size=[10],
                 opt=['sgd'],
                 regcoef=[5e-4],
                 learning_rate=[0.01],
                 momentum=0.5,
                 finetune_dropout=1,
                 corr_type=['none'],
                 corr_frac=[0.],
                 finetune_loss_func='softmax_cross_entropy',
                 finetune_act_func=tf.nn.relu,
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10,
                 finetune_batch_size=20,
                 do_pretrain=False):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'softmax_cross_entropy'.
            ['softmax_cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param do_pretrain: if True, reuse the variables learned during
            pretraining; if False, initialize new variables.
        """
        # WARNING! expand_args(**locals()) must be the first statement in this
        # function, otherwise locals() would also pick up variables defined
        # later in the body and pass them to expand_args().
        # expand_args takes every parameter that was passed as a list and
        # repeats its values so that there is one entry per layer whenever the
        # list is shorter than the number of layers (a standalone sketch of
        # this expansion follows this code example).
        expanded_args = utilities.expand_args(**locals())

        SupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.finetune_act_func = finetune_act_func

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.last_W = None
        self.last_b = None

        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    name=self.name + '-' + dae_str,
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    opt=expanded_args['opt'][l],
                    regcoef=expanded_args['regcoef'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())
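
To make the per-layer expansion concrete, here is a standalone sketch of the broadcasting that `utilities.expand_args` is described as performing in the comment above. It is an illustration under that description, not the library's actual implementation.

def expand_args_sketch(layers, **kwargs):
    """Repeat list-valued arguments so each layer gets its own value."""
    expanded = {}
    for key, val in kwargs.items():
        if isinstance(val, list) and len(val) != len(layers):
            expanded[key] = [val[0] for _ in layers]   # broadcast first value
        else:
            expanded[key] = val
    return expanded

# with three hidden layers, a single-element list is repeated per layer:
print(expand_args_sketch([512, 256, 128], learning_rate=[0.01], opt=['sgd']))
# -> {'learning_rate': [0.01, 0.01, 0.01], 'opt': ['sgd', 'sgd', 'sgd']}
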
Code example #21
0
    def __init__(self,
                 rbm_layers,
                 name='dbn',
                 do_pretrain=False,
                 rbm_num_epochs=[10],
                 rbm_gibbs_k=[1],
                 rbm_gauss_visible=False,
                 rbm_stddev=0.1,
                 rbm_batch_size=[10],
                 rbm_learning_rate=[0.01],
                 finetune_dropout=1,
                 finetune_loss_func='softmax_cross_entropy',
                 finetune_act_func=tf.nn.sigmoid,
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10,
                 finetune_batch_size=20,
                 momentum=0.5):
        """Constructor.

        :param rbm_layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'softmax_cross_entropy'.
            ['softmax_cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: if True, reuse the variables learned during
            pretraining; if False, initialize new variables.
        """
        SupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = rbm_layers
        self.finetune_act_func = finetune_act_func

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.softmax_W = None
        self.softmax_b = None

        rbm_params = {
            'num_epochs': rbm_num_epochs,
            'gibbs_k': rbm_gibbs_k,
            'batch_size': rbm_batch_size,
            'learning_rate': rbm_learning_rate
        }

        for p in rbm_params:
            if len(rbm_params[p]) != len(rbm_layers):
                # The parameter was not specified per layer:
                # repeat its first (default) value for every layer.
                rbm_params[p] = [rbm_params[p][0] for _ in rbm_layers]

        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(rbm_layers):
            rbm_str = 'rbm-' + str(l + 1)

            if l == 0 and rbm_gauss_visible:
                self.rbms.append(
                    rbm.RBM(name=self.name + '-' + rbm_str,
                            num_hidden=layer,
                            learning_rate=rbm_params['learning_rate'][l],
                            num_epochs=rbm_params['num_epochs'][l],
                            batch_size=rbm_params['batch_size'][l],
                            gibbs_sampling_steps=rbm_params['gibbs_k'][l],
                            visible_unit_type='gauss',
                            stddev=rbm_stddev))

            else:
                self.rbms.append(
                    rbm.RBM(name=self.name + '-' + rbm_str,
                            num_hidden=layer,
                            learning_rate=rbm_params['learning_rate'][l],
                            num_epochs=rbm_params['num_epochs'][l],
                            batch_size=rbm_params['batch_size'][l],
                            gibbs_sampling_steps=rbm_params['gibbs_k'][l]))

            self.rbm_graphs.append(tf.Graph())
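
A hedged configuration sketch for the DBN constructor above. The class name `DeepBeliefNetwork` is an assumption (the enclosing class is not shown); single-element RBM parameter lists are repeated for every layer by the defaulting loop above.

import tensorflow as tf

dbn = DeepBeliefNetwork(                      # assumed name of the enclosing class
    rbm_layers=[512, 256, 128],               # three stacked RBMs
    do_pretrain=True,
    rbm_num_epochs=[10],                      # repeated to [10, 10, 10]
    rbm_learning_rate=[0.01, 0.01, 0.005],    # already one value per layer
    rbm_gauss_visible=True,                   # first RBM uses gaussian visible units
    rbm_stddev=0.1,
    finetune_act_func=tf.nn.sigmoid,
    finetune_num_epochs=10,
    finetune_batch_size=20)
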
Code example #22
0
class LogisticRegression(SupervisedModel):
    """Simple Logistic Regression using TensorFlow.

    The interface of the class is sklearn-like.
    """

    def __init__(self, name='lr', loss_func='cross_entropy',
                 learning_rate=0.01, num_epochs=10, batch_size=10):
        """Constructor."""
        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size

        self.loss = Loss(self.loss_func)

        # Computational graph nodes
        self.input_data = None
        self.input_labels = None

        self.W_ = None
        self.b_ = None

        self.accuracy = None

    def build_model(self, n_features, n_classes):
        """Create the computational graph.

        :param n_features: number of features
        :param n_classes: number of classes
        :return: self
        """
        self._create_placeholders(n_features, n_classes)
        self._create_variables(n_features, n_classes)

        self.mod_y = tf.nn.softmax(
            tf.add(tf.matmul(self.input_data, self.W_), self.b_))

        self.cost = self.loss.compile(self.mod_y, self.input_labels)
        self.train_step = tf.train.GradientDescentOptimizer(
            self.learning_rate).minimize(self.cost)
        self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(
            tf.float32, [None, n_features], name='x-input')
        self.input_labels = tf.placeholder(
            tf.float32, [None, n_classes], name='y-input')
        self.keep_prob = tf.placeholder(
            tf.float32, name='keep-probs')

    def _create_variables(self, n_features, n_classes):
        """Create the TensorFlow variables for the model.

        :param n_features: number of features
        :param n_classes: number of classes
        :return: self
        """
        self.W_ = tf.Variable(
            tf.zeros([n_features, n_classes]), name='weights')
        self.b_ = tf.Variable(
            tf.zeros([n_classes]), name='biases')

    def _train_model(self, train_set, train_labels,
                     validation_set, validation_labels):
        """Train the model.

        :param train_set: training set
        :param train_labels: training labels
        :param validation_set: validation set
        :param validation_labels: validation labels
        :return: self
        """
        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            shuff = list(zip(train_set, train_labels))
            np.random.shuffle(shuff)

            batches = list(utilities.gen_batches(shuff, self.batch_size))

            for batch in batches:
                x_batch, y_batch = list(zip(*batch))
                self.tf_session.run(
                    self.train_step,
                    feed_dict={self.input_data: x_batch,
                               self.input_labels: y_batch})

            if validation_set is not None:
                feed = {self.input_data: validation_set,
                        self.input_labels: validation_labels}
                acc = tf_utils.run_summaries(
                    self.tf_session, self.tf_merged_summaries,
                    self.tf_summary_writer, i, feed, self.accuracy)
                pbar.set_description("Accuracy: %s" % (acc))
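
Given the sklearn-like interface stated in the class docstring, usage might look as follows. The `fit()` call is assumed to be provided by `SupervisedModel` (it is not shown in these snippets), and the data is a toy placeholder.

import numpy as np

# toy MNIST-shaped data, illustrative only
trX = np.random.rand(1000, 784).astype(np.float32)
trY = np.eye(10)[np.random.randint(0, 10, 1000)]

lr = LogisticRegression(learning_rate=0.01, num_epochs=10, batch_size=32)
lr.fit(trX, trY, trX[:100], trY[:100])   # assumed SupervisedModel API
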
Code example #23
0
    def __init__(
        self, layers, name='sdae',
        enc_act_func=[tf.nn.tanh], dec_act_func=[None],
        loss_func=['cross_entropy'], num_epochs=[10], batch_size=[10],
        opt=['sgd'], regcoef=[5e-4], learning_rate=[0.01], momentum=0.5,
        finetune_dropout=1, corr_type=['none'], corr_frac=[0.],
        finetune_loss_func='softmax_cross_entropy',
        finetune_act_func=tf.nn.relu, finetune_opt='sgd',
        finetune_learning_rate=0.001, finetune_num_epochs=10,
            finetune_batch_size=20, do_pretrain=False):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param finetune_loss_func: Loss function for the softmax layer.
        string, default 'softmax_cross_entropy'.
        ['softmax_cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
            (a standalone sketch of these corruption schemes follows this
            code example)
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param do_pretrain: if True, reuse the variables learned during
            pretraining; if False, initialize new variables.
        """
        # WARNING! expand_args(**locals()) must be the first statement in this
        # function, otherwise locals() would also pick up variables defined
        # later in the body and pass them to expand_args().
        # expand_args takes every parameter that was passed as a list and
        # repeats its values so that there is one entry per layer whenever the
        # list is shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        SupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            finetune_opt, learning_rate=finetune_learning_rate,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.finetune_act_func = finetune_act_func

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.last_W = None
        self.last_b = None

        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    name=self.name + '-' + dae_str,
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    opt=expanded_args['opt'][l], regcoef=expanded_args['regcoef'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())
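
The `corr_type`/`corr_frac` parameters describe how inputs are corrupted before reconstruction. Below is a standalone NumPy sketch of the two usual schemes ('masking' and 'salt_and_pepper'); it mirrors their common definitions and is not necessarily the library's exact implementation.

import numpy as np

def corrupt(x, corr_type='masking', corr_frac=0.3, rng=np.random):
    """Corrupt a batch x with values in [0, 1] (illustration only)."""
    x_noisy = x.copy()
    mask = rng.rand(*x.shape) < corr_frac              # entries to corrupt
    if corr_type == 'masking':
        x_noisy[mask] = 0.0                            # masking noise: zero out entries
    elif corr_type == 'salt_and_pepper':
        x_noisy[mask] = rng.randint(0, 2, mask.sum())  # set to min (0) or max (1)
    return x_noisy
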
Code example #24
0
class ConvolutionalNetwork(SupervisedModel):
    """Implementation of Convolutional Neural Networks using TensorFlow.

    The interface of the class is sklearn-like.
    """

    def __init__(
        self, layers, original_shape, name='convnet',
        loss_func='softmax_cross_entropy', num_epochs=10, batch_size=10,
            opt='sgd', learning_rate=0.01, momentum=0.5, dropout=0.5):
        """Constructor.

        :param layers: string used to build the model.
            This string is a comma-separated specification of the layers.
            Supported values:
                conv2d-FX-FY-Z-S: 2d convolution with Z feature maps as output
                    and FX x FY filters. S is the strides size
                maxpool-X: max pooling on the previous layer. X is the size of
                    the max pooling
                full-X: fully connected layer with X units
                softmax: softmax layer
            For example:
                conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-128,full-128,softmax

        :param original_shape: original shape of the images in the dataset
        :param dropout: Dropout parameter
        """
        assert layers.split(",")[-1] == "softmax", "the last layer must be 'softmax'"

        SupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.opt = opt
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.momentum = momentum

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            opt, learning_rate=learning_rate,
            momentum=momentum)

        self.layers = layers
        self.original_shape = original_shape
        self.dropout = dropout

        self.W_vars = None
        self.B_vars = None

        self.accuracy = None

    def _train_model(self, train_set, train_labels,
                     validation_set, validation_labels):
        """Train the model.

        :param train_set: training set
        :param train_labels: training labels
        :param validation_set: validation set
        :param validation_labels: validation labels
        :return: self
        """
        shuff = list(zip(train_set, train_labels))

        pbar = tqdm(range(self.num_epochs))
        for i in pbar:

            np.random.shuffle(shuff)
            batches = list(utilities.gen_batches(shuff, self.batch_size))

            for batch in batches:
                x_batch, y_batch = zip(*batch)
                self.tf_session.run(
                    self.train_step,
                    feed_dict={self.input_data: x_batch,
                               self.input_labels: y_batch,
                               self.keep_prob: self.dropout})

            if validation_set is not None:
                feed = {self.input_data: validation_set,
                        self.input_labels: validation_labels,
                        self.keep_prob: 1}
                acc = tf_utils.run_summaries(
                    self.tf_session, self.tf_merged_summaries,
                    self.tf_summary_writer, i, feed, self.accuracy)
                pbar.set_description("Accuracy: %s" % (acc))

    def build_model(self, n_features, n_classes):
        """Create the computational graph of the model.

        :param n_features: Number of features.
        :param n_classes: number of classes.
        :return: self
        """
        self._create_placeholders(n_features, n_classes)
        self._create_layers(n_classes)

        self.cost = self.loss.compile(self.mod_y, self.input_labels)
        self.train_step = self.trainer.compile(self.cost)
        self.accuracy = Evaluation.accuracy(self.mod_y, self.input_labels)

    def _create_placeholders(self, n_features, n_classes):
        """Create the TensorFlow placeholders for the model.

        :param n_features: number of features of the first layer
        :param n_classes: number of classes
        :return: self
        """
        self.input_data = tf.placeholder(
            tf.float32, [None, n_features], name='x-input')
        self.input_labels = tf.placeholder(
            tf.float32, [None, n_classes], name='y-input')
        self.keep_prob = tf.placeholder(
            tf.float32, name='keep-probs')

    def _create_layers(self, n_classes):
        """Create the layers of the model from self.layers.

        :param n_classes: number of classes
        :return: self
        """
        next_layer_feed = tf.reshape(self.input_data,
                                     [-1, self.original_shape[0],
                                      self.original_shape[1],
                                      self.original_shape[2]])
        prev_output_dim = self.original_shape[2]
        # this flag indicates whether we are building the first dense layer
        first_full = True

        self.W_vars = []
        self.B_vars = []

        for i, l in enumerate(self.layers.split(',')):

            node = l.split('-')
            node_type = node[0]

            if node_type == 'conv2d':

                # ################### #
                # Convolutional Layer #
                # ################### #

                # fx, fy = shape of the convolutional filter
                # feature_maps = number of output dimensions
                fx, fy, feature_maps, stride = (
                    int(node[1]), int(node[2]), int(node[3]), int(node[4]))

                print('Building Convolutional layer with %d input channels '
                      'and %d %dx%d filters with stride %d' %
                      (prev_output_dim, feature_maps, fx, fy, stride))

                # Create weights and biases
                W_conv = self.weight_variable(
                    [fx, fy, prev_output_dim, feature_maps])
                b_conv = self.bias_variable([feature_maps])
                self.W_vars.append(W_conv)
                self.B_vars.append(b_conv)

                # Convolution and Activation function
                h_conv = tf.nn.relu(
                    self.conv2d(next_layer_feed, W_conv, stride) + b_conv)

                # keep track of the number of output dims of the previous layer
                prev_output_dim = feature_maps
                # output node of the last layer
                next_layer_feed = h_conv

            elif node_type == 'maxpool':

                # ################# #
                # Max Pooling Layer #
                # ################# #

                ksize = int(node[1])

                print('Building Max Pooling layer with size %d' % ksize)

                next_layer_feed = self.max_pool(next_layer_feed, ksize)

            elif node_type == 'full':

                # ####################### #
                # Densely Connected Layer #
                # ####################### #

                if first_full:  # first fully connected layer

                    dim = int(node[1])
                    shp = next_layer_feed.get_shape()
                    tmpx = shp[1].value
                    tmpy = shp[2].value
                    fanin = tmpx * tmpy * prev_output_dim

                    print('Building fully connected layer with %d in units '
                          'and %d out units' % (fanin, dim))

                    W_fc = self.weight_variable([fanin, dim])
                    b_fc = self.bias_variable([dim])
                    self.W_vars.append(W_fc)
                    self.B_vars.append(b_fc)

                    h_pool_flat = tf.reshape(next_layer_feed, [-1, fanin])
                    h_fc = tf.nn.relu(tf.add(
                        tf.matmul(h_pool_flat, W_fc),
                        b_fc))
                    h_fc_drop = tf.nn.dropout(h_fc, self.keep_prob)

                    prev_output_dim = dim
                    next_layer_feed = h_fc_drop

                    first_full = False

                else:  # not first fully connected layer

                    dim = int(node[1])
                    W_fc = self.weight_variable([prev_output_dim, dim])
                    b_fc = self.bias_variable([dim])
                    self.W_vars.append(W_fc)
                    self.B_vars.append(b_fc)

                    h_fc = tf.nn.relu(tf.add(
                        tf.matmul(next_layer_feed, W_fc), b_fc))
                    h_fc_drop = tf.nn.dropout(h_fc, self.keep_prob)

                    prev_output_dim = dim
                    next_layer_feed = h_fc_drop

            elif node_type == 'softmax':

                # ############# #
                # Softmax Layer #
                # ############# #

                print('Building softmax layer with %d in units and '
                      '%d out units' % (prev_output_dim, n_classes))

                W_sm = self.weight_variable([prev_output_dim, n_classes])
                b_sm = self.bias_variable([n_classes])
                self.W_vars.append(W_sm)
                self.B_vars.append(b_sm)

                self.mod_y = tf.add(tf.matmul(next_layer_feed, W_sm), b_sm)

    @staticmethod
    def weight_variable(shape):
        """Create a weight variable."""
        initial = tf.truncated_normal(shape=shape, stddev=0.1)
        return tf.Variable(initial)

    @staticmethod
    def bias_variable(shape):
        """Create a bias variable."""
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial)

    @staticmethod
    def conv2d(x, W, stride):
        """2D Convolution operation."""
        return tf.nn.conv2d(
            x, W, strides=[1, stride, stride, 1], padding='SAME')

    @staticmethod
    def max_pool(x, dim):
        """Max pooling operation."""
        return tf.nn.max_pool(
            x, ksize=[1, dim, dim, 1], strides=[1, dim, dim, 1],
            padding='SAME')
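
Tying the layer-string grammar from the constructor docstring to the parsing in `_create_layers`, here is a hedged configuration sketch (MNIST-like shapes and illustrative hyperparameters; note that each conv2d block includes the stride as its last field, as the parser expects).

# two conv/pool blocks, one dense layer and a final softmax, per the grammar above
layers = 'conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-1024,softmax'

cnn = ConvolutionalNetwork(
    layers=layers,
    original_shape=[28, 28, 1],   # height, width, channels of the input images
    loss_func='softmax_cross_entropy',
    opt='adam',
    learning_rate=1e-3,
    dropout=0.5,
    num_epochs=5,
    batch_size=64)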