Example #1
File: sda.py Project: josvr/pdnn
    def __init__(self, numpy_rng, theano_rng=None, cfg=None, dnn=None):
        """ Stacked Denoising Autoencoders for DNN Pre-training """

        self.cfg = cfg
        self.hidden_layers_sizes = cfg.hidden_layers_sizes
        self.n_ins = cfg.n_ins
        self.hidden_layers_number = len(self.hidden_layers_sizes)

        self.dA_layers = []

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # reuse the DNN's symbolic input variable as the SdA input
        self.x = dnn.x

        for i in range(self.hidden_layers_number):
            # the size of the input is either the number of hidden units of
            # the layer below, or the input size if we are on the first layer
            if i == 0:
                input_size = self.n_ins
                layer_input = self.x
            else:
                input_size = self.hidden_layers_sizes[i - 1]
                layer_input = dnn.layers[i-1].output

            # Construct a denoising autoencoder that shares weights with this layer
            if i == 0:
                reconstruct_activation = cfg.firstlayer_reconstruct_activation
            else:
                reconstruct_activation = cfg.hidden_activation
            dA_layer = dA(numpy_rng=numpy_rng,
                          theano_rng=theano_rng,
                          input=layer_input,
                          n_visible=input_size,
                          n_hidden=self.hidden_layers_sizes[i],
                          W=dnn.layers[i].W,      # tied to the DNN layer's weights
                          bhid=dnn.layers[i].b,   # and to its hidden bias
                          sparsity=cfg.sparsity,
                          sparsity_weight=cfg.sparsity_weight,
                          hidden_activation=cfg.hidden_activation,
                          reconstruct_activation=reconstruct_activation)
            self.dA_layers.append(dA_layer)
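
A minimal sketch of how this constructor might be driven, assuming a pdnn-style setup. The cfg attributes below are exactly the ones the constructor reads; SimpleNamespace stands in for pdnn's config object, and the DNN construction is an assumption (any object exposing dnn.x and dnn.layers, where each layer has W, b, and output, would do):

    import numpy
    import theano.tensor as T
    from types import SimpleNamespace

    numpy_rng = numpy.random.RandomState(89677)

    # stand-in config carrying exactly the attributes read by __init__ above
    cfg = SimpleNamespace(n_ins=784,
                          hidden_layers_sizes=[1024, 1024],
                          hidden_activation=T.nnet.sigmoid,
                          firstlayer_reconstruct_activation=T.tanh,
                          sparsity=None,
                          sparsity_weight=None)

    dnn = DNN(numpy_rng=numpy_rng, cfg=cfg)  # assumed network class exposing
                                             # dnn.x and dnn.layers
    sda = SdA(numpy_rng=numpy_rng, cfg=cfg, dnn=dnn)
    # each sda.dA_layers[i] shares W and bhid with dnn.layers[i], so training
    # the autoencoders pre-initializes the DNN's weights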
Example #2
    # (standalone variant; assumes theano.tensor imported as T, plus
    #  RandomStreams, HiddenLayer, dA, and LogisticRegression in scope)
    def __init__(self, numpy_rng, theano_rng=None, n_ins=784,
                 hidden_layers_sizes=[500, 500], n_outs=10,
                 corruption_levels=[0.1, 0.1], sparsity=None,
                 sparsity_weight=None,
                 hidden_activation=T.nnet.sigmoid,
                 first_reconstruct_activation=T.tanh):
        """ Stacked Denoising Autoencoders with a LogisticRegression output layer """

        self.sigmoid_layers = []
        self.dA_layers = []
        self.params = []
        self.n_layers = len(hidden_layers_sizes)

        assert self.n_layers > 0

        if not theano_rng:
            theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))
        # allocate symbolic variables for the data
        self.x = T.matrix('x')   # the data, one example per row
        self.y = T.ivector('y')  # the labels, a 1D vector of ints

        for i in range(self.n_layers):
            # construct the sigmoidal layer

            # the size of the input is either the number of hidden units of
            # the layer below or the input size if we are on the first layer
            if i == 0:
                input_size = n_ins
            else:
                input_size = hidden_layers_sizes[i - 1]

            # the input to this layer is either the activation of the hidden
            # layer below or the input of the SdA if you are on the first
            # layer
            if i == 0:
                layer_input = self.x
            else:
                layer_input = self.sigmoid_layers[-1].output

            sigmoid_layer = HiddenLayer(rng=numpy_rng,
                                        input=layer_input,
                                        n_in=input_size,
                                        n_out=hidden_layers_sizes[i],
                                        activation=hidden_activation)
            # add the layer to our list of layers
            self.sigmoid_layers.append(sigmoid_layer)

            self.params.extend(sigmoid_layer.params)

            # Construct a denoising autoencoder that shares weights with this layer
            if i == 0:
                reconstruct_activation = first_reconstruct_activation
            else:
                reconstruct_activation = hidden_activation
            dA_layer = dA(numpy_rng=numpy_rng,
                          theano_rng=theano_rng,
                          input=layer_input,
                          n_visible=input_size,
                          n_hidden=hidden_layers_sizes[i],
                          W=sigmoid_layer.W,      # tied to the sigmoid layer's weights
                          bhid=sigmoid_layer.b,   # and to its hidden bias
                          sparsity=sparsity,
                          sparsity_weight=sparsity_weight,
                          hidden_activation=hidden_activation,
                          reconstruct_activation=reconstruct_activation)
            self.dA_layers.append(dA_layer)

        # We now need to add a logistic layer on top of the MLP
        self.logLayer = LogisticRegression(
                         input=self.sigmoid_layers[-1].output,
                         n_in=hidden_layers_sizes[-1], n_out=n_outs)

        self.sigmoid_layers.append(self.logLayer)
        self.params.extend(self.logLayer.params)
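
Once the stack is built, each autoencoder is typically trained greedily, layer by layer, on the same minibatches. Below is a minimal sketch of the per-layer training functions, assuming the Deep Learning Tutorials dA API in which get_cost_updates(corruption_level, learning_rate) returns a (cost, updates) pair; train_set_x (a Theano shared variable) and batch_size are assumptions, not defined above:

    import theano
    import theano.tensor as T

    def pretraining_functions(sda, train_set_x, batch_size):
        index = T.lscalar('index')                 # minibatch index
        corruption_level = T.scalar('corruption')  # noise level for this dA
        learning_rate = T.scalar('lr')
        batch_begin = index * batch_size
        batch_end = batch_begin + batch_size

        pretrain_fns = []
        for dA_layer in sda.dA_layers:
            # cost and parameter updates for one denoising autoencoder
            cost, updates = dA_layer.get_cost_updates(corruption_level,
                                                      learning_rate)
            fn = theano.function(
                inputs=[index,
                        theano.In(corruption_level, value=0.2),
                        theano.In(learning_rate, value=0.1)],
                outputs=cost,
                updates=updates,
                # substitute a minibatch for the stack's symbolic input
                givens={sda.x: train_set_x[batch_begin:batch_end]})
            pretrain_fns.append(fn)
        return pretrain_fns

Each returned function takes a minibatch index (and, optionally, corruption and learning-rate values) and performs one gradient step on its layer.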