def __init__(self,
                 n_components,
                 model_name='dae',
                 main_dir='dae/',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 enc_act_func=tf.nn.tanh,
                 dec_act_func=None,
                 loss_func='mean_squared',
                 num_epochs=10,
                 batch_size=10,
                 dataset='mnist',
                 opt='gradient_descent',
                 learning_rate=0.01,
                 momentum=0.5,
                 corr_type='none',
                 corr_frac=0.,
                 verbose=1,
                 regtype='none',
                 l2reg=5e-4):
        """Constructor.

        :param n_components: number of hidden units
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param corr_type: Type of input corruption.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt.
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
        :param l2reg: Regularization parameter. If 0, no regularization.
        """
        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                   data_dir, summary_dir)

        self._initialize_training_parameters(loss_func=loss_func,
                                             learning_rate=learning_rate,
                                             opt=opt,
                                             num_epochs=num_epochs,
                                             batch_size=batch_size,
                                             dataset=dataset,
                                             momentum=momentum,
                                             regtype=regtype,
                                             l2reg=l2reg)

        self.n_components = n_components
        self.enc_act_func = enc_act_func
        self.dec_act_func = dec_act_func
        self.corr_type = corr_type
        self.corr_frac = corr_frac
        self.verbose = verbose

        self.input_data_orig = None
        self.input_data = None

        self.W_ = None
        self.bh_ = None
        self.bv_ = None
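A minimal usage sketch for this constructor; the DenoisingAutoencoder class name comes from the stacked model further below, while the fit() call and toy data are assumptions for illustration:

import numpy as np
import tensorflow as tf

# Hypothetical usage; fit() comes from the UnsupervisedModel base class
# and its exact signature is an assumption, not shown in this snippet.
dae = DenoisingAutoencoder(
    n_components=256,            # hidden units
    enc_act_func=tf.nn.sigmoid,  # one of [tf.nn.tanh, tf.nn.sigmoid]
    corr_type='masking',         # zero out a fraction of each input
    corr_frac=0.3,
    num_epochs=5,
    verbose=1)

train_X = np.random.rand(1000, 784).astype(np.float32)  # toy data
dae.fit(train_X, validation_set=train_X)                # assumed signature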
    def __init__(self, n_components, model_name='dae', main_dir='dae/', models_dir='models/', data_dir='data/', summary_dir='logs/',
                 enc_act_func=tf.nn.tanh, dec_act_func=None, loss_func='mean_squared', num_epochs=10, batch_size=10, dataset='mnist',
                 opt='gradient_descent', learning_rate=0.01, momentum=0.5, corr_type='none', corr_frac=0., verbose=1, l2reg=5e-4):
        """
        :param n_components: number of hidden units
        :param enc_act_func: Activation function for the encoder. [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder. [tf.nn.tanh, tf.nn.sigmoid, None]
        :param corr_type: Type of input corruption. ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt.
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
        :param l2reg: Regularization parameter. If 0, no regularization.
        """
        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(loss_func=loss_func, learning_rate=learning_rate, num_epochs=num_epochs,
                                             batch_size=batch_size, dataset=dataset, opt=opt, momentum=momentum,
                                             l2reg=l2reg)

        self.n_components = n_components
        self.enc_act_func = enc_act_func
        self.dec_act_func = dec_act_func
        self.corr_type = corr_type
        self.corr_frac = corr_frac
        self.verbose = verbose

        self.input_data_orig = None
        self.input_data = None

        self.W_ = None
        self.bh_ = None
        self.bv_ = None
Example #3
    def __init__(self, num_hidden, visible_unit_type='bin', main_dir='rbm', models_dir='models/', data_dir='data/', summary_dir='logs/',
                 model_name='rbm', dataset='mnist',
                 gibbs_sampling_steps=1, learning_rate=0.01, batch_size=10, num_epochs=10, stddev=0.1, verbose=0):

        """
        :param num_hidden: number of hidden units
        :param visible_unit_type: type of the visible units (binary or gaussian)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: optional, default 0.1. Ignored if visible_unit_type is not 'gauss'
        :param verbose: level of verbosity. optional, default 0
        """
        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(None, learning_rate, num_epochs, batch_size,
                                             dataset, None, None)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev
        self.verbose = verbose

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
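For context, a hedged sketch of the single Gibbs step that gibbs_sampling_steps counts; the attribute names W, bh_, bv_ match the snippet above, but the actual graph-building code is not part of it:

import tensorflow as tf

def sample_hidden_from_visible(visible, W, bh_):
    # p(h=1|v) for binary hidden units, then a Bernoulli sample
    hprobs = tf.nn.sigmoid(tf.matmul(visible, W) + bh_)
    hstates = tf.cast(tf.greater(hprobs,
                                 tf.random_uniform(tf.shape(hprobs))),
                      tf.float32)
    return hprobs, hstates

def sample_visible_from_hidden(hidden, W, bv_, unit_type='bin', stddev=0.1):
    act = tf.matmul(hidden, tf.transpose(W)) + bv_
    if unit_type == 'gauss':
        # Gaussian visible units: mean activation plus noise -- this is
        # the only place the stddev parameter above matters.
        return act + tf.random_normal(tf.shape(act), stddev=stddev)
    return tf.nn.sigmoid(act)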
    def __init__(self, layers, model_name='sdae', main_dir='sdae/', models_dir='models/', data_dir='data/', summary_dir='logs/',
                 num_epochs=[10], batch_size=[10], dataset='mnist', learning_rate=[0.01], gibbs_k=[1],
                 momentum=0.5, finetune_dropout=1, verbose=1, finetune_loss_func='cross_entropy', finetune_enc_act_func=[tf.nn.relu],
                 finetune_dec_act_func=[tf.nn.sigmoid], finetune_opt='gradient_descent', finetune_learning_rate=0.001,
                 finetune_num_epochs=10, noise=['gauss'], stddev=0.1, finetune_batch_size=20, do_pretrain=True):
        """
        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer. string, default 'cross_entropy'. ['cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning. float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder finetuning phase
        :param finetune_dec_act_func: activation function for the decoder finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning. int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning. int, default 20
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy. int, default 1
        :param do_pretrain: True: uses variables from pretraining, False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function,
        # otherwise locals() would also capture variables defined later and
        # pass them on to expand_args(). expand_args() takes every passed
        # parameter that is a list and expands it to one entry per layer
        # whenever the list is shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(loss_func=finetune_loss_func, learning_rate=finetune_learning_rate,
                                             num_epochs=finetune_num_epochs, batch_size=finetune_batch_size,
                                             dropout=finetune_dropout, dataset=dataset, opt=finetune_opt, momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.verbose = verbose

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights (one per layer)
        self.encoding_b_ = []  # list of arrays of encoding biases (one per layer)

        self.decoding_w = []  # list of matrices of decoding weights (one per layer)
        self.decoding_b = []  # list of arrays of decoding biases (one per layer)

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(model_name=self.model_name + '-' + rbm_str,
                              models_dir=os.path.join(self.models_dir, rbm_str), data_dir=os.path.join(self.data_dir, rbm_str),
                              summary_dir=os.path.join(self.tf_summary_dir, rbm_str), visible_unit_type=expanded_args['noise'][l],
                              stddev=stddev, num_hidden=expanded_args['layers'][l], main_dir=self.main_dir, learning_rate=expanded_args['learning_rate'][l],
                              gibbs_sampling_steps=expanded_args['gibbs_k'][l], verbose=self.verbose, num_epochs=expanded_args['num_epochs'][l],
                              batch_size=expanded_args['batch_size'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())
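The warning comment above is easier to follow with a sketch of what utilities.expand_args presumably does, inferred from how expanded_args['...'][l] is indexed per layer (the real implementation is not shown in these snippets):

def expand_args(**args_to_expand):
    # Inferred behaviour: any keyword that is a list shorter than
    # `layers` is repeated so that every layer gets one entry.
    layers = args_to_expand['layers']
    expanded = {}
    for key, val in args_to_expand.items():
        if isinstance(val, list) and len(val) != len(layers):
            expanded[key] = [val[0]] * len(layers)  # e.g. [10] -> [10, 10, 10]
        else:
            expanded[key] = val
    return expanded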
Example #5
    def __init__(self,
                 num_hidden,
                 visible_unit_type='bin',
                 main_dir='rbm/',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 model_name='rbm',
                 dataset='mnist',
                 loss_func='mean_squared',
                 l2reg=5e-4,
                 regtype='none',
                 gibbs_sampling_steps=1,
                 learning_rate=0.01,
                 batch_size=10,
                 num_epochs=10,
                 stddev=0.1,
                 verbose=0):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        :param verbose: level of verbosity. optional, default 0
        """
        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                   data_dir, summary_dir)

        self._initialize_training_parameters(loss_func=loss_func,
                                             learning_rate=learning_rate,
                                             num_epochs=num_epochs,
                                             batch_size=batch_size,
                                             dataset=dataset,
                                             regtype=regtype,
                                             l2reg=l2reg)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev
        self.verbose = verbose

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
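A minimal construction sketch for this version of the RBM; the fit() call is an assumption based on the UnsupervisedModel base class, not something shown here:

rbm_layer = RBM(
    num_hidden=250,
    visible_unit_type='gauss',   # real-valued inputs
    stddev=0.1,                  # only used with 'gauss' units
    gibbs_sampling_steps=1,      # CD-1
    loss_func='mean_squared',
    learning_rate=0.01,
    num_epochs=15,
    verbose=1)
# rbm_layer.fit(train_X, validation_set=val_X)  # assumed signature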
    def pretrain(self, train_set, validation_set=None):
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_model_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return UnsupervisedModel.pretrain_procedure(self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
                                                    train_set=train_set, validation_set=validation_set)
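pretrain_procedure itself is not part of these snippets; a hedged sketch of the greedy loop it presumably runs, inferred from the callback signature used above:

def pretrain_procedure(self, models, graphs, set_params_func,
                       train_set, validation_set=None):
    # Each layer trains in its own tf.Graph; after training, the
    # callback harvests the learned parameters, and the layer's hidden
    # representation becomes the next layer's input. The fit() and
    # transform() signatures here are assumptions.
    next_train, next_valid = train_set, validation_set
    for model, graph in zip(models, graphs):
        model.fit(next_train, next_valid, graph=graph)
        set_params_func(model, graph)
        next_train = model.transform(next_train, graph=graph)
        if next_valid is not None:
            next_valid = model.transform(next_valid, graph=graph)
    return next_train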
Example #7
    def __init__(
        self, num_hidden, visible_unit_type='bin', main_dir='rbm/',
        models_dir='models/', data_dir='data/', summary_dir='logs/',
        model_name='rbm', dataset='mnist', loss_func='mean_squared',
        l2reg=5e-4, regtype='none', gibbs_sampling_steps=1, learning_rate=0.01,
            batch_size=10, num_epochs=10, stddev=0.1, verbose=0):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        :param verbose: level of verbosity. optional, default 0
        """
        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                   data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func=loss_func, learning_rate=learning_rate,
            num_epochs=num_epochs, batch_size=batch_size, dataset=dataset,
            regtype=regtype, l2reg=l2reg)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev
        self.verbose = verbose

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
Example #8
    def pretrain(self, train_set, validation_set=None):
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_model_parameters(graph=autoencgraph)
            self.encoding_w_.append(params['enc_w'])
            self.encoding_b_.append(params['enc_b'])

        return UnsupervisedModel.pretrain_procedure(self, self.autoencoders, self.autoencoder_graphs,
                                                    set_params_func=set_params_func, train_set=train_set,
                                                    validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_model_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return UnsupervisedModel.pretrain_procedure(
            self,
            self.rbms,
            self.rbm_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_model_parameters(graph=rbmgraph)
            self.encoding_w_.append(params["W"])
            self.encoding_b_.append(params["bh_"])

        return UnsupervisedModel.pretrain_procedure(
            self,
            self.rbms,
            self.rbm_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set,
        )
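Whichever variant applies, after pretraining the harvested parameters seed the finetuning graph; an illustrative check (net, train_X and val_X are hypothetical placeholders for a constructed model and its data):

net.pretrain(train_X, val_X)
# One harvested (W, bh_) pair per layer, in layer order.
assert len(net.encoding_w_) == len(net.layers)
W0 = net.encoding_w_[0]  # first layer's learned weights (numpy array)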
Example #11
    def __init__(self,
                 layers,
                 model_name='srbm',
                 main_dir='srbm/',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 num_epochs=[10],
                 batch_size=[10],
                 dataset='mnist',
                 learning_rate=[0.01],
                 gibbs_k=[1],
                 loss_func=['mean_squared'],
                 momentum=0.5,
                 finetune_dropout=1,
                 verbose=1,
                 finetune_loss_func='cross_entropy',
                 finetune_enc_act_func=[tf.nn.relu],
                 finetune_dec_act_func=[tf.nn.sigmoid],
                 finetune_opt='gradient_descent',
                 finetune_learning_rate=0.001,
                 l2reg=5e-4,
                 finetune_num_epochs=10,
                 noise=['gauss'],
                 stddev=0.1,
                 finetune_batch_size=20,
                 do_pretrain=False,
                 tied_weights=False,
                 regtype=['none'],
                 finetune_reg_type='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
            int, default 1
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function,
        # otherwise locals() would also capture variables defined later and
        # pass them on to expand_args(). expand_args() takes every passed
        # parameter that is a list and expands it to one entry per layer
        # whenever the list is shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                   data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func=finetune_loss_func,
            learning_rate=finetune_learning_rate,
            regtype=finetune_reg_type,
            num_epochs=finetune_num_epochs,
            batch_size=finetune_batch_size,
            l2reg=l2reg,
            dropout=finetune_dropout,
            dataset=dataset,
            opt=finetune_opt,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights
        self.verbose = verbose

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(model_name=self.model_name + '-' + rbm_str,
                              loss_func=expanded_args['loss_func'][l],
                              models_dir=os.path.join(self.models_dir,
                                                      rbm_str),
                              data_dir=os.path.join(self.data_dir, rbm_str),
                              summary_dir=os.path.join(self.tf_summary_dir,
                                                       rbm_str),
                              visible_unit_type=expanded_args['noise'][l],
                              stddev=stddev,
                              num_hidden=expanded_args['layers'][l],
                              main_dir=self.main_dir,
                              learning_rate=expanded_args['learning_rate'][l],
                              gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                              num_epochs=expanded_args['num_epochs'][l],
                              batch_size=expanded_args['batch_size'][l],
                              verbose=self.verbose,
                              regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())
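A construction sketch for this stacked-RBM model; the class name StackedRBM is hypothetical (these snippets never show it), and the values are illustrative:

net = StackedRBM(
    layers=[512, 256, 128],            # three stacked RBMs
    noise=['gauss', 'bin', 'bin'],     # visible unit type per RBM
    learning_rate=[0.1, 0.05, 0.01],   # one rate per RBM
    gibbs_k=[1],                       # expanded to [1, 1, 1]
    num_epochs=[10],                   # expanded to [10, 10, 10]
    finetune_learning_rate=0.001,
    do_pretrain=True)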
    def __init__(self,
                 layers,
                 model_name='sdae',
                 main_dir='sdae/',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 enc_act_func=[tf.nn.tanh],
                 dec_act_func=[None],
                 loss_func=['cross_entropy'],
                 num_epochs=[10],
                 batch_size=[10],
                 dataset='mnist',
                 opt=['gradient_descent'],
                 regtype=['none'],
                 learning_rate=[0.01],
                 momentum=0.5,
                 finetune_dropout=1,
                 corr_type=['none'],
                 finetune_regtype='none',
                 corr_frac=[0.],
                 verbose=1,
                 finetune_loss_func='cross_entropy',
                 finetune_enc_act_func=[tf.nn.relu],
                 tied_weights=True,
                 finetune_dec_act_func=[tf.nn.sigmoid],
                 l2reg=[5e-4],
                 finetune_batch_size=20,
                 do_pretrain=False,
                 finetune_opt='gradient_descent',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param regtype: regularization type, can be 'l2', 'l1' or 'none'
        :param finetune_regtype: regularization type for finetuning
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param tied_weights: if True, the decoder layers weights are
            constrained to be the transpose of the encoder layers
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
            int, default 1
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function,
        # otherwise locals() would also capture variables defined later and
        # pass them on to expand_args(). expand_args() takes every passed
        # parameter that is a list and expands it to one entry per layer
        # whenever the list is shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                   data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func=finetune_loss_func,
            learning_rate=finetune_learning_rate,
            num_epochs=finetune_num_epochs,
            batch_size=finetune_batch_size,
            l2reg=l2reg,
            regtype=finetune_regtype,
            dropout=finetune_dropout,
            dataset=dataset,
            opt=finetune_opt,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights
        self.verbose = verbose

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    main_dir=self.main_dir,
                    model_name=self.model_name + '-' + dae_str,
                    models_dir=os.path.join(self.models_dir, dae_str),
                    data_dir=os.path.join(self.data_dir, dae_str),
                    summary_dir=os.path.join(self.tf_summary_dir, dae_str),
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    regtype=expanded_args['regtype'][l],
                    opt=expanded_args['opt'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    l2reg=expanded_args['l2reg'][l],
                    dataset=self.dataset,
                    momentum=self.momentum,
                    verbose=self.verbose,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())
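A hedged sketch of the two documented corruption types, corr_type='masking' and 'salt_and_pepper'; the library's actual corruption code is not part of these snippets:

import numpy as np

def corrupt(X, corr_type='masking', corr_frac=0.1):
    # Corrupt corr_frac of the features in each sample.
    X_noisy = X.copy()
    n_corrupt = int(round(corr_frac * X.shape[1]))
    for row in X_noisy:
        idx = np.random.choice(X.shape[1], n_corrupt, replace=False)
        if corr_type == 'masking':
            row[idx] = 0.0  # zero out the chosen features
        elif corr_type == 'salt_and_pepper':
            # force the chosen features to the extremes (0 or 1)
            row[idx] = np.random.randint(0, 2, n_corrupt)
    return X_noisy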
    def __init__(
        self, layers, model_name='sdae', main_dir='sdae/',
        models_dir='models/', data_dir='data/', summary_dir='logs/',
        enc_act_func=[tf.nn.tanh], dec_act_func=[None],
        loss_func=['cross_entropy'], num_epochs=[10], batch_size=[10],
        dataset='mnist', opt=['gradient_descent'], regtype=['none'],
        learning_rate=[0.01], momentum=0.5, finetune_dropout=1,
        corr_type=['none'], finetune_regtype='none', corr_frac=[0.], verbose=1,
        finetune_loss_func='cross_entropy', finetune_enc_act_func=[tf.nn.relu],
        tied_weights=True, finetune_dec_act_func=[tf.nn.sigmoid], l2reg=[5e-4],
        finetune_batch_size=20, do_pretrain=False,
        finetune_opt='gradient_descent', finetune_learning_rate=0.001,
            finetune_num_epochs=10):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param regtype: regularization type, can be 'l2', 'l1' or 'none'
        :param finetune_regtype: regularization type for finetuning
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param tied_weights: if True, the decoder layers weights are
            constrained to be the transpose of the encoder layers
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
            int, default 1
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function,
        # otherwise locals() would also capture variables defined later and
        # pass them on to expand_args(). expand_args() takes every passed
        # parameter that is a list and expands it to one entry per layer
        # whenever the list is shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(
            self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func=finetune_loss_func, learning_rate=finetune_learning_rate,
            num_epochs=finetune_num_epochs, batch_size=finetune_batch_size,
            l2reg=l2reg, regtype=finetune_regtype, dropout=finetune_dropout,
            dataset=dataset, opt=finetune_opt, momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights
        self.verbose = verbose

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer, main_dir=self.main_dir,
                    model_name=self.model_name + '-' + dae_str,
                    models_dir=os.path.join(self.models_dir, dae_str),
                    data_dir=os.path.join(self.data_dir, dae_str),
                    summary_dir=os.path.join(self.tf_summary_dir, dae_str),
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    regtype=expanded_args['regtype'][l],
                    opt=expanded_args['opt'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    l2reg=expanded_args['l2reg'][l], dataset=self.dataset,
                    momentum=self.momentum, verbose=self.verbose,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())
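Putting the stacked pieces together; the class name StackedDenoisingAutoencoder and the fit() call are assumptions, and the data arrays are hypothetical:

sdae = StackedDenoisingAutoencoder(
    layers=[512, 256],        # two denoising autoencoders
    corr_type=['masking'],    # expanded to one entry per layer
    corr_frac=[0.3],
    do_pretrain=True)
sdae.pretrain(train_X, val_X)  # greedy layer-wise pretraining
sdae.fit(train_X, train_X)     # finetuning; signature assumed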