Example 1
    def __init__(self,
                 num_hidden,
                 visible_unit_type='bin',
                 name='rbm',
                 loss_func='mse',
                 learning_rate=0.01,
                 regcoef=5e-4,
                 regtype='none',
                 gibbs_sampling_steps=1,
                 batch_size=10,
                 num_epochs=10,
                 stddev=0.1):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
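The constructor above defines the RBM hyperparameters. A minimal instantiation sketch based on that signature follows; the import path and the stand-in training data are assumptions, not part of the example.

# Sketch only: the module path below is an assumption about where RBM lives.
import numpy as np
from yadlt.models.boltzmann import rbm  # assumed import path

# Random data standing in for a real (n_samples, n_features) training set.
train_X = np.random.rand(500, 784)

model = rbm.RBM(
    num_hidden=250,             # hidden units
    visible_unit_type='bin',    # 'bin' or 'gauss'
    loss_func='mse',
    learning_rate=0.01,
    gibbs_sampling_steps=1,
    batch_size=10,
    num_epochs=10,
    stddev=0.1)                 # ignored unless visible_unit_type == 'gauss'

# A call such as model.fit(train_X) would follow; the method name is an
# assumption and does not appear in the example above.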
Example 2
    def __init__(
        self, num_hidden, visible_unit_type='bin',
        name='rbm', loss_func='mse', learning_rate=0.01,
        regcoef=5e-4, regtype='none', gibbs_sampling_steps=1,
            batch_size=10, num_epochs=10, stddev=0.1):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin or gauss)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return UnsupervisedModel.pretrain_procedure(
            self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
            train_set=train_set, validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_model_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return UnsupervisedModel.pretrain_procedure(
            self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
            train_set=train_set, validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_parameters(graph=autoencgraph)
            self.encoding_w_.append(params['enc_w'])
            self.encoding_b_.append(params['enc_b'])

        return UnsupervisedModel.pretrain_procedure(
            self, self.autoencoders, self.autoencoder_graphs,
            set_params_func=set_params_func, train_set=train_set,
            validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_parameters(graph=autoencgraph)
            self.encoding_w_.append(params['enc_w'])
            self.encoding_b_.append(params['enc_b'])

        return UnsupervisedModel.pretrain_procedure(
            self,
            self.autoencoders,
            self.autoencoder_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set)
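The four pretrain variants above share one pattern: iterate over a list of layer models, train each one in its own graph, and collect that layer's weights and biases through a set_params_func callback. They differ only in whether the sub-models are RBMs or denoising autoencoders and in which parameter keys they read ('W'/'bh_' versus 'enc_w'/'enc_b'). The following self-contained sketch reproduces only the callback pattern; none of the names in it are yadlt APIs.

class TinyLayer:
    """Stand-in for an RBM or autoencoder whose parameters can be read back."""

    def __init__(self, n_in, n_out):
        self.n_in, self.n_out = n_in, n_out

    def get_parameters(self, graph=None):
        # A real model would return trained arrays; shapes are enough here.
        return {'W': (self.n_in, self.n_out), 'bh_': (self.n_out,)}


class TinyStack:
    """Mirrors the control flow of the pretrain methods above (sketch only)."""

    def __init__(self, layers, n_features):
        self.encoding_w_, self.encoding_b_ = [], []
        dims = [n_features] + layers
        self.models = [TinyLayer(dims[i], dims[i + 1])
                       for i in range(len(layers))]

    def pretrain(self):
        def set_params_func(model, graph):
            params = model.get_parameters(graph=graph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        # pretrain_procedure would also train each model in its own tf.Graph;
        # only the parameter-collection step is reproduced here.
        for model in self.models:
            set_params_func(model, graph=None)


stack = TinyStack(layers=[256, 128], n_features=784)
stack.pretrain()
print(stack.encoding_w_)  # [(784, 256), (256, 128)]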
Example 7
    def __init__(self,
                 num_hidden,
                 visible_unit_type='bin',
                 main_dir='rbm/',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 model_name='rbm',
                 dataset='mnist',
                 loss_func='mean_squared',
                 l2reg=5e-4,
                 regtype='none',
                 gibbs_sampling_steps=1,
                 learning_rate=0.01,
                 batch_size=10,
                 num_epochs=10,
                 stddev=0.1,
                 D=[],
                 verbose=0):
        """Constructor.

        :param num_hidden: number of hidden units
        :param loss_func: type of loss function
        :param visible_unit_type: type of the visible units (bin, gauss or rsm)
        :param gibbs_sampling_steps: optional, default 1
        :param stddev: default 0.1. Ignored if visible_unit_type is not 'gauss'
        :param D: default []. Optional array of document dimensions. Used only
            if visible_unit_type is 'rsm'
        :param verbose: level of verbosity. optional, default 0
        """
        UnsupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                   data_dir, summary_dir)

        self._initialize_training_parameters(loss_func=loss_func,
                                             learning_rate=learning_rate,
                                             num_epochs=num_epochs,
                                             batch_size=batch_size,
                                             dataset=dataset,
                                             regtype=regtype,
                                             l2reg=l2reg)

        self.num_hidden = num_hidden
        self.visible_unit_type = visible_unit_type
        self.gibbs_sampling_steps = gibbs_sampling_steps
        self.stddev = stddev
        self.D = D
        self.verbose = verbose

        self.W = None
        self.bh_ = None
        self.bv_ = None

        self.w_upd8 = None
        self.bh_upd8 = None
        self.bv_upd8 = None

        self.cost = None

        self.input_data = None
        self.hrand = None
        self.vrand = None
Example 8
    def __init__(
        self, n_components, name='dae', loss_func='mse',
        enc_act_func=tf.nn.tanh, dec_act_func=None, num_epochs=10,
        batch_size=10, opt='sgd', learning_rate=0.01, momentum=0.9,
            corr_type='none', corr_frac=0., regtype='none', regcoef=5e-4):
        """Constructor.

        Parameters
        ----------

        n_components : int
            Number of hidden units.

        name : str, optional (default = "dae")
            Model name (used for save/load from disk).

        loss_func : str, optional (default = "mse")
            Loss function. ['mse', 'cross_entropy']

        enc_act_func : tf.nn.[activation]
            Activation function for the encoder.

        dec_act_func : tf.nn.[activation]
            Activation function for the decoder.

        num_epochs : int, optional (default = 10)
            Number of epochs.

        batch_size : int, optional (default = 10)
            Size of each mini-batch.

        opt : str, optional (default = "sgd")
            Which TensorFlow optimizer to use.
            Possible values: ['sgd', 'momentum', 'adagrad', 'adam']

        learning_rate : float, optional (default = 0.01)
            Initial learning rate.

        momentum : float, optional (default = 0.9)
            Momentum parameter (only used if opt = "momentum").

        corr_type : str, optional (default = "none")
            Type of input corruption.
            Can be one of: ["none", "masking", "salt_and_pepper"]

        corr_frac : float, optional (default = 0.0)
            Fraction of the input to corrupt.

        regtype : str, optional (default = "none")
            Type of regularization to apply.
            Can be one of: ["none", "l1", "l2"].

        regcoef : float, optional (default = 5e-4)
            Regularization parameter. If 0, no regularization.
            Only considered if regtype != "none".
        """
        UnsupervisedModel.__init__(self, name)

        self.loss_func = loss_func
        self.learning_rate = learning_rate
        self.opt = opt
        self.num_epochs = num_epochs
        self.batch_size = batch_size
        self.momentum = momentum
        self.regtype = regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            opt, learning_rate=learning_rate, momentum=momentum)

        self.n_components = n_components
        self.enc_act_func = enc_act_func
        self.dec_act_func = dec_act_func
        self.corr_type = corr_type
        self.corr_frac = corr_frac

        self.input_data_orig = None
        self.input_data = None

        self.W_ = None
        self.bh_ = None
        self.bv_ = None
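A minimal instantiation sketch for the denoising autoencoder signature above; the import path is an assumption, while the keyword names and value ranges follow the docstring.

import tensorflow as tf
from yadlt.models.autoencoders import denoising_autoencoder  # assumed path

dae = denoising_autoencoder.DenoisingAutoencoder(
    n_components=256,            # hidden units
    name='dae-example',
    loss_func='mse',             # 'mse' or 'cross_entropy'
    enc_act_func=tf.nn.tanh,
    dec_act_func=None,
    corr_type='masking',         # 'none', 'masking' or 'salt_and_pepper'
    corr_frac=0.3,               # fraction of the input to corrupt
    opt='adam',                  # 'sgd', 'momentum', 'adagrad' or 'adam'
    learning_rate=0.01,
    num_epochs=10,
    batch_size=10,
    regtype='l2',
    regcoef=5e-4)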
    def __init__(
        self, layers, name='srbm',
        num_epochs=[10], batch_size=[10],
        learning_rate=[0.01], gibbs_k=[1], loss_func=['mse'],
        momentum=0.5, finetune_dropout=1,
        finetune_loss_func='cross_entropy', finetune_enc_act_func=[tf.nn.relu],
        finetune_dec_act_func=[tf.nn.sigmoid], finetune_opt='sgd',
        finetune_learning_rate=0.001, regcoef=5e-4, finetune_num_epochs=10,
        noise=['gauss'], stddev=0.1, finetune_batch_size=20, do_pretrain=False,
            tied_weights=False, regtype=['none'], finetune_regtype='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: if True, use variables from pretraining;
            if False, initialize new variables.
        """
        # WARNING! This must be the first statement in the function, otherwise
        # locals defined before this call would also be passed to expand_args().
        # expand_args() takes every parameter that was passed as a list and
        # expands it to one entry per layer when the list is shorter than the
        # number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            finetune_opt, learning_rate=finetune_learning_rate,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(
                name=self.name + '-' + rbm_str,
                loss_func=expanded_args['loss_func'][l],
                visible_unit_type=expanded_args['noise'][l], stddev=stddev,
                num_hidden=expanded_args['layers'][l],
                learning_rate=expanded_args['learning_rate'][l],
                gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                num_epochs=expanded_args['num_epochs'][l],
                batch_size=expanded_args['batch_size'][l],
                regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())
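The first statement of the constructor above passes locals() to utilities.expand_args so that every list-valued hyperparameter can later be indexed with [l], one entry per layer. The sketch below illustrates that broadcasting behaviour; it is not the yadlt implementation, and the padding rule (repeat the last element) is an assumption.

def expand_args(layers=None, **kwargs):
    """Broadcast list-valued hyperparameters to one entry per layer (sketch)."""
    n = len(layers)
    expanded = {'layers': layers}
    for key, value in kwargs.items():
        if isinstance(value, list) and len(value) < n:
            # Pad too-short lists by repeating the last element (assumed rule).
            value = value + [value[-1]] * (n - len(value))
        expanded[key] = value
    return expanded


print(expand_args(layers=[512, 256, 128],
                  learning_rate=[0.01],
                  noise=['gauss'],
                  momentum=0.5))
# {'layers': [512, 256, 128], 'learning_rate': [0.01, 0.01, 0.01],
#  'noise': ['gauss', 'gauss', 'gauss'], 'momentum': 0.5}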
    def __init__(
        self, layers, model_name='srbm', main_dir='srbm/',
        models_dir='models/', data_dir='data/', summary_dir='logs/',
        num_epochs=[10], batch_size=[10], dataset='mnist',
        learning_rate=[0.01], gibbs_k=[1], loss_func=['mean_squared'],
        momentum=0.5, finetune_dropout=1, verbose=1,
        finetune_loss_func='cross_entropy', finetune_enc_act_func=[tf.nn.relu],
        finetune_dec_act_func=[tf.nn.sigmoid], finetune_opt='gradient_descent',
        finetune_learning_rate=0.001, l2reg=5e-4, finetune_num_epochs=10,
        noise=['gauss'], stddev=0.1, finetune_batch_size=20, do_pretrain=False,
            tied_weights=False, regtype=['none'], finetune_reg_type='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
            int, default 1
        :param do_pretrain: if True, use variables from pretraining;
            if False, initialize new variables.
        """
        # WARNING! This must be the first statement in the function, otherwise
        # locals defined before this call would also be passed to expand_args().
        # expand_args() takes every parameter that was passed as a list and
        # expands it to one entry per layer when the list is shorter than the
        # number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(
            self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func=finetune_loss_func, learning_rate=finetune_learning_rate,
            regtype=finetune_reg_type, num_epochs=finetune_num_epochs,
            batch_size=finetune_batch_size, l2reg=l2reg,
            dropout=finetune_dropout, dataset=dataset, opt=finetune_opt,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights
        self.verbose = verbose

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(
                model_name=self.model_name + '-' + rbm_str,
                loss_func=expanded_args['loss_func'][l],
                models_dir=os.path.join(self.models_dir, rbm_str),
                data_dir=os.path.join(self.data_dir, rbm_str),
                summary_dir=os.path.join(self.tf_summary_dir, rbm_str),
                visible_unit_type=expanded_args['noise'][l], stddev=stddev,
                num_hidden=expanded_args['layers'][l], main_dir=self.main_dir,
                learning_rate=expanded_args['learning_rate'][l],
                gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                num_epochs=expanded_args['num_epochs'][l],
                batch_size=expanded_args['batch_size'][l],
                verbose=self.verbose, regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())
    def __init__(self,
                 layers,
                 name='sdae',
                 enc_act_func=[tf.nn.tanh],
                 dec_act_func=[None],
                 loss_func=['cross_entropy'],
                 num_epochs=[10],
                 batch_size=[10],
                 opt=['sgd'],
                 regtype=['none'],
                 regcoef=[5e-4],
                 learning_rate=[0.01],
                 momentum=0.5,
                 finetune_dropout=1,
                 corr_type=['none'],
                 finetune_regtype='none',
                 corr_frac=[0.],
                 finetune_loss_func='cross_entropy',
                 finetune_enc_act_func=[tf.nn.relu],
                 tied_weights=True,
                 finetune_dec_act_func=[tf.nn.sigmoid],
                 finetune_batch_size=20,
                 do_pretrain=False,
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param regtype: regularization type. Can be 'l2', 'l1' or 'none'
        :param finetune_regtype: regularization type for finetuning
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param tied_weights: if True, the decoder layers' weights are
            constrained to be the transpose of the encoder layers' weights
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param do_pretrain: if True, use variables from pretraining;
            if False, initialize new variables.
        """
        # WARNING! This must be the first statement in the function, otherwise
        # locals defined before this call would also be passed to expand_args().
        # expand_args() takes every parameter that was passed as a list and
        # expands it to one entry per layer when the list is shorter than the
        # number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    name=self.name + '-' + dae_str,
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    regtype=expanded_args['regtype'][l],
                    opt=expanded_args['opt'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    regcoef=expanded_args['regcoef'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())
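The loop above builds one DenoisingAutoencoder per entry of layers, indexing each expanded list argument with [l]. A hedged instantiation sketch for the stacked model follows; the class name StackedDenoisingAutoencoder and its import path are assumptions, while the keyword names mirror the signature above.

import tensorflow as tf
# Assumed class and module names for the constructor shown above.
from yadlt.models.autoencoders import stacked_denoising_autoencoder

sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
    layers=[512, 256, 128],        # three hidden layers
    enc_act_func=[tf.nn.tanh],     # single-entry lists are expanded per layer
    dec_act_func=[None],
    loss_func=['cross_entropy'],
    corr_type=['masking'],
    corr_frac=[0.3],
    num_epochs=[10],
    batch_size=[10],
    learning_rate=[0.01],
    do_pretrain=True,
    finetune_loss_func='cross_entropy',
    finetune_learning_rate=0.001,
    finetune_num_epochs=10)

# With do_pretrain=True, the layer-wise pretrain() shown earlier is expected to
# run before finetuning, filling encoding_w_ and encoding_b_ with one entry
# per layer.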
    def __init__(self,
                 layers,
                 name='srbm',
                 num_epochs=[10],
                 batch_size=[10],
                 learning_rate=[0.01],
                 gibbs_k=[1],
                 loss_func=['mse'],
                 momentum=0.5,
                 finetune_dropout=1,
                 finetune_loss_func='cross_entropy',
                 finetune_enc_act_func=[tf.nn.relu],
                 finetune_dec_act_func=[tf.nn.sigmoid],
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 regcoef=5e-4,
                 finetune_num_epochs=10,
                 noise=['gauss'],
                 stddev=0.1,
                 finetune_batch_size=20,
                 do_pretrain=False,
                 tied_weights=False,
                 regtype=['none'],
                 finetune_regtype='none'):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param do_pretrain: if True, use variables from pretraining;
            if False, initialize new variables.
        """
        # WARNING! This must be the first statement in the function, otherwise
        # locals defined before this call would also be passed to expand_args().
        # expand_args() takes every parameter that was passed as a list and
        # expands it to one entry per layer when the list is shorter than the
        # number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(layers):
            rbm_str = 'rbm-' + str(l + 1)
            new_rbm = rbm.RBM(name=self.name + '-' + rbm_str,
                              loss_func=expanded_args['loss_func'][l],
                              visible_unit_type=expanded_args['noise'][l],
                              stddev=stddev,
                              num_hidden=expanded_args['layers'][l],
                              learning_rate=expanded_args['learning_rate'][l],
                              gibbs_sampling_steps=expanded_args['gibbs_k'][l],
                              num_epochs=expanded_args['num_epochs'][l],
                              batch_size=expanded_args['batch_size'][l],
                              regtype=expanded_args['regtype'][l])
            self.rbms.append(new_rbm)
            self.rbm_graphs.append(tf.Graph())
    def __init__(
        self, layers, name='sdae',
        enc_act_func=[tf.nn.tanh], dec_act_func=[None],
        loss_func=['cross_entropy'], num_epochs=[10], batch_size=[10],
        opt=['sgd'], regtype=['none'], regcoef=[5e-4],
        learning_rate=[0.01], momentum=0.5, finetune_dropout=1,
        corr_type=['none'], finetune_regtype='none', corr_frac=[0.],
        finetune_loss_func='cross_entropy', finetune_enc_act_func=[tf.nn.relu],
        tied_weights=True, finetune_dec_act_func=[tf.nn.sigmoid],
        finetune_batch_size=20, do_pretrain=False,
        finetune_opt='sgd', finetune_learning_rate=0.001,
            finetune_num_epochs=10):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param regtype: regularization type. Can be 'l2', 'l1' or 'none'
        :param finetune_regtype: regularization type for finetuning
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'cross_entropy'. ['cross_entropy', 'mse']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_enc_act_func: activation function for the encoder
            finetuning phase
        :param finetune_dec_act_func: activation function for the decoder
            finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param tied_weights: if True, the decoder layers' weights are
            constrained to be the transpose of the encoder layers' weights
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param do_pretrain: if True, use variables from pretraining;
            if False, initialize new variables.
        """
        # WARNING! This must be the first statement in the function, otherwise
        # locals defined before this call would also be passed to expand_args().
        # expand_args() takes every parameter that was passed as a list and
        # expands it to one entry per layer when the list is shorter than the
        # number of layers.
        expanded_args = utilities.expand_args(**locals())

        UnsupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout
        self.regtype = finetune_regtype
        self.regcoef = regcoef

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(
            finetune_opt, learning_rate=finetune_learning_rate,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.tied_weights = tied_weights

        self.finetune_enc_act_func = expanded_args['finetune_enc_act_func']
        self.finetune_dec_act_func = expanded_args['finetune_dec_act_func']

        self.input_ref = None

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.decoding_w = []  # list of matrices of decoding weights per layer
        self.decoding_b = []  # list of arrays of decoding biases per layer

        self.reconstruction = None
        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    name=self.name + '-' + dae_str,
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    regtype=expanded_args['regtype'][l],
                    opt=expanded_args['opt'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    regcoef=expanded_args['regcoef'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())