Example #1
    def __init__(self,
                 layers,
                 name='sdae',
                 enc_act_func=[tf.nn.tanh],
                 dec_act_func=[None],
                 loss_func=['cross_entropy'],
                 num_epochs=[10],
                 batch_size=[10],
                 opt=['sgd'],
                 regcoef=[5e-4],
                 learning_rate=[0.01],
                 momentum=0.5,
                 finetune_dropout=1,
                 corr_type=['none'],
                 corr_frac=[0.],
                 finetune_loss_func='softmax_cross_entropy',
                 finetune_act_func=tf.nn.relu,
                 finetune_opt='sgd',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10,
                 finetune_batch_size=20,
                 do_pretrain=False):
        """Constructor.

        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder.
            [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder.
            [tf.nn.tanh, tf.nn.sigmoid, None]
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'softmax_cross_entropy'.
            ['softmax_cross_entropy', 'mse']
        :param finetune_dropout: dropout keep-probability for the finetuning
            phase. float, default 1 (no dropout)
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param corr_type: Type of input corruption. string, default 'none'.
            ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param do_pretrain: True: uses variables from pretraining,
            False: initializes new variables.
        """
        # WARNING! This must be the first expression in the function,
        # otherwise locals() would also capture the variables defined
        # below and pass them to expand_args().
        # expand_args() takes every parameter that was passed as a list
        # and tiles it to one entry per layer whenever the list is
        # shorter than the number of layers.
        expanded_args = utilities.expand_args(**locals())
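        # Illustrative expansion (assumed semantics, not from the original
        # source): with layers=[256, 128] and enc_act_func=[tf.nn.tanh],
        # expanded_args['enc_act_func'] becomes [tf.nn.tanh, tf.nn.tanh],
        # so each autoencoder in the loop below can index it with [l].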

        SupervisedModel.__init__(self, name)

        self.loss_func = finetune_loss_func
        self.learning_rate = finetune_learning_rate
        self.opt = finetune_opt
        self.num_epochs = finetune_num_epochs
        self.batch_size = finetune_batch_size
        self.momentum = momentum
        self.dropout = finetune_dropout

        self.loss = Loss(self.loss_func)
        self.trainer = Trainer(finetune_opt,
                               learning_rate=finetune_learning_rate,
                               momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.finetune_act_func = finetune_act_func

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.last_W = None
        self.last_b = None

        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    name=self.name + '-' + dae_str,
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    opt=expanded_args['opt'][l],
                    regcoef=expanded_args['regcoef'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l]))

            self.autoencoder_graphs.append(tf.Graph())
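
For orientation, a minimal usage sketch follows. It assumes this constructor belongs to yadlt's StackedDenoisingAutoencoder; the import path, the toy data, and the pretrain/fit signatures are assumptions for illustration, not taken from the snippet above.

import numpy as np
import tensorflow as tf
from yadlt.models.autoencoders import stacked_denoising_autoencoder

# Toy data: 500 samples, 784 features, 10 one-hot classes (illustrative).
trX = np.random.rand(500, 784).astype(np.float32)
trY = np.eye(10)[np.random.randint(0, 10, 500)].astype(np.float32)

sdae = stacked_denoising_autoencoder.StackedDenoisingAutoencoder(
    layers=[256, 128],          # two stacked autoencoders
    enc_act_func=[tf.nn.tanh],  # tiled to one entry per layer by expand_args()
    do_pretrain=True)

sdae.pretrain(trX)  # greedy layer-wise denoising pretraining (assumed API)
sdae.fit(trX, trY)  # supervised finetuning of the stacked net (assumed API)
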
Example #2
    else:
        trX = None
        vlX = None
        teX = None

    # Create the object
    enc_act_func = utilities.str2actfunc(FLAGS.enc_act_func)
    dec_act_func = utilities.str2actfunc(FLAGS.dec_act_func)
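    # str2actfunc maps the flag string to the corresponding TF op,
    # e.g. 'tanh' -> tf.nn.tanh (illustrative; the exact mapping is assumed).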

    dae = denoising_autoencoder.DenoisingAutoencoder(
        name=FLAGS.name,
        n_components=FLAGS.n_components,
        enc_act_func=enc_act_func,
        dec_act_func=dec_act_func,
        corr_type=FLAGS.corr_type,
        corr_frac=FLAGS.corr_frac,
        loss_func=FLAGS.loss_func,
        opt=FLAGS.opt,
        regcoef=FLAGS.regcoef,
        learning_rate=FLAGS.learning_rate,
        momentum=FLAGS.momentum,
        num_epochs=FLAGS.num_epochs,
        batch_size=FLAGS.batch_size)

    # Fit the model
    # W = None
    # if FLAGS.weights:
    #     W = np.load(FLAGS.weights)

    # bh = None
    # if FLAGS.h_bias:
    #     bh = np.load(FLAGS.h_bias)
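
    # The snippet cuts off here; a plausible continuation is sketched below.
    # It assumes yadlt's UnsupervisedModel interface (fit/transform) and the
    # trX/vlX/teX variables prepared in the omitted data-loading branch;
    # treat the exact signatures as assumptions, not the original source.
    dae.fit(trX, val_X=vlX)       # unsupervised training, validation monitoring
    encoded = dae.transform(teX)  # hidden-layer encoding of the test set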