Example #1
    def __init__(self,
                 model_name='lr',
                 main_dir='lr/',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 loss_func='cross_entropy',
                 dataset='mnist',
                 learning_rate=0.01,
                 verbose=0,
                 num_epochs=10,
                 batch_size=10):
        """Constructor.

        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
        """
        SupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                 data_dir, summary_dir)

        self._initialize_training_parameters(loss_func, learning_rate,
                                             num_epochs, batch_size, dataset,
                                             None)

        self.verbose = verbose

        # Computational graph nodes
        self.input_data = None
        self.input_labels = None

        self.W_ = None
        self.b_ = None

        self.accuracy = None
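
The constructor above only declares the graph nodes (input_data, input_labels, W_, b_, accuracy); the model presumably built from them is plain softmax regression trained with the 'cross_entropy' loss named in loss_func. As a point of reference, here is a minimal, self-contained NumPy sketch of the forward pass, loss and accuracy those nodes correspond to; it is an illustration under that assumption, not this library's implementation:

import numpy as np

def softmax(z):
    z = z - z.max(axis=1, keepdims=True)      # subtract the max for numerical stability
    e = np.exp(z)
    return e / e.sum(axis=1, keepdims=True)

rng = np.random.default_rng(0)
X = rng.normal(size=(10, 784))                # a batch of flattened 28x28 images
Y = np.eye(10)[rng.integers(0, 10, size=10)]  # one-hot labels

W = np.zeros((784, 10))                       # plays the role of self.W_
b = np.zeros(10)                              # plays the role of self.b_

probs = softmax(X @ W + b)                    # model output
cross_entropy = -np.mean(np.sum(Y * np.log(probs + 1e-12), axis=1))
accuracy = np.mean(probs.argmax(axis=1) == Y.argmax(axis=1))
print(cross_entropy, accuracy)                # plays the role of self.accuracy
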
    def __init__(self, layers, original_shape, model_name='convnet', main_dir='convnet',
                 models_dir='models/', data_dir='data/', summary_dir='logs/',
                 loss_func='softmax_cross_entropy', num_epochs=10, batch_size=10, dataset='mnist',
                 opt='gradient_descent', learning_rate=0.01, momentum=0.5, dropout=0.5, verbose=1):
        """
        :param layers: string used to build the model.
            This string is a comma-separated specification of the layers of the network.
            Supported values:
                conv2d-FX-FY-Z-S: 2d convolution with Z output feature maps and FX x FY filters. S is the stride size
                maxpool-X: max pooling on the previous layer. X is the size of the pooling window
                full-X: fully connected layer with X units
                softmax: softmax layer
            For example:
                conv2d-5-5-32,maxpool-2,conv2d-5-5-64,maxpool-2,full-128,full-128,softmax

        :param original_shape: original shape of the images in the dataset
        :param dropout: Dropout parameter
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
        """
        SupervisedModel.__init__(self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(loss_func, learning_rate, num_epochs, batch_size,
                                             dataset, opt, momentum)

        self.layers = layers
        self.original_shape = original_shape
        self.dropout = dropout
        self.verbose = verbose

        self.W_vars = None
        self.B_vars = None

        self.accuracy = None
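
The layers string format documented above can be checked with a small stand-alone parser. The sketch below only illustrates the specification quoted in the docstring (note that the example string omits the optional stride field S); it is not the parsing code this library actually uses:

def parse_layers(spec):
    """Parse a layer spec such as 'conv2d-5-5-32,maxpool-2,full-128,softmax'."""
    parsed = []
    for node in spec.split(','):
        fields = node.split('-')
        kind = fields[0]
        if kind == 'conv2d':
            # conv2d-FX-FY-Z-S; the stride S may be omitted, as in the example above
            fx, fy, z = int(fields[1]), int(fields[2]), int(fields[3])
            stride = int(fields[4]) if len(fields) > 4 else 1
            parsed.append(('conv2d', fx, fy, z, stride))
        elif kind == 'maxpool':
            parsed.append(('maxpool', int(fields[1])))
        elif kind == 'full':
            parsed.append(('full', int(fields[1])))
        elif kind == 'softmax':
            parsed.append(('softmax',))
        else:
            raise ValueError('unknown layer type: %s' % kind)
    return parsed

print(parse_layers('conv2d-5-5-32,maxpool-2,conv2d-5-5-64,maxpool-2,full-128,full-128,softmax'))
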
    def __init__(
        self, model_name='lr', main_dir='lr/', models_dir='models/',
        data_dir='data/', summary_dir='logs/', loss_func='cross_entropy',
        dataset='mnist', learning_rate=0.01, verbose=0, num_epochs=10,
            batch_size=10):
        """Constructor.

        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
        """
        SupervisedModel.__init__(
            self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func, learning_rate, num_epochs, batch_size, dataset, None)

        self.verbose = verbose

        # Computational graph nodes
        self.input_data = None
        self.input_labels = None

        self.W_ = None
        self.b_ = None

        self.accuracy = None
    def __init__(self,
                 layers,
                 original_shape,
                 model_name='convnet',
                 main_dir='convnet',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 loss_func='softmax_cross_entropy',
                 num_epochs=10,
                 batch_size=10,
                 dataset='mnist',
                 opt='gradient_descent',
                 learning_rate=0.01,
                 momentum=0.5,
                 dropout=0.5,
                 verbose=1):
        """Constructor.

        :param layers: string used to build the model.
            This string is a comma-separated specification of the layers.
            Supported values:
                conv2d-FX-FY-Z-S: 2d convolution with Z output feature maps
                    and FX x FY filters. S is the stride size
                maxpool-X: max pooling on the previous layer. X is the size of
                    the pooling window
                full-X: fully connected layer with X units
                softmax: softmax layer
            For example:
                conv2d-5-5-32,maxpool-2,conv2d-5-5-64,maxpool-2,full-128,full-128,softmax

        :param original_shape: original shape of the images in the dataset
        :param dropout: Dropout parameter
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
        """
        SupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                 data_dir, summary_dir)

        self._initialize_training_parameters(loss_func, learning_rate,
                                             num_epochs, batch_size, dataset,
                                             opt, momentum)

        self.layers = layers
        self.original_shape = original_shape
        self.dropout = dropout
        self.verbose = verbose

        self.W_vars = None
        self.B_vars = None

        self.accuracy = None
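
original_shape is needed because the flat input vectors have to be reshaped back into images before the first convolution, and the spatial size then shrinks through the stack. Assuming 'SAME' padding for the convolutions and non-overlapping max pooling (assumptions made for illustration; the graph-building code is not shown on this page), the size can be traced with this self-contained sketch:

import math

def output_side(side, layer_spec):
    """Spatial size after one layer, assuming 'SAME' conv padding and
    non-overlapping max pooling (illustrative assumptions)."""
    fields = layer_spec.split('-')
    if fields[0] == 'conv2d':
        stride = int(fields[4]) if len(fields) > 4 else 1
        return math.ceil(side / stride)
    if fields[0] == 'maxpool':
        return math.ceil(side / int(fields[1]))
    return side  # full / softmax layers have no spatial extent

side = 28  # original_shape = [28, 28, 1] for MNIST
for spec in 'conv2d-5-5-32,maxpool-2,conv2d-5-5-64,maxpool-2'.split(','):
    side = output_side(side, spec)
    print(spec, '->', side)
# The final side is 7, so the first 'full' layer sees 7 * 7 * 64 = 3136 inputs.
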
Example #5
    def pretrain(self, train_set, validation_set=None):
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_model_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return SupervisedModel.pretrain_procedure(self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
                                                  train_set=train_set, validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_model_parameters(graph=autoencgraph)
            self.encoding_w_.append(params['enc_w'])
            self.encoding_b_.append(params['enc_b'])

        return SupervisedModel.pretrain_procedure(self, self.autoencoders, self.autoencoder_graphs,
                                                  set_params_func=set_params_func, train_set=train_set,
                                                  validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_model_parameters(graph=autoencgraph)
            self.encoding_w_.append(params['enc_w'])
            self.encoding_b_.append(params['enc_b'])

        return SupervisedModel.pretrain_procedure(
            self, self.autoencoders, self.autoencoder_graphs,
            set_params_func=set_params_func, train_set=train_set,
            validation_set=validation_set)
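
All the pretrain variants on this page follow the same contract: SupervisedModel.pretrain_procedure trains each layer-wise model inside its own graph and then hands it to set_params_func, which copies the learned encoding weights and biases into self.encoding_w_ and self.encoding_b_ for the later fine-tuning step. The stand-alone mock below sketches that callback pattern; apart from the names taken from the listing, everything here is made up for illustration:

class FakeAutoencoder:
    """Stand-in for a layer-wise model exposing get_model_parameters()."""

    def __init__(self, n_components):
        self.n_components = n_components

    def get_model_parameters(self, graph=None):
        # An RBM would return {'W': ..., 'bh_': ...};
        # a denoising autoencoder returns {'enc_w': ..., 'enc_b': ...}.
        return {'enc_w': [[0.0] * self.n_components],
                'enc_b': [0.0] * self.n_components}

def pretrain_procedure(models, graphs, set_params_func):
    """Sketch of the loop assumed to live in SupervisedModel.pretrain_procedure."""
    for model, graph in zip(models, graphs):
        # ... here each model would be trained greedily on the encoded data ...
        set_params_func(model, graph)

encoding_w_, encoding_b_ = [], []

def set_params_func(autoenc, autoencgraph):
    params = autoenc.get_model_parameters(graph=autoencgraph)
    encoding_w_.append(params['enc_w'])
    encoding_b_.append(params['enc_b'])

pretrain_procedure([FakeAutoencoder(4), FakeAutoencoder(2)], [None, None], set_params_func)
print(len(encoding_w_), len(encoding_b_))  # 2 2 -- one entry per pretrained layer
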
Example #8
    def pretrain(self, train_set, validation_set=None):
        self.do_pretrain = True

        def set_params_func(rbmmachine, rbmgraph):
            params = rbmmachine.get_model_parameters(graph=rbmgraph)
            self.encoding_w_.append(params['W'])
            self.encoding_b_.append(params['bh_'])

        return SupervisedModel.pretrain_procedure(
            self,
            self.rbms,
            self.rbm_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set)
    def pretrain(self, train_set, validation_set=None):
        """Perform Unsupervised pretraining of the autoencoder."""
        self.do_pretrain = True

        def set_params_func(autoenc, autoencgraph):
            params = autoenc.get_model_parameters(graph=autoencgraph)
            self.encoding_w_.append(params["enc_w"])
            self.encoding_b_.append(params["enc_b"])

        return SupervisedModel.pretrain_procedure(
            self,
            self.autoencoders,
            self.autoencoder_graphs,
            set_params_func=set_params_func,
            train_set=train_set,
            validation_set=validation_set,
        )
    def __init__(self, layers, model_name='sdae', main_dir='sdae/', models_dir='models/',
                 data_dir='data/', summary_dir='logs/',
                 enc_act_func=[tf.nn.tanh], dec_act_func=[None], loss_func=['cross_entropy'], num_epochs=[10],
                 batch_size=[10], dataset='mnist', opt=['gradient_descent'], l2reg=[5e-4],
                 learning_rate=[0.01], momentum=0.5, finetune_dropout=1, corr_type=['none'],
                 corr_frac=[0.], verbose=1, finetune_loss_func='softmax_cross_entropy', finetune_act_func=tf.nn.relu,
                 finetune_opt='gradient_descent', finetune_learning_rate=0.001, finetune_num_epochs=10,
                 finetune_batch_size=20, do_pretrain=False):
        """
        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder. [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder. [tf.nn.tanh, tf.nn.sigmoid, None]
        :param finetune_loss_func: Loss function for the softmax layer. string, default 'softmax_cross_entropy'. Supported: ['softmax_cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning. float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning. int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning. int, default 20
        :param corr_type: Type of input corruption. string, default 'none'. ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy. int, default 1
        :param do_pretrain: True: uses variables from pretraining, False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function, otherwise
        # locals() would also pass the variables defined below to expand_args().
        # expand_args() takes every parameter that was passed as a list and
        # expands it to one value per layer whenever the list is shorter than
        # the number of layers.
        expanded_args = utilities.expand_args(**locals())

        SupervisedModel.__init__(self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(loss_func=finetune_loss_func, learning_rate=finetune_learning_rate,
                                             dropout=finetune_dropout, num_epochs=finetune_num_epochs,
                                             batch_size=finetune_batch_size, dataset=dataset, opt=finetune_opt,
                                             momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.finetune_act_func = finetune_act_func
        self.verbose = verbose

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights (one per layer)
        self.encoding_b_ = []  # list of arrays of encoding biases (one per layer)

        self.last_W = None
        self.last_b = None

        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(denoising_autoencoder.DenoisingAutoencoder(
                n_components=layer, main_dir=self.main_dir, model_name=self.model_name + '-' + dae_str,
                models_dir=os.path.join(self.models_dir, dae_str), data_dir=os.path.join(self.data_dir, dae_str),
                summary_dir=os.path.join(self.tf_summary_dir, dae_str),
                enc_act_func=expanded_args['enc_act_func'][l], dec_act_func=expanded_args['dec_act_func'][l],
                loss_func=expanded_args['loss_func'][l],
                opt=expanded_args['opt'][l], learning_rate=expanded_args['learning_rate'][l], l2reg=expanded_args['l2reg'][l],
                momentum=self.momentum, corr_type=expanded_args['corr_type'][l], corr_frac=expanded_args['corr_frac'][l],
                verbose=self.verbose, num_epochs=expanded_args['num_epochs'][l], batch_size=expanded_args['batch_size'][l],
                dataset=self.dataset))

            self.autoencoder_graphs.append(tf.Graph())
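
The constructor above relies on utilities.expand_args(**locals()) to turn every list-valued hyperparameter into a per-layer list before the per-layer autoencoders are built. The library's implementation is not shown on this page; the snippet below is a minimal sketch of the assumed behaviour (replicating the first element of a too-short list is an assumption):

def expand_args(layers, **kwargs):
    """Sketch: expand any list argument that does not already have one
    value per layer by repeating its first element (assumed behaviour)."""
    expanded = {'layers': layers}
    for name, value in kwargs.items():
        if isinstance(value, list) and len(value) != len(layers):
            expanded[name] = [value[0] for _ in layers]
        else:
            expanded[name] = value
    return expanded

args = expand_args(layers=[512, 256, 128],
                   enc_act_func=['tanh'],               # expanded to 3 entries
                   learning_rate=[0.01, 0.005, 0.001],  # already one per layer
                   dataset='mnist')                     # non-list: passed through
print(args['enc_act_func'])   # ['tanh', 'tanh', 'tanh']
print(args['learning_rate'])  # [0.01, 0.005, 0.001]
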
Example #11
    def __init__(self, rbm_layers, model_name='srbm', do_pretrain=False, main_dir='srbm/', models_dir='models/', data_dir='data/',
                 summary_dir='logs/', rbm_num_epochs=[10], rbm_gibbs_k=[1], rbm_gauss_visible=False, rbm_stddev=0.1,
                 rbm_batch_size=[10], dataset='mnist', rbm_learning_rate=[0.01], momentum=0.5,
                 finetune_dropout=1, verbose=1, finetune_loss_func='softmax_cross_entropy', finetune_act_func=tf.nn.relu,
                 finetune_opt='gradient_descent', finetune_learning_rate=0.001, finetune_num_epochs=10,
                 finetune_batch_size=20):
        """
        :param rbm_layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer. string, default 'softmax_cross_entropy'. Supported: ['softmax_cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning. float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning. int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning. int, default 20
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy. int, default 1
        :param do_pretrain: True: uses variables from pretraining, False: initialize new variables.
        """

        SupervisedModel.__init__(self, model_name, main_dir, models_dir, data_dir, summary_dir)

        self._initialize_training_parameters(loss_func=finetune_loss_func, learning_rate=finetune_learning_rate,
                                             dropout=finetune_dropout, num_epochs=finetune_num_epochs,
                                             batch_size=finetune_batch_size, dataset=dataset, opt=finetune_opt,
                                             momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = rbm_layers
        self.finetune_act_func = finetune_act_func
        self.verbose = verbose

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights (one per layer)
        self.encoding_b_ = []  # list of arrays of encoding biases (one per layer)

        self.softmax_W = None
        self.softmax_b = None

        rbm_params = {'num_epochs': rbm_num_epochs, 'gibbs_k': rbm_gibbs_k, 'batch_size': rbm_batch_size,
                      'learning_rate': rbm_learning_rate}
        for p in rbm_params:
            if len(rbm_params[p]) != len(rbm_layers):
                # This parameter was not specified per layer, so replicate its first value for all the layers
                rbm_params[p] = [rbm_params[p][0] for _ in rbm_layers]

        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(rbm_layers):
            rbm_str = 'rbm-' + str(l+1)

            if l == 0 and rbm_gauss_visible:
                self.rbms.append(rbm.RBM(
                    model_name=self.model_name + '-' + rbm_str,
                    models_dir=os.path.join(self.models_dir, rbm_str),
                    data_dir=os.path.join(self.data_dir, rbm_str),
                    summary_dir=os.path.join(self.tf_summary_dir, rbm_str),
                    num_hidden=layer, main_dir=self.main_dir, dataset=self.dataset,
                    learning_rate=rbm_params['learning_rate'][l], verbose=self.verbose,
                    num_epochs=rbm_params['num_epochs'][l], batch_size=rbm_params['batch_size'][l],
                    gibbs_sampling_steps=rbm_params['gibbs_k'][l],
                    visible_unit_type='gauss', stddev=rbm_stddev))

            else:
                self.rbms.append(rbm.RBM(
                    model_name=self.model_name + '-' + rbm_str,
                    models_dir=os.path.join(self.models_dir, rbm_str),
                    data_dir=os.path.join(self.data_dir, rbm_str),
                    summary_dir=os.path.join(self.tf_summary_dir, rbm_str),
                    num_hidden=layer, main_dir=self.main_dir, dataset=self.dataset,
                    learning_rate=rbm_params['learning_rate'][l], verbose=self.verbose,
                    num_epochs=rbm_params['num_epochs'][l], batch_size=rbm_params['batch_size'][l],
                    gibbs_sampling_steps=rbm_params['gibbs_k'][l]))

            self.rbm_graphs.append(tf.Graph())
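
The inline loop over rbm_params above plays the same role as expand_args in the stacked-autoencoder constructor: any RBM hyperparameter that was not given one value per layer is replicated from its first value. A small worked example of exactly that loop:

rbm_layers = [512, 256, 128]
rbm_params = {'num_epochs': [10],        # a single value: replicated below
              'gibbs_k': [1, 3, 5],      # already one value per layer: kept
              'batch_size': [10],
              'learning_rate': [0.01]}

for p in rbm_params:
    if len(rbm_params[p]) != len(rbm_layers):
        rbm_params[p] = [rbm_params[p][0] for _ in rbm_layers]

print(rbm_params['num_epochs'])  # [10, 10, 10]
print(rbm_params['gibbs_k'])     # [1, 3, 5]
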
    def __init__(self,
                 layers,
                 model_name='sdae',
                 main_dir='sdae/',
                 models_dir='models/',
                 data_dir='data/',
                 summary_dir='logs/',
                 enc_act_func=[tf.nn.tanh],
                 dec_act_func=[None],
                 loss_func=['cross_entropy'],
                 num_epochs=[10],
                 batch_size=[10],
                 dataset='mnist',
                 opt=['gradient_descent'],
                 l2reg=[5e-4],
                 learning_rate=[0.01],
                 momentum=0.5,
                 finetune_dropout=1,
                 corr_type=['none'],
                 corr_frac=[0.],
                 verbose=1,
                 finetune_loss_func='softmax_cross_entropy',
                 finetune_act_func=tf.nn.relu,
                 finetune_opt='gradient_descent',
                 finetune_learning_rate=0.001,
                 finetune_num_epochs=10,
                 finetune_batch_size=20,
                 do_pretrain=False):
        """
        :param layers: list containing the hidden units for each layer
        :param enc_act_func: Activation function for the encoder. [tf.nn.tanh, tf.nn.sigmoid]
        :param dec_act_func: Activation function for the decoder. [tf.nn.tanh, tf.nn.sigmoid, None]
        :param finetune_loss_func: Loss function for the softmax layer. string, default 'softmax_cross_entropy'. Supported: ['softmax_cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning. float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning. int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning. int, default 20
        :param corr_type: Type of input corruption. string, default 'none'. ["none", "masking", "salt_and_pepper"]
        :param corr_frac: Fraction of the input to corrupt. float, default 0.0
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy. int, default 1
        :param do_pretrain: True: uses variables from pretraining, False: initialize new variables.
        """
        # WARNING! This must be the first expression in the function, otherwise
        # locals() would also pass the variables defined below to expand_args().
        # expand_args() takes every parameter that was passed as a list and
        # expands it to one value per layer whenever the list is shorter than
        # the number of layers.
        expanded_args = utilities.expand_args(**locals())

        SupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                 data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func=finetune_loss_func,
            learning_rate=finetune_learning_rate,
            dropout=finetune_dropout,
            num_epochs=finetune_num_epochs,
            batch_size=finetune_batch_size,
            dataset=dataset,
            opt=finetune_opt,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = layers
        self.finetune_act_func = finetune_act_func
        self.verbose = verbose

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights (one per layer)
        self.encoding_b_ = []  # list of arrays of encoding biases (one per layer)

        self.last_W = None
        self.last_b = None

        self.autoencoders = []
        self.autoencoder_graphs = []

        for l, layer in enumerate(layers):
            dae_str = 'dae-' + str(l + 1)

            self.autoencoders.append(
                denoising_autoencoder.DenoisingAutoencoder(
                    n_components=layer,
                    main_dir=self.main_dir,
                    model_name=self.model_name + '-' + dae_str,
                    models_dir=os.path.join(self.models_dir, dae_str),
                    data_dir=os.path.join(self.data_dir, dae_str),
                    summary_dir=os.path.join(self.tf_summary_dir, dae_str),
                    enc_act_func=expanded_args['enc_act_func'][l],
                    dec_act_func=expanded_args['dec_act_func'][l],
                    loss_func=expanded_args['loss_func'][l],
                    opt=expanded_args['opt'][l],
                    learning_rate=expanded_args['learning_rate'][l],
                    l2reg=expanded_args['l2reg'][l],
                    momentum=self.momentum,
                    corr_type=expanded_args['corr_type'][l],
                    corr_frac=expanded_args['corr_frac'][l],
                    verbose=self.verbose,
                    num_epochs=expanded_args['num_epochs'][l],
                    batch_size=expanded_args['batch_size'][l],
                    dataset=self.dataset))

            self.autoencoder_graphs.append(tf.Graph())
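
Each per-layer denoising autoencoder created in the loop above gets its own models/data/summary sub-directory named after its position in the stack ('dae-1', 'dae-2', ...). The stand-alone snippet below just reproduces that naming scheme with the default directory arguments; the real code joins against the paths resolved by SupervisedModel.__init__:

import os

models_dir, data_dir, summary_dir = 'models/', 'data/', 'logs/'
layers = [512, 256, 128]

for l, layer in enumerate(layers):
    dae_str = 'dae-' + str(l + 1)
    print(os.path.join(models_dir, dae_str),
          os.path.join(data_dir, dae_str),
          os.path.join(summary_dir, dae_str))
# models/dae-1 data/dae-1 logs/dae-1
# models/dae-2 data/dae-2 logs/dae-2
# models/dae-3 data/dae-3 logs/dae-3
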
Example #13
    def __init__(
        self,
        rbm_layers,
        model_name='dbn',
        do_pretrain=False,
        main_dir='dbn/',
        models_dir='models/',
        data_dir='data/',
        summary_dir='logs/',
        rbm_num_epochs=[10],
        rbm_gibbs_k=[1],
        rbm_gauss_visible=False,
        rbm_stddev=0.1,
        rbm_batch_size=[10],
        dataset='mnist',
        rbm_learning_rate=[0.01],
        finetune_dropout=1,
        finetune_loss_func='softmax_cross_entropy',
        finetune_act_func=tf.nn.sigmoid,
        finetune_opt='gradient_descent',
        finetune_learning_rate=0.001,
        finetune_num_epochs=10,
        finetune_batch_size=20,
        verbose=1,
        momentum=0.5,
    ):
        """Constructor.

        :param rbm_layers: list containing the hidden units for each layer
        :param finetune_loss_func: Loss function for the softmax layer.
            string, default 'softmax_cross_entropy'. Supported: ['softmax_cross_entropy', 'mean_squared']
        :param finetune_dropout: dropout parameter
        :param finetune_learning_rate: learning rate for the finetuning.
            float, default 0.001
        :param finetune_act_func: activation function for the finetuning phase
        :param finetune_opt: optimizer for the finetuning phase
        :param finetune_num_epochs: Number of epochs for the finetuning.
            int, default 10
        :param finetune_batch_size: Size of each mini-batch for the finetuning.
            int, default 20
        :param verbose: Level of verbosity. 0 - silent, 1 - print accuracy.
            int, default 1
        :param do_pretrain: True: uses variables from pretraining,
            False: initialize new variables.
        """
        SupervisedModel.__init__(self, model_name, main_dir, models_dir,
                                 data_dir, summary_dir)

        self._initialize_training_parameters(
            loss_func=finetune_loss_func,
            learning_rate=finetune_learning_rate,
            dropout=finetune_dropout,
            num_epochs=finetune_num_epochs,
            batch_size=finetune_batch_size,
            dataset=dataset,
            opt=finetune_opt,
            momentum=momentum)

        self.do_pretrain = do_pretrain
        self.layers = rbm_layers
        self.finetune_act_func = finetune_act_func
        self.verbose = verbose

        # Model parameters
        self.encoding_w_ = []  # list of matrices of encoding weights per layer
        self.encoding_b_ = []  # list of arrays of encoding biases per layer

        self.softmax_W = None
        self.softmax_b = None

        rbm_params = {
            'num_epochs': rbm_num_epochs,
            'gibbs_k': rbm_gibbs_k,
            'batch_size': rbm_batch_size,
            'learning_rate': rbm_learning_rate
        }

        for p in rbm_params:
            if len(rbm_params[p]) != len(rbm_layers):
                # This parameter was not specified per layer,
                # so replicate its first value for all the layers
                rbm_params[p] = [rbm_params[p][0] for _ in rbm_layers]

        self.rbms = []
        self.rbm_graphs = []

        for l, layer in enumerate(rbm_layers):
            rbm_str = 'rbm-' + str(l + 1)

            if l == 0 and rbm_gauss_visible:
                self.rbms.append(
                    rbm.RBM(model_name=self.model_name + '-' + rbm_str,
                            models_dir=os.path.join(self.models_dir, rbm_str),
                            data_dir=os.path.join(self.data_dir, rbm_str),
                            summary_dir=os.path.join(self.tf_summary_dir,
                                                     rbm_str),
                            num_hidden=layer,
                            main_dir=self.main_dir,
                            learning_rate=rbm_params['learning_rate'][l],
                            dataset=self.dataset,
                            verbose=self.verbose,
                            num_epochs=rbm_params['num_epochs'][l],
                            batch_size=rbm_params['batch_size'][l],
                            gibbs_sampling_steps=rbm_params['gibbs_k'][l],
                            visible_unit_type='gauss',
                            stddev=rbm_stddev))

            else:
                self.rbms.append(
                    rbm.RBM(model_name=self.model_name + '-' + rbm_str,
                            models_dir=os.path.join(self.models_dir, rbm_str),
                            data_dir=os.path.join(self.data_dir, rbm_str),
                            summary_dir=os.path.join(self.tf_summary_dir,
                                                     rbm_str),
                            num_hidden=layer,
                            main_dir=self.main_dir,
                            learning_rate=rbm_params['learning_rate'][l],
                            dataset=self.dataset,
                            verbose=self.verbose,
                            num_epochs=rbm_params['num_epochs'][l],
                            batch_size=rbm_params['batch_size'][l],
                            gibbs_sampling_steps=rbm_params['gibbs_k'][l]))

            self.rbm_graphs.append(tf.Graph())
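
The only difference between the two branches above is the first RBM's visible unit type: when rbm_gauss_visible is set, layer 1 models real-valued (Gaussian) visible units with the given rbm_stddev, while every other layer keeps the RBM class's default visible units. A short stand-alone sketch of that per-layer decision:

rbm_layers = [512, 256, 128]
rbm_gauss_visible = True
rbm_stddev = 0.1

for l, layer in enumerate(rbm_layers):
    if l == 0 and rbm_gauss_visible:
        print('rbm-%d: num_hidden=%d, visible_unit_type=gauss, stddev=%.1f'
              % (l + 1, layer, rbm_stddev))
    else:
        print('rbm-%d: num_hidden=%d, visible_unit_type=<RBM default>'
              % (l + 1, layer))
# rbm-1: num_hidden=512, visible_unit_type=gauss, stddev=0.1
# rbm-2: num_hidden=256, visible_unit_type=<RBM default>
# rbm-3: num_hidden=128, visible_unit_type=<RBM default>
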