def __init__(self, layers, original_shape, name='convnet',
             loss_func='softmax_cross_entropy', num_epochs=10,
             batch_size=10, opt='sgd', learning_rate=0.01,
             momentum=0.5, dropout=0.5):
    """Constructor.

    :param layers: string used to build the model.
        This string is a comma-separated specification of the layers.
        Supported values:
            conv2d-FX-FY-Z-S: 2d convolution with Z feature maps as output
                and FX x FY filters. S is the strides size
            maxpool-X: max pooling on the previous layer. X is the size of
                the max pooling
            full-X: fully connected layer with X units
            softmax: softmax layer
        For example:
            conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,full-128,full-128,softmax
    :param original_shape: original shape of the images in the dataset
    :param dropout: Dropout parameter
    """
    assert layers.split(",")[-1] == "softmax"

    SupervisedModel.__init__(self, name)

    self.loss_func = loss_func
    self.learning_rate = learning_rate
    self.opt = opt
    self.num_epochs = num_epochs
    self.batch_size = batch_size
    self.momentum = momentum

    self.loss = Loss(self.loss_func)
    self.trainer = Trainer(
        opt, learning_rate=learning_rate, momentum=momentum)

    self.layers = layers
    self.original_shape = original_shape
    self.dropout = dropout

    self.W_vars = None
    self.B_vars = None

    self.accuracy = None
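# Usage sketch (not from the source): how the layer-spec string above might
# be used to build a model. The class name `ConvolutionalNetwork` and its
# import path are assumptions for illustration only.
model = ConvolutionalNetwork(  # hypothetical class name
    layers='conv2d-5-5-32-1,maxpool-2,conv2d-5-5-64-1,maxpool-2,'
           'full-1024,softmax',
    original_shape=[28, 28, 1],  # e.g. MNIST: 28x28 grayscale images
    opt='adam',
    learning_rate=1e-4,
    batch_size=64,
    dropout=0.5)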
def __init__(self, name='lr', loss_func='cross_entropy',
             learning_rate=0.01, num_epochs=10, batch_size=10):
    """Constructor."""
    SupervisedModel.__init__(self, name)

    self.loss_func = loss_func
    self.learning_rate = learning_rate
    self.num_epochs = num_epochs
    self.batch_size = batch_size

    self.loss = Loss(self.loss_func)

    # Computational graph nodes
    self.input_data = None
    self.input_labels = None

    self.W_ = None
    self.b_ = None

    self.accuracy = None
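# Usage sketch (assumption, not from the source): instantiating the model
# and training it through the SupervisedModel interface. The class name
# `LogisticRegression` and the `fit` method are assumed from typical
# scikit-learn-style supervised models.
lr = LogisticRegression(name='lr', loss_func='cross_entropy',
                        learning_rate=0.01, num_epochs=25, batch_size=128)
# lr.fit(train_X, train_Y, val_X, val_Y)  # assumed training entry point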
def pretrain(self, train_set, validation_set=None):
    """Perform unsupervised pretraining of the DBN."""
    self.do_pretrain = True

    def set_params_func(rbmmachine, rbmgraph):
        params = rbmmachine.get_parameters(graph=rbmgraph)
        self.encoding_w_.append(params['W'])
        self.encoding_b_.append(params['bh_'])

    return SupervisedModel.pretrain_procedure(
        self, self.rbms, self.rbm_graphs, set_params_func=set_params_func,
        train_set=train_set, validation_set=validation_set)
def pretrain(self, train_set, validation_set=None):
    """Perform unsupervised pretraining of the autoencoder."""
    self.do_pretrain = True

    def set_params_func(autoenc, autoencgraph):
        params = autoenc.get_parameters(graph=autoencgraph)
        self.encoding_w_.append(params['enc_w'])
        self.encoding_b_.append(params['enc_b'])

    return SupervisedModel.pretrain_procedure(
        self, self.autoencoders, self.autoencoder_graphs,
        set_params_func=set_params_func, train_set=train_set,
        validation_set=validation_set)
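# Both pretrain() methods above delegate to SupervisedModel.pretrain_procedure
# with a callback that harvests each layer's learned parameters. What follows
# is a minimal sketch of the assumed greedy layer-wise loop, not the library's
# actual implementation; the `fit` and `transform` method names are
# assumptions for illustration.
def pretrain_procedure(self, models, graphs, set_params_func, train_set,
                       validation_set=None):
    next_train = train_set
    for model, graph in zip(models, graphs):
        # Train layer l unsupervised on the previous layer's encoding.
        model.fit(next_train, validation_set, graph=graph)
        # Store this layer's weights/biases for later finetuning.
        set_params_func(model, graph)
        # Encode the data to feed layer l + 1.
        next_train = model.transform(next_train, graph=graph)
    return next_train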
def __init__(self, layers, name='sdae', enc_act_func=[tf.nn.tanh],
             dec_act_func=[None], loss_func=['cross_entropy'],
             num_epochs=[10], batch_size=[10], opt=['sgd'],
             regcoef=[5e-4], learning_rate=[0.01], momentum=0.5,
             finetune_dropout=1, corr_type=['none'], corr_frac=[0.],
             finetune_loss_func='softmax_cross_entropy',
             finetune_act_func=tf.nn.relu, finetune_opt='sgd',
             finetune_learning_rate=0.001, finetune_num_epochs=10,
             finetune_batch_size=20, do_pretrain=False):
    """Constructor.

    :param layers: list containing the hidden units for each layer
    :param enc_act_func: Activation function for the encoder.
        [tf.nn.tanh, tf.nn.sigmoid]
    :param dec_act_func: Activation function for the decoder.
        [tf.nn.tanh, tf.nn.sigmoid, None]
    :param finetune_loss_func: Loss function for the softmax layer.
        string, default 'softmax_cross_entropy'.
        ['softmax_cross_entropy', 'mse']
    :param finetune_dropout: dropout parameter
    :param finetune_learning_rate: learning rate for the finetuning.
        float, default 0.001
    :param finetune_act_func: activation function for the finetuning phase
    :param finetune_opt: optimizer for the finetuning phase
    :param finetune_num_epochs: Number of epochs for the finetuning.
        int, default 10
    :param finetune_batch_size: Size of each mini-batch for the finetuning.
        int, default 20
    :param corr_type: Type of input corruption. string, default 'none'.
        ["none", "masking", "salt_and_pepper"]
    :param corr_frac: Fraction of the input to corrupt. float, default 0.0
    :param do_pretrain: True: uses variables from pretraining,
        False: initialize new variables.
    """
    # WARNING! This must be the first expression in the function or else it
    # will send other variables to expanded_args().
    # This function takes all the passed parameters that are lists and
    # expands them to the number of layers, if the length of the list is
    # less than the number of layers.
    expanded_args = utilities.expand_args(**locals())

    SupervisedModel.__init__(self, name)

    self.loss_func = finetune_loss_func
    self.learning_rate = finetune_learning_rate
    self.opt = finetune_opt
    self.num_epochs = finetune_num_epochs
    self.batch_size = finetune_batch_size
    self.momentum = momentum
    self.dropout = finetune_dropout

    self.loss = Loss(self.loss_func)
    self.trainer = Trainer(
        finetune_opt, learning_rate=finetune_learning_rate,
        momentum=momentum)

    self.do_pretrain = do_pretrain
    self.layers = layers
    self.finetune_act_func = finetune_act_func

    # Model parameters
    self.encoding_w_ = []  # list of matrices of encoding weights per layer
    self.encoding_b_ = []  # list of arrays of encoding biases per layer

    self.last_W = None
    self.last_b = None

    self.autoencoders = []
    self.autoencoder_graphs = []

    for l, layer in enumerate(layers):
        dae_str = 'dae-' + str(l + 1)

        self.autoencoders.append(
            denoising_autoencoder.DenoisingAutoencoder(
                n_components=layer,
                name=self.name + '-' + dae_str,
                enc_act_func=expanded_args['enc_act_func'][l],
                dec_act_func=expanded_args['dec_act_func'][l],
                loss_func=expanded_args['loss_func'][l],
                opt=expanded_args['opt'][l],
                regcoef=expanded_args['regcoef'][l],
                learning_rate=expanded_args['learning_rate'][l],
                momentum=self.momentum,
                corr_type=expanded_args['corr_type'][l],
                corr_frac=expanded_args['corr_frac'][l],
                num_epochs=expanded_args['num_epochs'][l],
                batch_size=expanded_args['batch_size'][l]))

        self.autoencoder_graphs.append(tf.Graph())
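# Sketch of the assumed semantics of utilities.expand_args (illustrative,
# not the library's actual implementation): any list-valued argument whose
# length does not match len(layers) is broadcast by repeating its first
# element once per layer, mirroring the rbm_params expansion in the DBN
# constructor below.
def expand_args(**args):
    layers = args['layers']
    expanded = {}
    for key, val in args.items():
        if isinstance(val, list) and len(val) != len(layers):
            val = [val[0] for _ in layers]  # broadcast one value per layer
        expanded[key] = val
    return expanded

# Example: layers=[512, 256, 128], enc_act_func=[tf.nn.tanh]
#   -> enc_act_func becomes [tf.nn.tanh, tf.nn.tanh, tf.nn.tanh]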
def __init__(self, rbm_layers, name='dbn', do_pretrain=False,
             rbm_num_epochs=[10], rbm_gibbs_k=[1],
             rbm_gauss_visible=False, rbm_stddev=0.1, rbm_batch_size=[10],
             rbm_learning_rate=[0.01], finetune_dropout=1,
             finetune_loss_func='softmax_cross_entropy',
             finetune_act_func=tf.nn.sigmoid, finetune_opt='sgd',
             finetune_learning_rate=0.001, finetune_num_epochs=10,
             finetune_batch_size=20, momentum=0.5):
    """Constructor.

    :param rbm_layers: list containing the hidden units for each layer
    :param finetune_loss_func: Loss function for the softmax layer.
        string, default 'softmax_cross_entropy'.
        ['softmax_cross_entropy', 'mse']
    :param finetune_dropout: dropout parameter
    :param finetune_learning_rate: learning rate for the finetuning.
        float, default 0.001
    :param finetune_act_func: activation function for the finetuning phase
    :param finetune_opt: optimizer for the finetuning phase
    :param finetune_num_epochs: Number of epochs for the finetuning.
        int, default 10
    :param finetune_batch_size: Size of each mini-batch for the finetuning.
        int, default 20
    :param do_pretrain: True: uses variables from pretraining,
        False: initialize new variables.
    """
    SupervisedModel.__init__(self, name)

    self.loss_func = finetune_loss_func
    self.learning_rate = finetune_learning_rate
    self.opt = finetune_opt
    self.num_epochs = finetune_num_epochs
    self.batch_size = finetune_batch_size
    self.momentum = momentum
    self.dropout = finetune_dropout

    self.loss = Loss(self.loss_func)
    self.trainer = Trainer(
        finetune_opt, learning_rate=finetune_learning_rate,
        momentum=momentum)

    self.do_pretrain = do_pretrain
    self.layers = rbm_layers
    self.finetune_act_func = finetune_act_func

    # Model parameters
    self.encoding_w_ = []  # list of matrices of encoding weights per layer
    self.encoding_b_ = []  # list of arrays of encoding biases per layer

    self.softmax_W = None
    self.softmax_b = None

    rbm_params = {
        'num_epochs': rbm_num_epochs,
        'gibbs_k': rbm_gibbs_k,
        'batch_size': rbm_batch_size,
        'learning_rate': rbm_learning_rate,
    }

    for p in rbm_params:
        if len(rbm_params[p]) != len(rbm_layers):
            # The parameter was not specified layer-wise by the user:
            # replicate its first value for every layer.
            rbm_params[p] = [rbm_params[p][0] for _ in rbm_layers]

    self.rbms = []
    self.rbm_graphs = []

    for l, layer in enumerate(rbm_layers):
        rbm_str = 'rbm-' + str(l + 1)

        if l == 0 and rbm_gauss_visible:
            self.rbms.append(
                rbm.RBM(
                    name=self.name + '-' + rbm_str,
                    num_hidden=layer,
                    learning_rate=rbm_params['learning_rate'][l],
                    num_epochs=rbm_params['num_epochs'][l],
                    batch_size=rbm_params['batch_size'][l],
                    gibbs_sampling_steps=rbm_params['gibbs_k'][l],
                    visible_unit_type='gauss',
                    stddev=rbm_stddev))
        else:
            self.rbms.append(
                rbm.RBM(
                    name=self.name + '-' + rbm_str,
                    num_hidden=layer,
                    learning_rate=rbm_params['learning_rate'][l],
                    num_epochs=rbm_params['num_epochs'][l],
                    batch_size=rbm_params['batch_size'][l],
                    gibbs_sampling_steps=rbm_params['gibbs_k'][l]))

        self.rbm_graphs.append(tf.Graph())
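# Usage sketch (assumption, not from the source): a two-layer DBN with
# greedy RBM pretraining followed by supervised finetuning. The class name
# `DeepBeliefNetwork` and the `fit` method are assumptions for illustration.
dbn = DeepBeliefNetwork(
    rbm_layers=[512, 256],        # two stacked RBMs
    do_pretrain=True,
    rbm_num_epochs=[10],          # single value: broadcast to every layer
    rbm_learning_rate=[0.01],
    finetune_learning_rate=0.001,
    finetune_num_epochs=10)
# dbn.pretrain(train_X)           # unsupervised, layer by layer
# dbn.fit(train_X, train_Y, val_X, val_Y)  # supervised finetuning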