Example no. 1
0
    def __init__(self, opts):
        """Initialise the DBN-specific state from run options.

        Loads the run parameters via ``load_params`` and builds the
        ``dbn.DeepBeliefNetwork`` model configured from them.
        """
        # Load parameters from the supplied options.
        self.params = self.load_params(opts)

        # Shorthand for the parameter bundle used below.
        p = self.params

        # Build the DBN model; every hyperparameter comes from the
        # loaded parameter set except the fixed data/summary dirs.
        self.model = dbn.DeepBeliefNetwork(
            models_dir=p.out_dir,
            data_dir='data',
            summary_dir='summary',
            model_name=p.gen_model,
            do_pretrain=p.gen_do_pretrain,
            rbm_layers=p.rbm_layers,
            dataset='custom',
            main_dir=p.out_dir,
            finetune_act_func=p.fine_act_func,
            rbm_learning_rate=p.rbm_learn_rate,
            verbose=p.verbose,
            rbm_num_epochs=p.rbm_epochs,
            rbm_gibbs_k=p.rbm_gibbs_k,
            rbm_gauss_visible=p.rbm_gauss_vis,
            rbm_stddev=p.rbm_stddev,
            momentum=p.gen_momentum,
            rbm_batch_size=p.rbm_batch_size,
            finetune_learning_rate=p.fine_learn_rate,
            finetune_num_epochs=p.fine_epochs,
            finetune_batch_size=p.fine_batch_size,
            finetune_opt=p.fine_minimiser,
            finetune_loss_func=p.fine_loss_func,
            finetune_dropout=p.fine_dropout)
Example no. 2
0
    # Create the object
    # Map the activation-function name given on the command line (FLAGS)
    # to the actual callable via the project's utilities helper.
    finetune_act_func = utilities.str2actfunc(FLAGS.finetune_act_func)

    # Build the Deep Belief Network. Scalar options come directly from
    # FLAGS; names like models_dir, rbm_layers, rbm_learning_rate,
    # rbm_num_epochs, rbm_gibbs_k and rbm_batch_size are defined outside
    # this fragment — presumably parsed from FLAGS earlier in the
    # enclosing function (confirm against the surrounding code).
    srbm = dbn.DeepBeliefNetwork(
        models_dir=models_dir,
        data_dir=data_dir,
        summary_dir=summary_dir,
        model_name=FLAGS.model_name,
        do_pretrain=FLAGS.do_pretrain,
        rbm_layers=rbm_layers,
        dataset=FLAGS.dataset,
        main_dir=FLAGS.main_dir,
        finetune_act_func=finetune_act_func,
        rbm_learning_rate=rbm_learning_rate,
        verbose=FLAGS.verbose,
        rbm_num_epochs=rbm_num_epochs,
        rbm_gibbs_k=rbm_gibbs_k,
        rbm_gauss_visible=FLAGS.rbm_gauss_visible,
        rbm_stddev=FLAGS.rbm_stddev,
        momentum=FLAGS.momentum,
        rbm_batch_size=rbm_batch_size,
        finetune_learning_rate=FLAGS.finetune_learning_rate,
        finetune_num_epochs=FLAGS.finetune_num_epochs,
        finetune_batch_size=FLAGS.finetune_batch_size,
        finetune_opt=FLAGS.finetune_opt,
        finetune_loss_func=FLAGS.finetune_loss_func,
        finetune_dropout=FLAGS.finetune_dropout)

    # Fit the model (unsupervised pretraining)
    # Layer-wise RBM pretraining on the training/validation data
    # (trX/vlX come from outside this fragment), run only when the
    # --do_pretrain flag is set.
    if FLAGS.do_pretrain:
        srbm.pretrain(trX, vlX)