def test_verify_AIS(self):
    model = oRBM(input_size=self.input_size,
                 hidden_size=self.hidden_size,
                 beta=self.beta)
    model.W.set_value(self.W)
    model.b.set_value(self.b)
    model.c.set_value(self.c)

    # Brute force: sum the free energy over all 2^input_size binary inputs.
    print("Computing lnZ using brute force (i.e. summing the free energy of all possible $v$)...")
    V = theano.shared(value=cartesian([(0, 1)] * self.input_size, dtype=config.floatX))
    brute_force_lnZ = logsumexp(-model.free_energy(V), 0)
    f_brute_force_lnZ = theano.function([], brute_force_lnZ)

    params_bak = [param.get_value() for param in model.parameters]

    print("Approximating lnZ using AIS...")
    import time
    start = time.time()

    # Create the working dir before the try block so the finally clause
    # never sees an unbound name if mkdtemp itself fails.
    ais_working_dir = tempfile.mkdtemp()
    try:
        result = compute_AIS(model, M=self.nb_samples, betas=self.betas, seed=1234,
                             ais_working_dir=ais_working_dir, force=True)
        logcummean_Z = result['logcummean_Z']
        logcumstd_Z_down = result['logcumstd_Z_down']
        logcumstd_Z_up = result['logcumstd_Z_up']
        std_lnZ = result['std_lnZ']

        print("{0} sec".format(time.time() - start))

        import pylab as plt
        plt.gca().set_xmargin(0.1)
        plt.errorbar(range(1, self.nb_samples+1), logcummean_Z, yerr=[std_lnZ, std_lnZ], fmt='or')
        plt.errorbar(range(1, self.nb_samples+1), logcummean_Z, yerr=[logcumstd_Z_down, logcumstd_Z_up], fmt='ob')
        plt.plot([1, self.nb_samples], [f_brute_force_lnZ()]*2, '--g')
        plt.ticklabel_format(useOffset=False, axis='y')
        plt.show()

        AIS_logZ = logcummean_Z[-1]

        # AIS must not have modified the model's parameters.
        assert_array_equal(params_bak[0], model.W.get_value())
        assert_array_equal(params_bak[1], model.b.get_value())
        assert_array_equal(params_bak[2], model.c.get_value())

        print(np.abs(AIS_logZ - f_brute_force_lnZ()))
        assert_almost_equal(AIS_logZ, f_brute_force_lnZ(), decimal=2)
    finally:
        shutil.rmtree(ais_working_dir)
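# A minimal NumPy sketch of the brute-force baseline used above, assuming the
# standard Bernoulli RBM free energy F(v) = -c.v - sum_j softplus(W_j.v + b_j).
# The oRBM's actual free_energy differs (it accounts for the per-unit penalty),
# so this only illustrates the identity lnZ = logsumexp_v(-F(v)).
import itertools
import numpy as np

def brute_force_lnZ_rbm(W, b, c):
    lnZ_terms = []
    for v in itertools.product((0, 1), repeat=len(c)):
        v = np.array(v, dtype=float)
        # np.logaddexp(0, x) computes softplus(x) = log(1 + exp(x)) stably.
        free_energy = -np.dot(c, v) - np.sum(np.logaddexp(0, np.dot(W, v) + b))
        lnZ_terms.append(-free_energy)

    # logsumexp over all 2^input_size visible configurations.
    m = np.max(lnZ_terms)
    return m + np.log(np.sum(np.exp(np.array(lnZ_terms) - m)))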
def test_gradients_auto_vs_manual(self):
    rng = np.random.RandomState(42)

    batch_size = 5
    input_size = 10
    model = oRBM(input_size=input_size, hidden_size=32, CDk=1, rng=np.random.RandomState(42))

    W = rng.rand(model.hidden_size, model.input_size).astype(theano.config.floatX)
    model.W = theano.shared(value=W, name='W', borrow=True)
    b = rng.rand(model.hidden_size).astype(theano.config.floatX)
    model.b = theano.shared(value=b, name='b', borrow=True)
    c = rng.rand(model.input_size).astype(theano.config.floatX)
    model.c = theano.shared(value=c, name='c', borrow=True)
    params = [model.W, model.b, model.c]

    chain_start = T.matrix('start')
    chain_end = T.matrix('end')

    chain_start_value = (rng.rand(batch_size, input_size) > 0.5).astype(theano.config.floatX)
    chain_end_value = (rng.rand(batch_size, input_size) > 0.5).astype(theano.config.floatX)
    chain_start.tag.test_value = chain_start_value
    chain_end.tag.test_value = chain_end_value

    ### Computing gradients using automatic differentiation ###
    cost = T.mean(model.free_energy(chain_start)) - T.mean(model.free_energy(chain_end))
    gparams_auto = T.grad(cost, params, consider_constant=[chain_end])

    ### Computing gradients manually ###
    h = RBM.sample_h_given_v(model, chain_start, return_probs=True)
    _h = RBM.sample_h_given_v(model, chain_end, return_probs=True)
    icdf = model.icdf_z_given_v(chain_start)
    _icdf = model.icdf_z_given_v(chain_end)

    if model.penalty == "softplus_bi":
        penalty = model.beta * T.nnet.sigmoid(model.b)
    elif model.penalty == "softplus0":  # was mistakenly `self.penalty`
        penalty = model.beta * T.nnet.sigmoid(0)

    grad_W = (T.dot(chain_end.T, _h*_icdf) - T.dot(chain_start.T, h*icdf)).T / batch_size
    grad_b = T.mean((_h-penalty)*_icdf - (h-penalty)*icdf, axis=0)
    grad_c = T.mean(chain_end - chain_start, axis=0)

    gparams_manual = [grad_W, grad_b, grad_c]
    grad_W.name, grad_b.name, grad_c.name = "grad_W", "grad_b", "grad_c"

    # The manual derivation must match Theano's autodiff for every parameter.
    for gparam_auto, gparam_manual in zip(gparams_auto, gparams_manual):
        param1 = gparam_auto.eval({chain_start: chain_start_value, chain_end: chain_end_value})
        param2 = gparam_manual.eval({chain_start: chain_start_value, chain_end: chain_end_value})
        assert_array_almost_equal(param1, param2, err_msg=gparam_manual.name)
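# The test above validates a hand-derived gradient against autodiff. A related
# minimal sketch of the same idea, using central finite differences instead of
# a symbolic framework: `f` and `grad_f` are hypothetical stand-ins, not part
# of the iRBM API.
import numpy as np

def check_gradient(f, grad_f, x, eps=1e-5, tol=1e-6):
    approx = np.zeros_like(x)
    for i in range(x.size):
        e = np.zeros_like(x)
        e.flat[i] = eps
        # Central difference: (f(x+eps) - f(x-eps)) / (2*eps).
        approx.flat[i] = (f(x + e) - f(x - e)) / (2 * eps)

    assert np.allclose(grad_f(x), approx, atol=tol)

# e.g. check_gradient(lambda x: (x**2).sum(), lambda x: 2*x, np.random.rand(4))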
def setUp(self):
    self.input_size = 4
    self.hidden_size = 3
    self.beta = 1.01
    self.batch_size = 100

    rng = np.random.RandomState(42)
    self.W = rng.randn(self.hidden_size, self.input_size).astype(config.floatX)
    self.b = rng.randn(self.hidden_size).astype(config.floatX)
    self.c = rng.randn(self.input_size).astype(config.floatX)

    self.model = oRBM(input_size=self.input_size,
                      hidden_size=self.hidden_size,
                      beta=self.beta)
    self.model.W.set_value(self.W)
    self.model.b.set_value(self.b)
    self.model.c.set_value(self.c)
def model_factory(model_name, input_size, hyperparams):
    # Set the learning rate method that will be used.
    if hyperparams["ConstantLearningRate"] is not None:
        infos = hyperparams["ConstantLearningRate"].split()
        lr = float(infos[0])
        lr_method = ConstantLearningRate(lr=lr)
    elif hyperparams["ADAGRAD"] is not None:
        infos = hyperparams["ADAGRAD"].split()
        lr = float(infos[0])
        eps = float(infos[1]) if len(infos) > 1 else 1e-6
        lr_method = ADAGRAD(lr=lr, eps=eps)
    else:
        raise ValueError("The update rule is mandatory!")

    # Set the regularization method that will be used.
    regularization_method = NoRegularization()
    if hyperparams["L1Regularization"] is not None and hyperparams["L1Regularization"] != 0:
        lambda_factor = float(hyperparams["L1Regularization"])
        regularization_method = L1Regularization(lambda_factor)
    elif hyperparams["L2Regularization"] is not None and hyperparams["L2Regularization"] != 0:
        lambda_factor = float(hyperparams["L2Regularization"])
        regularization_method = L2Regularization(lambda_factor)

    # Set the contrastive divergence method to use.
    CD_method = ContrastiveDivergence()
    if hyperparams["PCD"]:
        CD_method = PersistentCD(input_size, nb_particles=hyperparams['batch_size'])

    rng = np.random.RandomState(hyperparams["seed"])

    # Build the model.
    if model_name == "rbm":
        from iRBM.models.rbm import RBM
        model = RBM(input_size=input_size,
                    hidden_size=hyperparams["size"],
                    learning_rate=lr_method,
                    regularization=regularization_method,
                    CD=CD_method,
                    CDk=hyperparams["cdk"],
                    rng=rng)
    elif model_name == "orbm":
        from iRBM.models.orbm import oRBM
        model = oRBM(input_size=input_size,
                     hidden_size=hyperparams["size"],
                     beta=hyperparams["beta"],
                     learning_rate=lr_method,
                     regularization=regularization_method,
                     CD=CD_method,
                     CDk=hyperparams["cdk"],
                     rng=rng)
    elif model_name == "irbm":
        from iRBM.models.irbm import iRBM
        model = iRBM(input_size=input_size,
                     hidden_size=hyperparams["size"],
                     beta=hyperparams["beta"],
                     learning_rate=lr_method,
                     regularization=regularization_method,
                     CD=CD_method,
                     CDk=hyperparams["cdk"],
                     rng=rng)
    else:
        # Guard against returning an unbound `model` for an unknown name.
        raise ValueError("Unknown model: {0}".format(model_name))

    return model
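# A hedged usage sketch: the hyperparams dict below is hypothetical and only
# mirrors the keys model_factory reads; in the real pipeline these values come
# from the experiment's command-line parser.
hyperparams = {
    "ConstantLearningRate": "0.01",  # parsed with .split(), so a string
    "ADAGRAD": None,
    "L1Regularization": None,
    "L2Regularization": 0.001,
    "PCD": True,
    "batch_size": 64,
    "seed": 1234,
    "size": 32,     # number of hidden units
    "beta": 1.01,   # only used by oRBM/iRBM
    "cdk": 1,
}
model = model_factory("orbm", input_size=784, hyperparams=hyperparams)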