Example 1
def get_hcond(self):
    # KDE-based conditional entropy of the input, given the noise layer's
    # variance exp(logvar).
    return kde_condentropy(self.input, K.exp(self.noiselayer.logvar))
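If kde_condentropy measures the entropy of additive isotropic Gaussian noise (an assumption about its semantics, not confirmed by this snippet), it reduces to the closed-form entropy of a d-dimensional Gaussian. A hypothetical reference implementation:

import numpy as np

def gaussian_condentropy(dims, var):
    # H = (dims / 2) * log(2 * pi * e * var), in nats
    return 0.5 * dims * np.log(2.0 * np.pi * np.e * var)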
Example 2
def get_h(self):
    # Total variance is the sum of the noise-layer variance and the
    # KDE-layer (bandwidth) variance.
    totalvar = K.exp(self.noiselayer.logvar) + K.exp(self.kdelayer.logvar)
    return kde_entropy(self.input, totalvar)
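Summing the two variances matches convolving two Gaussian kernels: if the additive noise and the KDE kernel are both Gaussian, the convolution of N(0, a) and N(0, b) is N(0, a + b). A minimal NumPy check of that identity:

import numpy as np

a, b = 0.3, 0.5
s = np.random.normal(0, np.sqrt(a), 100000) + np.random.normal(0, np.sqrt(b), 100000)
print(np.var(s))   # approximately a + b = 0.8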
Example 3
def logsumexp(mx, axis):
    # Numerically stable log-sum-exp along `axis`: shift by the per-slice
    # max before exponentiating so that K.exp never overflows.
    cmax = K.max(mx, axis=axis)
    cmax2 = K.expand_dims(cmax, axis)
    mx2 = mx - cmax2
    return cmax + K.log(K.sum(K.exp(mx2), axis=axis))
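As a quick sanity check (a minimal sketch, assuming a standard Keras backend and SciPy >= 1.0 are available), the helper can be compared against scipy.special.logsumexp on random data:

import numpy as np
from scipy.special import logsumexp as ref_logsumexp
from keras import backend as K

x = np.random.randn(4, 8).astype('float32')
ours = K.eval(logsumexp(K.variable(x), axis=1))
assert np.allclose(ours, ref_logsumexp(x, axis=1), atol=1e-5)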
Example 4
def on_train_begin(self, logs=None):
    # Assumes module-level `import numpy as np` and `from keras import backend as K`.
    # Function returning the activations that feed the KDE layer for a batch x.
    self.nlayerinput = lambda x: K.function([self.model.layers[0].input],
                                            [self.kdelayer.input])([x])[0]
    N, dims = self.entropy_train_data.shape

    Kdists = K.placeholder(ndim=2)   # pairwise distances between activations
    Klogvar = K.placeholder(ndim=0)  # scalar log-variance (KDE bandwidth)

    # Leave-one-out KDE entropy estimate and its gradient w.r.t. the
    # log-variance, compiled once as backend functions.
    lossfunc = K.function([Kdists, Klogvar],
                          [kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar))])
    jacfunc = K.function([Kdists, Klogvar],
                         K.gradients(kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar)),
                                     Klogvar))

    # Scalar objective and Jacobian in the fun(x, *args) form used by
    # scipy-style optimizers.
    def obj(logvar, dists):
        return lossfunc([dists, logvar.flat[0]])[0]

    def jac(logvar, dists):
        return np.atleast_2d(np.array(jacfunc([dists, logvar.flat[0]])))[0]

    self.obj = obj
    self.jac = jac
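The (logvar, dists) signatures of obj and jac match scipy.optimize.minimize's fun(x, *args) convention, which suggests the KDE bandwidth is fit by minimizing the leave-one-out entropy estimate. A hypothetical usage sketch, where cb is an instance of this callback after on_train_begin has run, and both the distance matrix and the starting log-variance are assumptions, not part of the original:

import numpy as np
from scipy.optimize import minimize

N = 100
dists = np.random.rand(N, N)                 # placeholder for real pairwise distances
res = minimize(cb.obj, x0=np.array([-5.0]),  # initial log-variance guess
               jac=cb.jac, args=(dists,))
best_logvar = res.x[0]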
Example 5
def get_noise(self, x):
    # Reparameterized Gaussian noise: scale a standard normal sample by
    # exp(0.5 * logvar), i.e. the standard deviation, so the noise variance
    # is exp(logvar). (Keras 1 API; Keras 2 renames std to stddev.)
    return K.exp(0.5 * self.logvar) * K.random_normal(shape=K.shape(x), mean=0., std=1.)
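A minimal NumPy sketch of the same reparameterization trick: sampling eps ~ N(0, I) and scaling by exp(0.5 * logvar) gives noise with variance exp(logvar), while keeping logvar differentiable in the graph version above. All values below are illustrative:

import numpy as np

logvar = -2.0                                # illustrative learned log-variance
eps = np.random.normal(0.0, 1.0, size=(1000, 3))
noise = np.exp(0.5 * logvar) * eps           # std = exp(logvar / 2)
print(np.var(noise))                         # approximately exp(logvar) ~= 0.135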