def on_train_begin(self, logs={}):
    # Assumes `from keras import backend as K` and `import numpy as np` at module level.
    # Maps raw network inputs to the activations feeding the KDE layer.
    self.nlayerinput = lambda x: K.function([self.model.layers[0].input],
                                            [self.kdelayer.input])([x])[0]
    N, dims = self.entropy_train_data.shape

    # Symbolic inputs: a pairwise-distance matrix and a scalar kernel log-variance.
    Kdists = K.placeholder(ndim=2)
    Klogvar = K.placeholder(ndim=0)

    # Leave-one-out KDE entropy and its gradient w.r.t. the kernel log-variance.
    loo_entropy = kde_entropy_from_dists_loo(Kdists, N, dims, K.exp(Klogvar))
    lossfunc = K.function([Kdists, Klogvar], [loo_entropy])
    jacfunc = K.function([Kdists, Klogvar], K.gradients(loo_entropy, Klogvar))

    def obj(logvar, dists):
        # Scalar objective; `logvar` arrives as a 1-d array from the optimizer.
        return lossfunc([dists, logvar.flat[0]])[0]

    def jac(logvar, dists):
        # Gradient of the objective w.r.t. the log-variance, as a 1-d array.
        return np.atleast_2d(np.array(jacfunc([dists, logvar.flat[0]])))[0]

    self.obj = obj
    self.jac = jac
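
# --- Hedged usage sketch (assumption, not part of the original source) ---
# The (logvar, dists) signatures and the logvar.flat[0] indexing above look
# tailored to scipy.optimize.minimize, which passes the current parameter as
# a 1-d ndarray. The helper below is an illustrative guess at how self.obj /
# self.jac might be consumed; `dists` is assumed to be a precomputed pairwise
# squared-distance matrix over the KDE-layer activations.
import numpy as np
import scipy.optimize

def optimize_kde_logvar(obj, jac, dists, x0=-5.0):
    # Minimize the leave-one-out KDE entropy over the kernel log-variance.
    res = scipy.optimize.minimize(obj, x0=np.array([x0]), jac=jac, args=(dists,))
    return res.x.flat[0]
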
def on_train_begin(self, logs={}):
    # Assumes `from keras import backend as K` and `import numpy as np` at module level.
    modelobj = self.model.model
    inputs = (modelobj.inputs + modelobj.targets + modelobj.sample_weights
              + [K.learning_phase()])

    # Total training loss and its gradient w.r.t. the noise layer's log-variance.
    lossfunc = K.function(inputs, [modelobj.total_loss])
    jacfunc = K.function(inputs, K.gradients(modelobj.total_loss, self.noiselayer.logvar))
    sampleweights = np.ones(len(self.traindata.X))

    def obj(logvar):
        # Evaluate the loss at a candidate log-variance, then restore the old value.
        v = K.get_value(self.noiselayer.logvar)
        K.set_value(self.noiselayer.logvar, logvar.flat[0])
        r = lossfunc([self.traindata.X, self.traindata.Y, sampleweights, 1])[0]
        K.set_value(self.noiselayer.logvar, v)
        return r

    def jac(logvar):
        # Same set/evaluate/restore pattern for the gradient; the trailing 1
        # selects the training phase for K.learning_phase().
        v = K.get_value(self.noiselayer.logvar)
        K.set_value(self.noiselayer.logvar, logvar.flat[0])
        r = np.atleast_2d(np.array(jacfunc([self.traindata.X, self.traindata.Y,
                                            sampleweights, 1])))[0]
        K.set_value(self.noiselayer.logvar, v)
        return r

    self.obj = obj
    self.jac = jac
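
# --- Hedged usage sketch (assumption, not part of the original source) ---
# Here obj/jac take only the log-variance, so they plug straight into
# scipy.optimize.minimize. The get/set/restore pattern above suggests the
# callback retunes the noise variance periodically (e.g. once per epoch);
# `retune_noise_logvar` is a hypothetical helper showing that flow.
import numpy as np
import scipy.optimize
from keras import backend as K

def retune_noise_logvar(callback, x0=None):
    # Start from the noise layer's current log-variance unless given one.
    if x0 is None:
        x0 = K.get_value(callback.noiselayer.logvar)
    res = scipy.optimize.minimize(callback.obj, x0=np.array([x0]), jac=callback.jac)
    # Install the optimized log-variance back into the noise layer.
    K.set_value(callback.noiselayer.logvar, res.x.flat[0])
    return res.x.flat[0]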