def predict(self, xReal, xInft):
    '''Predict outputs; xReal and xInft should be rank-2 tensors.'''
    # Affine step x @ W + b, computed on paired (real, inft) tensors.
    yReal, yInft = td.add(
        *td.tensordot(xReal, xInft, self.weight_, self.wInft_),
        self.bias_, self.bInft_)
    # Swap the last two axes of the inft component before the activation.
    yInft = tf.transpose(yInft, [0, 2, 1])
    return self.outputActivation_(yReal, yInft)
def partial_fit(self, xReal, xInft, yReal, yInft):
    '''Perform a single round of training without resetting the model.

    Returns the average loss and its gradient.
    '''
    pred = self.trainGrad_(xReal, xInft, yReal, yInft)
    reg = self.regularizeGrad_()
    loss = td.add(*pred, *reg)
    # Row 0 of the gradient updates the bias; the remaining rows update
    # the weights.
    self.weight_ = self.wUpdate_(
        self.weight_, tf.reshape(loss[1][1:], self.weight_.shape))
    self.bias_ = self.bUpdate_(
        self.bias_, tf.reshape(loss[1][0:1], self.bias_.shape))
    return loss
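# The td.* calls above follow a paired convention: each operand is passed as
# a (real, inft) pair and each call returns a (real, inft) tuple, which is
# why intermediate results are splatted with *. partial_fit also assumes the
# gradient loss[1] packs the bias gradient in row 0 and the weight gradient
# in the rows after it. As a hedged sketch, wUpdate_/bUpdate_ could be as
# simple as plain gradient descent; the actual update rules are defined
# elsewhere and may differ (_sgdUpdate and learningRate are hypothetical).

def _sgdUpdate(param, grad, learningRate=0.01):
    '''Plain gradient-descent step: param - lr * grad. Illustrative only.'''
    return param - learningRate * grad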
def jsDivergence(targetReal, targetInft, predReal, predInft):
    '''Elementwise Jensen-Shannon divergence between target and prediction.'''
    # Mixture m = 0.5 * (target + pred).
    m = td.multiply(0.5, 0.0,
                    *td.add(targetReal, targetInft, predReal, predInft))
    # KL-style terms target * log(target / m) and pred * log(pred / m).
    tpart = td.multiply(targetReal, targetInft,
                        *td.log(*td.divide(targetReal, targetInft, *m)))
    ppart = td.multiply(predReal, predInft,
                        *td.log(*td.divide(predReal, predInft, *m)))
    return td.multiply(0.5, 0.0, *td.add(*tpart, *ppart))
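# jsDivergence follows JS(P, Q) = 0.5*KL(P || M) + 0.5*KL(Q || M) with the
# mixture M = 0.5*(P + Q); the terms are elementwise, so any reduction over
# elements is assumed to happen in the caller. For comparison, the same
# quantity on ordinary real-only tensors in plain TensorFlow looks like this
# (_jsDivergenceReal is a hypothetical helper, not part of td):

def _jsDivergenceReal(p, q):
    '''Elementwise JS divergence terms on plain tensors. Illustrative only.'''
    m = 0.5 * (p + q)
    return 0.5 * (p * tf.math.log(p / m) + q * tf.math.log(q / m))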
def logistic(real, inft):
    '''Logistic sigmoid 1 / (1 + exp(-x)) on a (real, inft) pair.'''
    return td.divide(1.0, 0.0, *td.add(1.0, 0.0, *td.exp(-real, -inft)))
def softplus(real, inft):
    '''Softplus log(1 + exp(x)) on a (real, inft) pair.'''
    return td.log(*td.add(*td.exp(real, inft), 1.0, 0.0))
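# On the real component, logistic and softplus match the textbook forms
# sigma(x) = 1 / (1 + exp(-x)) and log(1 + exp(x)). Assuming td arithmetic
# reduces to ordinary elementwise arithmetic when every inft part is zero,
# the real parts should agree with TensorFlow's built-ins; a sanity-check
# sketch under that assumption (_checkActivations is a hypothetical helper):

def _checkActivations(x):
    '''Compare real parts against tf.math.sigmoid/softplus. Illustrative only.'''
    zeros = tf.zeros_like(x)
    logisticReal, _ = logistic(x, zeros)
    softplusReal, _ = softplus(x, zeros)
    tf.debugging.assert_near(logisticReal, tf.math.sigmoid(x))
    tf.debugging.assert_near(softplusReal, tf.math.softplus(x))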