コード例 #1
 def predict(self, xReal, xInft):
     '''Forward pass of the layer on a dual-number input.

     X should be rank 2 tensor. xReal carries the real components and
     xInft the matching infinitesimal (dual) components.

     Returns whatever pair self.outputActivation_ produces from the
     affine transform of the input.
     '''
     # Affine transform in dual arithmetic: (x . W) + b, where the weights
     # and bias each carry separate real and infinitesimal parts.
     yReal, yInft = td.add(
         *td.tensordot(xReal, xInft, self.weight_, self.wInft_), self.bias_,
         self.bInft_)
     # NOTE(review): the infinitesimal part is permuted to swap its last two
     # axes — presumably td.tensordot emits the dual axes in a different
     # order than the activation expects; confirm against td's definition.
     yInft = tf.transpose(yInft, [0, 2, 1])
     return self.outputActivation_(yReal, yInft)
コード例 #2
 def partial_fit(self, xReal, xInft, yReal, yInft):
     '''Perform single round of training without reset. Returns average loss and gradient.'''
     # Data-loss and regularizer terms, combined with dual addition; both
     # come back as (value, gradient)-style pairs from the td arithmetic.
     pred = self.trainGrad_(xReal, xInft, yReal, yInft)
     reg = self.regularizeGrad_()
     loss = td.add(*pred, *reg)
     # loss[1] holds a flattened gradient: element 0 feeds the bias update,
     # the remainder the weight update (inferred from the slicing below —
     # confirm against trainGrad_'s parameter layout).
     self.weight_ = self.wUpdate_(
         self.weight_, tf.reshape(loss[1][1:], self.weight_.shape))
     self.bias_ = self.bUpdate_(self.bias_,
                                tf.reshape(loss[1][0:1], self.bias_.shape))
     return loss
コード例 #3
def mae( targetReal, targetInft, predReal, predInft ):
    '''Absolute-error loss |target - pred| on dual-number tensors.'''
    residual = td.subtract(targetReal, targetInft, predReal, predInft)
    return td.abs(*residual)
コード例 #4
def jsDivergence( targetReal, targetInft, predReal, predInft ):
    '''Jensen-Shannon divergence between target and prediction (dual tensors).

    JS(t, p) = 0.5 * (KL(t || m) + KL(p || m)) with mixture m = 0.5 * (t + p).
    '''
    mixture = td.multiply(0.5, 0.0,
                          *td.add(targetReal, targetInft, predReal, predInft))
    klTarget = td.multiply(targetReal, targetInft,
                           *td.log(*td.divide(targetReal, targetInft, *mixture)))
    klPred = td.multiply(predReal, predInft,
                         *td.log(*td.divide(predReal, predInft, *mixture)))
    return td.multiply(0.5, 0.0, *td.add(*klTarget, *klPred))
コード例 #5
def klDivergence( targetReal, targetInft, predReal, predInft ):
    '''Pointwise KL-divergence term t * log(t / p) on dual-number tensors.'''
    ratio = td.divide(targetReal, targetInft, predReal, predInft)
    return td.multiply(targetReal, targetInft, *td.log(*ratio))
コード例 #6
def crossEntropy( targetReal, targetInft, predReal, predInft ):
    '''Pointwise cross-entropy term -t * log(p) on dual-number tensors.'''
    negTarget = td.neg(targetReal, targetInft)
    logPred = td.log(predReal, predInft)
    return td.multiply(*negTarget, *logPred)
コード例 #7
def hinge( targetReal, targetInft, predReal, predInft ):
    '''Hinge loss max(0, 1 - sign(target) * pred) on dual-number tensors.

    Only the real part of the target contributes (via its sign); the
    infinitesimal target part is ignored, matching the original contract.
    '''
    signs = tf.math.sign( targetReal )
    margin = td.subtract( 1.0, 0.0,
                          *td.multiply( predReal, predInft, signs, 0.0 ) )
    positive = tf.greater( margin[ 0 ], 0.0 )
    return td.where( positive, *margin, 0.0, 0.0 )
コード例 #8
def huber( targetReal, targetInft, predReal, predInft, delta=1.0 ):
    '''Huber loss on dual-number tensors.

    Quadratic 0.5 * diff**2 where |diff| <= delta, and linear
    delta * (|diff| - 0.5 * delta) beyond that.

    Fix: the original branched and scaled on the *signed* difference, so a
    large negative error kept the quadratic penalty (never switching to the
    linear regime) and the linear branch went negative. Both the branch
    condition and the linear term now use |diff|, per the standard
    definition.
    '''
    diff = td.subtract( targetReal, targetInft, predReal, predInft )
    absDiff = td.abs( *diff )
    # Linear regime: delta * (|diff| - delta / 2)
    linear = td.multiply( *td.subtract( *absDiff, 0.5 * delta, 0.0 ), delta, 0.0 )
    # Quadratic regime: diff**2 / 2
    quadratic = td.multiply( *td.power( *diff, 2.0, 0.0 ), 0.5, 0.0 )
    return td.where( tf.greater( absDiff[ 0 ], delta ), *linear, *quadratic )
コード例 #9
def mse( targetReal, targetInft, predReal, predInft ):
    '''Squared-error loss (target - pred)**2 on dual-number tensors.'''
    return td.power(*td.subtract(targetReal, targetInft, predReal, predInft),
                    2.0, 0.0)
コード例 #10
def softmax(real, inft, t=1):
    '''Temperature-scaled softmax over the last axis of a dual-number tensor.

    Fix: subtract each row's maximum (of the real part) before
    exponentiating so large logits no longer overflow exp(). Softmax is
    invariant to a constant shift, and the softmax Jacobian annihilates
    constant vectors, so treating the shift as a constant (zero
    infinitesimal part) leaves both the real and dual outputs
    mathematically unchanged.
    '''
    shift = tf.reduce_max(real, axis=-1, keepdims=True)
    raw = td.exp(*td.divide(*td.subtract(real, inft, shift, 0.0), t, 0.0))
    cs = td.sum(*raw, -1)
    return td.divide(*raw, *cs)
コード例 #11
def tanh(real, inft):
    '''Hyperbolic tangent applied to a dual-number tensor.'''
    activated = td.tanh(real, inft)
    return activated
コード例 #12
def logistic(real, inft):
    '''Logistic sigmoid 1 / (1 + exp(-x)) on a dual-number tensor.'''
    negExp = td.exp(-real, -inft)
    denominator = td.add(1.0, 0.0, *negExp)
    return td.divide(1.0, 0.0, *denominator)
コード例 #13
def elu(real, inft):
    '''ELU activation: x where x > 0, exp(x) - 1 otherwise (dual tensors).'''
    negativeBranch = td.subtract(*td.exp(real, inft), 1.0, 0.0)
    positive = tf.greater(real, 0.0)
    return td.where(positive, real, inft, *negativeBranch)
コード例 #14
def leaky(real, inft, p=0.01):
    '''Leaky ReLU: x where x > 0, p * x otherwise (dual tensors).'''
    scaled = td.multiply(real, inft, p, 0.0)
    positive = tf.greater(real, 0.0)
    return td.where(positive, real, inft, *scaled)
コード例 #15
def relu(real, inft):
    '''ReLU: x where x > 0, 0 otherwise (dual tensors).'''
    positive = tf.greater(real, 0.0)
    return td.where(positive, real, inft, 0.0, 0.0)
コード例 #16
def softplus(real, inft):
    '''Softplus log(1 + exp(x)) on a dual-number tensor.'''
    shifted = td.add(*td.exp(real, inft), 1.0, 0.0)
    return td.log(*shifted)