Example #1
0
 def get_gradients(self, loss, params):
     """Return the gradients of `loss` with respect to `params`.

     If `self.clipnorm` is set and positive, every gradient is rescaled by
     the global L2 norm over all gradient tensors; if `self.clipvalue` is
     set and positive, every gradient element is clipped to
     [-clipvalue, clipvalue].
     """
     gradients = K.gradients(loss, params)
     if hasattr(self, 'clipnorm') and self.clipnorm > 0:
         # Global L2 norm across every gradient tensor.
         total_norm = K.sqrt(sum(K.sum(K.square(g)) for g in gradients))
         gradients = [clip_norm(g, self.clipnorm, total_norm)
                      for g in gradients]
     if hasattr(self, 'clipvalue') and self.clipvalue > 0:
         bound = self.clipvalue
         gradients = [K.clip(g, -bound, bound) for g in gradients]
     return gradients
Example #2
0
File: optimizers.py  Project: lxastro/dlx
 def get_gradients(self, loss, params):
     """Compute gradients of `loss` w.r.t. `params`, with optional clipping.

     Norm clipping (positive `self.clipnorm`) rescales each gradient using
     the combined L2 norm of all gradients; value clipping (positive
     `self.clipvalue`) bounds each element of each gradient.
     """
     grads = K.gradients(loss, params)
     # getattr with a 0 default is equivalent to the hasattr-and-positive test.
     clipnorm = getattr(self, 'clipnorm', 0)
     if clipnorm > 0:
         norm = K.sqrt(sum(K.sum(K.square(grad)) for grad in grads))
         grads = [clip_norm(grad, clipnorm, norm) for grad in grads]
     clipvalue = getattr(self, 'clipvalue', 0)
     if clipvalue > 0:
         grads = [K.clip(grad, -clipvalue, clipvalue) for grad in grads]
     return grads
Example #3
0
File: core.py  Project: lxastro/dlx
 def output(self, train=False):
     """Merge the layer's 'input' tensor along axis 1.

     Supported values of `self.mode`: 'ave' (mean), 'sum', and 'mul'
     (element-wise product). Any other mode raises Exception.
     """
     X = self.get_input('input')(train)
     if self.mode == 'ave':
         return K.mean(X, axis=1)
     elif self.mode == 'sum':
         return K.sum(X, axis=1)
     elif self.mode == 'mul':
         return K.prod(X, axis=1)
     raise Exception('Unknown merge mode')
Example #4
0
 def output(self, train=False):
     """Reduce the 'input' tensor over axis 1 according to `self.mode`.

     'ave' averages, 'sum' sums, and 'mul' multiplies element-wise;
     an unrecognized mode raises Exception.
     """
     X = self.get_input('input')(train)
     mode = self.mode
     if mode == 'ave':
         merged = K.mean(X, axis=1)
     elif mode == 'sum':
         merged = K.sum(X, axis=1)
     elif mode == 'mul':
         merged = K.prod(X, axis=1)
     else:
         raise Exception('Unknown merge mode')
     return merged
Example #5
0
 def __call__(self, p):
     """Project `p` onto unit L2 norm along its last axis."""
     last_axis_norm = K.sqrt(K.sum(K.square(p), axis=-1, keepdims=True))
     return p / last_axis_norm
Example #6
0
 def __call__(self, p):
     """Rescale `p` so each L2 norm taken over axis 0 is at most `self.m`."""
     norms = K.sqrt(K.sum(K.square(p), axis=0))
     target = K.clip(norms, 0, self.m)
     # The 1e-7 epsilon guards against division by zero for zero-norm slices.
     scale = target / (1e-7 + norms)
     return p * scale
Example #7
0
 def __call__(self, p):
     """Normalize `p` to unit L2 norm over the final axis."""
     squared = K.square(p)
     denom = K.sqrt(K.sum(squared, axis=-1, keepdims=True))
     return p / denom
Example #8
0
 def __call__(self, p):
     """Constrain the axis-0 L2 norms of `p` to the maximum `self.m`.

     Norms already within the limit are (up to the epsilon) left unchanged;
     larger ones are scaled down to `self.m`.
     """
     current = K.sqrt(K.sum(K.square(p), axis=0))
     capped = K.clip(current, 0, self.m)
     # Epsilon 1e-7 avoids 0/0 when a slice has zero norm.
     return p * (capped / (1e-7 + current))