Example #1
    def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        # One persistent previous gradient and one persistent step size per parameter.
        prev_grads = [shared_zeros(p.shape, name="prev_grad") for p in params]
        prev_steps = [sharedX(np.full(p.shape, self.step_init), name="prev_step") for p in params]
        self.updates = []

        for p, grad, prev_grad, prev_step in zip(params, grads,
                                                  prev_grads, prev_steps):

            # Positive while the gradient keeps the same sign between iterations.
            grad_sgn = prev_grad * grad

            # Same sign: grow the step (capped at step_max);
            # sign flip: shrink it (floored at step_min).
            new_step = K.switch(K.ge(grad_sgn, 0.0),
                                K.minimum(prev_step * self.step_inc, self.step_max),
                                K.maximum(prev_step * self.step_dec, self.step_min))

            self.updates.append((prev_step, new_step))

            # After a sign flip the stored gradient is zeroed, so the next
            # iteration treats it as a sign agreement (iRprop- variant).
            new_grad = K.switch(K.ge(grad_sgn, 0.0), grad, 0.0)
            self.updates.append((prev_grad, new_grad))

            # Step each weight opposite the gradient sign by the adapted step size;
            # a zeroed gradient leaves the weight unchanged this iteration.
            new_p = p - K.sign(new_grad) * new_step
            self.updates.append((p, new_p))

        return self.updates
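
The loop above implements the iRprop- rule: the per-weight step grows by step_inc while the gradient keeps its sign, shrinks by step_dec when the sign flips, and a sign flip also zeroes the stored gradient so that iteration applies no update to the weight. Below is a minimal NumPy sketch of the same rule outside Keras; the hyperparameter values and the helper name irprop_minus_step are illustrative assumptions, not part of the original code.

import numpy as np

# Illustrative hyperparameter values only.
step_init, step_inc, step_dec = 0.01, 1.2, 0.5
step_min, step_max = 1e-6, 1.0

def irprop_minus_step(p, grad, prev_grad, prev_step):
    sign_change = prev_grad * grad
    # Same sign: grow the step; sign flip: shrink it and suppress this update.
    new_step = np.where(sign_change >= 0,
                        np.minimum(prev_step * step_inc, step_max),
                        np.maximum(prev_step * step_dec, step_min))
    new_grad = np.where(sign_change >= 0, grad, 0.0)
    new_p = p - np.sign(new_grad) * new_step
    return new_p, new_grad, new_step

# Toy usage: minimise f(p) = p**2, whose gradient is 2*p.
p = np.array([3.0])
prev_grad = np.zeros_like(p)
step = np.full_like(p, step_init)
for _ in range(100):
    p, prev_grad, step = irprop_minus_step(p, 2.0 * p, prev_grad, step)
print(p)  # oscillates close to 0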
Example #2
def clip_norm(g, c, n):
    ''' Clip gradients: rescale tensor g when the global gradient norm n
    reaches the clipping threshold c. '''
    if c > 0:
        # Only rescale when the norm is at or above the threshold.
        g = K.switch(K.ge(n, c), g * c / n, g)
    return g
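
In the Keras optimizers this helper comes from, clip_norm is applied per gradient tensor against a single global L2 norm computed over all gradients. A hedged sketch of that call site follows; the free-function form and the clipnorm parameter default are assumptions for illustration, since the real code lives inside the optimizer's get_gradients method.

from keras import backend as K

def get_gradients(loss, params, clipnorm=1.0):
    grads = K.gradients(loss, params)
    if clipnorm > 0:
        # One global L2 norm over every gradient tensor; each gradient is then
        # rescaled against it by clip_norm above.
        norm = K.sqrt(sum(K.sum(K.square(g)) for g in grads))
        grads = [clip_norm(g, clipnorm, norm) for g in grads]
    return grads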