Example #1
File: optimizers.py  Project: lxastro/dlx
    def get_updates(self, params, constraints, loss):
        # Adam update rule (Kingma & Ba, 2014). K is the backend module
        # (Theano/TensorFlow-style ops); np is numpy, imported at module level.
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations + 1.)]

        t = self.iterations + 1
        # Bias-corrected step size for iteration t.
        lr_t = self.lr * K.sqrt(1 - K.pow(self.beta_2, t)) / (1 - K.pow(self.beta_1, t))

        for p, g, c in zip(params, grads, constraints):
            # zero init of 1st moment (running mean of gradients)
            m = K.variable(np.zeros(K.get_value(p).shape))
            # zero init of 2nd moment (running uncentered variance of gradients)
            v = K.variable(np.zeros(K.get_value(p).shape))

            m_t = (self.beta_1 * m) + (1 - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1 - self.beta_2) * K.square(g)
            p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)

            self.updates.append((m, m_t))
            self.updates.append((v, v_t))
            self.updates.append((p, c(p_t)))  # apply constraints
        return self.updates
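
For reference, the loop above builds the standard Adam step: decaying averages of the gradient and its square, with a bias-corrected step size. Below is a minimal NumPy sketch of one such update; adam_step and its default hyperparameters are illustrative, not part of the dlx API:

import numpy as np

def adam_step(p, g, m, v, t, lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
    # Bias-corrected step size, matching lr_t in the snippet above.
    lr_t = lr * np.sqrt(1 - beta_2 ** t) / (1 - beta_1 ** t)
    m = beta_1 * m + (1 - beta_1) * g          # updated 1st-moment estimate (m_t)
    v = beta_2 * v + (1 - beta_2) * g ** 2     # updated 2nd-moment estimate (v_t)
    p = p - lr_t * m / (np.sqrt(v) + epsilon)  # parameter update (p_t)
    return p, m, v

Starting from m = v = 0 and incrementing t on each call reproduces, numerically, the symbolic update pairs the method appends to self.updates.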
Example #2
    def get_updates(self, params, constraints, loss):
        # Adamax update rule: the infinity-norm variant of Adam.
        grads = self.get_gradients(loss, params)
        self.updates = [(self.iterations, self.iterations + 1.)]

        t = self.iterations + 1
        # Only the 1st moment needs bias correction; the infinity norm does not.
        lr_t = self.lr / (1 - K.pow(self.beta_1, t))

        for p, g, c in zip(params, grads, constraints):
            # zero init of 1st moment (running mean of gradients)
            m = K.variable(np.zeros(K.get_value(p).shape))
            # zero init of exponentially weighted infinity norm
            u = K.variable(np.zeros(K.get_value(p).shape))

            m_t = (self.beta_1 * m) + (1 - self.beta_1) * g
            u_t = K.maximum(self.beta_2 * u, K.abs(g))
            p_t = p - lr_t * m_t / (u_t + self.epsilon)

            self.updates.append((m, m_t))
            self.updates.append((u, u_t))
            self.updates.append((p, c(p_t)))  # apply constraints
        return self.updates
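
For comparison with the Adam sketch above, the same Adamax step in plain NumPy; adamax_step and its defaults are illustrative only:

import numpy as np

def adamax_step(p, g, m, u, t, lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-8):
    lr_t = lr / (1 - beta_1 ** t)          # bias correction for the 1st moment only
    m = beta_1 * m + (1 - beta_1) * g      # updated 1st-moment estimate (m_t)
    u = np.maximum(beta_2 * u, np.abs(g))  # exponentially weighted infinity norm (u_t)
    p = p - lr_t * m / (u + epsilon)       # parameter update (p_t)
    return p, m, u

Because u is a running maximum rather than an average, it needs no bias correction of its own; the epsilon term mirrors the snippet above (the original Adamax formulation omits it).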