def get_updates(self, params, constraints, loss):
    grads = self.get_gradients(loss, params)
    self.updates = [(self.iterations, self.iterations + 1.)]

    t = self.iterations + 1
    # bias-corrected learning rate; no correction is applied to u_t
    lr_t = self.lr / (1. - K.pow(self.beta_1, t))

    for p, g, c in zip(params, grads, constraints):
        # zero init of 1st moment
        m = K.variable(np.zeros(K.get_value(p).shape))
        # zero init of exponentially weighted infinity norm
        u = K.variable(np.zeros(K.get_value(p).shape))

        m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
        u_t = K.maximum(self.beta_2 * u, K.abs(g))
        p_t = p - lr_t * m_t / (u_t + self.epsilon)

        self.updates.append((m, m_t))
        self.updates.append((u, u_t))
        self.updates.append((p, c(p_t)))  # apply constraints
    return self.updates
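This get_updates graph implements the Adamax rule: Adam with the second moment replaced by an exponentially weighted infinity norm. The same update can be traced step by step in plain NumPy. The sketch below is illustrative only, not the Keras code path: the variable names (alpha, beta1, beta2, eps, theta) and the toy objective f(theta) = theta**2 are assumptions made for the example.

import numpy as np

# Hypothetical hyperparameters and a toy quadratic objective.
alpha, beta1, beta2, eps = 0.05, 0.9, 0.999, 1e-8
theta = np.array([1.0, -3.0])
m = np.zeros_like(theta)   # 1st moment, zero-initialized
u = np.zeros_like(theta)   # exponentially weighted infinity norm

for t in range(1, 501):
    g = 2.0 * theta                       # gradient of theta**2
    m = beta1 * m + (1.0 - beta1) * g     # biased 1st moment estimate
    u = np.maximum(beta2 * u, np.abs(g))  # infinity-norm accumulator
    lr_t = alpha / (1.0 - beta1 ** t)     # bias-corrected step size
    theta = theta - lr_t * m / (u + eps)

print(theta)  # both components end close to 0

Because u_t is a running maximum of gradient magnitudes rather than a squared-moment average, the effective step size is roughly bounded by alpha per iteration, which is why only lr_t needs a bias correction.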
def mean_absolute_error(y_true, y_pred):
    return K.mean(K.abs(y_pred - y_true), axis=-1)
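A quick numerical check of the same computation in plain NumPy rather than the Keras backend; the toy arrays are made up for illustration:

import numpy as np

y_true = np.array([[1.0, 2.0, 3.0]])
y_pred = np.array([[1.5, 2.0, 2.0]])
# mean of |error| over the last axis: (0.5 + 0.0 + 1.0) / 3 = 0.5
print(np.mean(np.abs(y_pred - y_true), axis=-1))  # [0.5]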
def mean_absolute_percentage_error(y_true, y_pred):
    diff = K.abs((y_true - y_pred) /
                 K.clip(K.abs(y_true), K.epsilon(), np.inf))
    return 100. * K.mean(diff, axis=-1)
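The K.clip call guards the division: any target whose magnitude falls below K.epsilon() is clipped up to it, so a zero in y_true cannot produce a division by zero. A NumPy sketch of the same computation, with toy values assumed and 1e-7 standing in for the backend epsilon:

import numpy as np

eps = 1e-7  # stand-in for K.epsilon()
y_true = np.array([[2.0, 5.0, 4.0]])
y_pred = np.array([[1.0, 5.5, 5.0]])
# per-element relative errors: 0.5, 0.1, 0.25
diff = np.abs((y_true - y_pred) / np.clip(np.abs(y_true), eps, np.inf))
print(100. * np.mean(diff, axis=-1))  # [28.333...]

Note that the clip only prevents the division from failing: a genuinely zero target still yields an enormous percentage term (|error| / 1e-7), so MAPE remains a poor choice when targets can be zero.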