def call(self, values):
    """Reduces per-example values to a scalar metric value.

    Arguments:
        values: array-like
            Per-example values.

    Returns:
        A scalar corresponding to the metric value.
    """
    if self.reduction == SUM:
        return M.sum(values)
    if self.reduction in [WEIGHTED_MEAN, SUM_OVER_BATCH_SIZE]:
        return M.sum(values) / values.size
    raise NotImplementedError(f"{self.reduction} not implemented.")
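# Usage sketch for the reduction logic above. Assumption: `M` behaves like
# numpy, so numpy stands in for it here to keep the snippet self-contained.
import numpy as np

values = np.array([0.5, 1.0, 1.5, 2.0])
print(np.sum(values))                # SUM -> 5.0
print(np.sum(values) / values.size)  # WEIGHTED_MEAN / SUM_OVER_BATCH_SIZE -> 1.25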
def call(self, x):
    """Softmax activation function.

    Arguments:
        x: array-like
            Input tensor.

    Returns:
        Tensor, output of the softmax transformation.
    """
    # Subtract the row-wise max for numerical stability; softmax is
    # shift-invariant, so the result is unchanged.
    exps = M.exp(x - M.max(x, axis=-1, keepdims=True))
    return exps / M.sum(exps, axis=-1, keepdims=True)
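# Why the max is subtracted first: the result is identical, but the exponents
# stay bounded, so M.exp cannot overflow on large logits. A self-contained
# sketch with numpy standing in for `M` (an assumption about the backend):
import numpy as np

x = np.array([[1000.0, 1001.0, 1002.0]])              # naive np.exp(x) overflows
exps = np.exp(x - np.max(x, axis=-1, keepdims=True))  # exponents are now <= 0
probs = exps / np.sum(exps, axis=-1, keepdims=True)
print(probs)               # [[0.09003057 0.24472847 0.66524096]]
print(probs.sum(axis=-1))  # [1.]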
def __call__(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Arguments:
        y_true: array-like
            Ground truth values.
        y_pred: array-like
            The predicted values.

    Returns:
        Scalar loss: the mean of the per-example losses returned by `call`.
    """
    losses = self.call(y_true, y_pred)
    return M.sum(losses) / losses.size
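# Illustration of the template-method pattern used here: subclasses implement
# `call` to return one loss per example, and `__call__` reduces them to their
# mean. The `MeanSquaredError` subclass and the numpy-backed stand-in `Loss`
# are assumptions for the sake of a runnable sketch, not part of the source.
import numpy as np

class Loss:
    def __call__(self, y_true, y_pred):
        losses = self.call(y_true, y_pred)
        return np.sum(losses) / losses.size

class MeanSquaredError(Loss):
    def call(self, y_true, y_pred):
        # One squared-error value per example.
        return np.mean(np.square(y_true - y_pred), axis=-1)

mse = MeanSquaredError()
print(mse(np.array([[0.0, 1.0]]), np.array([[0.5, 0.5]])))  # 0.25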
def backward(self, loss):
    # Gradient w.r.t. the inputs, computed before the kernel is updated.
    _loss = M.dot(loss, M.transpose(self.kernel))
    if not self.trainable:
        return _loss
    kernel_loss = M.dot(M.transpose(self.inputs), loss)
    if self.kernel_regularizer:
        # Regularizer gradients are taken w.r.t. the parameters themselves,
        # not w.r.t. the accumulated gradient.
        kernel_loss += self.kernel_regularizer.gradient(self.kernel)
    bias_loss = M.sum(loss, axis=0, keepdims=True)
    if self.bias_regularizer:
        bias_loss += self.bias_regularizer.gradient(self.bias)
    self.kernel = self.kernel_optimizer.update(kernel_loss, self.kernel)
    self.bias = self.bias_optimizer.update(bias_loss, self.bias)
    return _loss
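# Numerical check of the dense-layer gradients used in backward(), with numpy
# standing in for `M`. For y = x @ W + b and upstream gradient d = dL/dy:
#     dL/dx = d @ W.T,   dL/dW = x.T @ d,   dL/db = d.sum(axis=0)
# A finite-difference sketch for the kernel term; the variable names here are
# illustrative only.
import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(4, 3))   # layer inputs
W = rng.normal(size=(3, 2))   # kernel
b = rng.normal(size=(1, 2))   # bias
d = rng.normal(size=(4, 2))   # stand-in upstream gradient dL/dy

def loss(W_):
    # Scalar whose gradient w.r.t. the layer output is exactly `d`.
    return np.sum((x @ W_ + b) * d)

eps = 1e-6
numeric = np.zeros_like(W)
for idx in np.ndindex(*W.shape):
    Wp = W.copy()
    Wp[idx] += eps
    numeric[idx] = (loss(Wp) - loss(W)) / eps

print(np.allclose(numeric, x.T @ d, atol=1e-4))  # True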
def call(self, x):
    regularization = self.l2 * M.sum(M.square(x))
    return regularization
def call(self, x):
    regularization = 0.0
    regularization += self.l1 * M.sum(M.abs(x))
    regularization += self.l2 * M.sum(M.square(x))
    return regularization
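# Sketch of both penalties and the gradients that backward() consumes, with
# numpy in place of `M`: d/dx of l2 * sum(x^2) is 2 * l2 * x, and d/dx of
# l1 * sum(|x|) is l1 * sign(x). That `gradient` follows these formulas is an
# assumption about the regularizer API, not confirmed by the source.
import numpy as np

l1, l2 = 0.01, 0.01
x = np.array([[-2.0, 0.5], [1.0, -0.25]])

penalty = l1 * np.sum(np.abs(x)) + l2 * np.sum(np.square(x))
grad = l1 * np.sign(x) + 2 * l2 * x
print(penalty)
print(grad)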