import numpy as np


def accuracy(y: np.ndarray, y_pred: np.ndarray) -> float:
    # Fraction of predictions that match the true labels.
    y, y_pred = check_dims(y, y_pred)
    return np.sum(y == y_pred) / y.shape[0]
def recall(y: np.ndarray, y_pred: np.ndarray) -> float:
    # True-positive rate: TP / (TP + FN). The parentheses are required
    # because `&` binds tighter than `==` in Python.
    y, y_pred = check_dims(y, y_pred)
    tp = np.sum((y == 1) & (y_pred == 1))
    fn = np.sum((y == 1) & (y_pred != 1))
    return tp / (tp + fn)
def fbeta(y: np.ndarray, y_pred: np.ndarray, beta: float) -> float:
    # Weighted harmonic mean of precision and recall; beta > 1 weights recall
    # more heavily, beta = 1 gives the F1 score.
    y, y_pred = check_dims(y, y_pred)
    p = precision(y, y_pred)
    r = recall(y, y_pred)
    return (1 + beta**2) * p * r / (beta**2 * p + r)
def precision(y: np.ndarray, y_pred: np.ndarray) -> float:
    # Positive predictive value: TP / (TP + FP).
    y, y_pred = check_dims(y, y_pred)
    tp = np.sum((y == 1) & (y_pred == 1))
    fp = np.sum((y != 1) & (y_pred == 1))
    return tp / (tp + fp)
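# A minimal sanity check for the classification metrics above (illustrative
# only; it assumes `check_dims` validates the 1-D arrays and returns them
# unchanged). With y = [1, 1, 0, 0] and y_pred = [1, 0, 1, 0] there is one
# true positive, one false positive and one false negative, so precision =
# recall = 0.5, and fbeta with beta = 1 (the F1 score) is 0.5 as well.
def _check_classification_metrics() -> None:
    y_true = np.array([1, 1, 0, 0])
    y_hat = np.array([1, 0, 1, 0])
    assert accuracy(y_true, y_hat) == 0.5
    assert precision(y_true, y_hat) == 0.5
    assert recall(y_true, y_hat) == 0.5
    assert fbeta(y_true, y_hat, beta=1.0) == 0.5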
def mean_squared_error(y: np.ndarray, y_pred: np.ndarray) -> float:
    # Mean of the squared residuals.
    y, y_pred = check_dims(y, y_pred)
    return np.mean(np.square(y_pred - y))
def quantile_loss(y: np.ndarray, y_pred: np.ndarray, t: float = 0.5) -> float:
    # Pinball loss for quantile t: the residual (y - y_pred) is weighted by
    # (t - 1) when y < y_pred and by t when y >= y_pred.
    y, y_pred = check_dims(y, y_pred)
    n = y.shape[0]
    loss = np.sum(((t - 1) * (y < y_pred) + t * (y >= y_pred)) * (y - y_pred)) / n
    return loss
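# Illustrative check of the pinball loss above: at t = 0.5 over- and
# under-prediction are penalised symmetrically, so the loss equals half the
# mean absolute error (again assuming `check_dims` returns validated 1-D
# arrays unchanged).
def _check_quantile_loss() -> None:
    y_true = np.array([1.0, 2.0, 3.0, 4.0])
    y_hat = np.array([1.5, 1.5, 3.5, 5.0])
    half_mae = 0.5 * np.mean(np.abs(y_hat - y_true))
    assert np.isclose(quantile_loss(y_true, y_hat, t=0.5), half_mae)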
def r2(y: np.ndarray, y_pred: np.ndarray) -> float:
    # Coefficient of determination: 1 - SS_res / SS_tot, i.e. 1 - MSE / Var(y).
    y, y_pred = check_dims(y, y_pred)
    return 1 - mean_squared_error(y, y_pred) / y.var()
def mean_absolute_error(y: np.ndarray, y_pred: np.ndarray) -> float:
    # Mean of the absolute residuals.
    y, y_pred = check_dims(y, y_pred)
    return np.mean(np.abs(y_pred - y))
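# Small worked example for the regression metrics (illustrative; assumes
# `check_dims` passes validated 1-D arrays through). For y = [1, 2, 3] and
# y_pred = [2, 2, 4] the residuals are [1, 0, 1], so MSE = MAE = 2/3, and
# with Var(y) = 2/3 the r2 score is 1 - (2/3) / (2/3) = 0.
def _check_regression_metrics() -> None:
    y_true = np.array([1.0, 2.0, 3.0])
    y_hat = np.array([2.0, 2.0, 4.0])
    assert np.isclose(mean_squared_error(y_true, y_hat), 2 / 3)
    assert np.isclose(mean_absolute_error(y_true, y_hat), 2 / 3)
    assert np.isclose(r2(y_true, y_hat), 0.0)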