def _symmetric_mean_absolute_percentage_error_update(
    preds: Tensor,
    target: Tensor,
    epsilon: float = 1.17e-06,
) -> Tuple[Tensor, int]:
    """Return the summed symmetric absolute percentage error and the observation count.

    Checks for same shape of input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
        epsilon: Lower bound for the denominator; avoids ZeroDivisionError.
    """
    _check_same_shape(preds, target)
    # Symmetric denominator: |target| + |preds|, clamped away from zero.
    denominator = torch.clamp(torch.abs(target) + torch.abs(preds), min=epsilon)
    per_error = torch.abs(preds - target) / denominator
    return 2 * torch.sum(per_error), target.numel()
def _crps_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, float, float]:
    """Return the per-batch statistics required to compute the Continuous Ranked Probability Score.

    ``preds`` carries the ensemble members in its second dimension, i.e. has shape
    ``(batch, ensemble, ...)``, while ``target`` has shape ``(batch, ...)``.

    Args:
        preds: Ensemble predictions.
        target: Ground truth observations.

    Returns:
        Tuple of (batch size, summed mean absolute member/observation deviation,
        ensemble pair-sum scale factor, summed pairwise member distances).
    """
    # second dimension of preds should be number of ensemble members
    _check_same_shape(preds[:, 0], target)
    batch_size = target.shape[0]
    n_ensemble_members = preds.shape[1]

    # Broadcast the observation across the ensemble dimension. This replaces the
    # original per-member copy loop (`observation_inflated[:, i, :] = target`),
    # which implicitly required `preds` to be exactly 3-dimensional; broadcasting
    # works for any trailing shape, including plain `(batch, ensemble)` inputs.
    diff = (1 / n_ensemble_members) * torch.sum(torch.abs(preds - target.unsqueeze(1)))

    # Fair-CRPS scaling over unordered member pairs; a single-member ensemble has
    # no pairs, so it gets a neutral factor of 1.
    ensemble_sum_scale_factor = (
        1 / (n_ensemble_members * (n_ensemble_members - 1)) if n_ensemble_members > 1 else 1.0
    )
    ensemble_sum = 0
    # Sum |x_i - x_j| over unordered pairs (the i == j term contributes zero).
    for i in range(n_ensemble_members):
        for j in range(i, n_ensemble_members):
            ensemble_sum += torch.sum(torch.abs(preds[:, i] - preds[:, j]))
    return batch_size, diff, ensemble_sum_scale_factor, ensemble_sum
def _mean_absolute_percentage_error_update(
    preds: Tensor,
    target: Tensor,
    epsilon: float = 1.17e-06,
) -> Tuple[Tensor, int]:
    """Return the summed absolute percentage error and the observation count.

    Checks for same shape of input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
        epsilon: Lower bound for target magnitudes; any |target| below epsilon is
            clamped to epsilon (avoids ZeroDivisionError).
    """
    _check_same_shape(preds, target)
    scale = torch.clamp(torch.abs(target), min=epsilon)
    per_error = torch.abs(preds - target) / scale
    return torch.sum(per_error), target.numel()
def _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
    """Return the summed squared log error and the observation count for MSLE."""
    _check_same_shape(preds, target)
    log_diff = torch.log1p(preds) - torch.log1p(target)
    return torch.sum(log_diff * log_diff), target.numel()
def _mean_squared_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
    """Return the summed squared error and the observation count for MSE."""
    _check_same_shape(preds, target)
    residual = preds - target
    return torch.sum(residual * residual), target.numel()
def _sam_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Validate and pass through the inputs for the Spectral Angle Mapper computation.

    Verifies matching dtype and shape, a BxCxHxW layout and more than one channel.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    if preds.dtype != target.dtype:
        raise TypeError(
            "Expected `preds` and `target` to have the same data type."
            f" Got preds: {preds.dtype} and target: {target.dtype}."
        )
    _check_same_shape(preds, target)
    if preds.ndim != 4:
        raise ValueError(
            "Expected `preds` and `target` to have BxCxHxW shape."
            f" Got preds: {preds.shape} and target: {target.shape}."
        )
    if preds.shape[1] <= 1 or target.shape[1] <= 1:
        # SAM is an angle between spectral vectors, so a single channel is degenerate.
        raise ValueError(
            "Expected channel dimension of `preds` and `target` to be larger than 1."
            f" Got preds: {preds.shape[1]} and target: {target.shape[1]}."
        )
    return preds, target
def _mean_relative_error_update(
        preds: torch.Tensor, target: torch.Tensor) -> Tuple[torch.Tensor, int]:
    """Return the summed relative error and the observation count.

    Zero-valued targets are replaced by 1 in the denominator to avoid division by zero.
    """
    _check_same_shape(preds, target)
    denominator = target.clone()
    denominator[target == 0] = 1
    return torch.sum(torch.abs((preds - target) / denominator)), target.numel()
def _cosine_similarity_update(
    preds: Tensor,
    target: Tensor,
) -> Tuple[Tensor, Tensor]:
    """Check shapes and cast both inputs to float for the cosine-similarity computation."""
    _check_same_shape(preds, target)
    return preds.float(), target.float()
def _explained_variance_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, Tensor, Tensor, Tensor]:
    """Return per-column sums needed to compute Explained Variance."""
    _check_same_shape(preds, target)
    error = target - preds
    return (
        preds.size(0),
        torch.sum(error, dim=0),
        torch.sum(error**2, dim=0),
        torch.sum(target, dim=0),
        torch.sum(target**2, dim=0),
    )
def _ssim_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Validate dtype, shape and 4D (BxCxHxW) layout of the SSIM inputs and pass them through."""
    if preds.dtype != target.dtype:
        raise TypeError(
            "Expected `preds` and `target` to have the same data type."
            f" Got preds: {preds.dtype} and target: {target.dtype}."
        )
    _check_same_shape(preds, target)
    if preds.ndim != 4:
        raise ValueError(
            "Expected `preds` and `target` to have BxCxHxW shape."
            f" Got preds: {preds.shape} and target: {target.shape}."
        )
    return preds, target
def _mean_squared_log_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
    """Return variables required to compute Mean Squared Log Error.

    Checks for same shape of tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    _check_same_shape(preds, target)
    squared_log_diff = (torch.log1p(preds) - torch.log1p(target)) ** 2
    return torch.sum(squared_log_diff), target.numel()
def _spearman_corrcoef_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Validate the Spearman-correlation inputs and squeeze them down to 1D."""
    if preds.dtype != target.dtype:
        raise TypeError(
            "Expected `preds` and `target` to have the same data type."
            f" Got preds: {preds.dtype} and target: {target.dtype}."
        )
    _check_same_shape(preds, target)
    preds, target = preds.squeeze(), target.squeeze()
    if max(preds.ndim, target.ndim) > 1:
        raise ValueError('Expected both predictions and target to be 1 dimensional tensors.')
    return preds, target
def _mean_absolute_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
    """Return the summed absolute error and the observation count for MAE.

    Checks for same shape of input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    _check_same_shape(preds, target)
    return torch.sum(torch.abs(preds - target)), target.numel()
def _mean_squared_error_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, int]:
    """Return variables required to compute Mean Squared Error.

    Checks for same shape of input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    _check_same_shape(preds, target)
    error = preds - target
    sum_squared_error = torch.sum(error * error)
    return sum_squared_error, target.numel()
def signal_noise_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
    r"""Signal-to-noise ratio (SNR_):

    .. math::
        \text{SNR} = \frac{P_{signal}}{P_{noise}}

    where :math:`P` denotes the power of each signal. The SNR metric compares the level
    of the desired signal to the level of background noise. Therefore, a high value of
    SNR means that the audio is clear.

    Args:
        preds: shape ``[...,time]``
        target: shape ``[...,time]``
        zero_mean: if to zero mean target and preds or not

    Returns:
        snr value of shape [...]

    Example:
        >>> from torchmetrics.functional.audio import signal_noise_ratio
        >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
        >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
        >>> signal_noise_ratio(preds, target)
        tensor(16.1805)

    References:
        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International
        Conference on Acoustics, Speech and Signal Processing (ICASSP) 2019.
    """
    _check_same_shape(preds, target)
    eps = torch.finfo(preds.dtype).eps

    if zero_mean:
        preds = preds - torch.mean(preds, dim=-1, keepdim=True)
        target = target - torch.mean(target, dim=-1, keepdim=True)

    # eps in both numerator and denominator guards against all-zero signals.
    noise = target - preds
    ratio = (torch.sum(target**2, dim=-1) + eps) / (torch.sum(noise**2, dim=-1) + eps)
    return 10 * torch.log10(ratio)
def _pearson_corrcoef_update(
    preds: Tensor,
    target: Tensor,
) -> Tuple[Tensor, Tensor]:
    """Validate and squeeze the inputs used to update the running Pearson-correlation
    estimates (mean, covariance, observation count)."""
    # Data checking
    _check_same_shape(preds, target)
    preds, target = preds.squeeze(), target.squeeze()
    if preds.ndim > 1 or target.ndim > 1:
        raise ValueError('Expected both predictions and target to be 1 dimensional tensors.')
    return preds, target
def _r2score_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Return per-column target sums, squared-target sums, squared residuals and the
    sample count needed to compute the R2 score."""
    _check_same_shape(preds, target)
    if preds.ndim > 2:
        raise ValueError(
            'Expected both prediction and target to be 1D or 2D tensors,'
            f' but received tensors with dimension {preds.shape}')
    if len(preds) < 2:
        raise ValueError('Needs at least two samples to calculate r2 score.')

    sum_error = target.sum(dim=0)
    sum_squared_error = target.pow(2.0).sum(dim=0)
    residual = (target - preds).pow(2.0).sum(dim=0)
    return sum_squared_error, sum_error, residual, target.size(0)
def _cosine_similarity_update(
    preds: Tensor,
    target: Tensor,
) -> Tuple[Tensor, Tensor]:
    """Return the float-cast inputs required to compute Cosine Similarity.

    Checks for same shape of input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    _check_same_shape(preds, target)
    preds = preds.float()
    target = target.float()
    return preds, target
def _mean_absolute_percentage_error_update(
    preds: Tensor,
    target: Tensor,
    epsilon: float = 1.17e-06,
) -> Tuple[Tensor, int]:
    """Return the summed absolute percentage error and the observation count for MAPE.

    Target magnitudes below ``epsilon`` are clamped to avoid division by zero.
    """
    _check_same_shape(preds, target)
    per_error = torch.abs(preds - target) / torch.clamp(torch.abs(target), min=epsilon)
    return torch.sum(per_error), target.numel()
def _kld_update(p: Tensor, q: Tensor, log_prob: bool) -> Tuple[Tensor, int]:
    """Return per-sample KL-divergence measures and the number of samples.

    Args:
        p: first distribution, shape ``[N, d]``
        q: second distribution, shape ``[N, d]``
        log_prob: whether ``p`` and ``q`` are given as log-probabilities
    """
    _check_same_shape(p, q)
    if p.ndim != 2 or q.ndim != 2:
        raise ValueError(
            f"Expected both p and q distribution to be 2D but got {p.ndim} and {q.ndim} respectively"
        )

    total = p.shape[0]
    if log_prob:
        measures = (p.exp() * (p - q)).sum(dim=-1)
    else:
        # Normalize rows to proper distributions; clamp q away from zero before the log.
        p = p / p.sum(dim=-1, keepdim=True)
        q = q / q.sum(dim=-1, keepdim=True)
        q = torch.clamp(q, METRIC_EPS)
        measures = (p * torch.log(p / q)).sum(dim=-1)
    return measures, total
def scale_invariant_signal_distortion_ratio(preds: Tensor, target: Tensor, zero_mean: bool = False) -> Tensor:
    """Calculates Scale-invariant signal-to-distortion ratio (SI-SDR) metric. The SI-SDR value
    is in general considered an overall measure of how good a source sound.

    Args:
        preds: shape ``[...,time]``
        target: shape ``[...,time]``
        zero_mean: If to zero mean target and preds or not

    Returns:
        si-sdr value of shape [...]

    Example:
        >>> from torchmetrics.functional.audio import scale_invariant_signal_distortion_ratio
        >>> target = torch.tensor([3.0, -0.5, 2.0, 7.0])
        >>> preds = torch.tensor([2.5, 0.0, 2.0, 8.0])
        >>> scale_invariant_signal_distortion_ratio(preds, target)
        tensor(18.4030)

    References:
        [1] Le Roux, Jonathan, et al. "SDR half-baked or well done." IEEE International
        Conference on Acoustics, Speech and Signal Processing (ICASSP) 2019.
    """
    _check_same_shape(preds, target)
    eps = torch.finfo(preds.dtype).eps

    if zero_mean:
        preds = preds - torch.mean(preds, dim=-1, keepdim=True)
        target = target - torch.mean(target, dim=-1, keepdim=True)

    # Optimal scaling of the target onto the prediction (least-squares projection).
    alpha = (torch.sum(preds * target, dim=-1, keepdim=True) + eps) / (
        torch.sum(target**2, dim=-1, keepdim=True) + eps
    )
    target_scaled = alpha * target
    distortion = target_scaled - preds

    ratio = (torch.sum(target_scaled**2, dim=-1) + eps) / (torch.sum(distortion**2, dim=-1) + eps)
    return 10 * torch.log10(ratio)
def _ssim_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Return the validated inputs for Structural Similarity Index Measure.

    Checks for same dtype, same shape, and a BxCxHxW (2D) or BxCxDxHxW (3D) layout.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    if preds.dtype != target.dtype:
        raise TypeError(
            "Expected `preds` and `target` to have the same data type."
            f" Got preds: {preds.dtype} and target: {target.dtype}."
        )
    _check_same_shape(preds, target)
    if preds.ndim not in (4, 5):
        raise ValueError(
            "Expected `preds` and `target` to have BxCxHxW or BxCxDxHxW shape."
            f" Got preds: {preds.shape} and target: {target.shape}."
        )
    return preds, target
def _uqi_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Return the validated inputs for the Universal Image Quality Index.

    Checks for same dtype, same shape, and a BxCxHxW layout.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    if preds.dtype != target.dtype:
        raise TypeError(
            "Expected `preds` and `target` to have the same data type."
            f" Got preds: {preds.dtype} and target: {target.dtype}."
        )
    _check_same_shape(preds, target)
    if preds.ndim != 4:
        raise ValueError(
            "Expected `preds` and `target` to have BxCxHxW shape."
            f" Got preds: {preds.shape} and target: {target.shape}."
        )
    return preds, target
def _ergas_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Return the validated inputs for Erreur Relative Globale Adimensionnelle de Synthèse.

    Checks for same dtype, same shape, and a BxCxHxW layout.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    if preds.dtype != target.dtype:
        raise TypeError(
            "Expected `preds` and `target` to have the same data type."
            f" Got preds: {preds.dtype} and target: {target.dtype}."
        )
    _check_same_shape(preds, target)
    if preds.ndim != 4:
        raise ValueError(
            "Expected `preds` and `target` to have BxCxHxW shape."
            f" Got preds: {preds.shape} and target: {target.shape}."
        )
    return preds, target
def _weighted_mean_absolute_percentage_error_update(
    preds: Tensor,
    target: Tensor,
) -> Tuple[Tensor, int]:
    """Return the summed absolute error and the summed absolute target scale for WMAPE.

    Checks for same shape of input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    _check_same_shape(preds, target)
    return torch.abs(preds - target).sum(), torch.abs(target).sum()
def _spectral_distortion_index_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Return the validated inputs for the Spectral Distortion Index.

    Checks for same dtype, same shape, and a BxCxHxW layout.

    Args:
        preds: Low resolution multispectral image
        target: High resolution fused image
    """
    if preds.dtype != target.dtype:
        raise TypeError(
            f"Expected `ms` and `fused` to have the same data type. Got ms: {preds.dtype} and fused: {target.dtype}."
        )
    _check_same_shape(preds, target)
    if preds.ndim != 4:
        raise ValueError(
            f"Expected `preds` and `target` to have BxCxHxW shape. Got preds: {preds.shape} and target: {target.shape}."
        )
    return preds, target
def _explained_variance_update(preds: Tensor, target: Tensor) -> Tuple[int, Tensor, Tensor, Tensor, Tensor]:
    """Return the per-column statistics needed to compute Explained Variance.

    Checks for same shape of input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    _check_same_shape(preds, target)
    residual = target - preds
    n_obs = preds.size(0)
    sum_error = residual.sum(dim=0)
    sum_squared_error = (residual * residual).sum(dim=0)
    sum_target = target.sum(dim=0)
    sum_squared_target = (target * target).sum(dim=0)
    return n_obs, sum_error, sum_squared_error, sum_target, sum_squared_target
def _pearson_corrcoef_update( preds: Tensor, target: Tensor, mean_x: Tensor, mean_y: Tensor, var_x: Tensor, var_y: Tensor, corr_xy: Tensor, n_prior: Tensor, ) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]: """Updates and returns variables required to compute Pearson Correlation Coefficient. Checks for same shape of input tensors. Args: mean_x: current mean estimate of x tensor mean_y: current mean estimate of y tensor var_x: current variance estimate of x tensor var_y: current variance estimate of y tensor corr_xy: current covariance estimate between x and y tensor n_prior: current number of observed observations """ # Data checking _check_same_shape(preds, target) preds = preds.squeeze() target = target.squeeze() if preds.ndim > 1 or target.ndim > 1: raise ValueError( "Expected both predictions and target to be 1 dimensional tensors." ) n_obs = preds.numel() mx_new = (n_prior * mean_x + preds.mean() * n_obs) / (n_prior + n_obs) my_new = (n_prior * mean_y + target.mean() * n_obs) / (n_prior + n_obs) n_prior += n_obs var_x += ((preds - mx_new) * (preds - mean_x)).sum() var_y += ((target - my_new) * (target - mean_y)).sum() corr_xy += ((preds - mx_new) * (target - mean_y)).sum() mean_x = mx_new mean_y = my_new return mean_x, mean_y, var_x, var_y, corr_xy, n_prior
def _r2_score_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor]:
    """Return per-column observation sums, squared-observation sums, residual sum of
    squares and the sample count needed to compute the R2 score.

    Checks for same shape and 1D/2D input tensors.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    _check_same_shape(preds, target)
    if preds.ndim > 2:
        raise ValueError(
            "Expected both prediction and target to be 1D or 2D tensors,"
            f" but received tensors with dimension {preds.shape}")

    sum_obs = target.sum(dim=0)
    sum_squared_obs = (target * target).sum(dim=0)
    delta = target - preds
    rss = (delta * delta).sum(dim=0)
    return sum_squared_obs, sum_obs, rss, target.size(0)
def _spearman_corrcoef_update(preds: Tensor, target: Tensor) -> Tuple[Tensor, Tensor]:
    """Return the validated, squeezed inputs for Spearman Correlation Coefficient.

    Checks for same shape and type of input tensors; both must be 1D after squeezing.

    Args:
        preds: Predicted tensor
        target: Ground truth tensor
    """
    if preds.dtype != target.dtype:
        raise TypeError(
            "Expected `preds` and `target` to have the same data type."
            f" Got preds: {preds.dtype} and target: {target.dtype}."
        )
    _check_same_shape(preds, target)
    preds, target = preds.squeeze(), target.squeeze()
    if preds.ndim > 1 or target.ndim > 1:
        raise ValueError(
            "Expected both predictions and target to be 1 dimensional tensors."
        )
    return preds, target