def test_accuracy(ddp, ddp_sync_on_step, preds, target, sk_metric):
    """Compare the Accuracy metric against the sklearn reference implementation.

    Runs the shared `compute_batch` harness, optionally under DDP and with
    per-step distributed sync, forwarding the decision threshold to the metric.
    """
    # Use the module-level THRESHOLD constant, matching the sibling
    # precision/recall and fbeta tests; the original lowercase `threshold`
    # does not appear to be defined anywhere in this file.
    compute_batch(
        preds,
        target,
        Accuracy,
        sk_metric,
        ddp_sync_on_step,
        ddp,
        metric_args={"threshold": THRESHOLD},
    )
def test_explained_variance(ddp, dist_sync_on_step, multioutput, preds, target, sk_metric):
    """Check ExplainedVariance against sklearn's `explained_variance_score`.

    The sklearn reference is specialised with the requested `multioutput`
    reduction, and the same setting is forwarded to the metric under test.

    NOTE(review): a second, simpler `test_explained_variance` is defined later
    in this file and will shadow this one at import time — confirm intent.
    """
    reference_fn = partial(explained_variance_score, multioutput=multioutput)
    sk_wrapper = partial(sk_metric, sk_fn=reference_fn)
    compute_batch(
        preds,
        target,
        ExplainedVariance,
        sk_wrapper,
        dist_sync_on_step,
        ddp,
        metric_args=dict(multioutput=multioutput),
    )
def test_precision_recall(ddp, dist_sync_on_step, preds, target, sk_metric, metric_class, sk_fn, num_classes, multilabel, average):
    """Compare a Precision/Recall metric class against its sklearn counterpart.

    Parametrized over the metric class (`metric_class`) and its sklearn
    reference (`sk_fn`), plus the class/label configuration and the
    averaging mode.
    """
    compute_batch(
        preds,
        target,
        metric_class,
        partial(sk_metric, sk_fn=sk_fn, average=average),
        dist_sync_on_step,
        ddp,
        metric_args={
            "num_classes": num_classes,
            "average": average,
            "multilabel": multilabel,
            "threshold": THRESHOLD,
        },
        # `False if cond else True` simplified to the equivalent boolean
        # expression. With 'macro' averaging, per-step/per-batch values do
        # not aggregate the same way as the accumulated result, so those
        # intermediate checks are skipped.
        check_dist_sync_on_step=average != 'macro',
        check_batch=average != 'macro',
    )
def test_fbeta(ddp, dist_sync_on_step, preds, target, sk_metric, metric_class, beta, num_classes, multilabel, average):
    """Compare an FBeta metric class against the sklearn reference.

    The sklearn wrapper is specialised with the same `average` and `beta`
    settings that are handed to the metric under test. Per-step sync and
    per-batch checks are disabled for this test.
    """
    reference = partial(sk_metric, average=average, beta=beta)
    fbeta_args = {
        "beta": beta,
        "num_classes": num_classes,
        "average": average,
        "multilabel": multilabel,
        "threshold": THRESHOLD,
    }
    compute_batch(
        preds,
        target,
        metric_class,
        reference,
        dist_sync_on_step,
        ddp,
        metric_args=fbeta_args,
        check_dist_sync_on_step=False,
        check_batch=False,
    )
def test_explained_variance(ddp, ddp_sync_on_step, preds, target, sk_metric):
    """Run ExplainedVariance through the shared `compute_batch` harness.

    Uses the metric's default settings (no `metric_args`).

    NOTE(review): this definition shares its name with an earlier
    `test_explained_variance` in this file and shadows it — confirm whether
    both were meant to run.
    """
    compute_batch(
        preds,
        target,
        ExplainedVariance,
        sk_metric,
        ddp_sync_on_step,
        ddp,
    )
def test_mean_error(ddp, dist_sync_on_step, preds, target, sk_metric, metric_class, sk_fn):
    """Compare a mean-error metric class against its sklearn reference.

    Parametrized over the metric class (`metric_class`) and the matching
    sklearn function (`sk_fn`) bound into the reference wrapper.
    """
    reference = partial(sk_metric, sk_fn=sk_fn)
    compute_batch(
        preds,
        target,
        metric_class,
        reference,
        dist_sync_on_step,
        ddp,
    )