Example #1
import logging

import mxnet as mx

import svm_metric  # project-local module that provides the HingeLoss metric

logger = logging.getLogger(__name__)


def _get_eval_metrics(lst_metrics):
    '''
    Build an mx.metric.CompositeEvalMetric from a list of metric names.
    '''
    all_metrics = [
        'accuracy', 'ce', 'f1', 'mae', 'mse', 'rmse', 'top_k_accuracy',
        'hinge_loss'
    ]
    lst_child_metrics = []
    eval_metrics = mx.metric.CompositeEvalMetric()
    for metric in lst_metrics:
        if metric not in all_metrics:
            logger.error('Invalid evaluation metric: %s', metric)
            raise ValueError('Invalid evaluation metric: {}'.format(metric))
        if metric == 'accuracy':
            lst_child_metrics.append(mx.metric.Accuracy())
        elif metric == 'ce':
            lst_child_metrics.append(mx.metric.CrossEntropy())
        elif metric == 'f1':
            lst_child_metrics.append(mx.metric.F1())
        elif metric == 'mae':
            lst_child_metrics.append(mx.metric.MAE())
        elif metric == 'mse':
            lst_child_metrics.append(mx.metric.MSE())
        elif metric == 'rmse':
            lst_child_metrics.append(mx.metric.RMSE())
        elif metric == 'top_k_accuracy':
            # `args` is a command-line argument dict (docopt-style keys)
            # defined at module level in the original script.
            lst_child_metrics.append(
                mx.metric.TopKAccuracy(top_k=int(args['--top-k'])))
        elif metric == 'hinge_loss':
            lst_child_metrics.append(svm_metric.HingeLoss())
    for child_metric in lst_child_metrics:
        eval_metrics.add(child_metric)
    return eval_metrics
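A minimal standalone check, assuming only the function above plus `import mxnet as mx`: update the composite with one hand-made batch of labels and softmax outputs, then read back every child metric.

import mxnet as mx

# Hand-made batch: three samples, two classes; preds are softmax outputs.
eval_metrics = _get_eval_metrics(['accuracy', 'ce'])
labels = [mx.nd.array([0, 1, 1])]
preds = [mx.nd.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])]
eval_metrics.update(labels=labels, preds=preds)
for name, value in zip(*eval_metrics.get()):
    print('{}: {:.4f}'.format(name, value))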
Example #2
import logging

import mxnet as mx

import svm_metric  # project-local module that provides the HingeLoss metric


def inst_eval_metrics(lst_metrics, top_k=5):
    '''
    Instantiate an mx.metric.CompositeEvalMetric from a list of metric
    short names.
    '''
    # Every supported metric is instantiated up front; only the requested
    # ones are added to the composite below.
    all_metrics = {
        'acc': mx.metric.Accuracy(),
        'ce': mx.metric.CrossEntropy(),
        'f1': mx.metric.F1(),
        'mae': mx.metric.MAE(),
        'mse': mx.metric.MSE(),
        'rmse': mx.metric.RMSE(),
        'top_k_acc': mx.metric.TopKAccuracy(top_k=top_k),
        'hl': svm_metric.HingeLoss()
    }
    eval_metrics = mx.metric.CompositeEvalMetric()
    for metric in lst_metrics:
        if metric not in all_metrics:
            logging.error('Invalid evaluation metric: %s', metric)
            raise ValueError('Invalid evaluation metric: {}'.format(metric))
        eval_metrics.add(all_metrics[metric])
        logging.info('%s added to the evaluation metric list', metric)
    return eval_metrics
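An integration sketch under stated assumptions: `mod`, `train_iter`, and `val_iter` are hypothetical names for a bound `mx.mod.Module` and its data iterators (none appear in the snippet above); `Module.fit` accepts any `EvalMetric` instance through its `eval_metric` argument.

# Hypothetical integration sketch: `mod`, `train_iter`, and `val_iter` are
# assumed to be defined elsewhere in the project.
metrics = inst_eval_metrics(['acc', 'top_k_acc'], top_k=5)
mod.fit(train_iter,
        eval_data=val_iter,
        eval_metric=metrics,
        num_epoch=10)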