Example #1
0
def calculate_score(solution,
                    prediction,
                    task_type,
                    metric,
                    num_classes,
                    all_scoring_functions=False,
                    logger=None):
    """Score *prediction* against *solution* for the given task.

    When ``all_scoring_functions`` is true, every known metric for the task
    family is evaluated and a dict mapping metric name -> score is returned;
    otherwise only ``metric`` is evaluated and its scalar score is returned.
    Regression predictions are sanitized first (TODO in original: move that
    into the regression metric itself).  Raises NotImplementedError for an
    unknown ``task_type``.
    """
    if task_type not in TASK_TYPES:
        raise NotImplementedError(task_type)

    is_regression = task_type in REGRESSION_TASKS

    if all_scoring_functions:
        if is_regression:
            cprediction = sanitize_array(prediction)
            return {
                name: regression_metrics.calculate_score(
                    name, solution, cprediction)
                for name in REGRESSION_METRICS
            }
        return {
            name: classification_metrics.calculate_score(
                name, solution, prediction, task_type)
            for name in CLASSIFICATION_METRICS
        }

    if is_regression:
        cprediction = sanitize_array(prediction)
        return regression_metrics.calculate_score(
            metric, solution, cprediction)
    return classification_metrics.calculate_score(
        metric, solution, prediction, task=task_type)
Example #2
0
def calculate_score(solution, prediction, task_type, metric, num_classes,
                    all_scoring_functions=False, logger=None):
    """Compute one metric score, or all metric scores, for a prediction.

    Returns a dict of ``{metric_name: score}`` over every metric of the task
    family when ``all_scoring_functions`` is true; otherwise the scalar score
    of ``metric``.  Regression predictions pass through ``sanitize_array``
    first (TODO in original: fold into the regression metric).  Raises
    NotImplementedError if ``task_type`` is not a known task.
    """
    if task_type not in TASK_TYPES:
        raise NotImplementedError(task_type)

    regression = task_type in REGRESSION_TASKS

    if not all_scoring_functions:
        # Single-metric path: dispatch directly to the matching scorer.
        if regression:
            return regression_metrics.calculate_score(
                metric, solution, sanitize_array(prediction))
        return classification_metrics.calculate_score(
            metric, solution, prediction, task=task_type)

    # Multi-metric path: evaluate every metric of the task family.
    score = dict()
    if regression:
        cprediction = sanitize_array(prediction)
        for name in REGRESSION_METRICS:
            score[name] = regression_metrics.calculate_score(
                name, solution, cprediction)
    else:
        for name in CLASSIFICATION_METRICS:
            score[name] = classification_metrics.calculate_score(
                name, solution, prediction, task_type)
    return score
Example #3
0
def calculate_score(solution,
                    prediction,
                    task_type,
                    metric,
                    num_classes,
                    all_scoring_functions=False,
                    logger=None):
    """Score *prediction* against *solution* for the given task.

    Multiclass solutions are first expanded to a one-hot binary matrix of
    shape ``(n_samples, num_classes)``; 1-D binary/regression solutions are
    reshaped to a column vector.  When ``all_scoring_functions`` is true a
    dict of ``{metric_name: score}`` over every metric of the task family is
    returned, otherwise the scalar score of ``metric``.

    Raises:
        NotImplementedError: if ``task_type`` is not in TASK_TYPES.
        ValueError: if solution and prediction shapes disagree.
        IndexError: re-raised from the one-hot allocation (see below).
    """
    if task_type == MULTICLASS_CLASSIFICATION:
        # This used to crash on travis-ci; special treatment to find out why
        # it crashed!
        try:
            solution_binary = np.zeros((prediction.shape[0], num_classes))
        except IndexError as e:
            if logger is not None:
                logger.error("Prediction shape: %s, solution "
                             "shape %s", prediction.shape, solution.shape)
            # BUGFIX: the re-raise was inside `if logger is not None:`, so
            # with no logger the IndexError was silently swallowed and
            # `solution_binary` below raised a confusing NameError instead.
            raise e

        # One-hot encode the integer class labels.
        for i in range(solution_binary.shape[0]):
            label = solution[i]
            solution_binary[i, label] = 1
        solution = solution_binary

    elif task_type in [BINARY_CLASSIFICATION, REGRESSION]:
        if len(solution.shape) == 1:
            solution = solution.reshape((-1, 1))

    if task_type not in TASK_TYPES:
        raise NotImplementedError(task_type)

    if solution.shape != prediction.shape:
        raise ValueError('Solution shape %s != prediction shape %s' %
                         (solution.shape, prediction.shape))

    if all_scoring_functions:
        score = dict()
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            for metric_ in REGRESSION_METRICS:
                score[metric_] = regression_metrics.calculate_score(
                    metric_, solution, cprediction)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            for metric_ in CLASSIFICATION_METRICS:
                score[metric_] = classification_metrics.calculate_score(
                    metric_, csolution, cprediction, task_type)

    else:
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            score = regression_metrics.calculate_score(metric, solution,
                                                       cprediction)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            score = classification_metrics.calculate_score(metric,
                                                           csolution,
                                                           cprediction,
                                                           task=task_type)
    return score
Example #4
0
def calculate_score(solution, prediction, task_type, metric, num_classes,
                    all_scoring_functions=False, logger=None):
    """Score *prediction* against *solution* for the given task.

    Multiclass solutions are one-hot encoded to ``(n_samples, num_classes)``;
    1-D binary/regression solutions are reshaped to a column vector.  Returns
    a ``{metric_name: score}`` dict over the task family's metrics when
    ``all_scoring_functions`` is true, else the scalar score of ``metric``.

    Raises:
        NotImplementedError: if ``task_type`` is not in TASK_TYPES.
        ValueError: if solution and prediction shapes disagree.
        IndexError: re-raised from the one-hot allocation (see below).
    """
    if task_type == MULTICLASS_CLASSIFICATION:
        # This used to crash on travis-ci; special treatment to find out why
        # it crashed!
        try:
            solution_binary = np.zeros((prediction.shape[0], num_classes))
        except IndexError as e:
            if logger is not None:
                logger.error("Prediction shape: %s, solution "
                             "shape %s", prediction.shape, solution.shape)
            # BUGFIX: previously the re-raise was nested inside the
            # `if logger is not None:` branch, so without a logger the
            # IndexError vanished and `solution_binary` below raised a
            # misleading NameError instead.
            raise e

        # One-hot encode the integer class labels.
        for i in range(solution_binary.shape[0]):
            label = solution[i]
            solution_binary[i, label] = 1
        solution = solution_binary

    elif task_type in [BINARY_CLASSIFICATION, REGRESSION]:
        if len(solution.shape) == 1:
            solution = solution.reshape((-1, 1))

    if task_type not in TASK_TYPES:
        raise NotImplementedError(task_type)

    if solution.shape != prediction.shape:
        raise ValueError('Solution shape %s != prediction shape %s' %
                         (solution.shape, prediction.shape))

    if all_scoring_functions:
        score = dict()
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            for metric_ in REGRESSION_METRICS:
                score[metric_] = regression_metrics.calculate_score(metric_,
                                                                    solution,
                                                                    cprediction)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            for metric_ in CLASSIFICATION_METRICS:
                score[metric_] = classification_metrics.calculate_score(
                    metric_, csolution, cprediction, task_type)

    else:
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            score = regression_metrics.calculate_score(metric,
                                                       solution,
                                                       cprediction)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            score = classification_metrics.calculate_score(metric,
                                                           csolution,
                                                           cprediction,
                                                           task=task_type)
    return score
Example #5
0
def calculate_score(solution, prediction, task_type, metric, num_classes,
                    all_scoring_functions=False):
    """Score *prediction* against *solution* for the given task.

    Multiclass solutions are one-hot encoded to ``(n_samples, num_classes)``;
    1-D binary/regression solutions are reshaped to a column vector.  Returns
    a ``{metric_name: score}`` dict over the task family's metrics when
    ``all_scoring_functions`` is true, else the scalar score of ``metric``.

    Raises:
        NotImplementedError: if ``task_type`` is not in TASK_TYPES.
        ValueError: if solution and prediction shapes disagree.
    """
    if task_type == MULTICLASS_CLASSIFICATION:
        # One-hot encode the integer class labels.
        solution_binary = np.zeros((prediction.shape[0], num_classes))
        for i in range(solution_binary.shape[0]):
            label = solution[i]
            solution_binary[i, label] = 1
        solution = solution_binary

    elif task_type in [BINARY_CLASSIFICATION, REGRESSION]:
        if len(solution.shape) == 1:
            solution = solution.reshape((-1, 1))

    if task_type not in TASK_TYPES:
        raise NotImplementedError(task_type)

    if solution.shape != prediction.shape:
        raise ValueError('Solution shape %s != prediction shape %s' %
                         (solution.shape, prediction.shape))

    if all_scoring_functions:
        score = dict()
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            # BUGFIX: iterated over `REGRESSION_METRIC` (undefined name,
            # NameError at runtime) — every sibling implementation uses
            # REGRESSION_METRICS.
            for metric_ in REGRESSION_METRICS:
                score[metric_] = regression_metrics.calculate_score(metric_,
                                                                    solution,
                                                                    cprediction)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            for metric_ in CLASSIFICATION_METRICS:
                score[metric_] = classification_metrics.calculate_score(
                    metric_, csolution, cprediction, task_type)

    else:
        if task_type in REGRESSION_TASKS:
            cprediction = sanitize_array(prediction)
            score = regression_metrics.calculate_score(metric,
                                                       solution,
                                                       cprediction)
        else:
            csolution, cprediction = normalize_array(solution, prediction)
            score = classification_metrics.calculate_score(metric,
                                                           csolution,
                                                           cprediction,
                                                           task=task_type)
    return score