Example #1
    def load_evaluation(model_type,
                        path,
                        group_size,
                        seq_size,
                        readable=False):
        '''
    Prints the evaluation for the model at path.

    Parameters
    ----------
    model_type: The python type object of the model being evaluated.
    path: The path to the model to load the evaluation from
    group_size: the group_size portion of the key tuple to the evaluation dictionary.
      Represents the margin of error you want to allow for prediction success
      in the accuracy metric.
    seq_size: the seq_size portion of the key tuple to the evaluation dictionary.
      Represents how long a sequence per prediction you want accuracy metrics on.
    readable: pass readable=True if you want a human-readable version printed to the terminal
      in addition to the returned evaluation dictionary.

    Returns
    -------
    evaluation: A dictionary of {class1 : performance, class2 : performance, ..., 
      avg_over_classes : performance, weighted_avg : performance, median_over_classes : performance}

    Raises
    ------
    ValueError: Raises this if (group_size, seq_size) is not in the evaluation dictionary.
    '''
        # assumes module-level imports: pickle, printer, and the Model base class
        model = Model.get_type(model_type)
        with open(path + 'evaluation.pkl', 'rb') as f:
            evaluation = pickle.load(f)
        key = (group_size, seq_size)
        if key not in evaluation:
            raise ValueError(
                'The report for (group_size, seq_size) was not stored during training.'
            )
        evaluation = evaluation[key]
        if readable:
            printer.print_header("BEGIN REPORTS",
                                 buffer=1,
                                 bold=True,
                                 filler='=')
            header = printer.color.UNDERLINE + 'MODEL EVALUATION of the ' + str(model) + ' at ' + path + ': ' + \
                printer.color.RED + evaluation['report_id'] + printer.color.END
            Model.print_evaluation(evaluation, header=header)
            printer.print_header("END REPORTS",
                                 buffer=1,
                                 bold=True,
                                 filler='=')
        return evaluation
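
A minimal usage sketch (MyLSTM and the model directory are hypothetical names for illustration; the function only requires that path + 'evaluation.pkl' exists):

    # MyLSTM and 'models/my_lstm/' are assumed, illustrative names
    evaluation = load_evaluation(MyLSTM,
                                 'models/my_lstm/',
                                 group_size=2,
                                 seq_size=5,
                                 readable=True)
    print(evaluation['weighted_avg'])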
Example #2
    def print_classification(classification):
        #TODO: Extend classification
        '''
    Prints a classification of the form [{'prediction' : pred, 'confidence' : conf}, ...]

    Parameters
    ----------
    classification: a classification of the form [{'prediction' : pred, 'confidence' : conf}, ...]
    '''
        print('')
        header = 'SEQUENCE CLASSIFICATION'
        printer.print_header(header)
        for i, guess in enumerate(classification):
            k = guess['prediction']
            v = guess['confidence']
            print("Prediction " + str(i + 1) + ": ")
            print("-- " + "class: " + str(k))
            print("-- " + "confidence: " + str(v))
            if i < len(classification) - 1:
                # blank line between predictions (a bare Python 2 print would be a no-op in Python 3)
                print('')
        printer.print_line_break()
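
A short usage sketch showing the expected input shape (the 'prediction' and 'confidence' keys match what the loop above reads; the values are made up):

    classification = [
        {'prediction': 'cat', 'confidence': 0.91},
        {'prediction': 'dog', 'confidence': 0.07},
    ]
    print_classification(classification)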
Example #3
    def print_evaluation(evaluation, header='MODEL EVALUATION'):
        #TODO: factor evaluation into a class
        '''
    Prints the evaluation.

    Parameters
    ----------
    evaluation: a dict of the form
      {class1_name : accuracy, ..., classn_name : accuracy, avg_over_classes : accuracy,
      weighted_avg : accuracy, median_over_classes : accuracy, report_id : id}.
    header: the header string printed above the evaluation stats
    '''
        indent = '-- '
        print('')
        printer.print_header(header, filler='_')
        # sort per-class scores in ascending order, skipping summary and meta keys
        # (argsorting over all values would choke on the report_id string)
        summary_keys = {'avg_over_classes', 'median_over_classes',
                        'weighted_avg', 'weighted_median', 'report_id'}
        class_scores = sorted(
            ((k, v) for k, v in evaluation.items() if k not in summary_keys),
            key=lambda kv: kv[1])
        printer.print_sub_header('Accuracy by Class')
        for k, v in class_scores:
            print(indent + k + ": " + str(v))
        print('')
        printer.print_sub_header('Accuracy Summary')
        print(indent + 'avg_over_classes: ' +
              str(evaluation['avg_over_classes']))
        print(indent + 'weighted_avg: ' + str(evaluation['weighted_avg']))
        print(indent + 'median_over_classes: ' +
              str(evaluation['median_over_classes']))
        print('')
        printer.print_line_break(break_char='_ ')
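
A sketch of a dictionary this function accepts (all values are made up; the summary keys and 'report_id' are skipped in the per-class section and printed in the summary):

    evaluation = {
        'class_a': 0.92,
        'class_b': 0.85,
        'avg_over_classes': 0.885,
        'weighted_avg': 0.89,
        'median_over_classes': 0.885,
        'report_id': 'run-001',  # illustrative id
    }
    print_evaluation(evaluation)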
Example #4
if args.isfile:
    with open(args.dataToTest) as f:
        data = f.read()

model_type = get_models()[Model.load_type(path)]
#a tuple containing all the info needed to classify
loaded_model = Model.load_classification_pipeline(model_type, path)
preprocessor = loaded_model.preprocessor
X, Y = parse_accuracy(data, preprocessor.encoder, preprocessor.normalizer)
eval_group_size = args.evalGroupSize
eval_seq_size = args.evalSeqSize
evaluation = {(i, j): loaded_model.evaluate_in_pipeline(X, Y, preprocessor.get_encoder(), i, j)
              for i in range(1, eval_group_size + 1)
              for j in range(1, eval_seq_size + 1)}
if args.readable:
    #print evaluation info
    print('')
    printer.print_header("BEGIN REPORTS", buffer=1, bold=True, filler='=')
    # sort keys descending so reports print grouped by group_size, then seq_size
    keys = sorted(evaluation.keys(), reverse=True)
    for k in keys:
        header = printer.color.UNDERLINE + "MODEL EVALUATION: " + printer.color.RED + \
          evaluation[k]['report_id'] + printer.color.END
        Model.print_evaluation(evaluation[k], header=header)
    printer.print_header("END REPORTS", buffer=1, bold=True, filler='=')
else:
    print(evaluation)
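
For reference, the evaluation dict above is keyed by (group_size, seq_size) tuples, so with eval_group_size = eval_seq_size = 2 (hypothetical sizes) the descending key order used for printing would be:

    keys = sorted([(i, j) for i in range(1, 3) for j in range(1, 3)], reverse=True)
    # -> [(2, 2), (2, 1), (1, 2), (1, 1)]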