Example 1
import string
from pathlib import Path

import numpy
import torch
from matplotlib import pyplot
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

# ReportEntry, NOD, plt_html, generate_math_html, generate_html, generate_pdf,
# confusion_matrix_plot, default_torch_retransform and global_torch_device are
# assumed to be provided by the surrounding project modules.


def test_model(model, data_iterator, latest_model_path, num_columns: int = 2):
    # Run the network in evaluation mode on the active device.
    model = model.eval().to(global_torch_device())

    # Evaluate on a single batch drawn from the data iterator.
    inputs, labels = next(data_iterator)

    inputs = inputs.to(global_torch_device())
    labels = labels.to(global_torch_device())
    with torch.no_grad():
        pred = model(inputs)

    # Move predictions and ground truth to CPU as numpy arrays for the sklearn metrics.
    y_pred = pred.data.to("cpu").numpy()
    y_pred_max = numpy.argmax(y_pred, axis=-1)
    truth_labels = labels.data.to("cpu").numpy()

    accuracy_w = accuracy_score(truth_labels, y_pred_max)
    precision_a, recall_a, fscore_a, support_a = precision_recall_fscore_support(
        truth_labels, y_pred_max)
    precision_w, recall_w, fscore_w, support_w = precision_recall_fscore_support(
        truth_labels, y_pred_max, average="weighted")

    # Convert the input tensors back into displayable RGB images on the CPU.
    input_images_rgb = [
        default_torch_retransform(x) for x in inputs.to("cpu")
    ]

    # Width of one prediction cell in the 800 px wide report grid.
    cell_width = (800 / num_columns) - 6 - 6 * 2

    pyplot.plot(numpy.random.random((3, 3)))

    alphabet = string.ascii_lowercase
    class_names = numpy.array([*alphabet])

    samples = len(y_pred)
    num_rows = (samples + num_columns - 1) // num_columns  # round up so a partial last row fits
    predictions = [[None for _ in range(num_columns)]
                   for _ in range(num_rows)]
    # Build one report entry per sample: rendered image, predicted class and ground truth.
    for i, a, b, c in zip(range(samples), input_images_rgb, y_pred_max,
                          truth_labels):
        pyplot.imshow(a)
        if b == c:
            outcome = "tp"
        else:
            outcome = "fn"

        gd = ReportEntry(
            name=i,
            figure=plt_html(a, format="jpg", size=(cell_width, cell_width)),
            prediction=class_names[b],
            truth=class_names[c],
            outcome=outcome,
            explanation=None,
        )

        predictions[i // num_columns][i % num_columns] = gd

    cfmat = confusion_matrix_plot(y_pred_max, truth_labels, class_names)

    title = "Classification Report"
    model_name = latest_model_path
    confusion_matrix = plt_html(cfmat, format="png", size=(800, 800))

    # Raw strings keep the LaTeX backslashes intact for the rendered formulas.
    accuracy = generate_math_html(r"\dfrac{tp+tn}{N}"), None, accuracy_w
    precision = generate_math_html(
        r"\dfrac{tp}{tp+fp}"), precision_a, precision_w
    recall = generate_math_html(r"\dfrac{tp}{tp+fn}"), recall_a, recall_w
    f1_score = (
        generate_math_html(r"2*\dfrac{precision*recall}{precision+recall}"),
        fscore_a,
        fscore_w,
    )
    support = generate_math_html(r"N_{class\_truth}"), support_a, support_w
    metrics = NOD.nod_of(accuracy, precision, f1_score, recall,
                         support).as_flat_tuples()

    bundle = NOD.nod_of(title, model_name, confusion_matrix, metrics,
                        predictions)

    file_name = Path(title.lower().replace(" ", "_"))

    generate_html(file_name.with_suffix(".html"), **bundle)
    generate_pdf(file_name.with_suffix(".html"), file_name.with_suffix(".pdf"))
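
A minimal invocation sketch for Example 1. The random tensors, the tiny placeholder classifier and the checkpoint path are illustrative assumptions; only test_model and its helpers come from the example above.

# Hedged usage sketch: the data, model and checkpoint path are placeholders.
import torch
from torch.utils.data import DataLoader, TensorDataset

images = torch.rand(64, 3, 32, 32)        # fake RGB inputs
targets = torch.randint(0, 26, (64,))     # fake letter-class labels (a-z)
loader = DataLoader(TensorDataset(images, targets), batch_size=16, shuffle=True)

model = torch.nn.Sequential(              # placeholder 26-way classifier
    torch.nn.Flatten(),
    torch.nn.Linear(3 * 32 * 32, 26),
)

test_model(model,
           iter(loader),
           latest_model_path="checkpoints/latest.model",
           num_columns=2)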
Example 2
# Variant of Example 1: numpy/pyplot are imported as np/plt, a_retransform and
# plot_confusion_matrix replace the Example 1 helpers, and the target device is
# passed in explicitly instead of being read from global_torch_device().
def test_model(model,
               data_iterator,
               latest_model_path,
               num_columns=2,
               device='cpu'):
    model = model.eval().to(device)

    inputs, labels = next(data_iterator)

    inputs = inputs.to(device)
    labels = labels.to(device)
    with torch.no_grad():
        pred = model(inputs)

    # Move predictions and ground truth to CPU as numpy arrays for the sklearn metrics.
    y_pred = pred.data.cpu().numpy()
    y_pred_max = np.argmax(y_pred, axis=-1)
    truth_labels = labels.data.cpu().numpy()

    accuracy_w = accuracy_score(truth_labels, y_pred_max)
    precision_a, recall_a, fscore_a, support_a = precision_recall_fscore_support(
        truth_labels, y_pred_max)
    precision_w, recall_w, fscore_w, support_w = precision_recall_fscore_support(
        truth_labels, y_pred_max, average='weighted')

    # Convert the input tensors back into displayable RGB images on the CPU.
    input_images_rgb = [a_retransform(x) for x in inputs.cpu()]

    cell_width = (800 / num_columns) - 6 - 6 * 2

    plt.plot(np.random.random((3, 3)))

    alphabet = string.ascii_lowercase
    class_names = np.array([*alphabet])

    samples = len(y_pred)
    num_rows = (samples + num_columns - 1) // num_columns  # round up so a partial last row fits
    predictions = [[None for _ in range(num_columns)]
                   for _ in range(num_rows)]
    for i, a, b, c in zip(range(samples), input_images_rgb, y_pred_max,
                          truth_labels):
        plt.imshow(a)
        if b == c:
            outcome = 'tp'
        else:
            outcome = 'fn'

        gd = ReportEntry(name=i,
                         figure=plt_html(format='jpg',
                                         size=[cell_width, cell_width]),
                         prediction=class_names[b],
                         truth=class_names[c],
                         outcome=outcome)

        predictions[i // num_columns][i % num_columns] = gd

    plot_confusion_matrix(y_pred_max, truth_labels, class_names)

    title = 'Classification Report'
    model_name = latest_model_path
    confusion_matrix = plt_html(format='png', size=[800, 800])

    # Raw strings keep the LaTeX backslashes intact for the rendered formulas.
    accuracy = generate_math_html(r'\dfrac{tp+tn}{N}'), None, accuracy_w
    precision = generate_math_html(
        r'\dfrac{tp}{tp+fp}'), precision_a, precision_w
    recall = generate_math_html(r'\dfrac{tp}{tp+fn}'), recall_a, recall_w
    f1_score = generate_math_html(
        r'2*\dfrac{precision*recall}{precision+recall}'), fscore_a, fscore_w
    support = generate_math_html(r'N_{class\_truth}'), support_a, support_w
    metrics = NOD.dict_of(accuracy, precision, f1_score, recall,
                          support).as_flat_tuples()

    bundle = NOD.dict_of(title, model_name, confusion_matrix, metrics,
                         predictions)

    file_name = title.lower().replace(" ", "_")

    generate_html(file_name, **bundle)
    generate_pdf(file_name)
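
Example 2 accepts the target device as a parameter; a hedged invocation sketch, reusing the placeholder loader and model from the sketch under Example 1:

# Assumption: loader and model are the placeholders defined in the earlier sketch.
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
test_model(model.to(device),
           iter(loader),
           latest_model_path='checkpoints/latest.model',
           device=device)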