Example #1
import numpy as np
from sklearn.metrics import roc_auc_score
from torch.nn import Softmax


def validate(model, device, data_loader, labels_dict, logger):

    logger.info("Validating model")
    model.eval()  # switch dropout/batchnorm layers to evaluation mode

    # One row per sample; assumes the loader yields batches of size 1.
    y_arr = np.zeros(shape=(len(data_loader), len(labels_dict)), dtype=np.float64)
    y_pred_arr = np.zeros(shape=(len(data_loader), len(labels_dict)), dtype=np.float64)

    # Log progress roughly every 10% of the loader (never less than 1,
    # to avoid a modulo-by-zero on small loaders).
    print_freq = max(1, len(data_loader) // 10)
    for i, sample in enumerate(data_loader):

        if i % print_freq == 0:
            logger.info("image num {:04d}...".format(i))

        inputs = sample["image"].to(device)
        labels = sample["label"].to(device)

        outputs = model(inputs)
        sftmax = Softmax(dim=1)(outputs)  # per-sample class probabilities

        # sftmax[sftmax >= tau] = 1.0
        # sftmax[sftmax < tau] = 0.0

        # Store ground truth and predicted probabilities for this sample;
        # [0] picks the only item in the size-1 batch.
        y_arr[i] = labels.cpu().detach().numpy()[0]
        y_pred_arr[i] = sftmax.cpu().detach().numpy()[0]

    # Report per-class ROC AUC; assumes the key order of labels_dict matches
    # the column order of the model's outputs.
    for i, label in enumerate(labels_dict.keys()):

        class_y_pred = y_pred_arr[:, i]
        class_y = y_arr[:, i]

        roc_auc = roc_auc_score(class_y, class_y_pred)
        logger.info("  Class: '{}', roc auc: {:.4f}".format(label, roc_auc))
Example #2
import numpy as np
import torch
from flask import jsonify, request
from torch.nn import Softmax


def recognizeImage():
    gpu_available = torch.cuda.is_available()

    # Decode the base64-encoded image sent in the JSON request body.
    base64string = request.get_json(force=True)['base64']
    image = get_image(base64string)

    # The classifier only accepts 3-channel 32x32 inputs (CIFAR-10 format).
    if image.shape[1:] != (3, 32, 32):
        return jsonify(error="only RGB image 32x32 accepted",
                       message="failure!")

    model = get_model()

    # Run inference; move the input to the GPU only when one is available.
    output_tensor = model(image.cuda() if gpu_available else image)
    output_tensor = Softmax(dim=1)(output_tensor)
    prob_pred_tensor, pred_tensor = torch.max(output_tensor, 1)

    # .cpu() is a no-op on CPU tensors, so a single code path handles both devices.
    output = np.squeeze(output_tensor.cpu().detach().numpy())
    prob_pred = np.squeeze(prob_pred_tensor.cpu().detach().numpy())
    pred = np.squeeze(pred_tensor.cpu().detach().numpy())

    classes = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]

    pred_dict = {
        'predicted_class': str(classes[pred]),
        'prob_predicted_class': str(prob_pred)
    }

    for i, prob in enumerate(output):
        pred_dict[classes[i]] = str(prob)

    return jsonify(prediction=pred_dict, message="success!")
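
get_image and get_model are used above but not defined in the example. The sketch below shows one plausible get_image, assuming the client posts a base64-encoded RGB image and the model expects inputs normalized with the commonly used CIFAR-10 statistics; both the helper body and those constants are assumptions, not part of the original.

import base64
import io

from PIL import Image
from torchvision import transforms


def get_image(base64string):
    # Hypothetical helper: decode a base64 string into a (1, 3, H, W) float tensor.
    raw = base64.b64decode(base64string)
    pil_image = Image.open(io.BytesIO(raw)).convert("RGB")
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        # Commonly used CIFAR-10 mean/std; an assumption, not from the original.
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2470, 0.2435, 0.2616)),
    ])
    return preprocess(pil_image).unsqueeze(0)  # add the batch dimension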