Example #1
def gerar_matrix_confusao(y_test, y_pred):
    # compute the confusion matrix and extract the TN, FP, FN and TP counts
    cnf_matrix = confusion_matrix(y_test, y_pred)
    tn, fp, fn, tp = cnf_matrix.ravel()
    np.set_printoptions(precision=2)

    class_names = ['Normal', 'Doente']  # labels for the confusion matrix plot

    # plot the confusion matrix with non-normalized counts
    plt.figure()
    plot_confusion_matrix(cnf_matrix,
                          classes=class_names,
                          title='Confusion matrix, without normalization')

    # plot the confusion matrix with normalized values
    plt.figure()
    plot_confusion_matrix(cnf_matrix,
                          classes=class_names,
                          normalize=True,
                          title='Normalized confusion matrix')
    main_validacao(
        tn, fp, fn,
        tp)  # print the accuracy, sensitivity and specificity values
    plt.show()  # display the plots
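The plot_confusion_matrix helper called above is not defined in this snippet. Below is a minimal sketch of what it could look like, modeled on the well-known matplotlib helper from the scikit-learn documentation examples; its signature and styling are assumptions, not the original project's code.

import itertools
import numpy as np
import matplotlib.pyplot as plt

def plot_confusion_matrix(cm, classes, normalize=False,
                          title='Confusion matrix', cmap=plt.cm.Blues):
    # Assumed helper: draws the confusion matrix with per-cell counts or ratios.
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()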
Example #2
def classificar_com_SVM(X_train, X_test, y_train, y_test):

    print("SVM")

    c_svm = SVC(kernel='rbf', random_state=0, gamma=1,
                C=1)  # SVM with an RBF kernel
    #c_svm = SVC(kernel='poly', random_state=0, gamma=0.1, C=1) # SVM with a polynomial kernel

    c_svm.fit(X_train, y_train)  # train the model

    y_pred = c_svm.predict(X_test)  # predict on the test data

    # model accuracy for X_test
    accuracy = c_svm.score(X_test, y_test)
    print('Accuracy = ', accuracy)

    # creating a confusion matrix
    cm = confusion_matrix(y_test, y_pred)
    print('Confusion matrix')
    print(cm)
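One way this function might be called, assuming the usual scikit-learn imports at module level; the dataset and split parameters below are purely illustrative, not part of the original code.

# Illustrative usage; the dataset and split parameters are assumptions.
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import confusion_matrix

X, y = load_breast_cancer(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                    random_state=0)
classificar_com_SVM(X_train, X_test, y_train, y_test)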
Example #3
            save_images(save_dir, model.get_current_visuals(),
                        model.get_image_names(), model.get_image_oriSize(),
                        opt.prob_map)

            # Resize images to the original size for evaluation
            image_size = model.get_image_oriSize()
            oriSize = (image_size[0].item(), image_size[1].item())
            gt = np.expand_dims(cv2.resize(np.squeeze(gt, axis=0),
                                           oriSize,
                                           interpolation=cv2.INTER_NEAREST),
                                axis=0)
            pred = np.expand_dims(cv2.resize(np.squeeze(pred, axis=0),
                                             oriSize,
                                             interpolation=cv2.INTER_NEAREST),
                                  axis=0)
            # accumulate the confusion matrix over the whole test set
            conf_mat += confusion_matrix(gt, pred, dataset.dataset.num_labels)

            test_loss_iter.append(model.loss_segmentation)
            print('Epoch {0:}, iters: {1:}/{2:}, loss: {3:.3f} '.format(
                opt.epoch, epoch_iter,
                len(dataset) * opt.batch_size, test_loss_iter[-1]),
                  end='\r')

        avg_test_loss = torch.mean(torch.stack(test_loss_iter))
        print('Epoch {0:} test loss: {1:.3f} '.format(opt.epoch,
                                                      avg_test_loss))
        globalacc, pre, recall, F_score, iou = getScores(conf_mat)
        print(
            'Epoch {0:} glob acc : {1:.3f}, pre : {2:.3f}, recall : {3:.3f}, F_score : {4:.3f}, IoU : {5:.3f}'
            .format(opt.epoch, globalacc, pre, recall, F_score, iou))
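The confusion_matrix used in this snippet is not scikit-learn's: it takes the number of labels (and, in the next example, an ignore_label) and returns a num_labels x num_labels count matrix that is accumulated across the test set. A minimal sketch of such a function, assuming integer label maps as input, might be:

import numpy as np

def confusion_matrix(gt, pred, num_labels, ignore_label=None):
    # Assumed sketch: count (ground truth, prediction) pairs over all pixels.
    gt = np.asarray(gt).ravel()
    pred = np.asarray(pred).ravel()
    if ignore_label is not None:
        keep = gt != ignore_label
        gt, pred = gt[keep], pred[keep]
    # Each (gt, pred) pair indexes one cell of the num_labels x num_labels matrix.
    idx = gt * num_labels + pred
    return np.bincount(idx, minlength=num_labels * num_labels).reshape(num_labels,
                                                                       num_labels)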
Example #4
            epoch_iter += opt.batch_size
            gt = model.mask.cpu().int().numpy()  # ground-truth label map
            _, pred = torch.max(model.output.data.cpu(), 1)  # argmax over class scores
            pred = pred.float().detach().int().numpy()
            if dataset.dataset.name() == 'Scannetv2':
                gt = data["mask_fullsize"].cpu().int().numpy()[0]
                pred = cv2.resize(pred[0], (gt.shape[1], gt.shape[0]),
                                  interpolation=cv2.INTER_NEAREST)
                if opt.phase == "test":
                    save_scannet_prediction(pred, data['scan'][0],
                                            data['path'][0], save_dir)
            save_images(webpage, model.get_current_visuals(),
                        model.get_image_paths())
            # accumulate the confusion matrix, skipping pixels with the ignore label
            conf_mat += confusion_matrix(
                gt,
                pred,
                dataset.dataset.num_labels,
                ignore_label=dataset.dataset.ignore_label)
            test_loss_iter.append(model.loss_segmentation.cpu().numpy())
            print('Epoch {0:}, iters: {1:}/{2:}, loss: {3:.3f} '.format(
                opt.epoch, epoch_iter,
                len(dataset) * opt.batch_size, test_loss_iter[-1]),
                  end='\r')

        avg_test_loss = np.mean(test_loss_iter)
        print('Epoch {0:} test loss: {1:.3f} '.format(opt.epoch,
                                                      avg_test_loss))
        glob, mean, iou = getScores(conf_mat)
        print(
            'Epoch {0:} glob acc : {1:.2f}, mean acc : {2:.2f}, IoU : {3:.2f}'.
            format(opt.epoch, glob, mean, iou))
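getScores is also project-specific. For the three-value variant used here, a minimal sketch of how global accuracy, mean per-class accuracy and mean IoU are typically derived from an accumulated confusion matrix could be (the exact formulas of the original project are assumptions):

import numpy as np

def getScores(conf_mat):
    # Assumed sketch: segmentation metrics from an accumulated confusion matrix.
    conf_mat = conf_mat.astype(np.float64)
    tp = np.diag(conf_mat)              # correctly classified pixels per class
    gt_total = conf_mat.sum(axis=1)     # ground-truth pixels per class
    pred_total = conf_mat.sum(axis=0)   # predicted pixels per class

    glob = tp.sum() / conf_mat.sum()                  # global pixel accuracy
    mean = np.mean(tp / np.maximum(gt_total, 1))      # mean per-class accuracy
    union = gt_total + pred_total - tp
    iou = np.mean(tp / np.maximum(union, 1))          # mean intersection-over-union
    return glob, mean, iou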