Example #1
def confusion_matrix(predictions,
                     labels,
                     classes,
                     title="Confusion Matrix",
                     device=DEVICE):
    labels = labels.to(device)
    predictions = predictions.to(device)
    # pair each true label with its predicted class index
    stacked = torch.stack((labels, predictions.argmax(dim=1)), dim=1)
    confusion_mtx = torch.zeros(len(classes), len(classes), dtype=torch.int64)

    # tally every (true, predicted) pair
    for prediction in stacked:
        t, p = prediction.tolist()
        confusion_mtx[int(t), int(p)] += 1

    plot_confusion_matrix(confusion_mtx, classes, title=title)
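
The plotcm helper imported throughout these examples is never shown on this page; a minimal matplotlib-based sketch of what such a plot_confusion_matrix could look like (the signature and keyword names here are assumptions, and some examples below use variants that also accept normalize, filename, or target_names):

import itertools
import matplotlib.pyplot as plt
import numpy as np


def plot_confusion_matrix(cm, classes, title="Confusion Matrix", cmap=plt.cm.Blues):
    # accept either a torch tensor or a numpy array
    cm = np.asarray(cm)
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # annotate each cell with its count (or rate, for a normalized matrix)
    fmt = 'd' if np.issubdtype(cm.dtype, np.integer) else '.2f'
    thresh = cm.max() / 2.0
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment='center',
                 color='white' if cm[i, j] > thresh else 'black')
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    plt.tight_layout()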
Example #2
def create_confusion_matrix(testset,
                            activation_func,
                            test,
                            classes,
                            optimizer,
                            momentum,
                            prefix=""):
    # create confusion matrix
    preds = net.get_all_preds(testset, activation_func=activation_func)
    cm = confusion_matrix(test.targets, preds.argmax(dim=1).numpy())
    plt.figure(figsize=(8, 8))
    plot_confusion_matrix(cm, classes, normalize=True)
    plt.savefig(
        f"Figures/{prefix}_cm_{activation_func.__name__}_{optimizer.__class__.__name__}_momentum_{momentum}.png"
    )
    plt.close()
Example #3
def accuracy(model, dataset_loader):
    total_correct = 0
    all_targets = []
    all_predictions = []
    for x, y in dataset_loader:
        x = x.to(device)
        y = one_hot(np.array(y.numpy()), 10)

        target_class = np.argmax(y, axis=1)
        predicted_class = np.argmax(model(x).cpu().detach().numpy(), axis=1)
        total_correct += np.sum(predicted_class == target_class)
        # accumulate per-batch results so the confusion matrix covers the
        # whole set, not just the last batch
        all_targets.extend(target_class)
        all_predictions.extend(predicted_class)
    cm = confusion_matrix(all_targets, all_predictions)
    plt.figure(figsize=(10, 10))
    plot_confusion_matrix(cm, dataset_loader.dataset.classes)
    return total_correct / len(dataset_loader.dataset)
Example #4
def getWrongValues(pred_values,
                   y_test,
                   channels,
                   shouldReturnMetrics=True,
                   num=0):
    count_wrong = 0
    if shouldReturnMetrics:
        print("Accuracy percentage: " + str(
            metrics.accuracy_score(
                y_test, pred_values, normalize=True, sample_weight=None)))
        # Compute confusion matrix (scikit-learn expects y_true first, then y_pred)
        cm = confusion_matrix(y_test, pred_values, labels=channels)
        numpy.set_printoptions(precision=2)
        print('Confusion matrix, without normalization')
        print(cm)
        plt.figure()
        #plotcm.plot_confusion_matrix(cm,channels,title="Confusion matrix: n=" + str(num/len(channels)),filename="cm"+(str(num/len(channels))))
        cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, numpy.newaxis]
        plotcm.plot_confusion_matrix(
            cm_normalized,
            channels,
            title='Normalized confusion matrix, n=' + str(num / len(channels)),
            filename="cm" + (str(num / len(channels))) + "norm.png")
Example #5
    def end_run(self):
        plot_buf, figure = plotcm.plot_confusion_matrix(self.cm, self.names)
        plt.close(figure)

        image = PIL.Image.open(plot_buf)
        image = ToTensor()(image).unsqueeze(0)

        grid = torchvision.utils.make_grid(image, normalize=True, scale_each=True)
        self.tb.add_image('confusion_matrix', grid)
        valid_accuracy = 0.0
        if self.valid_loader is not None:
            valid_accuracy = self.validCorrect / len(self.valid_loader.sampler)
        test_accuracy = 0.0
        if self.test_loader is not None:
            test_accuracy = self.testCorrect / len(self.test_loader.sampler)
        self.best_models.append(ModelSummary(valid_accuracy=valid_accuracy,
                                             test_accuracy=test_accuracy,
                                             run_params=self.run_params,
                                             network_rpr=str(self.network)))

        self.tb.close()
        self.epoch.count = 0
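
If self.tb is a torch.utils.tensorboard SummaryWriter (an assumption; the snippet does not show how it is constructed), the PIL/ToTensor round trip can be replaced by logging the figure directly:

# alternative sketch: SummaryWriter.add_figure renders a matplotlib figure itself
plot_buf, figure = plotcm.plot_confusion_matrix(self.cm, self.names)
self.tb.add_figure('confusion_matrix', figure)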
Example #6
oa = accuracy_score(true_labels, pre_labels)
kappa_oa = {}
print('oa_all:', oa)
print('kappa_all:', kappa)  # kappa is assumed to be computed earlier (e.g. with cohen_kappa_score)
kappa_oa['oa_all'] = oa
kappa_oa['kappa_all'] = kappa
fd = open('results_all_%s_d0.5.dat' % (name), 'wb')
cPickle.dump(("%s" % (name), 0.5, kappa_oa), fd)
fd.close()
cnf_matrix = confusion_matrix(true_labels, pre_labels)
# np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
# plt.figure()
plotcm.plot_confusion_matrix(
    cnf_matrix,
    classes=classes,
    normalize=False,
    title='%s Confusion matrix, without normalization' % (name),
    showtext=True)
plt.savefig('%s Confusion matrix, without normalization' % (name))
# Plot normalized confusion matrix
# plt.figure()
plotcm.plot_confusion_matrix(cnf_matrix,
                             classes=classes,
                             normalize=True,
                             title='%s Normalized confusion matrix' % (name),
                             showtext=True)
plt.savefig('%s Normalized confusion matrix' % (name))
# plt.show()

# %% Build the confusion matrix manually
conf = np.zeros([len(classes), len(classes)])
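
The snippet ends just as a hand-built confusion matrix is started; a typical continuation (hypothetical, not the original code) would tally each pair of true and predicted labels:

# hypothetical continuation: count every (true, predicted) pair into conf
for t, p in zip(true_labels, pre_labels):
    conf[int(t), int(p)] += 1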
Example #7
stacked = torch.stack((train_set.targets, train_preds.argmax(dim=1)), dim=1)

print('\nstacked.shape:', stacked.shape)
print('stacked:')
print(stacked)
stacked[0].tolist()
cmt = torch.zeros(10, 10, dtype=torch.int32)
print('\ncmt:')
print(cmt)
for p in stacked:
    tl, pl = p.tolist()
    cmt[tl, pl] = cmt[tl, pl] + 1
print('\ncmt:')
print(cmt)
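
The element-wise loop above can also be written as a single vectorized operation; a small sketch, assuming the same 10 classes and integer tensors (flat and cmt_fast are illustrative names, not from the original):

# encode each (true, predicted) pair as one index, count occurrences, reshape
flat = train_set.targets * 10 + train_preds.argmax(dim=1)
cmt_fast = torch.bincount(flat, minlength=100).reshape(10, 10)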

# Plotting a confusion matrix
import matplotlib.pyplot as plt

from sklearn.metrics import confusion_matrix
from plotcm import plot_confusion_matrix

cm = confusion_matrix(train_set.targets, train_preds.argmax(dim=1))
print('\ntype(cm):', type(cm))
print('cm:', cm)

names = ('T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
         'Shirt', 'Sneaker', 'Bag', 'Ankle boot')
plt.figure(figsize=(10, 10))
plot_confusion_matrix(cmt, names)
Example #8
                             '\\model\\Kfold_' + str(k) + '_epoch_' +
                             str(best_epoch) + '.pth')
        test_loss, test_acc, tem_alabel, tem_plabel = test(
            'test', test_loader, model_1, base_path + 'model_' + str(k))

        tb.add_scalar('Test_loss_fold_1', test_loss, k)
        tb.add_scalar('Test_Accracy_fold_1', test_acc, k)

        all_label = all_label + tem_alabel
        pred_label = pred_label + tem_plabel

        cm = confusion_matrix(tem_alabel, tem_plabel)
        print(cm)
        plot_confusion_matrix(cm,
                              base_path + 'model_' + str(k) + '\\cm\\Kfold_' +
                              str(k) + '_epoch_' + str(best_epoch) + '.png',
                              target_names=names,
                              cmap=None,
                              normalize=False)

        # model.apply(weight_reset)
        del model_1
        list_of_dataset.clear()

    elif k == 2:
        list_of_dataset.append(fold1)
        list_of_dataset.append(fold3)
        list_of_dataset.append(fold4)
        list_of_dataset.append(fold5)
        ds = torch.utils.data.ConcatDataset(list_of_dataset)
        train_ds, valid_ds = torch.utils.data.random_split(ds, (576, 64))
        # train_ds, valid_ds = torch.utils.data.random_split(ds, (34, 10))
Example #9
    total_accuracy = total_correct / total_num
    string = 'epoch:%d loss = %f  accuracy = %f' % (i, loss.item(),
                                                    total_accuracy)
    print(string)

# 4. Evaluate the model
from sklearn.metrics import confusion_matrix
from plotcm import plot_confusion_matrix
from matplotlib import pyplot as plt


def get_all_preds(model, data_loader):
    all_preds = torch.tensor([])
    with torch.no_grad():
        for batch in data_loader:
            images, labels = batch
            preds = model(images)
            all_preds = torch.cat((all_preds, preds), dim=0)
    return all_preds


train_preds = get_all_preds(network, data_loader)
# Build the confusion matrix
cm = confusion_matrix(train_set.targets, train_preds.argmax(dim=1))
# Plot the confusion matrix
name = ('T-shirt', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', 'Shirt',
        'Sneaker', 'Bag', 'Ankle boot')
plt.figure(figsize=(10, 10))
plot_confusion_matrix(cm, name)
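
When reusing get_all_preds for evaluation, it is common to put the network into evaluation mode first so dropout and batch-norm layers behave deterministically; a small addition to the call above, assuming network is an nn.Module as in the snippet:

network.eval()  # disable dropout / use running batch-norm statistics
train_preds = get_all_preds(network, data_loader)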
Example #10
def main(epoch=1, C=0.001, gamma=1, kernel='linear', train_size=-1, search_parameters=False):
    # Read dataset to pandas dataframe
    train_dataset = pd.read_csv(train_data_path)
    trainY = np.array(train_dataset.iloc[:, 0])
    trainX = np.array(train_dataset.iloc[:, 1:])
    if train_size != -1:
        trainX = trainX[:train_size,:]
        trainY = trainY[:train_size]
    test_dataset = pd.read_csv(test_data_path)
    testY = np.array(test_dataset.iloc[:, 0])
    testX = np.array(test_dataset.iloc[:, 1:])

    # pre-processing data
    trainX = int2float_grey(trainX)
    testX = int2float_grey(testX)

    print('training data size: ', trainX.shape, trainY.shape)
    print('test data size: ', testX.shape, testY.shape)

    # display example images
    fig, axes = plt.subplots(2, 4)
    plt.title('example images')
    for ax in axes.flat:
        isample = np.random.randint(trainX.shape[0])
        ax.imshow(trainX[isample].reshape(28, 28), cmap='gray')
        ax.set_title("Digit = {}".format(trainY[isample]))
        ax.axis('off')
    try:
        gamma = int(gamma)
    except:
        pass
    tic = timeit.default_timer()
    if search_parameters:
        # search for the best parameters
        print('searching for the best parameters will take a long time, please wait ...')
        svc = svm.SVC(shrinking=True, max_iter=500)  # max_iter=500 to limit optimizer non-convergence
        Clist = np.logspace(0, 2, 10)
        # float grid for gamma plus sklearn's 'scale' option
        Glist = list(np.logspace(0, 3, 10)) + ['scale']
        Dlist = [1, 2, 3]
        Kernellist = ('linear', 'poly', 'rbf')
        parameters = {'kernel': Kernellist, 'C': Clist, 'gamma': Glist, 'degree': Dlist}
        clf = GridSearchCV(svc, parameters)
    else:
        clf = svm.SVC(C=C, gamma=gamma, kernel=kernel)
    print('start training ...')
    for i in tqdm(range(epoch)):
        clf.fit(trainX, trainY)
    toc = timeit.default_timer()
    print("Execution time = {:.3g} s".format(toc-tic)) 
    # if searching for the best parameters, print out the results found
    if search_parameters:
        print("best score is {}".format(clf.best_score_))
        print("the best parameters are: ")
        print(clf.best_params_)
    print('predicting ...')
    y_test_predic = clf.predict(testX)
    nerr_test = (y_test_predic != testY).sum()
    print("recognition rate of test data = {:.1f}%".format(100 - 100*float(nerr_test)/testX.shape[0]))

    cm = confusion_matrix(testY, y_test_predic)
    print("Confusion matrix:\n%s" % cm)

    plt.figure(figsize=(10,10))
    plt.title('confusion matrix')
    class_names = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
    plot_confusion_matrix(cm, classes=class_names, normalize=True,
                          title='Normalized Confusion Matrix')

    plt.show() 
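
A hypothetical entry point for this script (the argument values below are illustrative, not from the original):

if __name__ == '__main__':
    # quick RBF baseline on a 5000-sample subset; pass search_parameters=True for the grid search
    main(epoch=1, C=10, gamma='scale', kernel='rbf', train_size=5000)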
Example #11
        from plotcm import plot_confusion_matrix
        from sklearn.metrics import confusion_matrix
        if rnd % 2:
            stacked = torch.stack((Y_te, P), dim=1)
            cmt = torch.zeros(data_args['num_classes'],
                              data_args['num_classes'],
                              dtype=torch.int64)
            for p in stacked:
                tl, pl = p.tolist()
                cmt[tl, pl] = cmt[tl, pl] + 1
            print(cmt)
            cm = confusion_matrix(Y_te, P)
            plt.figure(figsize=(25, 25))
            classes = data_args['class_names']
            #classes = [str(i) for i in range(data_args['num_classes'])]
            plot_confusion_matrix(cm, classes, rnd)
        ########################

        ### CALCULATE BALANCED ACCURACY / ACCURACY ###
        if DATASET == 'PLANKTON10':
            accuracy = metrics.balanced_accuracy_score(Y_te, P)
        else:
            accuracy = float(1.0 * (Y_te == P).sum().item() / len(Y_te))
        ###############################################

        acc.append(round(accuracy, 4))
        num_labeled_samples.append(round(len(ALD.index['labeled']), 4))
        logger.debug(
            f'Round: {rnd}, Testing accuracy: {acc[rnd]}, Samples labeled: {num_labeled_samples[rnd]}, Pool size: {len(ALD.index["unlabeled"])}, Iteration time: {datetime.now()-tic}'
        )