Example #1
            pred_test.extend(np.array(net(X).cpu().argmax(axis=1)))
    toc2 = time.time()
    collections.Counter(pred_test)
    gt_test = gt[test_indices] - 1

    overall_acc = metrics.accuracy_score(pred_test, gt_test[:-VAL_SIZE])
    confusion_matrix = metrics.confusion_matrix(pred_test, gt_test[:-VAL_SIZE])
    each_acc, average_acc = record.aa_and_each_accuracy(confusion_matrix)
    kappa = metrics.cohen_kappa_score(pred_test, gt_test[:-VAL_SIZE])

    torch.save(net.state_dict(),
               "./models/" + 'SSRN' + str(round(overall_acc, 3)) + '.pt')
    KAPPA.append(kappa)
    OA.append(overall_acc)
    AA.append(average_acc)
    TRAINING_TIME.append(toc1 - tic1)
    TESTING_TIME.append(toc2 - tic2)
    ELEMENT_ACC[index_iter, :] = each_acc

# # Map, Records
print("--------" + " Training Finished-----------")
record.record_output(
    OA, AA, KAPPA, ELEMENT_ACC, TRAINING_TIME, TESTING_TIME,
    './report/' + 'SSRNpatch:' + str(img_rows) + '_' + Dataset + 'split' +
    str(VALIDATION_SPLIT) + 'lr' + str(lr) + PARAM_OPTIM + '.txt')

Utils.generate_png(
    all_iter, net, gt_hsi, Dataset, device, total_indices,
    './classification_maps/' + 'SSRNpatch:' + str(img_rows) + '_' + Dataset +
    'split' + str(VALIDATION_SPLIT) + 'lr' + str(lr) + PARAM_OPTIM)
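
The metric block above relies on record.aa_and_each_accuracy to turn the confusion matrix into per-class and average accuracy. A minimal sketch of what such a helper typically computes (an illustration, not the repository's actual implementation):

import numpy as np

def aa_and_each_accuracy(confusion_matrix):
    # Per-class accuracy: diagonal (correct predictions) divided by the
    # corresponding row sums; average accuracy is the mean over classes.
    list_diag = np.diag(confusion_matrix)
    list_row_sum = np.sum(confusion_matrix, axis=1)
    each_acc = np.nan_to_num(list_diag / list_row_sum)
    average_acc = np.mean(each_acc)
    return each_acc, average_acc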
Example #2
    overall_acc = metrics.accuracy_score(pred_test, gt_test[:-VAL_SIZE])
    confusion_matrix = metrics.confusion_matrix(pred_test, gt_test[:-VAL_SIZE])
    each_acc, average_acc = record.aa_and_each_accuracy(confusion_matrix)
    kappa = metrics.cohen_kappa_score(pred_test, gt_test[:-VAL_SIZE])

    torch.save(
        net.state_dict(), "./models/S3KAIResNetpatch_" + str(img_rows) + '_' +
        Dataset + '_split_' + str(VALIDATION_SPLIT) + '_lr_' + str(lr) +
        PARAM_OPTIM + '_kernel_' + str(PARAM_KERNEL_SIZE) + str(
            round(overall_acc, 3)) + '.pt')
    KAPPA.append(kappa)
    OA.append(overall_acc)
    AA.append(average_acc)
    TRAINING_TIME.append(toc1 - tic1)
    TESTING_TIME.append(toc2 - tic2)
    ELEMENT_ACC[index_iter, :] = each_acc

# # Map, Records
print("--------" + " Training Finished-----------")
record.record_output(
    OA, AA, KAPPA, ELEMENT_ACC, TRAINING_TIME, TESTING_TIME,
    './report/' + 'S3KAIResNetpatch:' + str(img_rows) + '_' + Dataset + 'split'
    + str(VALIDATION_SPLIT) + 'lr' + str(lr) + PARAM_OPTIM + '_kernel_' +
    str(PARAM_KERNEL_SIZE) + '.txt')

Utils.generate_png(
    all_iter, net, gt_hsi, Dataset, device, total_indices,
    './classification_maps/' + 'S3KAIResNetpatch:' + str(img_rows) + '_' +
    Dataset + 'split' + str(VALIDATION_SPLIT) + 'lr' + str(lr) + PARAM_OPTIM +
    '_kernel_' + str(PARAM_KERNEL_SIZE))
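
For context, the metric step shared by these examples (overall accuracy, per-class and average accuracy, Cohen's kappa) can be reproduced with scikit-learn alone. The labels below are made up for illustration and are not HSI data:

import numpy as np
from sklearn import metrics

gt_test = np.array([0, 0, 1, 1, 2, 2])      # made-up ground-truth labels
pred_test = np.array([0, 1, 1, 1, 2, 0])    # made-up predictions

overall_acc = metrics.accuracy_score(gt_test, pred_test)
cm = metrics.confusion_matrix(gt_test, pred_test)
each_acc = np.diag(cm) / cm.sum(axis=1)     # per-class accuracy
average_acc = each_acc.mean()
kappa = metrics.cohen_kappa_score(gt_test, pred_test)
print(overall_acc, average_acc, kappa)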
Example #3
        net.eval()  # switch to evaluation mode once, before iterating
        for X, y in test_iter:
            X = X.to(device)
            y_hat = net(X)
            pred_test.extend(np.array(y_hat.cpu().argmax(axis=1)))
    toc2 = time.perf_counter()  # time.clock() was removed in Python 3.8
    collections.Counter(pred_test)
    gt_test = gt[test_indices] - 1

    overall_acc = metrics.accuracy_score(pred_test, gt_test[:-VAL_SIZE])
    confusion_matrix = metrics.confusion_matrix(pred_test, gt_test[:-VAL_SIZE])
    each_acc, average_acc = aa_and_each_accuracy(confusion_matrix)
    kappa = metrics.cohen_kappa_score(pred_test, gt_test[:-VAL_SIZE])

    torch.save(net.state_dict(),
               "./models/" + str(round(overall_acc, 3)) + '.pt')
    KAPPA.append(kappa)
    OA.append(overall_acc)
    AA.append(average_acc)
    TRAINING_TIME.append(toc1 - tic1)
    TESTING_TIME.append(toc2 - tic2)
    ELEMENT_ACC[index_iter, :] = each_acc

print("--------" + net.name + " Training Finished-----------")
record.record_output(
    OA, AA, KAPPA, ELEMENT_ACC, TRAINING_TIME, TESTING_TIME, 'records/' +
    net.name + 'Patch' + str(2 * PATCH_LENGTH + 1) + 'Time' + day_str + '_' +
    Dataset + 'TrainingSamples' + str(SAMPLES_NUM) + 'lr:' + str(lr) + '.txt')

generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices)
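
The torch.save(net.state_dict(), ...) calls above persist only the weights, so restoring a checkpoint requires rebuilding the same architecture first. A self-contained sketch of that pattern, using a placeholder nn.Linear instead of the actual network:

import os
import torch
import torch.nn as nn

os.makedirs("./models", exist_ok=True)
net = nn.Linear(10, 3)                       # stand-in for the trained network
torch.save(net.state_dict(), "./models/demo_0.953.pt")

restored = nn.Linear(10, 3)                  # must match the saved architecture
restored.load_state_dict(torch.load("./models/demo_0.953.pt", map_location="cpu"))
restored.eval()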
Example #4
    def startDetection(self):
        #self.printStart()
        self.datanum = self.comboBox.currentIndex()
        self.learningrate = self.lineEdit.text()
        self.trainRate = self.lineEdit_2.text()

        lr = float(self.learningrate)
        print("lr = ", lr)
        if self.datanum == 0:
            dataset = 'IN'
        elif self.datanum == 1:
            dataset = 'SA'
        elif self.datanum == 2:
            dataset = 'UP'

        print("dataset = ", dataset)
        split = 1 - float(self.trainRate)
        print("split = ", split)

        #self.printf('-----Importing Dataset-----')
        print('-----Importing Dataset-----')

        Dataset = dataset.upper()
        data_hsi, gt_hsi, TOTAL_SIZE, TRAIN_SIZE, VALIDATION_SPLIT = load_dataset(Dataset, split)
        print(data_hsi.shape)

        image_x, image_y, BAND = data_hsi.shape
        data = data_hsi.reshape(np.prod(data_hsi.shape[:2]), np.prod(data_hsi.shape[2:]))  # np.prod(): product of the elements; flattens to (rows*cols, bands)
        gt = gt_hsi.reshape(np.prod(gt_hsi.shape[:2]), )
        CLASSES_NUM = max(gt)
        CLASSTYPE = 13

        self.printf('The target to be detected is: wood')
        print('The class numbers of the HSI data is:', CLASSES_NUM)

        #self.printf('-----Importing Setting Parameters-----')
        print('-----Importing Setting Parameters-----')

        INPUT_DIMENSION = data_hsi.shape[2]
        data = preprocessing.scale(data)  # sklearn: standardize each band to zero mean and unit variance
        data_ = data.reshape(data_hsi.shape[0], data_hsi.shape[1], data_hsi.shape[2])
        whole_data = data_
        padded_data = np.lib.pad(whole_data, ((PATCH_LENGTH, PATCH_LENGTH), (PATCH_LENGTH, PATCH_LENGTH), (0, 0)),
                                 'constant', constant_values=0)

        for index_iter in range(ITER):
            #self.printf('iter:'+ str(index_iter))
            print('iter:', index_iter)
            #net = network.network_mish(BAND, CLASSES_NUM, INPUT_SIZE, HIDDEN_SIZE, NUM_LAYERS, device)
            # net = network.DBDA_network_MISH(BAND, CLASSES_NUM)
            #net.load_state_dict(torch.load('net/IN-WOOD_DETECTIONclasstype=4.pt', map_location=device))

            net = torch.load('net/IN-WOOD_DETECTIONclasstype=13.pth', map_location='cpu')

            optimizer = optim.Adam(net.parameters(), lr=lr, amsgrad=False)  # , weight_decay=0.0001)
            time_1 = int(time.time())
            np.random.seed(seeds[index_iter])
            train_indices, test_indices = sampling(VALIDATION_SPLIT, gt)
            _, total_indices = sampling(1, gt)

            TRAIN_SIZE = len(train_indices)
            self.printf('Train size: ' + str(TRAIN_SIZE))
            print('Train size: ', TRAIN_SIZE)
            TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE
            self.printf('Test size: ' + str(TEST_SIZE))
            print('Test size: ', TEST_SIZE)
            VAL_SIZE = int(TRAIN_SIZE)
            self.printf('Validation size: ' + str(VAL_SIZE))
            print('Validation size: ', VAL_SIZE)

            #self.printf('-----Selecting Small Pieces from the Original Cube Data-----')
            print('-----Selecting Small Pieces from the Original Cube Data-----')

            train_iter, valida_iter, test_iter, all_iter = generate_iter(TRAIN_SIZE, train_indices, TEST_SIZE,
                                                                         test_indices, TOTAL_SIZE, total_indices, VAL_SIZE,
                                                                         whole_data, PATCH_LENGTH, padded_data,
                                                                         INPUT_DIMENSION, batch_size, gt)

            # train
            tic1 = time.perf_counter()
            # print("tic1 = ", tic1)
            #train.train(net, train_iter, valida_iter, loss, optimizer, device, epochs=num_epochs)
            toc1 = time.perf_counter()
            # print("toc1 = ", toc1)

            pred_test_fdssc = []
            pred_test_possb = []
            tic2 = time.perf_counter()
            # print("tic2 = ", tic2)

            with torch.no_grad():
                net.eval()  # evaluation mode
                for X, y in test_iter:
                    X = X.to(device)
                    output = net(X).cpu()
                    y_hat = sigmoid(output)  # class scores for the ROC/AUC step
                    # print(output)
                    pred_test_possb.extend(y_hat.numpy().tolist())
                    pred_test_fdssc.extend(np.array(output.argmax(axis=1)))

            toc2 = time.perf_counter()
            # print("toc2 = ", toc2)
            collections.Counter(pred_test_fdssc)
            gt_test = gt[test_indices] - 1

            pred_test_possbofclass = [i[CLASSTYPE] for i in pred_test_possb]
            gt_re4roc = np.where(gt == CLASSTYPE, 1, 0)

            # evaluation metrics

            fpr, tpr, thresholds = metrics.roc_curve(gt_test[:-VAL_SIZE], pred_test_possbofclass, pos_label=CLASSTYPE)
            # print("fpr = ", fpr)
            # print("tpr = ", tpr)
            roc_auc = metrics.auc(fpr, tpr)
            self.AUC.append(roc_auc)
            #plt.plot(fpr, tpr, color='darkorange', label='ROC curve (area = %0.4f)' % roc_auc)
            #plt.xlabel('False Positive Rate')
            #plt.ylabel('True Positive Rate')
            #plt.grid()
            #plt.show()

            overall_acc_fdssc = metrics.accuracy_score(pred_test_fdssc, gt_test[:-VAL_SIZE])
            confusion_matrix_fdssc = metrics.confusion_matrix(pred_test_fdssc, gt_test[:-VAL_SIZE])
            each_acc_fdssc, average_acc_fdssc = aa_and_each_accuracy(confusion_matrix_fdssc)  # from generate_pic.py
            kappa = metrics.cohen_kappa_score(pred_test_fdssc, gt_test[:-VAL_SIZE])

            #    torch.save(net.state_dict(), "./net/" + str(round(overall_acc_fdssc, 3)) + '.pt')
            self.KAPPA.append(kappa)
            self.OA.append(overall_acc_fdssc)
            # OA.append('test')
            self.AA.append(average_acc_fdssc)
            self.TRAINING_TIME.append(toc1 - tic1)
            self.TESTING_TIME.append(toc2 - tic2)
            # ELEMENT_ACC[index_iter, :] = each_acc_fdssc

        self.printf("--------" + net.name + " Training Finished-----------")
        print("--------Network Training Finished-----------")
        #print(self.OA)

        record.record_output(self.OA, self.AA, self.KAPPA, self.AUC, self.TRAINING_TIME, self.TESTING_TIME,
                             'records/' + net.name + day_str + '_' + Dataset + 'split:' + str(VALIDATION_SPLIT) + 'lr:' + str(lr) + '.txt')

        generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices, CLASSTYPE)

        self.picpath_res = net.name + '/detection_maps/' + Dataset + '_' + net.name + '.png'
        self.picpath_gt = net.name + '/detection_maps/' + Dataset + '_gt.png'
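
The per-iteration AUC in Example #4 is a standard binary ROC computation over the scores of the target class. A self-contained sketch with dummy scores (not the wood-detection data):

import numpy as np
from sklearn import metrics

gt_binary = np.array([0, 0, 1, 1, 1, 0])            # 1 = target class present
scores = np.array([0.1, 0.4, 0.35, 0.8, 0.7, 0.2])  # per-sample target scores

fpr, tpr, thresholds = metrics.roc_curve(gt_binary, scores, pos_label=1)
roc_auc = metrics.auc(fpr, tpr)
print("AUC =", roc_auc)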