            # accumulate class predictions over the test loader
            pred_test_fdssc.extend(np.array(net(X).cpu().argmax(axis=1)))
    toc2 = time.perf_counter()  # time.clock() was removed in Python 3.8
    print(collections.Counter(pred_test_fdssc))  # predicted class distribution
    gt_test = gt[test_indices] - 1  # shift 1-based ground-truth labels to 0-based

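    # the last VAL_SIZE test samples were held out for validation, so exclude them from the metrics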
    overall_acc_fdssc = metrics.accuracy_score(pred_test_fdssc,
                                               gt_test[:-VAL_SIZE])
    confusion_matrix_fdssc = metrics.confusion_matrix(pred_test_fdssc,
                                                      gt_test[:-VAL_SIZE])
    each_acc_fdssc, average_acc_fdssc = aa_and_each_accuracy(
        confusion_matrix_fdssc)
    kappa = metrics.cohen_kappa_score(pred_test_fdssc, gt_test[:-VAL_SIZE])

    torch.save(net.state_dict(),
               "./net/" + str(round(overall_acc_fdssc, 3)) + '.pt')
    KAPPA.append(kappa)
    OA.append(overall_acc_fdssc)
    AA.append(average_acc_fdssc)
    TRAINING_TIME.append(toc1 - tic1)
    TESTING_TIME.append(toc2 - tic2)
    ELEMENT_ACC[index_iter, :] = each_acc_fdssc

print("--------" + net.name + " Training Finished-----------")
record.record_output(
    OA, AA, KAPPA, ELEMENT_ACC, TRAINING_TIME, TESTING_TIME,
    'records/' + net.name + day_str + '_' + Dataset + 'split:' +
    str(VALIDATION_SPLIT) + 'lr:' + str(lr) + '.txt')

generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices)
Example #2
    def fdssc_run(self):
        global Dataset  # UP,IN,KSC
        # dataset = input('Please input the name of Dataset(IN, UP, BS, SV, PC or KSC):')
        dataset = self.comboBox_1.currentText()
        Dataset = dataset.upper()
        data_hsi, gt_hsi, TOTAL_SIZE, TRAIN_SIZE, VALIDATION_SPLIT = load_dataset(
            Dataset)
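        # data_hsi: (H, W, BAND) cube; gt_hsi: per-pixel class labels; the remaining
        # values parameterize the train/test split for the chosen dataset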

        print(data_hsi.shape)
        image_x, image_y, BAND = data_hsi.shape
        data = data_hsi.reshape(np.prod(data_hsi.shape[:2]),
                                np.prod(data_hsi.shape[2:]))
        gt = gt_hsi.reshape(np.prod(gt_hsi.shape[:2]), )
        CLASSES_NUM = max(gt)
        print('The number of classes in the HSI data is:', CLASSES_NUM)

        print('-----Importing Setting Parameters-----')
        ITER = 10
        PATCH_LENGTH = 3
        # shared run settings (previous defaults: lr = 0.0010, num_epochs = 200, batch_size = 32)
        lr, num_epochs, batch_size = Value.lr_all, Value.num_epochs_all, Value.batch_size_all

        loss = torch.nn.CrossEntropyLoss()

        img_rows = 2 * PATCH_LENGTH + 1
        img_cols = 2 * PATCH_LENGTH + 1
        img_channels = data_hsi.shape[2]
        INPUT_DIMENSION = data_hsi.shape[2]
        ALL_SIZE = data_hsi.shape[0] * data_hsi.shape[1]
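        # the validation set is the same size as the training set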
        VAL_SIZE = int(TRAIN_SIZE)
        TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE

        KAPPA = []
        OA = []
        AA = []
        TRAINING_TIME = []
        TESTING_TIME = []
        ELEMENT_ACC = np.zeros((ITER, CLASSES_NUM))

        # standardize each spectral band to zero mean and unit variance
        data = preprocessing.scale(data)
        data_ = data.reshape(data_hsi.shape[0], data_hsi.shape[1],
                             data_hsi.shape[2])
        whole_data = data_
        # zero-pad the spatial borders so edge pixels still get full patches
        padded_data = np.pad(whole_data,
                             ((PATCH_LENGTH, PATCH_LENGTH),
                              (PATCH_LENGTH, PATCH_LENGTH), (0, 0)),
                             'constant',
                             constant_values=0)

        net = network.FDSSC_network(BAND, CLASSES_NUM)
        path = net.name + '_' + Dataset + '.pt'
        if not os.path.exists(path):
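            # no saved checkpoint for this dataset yet: train ITER independent runs from scratch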
            for index_iter in range(ITER):
                net = network.FDSSC_network(BAND, CLASSES_NUM)
                optimizer = optim.Adam(net.parameters(),
                                       lr=lr)  # , weight_decay=0.0001)
                time_1 = int(time.time())
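                # re-seed per run so each run draws a different but reproducible train/test split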
                np.random.seed(seeds[index_iter])
                train_indices, test_indices = sampling(VALIDATION_SPLIT, gt)
                _, total_indices = sampling(1, gt)

                TRAIN_SIZE = len(train_indices)
                print('Train size: ', TRAIN_SIZE)
                TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE
                print('Test size: ', TEST_SIZE)
                VAL_SIZE = int(TRAIN_SIZE)
                print('Validation size: ', VAL_SIZE)

                print(
                    '-----Selecting Small Pieces from the Original Cube Data-----'
                )

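                # build train / validation / test / all-pixel DataLoaders of
                # (2 * PATCH_LENGTH + 1) x (2 * PATCH_LENGTH + 1) x BAND patches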
                train_iter, valida_iter, test_iter, all_iter = generate_iter(
                    TRAIN_SIZE, train_indices, TEST_SIZE, test_indices,
                    TOTAL_SIZE, total_indices, VAL_SIZE, whole_data,
                    PATCH_LENGTH, padded_data, INPUT_DIMENSION, batch_size, gt)

                tic1 = time.perf_counter()
                train.train(net,
                            train_iter,
                            valida_iter,
                            loss,
                            optimizer,
                            device,
                            epochs=num_epochs,
                            path=path)
                toc1 = time.perf_counter()

                pred_test_fdssc = []
                tic2 = time.perf_counter()
                net.eval()  # evaluation mode: disables dropout
                with torch.no_grad():
                    for X, y in test_iter:
                        X = X.to(device)
                        y_hat = net(X)  # forward pass once, reuse the result
                        pred_test_fdssc.extend(
                            np.array(y_hat.cpu().argmax(axis=1)))
                toc2 = time.perf_counter()
                print(collections.Counter(pred_test_fdssc))  # predicted class distribution
                gt_test = gt[test_indices] - 1  # shift 1-based labels to 0-based
                print(np.unique(pred_test_fdssc))

                overall_acc_fdssc = metrics.accuracy_score(
                    pred_test_fdssc, gt_test[:-VAL_SIZE])
                confusion_matrix_fdssc = metrics.confusion_matrix(
                    pred_test_fdssc, gt_test[:-VAL_SIZE])
                each_acc_fdssc, average_acc_fdssc = aa_and_each_accuracy(
                    confusion_matrix_fdssc)
                kappa = metrics.cohen_kappa_score(pred_test_fdssc,
                                                  gt_test[:-VAL_SIZE])

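                # save this run's weights and record its metrics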
                torch.save(net.state_dict(), path)
                KAPPA.append(kappa)
                OA.append(overall_acc_fdssc)
                AA.append(average_acc_fdssc)
                TRAINING_TIME.append(toc1 - tic1)
                TESTING_TIME.append(toc2 - tic2)
                ELEMENT_ACC[index_iter, :] = each_acc_fdssc

            print("--------" + net.name + " Training Finished-----------")
            record.record_output(
                OA, AA, KAPPA, ELEMENT_ACC, TRAINING_TIME, TESTING_TIME,
                '../' + net.name + '/records/' + net.name + day_str + '_' +
                Dataset + 'split:' + str(VALIDATION_SPLIT) + 'lr:' + str(lr) +
                '.txt')

            generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices,
                         self.lineEdit.text())
        else:
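            # a trained checkpoint already exists: rebuild the iterators and only regenerate the classification map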
            train_indices, test_indices = sampling(VALIDATION_SPLIT, gt)
            _, total_indices = sampling(1, gt)

            TRAIN_SIZE = len(train_indices)
            TEST_SIZE = TOTAL_SIZE - TRAIN_SIZE
            VAL_SIZE = int(TRAIN_SIZE)
            train_iter, valida_iter, test_iter, all_iter = generate_iter(
                TRAIN_SIZE, train_indices, TEST_SIZE, test_indices, TOTAL_SIZE,
                total_indices, VAL_SIZE, whole_data, PATCH_LENGTH, padded_data,
                INPUT_DIMENSION, batch_size, gt)
            net.load_state_dict(torch.load(path, map_location='cpu'))  # load onto CPU to match the device used below
            generate_png(all_iter, net, gt_hsi, Dataset, 'cpu', total_indices,
                         self.lineEdit.text())
Example #3
    overall_acc_fdssc = metrics.accuracy_score(pred_test_fdssc,
                                               gt_test[:-VAL_SIZE])
    confusion_matrix_fdssc = metrics.confusion_matrix(pred_test_fdssc,
                                                      gt_test[:-VAL_SIZE])
    each_acc_fdssc, average_acc_fdssc = aa_and_each_accuracy(
        confusion_matrix_fdssc)
    kappa = metrics.cohen_kappa_score(pred_test_fdssc, gt_test[:-VAL_SIZE])

    torch.save(net.state_dict(),
               "./net/" + str(round(overall_acc_fdssc, 3)) + '.pt')
    KAPPA.append(kappa)
    OA.append(overall_acc_fdssc)
    AA.append(average_acc_fdssc)
    TRAINING_TIME.append(toc1 - tic1)
    TESTING_TIME.append(toc2 - tic2)
    ELEMENT_ACC[index_iter, :] = each_acc_fdssc

print("--------" + net.name + " Training Finished-----------")
record.record_output(
    OA, AA, KAPPA, ELEMENT_ACC, TRAINING_TIME, TESTING_TIME,
    'records/' + method + '_' + Dataset + '_' + str(BAND) + '_' +
    str(VALIDATION_SPLIT) + '.txt')
location = 'records/' + method + '_' + Dataset + '_' + str(BAND) + '_' + str(
    VALIDATION_SPLIT) + '.txt'

generate_png(all_iter, net, gt_hsi, Dataset, device, total_indices)
print("location=\"", end="")
print("./records/" + method + '_' + Dataset + '_' + str(BAND) + '_' +
      str(VALIDATION_SPLIT) + '.txt',
      end="")
print("\"")
Example #4
    toc2 = time.perf_counter()

    print('Training time:', toc1 - tic1)
    print('Test time:', toc2 - tic2)
    print('Test score:', loss_and_metrics[0])
    print('Test accuracy:', loss_and_metrics[1])
    print(history_fdssc.history.keys())

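    # the 3-D CNN expects a trailing channel axis, hence the reshape to (N, H, W, BAND, 1)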
    pred_test_fdssc = model_fdssc.predict(x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2],
                                                         x_test.shape[3], 1)).argmax(axis=1)
    print(collections.Counter(pred_test_fdssc))  # inspect the predicted class distribution
    gt_test = gt[test_indices] - 1

    overall_acc_fdssc = metrics.accuracy_score(pred_test_fdssc, gt_test[:-VAL_SIZE])
    confusion_matrix_fdssc = metrics.confusion_matrix(pred_test_fdssc, gt_test[:-VAL_SIZE])
    each_acc_fdssc, average_acc_fdssc = aa_and_each_accuracy(confusion_matrix_fdssc)
    kappa = metrics.cohen_kappa_score(pred_test_fdssc, gt_test[:-VAL_SIZE])

    KAPPA.append(kappa)
    OA.append(overall_acc_fdssc)
    AA.append(average_acc_fdssc)
    TRAINING_TIME.append(toc1 - tic1)
    TESTING_TIME.append(toc2 - tic2)
    ELEMENT_ACC[index_iter, :] = each_acc_fdssc


print("--------FDSSC Training Finished-----------")
record.record_output(OA, AA, KAPPA, ELEMENT_ACC, TRAINING_TIME, TESTING_TIME,
                     'records/'+Dataset+'_fdssc_'+day_str+'.txt')
print('The saved model is: ' + Dataset + '_FDSSC_' + day_str + '.hdf5')