Example #1
import numpy as np
from sklearn.preprocessing import minmax_scale
# NOTE: Processor, BandSelection_SNMF and eval_band_cv are helpers from the
# surrounding project; their import paths are not shown in the original snippet.

if __name__ == '__main__':
    # root = '/Users/cengmeng/PycharmProjects/python/Deep-subspace-clustering-networks/Data/'
    root = '/content/'
    # im_, gt_ = 'SalinasA_corrected', 'SalinasA_gt'
    im_, gt_ = 'Indian_pines_corrected', 'Indian_pines_gt'
    # im_, gt_ = 'Pavia', 'Pavia_gt'
    # im_, gt_ = 'KSC', 'KSC_gt'

    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'

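    # Load the hyperspectral cube and its ground-truth map, then split the
    # labelled pixels into training/testing indices (90% held out for testing).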
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    n_row, n_column, n_band = img.shape
    train_inx, test_idx = p.get_tr_tx_index(p.get_correct(img, gt)[1],
                                            test_size=0.9)

    img = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape(
        (n_row, n_column, n_band))

    x_input = img.reshape(n_row * n_column, n_band)

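    # SNMF-based band selection: reduce the cube to `num_class` bands, keep only
    # the labelled pixels, and report classification scores with eval_band_cv
    # (times=5 presumably repeats the cross-validated evaluation five times).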
    num_class = 15
    snmf = BandSelection_SNMF(num_class)
    X_new = snmf.predict(x_input).reshape(n_row, n_column, num_class)
    a, b = p.get_correct(X_new, gt)
    b = p.standardize_label(b)
    print(eval_band_cv(a, b, times=5))
Example #2
    # im_, gt_ = 'KSC', 'KSC_gt'

    img_path = root + im_ + '.mat'
    gt_path = root + gt_ + '.mat'
    print(img_path)

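    # Same preprocessing as in Example #1: scale each band to [0, 1], keep only
    # labelled pixels, relabel the classes to 0..K-1, and split the indices.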
    p = Processor()
    img, gt = p.prepare_data(img_path, gt_path)
    # Img, Label = Img[:256, :, :], Label[:256, :]
    n_row, n_column, n_band = img.shape
    X_img = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape(
        (n_row, n_column, n_band))
    img_correct, gt_correct = p.get_correct(X_img, gt)
    gt_correct = p.standardize_label(gt_correct)
    X_img_2D = X_img.reshape(n_row * n_column, n_band)
    train_inx, test_idx = p.get_tr_tx_index(gt_correct, test_size=0.4)

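    # Network and training hyper-parameters: spatial input size, per-layer kernel
    # size and hidden-unit counts, and checkpoint/log paths for a model pretrained
    # on COIL20 (the model definition follows later in the full script).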
    n_input = [n_row, n_column]
    kernel_size = [11]
    n_hidden = [32]
    batch_size = n_band
    model_path = './pretrain-model-COIL20/model.ckpt'
    ft_path = './pretrain-model-COIL20/model.ckpt'
    logs_path = './pretrain-model-COIL20/logs'

    batch_size_test = n_band

    iter_ft = 0
    display_step = 1
    alpha = 0.04
    learning_rate = 1e-3
# Assumed loop head (omitted from the original excerpt): drop classes with too
# few labelled samples; the threshold below is illustrative, not the original value.
for c in np.unique(y):
    if np.nonzero(y == c)[0].size < 10:
        X = np.delete(X, np.nonzero(y == c), axis=0)
        y = np.delete(y, np.nonzero(y == c))
y = p.standardize_label(y)
X = MinMaxScaler().fit_transform(X)
print('size:', X.shape, 'n_classes:', np.unique(y).shape[0])

############################
#      feature extraction
############################
'''
step 1: set common parameters
'''
n_hidden = 50
max_iter = 1000

train_index, test_index = p.get_tr_tx_index(y, test_size=0.4)
results = []
X_proj = []
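# Sweep the sparsity degree rho of EMO_AE_ELM (presumably the project's
# evolutionary multi-objective ELM autoencoder): for each setting, fit the
# autoencoder on X (fit(X, X)), record the projected features and the fit time,
# then slice the projection into train/test sets for evaluation.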
for rho in np.arange(0.1, 1, 0.1):
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    instance_emo_elm = EMO_AE_ELM(n_hidden,
                                  sparse_degree=rho,
                                  max_iter=max_iter,
                                  n_pop=100)
    X_projection_emo_elm = instance_emo_elm.fit(X, X).predict(X)
    # instance_emo_elm.save_evo_result('./experimental_results/EMO-ELM-AE-results-KSC-50hidden.npz')
    time_emo_elmae = round(time.perf_counter() - start, 3)
    X_proj.append(X_projection_emo_elm)
    # TODO: calculate accuracy
    X_train, X_test = X_projection_emo_elm[train_index], X_projection_emo_elm[test_index]