Code example #1
0
        # Tail of a dataset-selection branch (the enclosing `if im_ == ...:`
        # header is above this chunk — NOTE(review): confirm which dataset
        # this branch handles; the crop suggests SalinasA-style sub-scene).
        # Crop the hyperspectral cube and its ground-truth map to a sub-scene.
        img, gt = img[30:115, 24:94, :], gt[30:115, 24:94]
        # Spatial patch size (pixels) used when extracting per-sample patches.
        PATCH_SIZE = 13
        # Hyper-parameters for the plain model: regularization coefficient,
        # neighborhood size, and rho — presumably for a subspace-clustering
        # objective; verify semantics against the model that consumes them.
        REG_Coef_, NEIGHBORING_, RO_ = 1e2, 30, 0.4
        # Corresponding hyper-parameters for the kernel (K) variant, plus a
        # GAMMA kernel width — TODO confirm against the kernel model's API.
        REG_Coef_K, NEIGHBORING_K, RO_K, GAMMA = 1e3, 30, 0.8, 10
    if im_ == 'PaviaU':
        # PaviaU: crop a 200x100 sub-scene and use its tuned hyper-parameters.
        img, gt = img[150:350, 100:200, :], gt[150:350, 100:200]
        REG_Coef_, NEIGHBORING_, RO_ = 1e3, 20, 0.6
        REG_Coef_K, NEIGHBORING_K, RO_K, GAMMA = 6 * 1e4, 30, 0.8, 100

    n_row, n_column, n_band = img.shape
    # Min-max scale each spectral band over all pixels (flatten spatial dims,
    # scale, then restore the (rows, cols, bands) cube shape).
    x_img = minmax_scale(img.reshape(n_row * n_column, n_band)).reshape((n_row, n_column, n_band))
    print('original img shape: ', x_img.shape)
    # # reduce spectral bands using PCA
    pca = PCA(n_components=nb_comps)
    # Fit PCA on flattened pixels, rescale components to [0, 1], and reshape
    # back to a cube with nb_comps reduced bands.
    img = minmax_scale(pca.fit_transform(img.reshape(n_row * n_column, n_band))).reshape(n_row, n_column, nb_comps)
    # Extract PATCH_SIZE x PATCH_SIZE spatial patches around labeled pixels;
    # `p` is a project-local preprocessing helper (defined outside this chunk).
    x_patches, y_ = p.get_HSI_patches_rw(img, gt, (PATCH_SIZE, PATCH_SIZE))

    print('reduced img shape: ', img.shape)
    print('x_patch tensor shape: ', x_patches.shape)
    n_samples, n_width, n_height, n_band = x_patches.shape
    # Flatten each patch to a 1-D feature vector: (n_samples, w*h*bands).
    x_patches_2d = np.reshape(x_patches, (n_samples, -1))
    # Map raw ground-truth labels to contiguous integer class ids.
    y = p.standardize_label(y_)

    # # reorder samples according to gt
    x_patches_2d, y = order_sam_for_diag(x_patches_2d, y)
    # # normalize data
    x_patches_2d = normalize(x_patches_2d)
    print('final sample shape: %s, labels: %s' % (x_patches_2d.shape, np.unique(y)))
    N_CLASSES = np.unique(y).shape[0]  # Indian : 8  KSC : 10  SalinasA : 6 PaviaU : 8

    # ========================
Code example #2
0
File: demo.py  Project: AngryCai/HyperAE
        # Tail of a configuration block whose header is above this chunk.
        # Training hyper-parameters; the trailing comments map each constant
        # to the symbol used in the paper's objective (beta, alpha, lambda).
        EPOCH = 100
        LEARNING_RATE = 0.0002
        REG_GRAPH = 0.001  # beta
        REG_TASK = 100.  # alpha
        WEIGHT_DECAY = 0.001  # lambda
        # Progress is reported every VERBOSE_TIME epochs — presumably; confirm
        # against the training loop that reads this constant.
        VERBOSE_TIME = 10
    n_row, n_column, n_band = img.shape
    # Min-max scale all pixels band-wise, preserving the cube's shape.
    img = minmax_scale(img.reshape(n_row * n_column,
                                   n_band)).reshape(img.shape)

    # perform PCA
    # pca = PCA(n_components=nb_comps)
    # img = pca.fit_transform(img.reshape(n_row * n_column, n_band)).reshape((n_row, n_column, nb_comps))
    # print('pca shape: %s, percentage: %s' % (img.shape, np.sum(pca.explained_variance_ratio_)))

    # Extract NEIGHBORING_SIZE x NEIGHBORING_SIZE spatial patches around
    # labeled pixels; `p` is a project-local helper defined outside this chunk.
    x_patches, y_ = p.get_HSI_patches_rw(img, gt,
                                         (NEIGHBORING_SIZE, NEIGHBORING_SIZE))
    # L2-normalize each flattened patch, then restore the 4-D patch tensor.
    x_patches = normalize(x_patches.reshape(x_patches.shape[0],
                                            -1)).reshape(x_patches.shape)
    print('img shape:', img.shape)
    print('img_patches_nonzero:', x_patches.shape)
    n_samples, n_width, n_height, n_band = x_patches.shape
    # Map raw ground-truth labels to contiguous integer class ids.
    y = p.standardize_label(y_)
    # Reorder samples by class so the affinity matrix is block-diagonal-friendly.
    x_patches, y = order_sam_for_diag(x_patches, y)
    print('x_patches shape: %s, labels: %s' % (x_patches.shape, np.unique(y)))

    # Number of distinct classes present in the (cropped) ground truth.
    N_CLASSES = np.unique(y).shape[
        0]  # wuhan : 5  Pavia : 6  Indian : 8  KSC : 10  SalinasA : 6 PaviaU : 8
    """
    =======================================
    Clustering
    ======================================