Code example #1
import scipy as sp
from skimage.morphology import disk, erosion, dilation, reconstruction

import rasterTools as rt


def morphological_profile(im, radius=1, step=2, no=4):
    """ Compute the morphological profile of a given flat image with a disk as structuring element
    INPUT:
    im: input image, must be flat
    radius: initial size of SE
    step: step size for the SE
    no: number of opening/closing
    OUTPUT:
    MP: morphological profile, image of size h*w*(2*no+1)
    """
    if im.ndim != 2:
        raise ValueError("Image should be flat (2D)")

    # Initialization of the output
    [h, w] = im.shape
    out = sp.empty((h, w, 2 * no + 1), dtype=im.dtype)
    out[:, :, no] = im.copy()

    # Start the computation
    for i in xrange(no):
        # Structuring elements
        se = disk(radius + i * step)  # SE grows by `step` at each scale

        # Compute opening per reconstruction
        temp = erosion(im, se)
        out[:, :, no + 1 + i] = reconstruction(temp, im, method='dilation')

        # Compute closing per reconstruction
        temp = dilation(im, se)
        out[:, :, no - 1 - i] = reconstruction(temp, im, method='erosion')

    return out


if __name__ == '__main__':
    # Load the image (the first three principal components)
    im, GeoT, Proj = rt.open_data('../Data/pca_university.tif')

    # Apply the morphological profile on each PC
    EMP = []
    for i in xrange(3):
        EMP.append(morphological_profile(im[:, :, i]))
    EMP = sp.concatenate(EMP, axis=2)
    rt.write_data("../Data/emp_pca_university.tif", EMP, GeoT, Proj)
Code example #2
import scipy as sp
from skimage.morphology import disk, erosion, dilation, reconstruction

import rasterTools as rt


def morphological_profile(im, radius=1, step=2, no=4):
    """Compute the morphological profile of a flat image with a disk SE
    (see code example #1 for the full docstring)."""
    if im.ndim != 2:
        raise ValueError("Image should be flat (2D)")

    # Initialization of the output
    [h, w] = im.shape
    out = sp.empty((h, w, 2 * no + 1), dtype=im.dtype)
    out[:, :, no] = im.copy()

    # Start the computation
    for i in xrange(no):
        # Structuring elements
        se = disk(radius + i * step)  # SE grows by `step` at each scale

        # Compute opening per reconstruction
        temp = erosion(im, se)
        out[:, :, no + 1 + i] = reconstruction(temp, im, method='dilation')

        # Compute closing per reconstruction
        temp = dilation(im, se)
        out[:, :, no - 1 - i] = reconstruction(temp, im, method='erosion')

    return out


if __name__ == '__main__':
    # Load image
    im, GeoT, Proj = rt.open_data('../Data/pca_university.tif')

    # Apply the Morphological profile on each PC
    EMP = []
    for i in xrange(3):
        EMP.append(morphological_profile(im[:, :, i]))
    EMP = sp.concatenate(EMP, axis=2)
    rt.write_data("../Data/emp_pca_university.tif", EMP, GeoT, Proj)
Code example #3
import scipy as sp
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import f1_score

import rasterTools as rt
import icm  # project-specific ICM (MRF) regularization module

# This fragment assumes X_train, X_test, y_train, y_test, y, im (flattened to
# (h*w, b) and scaled), h, w, GeoT and Proj from earlier in the script.
y_train.shape = (y_train.size, )
cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
grid = GridSearchCV(SVC(),
                    param_grid=dict(gamma=2.0**sp.arange(-4, 4),
                                    C=10.0**sp.arange(0, 3)),
                    cv=cv,
                    n_jobs=-1)
grid.fit(X_train, y_train)
clf = grid.best_estimator_

# Enable probability estimates on the tuned model and refit
clf.probability = True
clf.fit(X_train, y_train)

yp = clf.predict(X_test).reshape(y_test.shape)
print(f1_score(y_test, yp, average='weighted'))

del X_train, X_test, y_train, y_test

# Predict the whole image and the probability map
labels = clf.predict(im).reshape(h, w)
proba = -clf.predict_log_proba(im).reshape(h, w, y.max())  # per-class energies

rt.write_data('../Data/proba_university_svm_proba.tif', proba, GeoT, Proj)
rt.write_data('../Data/proba_university_svm_labels.tif', labels, GeoT, Proj)

# Run ICM (icm.fit presumably refines labels in place and returns the change rate)
diff = icm.fit(proba, labels, beta=1.25, th=0.01)
print(diff)
rt.write_data('../Data/tm_university_svm_mrf.tif', labels, GeoT, Proj)
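The icm module is project-specific and not shown. For orientation, here is a minimal sketch of what an ICM pass over the energy map could look like, assuming classes are labeled 1..C as in the thematic map (hypothetical icm_sketch, not the author's icm.fit):

def icm_sketch(proba, labels, beta=1.25, th=0.01, max_iter=10):
    # proba: (h, w, C) per-class energies (-log probabilities)
    # labels: (h, w) initial map with classes 1..C, refined in place
    h, w, C = proba.shape
    rate = 1.0
    for _ in range(max_iter):
        changed = 0
        for i in range(1, h - 1):
            for j in range(1, w - 1):
                neigh = sp.array([labels[i - 1, j], labels[i + 1, j],
                                  labels[i, j - 1], labels[i, j + 1]])
                # Data term plus a Potts prior on the 4-neighborhood
                energies = proba[i, j, :].copy()
                for c in range(C):
                    energies[c] += beta * (neigh != c + 1).sum()
                best = energies.argmin() + 1
                if best != labels[i, j]:
                    labels[i, j] = best
                    changed += 1
        rate = changed / float(h * w)
        if rate < th:
            break
    return rate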
Code example #4
import scipy as sp
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.svm import SVC

import rasterTools as rt

# This fragment assumes im with shape (h*w, b), plus h, w, GeoT and Proj from
# an earlier rt.open_data call; morphological_profile is defined in code
# examples #1 and #2.
pca = PCA(n_components=3)
pcs = pca.fit_transform(im)
EMP = []
for i in xrange(3):
    EMP.append(morphological_profile(pcs[:, i].reshape(h, w), step=1, no=10))
EMP = sp.concatenate(EMP, axis=2)
EMP.shape = (h * w, EMP.shape[2])
del pcs

# Concatenate the spectral and spatial features and do scaling
IM_EMP = sp.concatenate((im[:, ::2], EMP.astype(im.dtype)), axis=1)

del im, EMP

# Save the results
rt.write_data("../Data/fusion_inputs_university.tif",IM_EMP.reshape(h,w,IM_EMP.shape[1]),GeoT,Proj)

# Get the training set
X, y = rt.get_samples_from_roi('../Data/fusion_inputs_university.tif',
                               '../Data/university_gt.tif')

# Scale the data
sc = StandardScaler()
X = sc.fit_transform(X)
IM_EMP = sc.transform(IM_EMP)

# Split the data
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.1,
                                                    random_state=0, stratify=y)

y_train.shape = (y_train.size, )
cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
grid = GridSearchCV(SVC(),
                    param_grid=dict(gamma=2.0**sp.arange(-4, 4),
                                    C=10.0**sp.arange(0, 3)),
                    cv=cv,
                    n_jobs=-1)
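The snippet stops at the grid definition; the fit and prediction steps would presumably mirror code examples #3 and #9 (the f1_score import and the output filename below are assumptions):

from sklearn.metrics import f1_score

grid.fit(X_train, y_train)
clf = grid.best_estimator_
clf.fit(X_train, y_train)
yp = clf.predict(X_test).reshape(y_test.shape)
print(f1_score(y_test, yp, average='weighted'))

# Classify the fused spectral/spatial image
imp = clf.predict(IM_EMP).reshape(h, w)
rt.write_data('../Data/tm_university_fusion.tif', imp, GeoT, Proj)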
Code example #5
import scipy as sp
from scipy import optimize

import rasterTools as rt
import pysptools.eea as eea  # assumed source of the NFINDR extractor
# Non-negativity bounds on the abundances (for the SLSQP variant below)
bnds = ((0, None), (0, None), (0, None))

NE = 3  # number of endmembers
alpha0 = sp.ones((NE, ))
alpha0 /= alpha0.sum()  # uniform initial abundances (for the SLSQP variant)

# Load images
im, GeoT, Proj = rt.open_data('../Data/Moffett_full.tif')
[h, w, b] = im.shape
wave = sp.loadtxt('../Data/wave_moffett.csv', delimiter=',')

# Compute endmembers
nfindr = eea.NFINDR()
M = nfindr.extract(im.astype(float), NE, normalize=False).T

abundances = sp.empty((h, w, NE))
for h_ in xrange(h):
    for w_ in xrange(w):
        x = im[h_, w_, :]
        # res = optimize.minimize(loss, alpha0, args=(x, M), jac=jac,
        #                         method='SLSQP', bounds=bnds)
        # a = res['x']
        res = optimize.nnls(M, x)  # non-negative least squares
        a = res[0]
        abundances[h_, w_, :] = a / a.sum()  # enforce sum-to-one a posteriori

# Write the image
rt.write_data("../Data/Moffett_abundances.tif", abundances, GeoT, Proj)
Code example #6
import rasterTools as rt
import scipy as sp
from scipy.stats import mode

# Load Thematic Map
im, GeoT, Proj = rt.open_data('../Data/tm_university_svm.tif')
out = sp.empty_like(im)

# Load segmented image
segmented, GeoT, Proj = rt.open_data('../Data/mean_shift_university.tif')

# Do the majority vote
for l in sp.unique(segmented):
    t = sp.where(segmented == l)
    y = im[t]
    out[t] = mode(y, axis=None)[0][0]  # most frequent class in segment l

# Write the new image
rt.write_data("../Data/tm_university_fusion_mv.tif",out,GeoT,Proj)
Code example #7
import scipy as sp

import rasterTools as rt

# This fragment assumes a fitted LDA (lda), the samples (xt, yt), the label
# vector y and the wavelength vector wave from earlier in the script.

# Explained variance, printed as (index, value) pairs
l = lda.explained_variance_ratio_
cl = l.cumsum()

for i in range(y.max() - 1):
    print("({0},{1})".format(i + 1, l[i]))

for i in range(y.max() - 1):
    print("({0},{1})".format(i + 1, cl[i]))

# Project the data
import matplotlib.pyplot as plt
xp = lda.transform(xt)

# Save projection
D = sp.concatenate((xp[::10, :4], yt[::10]), axis=1)
sp.savetxt("../FeatureExtraction/figures/lda_proj.csv", D, delimiter=',')

# Save Eigenvectors
D = sp.concatenate((wave[:, sp.newaxis], lda.coef_[:3, :].T), axis=1)
sp.savetxt('../FeatureExtraction/figures/lda_pcs.csv', D, delimiter=',')

im, GeoT, Proj = rt.open_data('../Data/university.tif')
[h, w, b] = im.shape
im.shape = (h * w, b)
imp = lda.transform(im)[:, :3]
imp.shape = (h, w, 3)

# Save image
rt.write_data('../Data/lda_university.tif', imp, GeoT, Proj)
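For reference, a hypothetical setup for this fragment, following the sample-extraction pattern of the other examples (any scaling the original script applied is omitted, and the wavelength vector would come from a CSV as in code example #5):

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

xt, yt = rt.get_samples_from_roi('../Data/university.tif',
                                 '../Data/university_gt.tif')
y = yt  # label vector used for the class count above
lda = LinearDiscriminantAnalysis()
lda.fit(xt, yt.ravel())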
Code example #8
import scipy as sp
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.metrics import f1_score

import rasterTools as rt

# This fragment assumes a Pipeline `pipe` with a composite-kernel step 'CK'
# and an SVC step 'SVM', the train/test splits (X_train, X_test, y_train,
# y_test, Xs_*, Xw_*) and the fitted scalers scs (spectral) and scw (spatial).
# Optimize parameters
cv_params = dict([
    ('CK__gamma', 2.0**sp.arange(-3, 3)),
    ('CK__mu', sp.linspace(0, 1, num=11)),
    ('SVM__kernel', ['precomputed']),
])

cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
grid = GridSearchCV(pipe, cv_params, cv=cv, verbose=1, n_jobs=-1)
grid.fit(X_train, y_train)
print(grid.best_params_)
clf = grid.best_estimator_
clf.fit(X_train, y_train)
yp = clf.predict(X_test)
print(f1_score(y_test, yp, average='weighted'))

# Load image
ims, GeoT, Proj = rt.open_data('../Data/university.tif')
[h, w, b] = ims.shape
ims.shape = (h * w, b)
imw, GeoT, Proj = rt.open_data('../Data/pca_median_11_11_university.tif')
[h, w, b] = imw.shape
imw.shape = (h * w, b)
ims = scs.transform(ims)
imw = scw.transform(imw)
im = sp.concatenate((ims, imw), axis=1)
del imw, ims, X_train, X_test, Xs_train, Xs_test, Xw_train, Xw_test
imp = clf.predict(im)
rt.write_data('../Data/tm_university_ck_mw.tif', imp.reshape(h, w), GeoT, Proj)
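The CK step in pipe is project-specific. A common composite-kernel construction is the weighted sum K = mu*K_spectral + (1-mu)*K_spatial of two RBF kernels, which the grid above tunes through CK__gamma and CK__mu. A hypothetical transformer along those lines (not the author's implementation; the default ns is illustrative):

from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics.pairwise import rbf_kernel


class CompositeKernel(BaseEstimator, TransformerMixin):
    """K = mu * K_spectral + (1 - mu) * K_spatial; the first `ns` columns
    of X are treated as spectral features, the remaining ones as spatial."""

    def __init__(self, ns=103, gamma=1.0, mu=0.5):
        self.ns = ns
        self.gamma = gamma
        self.mu = mu

    def fit(self, X, y=None):
        self.X_fit_ = X  # keep the training samples for cross-kernels
        return self

    def transform(self, X):
        Ks = rbf_kernel(X[:, :self.ns], self.X_fit_[:, :self.ns], gamma=self.gamma)
        Kw = rbf_kernel(X[:, self.ns:], self.X_fit_[:, self.ns:], gamma=self.gamma)
        return self.mu * Ks + (1.0 - self.mu) * Kw

# pipe = Pipeline([('CK', CompositeKernel(ns=b)), ('SVM', SVC())])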
Code example #9
import scipy as sp
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.svm import SVC
from sklearn.metrics import f1_score

import rasterTools as rt
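This fragment assumes X, y and a fitted scaler sc were prepared beforehand; following the pattern of code example #4, a hypothetical setup would be:

from sklearn.preprocessing import StandardScaler

X, y = rt.get_samples_from_roi('92AV3C.bil', 'gt.tif')
sc = StandardScaler()
X = sc.fit_transform(X)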
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=0.1,
                                                    random_state=0,
                                                    stratify=y)
print(X_train.shape)

# Learn SVM
param_grid_svm = dict(gamma=2.0**sp.arange(-8, 2),
                      C=10.0**sp.arange(0, 3))  # SVM grid
y_train.shape = (y_train.size, )
cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
grid = GridSearchCV(SVC(), param_grid=param_grid_svm, cv=cv, n_jobs=-1)
grid.fit(X_train, y_train)
print(grid.best_params_)
clf = grid.best_estimator_
clf.fit(X_train, y_train)
yp = clf.predict(X_test).reshape(y_test.shape)

# Classify the whole image
im, GeoT, Proj = rt.open_data('92AV3C.bil')
[h, w, b] = im.shape
im.shape = (h * w, b)
im = sc.transform(im)
imp = clf.predict(im).reshape(h, w)
rt.write_data('thematic_map.tif', imp, GeoT, Proj)

# Check the accuracy
yp, y = rt.get_samples_from_roi('thematic_map.tif', 'gt.tif')
print(f1_score(yp, y, average='weighted'))
Code example #10
# Fragment: tail of a benchmarking script. F1 and CT collect F1 scores and
# computation times; ts, X_train, X_test, y_train, y_test, sc and
# param_grid_svm are assumed to be defined earlier in the enclosing block.
    CT.append(time.time() - ts)

    # Print results
    print(F1)
    print(CT)
    for c in sp.unique(y_train):
        t = sp.where(y_train == c)[0]
        print("Number of training samples for class {0}:{1}".format(c, t.size))
    for c in sp.unique(y_test):
        t = sp.where(y_test == c)[0]
        print("Number of testing samples for class {0}:{1}".format(c, t.size))

    # Load data
    im, GeoT, Proj = rt.open_data('../Data/university.tif')
    [h, w, b] = im.shape
    im.shape = (h * w, b)
    im = sc.transform(im)

    # Perform the classification of the whole image
    y_train.shape = (y_train.size, )
    cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
    grid = GridSearchCV(SVC(), param_grid=param_grid_svm, cv=cv, n_jobs=-1)
    grid.fit(X_train, y_train)
    clf = grid.best_estimator_
    clf.fit(X_train, y_train)

    imp = clf.predict(im).reshape(h, w)

    # Save image
    rt.write_data('../Data/tm_university_svm.tif', imp, GeoT, Proj)
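As in code example #9, the saved thematic map can then be scored against the ground truth (the f1_score import and the university_gt.tif path are assumed from the other examples):

    yp, y = rt.get_samples_from_roi('../Data/tm_university_svm.tif',
                                    '../Data/university_gt.tif')
    print(f1_score(yp, y, average='weighted'))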