# Example 1
    # Build an (h, w, 2*no + 1) stack: the original image in the centre
    # band, openings by reconstruction in the upper bands and closings
    # by reconstruction in the lower bands.
    # NOTE(review): 'im', 'no' and 'radius' come from the enclosing
    # function signature, which is outside this chunk -- confirm there.
    # 'disk', 'erosion', 'dilation' and 'reconstruction' are presumably
    # skimage.morphology functions -- verify against the file's imports.
    [h, w] = im.shape
    out = sp.empty((h, w, 2 * no + 1), dtype=im.dtype)
    out[:, :, no] = im.copy()

    # Start the computation
    for i in xrange(no):
        # Structuring element: disk whose radius grows by 2 per level
        se = disk(radius + i * 2)

        # Opening by reconstruction: erode, then reconstruct by dilation
        temp = erosion(im, se)
        out[:, :, no + 1 + i] = reconstruction(temp, im, method='dilation')

        # Closing by reconstruction: dilate, then reconstruct by erosion
        temp = dilation(im, se)
        out[:, :, no - 1 - i] = reconstruction(temp, im, method='erosion')

    return out


if __name__ == '__main__':
    # Load the PCA-reduced image
    im, GeoT, Proj = rt.open_data('../Data/pca_university.tif')

    # Apply the morphological profile to each of the first three PCs
    # and stack the per-band profiles along the spectral axis.
    EMP = sp.concatenate(
        [morphological_profile(im[:, :, pc]) for pc in xrange(3)], axis=2)
    rt.write_data("../Data/emp_pca_university.tif", EMP, GeoT, Proj)
import rasterTools as rt
import scipy as sp
import pysptools.eea as eea

# Load the hyperspectral cube and the wavelength of each band
im, GeoT, Proj = rt.open_data('../Data/Moffett_full.tif')
h, w, b = im.shape
wave = sp.loadtxt('../Data/wave_moffett.csv', delimiter=',')

# Extract 3 endmembers with N-FINDR
nfindr = eea.NFINDR()
Unf = nfindr.extract(im.astype(float), 3, normalize=True)

# Save the endmembers with their wavelengths in the first column
T = sp.hstack((wave[:, sp.newaxis], Unf.T))
sp.savetxt("../Unmixing/figures/endmembers.csv", T, delimiter=",")
import rasterTools as rt
import scipy as sp
from scipy.stats import mode

# Load the pixel-wise thematic map (SVM classification)
im, GeoT, Proj = rt.open_data('../Data/tm_university_svm.tif')
out = sp.empty_like(im)

# Load the segmentation used to regularize the map
segmented, GeoT, Proj = rt.open_data('../Data/mean_shift_university.tif')

# Majority vote: give every segment the most frequent class it contains
for label in sp.unique(segmented):
    mask = sp.where(segmented == label)
    out[mask] = mode(im[mask], axis=None)[0][0]

# Write the regularized map
rt.write_data("../Data/tm_university_fusion_mv.tif", out, GeoT, Proj)
# Example 4
# Some useful imports
import rasterTools as rt
import scipy as sp
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from sklearn.metrics import f1_score
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.decomposition import PCA
from sklearn.model_selection import cross_val_score

# Number of Principal components to test
NB = 40

# Load the image and its ground truth, flattened to (pixels, bands)
im, GeoT, Proj = rt.open_data('../Data/university.tif')
h, w, b = im.shape
im.shape = (h * w, b)
ref = rt.open_data('../Data/university_gt.tif')[0]
ref.shape = (h * w,)
# Coordinates of the labelled samples (ground truth > 0)
idx = sp.where(ref > 0)[0]

# Project the pixels onto the principal components
pca = PCA()
imp = pca.fit_transform(im)
l = pca.explained_variance_
# Cumulative explained-variance ratio of the first NB components
cl = (l.cumsum() / l.sum())[:NB]

# Stratified split: 10% of the labelled samples for training
X_train, X_test, y_train, y_test = train_test_split(
    imp[idx, :], ref[idx], train_size=0.1, random_state=0, stratify=ref[idx])
# Example 5
# Optimize parameters of the composite-kernel pipeline by grid search.
# NOTE(review): 'pipe' is built earlier in this example, outside this
# chunk; its steps appear to be named 'CK' and 'SVM' -- confirm.
cv_params = dict([
    ('CK__gamma', 2.0**sp.arange(-3, 3)),
    ('CK__mu', sp.linspace(0, 1, num=11)),
    ('SVM__kernel', ['precomputed']),
])

cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
grid = GridSearchCV(pipe, cv_params, cv=cv, verbose=1, n_jobs=-1)
grid.fit(X_train, y_train)
print grid.best_params_
# Refit the best estimator on the full training set and report the
# weighted F1 score on the held-out test set.
clf = grid.best_estimator_
clf.fit(X_train, y_train)
yp = clf.predict(X_test)
print f1_score(y_test, yp, average='weighted')

# Load the spectral image and the spatial (PCA + median filter) features,
# each flattened to (pixels, bands).
ims, GeoT, Proj = rt.open_data('../Data/university.tif')
[h, w, b] = ims.shape
ims.shape = (h * w, b)
imw, GeoT, Proj = rt.open_data('../Data/pca_median_11_11_university.tif')
[h, w, b] = imw.shape
imw.shape = (h * w, b)
# NOTE(review): 'scs' and 'scw' look like scalers fitted earlier on the
# spectral and spatial training data respectively -- verify upstream.
ims = scs.transform(ims)
imw = scw.transform(imw)
im = sp.concatenate((ims, imw), axis=1)
# Free large intermediates before predicting on the whole image
del imw, ims, X_train, X_test, Xs_train, Xs_test, Xw_train, Xw_test,
imp = clf.predict(im)
rt.write_data('../Data/tm_university_ck_mw.tif', imp.reshape(h, w), GeoT, Proj)
# Example 6
# Stratified split: 10% of the labelled samples for training.
# NOTE(review): 'X', 'y' and the scaler 'sc' are defined earlier in this
# example, outside this chunk -- confirm upstream.
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=0.1,
                                                    random_state=0,
                                                    stratify=y)
print X_train.shape

# Learn SVM: grid over the RBF bandwidth gamma and the penalty C
param_grid_svm = dict(gamma=2.0**sp.arange(-8, 2), C=10.0**sp.arange(0,
                                                                     3))  # SVM
# sklearn expects a 1-D label vector
y_train.shape = (y_train.size, )
cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
grid = GridSearchCV(SVC(), param_grid=param_grid_svm, cv=cv, n_jobs=-1)
grid.fit(X_train, y_train)
print grid.best_params_
# Refit the best model on the full training set
clf = grid.best_estimator_
clf.fit(X_train, y_train)
yp = clf.predict(X_test).reshape(y_test.shape)

# Classify the whole image (pixels as rows, scaled like the training data)
im, GeoT, Proj = rt.open_data('92AV3C.bil')
[h, w, b] = im.shape
im.shape = (h * w, b)
im = sc.transform(im)
imp = clf.predict(im).reshape(h, w)
rt.write_data('thematic_map.tif', imp, GeoT, Proj)

# Check the accuracy against the ground-truth ROI
yp, y = rt.get_samples_from_roi('thematic_map.tif', 'gt.tif')
print f1_score(yp, y, average='weighted')