import scipy as sp
import rasterTools as rt
from sklearn.preprocessing import StandardScaler
import icm
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.svm import SVC
from sklearn.metrics import f1_score

# Load data set
im, GeoT, Proj = rt.open_data('../Data/university.tif')
[h, w, b] = im.shape
im.shape = (h * w, b)  # flatten spatial dims: one row per pixel, one column per band

# Get the training set
X, y = rt.get_samples_from_roi('../Data/university.tif',
                               '../Data/university_gt.tif')

# Scale the data: fit on the labeled samples, then apply the same
# transform to the whole image so both live in the same feature space
sc = StandardScaler()
X = sc.fit_transform(X)
im = sc.transform(im)

# Split the data (stratified so per-class proportions are preserved)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=0.05,
                                                    random_state=0,
                                                    stratify=y)

y_train.shape = (y_train.size, )  # flatten labels to 1-D as sklearn expects
# FIX: shuffle=True is required for random_state to take effect;
# without it old scikit-learn silently ignored random_state and
# recent versions raise ValueError.
cv = StratifiedKFold(n_splits=5, shuffle=True,
                     random_state=0).split(X_train, y_train)
# NOTE(review): this section appears spliced from another script --
# `EMP`, `pcs` and `morphological_profile` are not defined in the
# visible code above; presumably `pcs` holds the first principal
# components of the image (one column per component) -- TODO confirm
# against the original source. `xrange` is Python 2 only.
for i in xrange(3):
    EMP.append(morphological_profile(pcs[:,i].reshape(h,w),step=1,no=10))
# Stack the per-component profiles along the band (third) axis,
# then flatten the spatial dims: one row per pixel.
EMP = sp.concatenate(EMP,axis=2)
EMP.shape=(h*w,EMP.shape[2])
del pcs  # release the principal components before the next big allocation

# Concatenate the spectral and spatial features and do scaling
# im[:, ::2] keeps every other spectral band; the profile features are
# cast to the image dtype so the concatenation is homogeneous.
IM_EMP = sp.concatenate((im[:,::2],EMP.astype(im.dtype)),axis=1)

del im,EMP  # free the large intermediates

# Save the results
rt.write_data("../Data/fusion_inputs_university.tif",IM_EMP.reshape(h,w,IM_EMP.shape[1]),GeoT,Proj)

# Get the training set from the fused spectral+spatial raster
X,y=rt.get_samples_from_roi('../Data/fusion_inputs_university.tif','../Data/university_gt.tif')

# Scale the data: fit on the labeled samples, apply to the full image
sc = StandardScaler()
X = sc.fit_transform(X)
IM_EMP = sc.transform(IM_EMP)

# Split the data (stratified, 10% for training)
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.1,random_state=0,stratify=y)

y_train.shape=(y_train.size,)  # flatten labels to 1-D as sklearn expects
# FIX: shuffle=True is required for random_state to take effect;
# recent scikit-learn raises ValueError when random_state is set
# without it.
cv = StratifiedKFold(n_splits=5, shuffle=True,
                     random_state=0).split(X_train, y_train)
# Grid-search gamma over 2^[-4,4) and C over 10^[0,3)
grid = GridSearchCV(SVC(), param_grid=dict(gamma=2.0**sp.arange(-4,4), C=10.0**sp.arange(0,3)), cv=cv,n_jobs=-1)
grid.fit(X_train, y_train)
# best_estimator_ is already refit on the full training set
# (refit=True by default), so no extra fit call is needed.
clf = grid.best_estimator_
        self.gamma = gamma
        self.mu = mu

    def transform(self, X):
        """Composite kernel between *X* and the stored support samples.

        Convex combination, weighted by ``self.mu``, of an RBF kernel on
        the spectral part (all but the last three columns) and an RBF
        kernel on the spatial part (the last three columns).
        """
        spectral = rbf_kernel(X[:, :-3], self.Xs_, gamma=self.gamma)
        spatial = rbf_kernel(X[:, -3:], self.Xw_, gamma=self.gamma)
        return self.mu * spectral + (1 - self.mu) * spatial

    def fit(self, X, y=None, **fit_params):
        """Memorize the support samples used later by ``transform``.

        Splits *X* column-wise: everything but the last three columns is
        the spectral part, the last three columns the spatial part.
        *y* and extra fit params are accepted for API compatibility but
        ignored. Returns ``self`` (sklearn convention).
        """
        self.Xs_, self.Xw_ = X[:, :-3], X[:, -3:]
        return self


# Load data
# Spectral samples from the raw bands; spatial samples from the
# median-filtered PCA raster. y is read twice from the same ground
# truth, so both reads yield identical labels.
Xs, y = rt.get_samples_from_roi('../Data/university.tif',
                                '../Data/university_gt.tif')
Xw, y = rt.get_samples_from_roi('../Data/pca_median_11_11_university.tif',
                                '../Data/university_gt.tif')
# Standardize each feature set with its own scaler
scs = StandardScaler()
Xs = scs.fit_transform(Xs)
scw = StandardScaler()
Xw = scw.fit_transform(Xw)

# Split data
# Same random_state and stratify target on the same y, so the two
# splits pick the same sample indices, keeping Xs_* and Xw_* aligned.
Xs_train, Xs_test, y_train, y_test = train_test_split(Xs,
                                                      y,
                                                      train_size=0.05,
                                                      random_state=0,
                                                      stratify=y)
# NOTE(review): this call is truncated in this excerpt; the remaining
# arguments are outside the visible source.
Xw_train, Xw_test, y_train, y_test = train_test_split(Xw,
                                                      y,
Exemple #4
0
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.metrics import f1_score

# Input rasters to benchmark: raw bands plus PCA/LDA/KPCA reductions
DATA = [
    '../Data/university.tif', '../Data/pca_university.tif',
    '../Data/lda_university.tif', '../Data/kpca_university.tif'
]
# Ground-truth raster shared by every input
GT = '../Data/university_gt.tif'

# Per-dataset F1 scores for the two classifiers
F1_knn, F1_gmm = [], []
for data in DATA:
    print data  # Python 2 print statement
    # Load data set
    X, y = rt.get_samples_from_roi(data, GT)
    sc = StandardScaler()
    X = sc.fit_transform(X)

    # Split the data
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.1,
                                                        random_state=0,
                                                        stratify=y)

    # Compute Cross validation for knn
    y_train.shape = (y_train.size, )  # flatten labels to 1-D
    # NOTE(review): random_state has no effect here without shuffle=True,
    # and recent scikit-learn raises ValueError for this combination.
    cv = StratifiedKFold(n_splits=5, random_state=0).split(X_train, y_train)
    # NOTE(review): `neighbors` is not imported in the visible code
    # (presumably sklearn.neighbors); this call is truncated in this
    # excerpt.
    grid = GridSearchCV(neighbors.KNeighborsClassifier(),
                        param_grid=dict(n_neighbors=sp.arange(1, 50, 5)),
Exemple #5
0
# ../Codes/script_fusion.py

#################################
## Spatial Post Regularization ##
#################################

# You can do a post-classification spatial regularization using voting scheme or MRF
# script_fusion_mv.py
# script_mrf.py

#########################################################################################

# Baseline

# Load data set: Indian Pines image (92AV3C) with its ground truth
X, y = rt.get_samples_from_roi('92AV3C.bil', 'gt.tif')
sc = StandardScaler()
X = sc.fit_transform(X)

# Split the data (stratified, 10% for training)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    train_size=0.1,
                                                    random_state=0,
                                                    stratify=y)
print X_train.shape  # Python 2 print statement

# Learn SVM
# Hyper-parameter grid: gamma over 2^[-8,2), C over 10^[0,3)
param_grid_svm = dict(gamma=2.0**sp.arange(-8, 2), C=10.0**sp.arange(0,
                                                                     3))  # SVM
y_train.shape = (y_train.size, )  # flatten labels to 1-D