        # Hyperparameters of the bag-of-visual-words / RBM feature extractor
        bow.patch_size = (20, 20)    # size of image patches fed to the RBM
        bow.learning_rate = 0.001    # RBM learning rate
        bow.n_components = 512       # number of RBM hidden units
        bow.n_iter = 100             # number of training iterations
        bow.sample_num = 1000        # number of patches sampled for training
        
        bow.fit(x_train)
        
        svm = SVC(kernel='linear', C=1000, probability=True, random_state=42)
        #lr = LogisticRegression()
        #lr.C = 100
        best = Pipeline([('bow', bow), ('svm', svm)])
        best.fit(x_train, y_train)
        
        print "*********************Save*******************************"
        joblib.dump(best, "classifier_rbm.pkl", compress=3)
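        # Sketch (not part of the original script): the saved pipeline can later be
        # restored without retraining, e.g.
        #   best = joblib.load("classifier_rbm.pkl")
        #   y_pred = best.predict(x_test)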
                
    print "*********************Test*******************************"
    y_test_pre = best.predict(x_test)
    cm = confusion_matrix(y_test, y_test_pre)
    from map_confusion import plot_conf
    plot_conf(cm, range(le.classes_.size), 'RSDataset.png')
    
    from sklearn.metrics import classification_report
    with open('report_rbm.txt', 'w') as f:
        report = classification_report(y_test, y_test_pre, target_names=le.classes_)
        f.write(report)

    show(bow.rbm.components_, (20, 20))  # visualize the learned RBM components as 20x20 patches
Example #2
    # If the SPM transformer outputs a precomputed kernel matrix (kernel_hi),
    # use a precomputed-kernel SVM; otherwise train a linear SVM on the SPM features.
    if spm.kernel_hi:
        svm = SVC(kernel='precomputed', probability=True, random_state=42)
    else:
        svm = SVC(kernel='linear', probability=True, random_state=42)
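    # Note on the precomputed branch (assumption about the custom spm transformer):
    # with kernel='precomputed', SVC.fit expects an (n_train, n_train) kernel matrix
    # and predict expects an (n_test, n_train) matrix, so the 'spm' step is assumed
    # to emit those matrices whenever spm.kernel_hi is set.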
    best = Pipeline([('spm', spm), ('svm', svm)])
    best.fit(x_train, y_train)
    
    print "*********************Save*******************************"
    if method != 'sift':
        joblib.dump(best, "classifier_spm_%s.pkl"%method, compress=3)
            
print "*********************Test*******************************"
spm = best.named_steps['spm']
y_test_pre = best.predict(x_test)

from sklearn.metrics import accuracy_score
print "Accuracy:", accuracy_score(y_test, y_test_pre)
cm = confusion_matrix(y_test, y_test_pre)
np.save('RS_results/RSDataset_%s_%s_%s.npy'%(method, spm.clusters, spm.img_size), cm)
from map_confusion import plot_conf
plot_conf(conf_arr=cm, label_list=range(le.classes_.size), 
          norm=False, save_name='RS_results/RSDataset_%s_%s_%s.png'%(method, spm.clusters, spm.img_size))

from sklearn.metrics import classification_report
with open('RS_results/report_spm_%s_%s_%s.txt' % (method, spm.clusters, spm.img_size), 'w') as f:
    report = classification_report(y_test, y_test_pre, target_names=le.classes_)
    f.write(report)

#show(bow.rbm.components_,(20,20))
Example #3
        X.append(item)
    Y = le.transform(Y)
    X = np.vstack(X)
    all_x.append(X)
    all_y.append(Y)

all_x = np.vstack(all_x)
all_y = np.hstack(all_y)

cv = StratifiedShuffleSplit(y=all_y, n_iter=1, test_size=0.4)
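# Note: this uses the old sklearn.cross_validation API, where the splitter takes
# y directly and is itself iterable. A sketch of the modern equivalent (assumes
# scikit-learn >= 0.18; not part of the original script):
#   from sklearn.model_selection import StratifiedShuffleSplit
#   sss = StratifiedShuffleSplit(n_splits=1, test_size=0.4)
#   for train, test in sss.split(all_x, all_y):
#       ...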
for train, test in cv:
    train_x, train_y, test_x, test_y = all_x[train], all_y[train], all_x[test], all_y[test]
    clf = SVC(C=1, kernel='linear', probability=True, random_state=42)
    print "Training..."
    clf.fit(train_x, train_y)
    y_test_pre = clf.predict(test_x)

    print "Confusion matrix..."
    from sklearn.metrics import accuracy_score
    from sklearn.metrics import confusion_matrix
    print "Accuracy:", accuracy_score(test_y, y_test_pre)
    cm = confusion_matrix(test_y, y_test_pre)
    np.save('RS_results/RSDataset_%s_%s_%s.npy' % ('sift', 1000, 600), cm)
    from map_confusion import plot_conf
    plot_conf(conf_arr=cm, label_list=le.classes_.tolist(),
              norm=False, save_name='RS_results/RSDataset_%s_%s_%s.png' % ('sift', 1000, 600))

    from sklearn.metrics import classification_report
    with open('RS_results/report_spm_%s_%s_%s.txt' % ('sift', 1000, 600), 'w') as f:
        report = classification_report(test_y, y_test_pre, target_names=le.classes_)
        f.write(report)
print "*********************Train******************************"
best = gridCV.best_estimator_
best.fit(x_train, y_train)
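# Note (assumption: gridCV is a fitted sklearn GridSearchCV): with the default
# refit=True, gridCV.best_estimator_ is already refit on the data given to
# gridCV.fit(), so the explicit fit() above retrains it on x_train.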

print "*********************Save*******************************"
joblib.dump(best, "classifier.pkl", compress=3)
joblib.dump(gridCV, "grid_cv.pkl", compress=3)
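# The chosen hyperparameters and cross-validation score can be inspected later
# via gridCV.best_params_ and gridCV.best_score_ (standard GridSearchCV attributes).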

# best = joblib.load(prj_name + "/classifier_svc.pkl")
        
print "*********************Test*******************************"
y_test_pre = best.predict(x_test)
cm = confusion_matrix(y_test, y_test_pre)
from map_confusion import plot_conf
plot_conf(cm, range(le.classes_.size))
#print "confusion matrix..."
#print cm

from sklearn.metrics import classification_report
with open('report.txt', 'w') as f:
    report = classification_report(y_test, y_test_pre, target_names=le.classes_)
    f.write(report)
    
"""
print "*********************ROC********************************"
from sklearn.metrics import roc_curve, auc
from sklearn.preprocessing import label_binarize

print best
y_score = best.predict_proba(x_test)