Example no. 1
 def plot(self,
          mode='Learning Curves',
          source='Logs',
          logPath=None,
          title=''):
     ''' Description: A wrapper for the appropriate plot function found in
                      the Tools package. It handles any architecture-specific
                      details that the general plot function does not, such
                      as how many logs to read and plot in the same graph.
         Arguments:   mode (string): The plotting mode. Can only be
                      'Learning Curves' for the moment.
                      source (string): Where to read the plot data from;
                      either 'Logs' or 'History'.
                      logPath (string): Location of the log files to read.
                      If None, the logs in the default location are read.
                      title (string): A title for the plot. If left '', the
                      default title is formed as defined in self.defPlotTitle.
     '''
     # args is currently empty. It might hold plotting arguments in the
     # future; currently none are implemented.
     args = []
     if source == 'Logs':
         if logPath is None:
             logPath = os.path.join(
                 self.saveHisFolder,
                 self.sep.join((self.defSavePrefix, "log1.txt")))
         # Form the plot title and facilitate plotting
         title = self.descr + ' ' + mode
         self.plots[pidx.lrCurve] = utils.plot_classifier(filesPath=logPath,
                                                          title=title,
                                                          mode=mode)
     elif source == 'History':
         title = self.descr + ' ' + mode
         self.plots[pidx.lrCurve] = utils.plot_classifier(
             inReps=self.history, title=title, mode=mode)
         self.plots[pidx.predCurve] = utils.plot_classifier(
             inReps=self.predHistory,
             title=title,
             mode='Prediction History')
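
A hedged usage sketch for this wrapper (the instance name net and the example log path are assumptions, not from the original source):

# Plot learning curves from the in-memory training history
net.plot(mode='Learning Curves', source='History')
# Or read them from a specific log file instead of the default location
net.plot(mode='Learning Curves', source='Logs', logPath='logs/run1_log1.txt')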
Example no. 2
plt.show()

#-------------------------------------------------------------------------------
# Part 2 - Linear Models
#-------------------------------------------------------------------------------

# K(x_j, x_i) = x_j * x_i
# phi: [x1, x2] ==> [x1, x2]
def linear_kernel(x_j, x_i):
    return np.dot(x_j, x_i)

clf = lab4.KernelPerceptron(kernel=linear_kernel)
clf.fit(X, y_linear)

ax1 = plt.subplot(121)
utils.plot_classifier(ax1, X, y_linear, clf, title='Linear Perceptron')

clf = svm.SVC(kernel='linear')
clf.fit(X, y_linear)

ax2 = plt.subplot(122)
utils.plot_classifier(ax2, X, y_linear, clf, title='Linear SVM')

plt.show()

#-------------------------------------------------------------------------------
# Part 3 - Quadratic Models
#-------------------------------------------------------------------------------
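
The snippet ends at the Part 3 header. Following the pattern above, a quadratic model can be built from a polynomial kernel; a minimal sketch (the kernel definition and the y_quadratic labels are assumptions, not recovered from the original):

# K(x_j, x_i) = (x_j * x_i)^2
# phi: [x1, x2] ==> [x1^2, sqrt(2)*x1*x2, x2^2]
def quadratic_kernel(x_j, x_i):
    return np.dot(x_j, x_i)**2

clf = lab4.KernelPerceptron(kernel=quadratic_kernel)
clf.fit(X, y_quadratic)

ax1 = plt.subplot(121)
utils.plot_classifier(ax1, X, y_quadratic, clf, title='Quadratic Perceptron')

clf = svm.SVC(kernel='poly', degree=2)
clf.fit(X, y_quadratic)

ax2 = plt.subplot(122)
utils.plot_classifier(ax2, X, y_quadratic, clf, title='Quadratic SVM')

plt.show()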
Example no. 3
        y.append(data[-1])

X = np.array(X)
y = np.array(y)

## Create and Train Gaussian Naive Bayes Classifier
classifier_gaussian_nb = GaussianNB()
classifier_gaussian_nb.fit(X, y)

## Predict Y
y_pred = classifier_gaussian_nb.predict(X)

## Compute Accuracy of classifier
accuracy = 100.0 * (y == y_pred).sum() / X.shape[0]
print("Accuracy of the classifier =", round(accuracy, 2), "%")
plot_classifier(classifier_gaussian_nb, X, y)

# Create Training / Test Data (75% for Training, 25% for Testing)
# using model_selection.train_test_split
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=0.25, random_state=5)
classifier_gaussian_nb_new = GaussianNB()
classifier_gaussian_nb_new.fit(X_train, y_train)

## Test Accuracy of Model
y_test_pred = classifier_gaussian_nb_new.predict(X_test)
accuracy = 100.0 * (y_test == y_test_pred).sum() / X_test.shape[0]
print("Test accuracy of the classifier =", round(accuracy, 2), "%")
Example no. 4
  #reduce pixel features
  X = reducer.features(pixels)
  for c, v in testing.classMap.items():
    print("%s (%d): %d items in training set" % (c, v, np.sum(Y == v)))
 
  # PREDICT
  probs = np.array(model.predict_proba(X))
  numClasses = probs.shape[1]
  
  #visualize model if called for
  if options.visualize:
    y_graph = Y[testing.classes!="noclass"]
    x_graph = X[testing.classes!="noclass"]
    print "shapes: ", y_graph.shape, x_graph.shape
    plot_classifier(x_graph, y_graph, model, testing.classMap)

  #confusion matrix and accuracy
  Y_pred = probs.argmax(1)
  confMat = metrics.confusion_matrix(Y, Y_pred)
  printConfusionMatrix(confMat, testing.intToClass)
  
  #classification report?
  report = metrics.classification_report(Y, Y_pred, target_names=testing.intToClass)
  print(report)

  # print accuracy
  for c in range(numClasses):
    correct = float(np.sum(Y_pred[Y==c]==c))
    num = float(np.sum(Y==c))
    print "Class %s (%d) accuracy: %f"%(testing.intToClass[c], c, correct/num)
Example no. 5
                                                    random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting Decision Tree Classification to the Training set
from sklearn.tree import DecisionTreeClassifier
classifier = DecisionTreeClassifier(criterion='entropy', random_state=0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the Training and Test sets results
from utils import plot_classifier
plot_classifier(X_train,
                y_train,
                plot_title='Decision Tree Classification (Training set)',
                classifier=classifier)
plot_classifier(X_test,
                y_test,
                plot_title='Decision Tree Classification (Test set)',
                classifier=classifier)
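
The confusion matrix cm is computed above but never reported; a short follow-up sketch (not part of the original) to print it along with the test accuracy it implies:

print(cm)
# The diagonal holds the correct predictions, so trace/sum is the accuracy
print("Test accuracy: %.2f%%" % (100.0 * cm.trace() / cm.sum()))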
Example no. 6
import numpy as np
from sklearn import linear_model
import matplotlib.pyplot as plt
import utils
## Create Data Points
X = np.array([[4, 7], [3.5, 8], [3.1, 6.2], [0.5, 1], [1, 2], [1.2, 1.9],
              [6, 2], [5.7, 1.5], [5.4, 2.2]])
y = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])

## Initialize Logistic Classifier
## As we increase C, we increase the penalty for misclassification,
## so the decision boundary fits the training data more tightly.
classifier = linear_model.LogisticRegression(solver='liblinear', C=100)

## Train the classifier
classifier.fit(X, y)

## Draw DataPoints
utils.plot_classifier(classifier, X, y)
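
To make the effect of C visible, one could fit the classifier at several values and plot each decision boundary; a sketch reusing the same helper (the specific C values are arbitrary):

for C in (1, 100, 10000):
    clf = linear_model.LogisticRegression(solver='liblinear', C=C)
    clf.fit(X, y)
    utils.plot_classifier(clf, X, y)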
Example no. 7
                                                    test_size=0.25,
                                                    random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting Naive Bayes to the Training set
from sklearn.naive_bayes import GaussianNB
classifier = GaussianNB()
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the Training and Test sets results
from utils import plot_classifier
plot_classifier(X_train,
                y_train,
                plot_title='Naive Bayes (Training set)',
                classifier=classifier)
plot_classifier(X_test,
                y_test,
                plot_title='Naive Bayes (Test set)',
                classifier=classifier)
Example no. 8
                                                    test_size=0.25,
                                                    random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel='linear', random_state=0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the Training and Test sets results
from utils import plot_classifier
plot_classifier(X=X_train,
                y=y_train,
                plot_title="SVM (Training set)",
                classifier=classifier)
plot_classifier(X=X_test,
                y=y_test,
                plot_title="SVM (Test set)",
                classifier=classifier)
Example no. 9
                                                    random_state=0)

# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting Kernel SVM to the Training set
from sklearn.svm import SVC
classifier = SVC(kernel='rbf', random_state=0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the Training and Test sets results
from utils import plot_classifier
plot_classifier(X=X_train,
                y=y_train,
                plot_title='Kernel SVM (Training set)',
                classifier=classifier)
plot_classifier(X=X_test,
                y=y_test,
                plot_title='Kernel SVM (Test set)',
                classifier=classifier)
Example no. 10
    #reduce pixel features
    X = reducer.features(pixels)
    for c, v in testing.classMap.items():
        print("%s (%d): %d items in training set" % (c, v, np.sum(Y == v)))

    # PREDICT
    probs = np.array(model.predict_proba(X))
    numClasses = probs.shape[1]

    #visualize model if called for
    if options.visualize:
        y_graph = Y[testing.classes != "noclass"]
        x_graph = X[testing.classes != "noclass"]
        print "shapes: ", y_graph.shape, x_graph.shape
        plot_classifier(x_graph, y_graph, model, testing.classMap)

    #confusion matrix and accuracy
    Y_pred = probs.argmax(1)
    confMat = metrics.confusion_matrix(Y, Y_pred)
    printConfusionMatrix(confMat, testing.intToClass)

    #classification report?
    report = metrics.classification_report(Y,
                                           Y_pred,
                                           target_names=testing.intToClass)
    print(report)

    # print accuracy
    for c in range(numClasses):
        correct = float(np.sum(Y_pred[Y == c] == c))
Example no. 11
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=10,
                                    criterion='entropy',
                                    random_state=0)
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)

# Visualising the Training and Test sets results
from utils import plot_classifier
plot_classifier(X_train,
                y_train,
                plot_title='Random Forest Classification (Training set)',
                classifier=classifier)
plot_classifier(X_test,
                y_test,
                plot_title='Random Forest Classification (Test set)',
                classifier=classifier)
Example no. 12
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting K-NN to the Training set
from sklearn.neighbors import KNeighborsClassifier

classifier = KNeighborsClassifier(n_neighbors=5, metric='minkowski', p=2)
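# Note: metric='minkowski' with p=2 is the standard Euclidean distance.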
classifier.fit(X_train, y_train)

# Predicting the Test set results
y_pred = classifier.predict(X_test)

# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred)

# Visualising the Training and Test sets results
from utils import plot_classifier

plot_classifier(X_train,
                y_train,
                plot_title="K-NN (Training set)",
                classifier=classifier)
plot_classifier(X_test,
                y_test,
                plot_title="K-NN (Test set)",
                classifier=classifier)
Example no. 13
    reducer = PCAFeatures(n_comp=options.complexity)
    X = reducer.features(pixels)
    model = MultiSVM()
    model.fit(X,Y)  
  
  #Logistic Regression
  if options.modelType=="ilogreg":
    reducer = NaiveFeatures()
    X = reducer.features(pixels)
    model = MultiLogReg()
    model.fit(X,Y)
  if options.modelType=="ilogreg_lda":
    reducer = LDAFeatures(n_comp=options.complexity)
    X = reducer.features(pixels, Y) 
    model = MultiLogReg()
    model.fit(X,Y)

  # write the model out
  print("Model learned: %s" % model)
  print("saving model as", options.modelOut)
  out = open(options.modelOut, 'wb')
  pickle.dump(model, out)
  pickle.dump(reducer, out)
  pickle.dump(options.pixels, out)
  out.close()

  #visualize model if called for
  if options.visualize:
    plot_classifier(X,Y,model,training.classMap)
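
The three objects above are pickled in a fixed order, so a loader must read them back in the same order; a minimal sketch (not part of the original), using the same file:

with open(options.modelOut, 'rb') as f:
    model = pickle.load(f)    # the trained classifier
    reducer = pickle.load(f)  # the matching feature reducer
    pixels = pickle.load(f)   # the pixel configuration used at training time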

Example no. 14
# plt.show()

#-------------------------------------------------------------------------------
# Part 4.2 - Radial Kernel
#-------------------------------------------------------------------------------


# K(x_j, x_i) = exp(-||x_j - x_i||^2 / 2)
def radial_kernel(x_j, x_i):
    # The exponent must be negative; without the minus sign the kernel
    # grows with distance instead of decaying like an RBF.
    return np.exp(-np.linalg.norm(x_j - x_i)**2 / 2)


clf = lab4.KernelPerceptron(kernel=radial_kernel)
clf.fit(X, y_radial)

ax1 = plt.subplot(121)
utils.plot_classifier(ax1, X, y_radial, clf, title='RBF Perceptron')

clf = svm.SVC(kernel='rbf')
clf.fit(X, y_radial)

ax2 = plt.subplot(122)
utils.plot_classifier(ax2, X, y_radial, clf, title='RBF SVM')

plt.show()

#-------------------------------------------------------------------------------
# Part 5 - Angle Models
#-------------------------------------------------------------------------------

# def phi_angular(x):
#     mag = np.linalg.norm(x)
Example no. 15
    if options.modelType == "isvm_pca":
        reducer = PCAFeatures(n_comp=options.complexity)
        X = reducer.features(pixels)
        model = MultiSVM()
        model.fit(X, Y)

    #Logistic Regression
    if options.modelType == "ilogreg":
        reducer = NaiveFeatures()
        X = reducer.features(pixels)
        model = MultiLogReg()
        model.fit(X, Y)
    if options.modelType == "ilogreg_lda":
        reducer = LDAFeatures(n_comp=options.complexity)
        X = reducer.features(pixels, Y)
        model = MultiLogReg()
        model.fit(X, Y)

    # write the model out
    print("Model learned: %s" % model)
    print("saving model as", options.modelOut)
    out = open(options.modelOut, 'wb')
    pickle.dump(model, out)
    pickle.dump(reducer, out)
    pickle.dump(options.pixels, out)
    out.close()

    #visualize model if called for
    if options.visualize:
        plot_classifier(X, Y, model, training.classMap)
Example no. 16
# plt.show()

#-------------------------------------------------------------------------------
# Part 4.2 - Radial Kernel
#-------------------------------------------------------------------------------


# K(x_j, x_i) = exp(-||x_j - x_i||^2 / 2)
def radial_kernel(x_j, x_i):
    return np.exp(-np.sum(np.square(x_j - x_i)) / 2)


clf = lab4.KernelPerceptron(kernel=radial_kernel)
clf.fit(X, y_radial)

ax1 = plt.subplot(121)
utils.plot_classifier(ax1, X, y_radial, clf, title='RBF Perceptron')

clf = svm.SVC(kernel='rbf')
clf.fit(X, y_radial)

ax2 = plt.subplot(122)
utils.plot_classifier(ax2, X, y_radial, clf, title='RBF SVM')

plt.show()

#-------------------------------------------------------------------------------
# Part 5 - Angle Models
#-------------------------------------------------------------------------------

# def phi_angular(x):
#     d = math.sqrt(x[0]*x[0]+x[1]*x[1])