# Example #1
0
    if labels_train[ii] == 1
]

#### initial visualization
# Scatter the terrain data with bumpiness on the x-axis and grade on the
# y-axis, one color per class.
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color="b", label="fast")
# BUG FIX: the original passed (grade_slow, bumpy_slow), which swapped the
# axes for the slow class relative to the fast class and to the axis labels
# below. Both classes now plot as (bumpiness, grade).
plt.scatter(bumpy_slow, grade_slow, color="r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
################################################################################
### your code here!  name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary

# K-nearest-neighbours classifier: 100 neighbours, votes weighted by
# inverse distance to the query point.
from sklearn.neighbors import KNeighborsClassifier

algo = 'KNN - n_100 e w_dist'
print(algo)

clf = KNeighborsClassifier(n_neighbors=100, weights='distance')
clf.fit(features_train, labels_train)
print(clf.score(features_test, labels_test))

# prettyPicture is provided by the course helper module; if it is not in
# scope in this environment, just skip the decision-boundary plot.
try:
    prettyPicture(clf, features_test, labels_test, algo)
except NameError:
    pass

# %%
# Example #2
0
import numpy as np
import pylab as pl  # NOTE(review): unused in this cell; kept in case later cells rely on it

features_train, labels_train, features_test, labels_test = makeTerrainData()

##################### Decision Tree ############################
# (header fixed: this cell trains a DecisionTreeClassifier, not an SVM —
# the original "SVM" banner was a leftover from the course template)
from sklearn import tree
clf = tree.DecisionTreeClassifier()
clf.fit(features_train, labels_train)
print(clf.score(features_test, labels_test))

#### fit the classifier on the training features/labels and
#### visualize the resulting decision boundary
prettyPicture(clf, features_test, labels_test, 'DT')

#### store your predictions in a list named pred
pred = clf.predict(features_test)

from sklearn.metrics import accuracy_score
# Argument order fixed to sklearn's (y_true, y_pred) convention. Accuracy is
# symmetric so the value is unchanged, but the corrected order prevents a
# silent bug if this is ever swapped to a non-symmetric metric.
acc = accuracy_score(labels_test, pred)


def submitAccuracy():
    """Report the decision-tree test-set accuracy to the course grader.

    Returns the module-level ``acc`` computed in the cell above.
    """
    score = acc
    return score


# %%

# %%