Example #1
# `pca`, `X`, and `y` are created earlier in the original script (not shown here).
X = pca.transform(X, n_components=5)  # Reduce to 5 dimensions

# ..........................
#  TRAIN / TEST SPLIT
# ..........................
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# Rescale label for Adaboost to {-1, 1}
rescaled_y_train = 2 * y_train - np.ones(np.shape(y_train))
rescaled_y_test = 2 * y_test - np.ones(np.shape(y_test))
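
The rescaling above is just the affine map 2*y - 1: it sends label 0 to -1 and label 1 to +1, the label convention AdaBoost expects. A small self-contained check of the map and its inverse (the inverse is added here for illustration, it is not part of the original):

import numpy as np

y = np.array([0, 1, 1, 0])
y_rescaled = 2 * y - np.ones(np.shape(y))      # array([-1.,  1.,  1., -1.])

# Inverse map, in case {-1, 1} predictions need converting back to {0, 1}
y_back = ((y_rescaled + 1) / 2).astype(int)    # array([0, 1, 1, 0])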

# .......
#  SETUP
# .......
adaboost = Adaboost(n_clf=8)
naive_bayes = NaiveBayes()
knn = KNN(k=4)
logistic_regression = LogisticRegression()
mlp = MultilayerPerceptron(n_hidden=20)
perceptron = Perceptron()
decision_tree = DecisionTree()
random_forest = RandomForest(n_estimators=150)
support_vector_machine = SupportVectorMachine(C=1, kernel=rbf_kernel)

# ........
#  TRAIN
# ........
print "Training:"
print "\tAdaboost"
adaboost.fit(X_train, rescaled_y_train)
print "\tNaive Bayes"
naive_bayes.fit(X_train, y_train)
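
The snippet ends after fitting the first two models; the other classifiers set up above are presumably trained and scored the same way. As a hedged sketch (not part of the original), the evaluation could continue like this, assuming each classifier exposes a scikit-learn-style predict and that an accuracy_score helper is available:

# Hedged continuation: score the two fitted models on the held-out split.
# Adaboost was trained on labels in {-1, 1}, so it is compared against
# rescaled_y_test; Naive Bayes is compared against the original y_test.
y_pred_adaboost = adaboost.predict(X_test)
y_pred_nb = naive_bayes.predict(X_test)

print("Adaboost accuracy:", accuracy_score(rescaled_y_test, y_pred_adaboost))
print("Naive Bayes accuracy:", accuracy_score(y_test, y_pred_nb))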
Example #2

# no. of examples
m = iris.shape[0]

# no. of features
n = iris.shape[1] - 1

X = np.ones((m, n + 1))  # column 0 stays all ones (intercept term)
y = np.zeros((m, 1))     # placeholder; overwritten with the labels below

X[:, 1] = iris['X0'].values
X[:, 2] = iris['X1'].values
X[:, 3] = iris['X2'].values
X[:, 4] = iris['X3'].values

# Labels
y = iris['Y'].values

# Mean normalization (skip column 0, which is the all-ones intercept term)
for j in range(1, n + 1):
    X[:, j] = X[:, j] - X[:, j].mean()

X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=11)

model = KNN(k=5)
y_pred = model.predict(X_test, X_train, y_train)
print(y_pred)
print("Test Accuracy:", accuracy_score(y_pred, y_test) * 100, "%")
Example #3

# ..........................
#  TRAIN / TEST SPLIT
# ..........................
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5)
# Rescale label for Adaboost to {-1, 1}
rescaled_y_train = 2 * y_train - np.ones(np.shape(y_train))
rescaled_y_test = 2 * y_test - np.ones(np.shape(y_test))

# .......
#  SETUP
# .......
adaboost = Adaboost(n_clf=8)
naive_bayes = NaiveBayes()
knn = KNN(k=4)
logistic_regression = LogisticRegression()
mlp = MultilayerPerceptron(n_hidden=20)
perceptron = Perceptron()
decision_tree = DecisionTree()
random_forest = RandomForest(n_estimators=150)
support_vector_machine = SupportVectorMachine(C=1, kernel=rbf_kernel)

# ........
#  TRAIN
# ........
print "Training:"
print "\tAdaboost"
adaboost.fit(X_train, rescaled_y_train)
print "\tNaive Bayes"
naive_bayes.fit(X_train, y_train)
Example #4
def experiment(x_train, x_test, y_train, y_test):
    """Perform experiment.

        Args:
           x_train (ndarray): training data.
           x_test (ndarray): test data.
           y_train (ndarray): training labels.
           y_test (ndarray): test labels.

        Returns:
           None.
        """

    # Array of training sizes to plot the learning curves over.
    training_sizes = np.arange(20, int(len(x_train) * 0.9), 10)

    # K-Nearest Neighbor
    print('\n--------------------------')
    knn = KNN(k=1, weights='uniform', p=2)
    knn.experiment(x_train,
                   x_test,
                   y_train,
                   y_test,
                   cv=10,
                   y_lim=0.3,
                   n_neighbors_range=np.arange(1, 50, 2),
                   p_range=np.arange(1, 20),
                   weight_functions=['uniform', 'distance'],
                   train_sizes=training_sizes)

    # Support Vector Machines
    print('\n--------------------------')
    svm = SVM(c=1., kernel='rbf', degree=3, gamma=0.001, random_state=42)
    svm.experiment(x_train,
                   x_test,
                   y_train,
                   y_test,
                   cv=10,
                   y_lim=0.2,
                   C_range=[1, 5] + list(range(10, 100, 20)) +
                   list(range(100, 1000, 50)),
                   kernels=['linear', 'poly', 'rbf'],
                   gamma_range=np.logspace(-7, 0, 50),
                   poly_degrees=[2, 3, 4],
                   train_sizes=training_sizes)

    # Decision Trees
    print('\n--------------------------')
    dt = DecisionTree(max_depth=1, min_samples_leaf=1, random_state=42)
    dt.experiment(x_train,
                  x_test,
                  y_train,
                  y_test,
                  cv=10,
                  y_lim=0.1,
                  max_depth_range=list(range(1, 50)),
                  min_samples_leaf_range=list(range(1, 30)),
                  train_sizes=training_sizes)

    # AdaBoost
    print('\n--------------------------')
    boosted_dt = AdaBoost(n_estimators=50,
                          learning_rate=1.,
                          max_depth=3,
                          random_state=42)
    boosted_dt.experiment(x_train,
                          x_test,
                          y_train,
                          y_test,
                          cv=10,
                          y_lim=0.2,
                          max_depth_range=list(range(1, 30)),
                          n_estimators_range=[1, 3, 5, 8] +
                          list(range(10, 100, 5)) + list(range(100, 1000, 50)),
                          learning_rate_range=np.logspace(-6, 1, 50),
                          train_sizes=training_sizes)

    # Neural Networks
    print('\n--------------------------')
    nn = NeuralNetwork(alpha=0.01,
                       layer1_nodes=50,
                       layer2_nodes=30,
                       learning_rate=0.001,
                       max_iter=100)
    nn.experiment(x_train,
                  x_test,
                  y_train,
                  y_test,
                  cv=10,
                  y_lim=0.1,
                  alpha_range=np.logspace(-5, 1, 30),
                  learning_rate_range=np.logspace(-4, 0, 50),
                  train_sizes=training_sizes)
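
For completeness, experiment only needs a pre-split dataset; the wrapper classes (KNN, SVM, DecisionTree, AdaBoost, NeuralNetwork) are assumed to be defined elsewhere in the same project. A hedged usage sketch, with the dataset choice being an assumption of this example:

from sklearn import datasets
from sklearn.model_selection import train_test_split

if __name__ == '__main__':
    # Assumed data source; any feature matrix / label vector pair would do.
    data = datasets.load_digits()
    x_train, x_test, y_train, y_test = train_test_split(
        data.data, data.target, test_size=0.2, random_state=42)

    # Runs the full sweep of validation- and learning-curve experiments above.
    experiment(x_train, x_test, y_train, y_test)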