def main():
    """Train an Adaline on the standardized Iris features and plot its decision boundary.

    Relies on the project helpers loadIris / standardize / plotDecisionBoundary
    defined elsewhere in this package.
    """
    features, labels = loadIris()
    scaled = standardize(features)
    model = Adaline(learnRate=0.01, maxEpochs=20)
    model.fit(scaled, labels)
    plotDecisionBoundary(model, scaled, labels)
dataset = min_max_scaler.transform(dataset) mse = np.zeros((20, 1)) rmse = np.zeros((20, 1)) mean_time = 0 #cost = [] for i in range(20): X_train, X_test, Y_train, Y_test = train_test_split(dataset[:, :2], dataset[:, 2], test_size=0.80) Y_train = Y_train.reshape((Y_train.shape[0], 1)) Y_test = Y_test.reshape((Y_test.shape[0], 1)) start_time = time.clock() adaline = Adaline(eta=0.01, n_iter=200) adaline.fit(X_train, Y_train) Y_hat = adaline.predict(X_test) mean_time += (time.clock() - start_time) / 20 mse[i] = ((Y_test - Y_hat)**2).mean(axis=0) rmse[i] = mse[i]**(1. / 2) #cost.append(adaline.error) print("Mean execution time", mean_time) print("Standard Deviation (MSE)", np.std(mse, axis=0)) print("Standard Deviation (RMSE)", np.std(rmse, axis=0)) ''' fig, ax = plt.subplots() plt.plot(range(1, len(cost[0]) + 1), cost[0], "o-") plt.title("Cost") plt.xlabel("epoch")
# Build a synthetic 2-feature dataset and run Adaline realizations,
# recording MSE/RMSE per realization and plotting the decision boundaries.
# no_of_inputs = 1
X1 = np.random.uniform(0, 1, 100)
X2 = np.random.uniform(0, 1, 100)
dataset = Classifier.generate_dataset_two(3, 5, 7, X1, X2)
no_of_inputs = 2

# Per-realization error metrics.
metrics = {'mse': [], 'rmse': []}

for j in range(1):
    print("realization %d" % j)
    adaline = Adaline(0.01)
    train_X, train_y, test_X, test_y = adaline.train_test_split(dataset)
    adaline.fit(no_of_inputs, np.array(train_X), np.array(train_y))
    adaline.calculate_error(np.array(test_X), np.array(test_y))
    metrics['mse'].append(adaline.mse_)
    metrics['rmse'].append(adaline.rmse_)
    # adaline.plot_decision_boundaries_one(train_X, train_y, test_X, test_y, j)
    adaline.plot_decision_boundaries_two(train_X, train_y, test_X, test_y, j, dataset)

# Summary statistics across realizations.
# print('mean square error: {}'.format(metrics['mse']))
print('root mean square error: {}'.format(metrics['rmse']))
print('mean mse: {}'.format(np.mean(metrics['mse'])))
print('mean rmse: {}'.format(np.mean(metrics['rmse'])))
print('std mse: {}'.format(np.std(metrics['mse'])))
print('std rmse: {}'.format(np.std(metrics['rmse'])))

Adaline.show_plot_decision_boundaries()
# Clean and standardize the raw dataset, then train one Adaline per
# cross-validation fold, plotting each fold's learning curve on one figure.
df = pre.clean_ugly_dataset(filename)
df = pre.estandarizar_datos(df)

cross = cross_v(df, k)
clf = Adaline(alpha, epochs)
#X_train, y_train, X_test, y_real = next(cross)
#istory_ = clf.fit(X_train, y_train)

for fold in range(k):
    X_train, y_train, X_test, y_real = next(cross)
    #print(X_train)
    print()
    print('Fold {}'.format(fold))
    print()
    history_ = clf.fit(X_train, y_train)
    plot_error(history_, epochs, fold)

# Single aggregate figure: one learning curve per fold.
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Mean Squared Error (mse)')
plt.grid()
plt.title('Adaline learning curve with\nK-fold Cross V, k = 5, alpha=0.01')
plt.show()

w = clf.w
X_train, y, X_test, y_real = build_test(df)
#print(w)

rates = [0.1, 0.01, 0.001, 0.0001]
#cross = cross_v(df, k)
from init_obj import create_universe, prepare_data, show_universe, test_model
from adaline import Adaline
from adalineSGD import AdalineSGD
import numpy as np

# Build the synthetic universe and encode class 'A' as +1, everything else -1.
groups = create_universe()
X, Y = prepare_data(groups)
# show_universe(groups)
y = np.where(Y == 'A', 1, -1)

# Batch gradient-descent Adaline.
model = Adaline(0.0001, 50)
model.fit(X, y)

# stochastic
model2 = AdalineSGD(0.0001, 50)
model2.fit(X, y)

# Compare both trained models on the same data.
test_model(X, model, model2)
from adaline import Adaline
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the first 100 Iris rows (setosa then versicolor) from the local CSV.
df = pd.read_csv('/home/erick/Repo/Machine Learning/Adaline/iris.csv', header=None)

# Column 4 holds the species name; map setosa -> -1, versicolor -> +1.
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
# Features: columns 0 and 2 (sepal length, petal length).
X = df.iloc[0:100, [0, 2]].values

adn = Adaline()
adn.fit(X, y)

# Classify one hand-picked example of each class.
setosa_example = [5.2, 1.8]
versicolor_example = [6.4, 4.6]
# FIX: `print x` is Python 2 statement syntax and a SyntaxError under
# Python 3 (used by the rest of this codebase); use the print() function.
print(adn.predict(setosa_example))
print(adn.predict(versicolor_example))
# Labels from column 4 of the first 100 Iris rows.
y = df.iloc[0:100, 4].values
# SETTING LABELS OF SETOSA = -1 AND VERSICOLOR = 1
y = np.where(y == 'Iris-setosa', -1, 1)
# Features: columns 2 and 3.
X = df.iloc[0:100, [2, 3]].values

# ### Classifing the data using adaline gradient descent
# In[10]:
adaline = Adaline()
adaline.fit(X, y)

# ### Plotting the data and the decision boundary
# In[11]:
# First 50 rows are setosa, next 50 are versicolor.
plt.scatter(X[0:50, 0], X[0:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:, 0], X[50:, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')

# DECISION BOUNDARY
W = adaline.weight_