import matplotlib.pyplot as plt

# Adaline and plot_decision_regions are helpers defined elsewhere in this project.
def PlotModel(learning_rate, X, y, iterations_count):
    """Fit an Adaline model, then plot its decision regions and cost curve."""
    model = Adaline(iterations_count=iterations_count, learning_rate=learning_rate)
    model.fit(X, y)

    plot_decision_regions(X, y, model)
    plt.xlabel('sepal length [standardized]')
    plt.ylabel('petal length [standardized]')
    plt.title('Adaline - Learning rate %s' % learning_rate)
    plt.show()

    # Cost per epoch shows whether the chosen learning rate converges.
    plt.plot(range(1, len(model.cost_) + 1), model.cost_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Error')
    plt.show()
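# `Adaline` is assumed to come from a local module. A minimal sketch consistent
# with the interface used above (iterations_count, learning_rate, fit, cost_),
# as an illustration rather than the project's actual implementation:
import numpy as np

class Adaline:
    """ADAptive LInear NEuron trained by batch gradient descent."""

    def __init__(self, iterations_count=50, learning_rate=0.01, seed=1):
        self.iterations_count = iterations_count
        self.learning_rate = learning_rate
        self.seed = seed

    def fit(self, X, y):
        rgen = np.random.RandomState(self.seed)
        self.w_ = rgen.normal(scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.iterations_count):
            output = self.net_input(X)  # linear activation
            errors = y - output
            self.w_[1:] += self.learning_rate * X.T.dot(errors)
            self.w_[0] += self.learning_rate * errors.sum()
            self.cost_.append((errors ** 2).sum() / 2.0)  # sum-of-squares cost
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)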
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

# X and y are the iris features and labels loaded as in the other examples.
print('Class labels:', np.unique(y))
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1)
print('Labels counts in y:', np.bincount(y))
print('Labels counts in y_train:', np.bincount(y_train))
print('Labels counts in y_test:', np.bincount(y_test))

# Standardize features using statistics estimated on the training set only.
standardScaler = StandardScaler()
standardScaler.fit(X_train)
X_train_std = standardScaler.transform(X_train)
X_test_std = standardScaler.transform(X_test)

model = SVC(kernel='rbf', C=5., random_state=1, gamma=1)
model.fit(X_train_std, y_train)

X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined,
                      classifier=model, test_idx=range(105, 150))
plt.xlabel('sepal length [standardized]')
plt.ylabel('petal length [standardized]')
plt.legend(loc='upper left')
plt.show()
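# `plot_decision_regions` is used throughout these examples but defined
# elsewhere in the project. A sketch consistent with the calls above
# (positional X, y, classifier plus an optional test_idx) might be:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap

def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # Evaluate the classifier on a grid spanning the two feature axes.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # Overlay the samples, one marker per class.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.8,
                    c=colors[idx], marker=markers[idx], label=cl,
                    edgecolor='black')

    # Circle the held-out test samples, if given.
    if test_idx:
        X_test = X[test_idx, :]
        plt.scatter(X_test[:, 0], X_test[:, 1], c='none', edgecolor='black',
                    alpha=1.0, linewidth=1, marker='o', s=100, label='test set')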
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]  # petal length and petal width
y = iris.target
print('Class labels:', np.unique(y))

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=1)
print('Labels counts in y:', np.bincount(y))
print('Labels counts in y_train:', np.bincount(y_train))
print('Labels counts in y_test:', np.bincount(y_test))

standardScaler = StandardScaler()
standardScaler.fit(X_train)
X_train_std = standardScaler.transform(X_train)
X_test_std = standardScaler.transform(X_test)

# The custom LogisticRegresion implementation is binary, so restrict the
# training data to classes 0 and 1.
X_train_01_subset = X_train[(y_train == 0) | (y_train == 1)]
y_train_01_subset = y_train[(y_train == 0) | (y_train == 1)]

model = LogisticRegresion(iterations_count=1000, learning_rate=0.05, seed=1)
model.fit(X_train_01_subset, y_train_01_subset)

plot_decision_regions(X=X_train_01_subset, y=y_train_01_subset, classifier=model)
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()
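# `LogisticRegresion` (spelled as in the call above) is a project-local class,
# not sklearn's. Assuming plain gradient-descent logistic regression with the
# constructor parameters used above, a minimal sketch could be:
import numpy as np

class LogisticRegresion:
    """Binary logistic regression trained by batch gradient descent."""

    def __init__(self, iterations_count=100, learning_rate=0.05, seed=1):
        self.iterations_count = iterations_count
        self.learning_rate = learning_rate
        self.seed = seed

    def fit(self, X, y):
        rgen = np.random.RandomState(self.seed)
        self.w_ = rgen.normal(scale=0.01, size=1 + X.shape[1])
        self.cost_ = []
        for _ in range(self.iterations_count):
            output = self._sigmoid(self.net_input(X))
            errors = y - output
            self.w_[1:] += self.learning_rate * X.T.dot(errors)
            self.w_[0] += self.learning_rate * errors.sum()
            # Negative log-likelihood cost for the 0/1 labels.
            cost = -y.dot(np.log(output)) - (1 - y).dot(np.log(1 - output))
            self.cost_.append(cost)
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def _sigmoid(self, z):
        return 1. / (1. + np.exp(-np.clip(z, -250, 250)))

    def predict(self, X):
        # net_input >= 0 is equivalent to sigmoid(net_input) >= 0.5.
        return np.where(self.net_input(X) >= 0.0, 1, 0)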
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

df = pd.read_csv(
    'https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
    header=None)
print(df.tail())

# The first 100 rows are setosa followed by versicolor; map the string labels
# to -1/1 for the perceptron.
y = df.iloc[:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
X = df.iloc[:100, [0, 2]].values  # sepal length and petal length

plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
plt.scatter(X[50:, 0], X[50:, 1], color='blue', marker='x', label='versicolor')
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.show()

model = Perceptron(iterations_count=10)
model.fit(X, y)

# Misclassifications per epoch: the count drops to zero once the perceptron
# separates the two classes.
plt.plot(range(1, len(model.errors_) + 1), model.errors_, marker='o')
plt.xlabel('Epochs')
plt.ylabel('Number of updates')
plt.show()

plot_decision_regions(X, y, classifier=model)
plt.xlabel('sepal length [cm]')
plt.ylabel('petal length [cm]')
plt.legend(loc='upper left')
plt.show()
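# `Perceptron` here is the project's own class rather than sklearn's. A minimal
# Rosenblatt perceptron matching the interface used above (iterations_count,
# fit, errors_) might look like this sketch:
import numpy as np

class Perceptron:
    """Perceptron classifier with per-sample weight updates."""

    def __init__(self, iterations_count=10, learning_rate=0.01, seed=1):
        self.iterations_count = iterations_count
        self.learning_rate = learning_rate
        self.seed = seed

    def fit(self, X, y):
        rgen = np.random.RandomState(self.seed)
        self.w_ = rgen.normal(scale=0.01, size=1 + X.shape[1])
        self.errors_ = []
        for _ in range(self.iterations_count):
            errors = 0
            for xi, target in zip(X, y):
                # Update only when the sample is misclassified.
                update = self.learning_rate * (target - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)  # misclassifications this epoch
        return self

    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, -1)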
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression

# Trace how the weights shrink as the inverse regularization strength C
# decreases.
weights, params = [], []
for c in np.arange(-5, 5):
    model = LogisticRegression(C=10.**c, random_state=1)
    model.fit(X_train_std, y_train)
    weights.append(model.coef_[1])  # weight vector of the second class (OvR)
    params.append(10.**c)           # the C value used for this fit
weights = np.array(weights)

plt.plot(params, weights[:, 0], label='petal length')
plt.plot(params, weights[:, 1], linestyle='--', label='petal width')
plt.xlabel('C')
plt.ylabel('weight coefficient')
plt.legend(loc='upper left')
plt.xscale('log')
plt.show()

X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X_combined_std, y_combined, classifier=model,
                      test_idx=range(int(X.shape[0] * 0.7), X.shape[0]))
plt.xlabel('petal length [standardized]')
plt.ylabel('petal width [standardized]')
plt.legend(loc='upper left')
plt.show()

# liblinear supports the l1 penalty; sklearn's default lbfgs solver does not.
model = LogisticRegression(penalty='l1', C=1., solver='liblinear')
wine_model_testing(model)
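# wine_model_testing is a project-specific helper that is not shown here.
# Hypothetical check, assuming the standardized split from above: the l1
# penalty drives some coefficients to exactly zero, unlike l2.
l1_model = LogisticRegression(penalty='l1', C=1., solver='liblinear')
l1_model.fit(X_train_std, y_train)
print('Nonzero coefficients:', np.count_nonzero(l1_model.coef_))
print(l1_model.coef_)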